max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
exercicios_curso_em_video/Exercicio 97.py | Sposigor/Caminho_do_Python | 1 | 6615651 | <reponame>Sposigor/Caminho_do_Python
def escreva(x):
y = len(x) + 4
print('~' * y)
print(f' {x}')
print('~' * y)
escreva("Oi")
escreva('Um bom exemplo de uma função para determinada situação') | def escreva(x):
y = len(x) + 4
print('~' * y)
print(f' {x}')
print('~' * y)
escreva("Oi")
escreva('Um bom exemplo de uma função para determinada situação') | none | 1 | 3.474569 | 3 | |
taky/cli/__init__.py | skadakar/taky | 0 | 6615652 | from .build_client_cmd import build_client, build_client_reg
from .setup_taky_cmd import setup_taky, setup_taky_reg
from .systemd_cmd import systemd, systemd_reg
| from .build_client_cmd import build_client, build_client_reg
from .setup_taky_cmd import setup_taky, setup_taky_reg
from .systemd_cmd import systemd, systemd_reg
| none | 1 | 0.961358 | 1 | |
turntable/hue.py | correl/turntable | 0 | 6615653 | import audioop
import logging
from multiprocessing import Process, Queue
import os
import queue
import time
from typing import Any, Optional
import requests
from turntable.events import *
from turntable.models import PCM
logger = logging.getLogger(__name__)
class HueError(Exception):
...
def hue_response(response: requests.Response) -> Any:
try:
response.raise_for_status()
result = response.json()
try:
raise HueError(response.json()[0]["error"]["description"])
except (IndexError, KeyError, TypeError):
return result
except requests.HTTPError as e:
raise HueError(f"http error: {e}") from e
except ValueError:
raise HueError("invalid response")
def hue_error(response: Any) -> Optional[str]:
try:
return response.json()[0]["error"]["description"]
except ValueError:
return "invalid response"
except (IndexError, KeyError, TypeError):
return None
class Hue(Process):
def __init__(
self,
pcm_in: "Queue[PCM]",
events: "Queue[Event]",
host: str,
username: str,
light: str,
):
super().__init__()
self.pcm_in = pcm_in
self.events = events
self.host = host
self.username = username
self.light = light
self.light_id = None
self.light_state = dict()
self.active = False
try:
lights = hue_response(
requests.get(f"http://{self.host}/api/{self.username}/lights")
)
except HueError as error:
logger.warn(f"Error fetching lights: %s", error)
return
try:
self.light_id, self.light_state = next(
filter(
lambda i: i[1]["name"].lower() == self.light.lower(), lights.items()
)
)
except StopIteration:
logger.warn(f"Could not find a light named '%s'", light)
return
logger.info("Hue ready")
def run(self) -> None:
if not self.light_id:
logger.warn("No light identified, not starting Hue")
return
logger.debug("Starting Hue")
max_peak = 3000
audio = None
stopping = False
while not stopping:
try:
while event := self.events.get(False):
if isinstance(event, StartedPlaying):
try:
self.light_state = hue_response(
requests.get(
f"http://{self.host}/api/{self.username}/lights/{self.light_id}"
)
)
logger.debug("Stored light state")
except HueError as e:
logger.warn(f"Error loading current light state: %s", e)
self.active = True
elif isinstance(event, StoppedPlaying):
self.active = False
original_brightness = self.light_state.get("state", {}).get(
"bri"
)
if original_brightness is not None:
try:
hue_response(
requests.put(
f"http://{self.host}/api/{self.username}/lights/{self.light_id}/state",
json={"bri": original_brightness},
)
)
logger.info(
"Restored %s to previous brightness", self.light
)
except HueError as e:
logger.warn(f"Error restoring light brightness: %s", e)
elif isinstance(event, Exit):
stopping = True
except queue.Empty:
...
if stopping:
break
try:
while sample := self.pcm_in.get(False):
audio = sample
except queue.Empty:
...
if audio and self.active:
rms = audioop.rms(audio.raw, audio.channels)
peak = audioop.max(audio.raw, audio.channels)
max_peak = max(peak, max_peak)
brightness = int(peak / max_peak * 255)
logger.debug(f"Brightness: {brightness}")
requests.put(
f"http://{self.host}/api/{self.username}/lights/{self.light_id}/state",
json={"bri": brightness, "transitiontime": 1},
)
time.sleep(0.1)
logger.info("Hue stopped")
| import audioop
import logging
from multiprocessing import Process, Queue
import os
import queue
import time
from typing import Any, Optional
import requests
from turntable.events import *
from turntable.models import PCM
logger = logging.getLogger(__name__)
class HueError(Exception):
...
def hue_response(response: requests.Response) -> Any:
try:
response.raise_for_status()
result = response.json()
try:
raise HueError(response.json()[0]["error"]["description"])
except (IndexError, KeyError, TypeError):
return result
except requests.HTTPError as e:
raise HueError(f"http error: {e}") from e
except ValueError:
raise HueError("invalid response")
def hue_error(response: Any) -> Optional[str]:
try:
return response.json()[0]["error"]["description"]
except ValueError:
return "invalid response"
except (IndexError, KeyError, TypeError):
return None
class Hue(Process):
def __init__(
self,
pcm_in: "Queue[PCM]",
events: "Queue[Event]",
host: str,
username: str,
light: str,
):
super().__init__()
self.pcm_in = pcm_in
self.events = events
self.host = host
self.username = username
self.light = light
self.light_id = None
self.light_state = dict()
self.active = False
try:
lights = hue_response(
requests.get(f"http://{self.host}/api/{self.username}/lights")
)
except HueError as error:
logger.warn(f"Error fetching lights: %s", error)
return
try:
self.light_id, self.light_state = next(
filter(
lambda i: i[1]["name"].lower() == self.light.lower(), lights.items()
)
)
except StopIteration:
logger.warn(f"Could not find a light named '%s'", light)
return
logger.info("Hue ready")
def run(self) -> None:
if not self.light_id:
logger.warn("No light identified, not starting Hue")
return
logger.debug("Starting Hue")
max_peak = 3000
audio = None
stopping = False
while not stopping:
try:
while event := self.events.get(False):
if isinstance(event, StartedPlaying):
try:
self.light_state = hue_response(
requests.get(
f"http://{self.host}/api/{self.username}/lights/{self.light_id}"
)
)
logger.debug("Stored light state")
except HueError as e:
logger.warn(f"Error loading current light state: %s", e)
self.active = True
elif isinstance(event, StoppedPlaying):
self.active = False
original_brightness = self.light_state.get("state", {}).get(
"bri"
)
if original_brightness is not None:
try:
hue_response(
requests.put(
f"http://{self.host}/api/{self.username}/lights/{self.light_id}/state",
json={"bri": original_brightness},
)
)
logger.info(
"Restored %s to previous brightness", self.light
)
except HueError as e:
logger.warn(f"Error restoring light brightness: %s", e)
elif isinstance(event, Exit):
stopping = True
except queue.Empty:
...
if stopping:
break
try:
while sample := self.pcm_in.get(False):
audio = sample
except queue.Empty:
...
if audio and self.active:
rms = audioop.rms(audio.raw, audio.channels)
peak = audioop.max(audio.raw, audio.channels)
max_peak = max(peak, max_peak)
brightness = int(peak / max_peak * 255)
logger.debug(f"Brightness: {brightness}")
requests.put(
f"http://{self.host}/api/{self.username}/lights/{self.light_id}/state",
json={"bri": brightness, "transitiontime": 1},
)
time.sleep(0.1)
logger.info("Hue stopped")
| none | 1 | 2.39992 | 2 | |
PyHEADTAIL/rfq/rfq.py | fsoubelet/PyHEADTAIL | 0 | 6615654 | <filename>PyHEADTAIL/rfq/rfq.py<gh_stars>0
"""
This module contains the Python implementation of a pillbox-cavity RF
quadrupole - referred to as the RFQ - as it was proposed by <NAME> in 'Radio frequency quadrupole for Landau damping in
accelerators', Phys. Rev. Special Topics - Accelerators and Beams 17,
011001 (2014) [1]. Similar to a 'Landau' octupole magnet, this device
is intended to introduce an incoherent tune spread such that Landau
damping can prevent the growth of transverse collective instabilities.
The formulae that are used are based on [1] and make use of the thin-
lens approximation. On the one hand, the RFQ introduces a longitudinal
spread of the betatron frequency and on the other hand, a transverse
spread of the synchrotron frequency.
The effect in the transverse plane is modelled in two different
ways
(I) RFQ as a detuner acting directly on each particles' betatron
tunes,
(II) RFQ as a localized kick acting on each particles' momenta xp
and yp.
The effect in the longitudinal plane is always modelled as a localized
kick, i.e. a change in a particle's normalized momentum dp. For model
(II), the incoherent betatron detuning is not applied directly, but is
a consequence of the change in momenta xp and yp.
@author <NAME>, <NAME>
@date July, 10th 2014
@brief Python implementation of a pillbox cavity RF quadrupole for
Landau damping.
@copyright CERN
"""
from abc import ABCMeta, abstractmethod
from scipy.constants import c, e
import numpy as np
import PyHEADTAIL.general.pmath as pm
from PyHEADTAIL.trackers.detuners import DetunerCollection
class RFQTransverseDetuner(DetunerCollection):
"""Collection class to contain/manage the segment-wise defined
RFQ elements RFQTransverseDetunerSegment acting on the
betatron tunes (detuner model of the RFQ). This is a pure
Python class and it derives from the DetunerCollection class
defined in the module PyHEADTAIL.trackers.detuners.
"""
def __init__(self, v_2, omega, phi_0, beta_x_RFQ, beta_y_RFQ):
"""An RFQ element is fully characterized by the parameters
v_2: quadrupolar expansion coefficient of the accelerating
voltage (~strength of the RFQ), in [V/m^2]. One-turn
value.
omega: Angular frequency of the RF wave, in [rad/s].
phi_0: Constant phase offset wrt. bunch center (z=0), in
[rad].
beta_x_RFQ and beta_y_RFQ are the beta functions at the
position of the RFQ, although in the detuner model of the RFQ,
the RFQ should not actually be understood as being localized.
"""
self.v_2 = v_2
self.omega = omega
self.phi_0 = phi_0
self.beta_x_RFQ = beta_x_RFQ
self.beta_y_RFQ = beta_y_RFQ
self.segment_detuners = []
def generate_segment_detuner(self, dmu_x, dmu_y, **kwargs):
"""Instantiate a RFQTransverseSegmentDetuner for the
specified segment of the accelerator ring.
Note that the bare betatron
phase advances over the current segment, dmu_x and dmu_y, are
given as relative values, i.e. in units of the overall phase
advance around the whole accelerator (the betatron tune).
The method is called by the TransverseMap object which manages
the creation of a detuner for every defined segment.
"""
dapp_xz = self.beta_x_RFQ * self.v_2 * e / (2.*np.pi*self.omega)
dapp_yz = -self.beta_y_RFQ * self.v_2 * e / (2.*np.pi*self.omega)
dapp_xz *= dmu_x
dapp_yz *= dmu_y
detuner = RFQTransverseDetunerSegment(
dapp_xz, dapp_yz, self.omega, self.phi_0)
self.segment_detuners.append(detuner)
class RFQTransverseDetunerSegment(object):
"""Python implementation of the RFQ element acting directly on the
particles' betatron tunes (i.e. RFQ detuner model).
"""
def __init__(self, dapp_xz, dapp_yz, omega, phi_0):
"""Creates an instance of the RFQTransverseDetunerSegment
class. The RFQ is characterized by
omega: Angular frequency of the RF wave, in [rad/s].
phi_0: Constant phase offset wrt. bunch center (z=0), in
[rad].
dapp_xz: Strength of detuning in the horizontal plane, scaled
to the relative bare betatron phase advance in x.
dapp_yz: Strength of detuning in the vertical plane, scaled
to the relative bare betatron phase advance in y.
"""
self.dapp_xz = dapp_xz
self.dapp_yz = dapp_yz
self.omega = omega
self.phi_0 = phi_0
def detune(self, beam):
""" Calculates for each particle its betatron detuning
dQ_x, dQ_y according to formulae taken from [1] (see
above).
dQ_x = dapp_xz / p * \cos(omega / (beta c) z + phi_0)
dQ_y = dapp_yz / p * \cos(omega / (beta c) z + phi_0)
with
dapp_xz = beta_x_RFQ * v_2 * e / (2 Pi * omega)
dapp_yz = -beta_y_RFQ * v_2 * e / (2 Pi * omega)
and p the particle momentum p = (1 + dp) p0.
(Probably, it would make sense to approximate p by p0 for better
performance). """
p = (1. + beam.dp) * beam.p0
cos_term = pm.cos(self.omega / (beam.beta * c) * beam.z + self.phi_0) / p
dQ_x = self.dapp_xz * cos_term
dQ_y = self.dapp_yz * cos_term
return dQ_x, dQ_y
class RFQKick(object, metaclass=ABCMeta):
"""Python base class to describe the RFQ element in the
localized kick model for both the transverse and the
longitudinal coordinates.
"""
@abstractmethod
def track(self, beam):
pass
class RFQTransverseKick(RFQKick):
"""Python implementation of the RFQ element acting on the
particles' transverse coordinates (i.e. localized kick
model).
"""
def __init__(self, v_2, omega, phi_0):
"""An RFQ element is fully characterized by the parameters
v_2: quadrupolar expansion coefficient of the
accelerating voltage (~strength of the RFQ), in
[V/m^2].
omega: Angular frequency of the RF wave, in [rad/s].
phi_0: Constant phase offset wrt. bunch center (z=0), in
[rad].
"""
self.v_2 = v_2
self.omega = omega
self.phi_0 = phi_0
def track(self, beam):
"""The formula that describes the transverse kick experienced
by an ultra-relativistic particle traversing the RFQ
longitudinally is based on the thin-lens approximation
\Delta p_x = -x*(2 e v_2 / omega) *
cos(omega z / (beta c) + phi_0),
\Delta p_y = y*(2 e v_2 / omega) *
cos(omega z / (beta c) + phi_0).
"""
cos_term = (2. * e * self.v_2 / self.omega *
pm.cos(self.omega / (beam.beta * c) * beam.z + self.phi_0))
beam.xp += -beam.x * cos_term / beam.p0
beam.yp += beam.y * cos_term / beam.p0
class RFQLongitudinalKick(RFQKick):
"""Python implementation of the RFQ element acting on the
particles' longitudinal coordinate dp."""
def __init__(self, v_2, omega, phi_0):
"""An RFQ element is fully characterized by the parameters
v_2: quadrupolar expansion coefficient of the
accelerating voltage (~strength of the RFQ), in
[V/m^2].
omega: Angular frequency of the RF wave, in [rad/s].
phi_0: Constant phase offset wrt. bunch center (z=0), in
[rad].
"""
self.v_2 = v_2
self.omega = omega
self.phi_0 = phi_0
def track(self, beam):
"""The formula used to describe the longitudinal kick is given
by
\Delta p_z = -(x^2 - y^2) (e v_2 / (beta c)) *
sin(omega z / (beta c) + phi_0).
"""
sin_term = (e * self.v_2 / (beam.beta * c) *
pm.sin(self.omega / (beam.beta * c) * beam.z + self.phi_0))
beam.dp += -(beam.x*beam.x - beam.y*beam.y) * sin_term / beam.p0
| <filename>PyHEADTAIL/rfq/rfq.py<gh_stars>0
"""
This module contains the Python implementation of a pillbox-cavity RF
quadrupole - referred to as the RFQ - as it was proposed by <NAME> in 'Radio frequency quadrupole for Landau damping in
accelerators', Phys. Rev. Special Topics - Accelerators and Beams 17,
011001 (2014) [1]. Similar to a 'Landau' octupole magnet, this device
is intended to introduce an incoherent tune spread such that Landau
damping can prevent the growth of transverse collective instabilities.
The formulae that are used are based on [1] and make use of the thin-
lens approximation. On the one hand, the RFQ introduces a longitudinal
spread of the betatron frequency and on the other hand, a transverse
spread of the synchrotron frequency.
The effect in the transverse plane is modelled in two different
ways
(I) RFQ as a detuner acting directly on each particles' betatron
tunes,
(II) RFQ as a localized kick acting on each particles' momenta xp
and yp.
The effect in the longitudinal plane is always modelled as a localized
kick, i.e. a change in a particle's normalized momentum dp. For model
(II), the incoherent betatron detuning is not applied directly, but is
a consequence of the change in momenta xp and yp.
@author <NAME>, <NAME>
@date July, 10th 2014
@brief Python implementation of a pillbox cavity RF quadrupole for
Landau damping.
@copyright CERN
"""
from abc import ABCMeta, abstractmethod
from scipy.constants import c, e
import numpy as np
import PyHEADTAIL.general.pmath as pm
from PyHEADTAIL.trackers.detuners import DetunerCollection
class RFQTransverseDetuner(DetunerCollection):
"""Collection class to contain/manage the segment-wise defined
RFQ elements RFQTransverseDetunerSegment acting on the
betatron tunes (detuner model of the RFQ). This is a pure
Python class and it derives from the DetunerCollection class
defined in the module PyHEADTAIL.trackers.detuners.
"""
def __init__(self, v_2, omega, phi_0, beta_x_RFQ, beta_y_RFQ):
"""An RFQ element is fully characterized by the parameters
v_2: quadrupolar expansion coefficient of the accelerating
voltage (~strength of the RFQ), in [V/m^2]. One-turn
value.
omega: Angular frequency of the RF wave, in [rad/s].
phi_0: Constant phase offset wrt. bunch center (z=0), in
[rad].
beta_x_RFQ and beta_y_RFQ are the beta functions at the
position of the RFQ, although in the detuner model of the RFQ,
the RFQ should not actually be understood as being localized.
"""
self.v_2 = v_2
self.omega = omega
self.phi_0 = phi_0
self.beta_x_RFQ = beta_x_RFQ
self.beta_y_RFQ = beta_y_RFQ
self.segment_detuners = []
def generate_segment_detuner(self, dmu_x, dmu_y, **kwargs):
"""Instantiate a RFQTransverseSegmentDetuner for the
specified segment of the accelerator ring.
Note that the bare betatron
phase advances over the current segment, dmu_x and dmu_y, are
given as relative values, i.e. in units of the overall phase
advance around the whole accelerator (the betatron tune).
The method is called by the TransverseMap object which manages
the creation of a detuner for every defined segment.
"""
dapp_xz = self.beta_x_RFQ * self.v_2 * e / (2.*np.pi*self.omega)
dapp_yz = -self.beta_y_RFQ * self.v_2 * e / (2.*np.pi*self.omega)
dapp_xz *= dmu_x
dapp_yz *= dmu_y
detuner = RFQTransverseDetunerSegment(
dapp_xz, dapp_yz, self.omega, self.phi_0)
self.segment_detuners.append(detuner)
class RFQTransverseDetunerSegment(object):
"""Python implementation of the RFQ element acting directly on the
particles' betatron tunes (i.e. RFQ detuner model).
"""
def __init__(self, dapp_xz, dapp_yz, omega, phi_0):
"""Creates an instance of the RFQTransverseDetunerSegment
class. The RFQ is characterized by
omega: Angular frequency of the RF wave, in [rad/s].
phi_0: Constant phase offset wrt. bunch center (z=0), in
[rad].
dapp_xz: Strength of detuning in the horizontal plane, scaled
to the relative bare betatron phase advance in x.
dapp_yz: Strength of detuning in the vertical plane, scaled
to the relative bare betatron phase advance in y.
"""
self.dapp_xz = dapp_xz
self.dapp_yz = dapp_yz
self.omega = omega
self.phi_0 = phi_0
def detune(self, beam):
""" Calculates for each particle its betatron detuning
dQ_x, dQ_y according to formulae taken from [1] (see
above).
dQ_x = dapp_xz / p * \cos(omega / (beta c) z + phi_0)
dQ_y = dapp_yz / p * \cos(omega / (beta c) z + phi_0)
with
dapp_xz = beta_x_RFQ * v_2 * e / (2 Pi * omega)
dapp_yz = -beta_y_RFQ * v_2 * e / (2 Pi * omega)
and p the particle momentum p = (1 + dp) p0.
(Probably, it would make sense to approximate p by p0 for better
performance). """
p = (1. + beam.dp) * beam.p0
cos_term = pm.cos(self.omega / (beam.beta * c) * beam.z + self.phi_0) / p
dQ_x = self.dapp_xz * cos_term
dQ_y = self.dapp_yz * cos_term
return dQ_x, dQ_y
class RFQKick(object, metaclass=ABCMeta):
"""Python base class to describe the RFQ element in the
localized kick model for both the transverse and the
longitudinal coordinates.
"""
@abstractmethod
def track(self, beam):
pass
class RFQTransverseKick(RFQKick):
"""Python implementation of the RFQ element acting on the
particles' transverse coordinates (i.e. localized kick
model).
"""
def __init__(self, v_2, omega, phi_0):
"""An RFQ element is fully characterized by the parameters
v_2: quadrupolar expansion coefficient of the
accelerating voltage (~strength of the RFQ), in
[V/m^2].
omega: Angular frequency of the RF wave, in [rad/s].
phi_0: Constant phase offset wrt. bunch center (z=0), in
[rad].
"""
self.v_2 = v_2
self.omega = omega
self.phi_0 = phi_0
def track(self, beam):
"""The formula that describes the transverse kick experienced
by an ultra-relativistic particle traversing the RFQ
longitudinally is based on the thin-lens approximation
\Delta p_x = -x*(2 e v_2 / omega) *
cos(omega z / (beta c) + phi_0),
\Delta p_y = y*(2 e v_2 / omega) *
cos(omega z / (beta c) + phi_0).
"""
cos_term = (2. * e * self.v_2 / self.omega *
pm.cos(self.omega / (beam.beta * c) * beam.z + self.phi_0))
beam.xp += -beam.x * cos_term / beam.p0
beam.yp += beam.y * cos_term / beam.p0
class RFQLongitudinalKick(RFQKick):
"""Python implementation of the RFQ element acting on the
particles' longitudinal coordinate dp."""
def __init__(self, v_2, omega, phi_0):
"""An RFQ element is fully characterized by the parameters
v_2: quadrupolar expansion coefficient of the
accelerating voltage (~strength of the RFQ), in
[V/m^2].
omega: Angular frequency of the RF wave, in [rad/s].
phi_0: Constant phase offset wrt. bunch center (z=0), in
[rad].
"""
self.v_2 = v_2
self.omega = omega
self.phi_0 = phi_0
def track(self, beam):
"""The formula used to describe the longitudinal kick is given
by
\Delta p_z = -(x^2 - y^2) (e v_2 / (beta c)) *
sin(omega z / (beta c) + phi_0).
"""
sin_term = (e * self.v_2 / (beam.beta * c) *
pm.sin(self.omega / (beam.beta * c) * beam.z + self.phi_0))
beam.dp += -(beam.x*beam.x - beam.y*beam.y) * sin_term / beam.p0
| en | 0.8219 | This module contains the Python implementation of a pillbox-cavity RF quadrupole - referred to as the RFQ - as it was proposed by <NAME> in 'Radio frequency quadrupole for Landau damping in accelerators', Phys. Rev. Special Topics - Accelerators and Beams 17, 011001 (2014) [1]. Similar to a 'Landau' octupole magnet, this device is intended to introduce an incoherent tune spread such that Landau damping can prevent the growth of transverse collective instabilities. The formulae that are used are based on [1] and make use of the thin- lens approximation. On the one hand, the RFQ introduces a longitudinal spread of the betatron frequency and on the other hand, a transverse spread of the synchrotron frequency. The effect in the transverse plane is modelled in two different ways (I) RFQ as a detuner acting directly on each particles' betatron tunes, (II) RFQ as a localized kick acting on each particles' momenta xp and yp. The effect in the longitudinal plane is always modelled as a localized kick, i.e. a change in a particle's normalized momentum dp. For model (II), the incoherent betatron detuning is not applied directly, but is a consequence of the change in momenta xp and yp. @author <NAME>, <NAME> @date July, 10th 2014 @brief Python implementation of a pillbox cavity RF quadrupole for Landau damping. @copyright CERN Collection class to contain/manage the segment-wise defined RFQ elements RFQTransverseDetunerSegment acting on the betatron tunes (detuner model of the RFQ). This is a pure Python class and it derives from the DetunerCollection class defined in the module PyHEADTAIL.trackers.detuners. An RFQ element is fully characterized by the parameters v_2: quadrupolar expansion coefficient of the accelerating voltage (~strength of the RFQ), in [V/m^2]. One-turn value. omega: Angular frequency of the RF wave, in [rad/s]. phi_0: Constant phase offset wrt. bunch center (z=0), in [rad]. 
beta_x_RFQ and beta_y_RFQ are the beta functions at the position of the RFQ, although in the detuner model of the RFQ, the RFQ should not actually be understood as being localized. Instantiate a RFQTransverseSegmentDetuner for the specified segment of the accelerator ring. Note that the bare betatron phase advances over the current segment, dmu_x and dmu_y, are given as relative values, i.e. in units of the overall phase advance around the whole accelerator (the betatron tune). The method is called by the TransverseMap object which manages the creation of a detuner for every defined segment. Python implementation of the RFQ element acting directly on the particles' betatron tunes (i.e. RFQ detuner model). Creates an instance of the RFQTransverseDetunerSegment class. The RFQ is characterized by omega: Angular frequency of the RF wave, in [rad/s]. phi_0: Constant phase offset wrt. bunch center (z=0), in [rad]. dapp_xz: Strength of detuning in the horizontal plane, scaled to the relative bare betatron phase advance in x. dapp_yz: Strength of detuning in the vertical plane, scaled to the relative bare betatron phase advance in y. Calculates for each particle its betatron detuning dQ_x, dQ_y according to formulae taken from [1] (see above). dQ_x = dapp_xz / p * \cos(omega / (beta c) z + phi_0) dQ_y = dapp_yz / p * \cos(omega / (beta c) z + phi_0) with dapp_xz = beta_x_RFQ * v_2 * e / (2 Pi * omega) dapp_yz = -beta_y_RFQ * v_2 * e / (2 Pi * omega) and p the particle momentum p = (1 + dp) p0. (Probably, it would make sense to approximate p by p0 for better performance). Python base class to describe the RFQ element in the localized kick model for both the transverse and the longitudinal coordinates. Python implementation of the RFQ element acting on the particles' transverse coordinates (i.e. localized kick model). An RFQ element is fully characterized by the parameters v_2: quadrupolar expansion coefficient of the accelerating voltage (~strength of the RFQ), in [V/m^2]. 
omega: Angular frequency of the RF wave, in [rad/s]. phi_0: Constant phase offset wrt. bunch center (z=0), in [rad]. The formula that describes the transverse kick experienced by an ultra-relativistic particle traversing the RFQ longitudinally is based on the thin-lens approximation \Delta p_x = -x*(2 e v_2 / omega) * cos(omega z / (beta c) + phi_0), \Delta p_y = y*(2 e v_2 / omega) * cos(omega z / (beta c) + phi_0). Python implementation of the RFQ element acting on the particles' longitudinal coordinate dp. An RFQ element is fully characterized by the parameters v_2: quadrupolar expansion coefficient of the accelerating voltage (~strength of the RFQ), in [V/m^2]. omega: Angular frequency of the RF wave, in [rad/s]. phi_0: Constant phase offset wrt. bunch center (z=0), in [rad]. The formula used to describe the longitudinal kick is given by \Delta p_z = -(x^2 - y^2) (e v_2 / (beta c)) * sin(omega z / (beta c) + phi_0). | 2.381241 | 2 |
prm/prm/model_distribution/prometheus/processing.py | Akiros001/platform-resource-manager | 47 | 6615655 | # Copyright (C) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
#
#
# SPDX-License-Identifier: Apache-2.0
import logging
import numpy as np
import pandas as pd
from prm.model_distribution.metric import GroupInfo, Metric, GroupLabel
from prm.model_distribution.prometheus.query import PromHttp
log = logging.getLogger(__name__)
class NotExistInPrometheus(Exception):
pass
class PromProcessor(object):
""" Processing data from promentheus, aggregrating metrics by cpu_model,
application and cpu_assignment.
"""
def __init__(self, url, timeout):
self.prom_http = PromHttp(url, timeout)
self.metric_names = self.prom_http.get_all_metrics_value_names()
def non_exsist_hint(self, metric_name):
raise NotExistInPrometheus("Can not query {} in prometheus,all "
"avaliable metrics in prometheus: {} "
"\n".format(metric_name, self.metric_names))
def _transfer_models_to_nested(self, models):
"""
build thresholds for each unique combination of cpu_model, application, cpu_assignment ,
but store each cpu_model as an unique key into database
"""
nested_models = {}
for model in models:
if nested_models.get(model.cpu_model) is None:
nested_models[model.cpu_model] = {
model.application: {
model.initial_task_cpu_assignment: True}
}
elif nested_models.get(model.cpu_model).get(model.application) \
is None:
temp = nested_models[model.cpu_model]
temp[model.application] = {
model.initial_task_cpu_assignment: True}
nested_models[model.cpu_model] = temp
elif nested_models.get(model.cpu_model).get(model.application).get(
model.initial_task_cpu_assignment) is None:
temp = nested_models.get(
model.cpu_model).get(model.application)
temp[model.initial_task_cpu_assignment] = True
nested_models[model.cpu_model][model.application] = temp
return nested_models
def generate_existing_models_by_cpu_util(self, starts_ends):
# query all series in the timerange
series = []
for start_end in starts_ends:
serie = self.prom_http.get_series_with_label(
Metric.UTIL, start_end[0], start_end[1], {})
series = series + serie
# make unique group labels
models = {}
for s in series:
if GroupInfo.CPU_MODEL not in s or \
GroupInfo.APPLICATION not in s or \
GroupInfo.INITIAL_TASK_CPU_ASSIGNMENT not in s:
continue
temp_model = GroupLabel(
s[GroupInfo.CPU_MODEL], s[GroupInfo.APPLICATION],
s[GroupInfo.INITIAL_TASK_CPU_ASSIGNMENT])
if models.get(temp_model) is None:
models[temp_model] = True
if len(models) == 0:
log.warning(
"no data at this time range, please set a larger timerange")
# transfer models to nested dict
return list(models), self._transfer_models_to_nested(list(models))
def aggregrate_metric_by_application_and_label(
self, metric_name, group_label, start, end, step):
"""prometheus db data format
"data": {
"resultType": "matrix",
"result": [
{
"metric": {
"__name__": "memory_bandwidth",
"application": "stress_ng",
....
},
"values": [
[
1555056098.363,
"11707465728"
],
....
},
{
"metric": {
"__name__": "memory_bandwidth",
"application": "stress_ng",
....
},
"values": [
[
1555056098.363,
"11707465728"
],
....
}
...
]
}
"""
if metric_name not in self.metric_names:
self.non_exsist_hint(metric_name)
data = self.prom_http.get_data_with_label(
metric_name, start, end, group_label, step)
if len(data['result']) == 0:
log.info("{} data is empty from {} to {}.".format(metric_name, start, end))
return 0, []
metric_arrary = [[], []]
for result in data['result']:
value = np.transpose(result['values']).astype(np.float)
# group metric by same labels
metric_arrary = np.concatenate((metric_arrary, value), axis=1)
# timestamp:axis=0, value:axis=1
return len(metric_arrary[1]), metric_arrary[1]
def generate_new_metric_dataframes(self, metric_name_list, group_label, starts_ends, step):
frames = []
for start_end in starts_ends:
frame = self.generate_new_metric_dataframe(
metric_name_list, group_label, start_end[0], start_end[1], step)
frames.append(frame)
return pd.concat(frames)
def generate_new_metric_dataframe(self, metric_name_list, group_label, start, end, step):
metric_lengths = []
metric_data = {}
for metric_name in metric_name_list:
metric_length, metric_data[metric_name] = \
self.aggregrate_metric_by_application_and_label(
metric_name, group_label, start, end, step)
metric_lengths.append(metric_length)
# align timestamp between differnt metrics
if len(set(metric_lengths)) > 1:
log.info('Length of values does not match length of index for {} '.format(group_label))
final_length = min(metric_lengths)
for key, value in metric_data.items():
metric_data[key] = value[:final_length]
return pd.DataFrame.from_dict(metric_data)
| # Copyright (C) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
#
#
# SPDX-License-Identifier: Apache-2.0
import logging
import numpy as np
import pandas as pd
from prm.model_distribution.metric import GroupInfo, Metric, GroupLabel
from prm.model_distribution.prometheus.query import PromHttp
# Module-level logger shared by every class in this module.
log = logging.getLogger(__name__)
class NotExistInPrometheus(Exception):
    """Raised when a requested metric name is unknown to Prometheus."""
class PromProcessor(object):
    """Process data queried from Prometheus.

    Aggregates metric time series by the grouping labels cpu_model,
    application and (initial task) cpu_assignment, exposing the merged
    values either as raw numpy arrays or as pandas DataFrames.
    """

    def __init__(self, url, timeout):
        """Create a processor backed by a Prometheus HTTP endpoint.

        Args:
            url: base URL of the Prometheus server.
            timeout: per-request timeout forwarded to PromHttp.
        """
        self.prom_http = PromHttp(url, timeout)
        # Cache every metric name known to Prometheus so later queries for
        # a missing metric can fail fast with a helpful message.
        self.metric_names = self.prom_http.get_all_metrics_value_names()

    def non_exsist_hint(self, metric_name):
        """Raise NotExistInPrometheus listing all available metric names."""
        raise NotExistInPrometheus("Can not query {} in prometheus,all "
                                   "avaliable metrics in prometheus: {} "
                                   "\n".format(metric_name, self.metric_names))

    def _transfer_models_to_nested(self, models):
        """Convert a flat list of group labels into a nested dict.

        Builds thresholds for each unique combination of cpu_model,
        application and cpu_assignment, keyed as
        ``nested[cpu_model][application][cpu_assignment] = True`` so each
        cpu_model can be stored under a single database key.
        """
        nested_models = {}
        for model in models:
            # setdefault creates the intermediate dicts on first sight and
            # is idempotent for duplicate labels, matching the original
            # get()/elif ladder exactly.
            nested_models.setdefault(model.cpu_model, {}).setdefault(
                model.application, {})[model.initial_task_cpu_assignment] = True
        return nested_models

    def generate_existing_models_by_cpu_util(self, starts_ends):
        """Discover unique (cpu_model, application, assignment) groups.

        Queries the series of the CPU-utilization metric over every
        (start, end) window in ``starts_ends`` and deduplicates the
        grouping labels found on them.

        Returns:
            A pair ``(labels, nested)``: ``labels`` is the list of unique
            GroupLabel instances and ``nested`` is the same data in the
            nested-dict form produced by _transfer_models_to_nested.
        """
        # Query all series in the requested time ranges.
        series = []
        for start_end in starts_ends:
            series.extend(self.prom_http.get_series_with_label(
                Metric.UTIL, start_end[0], start_end[1], {}))
        # Deduplicate group labels.  A dict (not a set) keeps the
        # first-seen order of the labels; GroupLabel is hashable since the
        # original code already used it as a dict key.
        models = {}
        for s in series:
            if GroupInfo.CPU_MODEL not in s or \
                    GroupInfo.APPLICATION not in s or \
                    GroupInfo.INITIAL_TASK_CPU_ASSIGNMENT not in s:
                continue
            label = GroupLabel(
                s[GroupInfo.CPU_MODEL], s[GroupInfo.APPLICATION],
                s[GroupInfo.INITIAL_TASK_CPU_ASSIGNMENT])
            models[label] = True
        if not models:
            log.warning(
                "no data at this time range, please set a larger timerange")
        # Return both the flat list and the nested-dict representation.
        return list(models), self._transfer_models_to_nested(list(models))

    def aggregrate_metric_by_application_and_label(
            self, metric_name, group_label, start, end, step):
        """Query one metric and merge all matching series into one array.

        Prometheus range-query payloads look like::

            "data": {
                "resultType": "matrix",
                "result": [
                    {
                        "metric": {"__name__": "memory_bandwidth",
                                   "application": "stress_ng", ...},
                        "values": [[1555056098.363, "11707465728"], ...]
                    },
                    ...
                ]
            }

        Each series' ``values`` are transposed so row 0 holds timestamps
        and row 1 holds sample values, then all series matching
        ``group_label`` are concatenated along the time axis.

        Returns:
            ``(count, values)``: the number of samples and a 1-D numpy
            array of metric values, or ``(0, [])`` when no data exists.

        Raises:
            NotExistInPrometheus: if ``metric_name`` is unknown.
        """
        if metric_name not in self.metric_names:
            self.non_exsist_hint(metric_name)
        data = self.prom_http.get_data_with_label(
            metric_name, start, end, group_label, step)
        if not data['result']:
            log.info("{} data is empty from {} to {}.".format(metric_name, start, end))
            return 0, []
        # Convert each series to a float array of shape (2, n):
        # row 0 = timestamps, row 1 = values.  dtype=float also parses the
        # string-encoded sample values; the builtin is used because the
        # np.float alias was removed in NumPy 1.24.
        columns = [np.asarray(result['values'], dtype=float).T
                   for result in data['result']]
        # Group all series that share the same labels along the time axis
        # with a single concatenate instead of one per series.
        metric_array = np.concatenate(columns, axis=1)
        # timestamps: row 0, values: row 1
        values = metric_array[1]
        return len(values), values

    def generate_new_metric_dataframes(self, metric_name_list, group_label, starts_ends, step):
        """Concatenate one per-window DataFrame for every (start, end) pair."""
        frames = [self.generate_new_metric_dataframe(
                      metric_name_list, group_label,
                      start_end[0], start_end[1], step)
                  for start_end in starts_ends]
        return pd.concat(frames)

    def generate_new_metric_dataframe(self, metric_name_list, group_label, start, end, step):
        """Build a DataFrame with one column per metric for one window.

        Metrics whose series come back with different lengths (e.g.
        because their scrape timestamps do not line up) are truncated to
        the shortest length so all columns share one index.
        """
        metric_lengths = []
        metric_data = {}
        for metric_name in metric_name_list:
            metric_length, metric_data[metric_name] = \
                self.aggregrate_metric_by_application_and_label(
                    metric_name, group_label, start, end, step)
            metric_lengths.append(metric_length)
        # Align timestamps between different metrics by truncating every
        # column to the shortest series.
        if len(set(metric_lengths)) > 1:
            log.info('Length of values does not match length of index for {} '.format(group_label))
            final_length = min(metric_lengths)
            for key, value in metric_data.items():
                metric_data[key] = value[:final_length]
        return pd.DataFrame.from_dict(metric_data)
| en | 0.740718 | # Copyright (C) 2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions # and limitations under the License. # # # SPDX-License-Identifier: Apache-2.0 Processing data from promentheus, aggregrating metrics by cpu_model, application and cpu_assignment. build thresholds for each unique combination of cpu_model, application, cpu_assignment , but store each cpu_model as an unique key into database # query all series in the timerange # make unique group labels # transfer models to nested dict prometheus db data format "data": { "resultType": "matrix", "result": [ { "metric": { "__name__": "memory_bandwidth", "application": "stress_ng", .... }, "values": [ [ 1555056098.363, "11707465728" ], .... }, { "metric": { "__name__": "memory_bandwidth", "application": "stress_ng", .... }, "values": [ [ 1555056098.363, "11707465728" ], .... } ... ] } # group metric by same labels # timestamp:axis=0, value:axis=1 # align timestamp between differnt metrics | 2.124344 | 2 |
cqlengine/tests/model/test_model.py | jpuerta/cqlengine | 57 | 6615656 | <gh_stars>10-100
from unittest import TestCase
from cqlengine.models import Model, ModelDefinitionException
from cqlengine import columns
class TestModel(TestCase):
""" Tests the non-io functionality of models """
def test_instance_equality(self):
""" tests the model equality functionality """
class EqualityModel(Model):
pk = columns.Integer(primary_key=True)
m0 = EqualityModel(pk=0)
m1 = EqualityModel(pk=1)
self.assertEqual(m0, m0)
self.assertNotEqual(m0, m1)
def test_model_equality(self):
""" tests the model equality functionality """
class EqualityModel0(Model):
pk = columns.Integer(primary_key=True)
class EqualityModel1(Model):
kk = columns.Integer(primary_key=True)
m0 = EqualityModel0(pk=0)
m1 = EqualityModel1(kk=1)
self.assertEqual(m0, m0)
self.assertNotEqual(m0, m1)
class BuiltInAttributeConflictTest(TestCase):
"""tests Model definitions that conflict with built-in attributes/methods"""
def test_model_with_attribute_name_conflict(self):
"""should raise exception when model defines column that conflicts with built-in attribute"""
with self.assertRaises(ModelDefinitionException):
class IllegalTimestampColumnModel(Model):
my_primary_key = columns.Integer(primary_key=True)
timestamp = columns.BigInt()
def test_model_with_method_name_conflict(self):
"""should raise exception when model defines column that conflicts with built-in method"""
with self.assertRaises(ModelDefinitionException):
class IllegalFilterColumnModel(Model):
my_primary_key = columns.Integer(primary_key=True)
filter = columns.Text()
| from unittest import TestCase
from cqlengine.models import Model, ModelDefinitionException
from cqlengine import columns
class TestModel(TestCase):
""" Tests the non-io functionality of models """
def test_instance_equality(self):
""" tests the model equality functionality """
class EqualityModel(Model):
pk = columns.Integer(primary_key=True)
m0 = EqualityModel(pk=0)
m1 = EqualityModel(pk=1)
self.assertEqual(m0, m0)
self.assertNotEqual(m0, m1)
def test_model_equality(self):
""" tests the model equality functionality """
class EqualityModel0(Model):
pk = columns.Integer(primary_key=True)
class EqualityModel1(Model):
kk = columns.Integer(primary_key=True)
m0 = EqualityModel0(pk=0)
m1 = EqualityModel1(kk=1)
self.assertEqual(m0, m0)
self.assertNotEqual(m0, m1)
class BuiltInAttributeConflictTest(TestCase):
"""tests Model definitions that conflict with built-in attributes/methods"""
def test_model_with_attribute_name_conflict(self):
"""should raise exception when model defines column that conflicts with built-in attribute"""
with self.assertRaises(ModelDefinitionException):
class IllegalTimestampColumnModel(Model):
my_primary_key = columns.Integer(primary_key=True)
timestamp = columns.BigInt()
def test_model_with_method_name_conflict(self):
"""should raise exception when model defines column that conflicts with built-in method"""
with self.assertRaises(ModelDefinitionException):
class IllegalFilterColumnModel(Model):
my_primary_key = columns.Integer(primary_key=True)
filter = columns.Text() | en | 0.858351 | Tests the non-io functionality of models tests the model equality functionality tests the model equality functionality tests Model definitions that conflict with built-in attributes/methods should raise exception when model defines column that conflicts with built-in attribute should raise exception when model defines column that conflicts with built-in method | 3.257613 | 3 |
local_settings/urls.py | hackoregon/2019-examplar-backend | 0 | 6615657 | from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from rest_framework_swagger.views import get_swagger_view
from rest_framework.documentation import include_docs_urls
router = DefaultRouter()
api_title = 'Hack Oregon Examplar 2019 API'
schema_view = get_swagger_view(title=api_title)
urlpatterns = [
url(r'^examplar/schema/', schema_view),
url(r'^examplar/api/', include('hackoregon_examplar.api.urls')),
url(r'^examplar/docs/', include_docs_urls(title=api_title)),
url(r'^examplar/health/', include('health_check.urls'))
]
url(r'^$', schema_view)
| from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from rest_framework_swagger.views import get_swagger_view
from rest_framework.documentation import include_docs_urls
router = DefaultRouter()
api_title = 'Hack Oregon Examplar 2019 API'
schema_view = get_swagger_view(title=api_title)
urlpatterns = [
url(r'^examplar/schema/', schema_view),
url(r'^examplar/api/', include('hackoregon_examplar.api.urls')),
url(r'^examplar/docs/', include_docs_urls(title=api_title)),
url(r'^examplar/health/', include('health_check.urls'))
]
url(r'^$', schema_view)
| none | 1 | 1.871332 | 2 | |
LeetCode/0390. Elimination Game/solution.py | InnoFang/oh-my-algorithms | 1 | 6615658 | """
3377 / 3377 test cases passed.
Runtime: 44 ms
Memory Usage: 15 MB
"""
class Solution:
def lastRemaining(self, n: int) -> int:
head, step, left = 1, 1, True
while n > 1:
if left or n & 1 == 1:
head += step
step <<= 1
n >>= 1
left = not left
return head
| """
3377 / 3377 test cases passed.
Runtime: 44 ms
Memory Usage: 15 MB
"""
class Solution:
def lastRemaining(self, n: int) -> int:
head, step, left = 1, 1, True
while n > 1:
if left or n & 1 == 1:
head += step
step <<= 1
n >>= 1
left = not left
return head
| en | 0.445694 | 3377 / 3377 test cases passed. Runtime: 44 ms Memory Usage: 15 MB | 3.252776 | 3 |
nvmeof_perf/utils.py | Eideticom/nvmeof-perf | 0 | 6615659 | ########################################################################
##
## Copyright 2015 PMC-Sierra, Inc.
## Copyright 2018 Eidetic Communications Inc.
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0 Unless required by
## applicable law or agreed to in writing, software distributed under the
## License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
## CONDITIONS OF ANY KIND, either express or implied. See the License for
## the specific language governing permissions and limitations under the
## License.
##
########################################################################
import sys
import time
import curses
class DummyContext(object):
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class Timeline(DummyContext):
def __init__(self, period=1.0, *args, **kwargs):
super().__init__(*args, **kwargs)
self.period = period
self.last_time = time.time() - period
self.duration = None
self.first = True
def wait_until_ready(self):
tm = self.last_time + self.period
if tm > time.time():
time.sleep(tm - time.time())
def next(self):
self.wait_until_ready()
if not self.first:
self.duration = time.time() - self.last_time
self.first = False
self.last_time = time.time()
class CursesContext(object):
def __enter__(self):
curses.setupterm()
self.cmd("smcup")
return self
def __exit__(self, type, value, traceback):
self.cmd("rmcup")
def cmd(self, name, *args):
s = curses.tigetstr(name)
sys.stdout.buffer.write(curses.tparm(s, *args))
def clear(self):
self.cmd("clear")
self.cmd("cup", 0, 0)
| ########################################################################
##
## Copyright 2015 PMC-Sierra, Inc.
## Copyright 2018 Eidetic Communications Inc.
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0 Unless required by
## applicable law or agreed to in writing, software distributed under the
## License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
## CONDITIONS OF ANY KIND, either express or implied. See the License for
## the specific language governing permissions and limitations under the
## License.
##
########################################################################
import sys
import time
import curses
class DummyContext(object):
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class Timeline(DummyContext):
def __init__(self, period=1.0, *args, **kwargs):
super().__init__(*args, **kwargs)
self.period = period
self.last_time = time.time() - period
self.duration = None
self.first = True
def wait_until_ready(self):
tm = self.last_time + self.period
if tm > time.time():
time.sleep(tm - time.time())
def next(self):
self.wait_until_ready()
if not self.first:
self.duration = time.time() - self.last_time
self.first = False
self.last_time = time.time()
class CursesContext(object):
def __enter__(self):
curses.setupterm()
self.cmd("smcup")
return self
def __exit__(self, type, value, traceback):
self.cmd("rmcup")
def cmd(self, name, *args):
s = curses.tigetstr(name)
sys.stdout.buffer.write(curses.tparm(s, *args))
def clear(self):
self.cmd("clear")
self.cmd("cup", 0, 0)
| en | 0.568015 | ######################################################################## ## ## Copyright 2015 PMC-Sierra, Inc. ## Copyright 2018 Eidetic Communications Inc. ## ## Licensed under the Apache License, Version 2.0 (the "License"); you ## may not use this file except in compliance with the License. You may ## obtain a copy of the License at ## http://www.apache.org/licenses/LICENSE-2.0 Unless required by ## applicable law or agreed to in writing, software distributed under the ## License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR ## CONDITIONS OF ANY KIND, either express or implied. See the License for ## the specific language governing permissions and limitations under the ## License. ## ######################################################################## | 2.630265 | 3 |
checkin.py | jdholtz/auto-southwest-check-in | 5 | 6615660 | <gh_stars>1-10
import sys
from lib.account import Account
if __name__ == "__main__":
arguments = sys.argv
confirmation_number = arguments[1]
first_name = arguments[2]
last_name = arguments[3]
account = Account(first_name=first_name, last_name=last_name)
account.get_checkin_info(confirmation_number)
| import sys
from lib.account import Account
if __name__ == "__main__":
arguments = sys.argv
confirmation_number = arguments[1]
first_name = arguments[2]
last_name = arguments[3]
account = Account(first_name=first_name, last_name=last_name)
account.get_checkin_info(confirmation_number) | none | 1 | 2.033301 | 2 | |
jia/scheduler/scheduler.py | joshblum/chronology | 0 | 6615661 | <reponame>joshblum/chronology
from __future__ import absolute_import
import atexit
import datetime
import gevent
import gipc
import traceback
import sys
from heapq import heappush, heappop, heapify
from jia.errors import PyCodeError
from jia.utils import send_mail
from scheduler import get_app
from scheduler.common.concurrent import GIPCExecutor
from scheduler.models import Task
class Scheduler(object):
"""Inteval based code execution scheduler"""
def __init__(self):
"""Initialize the queue and spawn the main loop thread
Upon initialization, tasks stored in the database are immediately
scheduled.
_task_queue is a priority queue ordered using Python's heapq functionality.
Elements in _task_queue are tuples of the form (datetime, task) where
datetime is the scheduled run time and task is a dictionary as defined
in the above docstring for the Scheduler class.
For concurrency safety reasons, never write to _task_queue outside the
_loop() thread.
"""
self._task_queue = [] # Never write to this outside the _loop thread
self._pending_cancels = set()
self._executor = GIPCExecutor()
# Load previously scheduled tasks from database
now = datetime.datetime.now()
with get_app().app_context():
saved_schedule = Task.query.filter_by(active=True)
for task in saved_schedule:
new_task = {
'id': task.id,
'interval': task.interval,
'code': task.code
}
# Writing directly to the _task_queue is safe since we haven't started
# the _loop yet
self._task_queue.append((now, new_task))
# Make _task_queue a priority queue
heapify(self._task_queue)
# Spawn main loop and save writer for future communication
(read, write) = gipc.pipe()
self._main_thread = gevent.spawn(self._loop, read)
self._schedule_pipe = write
atexit.register(self._interrupt)
def schedule(self, task):
"""Pass schedule request to the main loop
Tasks should be dictionaries with the following attributes:
task = {
'id': 'a93de0f3',
'code': ..., # string of Python code
'interval': 600, # in seconds
}
An interval of 0 indicates the task should only be run once.
"""
self._schedule_pipe.put(('schedule', task))
def cancel(self, task_id):
"""Pass cancel request to the main loop"""
self._schedule_pipe.put(('cancel', task_id))
def _schedule(self, task, next_run=None):
if not next_run:
next_run = datetime.datetime.now()
heappush(self._task_queue, (next_run, task))
def _cancel(self, task_id):
self._pending_cancels.add(task_id)
def _interrupt(self):
self._main_thread.kill()
#TODO(derek): kill child threads
def _loop(self, reader):
"""Main execution loop of the scheduler.
The loop runs every second. Between iterations, the loop listens for
schedule or cancel requests coming from Flask via over the gipc pipe
(reader) and modifies the queue accordingly.
When a task completes, it is rescheduled
"""
results = set()
while True:
now = datetime.datetime.now()
if self._task_queue and self._task_queue[0][0] <= now:
task = heappop(self._task_queue)[1]
if task['id'] not in self._pending_cancels:
result = self._executor.submit(_execute, task)
results.add(result)
else:
self._pending_cancels.remove(task['id'])
else:
# Check for new tasks coming from HTTP
with gevent.Timeout(0.5, False) as t:
message = reader.get(timeout=t)
if message[0] == 'schedule':
self._schedule(message[1], next_run=now)
elif message[0] == 'cancel':
self._cancel(message[1])
# Reschedule completed tasks
if not results:
gevent.sleep(0.5)
continue
ready = self._executor.wait(results, num=1, timeout=0.5)
for result in ready:
results.remove(result)
if result.value:
task = result.value
interval = int(task['interval'])
if interval:
run_at = now + datetime.timedelta(seconds=int(task['interval']))
self._schedule(task, next_run=run_at)
else:
err_msg = result.exception
sys.stderr.write("ERROR: %s" % err_msg)
email_msg = 'Task %s failed at %s\n\n%s' % (
task['id'],
datetime.datetime.now(),
err_msg
)
send_mail(get_app().config['SCHEDULER_FAILURE_EMAILS'],
'Scheduler Failure',
email_msg)
def _execute(task):
"""A wrapper around exec
This exists outside the Scheduler class because it is pickled after it is
sent to the executor.
"""
print "[%s] -- %s -- START" % (datetime.datetime.now(), task['id'])
try:
with get_app().app_context():
exec task['code'] in {}, {}
print "[%s] -- %s -- COMPLETE" % (datetime.datetime.now(), task['id'])
except Exception as e:
if isinstance(e, PyCodeError):
err_msg = "%s: %s\n%s" % (e.data['name'], e.data['message'],
''.join(e.data['traceback']))
else:
err_msg = traceback.format_exc()
sys.stderr.write(err_msg)
sys.stderr.write("[%s] -- %s -- FAIL\n" % (datetime.datetime.now(),
task['id']))
email_msg = 'Task %s failed at %s\n\n%s' % (task['id'],
datetime.datetime.now(),
err_msg)
send_mail(get_app().config['SCHEDULER_FAILURE_EMAILS'],
'Scheduler Failure', email_msg)
finally:
return task
| from __future__ import absolute_import
import atexit
import datetime
import gevent
import gipc
import traceback
import sys
from heapq import heappush, heappop, heapify
from jia.errors import PyCodeError
from jia.utils import send_mail
from scheduler import get_app
from scheduler.common.concurrent import GIPCExecutor
from scheduler.models import Task
class Scheduler(object):
"""Inteval based code execution scheduler"""
def __init__(self):
"""Initialize the queue and spawn the main loop thread
Upon initialization, tasks stored in the database are immediately
scheduled.
_task_queue is a priority queue ordered using Python's heapq functionality.
Elements in _task_queue are tuples of the form (datetime, task) where
datetime is the scheduled run time and task is a dictionary as defined
in the above docstring for the Scheduler class.
For concurrency safety reasons, never write to _task_queue outside the
_loop() thread.
"""
self._task_queue = [] # Never write to this outside the _loop thread
self._pending_cancels = set()
self._executor = GIPCExecutor()
# Load previously scheduled tasks from database
now = datetime.datetime.now()
with get_app().app_context():
saved_schedule = Task.query.filter_by(active=True)
for task in saved_schedule:
new_task = {
'id': task.id,
'interval': task.interval,
'code': task.code
}
# Writing directly to the _task_queue is safe since we haven't started
# the _loop yet
self._task_queue.append((now, new_task))
# Make _task_queue a priority queue
heapify(self._task_queue)
# Spawn main loop and save writer for future communication
(read, write) = gipc.pipe()
self._main_thread = gevent.spawn(self._loop, read)
self._schedule_pipe = write
atexit.register(self._interrupt)
def schedule(self, task):
"""Pass schedule request to the main loop
Tasks should be dictionaries with the following attributes:
task = {
'id': 'a93de0f3',
'code': ..., # string of Python code
'interval': 600, # in seconds
}
An interval of 0 indicates the task should only be run once.
"""
self._schedule_pipe.put(('schedule', task))
def cancel(self, task_id):
"""Pass cancel request to the main loop"""
self._schedule_pipe.put(('cancel', task_id))
def _schedule(self, task, next_run=None):
if not next_run:
next_run = datetime.datetime.now()
heappush(self._task_queue, (next_run, task))
def _cancel(self, task_id):
self._pending_cancels.add(task_id)
def _interrupt(self):
self._main_thread.kill()
#TODO(derek): kill child threads
def _loop(self, reader):
"""Main execution loop of the scheduler.
The loop runs every second. Between iterations, the loop listens for
schedule or cancel requests coming from Flask via over the gipc pipe
(reader) and modifies the queue accordingly.
When a task completes, it is rescheduled
"""
results = set()
while True:
now = datetime.datetime.now()
if self._task_queue and self._task_queue[0][0] <= now:
task = heappop(self._task_queue)[1]
if task['id'] not in self._pending_cancels:
result = self._executor.submit(_execute, task)
results.add(result)
else:
self._pending_cancels.remove(task['id'])
else:
# Check for new tasks coming from HTTP
with gevent.Timeout(0.5, False) as t:
message = reader.get(timeout=t)
if message[0] == 'schedule':
self._schedule(message[1], next_run=now)
elif message[0] == 'cancel':
self._cancel(message[1])
# Reschedule completed tasks
if not results:
gevent.sleep(0.5)
continue
ready = self._executor.wait(results, num=1, timeout=0.5)
for result in ready:
results.remove(result)
if result.value:
task = result.value
interval = int(task['interval'])
if interval:
run_at = now + datetime.timedelta(seconds=int(task['interval']))
self._schedule(task, next_run=run_at)
else:
err_msg = result.exception
sys.stderr.write("ERROR: %s" % err_msg)
email_msg = 'Task %s failed at %s\n\n%s' % (
task['id'],
datetime.datetime.now(),
err_msg
)
send_mail(get_app().config['SCHEDULER_FAILURE_EMAILS'],
'Scheduler Failure',
email_msg)
def _execute(task):
"""A wrapper around exec
This exists outside the Scheduler class because it is pickled after it is
sent to the executor.
"""
print "[%s] -- %s -- START" % (datetime.datetime.now(), task['id'])
try:
with get_app().app_context():
exec task['code'] in {}, {}
print "[%s] -- %s -- COMPLETE" % (datetime.datetime.now(), task['id'])
except Exception as e:
if isinstance(e, PyCodeError):
err_msg = "%s: %s\n%s" % (e.data['name'], e.data['message'],
''.join(e.data['traceback']))
else:
err_msg = traceback.format_exc()
sys.stderr.write(err_msg)
sys.stderr.write("[%s] -- %s -- FAIL\n" % (datetime.datetime.now(),
task['id']))
email_msg = 'Task %s failed at %s\n\n%s' % (task['id'],
datetime.datetime.now(),
err_msg)
send_mail(get_app().config['SCHEDULER_FAILURE_EMAILS'],
'Scheduler Failure', email_msg)
finally:
return task | en | 0.858002 | Inteval based code execution scheduler Initialize the queue and spawn the main loop thread Upon initialization, tasks stored in the database are immediately scheduled. _task_queue is a priority queue ordered using Python's heapq functionality. Elements in _task_queue are tuples of the form (datetime, task) where datetime is the scheduled run time and task is a dictionary as defined in the above docstring for the Scheduler class. For concurrency safety reasons, never write to _task_queue outside the _loop() thread. # Never write to this outside the _loop thread # Load previously scheduled tasks from database # Writing directly to the _task_queue is safe since we haven't started # the _loop yet # Make _task_queue a priority queue # Spawn main loop and save writer for future communication Pass schedule request to the main loop Tasks should be dictionaries with the following attributes: task = { 'id': 'a93de0f3', 'code': ..., # string of Python code 'interval': 600, # in seconds } An interval of 0 indicates the task should only be run once. Pass cancel request to the main loop #TODO(derek): kill child threads Main execution loop of the scheduler. The loop runs every second. Between iterations, the loop listens for schedule or cancel requests coming from Flask via over the gipc pipe (reader) and modifies the queue accordingly. When a task completes, it is rescheduled # Check for new tasks coming from HTTP # Reschedule completed tasks A wrapper around exec This exists outside the Scheduler class because it is pickled after it is sent to the executor. | 2.28912 | 2 |
gen_mips.py | meryacine/MIPS-VHDL | 4 | 6615662 | #!/usr/bin/env python3
import os.path
import os
VHDL_DIRS = ['FetchDecode',
'RegisterRead',
'Execution',
'Memory',
'WriteBack',
'Buffers',
'.',]
PROJECT_DIR = os.path.dirname(__file__)
def main():
    """Generate a ModelSim project file ('mips.mpf') listing every VHDL source.

    Scans each directory in VHDL_DIRS (relative to PROJECT_DIR) for *.vhd /
    *.vhdl files, sorts them for a deterministic compile order, renders them
    into the TMPL project template, and writes the result to 'mips.mpf' in
    the current working directory.
    """
    # Collect absolute paths of all VHDL sources.  Join through PROJECT_DIR so
    # the paths are correct even when the script is run from another CWD — the
    # original joined only (vhdl_dir, vhdl), which produced wrong absolute
    # paths unless CWD happened to equal PROJECT_DIR, while the listdir() call
    # below already (correctly) went through PROJECT_DIR.
    vhdls = [os.path.abspath(os.path.join(PROJECT_DIR, vhdl_dir, vhdl))
             for vhdl_dir in VHDL_DIRS
             for vhdl in os.listdir(os.path.join(PROJECT_DIR, vhdl_dir))
             if vhdl.endswith(('.vhd', '.vhdl'))]
    # Stable order => reproducible compile_order indices in the .mpf.
    vhdls.sort()
    vhdl_count = FILE_COUNT.format(len(vhdls))
    vhdl_files = '\n'.join(NEW_FILE.format(i, vhdl)
                           for (i, vhdl) in enumerate(vhdls))
    mips_mpf = TMPL.format(vhdl_count, vhdl_files)
    with open('mips.mpf', 'w') as file:
        file.write(mips_mpf)
# Template for the .mpf "Project_Files_Count" line; {0} is the file count.
FILE_COUNT = '''Project_Files_Count = {0}'''
# Per-file project entry: {0} is the compile-order index, {1} the absolute
# file path.  Doubled braces ({{...}}) become literal braces after .format().
NEW_FILE='''Project_File_{0} = {1}\nProject_File_P_{0} = vhdl_novitalcheck 0 file_type vhdl group_id 0 cover_nofec 0 vhdl_nodebug 0 vhdl_1164 1 vhdl_noload 0 vhdl_synth 0 vhdl_enable0In 0 folder {{Top Level}} last_compile 0 vhdl_disableopt 0 vhdl_vital 0 cover_excludedefault 0 vhdl_warn1 1 vhdl_warn2 1 vhdl_explicit 1 vhdl_showsource 0 vhdl_warn3 1 cover_covercells 0 vhdl_0InOptions {{}} vhdl_warn4 1 voptflow 1 cover_optlevel 3 vhdl_options {{}} vhdl_warn5 1 toggle - ood 1 cover_noshort 0 compile_to work compile_order {0} cover_nosub 0 dont_compile 0 vhdl_use93 2008'''
# ModelSim project-file (.mpf) template, rendered with str.format():
#   {0} -> the "Project_Files_Count = N" line (see FILE_COUNT)
#   {1} -> the newline-joined per-file Project_File_* entries (see NEW_FILE)
# Literal braces inside the template are escaped as {{ }}.
# Fix: the original template carried a stray "** Warning: " simulator-transcript
# prefix pasted in front of the "; Warning -- Do not edit..." line in the
# [Project] section, producing a line in the generated .mpf that was not a
# ';' comment like every other comment line; the prefix is removed here.
TMPL = '''; Copyright 1991-2009 Mentor Graphics Corporation
;
; All Rights Reserved.
;
; THIS WORK CONTAINS TRADE SECRET AND PROPRIETARY INFORMATION WHICH IS THE PROPERTY OF
; MENTOR GRAPHICS CORPORATION OR ITS LICENSORS AND IS SUBJECT TO LICENSE TERMS.
;
[Library]
std = $MODEL_TECH/../std
ieee = $MODEL_TECH/../ieee
verilog = $MODEL_TECH/../verilog
vital2000 = $MODEL_TECH/../vital2000
std_developerskit = $MODEL_TECH/../std_developerskit
synopsys = $MODEL_TECH/../synopsys
modelsim_lib = $MODEL_TECH/../modelsim_lib
sv_std = $MODEL_TECH/../sv_std
; Altera Primitive libraries
;
; VHDL Section
;
altera_mf = $MODEL_TECH/../altera/vhdl/altera_mf
altera = $MODEL_TECH/../altera/vhdl/altera
altera_lnsim = $MODEL_TECH/../altera/vhdl/altera_lnsim
lpm = $MODEL_TECH/../altera/vhdl/220model
220model = $MODEL_TECH/../altera/vhdl/220model
maxii = $MODEL_TECH/../altera/vhdl/maxii
maxv = $MODEL_TECH/../altera/vhdl/maxv
fiftyfivenm = $MODEL_TECH/../altera/vhdl/fiftyfivenm
sgate = $MODEL_TECH/../altera/vhdl/sgate
arriaii = $MODEL_TECH/../altera/vhdl/arriaii
arriaii_hssi = $MODEL_TECH/../altera/vhdl/arriaii_hssi
arriaii_pcie_hip = $MODEL_TECH/../altera/vhdl/arriaii_pcie_hip
arriaiigz = $MODEL_TECH/../altera/vhdl/arriaiigz
arriaiigz_hssi = $MODEL_TECH/../altera/vhdl/arriaiigz_hssi
arriaiigz_pcie_hip = $MODEL_TECH/../altera/vhdl/arriaiigz_pcie_hip
stratixiv = $MODEL_TECH/../altera/vhdl/stratixiv
stratixiv_hssi = $MODEL_TECH/../altera/vhdl/stratixiv_hssi
stratixiv_pcie_hip = $MODEL_TECH/../altera/vhdl/stratixiv_pcie_hip
cycloneiv = $MODEL_TECH/../altera/vhdl/cycloneiv
cycloneiv_hssi = $MODEL_TECH/../altera/vhdl/cycloneiv_hssi
cycloneiv_pcie_hip = $MODEL_TECH/../altera/vhdl/cycloneiv_pcie_hip
cycloneive = $MODEL_TECH/../altera/vhdl/cycloneive
stratixv = $MODEL_TECH/../altera/vhdl/stratixv
stratixv_hssi = $MODEL_TECH/../altera/vhdl/stratixv_hssi
stratixv_pcie_hip = $MODEL_TECH/../altera/vhdl/stratixv_pcie_hip
arriavgz = $MODEL_TECH/../altera/vhdl/arriavgz
arriavgz_hssi = $MODEL_TECH/../altera/vhdl/arriavgz_hssi
arriavgz_pcie_hip = $MODEL_TECH/../altera/vhdl/arriavgz_pcie_hip
arriav = $MODEL_TECH/../altera/vhdl/arriav
cyclonev = $MODEL_TECH/../altera/vhdl/cyclonev
twentynm = $MODEL_TECH/../altera/vhdl/twentynm
twentynm_hssi = $MODEL_TECH/../altera/vhdl/twentynm_hssi
twentynm_hip = $MODEL_TECH/../altera/vhdl/twentynm_hip
cyclone10lp = $MODEL_TECH/../altera/vhdl/cyclone10lp
;
; Verilog Section
;
altera_mf_ver = $MODEL_TECH/../altera/verilog/altera_mf
altera_ver = $MODEL_TECH/../altera/verilog/altera
altera_lnsim_ver = $MODEL_TECH/../altera/verilog/altera_lnsim
lpm_ver = $MODEL_TECH/../altera/verilog/220model
220model_ver = $MODEL_TECH/../altera/verilog/220model
maxii_ver = $MODEL_TECH/../altera/verilog/maxii
maxv_ver = $MODEL_TECH/../altera/verilog/maxv
fiftyfivenm_ver = $MODEL_TECH/../altera/verilog/fiftyfivenm
sgate_ver = $MODEL_TECH/../altera/verilog/sgate
arriaii_ver = $MODEL_TECH/../altera/verilog/arriaii
arriaii_hssi_ver = $MODEL_TECH/../altera/verilog/arriaii_hssi
arriaii_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriaii_pcie_hip
arriaiigz_ver = $MODEL_TECH/../altera/verilog/arriaiigz
arriaiigz_hssi_ver = $MODEL_TECH/../altera/verilog/arriaiigz_hssi
arriaiigz_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriaiigz_pcie_hip
stratixiv_ver = $MODEL_TECH/../altera/verilog/stratixiv
stratixiv_hssi_ver = $MODEL_TECH/../altera/verilog/stratixiv_hssi
stratixiv_pcie_hip_ver = $MODEL_TECH/../altera/verilog/stratixiv_pcie_hip
stratixv_ver = $MODEL_TECH/../altera/verilog/stratixv
stratixv_hssi_ver = $MODEL_TECH/../altera/verilog/stratixv_hssi
stratixv_pcie_hip_ver = $MODEL_TECH/../altera/verilog/stratixv_pcie_hip
arriavgz_ver = $MODEL_TECH/../altera/verilog/arriavgz
arriavgz_hssi_ver = $MODEL_TECH/../altera/verilog/arriavgz_hssi
arriavgz_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriavgz_pcie_hip
arriav_ver = $MODEL_TECH/../altera/verilog/arriav
arriav_hssi_ver = $MODEL_TECH/../altera/verilog/arriav_hssi
arriav_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriav_pcie_hip
cyclonev_ver = $MODEL_TECH/../altera/verilog/cyclonev
cyclonev_hssi_ver = $MODEL_TECH/../altera/verilog/cyclonev_hssi
cyclonev_pcie_hip_ver = $MODEL_TECH/../altera/verilog/cyclonev_pcie_hip
cycloneiv_ver = $MODEL_TECH/../altera/verilog/cycloneiv
cycloneiv_hssi_ver = $MODEL_TECH/../altera/verilog/cycloneiv_hssi
cycloneiv_pcie_hip_ver = $MODEL_TECH/../altera/verilog/cycloneiv_pcie_hip
cycloneive_ver = $MODEL_TECH/../altera/verilog/cycloneive
twentynm_ver = $MODEL_TECH/../altera/verilog/twentynm
twentynm_hssi_ver = $MODEL_TECH/../altera/verilog/twentynm_hssi
twentynm_hip_ver = $MODEL_TECH/../altera/verilog/twentynm_hip
cyclone10lp_ver = $MODEL_TECH/../altera/verilog/cyclone10lp
work = work
[vcom]
; VHDL93 variable selects language version as the default.
; Default is VHDL-2002.
; Value of 0 or 1987 for VHDL-1987.
; Value of 1 or 1993 for VHDL-1993.
; Default or value of 2 or 2002 for VHDL-2002.
; Default or value of 3 or 2008 for VHDL-2008.
VHDL93 = 2008
; Show source line containing error. Default is off.
; Show_source = 1
; Turn off unbound-component warnings. Default is on.
; Show_Warning1 = 0
; Turn off process-without-a-wait-statement warnings. Default is on.
; Show_Warning2 = 0
; Turn off null-range warnings. Default is on.
; Show_Warning3 = 0
; Turn off no-space-in-time-literal warnings. Default is on.
; Show_Warning4 = 0
; Turn off multiple-drivers-on-unresolved-signal warnings. Default is on.
; Show_Warning5 = 0
; Turn off optimization for IEEE std_logic_1164 package. Default is on.
; Optimize_1164 = 0
; Turn on resolving of ambiguous function overloading in favor of the
; "explicit" function declaration (not the one automatically created by
; the compiler for each type declaration). Default is off.
; The .ini file has Explicit enabled so that std_logic_signed/unsigned
; will match the behavior of synthesis tools.
Explicit = 1
; Turn off acceleration of the VITAL packages. Default is to accelerate.
; NoVital = 1
; Turn off VITAL compliance checking. Default is checking on.
; NoVitalCheck = 1
; Ignore VITAL compliance checking errors. Default is to not ignore.
; IgnoreVitalErrors = 1
; Turn off VITAL compliance checking warnings. Default is to show warnings.
; Show_VitalChecksWarnings = 0
; Keep silent about case statement static warnings.
; Default is to give a warning.
; NoCaseStaticError = 1
; Keep silent about warnings caused by aggregates that are not locally static.
; Default is to give a warning.
; NoOthersStaticError = 1
; Turn off inclusion of debugging info within design units.
; Default is to include debugging info.
; NoDebug = 1
; Turn off "Loading..." messages. Default is messages on.
; Quiet = 1
; Turn on some limited synthesis rule compliance checking. Checks only:
; -- signals used (read) by a process must be in the sensitivity list
; CheckSynthesis = 1
; Activate optimizations on expressions that do not involve signals,
; waits, or function/procedure/task invocations. Default is off.
; ScalarOpts = 1
; Require the user to specify a configuration for all bindings,
; and do not generate a compile time default binding for the
; component. This will result in an elaboration error of
; 'component not bound' if the user fails to do so. Avoids the rare
; issue of a false dependency upon the unused default binding.
; RequireConfigForAllDefaultBinding = 1
; Inhibit range checking on subscripts of arrays. Range checking on
; scalars defined with subtypes is inhibited by default.
; NoIndexCheck = 1
; Inhibit range checks on all (implicit and explicit) assignments to
; scalar objects defined with subtypes.
; NoRangeCheck = 1
[vlog]
; Turn off inclusion of debugging info within design units.
; Default is to include debugging info.
; NoDebug = 1
; Turn off "loading..." messages. Default is messages on.
; Quiet = 1
; Turn on Verilog hazard checking (order-dependent accessing of global vars).
; Default is off.
; Hazard = 1
; Turn on converting regular Verilog identifiers to uppercase. Allows case
; insensitivity for module names. Default is no conversion.
; UpCase = 1
; Turn on incremental compilation of modules. Default is off.
; Incremental = 1
; Turns on lint-style checking.
; Show_Lint = 1
[vsim]
; Simulator resolution
; Set to fs, ps, ns, us, ms, or sec with optional prefix of 1, 10, or 100.
Resolution = ps
; User time unit for run commands
; Set to default, fs, ps, ns, us, ms, or sec. The default is to use the
; unit specified for Resolution. For example, if Resolution is 100ps,
; then UserTimeUnit defaults to ps.
; Should generally be set to default.
UserTimeUnit = default
; Default run length
RunLength = 100
; Maximum iterations that can be run without advancing simulation time
IterationLimit = 5000
; Directive to license manager:
; vhdl Immediately reserve a VHDL license
; vlog Immediately reserve a Verilog license
; plus Immediately reserve a VHDL and Verilog license
; nomgc Do not look for Mentor Graphics Licenses
; nomti Do not look for Model Technology Licenses
; noqueue Do not wait in the license queue when a license isn't available
; viewsim Try for viewer license but accept simulator license(s) instead
; of queuing for viewer license
; License = plus
; Stop the simulator after a VHDL/Verilog assertion message
; 0 = Note 1 = Warning 2 = Error 3 = Failure 4 = Fatal
BreakOnAssertion = 3
; Assertion Message Format
; %S - Severity Level
; %R - Report Message
; %T - Time of assertion
; %D - Delta
; %I - Instance or Region pathname (if available)
; %% - print '%' character
; AssertionFormat = "** %S: %R\n Time: %T Iteration: %D%I\n"
; Assertion File - alternate file for storing VHDL/Verilog assertion messages
; AssertFile = assert.log
; Default radix for all windows and commands...
; Set to symbolic, ascii, binary, octal, decimal, hex, unsigned
DefaultRadix = symbolic
; VSIM Startup command
; Startup = do startup.do
; File for saving command transcript
TranscriptFile = transcript
; File for saving command history
; CommandHistory = cmdhist.log
; Specify whether paths in simulator commands should be described
; in VHDL or Verilog format.
; For VHDL, PathSeparator = /
; For Verilog, PathSeparator = .
; Must not be the same character as DatasetSeparator.
PathSeparator = /
; Specify the dataset separator for fully rooted contexts.
; The default is ':'. For example, sim:/top
; Must not be the same character as PathSeparator.
DatasetSeparator = :
; Disable VHDL assertion messages
; IgnoreNote = 1
; IgnoreWarning = 1
; IgnoreError = 1
; IgnoreFailure = 1
; Default force kind. May be freeze, drive, deposit, or default
; or in other terms, fixed, wired, or charged.
; A value of "default" will use the signal kind to determine the
; force kind, drive for resolved signals, freeze for unresolved signals
; DefaultForceKind = freeze
; If zero, open files when elaborated; otherwise, open files on
; first read or write. Default is 0.
; DelayFileOpen = 1
; Control VHDL files opened for write.
; 0 = Buffered, 1 = Unbuffered
UnbufferedOutput = 0
; Control the number of VHDL files open concurrently.
; This number should always be less than the current ulimit
; setting for max file descriptors.
; 0 = unlimited
ConcurrentFileLimit = 40
; Control the number of hierarchical regions displayed as
; part of a signal name shown in the Wave window.
; A value of zero tells VSIM to display the full name.
; The default is 0.
; WaveSignalNameWidth = 0
; Turn off warnings from the std_logic_arith, std_logic_unsigned
; and std_logic_signed packages.
; StdArithNoWarnings = 1
; Turn off warnings from the IEEE numeric_std and numeric_bit packages.
; NumericStdNoWarnings = 1
; Control the format of the (VHDL) FOR generate statement label
; for each iteration. Do not quote it.
; The format string here must contain the conversion codes %s and %d,
; in that order, and no other conversion codes. The %s represents
; the generate_label; the %d represents the generate parameter value
; at a particular generate iteration (this is the position number if
; the generate parameter is of an enumeration type). Embedded whitespace
; is allowed (but discouraged); leading and trailing whitespace is ignored.
; Application of the format must result in a unique scope name over all
; such names in the design so that name lookup can function properly.
; GenerateFormat = %s__%d
; Specify whether checkpoint files should be compressed.
; The default is 1 (compressed).
; CheckpointCompressMode = 0
; List of dynamically loaded objects for Verilog PLI applications
; Veriuser = veriuser.sl
; Specify default options for the restart command. Options can be one
; or more of: -force -nobreakpoint -nolist -nolog -nowave
; DefaultRestartOptions = -force
; HP-UX 10.20 ONLY - Enable memory locking to speed up large designs
; (> 500 megabyte memory footprint). Default is disabled.
; Specify number of megabytes to lock.
; LockedMemory = 1000
; Turn on (1) or off (0) WLF file compression.
; The default is 1 (compress WLF file).
; WLFCompress = 0
; Specify whether to save all design hierarchy (1) in the WLF file
; or only regions containing logged signals (0).
; The default is 0 (save only regions with logged signals).
; WLFSaveAllRegions = 1
; WLF file time limit. Limit WLF file by time, as closely as possible,
; to the specified amount of simulation time. When the limit is exceeded
; the earliest times get truncated from the file.
; If both time and size limits are specified the most restrictive is used.
; UserTimeUnits are used if time units are not specified.
; The default is 0 (no limit). Example: WLFTimeLimit = {{100 ms}}
; WLFTimeLimit = 0
; WLF file size limit. Limit WLF file size, as closely as possible,
; to the specified number of megabytes. If both time and size limits
; are specified then the most restrictive is used.
; The default is 0 (no limit).
; WLFSizeLimit = 1000
; Specify whether or not a WLF file should be deleted when the
; simulation ends. A value of 1 will cause the WLF file to be deleted.
; The default is 0 (do not delete WLF file when simulation ends).
; WLFDeleteOnQuit = 1
; Automatic SDF compilation
; Disables automatic compilation of SDF files in flows that support it.
; Default is on, uncomment to turn off.
; NoAutoSDFCompile = 1
[lmc]
[msg_system]
; Change a message severity or suppress a message.
; The format is: <msg directive> = <msg number>[,<msg number>...]
; Examples:
; note = 3009
; warning = 3033
; error = 3010,3016
; fatal = 3016,3033
; suppress = 3009,3016,3043
; The command verror <msg number> can be used to get the complete
; description of a message.
; Control transcripting of elaboration/runtime messages.
; The default is to have messages appear in the transcript and
; recorded in the wlf file (messages that are recorded in the
; wlf file can be viewed in the MsgViewer). The other settings
; are to send messages only to the transcript or only to the
; wlf file. The valid values are
; both {{default}}
; tran {{transcript only}}
; wlf {{wlf file only}}
; msgmode = both
[Project]
; Warning -- Do not edit the project properties directly.
; Property names are dynamic in nature and property
; values have special syntax. Changing property data directly
; can result in a corrupt MPF file. All project properties
; can be modified through project window dialogs.
Project_Version = 6
Project_DefaultLib = work
Project_SortMethod = unused
{0}
{1}
Project_Sim_Count = 0
Project_Folder_Count = 0
Echo_Compile_Output = 0
Save_Compile_Report = 1
Project_Opt_Count = 0
ForceSoftPaths = 0
ProjectStatusDelay = 5000
VERILOG_DoubleClick = Edit
VERILOG_CustomDoubleClick =
SYSTEMVERILOG_DoubleClick = Edit
SYSTEMVERILOG_CustomDoubleClick =
VHDL_DoubleClick = Edit
VHDL_CustomDoubleClick =
PSL_DoubleClick = Edit
PSL_CustomDoubleClick =
TEXT_DoubleClick = Edit
TEXT_CustomDoubleClick =
SYSTEMC_DoubleClick = Edit
SYSTEMC_CustomDoubleClick =
TCL_DoubleClick = Edit
TCL_CustomDoubleClick =
MACRO_DoubleClick = Edit
MACRO_CustomDoubleClick =
VCD_DoubleClick = Edit
VCD_CustomDoubleClick =
SDF_DoubleClick = Edit
SDF_CustomDoubleClick =
XML_DoubleClick = Edit
XML_CustomDoubleClick =
LOGFILE_DoubleClick = Edit
LOGFILE_CustomDoubleClick =
UCDB_DoubleClick = Edit
UCDB_CustomDoubleClick =
TDB_DoubleClick = Edit
TDB_CustomDoubleClick =
UPF_DoubleClick = Edit
UPF_CustomDoubleClick =
PCF_DoubleClick = Edit
PCF_CustomDoubleClick =
PROJECT_DoubleClick = Edit
PROJECT_CustomDoubleClick =
VRM_DoubleClick = Edit
VRM_CustomDoubleClick =
DEBUGDATABASE_DoubleClick = Edit
DEBUGDATABASE_CustomDoubleClick =
DEBUGARCHIVE_DoubleClick = Edit
DEBUGARCHIVE_CustomDoubleClick =
Project_Major_Version = 2020
Project_Minor_Version = 1
'''
main()
| #!/usr/bin/env python3
import os.path
import os
# Pipeline-stage directories (relative to this script) that are scanned for
# VHDL sources.  '.' also picks up any *.vhd/*.vhdl next to the script itself.
VHDL_DIRS = ['FetchDecode',
             'RegisterRead',
             'Execution',
             'Memory',
             'WriteBack',
             'Buffers',
             '.',]
# Directory containing this script.  NOTE(review): os.path.dirname(__file__)
# is '' when the script is invoked from its own directory by bare filename.
PROJECT_DIR = os.path.dirname(__file__)
def main():
    """Generate a ModelSim project file ('mips.mpf') listing every VHDL source.

    Scans each directory in VHDL_DIRS (relative to PROJECT_DIR) for *.vhd /
    *.vhdl files, sorts them for a deterministic compile order, renders them
    into the TMPL project template, and writes the result to 'mips.mpf' in
    the current working directory.
    """
    # Collect absolute paths of all VHDL sources.  Join through PROJECT_DIR so
    # the paths are correct even when the script is run from another CWD — the
    # original joined only (vhdl_dir, vhdl), which produced wrong absolute
    # paths unless CWD happened to equal PROJECT_DIR, while the listdir() call
    # below already (correctly) went through PROJECT_DIR.
    vhdls = [os.path.abspath(os.path.join(PROJECT_DIR, vhdl_dir, vhdl))
             for vhdl_dir in VHDL_DIRS
             for vhdl in os.listdir(os.path.join(PROJECT_DIR, vhdl_dir))
             if vhdl.endswith(('.vhd', '.vhdl'))]
    # Stable order => reproducible compile_order indices in the .mpf.
    vhdls.sort()
    vhdl_count = FILE_COUNT.format(len(vhdls))
    vhdl_files = '\n'.join(NEW_FILE.format(i, vhdl)
                           for (i, vhdl) in enumerate(vhdls))
    mips_mpf = TMPL.format(vhdl_count, vhdl_files)
    with open('mips.mpf', 'w') as file:
        file.write(mips_mpf)
# Template for the .mpf "Project_Files_Count" line; {0} is the file count.
FILE_COUNT = '''Project_Files_Count = {0}'''
# Per-file project entry: {0} is the compile-order index, {1} the absolute
# file path.  Doubled braces ({{...}}) become literal braces after .format().
NEW_FILE='''Project_File_{0} = {1}\nProject_File_P_{0} = vhdl_novitalcheck 0 file_type vhdl group_id 0 cover_nofec 0 vhdl_nodebug 0 vhdl_1164 1 vhdl_noload 0 vhdl_synth 0 vhdl_enable0In 0 folder {{Top Level}} last_compile 0 vhdl_disableopt 0 vhdl_vital 0 cover_excludedefault 0 vhdl_warn1 1 vhdl_warn2 1 vhdl_explicit 1 vhdl_showsource 0 vhdl_warn3 1 cover_covercells 0 vhdl_0InOptions {{}} vhdl_warn4 1 voptflow 1 cover_optlevel 3 vhdl_options {{}} vhdl_warn5 1 toggle - ood 1 cover_noshort 0 compile_to work compile_order {0} cover_nosub 0 dont_compile 0 vhdl_use93 2008'''
TMPL = '''; Copyright 1991-2009 Mentor Graphics Corporation
;
; All Rights Reserved.
;
; THIS WORK CONTAINS TRADE SECRET AND PROPRIETARY INFORMATION WHICH IS THE PROPERTY OF
; MENTOR GRAPHICS CORPORATION OR ITS LICENSORS AND IS SUBJECT TO LICENSE TERMS.
;
[Library]
std = $MODEL_TECH/../std
ieee = $MODEL_TECH/../ieee
verilog = $MODEL_TECH/../verilog
vital2000 = $MODEL_TECH/../vital2000
std_developerskit = $MODEL_TECH/../std_developerskit
synopsys = $MODEL_TECH/../synopsys
modelsim_lib = $MODEL_TECH/../modelsim_lib
sv_std = $MODEL_TECH/../sv_std
; Altera Primitive libraries
;
; VHDL Section
;
altera_mf = $MODEL_TECH/../altera/vhdl/altera_mf
altera = $MODEL_TECH/../altera/vhdl/altera
altera_lnsim = $MODEL_TECH/../altera/vhdl/altera_lnsim
lpm = $MODEL_TECH/../altera/vhdl/220model
220model = $MODEL_TECH/../altera/vhdl/220model
maxii = $MODEL_TECH/../altera/vhdl/maxii
maxv = $MODEL_TECH/../altera/vhdl/maxv
fiftyfivenm = $MODEL_TECH/../altera/vhdl/fiftyfivenm
sgate = $MODEL_TECH/../altera/vhdl/sgate
arriaii = $MODEL_TECH/../altera/vhdl/arriaii
arriaii_hssi = $MODEL_TECH/../altera/vhdl/arriaii_hssi
arriaii_pcie_hip = $MODEL_TECH/../altera/vhdl/arriaii_pcie_hip
arriaiigz = $MODEL_TECH/../altera/vhdl/arriaiigz
arriaiigz_hssi = $MODEL_TECH/../altera/vhdl/arriaiigz_hssi
arriaiigz_pcie_hip = $MODEL_TECH/../altera/vhdl/arriaiigz_pcie_hip
stratixiv = $MODEL_TECH/../altera/vhdl/stratixiv
stratixiv_hssi = $MODEL_TECH/../altera/vhdl/stratixiv_hssi
stratixiv_pcie_hip = $MODEL_TECH/../altera/vhdl/stratixiv_pcie_hip
cycloneiv = $MODEL_TECH/../altera/vhdl/cycloneiv
cycloneiv_hssi = $MODEL_TECH/../altera/vhdl/cycloneiv_hssi
cycloneiv_pcie_hip = $MODEL_TECH/../altera/vhdl/cycloneiv_pcie_hip
cycloneive = $MODEL_TECH/../altera/vhdl/cycloneive
stratixv = $MODEL_TECH/../altera/vhdl/stratixv
stratixv_hssi = $MODEL_TECH/../altera/vhdl/stratixv_hssi
stratixv_pcie_hip = $MODEL_TECH/../altera/vhdl/stratixv_pcie_hip
arriavgz = $MODEL_TECH/../altera/vhdl/arriavgz
arriavgz_hssi = $MODEL_TECH/../altera/vhdl/arriavgz_hssi
arriavgz_pcie_hip = $MODEL_TECH/../altera/vhdl/arriavgz_pcie_hip
arriav = $MODEL_TECH/../altera/vhdl/arriav
cyclonev = $MODEL_TECH/../altera/vhdl/cyclonev
twentynm = $MODEL_TECH/../altera/vhdl/twentynm
twentynm_hssi = $MODEL_TECH/../altera/vhdl/twentynm_hssi
twentynm_hip = $MODEL_TECH/../altera/vhdl/twentynm_hip
cyclone10lp = $MODEL_TECH/../altera/vhdl/cyclone10lp
;
; Verilog Section
;
altera_mf_ver = $MODEL_TECH/../altera/verilog/altera_mf
altera_ver = $MODEL_TECH/../altera/verilog/altera
altera_lnsim_ver = $MODEL_TECH/../altera/verilog/altera_lnsim
lpm_ver = $MODEL_TECH/../altera/verilog/220model
220model_ver = $MODEL_TECH/../altera/verilog/220model
maxii_ver = $MODEL_TECH/../altera/verilog/maxii
maxv_ver = $MODEL_TECH/../altera/verilog/maxv
fiftyfivenm_ver = $MODEL_TECH/../altera/verilog/fiftyfivenm
sgate_ver = $MODEL_TECH/../altera/verilog/sgate
arriaii_ver = $MODEL_TECH/../altera/verilog/arriaii
arriaii_hssi_ver = $MODEL_TECH/../altera/verilog/arriaii_hssi
arriaii_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriaii_pcie_hip
arriaiigz_ver = $MODEL_TECH/../altera/verilog/arriaiigz
arriaiigz_hssi_ver = $MODEL_TECH/../altera/verilog/arriaiigz_hssi
arriaiigz_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriaiigz_pcie_hip
stratixiv_ver = $MODEL_TECH/../altera/verilog/stratixiv
stratixiv_hssi_ver = $MODEL_TECH/../altera/verilog/stratixiv_hssi
stratixiv_pcie_hip_ver = $MODEL_TECH/../altera/verilog/stratixiv_pcie_hip
stratixv_ver = $MODEL_TECH/../altera/verilog/stratixv
stratixv_hssi_ver = $MODEL_TECH/../altera/verilog/stratixv_hssi
stratixv_pcie_hip_ver = $MODEL_TECH/../altera/verilog/stratixv_pcie_hip
arriavgz_ver = $MODEL_TECH/../altera/verilog/arriavgz
arriavgz_hssi_ver = $MODEL_TECH/../altera/verilog/arriavgz_hssi
arriavgz_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriavgz_pcie_hip
arriav_ver = $MODEL_TECH/../altera/verilog/arriav
arriav_hssi_ver = $MODEL_TECH/../altera/verilog/arriav_hssi
arriav_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriav_pcie_hip
cyclonev_ver = $MODEL_TECH/../altera/verilog/cyclonev
cyclonev_hssi_ver = $MODEL_TECH/../altera/verilog/cyclonev_hssi
cyclonev_pcie_hip_ver = $MODEL_TECH/../altera/verilog/cyclonev_pcie_hip
cycloneiv_ver = $MODEL_TECH/../altera/verilog/cycloneiv
cycloneiv_hssi_ver = $MODEL_TECH/../altera/verilog/cycloneiv_hssi
cycloneiv_pcie_hip_ver = $MODEL_TECH/../altera/verilog/cycloneiv_pcie_hip
cycloneive_ver = $MODEL_TECH/../altera/verilog/cycloneive
twentynm_ver = $MODEL_TECH/../altera/verilog/twentynm
twentynm_hssi_ver = $MODEL_TECH/../altera/verilog/twentynm_hssi
twentynm_hip_ver = $MODEL_TECH/../altera/verilog/twentynm_hip
cyclone10lp_ver = $MODEL_TECH/../altera/verilog/cyclone10lp
work = work
[vcom]
; VHDL93 variable selects language version as the default.
; Default is VHDL-2002.
; Value of 0 or 1987 for VHDL-1987.
; Value of 1 or 1993 for VHDL-1993.
; Default or value of 2 or 2002 for VHDL-2002.
; Default or value of 3 or 2008 for VHDL-2008.
VHDL93 = 2008
; Show source line containing error. Default is off.
; Show_source = 1
; Turn off unbound-component warnings. Default is on.
; Show_Warning1 = 0
; Turn off process-without-a-wait-statement warnings. Default is on.
; Show_Warning2 = 0
; Turn off null-range warnings. Default is on.
; Show_Warning3 = 0
; Turn off no-space-in-time-literal warnings. Default is on.
; Show_Warning4 = 0
; Turn off multiple-drivers-on-unresolved-signal warnings. Default is on.
; Show_Warning5 = 0
; Turn off optimization for IEEE std_logic_1164 package. Default is on.
; Optimize_1164 = 0
; Turn on resolving of ambiguous function overloading in favor of the
; "explicit" function declaration (not the one automatically created by
; the compiler for each type declaration). Default is off.
; The .ini file has Explicit enabled so that std_logic_signed/unsigned
; will match the behavior of synthesis tools.
Explicit = 1
; Turn off acceleration of the VITAL packages. Default is to accelerate.
; NoVital = 1
; Turn off VITAL compliance checking. Default is checking on.
; NoVitalCheck = 1
; Ignore VITAL compliance checking errors. Default is to not ignore.
; IgnoreVitalErrors = 1
; Turn off VITAL compliance checking warnings. Default is to show warnings.
; Show_VitalChecksWarnings = 0
; Keep silent about case statement static warnings.
; Default is to give a warning.
; NoCaseStaticError = 1
; Keep silent about warnings caused by aggregates that are not locally static.
; Default is to give a warning.
; NoOthersStaticError = 1
; Turn off inclusion of debugging info within design units.
; Default is to include debugging info.
; NoDebug = 1
; Turn off "Loading..." messages. Default is messages on.
; Quiet = 1
; Turn on some limited synthesis rule compliance checking. Checks only:
; -- signals used (read) by a process must be in the sensitivity list
; CheckSynthesis = 1
; Activate optimizations on expressions that do not involve signals,
; waits, or function/procedure/task invocations. Default is off.
; ScalarOpts = 1
; Require the user to specify a configuration for all bindings,
; and do not generate a compile time default binding for the
; component. This will result in an elaboration error of
; 'component not bound' if the user fails to do so. Avoids the rare
; issue of a false dependency upon the unused default binding.
; RequireConfigForAllDefaultBinding = 1
; Inhibit range checking on subscripts of arrays. Range checking on
; scalars defined with subtypes is inhibited by default.
; NoIndexCheck = 1
; Inhibit range checks on all (implicit and explicit) assignments to
; scalar objects defined with subtypes.
; NoRangeCheck = 1
[vlog]
; Turn off inclusion of debugging info within design units.
; Default is to include debugging info.
; NoDebug = 1
; Turn off "loading..." messages. Default is messages on.
; Quiet = 1
; Turn on Verilog hazard checking (order-dependent accessing of global vars).
; Default is off.
; Hazard = 1
; Turn on converting regular Verilog identifiers to uppercase. Allows case
; insensitivity for module names. Default is no conversion.
; UpCase = 1
; Turn on incremental compilation of modules. Default is off.
; Incremental = 1
; Turns on lint-style checking.
; Show_Lint = 1
[vsim]
; Simulator resolution
; Set to fs, ps, ns, us, ms, or sec with optional prefix of 1, 10, or 100.
Resolution = ps
; User time unit for run commands
; Set to default, fs, ps, ns, us, ms, or sec. The default is to use the
; unit specified for Resolution. For example, if Resolution is 100ps,
; then UserTimeUnit defaults to ps.
; Should generally be set to default.
UserTimeUnit = default
; Default run length
RunLength = 100
; Maximum iterations that can be run without advancing simulation time
IterationLimit = 5000
; Directive to license manager:
; vhdl Immediately reserve a VHDL license
; vlog Immediately reserve a Verilog license
; plus Immediately reserve a VHDL and Verilog license
; nomgc Do not look for Mentor Graphics Licenses
; nomti Do not look for Model Technology Licenses
; noqueue Do not wait in the license queue when a license isn't available
; viewsim Try for viewer license but accept simulator license(s) instead
; of queuing for viewer license
; License = plus
; Stop the simulator after a VHDL/Verilog assertion message
; 0 = Note 1 = Warning 2 = Error 3 = Failure 4 = Fatal
BreakOnAssertion = 3
; Assertion Message Format
; %S - Severity Level
; %R - Report Message
; %T - Time of assertion
; %D - Delta
; %I - Instance or Region pathname (if available)
; %% - print '%' character
; AssertionFormat = "** %S: %R\n Time: %T Iteration: %D%I\n"
; Assertion File - alternate file for storing VHDL/Verilog assertion messages
; AssertFile = assert.log
; Default radix for all windows and commands...
; Set to symbolic, ascii, binary, octal, decimal, hex, unsigned
DefaultRadix = symbolic
; VSIM Startup command
; Startup = do startup.do
; File for saving command transcript
TranscriptFile = transcript
; File for saving command history
; CommandHistory = cmdhist.log
; Specify whether paths in simulator commands should be described
; in VHDL or Verilog format.
; For VHDL, PathSeparator = /
; For Verilog, PathSeparator = .
; Must not be the same character as DatasetSeparator.
PathSeparator = /
; Specify the dataset separator for fully rooted contexts.
; The default is ':'. For example, sim:/top
; Must not be the same character as PathSeparator.
DatasetSeparator = :
; Disable VHDL assertion messages
; IgnoreNote = 1
; IgnoreWarning = 1
; IgnoreError = 1
; IgnoreFailure = 1
; Default force kind. May be freeze, drive, deposit, or default
; or in other terms, fixed, wired, or charged.
; A value of "default" will use the signal kind to determine the
; force kind, drive for resolved signals, freeze for unresolved signals
; DefaultForceKind = freeze
; If zero, open files when elaborated; otherwise, open files on
; first read or write. Default is 0.
; DelayFileOpen = 1
; Control VHDL files opened for write.
; 0 = Buffered, 1 = Unbuffered
UnbufferedOutput = 0
; Control the number of VHDL files open concurrently.
; This number should always be less than the current ulimit
; setting for max file descriptors.
; 0 = unlimited
ConcurrentFileLimit = 40
; Control the number of hierarchical regions displayed as
; part of a signal name shown in the Wave window.
; A value of zero tells VSIM to display the full name.
; The default is 0.
; WaveSignalNameWidth = 0
; Turn off warnings from the std_logic_arith, std_logic_unsigned
; and std_logic_signed packages.
; StdArithNoWarnings = 1
; Turn off warnings from the IEEE numeric_std and numeric_bit packages.
; NumericStdNoWarnings = 1
; Control the format of the (VHDL) FOR generate statement label
; for each iteration. Do not quote it.
; The format string here must contain the conversion codes %s and %d,
; in that order, and no other conversion codes. The %s represents
; the generate_label; the %d represents the generate parameter value
; at a particular generate iteration (this is the position number if
; the generate parameter is of an enumeration type). Embedded whitespace
; is allowed (but discouraged); leading and trailing whitespace is ignored.
; Application of the format must result in a unique scope name over all
; such names in the design so that name lookup can function properly.
; GenerateFormat = %s__%d
; Specify whether checkpoint files should be compressed.
; The default is 1 (compressed).
; CheckpointCompressMode = 0
; List of dynamically loaded objects for Verilog PLI applications
; Veriuser = veriuser.sl
; Specify default options for the restart command. Options can be one
; or more of: -force -nobreakpoint -nolist -nolog -nowave
; DefaultRestartOptions = -force
; HP-UX 10.20 ONLY - Enable memory locking to speed up large designs
; (> 500 megabyte memory footprint). Default is disabled.
; Specify number of megabytes to lock.
; LockedMemory = 1000
; Turn on (1) or off (0) WLF file compression.
; The default is 1 (compress WLF file).
; WLFCompress = 0
; Specify whether to save all design hierarchy (1) in the WLF file
; or only regions containing logged signals (0).
; The default is 0 (save only regions with logged signals).
; WLFSaveAllRegions = 1
; WLF file time limit. Limit WLF file by time, as closely as possible,
; to the specified amount of simulation time. When the limit is exceeded
; the earliest times get truncated from the file.
; If both time and size limits are specified the most restrictive is used.
; UserTimeUnits are used if time units are not specified.
; The default is 0 (no limit). Example: WLFTimeLimit = {{100 ms}}
; WLFTimeLimit = 0
; WLF file size limit. Limit WLF file size, as closely as possible,
; to the specified number of megabytes. If both time and size limits
; are specified then the most restrictive is used.
; The default is 0 (no limit).
; WLFSizeLimit = 1000
; Specify whether or not a WLF file should be deleted when the
; simulation ends. A value of 1 will cause the WLF file to be deleted.
; The default is 0 (do not delete WLF file when simulation ends).
; WLFDeleteOnQuit = 1
; Automatic SDF compilation
; Disables automatic compilation of SDF files in flows that support it.
; Default is on, uncomment to turn off.
; NoAutoSDFCompile = 1
[lmc]
[msg_system]
; Change a message severity or suppress a message.
; The format is: <msg directive> = <msg number>[,<msg number>...]
; Examples:
; note = 3009
; warning = 3033
; error = 3010,3016
; fatal = 3016,3033
; suppress = 3009,3016,3043
; The command verror <msg number> can be used to get the complete
; description of a message.
; Control transcripting of elaboration/runtime messages.
; The default is to have messages appear in the transcript and
; recorded in the wlf file (messages that are recorded in the
; wlf file can be viewed in the MsgViewer). The other settings
; are to send messages only to the transcript or only to the
; wlf file. The valid values are
; both {{default}}
; tran {{transcript only}}
; wlf {{wlf file only}}
; msgmode = both
[Project]
** Warning: ; Warning -- Do not edit the project properties directly.
; Property names are dynamic in nature and property
; values have special syntax. Changing property data directly
; can result in a corrupt MPF file. All project properties
; can be modified through project window dialogs.
Project_Version = 6
Project_DefaultLib = work
Project_SortMethod = unused
{0}
{1}
Project_Sim_Count = 0
Project_Folder_Count = 0
Echo_Compile_Output = 0
Save_Compile_Report = 1
Project_Opt_Count = 0
ForceSoftPaths = 0
ProjectStatusDelay = 5000
VERILOG_DoubleClick = Edit
VERILOG_CustomDoubleClick =
SYSTEMVERILOG_DoubleClick = Edit
SYSTEMVERILOG_CustomDoubleClick =
VHDL_DoubleClick = Edit
VHDL_CustomDoubleClick =
PSL_DoubleClick = Edit
PSL_CustomDoubleClick =
TEXT_DoubleClick = Edit
TEXT_CustomDoubleClick =
SYSTEMC_DoubleClick = Edit
SYSTEMC_CustomDoubleClick =
TCL_DoubleClick = Edit
TCL_CustomDoubleClick =
MACRO_DoubleClick = Edit
MACRO_CustomDoubleClick =
VCD_DoubleClick = Edit
VCD_CustomDoubleClick =
SDF_DoubleClick = Edit
SDF_CustomDoubleClick =
XML_DoubleClick = Edit
XML_CustomDoubleClick =
LOGFILE_DoubleClick = Edit
LOGFILE_CustomDoubleClick =
UCDB_DoubleClick = Edit
UCDB_CustomDoubleClick =
TDB_DoubleClick = Edit
TDB_CustomDoubleClick =
UPF_DoubleClick = Edit
UPF_CustomDoubleClick =
PCF_DoubleClick = Edit
PCF_CustomDoubleClick =
PROJECT_DoubleClick = Edit
PROJECT_CustomDoubleClick =
VRM_DoubleClick = Edit
VRM_CustomDoubleClick =
DEBUGDATABASE_DoubleClick = Edit
DEBUGDATABASE_CustomDoubleClick =
DEBUGARCHIVE_DoubleClick = Edit
DEBUGARCHIVE_CustomDoubleClick =
Project_Major_Version = 2020
Project_Minor_Version = 1
'''
main()
| en | 0.566698 | #!/usr/bin/env python3 Project_Files_Count = {0} Project_File_{0} = {1}\nProject_File_P_{0} = vhdl_novitalcheck 0 file_type vhdl group_id 0 cover_nofec 0 vhdl_nodebug 0 vhdl_1164 1 vhdl_noload 0 vhdl_synth 0 vhdl_enable0In 0 folder {{Top Level}} last_compile 0 vhdl_disableopt 0 vhdl_vital 0 cover_excludedefault 0 vhdl_warn1 1 vhdl_warn2 1 vhdl_explicit 1 vhdl_showsource 0 vhdl_warn3 1 cover_covercells 0 vhdl_0InOptions {{}} vhdl_warn4 1 voptflow 1 cover_optlevel 3 vhdl_options {{}} vhdl_warn5 1 toggle - ood 1 cover_noshort 0 compile_to work compile_order {0} cover_nosub 0 dont_compile 0 vhdl_use93 2008 ; Copyright 1991-2009 Mentor Graphics Corporation ; ; All Rights Reserved. ; ; THIS WORK CONTAINS TRADE SECRET AND PROPRIETARY INFORMATION WHICH IS THE PROPERTY OF ; MENTOR GRAPHICS CORPORATION OR ITS LICENSORS AND IS SUBJECT TO LICENSE TERMS. ; [Library] std = $MODEL_TECH/../std ieee = $MODEL_TECH/../ieee verilog = $MODEL_TECH/../verilog vital2000 = $MODEL_TECH/../vital2000 std_developerskit = $MODEL_TECH/../std_developerskit synopsys = $MODEL_TECH/../synopsys modelsim_lib = $MODEL_TECH/../modelsim_lib sv_std = $MODEL_TECH/../sv_std ; Altera Primitive libraries ; ; VHDL Section ; altera_mf = $MODEL_TECH/../altera/vhdl/altera_mf altera = $MODEL_TECH/../altera/vhdl/altera altera_lnsim = $MODEL_TECH/../altera/vhdl/altera_lnsim lpm = $MODEL_TECH/../altera/vhdl/220model 220model = $MODEL_TECH/../altera/vhdl/220model maxii = $MODEL_TECH/../altera/vhdl/maxii maxv = $MODEL_TECH/../altera/vhdl/maxv fiftyfivenm = $MODEL_TECH/../altera/vhdl/fiftyfivenm sgate = $MODEL_TECH/../altera/vhdl/sgate arriaii = $MODEL_TECH/../altera/vhdl/arriaii arriaii_hssi = $MODEL_TECH/../altera/vhdl/arriaii_hssi arriaii_pcie_hip = $MODEL_TECH/../altera/vhdl/arriaii_pcie_hip arriaiigz = $MODEL_TECH/../altera/vhdl/arriaiigz arriaiigz_hssi = $MODEL_TECH/../altera/vhdl/arriaiigz_hssi arriaiigz_pcie_hip = $MODEL_TECH/../altera/vhdl/arriaiigz_pcie_hip stratixiv = 
$MODEL_TECH/../altera/vhdl/stratixiv stratixiv_hssi = $MODEL_TECH/../altera/vhdl/stratixiv_hssi stratixiv_pcie_hip = $MODEL_TECH/../altera/vhdl/stratixiv_pcie_hip cycloneiv = $MODEL_TECH/../altera/vhdl/cycloneiv cycloneiv_hssi = $MODEL_TECH/../altera/vhdl/cycloneiv_hssi cycloneiv_pcie_hip = $MODEL_TECH/../altera/vhdl/cycloneiv_pcie_hip cycloneive = $MODEL_TECH/../altera/vhdl/cycloneive stratixv = $MODEL_TECH/../altera/vhdl/stratixv stratixv_hssi = $MODEL_TECH/../altera/vhdl/stratixv_hssi stratixv_pcie_hip = $MODEL_TECH/../altera/vhdl/stratixv_pcie_hip arriavgz = $MODEL_TECH/../altera/vhdl/arriavgz arriavgz_hssi = $MODEL_TECH/../altera/vhdl/arriavgz_hssi arriavgz_pcie_hip = $MODEL_TECH/../altera/vhdl/arriavgz_pcie_hip arriav = $MODEL_TECH/../altera/vhdl/arriav cyclonev = $MODEL_TECH/../altera/vhdl/cyclonev twentynm = $MODEL_TECH/../altera/vhdl/twentynm twentynm_hssi = $MODEL_TECH/../altera/vhdl/twentynm_hssi twentynm_hip = $MODEL_TECH/../altera/vhdl/twentynm_hip cyclone10lp = $MODEL_TECH/../altera/vhdl/cyclone10lp ; ; Verilog Section ; altera_mf_ver = $MODEL_TECH/../altera/verilog/altera_mf altera_ver = $MODEL_TECH/../altera/verilog/altera altera_lnsim_ver = $MODEL_TECH/../altera/verilog/altera_lnsim lpm_ver = $MODEL_TECH/../altera/verilog/220model 220model_ver = $MODEL_TECH/../altera/verilog/220model maxii_ver = $MODEL_TECH/../altera/verilog/maxii maxv_ver = $MODEL_TECH/../altera/verilog/maxv fiftyfivenm_ver = $MODEL_TECH/../altera/verilog/fiftyfivenm sgate_ver = $MODEL_TECH/../altera/verilog/sgate arriaii_ver = $MODEL_TECH/../altera/verilog/arriaii arriaii_hssi_ver = $MODEL_TECH/../altera/verilog/arriaii_hssi arriaii_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriaii_pcie_hip arriaiigz_ver = $MODEL_TECH/../altera/verilog/arriaiigz arriaiigz_hssi_ver = $MODEL_TECH/../altera/verilog/arriaiigz_hssi arriaiigz_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriaiigz_pcie_hip stratixiv_ver = $MODEL_TECH/../altera/verilog/stratixiv stratixiv_hssi_ver = 
$MODEL_TECH/../altera/verilog/stratixiv_hssi stratixiv_pcie_hip_ver = $MODEL_TECH/../altera/verilog/stratixiv_pcie_hip stratixv_ver = $MODEL_TECH/../altera/verilog/stratixv stratixv_hssi_ver = $MODEL_TECH/../altera/verilog/stratixv_hssi stratixv_pcie_hip_ver = $MODEL_TECH/../altera/verilog/stratixv_pcie_hip arriavgz_ver = $MODEL_TECH/../altera/verilog/arriavgz arriavgz_hssi_ver = $MODEL_TECH/../altera/verilog/arriavgz_hssi arriavgz_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriavgz_pcie_hip arriav_ver = $MODEL_TECH/../altera/verilog/arriav arriav_hssi_ver = $MODEL_TECH/../altera/verilog/arriav_hssi arriav_pcie_hip_ver = $MODEL_TECH/../altera/verilog/arriav_pcie_hip cyclonev_ver = $MODEL_TECH/../altera/verilog/cyclonev cyclonev_hssi_ver = $MODEL_TECH/../altera/verilog/cyclonev_hssi cyclonev_pcie_hip_ver = $MODEL_TECH/../altera/verilog/cyclonev_pcie_hip cycloneiv_ver = $MODEL_TECH/../altera/verilog/cycloneiv cycloneiv_hssi_ver = $MODEL_TECH/../altera/verilog/cycloneiv_hssi cycloneiv_pcie_hip_ver = $MODEL_TECH/../altera/verilog/cycloneiv_pcie_hip cycloneive_ver = $MODEL_TECH/../altera/verilog/cycloneive twentynm_ver = $MODEL_TECH/../altera/verilog/twentynm twentynm_hssi_ver = $MODEL_TECH/../altera/verilog/twentynm_hssi twentynm_hip_ver = $MODEL_TECH/../altera/verilog/twentynm_hip cyclone10lp_ver = $MODEL_TECH/../altera/verilog/cyclone10lp work = work [vcom] ; VHDL93 variable selects language version as the default. ; Default is VHDL-2002. ; Value of 0 or 1987 for VHDL-1987. ; Value of 1 or 1993 for VHDL-1993. ; Default or value of 2 or 2002 for VHDL-2002. ; Default or value of 3 or 2008 for VHDL-2008. VHDL93 = 2008 ; Show source line containing error. Default is off. ; Show_source = 1 ; Turn off unbound-component warnings. Default is on. ; Show_Warning1 = 0 ; Turn off process-without-a-wait-statement warnings. Default is on. ; Show_Warning2 = 0 ; Turn off null-range warnings. Default is on. ; Show_Warning3 = 0 ; Turn off no-space-in-time-literal warnings. 
Default is on. ; Show_Warning4 = 0 ; Turn off multiple-drivers-on-unresolved-signal warnings. Default is on. ; Show_Warning5 = 0 ; Turn off optimization for IEEE std_logic_1164 package. Default is on. ; Optimize_1164 = 0 ; Turn on resolving of ambiguous function overloading in favor of the ; "explicit" function declaration (not the one automatically created by ; the compiler for each type declaration). Default is off. ; The .ini file has Explicit enabled so that std_logic_signed/unsigned ; will match the behavior of synthesis tools. Explicit = 1 ; Turn off acceleration of the VITAL packages. Default is to accelerate. ; NoVital = 1 ; Turn off VITAL compliance checking. Default is checking on. ; NoVitalCheck = 1 ; Ignore VITAL compliance checking errors. Default is to not ignore. ; IgnoreVitalErrors = 1 ; Turn off VITAL compliance checking warnings. Default is to show warnings. ; Show_VitalChecksWarnings = 0 ; Keep silent about case statement static warnings. ; Default is to give a warning. ; NoCaseStaticError = 1 ; Keep silent about warnings caused by aggregates that are not locally static. ; Default is to give a warning. ; NoOthersStaticError = 1 ; Turn off inclusion of debugging info within design units. ; Default is to include debugging info. ; NoDebug = 1 ; Turn off "Loading..." messages. Default is messages on. ; Quiet = 1 ; Turn on some limited synthesis rule compliance checking. Checks only: ; -- signals used (read) by a process must be in the sensitivity list ; CheckSynthesis = 1 ; Activate optimizations on expressions that do not involve signals, ; waits, or function/procedure/task invocations. Default is off. ; ScalarOpts = 1 ; Require the user to specify a configuration for all bindings, ; and do not generate a compile time default binding for the ; component. This will result in an elaboration error of ; 'component not bound' if the user fails to do so. Avoids the rare ; issue of a false dependency upon the unused default binding. 
; RequireConfigForAllDefaultBinding = 1 ; Inhibit range checking on subscripts of arrays. Range checking on ; scalars defined with subtypes is inhibited by default. ; NoIndexCheck = 1 ; Inhibit range checks on all (implicit and explicit) assignments to ; scalar objects defined with subtypes. ; NoRangeCheck = 1 [vlog] ; Turn off inclusion of debugging info within design units. ; Default is to include debugging info. ; NoDebug = 1 ; Turn off "loading..." messages. Default is messages on. ; Quiet = 1 ; Turn on Verilog hazard checking (order-dependent accessing of global vars). ; Default is off. ; Hazard = 1 ; Turn on converting regular Verilog identifiers to uppercase. Allows case ; insensitivity for module names. Default is no conversion. ; UpCase = 1 ; Turn on incremental compilation of modules. Default is off. ; Incremental = 1 ; Turns on lint-style checking. ; Show_Lint = 1 [vsim] ; Simulator resolution ; Set to fs, ps, ns, us, ms, or sec with optional prefix of 1, 10, or 100. Resolution = ps ; User time unit for run commands ; Set to default, fs, ps, ns, us, ms, or sec. The default is to use the ; unit specified for Resolution. For example, if Resolution is 100ps, ; then UserTimeUnit defaults to ps. ; Should generally be set to default. 
UserTimeUnit = default ; Default run length RunLength = 100 ; Maximum iterations that can be run without advancing simulation time IterationLimit = 5000 ; Directive to license manager: ; vhdl Immediately reserve a VHDL license ; vlog Immediately reserve a Verilog license ; plus Immediately reserve a VHDL and Verilog license ; nomgc Do not look for Mentor Graphics Licenses ; nomti Do not look for Model Technology Licenses ; noqueue Do not wait in the license queue when a license isn't available ; viewsim Try for viewer license but accept simulator license(s) instead ; of queuing for viewer license ; License = plus ; Stop the simulator after a VHDL/Verilog assertion message ; 0 = Note 1 = Warning 2 = Error 3 = Failure 4 = Fatal BreakOnAssertion = 3 ; Assertion Message Format ; %S - Severity Level ; %R - Report Message ; %T - Time of assertion ; %D - Delta ; %I - Instance or Region pathname (if available) ; %% - print '%' character ; AssertionFormat = "** %S: %R\n Time: %T Iteration: %D%I\n" ; Assertion File - alternate file for storing VHDL/Verilog assertion messages ; AssertFile = assert.log ; Default radix for all windows and commands... ; Set to symbolic, ascii, binary, octal, decimal, hex, unsigned DefaultRadix = symbolic ; VSIM Startup command ; Startup = do startup.do ; File for saving command transcript TranscriptFile = transcript ; File for saving command history ; CommandHistory = cmdhist.log ; Specify whether paths in simulator commands should be described ; in VHDL or Verilog format. ; For VHDL, PathSeparator = / ; For Verilog, PathSeparator = . ; Must not be the same character as DatasetSeparator. PathSeparator = / ; Specify the dataset separator for fully rooted contexts. ; The default is ':'. For example, sim:/top ; Must not be the same character as PathSeparator. DatasetSeparator = : ; Disable VHDL assertion messages ; IgnoreNote = 1 ; IgnoreWarning = 1 ; IgnoreError = 1 ; IgnoreFailure = 1 ; Default force kind. 
May be freeze, drive, deposit, or default ; or in other terms, fixed, wired, or charged. ; A value of "default" will use the signal kind to determine the ; force kind, drive for resolved signals, freeze for unresolved signals ; DefaultForceKind = freeze ; If zero, open files when elaborated; otherwise, open files on ; first read or write. Default is 0. ; DelayFileOpen = 1 ; Control VHDL files opened for write. ; 0 = Buffered, 1 = Unbuffered UnbufferedOutput = 0 ; Control the number of VHDL files open concurrently. ; This number should always be less than the current ulimit ; setting for max file descriptors. ; 0 = unlimited ConcurrentFileLimit = 40 ; Control the number of hierarchical regions displayed as ; part of a signal name shown in the Wave window. ; A value of zero tells VSIM to display the full name. ; The default is 0. ; WaveSignalNameWidth = 0 ; Turn off warnings from the std_logic_arith, std_logic_unsigned ; and std_logic_signed packages. ; StdArithNoWarnings = 1 ; Turn off warnings from the IEEE numeric_std and numeric_bit packages. ; NumericStdNoWarnings = 1 ; Control the format of the (VHDL) FOR generate statement label ; for each iteration. Do not quote it. ; The format string here must contain the conversion codes %s and %d, ; in that order, and no other conversion codes. The %s represents ; the generate_label; the %d represents the generate parameter value ; at a particular generate iteration (this is the position number if ; the generate parameter is of an enumeration type). Embedded whitespace ; is allowed (but discouraged); leading and trailing whitespace is ignored. ; Application of the format must result in a unique scope name over all ; such names in the design so that name lookup can function properly. ; GenerateFormat = %s__%d ; Specify whether checkpoint files should be compressed. ; The default is 1 (compressed). 
; CheckpointCompressMode = 0 ; List of dynamically loaded objects for Verilog PLI applications ; Veriuser = veriuser.sl ; Specify default options for the restart command. Options can be one ; or more of: -force -nobreakpoint -nolist -nolog -nowave ; DefaultRestartOptions = -force ; HP-UX 10.20 ONLY - Enable memory locking to speed up large designs ; (> 500 megabyte memory footprint). Default is disabled. ; Specify number of megabytes to lock. ; LockedMemory = 1000 ; Turn on (1) or off (0) WLF file compression. ; The default is 1 (compress WLF file). ; WLFCompress = 0 ; Specify whether to save all design hierarchy (1) in the WLF file ; or only regions containing logged signals (0). ; The default is 0 (save only regions with logged signals). ; WLFSaveAllRegions = 1 ; WLF file time limit. Limit WLF file by time, as closely as possible, ; to the specified amount of simulation time. When the limit is exceeded ; the earliest times get truncated from the file. ; If both time and size limits are specified the most restrictive is used. ; UserTimeUnits are used if time units are not specified. ; The default is 0 (no limit). Example: WLFTimeLimit = {{100 ms}} ; WLFTimeLimit = 0 ; WLF file size limit. Limit WLF file size, as closely as possible, ; to the specified number of megabytes. If both time and size limits ; are specified then the most restrictive is used. ; The default is 0 (no limit). ; WLFSizeLimit = 1000 ; Specify whether or not a WLF file should be deleted when the ; simulation ends. A value of 1 will cause the WLF file to be deleted. ; The default is 0 (do not delete WLF file when simulation ends). ; WLFDeleteOnQuit = 1 ; Automatic SDF compilation ; Disables automatic compilation of SDF files in flows that support it. ; Default is on, uncomment to turn off. ; NoAutoSDFCompile = 1 [lmc] [msg_system] ; Change a message severity or suppress a message. ; The format is: <msg directive> = <msg number>[,<msg number>...] 
; Examples: ; note = 3009 ; warning = 3033 ; error = 3010,3016 ; fatal = 3016,3033 ; suppress = 3009,3016,3043 ; The command verror <msg number> can be used to get the complete ; description of a message. ; Control transcripting of elaboration/runtime messages. ; The default is to have messages appear in the transcript and ; recorded in the wlf file (messages that are recorded in the ; wlf file can be viewed in the MsgViewer). The other settings ; are to send messages only to the transcript or only to the ; wlf file. The valid values are ; both {{default}} ; tran {{transcript only}} ; wlf {{wlf file only}} ; msgmode = both [Project] ** Warning: ; Warning -- Do not edit the project properties directly. ; Property names are dynamic in nature and property ; values have special syntax. Changing property data directly ; can result in a corrupt MPF file. All project properties ; can be modified through project window dialogs. Project_Version = 6 Project_DefaultLib = work Project_SortMethod = unused {0} {1} Project_Sim_Count = 0 Project_Folder_Count = 0 Echo_Compile_Output = 0 Save_Compile_Report = 1 Project_Opt_Count = 0 ForceSoftPaths = 0 ProjectStatusDelay = 5000 VERILOG_DoubleClick = Edit VERILOG_CustomDoubleClick = SYSTEMVERILOG_DoubleClick = Edit SYSTEMVERILOG_CustomDoubleClick = VHDL_DoubleClick = Edit VHDL_CustomDoubleClick = PSL_DoubleClick = Edit PSL_CustomDoubleClick = TEXT_DoubleClick = Edit TEXT_CustomDoubleClick = SYSTEMC_DoubleClick = Edit SYSTEMC_CustomDoubleClick = TCL_DoubleClick = Edit TCL_CustomDoubleClick = MACRO_DoubleClick = Edit MACRO_CustomDoubleClick = VCD_DoubleClick = Edit VCD_CustomDoubleClick = SDF_DoubleClick = Edit SDF_CustomDoubleClick = XML_DoubleClick = Edit XML_CustomDoubleClick = LOGFILE_DoubleClick = Edit LOGFILE_CustomDoubleClick = UCDB_DoubleClick = Edit UCDB_CustomDoubleClick = TDB_DoubleClick = Edit TDB_CustomDoubleClick = UPF_DoubleClick = Edit UPF_CustomDoubleClick = PCF_DoubleClick = Edit PCF_CustomDoubleClick = 
PROJECT_DoubleClick = Edit PROJECT_CustomDoubleClick = VRM_DoubleClick = Edit VRM_CustomDoubleClick = DEBUGDATABASE_DoubleClick = Edit DEBUGDATABASE_CustomDoubleClick = DEBUGARCHIVE_DoubleClick = Edit DEBUGARCHIVE_CustomDoubleClick = Project_Major_Version = 2020 Project_Minor_Version = 1 | 2.14231 | 2 |
setup.py | Akshay-knows/python-xlsx | 79 | 6615663 | #!/usr/bin/env python
# python-xlsx packaging script (setuptools).
# Reads package metadata from files next to this script and registers the
# distribution.  NOTE: this module performs file I/O at import time.
import os
import re
# from ez_setup import use_setuptools
# use_setuptools()
from setuptools import setup

MAIN_PKG = 'xlsx'

thisdir = os.path.dirname(__file__)
# history_path = os.path.join(thisdir, 'HISTORY.rst')
init_py_path = os.path.join(thisdir, MAIN_PKG, '__init__.py')  # holds __version__
license_path = os.path.join(thisdir, 'LICENSE')
readme_path = os.path.join(thisdir, 'README.rst')

# with open(history_path) as f:
#     history = f.read()
# NOTE(review): `license` shadows the built-in of the same name; harmless in
# this short script but worth renaming if it grows.
with open(license_path) as f:
    license = f.read()
with open(readme_path) as f:
    readme = f.read()
# Single-source the version: scrape `__version__ = '...'` out of the
# package's __init__.py rather than importing the package.
with open(init_py_path) as f:
    version = re.search("__version__ = '([^']+)'", f.read()).group(1)

NAME = 'python-xlsx'
VERSION = version
DESCRIPTION = (
    'Create and modify Excel .xlsx files'
)
# LONG_DESCRIPTION = '%s\n\n%s' % (readme, history)
LONG_DESCRIPTION = '%s' % (readme)
# NOTE(review): 'xslx' below looks like a typo for 'xlsx' -- confirm whether
# the misspelling is intentional (e.g. to match misspelled searches).
KEYWORDS = 'excel open xml xslx office'
AUTHOR = '<NAME>'
AUTHOR_EMAIL = '<EMAIL>'
URL = 'https://github.com/python-openxml/python-xlsx'
# The full LICENSE file text is used as the `license` metadata value.
LICENSE = license
# MODULES = ['ez_setup']
PACKAGES = ['xlsx']
# ENTRY_POINTS = {
#     'console_scripts': [
#         'opc = opcdiag.cli:main'
#     ]
# }
INSTALL_REQUIRES = [
    'lxml >= 3.0',
]
TEST_SUITE = 'tests'
TESTS_REQUIRE = [
    'behave >= 1.2.3',
    'mock >= 1.0.1',
    'pytest >= 2.3.4'
]
CLASSIFIERS = [
    'Development Status :: 1 - Planning',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Topic :: Office/Business :: Office Suites',
    'Topic :: Software Development :: Libraries'
]

# Keyword arguments collected for the setup() call below.
params = {
    'name': NAME,
    'version': VERSION,
    'description': DESCRIPTION,
    'keywords': KEYWORDS,
    'long_description': LONG_DESCRIPTION,
    'author': AUTHOR,
    'author_email': AUTHOR_EMAIL,
    'url': URL,
    'license': LICENSE,
    'packages': PACKAGES,
    # 'py_modules': MODULES,
    # 'entry_points': ENTRY_POINTS,
    'install_requires': INSTALL_REQUIRES,
    'tests_require': TESTS_REQUIRE,
    'test_suite': TEST_SUITE,
    'classifiers': CLASSIFIERS,
}
setup(**params)
| #!/usr/bin/env python
# python-xlsx packaging script (setuptools).
# Reads package metadata from files next to this script and registers the
# distribution.  NOTE: this module performs file I/O at import time.
import os
import re
# from ez_setup import use_setuptools
# use_setuptools()
from setuptools import setup

MAIN_PKG = 'xlsx'

thisdir = os.path.dirname(__file__)
# history_path = os.path.join(thisdir, 'HISTORY.rst')
init_py_path = os.path.join(thisdir, MAIN_PKG, '__init__.py')  # holds __version__
license_path = os.path.join(thisdir, 'LICENSE')
readme_path = os.path.join(thisdir, 'README.rst')

# with open(history_path) as f:
#     history = f.read()
# NOTE(review): `license` shadows the built-in of the same name; harmless in
# this short script but worth renaming if it grows.
with open(license_path) as f:
    license = f.read()
with open(readme_path) as f:
    readme = f.read()
# Single-source the version: scrape `__version__ = '...'` out of the
# package's __init__.py rather than importing the package.
with open(init_py_path) as f:
    version = re.search("__version__ = '([^']+)'", f.read()).group(1)

NAME = 'python-xlsx'
VERSION = version
DESCRIPTION = (
    'Create and modify Excel .xlsx files'
)
# LONG_DESCRIPTION = '%s\n\n%s' % (readme, history)
LONG_DESCRIPTION = '%s' % (readme)
# NOTE(review): 'xslx' below looks like a typo for 'xlsx' -- confirm whether
# the misspelling is intentional (e.g. to match misspelled searches).
KEYWORDS = 'excel open xml xslx office'
AUTHOR = '<NAME>'
AUTHOR_EMAIL = '<EMAIL>'
URL = 'https://github.com/python-openxml/python-xlsx'
# The full LICENSE file text is used as the `license` metadata value.
LICENSE = license
# MODULES = ['ez_setup']
PACKAGES = ['xlsx']
# ENTRY_POINTS = {
#     'console_scripts': [
#         'opc = opcdiag.cli:main'
#     ]
# }
INSTALL_REQUIRES = [
    'lxml >= 3.0',
]
TEST_SUITE = 'tests'
TESTS_REQUIRE = [
    'behave >= 1.2.3',
    'mock >= 1.0.1',
    'pytest >= 2.3.4'
]
CLASSIFIERS = [
    'Development Status :: 1 - Planning',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Topic :: Office/Business :: Office Suites',
    'Topic :: Software Development :: Libraries'
]

# Keyword arguments collected for the setup() call below.
params = {
    'name': NAME,
    'version': VERSION,
    'description': DESCRIPTION,
    'keywords': KEYWORDS,
    'long_description': LONG_DESCRIPTION,
    'author': AUTHOR,
    'author_email': AUTHOR_EMAIL,
    'url': URL,
    'license': LICENSE,
    'packages': PACKAGES,
    # 'py_modules': MODULES,
    # 'entry_points': ENTRY_POINTS,
    'install_requires': INSTALL_REQUIRES,
    'tests_require': TESTS_REQUIRE,
    'test_suite': TEST_SUITE,
    'classifiers': CLASSIFIERS,
}
setup(**params)
| en | 0.339876 | #!/usr/bin/env python # from ez_setup import use_setuptools # use_setuptools() # history_path = os.path.join(thisdir, 'HISTORY.rst') # with open(history_path) as f: # history = f.read() # LONG_DESCRIPTION = '%s\n\n%s' % (readme, history) # MODULES = ['ez_setup'] # ENTRY_POINTS = { # 'console_scripts': [ # 'opc = opcdiag.cli:main' # ] # } # 'py_modules': MODULES, # 'entry_points': ENTRY_POINTS, | 1.757021 | 2 |
src/libutil.py | natduca/osx-trace | 2 | 6615664 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import shlex
import subprocess
import urllib2
import sys
from exceptions import *
class LibUtil(object):
    """Locates -- downloading and building on first use -- Apple's libutil.

    The built library is cached under ``cache_dir`` in a ``libutil-<ver>``
    folder; ``include_path`` / ``link_path`` expose that folder to callers.

    Raises:
        Exception: not running on OSX, or on an unrecognized OSX release.
        CompilerNeededException: /usr/bin/cc is missing.
    """

    def __init__(self, cache_dir, verbose=False):
        if not platform.platform().startswith("Darwin"):
            raise Exception("Only supported on OSX.")
        self.cache_dir = cache_dir
        # Sanity check: are we on a platform we understand?
        # Map the OSX release to Apple's libutil tarball version number.
        if platform.mac_ver()[0].startswith('10.6'):
            self.ver = 21
        elif platform.mac_ver()[0].startswith('10.7'):
            self.ver = 25
        else:
            raise Exception("Unrecognized OSX version: %s" % platform.mac_ver())
        # Sanity check: does cc exist?
        if not os.path.exists("/usr/bin/cc"):
            raise CompilerNeededException()
        # Look for an existing build result in the cache dir; build on miss.
        if not os.path.exists(os.path.join(cache_dir, "libutil-%s" % self.ver, "libutil1.0.dylib")):
            self._download_and_compile(verbose)
            self.did_compile = True
        else:
            self.did_compile = False
        assert os.path.exists(os.path.join(cache_dir, "libutil-%s" % self.ver, "libutil1.0.dylib"))

    def _download_and_compile(self, verbose=False):
        """Fetch the libutil source tarball, extract it, and run make."""
        if verbose:
            sys.stderr.write("Downloading libUtil...\n")
        # Download.  The tarball is binary, so write in 'wb' mode (the old
        # code used text mode 'w'), and always release both the HTTP
        # response and the file handle (previously leaked on error).
        url = 'http://opensource.apple.com/tarballs/libutil/libutil-%s.tar.gz' % self.ver
        req = urllib2.urlopen(url)
        try:
            tarfilename = os.path.join(self.cache_dir, 'libutil-%s.tar.gz' % self.ver)
            with open(tarfilename, 'wb') as f:
                f.write(req.read())
        finally:
            req.close()
        # Untar next to the downloaded file, restoring the cwd afterwards.
        if verbose:
            sys.stderr.write("Extracting libUtil...\n")
        oldcwd = os.getcwd()
        try:
            os.chdir(os.path.dirname(tarfilename))
            ret = self._system('tar xfz %s' % tarfilename)
            assert ret == 0
        finally:
            os.chdir(oldcwd)
        os.unlink(tarfilename)
        # Compile inside the extracted source tree.
        if verbose:
            sys.stderr.write("Compiling libUtil...\n")
        folder_name = os.path.join(self.cache_dir, "libutil-%s" % self.ver)
        assert os.path.exists(os.path.join(folder_name, "Makefile"))
        oldcwd = os.getcwd()
        try:
            os.chdir(folder_name)
            self._system("make")
        finally:
            os.chdir(oldcwd)

    def _system(self, cmd):
        """Run *cmd* with stdout/stderr captured; return its exit code."""
        args = shlex.split(cmd)
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p.communicate()
        return p.returncode

    @property
    def include_path(self):
        # Extracted/built libutil-<ver> tree, used as a compiler include dir.
        return os.path.join(self.cache_dir, "libutil-%s" % self.ver)

    @property
    def link_path(self):
        # Same tree; used as a linker search dir for the built dylib.
        return os.path.join(self.cache_dir, "libutil-%s" % self.ver)
| # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import shlex
import subprocess
import urllib2
import sys
from exceptions import *
class LibUtil(object):
    """Locates -- downloading and building on first use -- Apple's libutil.

    The built library is cached under ``cache_dir`` in a ``libutil-<ver>``
    folder; ``include_path`` / ``link_path`` expose that folder to callers.

    Raises:
        Exception: not running on OSX, or on an unrecognized OSX release.
        CompilerNeededException: /usr/bin/cc is missing.
    """

    def __init__(self, cache_dir, verbose=False):
        if not platform.platform().startswith("Darwin"):
            raise Exception("Only supported on OSX.")
        self.cache_dir = cache_dir
        # Sanity check: are we on a platform we understand?
        # Maps the OSX release to Apple's libutil tarball version number.
        if platform.mac_ver()[0].startswith('10.6'):
            self.ver = 21
        elif platform.mac_ver()[0].startswith('10.7'):
            self.ver = 25
        else:
            raise Exception("Unrecognized OSX version: %s" % platform.mac_ver())
        # Sanity check: does cc exist?
        if not os.path.exists("/usr/bin/cc"):
            raise CompilerNeededException()
        # look the result in build dir
        if not os.path.exists(os.path.join(cache_dir, "libutil-%s" % self.ver, "libutil1.0.dylib")):
            self._download_and_compile(verbose)
            self.did_compile = True
        else:
            self.did_compile = False
        assert os.path.exists(os.path.join(cache_dir, "libutil-%s" % self.ver, "libutil1.0.dylib"))

    def _download_and_compile(self, verbose=False):
        """Fetch the libutil source tarball, extract it, and run make."""
        if verbose:
            sys.stderr.write("Downloading libUtil...\n")
        # Download
        # NOTE(review): the tarball is opened in text mode 'w' and neither
        # handle is closed on error -- fine on CPython 2 / Unix, but consider
        # 'wb' plus with/try-finally.
        req = urllib2.urlopen('http://opensource.apple.com/tarballs/libutil/libutil-%s.tar.gz' % self.ver)
        tarfilename = os.path.join(self.cache_dir, 'libutil-%s.tar.gz' % self.ver)
        f = open(tarfilename, 'w')
        f.write(req.read())
        f.close()
        req.close()
        # Untar
        if verbose:
            sys.stderr.write("Extracting libUtil...\n")
        oldcwd = os.getcwd()  # cwd is restored after extraction
        try:
            os.chdir(os.path.dirname(tarfilename))
            ret = self._system('tar xfz %s' % tarfilename)
            assert ret == 0
        finally:
            os.chdir(oldcwd)
        os.unlink(tarfilename)
        # Compile
        if verbose:
            sys.stderr.write("Compiling libUtil...\n")
        folder_name = os.path.join(self.cache_dir, "libutil-%s" % self.ver)
        assert os.path.exists(os.path.join(folder_name, "Makefile"))
        oldcwd = os.getcwd()
        try:
            os.chdir(folder_name)
            self._system("make")
        finally:
            os.chdir(oldcwd)

    def _system(self, cmd):
        """Run *cmd* with stdout/stderr captured; return its exit code."""
        args = shlex.split(cmd)
        p = subprocess.Popen(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        p.communicate()
        return p.returncode

    @property
    def include_path(self):
        # Extracted/built libutil-<ver> tree, used as a compiler include dir.
        return os.path.join(self.cache_dir, "libutil-%s" % self.ver)

    @property
    def link_path(self):
        # Same tree; used as a linker search dir for the built dylib.
        return os.path.join(self.cache_dir, "libutil-%s" % self.ver)
| en | 0.843583 | # Copyright 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Sanity check: are we on a platform we understand? # Sanity check: does cc exist? # look the result in build dir # Download # Untar # Compile | 2.260298 | 2 |
arm64/Arm64TypeAnalyzer.py | alibaba-edu/Driver-Security-Analyzer | 35 | 6615665 | # Copyright (C) 2020 Alibaba Group Holding Limited
import idaapi
idaapi.require("Arm64Utils")
from Arm64Utils import *
idaapi.require("AnalysisUtils")
def checkIfAllGotFuncIsInStubs():
allGOTSegs = getAllSegsOfGOT()
allStubsSegs = getAllSegsOfSTUBS()
if len(allGOTSegs) == 0 and len(allStubsSegs) == 0:
return
for gotSeg in allGOTSegs:
gotSegStartEA = gotSeg.startEA
gotSegEndEA = gotSeg.endEA
currentEA = gotSegStartEA
while currentEA < gotSegEndEA:
realItemEA = Qword(currentEA)
if is_func(GetFlags(realItemEA)):
xref = get_first_dref_to(currentEA)
while xref != None and xref != BADADDR:
xrefSegName = get_segm_name(xref)
if not xrefSegName.endswith(":__stubs"):
print "[!] GOT func item @{:016X} refer @{:016X} is not in stubs".format(currentEA, xref)
xref = get_next_dref_to(currentEA, xref)
currentEA += 8
def getConstructorsInKextTEXT(kextPrefix):
CTextEA2InfoMap = {}
textSegStartEA, textSegEndEA = getTextAreaForKEXT(kextPrefix)
for funcEA in Functions(textSegStartEA, textSegEndEA):
funcName = getName(funcEA)
if not None is funcName and isMangledFuncNameConstructor(funcName):
realFuncDeName = getDeFuncNameOfName(funcName)
className = realFuncDeName[:len(realFuncDeName)/2-1]
ClassInstFuncInfo_C = ClassInstFuncInfo(funcName, className, IndicatorKind.INDNAME, [0], False)
CTextEA2InfoMap[funcEA] = ClassInstFuncInfo_C
return CTextEA2InfoMap
def getConstructorsInKextSTUBS(kextPrefix):
stubsSegName = kextPrefix + ":__stubs"
CStubEA2InfoMap = {}
stubsSeg = get_segm_by_name(stubsSegName)
if None is stubsSeg:
return CStubEA2InfoMap
stubsSegStartEA = stubsSeg.startEA
stubsSegEndEA = stubsSeg.endEA
currentEA = stubsSegStartEA
while currentEA < stubsSegEndEA:
stubFuncName = getName(currentEA)
gotItemName = GetOpnd(currentEA, 1)[1:-5]
realFuncName = gotItemName[:gotItemName.rfind("_ptr_")]
if isMangledFuncNameConstructor(realFuncName):
realFuncDeName = getDeFuncNameOfName(realFuncName)
className = realFuncDeName[:len(realFuncDeName)/2-1]
ClassInstFuncInfo_C = ClassInstFuncInfo(realFuncName, className, IndicatorKind.INDNAME, [0], False)
CStubEA2InfoMap[currentEA] = ClassInstFuncInfo_C
currentEA += 12
return CStubEA2InfoMap
def isMangledFuncNameConstructor(mangledFuncName):
if mangledFuncName.startswith("__ZN11OSMetaClassC2EPKcPKS_j"):
return False
deFuncName = getDeFuncNameOfName(mangledFuncName)
return not None is deFuncName and deFuncName[:len(deFuncName)/2-1] == deFuncName[len(deFuncName)/2+1:]
def findUsageOfFuncEAs(usageSegName, funcEAs):
usageOfSpecialFuncs = {}
for funcEA in funcEAs:
usageOfSpecialFuncs[funcEA] = set()
xrefs = getXRefsTo(funcEA)
for xref in xrefs:
xrefSegName = get_segm_name(xref)
if xrefSegName == usageSegName:
usageOfSpecialFuncs[funcEA].add(xref)
elif xrefSegName.endswith(":__text"):
print "[!] Stub In %s: %s refed in %s"%(kextPrefix, funcEA, xrefSegName)
return usageOfSpecialFuncs
def findUsageOfStubFuncNames(stubsSegName, usageSegName, searchFuncNames):
stubsSeg = get_segm_by_name(stubsSegName)
if None is stubsSeg:
return {}
stubsSegStartEA = stubsSeg.startEA
stubsSegEndEA = stubsSeg.endEA
usageOfSpecialFuncs = {}
for specialFuncName in searchFuncNames:
usageOfSpecialFuncs[specialFuncName] = set()
for funcEA in range(stubsSegStartEA, stubsSegEndEA, 12):
funcName = getName(funcEA)
for specialFuncName in searchFuncNames:
if funcName.startswith(specialFuncName):
#print "[+] Found ", funcName, specialFuncName
xrefs = getXRefsTo(funcEA)
for xref in xrefs:
xrefSegName = get_segm_name(xref)
if xrefSegName == usageSegName:
usageOfSpecialFuncs[specialFuncName].add(xref)
elif xrefSegName.endswith(":__text"):
print "[!] Stub In %s: %s refed in %s"%(kextPrefix, specialFuncName, xrefSegName)
return usageOfSpecialFuncs
def shouldByPassSolveTypes(funcEA):
funcName = getName(funcEA)
if "_InitFunc_" in funcName:
return True
elif GetMnem(funcEA) == "B":
return True
return False
def solveVarTypesByPropInTextSeg(textSegStartEA, textSegEndEA, crossKEXT=False):
for funcStartEA in Functions(textSegStartEA, textSegEndEA):
if isFuncContainObjArg(funcStartEA):
if not shouldByPassSolveTypes(funcStartEA):
AnalysisUtils.forward_analysis_in_func(funcStartEA, crossKEXT=crossKEXT)
else:
#print "[#] func at {:016X} does not have obj arg".format(funcStartEA)
pass
def solveVarTypesByPropInAll():
print "[+] solveVarTypesByPropInAll"
for textSeg in getAllSegsOfText():
solveVarTypesByPropInTextSeg(textSeg.startEA, textSeg.endEA)
def solveVarTypesByPropInKEXT(kextPrefix):
startea, endea = getTextAreaForKEXT(kextPrefix)
if startea == BADADDR:
return
solveVarTypesByPropInTextSeg(startea, endea, False)
def processVFuncArgsForClass(className):
vtableStartEA, vtableEndEA = getVTableAddrOfClass(className)
currentEA = vtableStartEA
vtableStructId = getVTableStructIdOfClass(className)
parentClassName, parentVTableStartEA, parentVTableEndEA = findNearestAncestorHaveVT(className)
if parentVTableStartEA == BADADDR:
print "[!] {}'s parent {}'s vtable is not found! Abort typing".format(className, parentClassName)
return
while currentEA != vtableEndEA:
funcEA = Qword(currentEA)
offset = currentEA-vtableStartEA
shouldProcess = True
if not None is parentClassName and parentVTableStartEA != BADADDR and parentVTableStartEA + offset < parentVTableEndEA:
parentFuncEA = Qword(parentVTableStartEA + offset)
if funcEA != parentFuncEA:
funcName = getName(funcEA)
if None is funcName:
currentEA += 8
continue
if funcName.startswith("__"):
deFuncName = getDeFuncNameOfName(funcName)
if deFuncName:
funcClassName = deFuncName[:deFuncName.rfind("::")]
if funcClassName != className:
shouldProcess = False
elif "::" in funcName:
funcClassName = funcName[:funcName.rfind("::")]
if funcClassName != className:
shouldProcess = False
elif funcName == "___cxa_pure_virtual":
shouldProcess = False
if shouldProcess:
processFuncArgs(funcEA, True, className, parentFuncEA)
else:
processFuncArgs(funcEA, True, className, None)
keepCon_VFuncAndVTSMember(funcEA, vtableStructId, offset, False, True)
currentEA += 8
def processVFuncArgsBFS(className):
if not className in kernelClassNameSet:
processVFuncArgsForClass(className)
if className in classNameToChildClassNameSetMap:
childClassNames = classNameToChildClassNameSetMap[className]
for childClassName in childClassNames:
processVFuncArgsBFS(childClassName)
def processVFuncArgsForKext(kextPrefix):
#print moduleNameToClassNamesMap
if not kextPrefix in moduleNameToClassNamesMap:
return
classNameSet = moduleNameToClassNamesMap[kextPrefix]
for className in classNameSet:
processVFuncArgsForClass(className)
#if className in classNameToVTableFuncEAListMap:
# processVFuncArgsForClass(className)
def processNamedFuncArgsForKext(kextPrefix):
#kextPrefix += ":__text"
#textSeg = get_segm_by_name(kextPrefix)
textSegStartEA, textSegEndEA = getTextAreaForKEXT(kextPrefix)
processNamedFuncArgsForSeg(textSegStartEA, textSegEndEA)
def processNamedFuncArgsForSeg(textSegStartEA, textSegEndEA):
for funcEA in Functions(textSegStartEA, textSegEndEA):
funcName = getName(funcEA)
if funcName.startswith("__"):
funcDeName = getDeFuncNameOfName(funcName)
if funcDeName and funcName != "___cxa_pure_virtual":
if "::" in funcDeName:
className = funcDeName[:funcDeName.rfind("::")]
# This may incur error since not all functions are non-static
processFuncArgs(funcEA, True, className, None)
else:
processFuncArgs(funcEA, False, None, None)
def processNamedFuncArgsForAll():
print "[+] Process All Named Functions' Arguments"
for seg in getAllSegsOfText():
processNamedFuncArgsForSeg(seg.startEA, seg.endEA)
def processVFuncArgsForAll():
print "[+] Process All Virtual Functions' Arguments"
roots = kernelClassNameSet
if len(roots) == 0:
roots = findRootClasses()
for className in roots:
processVFuncArgsBFS(className)
keepAllCon_VTAndVTS()
def setTypeForAllGlobalVars():
for ea,name in Names():
if None is name:
continue
if name.endswith("10gMetaClassE"):
deName = getDeNameOfName(name)
metaClassName = deName[:-12] + "::MetaClass"
SetType(ea, metaClassName)
elif name.endswith("9metaClassE"):
deName = getDeNameOfName(name)
metaClassName = deName[:-12] + "::MetaClass"
SetType(ea, metaClassName + "*")
elif name.startswith("__ZTV"):
vtableDeName = getDeNameOfName(name)
if not None is vtableDeName:
className = vtableDeName[12:]
wholeVTableStructId = GetStrucIdByName("whole_vtable_" + className)
if wholeVTableStructId == BADADDR or GetStrucSize(wholeVTableStructId) != GetStrucSize(getVTableStructIdOfClass(className))+0x10:
wholeVTableStructId = createWholeVTableStructForClass(className)
if wholeVTableStructId != BADADDR:
SetType(ea, "whole_vtable_" + className)
''' SetType(ea, "whole_vtable_" + className) will make the vtable const a chaos'''
processAllVTableConst(True)
def analyzeTypesForKEXT(kextPrefix):
processNamedFuncArgsForKext(kextPrefix)
processVFuncArgsForKext(kextPrefix)
# I think this one is useless
#setTypeForAllGlobalVars()
def analyzeTypesForAll():
print "[+] Start Analyzing Types"
processNamedFuncArgsForAll()
processVFuncArgsForAll()
# I think this one is useless
#setTypeForAllGlobalVars()
# Keep GOT consistency for type-analyzed funcs and vars
processAllGOTSegs()
def findSMethodArrayForUCClass(ucClassName):
vtableStartEA, vtableEndEA = getVTableAddrOfClass(ucClassName)
if vtableStartEA != BADADDR:
externMethodNamePrefix = "__ZN" + str(len(ucClassName)) + ucClassName + "14externalMethodE"
getTargetNamePrefix = "__ZN" + str(len(ucClassName)) + ucClassName + "26getTargetAndMethodForIndexE"
for vtEA in range(vtableStartEA, vtableEndEA, 4):
funcEA = Qword(vtEA)
funcName = getName(funcEA)
if funcName.startswith(externMethodNamePrefix):
None
elif funcName.startswith(getTargetNamePrefix):
None
def findSMethodArrayForKext(kextPrefix=None):
externSMethods = []
targetSMethods = []
targetConstSegName = "__const"
targetTextSegName = "__text"
if kextPrefix:
targetConstSegName = kextPrefix + ":__const"
targetTextSegName = kextPrefix + ":__text"
for segStartEA in Segments():
seg = getseg(segStartEA)
segName = get_segm_name(segStartEA)
if segName != targetSegName:
continue
constSegStartEA = seg.startEA
constSegEndEA = seg.endEA
currentEA = constSegStartEA
isInVT = False
while currentEA < constSegEndEA:
currentName = getName(currentEA)
if currentName.startswith("__ZTV"):
currentEA += 0x10
isInVT = True
continue
if isInVT:
if Qword(currentEA) == 0:
isInVT = False
currentEA += 8
continue
xrefs = getXRefsTo(currentEA)
if len(xrefs) == 0:
currentEA += 8
continue
else:
for xref in xrefs:
xrefSegName = SegName(xref)
if xrefSegName == targetTextSegName:
xrefFunc = get_func(xref)
if not None is xrefFunc:
xrefFuncName = getName(xrefFunc.startEA)
xrefDeFuncName = getDeFuncNameOfName(xrefFuncName)
className = None
if not None is xrefDeFuncName:
className = xrefDeFuncName[:xrefDeFuncName.rfind("::")]
elif "::" in xrefFuncName:
className = xrefFuncName[:xrefFuncName.rfind("::")]
sMethods_IOExternalMethodDispatch_cnt = 0
guessEA = currentEA
while True:
guessValue0 = Qword(guessEA)
guessValue1 = Qword(guessEA+8)
guessValue2 = Qword(guessEA+0x10)
guessValue3 = Qword(guessEA+0x18)
if isIOExternalMethodDispatchAtEA(guessEA) :
guessEA += 0x18
sMethods_IOExternalMethodDispatch_cnt += 1
elif guessValue0 == 0 and guessValue1 == 0 and guessValue2 == 0 and \
isIOExternalMethodDispatchAtEA(guessEA+0x18, True):
guessEA += 0x18
sMethods_IOExternalMethodDispatch_cnt += 1
else:
break
if sMethods_IOExternalMethodDispatch_cnt != 0:
externSMethods.append((currentEA, sMethods_IOExternalMethodDispatch_cnt+1, className))
if not None is className:
parseSMethodArrayAtAddr(currentEA, sMethods_IOExternalMethodDispatch_cnt+1, className, True)
currentEA = guessEA + 0x18
continue
currentEA += 8
return externSMethods, targetSMethods
def findSMethodArrayForAll():
externSMethods = []
targetSMethods = []
for kextPrefix in getAllKEXTPrefixes():
externSMethodsOfKext, targetSMethodsOfKext = findSMethodArrayForKext(kextPrefix)
externSMethods.extend(externSMethodsOfKext)
targetSMethods.extend(targetSMethodsOfKext)
externSMethodsOfKext, targetSMethodsOfKext = findSMethodArrayForKext()
externSMethods.extend(externSMethodsOfKext)
targetSMethods.extend(targetSMethodsOfKext)
print "[+] Found SMethods: EA, Size, ClassName"
for sMethod in externSMethods:
print "{:016X}, {}, {}".format(sMethod[0], sMethod[1], sMethod[2])
print "[+] Arm64TypeAnalyzer loaded"
| # Copyright (C) 2020 Alibaba Group Holding Limited
import idaapi
idaapi.require("Arm64Utils")
from Arm64Utils import *
idaapi.require("AnalysisUtils")
def checkIfAllGotFuncIsInStubs():
allGOTSegs = getAllSegsOfGOT()
allStubsSegs = getAllSegsOfSTUBS()
if len(allGOTSegs) == 0 and len(allStubsSegs) == 0:
return
for gotSeg in allGOTSegs:
gotSegStartEA = gotSeg.startEA
gotSegEndEA = gotSeg.endEA
currentEA = gotSegStartEA
while currentEA < gotSegEndEA:
realItemEA = Qword(currentEA)
if is_func(GetFlags(realItemEA)):
xref = get_first_dref_to(currentEA)
while xref != None and xref != BADADDR:
xrefSegName = get_segm_name(xref)
if not xrefSegName.endswith(":__stubs"):
print "[!] GOT func item @{:016X} refer @{:016X} is not in stubs".format(currentEA, xref)
xref = get_next_dref_to(currentEA, xref)
currentEA += 8
def getConstructorsInKextTEXT(kextPrefix):
CTextEA2InfoMap = {}
textSegStartEA, textSegEndEA = getTextAreaForKEXT(kextPrefix)
for funcEA in Functions(textSegStartEA, textSegEndEA):
funcName = getName(funcEA)
if not None is funcName and isMangledFuncNameConstructor(funcName):
realFuncDeName = getDeFuncNameOfName(funcName)
className = realFuncDeName[:len(realFuncDeName)/2-1]
ClassInstFuncInfo_C = ClassInstFuncInfo(funcName, className, IndicatorKind.INDNAME, [0], False)
CTextEA2InfoMap[funcEA] = ClassInstFuncInfo_C
return CTextEA2InfoMap
def getConstructorsInKextSTUBS(kextPrefix):
stubsSegName = kextPrefix + ":__stubs"
CStubEA2InfoMap = {}
stubsSeg = get_segm_by_name(stubsSegName)
if None is stubsSeg:
return CStubEA2InfoMap
stubsSegStartEA = stubsSeg.startEA
stubsSegEndEA = stubsSeg.endEA
currentEA = stubsSegStartEA
while currentEA < stubsSegEndEA:
stubFuncName = getName(currentEA)
gotItemName = GetOpnd(currentEA, 1)[1:-5]
realFuncName = gotItemName[:gotItemName.rfind("_ptr_")]
if isMangledFuncNameConstructor(realFuncName):
realFuncDeName = getDeFuncNameOfName(realFuncName)
className = realFuncDeName[:len(realFuncDeName)/2-1]
ClassInstFuncInfo_C = ClassInstFuncInfo(realFuncName, className, IndicatorKind.INDNAME, [0], False)
CStubEA2InfoMap[currentEA] = ClassInstFuncInfo_C
currentEA += 12
return CStubEA2InfoMap
def isMangledFuncNameConstructor(mangledFuncName):
if mangledFuncName.startswith("__ZN11OSMetaClassC2EPKcPKS_j"):
return False
deFuncName = getDeFuncNameOfName(mangledFuncName)
return not None is deFuncName and deFuncName[:len(deFuncName)/2-1] == deFuncName[len(deFuncName)/2+1:]
def findUsageOfFuncEAs(usageSegName, funcEAs):
usageOfSpecialFuncs = {}
for funcEA in funcEAs:
usageOfSpecialFuncs[funcEA] = set()
xrefs = getXRefsTo(funcEA)
for xref in xrefs:
xrefSegName = get_segm_name(xref)
if xrefSegName == usageSegName:
usageOfSpecialFuncs[funcEA].add(xref)
elif xrefSegName.endswith(":__text"):
print "[!] Stub In %s: %s refed in %s"%(kextPrefix, funcEA, xrefSegName)
return usageOfSpecialFuncs
def findUsageOfStubFuncNames(stubsSegName, usageSegName, searchFuncNames):
stubsSeg = get_segm_by_name(stubsSegName)
if None is stubsSeg:
return {}
stubsSegStartEA = stubsSeg.startEA
stubsSegEndEA = stubsSeg.endEA
usageOfSpecialFuncs = {}
for specialFuncName in searchFuncNames:
usageOfSpecialFuncs[specialFuncName] = set()
for funcEA in range(stubsSegStartEA, stubsSegEndEA, 12):
funcName = getName(funcEA)
for specialFuncName in searchFuncNames:
if funcName.startswith(specialFuncName):
#print "[+] Found ", funcName, specialFuncName
xrefs = getXRefsTo(funcEA)
for xref in xrefs:
xrefSegName = get_segm_name(xref)
if xrefSegName == usageSegName:
usageOfSpecialFuncs[specialFuncName].add(xref)
elif xrefSegName.endswith(":__text"):
print "[!] Stub In %s: %s refed in %s"%(kextPrefix, specialFuncName, xrefSegName)
return usageOfSpecialFuncs
def shouldByPassSolveTypes(funcEA):
funcName = getName(funcEA)
if "_InitFunc_" in funcName:
return True
elif GetMnem(funcEA) == "B":
return True
return False
def solveVarTypesByPropInTextSeg(textSegStartEA, textSegEndEA, crossKEXT=False):
for funcStartEA in Functions(textSegStartEA, textSegEndEA):
if isFuncContainObjArg(funcStartEA):
if not shouldByPassSolveTypes(funcStartEA):
AnalysisUtils.forward_analysis_in_func(funcStartEA, crossKEXT=crossKEXT)
else:
#print "[#] func at {:016X} does not have obj arg".format(funcStartEA)
pass
def solveVarTypesByPropInAll():
print "[+] solveVarTypesByPropInAll"
for textSeg in getAllSegsOfText():
solveVarTypesByPropInTextSeg(textSeg.startEA, textSeg.endEA)
def solveVarTypesByPropInKEXT(kextPrefix):
startea, endea = getTextAreaForKEXT(kextPrefix)
if startea == BADADDR:
return
solveVarTypesByPropInTextSeg(startea, endea, False)
def processVFuncArgsForClass(className):
vtableStartEA, vtableEndEA = getVTableAddrOfClass(className)
currentEA = vtableStartEA
vtableStructId = getVTableStructIdOfClass(className)
parentClassName, parentVTableStartEA, parentVTableEndEA = findNearestAncestorHaveVT(className)
if parentVTableStartEA == BADADDR:
print "[!] {}'s parent {}'s vtable is not found! Abort typing".format(className, parentClassName)
return
while currentEA != vtableEndEA:
funcEA = Qword(currentEA)
offset = currentEA-vtableStartEA
shouldProcess = True
if not None is parentClassName and parentVTableStartEA != BADADDR and parentVTableStartEA + offset < parentVTableEndEA:
parentFuncEA = Qword(parentVTableStartEA + offset)
if funcEA != parentFuncEA:
funcName = getName(funcEA)
if None is funcName:
currentEA += 8
continue
if funcName.startswith("__"):
deFuncName = getDeFuncNameOfName(funcName)
if deFuncName:
funcClassName = deFuncName[:deFuncName.rfind("::")]
if funcClassName != className:
shouldProcess = False
elif "::" in funcName:
funcClassName = funcName[:funcName.rfind("::")]
if funcClassName != className:
shouldProcess = False
elif funcName == "___cxa_pure_virtual":
shouldProcess = False
if shouldProcess:
processFuncArgs(funcEA, True, className, parentFuncEA)
else:
processFuncArgs(funcEA, True, className, None)
keepCon_VFuncAndVTSMember(funcEA, vtableStructId, offset, False, True)
currentEA += 8
def processVFuncArgsBFS(className):
if not className in kernelClassNameSet:
processVFuncArgsForClass(className)
if className in classNameToChildClassNameSetMap:
childClassNames = classNameToChildClassNameSetMap[className]
for childClassName in childClassNames:
processVFuncArgsBFS(childClassName)
def processVFuncArgsForKext(kextPrefix):
#print moduleNameToClassNamesMap
if not kextPrefix in moduleNameToClassNamesMap:
return
classNameSet = moduleNameToClassNamesMap[kextPrefix]
for className in classNameSet:
processVFuncArgsForClass(className)
#if className in classNameToVTableFuncEAListMap:
# processVFuncArgsForClass(className)
def processNamedFuncArgsForKext(kextPrefix):
#kextPrefix += ":__text"
#textSeg = get_segm_by_name(kextPrefix)
textSegStartEA, textSegEndEA = getTextAreaForKEXT(kextPrefix)
processNamedFuncArgsForSeg(textSegStartEA, textSegEndEA)
def processNamedFuncArgsForSeg(textSegStartEA, textSegEndEA):
for funcEA in Functions(textSegStartEA, textSegEndEA):
funcName = getName(funcEA)
if funcName.startswith("__"):
funcDeName = getDeFuncNameOfName(funcName)
if funcDeName and funcName != "___cxa_pure_virtual":
if "::" in funcDeName:
className = funcDeName[:funcDeName.rfind("::")]
# This may incur error since not all functions are non-static
processFuncArgs(funcEA, True, className, None)
else:
processFuncArgs(funcEA, False, None, None)
def processNamedFuncArgsForAll():
print "[+] Process All Named Functions' Arguments"
for seg in getAllSegsOfText():
processNamedFuncArgsForSeg(seg.startEA, seg.endEA)
def processVFuncArgsForAll():
print "[+] Process All Virtual Functions' Arguments"
roots = kernelClassNameSet
if len(roots) == 0:
roots = findRootClasses()
for className in roots:
processVFuncArgsBFS(className)
keepAllCon_VTAndVTS()
def setTypeForAllGlobalVars():
for ea,name in Names():
if None is name:
continue
if name.endswith("10gMetaClassE"):
deName = getDeNameOfName(name)
metaClassName = deName[:-12] + "::MetaClass"
SetType(ea, metaClassName)
elif name.endswith("9metaClassE"):
deName = getDeNameOfName(name)
metaClassName = deName[:-12] + "::MetaClass"
SetType(ea, metaClassName + "*")
elif name.startswith("__ZTV"):
vtableDeName = getDeNameOfName(name)
if not None is vtableDeName:
className = vtableDeName[12:]
wholeVTableStructId = GetStrucIdByName("whole_vtable_" + className)
if wholeVTableStructId == BADADDR or GetStrucSize(wholeVTableStructId) != GetStrucSize(getVTableStructIdOfClass(className))+0x10:
wholeVTableStructId = createWholeVTableStructForClass(className)
if wholeVTableStructId != BADADDR:
SetType(ea, "whole_vtable_" + className)
''' SetType(ea, "whole_vtable_" + className) will make the vtable const a chaos'''
processAllVTableConst(True)
def analyzeTypesForKEXT(kextPrefix):
processNamedFuncArgsForKext(kextPrefix)
processVFuncArgsForKext(kextPrefix)
# I think this one is useless
#setTypeForAllGlobalVars()
def analyzeTypesForAll():
print "[+] Start Analyzing Types"
processNamedFuncArgsForAll()
processVFuncArgsForAll()
# I think this one is useless
#setTypeForAllGlobalVars()
# Keep GOT consistency for type-analyzed funcs and vars
processAllGOTSegs()
def findSMethodArrayForUCClass(ucClassName):
vtableStartEA, vtableEndEA = getVTableAddrOfClass(ucClassName)
if vtableStartEA != BADADDR:
externMethodNamePrefix = "__ZN" + str(len(ucClassName)) + ucClassName + "14externalMethodE"
getTargetNamePrefix = "__ZN" + str(len(ucClassName)) + ucClassName + "26getTargetAndMethodForIndexE"
for vtEA in range(vtableStartEA, vtableEndEA, 4):
funcEA = Qword(vtEA)
funcName = getName(funcEA)
if funcName.startswith(externMethodNamePrefix):
None
elif funcName.startswith(getTargetNamePrefix):
None
def findSMethodArrayForKext(kextPrefix=None):
externSMethods = []
targetSMethods = []
targetConstSegName = "__const"
targetTextSegName = "__text"
if kextPrefix:
targetConstSegName = kextPrefix + ":__const"
targetTextSegName = kextPrefix + ":__text"
for segStartEA in Segments():
seg = getseg(segStartEA)
segName = get_segm_name(segStartEA)
if segName != targetSegName:
continue
constSegStartEA = seg.startEA
constSegEndEA = seg.endEA
currentEA = constSegStartEA
isInVT = False
while currentEA < constSegEndEA:
currentName = getName(currentEA)
if currentName.startswith("__ZTV"):
currentEA += 0x10
isInVT = True
continue
if isInVT:
if Qword(currentEA) == 0:
isInVT = False
currentEA += 8
continue
xrefs = getXRefsTo(currentEA)
if len(xrefs) == 0:
currentEA += 8
continue
else:
for xref in xrefs:
xrefSegName = SegName(xref)
if xrefSegName == targetTextSegName:
xrefFunc = get_func(xref)
if not None is xrefFunc:
xrefFuncName = getName(xrefFunc.startEA)
xrefDeFuncName = getDeFuncNameOfName(xrefFuncName)
className = None
if not None is xrefDeFuncName:
className = xrefDeFuncName[:xrefDeFuncName.rfind("::")]
elif "::" in xrefFuncName:
className = xrefFuncName[:xrefFuncName.rfind("::")]
sMethods_IOExternalMethodDispatch_cnt = 0
guessEA = currentEA
while True:
guessValue0 = Qword(guessEA)
guessValue1 = Qword(guessEA+8)
guessValue2 = Qword(guessEA+0x10)
guessValue3 = Qword(guessEA+0x18)
if isIOExternalMethodDispatchAtEA(guessEA) :
guessEA += 0x18
sMethods_IOExternalMethodDispatch_cnt += 1
elif guessValue0 == 0 and guessValue1 == 0 and guessValue2 == 0 and \
isIOExternalMethodDispatchAtEA(guessEA+0x18, True):
guessEA += 0x18
sMethods_IOExternalMethodDispatch_cnt += 1
else:
break
if sMethods_IOExternalMethodDispatch_cnt != 0:
externSMethods.append((currentEA, sMethods_IOExternalMethodDispatch_cnt+1, className))
if not None is className:
parseSMethodArrayAtAddr(currentEA, sMethods_IOExternalMethodDispatch_cnt+1, className, True)
currentEA = guessEA + 0x18
continue
currentEA += 8
return externSMethods, targetSMethods
def findSMethodArrayForAll():
externSMethods = []
targetSMethods = []
for kextPrefix in getAllKEXTPrefixes():
externSMethodsOfKext, targetSMethodsOfKext = findSMethodArrayForKext(kextPrefix)
externSMethods.extend(externSMethodsOfKext)
targetSMethods.extend(targetSMethodsOfKext)
externSMethodsOfKext, targetSMethodsOfKext = findSMethodArrayForKext()
externSMethods.extend(externSMethodsOfKext)
targetSMethods.extend(targetSMethodsOfKext)
print "[+] Found SMethods: EA, Size, ClassName"
for sMethod in externSMethods:
print "{:016X}, {}, {}".format(sMethod[0], sMethod[1], sMethod[2])
print "[+] Arm64TypeAnalyzer loaded"
| en | 0.455662 | # Copyright (C) 2020 Alibaba Group Holding Limited #print "[+] Found ", funcName, specialFuncName #print "[#] func at {:016X} does not have obj arg".format(funcStartEA) #print moduleNameToClassNamesMap #if className in classNameToVTableFuncEAListMap: # processVFuncArgsForClass(className) #kextPrefix += ":__text" #textSeg = get_segm_by_name(kextPrefix) # This may incur error since not all functions are non-static SetType(ea, "whole_vtable_" + className) will make the vtable const a chaos # I think this one is useless #setTypeForAllGlobalVars() # I think this one is useless #setTypeForAllGlobalVars() # Keep GOT consistency for type-analyzed funcs and vars | 2.139007 | 2 |
classicMonments/spider/spiders/ClassicSpider.py | wigginzz/classicMoment | 0 | 6615666 | <reponame>wigginzz/classicMoment
# -*- coding: utf-8 -*-
import re
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from classicMonments.spider.items import ClassicItem
class ClassicSpider(CrawlSpider):
    """Crawl gov.cn premier pages and yield image records from articles
    whose body text mentions any of the configured keywords.

    The spider is constructed with ``keywords`` (a list of keyword
    strings); article pages whose paragraph text matches any keyword
    produce one item per embedded ``<img>``.
    """

    name = 'ClassicSpider'
    allowed_domains = ['www.gov.cn', 'sousuo.gov.cn']
    start_urls = ['http://www.gov.cn/premier/index.htm']

    # Rule 1: pages under /premier/ are handed to parse_item and followed.
    # Rule 2: search-column listing pages are followed for more links only.
    rules = (
        Rule(LinkExtractor(allow='/premier/*'), callback='parse_item', follow=True),
        Rule(LinkExtractor(allow='http://sousuo.gov.cn/column/.*'), follow=True),
    )

    def __init__(self, keywords=None, *a, **kw):
        super(ClassicSpider, self).__init__(*a, **kw)
        # Keywords matched against article bodies; None/empty disables matching.
        self.__keywords = keywords

    def parse_item(self, response):
        """Yield a ClassicItem per image in an article that matches a keyword."""
        if not self.__keywords:
            # No keywords configured: nothing can match, so skip the page
            # (the previous code crashed here indexing keywords[0]).
            return
        content = response.xpath('//div[@class="article oneColumn pub_border"]')
        # Alternation regex "kw1|kw2|..." matching any configured keyword.
        pattern = '|'.join(self.__keywords)
        body_text = ''.join(
            content.xpath('div[@class="pages_content"]/p/text()').extract())
        if re.search(pattern, body_text):
            # Hoist loop-invariant page data out of the per-image loop.
            title = content.xpath('h1/text()').extract_first()
            # Image srcs are relative; resolve against the page URL's directory.
            base_url = re.sub(r'[^\/]+$', '', response.url)
            for imgurl in content.xpath('div[@class="pages_content"]/p/img/@src').extract():
                item = ClassicItem()  # fresh item per image so yields don't alias
                item['url'] = response.url
                item['title'] = title
                item['image_url'] = base_url + imgurl
                yield item
| # -*- coding: utf-8 -*-
import re
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from classicMonments.spider.items import ClassicItem
class ClassicSpider(CrawlSpider):
    """Follow gov.cn premier pages and yield one image record per <img>
    found in articles whose body text matches any configured keyword."""

    name = 'ClassicSpider'
    allowed_domains = ['www.gov.cn', 'sousuo.gov.cn']
    start_urls = ['http://www.gov.cn/premier/index.htm']

    # Article pages under /premier/ are parsed and followed; the
    # search-column listing pages are only followed for more links.
    rules = (
        Rule(LinkExtractor(allow='/premier/*'), callback='parse_item', follow=True),
        Rule(LinkExtractor(allow='http://sousuo.gov.cn/column/.*'), follow=True),
    )

    def __init__(self, keywords=None, *a, **kw):
        super(ClassicSpider, self).__init__(*a, **kw)
        # Keyword strings used to filter article bodies.
        self.__keywords = keywords

    def parse_item(self, response):
        """Extract image records from an article page that mentions a keyword."""
        content = response.xpath('//div[@class="article oneColumn pub_border"]')
        item = ClassicItem()
        kws = self.__keywords
        # Build an alternation pattern "kw0|kw1|..." from the keyword list.
        pattern = kws[0]
        for extra in kws[1:]:
            pattern = pattern + '|' + extra
        paragraphs = content.xpath('div[@class="pages_content"]/p/text()').extract()
        if re.findall(pattern, ''.join(paragraphs)):
            for src in content.xpath('div[@class="pages_content"]/p/img/@src').extract():
                item['url'] = response.url
                item['title'] = content.xpath('h1/text()').extract_first()
                item['image_url'] = re.sub(r'[^\/]+$', '', response.url) + src
                yield item
src/generate_conditional_samples.py | marktgodfrey/gpt-2 | 0 | 6615667 | <reponame>marktgodfrey/gpt-2
#!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model, sample, encoder
def interact_model(
    model_name='117M',
    seed=None,
    nsamples=1,
    length=None,
    max_context_length=None,
    temperature=1,
    top_k=0,
    top_p=0.0,
    models_dir='models',
    checkpoint_dir='checkpoint',
    run_name='117M',
    prompt_path=None,
    out_path=None
):
    """
    Generate conditional samples from a GPT-2 checkpoint, seeded with the
    text read from *prompt_path*.

    Long outputs are produced block-by-block: each iteration feeds the model
    a context window (up to max_context_length tokens of prompt and/or
    previously generated text) and samples the rest of the window as new
    tokens, rotating the buffer between blocks.

    :model_name=117M : String, which model's encoder/hparams to use
    :seed=None : Integer seed for random number generators, fix seed to reproduce
     results
    :nsamples=1 : Number of samples to return total
    :length=None : Number of tokens in generated text, if None (default), is
     the model window size minus the prompt length
    :max_context_length=None : Maximum number of context tokens fed to the
     model per block; must be strictly less than the model window size
    :temperature=1 : Float value controlling randomness in boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model will become deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=0 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 (default) is a
     special setting meaning no restrictions. 40 generally is a good value.
    :top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
     overriding top_k if set to a value > 0. A good setting is 0.9.
    :models_dir='models' : Directory holding the pretrained model
     (encoder + hparams.json)
    :checkpoint_dir='checkpoint' : Directory holding fine-tuned checkpoints
    :run_name='117M' : Subdirectory of checkpoint_dir to restore from
    :prompt_path=None : Path of the text file used as the prompt (required)
    :out_path=None : If given, all generated samples are also written here
    """
    models_dir = os.path.expanduser(os.path.expandvars(models_dir))

    enc = encoder.get_encoder(model_name, models_dir)
    hparams = model.default_hparams()
    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))

    with open(prompt_path, 'r') as fp:
        raw_text = fp.read()
    if not raw_text:
        print('Prompt should not be empty!')
        return
    context_tokens = enc.encode(raw_text)

    if length is None:
        # Default: fill the rest of the model window after the prompt.
        length = hparams.n_ctx - len(context_tokens)
    print('using context of length: %d' % len(context_tokens))

    if max_context_length is None:
        max_context_length = hparams.n_ctx // 2
    elif max_context_length >= hparams.n_ctx:
        # A context as large as the window leaves zero tokens to generate
        # per block, so the while loop below would never terminate.
        raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
    if len(context_tokens) > max_context_length:
        print('context is too long! will be truncated...')

    with tf.Session(graph=tf.Graph()) as sess:
        np.random.seed(seed)
        tf.set_random_seed(seed)
        ckpt = tf.train.latest_checkpoint(os.path.join(checkpoint_dir, run_name))

        generated = 0
        all_text = []
        for _ in range(nsamples):
            generated_tokens = []
            context_buffer = None
            while len(generated_tokens) < length:
                if not context_buffer:
                    # Start from (at most) the last window's worth of prompt.
                    context_buffer = context_tokens[-hparams.n_ctx:]
                context_length = min(max_context_length, len(context_buffer))
                block_length = hparams.n_ctx - context_length
                if len(generated_tokens) + block_length > length:
                    # Final block: shrink to exactly the tokens still needed
                    # and pad the context back up to a full window.
                    block_length = length - len(generated_tokens)
                    context_length = hparams.n_ctx - block_length
                print('generating block of %d tokens with context:\n%s' % (block_length, enc.decode(context_buffer[-context_length:])))

                # NOTE(review): the placeholder, sampling graph and Saver are
                # rebuilt (and the checkpoint re-restored) on every block, so
                # the TF graph grows per iteration. block_length varies per
                # block, which is presumably why -- confirm before hoisting.
                context = tf.placeholder(tf.int32, [1, None])
                output = sample.sample_sequence(
                    hparams=hparams,
                    length=block_length,
                    context=context,
                    batch_size=1,
                    temperature=temperature,
                    top_k=top_k,
                    top_p=top_p
                )

                saver = tf.train.Saver()
                saver.restore(sess, ckpt)

                out = sess.run(output, feed_dict={
                    context: [context_buffer[-context_length:]]
                })[0, -block_length:]
                print('generated:\n%s (%d)' % (enc.decode(out), len(out)))
                if len(context_buffer) < hparams.n_ctx:
                    context_buffer.extend(out)  # buffer tops out at n_ctx here
                else:
                    # Rotate: keep the newest context_length tokens, then
                    # overwrite the tail with the freshly generated block.
                    context_buffer[:context_length] = context_buffer[-context_length:]
                    context_buffer[-block_length:] = out
                generated_tokens.extend(out)
                print('generated %d of %d tokens' % (len(generated_tokens), length))

            generated += 1
            text = enc.decode(context_tokens)
            text += enc.decode(generated_tokens)
            separator = '=' * 40 + ' SAMPLE ' + str(generated) + ' ' + '=' * 40 + '\n'
            print(separator + text)
            all_text.append(separator + text)

    if out_path:
        with open(out_path, 'w') as fp:
            fp.write('\n'.join(all_text))
# Expose interact_model's keyword parameters as a command-line interface.
if __name__ == '__main__':
    fire.Fire(interact_model)
| #!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model, sample, encoder
def interact_model(
    model_name='117M',
    seed=None,
    nsamples=1,
    length=None,
    max_context_length=None,
    temperature=1,
    top_k=0,
    top_p=0.0,
    models_dir='models',
    checkpoint_dir='checkpoint',
    run_name='117M',
    prompt_path=None,
    out_path=None
):
    """
    Interactively run the model
    :model_name=117M : String, which model to use
    :seed=None : Integer seed for random number generators, fix seed to reproduce
     results
    :nsamples=1 : Number of samples to return total
    :length=None : Number of tokens in generated text, if None (default), is
     determined by model hyperparameters
    :max_context_length=None : Number of tokens to use as context, affects
     how much we'll generate each iteration
    :temperature=1 : Float value controlling randomness in boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model will become deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=0 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 (default) is a
     special setting meaning no restrictions. 40 generally is a good value.
    :top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
     overriding top_k if set to a value > 0. A good setting is 0.9.
    :models_dir='models' : Directory holding the downloaded GPT-2 models
    :checkpoint_dir='checkpoint' : Directory holding fine-tuned checkpoints
    :run_name='117M' : Subdirectory of checkpoint_dir to restore weights from
    :prompt_path=None : Path to a text file whose contents seed the generation
    :out_path=None : If given, every generated sample is also written here
    """
    models_dir = os.path.expanduser(os.path.expandvars(models_dir))
    # Build the tokenizer and model hyperparameters for the requested model.
    enc = encoder.get_encoder(model_name, models_dir)
    hparams = model.default_hparams()
    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))
    # Read and tokenize the prompt file.
    context_tokens = []
    with open(prompt_path, 'r') as fp:
        raw_text = fp.read()
    if not raw_text:
        print('Prompt should not be empty!')
        return
    context_tokens = enc.encode(raw_text)
    if length is None:
        # length = hparams.n_ctx // 2
        # Default: generate up to the model window minus the prompt length.
        length = hparams.n_ctx - len(context_tokens)
    # elif len(context_tokens) > hparams.n_ctx - length:
    #     raise ValueError("Can't get samples longer than window size - context: %s" % hparams.n_ctx - len(context_tokens))
    # elif length > hparams.n_ctx:
    #     raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
    print('using context of length: %d' % len(context_tokens))
    if max_context_length is None:
        max_context_length = hparams.n_ctx // 2
    elif max_context_length > hparams.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
    if len(context_tokens) > max_context_length:
        print('context is too long! will be truncated...')
    # NOTE(review): max_block_length is computed but never used below.
    max_block_length = hparams.n_ctx - max_context_length
    with tf.Session(graph=tf.Graph()) as sess:
        np.random.seed(seed)
        tf.set_random_seed(seed)
        ckpt = tf.train.latest_checkpoint(os.path.join(checkpoint_dir, run_name))
        generated = 0
        all_text = []
        for _ in range(nsamples):
            generated_tokens = []
            context_buffer = None
            # Sliding-window generation: keep at most n_ctx tokens of
            # context and generate one block per iteration until `length`
            # tokens have been produced.
            while len(generated_tokens) < length:
                if not context_buffer:
                    context_buffer = context_tokens[-hparams.n_ctx:]
                context_length = min(max_context_length, len(context_buffer))
                block_length = hparams.n_ctx - context_length
                if len(generated_tokens) + block_length > length:
                    # Final block: shrink it so generation stops exactly
                    # at `length` tokens.
                    block_length = length - len(generated_tokens)
                    context_length = hparams.n_ctx - block_length
                print('generating block of %d tokens with context:\n%s' % (block_length, enc.decode(context_buffer[-context_length:])))
                # NOTE(review): the sampling graph and Saver are rebuilt on
                # every iteration, which keeps growing the TF graph.
                # Hoisting them out of the loop would help, but
                # block_length can change on the final iteration -- confirm
                # before restructuring.
                context = tf.placeholder(tf.int32, [1, None])
                output = sample.sample_sequence(
                    hparams=hparams,
                    length=block_length,
                    context=context,
                    batch_size=1,
                    temperature=temperature,
                    top_k=top_k,
                    top_p=top_p
                )
                saver = tf.train.Saver()
                saver.restore(sess, ckpt)
                # Keep only the newly sampled tokens (drop the echoed context).
                out = sess.run(output, feed_dict={
                    context: [context_buffer[-context_length:]]
                })[0, -block_length:]
                print('generated:\n%s (%d)' % (enc.decode(out), len(out)))
                if len(context_buffer) < hparams.n_ctx:
                    context_buffer.extend(out)  # should be at n_ctx now...
                else:
                    # rotate context, newly generated context at the end
                    context_buffer[:context_length] = context_buffer[-context_length:]
                    context_buffer[-block_length:] = out
                generated_tokens.extend(out)
                print('generated %d of %d tokens' % (len(generated_tokens), length))
            generated += 1
            # Prepend the original prompt to the sample and echo it with a
            # numbered separator banner.
            text = enc.decode(context_tokens)
            text += enc.decode(generated_tokens)
            separator = '=' * 40 + ' SAMPLE ' + str(generated) + ' ' + '=' * 40 + '\n'
            print(separator + text)
            all_text.append(separator + text)
    if out_path:
        with open(out_path, 'w') as fp:
            fp.write('\n'.join(all_text))


if __name__ == '__main__':
    fire.Fire(interact_model)
imix/models/vqa_models/vilbert/utils.py | linxi1158/iMIX | 23 | 6615668 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from torch._six import inf
import logging
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class MultiTaskStopOnPlateau(object):
    """Watch a validation metric and raise a stop flag when it plateaus.

    The bookkeeping mirrors torch's ReduceLROnPlateau, but instead of
    lowering a learning rate it flips ``self.in_stop`` to True once the
    metric has failed to improve for more than ``patience`` calls to
    :meth:`step`, and flips it back to False when, while stopped, a value
    beats the best score by more than ``continue_threshold`` (always
    evaluated in 'min' direction).
    """

    def __init__(
        self,
        mode='min',
        patience=10,
        continue_threshold=0.005,
        verbose=False,
        threshold=1e-4,
        threshold_mode='rel',
        cooldown=0,
        min_lr=0,
        eps=1e-8,
    ):
        self.patience = patience
        self.verbose = verbose
        self.cooldown = cooldown
        self.cooldown_counter = 0
        self.mode = mode
        self.threshold = threshold
        self.threshold_mode = threshold_mode
        self.best = None
        self.num_bad_epochs = None
        self.mode_worse = None  # sentinel: the worst possible metric value
        self.is_better = None
        self.in_stop = False
        self.eps = eps
        self.last_epoch = -1
        self.continue_threshold = continue_threshold
        self._init_is_better(mode=mode, threshold=threshold, threshold_mode=threshold_mode)
        self._init_continue_is_better(mode='min', threshold=continue_threshold, threshold_mode=threshold_mode)
        self._reset()

    def _reset(self):
        """Forget history: clear the best score, counters, and stop flag."""
        self.best = self.mode_worse
        self.cooldown_counter = 0
        self.num_bad_epochs = 0
        self.in_stop = False

    def step(self, metrics, epoch=None):
        """Record one metric value and update the ``in_stop`` flag."""
        current = float(metrics)  # also accepts zero-dim tensors
        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch

        if self.is_better(current, self.best):
            self.best = current
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1

        if self.in_cooldown:
            self.cooldown_counter -= 1
            self.num_bad_epochs = 0  # bad epochs during cooldown do not count

        if self.num_bad_epochs > self.patience:
            # Plateau detected: raise the stop flag and restart the counters.
            self.in_stop = True
            self.cooldown_counter = self.cooldown
            self.num_bad_epochs = 0
        elif self.in_stop and self.continue_is_better(current, self.best):
            # While stopped, the metric dropped by more than
            # continue_threshold below the best value: resume.
            self.in_stop = False
            self.cooldown_counter = self.cooldown
            self.num_bad_epochs = 0

    @property
    def in_cooldown(self):
        return self.cooldown_counter > 0

    def _cmp(self, mode, threshold_mode, threshold, a, best):
        """Return True when `a` beats `best` by more than `threshold`."""
        if threshold_mode == 'rel':
            if mode == 'min':
                return a < best * (1.0 - threshold)
            return a > best * (threshold + 1.0)
        # threshold_mode == 'abs'
        if mode == 'min':
            return a < best - threshold
        return a > best + threshold

    def _init_is_better(self, mode, threshold, threshold_mode):
        if mode not in {'min', 'max'}:
            raise ValueError('mode ' + mode + ' is unknown!')
        if threshold_mode not in {'rel', 'abs'}:
            raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')
        self.mode_worse = inf if mode == 'min' else -inf
        self.is_better = partial(self._cmp, mode, threshold_mode, threshold)

    def _init_continue_is_better(self, mode, threshold, threshold_mode):
        self.continue_is_better = partial(self._cmp, mode, threshold_mode, threshold)
| # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from torch._six import inf
import logging
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class MultiTaskStopOnPlateau(object):
def __init__(
self,
mode='min',
patience=10,
continue_threshold=0.005,
verbose=False,
threshold=1e-4,
threshold_mode='rel',
cooldown=0,
min_lr=0,
eps=1e-8,
):
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0
self.mode = mode
self.threshold = threshold
self.threshold_mode = threshold_mode
self.best = None
self.num_bad_epochs = None
self.mode_worse = None # the worse value for the chosen mode
self.is_better = None
self.in_stop = False
self.eps = eps
self.last_epoch = -1
self.continue_threshold = continue_threshold
self._init_is_better(mode=mode, threshold=threshold, threshold_mode=threshold_mode)
self._init_continue_is_better(mode='min', threshold=continue_threshold, threshold_mode=threshold_mode)
self._reset()
def _reset(self):
"""Resets num_bad_epochs counter and cooldown counter."""
self.best = self.mode_worse
self.cooldown_counter = 0
self.num_bad_epochs = 0
self.in_stop = False
def step(self, metrics, epoch=None):
# convert `metrics` to float, in case it's a zero-dim Tensor
current = float(metrics)
if epoch is None:
epoch = self.last_epoch = self.last_epoch + 1
self.last_epoch = epoch
if self.is_better(current, self.best):
self.best = current
self.num_bad_epochs = 0
else:
self.num_bad_epochs += 1
if self.in_cooldown:
self.cooldown_counter -= 1
self.num_bad_epochs = 0 # ignore any bad epochs in cooldown
if self.num_bad_epochs > self.patience:
self.in_stop = True
self.cooldown_counter = self.cooldown
self.num_bad_epochs = 0
# if the perforance is keep dropping, then start optimizing again.
elif self.continue_is_better(current, self.best) and self.in_stop:
self.in_stop = False
self.cooldown_counter = self.cooldown
self.num_bad_epochs = 0
# if we lower the learning rate, then
# call reset.
@property
def in_cooldown(self):
return self.cooldown_counter > 0
def _cmp(self, mode, threshold_mode, threshold, a, best):
if mode == 'min' and threshold_mode == 'rel':
rel_epsilon = 1.0 - threshold
return a < best * rel_epsilon
elif mode == 'min' and threshold_mode == 'abs':
return a < best - threshold
elif mode == 'max' and threshold_mode == 'rel':
rel_epsilon = threshold + 1.0
return a > best * rel_epsilon
else: # mode == 'max' and epsilon_mode == 'abs':
return a > best + threshold
def _init_is_better(self, mode, threshold, threshold_mode):
if mode not in {'min', 'max'}:
raise ValueError('mode ' + mode + ' is unknown!')
if threshold_mode not in {'rel', 'abs'}:
raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')
if mode == 'min':
self.mode_worse = inf
else: # mode == 'max':
self.mode_worse = -inf
self.is_better = partial(self._cmp, mode, threshold_mode, threshold)
def _init_continue_is_better(self, mode, threshold, threshold_mode):
self.continue_is_better = partial(self._cmp, mode, threshold_mode, threshold)
| en | 0.779415 | # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pylint: disable=invalid-name # the worse value for the chosen mode Resets num_bad_epochs counter and cooldown counter. # convert `metrics` to float, in case it's a zero-dim Tensor # ignore any bad epochs in cooldown # if the perforance is keep dropping, then start optimizing again. # if we lower the learning rate, then # call reset. # mode == 'max' and epsilon_mode == 'abs': # mode == 'max': | 1.912355 | 2 |
examples/buzsaki/buzsaki.py | rodriguez-facundo/spiky | 2 | 6615669 | <filename>examples/buzsaki/buzsaki.py<gh_stars>1-10
# by <NAME>
# Spike-sorting demo: cluster spikes from a raw extracellular recording
# with the `spiky` library, plot the clusters, and compute a confusion
# matrix for the result.
import spiky

# Path to the raw recording used as input.
raw = 'data/raw_data.dat'
# Path to the JSON file holding the clustering parameters.
params = 'parameters/parameters.json'

# Create the clustering object.
buz = spiky.New()
# Load the parameters that drive the clustering.
buz.loadParams(pfile=params)
# Load the raw dataset.
buz.loadRawFile(rfile=raw)
# Run the clustering algorithm.
buz.run()
# Plot the resulting spike clusters.
buz.plotClusters()
# Compute the confusion matrix (per the original note; the method is
# named ``blur`` -- confirm semantics against the spiky docs).
buz.blur()
| <filename>examples/buzsaki/buzsaki.py<gh_stars>1-10
# by <NAME>
import spiky
# path to dataset
raw = 'data/raw_data.dat'
# path to parameter configuration file
params = 'parameters/parameters.json'
# create the clustering object
buz = spiky.New()
# load the parameters to perform the clustering
buz.loadParams(pfile=params)
# load the dataset
buz.loadRawFile(rfile=raw)
# run the algorithm
buz.run()
# plot the spikes
buz.plotClusters()
# compute confusion matrix
buz.blur()
| en | 0.552584 | # by <NAME> # path to dataset # path to parameter configuration file # create the clustering object # load the parameters to perform the clustering # load the dataset # run the algorithm # plot the spikes # compute confusion matrix | 2.618718 | 3 |
corefacility/core/views/__init__.py | serik1987/corefacility | 0 | 6615670 | from .main_window import MainWindow
from .user import UserViewSet
from .group import GroupViewSet
from .project import ProjectViewSet
from .access_level import AccessLevelView
from .project_permission_viewset import ProjectPermissionViewSet
from .login import LoginView
from .profile import ProfileView
from .synchronization_view import SynchronizationView
from .view_404 import View404
| from .main_window import MainWindow
from .user import UserViewSet
from .group import GroupViewSet
from .project import ProjectViewSet
from .access_level import AccessLevelView
from .project_permission_viewset import ProjectPermissionViewSet
from .login import LoginView
from .profile import ProfileView
from .synchronization_view import SynchronizationView
from .view_404 import View404
| none | 1 | 1.006945 | 1 | |
config/ftp.py | mkaminsky11/cyberpatriot | 6 | 6615671 | import subprocess
import os.path
#####################
# TESTED, ALL GOOD! #
#####################
#FIRST, MAKE A BACKUP
#=======================
subprocess.call("sudo apt-get install vsftpd -y".split()) #actually, first make sure that you have ftp
subprocess.call("cp /etc/vsftpd.conf /etc/vsftpd_backup.conf".split())
if os.path.exists("/etc/vsftpd.conf") == True:
#THEN, READ IT
#=====================
file = open("/etc/vsftpd.conf","r+")
text = file.read().strip("\n").split("\n")
#REMOVE POTENTIALLY OFFENDING LINES
for i in range(len(text)):
line = text[i]
if ("anonymous_enable" in line) == True:
text[i] = ""
if ("local_enable" in line) == True:
text[i] = ""
if ("write_enable" in line) == True:
text[i] = ""
if ("chroot_local_user" in line) == True:
text[i] = ""
#ADD NEW LINES
#====================
text.append("anonymous_enable=NO")
text.append("local_enable=YES")
text.append("write_enable=YES ")
text.append("chroot_local_user=YES")
#FINALLY, WRITE AND RESTART
#======================
text = '\n'.join([str(x) for x in text])
file.seek(0)
file.write(text)
file.truncate()
file.close()
subprocess.call("sudo /etc/init.d/vsftpd restart".split())
else:
print("/etc/vsftpd.conf does not exist!")
| import subprocess
import os.path
#####################
# TESTED, ALL GOOD! #
#####################
#FIRST, MAKE A BACKUP
#=======================
subprocess.call("sudo apt-get install vsftpd -y".split()) #actually, first make sure that you have ftp
subprocess.call("cp /etc/vsftpd.conf /etc/vsftpd_backup.conf".split())
if os.path.exists("/etc/vsftpd.conf") == True:
#THEN, READ IT
#=====================
file = open("/etc/vsftpd.conf","r+")
text = file.read().strip("\n").split("\n")
#REMOVE POTENTIALLY OFFENDING LINES
for i in range(len(text)):
line = text[i]
if ("anonymous_enable" in line) == True:
text[i] = ""
if ("local_enable" in line) == True:
text[i] = ""
if ("write_enable" in line) == True:
text[i] = ""
if ("chroot_local_user" in line) == True:
text[i] = ""
#ADD NEW LINES
#====================
text.append("anonymous_enable=NO")
text.append("local_enable=YES")
text.append("write_enable=YES ")
text.append("chroot_local_user=YES")
#FINALLY, WRITE AND RESTART
#======================
text = '\n'.join([str(x) for x in text])
file.seek(0)
file.write(text)
file.truncate()
file.close()
subprocess.call("sudo /etc/init.d/vsftpd restart".split())
else:
print("/etc/vsftpd.conf does not exist!")
| en | 0.393492 | ##################### # TESTED, ALL GOOD! # ##################### #FIRST, MAKE A BACKUP #======================= #actually, first make sure that you have ftp #THEN, READ IT #===================== #REMOVE POTENTIALLY OFFENDING LINES #ADD NEW LINES #==================== #FINALLY, WRITE AND RESTART #====================== | 2.310979 | 2 |
tg_bot.py | delphython/fish-shop | 0 | 6615672 | <reponame>delphython/fish-shop
import os
import redis
from dotenv import load_dotenv
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import Filters, Updater
from telegram.ext import CallbackQueryHandler, CommandHandler, MessageHandler
from textwrap import dedent
from validate_email import validate_email
from moltin_api import (
fetch_fish_shop_good,
add_good_to_cart,
get_product_image_url,
remove_cart_item,
create_customer,
)
from send_messages import (
send_total_cart_message,
send_initial_message,
)
_database = None
def start(bot, update):
    """Entry state: greet the user with the product menu."""
    send_initial_message(update.effective_chat.id, bot)
    return "HANDLE_MENU"
def handle_menu(bot, update):
    """Menu state: open the cart or show the details card for a chosen good.

    ``query.data`` is either the literal ``"cart"`` or the id of the good
    the user tapped in the menu.
    """
    query = update.callback_query
    chat_id = query.message.chat_id
    if query.data == "cart":
        send_total_cart_message(chat_id, bot, query)
        return "HANDLE_CART"
    else:
        weight_buttons = []
        max_good_quantity = 3  # number of quantity buttons offered
        good_id = query.data
        fish_shop_good = fetch_fish_shop_good(good_id)["data"]
        good_price = fish_shop_good["meta"]["display_price"]["with_tax"][
            "formatted"
        ]
        good_weight = fish_shop_good["weight"]["kg"]
        message_text = dedent(
            f"""\
            {fish_shop_good['name']}
            {good_price} per {good_weight} kg
            {fish_shop_good['meta']['stock']['level']} kg in stock
            {fish_shop_good['description']}"""
        )
        # The main image is optional: walk the nested dicts defensively.
        image_id = (
            fish_shop_good.get("relationships", {})
            .get("main_image", {})
            .get("data", {})
            .get("id")
        )
        # One button per multiple of the base weight (1..max_good_quantity);
        # the callback payload encodes "<good_id>|<quantity>".
        for good_quantity in range(1, max_good_quantity + 1):
            weight_buttons.append(
                InlineKeyboardButton(
                    f"{good_weight * good_quantity} kg",
                    callback_data=f"{good_id}|{good_quantity}",
                )
            )
        keyboard = [
            weight_buttons,
            [InlineKeyboardButton("Корзина", callback_data="cart")],
            [InlineKeyboardButton("Назад", callback_data="back")],
        ]
        reply_markup = InlineKeyboardMarkup(keyboard)
        # Send a photo card when the good has a main image, otherwise text.
        if image_id:
            bot.send_photo(
                chat_id=chat_id,
                photo=get_product_image_url(image_id)["data"]["link"]["href"],
                caption=message_text,
                parse_mode="html",
                reply_markup=reply_markup,
            )
        else:
            bot.send_message(
                chat_id=chat_id,
                text=message_text,
                reply_markup=reply_markup,
            )
        # Remove the previous menu message to keep the chat tidy.
        bot.delete_message(
            chat_id=chat_id,
            message_id=query.message.message_id,
        )
    return "HANDLE_DESCRIPTION"
def handle_description(bot, update):
    """Good-details state: navigate back/to-cart or add items to the cart."""
    query = update.callback_query
    chat_id = query.message.chat_id
    choice = query.data

    if choice == "cart":
        # Show the cart summary and switch to the cart state.
        send_total_cart_message(chat_id, bot, query)
        return "HANDLE_CART"

    if choice != "back":
        # Payload is "<good_id>|<quantity>": put that many items in the
        # cart and stay on the description screen.
        good_id, good_quantity = choice.split("|")
        add_good_to_cart(good_id, chat_id, int(good_quantity))
        return "HANDLE_DESCRIPTION"

    # "back": show the cart summary and go back to the menu state.
    send_total_cart_message(chat_id, bot, query)
    return "HANDLE_MENU"
def handle_cart(bot, update):
    """Cart state: return to the menu, start checkout, or remove an item.

    ``query.data`` is ``"menu"``, ``"payment"``, or the id of the cart
    item to delete.
    """
    query = update.callback_query
    chat_id = query.message.chat_id
    if query.data == "menu":
        send_initial_message(chat_id, bot)
        # Drop the old cart message so only the fresh menu remains.
        bot.delete_message(
            chat_id=chat_id,
            message_id=query.message.message_id,
        )
        return "HANDLE_MENU"
    elif query.data == "payment":
        # Checkout starts by asking for an e-mail address.
        bot.send_message(
            chat_id=chat_id,
            text="Введите адрес электронной почты:",
        )
        return "WAITING_EMAIL"
    else:
        # Any other payload is a cart item id: remove it and re-render.
        remove_cart_item(chat_id, query.data)
        send_total_cart_message(chat_id, bot, query)
        return "HANDLE_CART"
def waiting_email(bot, update):
    """Checkout state: validate the e-mail address typed by the user."""
    address = update.message.text
    user_id = update.message.chat_id

    # Only the syntax is checked; the network-based checks are
    # deliberately disabled.
    address_is_valid = validate_email(
        email_address=address,
        check_format=True,
        check_blacklist=False,
        check_dns=False,
        check_smtp=False,
    )
    if not address_is_valid:
        update.message.reply_text(
            "Вы ввели некорректный адрес электронной почты"
        )
        return "WAITING_EMAIL"

    update.message.reply_text(
        f"Вы ввели адрес электронной почты: {address}"
    )
    create_customer(str(user_id), address)
    return "START"
def handle_users_reply(bot, update):
    """Dispatch an incoming update to the handler for the user's state.

    The per-chat state machine is persisted in Redis keyed by chat id;
    the handler's return value becomes the user's next state.
    """
    db = get_database_connection()

    # Pull the payload and chat id from either a plain message or a
    # callback-button press; ignore any other update type.
    if update.message:
        user_reply = update.message.text
        chat_id = update.message.chat_id
    elif update.callback_query:
        user_reply = update.callback_query.data
        chat_id = update.callback_query.message.chat_id
    else:
        return

    if user_reply == "/start":
        user_state = "START"
    else:
        # Fall back to START for users with no stored state yet; the
        # original crashed with AttributeError on None.decode() here.
        saved_state = db.get(chat_id)
        user_state = saved_state.decode("utf-8") if saved_state else "START"

    states_functions = {
        "START": start,
        "HANDLE_MENU": handle_menu,
        "HANDLE_DESCRIPTION": handle_description,
        "HANDLE_CART": handle_cart,
        "WAITING_EMAIL": waiting_email,
    }
    state_handler = states_functions[user_state]
    try:
        next_state = state_handler(bot, update)
        db.set(chat_id, next_state)
    except Exception as err:
        print(err)
def get_database_connection():
    """Return the module-level Redis connection, creating it on first use.

    Connection parameters come from the REDIS_HOST / REDIS_PORT /
    REDIS_PASS environment variables.
    """
    global _database
    if not _database:
        database_password = os.getenv("REDIS_PASS")
        database_host = os.getenv("REDIS_HOST")
        database_port = os.getenv("REDIS_PORT")
        # The original contained a redaction artifact (`<PASSWORD>_password`)
        # that was not valid Python; the variable read above is the one
        # intended here.
        _database = redis.Redis(
            host=database_host, port=database_port, password=database_password
        )
    return _database
def main():
    """Configure the Telegram bot and run long polling until interrupted."""
    load_dotenv()
    telegram_token = os.getenv("TELEGRAM_TOKEN")
    updater = Updater(telegram_token)
    dispatcher = updater.dispatcher
    # Every kind of update goes through the single state-machine dispatcher.
    dispatcher.add_handler(CallbackQueryHandler(handle_users_reply))
    dispatcher.add_handler(MessageHandler(Filters.text, handle_users_reply))
    dispatcher.add_handler(CommandHandler("start", handle_users_reply))
    updater.start_polling()
    updater.idle()


if __name__ == "__main__":
    main()
| import os
import redis
from dotenv import load_dotenv
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import Filters, Updater
from telegram.ext import CallbackQueryHandler, CommandHandler, MessageHandler
from textwrap import dedent
from validate_email import validate_email
from moltin_api import (
fetch_fish_shop_good,
add_good_to_cart,
get_product_image_url,
remove_cart_item,
create_customer,
)
from send_messages import (
send_total_cart_message,
send_initial_message,
)
_database = None
def start(bot, update):
chat_id = update.effective_chat.id
send_initial_message(chat_id, bot)
return "HANDLE_MENU"
def handle_menu(bot, update):
query = update.callback_query
chat_id = query.message.chat_id
if query.data == "cart":
send_total_cart_message(chat_id, bot, query)
return "HANDLE_CART"
else:
weight_buttons = []
max_good_quantity = 3
good_id = query.data
fish_shop_good = fetch_fish_shop_good(good_id)["data"]
good_price = fish_shop_good["meta"]["display_price"]["with_tax"][
"formatted"
]
good_weight = fish_shop_good["weight"]["kg"]
message_text = dedent(
f"""\
{fish_shop_good['name']}
{good_price} per {good_weight} kg
{fish_shop_good['meta']['stock']['level']} kg in stock
{fish_shop_good['description']}"""
)
image_id = (
fish_shop_good.get("relationships", {})
.get("main_image", {})
.get("data", {})
.get("id")
)
for good_quantity in range(1, max_good_quantity + 1):
weight_buttons.append(
InlineKeyboardButton(
f"{good_weight * good_quantity} kg",
callback_data=f"{good_id}|{good_quantity}",
)
)
keyboard = [
weight_buttons,
[InlineKeyboardButton("Корзина", callback_data="cart")],
[InlineKeyboardButton("Назад", callback_data="back")],
]
reply_markup = InlineKeyboardMarkup(keyboard)
if image_id:
bot.send_photo(
chat_id=chat_id,
photo=get_product_image_url(image_id)["data"]["link"]["href"],
caption=message_text,
parse_mode="html",
reply_markup=reply_markup,
)
else:
bot.send_message(
chat_id=chat_id,
text=message_text,
reply_markup=reply_markup,
)
bot.delete_message(
chat_id=chat_id,
message_id=query.message.message_id,
)
return "HANDLE_DESCRIPTION"
def handle_description(bot, update):
query = update.callback_query
chat_id = query.message.chat_id
if query.data == "back":
send_total_cart_message(chat_id, bot, query)
elif query.data == "cart":
send_total_cart_message(chat_id, bot, query)
return "HANDLE_CART"
else:
good_id, good_quantity = query.data.split("|")
add_good_to_cart(
good_id,
chat_id,
int(good_quantity),
)
return "HANDLE_DESCRIPTION"
return "HANDLE_MENU"
def handle_cart(bot, update):
query = update.callback_query
chat_id = query.message.chat_id
if query.data == "menu":
send_initial_message(chat_id, bot)
bot.delete_message(
chat_id=chat_id,
message_id=query.message.message_id,
)
return "HANDLE_MENU"
elif query.data == "payment":
bot.send_message(
chat_id=chat_id,
text="Введите адрес электронной почты:",
)
return "WAITING_EMAIL"
else:
remove_cart_item(chat_id, query.data)
send_total_cart_message(chat_id, bot, query)
return "HANDLE_CART"
def waiting_email(bot, update):
user_email = update.message.text
user_id = update.message.chat_id
is_email_valid = validate_email(
email_address=user_email,
check_format=True,
check_blacklist=False,
check_dns=False,
check_smtp=False,
)
if is_email_valid:
update.message.reply_text(
f"Вы ввели адрес электронной почты: {user_email}"
)
create_customer(str(user_id), user_email)
return "START"
else:
update.message.reply_text(
"Вы ввели некорректный адрес электронной почты"
)
return "WAITING_EMAIL"
def handle_users_reply(bot, update):
db = get_database_connection()
if update.message:
user_reply = update.message.text
chat_id = update.message.chat_id
elif update.callback_query:
user_reply = update.callback_query.data
chat_id = update.callback_query.message.chat_id
else:
return
if user_reply == "/start":
user_state = "START"
else:
user_state = db.get(chat_id).decode("utf-8")
states_functions = {
"START": start,
"HANDLE_MENU": handle_menu,
"HANDLE_DESCRIPTION": handle_description,
"HANDLE_CART": handle_cart,
"WAITING_EMAIL": waiting_email,
}
state_handler = states_functions[user_state]
try:
next_state = state_handler(bot, update)
db.set(chat_id, next_state)
except Exception as err:
print(err)
def get_database_connection():
global _database
if not _database:
database_password = os.getenv("REDIS_PASS")
database_host = os.getenv("REDIS_HOST")
database_port = os.getenv("REDIS_PORT")
_database = redis.Redis(
host=database_host, port=database_port, password=<PASSWORD>_password
)
return _database
def main():
load_dotenv()
telegram_token = os.getenv("TELEGRAM_TOKEN")
updater = Updater(telegram_token)
dispatcher = updater.dispatcher
dispatcher.add_handler(CallbackQueryHandler(handle_users_reply))
dispatcher.add_handler(MessageHandler(Filters.text, handle_users_reply))
dispatcher.add_handler(CommandHandler("start", handle_users_reply))
updater.start_polling()
updater.idle()
if __name__ == "__main__":
main() | en | 0.223634 | \ {fish_shop_good['name']} {good_price} per {good_weight} kg {fish_shop_good['meta']['stock']['level']} kg in stock {fish_shop_good['description']} | 2.280487 | 2 |
benchmarks/dicodile_hubble.py | hndgzkn/dicodile | 15 | 6615673 |
import numpy as np
from scipy import sparse
from dicodile import dicodile
from dicodile.data.images import get_hubble
from dicodile.utils.viz import plot_atom_and_coefs
from dicodile.utils.dictionary import init_dictionary
n_atoms = 25
random_state = 42
def run_dicodile_hubble(size, reg, L):
    """Run distributed sparse coding on a Hubble image and save the result.

    Parameters
    ----------
    size : str
        Hubble image size tag passed to ``get_hubble`` (e.g. 'Medium').
    reg : float
        Regularization parameter for the sparse coding.
    L : int
        Side length (in pixels) of the square atoms.
    """
    X = get_hubble(size=size)
    D_init = init_dictionary(
        X, n_atoms, (L, L), random_state=random_state)
    dicod_kwargs = dict(soft_lock='border')
    D_hat, z_hat, pobj, times = dicodile(
        X, D_init, reg=reg, z_positive=True, n_iter=100, n_workers=400,
        eps=1e-5, tol=1e-3, verbose=2, dicod_kwargs=dicod_kwargs)

    # Save the atoms under a prefix that encodes every run parameter.
    prefix = (f"K{n_atoms}_L{L}_reg{reg}"
              f"_seed{random_state}_dicodile_{size}_")
    prefix = prefix.replace(" ", "")
    np.save(f"hubble/{prefix}D_hat.npy", D_hat)

    # Threshold tiny activations, then store them as CSR matrices to keep
    # the file small.  NOTE(review): this saves an object array -- loading
    # it back requires np.load(..., allow_pickle=True).
    z_hat[z_hat < 1e-2] = 0
    z_hat_save = [sparse.csr_matrix(z) for z in z_hat]
    np.save(f"hubble/{prefix}z_hat.npy", z_hat_save)

    plot_atom_and_coefs(D_hat, z_hat, prefix)
def plot_dicodile_hubble(size, reg, L):
    """Re-plot atoms/activations saved by ``run_dicodile_hubble``."""
    # Build the same prefix as the run function (including the space
    # stripping it applies, which was missing here).
    prefix = (f"K{n_atoms}_L{L}_reg{reg}"
              f"_seed{random_state}_dicodile_{size}_")
    prefix = prefix.replace(" ", "")
    D_hat = np.load(f"hubble/{prefix}D_hat.npy")
    # z_hat was saved as a list of scipy CSR matrices (an object array),
    # which np.load refuses to read unless pickle loading is enabled.
    z_hat = np.load(f"hubble/{prefix}z_hat.npy", allow_pickle=True)
    plot_atom_and_coefs(D_hat, z_hat, prefix)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser('')
parser.add_argument('--plot', action='store_true',
help='Plot the results from saved dictionaries')
parser.add_argument('--all', action='store_true',
help='Plot the results from saved dictionaries')
args = parser.parse_args()
display_params = ("Medium", .1, 32)
if args.plot:
run_func = plot_dicodile_hubble
else:
run_func = run_dicodile_hubble
if args.all:
for size in ['Large', 'Medium']:
for reg in [.1, .3, .05]:
for L in [32, 28]:
try:
run_func(size, reg, L)
except FileNotFoundError:
continue
else:
run_func(*display_params)
|
import numpy as np
from scipy import sparse
from dicodile import dicodile
from dicodile.data.images import get_hubble
from dicodile.utils.viz import plot_atom_and_coefs
from dicodile.utils.dictionary import init_dictionary
n_atoms = 25
random_state = 42
def run_dicodile_hubble(size, reg, L):
X = get_hubble(size=size)
D_init = init_dictionary(
X, n_atoms, (L, L), random_state=random_state)
dicod_kwargs = dict(soft_lock='border')
D_hat, z_hat, pobj, times = dicodile(
X, D_init, reg=reg, z_positive=True, n_iter=100, n_workers=400,
eps=1e-5, tol=1e-3, verbose=2, dicod_kwargs=dicod_kwargs)
# Save the atoms
prefix = (f"K{n_atoms}_L{L}_reg{reg}"
f"_seed{random_state}_dicodile_{size}_")
prefix = prefix.replace(" ", "")
np.save(f"hubble/{prefix}D_hat.npy", D_hat)
z_hat[z_hat < 1e-2] = 0
z_hat_save = [sparse.csr_matrix(z) for z in z_hat]
np.save(f"hubble/{prefix}z_hat.npy", z_hat_save)
plot_atom_and_coefs(D_hat, z_hat, prefix)
def plot_dicodile_hubble(size, reg, L):
# Save the atoms
prefix = (f"K{n_atoms}_L{L}_reg{reg}"
f"_seed{random_state}_dicodile_{size}_")
D_hat = np.load(f"hubble/{prefix}D_hat.npy")
z_hat = np.load(f"hubble/{prefix}z_hat.npy")
plot_atom_and_coefs(D_hat, z_hat, prefix)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser('')
parser.add_argument('--plot', action='store_true',
help='Plot the results from saved dictionaries')
parser.add_argument('--all', action='store_true',
help='Plot the results from saved dictionaries')
args = parser.parse_args()
display_params = ("Medium", .1, 32)
if args.plot:
run_func = plot_dicodile_hubble
else:
run_func = run_dicodile_hubble
if args.all:
for size in ['Large', 'Medium']:
for reg in [.1, .3, .05]:
for L in [32, 28]:
try:
run_func(size, reg, L)
except FileNotFoundError:
continue
else:
run_func(*display_params)
| en | 0.334346 | # Save the atoms # Save the atoms | 2.243745 | 2 |
cloud/utils.py | shawnsarwar/logiak-cloud-function-api-1 | 1 | 6615674 | <filename>cloud/utils.py
#!/usr/bin/env python
# Copyright (C) 2020 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List
def escape_email(s):
    """Return *s* with '.' and '@' spelled out ('-dot-' / '-at-')."""
    replacements = {'.': '-dot-', '@': '-at-'}
    return s.translate(str.maketrans(replacements))
def escape_version(s):
    """Return *s* with every '.' replaced by '-' (e.g. '1.2.3' -> '1-2-3')."""
    return '-'.join(s.split('.'))
def missing_required(d, required):
    """Return the required keys absent from *d* (all of them if *d* is falsy)."""
    if d:
        return [key for key in required if key not in d]
    return required
def path_stripper(to_exclude: List):
    """Build a callable that strips excluded segments from a path list.

    The returned function removes, in place, the first occurrence of each
    name in *to_exclude* from the given list of path parts and returns
    the (mutated) list.
    """
    def _strip(path_parts: List) -> List:
        for name in to_exclude:
            if name in path_parts:
                path_parts.remove(name)
        return path_parts
    return _strip
def chunk(obj, size):
    """Yield successive slices of *obj* of length *size* (last may be shorter).

    A *size* below 1 is clamped to 1 so the generator always makes
    progress and never yields empty chunks.
    """
    n = max(1, size)
    # Use the clamped value for the slice width too; the original used the
    # raw `size`, which produced len(obj) empty slices when size <= 0.
    return (
        obj[i:i + n] for i in range(0, len(obj), n)
    )
| <filename>cloud/utils.py
#!/usr/bin/env python
# Copyright (C) 2020 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List
def escape_email(s):
s = s.replace('.', '-dot-')
s = s.replace('@', '-at-')
return s
def escape_version(s):
return s.replace('.', '-')
def missing_required(d, required):
if not d:
return required
return [k for k in required if k not in d]
def path_stripper(to_exclude: List):
def _fn(path_parts: List) -> List:
for rm in to_exclude:
try:
idx = path_parts.index(rm)
path_parts.pop(idx)
except ValueError:
pass
return path_parts
return _fn
def chunk(obj, size):
n = max(1, size)
return (
obj[i:i + size] for i in range(0, len(obj), n)
)
| en | 0.847646 | #!/usr/bin/env python # Copyright (C) 2020 by eHealth Africa : http://www.eHealthAfrica.org # # See the NOTICE file distributed with this work for additional information # regarding copyright ownership. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. | 1.995113 | 2 |
libdoc_sample/MyLibrary.py | thinkAmi-sandbox/RobotFramework-sample | 9 | 6615675 | from robot.api import logger
class MyLibrary:
    """My library (libdoc formatting demo).

    | =Title=          | =Another title= |
    | row 1, column 1  | row 1, column 2 |
    |                  | column 1 blank  |
    | column 2 blank   |                 |

    = Custom section =
    This is the custom section.

    = Next section =
    Link to the `custom section`.

    Links to standard sections:
    - `introduction`
    - `importing`
    - `shortcuts`
    - `keywords`

    *bold*

    _italic_

    plain text

    - list item 1
    - list item 2

    To Google: https://google.co.jp

    Also [https://google.co.jp|to Google]

    To `Hello World`

    ``inline code style``

    Multi-line *bold\n
    try* it
    """
    # One library instance is shared within a test suite.
    ROBOT_LIBRARY_SCOPE = 'TEST SUITE'

    def hello_world(self, name='foo'):
        """Prints a hello-world message to the console."""
        logger.console(f'hello, world {name} !')

    def no_args(self):
        # Keyword demonstrating a signature with no arguments.
        pass

    def multi_args(self, one, two='2', *args, **kwargs):
        # Keyword demonstrating positional, default, varargs and kwargs.
        pass
| from robot.api import logger
class MyLibrary:
"""マイライブラリ
| =タイトル= | =もう一つタイトル= |
| 1行1列目 | 1行2列目 |
| | 1列目が空白 |
| 2列目が空白 | |
= カスタムセクション =
ここがカスタムセクション
= 次のセクション =
`カスタムセクション` へのリンク
セクションへのリンク
- `introduction`
- `importing`
- `shortcuts`
- `keywords`
*太字です*
_イタリックです_
普通です
- リスト1
- リスト2
Googleへ https://google.co.jp
こちらも [https://google.co.jp|Googleへ]
`Hello World` へ
``インラインコードスタイル``
複数行の *bold\n
try* みる
"""
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
def hello_world(self, name='foo'):
"""ハローワールドを出力します"""
logger.console(f'hello, world {name} !')
def no_args(self):
pass
def multi_args(self, one, two='2', *args, **kwargs):
pass
| ja | 0.996571 | マイライブラリ | =タイトル= | =もう一つタイトル= | | 1行1列目 | 1行2列目 | | | 1列目が空白 | | 2列目が空白 | | = カスタムセクション = ここがカスタムセクション = 次のセクション = `カスタムセクション` へのリンク セクションへのリンク - `introduction` - `importing` - `shortcuts` - `keywords` *太字です* _イタリックです_ 普通です - リスト1 - リスト2 Googleへ https://google.co.jp こちらも [https://google.co.jp|Googleへ] `Hello World` へ ``インラインコードスタイル`` 複数行の *bold\n try* みる ハローワールドを出力します | 2.912857 | 3 |
LuoguCodes/AT2507.py | Anguei/OI-Codes | 0 | 6615676 | print max(sum(map(int, raw_input().split())), sum(map(int, raw_input().split()))) | print max(sum(map(int, raw_input().split())), sum(map(int, raw_input().split()))) | none | 1 | 1.779377 | 2 | |
africanus/model/spectral/dask.py | ratt-ru/codex-africanus | 13 | 6615677 | # -*- coding: utf-8 -*-
from africanus.model.spectral.spec_model import (
spectral_model as np_spectral_model,
SPECTRAL_MODEL_DOC)
from africanus.util.requirements import requires_optional
try:
import dask.array as da
except ImportError as e:
opt_import_error = e
else:
opt_import_error = None
def spectral_model_wrapper(stokes, spi, ref_freq, frequencies, base=None):
    # Thin adapter for da.blockwise: the contracted "spi" dimension makes
    # dask pass `spi` as a list of blocks; spectral_model enforces a single
    # chunk along that axis, so spi[0] is the complete array.
    return np_spectral_model(stokes, spi[0], ref_freq, frequencies, base=base)
@requires_optional("dask.array", opt_import_error)
def spectral_model(stokes, spi, ref_freq, frequencies, base=0):
    """Dask wrapper around the numpy spectral model.

    Maps np_spectral_model over the source/channel blocks of the inputs.

    Raises:
        ValueError: if `spi` is chunked along its spi dimension, since
            the wrapper assumes that axis arrives as a single block.
    """
    if len(spi.chunks[1]) != 1:
        raise ValueError("Chunking along the spi dimension unsupported")

    # The polarisation dimension is optional: 1-D stokes means no "pol" axis.
    pol_dim = () if stokes.ndim == 1 else ("pol",)

    return da.blockwise(spectral_model_wrapper, ("source", "chan",) + pol_dim,
                        stokes, ("source",) + pol_dim,
                        spi, ("source", "spi") + pol_dim,
                        ref_freq, ("source",),
                        frequencies, ("chan",),
                        base=base,
                        dtype=stokes.dtype)
try:
spectral_model.__doc__ = SPECTRAL_MODEL_DOC.substitute(
array_type=":class:`dask.array.Array`")
except AttributeError:
pass
| # -*- coding: utf-8 -*-
from africanus.model.spectral.spec_model import (
spectral_model as np_spectral_model,
SPECTRAL_MODEL_DOC)
from africanus.util.requirements import requires_optional
try:
import dask.array as da
except ImportError as e:
opt_import_error = e
else:
opt_import_error = None
def spectral_model_wrapper(stokes, spi, ref_freq, frequencies, base=None):
return np_spectral_model(stokes, spi[0], ref_freq, frequencies, base=base)
@requires_optional("dask.array", opt_import_error)
def spectral_model(stokes, spi, ref_freq, frequencies, base=0):
if len(spi.chunks[1]) != 1:
raise ValueError("Chunking along the spi dimension unsupported")
pol_dim = () if stokes.ndim == 1 else ("pol",)
return da.blockwise(spectral_model_wrapper, ("source", "chan",) + pol_dim,
stokes, ("source",) + pol_dim,
spi, ("source", "spi") + pol_dim,
ref_freq, ("source",),
frequencies, ("chan",),
base=base,
dtype=stokes.dtype)
try:
spectral_model.__doc__ = SPECTRAL_MODEL_DOC.substitute(
array_type=":class:`dask.array.Array`")
except AttributeError:
pass
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.247977 | 2 |
openstack_dashboard/api/ceilometer.py | kbujold/stx-horizon | 0 | 6615678 | <reponame>kbujold/stx-horizon
#
# Copyright (c) 2013-2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from ceilometerclient import client as ceilometer_client
from django.conf import settings
from openstack_dashboard.api import base
from horizon.utils.memoized import memoized # noqa
class Pipeline(base.APIResourceWrapper):
    """Represents one Ceilometer pipeline entry."""

    # Attributes exposed from the wrapped API object.
    _attrs = ['name', 'enabled', 'meters', 'location', 'max_bytes',
              'backup_count', 'compress']

    def __init__(self, apipipeline):
        # No extra state; simply delegate to APIResourceWrapper.
        super(Pipeline, self).__init__(apipipeline)
@memoized
def ceilometerclient(request):
    """Initialization of Ceilometer client.

    Results are cached by the @memoized decorator. SSL behaviour honours
    the OPENSTACK_SSL_NO_VERIFY and OPENSTACK_SSL_CACERT settings. The
    token is passed as a callable so the current token is fetched lazily
    on each API call rather than frozen at client creation.
    """
    endpoint = base.url_for(request, 'metering')
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    return ceilometer_client.Client('2', endpoint,
                                    token=(lambda: request.user.token.id),
                                    insecure=insecure,
                                    cacert=cacert)
def pipeline_list(request):
    """Return all configured pipelines wrapped as Pipeline objects."""
    client = ceilometerclient(request)
    return [Pipeline(entry) for entry in client.pipelines.list()]
def pipeline_update(request, pipeline_name, some_dict):
    """Update the named pipeline and return it wrapped as a Pipeline.

    Raises:
        ValueError: when the backend reports no matching pipeline.
    """
    updated = ceilometerclient(request).pipelines.update(pipeline_name,
                                                         **some_dict)
    if not updated:
        raise ValueError(
            'No match found for pipeline_name "%s".' % pipeline_name)
    return Pipeline(updated)
| #
# Copyright (c) 2013-2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from ceilometerclient import client as ceilometer_client
from django.conf import settings
from openstack_dashboard.api import base
from horizon.utils.memoized import memoized # noqa
class Pipeline(base.APIResourceWrapper):
"""Represents one Ceilometer pipeline entry."""
_attrs = ['name', 'enabled', 'meters', 'location', 'max_bytes',
'backup_count', 'compress']
def __init__(self, apipipeline):
super(Pipeline, self).__init__(apipipeline)
@memoized
def ceilometerclient(request):
"""Initialization of Ceilometer client."""
endpoint = base.url_for(request, 'metering')
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
return ceilometer_client.Client('2', endpoint,
token=(lambda: request.user.token.id),
insecure=insecure,
cacert=cacert)
def pipeline_list(request):
"""List the configured pipeline."""
pipeline_entries = ceilometerclient(request).pipelines.list()
pipelines = [Pipeline(p) for p in pipeline_entries]
return pipelines
def pipeline_update(request, pipeline_name, some_dict):
pipeline = ceilometerclient(request).pipelines.update(pipeline_name,
**some_dict)
if not pipeline:
raise ValueError(
'No match found for pipeline_name "%s".' % pipeline_name)
return Pipeline(pipeline) | en | 0.633832 | # # Copyright (c) 2013-2017 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # # noqa Represents one Ceilometer pipeline entry. Initialization of Ceilometer client. List the configured pipeline. | 2.210848 | 2 |
Code/classification_system/feature_extraction/doc2vec_features.py | sxd942/fascist_text_classification | 0 | 6615679 | <gh_stars>0
import numpy as np
from gensim.models import Doc2Vec
from gensim.models.doc2vec import TaggedDocument
from gensim.parsing.preprocessing import preprocess_string
from tqdm import tqdm
from sklearn import utils
"""
doc2vec_features.py contains the class Doc2vec_features.
Doc2vec_features is a Doc2Vec vectorizer class. It can be used as a feature extractor transformer in
scikit-learn's Pipeline as it implements both fit() and transform() methods.
Unlike the word vectors generated by Word2vec that generate a feature vector for every word in a corpus,
Doc2vec extracts vector representations of variable length. Therefore, feature vectors are
computed for each document in the corpus.
Doc2Vec trains a neural network to derive paragraph vectors, it does this by training the network to predict
the probability distribution of words within a paragraph.
-
Code references:
(C) was adapted to create the Paragraph vector Doc2vec class.
(C) <NAME>., (2019). A Text Classification Approach Using Vector Space Modelling(Doc2vec) & PCA. [online] Medium.
Available at:
<https://medium.com/swlh/a-text-classification-approach-using-vector-space-modelling-doc2vec-pca-74fb6fd73760>
[Accessed 3 July 2020].
-
@Author: <NAME>
Date: July 2020
"""
class Doc2vec_features:
    """Doc2Vec feature extractor usable as a scikit-learn transformer.

    Hyper-parameters set in the constructor:
    vector_size -> Dimensionality of feature vectors.
    window -> Max distance between the current and predicted word in a sentence.
    min_count -> Ignores all words with total frequency less than this number.
    epochs -> Number of times to iterate over corpus.
    dm -> if dm = 0: d2v model is PV-DBOW (distributed bag of words), if dm = 1: d2v model is PV-DM (distributed memory)
    workers -> How many worker threads to use to train model.
    """
    def __init__(self):
        # Doc2Vec constructor: model is built lazily in fit().
        print("Loading vectors (...)")
        self.model = None
        self.vector_size = 200
        self.window = 3
        self.min_count = 1
        self.epochs = 20
        self.dm = 0
        self.workers = 4
        print("Loading vectors completed.")
    def fit(self, X_data, y=None):
        """Train a Doc2Vec model on the documents in X_data; returns self."""
        # For each document in X_data, create a list of tokens using gensim's
        # preprocess_string, then assign it a unique tag (i) via TaggedDocument.
        X_tagged_docs = [TaggedDocument(preprocess_string(document), [i]) for i, document in enumerate(X_data)]
        # Initialize model with constructor parameters.
        d2v_model = Doc2Vec(
            vector_size=self.vector_size,
            window=self.window,
            min_count=self.min_count,
            epochs=self.epochs,
            dm=self.dm,
            workers=self.workers
        )
        # Build a vocabulary from the tagged documents (tqdm shows progress).
        d2v_model.build_vocab([x for x in tqdm(X_tagged_docs)])
        print('Doc2vec training commencing...')
        for epoch in range(self.epochs):
            print('\n')
            print('epoch: ' + str(epoch))
            # Train for one pass per outer iteration on a shuffled copy of
            # the corpus. NOTE(review): manual epoch loops bypass gensim's
            # internal learning-rate schedule — confirm this is intended.
            d2v_model.train(utils.shuffle([x for x in tqdm(X_tagged_docs)]), total_examples=len(X_tagged_docs),
                            epochs=1)
        print('\n' + 'Training finished.' + '\n')
        # d2v_model.save('/Users/siondavies/Desktop/NLP/Feature_Extraction/DOC2VEC/d2vmodel')
        self.model = d2v_model
        return self
    def transform(self, X_data, y=None):
        """Infer a vector per document; returns a (n_docs, vector_size) matrix."""
        # infer_vector -> infer a vector for a post-training document.
        return np.asmatrix(np.array([self.model.infer_vector(preprocess_string(document))
                                     for i, document in enumerate(X_data)]))
    def fit_transform(self, X_data, y=None):
        """Convenience wrapper: fit on X_data, then transform it."""
        self.fit(X_data)
        return self.transform(X_data)
| import numpy as np
from gensim.models import Doc2Vec
from gensim.models.doc2vec import TaggedDocument
from gensim.parsing.preprocessing import preprocess_string
from tqdm import tqdm
from sklearn import utils
"""
doc2vec_features.py contains the class Doc2vec_features.
Doc2vec_features is a Doc2Vec vectorizer class. It can be used as a feature extractor transformer in
scikit-learn's Pipeline as it implements both fit() and transform() methods.
Unlike the word vectors generated by Word2vec that generate a feature vector for every word in a corpus,
Doc2vec extracts vector representations of variable length. Therefore, feature vectors are
computed for each document in the corpus.
Doc2Vec trains a neural network to derive paragraph vectors, it does this by training the network to predict
the probability distribution of words within a paragraph.
-
Code references:
(C) was adapted to create the Paragraph vector Doc2vec class.
(C) <NAME>., (2019). A Text Classification Approach Using Vector Space Modelling(Doc2vec) & PCA. [online] Medium.
Available at:
<https://medium.com/swlh/a-text-classification-approach-using-vector-space-modelling-doc2vec-pca-74fb6fd73760>
[Accessed 3 July 2020].
-
@Author: <NAME>
Date: July 2020
"""
class Doc2vec_features:
"""
Constructor: Loads in document vectors.
vector_size -> Dimensionality of feature vectors.
window -> Max distance between the current and predicted word in a sentence.
min_count -> Ignores all words with total frequency less than this number.
epochs -> Number of times to iterate over corpus.
dm -> if dm = 0: d2v model is PV-DBOW (distributed bag of words), if dm = 1: d2v model is PV-DM (distributed memory)
workers -> How many worker threads to use to train model.
"""
def __init__(self):
# Doc2Vec constructor.
print("Loading vectors (...)")
self.model = None
self.vector_size = 200
self.window = 3
self.min_count = 1
self.epochs = 20
self.dm = 0
self.workers = 4
print("Loading vectors completed.")
def fit(self, X_data, y=None):
# For each document in X_data, create a list of tokens using gensim's preprocess_string function/
# Next, assign it a unique tag (i) to use as an input to train model using gensim's TaggedDocument().
X_tagged_docs = [TaggedDocument(preprocess_string(document), [i]) for i, document in enumerate(X_data)]
# Initialize model with constructor parameters.
d2v_model = Doc2Vec(
vector_size=self.vector_size,
window=self.window,
min_count=self.min_count,
epochs=self.epochs,
dm=self.dm,
workers=self.workers
)
# Build a vocabulary for the model using the tagged documents in X_data
# Use tqdm to output a progress bar.
d2v_model.build_vocab([x for x in tqdm(X_tagged_docs)])
print('Doc2vec training commencing...')
for epoch in range(self.epochs):
print('\n')
print('epoch: ' + str(epoch))
# Train D2V model using the shuffled vocab in tagged X_data documents.
# Repeat for given number of epochs.
d2v_model.train(utils.shuffle([x for x in tqdm(X_tagged_docs)]), total_examples=len(X_tagged_docs),
epochs=1)
print('\n' + 'Training finished.' + '\n')
# d2v_model.save('/Users/siondavies/Desktop/NLP/Feature_Extraction/DOC2VEC/d2vmodel')
self.model = d2v_model
return self
def transform(self, X_data, y=None):
# infer_vector -> infer a vector for given post-training document in X_data, return in vector matrix.
return np.asmatrix(np.array([self.model.infer_vector(preprocess_string(document))
for i, document in enumerate(X_data)]))
def fit_transform(self, X_data, y=None):
self.fit(X_data)
return self.transform(X_data) | en | 0.80017 | doc2vec_features.py contains the class Doc2vec_features. Doc2vec_features is a Doc2Vec vectorizer class. It can be used as a feature extractor transformer in scikit-learn's Pipeline as it implements both fit() and transform() methods. Unlike the word vectors generated by Word2vec that generate a feature vector for every word in a corpus, Doc2vec extracts vector representations of variable length. Therefore, feature vectors are computed for each document in the corpus. Doc2Vec trains a neural network to derive paragraph vectors, it does this by training the network to predict the probability distribution of words within a paragraph. - Code references: (C) was adapted to create the Paragraph vector Doc2vec class. (C) <NAME>., (2019). A Text Classification Approach Using Vector Space Modelling(Doc2vec) & PCA. [online] Medium. Available at: <https://medium.com/swlh/a-text-classification-approach-using-vector-space-modelling-doc2vec-pca-74fb6fd73760> [Accessed 3 July 2020]. - @Author: <NAME> Date: July 2020 Constructor: Loads in document vectors. vector_size -> Dimensionality of feature vectors. window -> Max distance between the current and predicted word in a sentence. min_count -> Ignores all words with total frequency less than this number. epochs -> Number of times to iterate over corpus. dm -> if dm = 0: d2v model is PV-DBOW (distributed bag of words), if dm = 1: d2v model is PV-DM (distributed memory) workers -> How many worker threads to use to train model. # Doc2Vec constructor. # For each document in X_data, create a list of tokens using gensim's preprocess_string function/ # Next, assign it a unique tag (i) to use as an input to train model using gensim's TaggedDocument(). # Initialize model with constructor parameters. # Build a vocabulary for the model using the tagged documents in X_data # Use tqdm to output a progress bar. # Train D2V model using the shuffled vocab in tagged X_data documents. 
# Repeat for given number of epochs. # d2v_model.save('/Users/siondavies/Desktop/NLP/Feature_Extraction/DOC2VEC/d2vmodel') # infer_vector -> infer a vector for given post-training document in X_data, return in vector matrix. | 3.117333 | 3 |
2020/D25/D25.py | buchasia/advent-of-code | 0 | 6615680 | import timeit
def solveParts(doorsSubject, cardsSubject):
    """Derive the shared encryption key (AoC 2020, day 25).

    Brute-forces the card's secret loop size k such that
    7**k % 20201227 == cardsSubject, then transforms the door's public
    key by that loop size.
    """
    loop_size = 1
    value = 7
    while value != cardsSubject:
        value = value * 7 % 20201227
        loop_size += 1
    return pow(doorsSubject, loop_size, 20201227)
def solve(doorsSubject, cardsSubject):
    """Compute the encryption key and print it as a one-element list."""
    key = solveParts(doorsSubject, cardsSubject)
    print([key])
# Time the solve with the puzzle-input public keys.
start = timeit.default_timer()
solve(5099500, 7648211)
# Report the elapsed wall-clock time.
stop = timeit.default_timer()
print('Time: ', stop - start)
| import timeit
def solveParts(doorsSubject, cardsSubject):
nextNumber = 7
encryptionKey = doorsSubject
while nextNumber != cardsSubject:
nextNumber = (nextNumber * 7) % 20201227
encryptionKey = (encryptionKey * doorsSubject) % 20201227
return encryptionKey
def solve(doorsSubject, cardsSubject):
print([solveParts(doorsSubject, cardsSubject)])
#Timer Start
start = timeit.default_timer()
solve(5099500, 7648211)
# Timer ends
stop = timeit.default_timer()
print('Time: ', stop - start)
| en | 0.457879 | #Timer Start # Timer ends | 3.029544 | 3 |
gym_acnportal/gym_acnsim/envs/observation.py | caltech-netlab/gym-acnportal | 0 | 6615681 | # coding=utf-8
"""
Module containing definition of a gym_acnsim observation and factory
functions for different builtin observations.
See the SimObservation docstring for more information on the
SimObservation class.
Each factory function takes no arguments and returns an instance of type
SimObservation. Each factory function defines a space_function and and
an obs_function with the following signatures:
space_function: Callable[[GymInterface], spaces.Space]
obs_function: Callable[[GymInterface], np.ndarray]
The space_function gives a gym space for a given observation type.
The obs_function gives a gym observation for a given observation type.
The observation returned by obs_function is a point in the space
returned by space_function.
"""
from typing import Callable
import numpy as np
from gym import spaces
from acnportal.acnsim import EV
from ..interfaces import GymTrainedInterface
class SimObservation:
    """Pairs a space-generating and an observation-generating function.

    A SimObservation bundles two callables under a human-readable name:
    ``space_function`` maps an ACN-Sim interface to the gym space its
    observations live in, and ``obs_function`` maps the same interface
    to a concrete observation inside that space. Packaging both per
    observation type lets a simulation environment mix and match
    user-defined and built-in observations without defining a new
    environment for each one.

    The functions are static in nature (observation types carry no
    state), but every type needs both a space and an observation
    generator, hence this wrapping structure.

    Attributes:
        _space_function (Callable[[GymTrainedInterface], spaces.Space]):
            Builds the gym space for this observation type.
        _obs_function (Callable[[GymTrainedInterface], np.ndarray]):
            Builds an observation for the current simulation state.
        name (str): Identifier used to distinguish observation types.
    """

    _space_function: Callable[[GymTrainedInterface], spaces.Space]
    _obs_function: Callable[[GymTrainedInterface], np.ndarray]
    name: str

    def __init__(
        self,
        space_function: Callable[[GymTrainedInterface], spaces.Space],
        obs_function: Callable[[GymTrainedInterface], np.ndarray],
        name: str,
    ) -> None:
        """
        Args:
            space_function: Maps an interface to the observation space.
            obs_function: Maps an interface to an observation.
            name: Identifier for this observation type.

        Returns:
            None.
        """
        self.name = name
        self._space_function = space_function
        self._obs_function = obs_function

    def get_space(self, interface: GymTrainedInterface) -> spaces.Space:
        """Return the gym space containing this type's observations.

        The space can depend on characteristics of the simulation (for
        example the number of EVSEs when station demands are observed),
        which is why an interface is required.

        Args:
            interface: Interface to a running ACN-Sim simulation.

        Returns:
            spaces.Space: Space in which all observations of this type
                exist.
        """
        return self._space_function(interface)

    def get_obs(self, interface: GymTrainedInterface) -> np.ndarray:
        """Return an observation of the simulation behind *interface*.

        The exact observation depends on the interface state and on the
        obs_function this instance was constructed with.

        Args:
            interface: Interface to a running ACN-Sim simulation.

        Returns:
            np.ndarray: Observation generated by the wrapped function.
        """
        return self._obs_function(interface)
# Per active EV observation factory functions. Note that all EV data
# is shifted up by 1, as 0's indicate no EV is plugged in.
def _ev_observation(
    attribute_function: Callable[[GymTrainedInterface, EV], float], name: str
) -> SimObservation:
    """Build a per-EVSE observation from a per-EV attribute function.

    The observation is a vector with one nonnegative entry per station
    id. Stations without an active EV report 0; occupied stations report
    ``attribute_function(interface, ev) + 1`` (shifted so that 0 can
    unambiguously mean "no EV plugged in").
    """
    # noinspection PyMissingOrEmptyDocstring
    def space_function(interface: GymTrainedInterface) -> spaces.Space:
        return spaces.Box(
            low=0, high=np.inf, shape=(len(interface.station_ids),), dtype="float"
        )

    # noinspection PyMissingOrEmptyDocstring
    def obs_function(interface: GymTrainedInterface) -> np.ndarray:
        # Default every station to 0, then overwrite occupied ones.
        attribute_values: dict = {station_id: 0 for station_id in interface.station_ids}
        for ev in interface.active_evs:
            attribute_values[ev.station_id] = attribute_function(interface, ev) + 1
        return np.array(list(attribute_values.values()))

    return SimObservation(space_function, obs_function, name=name)
def arrival_observation() -> SimObservation:
    """ Generates a SimObservation instance that wraps functions to
    observe active EV arrivals.

    The observed value for an occupied station is ``ev.arrival + 1``.
    Zeros in the output observation array indicate no EV is plugged in;
    as such, all observations are shifted up by 1.
    """
    return _ev_observation(lambda _, ev: ev.arrival, "arrivals")
def departure_observation() -> SimObservation:
    """ Generates a SimObservation instance that wraps functions to
    observe active EV departures.

    The observed value for an occupied station is ``ev.departure + 1``.
    Zeros in the output observation array indicate no EV is plugged in;
    as such, all observations are shifted up by 1.
    """
    return _ev_observation(lambda _, ev: ev.departure, "departures")
def remaining_demand_observation() -> SimObservation:
    """ Generates a SimObservation instance that wraps functions to
    observe active EV remaining energy demands in amp periods.

    The observed value for an occupied station is
    ``interface.remaining_amp_periods(ev) + 1``.
    Zeros in the output observation array indicate no EV is plugged in;
    as such, all observations are shifted up by 1.
    """
    return _ev_observation(
        lambda interface, ev: interface.remaining_amp_periods(ev), "demands"
    )
# Network-wide observation factory functions.
def _constraints_observation(attribute: str, name: str) -> SimObservation:
    """Build an observation exposing one attribute of the network constraints.

    *attribute* is the name of an array-valued attribute on the object
    returned by ``interface.get_constraints()`` (e.g. 'constraint_matrix'
    or 'magnitudes'); the observation is that array verbatim, and the
    space shape matches the attribute's shape.
    """
    # noinspection PyMissingOrEmptyDocstring
    def space_function(interface: GymTrainedInterface) -> spaces.Space:
        return spaces.Box(
            low=-np.inf,
            high=np.inf,
            shape=getattr(interface.get_constraints(), attribute).shape,
            dtype="float",
        )

    # noinspection PyMissingOrEmptyDocstring
    def obs_function(interface: GymTrainedInterface) -> np.ndarray:
        return getattr(interface.get_constraints(), attribute)

    return SimObservation(space_function, obs_function, name=name)
def constraint_matrix_observation() -> SimObservation:
    """ Generates a SimObservation instance that wraps functions to
    observe the network constraint matrix.

    The observation is the matrix exactly as reported by
    ``interface.get_constraints()``.
    """
    return _constraints_observation("constraint_matrix", "constraint matrix")
def magnitudes_observation() -> SimObservation:
    """ Generates a SimObservation instance that wraps functions to
    observe the network limiting current magnitudes in amps.

    The observation is the magnitudes array exactly as reported by
    ``interface.get_constraints()``.
    """
    return _constraints_observation("magnitudes", "magnitudes")
def phases_observation() -> SimObservation:
    """ Generates a SimObservation instance that wraps functions to
    observe the network phases.

    The observation is the phases array reported by
    ``interface.infrastructure_info()``, with a space shaped to match.
    """
    # noinspection PyMissingOrEmptyDocstring
    def space_function(interface: GymTrainedInterface) -> spaces.Space:
        return spaces.Box(
            low=-np.inf,
            high=np.inf,
            shape=interface.infrastructure_info().phases.shape,
            dtype="float",
        )

    # noinspection PyMissingOrEmptyDocstring
    def obs_function(interface: GymTrainedInterface) -> np.ndarray:
        return interface.infrastructure_info().phases

    return SimObservation(space_function, obs_function, name="phases")
def timestep_observation() -> SimObservation:
    """ Generates a SimObservation instance that wraps functions to
    observe the current timestep of the simulation, in periods.

    To comply with the timesteps returned by arrival and departure
    observations, the observed timestep is one greater than that
    returned by the simulation. Simulations thus start at timestep 1
    from an RL agent's perspective.
    """
    # noinspection PyUnusedLocal
    # noinspection PyMissingOrEmptyDocstring
    def space_function(interface: GymTrainedInterface) -> spaces.Space:
        return spaces.Box(low=0, high=np.inf, shape=(1,), dtype="float")

    # noinspection PyMissingOrEmptyDocstring
    def obs_function(interface: GymTrainedInterface) -> np.ndarray:
        # Wrap in a list so the result has shape (1,), matching the Box
        # space declared above; np.array(scalar) would be 0-dimensional
        # and therefore fall outside spaces.Box(shape=(1,)).
        return np.array([interface.current_time + 1])

    return SimObservation(space_function, obs_function, name="timestep")
| # coding=utf-8
"""
Module containing definition of a gym_acnsim observation and factory
functions for different builtin observations.
See the SimObservation docstring for more information on the
SimObservation class.
Each factory function takes no arguments and returns an instance of type
SimObservation. Each factory function defines a space_function and and
an obs_function with the following signatures:
space_function: Callable[[GymInterface], spaces.Space]
obs_function: Callable[[GymInterface], np.ndarray]
The space_function gives a gym space for a given observation type.
The obs_function gives a gym observation for a given observation type.
The observation returned by obs_function is a point in the space
returned by space_function.
"""
from typing import Callable
import numpy as np
from gym import spaces
from acnportal.acnsim import EV
from ..interfaces import GymTrainedInterface
class SimObservation:
    """Pairing of a gym-space generator and an observation generator.

    A SimObservation bundles two functions: one that builds the gym
    space containing every possible observation of this type, and one
    that builds the actual observation for the current simulation
    state. Both take a GymTrainedInterface as their only argument, so
    an environment can support new observation types by composing
    SimObservation instances instead of subclassing.

    Attributes:
        _space_function (Callable[[GymTrainedInterface], spaces.Space]):
            Maps an interface to the space in which all observations
            produced by this instance live.
        _obs_function (Callable[[GymTrainedInterface], np.ndarray]):
            Maps an interface to an observation of the current
            simulation state.
        name (str): Label environments use to tell observation types
            apart.
    """

    _space_function: Callable[[GymTrainedInterface], spaces.Space]
    _obs_function: Callable[[GymTrainedInterface], np.ndarray]
    name: str

    def __init__(
        self,
        space_function: Callable[[GymTrainedInterface], spaces.Space],
        obs_function: Callable[[GymTrainedInterface], np.ndarray],
        name: str,
    ) -> None:
        """
        Args:
            space_function (Callable[[GymTrainedInterface], spaces.Space]):
                Builds the gym space for this observation type.
            obs_function (Callable[[GymTrainedInterface], np.ndarray]):
                Builds an observation from an interface.
            name (str): Label distinguishing this observation type.

        Returns:
            None.
        """
        self._space_function = space_function
        self._obs_function = obs_function
        self.name = name

    def get_space(self, interface: GymTrainedInterface) -> spaces.Space:
        """Return the gym space for this observation type.

        The space may depend on characteristics of the simulation (for
        instance, the number of EVSEs), hence the interface argument.

        Args:
            interface (GymTrainedInterface): Interface to an ACN-Sim
                Simulation providing details about the current state.

        Returns:
            spaces.Space: Space containing every observation this
            instance can produce.
        """
        return self._space_function(interface)

    def get_obs(self, interface: GymTrainedInterface) -> np.ndarray:
        """Return an observation of the simulation behind interface.

        The exact contents depend on the obs_function with which this
        instance was constructed.

        Args:
            interface (GymTrainedInterface): Interface to an ACN-Sim
                Simulation providing details about the current state.

        Returns:
            np.ndarray: Observation produced by this instance's
            obs_function.
        """
        return self._obs_function(interface)
# Per active EV observation factory functions. Note that all EV data
# is shifted up by 1, as 0's indicate no EV is plugged in.
def _ev_observation(
    attribute_function: Callable[[GymTrainedInterface, EV], float], name: str
) -> SimObservation:
    """Build a per-EVSE observation of one attribute of each active EV.

    Values are shifted up by 1 so that 0 always means "no EV plugged in
    at this station".
    """
    # noinspection PyMissingOrEmptyDocstring
    def space_function(interface: GymTrainedInterface) -> spaces.Space:
        n_stations = len(interface.station_ids)
        return spaces.Box(low=0, high=np.inf, shape=(n_stations,), dtype="float")

    # noinspection PyMissingOrEmptyDocstring
    def obs_function(interface: GymTrainedInterface) -> np.ndarray:
        # Default every station to 0 ("empty"), then overwrite stations
        # with an active EV by the shifted attribute value.
        attribute_values: dict = dict.fromkeys(interface.station_ids, 0)
        for ev in interface.active_evs:
            attribute_values[ev.station_id] = attribute_function(interface, ev) + 1
        return np.array(list(attribute_values.values()))

    return SimObservation(space_function, obs_function, name=name)
def arrival_observation() -> SimObservation:
    """Return a SimObservation of the arrival times of active EVs.

    A 0 in the observation array means no EV is plugged in at that
    station, so all arrival times are shifted up by 1.
    """
    def _arrival(_: GymTrainedInterface, ev: EV) -> float:
        return ev.arrival

    return _ev_observation(_arrival, "arrivals")
def departure_observation() -> SimObservation:
    """Return a SimObservation of the departure times of active EVs.

    A 0 in the observation array means no EV is plugged in at that
    station, so all departure times are shifted up by 1.
    """
    def _departure(_: GymTrainedInterface, ev: EV) -> float:
        return ev.departure

    return _ev_observation(_departure, "departures")
def remaining_demand_observation() -> SimObservation:
    """Return a SimObservation of active EVs' remaining energy demands,
    in amp-periods.

    A 0 in the observation array means no EV is plugged in at that
    station, so all demands are shifted up by 1.
    """
    def _remaining(interface: GymTrainedInterface, ev: EV) -> float:
        return interface.remaining_amp_periods(ev)

    return _ev_observation(_remaining, "demands")
# Network-wide observation factory functions.
def _constraints_observation(attribute: str, name: str) -> SimObservation:
    """Build an observation exposing one attribute of the network's
    constraint set (e.g. the constraint matrix or limiting magnitudes).
    """
    # noinspection PyMissingOrEmptyDocstring
    def space_function(interface: GymTrainedInterface) -> spaces.Space:
        attr_shape = getattr(interface.get_constraints(), attribute).shape
        return spaces.Box(low=-np.inf, high=np.inf, shape=attr_shape, dtype="float")

    # noinspection PyMissingOrEmptyDocstring
    def obs_function(interface: GymTrainedInterface) -> np.ndarray:
        return getattr(interface.get_constraints(), attribute)

    return SimObservation(space_function, obs_function, name=name)
def constraint_matrix_observation() -> SimObservation:
    """Return a SimObservation exposing the network's constraint
    matrix."""
    return _constraints_observation("constraint_matrix", "constraint matrix")
def magnitudes_observation() -> SimObservation:
    """Return a SimObservation exposing the network's limiting current
    magnitudes, in amps."""
    return _constraints_observation("magnitudes", "magnitudes")
def phases_observation() -> SimObservation:
    """Return a SimObservation exposing the network's phase angles."""
    # noinspection PyMissingOrEmptyDocstring
    def space_function(interface: GymTrainedInterface) -> spaces.Space:
        phase_shape = interface.infrastructure_info().phases.shape
        return spaces.Box(low=-np.inf, high=np.inf, shape=phase_shape, dtype="float")

    # noinspection PyMissingOrEmptyDocstring
    def obs_function(interface: GymTrainedInterface) -> np.ndarray:
        return interface.infrastructure_info().phases

    return SimObservation(space_function, obs_function, name="phases")
def timestep_observation() -> SimObservation:
    """ Generates a SimObservation instance that wraps functions to
    observe the current timestep of the simulation, in periods.

    To comply with the timesteps returned by arrival and departure
    observations, the observed timestep is one greater than that
    returned by the simulation. Simulations thus start at timestep 1
    from an RL agent's perspective.
    """
    # noinspection PyUnusedLocal
    # noinspection PyMissingOrEmptyDocstring
    def space_function(interface: GymTrainedInterface) -> spaces.Space:
        return spaces.Box(low=0, high=np.inf, shape=(1,), dtype="float")

    # noinspection PyMissingOrEmptyDocstring
    def obs_function(interface: GymTrainedInterface) -> np.ndarray:
        # Wrap the scalar in a list so the observation has shape (1,),
        # matching the Box space declared above; np.array(scalar) is a
        # 0-d array and would fall outside that space.
        return np.array([interface.current_time + 1])

    return SimObservation(space_function, obs_function, name="timestep")
| en | 0.807902 | # coding=utf-8 Module containing definition of a gym_acnsim observation and factory functions for different builtin observations. See the SimObservation docstring for more information on the SimObservation class. Each factory function takes no arguments and returns an instance of type SimObservation. Each factory function defines a space_function and and an obs_function with the following signatures: space_function: Callable[[GymInterface], spaces.Space] obs_function: Callable[[GymInterface], np.ndarray] The space_function gives a gym space for a given observation type. The obs_function gives a gym observation for a given observation type. The observation returned by obs_function is a point in the space returned by space_function. Class representing an OpenAI Gym observation of an ACN-Sim simulation. An instance of SimObservation contains a space_function, which generates a gym space from an input Interface using attributes and functions of the input Interface, and an obs_function, which generates a gym observation from an input Interface using attributes and functions of the input Interface. Each instance also requires a name (given as a string). This class enables Simulation environments with customizable observations, as a SimObservation object with user-defined or built in space and obs functions can be input to a BaseSimEnv-like object to enable a new observation without creating a new environment. Each type of observation is the same type of object, but the details of the space and obs functions are different. This was done because space and obs functions are static, as observations of a specific type do not have any attributes. However, each observation type requires both a space and observation generating function, so a wrapping data structure is required. Attributes: _space_function (Callable[[GymInterface], spaces.Space]): Function that accepts a GymInterface and generates a gym space in which all observations for this instance exist. 
_obs_function (Callable[[GymInterface], np.ndarray]): Function that accepts a GymInterface and generates a gym observation based on the input interface. name (str): Name of this observation. This attribute allows an environment to distinguish between different types of observation. Args: space_function (Callable[[GymInterface], spaces.Space]): Function that accepts a GymInterface and generates a gym space in which all observations for this instance exist. obs_function (Callable[[GymInterface], np.ndarray]): Function that accepts a GymInterface and generates a gym observation based on the input interface. name (str): Name of this observation. This attribute allows an environment to distinguish between different types of observation. Returns: None. Returns the gym space in which all observations for this observation type exist. The characteristics of the interface (for example, number of EVSEs if station demands are observed) may change the dimensions of the returned space, so this method requires a GymInterface as input. Args: interface (GymTrainedInterface): Interface to an ACN-Sim Simulation that contains details of and functions to generate details about the current Simulation. Returns: spaces.Space: A gym space in which all observations for this observation type exist. Returns a gym observation for the state of the simulation given by interface. The exact observation depends on both the input interface and the observation generating function obs_func with which this object was initialized. Args: interface (GymTrainedInterface): Interface to an ACN-Sim Simulation that contains details of and functions to generate details about the current Simulation. Returns: np.ndarray: A gym observation generated by _obs_function with this interface. # Per active EV observation factory functions. Note that all EV data # is shifted up by 1, as 0's indicate no EV is plugged in. 
# noinspection PyMissingOrEmptyDocstring # noinspection PyMissingOrEmptyDocstring Generates a SimObservation instance that wraps functions to observe active EV arrivals. Zeros in the output observation array indicate no EV is plugged in; as such, all observations are shifted up by 1. Generates a SimObservation instance that wraps functions to observe active EV departures. Zeros in the output observation array indicate no EV is plugged in; as such, all observations are shifted up by 1. Generates a SimObservation instance that wraps functions to observe active EV remaining energy demands in amp periods. Zeros in the output observation array indicate no EV is plugged in; as such, all observations are shifted up by 1. # Network-wide observation factory functions. # noinspection PyMissingOrEmptyDocstring # noinspection PyMissingOrEmptyDocstring Generates a SimObservation instance that wraps functions to observe the network constraint matrix. Generates a SimObservation instance that wraps functions to observe the network limiting current magnitudes in amps. Generates a SimObservation instance that wraps functions to observe the network phases. # noinspection PyMissingOrEmptyDocstring # noinspection PyMissingOrEmptyDocstring Generates a SimObservation instance that wraps functions to observe the current timestep of the simulation, in periods. To comply with the timesteps returned by arrival and departure observations, the observed timestep is one greater than than that returned by the simulation. Simulations thus start at timestep 1 from an RL agent's perspective. # noinspection PyUnusedLocal # noinspection PyMissingOrEmptyDocstring # noinspection PyMissingOrEmptyDocstring | 2.906483 | 3 |
ex2.py | isabellanunes/blueedtech-test-class | 0 | 6615682 | bill1 = float(input('Type the first bill price: '))
bill2 = float(input('Type the second bill price: '))
bill3 = float(input('Type the third bill price: '))
bill4 = float(input('Type the fourth bill price: '))
monthlyAverage = (bill1 + bill2 + bill3 + bill4) / 4
print('The monthly average is:', monthlyAverage) | bill1 = float(input('Type the first bill price: '))
bill2 = float(input('Type the second bill price: '))
bill3 = float(input('Type the third bill price: '))
bill4 = float(input('Type the fourth bill price: '))
monthlyAverage = (bill1 + bill2 + bill3 + bill4) / 4
print('The monthly average is:', monthlyAverage) | none | 1 | 3.915906 | 4 | |
setup.py | sheikheddy/grakn-python | 0 | 6615683 | <filename>setup.py<gh_stars>0
from distutils.core import setup

# Packaging metadata for the grakn Python client.
setup(
    name='grakn',
    packages=['grakn'],
    version='0.9.0',
    description='A Python client for Grakn',
    long_description=open('README.rst').read(),
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/graknlabs/grakn-python',
    # Keep the source tarball in sync with `version` above; it
    # previously lagged behind at v0.8.1.
    download_url='https://github.com/graknlabs/grakn-python/archive/v0.9.0.tar.gz',
    keywords=['grakn', 'database', 'graph', 'hyper-relational'],
    classifiers=[
        'Development Status :: 5 - Production/Stable'
    ],
    install_requires=['grpcio']
)
| <filename>setup.py<gh_stars>0
from distutils.core import setup
setup(
name='grakn',
packages=['grakn'],
version='0.9.0',
description='A Python client for Grakn',
long_description=open('README.rst').read(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/graknlabs/grakn-python',
download_url='https://github.com/graknlabs/grakn-python/archive/v0.8.1.tar.gz',
keywords=['grakn', 'database', 'graph', 'hyper-relational'],
classifiers=[
'Development Status :: 5 - Production/Stable'
],
install_requires=['grpcio']
)
| none | 1 | 1.250126 | 1 | |
maxent.py | tpoll/salmon-of-knowledge | 0 | 6615684 | <gh_stars>0
import yelp_data
import operator
import codecs
import os
import operator
import nltk
from collections import defaultdict
from collections import Counter
from math import log
from sets import ImmutableSet
import json
import spacy.en
from sets import ImmutableSet
# Placeholder token for out-of-vocabulary words.
unknown_token = 'UNK'
# Class labels used for the sentiment attribute.
positive_class = "positive"
negative_class = "negative"
# Indices into a review record: (star rating, tokens, POS tags, chunks).
STARS = 0
TEXT = 1
TAG = 2
CHUNK = 3
class Maxent(object):
    """Builds a MaxEnt feature set from n-gram and chunk statistics and
    serializes labeled review data to sparse Weka ARFF files.

    Review records are tuples indexed by the module-level constants
    STARS, TEXT, TAG and CHUNK.

    NOTE: this module is Python 2 code (iteritems, xrange, the removed
    `sets` module).
    """

    def __init__(self, vocab, nlp):
        """
        Args:
            vocab: Vocabulary built from the training corpus.
            nlp: Loaded spaCy pipeline; only its string store is used
                to resolve POS-tag ids.
        """
        self.vocab = vocab
        # Maps feature (n-gram tuple or chunk) -> ARFF column index.
        self.features = {}
        # Chunk -> corpus frequency.
        self.chunks = defaultdict(int)
        # POS tags (adjective, adverb and verb forms) accepted for
        # unigram features; nouns are deliberately excluded here.
        self.AcceptedPOSTags = ImmutableSet([nlp.vocab.strings['JJ'], nlp.vocab.strings['VB'], nlp.vocab.strings['RB'],
            nlp.vocab.strings['RBR'], nlp.vocab.strings['JJR'], nlp.vocab.strings['JJS'], nlp.vocab.strings['RBS'],
            nlp.vocab.strings['VBN'], nlp.vocab.strings['VBD'], nlp.vocab.strings['VBP']])

    def buildChunks(self, dataset):
        """Count how often each chunk occurs across the dataset."""
        for review in dataset:
            for chunk in review[CHUNK]:
                self.chunks[chunk] += 1

    def buildFeatures(self, ngrams, N):
        """Assign a column index to every retained n-gram and to every
        sufficiently frequent multi-token chunk."""
        counter = 0
        for i in range(1, N + 1):
            for feature, count in ngrams.counts[i].iteritems():
                # Keep all bi- and trigrams; unigrams only when their
                # POS tag is in the accepted set.
                if (i==2) or (i==3) or (i==1 and ngrams.tags[feature][0] in self.AcceptedPOSTags):
                    self.features[feature] = counter
                    counter += 1
        for feature, count in self.chunks.iteritems():
            # Chunks must be frequent (>5), longer than one token, and
            # not already registered as an n-gram feature.
            if count > 5 and len(feature) > 1 and feature not in self.features:
                self.features[feature] = counter
                counter += 1

    def buildData(self, dataset, nGram):
        """Return one sparse {column index: count} dict per review."""
        matrix = [defaultdict(int) for x in xrange(len(dataset))]
        for i, sent in enumerate(dataset):
            for N in range(1, nGram + 1):
                for j, word in enumerate(sent[TEXT][nGram - N:]):
                    # Skip sentence-boundary markers. The original used
                    # `is not`, an identity test that is effectively
                    # always True for runtime strings, so the filter
                    # never fired; `!=` compares values as intended.
                    if word != "</S>" and word != "<S>":
                        # NOTE(review): j - N is negative for small j,
                        # slicing from the list's end; this mirrors
                        # Ngrams.Train -- confirm the indexing is
                        # intended.
                        gram = tuple(sent[TEXT][j - N:j])
                        if gram in self.features:
                            matrix[i][self.features[gram]] += 1
            for chunk in sent[CHUNK]:
                if chunk in self.features:
                    matrix[i][self.features[chunk]] += 1
        return matrix

    def getSentiment(self, sentence):
        """Map a review's star rating to the ARFF class entry.

        Returns '<class column index> positive' for ratings >= 4 stars,
        '<class column index> negative' otherwise; the index equals
        len(self.features) because the class attribute follows all
        feature columns.
        """
        if sentence[STARS] >= 4:
            return str(len(self.features)) + " positive"
        else:
            return str(len(self.features)) + " negative"

    def buildARFFfile(self, dataset, filename, nGram):
        """Write the dataset as a sparse-format Weka ARFF file."""
        num_features = len(self.features)
        with codecs.open(filename, 'wb', encoding='utf-8') as f:
            f.write("@relation maxent\n\n")
            # Declare attributes in column-index order so sparse rows
            # line up with the header.
            features = sorted(self.features.items(), key=operator.itemgetter(1))
            for feature in features:
                f.write("@attribute \"" + ' '.join(feature[0]) + "\" NUMERIC\n")
            f.write("@attribute __sentiment__ {positive, negative}\n\n")
            f.write("@data\n")
            dataMatrix = self.buildData(dataset, nGram)
            for i, sent in enumerate(dataMatrix):
                f.write("{")
                for feature in sorted(sent.iteritems()):
                    f.write(str(feature[0]) + " " + str(feature[1]) + ",")
                f.write(self.getSentiment(dataset[i]) + "}\n")
class Ngrams(object):
    """N-gram counter with POS filtering and PMI-based feature selection
    for sentiment analysis.

    (The original docstring called this NaiveBayes; the class only
    counts n-grams and prunes them by pointwise mutual information.)

    NOTE: Python 2 code (iteritems, the removed `sets` module).
    """

    def __init__(self, nlp):
        # counts[N][gram] = frequency of the N-gram `gram` (a tuple).
        self.counts = defaultdict(lambda: defaultdict(int))
        # gram -> POS tags of its tokens (last occurrence wins).
        self.tags = {}
        self.Verbs = ImmutableSet([nlp.vocab.strings['VB'], nlp.vocab.strings['VBN'], nlp.vocab.strings['VBD'], nlp.vocab.strings['VBP']])
        self.Adj = ImmutableSet([nlp.vocab.strings['JJ'], nlp.vocab.strings['JJR'], nlp.vocab.strings['JJS']])
        self.Nouns = ImmutableSet([nlp.vocab.strings['NN']])
        self.Adverbs = ImmutableSet([nlp.vocab.strings['RB'], nlp.vocab.strings['RBR'], nlp.vocab.strings['RBS']])
        self.AcceptedPOSTags = ImmutableSet([nlp.vocab.strings['JJ'], nlp.vocab.strings['NN'], nlp.vocab.strings['VB'], nlp.vocab.strings['RB'],
            nlp.vocab.strings['RBR'], nlp.vocab.strings['JJR'], nlp.vocab.strings['JJS'], nlp.vocab.strings['RBS'],
            nlp.vocab.strings['VBN'], nlp.vocab.strings['VBD'], nlp.vocab.strings['VBP'] ])

    def Train(self, training_set, nGram=1):
        """Count every 1..nGram gram in the training set and record the
        POS tags of each gram's tokens."""
        for N in range(1, nGram + 1):
            for review in training_set:
                for i, word in enumerate(review[TEXT][nGram - N:]):
                    # Skip sentence-boundary markers. The original used
                    # `is not`, an identity test that is effectively
                    # always True for runtime strings, so the filter
                    # never fired; `!=` compares values as intended.
                    if word != "</S>" and word != "<S>":
                        # NOTE(review): i - N is negative for small i,
                        # slicing from the list's end -- confirm the
                        # indexing is intended.
                        gram = tuple(review[TEXT][i - N:i])
                        if gram:
                            self.tags[gram] = review[TAG][i - N:i]
                            self.counts[N][gram] += 1

    #Calculate Pointwise Mutual information of N-grams
    def CalculateNgramPMI(self, k, N):
        """Keep only the (up to) k highest-PMI N-grams whose tokens all
        carry accepted, non-noun POS tags; mutates self.counts[N]."""
        nSum = sum([self.counts[N][x] for x in self.counts[N]])
        unSum = sum([self.counts[1][x] for x in self.counts[1]])
        wordProbs = {x[0]: float(self.counts[1][x]) / unSum for x in self.counts[1]} # word probabilities
        # Only grams seen more than 15 times are candidates.
        jointProbs = {x: float(self.counts[N][x]) / nSum for x in self.counts[N] if self.counts[N][x] > 15 } # joint probabilites
        probs = {} # PMI of N-grams
        for nGram, jProb in jointProbs.iteritems():
            indvSum = 1.0
            for i in range(0, N):
                indvSum *= float(wordProbs[nGram[i]])
            # PMI = log2( P(gram) / prod_i P(word_i) )
            probs[nGram] = log((jProb / indvSum), 2)
        topK = sorted(probs.iteritems(), key=operator.itemgetter(1), reverse=True)
        newK = []
        for gram in topK:
            if all([self.tags[gram[0]][i] in self.AcceptedPOSTags for i in range(0,N)]):
                if all([self.tags[gram[0]][i] not in self.Nouns for i in range(0,N)]):
                    newK.append(gram)
        newK = newK[0:k]
        self.counts[N] = {key[0]: self.counts[N][key[0]] for key in newK} # Replace nGrams with high information features
def main():
    """Load Yelp reviews, select high-PMI n-gram features, and export
    train/test ARFF files for MaxEnt training."""
    ngram_order = 3
    reviews, nlp = yelp_data.getReviewsTokenizedandTagged(1000)
    training_set = reviews[:900]
    test_set = reviews[900:1000]
    vocab = yelp_data.buildVocab(training_set)
    training_set_prep = yelp_data.preProcess(training_set, vocab)
    test_set_prep = yelp_data.preProcess(test_set, vocab)
    ngrams = Ngrams(nlp)
    ngrams.Train(training_set_prep, ngram_order)
    # Keep only the 2800 POS bi- and trigrams with the highest PMI.
    ngrams.CalculateNgramPMI(2800, 2)
    ngrams.CalculateNgramPMI(2800, 3)
    maxent = Maxent(vocab, nlp)
    maxent.buildChunks(training_set_prep)
    maxent.buildFeatures(ngrams, ngram_order)
    maxent.buildARFFfile(training_set_prep, "yelp_maxent_training.arff", ngram_order)
    maxent.buildARFFfile(test_set_prep, "yelp_maxent_test.arff", ngram_order)
if __name__ == '__main__':
main() | import yelp_data
import operator
import codecs
import os
import operator
import nltk
from collections import defaultdict
from collections import Counter
from math import log
from sets import ImmutableSet
import json
import spacy.en
from sets import ImmutableSet
unknown_token = 'UNK'
positive_class = "positive"
negative_class = "negative"
STARS = 0
TEXT = 1
TAG = 2
CHUNK = 3
class Maxent(object):
def __init__(self, vocab, nlp):
self.vocab = vocab
self.features = {}
self.chunks = defaultdict(int)
self.AcceptedPOSTags = ImmutableSet([nlp.vocab.strings['JJ'], nlp.vocab.strings['VB'], nlp.vocab.strings['RB'],
nlp.vocab.strings['RBR'], nlp.vocab.strings['JJR'], nlp.vocab.strings['JJS'], nlp.vocab.strings['RBS'],
nlp.vocab.strings['VBN'], nlp.vocab.strings['VBD'], nlp.vocab.strings['VBP']])
def buildChunks(self, dataset):
for review in dataset:
for chunk in review[CHUNK]:
self.chunks[chunk] += 1
def buildFeatures(self, ngrams, N):
counter = 0
for i in range(1, N + 1):
for feature, count in ngrams.counts[i].iteritems():
if (i==2) or (i==3) or (i==1 and ngrams.tags[feature][0] in self.AcceptedPOSTags):
self.features[feature] = counter
counter += 1
for feature, count in self.chunks.iteritems():
if count > 5 and len(feature) > 1 and feature not in self.features:
self.features[feature] = counter
counter += 1
def buildData(self, dataset, nGram):
matrix = [defaultdict(int) for x in xrange(len(dataset))]
for i, sent in enumerate(dataset):
for N in range(1, nGram + 1):
for j, word in enumerate(sent[TEXT][nGram - N:]):
if word is not "</S>" and word is not "<S>":
gram = tuple(sent[TEXT][j - N:j])
if gram in self.features:
matrix[i][self.features[gram]] += 1
for chunk in sent[CHUNK]:
if chunk in self.features:
matrix[i][self.features[chunk]] += 1
return matrix
def getSentiment(self, sentence):
if sentence[STARS] >= 4:
return str(len(self.features)) + " positive"
else:
return str(len(self.features)) + " negative"
def buildARFFfile(self, dataset, filename, nGram):
num_features = len(self.features)
with codecs.open(filename, 'wb', encoding='utf-8') as f:
f.write("@relation maxent\n\n")
features = sorted(self.features.items(), key=operator.itemgetter(1))
for feature in features:
f.write("@attribute \"" + ' '.join(feature[0]) + "\" NUMERIC\n")
f.write("@attribute __sentiment__ {positive, negative}\n\n")
f.write("@data\n")
dataMatrix = self.buildData(dataset, nGram)
for i, sent in enumerate(dataMatrix):
f.write("{")
for feature in sorted(sent.iteritems()):
f.write(str(feature[0]) + " " + str(feature[1]) + ",")
f.write(self.getSentiment(dataset[i]) + "}\n")
class Ngrams(object):
"""NaiveBayes for sentiment analysis"""
def __init__(self, nlp):
self.counts = defaultdict(lambda: defaultdict(int))
self.tags = {}
self.Verbs = ImmutableSet([nlp.vocab.strings['VB'], nlp.vocab.strings['VBN'], nlp.vocab.strings['VBD'], nlp.vocab.strings['VBP']])
self.Adj = ImmutableSet([nlp.vocab.strings['JJ'], nlp.vocab.strings['JJR'], nlp.vocab.strings['JJS']])
self.Nouns = ImmutableSet([nlp.vocab.strings['NN']])
self.Adverbs = ImmutableSet([nlp.vocab.strings['RB'], nlp.vocab.strings['RBR'], nlp.vocab.strings['RBS']])
self.AcceptedPOSTags = ImmutableSet([nlp.vocab.strings['JJ'], nlp.vocab.strings['NN'], nlp.vocab.strings['VB'], nlp.vocab.strings['RB'],
nlp.vocab.strings['RBR'], nlp.vocab.strings['JJR'], nlp.vocab.strings['JJS'], nlp.vocab.strings['RBS'],
nlp.vocab.strings['VBN'], nlp.vocab.strings['VBD'], nlp.vocab.strings['VBP'] ])
def Train(self, training_set, nGram=1):
for N in range(1, nGram + 1):
for review in training_set:
for i, word in enumerate(review[TEXT][nGram - N:]):
if word is not "</S>" and word is not "<S>":
gram = tuple(review[TEXT][i - N:i])
if gram:
self.tags[gram] = review[TAG][i - N:i]
self.counts[N][gram] += 1
#Calculate Pointwise Mutual information of N-grams
def CalculateNgramPMI(self, k, N):
nSum = sum([self.counts[N][x] for x in self.counts[N]])
unSum = sum([self.counts[1][x] for x in self.counts[1]])
wordProbs = {x[0]: float(self.counts[1][x]) / unSum for x in self.counts[1]} # word probabilities
jointProbs = {x: float(self.counts[N][x]) / nSum for x in self.counts[N] if self.counts[N][x] > 15 } # joint probabilites
probs = {} # PMI of N-grams
for nGram, jProb in jointProbs.iteritems():
indvSum = 1.0
for i in range(0, N):
indvSum *= float(wordProbs[nGram[i]])
probs[nGram] = log((jProb / indvSum), 2)
topK = sorted(probs.iteritems(), key=operator.itemgetter(1), reverse=True)
newK = []
for gram in topK:
if all([self.tags[gram[0]][i] in self.AcceptedPOSTags for i in range(0,N)]):
if all([self.tags[gram[0]][i] not in self.Nouns for i in range(0,N)]):
newK.append(gram)
newK = newK[0:k]
self.counts[N] = {key[0]: self.counts[N][key[0]] for key in newK} # Replace nGrams with high information features
def main():
N = 3
(reviews, nlp) = yelp_data.getReviewsTokenizedandTagged(1000)
training_set = reviews[0:900]
test_set = reviews[900:1000]
vocab = yelp_data.buildVocab(training_set)
training_set_prep = yelp_data.preProcess(training_set, vocab)
test_set_prep = yelp_data.preProcess(test_set, vocab)
ngrams = Ngrams(nlp)
ngrams.Train(training_set_prep, N)
ngrams.CalculateNgramPMI(2800, 2) #Select the k POS bigrams with the highest PMI
ngrams.CalculateNgramPMI(2800, 3) #Select the k POS trigrams with the highest PMI
me = Maxent(vocab, nlp)
me.buildChunks(training_set_prep)
me.buildFeatures(ngrams, N)
me.buildARFFfile(training_set_prep, "yelp_maxent_training.arff", N)
me.buildARFFfile(test_set_prep, "yelp_maxent_test.arff", N)
if __name__ == '__main__':
main() | en | 0.780561 | NaiveBayes for sentiment analysis #Calculate Pointwise Mutual information of N-grams # word probabilities # joint probabilites # PMI of N-grams # Replace nGrams with high information features #Select the k POS bigrams with the highest PMI #Select the k POS trigrams with the highest PMI | 2.465213 | 2 |
agenda_administrativa/apps/atividades/admin_forms.py | pmsserrana/agenda | 0 | 6615685 | from django import forms
from .models import AgendaAdministrativa
| from django import forms
from .models import AgendaAdministrativa
| none | 1 | 1.011894 | 1 | |
perfectextractor/extract.py | UUDigitalHumanitieslab/time-in-translation | 3 | 6615686 | import time
import click
from perfectextractor.corpora.bnc.extractor import BNCExtractor
from perfectextractor.corpora.bnc.perfect import BNCPerfectExtractor
from perfectextractor.corpora.bnc.pos import BNCPoSExtractor
from perfectextractor.corpora.dpc.extractor import DPCExtractor
from perfectextractor.corpora.dpc.perfect import DPCPerfectExtractor
from perfectextractor.corpora.dpc.pos import DPCPoSExtractor
from perfectextractor.corpora.opus.extractor import OPUSExtractor
from perfectextractor.corpora.opus.perfect import OPUSPerfectExtractor
from perfectextractor.corpora.opus.pos import OPUSPoSExtractor
from perfectextractor.corpora.opus.recentpast import OPUSRecentPastExtractor
from perfectextractor.corpora.opus.since import OPUSSinceDurationExtractor
from perfectextractor.apps.extractor.utils import TXT, XML, CSV, XLSX
from perfectextractor.apps.extractor.perfectextractor import PRESENT, PAST
# Corpora (valid values for the --corpus option)
BNC = 'bnc'
DPC = 'dpc'
OPUS = 'opus'
# Extractor types (valid values for the --extractor option)
BASE = 'base'
POS = 'pos'
PERFECT = 'perfect'
RECENT_PAST = 'recent_past'
SINCE_DURATION = 'since_duration'
def process_data_folders(extractor, path):
    """Run the extractor over every directory under path, echoing the
    wall-clock time each directory takes."""
    for directory in extractor.list_directories(path):
        start = time.time()
        click.echo('Now processing {} for {}'.format(directory, extractor.l_from))
        extractor.process_folder(directory)
        click.echo('Processing finished, took {:.3} seconds'.format(time.time() - start))
@click.command()
@click.argument('folder')
@click.argument('language_from')
@click.argument('languages_to', nargs=-1)  # nargs=-1 eats up all remaining arguments
@click.option('--corpus', default=OPUS, type=click.Choice([OPUS, DPC, BNC]),
              help='Which type of corpus to use')
@click.option('--extractor', default=BASE, type=click.Choice([BASE, POS, PERFECT, RECENT_PAST, SINCE_DURATION]),
              help='Which kind of extractor to use')
@click.option('--file_names', '-f', multiple=True,
              help='Limits the file names searched into')
@click.option('--sentence_ids', '-s', multiple=True,
              help='Limits the sentence IDs searched into')
@click.option('--lemmata', '-l', multiple=True,
              help='Limits the lemmata searched for')
@click.option('--regex', '-r', multiple=True,
              help='Use regular expression to match words')
@click.option('--pos', '-p', multiple=True,
              help='Limits the POS-tags searched for')
@click.option('--tokens', '-t', multiple=True, type=click.Tuple([str, str]),
              help='Limits the tokens searched for. Format: -t [start_token] [end_token]')
@click.option('--metadata', '-m', multiple=True, type=click.Tuple([str, str]),
              help='Adds additional metadata. Format: -m [tag] [level]')
@click.option('--outfile', '-o',
              help='Output file')
@click.option('--position', default=0,
              help='The position of the searched item')
@click.option('--search_in_to', is_flag=True,
              help='Also search for perfects in the to language(s)?')
@click.option('--tense', default=PRESENT, type=click.Choice([PRESENT, PAST]),
              help='The tense of perfect (present, past, future)')
@click.option('--output', default=TXT, type=click.Choice([TXT, XML]),
              help='Output results in text or XML format')
@click.option('--format', 'format_', default=CSV, type=click.Choice([CSV, XLSX]),
              help='Output file in .csv or .xlsx format')
@click.option('--one_per_sentence', is_flag=True,
              help='Output all sentences, and only one classification per sentence')
@click.option('--sort_by_certainty', is_flag=True,
              help='Sort by certainty?')
@click.option('--no_order_languages', is_flag=True,
              help='Do not order the languages alphabetically on alignment')
@click.option('--file_limit', default=0,
              help='Limit number of files searched')
@click.option('--min_file_size', default=0,
              help='Limits the minimal size of the files searched')
@click.option('--max_file_size', default=0,
              help='Limits the maximal size of the files searched')
def extract(folder, language_from, languages_to, corpus='opus', extractor='base',
            pos=None, search_in_to=False, tense=PRESENT,
            output=TXT, format_=CSV, file_names=None, sentence_ids=None,
            lemmata=None, regex=None, position=None, tokens=None, metadata=None,
            outfile=None, one_per_sentence=False, sort_by_certainty=False,
            no_order_languages=False,
            file_limit=0, min_file_size=0, max_file_size=0):
    """Command-line entry point: run an extractor over a corpus folder.

    Selects the extractor class from the (corpus, extractor) pair,
    instantiates it with the source language, target languages, and the
    shared keyword options, then processes every directory under
    `folder`. Combinations that have no implementation raise a
    click.ClickException.
    """
    # Set the default arguments
    # (shared by every extractor class, regardless of corpus/type).
    kwargs = dict(output=output, file_names=file_names, sentence_ids=sentence_ids,
                  lemmata=lemmata, regex=regex, position=position, tokens=tokens, metadata=metadata,
                  outfile=outfile, format_=format_, one_per_sentence=one_per_sentence,
                  sort_by_certainty=sort_by_certainty, no_order_languages=no_order_languages,
                  file_limit=file_limit, min_file_size=min_file_size, max_file_size=max_file_size)

    # Determine the extractor to be used
    # TODO: add more varieties
    resulting_extractor = None
    if corpus == OPUS:
        # OPUS supports every extractor type.
        if extractor == POS:
            resulting_extractor = OPUSPoSExtractor
        elif extractor == PERFECT:
            resulting_extractor = OPUSPerfectExtractor
        elif extractor == RECENT_PAST:
            resulting_extractor = OPUSRecentPastExtractor
        elif extractor == SINCE_DURATION:
            resulting_extractor = OPUSSinceDurationExtractor
        else:
            resulting_extractor = OPUSExtractor
    elif corpus == DPC:
        # DPC lacks recent-past and since-duration extractors.
        if extractor == POS:
            resulting_extractor = DPCPoSExtractor
        elif extractor == PERFECT:
            resulting_extractor = DPCPerfectExtractor
        elif extractor == RECENT_PAST:
            raise click.ClickException('Corpus or extractor type not implemented!')
        elif extractor == SINCE_DURATION:
            raise click.ClickException('Corpus or extractor type not implemented!')
        else:
            resulting_extractor = DPCExtractor
    elif corpus == BNC:
        # BNC lacks recent-past and since-duration extractors.
        if extractor == POS:
            resulting_extractor = BNCPoSExtractor
        elif extractor == PERFECT:
            resulting_extractor = BNCPerfectExtractor
        elif extractor == RECENT_PAST:
            raise click.ClickException('Corpus or extractor type not implemented!')
        elif extractor == SINCE_DURATION:
            raise click.ClickException('Corpus or extractor type not implemented!')
        else:
            resulting_extractor = BNCExtractor

    # Extractor-type-specific options are only forwarded when relevant.
    if extractor == PERFECT:
        kwargs['search_in_to'] = search_in_to
        kwargs['tense'] = tense
    if extractor == POS:
        kwargs['pos'] = pos

    if not resulting_extractor:
        raise click.ClickException('Unknown value for either corpus or extractor type')

    # Start the extraction!
    process_data_folders(resulting_extractor(language_from, languages_to, **kwargs), folder)
# Script entry point: delegate to the click command.
if __name__ == "__main__":
    extract()
| import time
import click
from perfectextractor.corpora.bnc.extractor import BNCExtractor
from perfectextractor.corpora.bnc.perfect import BNCPerfectExtractor
from perfectextractor.corpora.bnc.pos import BNCPoSExtractor
from perfectextractor.corpora.dpc.extractor import DPCExtractor
from perfectextractor.corpora.dpc.perfect import DPCPerfectExtractor
from perfectextractor.corpora.dpc.pos import DPCPoSExtractor
from perfectextractor.corpora.opus.extractor import OPUSExtractor
from perfectextractor.corpora.opus.perfect import OPUSPerfectExtractor
from perfectextractor.corpora.opus.pos import OPUSPoSExtractor
from perfectextractor.corpora.opus.recentpast import OPUSRecentPastExtractor
from perfectextractor.corpora.opus.since import OPUSSinceDurationExtractor
from perfectextractor.apps.extractor.utils import TXT, XML, CSV, XLSX
from perfectextractor.apps.extractor.perfectextractor import PRESENT, PAST
# Corpora: values accepted by the --corpus CLI option below
BNC = 'bnc'
DPC = 'dpc'
OPUS = 'opus'
# Extractor types: values accepted by the --extractor CLI option below
BASE = 'base'
POS = 'pos'
PERFECT = 'perfect'
RECENT_PAST = 'recent_past'
SINCE_DURATION = 'since_duration'
def process_data_folders(extractor, path):
    """Run *extractor* over every directory found under *path*, echoing timing info."""
    for folder in extractor.list_directories(path):
        start = time.time()
        click.echo('Now processing {} for {}'.format(folder, extractor.l_from))
        extractor.process_folder(folder)
        elapsed = time.time() - start
        click.echo('Processing finished, took {:.3} seconds'.format(elapsed))
@click.command()
@click.argument('folder')
@click.argument('language_from')
@click.argument('languages_to', nargs=-1)  # nargs=-1 eats up all remaining arguments
@click.option('--corpus', default=OPUS, type=click.Choice([OPUS, DPC, BNC]),
              help='Which type of corpus to use')
@click.option('--extractor', default=BASE, type=click.Choice([BASE, POS, PERFECT, RECENT_PAST, SINCE_DURATION]),
              help='Which kind of extractor to use')
@click.option('--file_names', '-f', multiple=True,
              help='Limits the file names searched into')
@click.option('--sentence_ids', '-s', multiple=True,
              help='Limits the sentence IDs searched into')
@click.option('--lemmata', '-l', multiple=True,
              help='Limits the lemmata searched for')
@click.option('--regex', '-r', multiple=True,
              help='Use regular expression to match words')
@click.option('--pos', '-p', multiple=True,
              help='Limits the POS-tags searched for')
@click.option('--tokens', '-t', multiple=True, type=click.Tuple([str, str]),
              help='Limits the tokens searched for. Format: -t [start_token] [end_token]')
@click.option('--metadata', '-m', multiple=True, type=click.Tuple([str, str]),
              help='Adds additional metadata. Format: -m [tag] [level]')
@click.option('--outfile', '-o',
              help='Output file')
@click.option('--position', default=0,
              help='The position of the searched item')
@click.option('--search_in_to', is_flag=True,
              help='Also search for perfects in the to language(s)?')
@click.option('--tense', default=PRESENT, type=click.Choice([PRESENT, PAST]),
              help='The tense of perfect (present, past, future)')
@click.option('--output', default=TXT, type=click.Choice([TXT, XML]),
              help='Output results in text or XML format')
@click.option('--format', 'format_', default=CSV, type=click.Choice([CSV, XLSX]),
              help='Output file in .csv or .xlsx format')
@click.option('--one_per_sentence', is_flag=True,
              help='Output all sentences, and only one classification per sentence')
@click.option('--sort_by_certainty', is_flag=True,
              help='Sort by certainty?')
@click.option('--no_order_languages', is_flag=True,
              help='Do not order the languages alphabetically on alignment')
@click.option('--file_limit', default=0,
              help='Limit number of files searched')
@click.option('--min_file_size', default=0,
              help='Limits the minimal size of the files searched')
@click.option('--max_file_size', default=0,
              help='Limits the maximal size of the files searched')
def extract(folder, language_from, languages_to, corpus='opus', extractor='base',
            pos=None, search_in_to=False, tense=PRESENT,
            output=TXT, format_=CSV, file_names=None, sentence_ids=None,
            lemmata=None, regex=None, position=None, tokens=None, metadata=None,
            outfile=None, one_per_sentence=False, sort_by_certainty=False,
            no_order_languages=False,
            file_limit=0, min_file_size=0, max_file_size=0):
    """Run the selected extractor over *folder* for *language_from*, aligned
    with the given *languages_to*, and write the results to *outfile*."""
    # Keyword arguments shared by every extractor class.
    kwargs = dict(output=output, file_names=file_names, sentence_ids=sentence_ids,
                  lemmata=lemmata, regex=regex, position=position, tokens=tokens, metadata=metadata,
                  outfile=outfile, format_=format_, one_per_sentence=one_per_sentence,
                  sort_by_certainty=sort_by_certainty, no_order_languages=no_order_languages,
                  file_limit=file_limit, min_file_size=min_file_size, max_file_size=max_file_size)
    # Determine the extractor to be used: table-driven dispatch instead of the
    # previous per-corpus if/elif ladders. Missing entries are corpus/extractor
    # combinations that have not been implemented (yet).
    extractor_classes = {
        OPUS: {BASE: OPUSExtractor, POS: OPUSPoSExtractor, PERFECT: OPUSPerfectExtractor,
               RECENT_PAST: OPUSRecentPastExtractor, SINCE_DURATION: OPUSSinceDurationExtractor},
        DPC: {BASE: DPCExtractor, POS: DPCPoSExtractor, PERFECT: DPCPerfectExtractor},
        BNC: {BASE: BNCExtractor, POS: BNCPoSExtractor, PERFECT: BNCPerfectExtractor},
    }
    if corpus not in extractor_classes:
        raise click.ClickException('Unknown value for either corpus or extractor type')
    if extractor not in extractor_classes[corpus]:
        # e.g. recent_past/since_duration are only implemented for OPUS corpora
        raise click.ClickException('Corpus or extractor type not implemented!')
    resulting_extractor = extractor_classes[corpus][extractor]
    # Some extractor types take extra keyword arguments.
    if extractor == PERFECT:
        kwargs['search_in_to'] = search_in_to
        kwargs['tense'] = tense
    if extractor == POS:
        kwargs['pos'] = pos
    # Start the extraction!
    process_data_folders(resulting_extractor(language_from, languages_to, **kwargs), folder)
if __name__ == "__main__":
    # Run the click command-line interface when invoked as a script.
    extract()
| en | 0.433716 | # Corpora # Extractor types # nargs=-1 eats up all remaining arguments # Set the default arguments # Determine the extractor to be used # TODO: add more varieties # Start the extraction! | 2.019877 | 2 |
DictionaryOfNewZealandEnglish/user/views.py | eResearchSandpit/DictionaryOfNewZealandEnglish | 0 | 6615687 | <reponame>eResearchSandpit/DictionaryOfNewZealandEnglish<gh_stars>0
# -*- coding: utf-8 -*-
# Users
from flask import (Blueprint, request, render_template, flash,
url_for, redirect, session)
from DictionaryOfNewZealandEnglish.extensions import bcrypt
from flask.ext.login import login_required, current_user, logout_user
from DictionaryOfNewZealandEnglish.user.forms import *
from DictionaryOfNewZealandEnglish.utils import flash_errors
from DictionaryOfNewZealandEnglish.user.models import User
from datetime import datetime as dt
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from DictionaryOfNewZealandEnglish.database import db
# All routes below are mounted under the /users URL prefix.
blueprint = Blueprint("user", __name__, url_prefix='/users',
                      static_folder="../static")
@blueprint.route("/", methods=['GET', 'POST'])
@login_required
def members():
form = RegisterForm(request.form, obj=current_user, csrf_enabled=False)
return render_template("users/show.html", user=current_user,
form=form,
action='edit')
@blueprint.route('/logout/')
@login_required
def logout():
    """Log the current user out and send them back to the public home page."""
    logout_user()
    flash('You are logged out.', 'info')
    home = url_for('public.home')
    return redirect(home)
@blueprint.route("/register", methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form, csrf_enabled=False)
if form.validate_on_submit():
new_user = User.create(username=form.username.data,
email=form.email.data,
institution=form.institution.data,
country=form.country.data,
interest=form.interest.data,
updated_at=dt.utcnow(),
password=form.password.data,
active=True )
flash("Thank you for registering. You can now log in.", 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('users/new.html', form=form)
@blueprint.route("/edit", methods=["POST"])
@login_required
def edit():
user = User.query.filter_by(id=current_user.id).first()
form = UserForm(request.form, obj=user, csrf_enabled=False)
user_email = request.form['email']
if request.method == "POST" and form.validate_on_submit():
data = __set_data_for_user(user, form)
if data:
flash("Edit of %s is saved." % data.username, 'success')
return render_template("users/show.html", user=user,
form=form,
action='edit')
@blueprint.route("/admin", methods=["GET", "POST"])
@login_required
def admin():
if not current_user.is_admin:
return redirect(url_for('public.home'))
email = ""
user = None
searchForm = SearchForm(request.form)
adminForm = None
copy_request_form = request.form
all_users = User.query.all()
if request.method == "POST":
email = request.form['email']
user = User.query.filter_by(email=email).first()
if user:
# adjust admin status
if 'is_admin' in request.form:
is_admin = request.form['is_admin']
checked = False
if is_admin and is_admin=='y':
checked = True
User.update(user,
updated_at = dt.utcnow(),
is_admin = checked)
user = User.query.filter_by(email=email).first()
elif user.id != current_user.id:
# does not allow current admin user to "un-admin" themselves
User.update(user,
updated_at = dt.utcnow(),
is_admin = False)
else:
flash("An administrator cannot withdraw their own administrator " +
"privilages", 'warning')
# delete user
if 'delete_user' in request.form:
# does not allow current user to delete themselves
if user.id != current_user.id:
User.delete(user)
flash(user.username + " has been deleted", 'warning')
user = None
copy_request_form = request.form.copy()
copy_request_form['email'] = ""
else:
flash("An administrator cannot delete thier own account", 'warning')
searchForm = SearchForm(copy_request_form, obj=user)
adminForm = AdminForm(request.form)
return render_template("users/admin.html", user=user,
form=searchForm,
adminForm=adminForm,
all_users=all_users)
##########################################################################
## private methods
def __set_data_for_user(user, form):
    """Copy the non-empty fields of *form* onto *user*.

    Returns the freshly re-queried user on success, or None when the new
    email address collides with an existing account.

    Fix: restores ``bcrypt.generate_password_hash`` (bcrypt is imported at
    the top of this module); the password line contained a corrupted
    ``<PASSWORD>`` placeholder token, which is a SyntaxError.
    """
    try:
        if form.username.data:
            User.update(user,
                        username=form.username.data,
                        updated_at=dt.utcnow())
        if form.email.data:
            User.update(user,
                        email=form.email.data,
                        updated_at=dt.utcnow())
        if form.institution.data:
            User.update(user,
                        institution=form.institution.data,
                        updated_at=dt.utcnow())
        if form.country.data:
            User.update(user,
                        country=form.country.data,
                        updated_at=dt.utcnow())
        if form.interest.data:
            User.update(user,
                        interest=form.interest.data,
                        updated_at=dt.utcnow())
        if form.password.data:
            # Store a bcrypt hash, never the plain-text password.
            User.update(user,
                        password=bcrypt.generate_password_hash(form.password.data),
                        updated_at=dt.utcnow())
    except (IntegrityError, InvalidRequestError):
        # Unique constraint on email was violated; undo the partial update.
        db.session.rollback()
        flash("The email %s is already taken." % form.email.data, 'warning')
        return None
    return User.query.filter_by(email=form.email.data).first()
| # -*- coding: utf-8 -*-
# Users
from flask import (Blueprint, request, render_template, flash,
url_for, redirect, session)
from DictionaryOfNewZealandEnglish.extensions import bcrypt
from flask.ext.login import login_required, current_user, logout_user
from DictionaryOfNewZealandEnglish.user.forms import *
from DictionaryOfNewZealandEnglish.utils import flash_errors
from DictionaryOfNewZealandEnglish.user.models import User
from datetime import datetime as dt
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from DictionaryOfNewZealandEnglish.database import db
# All routes below are mounted under the /users URL prefix.
blueprint = Blueprint("user", __name__, url_prefix='/users',
                      static_folder="../static")
@blueprint.route("/", methods=['GET', 'POST'])
@login_required
def members():
form = RegisterForm(request.form, obj=current_user, csrf_enabled=False)
return render_template("users/show.html", user=current_user,
form=form,
action='edit')
@blueprint.route('/logout/')
@login_required
def logout():
    """Log the current user out and send them back to the public home page."""
    logout_user()
    flash('You are logged out.', 'info')
    home = url_for('public.home')
    return redirect(home)
@blueprint.route("/register", methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form, csrf_enabled=False)
if form.validate_on_submit():
new_user = User.create(username=form.username.data,
email=form.email.data,
institution=form.institution.data,
country=form.country.data,
interest=form.interest.data,
updated_at=dt.utcnow(),
password=form.password.data,
active=True )
flash("Thank you for registering. You can now log in.", 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('users/new.html', form=form)
@blueprint.route("/edit", methods=["POST"])
@login_required
def edit():
user = User.query.filter_by(id=current_user.id).first()
form = UserForm(request.form, obj=user, csrf_enabled=False)
user_email = request.form['email']
if request.method == "POST" and form.validate_on_submit():
data = __set_data_for_user(user, form)
if data:
flash("Edit of %s is saved." % data.username, 'success')
return render_template("users/show.html", user=user,
form=form,
action='edit')
@blueprint.route("/admin", methods=["GET", "POST"])
@login_required
def admin():
if not current_user.is_admin:
return redirect(url_for('public.home'))
email = ""
user = None
searchForm = SearchForm(request.form)
adminForm = None
copy_request_form = request.form
all_users = User.query.all()
if request.method == "POST":
email = request.form['email']
user = User.query.filter_by(email=email).first()
if user:
# adjust admin status
if 'is_admin' in request.form:
is_admin = request.form['is_admin']
checked = False
if is_admin and is_admin=='y':
checked = True
User.update(user,
updated_at = dt.utcnow(),
is_admin = checked)
user = User.query.filter_by(email=email).first()
elif user.id != current_user.id:
# does not allow current admin user to "un-admin" themselves
User.update(user,
updated_at = dt.utcnow(),
is_admin = False)
else:
flash("An administrator cannot withdraw their own administrator " +
"privilages", 'warning')
# delete user
if 'delete_user' in request.form:
# does not allow current user to delete themselves
if user.id != current_user.id:
User.delete(user)
flash(user.username + " has been deleted", 'warning')
user = None
copy_request_form = request.form.copy()
copy_request_form['email'] = ""
else:
flash("An administrator cannot delete thier own account", 'warning')
searchForm = SearchForm(copy_request_form, obj=user)
adminForm = AdminForm(request.form)
return render_template("users/admin.html", user=user,
form=searchForm,
adminForm=adminForm,
all_users=all_users)
##########################################################################
## private methods
def __set_data_for_user(user, form):
    """Copy the non-empty fields of *form* onto *user*.

    Returns the freshly re-queried user on success, or None when the new
    email address collides with an existing account.

    Fix: restores ``bcrypt.generate_password_hash`` (bcrypt is imported at
    the top of this module); the password line contained a corrupted
    ``<PASSWORD>`` placeholder token, which is a SyntaxError.
    """
    try:
        if form.username.data:
            User.update(user,
                        username=form.username.data,
                        updated_at=dt.utcnow())
        if form.email.data:
            User.update(user,
                        email=form.email.data,
                        updated_at=dt.utcnow())
        if form.institution.data:
            User.update(user,
                        institution=form.institution.data,
                        updated_at=dt.utcnow())
        if form.country.data:
            User.update(user,
                        country=form.country.data,
                        updated_at=dt.utcnow())
        if form.interest.data:
            User.update(user,
                        interest=form.interest.data,
                        updated_at=dt.utcnow())
        if form.password.data:
            # Store a bcrypt hash, never the plain-text password.
            User.update(user,
                        password=bcrypt.generate_password_hash(form.password.data),
                        updated_at=dt.utcnow())
    except (IntegrityError, InvalidRequestError):
        # Unique constraint on email was violated; undo the partial update.
        db.session.rollback()
        flash("The email %s is already taken." % form.email.data, 'warning')
        return None
    return User.query.filter_by(email=form.email.data).first()
broadinstitute_psp/plot/generate_qc_plots_for_metadata_field.py | cmap/psp | 8 | 6615688 | import sys
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import argparse
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import cmapPy.pandasGEXpress.parse as parse
logger = logging.getLogger(setup_logger.LOGGER_NAME)
def build_parser():
    """Build the command-line argument parser for this plotting script."""
    p = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required args
    p.add_argument(
        "--list_of_gcts", "-l", nargs="+", required=True,
        help="space separated filepaths to 1+ input GCTs")
    # Optional args
    p.add_argument(
        "--metadata_field", "-m", default="det_well_enrichment_score",
        help="name of metadata field to plot on x axis")
    p.add_argument(
        "--output_name", "-o", default="probe_scatter.pdf",
        help="name of output pdf file generated")
    p.add_argument(
        "-verbose", "-v", action="store_true", default=False,
        help="increase the number of messages reported")
    return p
def main(args):
    """Parse each input GCT, build the union of their probes, and write one
    scatter-plot page per probe to the output PDF."""
    # Read GCTs into a list
    gctoo_list = [parse.parse(gct) for gct in args.list_of_gcts]
    # Create superset of all probes in GCTs
    probe_superset = create_probe_superset(gctoo_list)
    # Create pdf in which each page is a probe of the superset
    create_output_pdf(probe_superset, gctoo_list, args.metadata_field, args.output_name)
def create_probe_superset(gctoo_list):
    """Return the set union of the probe (row) ids of all given GCToo objects.

    Fix: the original called the bare name ``reduce``, which is not a builtin
    in Python 3 (it lives in functools), so this raised NameError at runtime.
    ``set().union(*sets)`` replaces it and also handles an empty input list.
    """
    list_of_probe_sets = [set(gct.data_df.index) for gct in gctoo_list]
    return set().union(*list_of_probe_sets)
def create_output_pdf(probe_superset, gctoo_list, metadata_field, output_name):
    """Write a multi-page PDF named *output_name* with one scatter plot page
    per probe in *probe_superset*."""
    with PdfPages(output_name) as pdf:
        for probe in probe_superset:
            page_figure = plotify(probe, gctoo_list, metadata_field)
            pdf.savefig(page_figure)
            # Free the figure's memory once it has been written to the PDF.
            plt.close()
    return
def plotify(probe, gctoo_list, metadata_field):
    """ Iterates through provided GCTs to plot GCT values for given
    metadata field against probe quant values

    Args:
        probe (string) name of probe row
        gctoo_list (list of GCToo objects)
        metadata_field (string) name of metadata column

    Returns:
        figure (plot)
    """
    if len(gctoo_list) > 1:
        # Multiple GCTs: one subplot per GCT, sharing both axes.
        plt.figure()
        fig, axes = plt.subplots(1, len(gctoo_list), sharey=True, sharex=True)
        plt.suptitle(probe, fontsize=16)
        plt.xlabel(metadata_field)
        for i in range(len(gctoo_list)):
            gct = gctoo_list[i]
            x_vals = gct.col_metadata_df.loc[:, metadata_field]
            # Account for GCTs in which probe field may have been filtered
            try:
                y_vals = gct.data_df.loc[probe, :]
            except KeyError as error:
                # If probe does not exist in GCT y values are null
                y_vals = pd.Series(index=gct.data_df.columns)
            # Set up plot sizing
            axes[i].tick_params(axis='both', which='major', labelsize=8)
            axes[i].set_title(gct.src, fontsize=5)
            axes[i].scatter(x_vals, y_vals)
            # Set y axis label on first / left-most plot only
            if i == 0:
                axes[i].set_ylabel("Probe Quant Value")
    else:
        # Single GCT: one plain scatter plot, no subplots needed.
        fig = plt.figure()
        plt.title(probe)
        plt.xlabel(metadata_field)
        plt.ylabel("Probe Quant Value")
        x_vals = gctoo_list[0].col_metadata_df.loc[:, metadata_field]
        try:
            y_vals = gctoo_list[0].data_df.loc[probe, :]
        except KeyError as error:
            # If probe does not exist in GCT y values are null
            y_vals = pd.Series(index=gctoo_list[0].data_df.columns)
        plt.scatter(x_vals, y_vals)
    plt.close()
    return fig
if __name__ == "__main__":
    # Parse CLI args and configure logging verbosity before running.
    args = build_parser().parse_args(sys.argv[1:])
    setup_logger.setup(verbose=args.verbose)
    logger.debug("args: {}".format(args))
main(args) | import sys
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import argparse
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import cmapPy.pandasGEXpress.parse as parse
logger = logging.getLogger(setup_logger.LOGGER_NAME)
def build_parser():
    """Build the command-line argument parser for this plotting script."""
    p = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required args
    p.add_argument(
        "--list_of_gcts", "-l", nargs="+", required=True,
        help="space separated filepaths to 1+ input GCTs")
    # Optional args
    p.add_argument(
        "--metadata_field", "-m", default="det_well_enrichment_score",
        help="name of metadata field to plot on x axis")
    p.add_argument(
        "--output_name", "-o", default="probe_scatter.pdf",
        help="name of output pdf file generated")
    p.add_argument(
        "-verbose", "-v", action="store_true", default=False,
        help="increase the number of messages reported")
    return p
def main(args):
    """Parse each input GCT, build the union of their probes, and write one
    scatter-plot page per probe to the output PDF."""
    # Read GCTs into a list
    gctoo_list = [parse.parse(gct) for gct in args.list_of_gcts]
    # Create superset of all probes in GCTs
    probe_superset = create_probe_superset(gctoo_list)
    # Create pdf in which each page is a probe of the superset
    create_output_pdf(probe_superset, gctoo_list, args.metadata_field, args.output_name)
def create_probe_superset(gctoo_list):
    """Return the set union of the probe (row) ids of all given GCToo objects.

    Fix: the original called the bare name ``reduce``, which is not a builtin
    in Python 3 (it lives in functools), so this raised NameError at runtime.
    ``set().union(*sets)`` replaces it and also handles an empty input list.
    """
    list_of_probe_sets = [set(gct.data_df.index) for gct in gctoo_list]
    return set().union(*list_of_probe_sets)
def create_output_pdf(probe_superset, gctoo_list, metadata_field, output_name):
    """Write a multi-page PDF named *output_name* with one scatter plot page
    per probe in *probe_superset*."""
    with PdfPages(output_name) as pdf:
        for probe in probe_superset:
            page_figure = plotify(probe, gctoo_list, metadata_field)
            pdf.savefig(page_figure)
            # Free the figure's memory once it has been written to the PDF.
            plt.close()
    return
def plotify(probe, gctoo_list, metadata_field):
    """ Iterates through provided GCTs to plot GCT values for given
    metadata field against probe quant values

    Args:
        probe (string) name of probe row
        gctoo_list (list of GCToo objects)
        metadata_field (string) name of metadata column

    Returns:
        figure (plot)
    """
    if len(gctoo_list) > 1:
        # Multiple GCTs: one subplot per GCT, sharing both axes.
        plt.figure()
        fig, axes = plt.subplots(1, len(gctoo_list), sharey=True, sharex=True)
        plt.suptitle(probe, fontsize=16)
        plt.xlabel(metadata_field)
        for i in range(len(gctoo_list)):
            gct = gctoo_list[i]
            x_vals = gct.col_metadata_df.loc[:, metadata_field]
            # Account for GCTs in which probe field may have been filtered
            try:
                y_vals = gct.data_df.loc[probe, :]
            except KeyError as error:
                # If probe does not exist in GCT y values are null
                y_vals = pd.Series(index=gct.data_df.columns)
            # Set up plot sizing
            axes[i].tick_params(axis='both', which='major', labelsize=8)
            axes[i].set_title(gct.src, fontsize=5)
            axes[i].scatter(x_vals, y_vals)
            # Set y axis label on first / left-most plot only
            if i == 0:
                axes[i].set_ylabel("Probe Quant Value")
    else:
        # Single GCT: one plain scatter plot, no subplots needed.
        fig = plt.figure()
        plt.title(probe)
        plt.xlabel(metadata_field)
        plt.ylabel("Probe Quant Value")
        x_vals = gctoo_list[0].col_metadata_df.loc[:, metadata_field]
        try:
            y_vals = gctoo_list[0].data_df.loc[probe, :]
        except KeyError as error:
            # If probe does not exist in GCT y values are null
            y_vals = pd.Series(index=gctoo_list[0].data_df.columns)
        plt.scatter(x_vals, y_vals)
    plt.close()
    return fig
if __name__ == "__main__":
    # Parse CLI args and configure logging verbosity before running.
    args = build_parser().parse_args(sys.argv[1:])
    setup_logger.setup(verbose=args.verbose)
    logger.debug("args: {}".format(args))
main(args) | en | 0.739248 | Build argument parser. # Required args # Read GCTs into a list # Create superset of all probes in GCTs # Create pdf in which each page is a probe of the superset # Create list of sets of probes in each gct and return union of all sets Iterates through provided GCTs to plot GCT values for given metadata field against probe quant values Args: probe (string) name of probe row gctoo_list (list of GCToo objects) metadata_field (string) name of metadata column Returns: figure (plot) # Account for GCTs in which probe field may have been filtered # If probe does not exist in GCT y values are null # Set up plot sizing # Set y axis label on first / left-most plot only # If probe does not exist in GCT y values are null | 2.413866 | 2 |
imputation/cluster_save_imputation_params.py | ratschlab/circEWS | 34 | 6615689 | '''
Cluster dispatcher for the script <save_imputation_params.py>
'''
import subprocess
import os
import os.path
import sys
import argparse
import circews.functions.util.filesystem as mlhc_fs
def cluster_save_imputation_params(configs):
    """Submit one LSF ``bsub`` job per (reduce config, split key) combination,
    each running the save_imputation_params.py compute script."""
    compute_script_path=configs["compute_script_path"]
    job_index=0  # counts submitted jobs (currently informational only)
    mem_in_mbytes=configs["mem_in_mbytes"]
    n_cpu_cores=configs["n_cpu_cores"]
    n_compute_hours=configs["n_compute_hours"]
    # Compute nodes to exclude from scheduling.
    bad_hosts=["lm-a2-002","lm-a2-003","lm-a2-004"]
    for reduce_config in configs["reduce_configs"]:
        for split_key in configs["split_configs"]:
            print("Generating imputation parameters for split {} with reduced data: {}".format(split_key, reduce_config))
            job_name="imputationparams_{}_{}".format(split_key,reduce_config)
            log_result_file=os.path.join(configs["log_dir"],"{}_RESULT.txt".format(job_name))
            # Remove a stale result file from a previous run, if any.
            mlhc_fs.delete_if_exist(log_result_file)
            cmd_line=" ".join(["bsub", "-R", "rusage[mem={}]".format(mem_in_mbytes), "-n", "{}".format(n_cpu_cores), "-r", "-W", "{}:00".format(n_compute_hours),
                               " ".join(['-R "select[hname!=\'{}\']"'.format(bad_host) for bad_host in bad_hosts]),
                               "-J","{}".format(job_name), "-o", log_result_file, "python3", compute_script_path, "--run_mode CLUSTER",
                               "--split_key {}".format(split_key), "--data_mode {}".format(reduce_config)])
            # Safety net: make sure no destructive command sneaked into the shell line.
            assert(" rm " not in cmd_line)
            job_index+=1
            if configs["dry_run"]:
                # Dry run: only print the command that would be submitted.
                print(cmd_line)
            else:
                subprocess.call([cmd_line], shell=True)
def parse_cmd_args():
    """Parse command-line options into a configuration dict, adding the
    fixed reduce/split configurations on top."""
    parser = argparse.ArgumentParser()
    # Input paths
    parser.add_argument(
        "--compute_script_path",
        default="/cluster/home/mhueser/git/projects/2016/ICUscore/mhueser/scripts/imputation/save_imputation_params.py",
        help="Script to dispatch")
    # Output paths
    parser.add_argument(
        "--log_dir",
        default="/cluster/work/grlab/clinical/Inselspital/DataReleases/01-19-2017/InselSpital/misc_derived/mhueser/log",
        help="Logging directory")
    # Arguments
    parser.add_argument("--mem_in_mbytes", type=int, default=8000, help="Number of mbytes to request")
    parser.add_argument("--n_cpu_cores", type=int, default=1, help="Number of CPU cores to use")
    parser.add_argument("--n_compute_hours", type=int, default=24, help="Number of CPU hours to request")
    parser.add_argument("--dry_run", action="store_true", default=False, help="Should script be run in dry-run mode")
    configs = vars(parser.parse_args())
    configs["reduce_configs"] = ["reduced"]
    configs["split_configs"] = ["temporal_2"]
    return configs
if __name__=="__main__":
configs=parse_cmd_args()
cluster_save_imputation_params(configs)
| '''
Cluster dispatcher for the script <save_imputation_params.py>
'''
import subprocess
import os
import os.path
import sys
import argparse
import circews.functions.util.filesystem as mlhc_fs
def cluster_save_imputation_params(configs):
    """Submit one LSF ``bsub`` job per (reduce config, split key) combination,
    each running the save_imputation_params.py compute script."""
    compute_script_path=configs["compute_script_path"]
    job_index=0  # counts submitted jobs (currently informational only)
    mem_in_mbytes=configs["mem_in_mbytes"]
    n_cpu_cores=configs["n_cpu_cores"]
    n_compute_hours=configs["n_compute_hours"]
    # Compute nodes to exclude from scheduling.
    bad_hosts=["lm-a2-002","lm-a2-003","lm-a2-004"]
    for reduce_config in configs["reduce_configs"]:
        for split_key in configs["split_configs"]:
            print("Generating imputation parameters for split {} with reduced data: {}".format(split_key, reduce_config))
            job_name="imputationparams_{}_{}".format(split_key,reduce_config)
            log_result_file=os.path.join(configs["log_dir"],"{}_RESULT.txt".format(job_name))
            # Remove a stale result file from a previous run, if any.
            mlhc_fs.delete_if_exist(log_result_file)
            cmd_line=" ".join(["bsub", "-R", "rusage[mem={}]".format(mem_in_mbytes), "-n", "{}".format(n_cpu_cores), "-r", "-W", "{}:00".format(n_compute_hours),
                               " ".join(['-R "select[hname!=\'{}\']"'.format(bad_host) for bad_host in bad_hosts]),
                               "-J","{}".format(job_name), "-o", log_result_file, "python3", compute_script_path, "--run_mode CLUSTER",
                               "--split_key {}".format(split_key), "--data_mode {}".format(reduce_config)])
            # Safety net: make sure no destructive command sneaked into the shell line.
            assert(" rm " not in cmd_line)
            job_index+=1
            if configs["dry_run"]:
                # Dry run: only print the command that would be submitted.
                print(cmd_line)
            else:
                subprocess.call([cmd_line], shell=True)
def parse_cmd_args():
    """Parse command-line options into a configuration dict, adding the
    fixed reduce/split configurations on top."""
    parser = argparse.ArgumentParser()
    # Input paths
    parser.add_argument(
        "--compute_script_path",
        default="/cluster/home/mhueser/git/projects/2016/ICUscore/mhueser/scripts/imputation/save_imputation_params.py",
        help="Script to dispatch")
    # Output paths
    parser.add_argument(
        "--log_dir",
        default="/cluster/work/grlab/clinical/Inselspital/DataReleases/01-19-2017/InselSpital/misc_derived/mhueser/log",
        help="Logging directory")
    # Arguments
    parser.add_argument("--mem_in_mbytes", type=int, default=8000, help="Number of mbytes to request")
    parser.add_argument("--n_cpu_cores", type=int, default=1, help="Number of CPU cores to use")
    parser.add_argument("--n_compute_hours", type=int, default=24, help="Number of CPU hours to request")
    parser.add_argument("--dry_run", action="store_true", default=False, help="Should script be run in dry-run mode")
    configs = vars(parser.parse_args())
    configs["reduce_configs"] = ["reduced"]
    configs["split_configs"] = ["temporal_2"]
    return configs
if __name__=="__main__":
configs=parse_cmd_args()
cluster_save_imputation_params(configs)
| en | 0.424545 | Cluster dispatcher for the script <save_imputation_params.py> # Input paths # Output paths # Arguments | 2.469423 | 2 |
templates/django_app_name/exceptions.py | luiscberrocal/django_ansible_config | 0 | 6615690 | class {{django_app_name | to_camel_case}}Exception(Exception):
pass
| class {{django_app_name | to_camel_case}}Exception(Exception):
pass
| none | 1 | 1.19528 | 1 | |
src/hypergol/dataset_factory.py | hypergol/hypergol | 49 | 6615691 | from pathlib import Path
from hypergol.repr import Repr
from hypergol.dataset import Dataset
from hypergol.repo_data import RepoData
class DatasetFactory(Repr):
    """Convenience class to create lots of datasets at once. Used in pipelines
    where multiple datasets are created into the same location, project, branch.
    """

    def __init__(self, location, project, branch, chunkCount, repoData=None):
        """
        Parameters
        ----------
        location : str
            path the project is in
        project : str
            project name
        branch : str
            branch name
        chunkCount : int = {16 , 256, 4096}
            How many files the data will be stored in, sets the granularity of multithreaded processing
        repoData : RepoData
            stores the commit information at the creation of the dataset
        """
        self.location = location
        self.project = project
        self.branch = branch
        self.chunkCount = chunkCount
        # Fall back to a placeholder commit record when none was provided.
        self.repoData = repoData or RepoData.get_dummy()

    @property
    def projectDirectory(self):
        """Directory of the project: ``<location>/<project>``."""
        return Path(self.location, self.project)

    @property
    def branchDirectory(self):
        """Directory of the branch: ``<location>/<project>/<branch>``."""
        return Path(self.location, self.project, self.branch)

    def get(self, dataType, name, branch=None, chunkCount=None):
        """Creates a dataset with the parameters given and the factory's own parameters

        Parameters
        ----------
        dataType : BaseData
            Type of the dataset
        name : str
            Name of the dataset (recommended to be in snakecase)
        branch : str=None
            Name of the branch to load the dataset from (if None, defaults to current)
        chunkCount : int=None
            Number of chunks, if None, the factory's own value will be used
        """
        effectiveBranch = self.branch if branch is None else branch
        effectiveChunkCount = self.chunkCount if chunkCount is None else chunkCount
        return Dataset(
            dataType=dataType,
            location=self.location,
            project=self.project,
            branch=effectiveBranch,
            name=name,
            chunkCount=effectiveChunkCount,
            repoData=self.repoData
        )
| from pathlib import Path
from hypergol.repr import Repr
from hypergol.dataset import Dataset
from hypergol.repo_data import RepoData
class DatasetFactory(Repr):
    """Convenience class to create lots of datasets at once. Used in pipelines
    where multiple datasets are created into the same location, project, branch.
    """

    def __init__(self, location, project, branch, chunkCount, repoData=None):
        """
        Parameters
        ----------
        location : str
            path the project is in
        project : str
            project name
        branch : str
            branch name
        chunkCount : int = {16 , 256, 4096}
            How many files the data will be stored in, sets the granularity of multithreaded processing
        repoData : RepoData
            stores the commit information at the creation of the dataset
        """
        self.location = location
        self.project = project
        self.branch = branch
        self.chunkCount = chunkCount
        # Fall back to a placeholder commit record when none was provided.
        self.repoData = repoData or RepoData.get_dummy()

    @property
    def projectDirectory(self):
        """Directory of the project: ``<location>/<project>``."""
        return Path(self.location, self.project)

    @property
    def branchDirectory(self):
        """Directory of the branch: ``<location>/<project>/<branch>``."""
        return Path(self.location, self.project, self.branch)

    def get(self, dataType, name, branch=None, chunkCount=None):
        """Creates a dataset with the parameters given and the factory's own parameters

        Parameters
        ----------
        dataType : BaseData
            Type of the dataset
        name : str
            Name of the dataset (recommended to be in snakecase)
        branch : str=None
            Name of the branch to load the dataset from (if None, defaults to current)
        chunkCount : int=None
            Number of chunks, if None, the factory's own value will be used
        """
        effectiveBranch = self.branch if branch is None else branch
        effectiveChunkCount = self.chunkCount if chunkCount is None else chunkCount
        return Dataset(
            dataType=dataType,
            location=self.location,
            project=self.project,
            branch=effectiveBranch,
            name=name,
            chunkCount=effectiveChunkCount,
            repoData=self.repoData
        )
| en | 0.707584 | Convenience class to create lots of datasets at once. Used in pipelines where multiple datasets are created into the same location, project, branch Parameters ---------- location : str path the project is in project : str project name branch : str branch name repoData : RepoData stores the commit information at the creation of the dataset chunkCount : int = {16 , 256, 4096} How many files the data will be stored in, sets the granularity of multithreaded processing Creates a dataset with the parameters given and the factory's own parameters Parameters ---------- dataType : BaseData Type of the dataset branch : str=None Name of the branch to load the dataset from (if None, defaults to current) name : str Name of the dataset (recommended to be in snakecase) chunkCount : int=None Number of chunks, if None, the factory's own value will be used | 3.009975 | 3 |
rwp/tropospheric_attenuation.py | mikelytaev/wave-propagation | 15 | 6615692 | from rwp.environment import Polarization
import math as fm
def log10k(freq_hz, polarz: Polarization):
freq_ghz = freq_hz * 1e-9
if polarz == Polarization.HORIZONTAL:
a = [-5.33980, -0.35351, -0.23789, -0.94158]
b = [-0.10008, 1.26970, 0.86036, 0.64552]
c = [1.13098, 0.45400, 0.15354, 0.16817]
m_k = -0.18961
c_k = 0.71147
else:
a = [-3.80595, -3.44965, -0.39902, 0.50167]
b = [0.56934, -0.22911, 0.73042, 1.07319]
c = [0.81061, 0.51059, 0.11899, 0.27195]
m_k = -0.16398
c_k = 0.63297
return sum([a[i] * fm.exp(-((fm.log10(freq_ghz) - b[i]) / c[i]) ** 2) for i in [0, 1, 2, 3]]) +\
m_k * fm.log10(freq_ghz) + c_k
def alpha(freq_hz, polarz: Polarization):
freq_ghz = freq_hz * 1e-9
if polarz == Polarization.HORIZONTAL:
a = [-0.14318, 0.29591, 0.32177, -5.37610, 16.1721]
b = [1.82442, 0.77564, 0.63773, -0.96230, -3.29980]
c = [-0.55187, 0.19822, 0.13164, 1.47828, 3.43990]
m_a = 0.67849
c_a = -1.95537
else:
a = [-0.07771, 0.56727, -0.20238, -48.2991, 48.5833]
b = [2.33840, 0.95545, 1.14520, 0.791669, 0.791459]
c = [-0.76284, 0.54039, 0.26809, 0.116226, 0.116479]
m_a = -0.053739
c_a = 0.83433
return sum([a[i] * fm.exp(-((fm.log10(freq_ghz) - b[i]) / c[i]) ** 2) for i in [0, 1, 2, 3, 4]]) +\
m_a * fm.log10(freq_ghz) + c_a
def gamma(r, freq_hz, polarz: Polarization):
return 10 ** log10k(freq_hz, polarz) * r ** alpha(freq_hz, polarz)
| from rwp.environment import Polarization
import math as fm
def log10k(freq_hz, polarz: Polarization):
freq_ghz = freq_hz * 1e-9
if polarz == Polarization.HORIZONTAL:
a = [-5.33980, -0.35351, -0.23789, -0.94158]
b = [-0.10008, 1.26970, 0.86036, 0.64552]
c = [1.13098, 0.45400, 0.15354, 0.16817]
m_k = -0.18961
c_k = 0.71147
else:
a = [-3.80595, -3.44965, -0.39902, 0.50167]
b = [0.56934, -0.22911, 0.73042, 1.07319]
c = [0.81061, 0.51059, 0.11899, 0.27195]
m_k = -0.16398
c_k = 0.63297
return sum([a[i] * fm.exp(-((fm.log10(freq_ghz) - b[i]) / c[i]) ** 2) for i in [0, 1, 2, 3]]) +\
m_k * fm.log10(freq_ghz) + c_k
def alpha(freq_hz, polarz: Polarization):
freq_ghz = freq_hz * 1e-9
if polarz == Polarization.HORIZONTAL:
a = [-0.14318, 0.29591, 0.32177, -5.37610, 16.1721]
b = [1.82442, 0.77564, 0.63773, -0.96230, -3.29980]
c = [-0.55187, 0.19822, 0.13164, 1.47828, 3.43990]
m_a = 0.67849
c_a = -1.95537
else:
a = [-0.07771, 0.56727, -0.20238, -48.2991, 48.5833]
b = [2.33840, 0.95545, 1.14520, 0.791669, 0.791459]
c = [-0.76284, 0.54039, 0.26809, 0.116226, 0.116479]
m_a = -0.053739
c_a = 0.83433
return sum([a[i] * fm.exp(-((fm.log10(freq_ghz) - b[i]) / c[i]) ** 2) for i in [0, 1, 2, 3, 4]]) +\
m_a * fm.log10(freq_ghz) + c_a
def gamma(r, freq_hz, polarz: Polarization):
return 10 ** log10k(freq_hz, polarz) * r ** alpha(freq_hz, polarz)
| none | 1 | 2.094779 | 2 | |
fhir2dataset/tools/graph.py | arkhn/FHIR2Dataset | 26 | 6615693 | <gh_stars>10-100
"""
Module containing functions useful for the analysis and exploitation of graphs
"""
import logging
import networkx as nx
logger = logging.getLogger(__name__)
def join_path(graph: nx.Graph) -> list:
"""transforms the query graph into an Eulerian graph in order to be able to find an Eulerian path in it.
An Eulerian path is a trail in a finite graph that visits every edge exactly once (allowing for revisiting vertices).
Since the initial graph is not necessarily an Eulerian graph, the Eulerian path is reprocessed so that each join is made only once.
Arguments:
graph {nx.Graph} -- instance of GraphQuery
Returns:
list -- List of tuples indicating the successive joints to be made
""" # noqa
euler_graph = nx.eulerize(graph)
euler_path = list(nx.eulerian_path(euler_graph))
path = clean_euler_path(euler_path)
return path
def clean_euler_path(eulerian_path: list) -> list:
"""Cleans a Eulerian path so that each edge (not directed) appears only once in the list. If a edge appears more than once, only the first occurrence is kept.
Arguments:
eulerian_path {list} -- Eulerian path
Returns:
list -- cleaned Eulerian path
""" # noqa
path = []
for edge in eulerian_path:
if edge not in path and edge[::-1] not in path:
path.append(edge)
return path
| """
Module containing functions useful for the analysis and exploitation of graphs
"""
import logging
import networkx as nx
logger = logging.getLogger(__name__)
def join_path(graph: nx.Graph) -> list:
"""transforms the query graph into an Eulerian graph in order to be able to find an Eulerian path in it.
An Eulerian path is a trail in a finite graph that visits every edge exactly once (allowing for revisiting vertices).
Since the initial graph is not necessarily an Eulerian graph, the Eulerian path is reprocessed so that each join is made only once.
Arguments:
graph {nx.Graph} -- instance of GraphQuery
Returns:
list -- List of tuples indicating the successive joints to be made
""" # noqa
euler_graph = nx.eulerize(graph)
euler_path = list(nx.eulerian_path(euler_graph))
path = clean_euler_path(euler_path)
return path
def clean_euler_path(eulerian_path: list) -> list:
"""Cleans a Eulerian path so that each edge (not directed) appears only once in the list. If a edge appears more than once, only the first occurrence is kept.
Arguments:
eulerian_path {list} -- Eulerian path
Returns:
list -- cleaned Eulerian path
""" # noqa
path = []
for edge in eulerian_path:
if edge not in path and edge[::-1] not in path:
path.append(edge)
return path | en | 0.871941 | Module containing functions useful for the analysis and exploitation of graphs transforms the query graph into an Eulerian graph in order to be able to find an Eulerian path in it. An Eulerian path is a trail in a finite graph that visits every edge exactly once (allowing for revisiting vertices). Since the initial graph is not necessarily an Eulerian graph, the Eulerian path is reprocessed so that each join is made only once. Arguments: graph {nx.Graph} -- instance of GraphQuery Returns: list -- List of tuples indicating the successive joints to be made # noqa Cleans a Eulerian path so that each edge (not directed) appears only once in the list. If a edge appears more than once, only the first occurrence is kept. Arguments: eulerian_path {list} -- Eulerian path Returns: list -- cleaned Eulerian path # noqa | 3.390201 | 3 |
src/stackoverflow/54841363/content_provider.py | mrdulin/python-codelab | 0 | 6615694 | class ContentUser():
def getUserRef(self, username):
userRef = ''
return userRef
class ContentReportGeneralSearch():
def getReport(self, username, search_text, search_type='0'):
user = ContentUser()
user.getUserRef(username=username)
| class ContentUser():
def getUserRef(self, username):
userRef = ''
return userRef
class ContentReportGeneralSearch():
def getReport(self, username, search_text, search_type='0'):
user = ContentUser()
user.getUserRef(username=username)
| none | 1 | 2.726812 | 3 | |
till_looping/1_6.py | mdazharuddin1011999/IoT_Assignment_2 | 0 | 6615695 | <reponame>mdazharuddin1011999/IoT_Assignment_2<filename>till_looping/1_6.py
num = input("Enter a number: ")
k = int(input("Enter K: "))
print("\nFront:",num[k-1], "\nBack:",num[-k]) if k<len(num) else print("\nInvalid K") | num = input("Enter a number: ")
k = int(input("Enter K: "))
print("\nFront:",num[k-1], "\nBack:",num[-k]) if k<len(num) else print("\nInvalid K") | none | 1 | 3.757449 | 4 | |
l3ex1.py | AlekseiSpasiuk/python | 0 | 6615696 | #!python3
# 1. Реализовать функцию, принимающую два числа (позиционные аргументы) и выполняющую их деление.
# Числа запрашивать у пользователя, предусмотреть обработку ситуации деления на ноль.
def division(a:int, b:int) -> float:
if not b:
print("division by zero")
else:
return a / b
a = int(input("a = "))
b = int(input("b = "))
c = division(a,b)
if c:
print(c)
| #!python3
# 1. Реализовать функцию, принимающую два числа (позиционные аргументы) и выполняющую их деление.
# Числа запрашивать у пользователя, предусмотреть обработку ситуации деления на ноль.
def division(a:int, b:int) -> float:
if not b:
print("division by zero")
else:
return a / b
a = int(input("a = "))
b = int(input("b = "))
c = division(a,b)
if c:
print(c)
| ru | 0.99605 | #!python3 # 1. Реализовать функцию, принимающую два числа (позиционные аргументы) и выполняющую их деление. # Числа запрашивать у пользователя, предусмотреть обработку ситуации деления на ноль. | 3.827787 | 4 |
censusbuddy/__main__.py | joshleejosh/censusbuddy | 0 | 6615697 | # -*- coding: utf-8 -*-
"""
TODO: Main module does nothing for now
"""
if __name__ == '__main__':
pass
| # -*- coding: utf-8 -*-
"""
TODO: Main module does nothing for now
"""
if __name__ == '__main__':
pass
| en | 0.735355 | # -*- coding: utf-8 -*- TODO: Main module does nothing for now | 0.921167 | 1 |
omegaup/candy_collection.py | corahama/python | 1 | 6615698 | #!/usr/bin/python3
def max_interval(arr):
_max = 0
_curr = 0
for i in arr:
_curr = _curr + i
_curr = max(_curr, 0)
_max = max(_max, _curr)
return _max
def _main() -> None:
T = int(input())
res = []
for _ in range(T):
N = int(input())
arr = [int(a) for a in input().split()]
if all(map(lambda x: x<0, arr)):
res.append(max(arr))
else:
res.append(max_interval(arr))
for i in range(T):
print(f"Case #{i+1}: {res[i]}")
if __name__ == '__main__':
_main()
| #!/usr/bin/python3
def max_interval(arr):
_max = 0
_curr = 0
for i in arr:
_curr = _curr + i
_curr = max(_curr, 0)
_max = max(_max, _curr)
return _max
def _main() -> None:
T = int(input())
res = []
for _ in range(T):
N = int(input())
arr = [int(a) for a in input().split()]
if all(map(lambda x: x<0, arr)):
res.append(max(arr))
else:
res.append(max_interval(arr))
for i in range(T):
print(f"Case #{i+1}: {res[i]}")
if __name__ == '__main__':
_main()
| fr | 0.131219 | #!/usr/bin/python3 #{i+1}: {res[i]}") | 3.261355 | 3 |
obstacle-avoidance/lbi/geom/__init__.py | irom-lab/performance-limits | 3 | 6615699 | from .collision import ray_circle_distance, ray_circle_intersections, ray_plane_intersection, ray_plane_distance, ray_aabb_distance
from .types import *
| from .collision import ray_circle_distance, ray_circle_intersections, ray_plane_intersection, ray_plane_distance, ray_aabb_distance
from .types import *
| none | 1 | 1.076499 | 1 | |
value/factors/migrations/0004_auto_20170124_1819.py | M3SOulu/value | 2 | 6615700 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-01-24 18:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('factors', '0003_auto_20160325_1155'),
]
operations = [
migrations.AlterField(
model_name='factor',
name='name',
field=models.CharField(max_length=255, verbose_name='name'),
),
migrations.AlterField(
model_name='group',
name='name',
field=models.CharField(max_length=255, verbose_name='name'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-01-24 18:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('factors', '0003_auto_20160325_1155'),
]
operations = [
migrations.AlterField(
model_name='factor',
name='name',
field=models.CharField(max_length=255, verbose_name='name'),
),
migrations.AlterField(
model_name='group',
name='name',
field=models.CharField(max_length=255, verbose_name='name'),
),
]
| en | 0.825616 | # -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2017-01-24 18:19 | 1.553489 | 2 |
nextvending/successwidget.py | fernandoleira/NextVending | 0 | 6615701 | import sys
from os import path, getcwd
from PyQt5 import QtCore, QtGui, QtWidgets
class SuccessWidget(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.setObjectName("SuccessWidget")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.loadingMovie = QtGui.QMovie(path.join(getcwd(), "nextvending", "assets", "img", "gifs", "loading.gif"))
size = self.loadingMovie.scaledSize()
self.successMovie = QtGui.QMovie(path.join(getcwd(), "nextvending", "assets", "img", "gifs", "checkmark.gif"))
self.successMovie.setScaledSize(size)
self.label = QtWidgets.QLabel()
self.label.setMovie(self.loadingMovie)
self.label.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.verticalLayout.addWidget(self.label)
self.setLayout(self.verticalLayout)
self.loadingMovie.start()
self.animationTimer = QtCore.QTimer()
self.animationTimer.setSingleShot = True
self.animationTimer.timeout.connect(self.loading_completed)
def start(self):
self.animationTimer.start(3000)
def reset(self):
self.animationTimer.stop()
self.successMovie.stop()
self.label.setMovie(self.loadingMovie)
self.loadingMovie.start()
@QtCore.pyqtSlot()
def loading_completed(self):
self.loadingMovie.stop()
self.label.setMovie(self.successMovie)
self.successMovie.start() | import sys
from os import path, getcwd
from PyQt5 import QtCore, QtGui, QtWidgets
class SuccessWidget(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.setObjectName("SuccessWidget")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.loadingMovie = QtGui.QMovie(path.join(getcwd(), "nextvending", "assets", "img", "gifs", "loading.gif"))
size = self.loadingMovie.scaledSize()
self.successMovie = QtGui.QMovie(path.join(getcwd(), "nextvending", "assets", "img", "gifs", "checkmark.gif"))
self.successMovie.setScaledSize(size)
self.label = QtWidgets.QLabel()
self.label.setMovie(self.loadingMovie)
self.label.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.verticalLayout.addWidget(self.label)
self.setLayout(self.verticalLayout)
self.loadingMovie.start()
self.animationTimer = QtCore.QTimer()
self.animationTimer.setSingleShot = True
self.animationTimer.timeout.connect(self.loading_completed)
def start(self):
self.animationTimer.start(3000)
def reset(self):
self.animationTimer.stop()
self.successMovie.stop()
self.label.setMovie(self.loadingMovie)
self.loadingMovie.start()
@QtCore.pyqtSlot()
def loading_completed(self):
self.loadingMovie.stop()
self.label.setMovie(self.successMovie)
self.successMovie.start() | none | 1 | 2.421214 | 2 | |
palo_alto_firewall_analyzer/validators/bad_log_setting.py | moshekaplan/palo_alto_firewall_analyzer | 4 | 6615702 | <filename>palo_alto_firewall_analyzer/validators/bad_log_setting.py
from palo_alto_firewall_analyzer.core import BadEntry, register_policy_validator
@register_policy_validator("BadLogSetting", "Rule uses an incorrect log profile")
def find_bad_log_setting(profilepackage):
mandated_log_profile = profilepackage.mandated_log_profile
device_groups = profilepackage.device_groups
devicegroup_exclusive_objects = profilepackage.devicegroup_exclusive_objects
verbose = profilepackage.verbose
badentries = []
if verbose:
print ("*"*80)
print ("Checking for incorrect log settings")
for i, device_group in enumerate(device_groups):
for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
rules = devicegroup_exclusive_objects[device_group][ruletype]
if verbose:
print (f"({i+1}/{len(device_groups)}) Checking {device_group}'s {ruletype}")
for entry in rules:
rule_name = entry.get('name')
# Disabled rules can be ignored
if entry.find("./disabled") is not None and entry.find("./disabled").text == "yes":
continue
log_setting_node = entry.find("./log-setting")
if log_setting_node is not None:
log_setting = log_setting_node.text
elif mandated_log_profile == 'default':
# 'default' has special treatment, in that if the 'default'
# profile exists, entries without a value will automatically
# use the 'default' log profile.
continue
else:
log_setting = None
if mandated_log_profile and log_setting != mandated_log_profile:
text = f"Device Group {device_group}'s {ruletype} '{rule_name}' doesn't use log profile '{mandated_log_profile}', instead it uses '{log_setting}'"
if verbose:
print(text)
badentries.append( BadEntry(data=entry, text=text, device_group=device_group, entry_type=ruletype) )
elif log_setting is None:
text = f"Device Group {device_group}'s {ruletype} '{rule_name}' doesn't use any log profile!"
if verbose:
print (text)
badentries.append( BadEntry(data=entry, text=text, device_group=device_group, entry_type=ruletype) )
return badentries | <filename>palo_alto_firewall_analyzer/validators/bad_log_setting.py
from palo_alto_firewall_analyzer.core import BadEntry, register_policy_validator
@register_policy_validator("BadLogSetting", "Rule uses an incorrect log profile")
def find_bad_log_setting(profilepackage):
mandated_log_profile = profilepackage.mandated_log_profile
device_groups = profilepackage.device_groups
devicegroup_exclusive_objects = profilepackage.devicegroup_exclusive_objects
verbose = profilepackage.verbose
badentries = []
if verbose:
print ("*"*80)
print ("Checking for incorrect log settings")
for i, device_group in enumerate(device_groups):
for ruletype in ('SecurityPreRules', 'SecurityPostRules'):
rules = devicegroup_exclusive_objects[device_group][ruletype]
if verbose:
print (f"({i+1}/{len(device_groups)}) Checking {device_group}'s {ruletype}")
for entry in rules:
rule_name = entry.get('name')
# Disabled rules can be ignored
if entry.find("./disabled") is not None and entry.find("./disabled").text == "yes":
continue
log_setting_node = entry.find("./log-setting")
if log_setting_node is not None:
log_setting = log_setting_node.text
elif mandated_log_profile == 'default':
# 'default' has special treatment, in that if the 'default'
# profile exists, entries without a value will automatically
# use the 'default' log profile.
continue
else:
log_setting = None
if mandated_log_profile and log_setting != mandated_log_profile:
text = f"Device Group {device_group}'s {ruletype} '{rule_name}' doesn't use log profile '{mandated_log_profile}', instead it uses '{log_setting}'"
if verbose:
print(text)
badentries.append( BadEntry(data=entry, text=text, device_group=device_group, entry_type=ruletype) )
elif log_setting is None:
text = f"Device Group {device_group}'s {ruletype} '{rule_name}' doesn't use any log profile!"
if verbose:
print (text)
badentries.append( BadEntry(data=entry, text=text, device_group=device_group, entry_type=ruletype) )
return badentries | en | 0.529851 | # Disabled rules can be ignored # 'default' has special treatment, in that if the 'default' # profile exists, entries without a value will automatically # use the 'default' log profile. | 2.6701 | 3 |
src/aceinna/__init__.py | baweiji/python-openimu | 0 | 6615703 | # Package Version
VERSION = '2.5.0'
PACKAGE_NAME = 'openimu'
| # Package Version
VERSION = '2.5.0'
PACKAGE_NAME = 'openimu'
| en | 0.366001 | # Package Version | 0.991014 | 1 |
ABTestAnalysis.py | Kahiro-M/ABTestAnalysis | 0 | 6615704 | #!/usr/bin/python
# coding: UTF-8
# -*- Coding: utf-8 -*-
import numpy as np
import pandas as pd
from scipy import stats
html_header = """
<!doctype html>
<html lang="ja">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css">
<style type="text/css">
<!--
table {
display:inline;
border:1px lightgray;
margin-right: 3px;
}
-->
</style>
</head>
<body>
"""
html_footer = """
</body>
</html>
"""
a_csvData = pd.read_csv("./A.csv",encoding="utf_8")
b_csvData = pd.read_csv("./B.csv",encoding="utf_8")
anlyDf = pd.DataFrame({
"User":np.concatenate([a_csvData.A_user,b_csvData.B_user]),
"Group":np.concatenate([np.tile("A",len(a_csvData.A_data)),(np.tile("B",len(b_csvData.B_data)))]),
"Data":np.concatenate([a_csvData.A_data,b_csvData.B_data]),
})
abDf=pd.crosstab(
index=anlyDf["Group"],
columns=anlyDf["Data"]
)
chi2Value, chi2PValue, chi2DoF, chi2EF = stats.chi2_contingency(abDf, correction=False)
chi2ResultStrPVal = "p値 : "+str('{:.10f}'.format(chi2PValue))
chi2ResultStrVal = "カイ二乗値 : "+str(chi2Value)
chi2ResultStrDoF = "自由度 : "+str(chi2DoF)
if chi2PValue<0.05:
resultStrChi2Test = "<b>カイ二乗検定 <font color=red>有意差あり(GroupとDataには関連がある)</font></b>"
else:
resultStrChi2Test = "<b>カイ二乗検定 有意差なし(GroupとDataには関連がない)</b>"
np.array([[2,2],[2,2]]).shape
if np.array([[2,2],[2,2]]).shape != abDf.shape:
fisherResultStrPVal = "2要素 x 2群の計4パターンで表現できる入力データで実行してください。"
resultStrFisherTest = "<b>要素が多すぎるため、フィッシャーの正確検定を実行できませんでした。</b>"
else:
fisherOddsRatio, fisherPValue = stats.fisher_exact(abDf)
fisherResultStrPVal = "p値 : "+str('{:.10f}'.format(fisherPValue))
if fisherPValue<0.05:
resultStrFisherTest = "<b>フィッシャーの正確検定 <font color=red>有意差あり(GroupとDataには関連がある)</font></b>"
else:
resultStrFisherTest = "<b>フィッシャーの正確検定 有意差なし(GroupとDataには関連がない)</b>"
abDf4display=pd.crosstab(
index=anlyDf["Group"],
columns=anlyDf["Data"],
margins=True,
normalize=False
)
# html output
with open("result.html", mode="w", encoding="utf_8") as fileObj:
fileObj.write(html_header)
fileObj.write(resultStrChi2Test)
fileObj.write("<br>")
fileObj.write(chi2ResultStrPVal)
fileObj.write(" ")
fileObj.write(chi2ResultStrVal)
fileObj.write(" ")
fileObj.write(chi2ResultStrDoF)
fileObj.write("<br>")
fileObj.write("<br>")
fileObj.write(resultStrFisherTest)
fileObj.write("<br>")
fileObj.write(fisherResultStrPVal)
fileObj.write("<br>")
fileObj.write("<br>")
fileObj.write("<br>")
fileObj.write("入力データ")
fileObj.write(anlyDf.to_html())
fileObj.write(" クロス集計表")
fileObj.write(abDf4display.to_html())
fileObj.write("<br>")
fileObj.write(html_footer)
| #!/usr/bin/python
# coding: UTF-8
# -*- Coding: utf-8 -*-
import numpy as np
import pandas as pd
from scipy import stats
html_header = """
<!doctype html>
<html lang="ja">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css">
<style type="text/css">
<!--
table {
display:inline;
border:1px lightgray;
margin-right: 3px;
}
-->
</style>
</head>
<body>
"""
html_footer = """
</body>
</html>
"""
a_csvData = pd.read_csv("./A.csv",encoding="utf_8")
b_csvData = pd.read_csv("./B.csv",encoding="utf_8")
anlyDf = pd.DataFrame({
"User":np.concatenate([a_csvData.A_user,b_csvData.B_user]),
"Group":np.concatenate([np.tile("A",len(a_csvData.A_data)),(np.tile("B",len(b_csvData.B_data)))]),
"Data":np.concatenate([a_csvData.A_data,b_csvData.B_data]),
})
abDf=pd.crosstab(
index=anlyDf["Group"],
columns=anlyDf["Data"]
)
chi2Value, chi2PValue, chi2DoF, chi2EF = stats.chi2_contingency(abDf, correction=False)
chi2ResultStrPVal = "p値 : "+str('{:.10f}'.format(chi2PValue))
chi2ResultStrVal = "カイ二乗値 : "+str(chi2Value)
chi2ResultStrDoF = "自由度 : "+str(chi2DoF)
if chi2PValue<0.05:
resultStrChi2Test = "<b>カイ二乗検定 <font color=red>有意差あり(GroupとDataには関連がある)</font></b>"
else:
resultStrChi2Test = "<b>カイ二乗検定 有意差なし(GroupとDataには関連がない)</b>"
np.array([[2,2],[2,2]]).shape
if np.array([[2,2],[2,2]]).shape != abDf.shape:
fisherResultStrPVal = "2要素 x 2群の計4パターンで表現できる入力データで実行してください。"
resultStrFisherTest = "<b>要素が多すぎるため、フィッシャーの正確検定を実行できませんでした。</b>"
else:
fisherOddsRatio, fisherPValue = stats.fisher_exact(abDf)
fisherResultStrPVal = "p値 : "+str('{:.10f}'.format(fisherPValue))
if fisherPValue<0.05:
resultStrFisherTest = "<b>フィッシャーの正確検定 <font color=red>有意差あり(GroupとDataには関連がある)</font></b>"
else:
resultStrFisherTest = "<b>フィッシャーの正確検定 有意差なし(GroupとDataには関連がない)</b>"
abDf4display=pd.crosstab(
index=anlyDf["Group"],
columns=anlyDf["Data"],
margins=True,
normalize=False
)
# html output
with open("result.html", mode="w", encoding="utf_8") as fileObj:
fileObj.write(html_header)
fileObj.write(resultStrChi2Test)
fileObj.write("<br>")
fileObj.write(chi2ResultStrPVal)
fileObj.write(" ")
fileObj.write(chi2ResultStrVal)
fileObj.write(" ")
fileObj.write(chi2ResultStrDoF)
fileObj.write("<br>")
fileObj.write("<br>")
fileObj.write(resultStrFisherTest)
fileObj.write("<br>")
fileObj.write(fisherResultStrPVal)
fileObj.write("<br>")
fileObj.write("<br>")
fileObj.write("<br>")
fileObj.write("入力データ")
fileObj.write(anlyDf.to_html())
fileObj.write(" クロス集計表")
fileObj.write(abDf4display.to_html())
fileObj.write("<br>")
fileObj.write(html_footer)
| en | 0.299097 | #!/usr/bin/python # coding: UTF-8 # -*- Coding: utf-8 -*- <!doctype html> <html lang="ja"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"> <style type="text/css"> <!-- table { display:inline; border:1px lightgray; margin-right: 3px; } --> </style> </head> <body> </body> </html> # html output | 2.502389 | 3 |
nnvm/python/nnvm/testing/alexnet.py | TharinduRusira/tvm | 0 | 6615705 | from .. import symbol as sym
from .utils import create_workload
""" Basic AlexNet workload
adopted from https://github.com/IntelLabs/Latte.py/blob/master/benchmarks/alexnet.py
"""
def get_symbol(num_classes=1000, **kwargs):
data = sym.Variable(name="data")
conv1 = sym.conv2d(data=data, channels=64, kernel_size=(11,11), strides=(4,4), padding=(0,0), use_bias=True, name="conv1")
relu1 = sym.relu(data=conv1, name="relu1")
pool1 = sym.max_pool2d(data=relu1, pool_size=(3,3), strides=(2,2), padding=(0,0), name="pool1")
conv2 = sym.conv2d(data=pool1, channels=192, kernel_size=(5,5), strides=(1,1), padding=(2,2), use_bias=True, name="conv2")
relu2 = sym.relu(data=conv2, name="relu2")
pool2 = sym.max_pool2d(data=relu2, pool_size=(3,3), strides=(2,2), padding=(0,0), name="pool2")
conv3 = sym.conv2d(data=pool2, channels=384, kernel_size=(3,3), strides=(1,1), padding=(1,1), use_bias=True, name="conv3")
relu3 = sym.relu(data=conv3, name="relu3")
conv4 = sym.conv2d(data=relu3, channels=256, kernel_size=(3,3), strides=(1,1), padding=(1,1), use_bias=True, name="conv4")
relu4 = sym.relu(data=conv4, name="relu4")
conv5 = sym.conv2d(data=relu4, channels=256, kernel_size=(3,3), strides=(1,1), padding=(1,1), use_bias=True, name="conv5")
relu5 = sym.relu(data=conv5, name="relu5")
pool5 = sym.max_pool2d(data=relu4, pool_size=(3,3), strides=(2,2), padding=(0,0), name="pool5")
flatten = sym.flatten(data=pool5, name="flatten")
fc6bias = sym.dense(data=flatten, units=4096, name="fc6bias")
fc7bias = sym.dense(data=fc6bias, units=4096, name="fc7bias")
fc8bias = sym.dense(data=fc7bias, units=num_classes, name="fc8bias")
softmax = sym.softmax(data=fc8bias, name="softmax")
return softmax
def get_workload(batch_size=1, num_classes=1008,
image_shape=(3, 227, 227), dtype="float32", **kwargs):
"""Get benchmark workload for AlexNet
Parameters
----------
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of classes
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
kwargs : dict
Extra arguments
Returns
-------
net : nnvm.Symbol
The computational graph
params : dict of str to NDArray
The parameters.
"""
net = get_symbol(num_classes=num_classes, **kwargs)
return create_workload(net, batch_size, image_shape, dtype)
| from .. import symbol as sym
from .utils import create_workload
""" Basic AlexNet workload
adopted from https://github.com/IntelLabs/Latte.py/blob/master/benchmarks/alexnet.py
"""
def get_symbol(num_classes=1000, **kwargs):
data = sym.Variable(name="data")
conv1 = sym.conv2d(data=data, channels=64, kernel_size=(11,11), strides=(4,4), padding=(0,0), use_bias=True, name="conv1")
relu1 = sym.relu(data=conv1, name="relu1")
pool1 = sym.max_pool2d(data=relu1, pool_size=(3,3), strides=(2,2), padding=(0,0), name="pool1")
conv2 = sym.conv2d(data=pool1, channels=192, kernel_size=(5,5), strides=(1,1), padding=(2,2), use_bias=True, name="conv2")
relu2 = sym.relu(data=conv2, name="relu2")
pool2 = sym.max_pool2d(data=relu2, pool_size=(3,3), strides=(2,2), padding=(0,0), name="pool2")
conv3 = sym.conv2d(data=pool2, channels=384, kernel_size=(3,3), strides=(1,1), padding=(1,1), use_bias=True, name="conv3")
relu3 = sym.relu(data=conv3, name="relu3")
conv4 = sym.conv2d(data=relu3, channels=256, kernel_size=(3,3), strides=(1,1), padding=(1,1), use_bias=True, name="conv4")
relu4 = sym.relu(data=conv4, name="relu4")
conv5 = sym.conv2d(data=relu4, channels=256, kernel_size=(3,3), strides=(1,1), padding=(1,1), use_bias=True, name="conv5")
relu5 = sym.relu(data=conv5, name="relu5")
pool5 = sym.max_pool2d(data=relu4, pool_size=(3,3), strides=(2,2), padding=(0,0), name="pool5")
flatten = sym.flatten(data=pool5, name="flatten")
fc6bias = sym.dense(data=flatten, units=4096, name="fc6bias")
fc7bias = sym.dense(data=fc6bias, units=4096, name="fc7bias")
fc8bias = sym.dense(data=fc7bias, units=num_classes, name="fc8bias")
softmax = sym.softmax(data=fc8bias, name="softmax")
return softmax
def get_workload(batch_size=1, num_classes=1008,
image_shape=(3, 227, 227), dtype="float32", **kwargs):
"""Get benchmark workload for AlexNet
Parameters
----------
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of classes
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
kwargs : dict
Extra arguments
Returns
-------
net : nnvm.Symbol
The computational graph
params : dict of str to NDArray
The parameters.
"""
net = get_symbol(num_classes=num_classes, **kwargs)
return create_workload(net, batch_size, image_shape, dtype)
| en | 0.387591 | Basic AlexNet workload adopted from https://github.com/IntelLabs/Latte.py/blob/master/benchmarks/alexnet.py Get benchmark workload for AlexNet Parameters ---------- batch_size : int The batch size used in the model num_classes : int, optional Number of classes image_shape : tuple, optional The input image shape dtype : str, optional The data type kwargs : dict Extra arguments Returns ------- net : nnvm.Symbol The computational graph params : dict of str to NDArray The parameters. | 2.402314 | 2 |
client.py | makloooo/Multiplayer-Hangman | 0 | 6615706 | #Socket client for python
import socket # socket library
import sys # for exit handling
import threading
import random
import select
from check import ip_checksum
class TransportLayer(threading.Thread):
    def __init__(self, data_q, reply_q):
        """Initialize the transport thread's state and open its UDP socket.

        data_q  -- inbound queue; packets/user input for this layer to consume
        reply_q -- outbound queue; processed results are pushed back to the caller

        NOTE: this is Python 2 code (old-style ``except socket.error, msg:``
        clause and print statements below).
        Exits the whole process via sys.exit() if socket creation or host
        resolution fails.
        """
        super(TransportLayer, self).__init__()
        self.data_q = data_q # Receive Queue
        self.reply_q = reply_q # Send Queue
        # Event used to signal the thread to stop (set elsewhere; not visible here).
        self.stop_request = threading.Event()
        # Only players need their own port number
        # seed(None) seeds from system time/os entropy; each client picks a
        # random local port in 7000-7500.
        random.seed(None)
        self.host = ''
        self.port = random.randint(7000,7500)
        self.playing = False      # True once this client is in an active game
        self.parent = ''          # this client's own username (set via 'PARENT' packet)
        self.destport = 8000      # server's well-known port
        self.data = None; # Data received
        self.reply = None; # Data to send
        self.t = None;
        self.state = 0;
        try:
            # Create a IPv4 UDP socket in python
            self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);
        except socket.error, msg:
            print 'Failed to create socket. Error code: ' + str(msg[0]) \
                + ' , Error message : ' + msg[1]
            sys.exit();
        print 'Socket Created'
        try:
            # host is '' here, which resolves to the local machine -- presumably
            # intentional for loopback testing; TODO confirm intended server host.
            remote_ip = socket.gethostbyname(self.host)
        except socket.gaierror:
            # could not resolve
            print 'Hostname could not be resolved. Exiting.'
            sys.exit();
        print 'Socket Connected to ' + self.host + ' on ip ' + remote_ip
def poll_data(self):
# Check from server or client
readable, writable, exceptional = select.select([self.s], [], [], 0);
if readable: # Process here
self.data = self.s.recvfrom(1024)[0] # Unreliable one-way packets. no ACK
#print '\nReceived server packet : ' + str(self.data)
self.process_pkt()
return False # Not really, just have no reason to progress SM
elif not self.data_q.empty():
self.data = self.data_q.get(False)
#print 'Client packet contents : ' + str(self.data)
if self.data.splitlines()[0].isupper():
self.process_pkt()
return False # For client use only, don't send
return True # Send to server
return False
def process_pkt(self):
msg = self.data.splitlines() # Splits up the packet into args
self.reply = self.data
#print msg
if not msg :
self.reply = ''
elif msg[0] == 'broadcast': # Sent from server, print to everyone
print '\n' + msg[1]
return
elif msg[0] == 'chat': # Sent from user
# if sender is themselves, dont print newline, since already echoed
if not msg[1] == self.parent: print ''
sys.stdout.write(msg[1] + msg[2] + ' | ')
sys.stdout.flush()
return
elif msg[0] == 'update':
del msg[0]
print ''
for line in msg: print line
return
elif msg[0] == 'reinput':
if self.playing: sys.stdout.write('> Chat: ')
else: sys.stdout.write('> User Input: ')
sys.stdout.flush()
elif msg[0] == 'lose':
print '\nIt\'s over for you! Better luck next time sucker!'
print 'Press Enter to Escape the void back to the Main Menu'
self.reply = msg[0]
elif msg[0] == 'win':
print '\nCongratulations, you won!\nPress Enter to return to the Main Menu'
self.reply = msg[0]
# Uppercase for client use only, these are mainly for output formatting
elif msg[0] == 'PARENT':
self.parent = msg[1]
self.reply = True
elif msg[0] == 'PLAYING':
self.playing = (str(msg[1]) == "True")
self.reply = str(self.playing)
#print 'Sending up : ' + str(self.reply)
self.reply_q.put(self.reply)
return
def make_pkt(self, flag, chksum):
# Lets just send it as a string
packet = str(self.port) + str(flag) + self.data + chksum;
#print 'Packet Contents : ' + packet
return packet
def isACK(self, flag):
return self.rcvpkt[3] == str(flag)
def corrupt(self):
return not (self.rcvpkt[0:3] == "ACK");
def udt_send(self, flag):
checksum = ip_checksum(self.data);
sndpkt = self.make_pkt(flag, checksum);
self.s.sendto(sndpkt, (self.host, self.destport));
def tick(self):
# State actions
if self.state == 0: # Wait on the master application
# Parent thread is your master application
if not self.poll_data() : return
# Wait on event from parent thread
if self.data == None:
self.s.close();
sys.exit();
elif self.state == 1: # Wait for ACK0
# This blocks the system for you, so you won't continue until
# you receive a packet from the server
#print 'Waiting on packet with ACK0...'
self.rcvpkt = self.s.recv(1024);
elif self.state == 2: # Wait on the master application (the console)
# Wait on event from parent thread
if not self.poll_data() : return
if self.data == None:
self.s.close();
sys.exit();
elif self.state == 3: # Wait on ACK1
#print 'Waiting on packet with ACK1...'
self.rcvpkt = self.s.recv(1024);
# Transitions
if self.state == 0: # Send data
# Once you get data, make packet, send packet, create thread
self.udt_send(0);
# Start timer here
self.t = threading.Timer(5.0, self.udt_send, [0]);
self.t.start();
self.state = 1;
elif self.state == 1: # Check for packet integrity from server
if (not self.corrupt() and self.isACK(0)):
#print 'Packet recieved!'
#print 'Server Reply : ' + self.rcvpkt;
self.t.cancel(); # Stop timer here
self.data = self.rcvpkt[5:] # Get rid of the ACK
self.process_pkt();
self.state = 2;
elif (self.corrupt() or self.isACK(1)):
print 'Corrupt or duplicate packet received'
print 'Server Reply : ' + self.rcvpkt;
elif self.state == 2: # Send data
# Once you get data, make packet, send packet, create thread
self.udt_send(1);
# Start timer here again
self.t = threading.Timer(5.0, self.udt_send, [1]);
self.t.start();
self.state = 3;
elif self.state == 3: # Check for packet integrity from server
if (not self.corrupt() and self.isACK(1)):
#print 'Packet recieved!'
#print 'Server Reply : ' + self.rcvpkt;
self.t.cancel(); # Stop timer here
self.data = self.rcvpkt[5:]
self.process_pkt()
self.state = 0;
elif (self.corrupt() or self.isACK(0)):
print 'Corrupted or duplicate packet received'
print 'Server Reply : ' + self.rcvpkt;
def run(self):
# Keep on running until terminated by the player process
# Continue receiving data from player and sending it to
# the server using the rdt send process.
while not self.stop_request.isSet():
self.tick()
s.close()
def join(self, timeout=None):
self.stop_request.set()
super(TransportLayer, self).join(timeout)
| #Socket client for python
import socket # socket library
import sys # for exit handling
import threading
import random
import select
from check import ip_checksum
class TransportLayer(threading.Thread):
def __init__(self, data_q, reply_q):
super(TransportLayer, self).__init__()
self.data_q = data_q # Receive Queue
self.reply_q = reply_q # Send Queue
self.stop_request = threading.Event()
# Only players need their own port number
random.seed(None)
self.host = ''
self.port = random.randint(7000,7500)
self.playing = False
self.parent = ''
self.destport = 8000
self.data = None; # Data received
self.reply = None; # Data to send
self.t = None;
self.state = 0;
try:
# Create a IPv4 UDP socket in python
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);
except socket.error, msg:
print 'Failed to create socket. Error code: ' + str(msg[0]) \
+ ' , Error message : ' + msg[1]
sys.exit();
print 'Socket Created'
try:
remote_ip = socket.gethostbyname(self.host)
except socket.gaierror:
# could not resolve
print 'Hostname could not be resolved. Exiting.'
sys.exit();
print 'Socket Connected to ' + self.host + ' on ip ' + remote_ip
def poll_data(self):
# Check from server or client
readable, writable, exceptional = select.select([self.s], [], [], 0);
if readable: # Process here
self.data = self.s.recvfrom(1024)[0] # Unreliable one-way packets. no ACK
#print '\nReceived server packet : ' + str(self.data)
self.process_pkt()
return False # Not really, just have no reason to progress SM
elif not self.data_q.empty():
self.data = self.data_q.get(False)
#print 'Client packet contents : ' + str(self.data)
if self.data.splitlines()[0].isupper():
self.process_pkt()
return False # For client use only, don't send
return True # Send to server
return False
def process_pkt(self):
msg = self.data.splitlines() # Splits up the packet into args
self.reply = self.data
#print msg
if not msg :
self.reply = ''
elif msg[0] == 'broadcast': # Sent from server, print to everyone
print '\n' + msg[1]
return
elif msg[0] == 'chat': # Sent from user
# if sender is themselves, dont print newline, since already echoed
if not msg[1] == self.parent: print ''
sys.stdout.write(msg[1] + msg[2] + ' | ')
sys.stdout.flush()
return
elif msg[0] == 'update':
del msg[0]
print ''
for line in msg: print line
return
elif msg[0] == 'reinput':
if self.playing: sys.stdout.write('> Chat: ')
else: sys.stdout.write('> User Input: ')
sys.stdout.flush()
elif msg[0] == 'lose':
print '\nIt\'s over for you! Better luck next time sucker!'
print 'Press Enter to Escape the void back to the Main Menu'
self.reply = msg[0]
elif msg[0] == 'win':
print '\nCongratulations, you won!\nPress Enter to return to the Main Menu'
self.reply = msg[0]
# Uppercase for client use only, these are mainly for output formatting
elif msg[0] == 'PARENT':
self.parent = msg[1]
self.reply = True
elif msg[0] == 'PLAYING':
self.playing = (str(msg[1]) == "True")
self.reply = str(self.playing)
#print 'Sending up : ' + str(self.reply)
self.reply_q.put(self.reply)
return
def make_pkt(self, flag, chksum):
# Lets just send it as a string
packet = str(self.port) + str(flag) + self.data + chksum;
#print 'Packet Contents : ' + packet
return packet
def isACK(self, flag):
return self.rcvpkt[3] == str(flag)
def corrupt(self):
return not (self.rcvpkt[0:3] == "ACK");
def udt_send(self, flag):
checksum = ip_checksum(self.data);
sndpkt = self.make_pkt(flag, checksum);
self.s.sendto(sndpkt, (self.host, self.destport));
def tick(self):
# State actions
if self.state == 0: # Wait on the master application
# Parent thread is your master application
if not self.poll_data() : return
# Wait on event from parent thread
if self.data == None:
self.s.close();
sys.exit();
elif self.state == 1: # Wait for ACK0
# This blocks the system for you, so you won't continue until
# you receive a packet from the server
#print 'Waiting on packet with ACK0...'
self.rcvpkt = self.s.recv(1024);
elif self.state == 2: # Wait on the master application (the console)
# Wait on event from parent thread
if not self.poll_data() : return
if self.data == None:
self.s.close();
sys.exit();
elif self.state == 3: # Wait on ACK1
#print 'Waiting on packet with ACK1...'
self.rcvpkt = self.s.recv(1024);
# Transitions
if self.state == 0: # Send data
# Once you get data, make packet, send packet, create thread
self.udt_send(0);
# Start timer here
self.t = threading.Timer(5.0, self.udt_send, [0]);
self.t.start();
self.state = 1;
elif self.state == 1: # Check for packet integrity from server
if (not self.corrupt() and self.isACK(0)):
#print 'Packet recieved!'
#print 'Server Reply : ' + self.rcvpkt;
self.t.cancel(); # Stop timer here
self.data = self.rcvpkt[5:] # Get rid of the ACK
self.process_pkt();
self.state = 2;
elif (self.corrupt() or self.isACK(1)):
print 'Corrupt or duplicate packet received'
print 'Server Reply : ' + self.rcvpkt;
elif self.state == 2: # Send data
# Once you get data, make packet, send packet, create thread
self.udt_send(1);
# Start timer here again
self.t = threading.Timer(5.0, self.udt_send, [1]);
self.t.start();
self.state = 3;
elif self.state == 3: # Check for packet integrity from server
if (not self.corrupt() and self.isACK(1)):
#print 'Packet recieved!'
#print 'Server Reply : ' + self.rcvpkt;
self.t.cancel(); # Stop timer here
self.data = self.rcvpkt[5:]
self.process_pkt()
self.state = 0;
elif (self.corrupt() or self.isACK(0)):
print 'Corrupted or duplicate packet received'
print 'Server Reply : ' + self.rcvpkt;
def run(self):
# Keep on running until terminated by the player process
# Continue receiving data from player and sending it to
# the server using the rdt send process.
while not self.stop_request.isSet():
self.tick()
s.close()
def join(self, timeout=None):
self.stop_request.set()
super(TransportLayer, self).join(timeout)
| en | 0.819231 | #Socket client for python # socket library # for exit handling # Receive Queue # Send Queue # Only players need their own port number # Data received # Data to send # Create a IPv4 UDP socket in python # could not resolve # Check from server or client # Process here # Unreliable one-way packets. no ACK #print '\nReceived server packet : ' + str(self.data) # Not really, just have no reason to progress SM #print 'Client packet contents : ' + str(self.data) # For client use only, don't send # Send to server # Splits up the packet into args #print msg # Sent from server, print to everyone # Sent from user # if sender is themselves, dont print newline, since already echoed # Uppercase for client use only, these are mainly for output formatting #print 'Sending up : ' + str(self.reply) # Lets just send it as a string #print 'Packet Contents : ' + packet # State actions # Wait on the master application # Parent thread is your master application # Wait on event from parent thread # Wait for ACK0 # This blocks the system for you, so you won't continue until # you receive a packet from the server #print 'Waiting on packet with ACK0...' # Wait on the master application (the console) # Wait on event from parent thread # Wait on ACK1 #print 'Waiting on packet with ACK1...' # Transitions # Send data # Once you get data, make packet, send packet, create thread # Start timer here # Check for packet integrity from server #print 'Packet recieved!' #print 'Server Reply : ' + self.rcvpkt; # Stop timer here # Get rid of the ACK # Send data # Once you get data, make packet, send packet, create thread # Start timer here again # Check for packet integrity from server #print 'Packet recieved!' #print 'Server Reply : ' + self.rcvpkt; # Stop timer here # Keep on running until terminated by the player process # Continue receiving data from player and sending it to # the server using the rdt send process. | 2.911278 | 3 |
ex8_draw.py | Yasir323/Image-Processing | 0 | 6615707 | import cv2
import numpy as np
# An all-zero array is a black image; a single channel makes it grayscale.
gray_canvas = np.zeros((512, 512), dtype=np.uint8)
print(gray_canvas)
# Three channels (BGR); starts out black until painted below.
color_canvas = np.zeros((512, 512, 3), np.uint8)
color_canvas[:] = 255, 0, 0                   # flood-fill blue (BGR order)
color_canvas[100:200, 200:300] = 0, 255, 0    # paint a green rectangle
cv2.imshow("Grayscale", gray_canvas)
cv2.imshow("Colored", color_canvas)
print(color_canvas)
cv2.waitKey(0)
import numpy as np
# A black grayscale image
img_grayscale = np.zeros((512, 512), dtype=np.uint8)
print(img_grayscale)
img_color = np.zeros((512, 512, 3), np.uint8) # Still a black image
img_color[:] = 255, 0, 0 # Whole image turns blue
img_color[100:200, 200:300] = 0, 255, 0 # A green patch in the middle
cv2.imshow("Grayscale", img_grayscale)
cv2.imshow("Colored", img_color)
print(img_color)
cv2.waitKey(0) | en | 0.734696 | # A black grayscale image # Still a black image # Whole image turns blue # A green patch in the middle | 3.504916 | 4 |
tests/test_pattern.py | EnigmaCurry/isobar | 1 | 6615708 | """ Unit tests for isobar """
import pytest
import isobar
import time
import os
def test_pattern():
    """A single-repeat PSeq yields each element once, then stops."""
    seq = isobar.PSeq([ 1, 2, 3, 4 ], 1)
    for expected in (1, 2, 3, 4):
        assert next(seq) == expected
    # The pattern is now exhausted.
    with pytest.raises(StopIteration):
        next(seq)
| """ Unit tests for isobar """
import pytest
import isobar
import time
import os
def test_pattern():
p = isobar.PSeq([ 1, 2, 3, 4 ], 1)
assert next(p) == 1
assert next(p) == 2
assert next(p) == 3
assert next(p) == 4
with pytest.raises(StopIteration) as excinfo:
next(p)
| en | 0.841238 | Unit tests for isobar | 3.031569 | 3 |
pyretina/optimize/__init__.py | ZloVechno/pyretina | 0 | 6615709 | <reponame>ZloVechno/pyretina<gh_stars>0
from _grid_search import maxima
from _grid_search import grid_search
from _multi_start import multi_start
from _gen_multistart import gen_multi_start
from _gen_multistart import multistart_until | from _grid_search import maxima
from _grid_search import grid_search
from _multi_start import multi_start
from _gen_multistart import gen_multi_start
from _gen_multistart import multistart_until | none | 1 | 1.052807 | 1 | |
src/tests/test_kinderminer.py | rmillikin/fast_km | 0 | 6615710 | import pytest
import os
import shutil
from indexing.index import Index
from indexing.index_builder import IndexBuilder
from workers import kinderminer as km
from indexing import km_util as util
from .test_index_building import data_dir
def test_fisher_exact_test():
    """Check contingency table, Fisher p-value and sort ratio against the
    worked example in figure 1 of
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5543342/
    """
    a_term_set = set(range(0, 2027)) # embryonic stem cell
    b_term_set = set(range(2012, 2071)) # NANOG
    # Total corpus size.  Previously computed as len(set(range(0, 17012366))),
    # which materialized a ~17-million-element set only to take its length.
    total_count = 17012366
    table = km.get_contingency_table(a_term_set, b_term_set, total_count)
    assert table == [[15,2012],[44,17010295]]
    pvalue = km.fisher_exact(table)
    assert pvalue == pytest.approx(5.219e-46, abs=1e-46)
    sort_ratio = km.get_sort_ratio(table)
    assert sort_ratio == pytest.approx(15 / 59)
def test_kinderminer(data_dir):
    """End-to-end: rebuild the index, then compare plain, OR and AND queries."""
    index_dir = util.get_index_dir(data_dir)
    # Start from a clean slate: drop any leftover index.
    if os.path.exists(index_dir):
        shutil.rmtree(index_dir)
    assert not os.path.exists(index_dir)
    # Rebuild the index from the test corpus.
    IndexBuilder(data_dir).build_index()
    # Query three ways: plain term, OR-compound, AND-compound.
    idx = Index(data_dir)
    plain = km.kinderminer_search('cancer', 'brca1', idx, return_pmids=True)
    assert plain['pmid_intersection'] == {34580114}
    or_query = km.kinderminer_search('cancer/carcinoma', 'brca1', idx)
    and_query = km.kinderminer_search('cancer&carcinoma', 'brca1', idx)
    # OR widens and AND narrows the a-term set; the b-term set is unaffected.
    assert or_query['len(a_term_set)'] > plain['len(a_term_set)']
    assert and_query['len(a_term_set)'] < plain['len(a_term_set)']
    assert or_query['len(b_term_set)'] == plain['len(b_term_set)']
    assert and_query['len(b_term_set)'] == plain['len(b_term_set)']
import os
import shutil
from indexing.index import Index
from indexing.index_builder import IndexBuilder
from workers import kinderminer as km
from indexing import km_util as util
from .test_index_building import data_dir
def test_fisher_exact_test():
# example shown in figure 1 of:
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5543342/
a_term_set = set(range(0, 2027)) # embryonic stem cell
b_term_set = set(range(2012, 2071)) # NANOG
total_set = set(range(0,17012366))
table = km.get_contingency_table(a_term_set, b_term_set, len(total_set))
assert table == [[15,2012],[44,17010295]]
pvalue = km.fisher_exact(table)
assert pvalue == pytest.approx(5.219e-46, abs=1e-46)
sort_ratio = km.get_sort_ratio(table)
assert sort_ratio == pytest.approx(15 / 59)
def test_kinderminer(data_dir):
index_dir = util.get_index_dir(data_dir)
# delete the index if it exists already
if os.path.exists(index_dir):
shutil.rmtree(index_dir)
assert not os.path.exists(index_dir)
# build the index
indexer = IndexBuilder(data_dir)
indexer.build_index()
# run kinderminer query
idx = Index(data_dir)
km_result = km.kinderminer_search('cancer', 'brca1', idx, return_pmids=True)
assert km_result['pmid_intersection'] == {34580114}
km_or_result = km.kinderminer_search('cancer/carcinoma', 'brca1', idx)
km_and_result = km.kinderminer_search('cancer&carcinoma', 'brca1', idx)
# assertions
assert km_or_result['len(a_term_set)'] > km_result['len(a_term_set)']
assert km_and_result['len(a_term_set)'] < km_result['len(a_term_set)']
assert km_or_result['len(b_term_set)'] == km_result['len(b_term_set)']
assert km_and_result['len(b_term_set)'] == km_result['len(b_term_set)'] | en | 0.646972 | # example shown in figure 1 of: # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5543342/ # embryonic stem cell # NANOG # delete the index if it exists already # build the index # run kinderminer query # assertions | 2.24063 | 2 |
Python/leetcode/LargestNumber.py | darrencheng0817/AlgorithmLearning | 2 | 6615711 | '''
Created on 1.12.2016
@author: Darren
''''''
Given a list of non negative integers, arrange them such that they form the largest number.
For example, given [3, 30, 34, 5, 9], the largest formed number is 9534330.
Note: The result may be very large, so you need to return a string instead of an integer.
Credits:Special thanks to @ts for adding this problem and creating all test cases."
'''
| '''
Created on 1.12.2016
@author: Darren
''''''
Given a list of non negative integers, arrange them such that they form the largest number.
For example, given [3, 30, 34, 5, 9], the largest formed number is 9534330.
Note: The result may be very large, so you need to return a string instead of an integer.
Credits:Special thanks to @ts for adding this problem and creating all test cases."
'''
| en | 0.795514 | Created on 1.12.2016
@author: Darren Given a list of non negative integers, arrange them such that they form the largest number.
For example, given [3, 30, 34, 5, 9], the largest formed number is 9534330.
Note: The result may be very large, so you need to return a string instead of an integer.
Credits:Special thanks to @ts for adding this problem and creating all test cases." | 3.908479 | 4 |
Exercicios/Outros/ex13.py | rafaelbhcosta/Python_para_iniciantes | 0 | 6615712 | <gh_stars>0
# Crie uma aplicação que vai ler vários digitos que vai perguntar se ele deve continuar rodando
# as opções devem ser 1 continua rodando o para de funcionar
# No final ele deve informar quantas vezes ele rodou até parar | # Crie uma aplicação que vai ler vários digitos que vai perguntar se ele deve continuar rodando
# as opções devem ser 1 continua rodando o para de funcionar
# No final ele deve informar quantas vezes ele rodou até parar | pt | 0.997653 | # Crie uma aplicação que vai ler vários digitos que vai perguntar se ele deve continuar rodando # as opções devem ser 1 continua rodando o para de funcionar # No final ele deve informar quantas vezes ele rodou até parar | 1.990683 | 2 |
timewarp/util.py | tobi-wan-kenobi/timewarp | 5 | 6615713 |
class Event(object):
    """A minimal event object: wraps a message and renders it via str()."""
    def __init__(self, msg):
        """Store *msg* as the event payload."""
        self._msg = msg
    def __str__(self):
        """Return the wrapped message text."""
        return self._msg
# Module-level registry of observer callables: appended to by register()
# and iterated by emit().  NOTE(review): no locking around this shared
# list -- confirm it is only touched from one thread.
callbacks = []
def emit(event):
    """Broadcast *event* to every registered callback.

    A plain string argument is first wrapped in an Event instance so
    subscribers always receive an Event-like object.
    """
    payload = Event(event) if isinstance(event, str) else event
    for subscriber in callbacks:
        subscriber(payload)
def register(cb):
    """Subscribe callable *cb*; it will be invoked on every future emit()."""
    callbacks.append(cb)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
class Event(object):
def __init__(self, msg):
self._msg = msg
def __str__(self):
return self._msg
callbacks = []
def emit(event):
if isinstance(event, str):
event = Event(event)
for cb in callbacks:
cb(event)
def register(cb):
callbacks.append(cb)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| de | 0.26704 | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 | 2.698743 | 3 |
scons-local-1.1.0/SCons/Tool/tex.py | frew/simpleproto | 0 | 6615714 | <gh_stars>0
"""SCons.Tool.tex
Tool-specific initialization for TeX.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tex.py 3603 2008/10/10 05:46:45 scons"
import os.path
import re
import string
import shutil
import SCons.Action
import SCons.Node
import SCons.Node.FS
import SCons.Util
Verbose = False
must_rerun_latex = True
# these are files that just need to be checked for changes and then rerun latex
check_suffixes = ['.toc', '.lof', '.lot', '.out', '.nav', '.snm']
# these are files that require bibtex or makeindex to be run when they change
all_suffixes = check_suffixes + ['.bbl', '.idx', '.nlo', '.glo']
#
# regular expressions used to search for Latex features
# or outputs that require rerunning latex
#
# search for all .aux files opened by latex (recorded in the .log file)
openout_aux_re = re.compile(r"\\openout.*`(.*\.aux)'")
#printindex_re = re.compile(r"^[^%]*\\printindex", re.MULTILINE)
#printnomenclature_re = re.compile(r"^[^%]*\\printnomenclature", re.MULTILINE)
#printglossary_re = re.compile(r"^[^%]*\\printglossary", re.MULTILINE)
# search to find rerun warnings
warning_rerun_str = '(^LaTeX Warning:.*Rerun)|(^Package \w+ Warning:.*Rerun)'
warning_rerun_re = re.compile(warning_rerun_str, re.MULTILINE)
# search to find citation rerun warnings
rerun_citations_str = "^LaTeX Warning:.*\n.*Rerun to get citations correct"
rerun_citations_re = re.compile(rerun_citations_str, re.MULTILINE)
# search to find undefined references or citations warnings
undefined_references_str = '(^LaTeX Warning:.*undefined references)|(^Package \w+ Warning:.*undefined citations)'
undefined_references_re = re.compile(undefined_references_str, re.MULTILINE)
# used by the emitter
auxfile_re = re.compile(r".", re.MULTILINE)
tableofcontents_re = re.compile(r"^[^%]*\\tableofcontents", re.MULTILINE)
makeindex_re = re.compile(r"^[^%]*\\makeindex", re.MULTILINE)
bibliography_re = re.compile(r"^[^%]*\\bibliography", re.MULTILINE)
listoffigures_re = re.compile(r"^[^%]*\\listoffigures", re.MULTILINE)
listoftables_re = re.compile(r"^[^%]*\\listoftables", re.MULTILINE)
hyperref_re = re.compile(r"^[^%]*\\usepackage.*\{hyperref\}", re.MULTILINE)
makenomenclature_re = re.compile(r"^[^%]*\\makenomenclature", re.MULTILINE)
makeglossary_re = re.compile(r"^[^%]*\\makeglossary", re.MULTILINE)
beamer_re = re.compile(r"^[^%]*\\documentclass\{beamer\}", re.MULTILINE)
# search to find all files opened by Latex (recorded in .log file)
openout_re = re.compile(r"\\openout.*`(.*)'")
# An Action sufficient to build any generic tex file.
TeXAction = None
# An action to build a latex file.  This action might be needed more
# than once if we are dealing with labels and bibtex.
LaTeXAction = None
# An action to run BibTeX on a file.
BibTeXAction = None
# An action to run MakeIndex on a file.
MakeIndexAction = None
# An action to run MakeIndex (for nomencl) on a file.
MakeNclAction = None
# An action to run MakeIndex (for glossary) on a file.
MakeGlossaryAction = None
# NOTE(review): the six action slots above start as None and are presumably
# instantiated lazily elsewhere in this tool -- not visible in this chunk.
# Used as a return value of modify_env_var if the variable is not set.
class _Null:
    pass
# Sentinel: the class object itself (not an instance) marks "was unset";
# callers test it with 'saved_env[var] is _null'.
_null = _Null
# The user specifies the paths in env[variable], similar to other builders.
# They may be relative and must be converted to absolute, as expected
# by LaTeX and Co. The environment may already have some paths in
# env['ENV'][var]. These paths are honored, but the env[var] paths have
# higher precedence. All changes are un-done on exit.
def modify_env_var(env, var, abspath):
    """Prepend *abspath* (and any paths from env[var], made absolute) to
    the execution-environment variable env['ENV'][var].

    Returns the previous value of env['ENV'][var] so the caller can
    restore it afterwards; the sentinel class _null is returned when the
    variable was not set at all.
    """
    try:
        save = env['ENV'][var]
    except KeyError:
        save = _null
    env.PrependENVPath(var, abspath)
    try:
        # The user's env[var] paths get higher precedence than whatever
        # was already in env['ENV'][var].  (Resolves the old TODO(1.5):
        # map/lambda replaced with list comprehensions.)
        if SCons.Util.is_List(env[var]):
            env.PrependENVPath(var, [os.path.abspath(str(p)) for p in env[var]])
        else:
            # Split at os.pathsep to convert into absolute path
            env.PrependENVPath(var, [os.path.abspath(p) for p in str(env[var]).split(os.pathsep)])
    except KeyError:
        # env[var] itself is not set -- nothing extra to prepend.
        pass
    # Convert into a string explicitly to append ":" (without which it won't search system
    # paths as well). The problem is that env.AppendENVPath(var, ":")
    # does not work, refuses to append ":" (os.pathsep).
    if SCons.Util.is_List(env['ENV'][var]):
        env['ENV'][var] = os.pathsep.join(env['ENV'][var])
    # Append the trailing os.pathsep character here to catch the case with no env[var]
    env['ENV'][var] = env['ENV'][var] + os.pathsep
    return save
def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None):
    """A builder for LaTeX files that checks the output in the aux file
    and decides how many times to use LaTeXAction, and BibTeXAction.

    XXXLaTeXAction is either LaTeXAction (DVI output) or, when called
    from pdflatex.py, the PDF variant.  Returns 0 on success or the
    first non-zero exit status from any invoked action.
    """
    global must_rerun_latex
    # This routine is called with two actions. In this file for DVI builds
    # with LaTeXAction and from the pdflatex.py with PDFLaTeXAction
    # set this up now for the case where the user requests a different extension
    # for the target filename
    if (XXXLaTeXAction == LaTeXAction):
        callerSuffix = ".dvi"
    else:
        callerSuffix = env['PDFSUFFIX']
    basename = SCons.Util.splitext(str(source[0]))[0]
    basedir = os.path.split(str(source[0]))[0]
    basefile = os.path.split(str(basename))[1]
    abspath = os.path.abspath(basedir)
    # NOTE(review): targetext is computed but never used below.
    targetext = os.path.splitext(str(target[0]))[1]
    targetdir = os.path.split(str(target[0]))[0]
    # Point all TeX search-path variables at the source directory, keeping
    # the old values so they can be restored at the end of this call.
    saved_env = {}
    for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
        saved_env[var] = modify_env_var(env, var, abspath)
    # Create base file names with the target directory since the auxiliary files
    # will be made there. That's because the *COM variables have the cd
    # command in the prolog. We check
    # for the existence of files before opening them--even ones like the
    # aux file that TeX always creates--to make it possible to write tests
    # with stubs that don't necessarily generate all of the same files.
    targetbase = os.path.join(targetdir, basefile)
    # if there is a \makeindex there will be a .idx and thus
    # we have to run makeindex at least once to keep the build
    # happy even if there is no index.
    # Same for glossaries and nomenclature
    src_content = source[0].get_contents()
    run_makeindex = makeindex_re.search(src_content) and not os.path.exists(targetbase + '.idx')
    run_nomenclature = makenomenclature_re.search(src_content) and not os.path.exists(targetbase + '.nlo')
    run_glossary = makeglossary_re.search(src_content) and not os.path.exists(targetbase + '.glo')
    # Snapshot the content signature of every auxiliary file so that a
    # change after a latex run can be detected and trigger a rerun.
    saved_hashes = {}
    suffix_nodes = {}
    for suffix in all_suffixes:
        theNode = env.fs.File(targetbase + suffix)
        suffix_nodes[suffix] = theNode
        saved_hashes[suffix] = theNode.get_csig()
    if Verbose:
        print "hashes: ",saved_hashes
    must_rerun_latex = True
    #
    # routine to update MD5 hash and compare
    #
    # saved_hashes is bound as a default argument so the closure shares the
    # dict above; returns True (and flags a rerun) when the file changed.
    def check_MD5(filenode, suffix, saved_hashes=saved_hashes):
        global must_rerun_latex
        # two calls to clear old csig
        filenode.clear_memoized_values()
        filenode.ninfo = filenode.new_ninfo()
        new_md5 = filenode.get_csig()
        if saved_hashes[suffix] == new_md5:
            if Verbose:
                print "file %s not changed" % (targetbase+suffix)
            return False # unchanged
        saved_hashes[suffix] = new_md5
        must_rerun_latex = True
        if Verbose:
            print "file %s changed, rerunning Latex, new hash = " % (targetbase+suffix), new_md5
        return True # changed
    # generate the file name that latex will generate
    resultfilename = targetbase + callerSuffix
    count = 0
    # Main fixed-point loop: rerun latex (and its helper tools) until the
    # auxiliary files stabilize or $LATEXRETRIES is exhausted.  The loop
    # always executes at least once because must_rerun_latex is True here.
    while (must_rerun_latex and count < int(env.subst('$LATEXRETRIES'))) :
        result = XXXLaTeXAction(target, source, env)
        if result != 0:
            return result
        count = count + 1
        must_rerun_latex = False
        # Decide if various things need to be run, or run again.
        # Read the log file to find all .aux files
        logfilename = targetbase + '.log'
        logContent = ''
        auxfiles = []
        if os.path.exists(logfilename):
            logContent = open(logfilename, "rb").read()
            auxfiles = openout_aux_re.findall(logContent)
        # Now decide if bibtex will need to be run.
        # The information that bibtex reads from the .aux file is
        # pass-independent. If we find (below) that the .bbl file is unchanged,
        # then the last latex saw a correct bibliography.
        # Therefore only do this on the first pass
        if count == 1:
            for auxfilename in auxfiles:
                target_aux = os.path.join(targetdir, auxfilename)
                if os.path.exists(target_aux):
                    content = open(target_aux, "rb").read()
                    if string.find(content, "bibdata") != -1:
                        if Verbose:
                            print "Need to run bibtex"
                        bibfile = env.fs.File(targetbase)
                        result = BibTeXAction(bibfile, bibfile, env)
                        if result != 0:
                            return result
                        must_rerun_latex = check_MD5(suffix_nodes['.bbl'],'.bbl')
                        break
        # Now decide if latex will need to be run again due to index.
        if check_MD5(suffix_nodes['.idx'],'.idx') or (count == 1 and run_makeindex):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex"
            idxfile = suffix_nodes['.idx']
            result = MakeIndexAction(idxfile, idxfile, env)
            if result != 0:
                return result
        # TO-DO: need to add a way for the user to extend this list for whatever
        # auxiliary files they create in other (or their own) packages
        # Harder is the case where an action needs to be called -- that should be rare (I hope?)
        for index in check_suffixes:
            check_MD5(suffix_nodes[index],index)
        # Now decide if latex will need to be run again due to nomenclature.
        if check_MD5(suffix_nodes['.nlo'],'.nlo') or (count == 1 and run_nomenclature):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex for nomenclature"
            nclfile = suffix_nodes['.nlo']
            result = MakeNclAction(nclfile, nclfile, env)
            if result != 0:
                return result
        # Now decide if latex will need to be run again due to glossary.
        if check_MD5(suffix_nodes['.glo'],'.glo') or (count == 1 and run_glossary):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex for glossary"
            glofile = suffix_nodes['.glo']
            result = MakeGlossaryAction(glofile, glofile, env)
            if result != 0:
                return result
        # Now decide if latex needs to be run yet again to resolve warnings.
        if warning_rerun_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to latex or package rerun warning"
        if rerun_citations_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to 'Rerun to get citations correct' warning"
        if undefined_references_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to undefined references or citations"
        if (count >= int(env.subst('$LATEXRETRIES')) and must_rerun_latex):
            print "reached max number of retries on Latex ,",int(env.subst('$LATEXRETRIES'))
    # end of while loop
    # rename Latex's output to what the target name is
    if not (str(target[0]) == resultfilename and os.path.exists(resultfilename)):
        if os.path.exists(resultfilename):
            print "move %s to %s" % (resultfilename, str(target[0]), )
            shutil.move(resultfilename,str(target[0]))
    # Restore the environment variables saved at the top of this call.
    # Original comment (when TEXPICTS was not restored):
    # The TEXPICTS environment variable is needed by a dvi -> pdf step
    # later on Mac OSX so leave it
    #
    # It is also used when searching for pictures (implicit dependencies).
    # Why not set the variable again in the respective builder instead
    # of leaving local modifications in the environment? What if multiple
    # latex builds in different directories need different TEXPICTS?
    for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
        if var == 'TEXPICTS':
            continue
        if saved_env[var] is _null:
            try:
                del env['ENV'][var]
            except KeyError:
                pass # was never set
        else:
            env['ENV'][var] = saved_env[var]
    return result
def LaTeXAuxAction(target = None, source= None, env=None):
    """Build a DVI from LaTeX source, rerunning latex/bibtex/makeindex
    as needed to settle cross-references and bibliographies."""
    return InternalLaTeXAuxAction(LaTeXAction, target, source, env)
LaTeX_re = re.compile("\\\\document(style|class)")
def is_LaTeX(flist):
    """Return 1 if any node in *flist* contains a \\documentclass or
    \\documentstyle command (i.e. is LaTeX-flavored), else 0."""
    for node in flist:
        text = node.get_contents()
        if LaTeX_re.search(text) is not None:
            return 1
    return 0
def TeXLaTeXFunction(target = None, source= None, env=None):
    """A builder for TeX and LaTeX that scans the source file to
    decide the "flavor" of the source and then executes the appropriate
    program."""
    # Dispatch with guard clauses: LaTeX sources need the multi-pass
    # aux-file handling, plain TeX runs just once.
    if is_LaTeX(source):
        return LaTeXAuxAction(target, source, env)
    return TeXAction(target, source, env)
def TeXLaTeXStrFunction(target = None, source= None, env=None):
    """A strfunction for TeX and LaTeX that scans the source file to
    decide the "flavor" of the source and then returns the appropriate
    command string."""
    # Only produce a display string in dry-run (-n) mode; otherwise the
    # Action's own output is used.
    if not env.GetOption("no_exec"):
        return ''
    if is_LaTeX(source):
        command = env.subst('$LATEXCOM', 0, target, source)
    else:
        command = env.subst("$TEXCOM", 0, target, source)
    return command + " ..."
def tex_emitter(target, source, env):
    """An emitter for TeX and LaTeX sources.
    For LaTeX sources we try and find the common created files that
    are needed on subsequent runs of latex to finish tables of contents,
    bibliographies, indices, lists of figures, and hyperlink references.

    Registers each such auxiliary file as a SideEffect of the target and
    schedules it for cleaning; returns (target, source) unchanged.
    """
    targetbase = SCons.Util.splitext(str(target[0]))[0]
    basename = SCons.Util.splitext(str(source[0]))[0]
    basefile = os.path.split(str(basename))[1]
    basedir = os.path.split(str(source[0]))[0]
    abspath = os.path.abspath(basedir)
    # Remember the absolute source directory on the target node; used when
    # scanning for implicit dependencies (pictures, included files).
    target[0].attributes.path = abspath
    #
    # file names we will make use of in searching the sources and log file
    #
    # NOTE(review): emit_suffixes is computed but never used below.
    emit_suffixes = ['.aux', '.log', '.ilg', '.blg', '.nls', '.nlg', '.gls', '.glg'] + all_suffixes
    auxfilename = targetbase + '.aux'
    logfilename = targetbase + '.log'
    # The .aux and .log files are produced by every latex run, so always
    # register them.
    env.SideEffect(auxfilename,target[0])
    env.SideEffect(logfilename,target[0])
    env.Clean(target[0],auxfilename)
    env.Clean(target[0],logfilename)
    content = source[0].get_contents()
    idx_exists = os.path.exists(targetbase + '.idx')
    nlo_exists = os.path.exists(targetbase + '.nlo')
    glo_exists = os.path.exists(targetbase + '.glo')
    # Pairs of (feature detected in the source?, suffixes it produces).
    file_tests = [(auxfile_re.search(content),['.aux']),
                  (makeindex_re.search(content) or idx_exists,['.idx', '.ind', '.ilg']),
                  (bibliography_re.search(content),['.bbl', '.blg']),
                  (tableofcontents_re.search(content),['.toc']),
                  (listoffigures_re.search(content),['.lof']),
                  (listoftables_re.search(content),['.lot']),
                  (hyperref_re.search(content),['.out']),
                  (makenomenclature_re.search(content) or nlo_exists,['.nlo', '.nls', '.nlg']),
                  (makeglossary_re.search(content) or glo_exists,['.glo', '.gls', '.glg']),
                  (beamer_re.search(content),['.nav', '.snm', '.out', '.toc']) ]
    # Note we add the various makeindex files if the file produced by latex exists (.idx, .glo, .nlo)
    # This covers the case where the \makeindex, \makenomenclature, or \makeglossary
    # is not in the main file but we want to clean the files and those made by makeindex
    # TO-DO: need to add a way for the user to extend this list for whatever
    # auxiliary files they create in other (or their own) packages
    for (theSearch,suffix_list) in file_tests:
        if theSearch:
            for suffix in suffix_list:
                env.SideEffect(targetbase + suffix,target[0])
                env.Clean(target[0],targetbase + suffix)
    # read log file to get all other files that latex creates and will read on the next pass
    if os.path.exists(logfilename):
        content = open(logfilename, "rb").read()
        out_files = openout_re.findall(content)
        env.SideEffect(out_files,target[0])
        env.Clean(target[0],out_files)
    return (target, source)
TeXLaTeXAction = None

def generate(env):
    """Add Builders and construction variables for TeX to an Environment.

    Creates (once, at module level) the Actions for tex, latex, bibtex
    and the three makeindex flavors, wires the '.tex' suffix of the DVI
    builder to the flavor-detecting TeXLaTeXAction, and defines the
    command-line construction variables used by those actions.
    """
    # A generic tex file Action, sufficient for all tex files.
    global TeXAction
    if TeXAction is None:
        TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR")
    # An Action to build a latex file.  This might be needed more
    # than once if we are dealing with labels and bibtex.
    global LaTeXAction
    if LaTeXAction is None:
        LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR")
    # Define an action to run BibTeX on a file.
    global BibTeXAction
    if BibTeXAction is None:
        BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR")
    # Define an action to run MakeIndex on a file.
    global MakeIndexAction
    if MakeIndexAction is None:
        MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR")
    # Define an action to run MakeIndex on a file for nomenclatures.
    global MakeNclAction
    if MakeNclAction is None:
        MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR")
    # Define an action to run MakeIndex on a file for glossaries.
    global MakeGlossaryAction
    if MakeGlossaryAction is None:
        MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR")
    global TeXLaTeXAction
    if TeXLaTeXAction is None:
        TeXLaTeXAction = SCons.Action.Action(TeXLaTeXFunction,
                              strfunction=TeXLaTeXStrFunction)
    import dvi
    dvi.generate(env)
    bld = env['BUILDERS']['DVI']
    bld.add_action('.tex', TeXLaTeXAction)
    bld.add_emitter('.tex', tex_emitter)
    env['TEX'] = 'tex'
    env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
    env['TEXCOM'] = 'cd ${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'
    # Duplicate from latex.py.  If latex.py goes away, then this is still OK.
    env['LATEX'] = 'latex'
    env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
    env['LATEXCOM'] = 'cd ${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'
    env['LATEXRETRIES'] = 3
    env['BIBTEX'] = 'bibtex'
    env['BIBTEXFLAGS'] = SCons.Util.CLVar('')
    env['BIBTEXCOM'] = 'cd ${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'
    env['MAKEINDEX'] = 'makeindex'
    env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('')
    env['MAKEINDEXCOM'] = 'cd ${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'
    env['MAKEGLOSSARY'] = 'makeindex'
    env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist'
    env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg')
    env['MAKEGLOSSARYCOM'] = 'cd ${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'
    env['MAKENCL'] = 'makeindex'
    # BUGFIX: was '$nomencl.ist'.  The leading '$' made SCons substitute
    # the (undefined) construction variable $nomencl, so the style name
    # expanded to just '.ist' and makeindex received a bogus -s argument.
    # The nomencl package's style file is literally named 'nomencl.ist'.
    env['MAKENCLSTYLE'] = 'nomencl.ist'
    env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'
    env['MAKENCLCOM'] = 'cd ${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'
    # Duplicate from pdflatex.py.  If latex.py goes away, then this is still OK.
    env['PDFLATEX'] = 'pdflatex'
    env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
    env['PDFLATEXCOM'] = 'cd ${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
def exists(env):
    """Report whether the plain 'tex' executable is available; returns
    whatever env.Detect yields (path or None)."""
    detected = env.Detect('tex')
    return detected
| """SCons.Tool.tex
Tool-specific initialization for TeX.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tex.py 3603 2008/10/10 05:46:45 scons"
import os.path
import re
import string
import shutil
import SCons.Action
import SCons.Node
import SCons.Node.FS
import SCons.Util
# Module-level state shared by the rerun logic below.
Verbose = False
# Flag toggled by check_MD5 (in InternalLaTeXAuxAction) to request another
# latex pass.
must_rerun_latex = True
# these are files that just need to be checked for changes and then rerun latex
check_suffixes = ['.toc', '.lof', '.lot', '.out', '.nav', '.snm']
# these are files that require bibtex or makeindex to be run when they change
all_suffixes = check_suffixes + ['.bbl', '.idx', '.nlo', '.glo']
#
# regular expressions used to search for Latex features
# or outputs that require rerunning latex
#
# search for all .aux files opened by latex (recorded in the .log file)
openout_aux_re = re.compile(r"\\openout.*`(.*\.aux)'")
#printindex_re = re.compile(r"^[^%]*\\printindex", re.MULTILINE)
#printnomenclature_re = re.compile(r"^[^%]*\\printnomenclature", re.MULTILINE)
#printglossary_re = re.compile(r"^[^%]*\\printglossary", re.MULTILINE)
# search to find rerun warnings
warning_rerun_str = '(^LaTeX Warning:.*Rerun)|(^Package \w+ Warning:.*Rerun)'
warning_rerun_re = re.compile(warning_rerun_str, re.MULTILINE)
# search to find citation rerun warnings
rerun_citations_str = "^LaTeX Warning:.*\n.*Rerun to get citations correct"
rerun_citations_re = re.compile(rerun_citations_str, re.MULTILINE)
# search to find undefined references or citations warnings
undefined_references_str = '(^LaTeX Warning:.*undefined references)|(^Package \w+ Warning:.*undefined citations)'
undefined_references_re = re.compile(undefined_references_str, re.MULTILINE)
# used by the emitter
# NOTE(review): this pattern matches any character, so the '.aux' side
# effect is registered for every non-empty source -- presumably deliberate,
# since latex always writes an .aux file; confirm before tightening.
auxfile_re = re.compile(r".", re.MULTILINE)
tableofcontents_re = re.compile(r"^[^%]*\\tableofcontents", re.MULTILINE)
makeindex_re = re.compile(r"^[^%]*\\makeindex", re.MULTILINE)
bibliography_re = re.compile(r"^[^%]*\\bibliography", re.MULTILINE)
listoffigures_re = re.compile(r"^[^%]*\\listoffigures", re.MULTILINE)
listoftables_re = re.compile(r"^[^%]*\\listoftables", re.MULTILINE)
hyperref_re = re.compile(r"^[^%]*\\usepackage.*\{hyperref\}", re.MULTILINE)
makenomenclature_re = re.compile(r"^[^%]*\\makenomenclature", re.MULTILINE)
makeglossary_re = re.compile(r"^[^%]*\\makeglossary", re.MULTILINE)
beamer_re = re.compile(r"^[^%]*\\documentclass\{beamer\}", re.MULTILINE)
# search to find all files opened by Latex (recorded in .log file)
openout_re = re.compile(r"\\openout.*`(.*)'")
# An Action sufficient to build any generic tex file.
TeXAction = None
# An action to build a latex file.  This action might be needed more
# than once if we are dealing with labels and bibtex.
LaTeXAction = None
# An action to run BibTeX on a file.
BibTeXAction = None
# An action to run MakeIndex on a file.
MakeIndexAction = None
# An action to run MakeIndex (for nomencl) on a file.
MakeNclAction = None
# An action to run MakeIndex (for glossary) on a file.
MakeGlossaryAction = None
# Used as a return value of modify_env_var if the variable is not set.
class _Null:
    pass
# Sentinel: note this binds the class object itself (not an instance);
# callers compare with 'is _null'.
_null = _Null
# The user specifies the paths in env[variable], similar to other builders.
# They may be relative and must be converted to absolute, as expected
# by LaTeX and Co. The environment may already have some paths in
# env['ENV'][var]. These paths are honored, but the env[var] paths have
# higher precedence. All changes are un-done on exit.
def modify_env_var(env, var, abspath):
    """Prepend *abspath* (and any paths listed in env[var]) to the
    execution-environment search path env['ENV'][var].

    Returns the previous value of env['ENV'][var] so the caller can
    restore it later, or the _null sentinel if it was not set.
    """
    try:
        save = env['ENV'][var]
    except KeyError:
        save = _null
    env.PrependENVPath(var, abspath)
    try:
        if SCons.Util.is_List(env[var]):
            #TODO(1.5) env.PrependENVPath(var, [os.path.abspath(str(p)) for p in env[var]])
            env.PrependENVPath(var, map(lambda p: os.path.abspath(str(p)), env[var]))
        else:
            # Split at os.pathsep to convert into absolute path
            #TODO(1.5) env.PrependENVPath(var, [os.path.abspath(p) for p in str(env[var]).split(os.pathsep)])
            env.PrependENVPath(var, map(lambda p: os.path.abspath(p), str(env[var]).split(os.pathsep)))
    except KeyError:
        # env[var] is simply not set; only abspath was prepended above.
        pass
    # Convert into a string explicitly to append ":" (without which it won't search system
    # paths as well). The problem is that env.AppendENVPath(var, ":")
    # does not work, refuses to append ":" (os.pathsep).
    if SCons.Util.is_List(env['ENV'][var]):
        env['ENV'][var] = os.pathsep.join(env['ENV'][var])
    # Append the trailing os.pathsep character here to catch the case with no env[var]
    env['ENV'][var] = env['ENV'][var] + os.pathsep
    return save
def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None):
    """A builder for LaTeX files that checks the output in the aux file
    and decides how many times to use LaTeXAction, and BibTeXAction.

    XXXLaTeXAction is either LaTeXAction (DVI build) or PDFLaTeXAction
    (PDF build, passed in from pdflatex.py).  Runs the action up to
    $LATEXRETRIES times, interleaving bibtex/makeindex runs as the
    auxiliary files change, then renames the output to the target name.
    Returns 0 on success or the first failing action's exit status.
    """
    global must_rerun_latex
    # This routine is called with two actions. In this file for DVI builds
    # with LaTeXAction and from the pdflatex.py with PDFLaTeXAction
    # set this up now for the case where the user requests a different extension
    # for the target filename
    if (XXXLaTeXAction == LaTeXAction):
        callerSuffix = ".dvi"
    else:
        callerSuffix = env['PDFSUFFIX']
    basename = SCons.Util.splitext(str(source[0]))[0]
    basedir = os.path.split(str(source[0]))[0]
    basefile = os.path.split(str(basename))[1]
    abspath = os.path.abspath(basedir)
    targetext = os.path.splitext(str(target[0]))[1]
    targetdir = os.path.split(str(target[0]))[0]
    # Point the TeX search-path variables at the source directory, saving
    # the old values so they can be restored at the end.
    # NOTE(review): relies on SCons.Scanner being importable via the SCons
    # package; it is not imported at the top of this file -- confirm.
    saved_env = {}
    for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
        saved_env[var] = modify_env_var(env, var, abspath)
    # Create a base file names with the target directory since the auxiliary files
    # will be made there.   That's because the *COM variables have the cd
    # command in the prolog. We check
    # for the existence of files before opening them--even ones like the
    # aux file that TeX always creates--to make it possible to write tests
    # with stubs that don't necessarily generate all of the same files.
    targetbase = os.path.join(targetdir, basefile)
    # if there is a \makeindex there will be a .idx and thus
    # we have to run makeindex at least once to keep the build
    # happy even if there is no index.
    # Same for glossaries and nomenclature
    src_content = source[0].get_contents()
    run_makeindex = makeindex_re.search(src_content) and not os.path.exists(targetbase + '.idx')
    run_nomenclature = makenomenclature_re.search(src_content) and not os.path.exists(targetbase + '.nlo')
    run_glossary = makeglossary_re.search(src_content) and not os.path.exists(targetbase + '.glo')
    # Record the content signature of every auxiliary file before the first
    # run, so changes between passes can be detected.
    saved_hashes = {}
    suffix_nodes = {}
    for suffix in all_suffixes:
        theNode = env.fs.File(targetbase + suffix)
        suffix_nodes[suffix] = theNode
        saved_hashes[suffix] = theNode.get_csig()
    if Verbose:
        print "hashes: ",saved_hashes
    must_rerun_latex = True
    #
    # routine to update MD5 hash and compare
    #
    # NOTE: saved_hashes is deliberately captured via a mutable default
    # argument so updates persist across calls within this build.
    def check_MD5(filenode, suffix, saved_hashes=saved_hashes):
        global must_rerun_latex
        # two calls to clear old csig
        filenode.clear_memoized_values()
        filenode.ninfo = filenode.new_ninfo()
        new_md5 = filenode.get_csig()
        if saved_hashes[suffix] == new_md5:
            if Verbose:
                print "file %s not changed" % (targetbase+suffix)
            return False        # unchanged
        saved_hashes[suffix] = new_md5
        must_rerun_latex = True
        if Verbose:
            print "file %s changed, rerunning Latex, new hash = " % (targetbase+suffix), new_md5
        return True     # changed
    # generate the file name that latex will generate
    resultfilename = targetbase + callerSuffix
    count = 0
    while (must_rerun_latex and count < int(env.subst('$LATEXRETRIES'))) :
        result = XXXLaTeXAction(target, source, env)
        if result != 0:
            return result
        count = count + 1
        must_rerun_latex = False
        # Decide if various things need to be run, or run again.
        # Read the log file to find all .aux files
        logfilename = targetbase + '.log'
        logContent = ''
        auxfiles = []
        if os.path.exists(logfilename):
            logContent = open(logfilename, "rb").read()
            auxfiles = openout_aux_re.findall(logContent)
        # Now decide if bibtex will need to be run.
        # The information that bibtex reads from the .aux file is
        # pass-independent. If we find (below) that the .bbl file is unchanged,
        # then the last latex saw a correct bibliography.
        # Therefore only do this on the first pass
        if count == 1:
            for auxfilename in auxfiles:
                target_aux = os.path.join(targetdir, auxfilename)
                if os.path.exists(target_aux):
                    content = open(target_aux, "rb").read()
                    if string.find(content, "bibdata") != -1:
                        if Verbose:
                            print "Need to run bibtex"
                        bibfile = env.fs.File(targetbase)
                        result = BibTeXAction(bibfile, bibfile, env)
                        if result != 0:
                            return result
                        must_rerun_latex = check_MD5(suffix_nodes['.bbl'],'.bbl')
                        break
        # Now decide if latex will need to be run again due to index.
        if check_MD5(suffix_nodes['.idx'],'.idx') or (count == 1 and run_makeindex):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex"
            idxfile = suffix_nodes['.idx']
            result = MakeIndexAction(idxfile, idxfile, env)
            if result != 0:
                return result
        # TO-DO: need to add a way for the user to extend this list for whatever
        # auxiliary files they create in other (or their own) packages
        # Harder is case is where an action needs to be called -- that should be rare (I hope?)
        for index in check_suffixes:
            check_MD5(suffix_nodes[index],index)
        # Now decide if latex will need to be run again due to nomenclature.
        if check_MD5(suffix_nodes['.nlo'],'.nlo') or (count == 1 and run_nomenclature):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex for nomenclature"
            nclfile = suffix_nodes['.nlo']
            result = MakeNclAction(nclfile, nclfile, env)
            if result != 0:
                return result
        # Now decide if latex will need to be run again due to glossary.
        if check_MD5(suffix_nodes['.glo'],'.glo') or (count == 1 and run_glossary):
            # We must run makeindex
            if Verbose:
                print "Need to run makeindex for glossary"
            glofile = suffix_nodes['.glo']
            result = MakeGlossaryAction(glofile, glofile, env)
            if result != 0:
                return result
        # Now decide if latex needs to be run yet again to resolve warnings.
        if warning_rerun_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to latex or package rerun warning"
        if rerun_citations_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to 'Rerun to get citations correct' warning"
        if undefined_references_re.search(logContent):
            must_rerun_latex = True
            if Verbose:
                print "rerun Latex due to undefined references or citations"
        if (count >= int(env.subst('$LATEXRETRIES')) and must_rerun_latex):
            print "reached max number of retries on Latex ,",int(env.subst('$LATEXRETRIES'))
    # end of while loop
    # rename Latex's output to what the target name is
    if not (str(target[0]) == resultfilename  and  os.path.exists(resultfilename)):
        if os.path.exists(resultfilename):
            print "move %s to %s" % (resultfilename, str(target[0]), )
            shutil.move(resultfilename,str(target[0]))
    # Original comment (when TEXPICTS was not restored):
    # The TEXPICTS enviroment variable is needed by a dvi -> pdf step
    # later on Mac OSX so leave it
    #
    # It is also used when searching for pictures (implicit dependencies).
    # Why not set the variable again in the respective builder instead
    # of leaving local modifications in the environment? What if multiple
    # latex builds in different directories need different TEXPICTS?
    for var in SCons.Scanner.LaTeX.LaTeX.env_variables:
        if var == 'TEXPICTS':
            continue
        if saved_env[var] is _null:
            try:
                del env['ENV'][var]
            except KeyError:
                pass # was never set
        else:
            env['ENV'][var] = saved_env[var]
    return result
def LaTeXAuxAction(target = None, source= None, env=None):
    """Build a DVI from LaTeX source, rerunning latex/bibtex/makeindex
    as needed to settle cross-references and bibliographies."""
    return InternalLaTeXAuxAction(LaTeXAction, target, source, env)
LaTeX_re = re.compile("\\\\document(style|class)")
def is_LaTeX(flist):
    """Return 1 if any node in *flist* contains a \\documentclass or
    \\documentstyle command (i.e. is LaTeX-flavored), else 0."""
    for node in flist:
        text = node.get_contents()
        if LaTeX_re.search(text) is not None:
            return 1
    return 0
def TeXLaTeXFunction(target = None, source= None, env=None):
    """A builder for TeX and LaTeX that scans the source file to
    decide the "flavor" of the source and then executes the appropriate
    program."""
    # Dispatch with guard clauses: LaTeX sources need the multi-pass
    # aux-file handling, plain TeX runs just once.
    if is_LaTeX(source):
        return LaTeXAuxAction(target, source, env)
    return TeXAction(target, source, env)
def TeXLaTeXStrFunction(target = None, source= None, env=None):
    """A strfunction for TeX and LaTeX that scans the source file to
    decide the "flavor" of the source and then returns the appropriate
    command string."""
    # Only produce a display string in dry-run (-n) mode; otherwise the
    # Action's own output is used.
    if not env.GetOption("no_exec"):
        return ''
    if is_LaTeX(source):
        command = env.subst('$LATEXCOM', 0, target, source)
    else:
        command = env.subst("$TEXCOM", 0, target, source)
    return command + " ..."
def tex_emitter(target, source, env):
    """An emitter for TeX and LaTeX sources.
    For LaTeX sources we try and find the common created files that
    are needed on subsequent runs of latex to finish tables of contents,
    bibliographies, indices, lists of figures, and hyperlink references.

    Registers each such auxiliary file as a SideEffect of the target and
    schedules it for cleaning; returns (target, source) unchanged.
    """
    targetbase = SCons.Util.splitext(str(target[0]))[0]
    basename = SCons.Util.splitext(str(source[0]))[0]
    basefile = os.path.split(str(basename))[1]
    basedir = os.path.split(str(source[0]))[0]
    abspath = os.path.abspath(basedir)
    # Remember the absolute source directory on the target node; used when
    # scanning for implicit dependencies (pictures, included files).
    target[0].attributes.path = abspath
    #
    # file names we will make use of in searching the sources and log file
    #
    # NOTE(review): emit_suffixes is computed but never used below.
    emit_suffixes = ['.aux', '.log', '.ilg', '.blg', '.nls', '.nlg', '.gls', '.glg'] + all_suffixes
    auxfilename = targetbase + '.aux'
    logfilename = targetbase + '.log'
    # The .aux and .log files are produced by every latex run, so always
    # register them.
    env.SideEffect(auxfilename,target[0])
    env.SideEffect(logfilename,target[0])
    env.Clean(target[0],auxfilename)
    env.Clean(target[0],logfilename)
    content = source[0].get_contents()
    idx_exists = os.path.exists(targetbase + '.idx')
    nlo_exists = os.path.exists(targetbase + '.nlo')
    glo_exists = os.path.exists(targetbase + '.glo')
    # Pairs of (feature detected in the source?, suffixes it produces).
    file_tests = [(auxfile_re.search(content),['.aux']),
                  (makeindex_re.search(content) or idx_exists,['.idx', '.ind', '.ilg']),
                  (bibliography_re.search(content),['.bbl', '.blg']),
                  (tableofcontents_re.search(content),['.toc']),
                  (listoffigures_re.search(content),['.lof']),
                  (listoftables_re.search(content),['.lot']),
                  (hyperref_re.search(content),['.out']),
                  (makenomenclature_re.search(content) or nlo_exists,['.nlo', '.nls', '.nlg']),
                  (makeglossary_re.search(content) or glo_exists,['.glo', '.gls', '.glg']),
                  (beamer_re.search(content),['.nav', '.snm', '.out', '.toc']) ]
    # Note we add the various makeindex files if the file produced by latex exists (.idx, .glo, .nlo)
    # This covers the case where the \makeindex, \makenomenclature, or \makeglossary
    # is not in the main file but we want to clean the files and those made by makeindex
    # TO-DO: need to add a way for the user to extend this list for whatever
    # auxiliary files they create in other (or their own) packages
    for (theSearch,suffix_list) in file_tests:
        if theSearch:
            for suffix in suffix_list:
                env.SideEffect(targetbase + suffix,target[0])
                env.Clean(target[0],targetbase + suffix)
    # read log file to get all other files that latex creates and will read on the next pass
    if os.path.exists(logfilename):
        content = open(logfilename, "rb").read()
        out_files = openout_re.findall(content)
        env.SideEffect(out_files,target[0])
        env.Clean(target[0],out_files)
    return (target, source)
TeXLaTeXAction = None

def generate(env):
    """Add Builders and construction variables for TeX to an Environment.

    Creates (once, at module level) the Actions for tex, latex, bibtex
    and the three makeindex flavors, wires the '.tex' suffix of the DVI
    builder to the flavor-detecting TeXLaTeXAction, and defines the
    command-line construction variables used by those actions.
    """
    # A generic tex file Action, sufficient for all tex files.
    global TeXAction
    if TeXAction is None:
        TeXAction = SCons.Action.Action("$TEXCOM", "$TEXCOMSTR")
    # An Action to build a latex file.  This might be needed more
    # than once if we are dealing with labels and bibtex.
    global LaTeXAction
    if LaTeXAction is None:
        LaTeXAction = SCons.Action.Action("$LATEXCOM", "$LATEXCOMSTR")
    # Define an action to run BibTeX on a file.
    global BibTeXAction
    if BibTeXAction is None:
        BibTeXAction = SCons.Action.Action("$BIBTEXCOM", "$BIBTEXCOMSTR")
    # Define an action to run MakeIndex on a file.
    global MakeIndexAction
    if MakeIndexAction is None:
        MakeIndexAction = SCons.Action.Action("$MAKEINDEXCOM", "$MAKEINDEXCOMSTR")
    # Define an action to run MakeIndex on a file for nomenclatures.
    global MakeNclAction
    if MakeNclAction is None:
        MakeNclAction = SCons.Action.Action("$MAKENCLCOM", "$MAKENCLCOMSTR")
    # Define an action to run MakeIndex on a file for glossaries.
    global MakeGlossaryAction
    if MakeGlossaryAction is None:
        MakeGlossaryAction = SCons.Action.Action("$MAKEGLOSSARYCOM", "$MAKEGLOSSARYCOMSTR")
    global TeXLaTeXAction
    if TeXLaTeXAction is None:
        TeXLaTeXAction = SCons.Action.Action(TeXLaTeXFunction,
                              strfunction=TeXLaTeXStrFunction)
    import dvi
    dvi.generate(env)
    bld = env['BUILDERS']['DVI']
    bld.add_action('.tex', TeXLaTeXAction)
    bld.add_emitter('.tex', tex_emitter)
    env['TEX'] = 'tex'
    env['TEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
    env['TEXCOM'] = 'cd ${TARGET.dir} && $TEX $TEXFLAGS ${SOURCE.file}'
    # Duplicate from latex.py.  If latex.py goes away, then this is still OK.
    env['LATEX'] = 'latex'
    env['LATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
    env['LATEXCOM'] = 'cd ${TARGET.dir} && $LATEX $LATEXFLAGS ${SOURCE.file}'
    env['LATEXRETRIES'] = 3
    env['BIBTEX'] = 'bibtex'
    env['BIBTEXFLAGS'] = SCons.Util.CLVar('')
    env['BIBTEXCOM'] = 'cd ${TARGET.dir} && $BIBTEX $BIBTEXFLAGS ${SOURCE.filebase}'
    env['MAKEINDEX'] = 'makeindex'
    env['MAKEINDEXFLAGS'] = SCons.Util.CLVar('')
    env['MAKEINDEXCOM'] = 'cd ${TARGET.dir} && $MAKEINDEX $MAKEINDEXFLAGS ${SOURCE.file}'
    env['MAKEGLOSSARY'] = 'makeindex'
    env['MAKEGLOSSARYSTYLE'] = '${SOURCE.filebase}.ist'
    env['MAKEGLOSSARYFLAGS'] = SCons.Util.CLVar('-s ${MAKEGLOSSARYSTYLE} -t ${SOURCE.filebase}.glg')
    env['MAKEGLOSSARYCOM'] = 'cd ${TARGET.dir} && $MAKEGLOSSARY ${SOURCE.filebase}.glo $MAKEGLOSSARYFLAGS -o ${SOURCE.filebase}.gls'
    env['MAKENCL'] = 'makeindex'
    # BUGFIX: was '$nomencl.ist'.  The leading '$' made SCons substitute
    # the (undefined) construction variable $nomencl, so the style name
    # expanded to just '.ist' and makeindex received a bogus -s argument.
    # The nomencl package's style file is literally named 'nomencl.ist'.
    env['MAKENCLSTYLE'] = 'nomencl.ist'
    env['MAKENCLFLAGS'] = '-s ${MAKENCLSTYLE} -t ${SOURCE.filebase}.nlg'
    env['MAKENCLCOM'] = 'cd ${TARGET.dir} && $MAKENCL ${SOURCE.filebase}.nlo $MAKENCLFLAGS -o ${SOURCE.filebase}.nls'
    # Duplicate from pdflatex.py.  If latex.py goes away, then this is still OK.
    env['PDFLATEX'] = 'pdflatex'
    env['PDFLATEXFLAGS'] = SCons.Util.CLVar('-interaction=nonstopmode')
    env['PDFLATEXCOM'] = 'cd ${TARGET.dir} && $PDFLATEX $PDFLATEXFLAGS ${SOURCE.file}'
def exists(env):
return env.Detect('tex') | en | 0.844202 | SCons.Tool.tex Tool-specific initialization for TeX. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# # these are files that just need to be checked for changes and then rerun latex # these are files that require bibtex or makeindex to be run when they change # # regular expressions used to search for Latex features # or outputs that require rerunning latex # # search for all .aux files opened by latex (recorded in the .log file) #printindex_re = re.compile(r"^[^%]*\\printindex", re.MULTILINE) #printnomenclature_re = re.compile(r"^[^%]*\\printnomenclature", re.MULTILINE) #printglossary_re = re.compile(r"^[^%]*\\printglossary", re.MULTILINE) # search to find rerun warnings # search to find citation rerun warnings # search to find undefined references or citations warnings # used by the emitter # search to find all files opened by Latex (recorded in .log file) # An Action sufficient to build any generic tex file. # An action to build a latex file. This action might be needed more # than once if we are dealing with labels and bibtex. # An action to run BibTeX on a file. # An action to run MakeIndex on a file. # An action to run MakeIndex (for nomencl) on a file. # An action to run MakeIndex (for glossary) on a file. # Used as a return value of modify_env_var if the variable is not set. # The user specifies the paths in env[variable], similar to other builders. # They may be relative and must be converted to absolute, as expected # by LaTeX and Co. The environment may already have some paths in # env['ENV'][var]. These paths are honored, but the env[var] paths have # higher precedence. All changes are un-done on exit. #TODO(1.5) env.PrependENVPath(var, [os.path.abspath(str(p)) for p in env[var]]) # Split at os.pathsep to convert into absolute path #TODO(1.5) env.PrependENVPath(var, [os.path.abspath(p) for p in str(env[var]).split(os.pathsep)]) # Convert into a string explicitly to append ":" (without which it won't search system # paths as well). The problem is that env.AppendENVPath(var, ":") # does not work, refuses to append ":" (os.pathsep). 
# Append the trailing os.pathsep character here to catch the case with no env[var] A builder for LaTeX files that checks the output in the aux file and decides how many times to use LaTeXAction, and BibTeXAction. # This routine is called with two actions. In this file for DVI builds # with LaTeXAction and from the pdflatex.py with PDFLaTeXAction # set this up now for the case where the user requests a different extension # for the target filename # Create a base file names with the target directory since the auxiliary files # will be made there. That's because the *COM variables have the cd # command in the prolog. We check # for the existence of files before opening them--even ones like the # aux file that TeX always creates--to make it possible to write tests # with stubs that don't necessarily generate all of the same files. # if there is a \makeindex there will be a .idx and thus # we have to run makeindex at least once to keep the build # happy even if there is no index. # Same for glossaries and nomenclature # # routine to update MD5 hash and compare # # two calls to clear old csig # unchanged # changed # generate the file name that latex will generate # Decide if various things need to be run, or run again. # Read the log file to find all .aux files # Now decide if bibtex will need to be run. # The information that bibtex reads from the .aux file is # pass-independent. If we find (below) that the .bbl file is unchanged, # then the last latex saw a correct bibliography. # Therefore only do this on the first pass # Now decide if latex will need to be run again due to index. # We must run makeindex # TO-DO: need to add a way for the user to extend this list for whatever # auxiliary files they create in other (or their own) packages # Harder is case is where an action needs to be called -- that should be rare (I hope?) # Now decide if latex will need to be run again due to nomenclature. 
# We must run makeindex # Now decide if latex will need to be run again due to glossary. # We must run makeindex # Now decide if latex needs to be run yet again to resolve warnings. # end of while loop # rename Latex's output to what the target name is # Original comment (when TEXPICTS was not restored): # The TEXPICTS enviroment variable is needed by a dvi -> pdf step # later on Mac OSX so leave it # # It is also used when searching for pictures (implicit dependencies). # Why not set the variable again in the respective builder instead # of leaving local modifications in the environment? What if multiple # latex builds in different directories need different TEXPICTS? # was never set # Scan a file list to decide if it's TeX- or LaTeX-flavored. A builder for TeX and LaTeX that scans the source file to decide the "flavor" of the source and then executes the appropriate program. A strfunction for TeX and LaTeX that scans the source file to decide the "flavor" of the source and then returns the appropriate command string. An emitter for TeX and LaTeX sources. For LaTeX sources we try and find the common created files that are needed on subsequent runs of latex to finish tables of contents, bibliographies, indices, lists of figures, and hyperlink references. # # file names we will make use of in searching the sources and log file # # Note we add the various makeindex files if the file produced by latex exists (.idx, .glo, .nlo) # This covers the case where the \makeindex, \makenomenclature, or \makeglossary # is not in the main file but we want to clean the files and those made by makeindex # TO-DO: need to add a way for the user to extend this list for whatever # auxiliary files they create in other (or their own) packages # read log file to get all other files that latex creates and will read on the next pass Add Builders and construction variables for TeX to an Environment. # A generic tex file Action, sufficient for all tex files. # An Action to build a latex file. 
This might be needed more # than once if we are dealing with labels and bibtex. # Define an action to run BibTeX on a file. # Define an action to run MakeIndex on a file. # Define an action to run MakeIndex on a file for nomenclatures. # Define an action to run MakeIndex on a file for glossaries. # Duplicate from latex.py. If latex.py goes away, then this is still OK. # Duplicate from pdflatex.py. If latex.py goes away, then this is still OK. | 1.689106 | 2 |
moler/cmd/unix/env.py | jochenparm/moler | 57 | 6615715 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Env command module.
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import ParsingDone
class Env(GenericUnixCommand):
def __init__(self, connection, prompt=None, newline_chars=None, runner=None):
super(Env, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
self.ret_required = True
def build_command_string(self):
cmd = "env"
return cmd
def on_new_line(self, line, is_full_line):
if is_full_line:
try:
self._parse_name_line(line)
except ParsingDone:
pass
return super(Env, self).on_new_line(line, is_full_line)
_re_name_line = re.compile(r"^(?P<title>\S+)=(?P<content>.*)$")
def _parse_name_line(self, line):
if self._regex_helper.search_compiled(Env._re_name_line, line):
name = self._regex_helper.group("title")
self.current_ret[name] = self._regex_helper.group("content")
raise ParsingDone
COMMAND_OUTPUT = """
host:~# env
LESSKEY=/etc/lesskey.bin
NNTPSERVER=news
MANPATH=/usr/share/man:/usr/local/man:/usr/local/share/man
XDG_SESSION_ID=26352
HOSTNAME=FZM-TDD-249
XKEYSYMDB=/usr/X11R6/lib/X11/XKeysymDB
HOST=FZM-TDD-249
TERM=xterm-mono
SHELL=/bin/bash
PROFILEREAD=true
HISTSIZE=1000
SSH_CLIENT=10.83.200.37 40356 22
MORE=-sl
OLDPWD=/root
SSH_TTY=/dev/pts/3
NO_PROXY=localhost, 127.0.0.1, 192.168.0.0/16, 10.83.0.0/16, 10.254.0.0/16, 10.0.0.0/16
http_proxy=http://172.16.31.10:8080
JRE_HOME=/usr/lib64/jvm/jre-1.7.0
USER=root
LS_COLORS=
XNLSPATH=/usr/share/X11/nls
QEMU_AUDIO_DRV=pa
HOSTTYPE=x86_64
ftp_proxy=http://172.16.31.10:8080
CONFIG_SITE=/usr/share/site/x86_64-unknown-linux-gnu
FROM_HEADER=
PAGER=less
CSHEDIT=emacs
XDG_CONFIG_DIRS=/etc/xdg
LIBGL_DEBUG=quiet
MINICOM=-c on
MAIL=/var/mail/root
PATH=/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin:/usr/bin/X11:/usr/games:/usr/lib/mit/bin:/usr/lib/mit/sbin:/home/emssim/lte1702/bin/shared/
CPU=x86_64
JAVA_BINDIR=/usr/java/latest/bin
SSH_SENDS_LOCALE=yes
INPUTRC=/etc/inputrc
PWD=/l
gopher_proxy=
JAVA_HOME=/usr/java/latest
LANG=en_US.UTF-8
PYTHONSTARTUP=/etc/pythonstart
https_proxy=http://172.16.31.10:8080
GPG_TTY=/dev/pts/3
AUDIODRIVER=pulseaudio
QT_SYSTEM_DIR=/usr/share/desktop-data
SHLVL=1
HOME=/root
ALSA_CONFIG_PATH=/etc/alsa-pulse.conf
SDL_AUDIODRIVER=pulse
LESS_ADVANCED_PREPROCESSOR=no
OSTYPE=linux
LS_OPTIONS=-A -N --color=none -T 0
no_proxy=localhost, 127.0.0.1, 192.168.0.0/16, 10.83.0.0/16, 10.254.0.0/16, 10.0.0.0/16
XCURSOR_THEME=DMZ
WINDOWMANAGER=/usr/bin/kde4
G_FILENAME_ENCODING=@locale,UTF-8,ISO-8859-15,CP1252
LESS=-M -I -R
MACHTYPE=x86_64-suse-linux
LOGNAME=root
CVS_RSH=ssh
XDG_DATA_DIRS=/usr/share
SSH_CONNECTION=10.83.200.37 40356 10.83.205.103 22
LESSOPEN=lessopen.sh %s
XDG_RUNTIME_DIR=/run/user/0
BTS_SITE_MANAGER_INSTALL_PATH=/opt/NSN/Managers/BTS Site/BTS Site Manager
VDPAU_DRIVER=va_gl
NO_AT_BRIDGE=1
LESSCLOSE=lessclose.sh %s %s
G_BROKEN_FILENAMES=1
JAVA_ROOT=/usr/java/latest
COLORTERM=1
BASH_FUNC_mc%%=() { . /usr/share/mc/mc-wrapper.sh
_=/usr/bin/env
host:~#"""
COMMAND_RESULT = {
'ALSA_CONFIG_PATH': '/etc/alsa-pulse.conf',
'AUDIODRIVER': 'pulseaudio',
'BASH_FUNC_mc%%': '() { . /usr/share/mc/mc-wrapper.sh',
'BTS_SITE_MANAGER_INSTALL_PATH': '/opt/NSN/Managers/BTS Site/BTS Site Manager',
'COLORTERM': '1',
'CONFIG_SITE': '/usr/share/site/x86_64-unknown-linux-gnu',
'CPU': 'x86_64',
'CSHEDIT': 'emacs',
'CVS_RSH': 'ssh',
'FROM_HEADER': '',
'GPG_TTY': '/dev/pts/3',
'G_BROKEN_FILENAMES': '1',
'G_FILENAME_ENCODING': '@locale,UTF-8,ISO-8859-15,CP1252',
'HISTSIZE': '1000',
'HOME': '/root',
'HOST': 'FZM-TDD-249',
'HOSTNAME': 'FZM-TDD-249',
'HOSTTYPE': 'x86_64',
'INPUTRC': '/etc/inputrc',
'JAVA_BINDIR': '/usr/java/latest/bin',
'JAVA_HOME': '/usr/java/latest',
'JAVA_ROOT': '/usr/java/latest',
'JRE_HOME': '/usr/lib64/jvm/jre-1.7.0',
'LANG': 'en_US.UTF-8',
'LESS': '-M -I -R',
'LESSCLOSE': 'lessclose.sh %s %s',
'LESSKEY': '/etc/lesskey.bin',
'LESSOPEN': 'lessopen.sh %s',
'LESS_ADVANCED_PREPROCESSOR': 'no',
'LIBGL_DEBUG': 'quiet',
'LOGNAME': 'root',
'LS_COLORS': '',
'LS_OPTIONS': '-A -N --color=none -T 0',
'MACHTYPE': 'x86_64-suse-linux',
'MAIL': '/var/mail/root',
'MANPATH': '/usr/share/man:/usr/local/man:/usr/local/share/man',
'MINICOM': '-c on',
'MORE': '-sl',
'NNTPSERVER': 'news',
'NO_AT_BRIDGE': '1',
'NO_PROXY': 'localhost, 127.0.0.1, 1172.16.58.3/16, 10.83.0.0/16, 10.254.0.0/16, 10.0.0.0/16',
'OLDPWD': '/root',
'OSTYPE': 'linux',
'PAGER': 'less',
'PATH': '/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin:/usr/bin/X11:/usr/games:/usr/lib/mit/bin:/usr/lib/mit/sbin:/home/emssim/lte1702/bin/shared/',
'PROFILEREAD': 'true',
'PWD': <PASSWORD>',
'PYTHONSTARTUP': '/etc/pythonstart',
'QEMU_AUDIO_DRV': 'pa',
'QT_SYSTEM_DIR': '/usr/share/desktop-data',
'SDL_AUDIODRIVER': 'pulse',
'SHELL': '/bin/bash',
'SHLVL': '1',
'SSH_CLIENT': '10.83.200.37 40356 22',
'SSH_CONNECTION': '10.83.200.37 40356 10.83.205.103 22',
'SSH_SENDS_LOCALE': 'yes',
'SSH_TTY': '/dev/pts/3',
'TERM': 'xterm-mono',
'USER': 'root',
'VDPAU_DRIVER': 'va_gl',
'WINDOWMANAGER': '/usr/bin/kde4',
'XCURSOR_THEME': 'DMZ',
'XDG_CONFIG_DIRS': '/etc/xdg',
'XDG_DATA_DIRS': '/usr/share',
'XDG_RUNTIME_DIR': '/run/user/0',
'XDG_SESSION_ID': '26352',
'XKEYSYMDB': '/usr/X11R6/lib/X11/XKeysymDB',
'XNLSPATH': '/usr/share/X11/nls',
'_': '/usr/bin/env',
'ftp_proxy': 'http://172.16.31.10:8080',
'gopher_proxy': '',
'http_proxy': 'http://172.16.31.10:8080',
'https_proxy': 'http://172.16.31.10:8080',
'no_proxy': 'localhost, 127.0.0.1, 192.168.0.0/16, 10.83.0.0/16, 10.254.0.0/16, 10.0.0.0/16'
}
COMMAND_KWARGS = {}
| # -*- coding: utf-8 -*-
"""
Env command module.
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
import re
from moler.cmd.unix.genericunix import GenericUnixCommand
from moler.exceptions import ParsingDone
class Env(GenericUnixCommand):
def __init__(self, connection, prompt=None, newline_chars=None, runner=None):
super(Env, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner)
self.ret_required = True
def build_command_string(self):
cmd = "env"
return cmd
def on_new_line(self, line, is_full_line):
if is_full_line:
try:
self._parse_name_line(line)
except ParsingDone:
pass
return super(Env, self).on_new_line(line, is_full_line)
_re_name_line = re.compile(r"^(?P<title>\S+)=(?P<content>.*)$")
def _parse_name_line(self, line):
if self._regex_helper.search_compiled(Env._re_name_line, line):
name = self._regex_helper.group("title")
self.current_ret[name] = self._regex_helper.group("content")
raise ParsingDone
COMMAND_OUTPUT = """
host:~# env
LESSKEY=/etc/lesskey.bin
NNTPSERVER=news
MANPATH=/usr/share/man:/usr/local/man:/usr/local/share/man
XDG_SESSION_ID=26352
HOSTNAME=FZM-TDD-249
XKEYSYMDB=/usr/X11R6/lib/X11/XKeysymDB
HOST=FZM-TDD-249
TERM=xterm-mono
SHELL=/bin/bash
PROFILEREAD=true
HISTSIZE=1000
SSH_CLIENT=10.83.200.37 40356 22
MORE=-sl
OLDPWD=/root
SSH_TTY=/dev/pts/3
NO_PROXY=localhost, 127.0.0.1, 192.168.0.0/16, 10.83.0.0/16, 10.254.0.0/16, 10.0.0.0/16
http_proxy=http://172.16.31.10:8080
JRE_HOME=/usr/lib64/jvm/jre-1.7.0
USER=root
LS_COLORS=
XNLSPATH=/usr/share/X11/nls
QEMU_AUDIO_DRV=pa
HOSTTYPE=x86_64
ftp_proxy=http://172.16.31.10:8080
CONFIG_SITE=/usr/share/site/x86_64-unknown-linux-gnu
FROM_HEADER=
PAGER=less
CSHEDIT=emacs
XDG_CONFIG_DIRS=/etc/xdg
LIBGL_DEBUG=quiet
MINICOM=-c on
MAIL=/var/mail/root
PATH=/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin:/usr/bin/X11:/usr/games:/usr/lib/mit/bin:/usr/lib/mit/sbin:/home/emssim/lte1702/bin/shared/
CPU=x86_64
JAVA_BINDIR=/usr/java/latest/bin
SSH_SENDS_LOCALE=yes
INPUTRC=/etc/inputrc
PWD=/l
gopher_proxy=
JAVA_HOME=/usr/java/latest
LANG=en_US.UTF-8
PYTHONSTARTUP=/etc/pythonstart
https_proxy=http://172.16.31.10:8080
GPG_TTY=/dev/pts/3
AUDIODRIVER=pulseaudio
QT_SYSTEM_DIR=/usr/share/desktop-data
SHLVL=1
HOME=/root
ALSA_CONFIG_PATH=/etc/alsa-pulse.conf
SDL_AUDIODRIVER=pulse
LESS_ADVANCED_PREPROCESSOR=no
OSTYPE=linux
LS_OPTIONS=-A -N --color=none -T 0
no_proxy=localhost, 127.0.0.1, 192.168.0.0/16, 10.83.0.0/16, 10.254.0.0/16, 10.0.0.0/16
XCURSOR_THEME=DMZ
WINDOWMANAGER=/usr/bin/kde4
G_FILENAME_ENCODING=@locale,UTF-8,ISO-8859-15,CP1252
LESS=-M -I -R
MACHTYPE=x86_64-suse-linux
LOGNAME=root
CVS_RSH=ssh
XDG_DATA_DIRS=/usr/share
SSH_CONNECTION=10.83.200.37 40356 10.83.205.103 22
LESSOPEN=lessopen.sh %s
XDG_RUNTIME_DIR=/run/user/0
BTS_SITE_MANAGER_INSTALL_PATH=/opt/NSN/Managers/BTS Site/BTS Site Manager
VDPAU_DRIVER=va_gl
NO_AT_BRIDGE=1
LESSCLOSE=lessclose.sh %s %s
G_BROKEN_FILENAMES=1
JAVA_ROOT=/usr/java/latest
COLORTERM=1
BASH_FUNC_mc%%=() { . /usr/share/mc/mc-wrapper.sh
_=/usr/bin/env
host:~#"""
COMMAND_RESULT = {
'ALSA_CONFIG_PATH': '/etc/alsa-pulse.conf',
'AUDIODRIVER': 'pulseaudio',
'BASH_FUNC_mc%%': '() { . /usr/share/mc/mc-wrapper.sh',
'BTS_SITE_MANAGER_INSTALL_PATH': '/opt/NSN/Managers/BTS Site/BTS Site Manager',
'COLORTERM': '1',
'CONFIG_SITE': '/usr/share/site/x86_64-unknown-linux-gnu',
'CPU': 'x86_64',
'CSHEDIT': 'emacs',
'CVS_RSH': 'ssh',
'FROM_HEADER': '',
'GPG_TTY': '/dev/pts/3',
'G_BROKEN_FILENAMES': '1',
'G_FILENAME_ENCODING': '@locale,UTF-8,ISO-8859-15,CP1252',
'HISTSIZE': '1000',
'HOME': '/root',
'HOST': 'FZM-TDD-249',
'HOSTNAME': 'FZM-TDD-249',
'HOSTTYPE': 'x86_64',
'INPUTRC': '/etc/inputrc',
'JAVA_BINDIR': '/usr/java/latest/bin',
'JAVA_HOME': '/usr/java/latest',
'JAVA_ROOT': '/usr/java/latest',
'JRE_HOME': '/usr/lib64/jvm/jre-1.7.0',
'LANG': 'en_US.UTF-8',
'LESS': '-M -I -R',
'LESSCLOSE': 'lessclose.sh %s %s',
'LESSKEY': '/etc/lesskey.bin',
'LESSOPEN': 'lessopen.sh %s',
'LESS_ADVANCED_PREPROCESSOR': 'no',
'LIBGL_DEBUG': 'quiet',
'LOGNAME': 'root',
'LS_COLORS': '',
'LS_OPTIONS': '-A -N --color=none -T 0',
'MACHTYPE': 'x86_64-suse-linux',
'MAIL': '/var/mail/root',
'MANPATH': '/usr/share/man:/usr/local/man:/usr/local/share/man',
'MINICOM': '-c on',
'MORE': '-sl',
'NNTPSERVER': 'news',
'NO_AT_BRIDGE': '1',
'NO_PROXY': 'localhost, 127.0.0.1, 1172.16.58.3/16, 10.83.0.0/16, 10.254.0.0/16, 10.0.0.0/16',
'OLDPWD': '/root',
'OSTYPE': 'linux',
'PAGER': 'less',
'PATH': '/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin:/usr/bin/X11:/usr/games:/usr/lib/mit/bin:/usr/lib/mit/sbin:/home/emssim/lte1702/bin/shared/',
'PROFILEREAD': 'true',
'PWD': <PASSWORD>',
'PYTHONSTARTUP': '/etc/pythonstart',
'QEMU_AUDIO_DRV': 'pa',
'QT_SYSTEM_DIR': '/usr/share/desktop-data',
'SDL_AUDIODRIVER': 'pulse',
'SHELL': '/bin/bash',
'SHLVL': '1',
'SSH_CLIENT': '10.83.200.37 40356 22',
'SSH_CONNECTION': '10.83.200.37 40356 10.83.205.103 22',
'SSH_SENDS_LOCALE': 'yes',
'SSH_TTY': '/dev/pts/3',
'TERM': 'xterm-mono',
'USER': 'root',
'VDPAU_DRIVER': 'va_gl',
'WINDOWMANAGER': '/usr/bin/kde4',
'XCURSOR_THEME': 'DMZ',
'XDG_CONFIG_DIRS': '/etc/xdg',
'XDG_DATA_DIRS': '/usr/share',
'XDG_RUNTIME_DIR': '/run/user/0',
'XDG_SESSION_ID': '26352',
'XKEYSYMDB': '/usr/X11R6/lib/X11/XKeysymDB',
'XNLSPATH': '/usr/share/X11/nls',
'_': '/usr/bin/env',
'ftp_proxy': 'http://172.16.31.10:8080',
'gopher_proxy': '',
'http_proxy': 'http://172.16.31.10:8080',
'https_proxy': 'http://172.16.31.10:8080',
'no_proxy': 'localhost, 127.0.0.1, 192.168.0.0/16, 10.83.0.0/16, 10.254.0.0/16, 10.0.0.0/16'
}
COMMAND_KWARGS = {} | en | 0.236995 | # -*- coding: utf-8 -*- Env command module. host:~# env LESSKEY=/etc/lesskey.bin NNTPSERVER=news MANPATH=/usr/share/man:/usr/local/man:/usr/local/share/man XDG_SESSION_ID=26352 HOSTNAME=FZM-TDD-249 XKEYSYMDB=/usr/X11R6/lib/X11/XKeysymDB HOST=FZM-TDD-249 TERM=xterm-mono SHELL=/bin/bash PROFILEREAD=true HISTSIZE=1000 SSH_CLIENT=10.83.200.37 40356 22 MORE=-sl OLDPWD=/root SSH_TTY=/dev/pts/3 NO_PROXY=localhost, 127.0.0.1, 192.168.0.0/16, 10.83.0.0/16, 10.254.0.0/16, 10.0.0.0/16 http_proxy=http://172.16.31.10:8080 JRE_HOME=/usr/lib64/jvm/jre-1.7.0 USER=root LS_COLORS= XNLSPATH=/usr/share/X11/nls QEMU_AUDIO_DRV=pa HOSTTYPE=x86_64 ftp_proxy=http://172.16.31.10:8080 CONFIG_SITE=/usr/share/site/x86_64-unknown-linux-gnu FROM_HEADER= PAGER=less CSHEDIT=emacs XDG_CONFIG_DIRS=/etc/xdg LIBGL_DEBUG=quiet MINICOM=-c on MAIL=/var/mail/root PATH=/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin:/usr/bin/X11:/usr/games:/usr/lib/mit/bin:/usr/lib/mit/sbin:/home/emssim/lte1702/bin/shared/ CPU=x86_64 JAVA_BINDIR=/usr/java/latest/bin SSH_SENDS_LOCALE=yes INPUTRC=/etc/inputrc PWD=/l gopher_proxy= JAVA_HOME=/usr/java/latest LANG=en_US.UTF-8 PYTHONSTARTUP=/etc/pythonstart https_proxy=http://172.16.31.10:8080 GPG_TTY=/dev/pts/3 AUDIODRIVER=pulseaudio QT_SYSTEM_DIR=/usr/share/desktop-data SHLVL=1 HOME=/root ALSA_CONFIG_PATH=/etc/alsa-pulse.conf SDL_AUDIODRIVER=pulse LESS_ADVANCED_PREPROCESSOR=no OSTYPE=linux LS_OPTIONS=-A -N --color=none -T 0 no_proxy=localhost, 127.0.0.1, 192.168.0.0/16, 10.83.0.0/16, 10.254.0.0/16, 10.0.0.0/16 XCURSOR_THEME=DMZ WINDOWMANAGER=/usr/bin/kde4 G_FILENAME_ENCODING=@locale,UTF-8,ISO-8859-15,CP1252 LESS=-M -I -R MACHTYPE=x86_64-suse-linux LOGNAME=root CVS_RSH=ssh XDG_DATA_DIRS=/usr/share SSH_CONNECTION=10.83.200.37 40356 10.83.205.103 22 LESSOPEN=lessopen.sh %s XDG_RUNTIME_DIR=/run/user/0 BTS_SITE_MANAGER_INSTALL_PATH=/opt/NSN/Managers/BTS Site/BTS Site Manager VDPAU_DRIVER=va_gl NO_AT_BRIDGE=1 
LESSCLOSE=lessclose.sh %s %s G_BROKEN_FILENAMES=1 JAVA_ROOT=/usr/java/latest COLORTERM=1 BASH_FUNC_mc%%=() { . /usr/share/mc/mc-wrapper.sh _=/usr/bin/env host:~# | 2.195026 | 2 |
pastebin/views.py | fkmclane/paste-example | 0 | 6615716 | from datetime import timedelta
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.http import HttpResponse
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
from .models import Paste
def index(request):
link = None
if 'code' in request.POST:
data = {}
name = request.POST['name']
if name:
data['name'] = name
language = request.POST['language']
if language:
data['language'] = language
try:
days = int(request.POST['expires'])
except ValueError:
days = 7
data['expires'] = timezone.now() + timedelta(days=days)
data['code'] = request.POST['code']
paste = Paste(**data)
paste.save()
link = request.scheme + '://' + request.META['HTTP_HOST'] + request.path + str(paste.id)
context = {
'link': link,
}
return render(request, 'index.html', context)
def latest(request):
latest = Paste.objects.order_by('-date')[:20]
context = {
'latest': latest,
}
return render(request, 'latest.html', context)
def prune(request):
deletions = []
for paste in Paste.objects.all():
if paste.should_prune():
deletions.append(paste)
for paste in deletions:
paste.delete()
return HttpResponse('Pruned')
def paste(request, paste_id):
paste = get_object_or_404(Paste, id=paste_id)
try:
lexer = get_lexer_by_name(paste.language, stripall=True)
formatter = HtmlFormatter(linenos=True)
highlighted = highlight(paste.code, lexer, formatter)
except:
highlighted = paste.code
context = {
'pygments': HtmlFormatter().get_style_defs('.highlight'),
'date': paste.date,
'name': paste.name,
'language': paste.language,
'expires': paste.expires,
'code': mark_safe(highlighted),
}
return render(request, 'paste.html', context)
| from datetime import timedelta
from django.shortcuts import get_object_or_404, render
from django.utils import timezone
from django.utils.safestring import mark_safe
from django.http import HttpResponse
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
from .models import Paste
def index(request):
link = None
if 'code' in request.POST:
data = {}
name = request.POST['name']
if name:
data['name'] = name
language = request.POST['language']
if language:
data['language'] = language
try:
days = int(request.POST['expires'])
except ValueError:
days = 7
data['expires'] = timezone.now() + timedelta(days=days)
data['code'] = request.POST['code']
paste = Paste(**data)
paste.save()
link = request.scheme + '://' + request.META['HTTP_HOST'] + request.path + str(paste.id)
context = {
'link': link,
}
return render(request, 'index.html', context)
def latest(request):
latest = Paste.objects.order_by('-date')[:20]
context = {
'latest': latest,
}
return render(request, 'latest.html', context)
def prune(request):
deletions = []
for paste in Paste.objects.all():
if paste.should_prune():
deletions.append(paste)
for paste in deletions:
paste.delete()
return HttpResponse('Pruned')
def paste(request, paste_id):
paste = get_object_or_404(Paste, id=paste_id)
try:
lexer = get_lexer_by_name(paste.language, stripall=True)
formatter = HtmlFormatter(linenos=True)
highlighted = highlight(paste.code, lexer, formatter)
except:
highlighted = paste.code
context = {
'pygments': HtmlFormatter().get_style_defs('.highlight'),
'date': paste.date,
'name': paste.name,
'language': paste.language,
'expires': paste.expires,
'code': mark_safe(highlighted),
}
return render(request, 'paste.html', context)
| none | 1 | 2.127559 | 2 | |
Aula-06/ex004.py | matheussantanads/exercicios-python | 1 | 6615717 | # Curso Python 06
# ---Desafio 04---
# Faça um programa que leia algo pelo teclado
# e mostre na tela o seu tipo primitivo e todas
# as informações possíveis sobre ele.
algo = input('Digite algo: ')
print(f'O tipo primitivo desse valor é {algo.__class__}')
print(f'É numérico? {algo.isnumeric()}')
print(f'É alfa? {algo.isalpha()}')
print(f'É alfa-numérico? {algo.isalnum()}')
print(f'É maiúsculo? {algo.isupper()}')
print(f'É minúsculo? {algo.islower()}')
print(f'É captalizado? {algo.istitle()}')
| # Curso Python 06
# ---Desafio 04---
# Faça um programa que leia algo pelo teclado
# e mostre na tela o seu tipo primitivo e todas
# as informações possíveis sobre ele.
algo = input('Digite algo: ')
print(f'O tipo primitivo desse valor é {algo.__class__}')
print(f'É numérico? {algo.isnumeric()}')
print(f'É alfa? {algo.isalpha()}')
print(f'É alfa-numérico? {algo.isalnum()}')
print(f'É maiúsculo? {algo.isupper()}')
print(f'É minúsculo? {algo.islower()}')
print(f'É captalizado? {algo.istitle()}')
| pt | 0.979622 | # Curso Python 06 # ---Desafio 04--- # Faça um programa que leia algo pelo teclado # e mostre na tela o seu tipo primitivo e todas # as informações possíveis sobre ele. | 4.247569 | 4 |
src/melange/src/soc/views/helper/decorators.py | MatthewWilkes/mw4068-packaging | 0 | 6615718 | <reponame>MatthewWilkes/mw4068-packaging
#!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Views decorators.
"""
__authors__ = [
'"<NAME>" <<EMAIL>>',
'"<NAME>" <<EMAIL>>',
'"<NAME>" <<EMAIL>>',
]
from functools import wraps
from django import http
from django.utils.translation import ugettext
from soc.logic import dicts
from soc.views.helper import responses
class Error(Exception):
"""Base class for all exceptions raised by this module.
"""
pass
def view(func):
"""Decorator that insists that exceptions are handled by view.
"""
@wraps(func)
def view_wrapper(request, *args, **kwds):
"""View decorator wrapper method.
"""
return func(request, *args, **kwds)
return view_wrapper
def merge_params(func):
"""Decorator that merges 'params' with self._params.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
"""Decorator wrapper method.
"""
params = kwargs.get('params', {})
kwargs['params'] = dicts.merge(params, self._params)
return func(self, *args, **kwargs)
return wrapper
def check_access(func):
"""This decorator does access checks for the specified view method.
The rights dictionary is extracted from 'params', or, if either 'params' or
'rights' do not exist, from self._params['rights'].
"""
# Do not pollute helper.decorators with access specific imports
from soc.views import out_of_band
from soc.views import helper
from soc.views.helper import responses
@wraps(func)
def wrapper(self, request, access_type, *args, **kwargs):
"""Decorator wrapper method.
"""
params = kwargs.get('params', {})
# Try to extract rights
if 'rights' in params:
rights = params['rights']
else:
rights = self._params['rights']
check_kwargs = kwargs.copy()
context = responses.getUniversalContext(request)
responses.useJavaScript(context, self._params['js_uses_all'])
id = context['account']
user = context['user']
check_kwargs['GET'] = request.GET
check_kwargs['POST'] = request.POST
check_kwargs['context'] = context
# reset and pre-fill the Checker's cache
rights.setCurrentUser(id, user)
# Do the access check dance
try:
rights.checkAccess(access_type, check_kwargs)
except out_of_band.Error, error:
return helper.responses.errorResponse(error, request)
return func(self, request, access_type, *args, **kwargs)
return wrapper
def mutation(func):
"""This decorator indicates that the view is a mutation operation and is
therefore restricted to POST requests.
XSRF checking is performed automatically by the xsrf middleware.
"""
@wraps(func)
def wrapper(self, request, *args, **kwargs):
if request.method != "POST":
return http.HttpResponse("Invoked a mutation view w/o POST.", status=403)
return func(self, request, *args, **kwargs)
return wrapper
| #!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Views decorators.
"""
__authors__ = [
'"<NAME>" <<EMAIL>>',
'"<NAME>" <<EMAIL>>',
'"<NAME>" <<EMAIL>>',
]
from functools import wraps
from django import http
from django.utils.translation import ugettext
from soc.logic import dicts
from soc.views.helper import responses
class Error(Exception):
"""Base class for all exceptions raised by this module.
"""
pass
def view(func):
"""Decorator that insists that exceptions are handled by view.
"""
@wraps(func)
def view_wrapper(request, *args, **kwds):
"""View decorator wrapper method.
"""
return func(request, *args, **kwds)
return view_wrapper
def merge_params(func):
"""Decorator that merges 'params' with self._params.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
"""Decorator wrapper method.
"""
params = kwargs.get('params', {})
kwargs['params'] = dicts.merge(params, self._params)
return func(self, *args, **kwargs)
return wrapper
def check_access(func):
"""This decorator does access checks for the specified view method.
The rights dictionary is extracted from 'params', or, if either 'params' or
'rights' do not exist, from self._params['rights'].
"""
# Do not pollute helper.decorators with access specific imports
from soc.views import out_of_band
from soc.views import helper
from soc.views.helper import responses
@wraps(func)
def wrapper(self, request, access_type, *args, **kwargs):
"""Decorator wrapper method.
"""
params = kwargs.get('params', {})
# Try to extract rights
if 'rights' in params:
rights = params['rights']
else:
rights = self._params['rights']
check_kwargs = kwargs.copy()
context = responses.getUniversalContext(request)
responses.useJavaScript(context, self._params['js_uses_all'])
id = context['account']
user = context['user']
check_kwargs['GET'] = request.GET
check_kwargs['POST'] = request.POST
check_kwargs['context'] = context
# reset and pre-fill the Checker's cache
rights.setCurrentUser(id, user)
# Do the access check dance
try:
rights.checkAccess(access_type, check_kwargs)
except out_of_band.Error, error:
return helper.responses.errorResponse(error, request)
return func(self, request, access_type, *args, **kwargs)
return wrapper
def mutation(func):
    """Restrict the decorated view to POST requests.

    XSRF checking is performed automatically by the xsrf middleware.
    """
    @wraps(func)
    def wrapper(self, request, *args, **kwargs):
        # Only mutate state on POST; everything else is rejected with 403.
        if request.method == "POST":
            return func(self, request, *args, **kwargs)
        return http.HttpResponse("Invoked a mutation view w/o POST.", status=403)
    return wrapper
students/forms.py | yassinebebba/django-school-platform- | 0 | 6615719 | from management.models import Account
from .models import Student
from django import forms
class StudentAccountUpdateForm(forms.ModelForm):
    """ModelForm exposing only the ``email`` field of ``Account``."""

    class Meta:
        model = Account
        fields = ('email',)
class StudentInfoUpdateForm(forms.ModelForm):
    """ModelForm for editing a Student's contact, guardian and notes fields."""

    # Free-text notes rendered as a small textarea; may be left empty.
    notes = forms.CharField(label='Notes',
                            widget=forms.Textarea(attrs={'cols': '60', 'rows': '4'}),
                            required=False)

    class Meta:
        model = Student
        fields = ('phone_number', 'address_line_1', 'address_line_2', 'postcode',
                  'guardian_first_name', 'guardian_last_name', 'guardian_relationship', 'guardian_phone_number',
                  'notes')
from .models import Student
from django import forms
class StudentAccountUpdateForm(forms.ModelForm):
    """ModelForm exposing only the ``email`` field of ``Account``."""

    class Meta:
        model = Account
        fields = ('email',)
class StudentInfoUpdateForm(forms.ModelForm):
    """ModelForm for editing a Student's contact, guardian and notes fields."""

    # Free-text notes rendered as a small textarea; may be left empty.
    notes = forms.CharField(label='Notes',
                            widget=forms.Textarea(attrs={'cols': '60', 'rows': '4'}),
                            required=False)

    class Meta:
        model = Student
        fields = ('phone_number', 'address_line_1', 'address_line_2', 'postcode',
                  'guardian_first_name', 'guardian_last_name', 'guardian_relationship', 'guardian_phone_number',
                  'notes')
rl/networks/policy_gradient.py | jrobine/smaller-world-models | 0 | 6615720 | from abc import ABC, abstractmethod
from typing import Optional, Tuple
from torch import nn, Tensor
from torch.distributions import Distribution
from rl.spaces.base import TensorSpace
__all__ = ['PolicyGradientNetwork']
class PolicyGradientNetwork(nn.Module, ABC):
    """Base class for networks that can compute an action distribution based on an observation."""

    @property
    @abstractmethod
    def observation_space(self) -> TensorSpace:
        """Returns a space that describes the observations that this network expects as input."""
        pass

    @property
    @abstractmethod
    def action_space(self) -> TensorSpace:
        """Returns a space that describes the actions that this network assumes for the action distribution."""
        pass

    @property
    @abstractmethod
    def is_recurrent(self) -> bool:
        """Returns whether this network carries a recurrent state across time steps."""
        pass

    @abstractmethod
    def init_recurrent_state(self, batch_size: int) -> Optional[Tensor]:
        """Returns the initial recurrent state for a batch of the given size.

        NOTE(review): presumably ``None`` for non-recurrent networks (the
        return type is Optional) -- confirm against concrete implementations.
        """
        pass

    @abstractmethod
    def mask_recurrent_state(self, recurrent_state: Optional[Tensor], terminal: Tensor) -> Optional[Tensor]:
        """Returns the recurrent state adjusted for terminal time steps.

        NOTE(review): assumed semantics -- ``terminal`` marks finished
        episodes whose state entries should be reset; confirm against
        concrete implementations.
        """
        pass

    @abstractmethod
    def compute_action_distribution(
            self,
            observation: Tensor,
            recurrent_state: Optional[Tensor] = None,
            train: bool = False) -> Tuple[Distribution, Optional[Tensor]]:
        """Computes the action distribution for a given batch of observations.

        Arguments:
            observation (Tensor): A batch of observations of a single time step.
            recurrent_state (Tensor, optional): Recurrent state carried over
                from the previous time step; ``None`` presumably means
                "no state" (confirm).
            train (bool, optional): Indicates whether the network is currently
                used for training. Defaults to ``False``.

        Returns:
            Tuple of the action ``Distribution`` and the updated recurrent
            state (``None`` for non-recurrent networks -- assumed from the
            annotation; confirm).
        """
        pass
| from abc import ABC, abstractmethod
from typing import Optional, Tuple
from torch import nn, Tensor
from torch.distributions import Distribution
from rl.spaces.base import TensorSpace
__all__ = ['PolicyGradientNetwork']
class PolicyGradientNetwork(nn.Module, ABC):
    """Base class for networks that can compute an action distribution based on an observation."""

    @property
    @abstractmethod
    def observation_space(self) -> TensorSpace:
        """Returns a space that describes the observations that this network expects as input."""
        pass

    @property
    @abstractmethod
    def action_space(self) -> TensorSpace:
        """Returns a space that describes the actions that this network assumes for the action distribution."""
        pass

    @property
    @abstractmethod
    def is_recurrent(self) -> bool:
        """Returns whether this network carries a recurrent state across time steps."""
        pass

    @abstractmethod
    def init_recurrent_state(self, batch_size: int) -> Optional[Tensor]:
        """Returns the initial recurrent state for a batch of the given size.

        NOTE(review): presumably ``None`` for non-recurrent networks (the
        return type is Optional) -- confirm against concrete implementations.
        """
        pass

    @abstractmethod
    def mask_recurrent_state(self, recurrent_state: Optional[Tensor], terminal: Tensor) -> Optional[Tensor]:
        """Returns the recurrent state adjusted for terminal time steps.

        NOTE(review): assumed semantics -- ``terminal`` marks finished
        episodes whose state entries should be reset; confirm against
        concrete implementations.
        """
        pass

    @abstractmethod
    def compute_action_distribution(
            self,
            observation: Tensor,
            recurrent_state: Optional[Tensor] = None,
            train: bool = False) -> Tuple[Distribution, Optional[Tensor]]:
        """Computes the action distribution for a given batch of observations.

        Arguments:
            observation (Tensor): A batch of observations of a single time step.
            recurrent_state (Tensor, optional): Recurrent state carried over
                from the previous time step; ``None`` presumably means
                "no state" (confirm).
            train (bool, optional): Indicates whether the network is currently
                used for training. Defaults to ``False``.

        Returns:
            Tuple of the action ``Distribution`` and the updated recurrent
            state (``None`` for non-recurrent networks -- assumed from the
            annotation; confirm).
        """
        pass
| en | 0.809114 | Base class for networks that can compute an action distribution based on an observation. Returns a space that describes the observations that this network expects as input. Returns a space that describes the actions that this network assumes for the action distribution. TODO docstring TODO docstring Computes the action distribution for a given batch of observations. Arguments: observation (Tensor): A batch of observations of a single time step. recurrent_state (Tensor, optional): TODO docstring train (bool, optional): Indicates whether the network is currently used for training. Defaults to ``False``. Returns: Distribution object that describes the action distribution. TODO docstring | 2.769997 | 3 |
apps/chat/admin.py | SeniorDev34/Django_React_Chat | 58 | 6615721 | <gh_stars>10-100
from django.contrib import admin
from . import models
class MessageAdmin(admin.ModelAdmin):
    # Show the room and timestamp columns in the admin changelist.
    list_display = ('room', 'timestamp')


admin.site.register(models.Message, MessageAdmin)


class RoomAdmin(admin.ModelAdmin):
    # Default admin behaviour is sufficient for rooms.
    pass


admin.site.register(models.Room, RoomAdmin)


class UserAdmin(admin.ModelAdmin):
    # Default admin behaviour is sufficient for users.
    pass


admin.site.register(models.User, UserAdmin)
| from django.contrib import admin
from . import models
class MessageAdmin(admin.ModelAdmin):
    # Show the room and timestamp columns in the admin changelist.
    list_display = ('room', 'timestamp')


admin.site.register(models.Message, MessageAdmin)


class RoomAdmin(admin.ModelAdmin):
    # Default admin behaviour is sufficient for rooms.
    pass


admin.site.register(models.Room, RoomAdmin)


class UserAdmin(admin.ModelAdmin):
    # Default admin behaviour is sufficient for users.
    pass


admin.site.register(models.User, UserAdmin)
tests/basic/tests/__init__.py | pavanv/django-tastypie | 1,570 | 6615722 | from basic.tests.resources import * # noqa
from basic.tests.views import * # noqa
| from basic.tests.resources import * # noqa
from basic.tests.views import * # noqa
| uz | 0.443564 | # noqa # noqa | 1.046958 | 1 |
python-django/app/urls.py | aalves932/python-notes | 0 | 6615723 | <reponame>aalves932/python-notes<gh_stars>0
from django.conf.urls import url
from . import views
# Route each path prefix to its view.  Note: the regexes are only anchored at
# the start, so r'^hello' also matches '/helloworld', '/hello/x', etc.
# NOTE(review): django.conf.urls.url() is deprecated (removed in Django 4);
# migrate to path()/re_path() when upgrading.
urlpatterns = [
    url(r'^hello', views.hello),
    url(r'^compute', views.compute),
    url(r'^countries', views.countries),
    url(r'^users', views.users),
]
| from django.conf.urls import url
from . import views
# Route each path prefix to its view.  Note: the regexes are only anchored at
# the start, so r'^hello' also matches '/helloworld', '/hello/x', etc.
# NOTE(review): django.conf.urls.url() is deprecated (removed in Django 4);
# migrate to path()/re_path() when upgrading.
urlpatterns = [
    url(r'^hello', views.hello),
    url(r'^compute', views.compute),
    url(r'^countries', views.countries),
    url(r'^users', views.users),
]
V501/plot.py | nsalewski/laboratory | 1 | 6615724 | import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from astropy.io import ascii
from uncertainties import ufloat
import uncertainties.unumpy as unp
def y(x, m, b):
    """Straight line with slope ``m`` and intercept ``b`` (fit model)."""
    return b + m * x
##########################################################################################
# Electric field (E-Feld) deflection measurement
# Sawtooth frequency ratios: average the sweep frequency per ratio and report
# mean +/- standard error of the mean.
x=np.linspace(-12,38)
n_, v_säge = np.genfromtxt("Messdaten/frequenzsaege.txt",unpack=True)
ascii.write([n_, v_säge], 'Messdaten/tab_saegi.tex', format="latex", names=['Frequenzverhältnis','frequenz'])
vwechsel=v_säge/n_
vwechsel=ufloat(np.mean(vwechsel),np.std(vwechsel, ddof=1) / np.sqrt(len(vwechsel)))
print(vwechsel)
# Deflection D versus deflection voltage Ud for three accelerating voltages;
# D is converted from inches to centimetres (factor 2.54).
D, Ud400, Ud300, Ud200 = np.genfromtxt("Messdaten/efeld.txt",unpack=True)
ascii.write([D*2.54, Ud400, Ud300, Ud200], 'Messdaten/tab_efeld.tex', format="latex")
D=D*2.54
# Linear fits D = m*Ud + b, one per accelerating voltage.
params400, covariance400 = curve_fit(y,Ud400,D)
errors400 = np.sqrt(np.diag(covariance400))
params300, covariance300 = curve_fit(y, Ud300,D)
errors300 = np.sqrt(np.diag(covariance300))
params200, covariance200 = curve_fit(y, Ud200,D)
errors200 = np.sqrt(np.diag(covariance200))
print('m400 = ', params400[0], '+/-', errors400[0])
print('m300 = ', params300[0], '+/-', errors300[0])
print('m200 = ', params200[0], '+/-', errors200[0])
# Fit the slopes D/Ud against 1/U_B (scaled by 10^3; see the axis labels).
m=[params200[0],params300[0],params400[0]]
Ud=[10**3/200,10**3/300,10**3/400]
paramsud, covarianceud = curve_fit(y,Ud,m)
errorsud = np.sqrt(np.diag(covarianceud))
print('m_ud = ', paramsud[0], '+/-', errorsud[0])
Uud=np.linspace(1/160,1/460)
Uud=Uud*10**3
plt.plot(Uud,paramsud[0]*Uud+paramsud[1], 'b-',label=r'Regressionsgrade')
plt.plot(Ud,m, 'rx', label=r'Messwerte')
plt.ylabel(r"$\frac{D}{U_\mathrm{d}}$/$\si{\centi\meter\per\volt}$")
plt.xlabel(r"$\frac{1}{U_\mathrm{B}}\cdot 10^3$/$\si{\per\volt}$")
plt.xlim(2.2,6.0)
#plt.ylim(-2,14)
plt.legend()
plt.tight_layout()
plt.savefig('Messdaten/plotm.pdf')
plt.clf()
# Deflection curves for all three accelerating voltages in one figure.
plt.plot(x, params200[0]*x+params200[1], 'g-',label=r'Regression $U_\mathrm{B}=\SI{200}{Volt}$')
plt.plot(Ud200,D, 'gx', label=r'Messwerte $U_\mathrm{B}=\SI{200}{Volt}$')
plt.plot(x, params300[0]*x+params300[1], 'b-',label=r'Regression $U_\mathrm{B}=\SI{300}{Volt}$ ')
plt.plot(Ud300,D, 'bx', label=r'Messwerte $U_\mathrm{B}=\SI{300}{Volt}$')
plt.plot(x, params400[0]*x+params400[1], 'r-',label=r'Regression $U_\mathrm{B}=\SI{400}{Volt}$ ')
plt.plot(Ud400,D, 'rx', label=r'Messwerte $U_\mathrm{B}=\SI{400}{Volt}$')
plt.ylabel(r"$D$/$\si{\centi\meter}$")
plt.xlabel(r"$U_\mathrm{d}$/$\si{\volt}$")
plt.xlim(-12,38)
plt.ylim(-2,14)
plt.legend()
plt.tight_layout()
plt.savefig('Messdaten/plotefeld.pdf')
plt.clf()
#########################################################################################
# Magnetic field (B-Feld) deflection measurement, U_B = 250 V
I250, D_, I450 = np.genfromtxt("Messdaten/messdaten502a.txt",unpack=True)
# NOTE(review): unlike D above, D_ is only converted (factor 2.54) for the
# LaTeX table; the fit below uses the raw D_ together with L**2 = 0.143**2 --
# verify that the units are consistent.
ascii.write([D_*2.54, I250, I450], 'Messdaten/tab_bfeld.tex', format="latex")
# B from the coil current via the Helmholtz formula
# B = mu_0 * 8/sqrt(125) * N * I / R  with N = 20 turns and R = 0.282 m.
params, covariance = curve_fit(y, 4*np.pi*10**(-7)*8/np.sqrt(125)*20*I250/0.282, D_/(D_**2+0.143**2))
errors = np.sqrt(np.diag(covariance))
print('m = ', params[0], '+/-', errors[0])
print('b = ', params[1], '+/-', errors[1])
m = unp.uarray(params[0], errors[0])
# Literature reference values for elementary charge and electron mass.
e_theo = unp.uarray(1.6021766208*10**(-19), 0.0000000098*10**(-19))
m_theo = unp.uarray(9.10938356*10**(-31), 0.00000011*10**(-31))
# Specific charge from the fitted slope: e/m = slope**2 * 8 * U_B.
e_m=m**2*8*250
e_m_theo = e_theo/m_theo
print('experiment = ', e_m)
print('theorie = ', e_m_theo)
plt.plot(np.linspace(0,0.0002), params[0]*np.linspace(0,0.0002)+params[1], 'b-',label='Ausgleichsgerade')
plt.plot(4*np.pi*10**(-7)*8/np.sqrt(125)*20*I250/0.282,D_/(D_**2+0.143**2) , 'rx', label='Messwerte')
plt.ylabel(r"$\frac{D}{D^2+L^2}$/$\si{\per\meter}$")
plt.xlabel(r"$B$/$\si{\tesla}$")
plt.tight_layout()
plt.savefig('Messdaten/plotbfeld.pdf')
plt.clf()
# Magnetic field measurement, U_B = 450 V.  The last data point is dropped
# (presumably an outlier / incomplete row -- TODO confirm against lab notes).
D_ = D_[0:-1]
I450 = I450[0:-1]
# Fit D/(D^2+L^2) against B for the 450 V accelerating voltage.
params, covariance = curve_fit(y, 4*np.pi*10**(-7)*8/np.sqrt(125)*20*I450/0.282, D_/(D_**2+0.143**2))
errors = np.sqrt(np.diag(covariance))
print('m = ', params[0], '+/-', errors[0])
print('b = ', params[1], '+/-', errors[1])
plt.plot(np.linspace(0,0.0002), params[0]*np.linspace(0,0.0002)+params[1], 'b-',label='Ausgleichsgerade')
plt.plot(4*np.pi*10**(-7)*8/np.sqrt(125)*20*I450/0.282,D_/(D_**2+0.143**2) , 'rx', label='Messwerte')
plt.ylabel(r"$\frac{D}{D^2+L^2}$/$\si{\per\meter}$")
plt.xlabel(r"$B$/$\si{\tesla}$")
plt.tight_layout()
plt.savefig('Messdaten/plotbfeld2.pdf')
m2 = unp.uarray(params[0], errors[0])
# Bug fix: e/m for the 450 V run must use the slope of the 450 V fit (m2);
# the original reused the 250 V slope `m`, leaving `m2` unused.
e_m = m2**2*8*450
print('experiment = ', e_m)
#plt.plot(theta, w/1000, 'rx', label="Messwerte")
#plt.plot(thetaplot, theorie(thetaplot)/1000, 'b-', label="Theoriekurve")
#
#plt.ylabel(r"$\omega/\si{\kilo\hertz}$")
#plt.xlabel(r"$\theta/\si{\radian}$")
#plt.legend(loc='best')
#plt.tight_layout()
#plt.savefig('Bilder/b1.pdf')
#
| import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from astropy.io import ascii
from uncertainties import ufloat
import uncertainties.unumpy as unp
def y(x, m, b):
    """Straight line with slope ``m`` and intercept ``b`` (fit model)."""
    return b + m * x
##########################################################################################
# Electric field (E-Feld) deflection measurement
# Sawtooth frequency ratios: average the sweep frequency per ratio and report
# mean +/- standard error of the mean.
x=np.linspace(-12,38)
n_, v_säge = np.genfromtxt("Messdaten/frequenzsaege.txt",unpack=True)
ascii.write([n_, v_säge], 'Messdaten/tab_saegi.tex', format="latex", names=['Frequenzverhältnis','frequenz'])
vwechsel=v_säge/n_
vwechsel=ufloat(np.mean(vwechsel),np.std(vwechsel, ddof=1) / np.sqrt(len(vwechsel)))
print(vwechsel)
# Deflection D versus deflection voltage Ud for three accelerating voltages;
# D is converted from inches to centimetres (factor 2.54).
D, Ud400, Ud300, Ud200 = np.genfromtxt("Messdaten/efeld.txt",unpack=True)
ascii.write([D*2.54, Ud400, Ud300, Ud200], 'Messdaten/tab_efeld.tex', format="latex")
D=D*2.54
# Linear fits D = m*Ud + b, one per accelerating voltage.
params400, covariance400 = curve_fit(y,Ud400,D)
errors400 = np.sqrt(np.diag(covariance400))
params300, covariance300 = curve_fit(y, Ud300,D)
errors300 = np.sqrt(np.diag(covariance300))
params200, covariance200 = curve_fit(y, Ud200,D)
errors200 = np.sqrt(np.diag(covariance200))
print('m400 = ', params400[0], '+/-', errors400[0])
print('m300 = ', params300[0], '+/-', errors300[0])
print('m200 = ', params200[0], '+/-', errors200[0])
# Fit the slopes D/Ud against 1/U_B (scaled by 10^3; see the axis labels).
m=[params200[0],params300[0],params400[0]]
Ud=[10**3/200,10**3/300,10**3/400]
paramsud, covarianceud = curve_fit(y,Ud,m)
errorsud = np.sqrt(np.diag(covarianceud))
print('m_ud = ', paramsud[0], '+/-', errorsud[0])
Uud=np.linspace(1/160,1/460)
Uud=Uud*10**3
plt.plot(Uud,paramsud[0]*Uud+paramsud[1], 'b-',label=r'Regressionsgrade')
plt.plot(Ud,m, 'rx', label=r'Messwerte')
plt.ylabel(r"$\frac{D}{U_\mathrm{d}}$/$\si{\centi\meter\per\volt}$")
plt.xlabel(r"$\frac{1}{U_\mathrm{B}}\cdot 10^3$/$\si{\per\volt}$")
plt.xlim(2.2,6.0)
#plt.ylim(-2,14)
plt.legend()
plt.tight_layout()
plt.savefig('Messdaten/plotm.pdf')
plt.clf()
# Deflection curves for all three accelerating voltages in one figure.
plt.plot(x, params200[0]*x+params200[1], 'g-',label=r'Regression $U_\mathrm{B}=\SI{200}{Volt}$')
plt.plot(Ud200,D, 'gx', label=r'Messwerte $U_\mathrm{B}=\SI{200}{Volt}$')
plt.plot(x, params300[0]*x+params300[1], 'b-',label=r'Regression $U_\mathrm{B}=\SI{300}{Volt}$ ')
plt.plot(Ud300,D, 'bx', label=r'Messwerte $U_\mathrm{B}=\SI{300}{Volt}$')
plt.plot(x, params400[0]*x+params400[1], 'r-',label=r'Regression $U_\mathrm{B}=\SI{400}{Volt}$ ')
plt.plot(Ud400,D, 'rx', label=r'Messwerte $U_\mathrm{B}=\SI{400}{Volt}$')
plt.ylabel(r"$D$/$\si{\centi\meter}$")
plt.xlabel(r"$U_\mathrm{d}$/$\si{\volt}$")
plt.xlim(-12,38)
plt.ylim(-2,14)
plt.legend()
plt.tight_layout()
plt.savefig('Messdaten/plotefeld.pdf')
plt.clf()
#########################################################################################
# Magnetic field (B-Feld) deflection measurement, U_B = 250 V
I250, D_, I450 = np.genfromtxt("Messdaten/messdaten502a.txt",unpack=True)
# NOTE(review): unlike D above, D_ is only converted (factor 2.54) for the
# LaTeX table; the fit below uses the raw D_ together with L**2 = 0.143**2 --
# verify that the units are consistent.
ascii.write([D_*2.54, I250, I450], 'Messdaten/tab_bfeld.tex', format="latex")
# B from the coil current via the Helmholtz formula
# B = mu_0 * 8/sqrt(125) * N * I / R  with N = 20 turns and R = 0.282 m.
params, covariance = curve_fit(y, 4*np.pi*10**(-7)*8/np.sqrt(125)*20*I250/0.282, D_/(D_**2+0.143**2))
errors = np.sqrt(np.diag(covariance))
print('m = ', params[0], '+/-', errors[0])
print('b = ', params[1], '+/-', errors[1])
m = unp.uarray(params[0], errors[0])
# Literature reference values for elementary charge and electron mass.
e_theo = unp.uarray(1.6021766208*10**(-19), 0.0000000098*10**(-19))
m_theo = unp.uarray(9.10938356*10**(-31), 0.00000011*10**(-31))
# Specific charge from the fitted slope: e/m = slope**2 * 8 * U_B.
e_m=m**2*8*250
e_m_theo = e_theo/m_theo
print('experiment = ', e_m)
print('theorie = ', e_m_theo)
plt.plot(np.linspace(0,0.0002), params[0]*np.linspace(0,0.0002)+params[1], 'b-',label='Ausgleichsgerade')
plt.plot(4*np.pi*10**(-7)*8/np.sqrt(125)*20*I250/0.282,D_/(D_**2+0.143**2) , 'rx', label='Messwerte')
plt.ylabel(r"$\frac{D}{D^2+L^2}$/$\si{\per\meter}$")
plt.xlabel(r"$B$/$\si{\tesla}$")
plt.tight_layout()
plt.savefig('Messdaten/plotbfeld.pdf')
plt.clf()
# Magnetic field measurement, U_B = 450 V.  The last data point is dropped
# (presumably an outlier / incomplete row -- TODO confirm against lab notes).
D_ = D_[0:-1]
I450 = I450[0:-1]
# Fit D/(D^2+L^2) against B for the 450 V accelerating voltage.
params, covariance = curve_fit(y, 4*np.pi*10**(-7)*8/np.sqrt(125)*20*I450/0.282, D_/(D_**2+0.143**2))
errors = np.sqrt(np.diag(covariance))
print('m = ', params[0], '+/-', errors[0])
print('b = ', params[1], '+/-', errors[1])
plt.plot(np.linspace(0,0.0002), params[0]*np.linspace(0,0.0002)+params[1], 'b-',label='Ausgleichsgerade')
plt.plot(4*np.pi*10**(-7)*8/np.sqrt(125)*20*I450/0.282,D_/(D_**2+0.143**2) , 'rx', label='Messwerte')
plt.ylabel(r"$\frac{D}{D^2+L^2}$/$\si{\per\meter}$")
plt.xlabel(r"$B$/$\si{\tesla}$")
plt.tight_layout()
plt.savefig('Messdaten/plotbfeld2.pdf')
m2 = unp.uarray(params[0], errors[0])
# Bug fix: e/m for the 450 V run must use the slope of the 450 V fit (m2);
# the original reused the 250 V slope `m`, leaving `m2` unused.
e_m = m2**2*8*450
print('experiment = ', e_m)
#plt.plot(theta, w/1000, 'rx', label="Messwerte")
#plt.plot(thetaplot, theorie(thetaplot)/1000, 'b-', label="Theoriekurve")
#
#plt.ylabel(r"$\omega/\si{\kilo\hertz}$")
#plt.xlabel(r"$\theta/\si{\radian}$")
#plt.legend(loc='best')
#plt.tight_layout()
#plt.savefig('Bilder/b1.pdf')
#
| de | 0.478882 | ########################################################################################## # E-Feld #plt.ylim(-2,14) ######################################################################################### # B-Feld #plt.plot(theta, w/1000, 'rx', label="Messwerte") #plt.plot(thetaplot, theorie(thetaplot)/1000, 'b-', label="Theoriekurve") # #plt.ylabel(r"$\omega/\si{\kilo\hertz}$") #plt.xlabel(r"$\theta/\si{\radian}$") #plt.legend(loc='best') #plt.tight_layout() #plt.savefig('Bilder/b1.pdf') # | 2.313015 | 2 |
settings.py | bmrrossi/web_crawler_popular | 0 | 6615725 | BOT_NAME = 'web_crawler_popular'
SPIDER_MODULES = ['web_crawler_popular.spiders']
NEWSPIDER_MODULE = 'web_crawler_popular.spiders'
CLOSESPIDER_TIMEOUT = 180
DOWNLOAD_TIMEOUT = 200
DOWNLOAD_DELAY = 5
DEPTH_LIMIT = 15
EXTENSIONS = {
'scrapy.extensions.telnet.TelnetConsole': None,
'scrapy.extensions.closespider.CloseSpider': 1
} | BOT_NAME = 'web_crawler_popular'
SPIDER_MODULES = ['web_crawler_popular.spiders']
NEWSPIDER_MODULE = 'web_crawler_popular.spiders'
CLOSESPIDER_TIMEOUT = 180
DOWNLOAD_TIMEOUT = 200
DOWNLOAD_DELAY = 5
DEPTH_LIMIT = 15
EXTENSIONS = {
'scrapy.extensions.telnet.TelnetConsole': None,
'scrapy.extensions.closespider.CloseSpider': 1
} | none | 1 | 1.542262 | 2 | |
src/preppipe/vnimport/document.py | PrepPipe/preppipe-python | 1 | 6615726 | #!/usr/bin/env python3
import io, sys
import typing
import preppipe.documentmodel as documentmodel
import preppipe.visualnovelmodel as visualnovelmodel
def get_visual_novel_model_from_document(doc : documentmodel.DocumentModel) -> visualnovelmodel.VisualNovelModel:
    """Convert a DocumentModel into a VisualNovelModel

    This function makes best effort in converting all information, even if there are errors
    """
    result = visualnovelmodel.VisualNovelModel()
    # TODO import all images
    currentContext = result.getEmptyContext()
    currentBlock = None
    def getParentNode() -> visualnovelmodel.VNElementBlock:
        # Lazily create (and register) the block that elements are appended to.
        nonlocal currentBlock
        nonlocal currentContext
        nonlocal result
        if currentBlock is None:
            currentBlock = visualnovelmodel.VNElementBlock(currentContext)
            result.addBlock(currentBlock)
        return currentBlock
    for p in doc.paragraph_list:
        # ignore empty paragraphs
        if p.empty():
            continue
        # pattern detection
        # default case
        # Each paragraph starts with a clear element (NOTE(review): assumed to
        # reset the say text between paragraphs -- confirm VNClearElement
        # semantics).
        block = getParentNode();
        block.addElement(visualnovelmodel.VNClearElement())
        for e in p.element_list:
            if isinstance(e, documentmodel.TextElement):
                sayText = e.getText()
                attributeDict = {}
                sayStyle = e.getStyle()
                # Translate each set style flag into a VNTextAttribute entry.
                if sayStyle.bold():
                    attributeDict[visualnovelmodel.VNTextAttribute.Bold] = True
                if sayStyle.italic():
                    attributeDict[visualnovelmodel.VNTextAttribute.Italic] = True
                if sayStyle.has_nonzero_sizelevel():
                    attributeDict[visualnovelmodel.VNTextAttribute.Size] = sayStyle.size_level()
                if sayStyle.has_text_color():
                    attributeDict[visualnovelmodel.VNTextAttribute.TextColor] = sayStyle.text_color()
                if sayStyle.has_background_color():
                    attributeDict[visualnovelmodel.VNTextAttribute.BackgroundColor] = sayStyle.background_color()
                textElement = visualnovelmodel.VNSayTextElement(sayText, attributeDict)
                block.addElement(textElement)
            else:
                # NOTE(review): raising here contradicts the "best effort"
                # promise in the docstring -- any non-text element aborts the
                # whole conversion.
                raise RuntimeError("Unhandled element type")
    return result
| #!/usr/bin/env python3
import io, sys
import typing
import preppipe.documentmodel as documentmodel
import preppipe.visualnovelmodel as visualnovelmodel
def get_visual_novel_model_from_document(doc : documentmodel.DocumentModel) -> visualnovelmodel.VisualNovelModel:
    """Convert a DocumentModel into a VisualNovelModel

    This function makes best effort in converting all information, even if there are errors
    """
    result = visualnovelmodel.VisualNovelModel()
    # TODO import all images
    currentContext = result.getEmptyContext()
    currentBlock = None
    def getParentNode() -> visualnovelmodel.VNElementBlock:
        # Lazily create (and register) the block that elements are appended to.
        nonlocal currentBlock
        nonlocal currentContext
        nonlocal result
        if currentBlock is None:
            currentBlock = visualnovelmodel.VNElementBlock(currentContext)
            result.addBlock(currentBlock)
        return currentBlock
    for p in doc.paragraph_list:
        # ignore empty paragraphs
        if p.empty():
            continue
        # pattern detection
        # default case
        # Each paragraph starts with a clear element (NOTE(review): assumed to
        # reset the say text between paragraphs -- confirm VNClearElement
        # semantics).
        block = getParentNode();
        block.addElement(visualnovelmodel.VNClearElement())
        for e in p.element_list:
            if isinstance(e, documentmodel.TextElement):
                sayText = e.getText()
                attributeDict = {}
                sayStyle = e.getStyle()
                # Translate each set style flag into a VNTextAttribute entry.
                if sayStyle.bold():
                    attributeDict[visualnovelmodel.VNTextAttribute.Bold] = True
                if sayStyle.italic():
                    attributeDict[visualnovelmodel.VNTextAttribute.Italic] = True
                if sayStyle.has_nonzero_sizelevel():
                    attributeDict[visualnovelmodel.VNTextAttribute.Size] = sayStyle.size_level()
                if sayStyle.has_text_color():
                    attributeDict[visualnovelmodel.VNTextAttribute.TextColor] = sayStyle.text_color()
                if sayStyle.has_background_color():
                    attributeDict[visualnovelmodel.VNTextAttribute.BackgroundColor] = sayStyle.background_color()
                textElement = visualnovelmodel.VNSayTextElement(sayText, attributeDict)
                block.addElement(textElement)
            else:
                # NOTE(review): raising here contradicts the "best effort"
                # promise in the docstring -- any non-text element aborts the
                # whole conversion.
                raise RuntimeError("Unhandled element type")
    return result
| en | 0.554358 | #!/usr/bin/env python3 Convert a DocumentModel into a VisualNovelModel This function makes best effort in converting all information, even if there are errors # TODO import all images # ignore empty paragraphs # pattern detection # default case | 2.602826 | 3 |
ppjoin/p4join.py | usc-isi-i2/ppjoin | 2 | 6615727 | <gh_stars>1-10
"""
P4Join algorithm
Paper:
Sehili, Ziad, et al.
"Privacy preserving record linkage with PPJoin."
Datenbanksysteme für Business, Technologie und Web (BTW 2015) (2015).
Implemented by GreatYYX https://github.com/greatyyx
"""
import collections
from itertools import product
from functools import reduce
from typing import List, Tuple, Set
import hashlib
import hmac
from ppjoin.ppjoin_ import ceil
def list_to_vec(l):
    """Pack a bit list (most significant bit first) into a single integer."""
    vec = 0
    for bit in l:
        vec = (vec << 1) | bit
    return vec
def vec_to_list(vec, l_len):
    """Unpack the low ``l_len`` bits of ``vec`` into a bit list (MSB first).

    Bits of ``vec`` above position ``l_len - 1`` are ignored.
    """
    l = [0] * l_len
    idx = l_len - 1
    # Bug fix: the loop guard used to test the constant ``l_len >= 0`` instead
    # of ``idx >= 0``, so a vector with more set bits than l_len kept writing
    # through negative indices (wrapping around, then IndexError).
    while vec != 0 and idx >= 0:
        l[idx] = vec & 1
        vec >>= 1
        idx -= 1
    return l
def str_to_byte(s):
    """UTF-8 encode a text string."""
    return bytes(s, 'utf-8')
def byte_to_str(b):
    """Decode UTF-8 bytes back into a text string."""
    return str(b, 'utf-8')
def all_sb_idx(b, vec_len):
    """
    Get set-bit indices.

    Positions are counted MSB-first (index 0 is the most significant of the
    ``vec_len`` bits) and returned in ascending order; bits beyond ``vec_len``
    are ignored.
    """
    return [i for i in range(vec_len) if (b >> (vec_len - 1 - i)) & 1]
def set_bit(b, vec_len, idx):
    """Return ``b`` with the bit at MSB-first position ``idx`` switched on."""
    mask = 1 << (vec_len - 1 - idx)
    return b | mask
def base_hash(key, msg, method):
    """HMAC digest of ``msg`` under ``key`` (hash constructor ``method``), as an int."""
    digest = hmac.new(key=key, msg=msg, digestmod=method).hexdigest()
    return int(digest, 16)
def encode_record(record: List[str], hmac_key: str, vec_len: int, k: int = 2) -> int:
    """Encode a record (list of string tokens) as a Bloom-filter bit vector.

    Each token is hashed with ``k`` keyed hash functions built from
    HMAC-SHA1 and HMAC-MD5 via double hashing, and the resulting positions
    are set in a ``vec_len``-bit vector.

    Annotation fix: ``record`` is a flat list of strings (each element is
    UTF-8 encoded directly) and the return value is a single int bit vector,
    matching how ``preprocess``/``join`` consume it.
    """
    hmac_key = str_to_byte(hmac_key)
    vec = 0
    for t in record:
        t = str_to_byte(t)
        for i in range(1, k+1):
            # Double hashing: position = (h1 + i * h2) mod vec_len.
            set_bit_idx = (
                base_hash(key=hmac_key, msg=t, method=hashlib.sha1) +
                base_hash(key=hmac_key, msg=t, method=hashlib.md5) * i
            ) % vec_len
            vec = set_bit(vec, vec_len, set_bit_idx)
    return vec
def prefix(vec, vec_len, t):
    """Return the PPJoin prefix of bit vector ``vec`` as a bit vector.

    The prefix consists of the first set bits in the global bit order; its
    length is chosen so that two vectors can only reach Jaccard threshold
    ``t`` if their prefixes share at least one set bit.

    NOTE(review): for an all-zero ``vec`` the prefix is empty and the final
    ``reduce`` raises TypeError (no initializer).
    """
    sb_idx = all_sb_idx(vec, vec_len)
    # prefix_length = ceil((1 - t) * len(sb_idx)) + 1
    prefix_length = len(sb_idx) - ceil(t * len(sb_idx)) + 1
    prefix_length = min(prefix_length, len(sb_idx))
    prefix_sb_idx = sb_idx[:prefix_length]
    # Re-assemble the selected positions into a single bit vector.
    prefix_vec = map(lambda x: set_bit(0, vec_len, x), prefix_sb_idx[:])
    return reduce(lambda x, y: x | y, prefix_vec)
def compare(records, vec_len, t, order_map):
    """Return pairs of indices into ``records`` with Jaccard similarity >= t.

    ``records`` must be sorted by ascending cardinality (see ``preprocess``).
    ``order_map`` is currently unused here.  For ``t == 0`` every ordered pair
    of distinct indices trivially matches and is returned directly.
    """
    cp = set()
    # Index of already-seen records, keyed by their cardinality.
    lmap = collections.defaultdict(set)
    if t == 0:
        return set(filter(lambda x: x[0] != x[1], product(range(len(records)), range(len(records)))))
    for xr_idx, xr in enumerate(records):
        xl = len(all_sb_idx(xr, vec_len))
        for el in list(lmap.keys()):
            if el < xl * t:  # length filter
                # Records this short can never reach threshold t against xr
                # (or any later, even longer record), so drop them for good.
                del lmap[el]
                continue
            for (yr_idx, yr) in lmap[el]:
                xp = prefix(xr, vec_len, t)
                yp = prefix(yr, vec_len, t)
                if xp & yp == 0:  # prefix filter
                    continue
                yl = len(all_sb_idx(yr, vec_len))
                if positional_filter(xp, yp, xl, yl, t, vec_len):
                    continue
                # Survived all filters: compute the exact similarity.
                score = jaccard(xr, yr, vec_len)
                if score >= t:
                    cp.add((xr_idx, yr_idx))
        lmap[xl].add((xr_idx, xr))
    return cp
def positional_filter(xp, yp, xl, yl, t, vec_len):
    """Return True if the pair can be pruned based on prefix bit positions.

    Combines the overlap already seen in the prefixes ``xp``/``yp`` with an
    optimistic upper bound on the overlap still possible beyond them; if even
    that total cannot reach the overlap equivalent of Jaccard threshold ``t``
    (for cardinalities ``xl`` and ``yl``), the candidate pair is filtered out.
    """
    overlap = len(all_sb_idx(xp & yp, vec_len))
    sb_idx1 = all_sb_idx(xp, vec_len)
    sb_idx2 = all_sb_idx(yp, vec_len)
    # Position of the last prefix bit of each vector (in the global order).
    p1, p2 = sb_idx1[-1], sb_idx2[-1]
    diff1, diff2 = 0, 0
    if p1 > p2:
        diff1 = len([sb for sb in sb_idx1 if sb > p2])
    else:
        diff2 = len([sb for sb in sb_idx2 if sb > p1])
    # Optimistic bound on the overlap still achievable beyond the prefixes.
    rest = min(xl - len(sb_idx1) + diff1, yl - len(sb_idx2) + diff2)
    # Jaccard(x, y) >= t  <=>  |overlap| >= ceil((xl + yl) * t / (t + 1)).
    return overlap + rest < ceil((xl + yl) * t / (t + 1))
def preprocess(records, vec_len):
    """Sort records by cardinality and build the bit-frequency order map.

    Returns ``(sorted_records, argsort, order_map)`` where ``argsort`` maps a
    position in the sorted list back to the original record index.
    """
    # get all set bits index of records
    records_sb_idx = []
    for vec in records:
        records_sb_idx.append(all_sb_idx(vec, vec_len))
    # get frequency order of index of all set bits
    elements = [e for r in records_sb_idx for e in r]
    order_map = dict(
        (el, i)
        for i, (el, count) in enumerate(sorted(collections.Counter(elements).items(), key=lambda x: (x[1], x[0])))
    )  # (element, order)
    # reorder set bit of all records
    # NOTE(review): this loop is a no-op -- OR-ing the same bit positions in a
    # different iteration order yields the identical vector, so
    # reordered_records[i] == records[i].  The intent was presumably to remap
    # each position to its frequency rank (set_bit(..., order_map[set_bit_idx]))
    # so that rare bits end up in the prefix; confirm and fix separately.
    reordered_records = []
    for vec_sb_idx in records_sb_idx:
        vec = 0
        for set_bit_idx in sorted(vec_sb_idx, key=lambda x: order_map[x]):
            vec = set_bit(vec, vec_len, set_bit_idx)
        reordered_records.append(vec)
    # sort reordered records based on cardinality
    argsort = sorted(range(len(reordered_records)), key=lambda i: len(all_sb_idx(reordered_records[i], vec_len)))
    reordered_records.sort(key=lambda r: len(all_sb_idx(r, vec_len)))
    return reordered_records, argsort, order_map
def jaccard(n1, n2, vec_len):
    """Jaccard similarity of two bit vectors: |intersection| / |union|.

    Raises ZeroDivisionError if neither vector has a set bit.
    """
    return 1.0 * len(all_sb_idx(n1 & n2, vec_len)) / len(all_sb_idx(n1 | n2, vec_len))
def join(datasets: List[List[int]], t: float = 0, vec_len: int = 0) -> Set[Tuple[Tuple]]:
    """P4Join entry point: find similar record pairs across ``datasets``.

    Each dataset is a list of encoded bit vectors (see ``encode_record``).
    Returns a set of pairs ``((dataset_index, record_index), ...)`` whose
    Jaccard similarity is at least ``t``; when more than one dataset is
    given, only cross-dataset pairs are reported.
    """
    ret = set()
    if not datasets:
        return ret
    # Concatenate all datasets; remember where each one starts so that global
    # record ids can be mapped back to (dataset, local index) afterwards.
    dataset = []
    dataset_id_offset = [0]
    for d in datasets:
        dataset += d
        dataset_id_offset.append(len(d) + dataset_id_offset[-1])
    if len(dataset_id_offset) > 1:
        dataset_id_offset = dataset_id_offset[:-1]
    records_sorted, original_order, order_map = preprocess(dataset, vec_len)
    result = compare(records_sorted, vec_len, t, order_map)
    for r in result:
        r1id, r2id = r[0], r[1]
        # Translate positions in the sorted order back to original ids.
        r1id, r2id = original_order[r1id], original_order[r2id]
        if r1id == r2id:
            continue
        # r1id should <= r2id
        if r1id > r2id:
            r1id, r2id = r2id, r1id
        # find which original datasets the rids belong to
        ds1_offset = next(x for x in reversed(dataset_id_offset) if x <= r1id)
        ds2_offset = next(x for x in reversed(dataset_id_offset) if x <= r2id)
        # both are from one source (except only one dataset is provided)
        if len(dataset_id_offset) > 1 and ds1_offset == ds2_offset:
            continue
        ret.add((
            (dataset_id_offset.index(ds1_offset), r1id - ds1_offset),
            (dataset_id_offset.index(ds2_offset), r2id - ds2_offset)
        ))
    return ret
| """
P4Join algorithm
Paper:
Sehili, Ziad, et al.
"Privacy preserving record linkage with PPJoin."
Datenbanksysteme für Business, Technologie und Web (BTW 2015) (2015).
Implemented by GreatYYX https://github.com/greatyyx
"""
import collections
from itertools import product
from functools import reduce
from typing import List, Tuple, Set
import hashlib
import hmac
from ppjoin.ppjoin_ import ceil
def list_to_vec(l):
    """Pack a bit list (most significant bit first) into a single integer."""
    vec = 0
    for bit in l:
        vec = (vec << 1) | bit
    return vec
def vec_to_list(vec, l_len):
    """Unpack the low ``l_len`` bits of ``vec`` into a bit list (MSB first).

    Bits of ``vec`` above position ``l_len - 1`` are ignored.
    """
    l = [0] * l_len
    idx = l_len - 1
    # Bug fix: the loop guard used to test the constant ``l_len >= 0`` instead
    # of ``idx >= 0``, so a vector with more set bits than l_len kept writing
    # through negative indices (wrapping around, then IndexError).
    while vec != 0 and idx >= 0:
        l[idx] = vec & 1
        vec >>= 1
        idx -= 1
    return l
def str_to_byte(s):
    """UTF-8 encode a text string."""
    return bytes(s, 'utf-8')
def byte_to_str(b):
    """Decode UTF-8 bytes back into a text string."""
    return str(b, 'utf-8')
def all_sb_idx(b, vec_len):
    """
    Get set-bit indices.

    Positions are counted MSB-first (index 0 is the most significant of the
    ``vec_len`` bits) and returned in ascending order; bits beyond ``vec_len``
    are ignored.
    """
    return [i for i in range(vec_len) if (b >> (vec_len - 1 - i)) & 1]
def set_bit(b, vec_len, idx):
    """Return ``b`` with the bit at MSB-first position ``idx`` switched on."""
    mask = 1 << (vec_len - 1 - idx)
    return b | mask
def base_hash(key, msg, method):
    """HMAC digest of ``msg`` under ``key`` (hash constructor ``method``), as an int."""
    digest = hmac.new(key=key, msg=msg, digestmod=method).hexdigest()
    return int(digest, 16)
def encode_record(record: List[str], hmac_key: str, vec_len: int, k: int = 2) -> int:
    """Encode a record (list of string tokens) as a Bloom-filter bit vector.

    Each token is hashed with ``k`` keyed hash functions built from
    HMAC-SHA1 and HMAC-MD5 via double hashing, and the resulting positions
    are set in a ``vec_len``-bit vector.

    Annotation fix: ``record`` is a flat list of strings (each element is
    UTF-8 encoded directly) and the return value is a single int bit vector,
    matching how ``preprocess``/``join`` consume it.
    """
    hmac_key = str_to_byte(hmac_key)
    vec = 0
    for t in record:
        t = str_to_byte(t)
        for i in range(1, k+1):
            # Double hashing: position = (h1 + i * h2) mod vec_len.
            set_bit_idx = (
                base_hash(key=hmac_key, msg=t, method=hashlib.sha1) +
                base_hash(key=hmac_key, msg=t, method=hashlib.md5) * i
            ) % vec_len
            vec = set_bit(vec, vec_len, set_bit_idx)
    return vec
def prefix(vec, vec_len, t):
    """Return the PPJoin prefix of bit vector ``vec`` as a bit vector.

    The prefix consists of the first set bits in the global bit order; its
    length is chosen so that two vectors can only reach Jaccard threshold
    ``t`` if their prefixes share at least one set bit.

    NOTE(review): for an all-zero ``vec`` the prefix is empty and the final
    ``reduce`` raises TypeError (no initializer).
    """
    sb_idx = all_sb_idx(vec, vec_len)
    # prefix_length = ceil((1 - t) * len(sb_idx)) + 1
    prefix_length = len(sb_idx) - ceil(t * len(sb_idx)) + 1
    prefix_length = min(prefix_length, len(sb_idx))
    prefix_sb_idx = sb_idx[:prefix_length]
    # Re-assemble the selected positions into a single bit vector.
    prefix_vec = map(lambda x: set_bit(0, vec_len, x), prefix_sb_idx[:])
    return reduce(lambda x, y: x | y, prefix_vec)
def compare(records, vec_len, t, order_map):
    """Return pairs of indices into ``records`` with Jaccard similarity >= t.

    ``records`` must be sorted by ascending cardinality (see ``preprocess``).
    ``order_map`` is currently unused here.  For ``t == 0`` every ordered pair
    of distinct indices trivially matches and is returned directly.
    """
    cp = set()
    # Index of already-seen records, keyed by their cardinality.
    lmap = collections.defaultdict(set)
    if t == 0:
        return set(filter(lambda x: x[0] != x[1], product(range(len(records)), range(len(records)))))
    for xr_idx, xr in enumerate(records):
        xl = len(all_sb_idx(xr, vec_len))
        for el in list(lmap.keys()):
            if el < xl * t:  # length filter
                # Records this short can never reach threshold t against xr
                # (or any later, even longer record), so drop them for good.
                del lmap[el]
                continue
            for (yr_idx, yr) in lmap[el]:
                xp = prefix(xr, vec_len, t)
                yp = prefix(yr, vec_len, t)
                if xp & yp == 0:  # prefix filter
                    continue
                yl = len(all_sb_idx(yr, vec_len))
                if positional_filter(xp, yp, xl, yl, t, vec_len):
                    continue
                # Survived all filters: compute the exact similarity.
                score = jaccard(xr, yr, vec_len)
                if score >= t:
                    cp.add((xr_idx, yr_idx))
        lmap[xl].add((xr_idx, xr))
    return cp
def positional_filter(xp, yp, xl, yl, t, vec_len):
    """Return True if the pair can be pruned based on prefix bit positions.

    Combines the overlap already seen in the prefixes ``xp``/``yp`` with an
    optimistic upper bound on the overlap still possible beyond them; if even
    that total cannot reach the overlap equivalent of Jaccard threshold ``t``
    (for cardinalities ``xl`` and ``yl``), the candidate pair is filtered out.
    """
    overlap = len(all_sb_idx(xp & yp, vec_len))
    sb_idx1 = all_sb_idx(xp, vec_len)
    sb_idx2 = all_sb_idx(yp, vec_len)
    # Position of the last prefix bit of each vector (in the global order).
    p1, p2 = sb_idx1[-1], sb_idx2[-1]
    diff1, diff2 = 0, 0
    if p1 > p2:
        diff1 = len([sb for sb in sb_idx1 if sb > p2])
    else:
        diff2 = len([sb for sb in sb_idx2 if sb > p1])
    # Optimistic bound on the overlap still achievable beyond the prefixes.
    rest = min(xl - len(sb_idx1) + diff1, yl - len(sb_idx2) + diff2)
    # Jaccard(x, y) >= t  <=>  |overlap| >= ceil((xl + yl) * t / (t + 1)).
    return overlap + rest < ceil((xl + yl) * t / (t + 1))
def preprocess(records, vec_len):
    """Sort records by cardinality and build the bit-frequency order map.

    Returns ``(sorted_records, argsort, order_map)`` where ``argsort`` maps a
    position in the sorted list back to the original record index.
    """
    # get all set bits index of records
    records_sb_idx = []
    for vec in records:
        records_sb_idx.append(all_sb_idx(vec, vec_len))
    # get frequency order of index of all set bits
    elements = [e for r in records_sb_idx for e in r]
    order_map = dict(
        (el, i)
        for i, (el, count) in enumerate(sorted(collections.Counter(elements).items(), key=lambda x: (x[1], x[0])))
    )  # (element, order)
    # reorder set bit of all records
    # NOTE(review): this loop is a no-op -- OR-ing the same bit positions in a
    # different iteration order yields the identical vector, so
    # reordered_records[i] == records[i].  The intent was presumably to remap
    # each position to its frequency rank (set_bit(..., order_map[set_bit_idx]))
    # so that rare bits end up in the prefix; confirm and fix separately.
    reordered_records = []
    for vec_sb_idx in records_sb_idx:
        vec = 0
        for set_bit_idx in sorted(vec_sb_idx, key=lambda x: order_map[x]):
            vec = set_bit(vec, vec_len, set_bit_idx)
        reordered_records.append(vec)
    # sort reordered records based on cardinality
    argsort = sorted(range(len(reordered_records)), key=lambda i: len(all_sb_idx(reordered_records[i], vec_len)))
    reordered_records.sort(key=lambda r: len(all_sb_idx(r, vec_len)))
    return reordered_records, argsort, order_map
def jaccard(n1, n2, vec_len):
return 1.0 * len(all_sb_idx(n1 & n2, vec_len)) / len(all_sb_idx(n1 | n2, vec_len))
def join(datasets: List[List[int]], t: float = 0, vec_len: int = 0) -> Set[Tuple[Tuple]]:
ret = set()
if not datasets:
return ret
dataset = []
dataset_id_offset = [0]
for d in datasets:
dataset += d
dataset_id_offset.append(len(d) + dataset_id_offset[-1])
if len(dataset_id_offset) > 1:
dataset_id_offset = dataset_id_offset[:-1]
records_sorted, original_order, order_map = preprocess(dataset, vec_len)
result = compare(records_sorted, vec_len, t, order_map)
for r in result:
r1id, r2id = r[0], r[1]
r1id, r2id = original_order[r1id], original_order[r2id]
if r1id == r2id:
continue
# r1id should <= r2id
if r1id > r2id:
r1id, r2id = r2id, r1id
# find which original datasets the rids belong to
ds1_offset = next(x for x in reversed(dataset_id_offset) if x <= r1id)
ds2_offset = next(x for x in reversed(dataset_id_offset) if x <= r2id)
# both are from one source (except only one dataset is provided)
if len(dataset_id_offset) > 1 and ds1_offset == ds2_offset:
continue
ret.add((
(dataset_id_offset.index(ds1_offset), r1id - ds1_offset),
(dataset_id_offset.index(ds2_offset), r2id - ds2_offset)
))
return ret | en | 0.734757 | P4Join algorithm Paper: Sehili, Ziad, et al. "Privacy preserving record linkage with PPJoin." Datenbanksysteme für Business, Technologie und Web (BTW 2015) (2015). Implemented by GreatYYX https://github.com/greatyyx Get set-bit indices # prefix_length = ceil((1 - t) * len(sb_idx)) + 1 # length filter # prefix filter # get all set bits index of records # get frequency order of index of all set bits # (element, order) # reorder set bit of all records # sort reordered records based on cardinality # r1id should <= r2id # find which original datasets the rids belong to # both are from one source (except only one dataset is provided) | 2.377194 | 2 |
doto/model/repeat.py | raphaelahrens/doto | 1 | 6615728 | """
Description of a recurring event.
"""
import doto.model
import doto.model.crud
from dateutil import rrule
import pytz
CREATE_CMD = """
CREATE TABLE IF NOT EXISTS
repeats (
id INTEGER NOT NULL,
repeat_rule rrule NOT NULL,
event INTEGER, -- id of the event either a task or apmt
PRIMARY KEY (id)
);
"""
PATTERNS = {
'@yearly': rrule.YEARLY,
'@monthly': rrule.MONTHLY,
'@weekly': rrule.WEEKLY,
'@daily': rrule.DAILY,
'@hourly': rrule.HOURLY,
}
REV_PATTERNS = {
rrule.YEARLY: '@yearly',
rrule.MONTHLY: '@monthly',
rrule.WEEKLY: '@weekly',
rrule.DAILY: '@daily',
rrule.HOURLY: '@hourly',
}
class Repeat(object):
"""
A Repeat is meta data for a task with due date or an appointment
It indicates that the event will repeat in a specific pattern.
"""
def __init__(self, repeat_rule, event):
""" constructor for Repeat """
self.event = event
self.repeat_rule = repeat_rule
@staticmethod
def row_to_obj(row, store):
""" Create Repeat from database row """
repeat = doto.model.unwrap_row(store,
row,
Repeat,
('repeat_rule', 'event'),
('id',)
)
return repeat
@staticmethod
def obj_to_row(obj):
"""
Create Row from repeat object
"""
row_dict = doto.model.unwrap_obj(obj)
return row_dict
def next(self, after_dt):
""" return the next event after after_dt """
utc_after = pytz.utc.normalize(after_dt).replace(tzinfo=None)
return self.repeat_rule.after(utc_after).replace(tzinfo=pytz.utc)
def __eq__(self, obj):
return str(self.repeat_rule) == str(obj.repeat_rule)
def __str__(self):
return REV_PATTERNS[self.repeat_rule._freq]
def parse(rule_pattern, start_dt, event):
utc_start = pytz.utc.normalize(start_dt)
return Repeat(rrule.rrule(PATTERNS[rule_pattern], dtstart=utc_start), event=event)
insert_query = """INSERT INTO repeats ( repeat_rule, event)
VALUES (:repeat_rule, :event);
"""
update_query = """UPDATE repeats SET repeat_rule = :repeat_rule,
event = :event
WHERE id = :id;
"""
delete_query = 'DELETE FROM repeats WHERE id = ?;'
select_query = """SELECT * FROM repeats WHERE id = :id; """
update = doto.model.crud.update(update_query, Repeat)
add_new = doto.model.crud.insert(insert_query, Repeat)
delete = doto.model.crud.delete(delete_query)
get = doto.model.crud.get(select_query, Repeat)
def convert_rrule(rule_str):
return rrule.rrulestr(rule_str.decode("utf-8"))
doto.model.setup_module(CREATE_CMD, ((rrule.rrule, str, convert_rrule),))
| """
Description of a recurring event.
"""
import doto.model
import doto.model.crud
from dateutil import rrule
import pytz
CREATE_CMD = """
CREATE TABLE IF NOT EXISTS
repeats (
id INTEGER NOT NULL,
repeat_rule rrule NOT NULL,
event INTEGER, -- id of the event either a task or apmt
PRIMARY KEY (id)
);
"""
PATTERNS = {
'@yearly': rrule.YEARLY,
'@monthly': rrule.MONTHLY,
'@weekly': rrule.WEEKLY,
'@daily': rrule.DAILY,
'@hourly': rrule.HOURLY,
}
REV_PATTERNS = {
rrule.YEARLY: '@yearly',
rrule.MONTHLY: '@monthly',
rrule.WEEKLY: '@weekly',
rrule.DAILY: '@daily',
rrule.HOURLY: '@hourly',
}
class Repeat(object):
"""
A Repeat is meta data for a task with due date or an appointment
It indicates that the event will repeat in a specific pattern.
"""
def __init__(self, repeat_rule, event):
""" constructor for Repeat """
self.event = event
self.repeat_rule = repeat_rule
@staticmethod
def row_to_obj(row, store):
""" Create Repeat from database row """
repeat = doto.model.unwrap_row(store,
row,
Repeat,
('repeat_rule', 'event'),
('id',)
)
return repeat
@staticmethod
def obj_to_row(obj):
"""
Create Row from repeat object
"""
row_dict = doto.model.unwrap_obj(obj)
return row_dict
def next(self, after_dt):
""" return the next event after after_dt """
utc_after = pytz.utc.normalize(after_dt).replace(tzinfo=None)
return self.repeat_rule.after(utc_after).replace(tzinfo=pytz.utc)
def __eq__(self, obj):
return str(self.repeat_rule) == str(obj.repeat_rule)
def __str__(self):
return REV_PATTERNS[self.repeat_rule._freq]
def parse(rule_pattern, start_dt, event):
utc_start = pytz.utc.normalize(start_dt)
return Repeat(rrule.rrule(PATTERNS[rule_pattern], dtstart=utc_start), event=event)
insert_query = """INSERT INTO repeats ( repeat_rule, event)
VALUES (:repeat_rule, :event);
"""
update_query = """UPDATE repeats SET repeat_rule = :repeat_rule,
event = :event
WHERE id = :id;
"""
delete_query = 'DELETE FROM repeats WHERE id = ?;'
select_query = """SELECT * FROM repeats WHERE id = :id; """
update = doto.model.crud.update(update_query, Repeat)
add_new = doto.model.crud.insert(insert_query, Repeat)
delete = doto.model.crud.delete(delete_query)
get = doto.model.crud.get(select_query, Repeat)
def convert_rrule(rule_str):
return rrule.rrulestr(rule_str.decode("utf-8"))
doto.model.setup_module(CREATE_CMD, ((rrule.rrule, str, convert_rrule),))
| en | 0.759707 | Description of a recurring event. CREATE TABLE IF NOT EXISTS repeats ( id INTEGER NOT NULL, repeat_rule rrule NOT NULL, event INTEGER, -- id of the event either a task or apmt PRIMARY KEY (id) ); A Repeat is meta data for a task with due date or an appointment It indicates that the event will repeat in a specific pattern. constructor for Repeat Create Repeat from database row Create Row from repeat object return the next event after after_dt INSERT INTO repeats ( repeat_rule, event) VALUES (:repeat_rule, :event); UPDATE repeats SET repeat_rule = :repeat_rule, event = :event WHERE id = :id; SELECT * FROM repeats WHERE id = :id; | 2.980342 | 3 |
src/20210323/mysql.py | ngwork0301/ngw-teratail-answer-sample | 0 | 6615729 | #!/usr/bin/env python
# -*- coding:utf-8-*-
import pymysql
# MySQLに接続する
connection = pymysql.connect(host='localhost',
user='ユーザー名',
password='<PASSWORD>',
db='データベース名',
charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
# SQLを操作する
with connection.cursor() as cursor:
#「my_table」から「tw_id」が重複を省いた仮テーブル「my_table_temp」を作成する
sql = "select * from START_END_ITEM where Start_datetime >= '2021/3/1 00:00:00' order by Start_datetime;"
cursor.execute(sql)
records = cursor.fetchall()
new_records = [[records[idx]['End_datetime'], records[idx+1]['Start_datetime'], records[idx]['Item']] for idx in range(len(records)-1)]
# MySQLから切断する
connection.close()
# 結果表示
for record in new_records:
print(record[0].strftime("%Y/%m/%d %H:%M:%S"), record[1].strftime("%Y/%m/%d %H:%M:%S"), str(record[2]))
| #!/usr/bin/env python
# -*- coding:utf-8-*-
import pymysql
# MySQLに接続する
connection = pymysql.connect(host='localhost',
user='ユーザー名',
password='<PASSWORD>',
db='データベース名',
charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
# SQLを操作する
with connection.cursor() as cursor:
#「my_table」から「tw_id」が重複を省いた仮テーブル「my_table_temp」を作成する
sql = "select * from START_END_ITEM where Start_datetime >= '2021/3/1 00:00:00' order by Start_datetime;"
cursor.execute(sql)
records = cursor.fetchall()
new_records = [[records[idx]['End_datetime'], records[idx+1]['Start_datetime'], records[idx]['Item']] for idx in range(len(records)-1)]
# MySQLから切断する
connection.close()
# 結果表示
for record in new_records:
print(record[0].strftime("%Y/%m/%d %H:%M:%S"), record[1].strftime("%Y/%m/%d %H:%M:%S"), str(record[2]))
| ja | 0.993726 | #!/usr/bin/env python # -*- coding:utf-8-*- # MySQLに接続する # SQLを操作する #「my_table」から「tw_id」が重複を省いた仮テーブル「my_table_temp」を作成する # MySQLから切断する # 結果表示 | 3.337496 | 3 |
libraries/stc-1.2.9/Selected_Track_Control/SelectedTrackControl.py | lushfuture/Liveduino | 2 | 6615730 | import Live
import MIDI
import settings
#from Logging import log
from SessionControl import SessionControl
from MixerControl import MixerControl
from GlobalControl import GlobalControl
from ViewControl import ViewControl
from DeviceControl import DeviceControl
from QuantizationControl import QuantizationControl
class SelectedTrackControl:
__module__ = __name__
__doc__ = 'MIDI Remote Script to control the selected track'
__name__ = "SelectedTrackControl MIDI Remote Script"
def __init__(self, c_instance):
#log("SelectedTrackControl::__init__")
self.c_instance = c_instance
# mappings for registered MIDI notes/CCs
self.midi_callbacks = {}
# lookup object for fast lookup of cc to mode
self.midi_cc_to_mode = {}
# parse midi_mapping recursive for MIDI.CC
self.mapping_parse_recursive(settings.midi_mapping.values())
self._device_control = DeviceControl(c_instance, self)
self.components = (
SessionControl(c_instance, self),
MixerControl(c_instance, self),
GlobalControl(c_instance, self),
ViewControl(c_instance, self),
self._device_control,
QuantizationControl(c_instance, self),
)
def mapping_parse_recursive(self, mapping):
tuple_type = type((1,2));
for command in mapping:
if type(command) == tuple_type:
self.mapping_parse_recursive(command)
elif isinstance(command, MIDI.CC):
#log("MIDI CC %d is %s" % (command.key, command.mode))
self.midi_cc_to_mode[command.key] = command.mode
def suggest_map_mode(self, cc_no):
#log("suggest_map_mode")
if cc_no in self.midi_cc_to_mode:
return self.midi_cc_to_mode[cc_no]
return MIDI.ABSOLUTE # see MIDI.py for definitions of modes
def disconnect(self):
for c in self.components:
c.disconnect()
def refresh_state(self):
#log("refresh_state")
#for c in self.components:
# c.refresh_state()
pass
def update_display(self):
#log("update_display")
#for c in self.components:
# c.update_display()
pass
def connect_script_instances(self, instanciated_scripts):
pass
# called from Live to build the MIDI bindings
def build_midi_map(self, midi_map_handle):
#log("SelectedTrackControl::build_midi_map")
script_handle = self.c_instance.handle()
for channel in range(16):
callbacks = self.midi_callbacks.get(channel, {})
for note in callbacks.get(MIDI.NOTEON_STATUS,{}).keys():
Live.MidiMap.forward_midi_note(script_handle, midi_map_handle, channel, note)
for cc in callbacks.get(MIDI.CC_STATUS,{}).keys():
Live.MidiMap.forward_midi_cc(script_handle, midi_map_handle, channel, cc)
# called from Live when MIDI messages are received
def receive_midi(self, midi_bytes):
channel = (midi_bytes[0] & MIDI.CHAN_MASK)
status = (midi_bytes[0] & MIDI.STATUS_MASK)
key = midi_bytes[1]
value = midi_bytes[2]
#log("receive_midi on channel %d, status %d, key %d, value %d" % (channel, status, key, value))
# execute callbacks that are registered for this event
callbacks = self.midi_callbacks.get(channel,{}).get(status,{}).get(key,[])
mode = MIDI.ABSOLUTE
if status == MIDI.CC_STATUS:
# get mode and calculate signed int for MIDI value
mode = self.suggest_map_mode(key)
value = MIDI.relative_to_signed_int[mode](value)
for callback in callbacks:
callback(value, mode, status)
def suggest_input_port(self):
return str('Kimidi Input')
def suggest_output_port(self):
return str('Kimidi Output')
def can_lock_to_devices(self):
return True
def lock_to_device(self, device):
assert (self._device_control != None)
self._device_control.set_lock_to_device(True, device)
def unlock_from_device(self, device):
assert (self._device_control != None)
self._device_control.set_lock_to_device(False, device)
def set_appointed_device(self, device):
assert ((device == None) or isinstance(device, Live.Device.Device))
assert (self._device_control != None)
self._device_control.set_device(device)
# internal method to register callbacks from different controls
def register_midi_callback(self, callback, key, mode, status, channel):
if not channel in self.midi_callbacks:
self.midi_callbacks[channel] = {}
if not status in self.midi_callbacks[channel]:
self.midi_callbacks[channel][status] = {
key: [callback,]
}
else:
if key in self.midi_callbacks[channel][status]:
self.midi_callbacks[channel][status][key].append(callback)
else:
self.midi_callbacks[channel][status][key] = [callback, ]
| import Live
import MIDI
import settings
#from Logging import log
from SessionControl import SessionControl
from MixerControl import MixerControl
from GlobalControl import GlobalControl
from ViewControl import ViewControl
from DeviceControl import DeviceControl
from QuantizationControl import QuantizationControl
class SelectedTrackControl:
__module__ = __name__
__doc__ = 'MIDI Remote Script to control the selected track'
__name__ = "SelectedTrackControl MIDI Remote Script"
def __init__(self, c_instance):
#log("SelectedTrackControl::__init__")
self.c_instance = c_instance
# mappings for registered MIDI notes/CCs
self.midi_callbacks = {}
# lookup object for fast lookup of cc to mode
self.midi_cc_to_mode = {}
# parse midi_mapping recursive for MIDI.CC
self.mapping_parse_recursive(settings.midi_mapping.values())
self._device_control = DeviceControl(c_instance, self)
self.components = (
SessionControl(c_instance, self),
MixerControl(c_instance, self),
GlobalControl(c_instance, self),
ViewControl(c_instance, self),
self._device_control,
QuantizationControl(c_instance, self),
)
def mapping_parse_recursive(self, mapping):
tuple_type = type((1,2));
for command in mapping:
if type(command) == tuple_type:
self.mapping_parse_recursive(command)
elif isinstance(command, MIDI.CC):
#log("MIDI CC %d is %s" % (command.key, command.mode))
self.midi_cc_to_mode[command.key] = command.mode
def suggest_map_mode(self, cc_no):
#log("suggest_map_mode")
if cc_no in self.midi_cc_to_mode:
return self.midi_cc_to_mode[cc_no]
return MIDI.ABSOLUTE # see MIDI.py for definitions of modes
def disconnect(self):
for c in self.components:
c.disconnect()
def refresh_state(self):
#log("refresh_state")
#for c in self.components:
# c.refresh_state()
pass
def update_display(self):
#log("update_display")
#for c in self.components:
# c.update_display()
pass
def connect_script_instances(self, instanciated_scripts):
pass
# called from Live to build the MIDI bindings
def build_midi_map(self, midi_map_handle):
#log("SelectedTrackControl::build_midi_map")
script_handle = self.c_instance.handle()
for channel in range(16):
callbacks = self.midi_callbacks.get(channel, {})
for note in callbacks.get(MIDI.NOTEON_STATUS,{}).keys():
Live.MidiMap.forward_midi_note(script_handle, midi_map_handle, channel, note)
for cc in callbacks.get(MIDI.CC_STATUS,{}).keys():
Live.MidiMap.forward_midi_cc(script_handle, midi_map_handle, channel, cc)
# called from Live when MIDI messages are received
def receive_midi(self, midi_bytes):
channel = (midi_bytes[0] & MIDI.CHAN_MASK)
status = (midi_bytes[0] & MIDI.STATUS_MASK)
key = midi_bytes[1]
value = midi_bytes[2]
#log("receive_midi on channel %d, status %d, key %d, value %d" % (channel, status, key, value))
# execute callbacks that are registered for this event
callbacks = self.midi_callbacks.get(channel,{}).get(status,{}).get(key,[])
mode = MIDI.ABSOLUTE
if status == MIDI.CC_STATUS:
# get mode and calculate signed int for MIDI value
mode = self.suggest_map_mode(key)
value = MIDI.relative_to_signed_int[mode](value)
for callback in callbacks:
callback(value, mode, status)
def suggest_input_port(self):
return str('Kimidi Input')
def suggest_output_port(self):
return str('Kimidi Output')
def can_lock_to_devices(self):
return True
def lock_to_device(self, device):
assert (self._device_control != None)
self._device_control.set_lock_to_device(True, device)
def unlock_from_device(self, device):
assert (self._device_control != None)
self._device_control.set_lock_to_device(False, device)
def set_appointed_device(self, device):
assert ((device == None) or isinstance(device, Live.Device.Device))
assert (self._device_control != None)
self._device_control.set_device(device)
# internal method to register callbacks from different controls
def register_midi_callback(self, callback, key, mode, status, channel):
if not channel in self.midi_callbacks:
self.midi_callbacks[channel] = {}
if not status in self.midi_callbacks[channel]:
self.midi_callbacks[channel][status] = {
key: [callback,]
}
else:
if key in self.midi_callbacks[channel][status]:
self.midi_callbacks[channel][status][key].append(callback)
else:
self.midi_callbacks[channel][status][key] = [callback, ]
| en | 0.710573 | #from Logging import log #log("SelectedTrackControl::__init__") # mappings for registered MIDI notes/CCs # lookup object for fast lookup of cc to mode # parse midi_mapping recursive for MIDI.CC #log("MIDI CC %d is %s" % (command.key, command.mode)) #log("suggest_map_mode") # see MIDI.py for definitions of modes #log("refresh_state") #for c in self.components: # c.refresh_state() #log("update_display") #for c in self.components: # c.update_display() # called from Live to build the MIDI bindings #log("SelectedTrackControl::build_midi_map") # called from Live when MIDI messages are received #log("receive_midi on channel %d, status %d, key %d, value %d" % (channel, status, key, value)) # execute callbacks that are registered for this event # get mode and calculate signed int for MIDI value # internal method to register callbacks from different controls | 2.118668 | 2 |
stockze/example_app/models.py | vendari12/django-ai-algotrade | 10 | 6615731 | <gh_stars>1-10
from django.db import models
class TransactionQuerySet(models.QuerySet):
def active(self):
return self.filter(is_active=True)
class TransactionManager(models.Manager):
def get_queryset(self):
return TransactionQuerySet(self.model, using=self.db)
def all(self):
return self.get_queryset().active()
class Transaction(models.Model):
transaction_code = models.CharField(default=True, null=True, blank=True, max_length=100)
TRANSACTIONS = (
('B', 'Buy'),
('H', 'Hold'),
('S', 'Sell'),
)
action = models.CharField(default=True, null=True, blank=True, max_length=4, choices=TRANSACTIONS)
symbol = models.CharField(default=True, null=True, blank=True, max_length=6)
date_time = models.DateTimeField(auto_now=True, null=True)
share_price = models.FloatField(default=True, null=True, blank=True)
share_quant = models.FloatField(default=True, null=True, blank=True)
share_equity = models.FloatField(default=True, null=True, blank=True)
roi_total = models.FloatField(default=True, null=True, blank=True)
roi_net = models.FloatField(default=True, null=True, blank=True)
avg_buy_price = models.FloatField(default=True, null=True, blank=True)
testing = models.BooleanField(default=True)
objects = TransactionManager()
def __str__(self):
return self.transaction_code
| from django.db import models
class TransactionQuerySet(models.QuerySet):
def active(self):
return self.filter(is_active=True)
class TransactionManager(models.Manager):
def get_queryset(self):
return TransactionQuerySet(self.model, using=self.db)
def all(self):
return self.get_queryset().active()
class Transaction(models.Model):
transaction_code = models.CharField(default=True, null=True, blank=True, max_length=100)
TRANSACTIONS = (
('B', 'Buy'),
('H', 'Hold'),
('S', 'Sell'),
)
action = models.CharField(default=True, null=True, blank=True, max_length=4, choices=TRANSACTIONS)
symbol = models.CharField(default=True, null=True, blank=True, max_length=6)
date_time = models.DateTimeField(auto_now=True, null=True)
share_price = models.FloatField(default=True, null=True, blank=True)
share_quant = models.FloatField(default=True, null=True, blank=True)
share_equity = models.FloatField(default=True, null=True, blank=True)
roi_total = models.FloatField(default=True, null=True, blank=True)
roi_net = models.FloatField(default=True, null=True, blank=True)
avg_buy_price = models.FloatField(default=True, null=True, blank=True)
testing = models.BooleanField(default=True)
objects = TransactionManager()
def __str__(self):
return self.transaction_code | none | 1 | 2.103784 | 2 | |
cutimages.py | billfreeman44/cotnd-flair | 0 | 6615732 | from PIL import Image
import os
#read in image list and cut out
#images we already edited so we
#dont make infinite images
pngfolder='C:\\Users\\munka\\Desktop\\cotnd\\full_images'
imgs=os.listdir(pngfolder)
imgs.pop()
cimgs=[]
for img in imgs:
s=img.split(".")
if s[-1] == 'png': cimgs.append(s[0])
#get info about how many cols and rows
f=open("cutimages.csv",'r')
names=[]
ncols=[]
nrows=[]
nset=[]
ischaracter=[]
for line in f:
x=line.split(" ")
names.append(x[0])
ncols.append(int(x[1]))
nrows.append(int(x[2]))
nset.append(int(x[3]))
ischaracter.append(int(x[4]))
f.close()
#get info about images to skip
f=open("blacklist.txt",'r')
blist=[]
for line in f:
blist.append(line.rstrip('\n'))
f.close()
for img in cimgs:
if (img+'.png' in names) == False: print "WARNING, "+img+" not found!!"
if img+'.png' in names:
#get index for ncols and n rows
index=names.index(img+'.png')
if ischaracter[index] == 0 and ncols[index] != 0:
#load image
z=Image.open('full_images\\'+img+'.png')
z.load()
#get size
size = z.size
nx=size[0]
ny=size[1]
#loop over number of sets
for set_number in range(nset[index]):
skip_factor=abs(ncols[index]/nset[index])
print img,set_number,ncols[index],nset[index],skip_factor
single_width=nx/ncols[index]
im1=z
left=single_width*(set_number*skip_factor)
upper=0
right=single_width*(set_number*skip_factor+1)
lower=ny/nrows[index]
if set_number == 0:
post_str=''
else:
post_str='v'+str(set_number+1)
if (img+post_str+'.png').replace("_","") not in blist:
im1.crop((left, upper, right, lower)).save('cutouts\\'+img.replace("_","")+post_str+'.png')
#im1.load()
| from PIL import Image
import os
#read in image list and cut out
#images we already edited so we
#dont make infinite images
pngfolder='C:\\Users\\munka\\Desktop\\cotnd\\full_images'
imgs=os.listdir(pngfolder)
imgs.pop()
cimgs=[]
for img in imgs:
s=img.split(".")
if s[-1] == 'png': cimgs.append(s[0])
#get info about how many cols and rows
f=open("cutimages.csv",'r')
names=[]
ncols=[]
nrows=[]
nset=[]
ischaracter=[]
for line in f:
x=line.split(" ")
names.append(x[0])
ncols.append(int(x[1]))
nrows.append(int(x[2]))
nset.append(int(x[3]))
ischaracter.append(int(x[4]))
f.close()
#get info about images to skip
f=open("blacklist.txt",'r')
blist=[]
for line in f:
blist.append(line.rstrip('\n'))
f.close()
for img in cimgs:
if (img+'.png' in names) == False: print "WARNING, "+img+" not found!!"
if img+'.png' in names:
#get index for ncols and n rows
index=names.index(img+'.png')
if ischaracter[index] == 0 and ncols[index] != 0:
#load image
z=Image.open('full_images\\'+img+'.png')
z.load()
#get size
size = z.size
nx=size[0]
ny=size[1]
#loop over number of sets
for set_number in range(nset[index]):
skip_factor=abs(ncols[index]/nset[index])
print img,set_number,ncols[index],nset[index],skip_factor
single_width=nx/ncols[index]
im1=z
left=single_width*(set_number*skip_factor)
upper=0
right=single_width*(set_number*skip_factor+1)
lower=ny/nrows[index]
if set_number == 0:
post_str=''
else:
post_str='v'+str(set_number+1)
if (img+post_str+'.png').replace("_","") not in blist:
im1.crop((left, upper, right, lower)).save('cutouts\\'+img.replace("_","")+post_str+'.png')
#im1.load()
| en | 0.704706 | #read in image list and cut out #images we already edited so we #dont make infinite images #get info about how many cols and rows #get info about images to skip #get index for ncols and n rows #load image #get size #loop over number of sets #im1.load() | 2.722188 | 3 |
connect.py | iamgomes/geoconding | 1 | 6615733 | #Conecta ao banco oracle SIE
import cx_Oracle
import time
import pandas as pd
con = cx_Oracle.connect('iam/c0nsulta@ORCL_SIE')
cur = con.cursor()
cur.prepare('select * from sie.rf_cpf where num_cpf = :id')
cur.execute(None,{'id':'03169726145'})
res = cur.fetchmany(numRows=1)
print(res)
cur.close()
con.close() | #Conecta ao banco oracle SIE
import cx_Oracle
import time
import pandas as pd
con = cx_Oracle.connect('iam/c0nsulta@ORCL_SIE')
cur = con.cursor()
cur.prepare('select * from sie.rf_cpf where num_cpf = :id')
cur.execute(None,{'id':'03169726145'})
res = cur.fetchmany(numRows=1)
print(res)
cur.close()
con.close() | pt | 0.617801 | #Conecta ao banco oracle SIE | 2.53189 | 3 |
azure-servicefabric/azure/servicefabric/models/compose_application_status_info.py | SUSE/azure-sdk-for-python | 2 | 6615734 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComposeApplicationStatusInfo(Model):
"""Information about a Service Fabric compose application.
:param name:
:type name: str
:param status: Possible values include: 'Invalid', 'Provisioning',
'Creating', 'Created', 'Unprovisioning', 'Deleting', 'Failed'
:type status: str
:param status_details: The status details of compose application
including failure message.
:type status_details: str
"""
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
'status': {'key': 'Status', 'type': 'str'},
'status_details': {'key': 'StatusDetails', 'type': 'str'},
}
def __init__(self, name=None, status=None, status_details=None):
self.name = name
self.status = status
self.status_details = status_details
| # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComposeApplicationStatusInfo(Model):
"""Information about a Service Fabric compose application.
:param name:
:type name: str
:param status: Possible values include: 'Invalid', 'Provisioning',
'Creating', 'Created', 'Unprovisioning', 'Deleting', 'Failed'
:type status: str
:param status_details: The status details of compose application
including failure message.
:type status_details: str
"""
_attribute_map = {
'name': {'key': 'Name', 'type': 'str'},
'status': {'key': 'Status', 'type': 'str'},
'status_details': {'key': 'StatusDetails', 'type': 'str'},
}
def __init__(self, name=None, status=None, status_details=None):
self.name = name
self.status = status
self.status_details = status_details
| en | 0.614819 | # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- Information about a Service Fabric compose application. :param name: :type name: str :param status: Possible values include: 'Invalid', 'Provisioning', 'Creating', 'Created', 'Unprovisioning', 'Deleting', 'Failed' :type status: str :param status_details: The status details of compose application including failure message. :type status_details: str | 1.89885 | 2 |
morp_test.py | firstopinion/morp | 1 | 6615735 | <reponame>firstopinion/morp<filename>morp_test.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import logging
import sys
import time
import os
from unittest import TestCase
import inspect
import subprocess
from collections import defaultdict
import testdata
#import morp
from morp.compat import *
from morp import Message, Connection, DsnConnection
from morp.interface.sqs import SQS
from morp.interface import get_interfaces
from morp.exception import ReleaseMessage, AckMessage
# configure root logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
log_handler = logging.StreamHandler(stream=sys.stderr)
log_formatter = logging.Formatter('[%(levelname).1s] %(message)s')
log_handler.setFormatter(log_formatter)
logger.addHandler(log_handler)
logger = logging.getLogger('boto3')
logger.setLevel(logging.WARNING)
logger = logging.getLogger('botocore')
logger.setLevel(logging.WARNING)
class Client(object):
def __init__(self, contents):
module_info = testdata.create_module(contents=contents)
self.directory = module_info.basedir
self.module = module_info.module
self.message_classes = []
clear_names = {}
for _, message_class in inspect.getmembers(self.module, inspect.isclass):
if issubclass(message_class, Message):
clear_names[message_class.get_name()] = message_class
self.message_classes.append(message_class)
for message_class in clear_names.values():
message_class.clear()
def send(self, **fields):
return self.message_classes[0].create(fields)
def recv(self):
return self.run(self.message_classes[0].name)
def run(self, name, count=1, **options):
python_cmd = String(subprocess.check_output(["which", "python"]).strip())
cmd = "{} -m morp --count={} --directory={} {}".format(
python_cmd,
count,
self.directory,
name
)
expected_ret_code = options.get('code', 0)
is_py2 = True
is_py3 = False
def get_output_str(output):
return "\n".join(String(o) for o in output)
# if is_py2:
# return "\n".join(output)
# elif is_py3:
# return "\n".join((o.decode("utf-8") for o in output))
process = None
output = []
try:
process = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=os.getcwd(),
)
for line in iter(process.stdout.readline, b""):
line = line.rstrip()
print(line)
output.append(line)
process.wait()
if process.returncode != expected_ret_code:
raise RuntimeError("cmd returned {} with output: {}".format(
process.returncode,
get_output_str(output)
))
except subprocess.CalledProcessError as e:
raise RuntimeError("cmd returned {} with output: {}".format(e.returncode, e.output))
finally:
if process:
process.stdout.close()
return get_output_str(output)
class BaseInterfaceTestCase(TestCase):
    """Shared fixtures and helpers for interface-level tests.

    Every interface and queue name created during a test class run is
    recorded in ``interfaces`` so tearDownClass can delete the queues and
    close the connections afterwards.
    """

    # the morp interface class under test, set by concrete subclasses
    interface_class = None

    # maps queue name -> list of interfaces that touched it ("" = unnamed)
    interfaces = defaultdict(list)

    @classmethod
    def tearDownClass(cls):
        # clean up all the queues we made and close all the interfaces
        for name, interfaces in cls.interfaces.items():
            for i in interfaces:
                if i:
                    if name:
                        i.unsafe_delete(name)
                    i.close()

    def create_message(self, name="", interface=None, **fields):
        """Create (but don't send) an interface message with the given or
        randomly generated fields, registering the queue for cleanup."""
        name = self.get_name(name)
        interface = interface or self.get_interface()
        if not fields:
            fields[testdata.get_ascii()] = testdata.get_int()
            fields[testdata.get_ascii()] = testdata.get_int()

        msg = interface.create_message(name, fields=fields)
        type(self).interfaces[name].append(interface)
        return msg

    def get_config(self, dsn="", **options):
        """Build a DsnConnection from *dsn* (default: env var MORP_DSN_1)."""
        dsn = dsn or os.environ['MORP_DSN_1']
        # use the resolved dsn, not the raw environment value, so a
        # caller-supplied dsn is actually honored
        config = DsnConnection(dsn)
        for k, v in options.items():
            config.options[k] = v
        return config

    def get_interface(self, config=None):
        """get a connected interface"""
        config = config or self.get_config()
        i = self.interface_class(config)
        i.connect()
        type(self).interfaces[""].append(i)
        self.assertTrue(i.connected)
        return i

    def get_encrypted_interface(self, config=None):
        """get a connected interface with a client-side encryption key set"""
        options = {}
        if testdata.yes():
            # the key option can be a path to a file holding the key material
            options['key'] = testdata.create_file("/morp.key", testdata.get_ascii(100))
        else:
            # ...or the raw key material itself
            options['key'] = testdata.get_ascii(testdata.get_int(10, 200))

        if config:
            for k, v in options.items():
                config.options[k] = v
        else:
            config = self.get_config(**options)

        return self.get_interface(config)

    def get_name(self, name=""):
        """Return *name* or a random queue name, registering it for cleanup."""
        if not name:
            name = 'morp-test-' + testdata.get_ascii(12)
        type(self).interfaces[name].append(None)
        return name

    def test_message_lifecycle(self):
        # a message round-trips through its serialized body representation
        i = self.get_encrypted_interface()
        im = i.create_message(name="message-lifecycle")
        fields = {"foo": 1, "bar": 2}
        im.fields = fields
        body = im.body

        im2 = i.create_message(name="message-lifecycle")
        im2.body = body
        self.assertEqual(im.fields, im2.fields)
        self.assertEqual(im.fields, fields)

    def assertEventuallyEqual(self, v1, callback, msg="", count=10, wait=1.0):
        """Poll *callback* up to *count* times (sleeping *wait* seconds
        between attempts) until it returns *v1*."""
        ret = False
        for x in range(count - 1):
            if callback() == v1:
                ret = True
                break
            else:
                time.sleep(wait)

        if not ret:
            # final attempt goes through assertEqual for a useful message
            self.assertEqual(v1, callback(), msg)

    def test_message_encode_decode(self):
        # encrypt-on-create, decrypt-on-load round trip of the body
        fields = {"foo": testdata.get_words(), "bar": testdata.get_int()}
        i = self.get_encrypted_interface()
        im = self.create_message(name="message-lifecycle", interface=i, **fields)
        cipher_text = im.body

        im2 = i.create_message(name="message-lifecycle", body=cipher_text)
        self.assertEqual(fields, im2.fields)
class SQSInterfaceTest(BaseInterfaceTestCase):
    # exercises the SQS-backed interface against a live queue
    interface_class = SQS

    def test_queue_auto_create(self):
        """SQS queues will auto-create, this just makes sure that works as intended"""
        m = self.create_message()
        name = m.name
        i = m.interface
        i.unsafe_delete(name)

    def test_send_count_recv(self):
        interface_msg = self.create_message()
        interface_msg.send()

        # re-connect to receive the message
        i2 = self.get_interface()
        interface_msg2 = i2.recv(interface_msg.name)
        self.assertEqual(interface_msg.fields, interface_msg2.fields)

        interface_msg2.ack()
        # queue counts can lag behind an ack, so poll until it drains
        self.assertEventuallyEqual(0, lambda: i2.count(interface_msg.name))

    def test_recv_timeout(self):
        m = self.create_message()
        start = time.time()
        # recv on an empty queue should block for the full 1 second timeout
        m.interface.recv(m.name, 1)
        stop = time.time()
        self.assertLessEqual(1.0, stop - start)

    def test_send_recv_encrypted(self):
        # round trip through an interface configured with a client-side key
        m1 = self.create_message(interface=self.get_encrypted_interface())
        m1.send()

        m2 = m1.interface.recv(m1.name)
        self.assertEqual(m1.fields, m2.fields)
        m2.ack()

    def test_send_recv_aws_encryption(self):
        # server-side encryption via AWS KMS instead of a client-side key
        config = self.get_config(KmsMasterKeyId="alias/aws/sqs")
        i = self.get_interface(config)

        m1 = self.create_message(interface=i)
        m1.send()

        m2 = m1.interface.recv(m1.name)
        self.assertEqual(m1.fields, m2.fields)
        m2.ack()

    def test_get_attrs(self):
        # queue attribute kwargs should be passed through to the backend
        i = self.get_interface()
        attrs = i.get_attrs(KmsMasterKeyId="foo-bar", KmsDataKeyReusePeriodSeconds=3600)
        self.assertTrue("KmsMasterKeyId" in attrs)
class MessageTest(BaseInterfaceTestCase):
    # exercises the high level Message API on top of the SQS interface
    interface_class = SQS

    def get_msg(self, *fields, **fields_kwargs):
        # Build a throwaway Message subclass bound to a fresh queue and a
        # connected interface, then instantiate it with the passed fields
        m = self.create_message()
        n = m.name
        i = m.interface

        class TMsg(Message):
            interface = i
            @classmethod
            def get_name(cls): return n

        m = TMsg(*fields, **fields_kwargs)
        return m

    def test_create(self):
        m = self.get_msg(foo=1, bar=2)
        self.assertEqual(1, m.foo)
        self.assertEqual(2, m.bar)

        m2 = Message(
            foo=3,
            bar=4,
            morp_classpath="{}.{}".format(Message.__module__, Message.__name__)
        )
        self.assertEqual(3, m2.foo)
        self.assertEqual(4, m2.bar)

    def test_fields(self):
        """just make sure interface_message doesn't end up in the fields dict"""
        m = self.get_msg()
        m.interface_message = 1
        self.assertFalse("interface_message" in m.fields)

    def test_backoff(self):
        # TODO make this work with a backoff, this test works but doesn't do any
        # sort of visibility backoff
        m = self.get_msg()
        mcls = m.__class__
        foo = testdata.get_int()
        m.foo = foo
        m.send()

        count = 0
        for x in range(2):
            # raising inside the recv block releases the message back to the
            # queue, so the delivery count should keep climbing
            with self.assertRaises(RuntimeError):
                with mcls.recv() as m2:
                    self.assertGreater(m2.interface_message._count, count)
                    count = m2.interface_message._count
                    raise RuntimeError()

        with mcls.recv() as m2:
            self.assertGreater(m2.interface_message._count, count)
            self.assertEqual(m2.foo, m.foo)

    def test_release_1(self):
        m = self.get_msg(foo=testdata.get_int())
        mcls = m.__class__
        m.send()

        # an unhandled error releases the message so it can be received again
        with self.assertRaises(RuntimeError):
            with mcls.recv() as m2:
                raise RuntimeError()

        with mcls.recv() as m2:
            self.assertEqual(m2.foo, m.foo)

    def test_release_message(self):
        m = self.get_msg(foo=testdata.get_int())
        mcls = m.__class__
        m.send()

        # ReleaseMessage(2) is absorbed by the recv block (no assertRaises
        # needed) and re-queues the message with a 2 second delay
        with mcls.recv() as m2:
            raise ReleaseMessage(2)

        # not visible yet...
        with mcls.recv_for(1) as m2:
            self.assertEqual(None, m2)

        # ...but visible after the delay has elapsed
        time.sleep(1)
        with mcls.recv_for(1) as m2:
            self.assertEqual(m.foo, m2.foo)

    def test_ack_message(self):
        m = self.get_msg(foo=testdata.get_int())
        mcls = m.__class__
        m.send()

        # AckMessage is absorbed by the recv block and acks immediately,
        # so the queue should be empty afterwards
        with mcls.recv() as m2:
            raise AckMessage()

        with mcls.recv_for(timeout=1) as m2:
            self.assertEqual(None, m2)

    def test_send_recv(self):
        m = self.get_msg(foo=1, bar=2)
        m.send()

        with m.__class__.recv() as m2:
            self.assertEqual(m.fields, m2.fields)

    def test_send_later(self):
        m = self.get_msg(foo=1, bar=2)
        m.send_later(2)

        # not visible before the 2 second delay has elapsed
        with m.__class__.recv_for(1) as m2:
            self.assertEqual(None, m2)

        time.sleep(1)
        with m.__class__.recv_for(1) as m2:
            self.assertEqual(m.fields, m2.fields)

    def test_recv_block_success(self):
        m = self.get_msg(foo=10, bar=20)
        m.send()

        with m.__class__.recv() as m2:
            self.assertEqual(m.fields, m2.fields)

    def test_recv_block_error(self):
        m = self.get_msg(foo=10)
        mcls = m.__class__
        m.send()

        kwargs = {
            "vtimeout": 1,
            "timeout": 2
        }

        # failure releases the message; after the visibility timeout passes
        # it becomes receivable again
        with self.assertRaises(RuntimeError):
            with mcls.recv(**kwargs) as m2:
                raise RuntimeError()

        time.sleep(1.2)

        # with ack_on_recv=True the message is acked even though the block
        # raised, so the queue should drain
        kwargs["ack_on_recv"] = True
        with self.assertRaises(RuntimeError):
            with mcls.recv(**kwargs) as m2:
                raise RuntimeError()

        time.sleep(1.2)
        with mcls.recv_for(timeout=1) as m2:
            self.assertEqual(None, m2)
class ConnectionTest(TestCase):
    """Tests for Connection/DsnConnection configuration parsing."""

    def test_key(self):
        # no key configured -> empty key, but stable across accesses
        c = Connection()
        self.assertEqual("", c.key)
        self.assertEqual(c.key, c.key)

        # key option holding raw key material
        key = testdata.get_ascii(100)
        c = Connection(options=dict(key=key))
        self.assertNotEqual(b"", ByteString(c.key))
        self.assertEqual(c.key, c.key)

        # key option holding a path to a file containing the key material
        key_path = testdata.create_file("morp.key", testdata.get_ascii(100))
        c = Connection(options=dict(key=key_path))
        self.assertNotEqual(b"", ByteString(c.key))
        self.assertEqual(c.key, c.key)

    def test_dsn_connection(self):
        # (dsn, expected attribute values) pairs
        tests = [
            (
                'path.to.Interface://127.0.0.1:4151',
                dict(
                    hosts=[('127.0.0.1', 4151)],
                    interface_name="path.to.Interface",
                    name=''
                )
            ),
            (
                'module.path.to.Interface://example.com:4161#name',
                dict(
                    hosts=[('example.com', 4161)],
                    interface_name='module.path.to.Interface',
                    name="name"
                )
            ),
            (
                'module.path.to.Interface://example.com:4161?foo=bar&bar=che#name',
                dict(
                    hosts=[('example.com', 4161)],
                    interface_name='module.path.to.Interface',
                    options={"foo": "bar", "bar": "che"},
                    name="name"
                )
            ),
            (
                "morp.interface.sqs.SQS://AWS_ID:AWS_KEY@?read_lock=120",
                dict(
                    username='AWS_ID',
                    # must match the credential embedded in the dsn above
                    password='AWS_KEY',
                    interface_name='morp.interface.sqs.SQS',
                    options={'read_lock': '120'}
                )
            ),
            (
                "morp.interface.sqs.SQS://AWS_ID:AWS_KEY@",
                dict(
                    username='AWS_ID',
                    password='AWS_KEY',
                    interface_name='morp.interface.sqs.SQS',
                    options={}
                )
            )
        ]

        for t in tests:
            c = DsnConnection(t[0])
            for k, v in t[1].items():
                self.assertEqual(v, getattr(c, k))

    def test_attrs_and_sqs_alias(self):
        # the bare "SQS" scheme is an alias for the full interface path and
        # unknown query params land in options
        c = DsnConnection("SQS://AWS_ID:AWS_KEY@?KmsMasterKeyId=foo-bar")
        self.assertTrue(c.interface_name.startswith("morp"))
        self.assertTrue("KmsMasterKeyId" in c.options)
class CLITest(TestCase):
    """End-to-end test of the ``python -m morp`` consumer CLI."""

    def test_consume(self):
        # module source handed to the CLI consumer; every line needs its own
        # trailing comma or adjacent string literals silently concatenate and
        # the generated module body collapses
        c = Client([
            "from morp import Message",
            "",
            "class Consume(Message):",
            "    def target(self):",
            "        print(self.text)",
            "",
            "class Consume2(Consume):",
            "    pass",
        ])

        # a message handled by the base consumer class
        m = c.message_classes[0].create(text="foobar")
        r = c.recv()
        self.assertTrue(m.text in r)

        # and one handled by the inheriting consumer class
        m = c.message_classes[1].create(text="bazche")
        r = c.recv()
        self.assertTrue(m.text in r)
# so test runner won't try and run it
# (the abstract base would otherwise be collected and its tests executed
# without a concrete interface_class)
del BaseInterfaceTestCase
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
import logging
import sys
import time
import os
from unittest import TestCase
import inspect
import subprocess
from collections import defaultdict
import testdata
#import morp
from morp.compat import *
from morp import Message, Connection, DsnConnection
from morp.interface.sqs import SQS
from morp.interface import get_interfaces
from morp.exception import ReleaseMessage, AckMessage
# configure root logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
log_handler = logging.StreamHandler(stream=sys.stderr)
log_formatter = logging.Formatter('[%(levelname).1s] %(message)s')
log_handler.setFormatter(log_formatter)
logger.addHandler(log_handler)

# quiet the chatty AWS client libraries
logger = logging.getLogger('boto3')
logger.setLevel(logging.WARNING)
logger = logging.getLogger('botocore')
logger.setLevel(logging.WARNING)
class Client(object):
    """Drive the ``morp`` command-line consumer against a generated module.

    Creates a throwaway module from *contents*, collects every Message
    subclass it defines, and runs ``python -m morp`` as a subprocess to
    consume queued messages.
    """

    def __init__(self, contents):
        module_info = testdata.create_module(contents=contents)
        self.directory = module_info.basedir
        self.module = module_info.module

        self.message_classes = []
        clear_names = {}
        for _, message_class in inspect.getmembers(self.module, inspect.isclass):
            if issubclass(message_class, Message):
                # de-dupe by queue name so each backing queue is cleared once
                clear_names[message_class.get_name()] = message_class
                self.message_classes.append(message_class)

        for message_class in clear_names.values():
            message_class.clear()

    def send(self, **fields):
        """Create/send *fields* as a message of the first discovered class."""
        return self.message_classes[0].create(fields)

    def recv(self):
        """Consume one message of the first discovered class via the CLI."""
        return self.run(self.message_classes[0].name)

    def run(self, name, count=1, **options):
        """Run ``python -m morp`` to consume *count* messages from *name*.

        :param name: queue name to consume from
        :param count: how many messages the CLI should handle before exiting
        :param options: ``code`` -- expected process return code (default 0)
        :returns: captured stdout/stderr of the subprocess as one string
        :raises RuntimeError: if the process exits with an unexpected code
        """
        python_cmd = String(subprocess.check_output(["which", "python"]).strip())
        cmd = "{} -m morp --count={} --directory={} {}".format(
            python_cmd,
            count,
            self.directory,
            name
        )

        expected_ret_code = options.get('code', 0)

        def get_output_str(output):
            # normalize the captured byte lines into one printable string
            return "\n".join(String(o) for o in output)

        process = None
        output = []
        try:
            process = subprocess.Popen(
                cmd,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                cwd=os.getcwd(),
            )

            # stream the child's output line by line so long runs stay visible
            for line in iter(process.stdout.readline, b""):
                line = line.rstrip()
                print(line)
                output.append(line)

            process.wait()
            if process.returncode != expected_ret_code:
                raise RuntimeError("cmd returned {} with output: {}".format(
                    process.returncode,
                    get_output_str(output)
                ))

        except subprocess.CalledProcessError as e:
            raise RuntimeError("cmd returned {} with output: {}".format(e.returncode, e.output))

        finally:
            if process:
                process.stdout.close()

        return get_output_str(output)
class BaseInterfaceTestCase(TestCase):
    """Shared fixtures and helpers for interface-level tests.

    Every interface and queue name created during a test class run is
    recorded in ``interfaces`` so tearDownClass can delete the queues and
    close the connections afterwards.
    """

    # the morp interface class under test, set by concrete subclasses
    interface_class = None

    # maps queue name -> list of interfaces that touched it ("" = unnamed)
    interfaces = defaultdict(list)

    @classmethod
    def tearDownClass(cls):
        # clean up all the queues we made and close all the interfaces
        for name, interfaces in cls.interfaces.items():
            for i in interfaces:
                if i:
                    if name:
                        i.unsafe_delete(name)
                    i.close()

    def create_message(self, name="", interface=None, **fields):
        """Create (but don't send) an interface message with the given or
        randomly generated fields, registering the queue for cleanup."""
        name = self.get_name(name)
        interface = interface or self.get_interface()
        if not fields:
            fields[testdata.get_ascii()] = testdata.get_int()
            fields[testdata.get_ascii()] = testdata.get_int()

        msg = interface.create_message(name, fields=fields)
        type(self).interfaces[name].append(interface)
        return msg

    def get_config(self, dsn="", **options):
        """Build a DsnConnection from *dsn* (default: env var MORP_DSN_1)."""
        dsn = dsn or os.environ['MORP_DSN_1']
        # use the resolved dsn, not the raw environment value, so a
        # caller-supplied dsn is actually honored
        config = DsnConnection(dsn)
        for k, v in options.items():
            config.options[k] = v
        return config

    def get_interface(self, config=None):
        """get a connected interface"""
        config = config or self.get_config()
        i = self.interface_class(config)
        i.connect()
        type(self).interfaces[""].append(i)
        self.assertTrue(i.connected)
        return i

    def get_encrypted_interface(self, config=None):
        """get a connected interface with a client-side encryption key set"""
        options = {}
        if testdata.yes():
            # the key option can be a path to a file holding the key material
            options['key'] = testdata.create_file("/morp.key", testdata.get_ascii(100))
        else:
            # ...or the raw key material itself
            options['key'] = testdata.get_ascii(testdata.get_int(10, 200))

        if config:
            for k, v in options.items():
                config.options[k] = v
        else:
            config = self.get_config(**options)

        return self.get_interface(config)

    def get_name(self, name=""):
        """Return *name* or a random queue name, registering it for cleanup."""
        if not name:
            name = 'morp-test-' + testdata.get_ascii(12)
        type(self).interfaces[name].append(None)
        return name

    def test_message_lifecycle(self):
        # a message round-trips through its serialized body representation
        i = self.get_encrypted_interface()
        im = i.create_message(name="message-lifecycle")
        fields = {"foo": 1, "bar": 2}
        im.fields = fields
        body = im.body

        im2 = i.create_message(name="message-lifecycle")
        im2.body = body
        self.assertEqual(im.fields, im2.fields)
        self.assertEqual(im.fields, fields)

    def assertEventuallyEqual(self, v1, callback, msg="", count=10, wait=1.0):
        """Poll *callback* up to *count* times (sleeping *wait* seconds
        between attempts) until it returns *v1*."""
        ret = False
        for x in range(count - 1):
            if callback() == v1:
                ret = True
                break
            else:
                time.sleep(wait)

        if not ret:
            # final attempt goes through assertEqual for a useful message
            self.assertEqual(v1, callback(), msg)

    def test_message_encode_decode(self):
        # encrypt-on-create, decrypt-on-load round trip of the body
        fields = {"foo": testdata.get_words(), "bar": testdata.get_int()}
        i = self.get_encrypted_interface()
        im = self.create_message(name="message-lifecycle", interface=i, **fields)
        cipher_text = im.body

        im2 = i.create_message(name="message-lifecycle", body=cipher_text)
        self.assertEqual(fields, im2.fields)
class SQSInterfaceTest(BaseInterfaceTestCase):
    # exercises the SQS-backed interface against a live queue
    interface_class = SQS

    def test_queue_auto_create(self):
        """SQS queues will auto-create, this just makes sure that works as intended"""
        m = self.create_message()
        name = m.name
        i = m.interface
        i.unsafe_delete(name)

    def test_send_count_recv(self):
        interface_msg = self.create_message()
        interface_msg.send()

        # re-connect to receive the message
        i2 = self.get_interface()
        interface_msg2 = i2.recv(interface_msg.name)
        self.assertEqual(interface_msg.fields, interface_msg2.fields)

        interface_msg2.ack()
        # queue counts can lag behind an ack, so poll until it drains
        self.assertEventuallyEqual(0, lambda: i2.count(interface_msg.name))

    def test_recv_timeout(self):
        m = self.create_message()
        start = time.time()
        # recv on an empty queue should block for the full 1 second timeout
        m.interface.recv(m.name, 1)
        stop = time.time()
        self.assertLessEqual(1.0, stop - start)

    def test_send_recv_encrypted(self):
        # round trip through an interface configured with a client-side key
        m1 = self.create_message(interface=self.get_encrypted_interface())
        m1.send()

        m2 = m1.interface.recv(m1.name)
        self.assertEqual(m1.fields, m2.fields)
        m2.ack()

    def test_send_recv_aws_encryption(self):
        # server-side encryption via AWS KMS instead of a client-side key
        config = self.get_config(KmsMasterKeyId="alias/aws/sqs")
        i = self.get_interface(config)

        m1 = self.create_message(interface=i)
        m1.send()

        m2 = m1.interface.recv(m1.name)
        self.assertEqual(m1.fields, m2.fields)
        m2.ack()

    def test_get_attrs(self):
        # queue attribute kwargs should be passed through to the backend
        i = self.get_interface()
        attrs = i.get_attrs(KmsMasterKeyId="foo-bar", KmsDataKeyReusePeriodSeconds=3600)
        self.assertTrue("KmsMasterKeyId" in attrs)
class MessageTest(BaseInterfaceTestCase):
    # exercises the high level Message API on top of the SQS interface
    interface_class = SQS

    def get_msg(self, *fields, **fields_kwargs):
        # Build a throwaway Message subclass bound to a fresh queue and a
        # connected interface, then instantiate it with the passed fields
        m = self.create_message()
        n = m.name
        i = m.interface

        class TMsg(Message):
            interface = i
            @classmethod
            def get_name(cls): return n

        m = TMsg(*fields, **fields_kwargs)
        return m

    def test_create(self):
        m = self.get_msg(foo=1, bar=2)
        self.assertEqual(1, m.foo)
        self.assertEqual(2, m.bar)

        m2 = Message(
            foo=3,
            bar=4,
            morp_classpath="{}.{}".format(Message.__module__, Message.__name__)
        )
        self.assertEqual(3, m2.foo)
        self.assertEqual(4, m2.bar)

    def test_fields(self):
        """just make sure interface_message doesn't end up in the fields dict"""
        m = self.get_msg()
        m.interface_message = 1
        self.assertFalse("interface_message" in m.fields)

    def test_backoff(self):
        # TODO make this work with a backoff, this test works but doesn't do any
        # sort of visibility backoff
        m = self.get_msg()
        mcls = m.__class__
        foo = testdata.get_int()
        m.foo = foo
        m.send()

        count = 0
        for x in range(2):
            # raising inside the recv block releases the message back to the
            # queue, so the delivery count should keep climbing
            with self.assertRaises(RuntimeError):
                with mcls.recv() as m2:
                    self.assertGreater(m2.interface_message._count, count)
                    count = m2.interface_message._count
                    raise RuntimeError()

        with mcls.recv() as m2:
            self.assertGreater(m2.interface_message._count, count)
            self.assertEqual(m2.foo, m.foo)

    def test_release_1(self):
        m = self.get_msg(foo=testdata.get_int())
        mcls = m.__class__
        m.send()

        # an unhandled error releases the message so it can be received again
        with self.assertRaises(RuntimeError):
            with mcls.recv() as m2:
                raise RuntimeError()

        with mcls.recv() as m2:
            self.assertEqual(m2.foo, m.foo)

    def test_release_message(self):
        m = self.get_msg(foo=testdata.get_int())
        mcls = m.__class__
        m.send()

        # ReleaseMessage(2) is absorbed by the recv block (no assertRaises
        # needed) and re-queues the message with a 2 second delay
        with mcls.recv() as m2:
            raise ReleaseMessage(2)

        # not visible yet...
        with mcls.recv_for(1) as m2:
            self.assertEqual(None, m2)

        # ...but visible after the delay has elapsed
        time.sleep(1)
        with mcls.recv_for(1) as m2:
            self.assertEqual(m.foo, m2.foo)

    def test_ack_message(self):
        m = self.get_msg(foo=testdata.get_int())
        mcls = m.__class__
        m.send()

        # AckMessage is absorbed by the recv block and acks immediately,
        # so the queue should be empty afterwards
        with mcls.recv() as m2:
            raise AckMessage()

        with mcls.recv_for(timeout=1) as m2:
            self.assertEqual(None, m2)

    def test_send_recv(self):
        m = self.get_msg(foo=1, bar=2)
        m.send()

        with m.__class__.recv() as m2:
            self.assertEqual(m.fields, m2.fields)

    def test_send_later(self):
        m = self.get_msg(foo=1, bar=2)
        m.send_later(2)

        # not visible before the 2 second delay has elapsed
        with m.__class__.recv_for(1) as m2:
            self.assertEqual(None, m2)

        time.sleep(1)
        with m.__class__.recv_for(1) as m2:
            self.assertEqual(m.fields, m2.fields)

    def test_recv_block_success(self):
        m = self.get_msg(foo=10, bar=20)
        m.send()

        with m.__class__.recv() as m2:
            self.assertEqual(m.fields, m2.fields)

    def test_recv_block_error(self):
        m = self.get_msg(foo=10)
        mcls = m.__class__
        m.send()

        kwargs = {
            "vtimeout": 1,
            "timeout": 2
        }

        # failure releases the message; after the visibility timeout passes
        # it becomes receivable again
        with self.assertRaises(RuntimeError):
            with mcls.recv(**kwargs) as m2:
                raise RuntimeError()

        time.sleep(1.2)

        # with ack_on_recv=True the message is acked even though the block
        # raised, so the queue should drain
        kwargs["ack_on_recv"] = True
        with self.assertRaises(RuntimeError):
            with mcls.recv(**kwargs) as m2:
                raise RuntimeError()

        time.sleep(1.2)
        with mcls.recv_for(timeout=1) as m2:
            self.assertEqual(None, m2)
class ConnectionTest(TestCase):
    """Tests for Connection/DsnConnection configuration parsing."""

    def test_key(self):
        # no key configured -> empty key, but stable across accesses
        c = Connection()
        self.assertEqual("", c.key)
        self.assertEqual(c.key, c.key)

        # key option holding raw key material
        key = testdata.get_ascii(100)
        c = Connection(options=dict(key=key))
        self.assertNotEqual(b"", ByteString(c.key))
        self.assertEqual(c.key, c.key)

        # key option holding a path to a file containing the key material
        key_path = testdata.create_file("morp.key", testdata.get_ascii(100))
        c = Connection(options=dict(key=key_path))
        self.assertNotEqual(b"", ByteString(c.key))
        self.assertEqual(c.key, c.key)

    def test_dsn_connection(self):
        # (dsn, expected attribute values) pairs
        tests = [
            (
                'path.to.Interface://127.0.0.1:4151',
                dict(
                    hosts=[('127.0.0.1', 4151)],
                    interface_name="path.to.Interface",
                    name=''
                )
            ),
            (
                'module.path.to.Interface://example.com:4161#name',
                dict(
                    hosts=[('example.com', 4161)],
                    interface_name='module.path.to.Interface',
                    name="name"
                )
            ),
            (
                'module.path.to.Interface://example.com:4161?foo=bar&bar=che#name',
                dict(
                    hosts=[('example.com', 4161)],
                    interface_name='module.path.to.Interface',
                    options={"foo": "bar", "bar": "che"},
                    name="name"
                )
            ),
            (
                "morp.interface.sqs.SQS://AWS_ID:AWS_KEY@?read_lock=120",
                dict(
                    username='AWS_ID',
                    # must match the credential embedded in the dsn above
                    password='AWS_KEY',
                    interface_name='morp.interface.sqs.SQS',
                    options={'read_lock': '120'}
                )
            ),
            (
                "morp.interface.sqs.SQS://AWS_ID:AWS_KEY@",
                dict(
                    username='AWS_ID',
                    password='AWS_KEY',
                    interface_name='morp.interface.sqs.SQS',
                    options={}
                )
            )
        ]

        for t in tests:
            c = DsnConnection(t[0])
            for k, v in t[1].items():
                self.assertEqual(v, getattr(c, k))

    def test_attrs_and_sqs_alias(self):
        # the bare "SQS" scheme is an alias for the full interface path and
        # unknown query params land in options
        c = DsnConnection("SQS://AWS_ID:AWS_KEY@?KmsMasterKeyId=foo-bar")
        self.assertTrue(c.interface_name.startswith("morp"))
        self.assertTrue("KmsMasterKeyId" in c.options)
class CLITest(TestCase):
    """End-to-end test of the ``python -m morp`` consumer CLI."""

    def test_consume(self):
        # module source handed to the CLI consumer; every line needs its own
        # trailing comma or adjacent string literals silently concatenate and
        # the generated module body collapses
        c = Client([
            "from morp import Message",
            "",
            "class Consume(Message):",
            "    def target(self):",
            "        print(self.text)",
            "",
            "class Consume2(Consume):",
            "    pass",
        ])

        # a message handled by the base consumer class
        m = c.message_classes[0].create(text="foobar")
        r = c.recv()
        self.assertTrue(m.text in r)

        # and one handled by the inheriting consumer class
        m = c.message_classes[1].create(text="bazche")
        r = c.recv()
        self.assertTrue(m.text in r)
# so test runner won't try and run it
# (the abstract base would otherwise be collected and its tests executed
# without a concrete interface_class)
del BaseInterfaceTestCase
info/utils/common.py | rymmx/My_information | 1 | 6615736 | """
过滤器本质是函数
自定义过滤器步骤
1.自定义一个python函数去实现业务逻辑
2.通过app对象将函数添加到系统过滤器中
3.使用自定义过滤器
"""
# 1.自定义一个python函数去实现业务逻辑
from flask import session,current_app,jsonify,g
from info.response_code import RET
def do_ranklist_class(index):
if index == 0:
return "first"
elif index == 1:
return "second"
elif index == 2:
return "third"
else:
return ""
import functools
"""
需求:查询当前登陆用户对象的代码在多个视图函数都需要使用,我们可以用装饰器将其封装起来
view_func ,被装饰的函数名称
问题:装饰器会改变被装饰的视图函数的名称
方案:functools_wraps(视图函数名称)
"""
def get_user_info(view_func):
@functools.wraps(view_func)
def wrapper(*args,**kwargs):
# 1.装饰视图函数新增的需求
# 获取session用户中的id
user_id = session.get("user_id")
# 延迟导入解决循环导入的问题
from info.models import User
# 根据user_id查询当前用户对象
user = None # type:User
if user_id:
try:
user = User.query.get("user_id")
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg="查询用户对象异常")
# 将用户对象保存起来供给视图函数使用
# 全局的临时变量g保存用户对象,只要请求未结束,g变量中的值就不会改变
g.user = user
# 2.被装饰的视图函数原有功能实现
result = view_func(*args,**kwargs)
return result
return wrapper
| """
过滤器本质是函数
自定义过滤器步骤
1.自定义一个python函数去实现业务逻辑
2.通过app对象将函数添加到系统过滤器中
3.使用自定义过滤器
"""
# 1.自定义一个python函数去实现业务逻辑
from flask import session,current_app,jsonify,g
from info.response_code import RET
def do_ranklist_class(index):
if index == 0:
return "first"
elif index == 1:
return "second"
elif index == 2:
return "third"
else:
return ""
import functools
"""
需求:查询当前登陆用户对象的代码在多个视图函数都需要使用,我们可以用装饰器将其封装起来
view_func ,被装饰的函数名称
问题:装饰器会改变被装饰的视图函数的名称
方案:functools_wraps(视图函数名称)
"""
def get_user_info(view_func):
@functools.wraps(view_func)
def wrapper(*args,**kwargs):
# 1.装饰视图函数新增的需求
# 获取session用户中的id
user_id = session.get("user_id")
# 延迟导入解决循环导入的问题
from info.models import User
# 根据user_id查询当前用户对象
user = None # type:User
if user_id:
try:
user = User.query.get("user_id")
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR,errmsg="查询用户对象异常")
# 将用户对象保存起来供给视图函数使用
# 全局的临时变量g保存用户对象,只要请求未结束,g变量中的值就不会改变
g.user = user
# 2.被装饰的视图函数原有功能实现
result = view_func(*args,**kwargs)
return result
return wrapper
| zh | 0.961524 | 过滤器本质是函数 自定义过滤器步骤 1.自定义一个python函数去实现业务逻辑 2.通过app对象将函数添加到系统过滤器中 3.使用自定义过滤器 # 1.自定义一个python函数去实现业务逻辑 需求:查询当前登陆用户对象的代码在多个视图函数都需要使用,我们可以用装饰器将其封装起来 view_func ,被装饰的函数名称 问题:装饰器会改变被装饰的视图函数的名称 方案:functools_wraps(视图函数名称) # 1.装饰视图函数新增的需求 # 获取session用户中的id # 延迟导入解决循环导入的问题 # 根据user_id查询当前用户对象 # type:User # 将用户对象保存起来供给视图函数使用 # 全局的临时变量g保存用户对象,只要请求未结束,g变量中的值就不会改变 # 2.被装饰的视图函数原有功能实现 | 2.95747 | 3 |
tests/test_hygene.py | cedorman/footballmodel | 0 | 6615737 | import logging
from unittest import TestCase
import numpy as np
from hygene.cue import Cue
from hygene.hygene import Hygene
from tests.original_data import *
logging.basicConfig(
level=logging.INFO,
# level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class TestHygene(TestCase):
def test_hygene(self):
hy = Hygene(0, TEST_ACTIVATION_THRESHOLD)
hy.set_probe(Cue.probe(TEST_PROBE))
hy.set_traces([Cue(TEST_DATA[ii], TEST_HYPO[ii], TEST_EVENT[ii]) for ii in range(len(TEST_DATA))])
hy.compute_activations()
# --------------------
# For each Cue, make sure that the activation is correct
# --------------------
for ii in range(0, len(TEST_ACTIVATION)):
act = hy.get_activation(ii)
np.testing.assert_almost_equal(act, TEST_ACTIVATION[ii], decimal=4)
# --------------------
# Get the content vector and make sure that it is correct
# --------------------
content = hy.calculate_content_vectors()
logging.warning(f"Content vector: {content.vals}")
for ii in range(0, len(content.vals)):
np.testing.assert_almost_equal(content.vals[ii], TEST_CONTENT_VECTOR[ii], decimal=1)
logging.warning(f"Content hypo vector: {content.hypo}")
for ii in range(0, len(content.hypo)):
np.testing.assert_almost_equal(content.hypo[ii], TEST_CONTENT_HYPO_VECTOR[ii], decimal=1)
# --------------------
# Get unspecified probe
# --------------------
unspec_probe = hy.get_unspecified_probe()
logging.warning(f"Content unspec_probe: {unspec_probe}")
for ii in range(0, len(TEST_UNSPEC_PROBE_DATA)):
np.testing.assert_almost_equal(unspec_probe.vals[ii], TEST_UNSPEC_PROBE_DATA[ii], decimal=1)
for ii in range(0, len(TEST_UNSPEC_PROBE_HYPO)):
np.testing.assert_almost_equal(unspec_probe.hypo[ii], TEST_UNSPEC_PROBE_HYPO[ii], decimal=1)
# --------------------
# Calc relevant hypotheses
# --------------------
hy.set_semantic_memory(
[Cue(TEST_SEMANTIC_MEMORY_DATA[ii],
TEST_SEMANTIC_MEMORY_HYPO[ii],
TEST_SEMANTIC_MEMORY_EVENT[ii])
for ii in range(len(TEST_SEMANTIC_MEMORY_DATA))])
semantic_hypothesis_activations = hy.get_semantic_activations()
logging.warning(f"Hypothesis activations: {semantic_hypothesis_activations}")
for ii, semantic_cue in enumerate(hy.semantic):
act = semantic_cue.get_activation()
np.testing.assert_almost_equal(act, TEST_SEMANTIC_ACTIVATION_NORMED[ii], decimal=2)
# --------------------
# Sample
# --------------------
hy.sample_hypotheses()
# --------------------
# Calc probabilities
# --------------------
hy.set_soc([0])
echo_intensities= hy.get_echo_intensities()
logging.warning(f"Echo intensity for the first semantic memory component: {echo_intensities}")
np.testing.assert_almost_equal(echo_intensities[0], TEST_H1_ECHO_INTENSITY, decimal=3)
probs = hy.get_probabilities()
logging.warning(f"Probability the first semantic memory component: {probs}")
np.testing.assert_almost_equal(probs[0], TEST_H1_PROBABILITY)
| import logging
from unittest import TestCase
import numpy as np
from hygene.cue import Cue
from hygene.hygene import Hygene
from tests.original_data import *
logging.basicConfig(
level=logging.INFO,
# level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class TestHygene(TestCase):
def test_hygene(self):
hy = Hygene(0, TEST_ACTIVATION_THRESHOLD)
hy.set_probe(Cue.probe(TEST_PROBE))
hy.set_traces([Cue(TEST_DATA[ii], TEST_HYPO[ii], TEST_EVENT[ii]) for ii in range(len(TEST_DATA))])
hy.compute_activations()
# --------------------
# For each Cue, make sure that the activation is correct
# --------------------
for ii in range(0, len(TEST_ACTIVATION)):
act = hy.get_activation(ii)
np.testing.assert_almost_equal(act, TEST_ACTIVATION[ii], decimal=4)
# --------------------
# Get the content vector and make sure that it is correct
# --------------------
content = hy.calculate_content_vectors()
logging.warning(f"Content vector: {content.vals}")
for ii in range(0, len(content.vals)):
np.testing.assert_almost_equal(content.vals[ii], TEST_CONTENT_VECTOR[ii], decimal=1)
logging.warning(f"Content hypo vector: {content.hypo}")
for ii in range(0, len(content.hypo)):
np.testing.assert_almost_equal(content.hypo[ii], TEST_CONTENT_HYPO_VECTOR[ii], decimal=1)
# --------------------
# Get unspecified probe
# --------------------
unspec_probe = hy.get_unspecified_probe()
logging.warning(f"Content unspec_probe: {unspec_probe}")
for ii in range(0, len(TEST_UNSPEC_PROBE_DATA)):
np.testing.assert_almost_equal(unspec_probe.vals[ii], TEST_UNSPEC_PROBE_DATA[ii], decimal=1)
for ii in range(0, len(TEST_UNSPEC_PROBE_HYPO)):
np.testing.assert_almost_equal(unspec_probe.hypo[ii], TEST_UNSPEC_PROBE_HYPO[ii], decimal=1)
# --------------------
# Calc relevant hypotheses
# --------------------
hy.set_semantic_memory(
[Cue(TEST_SEMANTIC_MEMORY_DATA[ii],
TEST_SEMANTIC_MEMORY_HYPO[ii],
TEST_SEMANTIC_MEMORY_EVENT[ii])
for ii in range(len(TEST_SEMANTIC_MEMORY_DATA))])
semantic_hypothesis_activations = hy.get_semantic_activations()
logging.warning(f"Hypothesis activations: {semantic_hypothesis_activations}")
for ii, semantic_cue in enumerate(hy.semantic):
act = semantic_cue.get_activation()
np.testing.assert_almost_equal(act, TEST_SEMANTIC_ACTIVATION_NORMED[ii], decimal=2)
# --------------------
# Sample
# --------------------
hy.sample_hypotheses()
# --------------------
# Calc probabilities
# --------------------
hy.set_soc([0])
echo_intensities= hy.get_echo_intensities()
logging.warning(f"Echo intensity for the first semantic memory component: {echo_intensities}")
np.testing.assert_almost_equal(echo_intensities[0], TEST_H1_ECHO_INTENSITY, decimal=3)
probs = hy.get_probabilities()
logging.warning(f"Probability the first semantic memory component: {probs}")
np.testing.assert_almost_equal(probs[0], TEST_H1_PROBABILITY)
| en | 0.486805 | # level=logging.DEBUG, # -------------------- # For each Cue, make sure that the activation is correct # -------------------- # -------------------- # Get the content vector and make sure that it is correct # -------------------- # -------------------- # Get unspecified probe # -------------------- # -------------------- # Calc relevant hypotheses # -------------------- # -------------------- # Sample # -------------------- # -------------------- # Calc probabilities # -------------------- | 2.457782 | 2 |
CBD/3-pyspark/programs.py | zhonskate/MCPD | 0 | 6615738 | # 0 Para cada tienda, obtener la transacción de máximo importe.
sc.textFile("/datasets/purchases/purchases.txt").map(lambda s: s.split("\t")).map(lambda rec: (rec[2], float(rec[4]))).reduceByKey(max).take(1000)
# 1 Suma total de ventas para cada categoría de producto.
sc.textFile("/datasets/purchases/purchases.txt").map(lambda s: s.split("\t")).map(lambda rec: (rec[3], float(rec[4]))).reduceByKey(lambda x,y:x+y ).take(1000)
# 2 Número total de accesos al recurso "/assets/img/home-logo.png”
sc.textFile("/datasets/accesslog/access_log").map(lambda s: s.split(" ")).map(lambda x: (x[6],1)).filter(lambda rec: rec[0]=="/assets/img/home-logo.png").count()
# 3 Número total de accesos desde la misma dirección IP: 10.223.157.186
sc.textFile("/datasets/accesslog/access_log").map(lambda s: s.split(" ")).map(lambda rec: (rec[0],1)).filter(lambda rec: rec[0]=="10.223.157.186").count()
# 4 Recurso web con mayor número de accesos
sc.textFile("/datasets/accesslog/access_log").map(lambda s: s.split(" ")).map(lambda rec: (rec[6],1)).reduceByKey(lambda x,y: x+y).max(key=lambda x: x[1])
| # 0 Para cada tienda, obtener la transacción de máximo importe.
sc.textFile("/datasets/purchases/purchases.txt").map(lambda s: s.split("\t")).map(lambda rec: (rec[2], float(rec[4]))).reduceByKey(max).take(1000)
# 1 Suma total de ventas para cada categoría de producto.
sc.textFile("/datasets/purchases/purchases.txt").map(lambda s: s.split("\t")).map(lambda rec: (rec[3], float(rec[4]))).reduceByKey(lambda x,y:x+y ).take(1000)
# 2 Número total de accesos al recurso "/assets/img/home-logo.png”
sc.textFile("/datasets/accesslog/access_log").map(lambda s: s.split(" ")).map(lambda x: (x[6],1)).filter(lambda rec: rec[0]=="/assets/img/home-logo.png").count()
# 3 Número total de accesos desde la misma dirección IP: 10.223.157.186
sc.textFile("/datasets/accesslog/access_log").map(lambda s: s.split(" ")).map(lambda rec: (rec[0],1)).filter(lambda rec: rec[0]=="10.223.157.186").count()
# 4 Recurso web con mayor número de accesos
sc.textFile("/datasets/accesslog/access_log").map(lambda s: s.split(" ")).map(lambda rec: (rec[6],1)).reduceByKey(lambda x,y: x+y).max(key=lambda x: x[1])
| es | 0.960286 | # 0 Para cada tienda, obtener la transacción de máximo importe. # 1 Suma total de ventas para cada categoría de producto. # 2 Número total de accesos al recurso "/assets/img/home-logo.png” # 3 Número total de accesos desde la misma dirección IP: 10.223.157.186 # 4 Recurso web con mayor número de accesos | 2.480329 | 2 |
ig/functions.py | M-b850/ig-scraper | 0 | 6615739 | # All functions related to collecting data are here.
import datetime
from os.path import dirname, abspath
import random
import time
from bson import Int64
from itertools import dropwhile, takewhile
from instaloader import Instaloader, Profile
import const
# Date
SINCE = datetime.datetime.now()
UNTIL = datetime.datetime.now() - datetime.timedelta(days=365)
DIR = dirname(dirname(abspath(__file__)))
def file_name(realse_date):
suffix = '_'
realse_date = realse_date.replace(':', '_').replace(' ', '_')
return realse_date
def get_comments(db, post):
db.comments_col()
comments = post.get_comments()
for c in comments:
filter = {'id': Int64(c.id)}
if not db.find_one(filter):
comment = {
'id': c.id,
'InfoUpdateDate': datetime.datetime.utcnow(),
'InsPageLink': const.IG_PROFILE + post.owner_username,
'InsPostlink': const.IG_URL + post.shortcode,
'PostRelaseDate': post.date_utc,
'CommentDate': c.created_at_utc,
'CommetDescription': c.text,
'CommentLike': c.likes_count,
'ReplyCount': sum(1 for _ in comments) + 1,
}
db.insert_one(comment)
def get_data(L, db, inst_username):
profile = Profile.from_username(L.context, inst_username)
PostFolowerPostShare = profile.followers
PostFolowingPostShare = profile.followees
PostCount = profile.mediacount
all_posts = profile.get_posts()
for one_post in takewhile(lambda p: p.date > UNTIL, dropwhile(lambda p: p.date > SINCE, all_posts)):
# for one_post in all_posts:
InsPostlink = const.IG_URL + one_post.shortcode
get_comments(db, one_post)
"""
If object doesn't exists it will be added to database.-
Other ways it won't.
"""
db.posts_col()
filter = {'InsPostlink': str(InsPostlink)}
if not db.find_one(filter):
# Sleeep
insomnia = random.uniform(3, 10)
print('\n~~~~Post Insomnia is:', insomnia)
time.sleep(insomnia)
each_post = {
'InfoUpdateDate': datetime.datetime.utcnow(),
'InsPageLink': const.IG_PROFILE + inst_username,
'PostFolowerPostShare': PostFolowerPostShare,
'PostFolowingPostShare': PostFolowingPostShare,
'PostCount': PostCount,
'RelaseDate': one_post.date_utc,
'InsPostlink': InsPostlink,
'PostImagelink': one_post.url,
'PostLike': one_post.likes,
'PostComment': one_post.comments,
'PostSaveCount': None,
'PostSendCount': None,
'PostDiscription': one_post.caption,
}
_file_name = file_name(str(each_post['RelaseDate'])) + \
'_UTC' + f'_{inst_username}'
image_address = f'media/{_file_name}'
L.download_pic(
image_address,
each_post['PostImagelink'],
each_post['RelaseDate'],
)
each_post['PostImagelink'] = '/root/code/ig-scraper/' + image_address + '.jpg'
db.insert_one(each_post) # Insert to Database
res = {
'InsPageLink': const.IG_PROFILE + inst_username,
'InsPageName': inst_username,
'BioText': profile.biography,
'FolowerAtUpdate': PostFolowerPostShare,
'FolowingAtUpdate': PostFolowingPostShare,
'PostCount': sum(1 for _ in all_posts),
'SiteLink': profile.external_url,
'Check': True,
}
return res
| # All functions related to collecting data are here.
import datetime
from os.path import dirname, abspath
import random
import time
from bson import Int64
from itertools import dropwhile, takewhile
from instaloader import Instaloader, Profile
import const
# Date
SINCE = datetime.datetime.now()
UNTIL = datetime.datetime.now() - datetime.timedelta(days=365)
DIR = dirname(dirname(abspath(__file__)))
def file_name(realse_date):
suffix = '_'
realse_date = realse_date.replace(':', '_').replace(' ', '_')
return realse_date
def get_comments(db, post):
db.comments_col()
comments = post.get_comments()
for c in comments:
filter = {'id': Int64(c.id)}
if not db.find_one(filter):
comment = {
'id': c.id,
'InfoUpdateDate': datetime.datetime.utcnow(),
'InsPageLink': const.IG_PROFILE + post.owner_username,
'InsPostlink': const.IG_URL + post.shortcode,
'PostRelaseDate': post.date_utc,
'CommentDate': c.created_at_utc,
'CommetDescription': c.text,
'CommentLike': c.likes_count,
'ReplyCount': sum(1 for _ in comments) + 1,
}
db.insert_one(comment)
def get_data(L, db, inst_username):
profile = Profile.from_username(L.context, inst_username)
PostFolowerPostShare = profile.followers
PostFolowingPostShare = profile.followees
PostCount = profile.mediacount
all_posts = profile.get_posts()
for one_post in takewhile(lambda p: p.date > UNTIL, dropwhile(lambda p: p.date > SINCE, all_posts)):
# for one_post in all_posts:
InsPostlink = const.IG_URL + one_post.shortcode
get_comments(db, one_post)
"""
If object doesn't exists it will be added to database.-
Other ways it won't.
"""
db.posts_col()
filter = {'InsPostlink': str(InsPostlink)}
if not db.find_one(filter):
# Sleeep
insomnia = random.uniform(3, 10)
print('\n~~~~Post Insomnia is:', insomnia)
time.sleep(insomnia)
each_post = {
'InfoUpdateDate': datetime.datetime.utcnow(),
'InsPageLink': const.IG_PROFILE + inst_username,
'PostFolowerPostShare': PostFolowerPostShare,
'PostFolowingPostShare': PostFolowingPostShare,
'PostCount': PostCount,
'RelaseDate': one_post.date_utc,
'InsPostlink': InsPostlink,
'PostImagelink': one_post.url,
'PostLike': one_post.likes,
'PostComment': one_post.comments,
'PostSaveCount': None,
'PostSendCount': None,
'PostDiscription': one_post.caption,
}
_file_name = file_name(str(each_post['RelaseDate'])) + \
'_UTC' + f'_{inst_username}'
image_address = f'media/{_file_name}'
L.download_pic(
image_address,
each_post['PostImagelink'],
each_post['RelaseDate'],
)
each_post['PostImagelink'] = '/root/code/ig-scraper/' + image_address + '.jpg'
db.insert_one(each_post) # Insert to Database
res = {
'InsPageLink': const.IG_PROFILE + inst_username,
'InsPageName': inst_username,
'BioText': profile.biography,
'FolowerAtUpdate': PostFolowerPostShare,
'FolowingAtUpdate': PostFolowingPostShare,
'PostCount': sum(1 for _ in all_posts),
'SiteLink': profile.external_url,
'Check': True,
}
return res
| en | 0.876061 | # All functions related to collecting data are here. # Date # for one_post in all_posts: If object doesn't exists it will be added to database.- Other ways it won't. # Sleeep # Insert to Database | 2.481756 | 2 |
scripts/ad-hoc/vowel_embedding.py | MaxStrange/ArtieInfant | 1 | 6615740 | """
Loads an autoencoder model, plots the latent space for the test set and the
plots sounds from a directory on top of that space, with arrows pointing to
each of the overlaid sounds. The arrows have labels that are the file names
of the sounds (without the extension).
"""
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# Load the stuff we need from ArtieInfant proper
sys.path.append(os.path.abspath("../../"))
sys.path.append(os.path.abspath("../../Artie"))
from experiment.thesis import phase1 # pylint: disable=locally-disabled, import-error
from experiment.analysis.vae import plotvae # pylint: disable=locally-disabled, import-error
def _plot_projections(test_set_embeddings: np.ndarray, special_embeddings: np.ndarray, labels: [str]) -> None:
"""
Projects the 3D embeddings onto the three planes (X, Y), (X, Z), and (Y, Z).
Asserts that the embeddings are 3-dimensional.
"""
if test_set_embeddings.shape[0] == 0:
print("No test_set_embeddings. Can't project.")
return
assert test_set_embeddings.shape[1] == 3, "This only works for 3D embeddings."
fig = plt.figure()
ax = fig.add_subplot(131)
ax.set_xlabel('(X, Y)')
ax.scatter(test_set_embeddings[:, 0], test_set_embeddings[:, 1])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 1], c='red')
ax = fig.add_subplot(132)
ax.set_xlabel('(X, Z)')
ax.scatter(test_set_embeddings[:, 0], test_set_embeddings[:, 2])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 2], c='red')
ax = fig.add_subplot(133)
ax.set_xlabel('(Y, Z)')
ax.scatter(test_set_embeddings[:, 1], test_set_embeddings[:, 2])
ax.scatter(special_embeddings[:, 1], special_embeddings[:, 2], c='red')
fig.suptitle("Projection of 3D Embeddings")
save = "scatter_embeddings_ad_hoc_projections.png"
plt.savefig(save)
plt.show()
plt.clf()
def _plot(test_embeddings: np.ndarray, special_embeddings: np.ndarray, special_labels: [str], ndims: int) -> None:
"""
Plots the given embeddings and labels.
"""
fig = plt.figure()
if ndims == 1:
ax = fig.add_subplot(111)
ax.set_xlabel('X')
ax.scatter(test_embeddings, np.zeros_like(test_embeddings))
ax.scatter(special_embeddings, np.zeros_like(special_embeddings), c='red')
elif ndims == 2:
ax = fig.add_subplot(111)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.scatter(test_embeddings[:, 0], test_embeddings[:, 1])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 1], c='red')
elif ndims == 3:
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.scatter(test_embeddings[:, 0], test_embeddings[:, 1], test_embeddings[:, 2])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 1], special_embeddings[:, 2], c='red')
else:
raise ValueError("`ndims` must be 1, 2, or 3, but is {}".format(ndims))
ax.set_title("Scatter Plot of Embeddings")
save = "scatter_embeddings_ad_hoc.png"
print("Saving", save)
plt.savefig(save)
plt.show()
plt.clf()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('aemodelpath', type=str, help="Path to the Auto Encoder weights.")
parser.add_argument('specmode', choices=['long', 'short'], help="Long: 241x20x1 spectrograms; Short: 81x18x1")
parser.add_argument('overlaydir', type=str, help="Directory that contains the sound files you want to overlay on the test set's embeddings")
parser.add_argument('--ndims', default=3, type=int, help="The number of dimensions of the latent space for the given model of autoencoder.")
parser.add_argument('--projection', action='store_true', help="If present, we will project the plot onto the three planes (X, Y), (X, Z), and (Y, Z). Only works if ndims is 3, ignored otherwise.")
args = parser.parse_args()
# Validate args
if not os.path.isfile(args.aemodelpath):
print("Not a file: {}".format(args.aemodelpath))
exit(1)
elif not os.path.isdir(args.overlaydir):
print("Not a directory: {}".format(args.overlaydir))
exit(2)
# Set stuff up based on what mode we are
if args.specmode == 'long':
input_shape = [241, 20, 1]
testdir = "/home/max/Dropbox/thesis/harddrive_backup/test_spectrogram_images/test_set"
sample_rate_hz = 16000.0
duration_s = 0.5
window_length_s = 0.03
ae = phase1._build_vae1(is_variational=False, input_shape=input_shape, latent_dim=args.ndims, optimizer='adadelta', loss='mse', tbdir=None, kl_loss_prop=None, recon_loss_prop=None, std_loss_prop=None)
else:
input_shape = [81, 18, 1]
testdir = "/home/max/Dropbox/thesis/harddrive_backup/filterbank_images/test_set"
sample_rate_hz = 8000.0
duration_s = 0.3
window_length_s = 0.02
ae = phase1._build_vae2(is_variational=False, input_shape=input_shape, latent_dim=args.ndims, optimizer='adadelta', loss='mse', tbdir=None, kl_loss_prop=None, recon_loss_prop=None, std_loss_prop=None)
# Load the weights into the autoencoder
ae.load_weights(args.aemodelpath)
# Encode the test set
_, _, test_set_embeddings = plotvae._predict_on_spectrograms(testdir, ae, batchsize=32, nworkers=4, imshapes=input_shape)
# Encode the audio files found in the directory
_, _, special_embeddings, labels = plotvae._predict_on_sound_files(fpaths=None, dpath=args.overlaydir, model=ae, sample_rate_hz=sample_rate_hz, duration_s=duration_s, window_length_s=window_length_s)
# Now plot the embedding space
_plot(test_set_embeddings, special_embeddings, labels, args.ndims)
if args.ndims == 3 and args.projection:
# We want to project the 3D plot onto the three planes
_plot_projections(test_set_embeddings, special_embeddings, labels)
| """
Loads an autoencoder model, plots the latent space for the test set and the
plots sounds from a directory on top of that space, with arrows pointing to
each of the overlaid sounds. The arrows have labels that are the file names
of the sounds (without the extension).
"""
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# Load the stuff we need from ArtieInfant proper
sys.path.append(os.path.abspath("../../"))
sys.path.append(os.path.abspath("../../Artie"))
from experiment.thesis import phase1 # pylint: disable=locally-disabled, import-error
from experiment.analysis.vae import plotvae # pylint: disable=locally-disabled, import-error
def _plot_projections(test_set_embeddings: np.ndarray, special_embeddings: np.ndarray, labels: [str]) -> None:
"""
Projects the 3D embeddings onto the three planes (X, Y), (X, Z), and (Y, Z).
Asserts that the embeddings are 3-dimensional.
"""
if test_set_embeddings.shape[0] == 0:
print("No test_set_embeddings. Can't project.")
return
assert test_set_embeddings.shape[1] == 3, "This only works for 3D embeddings."
fig = plt.figure()
ax = fig.add_subplot(131)
ax.set_xlabel('(X, Y)')
ax.scatter(test_set_embeddings[:, 0], test_set_embeddings[:, 1])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 1], c='red')
ax = fig.add_subplot(132)
ax.set_xlabel('(X, Z)')
ax.scatter(test_set_embeddings[:, 0], test_set_embeddings[:, 2])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 2], c='red')
ax = fig.add_subplot(133)
ax.set_xlabel('(Y, Z)')
ax.scatter(test_set_embeddings[:, 1], test_set_embeddings[:, 2])
ax.scatter(special_embeddings[:, 1], special_embeddings[:, 2], c='red')
fig.suptitle("Projection of 3D Embeddings")
save = "scatter_embeddings_ad_hoc_projections.png"
plt.savefig(save)
plt.show()
plt.clf()
def _plot(test_embeddings: np.ndarray, special_embeddings: np.ndarray, special_labels: [str], ndims: int) -> None:
"""
Plots the given embeddings and labels.
"""
fig = plt.figure()
if ndims == 1:
ax = fig.add_subplot(111)
ax.set_xlabel('X')
ax.scatter(test_embeddings, np.zeros_like(test_embeddings))
ax.scatter(special_embeddings, np.zeros_like(special_embeddings), c='red')
elif ndims == 2:
ax = fig.add_subplot(111)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.scatter(test_embeddings[:, 0], test_embeddings[:, 1])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 1], c='red')
elif ndims == 3:
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.scatter(test_embeddings[:, 0], test_embeddings[:, 1], test_embeddings[:, 2])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 1], special_embeddings[:, 2], c='red')
else:
raise ValueError("`ndims` must be 1, 2, or 3, but is {}".format(ndims))
ax.set_title("Scatter Plot of Embeddings")
save = "scatter_embeddings_ad_hoc.png"
print("Saving", save)
plt.savefig(save)
plt.show()
plt.clf()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('aemodelpath', type=str, help="Path to the Auto Encoder weights.")
parser.add_argument('specmode', choices=['long', 'short'], help="Long: 241x20x1 spectrograms; Short: 81x18x1")
parser.add_argument('overlaydir', type=str, help="Directory that contains the sound files you want to overlay on the test set's embeddings")
parser.add_argument('--ndims', default=3, type=int, help="The number of dimensions of the latent space for the given model of autoencoder.")
parser.add_argument('--projection', action='store_true', help="If present, we will project the plot onto the three planes (X, Y), (X, Z), and (Y, Z). Only works if ndims is 3, ignored otherwise.")
args = parser.parse_args()
# Validate args
if not os.path.isfile(args.aemodelpath):
print("Not a file: {}".format(args.aemodelpath))
exit(1)
elif not os.path.isdir(args.overlaydir):
print("Not a directory: {}".format(args.overlaydir))
exit(2)
# Set stuff up based on what mode we are
if args.specmode == 'long':
input_shape = [241, 20, 1]
testdir = "/home/max/Dropbox/thesis/harddrive_backup/test_spectrogram_images/test_set"
sample_rate_hz = 16000.0
duration_s = 0.5
window_length_s = 0.03
ae = phase1._build_vae1(is_variational=False, input_shape=input_shape, latent_dim=args.ndims, optimizer='adadelta', loss='mse', tbdir=None, kl_loss_prop=None, recon_loss_prop=None, std_loss_prop=None)
else:
input_shape = [81, 18, 1]
testdir = "/home/max/Dropbox/thesis/harddrive_backup/filterbank_images/test_set"
sample_rate_hz = 8000.0
duration_s = 0.3
window_length_s = 0.02
ae = phase1._build_vae2(is_variational=False, input_shape=input_shape, latent_dim=args.ndims, optimizer='adadelta', loss='mse', tbdir=None, kl_loss_prop=None, recon_loss_prop=None, std_loss_prop=None)
# Load the weights into the autoencoder
ae.load_weights(args.aemodelpath)
# Encode the test set
_, _, test_set_embeddings = plotvae._predict_on_spectrograms(testdir, ae, batchsize=32, nworkers=4, imshapes=input_shape)
# Encode the audio files found in the directory
_, _, special_embeddings, labels = plotvae._predict_on_sound_files(fpaths=None, dpath=args.overlaydir, model=ae, sample_rate_hz=sample_rate_hz, duration_s=duration_s, window_length_s=window_length_s)
# Now plot the embedding space
_plot(test_set_embeddings, special_embeddings, labels, args.ndims)
if args.ndims == 3 and args.projection:
# We want to project the 3D plot onto the three planes
_plot_projections(test_set_embeddings, special_embeddings, labels)
| en | 0.83762 | Loads an autoencoder model, plots the latent space for the test set and the plots sounds from a directory on top of that space, with arrows pointing to each of the overlaid sounds. The arrows have labels that are the file names of the sounds (without the extension). # Load the stuff we need from ArtieInfant proper # pylint: disable=locally-disabled, import-error # pylint: disable=locally-disabled, import-error Projects the 3D embeddings onto the three planes (X, Y), (X, Z), and (Y, Z). Asserts that the embeddings are 3-dimensional. Plots the given embeddings and labels. # Validate args # Set stuff up based on what mode we are # Load the weights into the autoencoder # Encode the test set # Encode the audio files found in the directory # Now plot the embedding space # We want to project the 3D plot onto the three planes | 2.419418 | 2 |
reporting/reporting_calm_transformer/src/transform.py | TheStanfordDaily/loris-archives | 0 | 6615741 | <gh_stars>0
import math
from copy import deepcopy
from dateutil.parser import parse
def convert_date_to_iso(date_string):
try:
return parse(date_string).date().isoformat()
except (ValueError, TypeError):
return None
def transform(record):
transformed_record = deepcopy(record)
for key, value in record.items():
new_value = deepcopy(value)
if isinstance(new_value, (int, float, complex)):
if math.isnan(value):
new_value = None
if isinstance(new_value, list) and len(value) == 1:
new_value = record[key][0]
if isinstance(new_value, str):
if new_value.startswith("'") and new_value.endswith("'"):
new_value = new_value[1:-1]
if key in keys_to_parse:
transformed_record[key + "_raw"] = value
new_value = convert_date_to_iso(new_value)
transformed_record[key] = new_value
return transformed_record
keys_to_parse = {
"Modified",
"Created",
"UserDate1",
"UserDate2",
"UserDate3",
"UserDate4",
}
| import math
from copy import deepcopy
from dateutil.parser import parse
def convert_date_to_iso(date_string):
try:
return parse(date_string).date().isoformat()
except (ValueError, TypeError):
return None
def transform(record):
transformed_record = deepcopy(record)
for key, value in record.items():
new_value = deepcopy(value)
if isinstance(new_value, (int, float, complex)):
if math.isnan(value):
new_value = None
if isinstance(new_value, list) and len(value) == 1:
new_value = record[key][0]
if isinstance(new_value, str):
if new_value.startswith("'") and new_value.endswith("'"):
new_value = new_value[1:-1]
if key in keys_to_parse:
transformed_record[key + "_raw"] = value
new_value = convert_date_to_iso(new_value)
transformed_record[key] = new_value
return transformed_record
keys_to_parse = {
"Modified",
"Created",
"UserDate1",
"UserDate2",
"UserDate3",
"UserDate4",
} | none | 1 | 3.095453 | 3 | |
preliminary_analysis/generate_table.py | shaggyday/evaluating-human-rationales | 3 | 6615742 | import pandas as pd
pd.set_option("display.precision", 1)
data_df = pd.read_csv("")
corr_dataset_dict = {}
corr_dataset_dict["Wikipedia personal attacks"] = {"abbv": "WikiAttack", "Task type": "Cls", "Granularity": "Token", "Comprehensive": "CHECKMARK", "Class asymmetry": "CHECKMARK"}
corr_dataset_dict["Stanford treebank"] = {"abbv": "SST", "Task type": "Cls", "Granularity": "Token", "Comprehensive": "CHECKMARK", "Class asymmetry": "CROSSMARK"}
corr_dataset_dict["Movie reviews"] = {"abbv": "Movie", "Task type": "Cls", "Granularity": "Token", "Comprehensive": "CROSSMARK", "Class asymmetry": "CROSSMARK"}
corr_dataset_dict["MultiRC"] = {"abbv": "MultiRC", "Task type": "RC", "Granularity": "Sentence", "Comprehensive": "CHECKMARK", "Class asymmetry": "CROSSMARK"}
corr_dataset_dict["FEVER"] = {"abbv": "FEVER", "Task type": "RC", "Granularity": "Sentence", "Comprehensive": "CROSSMARK", "Class asymmetry": "CROSSMARK"}
corr_dataset_dict["E-SNLI"] = {"abbv": "E-SNLI", "Task type": "RC", "Granularity": "Token", "Comprehensive": "CHECKMARK", "Class asymmetry": "CHECKMARK"}
data_df["Task type"] = data_df["dataset"].apply(lambda s: corr_dataset_dict[s]['Task type'])
data_df["Granularity"] = data_df["dataset"].apply(lambda s: corr_dataset_dict[s]['Granularity'])
data_df["Comprehensive"] = data_df["dataset"].apply(lambda s: corr_dataset_dict[s]['Comprehensive'])
data_df["Class asymmetry"] = data_df["dataset"].apply(lambda s: corr_dataset_dict[s]['Class asymmetry'])
data_df["mean_rationale_percent"] = data_df["mean_rationale_percent"].apply(lambda s: 100*s)
data_df["dataset"] = data_df["dataset"].apply(lambda s: corr_dataset_dict[s]['abbv'])
data_df = data_df[["dataset", "mean_text_length", "Task type", "mean_rationale_length", "mean_rationale_percent", "Comprehensive", "Granularity", "Class asymmetry"]]
data_df.columns = ["Dataset", "Text length", "Task type", "Rationale length", "Ratio", "Comprehensive", "Granularity", "Class asymmetry"]
print(data_df)
print(data_df.to_latex(index=False))
print("Done!")
| import pandas as pd
pd.set_option("display.precision", 1)
data_df = pd.read_csv("")
corr_dataset_dict = {}
corr_dataset_dict["Wikipedia personal attacks"] = {"abbv": "WikiAttack", "Task type": "Cls", "Granularity": "Token", "Comprehensive": "CHECKMARK", "Class asymmetry": "CHECKMARK"}
corr_dataset_dict["Stanford treebank"] = {"abbv": "SST", "Task type": "Cls", "Granularity": "Token", "Comprehensive": "CHECKMARK", "Class asymmetry": "CROSSMARK"}
corr_dataset_dict["Movie reviews"] = {"abbv": "Movie", "Task type": "Cls", "Granularity": "Token", "Comprehensive": "CROSSMARK", "Class asymmetry": "CROSSMARK"}
corr_dataset_dict["MultiRC"] = {"abbv": "MultiRC", "Task type": "RC", "Granularity": "Sentence", "Comprehensive": "CHECKMARK", "Class asymmetry": "CROSSMARK"}
corr_dataset_dict["FEVER"] = {"abbv": "FEVER", "Task type": "RC", "Granularity": "Sentence", "Comprehensive": "CROSSMARK", "Class asymmetry": "CROSSMARK"}
corr_dataset_dict["E-SNLI"] = {"abbv": "E-SNLI", "Task type": "RC", "Granularity": "Token", "Comprehensive": "CHECKMARK", "Class asymmetry": "CHECKMARK"}
data_df["Task type"] = data_df["dataset"].apply(lambda s: corr_dataset_dict[s]['Task type'])
data_df["Granularity"] = data_df["dataset"].apply(lambda s: corr_dataset_dict[s]['Granularity'])
data_df["Comprehensive"] = data_df["dataset"].apply(lambda s: corr_dataset_dict[s]['Comprehensive'])
data_df["Class asymmetry"] = data_df["dataset"].apply(lambda s: corr_dataset_dict[s]['Class asymmetry'])
data_df["mean_rationale_percent"] = data_df["mean_rationale_percent"].apply(lambda s: 100*s)
data_df["dataset"] = data_df["dataset"].apply(lambda s: corr_dataset_dict[s]['abbv'])
data_df = data_df[["dataset", "mean_text_length", "Task type", "mean_rationale_length", "mean_rationale_percent", "Comprehensive", "Granularity", "Class asymmetry"]]
data_df.columns = ["Dataset", "Text length", "Task type", "Rationale length", "Ratio", "Comprehensive", "Granularity", "Class asymmetry"]
print(data_df)
print(data_df.to_latex(index=False))
print("Done!")
| none | 1 | 2.700462 | 3 | |
crf_baseline/validation.py | dhlab-epfl/LinkedBooksDeepReferenceParsing | 11 | 6615743 | <gh_stars>10-100
import random
import numpy as np
import time
# Python objects
import pickle
# Plot
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
# CRF
import sklearn_crfsuite
from sklearn_crfsuite import scorers, metrics
from sklearn.metrics import make_scorer, confusion_matrix
from sklearn.externals import joblib
from sklearn.model_selection import RandomizedSearchCV
# For model validation
import scipy
# Utils functions
from code.feature_extraction_supporting_functions_words import *
from code.feature_extraction_words import *
from code.utils import *
# Load validation data
window = 2
X_valid_w, valid_t1, valid_t2, valid_t3 = load_data("../dataset/clean_valid.txt")
X_valid = [[word2features(text, i, window=window) for i in range(len(text))] for text in X_valid_w]
# TASK 1
y_valid = valid_t1
crf = pickle.load(open("models/crf_t1.pkl", "rb" ))
print(crf)
y_pred = crf.predict(X_valid)
print(metrics.flat_classification_report(
y_valid, y_pred, digits=6
))
# Task 2
y_valid = valid_t2
crf = pickle.load(open("models/crf_t2.pkl", "rb" ))
print(crf)
y_pred = crf.predict(X_valid)
print(metrics.flat_classification_report(
y_valid, y_pred, digits=6
))
# Task 3
y_valid = valid_t3
crf = pickle.load(open("models/crf_t3.pkl", "rb" ))
print(crf)
y_pred = crf.predict(X_valid)
print(metrics.flat_classification_report(
y_valid, y_pred, digits=6
))
| import random
import numpy as np
import time
# Python objects
import pickle
# Plot
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
# CRF
import sklearn_crfsuite
from sklearn_crfsuite import scorers, metrics
from sklearn.metrics import make_scorer, confusion_matrix
from sklearn.externals import joblib
from sklearn.model_selection import RandomizedSearchCV
# For model validation
import scipy
# Utils functions
from code.feature_extraction_supporting_functions_words import *
from code.feature_extraction_words import *
from code.utils import *
# Load validation data
window = 2
X_valid_w, valid_t1, valid_t2, valid_t3 = load_data("../dataset/clean_valid.txt")
X_valid = [[word2features(text, i, window=window) for i in range(len(text))] for text in X_valid_w]
# TASK 1
y_valid = valid_t1
crf = pickle.load(open("models/crf_t1.pkl", "rb" ))
print(crf)
y_pred = crf.predict(X_valid)
print(metrics.flat_classification_report(
y_valid, y_pred, digits=6
))
# Task 2
y_valid = valid_t2
crf = pickle.load(open("models/crf_t2.pkl", "rb" ))
print(crf)
y_pred = crf.predict(X_valid)
print(metrics.flat_classification_report(
y_valid, y_pred, digits=6
))
# Task 3
y_valid = valid_t3
crf = pickle.load(open("models/crf_t3.pkl", "rb" ))
print(crf)
y_pred = crf.predict(X_valid)
print(metrics.flat_classification_report(
y_valid, y_pred, digits=6
)) | en | 0.462588 | # Python objects # Plot # CRF # For model validation # Utils functions # Load validation data # TASK 1 # Task 2 # Task 3 | 2.488657 | 2 |
day3/primewithoutflag.py | nikhilsamninan/python-files | 0 | 6615744 | num=int(input("enter the number"))
i=2
for x in range(i,num):
if(num%x==0):
print("It is not a prime")
break
else:
i+=1
else:
print("It is a prime number")
| num=int(input("enter the number"))
i=2
for x in range(i,num):
if(num%x==0):
print("It is not a prime")
break
else:
i+=1
else:
print("It is a prime number")
| none | 1 | 4.065031 | 4 | |
main.py | YuriMotoshima/b3_empresas | 0 | 6615745 | from scripts.selenium_driver import configChromeDriver, check_exists_elements
import pandas as pd
wb = configChromeDriver(webVisible=False)
wb.get(url="http://www.b3.com.br/pt_br/produtos-e-servicos/negociacao/renda-variavel/empresas-listadas.htm")
wb.find_element_by_id("onetrust-accept-btn-handler").click()
wb.switch_to.frame("bvmf_iframe")
select_emp = [n.text for n in wb.find_element_by_class_name("inline-list-letra").find_elements_by_tag_name("a")]
for n in select_emp:
wb.find_element_by_link_text(n).click()
check_exists_elements(wb=wb, method="css_selector", element="table[id='ctl00_contentPlaceHolderConteudo_BuscaNomeEmpresa1_grdEmpresa_ctl01']")
table = wb.find_element_by_css_selector("table[id='ctl00_contentPlaceHolderConteudo_BuscaNomeEmpresa1_grdEmpresa_ctl01']").get_attribute("outerHTML")
df = pd.read_html(table, header=0, index_col=False)[0]
print(df.shape)
wb.find_element_by_id("ctl00_botaoNavegacaoVoltar").click()
wb.quit()
print(wb)
| from scripts.selenium_driver import configChromeDriver, check_exists_elements
import pandas as pd
wb = configChromeDriver(webVisible=False)
wb.get(url="http://www.b3.com.br/pt_br/produtos-e-servicos/negociacao/renda-variavel/empresas-listadas.htm")
wb.find_element_by_id("onetrust-accept-btn-handler").click()
wb.switch_to.frame("bvmf_iframe")
select_emp = [n.text for n in wb.find_element_by_class_name("inline-list-letra").find_elements_by_tag_name("a")]
for n in select_emp:
wb.find_element_by_link_text(n).click()
check_exists_elements(wb=wb, method="css_selector", element="table[id='ctl00_contentPlaceHolderConteudo_BuscaNomeEmpresa1_grdEmpresa_ctl01']")
table = wb.find_element_by_css_selector("table[id='ctl00_contentPlaceHolderConteudo_BuscaNomeEmpresa1_grdEmpresa_ctl01']").get_attribute("outerHTML")
df = pd.read_html(table, header=0, index_col=False)[0]
print(df.shape)
wb.find_element_by_id("ctl00_botaoNavegacaoVoltar").click()
wb.quit()
print(wb)
| none | 1 | 3.129254 | 3 | |
corelogistics/views.py | kdfler/lambda_logistics | 0 | 6615746 | <reponame>kdfler/lambda_logistics<filename>corelogistics/views.py
from datetime import datetime
from django.shortcuts import render, redirect, get_object_or_404, render_to_response
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from datetime import date
from django.db.models import Q
from .forms import *
from .models import *
from .plotting import *
from .custom_decorators import group_required
from django.db.models import Sum
# PARCEL HANDLING VIEWS AND FUNCTIONS
@group_required('Client', 'Warehouse Manager')
@login_required #Running
def create_parcel(request):
if request.method == 'POST':
form = CreateParcel(request.POST)
if form.is_valid():
form.save(commit=False)
parcel = form.save()
sh_weight = (parcel.p_depth * parcel.p_depth * parcel.p_height) / (5000*1000)
if parcel.distance > 500:
if sh_weight > parcel.weight:
c = round(sh_weight * 199)
print(1)
else:
c = round(parcel.weight * 199)
print(2)
else:
if sh_weight > parcel.weight:
c = round(sh_weight * 99)
print(3)
else:
c = round(parcel.weight * 99)
print(4)
print(c)
parcel.price = c
parcel.owner = request.user
parcel.current_location = form.cleaned_data['sender_city']
parcel.confirmed = True
parcel.save()
return render(request, 'confirm_parcel.html', {'parcel': parcel})
else:
print(form.errors)
return render(request, 'create_parcel.html', {'form': form})
else:
form = CreateParcel()
return render(request, 'create_parcel.html', {'form': form})
#running
def confirm_parcel(request, pk):
obj = Parcel.objects.get(pk=pk)
if request.GET.get['confirm'] == 'confirm':
con = obj.save()
con.confirmed = True
con.save()
return HttpResponseRedirect('parcel_list')
elif request.GET.get['cancel'] == 'cancel':
obj.delete()
return HttpResponseRedirect('parcel_list')
else:
return HttpResponseRedirect('parcel_list')
#running
@login_required
def cancel_parcel(request, pk):
obj = get_object_or_404(Parcel, pk=pk)
obj.confirmed = 'True'
obj.save()
return HttpResponseRedirect('parcel_list')
#running
@login_required #running
def parcel_list(request):
model = Parcel.objects.all()
template = 'parcel_list.html'
data = model.exclude(status='DE')
data2 = model
data3 = model.filter(status='DC')
dict = {
'parcel_list_active': data,
'parcel_list_all': data2,
'parcel_list_delivered': data3,
}
return render(request, template, dict)
#running
@login_required #running; for administrative use only
def status_update_admin(request, pk):
parcel = Parcel.objects.get(pk=pk)
if parcel.status == 'Created':
parcel.status = 'Fetched'
parcel.date_fetched = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
if parcel.status == 'Fetched':
parcel.status = 'In Hub Inbound'
parcel.date_inhub = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
if parcel.status == 'In Hub Inbound':
parcel.status = 'In Hub Outbound'
parcel.date_inhub = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
if parcel.status == 'In Hub Outbound':
parcel.status = 'In Transit'
parcel.current_location = parcel.recipient_city
parcel.date_intransit = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
if parcel.status == 'In Transit':
parcel.status = 'Delivered'
parcel.date_delivered = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
if parcel.status == 'Delivery Failed':
parcel.status = 'Delivered'
parcel.date_delivered = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
return redirect('/core/parcel/list/')
#running
@login_required #failing
def delivery_fails_admin(request, pk):
parcel = Parcel.objects.get(pk=pk)
if parcel.status == 'In Transit' or parcel.status == 'Delivery Failed':
parcel.status = 'Delivery Failed'
parcel.failed += 1
parcel.save()
return redirect('/core/parcel/list/')
else:
return redirect('/core/parcel/list/')
#running
@login_required
def delivery_reset_admin(request, pk):
parcel = Parcel.objects.get(pk=pk)
parcel.status = 'Created'
parcel.save()
return redirect('/core/parcel/list/')
#running
@login_required #running; View to update parcels as warehouse mgr and driver
def status_update(request, pk):
parcel = Parcel.objects.get(pk=pk)
if parcel.status == 'Created':
parcel.status = 'Fetched'
parcel.date_fetched = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
if parcel.status == 'Fetched':
parcel.status = 'In Hub Inbound'
parcel.date_inhub = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
if parcel.status == 'In Hub Inbound':
parcel.status = 'In Hub Outbound'
parcel.date_inhub = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
if parcel.status == 'In Hub Outbound':
parcel.status = 'In Transit'
parcel.current_location = parcel.recipient_city
parcel.date_intransit = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
if parcel.status == 'In Transit':
parcel.status = 'Delivered'
parcel.date_delivered = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
if parcel.status == 'Delivery Failed':
parcel.status = 'Delivered'
parcel.date_delivered = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
return redirect('/core/driver/log/')
#running
@login_required #failing
def delivery_fails(request, pk):
parcel = Parcel.objects.get(pk=pk)
if parcel.status == 'In Transit' or parcel.status == 'Delivery Failed':
parcel.status = 'Delivery Failed'
parcel.failed += 1
parcel.save()
return redirect('/core/driver/log/')
else:
return redirect('/core/driver/log/')
#running
@login_required
def delivery_reset(request, pk):
parcel = Parcel.objects.get(pk=pk)
parcel.status = 'Created'
parcel.save()
return redirect('/core/parcel/list/')
#running
@login_required #running
def parcel_detail(request, pk):
parcel = get_object_or_404(Parcel, pk=pk)
return render_to_response('parcel_detail.html', {'parcel': parcel})
#running
def track_parcel(request):
parcel = Parcel.objects.all()
query = request.GET.get('term')
if query:
parcel = parcel.filter(Q(track_n__iexact=query))
return render(request, 'search.html', {'parcel': parcel})
else:
return render(request, 'search.html')
# OTHER VIEWS
#running !!! loader.template does not pass any data except the template. So no user authentication possible.
def landing_page(request):
template = 'index.html'
context = ''
return render(request, template, {'context': context})
#running
def login_user(request):
if request.POST:
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/')
return render(request, 'login.html')
#running
@group_required('Management', 'Client')
@login_required
def dashboard(request):
###### MANAGEMENT DASHBOARD ######
# Status Chart
ccr = Parcel.objects.filter(status='Created').count()
cft = Parcel.objects.filter(status='Fetched').count()
chbi = Parcel.objects.filter(status='In Hub Inbound').count()
chbo = Parcel.objects.filter(status='In Hub Outbound').count()
cit = Parcel.objects.filter(status='In Transit').count()
cde = Parcel.objects.filter(status='Delivered').count()
cdf = Parcel.objects.filter(status='Delivery Failed').count()
current = barchart(x_data=['Created', 'Fetched', 'In Hub Inbound', 'In Hub Outbound', 'In Transit', 'Delivered',
'Delivery Failed'],
y_data=[ccr, cft, chbi, chbo, cit, cde, cdf], name='Logistics')
# Overview Chart
parcel = Parcel.objects.all()
dates_created = []
dates_fetched = []
dates_inhub = []
dates_intransit = []
dates_delivered = []
for i in parcel:
# Creation Date
c = i.date_created
dates_created.append(c)
# Fetch Date
f = i.date_fetched
dates_fetched.append(f)
# Dates Inhub
h = i.date_inhub
dates_inhub.append(h)
# Dates In Transit
t = i.date_intransit
dates_intransit.append(t)
# Dates Delivered
d = i.date_delivered
dates_delivered.append(d)
created = dict()
for date in dates_created:
if date in created:
created[date] += 1
else:
created[date] = 1
fetched = dict()
for date in dates_fetched:
if date in fetched:
fetched[date] += 1
else:
fetched[date] = 1
inhub = dict()
for date in dates_inhub:
if date in inhub:
inhub[date] += 1
else:
inhub[date] = 1
transit = dict()
for date in dates_intransit:
if date in transit:
transit[date] += 1
else:
transit[date] = 1
delivered = dict()
for date in dates_delivered:
if date in delivered:
delivered[date] += 1
else:
delivered[date] = 1
overview = linegraph(x_data=list(created.keys()), y1=list(created.values()), y2=list(fetched.values()),
y3=list(inhub.values()), y4=list(transit.values()), y5=list(delivered.values()))
# Pie Chart for Weight Distribution and Distance statistics
total = Parcel.objects.all().count()
# Weight statistics (if statement to avoid division by zero error)
if total > 0:
d1 = (Parcel.objects.filter(weight__lt=5)).count() / total
d2 = (Parcel.objects.filter(weight__range=(5, 10))).count() / total
d3 = (Parcel.objects.filter(weight__range=(10, 20))).count() / total
d4 = (Parcel.objects.filter(weight__range=(20, 30))).count() / total
d5 = (Parcel.objects.filter(weight__range=(30, 50))).count() / total
d6 = (Parcel.objects.filter(weight__gte=50)).count() / total
# Distance Statistics
d7 = (Parcel.objects.filter(distance__lt=500)).count() / total
d8 = (Parcel.objects.filter(distance__gte=500)).count() / total
else:
d1 = 0
d2 = 0
d3 = 0
d4 = 0
d5 = 0
d6 = 0
d7 = 0
d8 = 0
dist_charts = pie_chart(d1=d1, d2=d2, d3=d3, d4=d4, d5=d5, d6=d6, l1='< 5kg', l2='5kg < x < 10kg',
l3='10kg < x < 20kg', l4='20kg < x < 30kg', l5='30kg < x < 50kg', l6='> 50kg', d7=d7,
d8=d8, l7='Short Distance', l8='Long Distance')
year = datetime.now().year
month = datetime.now().month
previous_month = month-1
total_costs = Parcel.objects.all().aggregate(Sum('price')).get('price__sum', 0.00)
costs_current = Parcel.objects.filter(date_created__month=month, date_created__year=year).aggregate(Sum('price')).get('price__sum', 0.00)
costs_previous = Parcel.objects.filter(date_created__month=previous_month, date_created__year=year).aggregate(Sum('price')).get('price__sum', 0.00)
###### CLIENT DASHBOARD ######
client = request.user
# Status Chart
ccr_c = Parcel.objects.filter(status='Created', owner=client).count()
cft_c = Parcel.objects.filter(status='Fetched', owner=client).count()
chbi_c = Parcel.objects.filter(status='In Hub Inbound', owner=client).count()
chbo_c = Parcel.objects.filter(status='In Hub Outbound', owner=client).count()
cit_c = Parcel.objects.filter(status='In Transit', owner=client).count()
cde_c = Parcel.objects.filter(status='Delivered', owner=client).count()
cdf_c = Parcel.objects.filter(status='Delivery Failed').count()
current_c = barchart(x_data=['Created', 'Fetched', 'In Hub Inbound', 'In Hub Outbound', 'In Transit', 'Delivered',
'Delivery Failed'],
y_data=[ccr_c, cft_c, chbi_c, chbo_c, cit_c, cde_c, cdf_c], name='Logistics')
# Client Overview Chart
parcel_c = Parcel.objects.filter(owner=client)
dates_created_c = []
dates_fetched_c = []
dates_inhub_c = []
dates_intransit_c = []
dates_delivered_c = []
for i in parcel_c:
# Creation Date
c = i.date_created
dates_created_c.append(c)
# Fetch Date
f = i.date_fetched
dates_fetched_c.append(f)
# Dates Inhub
h = i.date_inhub
dates_inhub_c.append(h)
# Dates In Transit
t = i.date_intransit
dates_intransit_c.append(t)
# Dates Delivered
d = i.date_delivered
dates_delivered_c.append(d)
created_c = dict()
for date in dates_created_c:
if date in created_c:
created_c[date] += 1
else:
created_c[date] = 1
fetched_c = dict()
for date in dates_fetched_c:
if date in fetched_c:
fetched_c[date] += 1
else:
fetched_c[date] = 1
inhub_c = dict()
for date in dates_inhub_c:
if date in inhub_c:
inhub_c[date] += 1
else:
inhub_c[date] = 1
transit_c = dict()
for date in dates_intransit_c:
if date in transit_c:
transit_c[date] += 1
else:
transit_c[date] = 1
delivered_c = dict()
for date in dates_delivered_c:
if date in delivered_c:
delivered_c[date] += 1
else:
delivered_c[date] = 1
overview_c = linegraph(x_data=list(created_c.keys()), y1=list(created_c.values()), y2=list(fetched_c.values()),
y3=list(inhub_c.values()), y4=list(transit_c.values()), y5=list(delivered_c.values()))
# Pie Chart for Weight Distribution and Distance statistics
total_c = Parcel.objects.filter(owner=client).count()
# Weight statistics (if statement to avoid division by zero error)
if total_c > 0:
d1_c = (Parcel.objects.filter(weight__lt=5, owner=client)).count() / total_c
d2_c = (Parcel.objects.filter(weight__range=(5, 10), owner=client)).count() / total_c
d3_c = (Parcel.objects.filter(weight__range=(10, 20), owner=client)).count() / total_c
d4_c = (Parcel.objects.filter(weight__range=(20, 30), owner=client)).count() / total_c
d5_c = (Parcel.objects.filter(weight__range=(30, 50), owner=client)).count() / total_c
d6_c = (Parcel.objects.filter(weight__gte=50, owner=client)).count() / total_c
# Distance Statistics
d7_c = (Parcel.objects.filter(distance__lt=500, owner=client)).count() / total_c
d8_c = (Parcel.objects.filter(distance__gte=500, owner=client)).count() / total_c
else:
d1_c = 0
d2_c = 0
d3_c = 0
d4_c = 0
d5_c = 0
d6_c = 0
d7_c = 0
d8_c = 0
dist_charts_c = pie_chart(d1=d1_c, d2=d2_c, d3=d3_c, d4=d4_c, d5=d5_c, d6=d6_c, l1='< 5kg', l2='5kg < x < 10kg',
l3='10kg < x < 20kg', l4='20kg < x < 30kg', l5='30kg < x < 50kg', l6='> 50kg', d7=d7_c,
d8=d8_c, l7='Short Distance', l8='Long Distance')
###### DASHBOARD MANAGEMENT ######
client_total_costs = Parcel.objects.filter(owner=client).aggregate(Sum('price')).get('price__sum', 0.00)
client_costs_current = Parcel.objects.filter(date_created__month=month, date_created__year=year, owner=client).aggregate(
Sum('price')).get('price__sum', 0.00)
client_costs_previous = Parcel.objects.filter(date_created__month=previous_month, date_created__year=year, owner=client).aggregate(
Sum('price')).get('price__sum', 0.00)
context = {
'current': current,
'overview': overview,
'stat_pie': dist_charts,
'tot_costs': total_costs,
'current_costs': costs_current,
'previous_costs': costs_previous,
'client_tot_costs': client_total_costs,
'client_costs_current': client_costs_current,
'client_costs_previous': client_costs_previous,
'overview_c': overview_c,
'current_c': current_c,
'dist_charts_c': dist_charts_c,
}
return render(request, 'dashboard.html', context)
@login_required
@group_required('Driver', 'Warehouse Manager')
def driver_logbook_initial(request):
city = Parcel.objects.all()
template = 'logbook.html'
user = request.user.employee
city_parcel_inbound = Parcel.objects.filter(current_location__city__icontains=user.location.city,
status='In Hub Inbound')
city_parcel_outbound = Parcel.objects.filter(current_location__city__icontains=user.location.city,
status='In Hub Outbound')
office = Office.objects.all()
term = request.GET.get('term')
analytics = []
for i in city:
h = i.date_inhub
analytics.append(h)
hub = dict()
for date in analytics:
if date in hub:
hub[date] += 1
else:
hub[date] = 1
b_local = Parcel.objects.filter(current_location__city__icontains=user.location.city).aggregate(Sum('price')).get('price__sum', 0.00)
b_total = Parcel.objects.all().aggregate(Sum('price')).get('price__sum', 0.00)
l = linegraph_warehouse(x=list(hub.keys()), y=list(hub.values()), y_title='Sum of Parcels')
b = barchart_warehouse(x_data=['Total Revenue', 'Local Revenue'], y_data=[b_total, b_local], name='')
if term:
city_filtered_fetch = city.filter(current_location__city__icontains=term, status='Created')
city_filtered_hub = city.filter(current_location__city__icontains=term, status='In Hub Outbound')
city_filtered_deliver = city.filter(current_location__city__icontains=term, status='In Transit')
tpia = city.filter(current_location__city__icontains=term).count()
term = term
context = {
'city_fetch': city_filtered_fetch,
'city_hub': city_filtered_hub,
'city_deliver': city_filtered_deliver,
'total_parcels_in_area': tpia,
'term': term,
}
return render(request, template, context)
else:
context = {
'office_list': office,
'city_inbound': city_parcel_inbound,
'city_outbound': city_parcel_outbound,
'parcel_traffic': l,
'parcel_fin_measures_city': b
}
return render(request, template, context) | from datetime import datetime
from django.shortcuts import render, redirect, get_object_or_404, render_to_response
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from datetime import date
from django.db.models import Q
from .forms import *
from .models import *
from .plotting import *
from .custom_decorators import group_required
from django.db.models import Sum
# PARCEL HANDLING VIEWS AND FUNCTIONS
@group_required('Client', 'Warehouse Manager')
@login_required #Running
def create_parcel(request):
if request.method == 'POST':
form = CreateParcel(request.POST)
if form.is_valid():
form.save(commit=False)
parcel = form.save()
sh_weight = (parcel.p_depth * parcel.p_depth * parcel.p_height) / (5000*1000)
if parcel.distance > 500:
if sh_weight > parcel.weight:
c = round(sh_weight * 199)
print(1)
else:
c = round(parcel.weight * 199)
print(2)
else:
if sh_weight > parcel.weight:
c = round(sh_weight * 99)
print(3)
else:
c = round(parcel.weight * 99)
print(4)
print(c)
parcel.price = c
parcel.owner = request.user
parcel.current_location = form.cleaned_data['sender_city']
parcel.confirmed = True
parcel.save()
return render(request, 'confirm_parcel.html', {'parcel': parcel})
else:
print(form.errors)
return render(request, 'create_parcel.html', {'form': form})
else:
form = CreateParcel()
return render(request, 'create_parcel.html', {'form': form})
#running
def confirm_parcel(request, pk):
obj = Parcel.objects.get(pk=pk)
if request.GET.get['confirm'] == 'confirm':
con = obj.save()
con.confirmed = True
con.save()
return HttpResponseRedirect('parcel_list')
elif request.GET.get['cancel'] == 'cancel':
obj.delete()
return HttpResponseRedirect('parcel_list')
else:
return HttpResponseRedirect('parcel_list')
#running
@login_required
def cancel_parcel(request, pk):
obj = get_object_or_404(Parcel, pk=pk)
obj.confirmed = 'True'
obj.save()
return HttpResponseRedirect('parcel_list')
#running
@login_required #running
def parcel_list(request):
model = Parcel.objects.all()
template = 'parcel_list.html'
data = model.exclude(status='DE')
data2 = model
data3 = model.filter(status='DC')
dict = {
'parcel_list_active': data,
'parcel_list_all': data2,
'parcel_list_delivered': data3,
}
return render(request, template, dict)
#running
@login_required #running; for administrative use only
def status_update_admin(request, pk):
parcel = Parcel.objects.get(pk=pk)
if parcel.status == 'Created':
parcel.status = 'Fetched'
parcel.date_fetched = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
if parcel.status == 'Fetched':
parcel.status = 'In Hub Inbound'
parcel.date_inhub = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
if parcel.status == 'In Hub Inbound':
parcel.status = 'In Hub Outbound'
parcel.date_inhub = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
if parcel.status == 'In Hub Outbound':
parcel.status = 'In Transit'
parcel.current_location = parcel.recipient_city
parcel.date_intransit = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
if parcel.status == 'In Transit':
parcel.status = 'Delivered'
parcel.date_delivered = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
if parcel.status == 'Delivery Failed':
parcel.status = 'Delivered'
parcel.date_delivered = date.today()
parcel.save()
return redirect('/core/parcel/list/')
else:
return redirect('/core/parcel/list/')
#running
@login_required #failing
def delivery_fails_admin(request, pk):
parcel = Parcel.objects.get(pk=pk)
if parcel.status == 'In Transit' or parcel.status == 'Delivery Failed':
parcel.status = 'Delivery Failed'
parcel.failed += 1
parcel.save()
return redirect('/core/parcel/list/')
else:
return redirect('/core/parcel/list/')
#running
@login_required
def delivery_reset_admin(request, pk):
parcel = Parcel.objects.get(pk=pk)
parcel.status = 'Created'
parcel.save()
return redirect('/core/parcel/list/')
#running
@login_required #running; View to update parcels as warehouse mgr and driver
def status_update(request, pk):
parcel = Parcel.objects.get(pk=pk)
if parcel.status == 'Created':
parcel.status = 'Fetched'
parcel.date_fetched = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
if parcel.status == 'Fetched':
parcel.status = 'In Hub Inbound'
parcel.date_inhub = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
if parcel.status == 'In Hub Inbound':
parcel.status = 'In Hub Outbound'
parcel.date_inhub = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
if parcel.status == 'In Hub Outbound':
parcel.status = 'In Transit'
parcel.current_location = parcel.recipient_city
parcel.date_intransit = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
if parcel.status == 'In Transit':
parcel.status = 'Delivered'
parcel.date_delivered = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
if parcel.status == 'Delivery Failed':
parcel.status = 'Delivered'
parcel.date_delivered = date.today()
parcel.save()
return redirect('/core/driver/log/')
else:
return redirect('/core/driver/log/')
#running
@login_required #failing
def delivery_fails(request, pk):
parcel = Parcel.objects.get(pk=pk)
if parcel.status == 'In Transit' or parcel.status == 'Delivery Failed':
parcel.status = 'Delivery Failed'
parcel.failed += 1
parcel.save()
return redirect('/core/driver/log/')
else:
return redirect('/core/driver/log/')
#running
@login_required
def delivery_reset(request, pk):
parcel = Parcel.objects.get(pk=pk)
parcel.status = 'Created'
parcel.save()
return redirect('/core/parcel/list/')
#running
@login_required #running
def parcel_detail(request, pk):
parcel = get_object_or_404(Parcel, pk=pk)
return render_to_response('parcel_detail.html', {'parcel': parcel})
#running
def track_parcel(request):
parcel = Parcel.objects.all()
query = request.GET.get('term')
if query:
parcel = parcel.filter(Q(track_n__iexact=query))
return render(request, 'search.html', {'parcel': parcel})
else:
return render(request, 'search.html')
# OTHER VIEWS
#running !!! loader.template does not pass any data except the template. So no user authentication possible.
def landing_page(request):
template = 'index.html'
context = ''
return render(request, template, {'context': context})
#running
def login_user(request):
if request.POST:
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect('/')
return render(request, 'login.html')
#running
@group_required('Management', 'Client')
@login_required
def dashboard(request):
###### MANAGEMENT DASHBOARD ######
# Status Chart
ccr = Parcel.objects.filter(status='Created').count()
cft = Parcel.objects.filter(status='Fetched').count()
chbi = Parcel.objects.filter(status='In Hub Inbound').count()
chbo = Parcel.objects.filter(status='In Hub Outbound').count()
cit = Parcel.objects.filter(status='In Transit').count()
cde = Parcel.objects.filter(status='Delivered').count()
cdf = Parcel.objects.filter(status='Delivery Failed').count()
current = barchart(x_data=['Created', 'Fetched', 'In Hub Inbound', 'In Hub Outbound', 'In Transit', 'Delivered',
'Delivery Failed'],
y_data=[ccr, cft, chbi, chbo, cit, cde, cdf], name='Logistics')
# Overview Chart
parcel = Parcel.objects.all()
dates_created = []
dates_fetched = []
dates_inhub = []
dates_intransit = []
dates_delivered = []
for i in parcel:
# Creation Date
c = i.date_created
dates_created.append(c)
# Fetch Date
f = i.date_fetched
dates_fetched.append(f)
# Dates Inhub
h = i.date_inhub
dates_inhub.append(h)
# Dates In Transit
t = i.date_intransit
dates_intransit.append(t)
# Dates Delivered
d = i.date_delivered
dates_delivered.append(d)
created = dict()
for date in dates_created:
if date in created:
created[date] += 1
else:
created[date] = 1
fetched = dict()
for date in dates_fetched:
if date in fetched:
fetched[date] += 1
else:
fetched[date] = 1
inhub = dict()
for date in dates_inhub:
if date in inhub:
inhub[date] += 1
else:
inhub[date] = 1
transit = dict()
for date in dates_intransit:
if date in transit:
transit[date] += 1
else:
transit[date] = 1
delivered = dict()
for date in dates_delivered:
if date in delivered:
delivered[date] += 1
else:
delivered[date] = 1
overview = linegraph(x_data=list(created.keys()), y1=list(created.values()), y2=list(fetched.values()),
y3=list(inhub.values()), y4=list(transit.values()), y5=list(delivered.values()))
# Pie Chart for Weight Distribution and Distance statistics
total = Parcel.objects.all().count()
# Weight statistics (if statement to avoid division by zero error)
if total > 0:
d1 = (Parcel.objects.filter(weight__lt=5)).count() / total
d2 = (Parcel.objects.filter(weight__range=(5, 10))).count() / total
d3 = (Parcel.objects.filter(weight__range=(10, 20))).count() / total
d4 = (Parcel.objects.filter(weight__range=(20, 30))).count() / total
d5 = (Parcel.objects.filter(weight__range=(30, 50))).count() / total
d6 = (Parcel.objects.filter(weight__gte=50)).count() / total
# Distance Statistics
d7 = (Parcel.objects.filter(distance__lt=500)).count() / total
d8 = (Parcel.objects.filter(distance__gte=500)).count() / total
else:
d1 = 0
d2 = 0
d3 = 0
d4 = 0
d5 = 0
d6 = 0
d7 = 0
d8 = 0
dist_charts = pie_chart(d1=d1, d2=d2, d3=d3, d4=d4, d5=d5, d6=d6, l1='< 5kg', l2='5kg < x < 10kg',
l3='10kg < x < 20kg', l4='20kg < x < 30kg', l5='30kg < x < 50kg', l6='> 50kg', d7=d7,
d8=d8, l7='Short Distance', l8='Long Distance')
year = datetime.now().year
month = datetime.now().month
previous_month = month-1
total_costs = Parcel.objects.all().aggregate(Sum('price')).get('price__sum', 0.00)
costs_current = Parcel.objects.filter(date_created__month=month, date_created__year=year).aggregate(Sum('price')).get('price__sum', 0.00)
costs_previous = Parcel.objects.filter(date_created__month=previous_month, date_created__year=year).aggregate(Sum('price')).get('price__sum', 0.00)
###### CLIENT DASHBOARD ######
client = request.user
# Status Chart
ccr_c = Parcel.objects.filter(status='Created', owner=client).count()
cft_c = Parcel.objects.filter(status='Fetched', owner=client).count()
chbi_c = Parcel.objects.filter(status='In Hub Inbound', owner=client).count()
chbo_c = Parcel.objects.filter(status='In Hub Outbound', owner=client).count()
cit_c = Parcel.objects.filter(status='In Transit', owner=client).count()
cde_c = Parcel.objects.filter(status='Delivered', owner=client).count()
cdf_c = Parcel.objects.filter(status='Delivery Failed').count()
current_c = barchart(x_data=['Created', 'Fetched', 'In Hub Inbound', 'In Hub Outbound', 'In Transit', 'Delivered',
'Delivery Failed'],
y_data=[ccr_c, cft_c, chbi_c, chbo_c, cit_c, cde_c, cdf_c], name='Logistics')
# Client Overview Chart
parcel_c = Parcel.objects.filter(owner=client)
dates_created_c = []
dates_fetched_c = []
dates_inhub_c = []
dates_intransit_c = []
dates_delivered_c = []
for i in parcel_c:
# Creation Date
c = i.date_created
dates_created_c.append(c)
# Fetch Date
f = i.date_fetched
dates_fetched_c.append(f)
# Dates Inhub
h = i.date_inhub
dates_inhub_c.append(h)
# Dates In Transit
t = i.date_intransit
dates_intransit_c.append(t)
# Dates Delivered
d = i.date_delivered
dates_delivered_c.append(d)
created_c = dict()
for date in dates_created_c:
if date in created_c:
created_c[date] += 1
else:
created_c[date] = 1
fetched_c = dict()
for date in dates_fetched_c:
if date in fetched_c:
fetched_c[date] += 1
else:
fetched_c[date] = 1
inhub_c = dict()
for date in dates_inhub_c:
if date in inhub_c:
inhub_c[date] += 1
else:
inhub_c[date] = 1
transit_c = dict()
for date in dates_intransit_c:
if date in transit_c:
transit_c[date] += 1
else:
transit_c[date] = 1
delivered_c = dict()
for date in dates_delivered_c:
if date in delivered_c:
delivered_c[date] += 1
else:
delivered_c[date] = 1
overview_c = linegraph(x_data=list(created_c.keys()), y1=list(created_c.values()), y2=list(fetched_c.values()),
y3=list(inhub_c.values()), y4=list(transit_c.values()), y5=list(delivered_c.values()))
# Pie Chart for Weight Distribution and Distance statistics
total_c = Parcel.objects.filter(owner=client).count()
# Weight statistics (if statement to avoid division by zero error)
if total_c > 0:
d1_c = (Parcel.objects.filter(weight__lt=5, owner=client)).count() / total_c
d2_c = (Parcel.objects.filter(weight__range=(5, 10), owner=client)).count() / total_c
d3_c = (Parcel.objects.filter(weight__range=(10, 20), owner=client)).count() / total_c
d4_c = (Parcel.objects.filter(weight__range=(20, 30), owner=client)).count() / total_c
d5_c = (Parcel.objects.filter(weight__range=(30, 50), owner=client)).count() / total_c
d6_c = (Parcel.objects.filter(weight__gte=50, owner=client)).count() / total_c
# Distance Statistics
d7_c = (Parcel.objects.filter(distance__lt=500, owner=client)).count() / total_c
d8_c = (Parcel.objects.filter(distance__gte=500, owner=client)).count() / total_c
else:
d1_c = 0
d2_c = 0
d3_c = 0
d4_c = 0
d5_c = 0
d6_c = 0
d7_c = 0
d8_c = 0
dist_charts_c = pie_chart(d1=d1_c, d2=d2_c, d3=d3_c, d4=d4_c, d5=d5_c, d6=d6_c, l1='< 5kg', l2='5kg < x < 10kg',
l3='10kg < x < 20kg', l4='20kg < x < 30kg', l5='30kg < x < 50kg', l6='> 50kg', d7=d7_c,
d8=d8_c, l7='Short Distance', l8='Long Distance')
###### DASHBOARD MANAGEMENT ######
client_total_costs = Parcel.objects.filter(owner=client).aggregate(Sum('price')).get('price__sum', 0.00)
client_costs_current = Parcel.objects.filter(date_created__month=month, date_created__year=year, owner=client).aggregate(
Sum('price')).get('price__sum', 0.00)
client_costs_previous = Parcel.objects.filter(date_created__month=previous_month, date_created__year=year, owner=client).aggregate(
Sum('price')).get('price__sum', 0.00)
context = {
'current': current,
'overview': overview,
'stat_pie': dist_charts,
'tot_costs': total_costs,
'current_costs': costs_current,
'previous_costs': costs_previous,
'client_tot_costs': client_total_costs,
'client_costs_current': client_costs_current,
'client_costs_previous': client_costs_previous,
'overview_c': overview_c,
'current_c': current_c,
'dist_charts_c': dist_charts_c,
}
return render(request, 'dashboard.html', context)
@login_required
@group_required('Driver', 'Warehouse Manager')
def driver_logbook_initial(request):
city = Parcel.objects.all()
template = 'logbook.html'
user = request.user.employee
city_parcel_inbound = Parcel.objects.filter(current_location__city__icontains=user.location.city,
status='In Hub Inbound')
city_parcel_outbound = Parcel.objects.filter(current_location__city__icontains=user.location.city,
status='In Hub Outbound')
office = Office.objects.all()
term = request.GET.get('term')
analytics = []
for i in city:
h = i.date_inhub
analytics.append(h)
hub = dict()
for date in analytics:
if date in hub:
hub[date] += 1
else:
hub[date] = 1
b_local = Parcel.objects.filter(current_location__city__icontains=user.location.city).aggregate(Sum('price')).get('price__sum', 0.00)
b_total = Parcel.objects.all().aggregate(Sum('price')).get('price__sum', 0.00)
l = linegraph_warehouse(x=list(hub.keys()), y=list(hub.values()), y_title='Sum of Parcels')
b = barchart_warehouse(x_data=['Total Revenue', 'Local Revenue'], y_data=[b_total, b_local], name='')
if term:
city_filtered_fetch = city.filter(current_location__city__icontains=term, status='Created')
city_filtered_hub = city.filter(current_location__city__icontains=term, status='In Hub Outbound')
city_filtered_deliver = city.filter(current_location__city__icontains=term, status='In Transit')
tpia = city.filter(current_location__city__icontains=term).count()
term = term
context = {
'city_fetch': city_filtered_fetch,
'city_hub': city_filtered_hub,
'city_deliver': city_filtered_deliver,
'total_parcels_in_area': tpia,
'term': term,
}
return render(request, template, context)
else:
context = {
'office_list': office,
'city_inbound': city_parcel_inbound,
'city_outbound': city_parcel_outbound,
'parcel_traffic': l,
'parcel_fin_measures_city': b
}
return render(request, template, context) | en | 0.650347 | # PARCEL HANDLING VIEWS AND FUNCTIONS #Running #running #running #running #running #running #running; for administrative use only #running #failing #running #running #running; View to update parcels as warehouse mgr and driver #running #failing #running #running #running #running # OTHER VIEWS #running !!! loader.template does not pass any data except the template. So no user authentication possible. #running #running ###### MANAGEMENT DASHBOARD ###### # Status Chart # Overview Chart # Creation Date # Fetch Date # Dates Inhub # Dates In Transit # Dates Delivered # Pie Chart for Weight Distribution and Distance statistics # Weight statistics (if statement to avoid division by zero error) # Distance Statistics ###### CLIENT DASHBOARD ###### # Status Chart # Client Overview Chart # Creation Date # Fetch Date # Dates Inhub # Dates In Transit # Dates Delivered # Pie Chart for Weight Distribution and Distance statistics # Weight statistics (if statement to avoid division by zero error) # Distance Statistics ###### DASHBOARD MANAGEMENT ###### | 2.169046 | 2 |
# lstm_bs_refinement.py — bioinsilico/LSTM_CONV2D_RRI
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn import init
import numpy as np
import json
import os.path
import subprocess
import random
from operator import itemgetter
import sklearn.metrics as metrics
np.set_printoptions(linewidth=1000000000)
torch.cuda.manual_seed(1)
training_data = []
testing_data = []
I = open("pssm_list.tsv","r").readlines()
pssm_data = list(map(str.strip, I))
pdb_features = dict()
all_sequence = dict()
for i in pssm_data:
I = iter(list(map(str.strip,open("PSSM/"+i,"r").readlines())))
r = i.split("_")
pdb = r[0]+"_"+r[1]
ch = r[2]
if not pdb in pdb_features:
pdb_features[pdb] = dict()
if not pdb in all_sequence:
all_sequence[pdb] = dict()
next(I)
for j in I:
r = j.split(" ")
res_id = r[1]
pdb_features[pdb][res_id+ch] = dict()
if not ch in all_sequence[pdb]:
all_sequence[pdb][ch] = list()
all_sequence[pdb][ch].append(res_id+ch)
pdb_features[pdb][res_id+ch]['pssm'] = list(map(float,r[3:23]))
I = open("rri_list.tsv","r").readlines()
pdb_list = list(map(str.strip, I))
pdb_bs = dict()
chain_list = dict()
N_chains = 0
for i in pdb_list:
pdb_bs[ i+"_l" ] = dict()
chain_list[i] = { "r":{}, "l":{} }
I = iter(list(map(str.strip,open("bestResults/struct_2/"+i+".res.tab.lig","r").readlines())))
next(I)
next(I)
for j in I:
R = j.split(" ")
if int(R[2]) > 0:
pdb_bs[ i+"_l" ][ R[1]+R[0] ]= True
if not R[0] in chain_list[i]["l"]:
N_chains += 1
if R[1]+R[0] in pdb_features[ i+"_l" ]:
pdb_features[ i+"_l" ][ R[1]+R[0] ]['score'] = float(R[3])
chain_list[i]["l"][R[0]] = True
pdb_bs[ i+"_r" ] = dict()
I = iter(list(map(str.strip,open("bestResults/struct_2/"+i+".res.tab.rec","r").readlines())))
next(I)
next(I)
for j in I:
R = j.split(" ")
if int(R[2]) > 0:
pdb_bs[ i+"_r" ][ R[1]+R[0] ]= True
if not R[0] in chain_list[i]["r"]:
N_chains += 1
if R[1]+R[0] in pdb_features[ i+"_r" ]:
pdb_features[ i+"_r" ][ R[1]+R[0] ]['score'] = float(R[3])
chain_list[i]["r"][R[0]] = True
def get_native_bs( pdb, ch):
BS = []
for aa in all_sequence[pdb][ch]:
if aa in pdb_bs[pdb]:
BS.append(1)
else:
BS.append(0)
return autograd.Variable(torch.LongTensor(BS)).cuda()
class BiLSTM(nn.Module):
def __init__( self, input_dim=21, lstm_hidden_dim=250, hidden_1_dim=1024, hidden_2_dim=512, bs_size=2 ):
super(BiLSTM, self).__init__()
self.input_dim = input_dim
self.lstm_hidden_dim = lstm_hidden_dim
self.hidden_1_dim = hidden_1_dim
self.hidden_2_dim = hidden_2_dim
self.bs_size = bs_size
self.lstm_h0 = None
self.lstm_c0 = None
self.update_lstm_hidden()
self.LSTM = nn.LSTM(input_dim, lstm_hidden_dim, num_layers=2, bidirectional=True, dropout=0.5)
self.drop_hidden_1 = nn.Dropout(p=0.5)
self.lstm2hidden_1 = nn.Linear(2*lstm_hidden_dim, hidden_1_dim)
self.drop_hidden_2 = nn.Dropout(p=0.5)
self.hidden2hidden_2 = nn.Linear(hidden_1_dim, hidden_2_dim)
self.hidden2out = nn.Linear(hidden_2_dim, bs_size)
def update_lstm_hidden(self):
self.lstm_h0 = autograd.Variable(torch.zeros(4, 1, self.lstm_hidden_dim)).cuda()
self.lstm_c0 = autograd.Variable(torch.zeros(4, 1, self.lstm_hidden_dim)).cuda()
def prepare_data(self, pdb, sequence):
list_pssm = []
list_initial_scores = []
for aa in sequence:
v = list(pdb_features[pdb][aa]["pssm"])
if "score" in pdb_features[pdb][aa]:
v.append(pdb_features[pdb][aa]["score"])
list_initial_scores.append( pdb_features[pdb][aa]["score"] )
else:########SCORE WAS NOT FOUND !!!!!! WHY ????
v.append(0)
list_initial_scores.append(0)
list_pssm.append( v )
return autograd.Variable( torch.unsqueeze(torch.FloatTensor(list_pssm),dim=1) ).cuda(), torch.FloatTensor(list_initial_scores)
def forward(self, pdb, sequence ):
v_in, init_scores = self.prepare_data( pdb, sequence )
out_LSTM, (hidden_LSTM, content_LSTM) = self.LSTM( v_in, (self.lstm_h0, self.lstm_c0))
hidden_1 = self.lstm2hidden_1( out_LSTM.view(len(sequence), -1) )
hidden_1 = self.drop_hidden_1(hidden_1)
out_hidden_1 = F.relu(hidden_1)
hidden_2 = self.hidden2hidden_2( out_hidden_1 )
hidden_2 = self.drop_hidden_2(hidden_2)
out_hidden_2 = F.relu(hidden_2)
bs_out = self.hidden2out( out_hidden_2 )
bs_out = F.log_softmax( bs_out )
return bs_out, init_scores
model = BiLSTM(input_dim=21, lstm_hidden_dim=250, hidden_1_dim=1024, hidden_2_dim=512, bs_size=2)
model.cuda()
print(model)
loss_function = nn.NLLLoss()
#optimizer = optim.Adam(model.parameters(), lr=0.01)
N = len(training_data)
current_n = 1
print("Neural networking ...")
for target in chain_list:
lr = 0.1
for epoch in range(1000):
optimizer = optim.SGD(model.parameters(), lr=lr)
lr *= 0.99
current_n = N_chains
for pdb in chain_list:
if pdb == target:
continue
for rl in ["r","l"]:
for ch in chain_list[pdb][rl]:
print("%d %s_%s - %s \r" %(current_n, pdb,rl,ch),end="")
current_n -= 1
local_sequence = all_sequence[pdb+"_"+rl][ch]
model.update_lstm_hidden()
model.zero_grad()
optimizer.zero_grad()
predicted_bs, init_scores = model( pdb+"_"+rl, local_sequence )
native_bs = get_native_bs( pdb+"_"+rl, ch )
loss = loss_function( predicted_bs, native_bs )
loss.backward()
optimizer.step()
#np_prediction = predicted_bs.data.cpu()[:,1].numpy()
#np_class = native_bs.data.cpu().numpy()
#np_init = init_scores.numpy()
##TESTING FOR EACH EPOCH
model.train(mode=False)
for rl in ["r","l"]:
for ch in chain_list[target][rl]:
print("%s : %s : %s : %d"%(target,rl,ch,epoch))
local_sequence = all_sequence[target+"_"+rl][ch]
model.update_lstm_hidden()
model.zero_grad()
optimizer.zero_grad()
predicted_bs, init_scores = model( target+"_"+rl, local_sequence )
native_bs = get_native_bs( target+"_"+rl, ch )
np_class = native_bs.data.cpu().numpy()
np_init = init_scores.numpy()
np_prediction = predicted_bs.data.cpu()[:,1].numpy()
fpr, tpr, thresholds = metrics.roc_curve(np_class, np_init, pos_label=1)
init_auc = metrics.auc(fpr, tpr)
fpr, tpr, thresholds = metrics.roc_curve(np_class, np_prediction, pos_label=1)
new_auc = metrics.auc(fpr, tpr)
print("INIT AUC=%0.4f - NEW AUC=%0.4f"%(init_auc, new_auc))
model.train(mode=True)
exit()
| import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn import init
import numpy as np
import json
import os.path
import subprocess
import random
from operator import itemgetter
import sklearn.metrics as metrics
np.set_printoptions(linewidth=1000000000)
torch.cuda.manual_seed(1)
training_data = []
testing_data = []
I = open("pssm_list.tsv","r").readlines()
pssm_data = list(map(str.strip, I))
pdb_features = dict()
all_sequence = dict()
for i in pssm_data:
I = iter(list(map(str.strip,open("PSSM/"+i,"r").readlines())))
r = i.split("_")
pdb = r[0]+"_"+r[1]
ch = r[2]
if not pdb in pdb_features:
pdb_features[pdb] = dict()
if not pdb in all_sequence:
all_sequence[pdb] = dict()
next(I)
for j in I:
r = j.split(" ")
res_id = r[1]
pdb_features[pdb][res_id+ch] = dict()
if not ch in all_sequence[pdb]:
all_sequence[pdb][ch] = list()
all_sequence[pdb][ch].append(res_id+ch)
pdb_features[pdb][res_id+ch]['pssm'] = list(map(float,r[3:23]))
I = open("rri_list.tsv","r").readlines()
pdb_list = list(map(str.strip, I))
pdb_bs = dict()
chain_list = dict()
N_chains = 0
for i in pdb_list:
pdb_bs[ i+"_l" ] = dict()
chain_list[i] = { "r":{}, "l":{} }
I = iter(list(map(str.strip,open("bestResults/struct_2/"+i+".res.tab.lig","r").readlines())))
next(I)
next(I)
for j in I:
R = j.split(" ")
if int(R[2]) > 0:
pdb_bs[ i+"_l" ][ R[1]+R[0] ]= True
if not R[0] in chain_list[i]["l"]:
N_chains += 1
if R[1]+R[0] in pdb_features[ i+"_l" ]:
pdb_features[ i+"_l" ][ R[1]+R[0] ]['score'] = float(R[3])
chain_list[i]["l"][R[0]] = True
pdb_bs[ i+"_r" ] = dict()
I = iter(list(map(str.strip,open("bestResults/struct_2/"+i+".res.tab.rec","r").readlines())))
next(I)
next(I)
for j in I:
R = j.split(" ")
if int(R[2]) > 0:
pdb_bs[ i+"_r" ][ R[1]+R[0] ]= True
if not R[0] in chain_list[i]["r"]:
N_chains += 1
if R[1]+R[0] in pdb_features[ i+"_r" ]:
pdb_features[ i+"_r" ][ R[1]+R[0] ]['score'] = float(R[3])
chain_list[i]["r"][R[0]] = True
def get_native_bs( pdb, ch):
BS = []
for aa in all_sequence[pdb][ch]:
if aa in pdb_bs[pdb]:
BS.append(1)
else:
BS.append(0)
return autograd.Variable(torch.LongTensor(BS)).cuda()
class BiLSTM(nn.Module):
def __init__( self, input_dim=21, lstm_hidden_dim=250, hidden_1_dim=1024, hidden_2_dim=512, bs_size=2 ):
super(BiLSTM, self).__init__()
self.input_dim = input_dim
self.lstm_hidden_dim = lstm_hidden_dim
self.hidden_1_dim = hidden_1_dim
self.hidden_2_dim = hidden_2_dim
self.bs_size = bs_size
self.lstm_h0 = None
self.lstm_c0 = None
self.update_lstm_hidden()
self.LSTM = nn.LSTM(input_dim, lstm_hidden_dim, num_layers=2, bidirectional=True, dropout=0.5)
self.drop_hidden_1 = nn.Dropout(p=0.5)
self.lstm2hidden_1 = nn.Linear(2*lstm_hidden_dim, hidden_1_dim)
self.drop_hidden_2 = nn.Dropout(p=0.5)
self.hidden2hidden_2 = nn.Linear(hidden_1_dim, hidden_2_dim)
self.hidden2out = nn.Linear(hidden_2_dim, bs_size)
def update_lstm_hidden(self):
self.lstm_h0 = autograd.Variable(torch.zeros(4, 1, self.lstm_hidden_dim)).cuda()
self.lstm_c0 = autograd.Variable(torch.zeros(4, 1, self.lstm_hidden_dim)).cuda()
def prepare_data(self, pdb, sequence):
list_pssm = []
list_initial_scores = []
for aa in sequence:
v = list(pdb_features[pdb][aa]["pssm"])
if "score" in pdb_features[pdb][aa]:
v.append(pdb_features[pdb][aa]["score"])
list_initial_scores.append( pdb_features[pdb][aa]["score"] )
else:########SCORE WAS NOT FOUND !!!!!! WHY ????
v.append(0)
list_initial_scores.append(0)
list_pssm.append( v )
return autograd.Variable( torch.unsqueeze(torch.FloatTensor(list_pssm),dim=1) ).cuda(), torch.FloatTensor(list_initial_scores)
def forward(self, pdb, sequence ):
v_in, init_scores = self.prepare_data( pdb, sequence )
out_LSTM, (hidden_LSTM, content_LSTM) = self.LSTM( v_in, (self.lstm_h0, self.lstm_c0))
hidden_1 = self.lstm2hidden_1( out_LSTM.view(len(sequence), -1) )
hidden_1 = self.drop_hidden_1(hidden_1)
out_hidden_1 = F.relu(hidden_1)
hidden_2 = self.hidden2hidden_2( out_hidden_1 )
hidden_2 = self.drop_hidden_2(hidden_2)
out_hidden_2 = F.relu(hidden_2)
bs_out = self.hidden2out( out_hidden_2 )
bs_out = F.log_softmax( bs_out )
return bs_out, init_scores
model = BiLSTM(input_dim=21, lstm_hidden_dim=250, hidden_1_dim=1024, hidden_2_dim=512, bs_size=2)
model.cuda()
print(model)
loss_function = nn.NLLLoss()
#optimizer = optim.Adam(model.parameters(), lr=0.01)
N = len(training_data)
current_n = 1
print("Neural networking ...")
for target in chain_list:
lr = 0.1
for epoch in range(1000):
optimizer = optim.SGD(model.parameters(), lr=lr)
lr *= 0.99
current_n = N_chains
for pdb in chain_list:
if pdb == target:
continue
for rl in ["r","l"]:
for ch in chain_list[pdb][rl]:
print("%d %s_%s - %s \r" %(current_n, pdb,rl,ch),end="")
current_n -= 1
local_sequence = all_sequence[pdb+"_"+rl][ch]
model.update_lstm_hidden()
model.zero_grad()
optimizer.zero_grad()
predicted_bs, init_scores = model( pdb+"_"+rl, local_sequence )
native_bs = get_native_bs( pdb+"_"+rl, ch )
loss = loss_function( predicted_bs, native_bs )
loss.backward()
optimizer.step()
#np_prediction = predicted_bs.data.cpu()[:,1].numpy()
#np_class = native_bs.data.cpu().numpy()
#np_init = init_scores.numpy()
##TESTING FOR EACH EPOCH
model.train(mode=False)
for rl in ["r","l"]:
for ch in chain_list[target][rl]:
print("%s : %s : %s : %d"%(target,rl,ch,epoch))
local_sequence = all_sequence[target+"_"+rl][ch]
model.update_lstm_hidden()
model.zero_grad()
optimizer.zero_grad()
predicted_bs, init_scores = model( target+"_"+rl, local_sequence )
native_bs = get_native_bs( target+"_"+rl, ch )
np_class = native_bs.data.cpu().numpy()
np_init = init_scores.numpy()
np_prediction = predicted_bs.data.cpu()[:,1].numpy()
fpr, tpr, thresholds = metrics.roc_curve(np_class, np_init, pos_label=1)
init_auc = metrics.auc(fpr, tpr)
fpr, tpr, thresholds = metrics.roc_curve(np_class, np_prediction, pos_label=1)
new_auc = metrics.auc(fpr, tpr)
print("INIT AUC=%0.4f - NEW AUC=%0.4f"%(init_auc, new_auc))
model.train(mode=True)
exit() | en | 0.173779 | ########SCORE WAS NOT FOUND !!!!!! WHY ???? #optimizer = optim.Adam(model.parameters(), lr=0.01) #np_prediction = predicted_bs.data.cpu()[:,1].numpy() #np_class = native_bs.data.cpu().numpy() #np_init = init_scores.numpy() ##TESTING FOR EACH EPOCH | 1.925725 | 2 |
models/resnet_quant.py | iimmortall/QuantLib | 0 | 6615748 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from models.util import get_func
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
# class BasicBlock(nn.Module):
# expansion = 1
#
# def __init__(self, func, inplanes, planes, stride=1, num_bit=1, wgt_sigma=1, wgt_temp=2, act_sigma=2, act_temp=2):
# super(BasicBlock, self).__init__()
# self.conv1 = func(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, num_bit=num_bit,
# wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp)
# self.bn1 = nn.BatchNorm2d(planes)
# self.relu = nn.ReLU(inplace=True)
# self.conv2 = func(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, num_bit=num_bit,
# wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp)
# self.bn2 = nn.BatchNorm2d(planes)
# self.shortcut = nn.Sequential()
# if stride != 1 or inplanes != planes:
# self.shortcut = LambdaLayer(lambda x: F.pad(x[:, :, fc00:e968:6179::de52:7100, ::2], (0, 0, 0, 0, planes//4, planes//4),
# "constant", 0))
#
# def forward(self, x):
# conv1_out = F.relu(self.bn1(self.conv1(x)))
# conv2_out = self.bn2(self.conv2(conv1_out))
# out = conv2_out + self.shortcut(x)
# out = F.relu(out)
# return out, conv1_out, conv2_out
#
#
# class ResNet(nn.Module):
#
# def __init__(self, block, num_blocks, num_classes=10, num_bit=1, wgt_sigma=1, wgt_temp=2, act_sigma=2, act_temp=2):
# super(ResNet, self).__init__()
# self.in_planes = 16
# self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
# self.bn1 = nn.BatchNorm2d(16)
# self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1, num_bit=num_bit,
# wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp)
# self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2, num_bit=num_bit,
# wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp)
# self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2, num_bit=num_bit,
# wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp)
#
# self.bn2 = nn.BatchNorm1d(64)
#
# self.linear = nn.Linear(64, num_classes)
#
# def _make_layer(self, block, planes, num_blocks, stride, num_bit, wgt_sigma, wgt_temp, act_sigma, act_temp):
# strides = [stride] + [1]*(num_blocks-1)
# ret_dict = dict()
#
# for i, stride in enumerate(strides):
# layers = []
# layers.append(block(self.in_planes, planes, stride, num_bit, wgt_sigma, wgt_temp, act_sigma, act_temp))
# ret_dict['block_{}'.format(i)] = nn.Sequential(*layers)
# self.in_planes = planes * block.expansion
#
# return nn.Sequential(OrderedDict(ret_dict))
#
# def forward(self, x):
# ret_dict = dict()
# out = F.relu(self.conv1(x))
# layer_names = self.layer1._modules.keys()
# for i, layer_name in enumerate(layer_names):
# out, conv1_out, conv2_out = self.layer1._modules[layer_name](out)
# ret_dict['layer1_{}_conv1'.format(i)] = conv1_out
# ret_dict['layer1_{}_conv2'.format(i)] = conv2_out
#
# layer_names = self.layer2._modules.keys()
# for i, layer_name in enumerate(layer_names):
# out, conv1_out, conv2_out = self.layer2._modules[layer_name](out)
# ret_dict['layer2_{}_conv1'.format(i)] = conv1_out
# ret_dict['layer2_{}_conv2'.format(i)] = conv2_out
#
# layer_names = self.layer3._modules.keys()
# for i, layer_name in enumerate(layer_names):
# out, conv1_out, conv2_out = self.layer3._modules[layer_name](out)
# ret_dict['layer3_{}_conv1'.format(i)] = conv1_out
# ret_dict['layer3_{}_conv2'.format(i)] = conv2_out
#
# out = F.avg_pool2d(out, out.size()[3])
# out = out.view(out.size(0), -1)
# out = self.bn2(out)
# out = self.linear(out)
# ret_dict['out'] = out
# return ret_dict
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, func, params, inplanes, planes, stride=1):
super(BasicBlock, self).__init__()
conv = get_func(func)
self.conv1 = conv(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, **params)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, **params)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or inplanes != planes:
self.shortcut = LambdaLayer(lambda x: F.pad(x[:, :, fc00:e968:6179::de52:7100, ::2], (0, 0, 0, 0, planes//4, planes//4),
"constant", 0))
def forward(self, x):
conv1_out = F.relu(self.bn1(self.conv1(x)))
conv2_out = self.bn2(self.conv2(conv1_out))
out = conv2_out + self.shortcut(x)
out = F.relu(out)
return out, conv1_out, conv2_out
class ResNet(nn.Module):
def __init__(self, func, params, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(func, params, block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(func, params, block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(func, params, block, 64, num_blocks[2], stride=2)
self.bn2 = nn.BatchNorm1d(64)
self.linear = nn.Linear(64, num_classes)
def _make_layer(self, func, params, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
ret_dict = dict()
for i, stride in enumerate(strides):
layers = []
layers.append(block(func, params, self.in_planes, planes, stride))
ret_dict['block_{}'.format(i)] = nn.Sequential(*layers)
self.in_planes = planes * block.expansion
return nn.Sequential(OrderedDict(ret_dict))
def forward(self, x):
ret_dict = dict()
out = F.relu(self.conv1(x))
layer_names = self.layer1._modules.keys()
for i, layer_name in enumerate(layer_names):
out, conv1_out, conv2_out = self.layer1._modules[layer_name](out)
ret_dict['layer1_{}_conv1'.format(i)] = conv1_out
ret_dict['layer1_{}_conv2'.format(i)] = conv2_out
layer_names = self.layer2._modules.keys()
for i, layer_name in enumerate(layer_names):
out, conv1_out, conv2_out = self.layer2._modules[layer_name](out)
ret_dict['layer2_{}_conv1'.format(i)] = conv1_out
ret_dict['layer2_{}_conv2'.format(i)] = conv2_out
layer_names = self.layer3._modules.keys()
for i, layer_name in enumerate(layer_names):
out, conv1_out, conv2_out = self.layer3._modules[layer_name](out)
ret_dict['layer3_{}_conv1'.format(i)] = conv1_out
ret_dict['layer3_{}_conv2'.format(i)] = conv2_out
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.bn2(out)
out = self.linear(out)
ret_dict['out'] = out
return ret_dict
def resnet20(quant_func, quant_params, **kwargs):
"""ResNet-20 model.
"""
print(kwargs)
return ResNet(quant_func, quant_params, BasicBlock, [3, 3, 3], **kwargs)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from models.util import get_func
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
# class BasicBlock(nn.Module):
# expansion = 1
#
# def __init__(self, func, inplanes, planes, stride=1, num_bit=1, wgt_sigma=1, wgt_temp=2, act_sigma=2, act_temp=2):
# super(BasicBlock, self).__init__()
# self.conv1 = func(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, num_bit=num_bit,
# wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp)
# self.bn1 = nn.BatchNorm2d(planes)
# self.relu = nn.ReLU(inplace=True)
# self.conv2 = func(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, num_bit=num_bit,
# wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp)
# self.bn2 = nn.BatchNorm2d(planes)
# self.shortcut = nn.Sequential()
# if stride != 1 or inplanes != planes:
# self.shortcut = LambdaLayer(lambda x: F.pad(x[:, :, fc00:e968:6179::de52:7100, ::2], (0, 0, 0, 0, planes//4, planes//4),
# "constant", 0))
#
# def forward(self, x):
# conv1_out = F.relu(self.bn1(self.conv1(x)))
# conv2_out = self.bn2(self.conv2(conv1_out))
# out = conv2_out + self.shortcut(x)
# out = F.relu(out)
# return out, conv1_out, conv2_out
#
#
# class ResNet(nn.Module):
#
# def __init__(self, block, num_blocks, num_classes=10, num_bit=1, wgt_sigma=1, wgt_temp=2, act_sigma=2, act_temp=2):
# super(ResNet, self).__init__()
# self.in_planes = 16
# self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
# self.bn1 = nn.BatchNorm2d(16)
# self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1, num_bit=num_bit,
# wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp)
# self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2, num_bit=num_bit,
# wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp)
# self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2, num_bit=num_bit,
# wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp)
#
# self.bn2 = nn.BatchNorm1d(64)
#
# self.linear = nn.Linear(64, num_classes)
#
# def _make_layer(self, block, planes, num_blocks, stride, num_bit, wgt_sigma, wgt_temp, act_sigma, act_temp):
# strides = [stride] + [1]*(num_blocks-1)
# ret_dict = dict()
#
# for i, stride in enumerate(strides):
# layers = []
# layers.append(block(self.in_planes, planes, stride, num_bit, wgt_sigma, wgt_temp, act_sigma, act_temp))
# ret_dict['block_{}'.format(i)] = nn.Sequential(*layers)
# self.in_planes = planes * block.expansion
#
# return nn.Sequential(OrderedDict(ret_dict))
#
# def forward(self, x):
# ret_dict = dict()
# out = F.relu(self.conv1(x))
# layer_names = self.layer1._modules.keys()
# for i, layer_name in enumerate(layer_names):
# out, conv1_out, conv2_out = self.layer1._modules[layer_name](out)
# ret_dict['layer1_{}_conv1'.format(i)] = conv1_out
# ret_dict['layer1_{}_conv2'.format(i)] = conv2_out
#
# layer_names = self.layer2._modules.keys()
# for i, layer_name in enumerate(layer_names):
# out, conv1_out, conv2_out = self.layer2._modules[layer_name](out)
# ret_dict['layer2_{}_conv1'.format(i)] = conv1_out
# ret_dict['layer2_{}_conv2'.format(i)] = conv2_out
#
# layer_names = self.layer3._modules.keys()
# for i, layer_name in enumerate(layer_names):
# out, conv1_out, conv2_out = self.layer3._modules[layer_name](out)
# ret_dict['layer3_{}_conv1'.format(i)] = conv1_out
# ret_dict['layer3_{}_conv2'.format(i)] = conv2_out
#
# out = F.avg_pool2d(out, out.size()[3])
# out = out.view(out.size(0), -1)
# out = self.bn2(out)
# out = self.linear(out)
# ret_dict['out'] = out
# return ret_dict
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, func, params, inplanes, planes, stride=1):
super(BasicBlock, self).__init__()
conv = get_func(func)
self.conv1 = conv(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, **params)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, **params)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or inplanes != planes:
self.shortcut = LambdaLayer(lambda x: F.pad(x[:, :, fc00:e968:6179::de52:7100, ::2], (0, 0, 0, 0, planes//4, planes//4),
"constant", 0))
def forward(self, x):
conv1_out = F.relu(self.bn1(self.conv1(x)))
conv2_out = self.bn2(self.conv2(conv1_out))
out = conv2_out + self.shortcut(x)
out = F.relu(out)
return out, conv1_out, conv2_out
class ResNet(nn.Module):
def __init__(self, func, params, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(func, params, block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(func, params, block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(func, params, block, 64, num_blocks[2], stride=2)
self.bn2 = nn.BatchNorm1d(64)
self.linear = nn.Linear(64, num_classes)
def _make_layer(self, func, params, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
ret_dict = dict()
for i, stride in enumerate(strides):
layers = []
layers.append(block(func, params, self.in_planes, planes, stride))
ret_dict['block_{}'.format(i)] = nn.Sequential(*layers)
self.in_planes = planes * block.expansion
return nn.Sequential(OrderedDict(ret_dict))
def forward(self, x):
ret_dict = dict()
out = F.relu(self.conv1(x))
layer_names = self.layer1._modules.keys()
for i, layer_name in enumerate(layer_names):
out, conv1_out, conv2_out = self.layer1._modules[layer_name](out)
ret_dict['layer1_{}_conv1'.format(i)] = conv1_out
ret_dict['layer1_{}_conv2'.format(i)] = conv2_out
layer_names = self.layer2._modules.keys()
for i, layer_name in enumerate(layer_names):
out, conv1_out, conv2_out = self.layer2._modules[layer_name](out)
ret_dict['layer2_{}_conv1'.format(i)] = conv1_out
ret_dict['layer2_{}_conv2'.format(i)] = conv2_out
layer_names = self.layer3._modules.keys()
for i, layer_name in enumerate(layer_names):
out, conv1_out, conv2_out = self.layer3._modules[layer_name](out)
ret_dict['layer3_{}_conv1'.format(i)] = conv1_out
ret_dict['layer3_{}_conv2'.format(i)] = conv2_out
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.bn2(out)
out = self.linear(out)
ret_dict['out'] = out
return ret_dict
def resnet20(quant_func, quant_params, **kwargs):
"""ResNet-20 model.
"""
print(kwargs)
return ResNet(quant_func, quant_params, BasicBlock, [3, 3, 3], **kwargs)
| en | 0.414154 | #!/usr/bin/env python # -*- coding: utf-8 -*- # class BasicBlock(nn.Module): # expansion = 1 # # def __init__(self, func, inplanes, planes, stride=1, num_bit=1, wgt_sigma=1, wgt_temp=2, act_sigma=2, act_temp=2): # super(BasicBlock, self).__init__() # self.conv1 = func(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, num_bit=num_bit, # wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp) # self.bn1 = nn.BatchNorm2d(planes) # self.relu = nn.ReLU(inplace=True) # self.conv2 = func(planes, planes, kernel_size=3, stride=1, padding=1, bias=False, num_bit=num_bit, # wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp) # self.bn2 = nn.BatchNorm2d(planes) # self.shortcut = nn.Sequential() # if stride != 1 or inplanes != planes: # self.shortcut = LambdaLayer(lambda x: F.pad(x[:, :, fc00:e968:6179::de52:7100, ::2], (0, 0, 0, 0, planes//4, planes//4), # "constant", 0)) # # def forward(self, x): # conv1_out = F.relu(self.bn1(self.conv1(x))) # conv2_out = self.bn2(self.conv2(conv1_out)) # out = conv2_out + self.shortcut(x) # out = F.relu(out) # return out, conv1_out, conv2_out # # # class ResNet(nn.Module): # # def __init__(self, block, num_blocks, num_classes=10, num_bit=1, wgt_sigma=1, wgt_temp=2, act_sigma=2, act_temp=2): # super(ResNet, self).__init__() # self.in_planes = 16 # self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False) # self.bn1 = nn.BatchNorm2d(16) # self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1, num_bit=num_bit, # wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp) # self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2, num_bit=num_bit, # wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp) # self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2, num_bit=num_bit, # wgt_sigma=wgt_sigma, wgt_temp=wgt_temp, act_sigma=act_sigma, act_temp=act_temp) # # 
self.bn2 = nn.BatchNorm1d(64) # # self.linear = nn.Linear(64, num_classes) # # def _make_layer(self, block, planes, num_blocks, stride, num_bit, wgt_sigma, wgt_temp, act_sigma, act_temp): # strides = [stride] + [1]*(num_blocks-1) # ret_dict = dict() # # for i, stride in enumerate(strides): # layers = [] # layers.append(block(self.in_planes, planes, stride, num_bit, wgt_sigma, wgt_temp, act_sigma, act_temp)) # ret_dict['block_{}'.format(i)] = nn.Sequential(*layers) # self.in_planes = planes * block.expansion # # return nn.Sequential(OrderedDict(ret_dict)) # # def forward(self, x): # ret_dict = dict() # out = F.relu(self.conv1(x)) # layer_names = self.layer1._modules.keys() # for i, layer_name in enumerate(layer_names): # out, conv1_out, conv2_out = self.layer1._modules[layer_name](out) # ret_dict['layer1_{}_conv1'.format(i)] = conv1_out # ret_dict['layer1_{}_conv2'.format(i)] = conv2_out # # layer_names = self.layer2._modules.keys() # for i, layer_name in enumerate(layer_names): # out, conv1_out, conv2_out = self.layer2._modules[layer_name](out) # ret_dict['layer2_{}_conv1'.format(i)] = conv1_out # ret_dict['layer2_{}_conv2'.format(i)] = conv2_out # # layer_names = self.layer3._modules.keys() # for i, layer_name in enumerate(layer_names): # out, conv1_out, conv2_out = self.layer3._modules[layer_name](out) # ret_dict['layer3_{}_conv1'.format(i)] = conv1_out # ret_dict['layer3_{}_conv2'.format(i)] = conv2_out # # out = F.avg_pool2d(out, out.size()[3]) # out = out.view(out.size(0), -1) # out = self.bn2(out) # out = self.linear(out) # ret_dict['out'] = out # return ret_dict ResNet-20 model. | 2.362331 | 2 |
awsscripter/cli/test/udp.py | xformation/awsscripter | 0 | 6615749 | <reponame>xformation/awsscripter
import click
@click.command(name="udp")
def password_udp():
#this is command1
print("udp password") | import click
@click.command(name="udp")
def password_udp():
#this is command1
print("udp password") | en | 0.986848 | #this is command1 | 2.382992 | 2 |
audiolizer/history.py | asherp/audiolizer | 2 | 6615750 | <reponame>asherp/audiolizer
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Objective: get_history should fetch all the data at once then save it to separate files.
import logging
logger = logging.getLogger(__name__)
fhandler = logging.FileHandler(filename='audiolizer.log', mode='a')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fhandler.setFormatter(formatter)
logger.addHandler(fhandler)
logger.setLevel(logging.DEBUG)
# +
import pytz
from Historic_Crypto import HistoricalData
import pandas as pd
import os
from datetime import datetime
def get_timezones(url):
return [dict(label=v, value=v) for v in pytz.all_timezones]
granularity = int(os.environ.get('AUDIOLIZER_GRANULARITY', 300)) # seconds
audiolizer_temp_dir = os.environ.get('AUDIOLIZER_TEMP', './history')
logger.info('audiolizer temp data: {}'.format(audiolizer_temp_dir))
max_age = pd.Timedelta(os.environ.get('AUDIOLIZER_MAX_AGE', '5m'))
logger.info('audiolizer max daily age {}'.format(max_age))
def refactor(df, frequency='1W'):
"""Refactor/rebin the data to a lower cadence
The data is regrouped using pd.Grouper
"""
low = df.low.groupby(pd.Grouper(freq=frequency)).min()
high = df.high.groupby(pd.Grouper(freq=frequency)).max()
close = df.close.groupby(pd.Grouper(freq=frequency)).last()
open_ = df.open.groupby(pd.Grouper(freq=frequency)).first()
volume = df.volume.groupby(pd.Grouper(freq=frequency)).sum()
return pd.DataFrame(dict(low=low, high=high, open=open_, close=close, volume=volume))
def load_date(ticker, granularity, int_):
logger.info('loading single date {}'.format(int_))
start_ = int_.left.strftime('%Y-%m-%d-%H-%M')
end_ = int_.right.strftime('%Y-%m-%d-%H-%M')
try:
return HistoricalData(ticker,
granularity,
start_,
end_,
).retrieve_data()
except:
logger.warning('could not load using {} {}'.format(start_, end_))
raise
def get_gaps(df, granularity):
new_ = refactor(df, '{}s'.format(granularity))
return new_[new_.close.isna()]
def fetch_data(ticker, granularity, start_, end_):
    """Fetch price history between two dates formatted %Y-%m-%d-%H-%M.

    params:
        ticker: (str) product id, e.g. 'BTC-USD'
        granularity: (int) candle width in seconds
        start_, end_: (str) endpoints in %Y-%m-%d-%H-%M format

    Returns the HistoricalData DataFrame; logs and re-raises on failure.
    """
    try:
        return HistoricalData(ticker,
                          granularity,
                          start_,
                          end_,
                          ).retrieve_data()
    except Exception:
        # was a bare except; keep KeyboardInterrupt/SystemExit uncaught
        logger.warning('could not load using {} {}'.format(start_, end_))
        raise
def write_data(df, ticker):
    """Persist *df* to one gzipped CSV per calendar day in audiolizer_temp_dir.

    Filenames follow '{ticker}-{YYYY-MM-DD}.csv.gz'. Days with fewer than two
    rows are skipped so near-empty fetches are not cached as complete days.
    (Removed the unused local `tstr` from the original.)
    """
    for t, day in df.groupby(pd.Grouper(freq='1D')):
        fname = audiolizer_temp_dir + '/{}-{}.csv.gz'.format(
            ticker, t.strftime('%Y-%m-%d'))
        if len(day) > 1:
            day.to_csv(fname, compression='gzip')
            logger.info('wrote {}'.format(fname))
def fetch_missing(files_status, ticker, granularity):
    """Download and cache each contiguous batch of missing dates.

    *files_status* is the frame produced by get_files_status; rows with
    found == 0 are grouped by their `batch` id so each run of consecutive
    missing days becomes a single fetch.
    """
    missing = files_status[files_status.found==0]
    for batch, chunk in missing.groupby('batch', sort=False):
        first, last = chunk.iloc[[0, -1]].index
        # pad the right edge by one day so the final date is fully covered
        last = last + pd.Timedelta('1D')
        endpoints = [stamp.strftime('%Y-%m-%d-%H-%M') for stamp in (first, last)]
        logger.info('fetching {}, {}'.format(len(chunk), endpoints))
        # trim to the requested window before writing the per-day caches
        fetched = fetch_data(ticker, granularity, *endpoints).loc[first:last]
        write_data(fetched, ticker)
def get_files_status(ticker, start_date, end_date):
    """Build a per-day index of which cached files exist for *ticker*.

    Returns a DataFrame indexed by date with columns:
        files: expected cache filename for that day
        found: 1 if the file exists on disk, else 0
        batch: run id that increments every time `found` flips, so each
               contiguous run of missing (or present) days shares one id
    """
    # normalize to midnight so interval_range steps over whole days
    start_date = pd.to_datetime(start_date.date())
    end_date = pd.to_datetime(end_date.date())
    fnames = []
    foundlings = []
    dates = []
    batch = []
    batch_number = 0
    last_found = -1  # sentinel: guarantees the first day opens batch 1
    for int_ in pd.interval_range(start_date, end_date):
        dates.append(int_.left)
        fname = audiolizer_temp_dir + '/{}-{}.csv.gz'.format(
            ticker, int_.left.strftime('%Y-%m-%d'))
        found = int(os.path.exists(fname))
        foundlings.append(found)
        if found != last_found:
            # presence flipped -> start a new contiguous batch
            batch_number += 1
            last_found = found
        batch.append(batch_number)
        fnames.append(fname)
    files_status = pd.DataFrame(dict(files=fnames, found=foundlings, batch=batch), index=dates)
    return files_status
# -
def get_today_GMT():
    """Return the current timestamp converted from system time to GMT,
    with the timezone info dropped (tz-naive)."""
    local_now = pd.Timestamp(datetime.now().astimezone())
    return local_now.tz_convert('GMT').tz_localize(None)
# + active="ipynb"
# get_today_GMT()
# -
# * getting BTC-USD files status: 2021-07-20 00:00:00 -> 2021-07-21 03:50:49.619707
# * INFO:history:getting BTC-USD files status: 2021-07-20 00:00:00 -> 2021-07-21 04:07:48.872110
# * 2021-07-14 00:00:00 -> 2021-07-21 04:07:22.738431
# Scratch cell: reproduce fetch_missing() inline for BTC-USD so the missing
# batches and their fetch endpoints can be inspected interactively.
files_status = get_files_status('BTC-USD', pd.to_datetime('2021-07-14 00:00:00'), pd.to_datetime('2021-07-21 04:07:22.738431'))
files_status
for batch, g in files_status[files_status.found==0].groupby('batch', sort=False):
    t1, t2 = g.iloc[[0, -1]].index
    # extend by 1 day whether or not t1 == t2
    t2 += pd.Timedelta('1D')
    endpoints = [t.strftime('%Y-%m-%d-%H-%M') for t in [t1, t2]]
    print('fetching {}, {}'.format(len(g), endpoints))
    df = fetch_data('BTC-USD', granularity, *endpoints)
    # write_data(df, ticker)
# +
def get_today(ticker, granularity):
    """Fetch today's (GMT) price history for *ticker*, midnight to now.

    params:
        ticker: (str) product id, e.g. 'BTC-USD'
        granularity: (int) candle width in seconds

    Returns the HistoricalData DataFrame; logs and re-raises on failure.
    (Removed the unused local `tomorrow` from the original.)
    """
    today = get_today_GMT()
    start_ = '{}-00-00'.format(today.strftime('%Y-%m-%d'))
    end_ = today.strftime('%Y-%m-%d-%H-%M')
    try:
        df = HistoricalData(ticker,
                            granularity,
                            start_,
                            end_,
                            ).retrieve_data()
        return df
    except Exception:
        # was a bare except; keep KeyboardInterrupt/SystemExit uncaught
        logger.warning('could not load using {} {}'.format(start_, end_))
        raise
def get_age(fname):
    """Return the time elapsed since *fname* was last modified."""
    modified = datetime.fromtimestamp(os.stat(fname).st_mtime)
    return pd.Timestamp.now() - modified
def get_history(ticker, start_date, end_date = None, granularity=granularity):
    """Fetch/load historical data from the Coinbase API at *granularity*.

    Data is loaded from start_date through the end of end_date.

    params:
        start_date: (str) (see pandas.to_datetime for acceptable formats)
        end_date: (str) optional; defaults to today (GMT)
        granularity: (int) seconds (default: 300)

    Price data is cached by ticker and date in audiolizer_temp_dir; when
    end_date is today, a separate '{ticker}-today.csv.gz' cache is refreshed
    whenever it is older than max_age.

    There are two timezones to keep track of. Assume input in GMT.
        system timezone: the timezone of the machine the audiolizer runs on
        GMT: the timezone that price history is fetched/stored in
    """
    start_date = pd.to_datetime(start_date)
    today = get_today_GMT() # tz-naive but value matches GMT
    if end_date is None:
        end_date = today
        logger.info('no end_date provided, using {}'.format(end_date))
    else:
        # convert the user-specified date and timezone to GMT
        end_date = pd.to_datetime(end_date)
        # prevent queries from the future; pad a day so end_date is inclusive
        end_date = min(today, end_date) + pd.Timedelta('1d')
        logger.info('using end_date {}'.format(end_date))
    assert start_date <= end_date
    logger.info('getting {} files status: {} -> {}'.format(ticker, start_date, end_date))
    files_status = get_files_status(ticker, start_date, end_date)
    if len(files_status) == 0:
        # bail out before fetching/concatenating anything
        # (fix: the original format string had one placeholder for three args,
        # silently dropping the dates from the error message)
        raise IOError('Could not get file status for {} between {} and {}'.format(
            ticker, start_date, end_date))
    fetch_missing(files_status, ticker, granularity)
    df = pd.concat(map(lambda file: pd.read_csv(file, index_col='time', parse_dates=True, compression='gzip'),
                       files_status.files)).drop_duplicates()
    if end_date == today:
        logger.info('end date is today!')
        # check age of today's cache; refresh it when older than max_age
        today_fname = audiolizer_temp_dir + '/{}-today.csv.gz'.format(ticker)
        if os.path.exists(today_fname):
            if get_age(today_fname) > max_age:
                logger.info('{} is too old, fetching new data'.format(today_fname))
                today_data = get_today(ticker, granularity)
                today_data.to_csv(today_fname, compression='gzip')
            else:
                logger.info('{} is not that old, loading from disk'.format(today_fname))
                today_data = pd.read_csv(today_fname, index_col='time', parse_dates=True, compression='gzip')
        else:
            logger.info('{} not present. loading'.format(today_fname))
            today_data = get_today(ticker, granularity)
            today_data.to_csv(today_fname, compression='gzip')
        df = pd.concat([df, today_data]).drop_duplicates()
    return df
# -
# Scratch: sanity-check the index produced by get_today for BTC-USD at 5-minute granularity.
to = get_today('BTC-USD', 300)
to.index
# + active="ipynb"
# hist = get_history('BTC-USD',
# '07/21/2021',
# # pd.Timestamp.now().tz_localize(None)-pd.Timedelta('3D'),
# )
# hist
# + active="ipynb"
# from audiolizer import candlestick_plot
# from plotly import graph_objs as go
# + active="ipynb"
# candlestick_plot(hist, 'BTC', 'USD')
# -
# Show today's prices
# + active="ipynb"
# today_file = 'history/BTC-USD-today.csv.gz'
# pd.read_csv(today_file, index_col='time', parse_dates=True, compression='gzip')
| # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Objective: get_history should fetch all the data at once then save it to separate files.
import logging
logger = logging.getLogger(__name__)
fhandler = logging.FileHandler(filename='audiolizer.log', mode='a')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fhandler.setFormatter(formatter)
logger.addHandler(fhandler)
logger.setLevel(logging.DEBUG)
# +
import pytz
from Historic_Crypto import HistoricalData
import pandas as pd
import os
from datetime import datetime
def get_timezones(url):
return [dict(label=v, value=v) for v in pytz.all_timezones]
granularity = int(os.environ.get('AUDIOLIZER_GRANULARITY', 300)) # seconds
audiolizer_temp_dir = os.environ.get('AUDIOLIZER_TEMP', './history')
logger.info('audiolizer temp data: {}'.format(audiolizer_temp_dir))
max_age = pd.Timedelta(os.environ.get('AUDIOLIZER_MAX_AGE', '5m'))
logger.info('audiolizer max daily age {}'.format(max_age))
def refactor(df, frequency='1W'):
"""Refactor/rebin the data to a lower cadence
The data is regrouped using pd.Grouper
"""
low = df.low.groupby(pd.Grouper(freq=frequency)).min()
high = df.high.groupby(pd.Grouper(freq=frequency)).max()
close = df.close.groupby(pd.Grouper(freq=frequency)).last()
open_ = df.open.groupby(pd.Grouper(freq=frequency)).first()
volume = df.volume.groupby(pd.Grouper(freq=frequency)).sum()
return pd.DataFrame(dict(low=low, high=high, open=open_, close=close, volume=volume))
def load_date(ticker, granularity, int_):
logger.info('loading single date {}'.format(int_))
start_ = int_.left.strftime('%Y-%m-%d-%H-%M')
end_ = int_.right.strftime('%Y-%m-%d-%H-%M')
try:
return HistoricalData(ticker,
granularity,
start_,
end_,
).retrieve_data()
except:
logger.warning('could not load using {} {}'.format(start_, end_))
raise
def get_gaps(df, granularity):
new_ = refactor(df, '{}s'.format(granularity))
return new_[new_.close.isna()]
def fetch_data(ticker, granularity, start_, end_):
"""Need dates in this format %Y-%m-%d-%H-%M"""
try:
return HistoricalData(ticker,
granularity,
start_,
end_,
).retrieve_data()
except:
logger.warning('could not load using {} {}'.format(start_, end_))
raise
def write_data(df, ticker):
for t, day in df.groupby(pd.Grouper(freq='1D')):
tstr = t.strftime('%Y-%m-%d-%H-%M')
fname = audiolizer_temp_dir + '/{}-{}.csv.gz'.format(
ticker, t.strftime('%Y-%m-%d'))
if len(day) > 1:
day.to_csv(fname, compression='gzip')
logger.info('wrote {}'.format(fname))
def fetch_missing(files_status, ticker, granularity):
"""Iterate over batches of missing dates"""
for batch, g in files_status[files_status.found==0].groupby('batch', sort=False):
t1, t2 = g.iloc[[0, -1]].index
# extend by 1 day whether or not t1 == t2
t2 += pd.Timedelta('1D')
endpoints = [t.strftime('%Y-%m-%d-%H-%M') for t in [t1, t2]]
logger.info('fetching {}, {}'.format(len(g), endpoints))
df = fetch_data(ticker, granularity, *endpoints).loc[t1:t2] # only grab data between endpoints
write_data(df, ticker)
def get_files_status(ticker, start_date, end_date):
start_date = pd.to_datetime(start_date.date())
end_date = pd.to_datetime(end_date.date())
fnames = []
foundlings = []
dates = []
batch = []
batch_number = 0
last_found = -1
for int_ in pd.interval_range(start_date, end_date):
dates.append(int_.left)
fname = audiolizer_temp_dir + '/{}-{}.csv.gz'.format(
ticker, int_.left.strftime('%Y-%m-%d'))
found = int(os.path.exists(fname))
foundlings.append(found)
if found != last_found:
batch_number += 1
last_found = found
batch.append(batch_number)
fnames.append(fname)
files_status = pd.DataFrame(dict(files=fnames, found=foundlings, batch=batch), index=dates)
return files_status
# -
def get_today_GMT():
# convert from system time to GMT
system_time = pd.Timestamp(datetime.now().astimezone())
today = system_time.tz_convert('GMT').tz_localize(None)
return today
# + active="ipynb"
# get_today_GMT()
# -
# * getting BTC-USD files status: 2021-07-20 00:00:00 -> 2021-07-21 03:50:49.619707
# * INFO:history:getting BTC-USD files status: 2021-07-20 00:00:00 -> 2021-07-21 04:07:48.872110
# * 2021-07-14 00:00:00 -> 2021-07-21 04:07:22.738431
files_status = get_files_status('BTC-USD', pd.to_datetime('2021-07-14 00:00:00'), pd.to_datetime('2021-07-21 04:07:22.738431'))
files_status
for batch, g in files_status[files_status.found==0].groupby('batch', sort=False):
t1, t2 = g.iloc[[0, -1]].index
# extend by 1 day whether or not t1 == t2
t2 += pd.Timedelta('1D')
endpoints = [t.strftime('%Y-%m-%d-%H-%M') for t in [t1, t2]]
print('fetching {}, {}'.format(len(g), endpoints))
df = fetch_data('BTC-USD', granularity, *endpoints)
# write_data(df, ticker)
# +
def get_today(ticker, granularity):
today = get_today_GMT()
tomorrow = today + pd.Timedelta('1D')
start_ = '{}-00-00'.format(today.strftime('%Y-%m-%d'))
end_ = today.strftime('%Y-%m-%d-%H-%M')
try:
df = HistoricalData(ticker,
granularity,
start_,
end_,
).retrieve_data()
return df
except:
logger.warning('could not load using {} {}'.format(start_, end_))
raise
def get_age(fname):
"""Get the age of a given a file"""
st=os.stat(fname)
mtime=st.st_mtime
return pd.Timestamp.now() - datetime.fromtimestamp(mtime)
def get_history(ticker, start_date, end_date = None, granularity=granularity):
"""Fetch/load historical data from Coinbase API at specified granularity
Data loaded from start_date through end of end_date
params:
start_date: (str) (see pandas.to_datetime for acceptable formats)
end_date: (str)
granularity: (int) seconds (default: 300)
price data is saved by ticker and date and stored in audiolizer_temp_dir
There are two timezones to keep track of. Assume input in GMT
system timezone: the timezone of the machine the audiolizer is run from
GMT: the timezone that price history is fetched/stored in
"""
start_date = pd.to_datetime(start_date)
today = get_today_GMT() #tz-naive but value matches GMT
if end_date is None:
# don't include today
end_date = today
logger.info('no end_date provided, using {}'.format(end_date))
else:
# convert the user-specified date and timezone to GMT
end_date = pd.to_datetime(end_date)
# prevent queries from the future
end_date = min(today, end_date) + pd.Timedelta('1d')
logger.info('using end_date {}'.format(end_date))
assert start_date <= end_date
logger.info('getting {} files status: {} -> {}'.format(ticker, start_date, end_date))
files_status = get_files_status(ticker, start_date, end_date)
fetch_missing(files_status, ticker, granularity)
if len(files_status) == 0:
raise IOError('Could not get file status for {}'.format(ticker, start_date, end_date))
df = pd.concat(map(lambda file: pd.read_csv(file, index_col='time', parse_dates=True, compression='gzip'),
files_status.files)).drop_duplicates()
if end_date == today:
logger.info('end date is today!')
# check age of today's data. If it's old, fetch the new one
today_fname = audiolizer_temp_dir + '/{}-today.csv.gz'.format(ticker)
if os.path.exists(today_fname):
if get_age(today_fname) > max_age:
logger.info('{} is too old, fetching new data'.format(today_fname))
today_data = get_today(ticker, granularity)
today_data.to_csv(today_fname, compression='gzip')
else:
logger.info('{} is not that old, loading from disk'.format(today_fname))
today_data = pd.read_csv(today_fname, index_col='time', parse_dates=True, compression='gzip')
else:
logger.info('{} not present. loading'.format(today_fname))
today_data = get_today(ticker, granularity)
today_data.to_csv(today_fname, compression='gzip')
df = pd.concat([df, today_data]).drop_duplicates()
return df
# -
to = get_today('BTC-USD', 300)
to.index
# + active="ipynb"
# hist = get_history('BTC-USD',
# '07/21/2021',
# # pd.Timestamp.now().tz_localize(None)-pd.Timedelta('3D'),
# )
# hist
# + active="ipynb"
# from audiolizer import candlestick_plot
# from plotly import graph_objs as go
# + active="ipynb"
# candlestick_plot(hist, 'BTC', 'USD')
# -
# Show today's prices
# + active="ipynb"
# today_file = 'history/BTC-USD-today.csv.gz'
# pd.read_csv(today_file, index_col='time', parse_dates=True, compression='gzip') | en | 0.692312 | # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.11.2 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Objective: get_history should fetch all the data at once then save it to separate files. # + # seconds Refactor/rebin the data to a lower cadence The data is regrouped using pd.Grouper Need dates in this format %Y-%m-%d-%H-%M Iterate over batches of missing dates # extend by 1 day whether or not t1 == t2 # only grab data between endpoints # - # convert from system time to GMT # + active="ipynb" # get_today_GMT() # - # * getting BTC-USD files status: 2021-07-20 00:00:00 -> 2021-07-21 03:50:49.619707 # * INFO:history:getting BTC-USD files status: 2021-07-20 00:00:00 -> 2021-07-21 04:07:48.872110 # * 2021-07-14 00:00:00 -> 2021-07-21 04:07:22.738431 # extend by 1 day whether or not t1 == t2 # write_data(df, ticker) # + Get the age of a given a file Fetch/load historical data from Coinbase API at specified granularity Data loaded from start_date through end of end_date params: start_date: (str) (see pandas.to_datetime for acceptable formats) end_date: (str) granularity: (int) seconds (default: 300) price data is saved by ticker and date and stored in audiolizer_temp_dir There are two timezones to keep track of. Assume input in GMT system timezone: the timezone of the machine the audiolizer is run from GMT: the timezone that price history is fetched/stored in #tz-naive but value matches GMT # don't include today # convert the user-specified date and timezone to GMT # prevent queries from the future # check age of today's data. 
If it's old, fetch the new one # - # + active="ipynb" # hist = get_history('BTC-USD', # '07/21/2021', # # pd.Timestamp.now().tz_localize(None)-pd.Timedelta('3D'), # ) # hist # + active="ipynb" # from audiolizer import candlestick_plot # from plotly import graph_objs as go # + active="ipynb" # candlestick_plot(hist, 'BTC', 'USD') # - # Show today's prices # + active="ipynb" # today_file = 'history/BTC-USD-today.csv.gz' # pd.read_csv(today_file, index_col='time', parse_dates=True, compression='gzip') | 2.755043 | 3 |