code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
"""
To run this in a publically available setting, use --host=0.0.0.0
at the command line
"""
from flask import Flask, render_template, request
from typing import Dict
from src.video_captions import Video
import datetime
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def home():
    """Serve the index page; on POST, render the transcript for the submitted URL."""
    if request.method == "POST":
        submitted_url = request.form["url"]
        transcript = create_transcript_from_url(submitted_url)
        return render_template('index.html', transcript=transcript, default_url=submitted_url)
    elif request.method == "GET":
        # First visit: empty transcript, empty form.
        return render_template("index.html", transcript={}, default_url="")
@app.route("/json/<string:id>")
def create_transcript(id: str) -> Dict:
    """
    Accepts the ID of a YouTube video (not a full URL — the route converter
    supplies only the `v=` identifier) and returns a dictionary mapping
    timestamps to their transcript text.
    """
    # `id` shadows the builtin, but the name must match the <string:id>
    # route converter, so it is kept for compatibility.
    url = fr"https://www.youtube.com/watch?v={id}"
    # Delegate instead of duplicating create_transcript_from_url's body.
    return create_transcript_from_url(url)
def create_transcript_from_url(url: str) -> Dict:
    """
    Given the URL of a YouTube video, return a dictionary mapping
    timestamps to their transcript text.
    """
    captions = Video(url).get_caption_dict()
    return jsonify_caption_dict(captions)
def jsonify_caption_dict(caption_dict, format="%H:%M:%S"):
    """
    Convert a caption dict into a JSON-serializable dict.

    Args:
        caption_dict: maps (datetime, title) tuples to iterables of captions.
        format: strftime format used to render the timestamp in the key.

    Returns:
        Dict mapping "<time> <title>" strings to the joined caption text,
        with runs of spaces collapsed to a single space.
    """
    DATETIME = 0
    TITLE = 1
    caption_json = dict()
    for key, captions in caption_dict.items():  # key is a tuple (datetime, title: str)
        time_str = key[DATETIME].strftime(format)
        json_key = f"{time_str} {key[TITLE]}"
        text = " ".join(str(caption) for caption in captions)
        # Fully collapse repeated spaces; the previous single-pass replace
        # could still leave double spaces behind for longer runs.
        while "  " in text:
            text = text.replace("  ", " ")
        caption_json[json_key] = text
    return caption_json
if __name__ == '__main__':
    # Ad-hoc smoke test: fetch captions for a sample video and build the
    # JSON-style dict (the result is printed just below).
    url = r"https://www.youtube.com/watch?v=ClxRHJPz8aQ&t=3734s"
    video = Video(url)
    caption_dict = video.get_caption_dict()
    caption_json = jsonify_caption_dict(caption_dict)
print(caption_json) | [
"flask.render_template",
"src.video_captions.Video",
"datetime.datetime.strftime",
"flask.Flask"
] | [((231, 246), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (236, 246), False, 'from flask import Flask, render_template, request\n'), ((878, 888), 'src.video_captions.Video', 'Video', (['url'], {}), '(url)\n', (883, 888), False, 'from src.video_captions import Video\n'), ((1196, 1206), 'src.video_captions.Video', 'Video', (['url'], {}), '(url)\n', (1201, 1206), False, 'from src.video_captions import Video\n'), ((2096, 2106), 'src.video_captions.Video', 'Video', (['url'], {}), '(url)\n', (2101, 2106), False, 'from src.video_captions import Video\n'), ((351, 411), 'flask.render_template', 'render_template', (['"""index.html"""'], {'transcript': '{}', 'default_url': '""""""'}), "('index.html', transcript={}, default_url='')\n", (366, 411), False, 'from flask import Flask, render_template, request\n'), ((1706, 1744), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['dt', 'format'], {}), '(dt, format)\n', (1732, 1744), False, 'import datetime\n'), ((550, 619), 'flask.render_template', 'render_template', (['"""index.html"""'], {'transcript': 'transcript', 'default_url': 'url'}), "('index.html', transcript=transcript, default_url=url)\n", (565, 619), False, 'from flask import Flask, render_template, request\n')] |
import os
import threading
import time
from selenium import webdriver
class WebDriverThread(threading.Thread):
    # Wraps a PhantomJS webdriver in a watchdog thread: run() counts down
    # `timeout` seconds and quits the driver when it expires; every driver
    # interaction extends the countdown by 10s (_increase_timeout).
    # NOTE(review): PhantomJS support was dropped from modern Selenium —
    # confirm the pinned selenium version still ships webdriver.PhantomJS.
    def __init__(self, application_scoped_drivers, unique_id,
                 timeout=50, interval=1):
        super(WebDriverThread, self).__init__()
        self.timeout = timeout    # seconds left before the driver is auto-quit
        self.interval = interval  # polling granularity of the countdown loop
        self.hasQuit = False
        self.driver = None
        # Shared registry of live drivers, keyed by unique_id (e.g. session key).
        self.application_scoped_drivers = application_scoped_drivers
        self.unique_id = unique_id
    def start(self):
        # Driver is created in the *caller's* thread (before the countdown
        # thread starts), so get()/find_element... can be used immediately.
        from django.conf import settings
        self.driver = webdriver.PhantomJS(settings.PHANTOMJS_EXECUTABLE)
        super(WebDriverThread, self).start()
        print('Started thread %s - %s' % (self.name, self.unique_id))
    def run(self):
        # Countdown loop; `timeout` may be refreshed concurrently by the
        # owning request thread via _increase_timeout.
        while self.timeout > 0 and not self.hasQuit:
            print('Timing out... %d - %s,%s' % (
                self.timeout, os.getppid(), os.getpid()))
            time.sleep(self.interval)
            self.timeout -= self.interval
        self.quit()
        print('Finished thread %s - %s' % (self.name, self.unique_id))
    def get(self, url):
        self._increase_timeout()
        self.driver.get(url)
    def _increase_timeout(self):
        # Any activity buys the driver another 10 seconds of life.
        self.timeout += 10
    def find_element_by_id(self, element_id):
        self._increase_timeout()
        return self.driver.find_element_by_id(element_id)
    def execute_script(self, script):
        self._increase_timeout()
        self.driver.execute_script(script)
    def click_button(self, button_id):
        self._increase_timeout()
        self.driver.find_element_by_id(button_id).click()
    def page_source(self):
        self._increase_timeout()
        return self.driver.page_source
    def quit(self):
        # Deregister first so no other request can grab a dying driver.
        self._auto_remove_from_application_scope()
        self.driver.quit()
        self.hasQuit = True
    def _auto_remove_from_application_scope(self):
        try:
            del self.application_scoped_drivers[self.unique_id]
        except KeyError:
            return
    @staticmethod
    def get_driver(application_drivers_dictionary, session_key):
        # Return the live driver for this session, creating (or recreating,
        # if the old thread already finished) one on demand.
        # NOTE(review): Thread.isAlive() was removed in Python 3.9 in favour
        # of is_alive() — confirm the target interpreter version.
        try:
            driver = application_drivers_dictionary[session_key]
            if not driver.isAlive():
                driver = WebDriverThread(
                    application_drivers_dictionary, session_key)
                driver.start()
                application_drivers_dictionary[session_key] = driver
        except KeyError:
            driver = WebDriverThread(
                application_drivers_dictionary, session_key)
            driver.start()
            application_drivers_dictionary[session_key] = driver
        return driver
| [
"selenium.webdriver.PhantomJS",
"os.getppid",
"os.getpid",
"time.sleep"
] | [((576, 626), 'selenium.webdriver.PhantomJS', 'webdriver.PhantomJS', (['settings.PHANTOMJS_EXECUTABLE'], {}), '(settings.PHANTOMJS_EXECUTABLE)\n', (595, 626), False, 'from selenium import webdriver\n'), ((934, 959), 'time.sleep', 'time.sleep', (['self.interval'], {}), '(self.interval)\n', (944, 959), False, 'import time\n'), ((894, 906), 'os.getppid', 'os.getppid', ([], {}), '()\n', (904, 906), False, 'import os\n'), ((908, 919), 'os.getpid', 'os.getpid', ([], {}), '()\n', (917, 919), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# czatpro/czat/views.py
from django.http import HttpResponse
def index(request):
    """Application home page (plain-text greeting)."""
    greeting = "Witaj w aplikacji Czat!"
    return HttpResponse(greeting)
| [
"django.http.HttpResponse"
] | [((154, 193), 'django.http.HttpResponse', 'HttpResponse', (['"""Witaj w aplikacji Czat!"""'], {}), "('Witaj w aplikacji Czat!')\n", (166, 193), False, 'from django.http import HttpResponse\n')] |
from hashlib import sha256
from bitstring import BitArray
import requests
import sys
import logging
BITLENGTH = 128  # key width in bits: keys keep the top 128 bits of SHA-256
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)
def info(string):
    """Print *string* immediately (flushed); the logger call is kept disabled."""
    print(string, flush=True)
    # logger.info(string)
def compute_key(string, bitlength=BITLENGTH):
    """Hash *string* with SHA-256 and return its most-significant
    *bitlength* bits as an unsigned integer."""
    digest = sha256(bytes(string, 'utf-8')).hexdigest()
    # The hex digest encodes 256 bits; shifting keeps only the top
    # `bitlength` of them — equivalent to the old hex->BitArray->bin->
    # BitArray->uint round-trip, without the third-party dependency.
    return int(digest, 16) >> (256 - bitlength)
def dist(a, b, maxnum=2**BITLENGTH - 1):
    """Clockwise distance from *a* to *b* on a ring whose maximum clock
    value is *maxnum*; 0 when they coincide.

    NOTE(review): for a ring of 2**BITLENGTH positions the wrap-around
    case would normally be (maxnum + 1) - (a - b); confirm the intended
    ring size before changing anything — comparisons elsewhere rely on
    this exact formula.
    """
    if a == b:
        return 0
    return b - a if a < b else maxnum - (a - b)
class Process():
    # A Chord-style DHT node: its integer id is derived from its name via
    # compute_key; pred/succ are optional neighbour links given as
    # "host:port:name" strings; `ht` is the local key/value store and
    # `finger` the (currently unused here) finger table.
    def __init__(self, host, port, name, pred=None, succ=None):
        self.name = name
        self.id = compute_key(name)
        self.host = host
        self.port = port
        if pred:
            phost, pport, pname = pred.split(':')
            self.pred = Process(phost, pport, pname)
        else:
            self.pred = None
        if succ:
            phost, pport, pname = succ.split(':')
            self.succ = Process(phost, pport, pname)
        else:
            self.succ = None
        self.ht = dict()
        self.finger = {}
    def toJSON(self):
        # Serializable summary; neighbours are flattened back to
        # "host:port:name" strings (or None when absent).
        retval = {}
        retval['name'] = self.name
        retval['id'] = self.id
        retval['host'] = self.host
        retval['port'] = self.port
        if self.pred:
            retval['pred'] = self.pred.hostportname()
        else:
            retval['pred'] = None
        if self.succ:
            retval['succ'] = self.succ.hostportname()
        else:
            retval['succ'] = None
        return retval
    def hostport(self):
        # "host:port"
        return "{}:{}".format(self.host, self.port)
    def hostportname(self):
        # "host:port:name" — the format __init__ parses for pred/succ.
        return "{}:{}:{}".format(self.host, self.port, self.name)
    def hostportnameJSON(self):
        return {"host": self.host, "port": self.port, "name": self.name}
def processFromNodeInfo(host, port):
    """Build a Process by querying the /nodeinfo endpoint of host:port."""
    info_url = "http://{}:{}/nodeinfo".format(host, port)
    payload = requests.get(info_url).json()
    return Process(host, port, payload['name'], payload['pred'], payload['succ'])
def findNode(startProcess, key):
    """Recursively find the node whose ID is the greatest
    but smaller ID in the DHT compared to key"""
    # Walks the successor chain, fetching each node's info over HTTP, until
    # the current node is at least as close (clockwise) to `key` as its
    # successor. Returns (node, hops-walked).
    # if no successor probably startNode is alone...debuggin server :)
    if not startProcess.succ:
        return startProcess, 0
    current = startProcess
    succ = processFromNodeInfo(startProcess.succ.host, startProcess.succ.port)
    recLevel = 0
    info("Entering FindNode method")
    currentID, succID = current.id, succ.id
    # Clockwise-distance comparison: stop when hopping to succ would not
    # bring us closer to key.
    while dist(currentID, key) > dist(succID, key):
        info("succ ({}) is closer to key ({}) than current ({})".format(
            succID, key, currentID))
        currentID = succ.id
        succ = processFromNodeInfo(succ.succ.host, succ.succ.port)
        succID = succ.id
        info("\tNow we rety with {} {}".format(succ.name, succID))
        recLevel += 1
    info("{} {} {} is the good one!".format(succ.host, succ.port, succ.id))
    return succ, recLevel
def wordsOfFile(file):
    """Return the list of whitespace-separated words in the file at path *file*.

    Uses a context manager (the old version rebound the path parameter to the
    open handle) and a comprehension instead of nested append loops.
    """
    with open(file, 'r') as fh:
        return [word for line in fh for word in line.split()]
| [
"logging.basicConfig",
"requests.get",
"logging.getLogger",
"bitstring.BitArray"
] | [((120, 178), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'stream': 'sys.stdout'}), '(level=logging.INFO, stream=sys.stdout)\n', (139, 178), False, 'import logging\n'), ((188, 215), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (205, 215), False, 'import logging\n'), ((412, 432), 'bitstring.BitArray', 'BitArray', ([], {'hex': 'digest'}), '(hex=digest)\n', (420, 432), False, 'from bitstring import BitArray\n'), ((483, 503), 'bitstring.BitArray', 'BitArray', ([], {'bin': 'subbin'}), '(bin=subbin)\n', (491, 503), False, 'from bitstring import BitArray\n'), ((2161, 2178), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2173, 2178), False, 'import requests\n')] |
from django import forms
from django.contrib.auth import password_validation
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from .models import UserAccount
class UserAccountCreationForm(UserCreationForm):
    """
    Form used to create user accounts (two-step password entry).
    """
    password1 = forms.CharField(
        label="Mot de passe",
        widget=forms.PasswordInput(),
        help_text=password_validation.password_validators_help_text_html(),
    )
    password2 = forms.CharField(
        label="Confirmez votre mot de passe",
        widget=forms.PasswordInput(),
        help_text="Entrez votre mot de passe une autre fois"
    )

    class Meta:
        model = UserAccount
        fields = (
            "email",
            "first_name",
            "last_name"
        )

    def clean_password2(self):
        # Validate that the two password entries match.
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Le mot de passe n'est pas le même")
        return password2

    def save(self, commit=True):
        """Save the user with a properly hashed password.

        Bug fix: the previous code looked up ``cleaned_data["<PASSWORD>"]``
        (a redaction artifact), which raised KeyError on every save; the
        validated field is ``"password1"``.
        """
        user = super().save(commit=False)
        user.set_password(self.cleaned_data["password1"])
        if commit:
            user.save()
        return user
class UserAccountChangeForm(UserChangeForm):
    """
    Form used to edit an existing user (the password is rendered as a
    read-only hash, never as an editable field).
    """
    password = ReadOnlyPasswordHashField()
    class Meta:
        model = UserAccount
        fields = ("email", "password",)
    def clean_password(self):
        # Always return the initial hashed value: password changes must go
        # through the dedicated password-change flow, not this form.
        return self.initial["password"]
| [
"django.contrib.auth.password_validation.password_validators_help_text_html",
"django.forms.PasswordInput",
"django.contrib.auth.forms.ReadOnlyPasswordHashField",
"django.forms.ValidationError"
] | [((1577, 1604), 'django.contrib.auth.forms.ReadOnlyPasswordHashField', 'ReadOnlyPasswordHashField', ([], {}), '()\n', (1602, 1604), False, 'from django.contrib.auth.forms import ReadOnlyPasswordHashField\n'), ((433, 454), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {}), '()\n', (452, 454), False, 'from django import forms\n'), ((474, 530), 'django.contrib.auth.password_validation.password_validators_help_text_html', 'password_validation.password_validators_help_text_html', ([], {}), '()\n', (528, 530), False, 'from django.contrib.auth import password_validation\n'), ((632, 653), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {}), '()\n', (651, 653), False, 'from django import forms\n'), ((1131, 1189), 'django.forms.ValidationError', 'forms.ValidationError', (['"""Le mot de passe n\'est pas le même"""'], {}), '("Le mot de passe n\'est pas le même")\n', (1152, 1189), False, 'from django import forms\n')] |
import logging

# Quiet the chatty elasticsearch client: only WARNING and above get through.
_es_logger = logging.getLogger("elasticsearch")
_es_logger.setLevel(logging.WARNING)
| [
"logging.getLogger"
] | [((16, 50), 'logging.getLogger', 'logging.getLogger', (['"""elasticsearch"""'], {}), "('elasticsearch')\n", (33, 50), False, 'import logging\n')] |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Operator
from bpy.props import StringProperty, BoolProperty
class AddPresetBase:
    """Base preset class, only for subclassing
    subclasses must define
    - preset_values
    - preset_subdir """
    # bl_idname = "script.preset_base_add"
    # bl_label = "Add a Python Preset"
    # only because invoke_props_popup requires. Also do not add to search menu.
    bl_options = {'REGISTER', 'INTERNAL'}
    name = StringProperty(
        name="Name",
        description="Name of the preset, used to make the path name",
        maxlen=64,
        options={'SKIP_SAVE'},
    )
    remove_active = BoolProperty(
        default=False,
        options={'HIDDEN', 'SKIP_SAVE'},
    )
    # needed for mix-ins
    order = [
        "name",
        "remove_active",
    ]
    @staticmethod
    def as_filename(name):  # could reuse for other presets
        # Sanitize a display name into a filesystem-safe preset filename:
        # lowercase, then map shell/path special characters to "_".
        # lazy init maketrans
        def maketrans_init():
            cls = AddPresetBase
            attr = "_as_filename_trans"
            trans = getattr(cls, attr, None)
            if trans is None:
                trans = str.maketrans({char: "_" for char in " !@#$%^&*(){}:\";'[]<>,.\\/?"})
                setattr(cls, attr, trans)
            return trans
        name = name.lower().strip()
        name = bpy.path.display_name_to_filepath(name)
        trans = maketrans_init()
        return name.translate(trans)
    def execute(self, context):
        # Two modes: write a new preset file (remove_active False) or delete
        # the currently active one (remove_active True).
        import os
        if hasattr(self, "pre_cb"):
            self.pre_cb(context)
        preset_menu_class = getattr(bpy.types, self.preset_menu)
        is_xml = getattr(preset_menu_class, "preset_type", None) == 'XML'
        if is_xml:
            ext = ".xml"
        else:
            ext = ".py"
        if not self.remove_active:
            name = self.name.strip()
            if not name:
                return {'FINISHED'}
            filename = self.as_filename(name)
            target_path = os.path.join("presets", self.preset_subdir)
            target_path = bpy.utils.user_resource('SCRIPTS',
                                                  target_path,
                                                  create=True)
            if not target_path:
                self.report({'WARNING'}, "Failed to create presets path")
                return {'CANCELLED'}
            filepath = os.path.join(target_path, filename) + ext
            if hasattr(self, "add"):
                # Subclass supplied its own writer (e.g. keyconfig export).
                self.add(context, filepath)
            else:
                print("Writing Preset: %r" % filepath)
                if is_xml:
                    import rna_xml
                    rna_xml.xml_file_write(context,
                                           filepath,
                                           preset_menu_class.preset_xml_map)
                else:
                    # Serialize each RNA path in preset_values as executable
                    # Python assignments, recursing into property groups and
                    # id-property collections.
                    def rna_recursive_attr_expand(value, rna_path_step, level):
                        if isinstance(value, bpy.types.PropertyGroup):
                            for sub_value_attr in value.bl_rna.properties.keys():
                                if sub_value_attr == "rna_type":
                                    continue
                                sub_value = getattr(value, sub_value_attr)
                                rna_recursive_attr_expand(sub_value, "%s.%s" % (rna_path_step, sub_value_attr), level)
                        elif type(value).__name__ == "bpy_prop_collection_idprop":  # could use nicer method
                            file_preset.write("%s.clear()\n" % rna_path_step)
                            for sub_value in value:
                                file_preset.write("item_sub_%d = %s.add()\n" % (level, rna_path_step))
                                rna_recursive_attr_expand(sub_value, "item_sub_%d" % level, level + 1)
                        else:
                            # convert thin wrapped sequences
                            # to simple lists to repr()
                            try:
                                value = value[:]
                            except:
                                pass
                            file_preset.write("%s = %r\n" % (rna_path_step, value))
                    file_preset = open(filepath, 'w', encoding="utf-8")
                    file_preset.write("import bpy\n")
                    if hasattr(self, "preset_defines"):
                        # exec() binds the names (e.g. "scene = ...") that the
                        # eval()'d preset_values below refer to.
                        for rna_path in self.preset_defines:
                            exec(rna_path)
                            file_preset.write("%s\n" % rna_path)
                        file_preset.write("\n")
                    for rna_path in self.preset_values:
                        value = eval(rna_path)
                        rna_recursive_attr_expand(value, rna_path, 1)
                    file_preset.close()
            preset_menu_class.bl_label = bpy.path.display_name(filename)
        else:
            preset_active = preset_menu_class.bl_label
            # fairly sloppy but convenient.
            filepath = bpy.utils.preset_find(preset_active,
                                             self.preset_subdir,
                                             ext=ext)
            if not filepath:
                filepath = bpy.utils.preset_find(preset_active,
                                                 self.preset_subdir,
                                                 display_name=True,
                                                 ext=ext)
            if not filepath:
                return {'CANCELLED'}
            try:
                if hasattr(self, "remove"):
                    self.remove(context, filepath)
                else:
                    os.remove(filepath)
            except Exception as e:
                self.report({'ERROR'}, "Unable to remove preset: %r" % e)
                import traceback
                traceback.print_exc()
                return {'CANCELLED'}
            # XXX, stupid!
            preset_menu_class.bl_label = "Presets"
        if hasattr(self, "post_cb"):
            self.post_cb(context)
        return {'FINISHED'}
    def check(self, context):
        # Keep the displayed name in sync with its sanitized filename form.
        self.name = self.as_filename(self.name.strip())
    def invoke(self, context, event):
        if not self.remove_active:
            wm = context.window_manager
            return wm.invoke_props_dialog(self)
        else:
            return self.execute(context)
class ExecutePreset(Operator):
    """Execute a preset"""
    bl_idname = "script.execute_preset"
    bl_label = "Execute a Python Preset"
    filepath = StringProperty(
        subtype='FILE_PATH',
        options={'SKIP_SAVE'},
    )
    menu_idname = StringProperty(
        name="Menu ID Name",
        description="ID name of the menu this was called from",
        options={'SKIP_SAVE'},
    )
    def execute(self, context):
        # Runs a .py preset as a script, or applies a .xml preset through
        # rna_xml; anything else is rejected.
        from os.path import basename, splitext
        filepath = self.filepath
        # change the menu title to the most recently chosen option
        preset_class = getattr(bpy.types, self.menu_idname)
        preset_class.bl_label = bpy.path.display_name(basename(filepath))
        ext = splitext(filepath)[1].lower()
        # execute the preset using script.python_file_run
        if ext == ".py":
            bpy.ops.script.python_file_run(filepath=filepath)
        elif ext == ".xml":
            import rna_xml
            rna_xml.xml_file_run(context,
                                 filepath,
                                 preset_class.preset_xml_map)
        else:
            self.report({'ERROR'}, "unknown filetype: %r" % ext)
            return {'CANCELLED'}
        return {'FINISHED'}
class AddPresetRender(AddPresetBase, Operator):
    """Add or remove a Render Preset"""
    bl_idname = "render.preset_add"
    bl_label = "Add Render Preset"
    preset_menu = "RENDER_MT_presets"
    # Names bound (via exec) before preset_values are evaluated.
    preset_defines = [
        "scene = bpy.context.scene"
    ]
    # RNA paths captured into the preset file by AddPresetBase.execute.
    preset_values = [
        "scene.render.field_order",
        "scene.render.fps",
        "scene.render.fps_base",
        "scene.render.pixel_aspect_x",
        "scene.render.pixel_aspect_y",
        "scene.render.resolution_percentage",
        "scene.render.resolution_x",
        "scene.render.resolution_y",
        "scene.render.use_fields",
        "scene.render.use_fields_still",
    ]
    preset_subdir = "render"
class AddPresetCamera(AddPresetBase, Operator):
    """Add or remove a Camera Preset"""
    bl_idname = "camera.preset_add"
    bl_label = "Add Camera Preset"
    preset_menu = "CAMERA_MT_presets"
    preset_defines = [
        "cam = bpy.context.camera"
    ]
    preset_subdir = "camera"
    use_focal_length = BoolProperty(
        name="Include Focal Length",
        description="Include focal length into the preset",
        options={'SKIP_SAVE'},
    )
    @property
    def preset_values(self):
        # Sensor settings always; lens settings only when requested.
        preset_values = [
            "cam.sensor_width",
            "cam.sensor_height",
            "cam.sensor_fit"
        ]
        if self.use_focal_length:
            preset_values.append("cam.lens")
            preset_values.append("cam.lens_unit")
        return preset_values
class AddPresetSafeAreas(AddPresetBase, Operator):
    """Add or remove a Safe Areas Preset"""
    bl_idname = "safe_areas.preset_add"
    bl_label = "Add Safe Area Preset"
    preset_menu = "SAFE_AREAS_MT_presets"
    preset_defines = [
        "safe_areas = bpy.context.scene.safe_areas"
    ]
    # RNA paths captured into the preset file by AddPresetBase.execute.
    preset_values = [
        "safe_areas.title",
        "safe_areas.action",
        "safe_areas.title_center",
        "safe_areas.action_center",
    ]
    preset_subdir = "safe_areas"
class AddPresetSSS(AddPresetBase, Operator):
    """Add or remove a Subsurface Scattering Preset"""
    bl_idname = "material.sss_preset_add"
    bl_label = "Add SSS Preset"
    preset_menu = "MATERIAL_MT_sss_presets"
    # Prefer the active node material when the material uses nodes.
    preset_defines = [
        ("material = "
         "bpy.context.material.active_node_material "
         "if bpy.context.material.active_node_material "
         "else bpy.context.material")
    ]
    preset_values = [
        "material.subsurface_scattering.back",
        "material.subsurface_scattering.color",
        "material.subsurface_scattering.color_factor",
        "material.subsurface_scattering.error_threshold",
        "material.subsurface_scattering.front",
        "material.subsurface_scattering.ior",
        "material.subsurface_scattering.radius",
        "material.subsurface_scattering.scale",
        "material.subsurface_scattering.texture_factor",
    ]
    preset_subdir = "sss"
class AddPresetCloth(AddPresetBase, Operator):
    """Add or remove a Cloth Preset"""
    bl_idname = "cloth.preset_add"
    bl_label = "Add Cloth Preset"
    preset_menu = "CLOTH_MT_presets"
    preset_defines = [
        "cloth = bpy.context.cloth"
    ]
    # RNA paths captured into the preset file by AddPresetBase.execute.
    preset_values = [
        "cloth.settings.air_damping",
        "cloth.settings.bending_stiffness",
        "cloth.settings.mass",
        "cloth.settings.quality",
        "cloth.settings.spring_damping",
        "cloth.settings.structural_stiffness",
    ]
    preset_subdir = "cloth"
class AddPresetFluid(AddPresetBase, Operator):
    """Add or remove a Fluid Preset"""
    bl_idname = "fluid.preset_add"
    bl_label = "Add Fluid Preset"
    preset_menu = "FLUID_MT_presets"
    preset_defines = [
        "fluid = bpy.context.fluid"
    ]
    # Only viscosity is preset-able for fluids.
    preset_values = [
        "fluid.settings.viscosity_base",
        "fluid.settings.viscosity_exponent",
    ]
    preset_subdir = "fluid"
class AddPresetHairDynamics(AddPresetBase, Operator):
    """Add or remove a Hair Dynamics Preset"""
    bl_idname = "particle.hair_dynamics_preset_add"
    bl_label = "Add Hair Dynamics Preset"
    preset_menu = "PARTICLE_MT_hair_dynamics_presets"
    # Several aliases into the active particle system's cloth simulation.
    preset_defines = [
        "psys = bpy.context.particle_system",
        "cloth = bpy.context.particle_system.cloth",
        "settings = bpy.context.particle_system.cloth.settings",
        "collision = bpy.context.particle_system.cloth.collision_settings",
    ]
    preset_subdir = "hair_dynamics"
    preset_values = [
        "settings.quality",
        "settings.mass",
        "settings.bending_stiffness",
        "psys.settings.bending_random",
        "settings.bending_damping",
        "settings.air_damping",
        "settings.internal_friction",
        "settings.density_target",
        "settings.density_strength",
        "settings.voxel_cell_size",
        "settings.pin_stiffness",
    ]
class AddPresetSunSky(AddPresetBase, Operator):
    """Add or remove a Sky & Atmosphere Preset"""
    bl_idname = "lamp.sunsky_preset_add"
    bl_label = "Add Sunsky Preset"
    preset_menu = "LAMP_MT_sunsky_presets"
    preset_defines = [
        "sky = bpy.context.lamp.sky"
    ]
    # RNA paths captured into the preset file by AddPresetBase.execute.
    preset_values = [
        "sky.atmosphere_extinction",
        "sky.atmosphere_inscattering",
        "sky.atmosphere_turbidity",
        "sky.backscattered_light",
        "sky.horizon_brightness",
        "sky.spread",
        "sky.sun_brightness",
        "sky.sun_intensity",
        "sky.sun_size",
        "sky.sky_blend",
        "sky.sky_blend_type",
        "sky.sky_color_space",
        "sky.sky_exposure",
    ]
    preset_subdir = "sunsky"
class AddPresetInteraction(AddPresetBase, Operator):
    """Add or remove an Application Interaction Preset"""
    bl_idname = "wm.interaction_preset_add"
    bl_label = "Add Interaction Preset"
    preset_menu = "USERPREF_MT_interaction_presets"
    preset_defines = [
        "user_preferences = bpy.context.user_preferences"
    ]
    # Input/editing preferences captured into the preset file.
    preset_values = [
        "user_preferences.edit.use_drag_immediately",
        "user_preferences.edit.use_insertkey_xyz_to_rgb",
        "user_preferences.inputs.invert_mouse_zoom",
        "user_preferences.inputs.select_mouse",
        "user_preferences.inputs.use_emulate_numpad",
        "user_preferences.inputs.use_mouse_continuous",
        "user_preferences.inputs.use_mouse_emulate_3_button",
        "user_preferences.inputs.view_rotate_method",
        "user_preferences.inputs.view_zoom_axis",
        "user_preferences.inputs.view_zoom_method",
    ]
    preset_subdir = "interaction"
class AddPresetTrackingCamera(AddPresetBase, Operator):
    """Add or remove a Tracking Camera Intrinsics Preset"""
    bl_idname = "clip.camera_preset_add"
    bl_label = "Add Camera Preset"
    preset_menu = "CLIP_MT_camera_presets"
    preset_defines = [
        "camera = bpy.context.edit_movieclip.tracking.camera"
    ]
    preset_subdir = "tracking_camera"
    use_focal_length = BoolProperty(
        name="Include Focal Length",
        description="Include focal length into the preset",
        options={'SKIP_SAVE'},
        default=True
    )
    @property
    def preset_values(self):
        # Sensor and distortion always; focal length only when requested.
        preset_values = [
            "camera.sensor_width",
            "camera.pixel_aspect",
            "camera.k1",
            "camera.k2",
            "camera.k3"
        ]
        if self.use_focal_length:
            preset_values.append("camera.units")
            preset_values.append("camera.focal_length")
        return preset_values
class AddPresetTrackingTrackColor(AddPresetBase, Operator):
    """Add or remove a Clip Track Color Preset"""
    bl_idname = "clip.track_color_preset_add"
    bl_label = "Add Track Color Preset"
    preset_menu = "CLIP_MT_track_color_presets"
    preset_defines = [
        "track = bpy.context.edit_movieclip.tracking.tracks.active"
    ]
    # Only the custom color toggle and the color itself are stored.
    preset_values = [
        "track.color",
        "track.use_custom_color"
    ]
    preset_subdir = "tracking_track_color"
class AddPresetTrackingSettings(AddPresetBase, Operator):
    """Add or remove a motion tracking settings preset"""
    bl_idname = "clip.tracking_settings_preset_add"
    bl_label = "Add Tracking Settings Preset"
    preset_menu = "CLIP_MT_tracking_settings_presets"
    preset_defines = [
        "settings = bpy.context.edit_movieclip.tracking.settings"
    ]
    # RNA paths captured into the preset file by AddPresetBase.execute.
    preset_values = [
        "settings.default_correlation_min",
        "settings.default_pattern_size",
        "settings.default_search_size",
        "settings.default_frames_limit",
        "settings.default_pattern_match",
        "settings.default_margin",
        "settings.default_motion_model",
        "settings.use_default_brute",
        "settings.use_default_normalization",
        "settings.use_default_mask",
        "settings.use_default_red_channel",
        "settings.use_default_green_channel",
        # Bug fix: a missing comma previously fused the next two entries
        # into one invalid RNA path
        # ("...use_default_blue_channelsettings.default_weight"), so
        # neither setting was ever written to presets.
        "settings.use_default_blue_channel",
        "settings.default_weight"
    ]
    preset_subdir = "tracking_settings"
class AddPresetNodeColor(AddPresetBase, Operator):
    """Add or remove a Node Color Preset"""
    bl_idname = "node.node_color_preset_add"
    bl_label = "Add Node Color Preset"
    preset_menu = "NODE_MT_node_color_presets"
    preset_defines = [
        "node = bpy.context.active_node"
    ]
    # Only the custom color toggle and the color itself are stored.
    preset_values = [
        "node.color",
        "node.use_custom_color"
    ]
    preset_subdir = "node_color"
class AddPresetInterfaceTheme(AddPresetBase, Operator):
    """Add or remove a theme preset"""
    # No preset_defines/preset_values: themes are saved through the XML
    # preset path chosen by AddPresetBase.execute.
    bl_idname = "wm.interface_theme_preset_add"
    bl_label = "Add Theme Preset"
    preset_menu = "USERPREF_MT_interface_theme_presets"
    preset_subdir = "interface_theme"
class AddPresetKeyconfig(AddPresetBase, Operator):
    """Add or remove a Key-config Preset"""
    bl_idname = "wm.keyconfig_preset_add"
    bl_label = "Add Keyconfig Preset"
    preset_menu = "USERPREF_MT_keyconfigs"
    preset_subdir = "keyconfig"
    def add(self, context, filepath):
        # Custom writer hook used by AddPresetBase.execute instead of the
        # generic preset serializer: export then activate the keyconfig.
        bpy.ops.wm.keyconfig_export(filepath=filepath)
        bpy.utils.keyconfig_set(filepath)
    def pre_cb(self, context):
        keyconfigs = bpy.context.window_manager.keyconfigs
        if self.remove_active:
            # Point the menu label at the active keyconfig so
            # AddPresetBase.execute can locate its file for removal.
            preset_menu_class = getattr(bpy.types, self.preset_menu)
            preset_menu_class.bl_label = keyconfigs.active.name
    def post_cb(self, context):
        keyconfigs = bpy.context.window_manager.keyconfigs
        if self.remove_active:
            keyconfigs.remove(keyconfigs.active)
class AddPresetOperator(AddPresetBase, Operator):
    """Add or remove an Operator Preset"""
    bl_idname = "wm.operator_preset_add"
    bl_label = "Operator Preset"
    preset_menu = "WM_MT_operator_presets"
    operator = StringProperty(
        name="Operator",
        maxlen=64,
        options={'HIDDEN', 'SKIP_SAVE'},
    )
    preset_defines = [
        "op = bpy.context.active_operator",
    ]
    @property
    def preset_subdir(self):
        # Presets are stored per-operator, e.g. "operator/wm.some_op".
        return AddPresetOperator.operator_path(self.operator)
    @property
    def preset_values(self):
        # Every visible, saveable property of the target operator, minus
        # the properties every Operator defines (the blacklist).
        properties_blacklist = Operator.bl_rna.properties.keys()
        prefix, suffix = self.operator.split("_OT_", 1)
        op = getattr(getattr(bpy.ops, prefix.lower()), suffix)
        operator_rna = op.get_rna().bl_rna
        del op
        ret = []
        for prop_id, prop in operator_rna.properties.items():
            if not (prop.is_hidden or prop.is_skip_save):
                if prop_id not in properties_blacklist:
                    ret.append("op.%s" % prop_id)
        return ret
    @staticmethod
    def operator_path(operator):
        # "CATEGORY_OT_name" -> "operator/category.name"
        import os
        prefix, suffix = operator.split("_OT_", 1)
        return os.path.join("operator", "%s.%s" % (prefix.lower(), suffix))
class WM_MT_operator_presets(Menu):
    """Preset menu shown in operator redo panels."""
    bl_label = "Operator Presets"
    def draw(self, context):
        self.operator = context.active_operator.bl_idname
        # dummy 'default' menu item
        layout = self.layout
        layout.operator("wm.operator_defaults")
        layout.separator()
        Menu.draw_preset(self, context)
    @property
    def preset_subdir(self):
        # Per-operator preset directory, mirroring AddPresetOperator.
        return AddPresetOperator.operator_path(self.operator)
    preset_operator = "script.execute_preset"
class AddPresetUnitsLength(AddPresetBase, Operator):
    """Add or remove length units preset"""
    bl_idname = "scene.units_length_preset_add"
    bl_label = "Add Length Units Preset"
    preset_menu = "SCENE_MT_units_length_presets"
    preset_defines = [
        "scene = bpy.context.scene"
    ]
    # Unit system and scale are the only values stored.
    preset_values = [
        "scene.unit_settings.system",
        "scene.unit_settings.scale_length",
    ]
    preset_subdir = "units_length"
# All operator/menu classes defined in this module (alphabetical); the
# actual registration presumably happens elsewhere — confirm with caller.
classes = (
    AddPresetCamera,
    AddPresetCloth,
    AddPresetFluid,
    AddPresetHairDynamics,
    AddPresetInteraction,
    AddPresetInterfaceTheme,
    AddPresetKeyconfig,
    AddPresetNodeColor,
    AddPresetOperator,
    AddPresetRender,
    AddPresetSSS,
    AddPresetSafeAreas,
    AddPresetSunSky,
    AddPresetTrackingCamera,
    AddPresetTrackingSettings,
    AddPresetTrackingTrackColor,
    AddPresetUnitsLength,
    ExecutePreset,
    WM_MT_operator_presets,
)
| [
"bpy.props.BoolProperty",
"bpy.props.StringProperty",
"bpy.ops.wm.keyconfig_export",
"bpy.path.display_name_to_filepath",
"bpy.path.display_name",
"os.path.join",
"os.path.splitext",
"bpy.utils.user_resource",
"bpy.types.Menu.draw_preset",
"bpy.utils.preset_find",
"os.remove",
"bpy.ops.script.... | [((1274, 1407), 'bpy.props.StringProperty', 'StringProperty', ([], {'name': '"""Name"""', 'description': '"""Name of the preset, used to make the path name"""', 'maxlen': '(64)', 'options': "{'SKIP_SAVE'}"}), "(name='Name', description=\n 'Name of the preset, used to make the path name', maxlen=64, options={\n 'SKIP_SAVE'})\n", (1288, 1407), False, 'from bpy.props import StringProperty, BoolProperty\n'), ((1481, 1541), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'default': '(False)', 'options': "{'HIDDEN', 'SKIP_SAVE'}"}), "(default=False, options={'HIDDEN', 'SKIP_SAVE'})\n", (1493, 1541), False, 'from bpy.props import StringProperty, BoolProperty\n'), ((7442, 7500), 'bpy.props.StringProperty', 'StringProperty', ([], {'subtype': '"""FILE_PATH"""', 'options': "{'SKIP_SAVE'}"}), "(subtype='FILE_PATH', options={'SKIP_SAVE'})\n", (7456, 7500), False, 'from bpy.props import StringProperty, BoolProperty\n'), ((7558, 7677), 'bpy.props.StringProperty', 'StringProperty', ([], {'name': '"""Menu ID Name"""', 'description': '"""ID name of the menu this was called from"""', 'options': "{'SKIP_SAVE'}"}), "(name='Menu ID Name', description=\n 'ID name of the menu this was called from', options={'SKIP_SAVE'})\n", (7572, 7677), False, 'from bpy.props import StringProperty, BoolProperty\n'), ((9586, 9707), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Include Focal Length"""', 'description': '"""Include focal length into the preset"""', 'options': "{'SKIP_SAVE'}"}), "(name='Include Focal Length', description=\n 'Include focal length into the preset', options={'SKIP_SAVE'})\n", (9598, 9707), False, 'from bpy.props import StringProperty, BoolProperty\n'), ((15524, 15664), 'bpy.props.BoolProperty', 'BoolProperty', ([], {'name': '"""Include Focal Length"""', 'description': '"""Include focal length into the preset"""', 'options': "{'SKIP_SAVE'}", 'default': '(True)'}), "(name='Include Focal Length', description=\n 'Include focal length into the 
preset', options={'SKIP_SAVE'}, default=True\n )\n", (15536, 15664), False, 'from bpy.props import StringProperty, BoolProperty\n'), ((19323, 19398), 'bpy.props.StringProperty', 'StringProperty', ([], {'name': '"""Operator"""', 'maxlen': '(64)', 'options': "{'HIDDEN', 'SKIP_SAVE'}"}), "(name='Operator', maxlen=64, options={'HIDDEN', 'SKIP_SAVE'})\n", (19337, 19398), False, 'from bpy.props import StringProperty, BoolProperty\n'), ((2173, 2212), 'bpy.path.display_name_to_filepath', 'bpy.path.display_name_to_filepath', (['name'], {}), '(name)\n', (2206, 2212), False, 'import bpy\n'), ((18579, 18625), 'bpy.ops.wm.keyconfig_export', 'bpy.ops.wm.keyconfig_export', ([], {'filepath': 'filepath'}), '(filepath=filepath)\n', (18606, 18625), False, 'import bpy\n'), ((18634, 18667), 'bpy.utils.keyconfig_set', 'bpy.utils.keyconfig_set', (['filepath'], {}), '(filepath)\n', (18657, 18667), False, 'import bpy\n'), ((19705, 19738), 'bpy.types.Operator.bl_rna.properties.keys', 'Operator.bl_rna.properties.keys', ([], {}), '()\n', (19736, 19738), False, 'from bpy.types import Menu, Operator\n'), ((20688, 20719), 'bpy.types.Menu.draw_preset', 'Menu.draw_preset', (['self', 'context'], {}), '(self, context)\n', (20704, 20719), False, 'from bpy.types import Menu, Operator\n'), ((2836, 2879), 'os.path.join', 'os.path.join', (['"""presets"""', 'self.preset_subdir'], {}), "('presets', self.preset_subdir)\n", (2848, 2879), False, 'import os\n'), ((2906, 2966), 'bpy.utils.user_resource', 'bpy.utils.user_resource', (['"""SCRIPTS"""', 'target_path'], {'create': '(True)'}), "('SCRIPTS', target_path, create=True)\n", (2929, 2966), False, 'import bpy\n'), ((5727, 5758), 'bpy.path.display_name', 'bpy.path.display_name', (['filename'], {}), '(filename)\n', (5748, 5758), False, 'import bpy\n'), ((5897, 5962), 'bpy.utils.preset_find', 'bpy.utils.preset_find', (['preset_active', 'self.preset_subdir'], {'ext': 'ext'}), '(preset_active, self.preset_subdir, ext=ext)\n', (5918, 5962), False, 'import bpy\n'), 
((8019, 8037), 'os.path.basename', 'basename', (['filepath'], {}), '(filepath)\n', (8027, 8037), False, 'from os.path import basename, splitext\n'), ((8180, 8229), 'bpy.ops.script.python_file_run', 'bpy.ops.script.python_file_run', ([], {'filepath': 'filepath'}), '(filepath=filepath)\n', (8210, 8229), False, 'import bpy\n'), ((3235, 3270), 'os.path.join', 'os.path.join', (['target_path', 'filename'], {}), '(target_path, filename)\n', (3247, 3270), False, 'import os\n'), ((6110, 6198), 'bpy.utils.preset_find', 'bpy.utils.preset_find', (['preset_active', 'self.preset_subdir'], {'display_name': '(True)', 'ext': 'ext'}), '(preset_active, self.preset_subdir, display_name=True,\n ext=ext)\n', (6131, 6198), False, 'import bpy\n'), ((8297, 8365), 'rna_xml.xml_file_run', 'rna_xml.xml_file_run', (['context', 'filepath', 'preset_class.preset_xml_map'], {}), '(context, filepath, preset_class.preset_xml_map)\n', (8317, 8365), False, 'import rna_xml\n'), ((3515, 3590), 'rna_xml.xml_file_write', 'rna_xml.xml_file_write', (['context', 'filepath', 'preset_menu_class.preset_xml_map'], {}), '(context, filepath, preset_menu_class.preset_xml_map)\n', (3537, 3590), False, 'import rna_xml\n'), ((6564, 6583), 'os.remove', 'os.remove', (['filepath'], {}), '(filepath)\n', (6573, 6583), False, 'import os\n'), ((6742, 6763), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6761, 6763), False, 'import traceback\n'), ((8054, 8072), 'os.path.splitext', 'splitext', (['filepath'], {}), '(filepath)\n', (8062, 8072), False, 'from os.path import basename, splitext\n')] |
from django import template
register = template.Library()
@register.filter
def next(some_list, current_index):
    """
    Return the element that follows ``current_index`` in ``some_list``.

    ``current_index`` may be anything convertible to ``int`` (template
    variables frequently arrive as strings).  If there is no next element,
    or the arguments are unusable, an empty string is returned so the
    template renders nothing instead of the literal text ``None``.

    https://docs.djangoproject.com/en/3.0/howto/custom-template-tags/#writing-custom-template-filters
    """
    try:
        return some_list[int(current_index) + 1]
    except (IndexError, ValueError, TypeError):
        # Out of range, or index/list not usable: render nothing.
        # (The docstring always promised '' but the original fell through
        # and returned None, which Django renders as the text "None".)
        return ''
| [
"django.template.Library"
] | [((40, 58), 'django.template.Library', 'template.Library', ([], {}), '()\n', (56, 58), False, 'from django import template\n')] |
import operator
from itertools import chain
from Logic.ProperLogic.helper_classes.reducer import MaxReducer
from Logic.ProperLogic.misc_helpers import log_error
class ClusterDict(dict):
    """Mapping of ``cluster_id -> cluster`` that also tracks the largest id.

    The running maximum is maintained with a ``MaxReducer`` so fresh unique
    ids can be handed out cheaply via ``get_max_id``.
    """
    # TODO: Make sure constructor is only called when needed / doesn't produce more work than necessary!

    def __init__(self, clusters=None):
        """Populate the mapping from an iterable of clusters (may be empty/None)."""
        super().__init__()
        self.max_id = None
        self.max_id_reducer = MaxReducer()
        if not clusters:
            return

        for cluster in clusters:
            cluster_id = cluster.cluster_id
            self[cluster_id] = cluster
            self.max_id_reducer(cluster_id)
        self.max_id = self.max_id_reducer.get_state()

    def get_clusters(self, with_ids=False):
        """Return stored clusters; as ``(id, cluster)`` pairs when ``with_ids``."""
        if with_ids:
            return self.items()
        return self.values()

    def get_cluster_by_id(self, cluster_id):
        """Return the cluster for ``cluster_id``, or None (logged) if unknown."""
        try:
            return self[cluster_id]
        except KeyError:
            log_error(f"no cluster with id '{cluster_id}' found")
            return None

    def get_clusters_by_ids(self, cluster_ids):
        """Lazily map ids to clusters (unknown ids yield None, see above)."""
        return map(self.get_cluster_by_id, cluster_ids)

    def get_cluster_ids(self):
        """Return a view of all known cluster ids."""
        return self.keys()

    def get_cluster_labels(self, with_ids=False, unique=True):
        """Return the stored clusters' labels.

        If with_ids is provided, unique is ignored.
        :param with_ids: also pair each label with its cluster id
        :param unique: deduplicate labels (result order is then unspecified)
        :return: list of labels, or list of (cluster_id, label) tuples
        """
        attrs = ['cluster_id'] if with_ids else []
        attrs.append('label')
        cluster_labels = self.get_cluster_attrs(*attrs)
        if unique and not with_ids:
            return list(set(cluster_labels))
        return list(cluster_labels)

    def get_cluster_attrs(self, *attrs):
        """Yield the given attribute(s) of every cluster.

        With one attribute, yields plain values; with several, attrgetter
        yields tuples of values.
        """
        clusters = self.get_clusters()
        attrs_getter = operator.attrgetter(*attrs)
        return map(attrs_getter, clusters)

    def reset_ids(self, start_id=1):
        """Re-key all clusters with consecutive ids starting at ``start_id``.

        :return: (old_ids, new_ids) in matching order, usable with set_ids().
        """
        clusters_with_ids = list(self.get_clusters(with_ids=True))
        self.clear()
        old_ids = []
        for new_cluster_id, (old_cluster_id, cluster) in enumerate(clusters_with_ids, start=start_id):
            old_ids.append(old_cluster_id)
            cluster.set_cluster_id(new_cluster_id)
            self[new_cluster_id] = cluster
        max_id = start_id + len(clusters_with_ids) - 1
        self.max_id = max_id
        new_ids = list(range(start_id, max_id + 1))
        return old_ids, new_ids

    def set_ids(self, old_ids, new_ids):
        """Rewrite each cluster's id according to the old->new mapping.

        NOTE(review): only the clusters' own ids are rewritten here; the
        dict keys keep their old values — confirm callers re-key afterwards.
        """
        clusters = self.get_clusters()
        old_to_new_ids_dict = dict(zip(old_ids, new_ids))
        self.max_id_reducer.reset()
        for cluster in clusters:
            new_id = old_to_new_ids_dict[cluster.cluster_id]
            cluster.set_cluster_id(new_id)
            self.max_id_reducer(new_id)
        self.max_id = self.max_id_reducer.get_state()

    def any_cluster_with_emb(self, emb):
        """Return True if any stored cluster contains the given embedding."""
        clusters = self.get_clusters()
        return any(filter(lambda cluster: cluster.contains_embedding(emb), clusters))

    def add_clusters(self, clusters):
        """Insert (or overwrite) clusters keyed by their own cluster_id.

        NOTE(review): the reducer is reset first, so after this call
        ``max_id`` reflects only the newly added ids — confirm that ids
        already present are meant to be ignored here.
        """
        self.max_id_reducer.reset()
        for cluster in clusters:
            cluster_id = cluster.cluster_id
            self[cluster_id] = cluster
            self.max_id_reducer(cluster_id)
        self.max_id = self.max_id_reducer.get_state()

    def add_cluster(self, cluster):
        """Insert a single cluster (see add_clusters)."""
        self.add_clusters([cluster])

    def remove_clusters(self, clusters):
        """Remove the given clusters; recompute max_id only if it was removed."""
        reset_max_id = False
        for cluster in clusters:
            cluster_id = cluster.cluster_id
            self.pop(cluster_id)
            if cluster_id == self.max_id:
                reset_max_id = True
        if reset_max_id:
            self.reset_max_id()

    def reset_max_id(self):
        """Recompute max_id from the remaining keys (0 when empty).

        An empty keys view is falsy, so max() is never called on nothing.
        """
        cluster_ids = self.get_cluster_ids()
        self.max_id = max(cluster_ids) if cluster_ids else 0

    def remove_cluster(self, cluster):
        """Remove a single cluster (see remove_clusters)."""
        self.remove_clusters([cluster])

    def get_max_id(self):
        """Return the tracked maximum id, or the reducer's default if unset."""
        if self.max_id is None:
            return self.max_id_reducer.default
        return self.max_id

    def get_embeddings(self):
        """Chain together the embeddings of every stored cluster."""
        return chain(*map(lambda cluster: cluster.get_embeddings(),
                    self.get_clusters(with_ids=False)))
| [
"operator.attrgetter",
"Logic.ProperLogic.helper_classes.reducer.MaxReducer",
"Logic.ProperLogic.misc_helpers.log_error"
] | [((418, 430), 'Logic.ProperLogic.helper_classes.reducer.MaxReducer', 'MaxReducer', ([], {}), '()\n', (428, 430), False, 'from Logic.ProperLogic.helper_classes.reducer import MaxReducer\n'), ((1757, 1784), 'operator.attrgetter', 'operator.attrgetter', (['*attrs'], {}), '(*attrs)\n', (1776, 1784), False, 'import operator\n'), ((950, 1003), 'Logic.ProperLogic.misc_helpers.log_error', 'log_error', (['f"""no cluster with id \'{cluster_id}\' found"""'], {}), '(f"no cluster with id \'{cluster_id}\' found")\n', (959, 1003), False, 'from Logic.ProperLogic.misc_helpers import log_error\n')] |
from collections import OrderedDict
import numpy as np
import math
import torch
import torch.optim as optim
from torch import nn as nn
import rlkit.torch.pytorch_util as ptu
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.torch.torch_rl_algorithm import TorchTrainer
def kl_divergence(mu, std):
    """KL(N(mu, std^2) || N(0, 1)) summed over dim 1, batch-averaged, in bits.

    The analytic per-dimension term is summed over the feature axis,
    averaged over the batch axis, and divided by log(2) to convert the
    result from nats to bits.
    """
    per_dim = 1.0 + 2.0 * torch.log(std) - torch.pow(mu, 2) - torch.pow(std, 2)
    return per_dim.sum(1).mean().div(math.log(2)).mul(-0.5)
def weighted_mse_loss(input, target, weight):
    """Sum of element-wise squared errors, each scaled by ``weight``."""
    squared_error = (input - target) ** 2
    return (weight * squared_error).sum()
class IWQTrainer(TorchTrainer):
    """SAC-style trainer for a sampled ("implicit weighted") Q-function.

    The Q-network returns ``num_samples`` Q-value samples per (state,
    action) pair together with the (mu, std) of its posterior; the minimum
    over the samples is used both for the policy objective and the Bellman
    target, and a KL term weighted by ``beta`` regularizes the posterior.

    Changes vs. the original: leftover debug ``print`` statements in
    ``train_from_torch`` (and a stray ``# exit()``) are removed, and
    ``get_snapshot`` now stores the target network under ``target_qf1``.
    """
    def __init__(
            self,
            env,
            policy,
            qf,
            target_qf,
            num_samples,
            weighted_mse,
            beta,
            discount=0.99,
            reward_scale=1.0,
            policy_lr=1e-3,
            qf_lr=1e-3,
            optimizer_class=optim.Adam,
            soft_target_tau=1e-2,
            target_update_period=1,
            plotter=None,
            render_eval_paths=False,
            use_automatic_entropy_tuning=True,
            target_entropy=None,
    ):
        """Store hyper-parameters and build optimizers.

        Args mirror rlkit's SAC trainer; ``num_samples`` is the number of
        Q-value samples produced per forward pass, ``weighted_mse`` selects
        a (currently unimplemented) weighted Bellman loss, and ``beta``
        scales the KL regularization of the Q posterior.
        """
        super().__init__()
        self.env = env
        self.policy = policy
        self.qf = qf
        self.target_qf = target_qf
        self.num_samples = num_samples
        self.weighted_mse = weighted_mse
        self.beta = beta
        self.soft_target_tau = soft_target_tau
        self.target_update_period = target_update_period

        self.use_automatic_entropy_tuning = use_automatic_entropy_tuning
        if self.use_automatic_entropy_tuning:
            # NOTE(review): a falsy target_entropy (0 / 0.0) falls through to
            # the heuristic below — confirm that is intended.
            if target_entropy:
                self.target_entropy = target_entropy
            else:
                self.target_entropy = -np.prod(self.env.action_space.shape).item()  # heuristic value from Tuomas
            self.log_alpha = ptu.zeros(1, requires_grad=True)
            self.alpha_optimizer = optimizer_class(
                [self.log_alpha],
                lr=policy_lr,
            )

        self.plotter = plotter
        self.render_eval_paths = render_eval_paths

        self.qf_criterion = nn.MSELoss()
        self.vf_criterion = nn.MSELoss()

        self.policy_optimizer = optimizer_class(
            self.policy.parameters(),
            lr=policy_lr,
        )
        self.qf_optimizer = optimizer_class(
            self.qf.parameters(),
            lr=qf_lr,
        )

        self.discount = discount
        self.reward_scale = reward_scale
        self.eval_statistics = OrderedDict()
        self._n_train_steps_total = 0
        self._need_to_update_eval_statistics = True

    def train_from_torch(self, batch):
        """Run one gradient step for alpha (optional), policy, and Q-function.

        ``batch`` is a dict of torch tensors with keys 'rewards',
        'terminals', 'observations', 'actions', 'next_observations'.
        """
        rewards = batch['rewards']
        terminals = batch['terminals']
        obs = batch['observations']
        actions = batch['actions']
        next_obs = batch['next_observations']
        batch_size = obs.size(0)

        """
        Policy and Alpha Loss
        """
        new_obs_actions, policy_mean, policy_log_std, log_pi, *_ = self.policy(
            obs, reparameterize=True, return_log_prob=True,
        )
        if self.use_automatic_entropy_tuning:
            alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            alpha = self.log_alpha.exp()
        else:
            alpha_loss = 0
            alpha = 1

        q_all, _, _ = self.qf(obs, new_obs_actions)
        q_all = q_all.view(batch_size, self.num_samples, 1)
        # Pessimistic value estimate: minimum over the sampled Q-values.
        q_new_actions, _ = torch.min(q_all, dim=1)
        policy_loss = (alpha*log_pi - q_new_actions).mean()

        """
        QF Loss
        """
        q_pred, mu, std = self.qf(obs, actions)
        # Make sure policy accounts for squashing functions like tanh correctly!
        new_next_actions, _, _, new_log_pi, *_ = self.policy(
            next_obs, reparameterize=True, return_log_prob=True,
        )
        target_all, _, _ = self.target_qf(next_obs, new_next_actions)
        # NOTE(review): q_all above is reshaped to (batch, samples, 1) while
        # target_all is reshaped to (samples, batch, 1); min over dim=1 then
        # reduces different axes — confirm which layout the Q-network emits.
        target_all = target_all.view(self.num_samples, batch_size, 1)
        target_q_values, _ = torch.min(target_all, dim=1)
        target_q_values = target_q_values - alpha * new_log_pi

        q_target = self.reward_scale * rewards + (1. - terminals) * self.discount * target_q_values
        if self.weighted_mse:
            raise NotImplementedError
        else:
            # Each of the num_samples predictions regresses on the same target.
            q_target = q_target.repeat_interleave(self.num_samples, dim=0)
            qf_loss = self.qf_criterion(q_pred, q_target.detach())
        qf_loss += self.beta * kl_divergence(mu, std)

        """
        Update networks
        """
        self.qf_optimizer.zero_grad()
        qf_loss.backward()
        self.qf_optimizer.step()

        self.policy_optimizer.zero_grad()
        policy_loss.backward()
        self.policy_optimizer.step()

        """
        Soft Updates
        """
        if self._n_train_steps_total % self.target_update_period == 0:
            ptu.soft_update_from_to(
                self.qf, self.target_qf, self.soft_target_tau
            )

        """
        Save some statistics for eval
        """
        if self._need_to_update_eval_statistics:
            self._need_to_update_eval_statistics = False
            """
            Eval should set this to None.
            This way, these statistics are only computed for one batch.
            """
            policy_loss = (log_pi - q_new_actions).mean()

            self.eval_statistics['QF Loss'] = np.mean(ptu.get_numpy(qf_loss))
            self.eval_statistics['Policy Loss'] = np.mean(ptu.get_numpy(
                policy_loss
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Q Predictions',
                ptu.get_numpy(q_pred),
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Q Targets',
                ptu.get_numpy(q_target),
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Log Pis',
                ptu.get_numpy(log_pi),
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Policy mu',
                ptu.get_numpy(policy_mean),
            ))
            self.eval_statistics.update(create_stats_ordered_dict(
                'Policy log std',
                ptu.get_numpy(policy_log_std),
            ))
            if self.use_automatic_entropy_tuning:
                self.eval_statistics['Alpha'] = alpha.item()
                self.eval_statistics['Alpha Loss'] = alpha_loss.item()
        self._n_train_steps_total += 1

    def get_diagnostics(self):
        """Return the statistics gathered for the current epoch."""
        return self.eval_statistics

    def end_epoch(self, epoch):
        """Re-arm statistics collection for the next epoch."""
        self._need_to_update_eval_statistics = True

    @property
    def networks(self):
        """All networks that need mode toggling / device moves."""
        return [
            self.policy,
            self.qf,
            self.target_qf,
        ]

    def get_snapshot(self):
        """Return the networks to persist in a checkpoint."""
        return dict(
            policy=self.policy,
            qf1=self.qf,
            # Bug fix: the snapshot previously stored self.qf under this key
            # too, so the target network's weights were never saved.
            target_qf1=self.target_qf,
        )
| [
"numpy.prod",
"collections.OrderedDict",
"math.log",
"torch.min",
"torch.nn.MSELoss",
"rlkit.torch.pytorch_util.get_numpy",
"torch.sum",
"rlkit.torch.pytorch_util.soft_update_from_to",
"rlkit.torch.pytorch_util.zeros"
] | [((489, 530), 'torch.sum', 'torch.sum', (['(weight * (input - target) ** 2)'], {}), '(weight * (input - target) ** 2)\n', (498, 530), False, 'import torch\n'), ((2104, 2116), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2114, 2116), True, 'from torch import nn as nn\n'), ((2145, 2157), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2155, 2157), True, 'from torch import nn as nn\n'), ((2499, 2512), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2510, 2512), False, 'from collections import OrderedDict\n'), ((3574, 3597), 'torch.min', 'torch.min', (['q_all'], {'dim': '(1)'}), '(q_all, dim=1)\n', (3583, 3597), False, 'import torch\n'), ((4219, 4247), 'torch.min', 'torch.min', (['target_all'], {'dim': '(1)'}), '(target_all, dim=1)\n', (4228, 4247), False, 'import torch\n'), ((403, 414), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (411, 414), False, 'import math\n'), ((1829, 1861), 'rlkit.torch.pytorch_util.zeros', 'ptu.zeros', (['(1)'], {'requires_grad': '(True)'}), '(1, requires_grad=True)\n', (1838, 1861), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5128, 5198), 'rlkit.torch.pytorch_util.soft_update_from_to', 'ptu.soft_update_from_to', (['self.qf', 'self.target_qf', 'self.soft_target_tau'], {}), '(self.qf, self.target_qf, self.soft_target_tau)\n', (5151, 5198), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5657, 5679), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['qf_loss'], {}), '(qf_loss)\n', (5670, 5679), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5739, 5765), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_loss'], {}), '(policy_loss)\n', (5752, 5765), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((5913, 5934), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q_pred'], {}), '(q_pred)\n', (5926, 5934), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6063, 6086), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['q_target'], {}), '(q_target)\n', 
(6076, 6086), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6213, 6234), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['log_pi'], {}), '(log_pi)\n', (6226, 6234), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6363, 6389), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_mean'], {}), '(policy_mean)\n', (6376, 6389), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((6523, 6552), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['policy_log_std'], {}), '(policy_log_std)\n', (6536, 6552), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((1725, 1761), 'numpy.prod', 'np.prod', (['self.env.action_space.shape'], {}), '(self.env.action_space.shape)\n', (1732, 1761), True, 'import numpy as np\n')] |
#!/usr/bin/python
# Manual driver for the SnapshotArray problem: each quoted section below is
# an earlier operation sequence kept for reference; the live calls at the
# bottom replay the most recent case and print the get() results.
'''
["SnapshotArray","snap","snap","get","set","snap","set"]
[[4],[],[],[3,1],[2,4],[],[1,4]]
'''
from Solution import SnapshotArray
'''
obj = SnapshotArray(4)
obj.snap()
obj.snap()
print(obj.get(3, 1))
obj.set(2, 4)
obj.snap()
obj.set(1, 4)
#
obj = SnapshotArray(2)
obj.snap()
print(obj.get(1, 0))
print(obj.get(0, 0))
obj.set(1, 8)
print(obj.get(1, 0))
obj.set(0, 20)
print(obj.get(0, 0))
obj.set(0, 7)
'''
'''
obj = SnapshotArray(3)
obj.set(0, 5)
obj.snap()
obj.set(0, 6)
print(obj.get(0, 0))
'''
'''
obj = SnapshotArray(1)
obj.set(0, 4)
obj.set(0, 16)
obj.set(0, 13)
obj.snap()
print(obj.get(0, 0))
obj.snap()
'''
# Operations/arguments for the case replayed below:
#["SnapshotArray","snap","get","get","set","snap","set","get","set","snap","get","set","set"]
#[[1],[],[0,0],[0,0],[0,2],[],[0,14],[0,1],[0,12],[],[0,0],[0,17],[0,16]]
obj = SnapshotArray(1)
obj.snap()
print(obj.get(0, 0))
print(obj.get(0, 0))
obj.set(0, 2)
obj.snap()
obj.set(0, 14)
print(obj.get(0, 1))
obj.set(0, 12)
obj.snap()
print(obj.get(0, 0))
obj.set(0, 17)
obj.set(0, 16)
| [
"Solution.SnapshotArray"
] | [((813, 829), 'Solution.SnapshotArray', 'SnapshotArray', (['(1)'], {}), '(1)\n', (826, 829), False, 'from Solution import SnapshotArray\n')] |
# -*- coding: utf-8 -*-
""" InfluxDB emitter """
import logging
from distutils import util as distutil
from influxdb_client import InfluxDBClient
from influxdb_client.client.write_api import SYNCHRONOUS
from jinja2 import Template
from tilty.common import safe_get_key
LOGGER = logging.getLogger()
def __type__() -> str:
return 'InfluxDB'
class InfluxDB:  # pylint: disable=too-few-public-methods
    """Emitter that writes Tilt hydrometer readings to an InfluxDB bucket."""

    def __init__(self, config: dict) -> None:
        """Build the payload templates and the InfluxDB write client.

        Args:
            config: (dict) represents the configuration for the emitter
        """
        self.gravity_template = Template(config['gravity_payload_template'])  # noqa
        self.temperature_template = Template(config['temperature_payload_template'])  # noqa
        self.bucket = safe_get_key(config, 'bucket')
        self.org = safe_get_key(config, 'org')
        # verify_ssl arrives as a string flag; strtobool maps it to 0/1.
        ssl_flag = safe_get_key(config, 'verify_ssl', 'False')
        verify_ssl = bool(distutil.strtobool(ssl_flag))
        client = InfluxDBClient(
            url=config['url'],
            org=self.org,
            token=safe_get_key(config, 'token'),
            verify_ssl=verify_ssl
        )
        self.write_api = client.write_api(write_options=SYNCHRONOUS)

    def emit(self, tilt_data: dict) -> None:
        """Render and post one temperature and one gravity record.

        Args:
            tilt_data (dict): data returned from valid tilt device scan
        """
        render_args = {
            'color': tilt_data['color'],
            'gravity': tilt_data['gravity'],
            'mac': tilt_data['mac'],
            'temp': tilt_data['temp'],
        }
        temperature_payload = self.temperature_template.render(**render_args)
        gravity_payload = self.gravity_template.render(**render_args)
        LOGGER.info('[influxdb] posting temperature data')
        self.write_api.write(
            bucket=self.bucket,
            org=self.org,
            record=temperature_payload
        )
        LOGGER.info('[influxdb] posting gravity data')
        self.write_api.write(
            bucket=self.bucket,
            org=self.org,
            record=gravity_payload
        )
| [
"logging.getLogger",
"tilty.common.safe_get_key",
"jinja2.Template"
] | [((281, 300), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (298, 300), False, 'import logging\n'), ((658, 702), 'jinja2.Template', 'Template', (["config['gravity_payload_template']"], {}), "(config['gravity_payload_template'])\n", (666, 702), False, 'from jinja2 import Template\n'), ((747, 795), 'jinja2.Template', 'Template', (["config['temperature_payload_template']"], {}), "(config['temperature_payload_template'])\n", (755, 795), False, 'from jinja2 import Template\n'), ((826, 856), 'tilty.common.safe_get_key', 'safe_get_key', (['config', '"""bucket"""'], {}), "(config, 'bucket')\n", (838, 856), False, 'from tilty.common import safe_get_key\n'), ((990, 1017), 'tilty.common.safe_get_key', 'safe_get_key', (['config', '"""org"""'], {}), "(config, 'org')\n", (1002, 1017), False, 'from tilty.common import safe_get_key\n'), ((916, 959), 'tilty.common.safe_get_key', 'safe_get_key', (['config', '"""verify_ssl"""', '"""False"""'], {}), "(config, 'verify_ssl', 'False')\n", (928, 959), False, 'from tilty.common import safe_get_key\n'), ((1126, 1155), 'tilty.common.safe_get_key', 'safe_get_key', (['config', '"""token"""'], {}), "(config, 'token')\n", (1138, 1155), False, 'from tilty.common import safe_get_key\n')] |
import os
import utils
import torch
import torch.nn as nn
from torchvision import transforms
from torch.utils.data import DataLoader
import numpy as np
import data
import scipy.io as sio
from options.training_options import TrainOptions
import utils
import time
from models import AutoEncoderCov3D, AutoEncoderCov3DMem
from models import EntropyLossEncap
###
# Train the (Mem)AE video-anomaly model: parse options, build the data
# pipeline and model, then run the epoch loop with optional TensorBoard
# logging and periodic checkpointing.
opt_parser = TrainOptions()
opt = opt_parser.parse(is_print=True)
use_cuda = opt.UseCUDA
device = torch.device("cuda" if use_cuda else "cpu")
###
utils.seed(opt.Seed)
if(opt.IsDeter):
    # Deterministic cuDNN for reproducible runs (slower).
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
######
model_setting = utils.get_model_setting(opt)
print('Setting: %s' % (model_setting))
############
batch_size_in = opt.BatchSize
learning_rate = opt.LR
max_epoch_num = opt.EpochNum
chnum_in_ = opt.ImgChnNum # channel number of the input images
framenum_in_ = opt.FrameNum # num of frames in a video clip
mem_dim_in = opt.MemDim
entropy_loss_weight = opt.EntropyLossWeight
sparse_shrink_thres = opt.ShrinkThres
img_crop_size = 0
print('bs=%d, lr=%f, entrloss=%f, shr=%f, memdim=%d' % (batch_size_in, learning_rate, entropy_loss_weight, sparse_shrink_thres, mem_dim_in))
############
## data path
data_root = opt.DataRoot + opt.Dataset + '/'
tr_data_frame_dir = data_root + 'Train/'
tr_data_idx_dir = data_root + 'Train_idx/'
############ model saving dir path
saving_root = opt.ModelRoot
saving_model_path = os.path.join(saving_root, 'model_' + model_setting + '/')
utils.mkdir(saving_model_path)
### tblog
if(opt.IsTbLog):
    log_path = os.path.join(saving_root, 'log_'+model_setting + '/')
    utils.mkdir(log_path)
    tb_logger = utils.Logger(log_path)
## Normalization constants for 1-channel (grayscale) or 3-channel input.
if(chnum_in_==1):
    norm_mean = [0.5]
    norm_std = [0.5]
elif(chnum_in_==3):
    norm_mean = (0.5, 0.5, 0.5)
    norm_std = (0.5, 0.5, 0.5)
frame_trans = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std)
    ])
unorm_trans = utils.UnNormalize(mean=norm_mean, std=norm_std)
###### data
video_dataset = data.VideoDataset(tr_data_idx_dir, tr_data_frame_dir, transform=frame_trans)
tr_data_loader = DataLoader(video_dataset,
                            batch_size=batch_size_in,
                            shuffle=True,
                            num_workers=opt.NumWorker
                            )
###### model
if(opt.ModelName=='MemAE'):
    model = AutoEncoderCov3DMem(chnum_in_, mem_dim_in, shrink_thres=sparse_shrink_thres)
else:
    # NOTE(review): a plain list has no .apply(); an unknown ModelName will
    # crash at model.apply() below — consider raising here instead.
    model = []
    print('Wrong model name.')
model.apply(utils.weights_init)
#########
# NOTE(review): 'device' was already assigned identically near the top of
# the script; this second assignment is redundant.
device = torch.device("cuda" if use_cuda else "cpu")
model.to(device)
tr_recon_loss_func = nn.MSELoss().to(device)
tr_entropy_loss_func = EntropyLossEncap().to(device)
tr_optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
## Logging / checkpoint intervals.
data_loader_len = len(tr_data_loader)
textlog_interval = opt.TextLogInterval
snap_save_interval = opt.SnapInterval
save_check_interval = opt.SaveCheckInterval
tb_img_log_interval = opt.TBImgLogInterval
global_ite_idx = 0 # for logging
for epoch_idx in range(0, max_epoch_num):
    for batch_idx, (item, frames) in enumerate(tr_data_loader):
        frames = frames.to(device)
        if (opt.ModelName == 'MemAE'):
            recon_res = model(frames)
            recon_frames = recon_res['output']
            att_w = recon_res['att']
            # Total loss = reconstruction MSE + weighted attention entropy.
            loss = tr_recon_loss_func(recon_frames, frames)
            recon_loss_val = loss.item()
            entropy_loss = tr_entropy_loss_func(att_w)
            entropy_loss_val = entropy_loss.item()
            loss = loss + entropy_loss_weight * entropy_loss
            loss_val = loss.item()
            ##
            tr_optimizer.zero_grad()
            loss.backward()
            tr_optimizer.step()
            ##
            ## TB log val
            if(opt.IsTbLog):
                tb_info = {
                    'loss': loss_val,
                    'recon_loss': recon_loss_val,
                    'entropy_loss': entropy_loss_val
                }
                for tag, value in tb_info.items():
                    tb_logger.scalar_summary(tag, value, global_ite_idx)
                # TB log img: tile every 5th frame of input and reconstruction
                # into a single strip and replicate to 3 channels for display.
                if( (global_ite_idx % tb_img_log_interval)==0 ):
                    frames_vis = utils.vframes2imgs(unorm_trans(frames.data), step=5, batch_idx=0)
                    frames_vis = np.concatenate(frames_vis, axis=-1)
                    frames_vis = frames_vis[None, :, :] * np.ones(3, dtype=int)[:, None, None]
                    frames_recon_vis = utils.vframes2imgs(unorm_trans(recon_frames.data), step=5, batch_idx=0)
                    frames_recon_vis = np.concatenate(frames_recon_vis, axis=-1)
                    frames_recon_vis = frames_recon_vis[None, :, :] * np.ones(3, dtype=int)[:, None, None]
                    tb_info = {
                        'x': frames_vis,
                        'x_rec': frames_recon_vis
                    }
                    for tag, imgs in tb_info.items():
                        tb_logger.image_summary(tag, imgs, global_ite_idx)
        ##
        if((batch_idx % textlog_interval)==0):
            print('[%s, epoch %d/%d, bt %d/%d] loss=%f, rc_losss=%f, ent_loss=%f' % (model_setting, epoch_idx, max_epoch_num, batch_idx, data_loader_len, loss_val, recon_loss_val, entropy_loss_val) )
        if((global_ite_idx % snap_save_interval)==0):
            torch.save(model.state_dict(), '%s/%s_snap.pt' % (saving_model_path, model_setting) )
        global_ite_idx += 1
    if((epoch_idx % save_check_interval)==0):
        torch.save(model.state_dict(), '%s/%s_epoch_%04d.pt' % (saving_model_path, model_setting, epoch_idx) )
# Final checkpoint after the last epoch.
torch.save(model.state_dict(), '%s/%s_epoch_%04d_final.pt' % (saving_model_path, model_setting, epoch_idx) )
| [
"models.EntropyLossEncap",
"numpy.ones",
"utils.UnNormalize",
"utils.get_model_setting",
"os.path.join",
"options.training_options.TrainOptions",
"utils.Logger",
"torch.nn.MSELoss",
"utils.seed",
"utils.mkdir",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"numpy.concate... | [((373, 387), 'options.training_options.TrainOptions', 'TrainOptions', ([], {}), '()\n', (385, 387), False, 'from options.training_options import TrainOptions\n'), ((458, 501), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (470, 501), False, 'import torch\n'), ((507, 527), 'utils.seed', 'utils.seed', (['opt.Seed'], {}), '(opt.Seed)\n', (517, 527), False, 'import utils\n'), ((658, 686), 'utils.get_model_setting', 'utils.get_model_setting', (['opt'], {}), '(opt)\n', (681, 686), False, 'import utils\n'), ((1458, 1515), 'os.path.join', 'os.path.join', (['saving_root', "('model_' + model_setting + '/')"], {}), "(saving_root, 'model_' + model_setting + '/')\n", (1470, 1515), False, 'import os\n'), ((1516, 1546), 'utils.mkdir', 'utils.mkdir', (['saving_model_path'], {}), '(saving_model_path)\n', (1527, 1546), False, 'import utils\n'), ((1995, 2042), 'utils.UnNormalize', 'utils.UnNormalize', ([], {'mean': 'norm_mean', 'std': 'norm_std'}), '(mean=norm_mean, std=norm_std)\n', (2012, 2042), False, 'import utils\n'), ((2072, 2148), 'data.VideoDataset', 'data.VideoDataset', (['tr_data_idx_dir', 'tr_data_frame_dir'], {'transform': 'frame_trans'}), '(tr_data_idx_dir, tr_data_frame_dir, transform=frame_trans)\n', (2089, 2148), False, 'import data\n'), ((2166, 2262), 'torch.utils.data.DataLoader', 'DataLoader', (['video_dataset'], {'batch_size': 'batch_size_in', 'shuffle': '(True)', 'num_workers': 'opt.NumWorker'}), '(video_dataset, batch_size=batch_size_in, shuffle=True,\n num_workers=opt.NumWorker)\n', (2176, 2262), False, 'from torch.utils.data import DataLoader\n'), ((2607, 2650), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (2619, 2650), False, 'import torch\n'), ((1590, 1645), 'os.path.join', 'os.path.join', (['saving_root', "('log_' + model_setting + '/')"], {}), "(saving_root, 'log_' + model_setting + '/')\n", (1602, 1645), 
False, 'import os\n'), ((1648, 1669), 'utils.mkdir', 'utils.mkdir', (['log_path'], {}), '(log_path)\n', (1659, 1669), False, 'import utils\n'), ((1686, 1708), 'utils.Logger', 'utils.Logger', (['log_path'], {}), '(log_path)\n', (1698, 1708), False, 'import utils\n'), ((2426, 2502), 'models.AutoEncoderCov3DMem', 'AutoEncoderCov3DMem', (['chnum_in_', 'mem_dim_in'], {'shrink_thres': 'sparse_shrink_thres'}), '(chnum_in_, mem_dim_in, shrink_thres=sparse_shrink_thres)\n', (2445, 2502), False, 'from models import AutoEncoderCov3D, AutoEncoderCov3DMem\n'), ((1901, 1922), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1920, 1922), False, 'from torchvision import transforms\n'), ((1932, 1973), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['norm_mean', 'norm_std'], {}), '(norm_mean, norm_std)\n', (1952, 1973), False, 'from torchvision import transforms\n'), ((2689, 2701), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2699, 2701), True, 'import torch.nn as nn\n'), ((2736, 2754), 'models.EntropyLossEncap', 'EntropyLossEncap', ([], {}), '()\n', (2752, 2754), False, 'from models import EntropyLossEncap\n'), ((4347, 4382), 'numpy.concatenate', 'np.concatenate', (['frames_vis'], {'axis': '(-1)'}), '(frames_vis, axis=-1)\n', (4361, 4382), True, 'import numpy as np\n'), ((4616, 4657), 'numpy.concatenate', 'np.concatenate', (['frames_recon_vis'], {'axis': '(-1)'}), '(frames_recon_vis, axis=-1)\n', (4630, 4657), True, 'import numpy as np\n'), ((4437, 4458), 'numpy.ones', 'np.ones', (['(3)'], {'dtype': 'int'}), '(3, dtype=int)\n', (4444, 4458), True, 'import numpy as np\n'), ((4724, 4745), 'numpy.ones', 'np.ones', (['(3)'], {'dtype': 'int'}), '(3, dtype=int)\n', (4731, 4745), True, 'import numpy as np\n')] |
# Example of embedding hiplot into dash
import dash
from dash import html
from dash import dcc
from dash.dependencies import Input, Output, State
import pandas as pd
import hiplot as hip

# Sample dataset: 2011 US agricultural exports by state (hosted gist CSV).
df = pd.read_csv('https://gist.githubusercontent.com/chriddyp/c78bf172206ce24f77d6363a2d754b59/raw/c353e8ef842413cae56ae3920b8fd78468aa4cb2/usa-agricultural-exports-2011.csv')

app = dash.Dash(__name__)
# Layout: a multi-select of dataframe columns, an update button, and an
# iframe that receives the rendered HiPlot HTML.
app.layout = html.Div(
    [
        html.Label(
            [
                'Select Columns',
                dcc.Dropdown(
                    id='columns_select',
                    options=[{"label": i, "value": i} for i in df.columns.astype(str).tolist()],
                    multi=True
                )
            ]
        ),
        html.Button(id='update_button', n_clicks=0, children='Update Plot'),
        html.Div(html.Iframe(id='parallel', style={'width': '100%', 'height': '1080px'}))
    ]
)
@app.callback(
    Output('parallel', 'srcDoc'),
    Input('update_button', 'n_clicks'),
    State('columns_select', 'value'),
)
def update_parallel(n_clicks, columns_selected):
    """Rebuild the HiPlot iframe HTML when the update button is pressed.

    Before the first click the iframe stays empty.  Afterwards every
    dataframe column that was NOT selected is hidden in both the parallel
    plot and the data table, and the experiment is serialized to HTML.
    """
    if n_clicks == 0:
        return ''
    experiment = hip.Experiment.from_dataframe(df)
    hidden = [col for col in df.columns.to_list() if col not in columns_selected]
    experiment.display_data(hip.Displays.PARALLEL_PLOT).update({'hide': hidden + ['uid']})
    experiment.display_data(hip.Displays.TABLE).update({'hide': hidden + ['uid', 'from_uid']})
    return experiment.to_html()
# Launch the Dash development server (debug mode enables hot reload).
if __name__ == '__main__':
    app.run_server(debug=True)
| [
"pandas.read_csv",
"dash.dependencies.Output",
"dash.dependencies.Input",
"hiplot.Experiment.from_dataframe",
"dash.html.Button",
"dash.dependencies.State",
"dash.html.Iframe",
"dash.Dash"
] | [((194, 374), 'pandas.read_csv', 'pd.read_csv', (['"""https://gist.githubusercontent.com/chriddyp/c78bf172206ce24f77d6363a2d754b59/raw/c353e8ef842413cae56ae3920b8fd78468aa4cb2/usa-agricultural-exports-2011.csv"""'], {}), "(\n 'https://gist.githubusercontent.com/chriddyp/c78bf172206ce24f77d6363a2d754b59/raw/c353e8ef842413cae56ae3920b8fd78468aa4cb2/usa-agricultural-exports-2011.csv'\n )\n", (205, 374), True, 'import pandas as pd\n'), ((372, 391), 'dash.Dash', 'dash.Dash', (['__name__'], {}), '(__name__)\n', (381, 391), False, 'import dash\n'), ((928, 956), 'dash.dependencies.Output', 'Output', (['"""parallel"""', '"""srcDoc"""'], {}), "('parallel', 'srcDoc')\n", (934, 956), False, 'from dash.dependencies import Input, Output, State\n'), ((962, 996), 'dash.dependencies.Input', 'Input', (['"""update_button"""', '"""n_clicks"""'], {}), "('update_button', 'n_clicks')\n", (967, 996), False, 'from dash.dependencies import Input, Output, State\n'), ((1002, 1034), 'dash.dependencies.State', 'State', (['"""columns_select"""', '"""value"""'], {}), "('columns_select', 'value')\n", (1007, 1034), False, 'from dash.dependencies import Input, Output, State\n'), ((740, 807), 'dash.html.Button', 'html.Button', ([], {'id': '"""update_button"""', 'n_clicks': '(0)', 'children': '"""Update Plot"""'}), "(id='update_button', n_clicks=0, children='Update Plot')\n", (751, 807), False, 'from dash import html\n'), ((1153, 1186), 'hiplot.Experiment.from_dataframe', 'hip.Experiment.from_dataframe', (['df'], {}), '(df)\n', (1182, 1186), True, 'import hiplot as hip\n'), ((826, 897), 'dash.html.Iframe', 'html.Iframe', ([], {'id': '"""parallel"""', 'style': "{'width': '100%', 'height': '1080px'}"}), "(id='parallel', style={'width': '100%', 'height': '1080px'})\n", (837, 897), False, 'from dash import html\n')] |
from django.conf.urls import url

from base import views as base_views

app_name = 'base'

# NOTE(review): django.conf.urls.url() is deprecated (removed in Django 4.0);
# consider django.urls.re_path.  Also, the empty pattern r'' matches every
# request path — confirm that catch-all routing is intended.
urlpatterns = [
    url(r'',
        base_views.ProtectedDataView.as_view(),
        name='protected_data'),
]
| [
"base.views.ProtectedDataView.as_view"
] | [((128, 166), 'base.views.ProtectedDataView.as_view', 'base_views.ProtectedDataView.as_view', ([], {}), '()\n', (164, 166), True, 'from base import views as base_views\n')] |
from dataclasses import dataclass

from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import registry

# Central registry used to map the dataclass-style models below.
mapper_registry = registry()


@dataclass
class Base:
    """Common base for dataclass-mapped SQLAlchemy models."""
    # Key under which dataclasses.field(metadata={...}) carries the
    # SQLAlchemy column definition.
    __sa_dataclass_metadata_key__ = "sa"

    @declared_attr  # type: ignore[misc]
    def __tablename__(cls) -> str:
        # Derive the table name from the subclass name, lower-cased.
        return cls.__name__.lower()  # type: ignore[attr-defined]
| [
"sqlalchemy.orm.registry"
] | [((143, 153), 'sqlalchemy.orm.registry', 'registry', ([], {}), '()\n', (151, 153), False, 'from sqlalchemy.orm import registry\n')] |
from os import environ as env
from os import urandom, path
import sqlite3
import json
# Schema
# Open (or create) the local SQLite database and make sure both tables exist.
db = sqlite3.connect('gixnay.db')
db_cursor = db.cursor()
db_cursor.execute("""CREATE TABLE IF NOT EXISTS Countries (
name TEXT NOT NULL,
-- name of country
abbreviation VARCHAR(2),
-- two-letter abbreviation of country
top_level_domain TEXT,
-- Internet TLD of country
calling_code INTEGER,
-- Phone number country code
land_area INTEGER,
-- Square mile land area
population INTEGER,
-- Population of entire country, including military
mil_spending INTEGER, -- Annual budget for military spending
mil_personnel INTEGER,
-- Active military personnel, not reserves
gross_domestic_product INTEGER,
-- Cash value of all assets in country
-- Can calculate GDP per capita with this and population
permission_level INTEGER NOT NULL,
-- 0 - Read, 1 - Write
password VARCHAR(128) NOT NULL
-- 128-byte base16 SHA512 hash
)""")
db_cursor.execute("""CREATE TABLE IF NOT EXISTS Config (
secret_key VARCHAR(64) NOT NULL
-- used for signing cookies stored by web client(s)
)""")

# Check if secret key exists, and if not, generate
# NOTE(review): iterating the cursor yields row tuples, so when a key already
# exists, secret_key ends up bound to a 1-tuple rather than the raw value —
# confirm whether key[0] was intended here.
secret_key: str = None
for key in db_cursor.execute("""SELECT secret_key FROM Config"""):
    if secret_key is None:
        secret_key = key
    else:
        raise Exception('Field secret_key exists twice in Config')
if secret_key is None:
    print("Found no secret key, generating new one")
    secret_key = urandom(64)
    db_cursor.execute(
        """INSERT INTO Config (secret_key) VALUES (?)""",
        (secret_key,))
    db.commit()

# Get config from XDG_CONFIG_HOME/gixnaydb/conf.json
# XDG_CONFIG_HOME defaults to $HOME/.config
try:
    conf_path = env['XDG_CONFIG_HOME']
except Exception as e:
    print(f"Error using XDG_CONFIG_HOME: {type(e)}({e})")
    conf_path = path.join(env['HOME'], '.config')
    print(f"Falling back to {conf_path}")
# Load the JSON configuration for this service.
with open(path.join(conf_path, 'gixnaydb/conf.json')) as f:
    config = json.loads(f.read())
def check_auth(auth):
raise NotImplementedError()
# Use this to select all columns in a table, v1 compatible
# Don't use * in the event that a new schema is used
# This way, we can select only the values this API is prepared to deal with
query = """name,abbreviation,top_level_domain,calling_code,land_area,
population,mil_spending,mil_personnel,gross_domestic_product,
permission_level,password"""
query = query.replace('\r', '').replace('\n', '').replace(' ', '')
dz_keys_p = query.split(',') # probably don't use this
dz_keys = [key for key in dz_keys_p if key != "password"] # use this instead
def dz(_input):
"""Convert a Cursor.execute() result into a dict()
:_input: Cursor.execute().fetchone() value
:returns: dict"""
return dict(zip(dz_keys, _input))
def get_country_names():
"""Yield a list of `name` from Countries
:returns: generator(name, [name...])"""
for country in db_cursor.execute(
"SELECT name FROM Countries ORDER BY name"):
yield country[0]
def get_countries(key="name"):
"""Yield a list of countries
:key: Key used for ordering, 'name' by default
:returns: generator(dz(country)[, dz(country)...])
"""
for country in db_cursor.execute(
f"SELECT {query} FROM Countries ORDER BY ?", (key,)):
yield dz(country)
def get_countries_by(key, value):
"""Yield a list of countries by value
:key: Key used for selection
:value: Value used for selection
:returns: dz(country)
"""
for country in db_cursor.execute(
f"SELECT {query} FROM Countries WHERE {key} LIKE ?", (value,)):
yield dz(country)
| [
"os.urandom",
"os.path.join",
"sqlite3.connect"
] | [((101, 129), 'sqlite3.connect', 'sqlite3.connect', (['"""gixnay.db"""'], {}), "('gixnay.db')\n", (116, 129), False, 'import sqlite3\n'), ((1689, 1700), 'os.urandom', 'urandom', (['(64)'], {}), '(64)\n', (1696, 1700), False, 'from os import urandom, path\n'), ((2068, 2101), 'os.path.join', 'path.join', (["env['HOME']", '""".config"""'], {}), "(env['HOME'], '.config')\n", (2077, 2101), False, 'from os import urandom, path\n'), ((2154, 2196), 'os.path.join', 'path.join', (['conf_path', '"""gixnaydb/conf.json"""'], {}), "(conf_path, 'gixnaydb/conf.json')\n", (2163, 2196), False, 'from os import urandom, path\n')] |
# encoding: utf-8
import pytest
import six
from flask import Blueprint
import ckan.plugins as p
from ckan.common import config, _
class MockRoutingPlugin(p.SingletonPlugin):
p.implements(p.IBlueprint)
def get_blueprint(self):
# Create Blueprint for plugin
blueprint = Blueprint(self.name, self.__module__)
blueprint.add_url_rule(
u"/simple_flask", u"flask_plugin_view", flask_plugin_view
)
blueprint.add_url_rule(
u"/flask_translated", u"flask_translated", flask_translated_view
)
return blueprint
def flask_plugin_view():
return u"Hello World, this is served from a Flask extension"
def flask_translated_view():
return _(u"Dataset")
@pytest.fixture
def patched_app(app):
flask_app = app.flask_app
def test_view():
return u"This was served from Flask"
flask_app.add_url_rule(
u"/flask_core", view_func=test_view, endpoint=u"flask_core.index"
)
return app
def test_flask_core_route_is_served(patched_app):
res = patched_app.get(u"/")
assert res.status_code == 200
res = patched_app.get(u"/flask_core")
assert six.ensure_text(res.data) == u"This was served from Flask"
@pytest.mark.ckan_config(u"SECRET_KEY", u"super_secret_stuff")
def test_secret_key_is_used_if_present(app):
assert app.flask_app.config[u"SECRET_KEY"] == u"super_secret_stuff"
@pytest.mark.ckan_config(u"SECRET_KEY", None)
def test_beaker_secret_is_used_by_default(app):
assert (
app.flask_app.config[u"SECRET_KEY"] == config[u"beaker.session.secret"]
)
@pytest.mark.ckan_config(u"SECRET_KEY", None)
@pytest.mark.ckan_config(u"beaker.session.secret", None)
def test_no_beaker_secret_crashes(make_app):
# TODO: When Pylons is finally removed, we should test for
# RuntimeError instead (thrown on `make_flask_stack`)
with pytest.raises(RuntimeError):
make_app()
| [
"ckan.common._",
"six.ensure_text",
"pytest.raises",
"pytest.mark.ckan_config",
"flask.Blueprint",
"ckan.plugins.implements"
] | [((1238, 1299), 'pytest.mark.ckan_config', 'pytest.mark.ckan_config', (['u"""SECRET_KEY"""', 'u"""super_secret_stuff"""'], {}), "(u'SECRET_KEY', u'super_secret_stuff')\n", (1261, 1299), False, 'import pytest\n'), ((1420, 1464), 'pytest.mark.ckan_config', 'pytest.mark.ckan_config', (['u"""SECRET_KEY"""', 'None'], {}), "(u'SECRET_KEY', None)\n", (1443, 1464), False, 'import pytest\n'), ((1615, 1659), 'pytest.mark.ckan_config', 'pytest.mark.ckan_config', (['u"""SECRET_KEY"""', 'None'], {}), "(u'SECRET_KEY', None)\n", (1638, 1659), False, 'import pytest\n'), ((1661, 1716), 'pytest.mark.ckan_config', 'pytest.mark.ckan_config', (['u"""beaker.session.secret"""', 'None'], {}), "(u'beaker.session.secret', None)\n", (1684, 1716), False, 'import pytest\n'), ((183, 209), 'ckan.plugins.implements', 'p.implements', (['p.IBlueprint'], {}), '(p.IBlueprint)\n', (195, 209), True, 'import ckan.plugins as p\n'), ((729, 742), 'ckan.common._', '_', (['u"""Dataset"""'], {}), "(u'Dataset')\n", (730, 742), False, 'from ckan.common import config, _\n'), ((298, 335), 'flask.Blueprint', 'Blueprint', (['self.name', 'self.__module__'], {}), '(self.name, self.__module__)\n', (307, 335), False, 'from flask import Blueprint\n'), ((1176, 1201), 'six.ensure_text', 'six.ensure_text', (['res.data'], {}), '(res.data)\n', (1191, 1201), False, 'import six\n'), ((1892, 1919), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (1905, 1919), False, 'import pytest\n')] |
from functools import partial
from os import path
from .obj import load_obj, save_obj
from .off import load_off, save_off
def load_mesh(filename):
loaders = {
'obj': load_obj,
'off': partial(load_off, no_colors=True),
}
ext = path.splitext(filename)[1].lower()[1:]
if ext not in loaders:
raise IOError("No loader for %s extension known, available file formats are: %s" % (ext, list(loaders.keys())))
return loaders[ext](filename)
def save_mesh(filename, verts, tris, *args, **kw):
writers = {
'obj': save_obj,
'off': save_off,
}
ext = path.splitext(filename)[1].lower()[1:]
if ext not in writers:
raise IOError("No known writer for %s extension known, available file formats are: %s" % (ext, list(loaders.keys())))
return writers[ext](filename, verts, tris, *args, **kw)
| [
"os.path.splitext",
"functools.partial"
] | [((206, 239), 'functools.partial', 'partial', (['load_off'], {'no_colors': '(True)'}), '(load_off, no_colors=True)\n', (213, 239), False, 'from functools import partial\n'), ((257, 280), 'os.path.splitext', 'path.splitext', (['filename'], {}), '(filename)\n', (270, 280), False, 'from os import path\n'), ((612, 635), 'os.path.splitext', 'path.splitext', (['filename'], {}), '(filename)\n', (625, 635), False, 'from os import path\n')] |
import pandas as pd
df = pd.read_csv(r'balanced_reviews.csv')
df.isnull().any(axis = 0)
#handle the missing data
df.dropna(inplace = True)
#leaving the reviews with rating 3 and collect reviews with
#rating 1, 2, 4 and 5 onyl
df = df [df['overall'] != 3]
import numpy as np
#creating a label
#based on the values in overall column
df['Positivity'] = np.where(df['overall'] > 3 , 1 , 0)
#NLP
#reviewText - feature - df['reviewText']
#Positivity - label - df['Positivity']
from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = train_test_split(df['reviewText'], df['Positivity'], random_state = 42 )
from sklearn.feature_extraction.text import TfidfVectorizer
vect = TfidfVectorizer(min_df = 5).fit(features_train)
features_train_vectorized = vect.transform(features_train)
#model building
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(features_train_vectorized, labels_train)
predictions = model.predict(vect.transform(features_test))
from sklearn.metrics import confusion_matrix
confusion_matrix(labels_test, predictions)
from sklearn.metrics import roc_auc_score
roc_auc_score(labels_test, predictions)
#save - pickle format
import pickle
file = open("pickle_model.pkl","wb")
pickle.dump(model, file)
#pickle the vocabulary
pickle.dump(vect.vocabulary_, open('features.pkl', 'wb'))
| [
"pickle.dump",
"pandas.read_csv",
"numpy.where",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.roc_auc_score",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.metrics.confusion_matrix"
] | [((29, 64), 'pandas.read_csv', 'pd.read_csv', (['"""balanced_reviews.csv"""'], {}), "('balanced_reviews.csv')\n", (40, 64), True, 'import pandas as pd\n'), ((381, 414), 'numpy.where', 'np.where', (["(df['overall'] > 3)", '(1)', '(0)'], {}), "(df['overall'] > 3, 1, 0)\n", (389, 414), True, 'import numpy as np\n'), ((625, 694), 'sklearn.model_selection.train_test_split', 'train_test_split', (["df['reviewText']", "df['Positivity']"], {'random_state': '(42)'}), "(df['reviewText'], df['Positivity'], random_state=42)\n", (641, 694), False, 'from sklearn.model_selection import train_test_split\n'), ((974, 994), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (992, 994), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1164, 1206), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (1180, 1206), False, 'from sklearn.metrics import confusion_matrix\n'), ((1253, 1292), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels_test', 'predictions'], {}), '(labels_test, predictions)\n', (1266, 1292), False, 'from sklearn.metrics import roc_auc_score\n'), ((1379, 1403), 'pickle.dump', 'pickle.dump', (['model', 'file'], {}), '(model, file)\n', (1390, 1403), False, 'import pickle\n'), ((775, 800), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(5)'}), '(min_df=5)\n', (790, 800), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n')] |
#basic components: Embedding Layer, Scaled Dot-Product Attention, Dense Layer
import numpy as np
import torch.nn.functional as F
from torch import nn
import torch
class Embed(nn.Module):
def __init__(self, length, emb_dim,
embeddings=None, trainable=False, dropout=.1):
super(Embed, self).__init__()
self.embedding = nn.Embedding(num_embeddings=length,
embedding_dim=emb_dim,
padding_idx=0)
if embeddings is not None:
print("Loading pre-trained embeddings!")
self.embedding.weight = nn.Parameter(torch.from_numpy(embeddings),
requires_grad=trainable)
self.dropout = nn.Dropout(dropout)
def forward(self, X):
embedded = self.embedding(X)
embedded = self.dropout(embedded)
return embedded
class PosEmbed(nn.Module):
def __init__(self, length, emb_dim):
super(PosEmbed, self).__init__()
self.length = length
self.emb_dim = emb_dim
pos_weight = self.position_encoding_init(n_position=length,
emb_dim=emb_dim)
self.pos_embedding = nn.Embedding.from_pretrained(pos_weight, freeze=True)
def get_pos(self, word_sequences, mode='seq'):
batch = []
for word_seq in word_sequences:
start_idx = 1.0
word_pos = []
for pos in word_seq:
if mode == 'seq':
if int(pos) == 0:
word_pos.append(0.0)
else:
word_pos.append(start_idx)
start_idx += 1.0
elif mode == 'set':
word_pos.append(0.0)
else:
raise ValueError('Unrecognized position encoding mode! Should be chosen from "seq" or "set"! ')
batch.append(torch.from_numpy(np.array(word_pos)).type(torch.LongTensor))
batch = torch.cat(batch).view(-1, self.length)
return batch.to('cuda')
def forward(self, X, mode='seq'):
X = self.get_pos(X, mode=mode)
pos_embeded = self.pos_embedding(X)
return pos_embeded
@staticmethod
def position_encoding_init(n_position, emb_dim):
''' Init the sinusoid position encoding table '''
# keep dim 0 for padding token position encoding zero vector
n_position += 1
position_enc = np.array([
[pos / np.power(10000, 2 * (j // 2) / emb_dim) for j in range(emb_dim)]
if pos != 0 else np.zeros(emb_dim) for pos in range(n_position)])
position_enc[1:, 0::2] = np.sin(position_enc[1:, 0::2]) # apply sin on 0th,2nd,4th...emb_dim
position_enc[1:, 1::2] = np.cos(position_enc[1:, 1::2]) # apply cos on 1st,3rd,5th...emb_dim
return torch.from_numpy(position_enc).type(torch.FloatTensor)
class ScaledDotProductAttention(nn.Module):
def __init__(self, d_k, num_head,dropout=.1):
super(ScaledDotProductAttention, self).__init__()
self.reg = np.sqrt(d_k)
self.num_head = num_head
self.dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax(dim=2) #input tensor dim: (batch, seq_length, seq_length)
def forward(self, q, k, v, pad_mask=None, context_mask=None):
attention = torch.bmm(q, k.transpose(1, 2)) #dim of q and k: (batch * n_head, seq_length)
attention /= self.reg
if pad_mask is not None:
attention = attention.masked_fill(pad_mask, -1e9) #see Attention is all you need 3.2.3
attention = self.softmax(attention)
attention = self.dropout(attention)
if pad_mask is not None:
attention = attention.masked_fill(pad_mask, 0) #see Attention is all you need 3.2.3
if context_mask is not None: #context masking
attention *= context_mask
# attention residual
residual = 0
if self.num_head > 1:
_length_1 = attention.shape[1]
_length_2 = attention.shape[2]
_attn = attention.contiguous().view(self.num_head, -1, _length_1, _length_2)
for m, left in enumerate(_attn):
for n, right in enumerate(_attn):
if not m == n:
residual += torch.sum(torch.abs(left * right)) / _length_1
residual = residual/self.num_head/self.num_head/2
output = torch.bmm(attention, v)
return output, attention, residual
class MultiHeadAttention(nn.Module):
def __init__(self, num_head, d_x, d_k, dropout=.1):
super(MultiHeadAttention, self).__init__()
self.num_head = num_head
self.d_k = d_k
self.wq = nn.Linear(d_x, num_head * d_k)
self.wk = nn.Linear(d_x, num_head * d_k)
self.wv = nn.Linear(d_x, num_head * d_k)
nn.init.xavier_normal_(self.wq.weight)
nn.init.xavier_normal_(self.wk.weight)
nn.init.xavier_normal_(self.wv.weight)
self.sdp_attn = ScaledDotProductAttention(d_k=d_k, num_head=num_head, dropout=dropout)
self.dropout = nn.Dropout(dropout)
self.norm = nn.LayerNorm(d_x)
self.wo = nn.Linear(num_head * d_k, d_x)
nn.init.xavier_normal_(self.wo.weight)
def forward(self, q, k, v, pad_mask=None):
X = q #batch * length_q * d_x
length_q = q.shape[1]
assert v.shape[1] == k.shape[1]
length_k = k.shape[1]
q = self.wq(q).view(-1, length_q, self.num_head, self.d_k) #batch * length * num_head * d_k
k = self.wk(k).view(-1, length_k, self.num_head, self.d_k)
v = self.wv(v).view(-1, length_k, self.num_head, self.d_k)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, length_q, self.d_k) # (batch * num_head) * length * d_k
k = k.permute(2, 0, 1, 3).contiguous().view(-1, length_k, self.d_k)
v = v.permute(2, 0, 1, 3).contiguous().view(-1, length_k, self.d_k)
if pad_mask is not None:
pad_mask = pad_mask.repeat(self.num_head, 1, 1) # batch * length_q * length_k -> (batch * num_head) * l_q * l_k
output, attention, _ = self.sdp_attn(q, k, v,
pad_mask=pad_mask)
#output: (batch*nh) * length_q * d_k
#attention: (batch*nh) * length_q * length_k
output = output.view(self.num_head, -1, length_q, self.d_k) # nh * batch * l_q * d_k
output = output.permute(1, 2, 0, 3).contiguous().view(-1, length_q,
self.num_head * self.d_k) # batch * l_q * (nh * d_k)
output = self.norm(self.dropout(self.wo(output)) + X) #batch * l_q * d_x
attention = attention.view(self.num_head, -1, length_q, length_k).permute(1, 0, 2, 3) #batch * nh * l_q * l_k
return output, attention
class LexiconMultiHeadAttention(nn.Module):
def __init__(self, num_head, d_x, d_k, d_kl, dropout=.1):
super(LexiconMultiHeadAttention, self).__init__()
self.num_head = num_head
self.d_k = d_k
self.d_kl = d_kl
self.wq = nn.Linear(d_x, num_head * d_k)
self.wk = nn.Linear(d_x, num_head * d_k)
self.wv = nn.Linear(d_x, num_head * d_k)
self.wkl = nn.Linear(d_x, num_head * d_kl)
self.wvl = nn.Linear(d_x, num_head * d_kl)
#initialization problems?
nn.init.xavier_normal_(self.wq.weight)
nn.init.xavier_normal_(self.wk.weight)
nn.init.xavier_normal_(self.wv.weight)
nn.init.xavier_normal_(self.wkl.weight)
nn.init.xavier_normal_(self.wvl.weight)
self.sdp_attn_context = ScaledDotProductAttention(d_k=d_k, num_head=num_head, dropout=dropout)
self.sdp_attn_lex = ScaledDotProductAttention(d_k=d_kl, num_head=num_head, dropout=dropout)
self.dropout = nn.Dropout(dropout)
self.norm = nn.LayerNorm(d_x)
self.wo = nn.Linear(num_head * d_k, d_x)
nn.init.xavier_normal_(self.wo.weight)
def forward(self, q, k, v, kl, vl,
pad_mask=None, pad_mask_l=None,
context_mask=None, alpha=0.5):
X = q #batch * length_q * d_x
length_q = q.shape[1]
assert v.shape[1] == k.shape[1]
length_k = k.shape[1]
assert vl.shape[1] == kl.shape[1]
length_kl = kl.shape[1]
q = self.wq(q).view(-1, length_q, self.num_head, self.d_k) #batch * length * num_head * d_k
k = self.wk(k).view(-1, length_k, self.num_head, self.d_k)
v = self.wv(v).view(-1, length_k, self.num_head, self.d_k)
kl = self.wkl(kl).view(-1, length_kl, self.num_head, self.d_kl)
vl = self.wvl(vl).view(-1, length_kl, self.num_head, self.d_kl)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, length_q, self.d_k) # (batch * num_head) * length * d_k
k = k.permute(2, 0, 1, 3).contiguous().view(-1, length_k, self.d_k)
v = v.permute(2, 0, 1, 3).contiguous().view(-1, length_k, self.d_k)
# value residual
residual = 0
# if self.num_head > 1:
#
# _v = v.contiguous().view(self.num_head, -1, length_k, self.d_k)
# _sim = torch.nn.CosineSimilarity(dim=2)
# for m, left in enumerate(_v):
# for n, right in enumerate(_v):
# if not m == n:
# residual += (torch.sum(torch.abs(_sim(left, right)))) / left.shape[0]
# residual /= 2
# residual = residual/self.num_head/self.num_head
kl = kl.permute(2, 0, 1, 3).contiguous().view(-1, length_kl, self.d_kl)
vl = vl.permute(2, 0, 1, 3).contiguous().view(-1, length_kl, self.d_kl)
if pad_mask is not None:
pad_mask = pad_mask.repeat(self.num_head, 1, 1) # batch * length_q * length_k -> (batch * num_head) * l_q * l_k
if pad_mask_l is not None:
pad_mask_l = pad_mask_l.repeat(self.num_head, 1, 1)
if context_mask is not None:
context_mask = context_mask.repeat(self.num_head, 1, 1)
output_context, attention_context, a_res_context = self.sdp_attn_context(q, k, v,
pad_mask=pad_mask)
output_lexicon, attention_lexicon, a_res_lexicon = self.sdp_attn_lex(q, kl, vl,
pad_mask=pad_mask_l,
context_mask=context_mask)
output = alpha * output_context + (1 - alpha) * output_lexicon
residual += a_res_context
#output: (batch*nh) * length_q * d_k
#attention: (batch*nh) * length_q * length_k
output = output.view(self.num_head, -1, length_q, self.d_k) #nh * batch * l_q * d_k
output = output.permute(1, 2, 0, 3).contiguous().view(-1, length_q, self.num_head * self.d_k) #batch * l_q * (nh * d_k)
output = self.norm(self.dropout(self.wo(output)) + X) #batch * l_q * d_x
attention_context = attention_context.view(self.num_head, -1, length_q, length_k).permute(1, 0, 2, 3)#batch * nh * l_q * l_k
attention_lexicon = attention_lexicon.view(self.num_head, -1, length_q, length_kl).permute(1, 0, 2, 3)#batch * nh * l_q * l_k
return output, attention_context, attention_lexicon, residual
class PointwiseFF(nn.Module):
def __init__(self, d_x, d_ff, dropout=.0):
super(PointwiseFF, self).__init__()
self.w1 = nn.Conv1d(d_x, d_ff, 1)
self.w2 = nn.Conv1d(d_ff, d_x, 1)
nn.init.xavier_normal_(self.w1.weight)
nn.init.xavier_normal_(self.w2.weight)
self.dropout = nn.Dropout(dropout)
self.norm = nn.LayerNorm(d_x)
def forward(self, X):
output = self.w2(F.relu(self.w1(X.transpose(1, 2)))) #dim of x: (batch, seq_length, d_x)
output = self.dropout(output.transpose(1, 2))
output = self.norm(output + X) #batch * seq_length * d_x
return output
| [
"torch.bmm",
"torch.nn.Dropout",
"numpy.sqrt",
"torch.abs",
"torch.nn.Softmax",
"numpy.power",
"torch.nn.LayerNorm",
"torch.from_numpy",
"torch.nn.init.xavier_normal_",
"torch.cat",
"numpy.zeros",
"numpy.array",
"numpy.cos",
"torch.nn.Linear",
"numpy.sin",
"torch.nn.Conv1d",
"torch.n... | [((367, 440), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': 'length', 'embedding_dim': 'emb_dim', 'padding_idx': '(0)'}), '(num_embeddings=length, embedding_dim=emb_dim, padding_idx=0)\n', (379, 440), False, 'from torch import nn\n'), ((792, 811), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (802, 811), False, 'from torch import nn\n'), ((1348, 1401), 'torch.nn.Embedding.from_pretrained', 'nn.Embedding.from_pretrained', (['pos_weight'], {'freeze': '(True)'}), '(pos_weight, freeze=True)\n', (1376, 1401), False, 'from torch import nn\n'), ((2917, 2947), 'numpy.sin', 'np.sin', (['position_enc[1:, 0::2]'], {}), '(position_enc[1:, 0::2])\n', (2923, 2947), True, 'import numpy as np\n'), ((3018, 3048), 'numpy.cos', 'np.cos', (['position_enc[1:, 1::2]'], {}), '(position_enc[1:, 1::2])\n', (3024, 3048), True, 'import numpy as np\n'), ((3352, 3364), 'numpy.sqrt', 'np.sqrt', (['d_k'], {}), '(d_k)\n', (3359, 3364), True, 'import numpy as np\n'), ((3421, 3440), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3431, 3440), False, 'from torch import nn\n'), ((3464, 3481), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(2)'}), '(dim=2)\n', (3474, 3481), False, 'from torch import nn\n'), ((4764, 4787), 'torch.bmm', 'torch.bmm', (['attention', 'v'], {}), '(attention, v)\n', (4773, 4787), False, 'import torch\n'), ((5074, 5104), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (5083, 5104), False, 'from torch import nn\n'), ((5123, 5153), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (5132, 5153), False, 'from torch import nn\n'), ((5172, 5202), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (5181, 5202), False, 'from torch import nn\n'), ((5222, 5260), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wq.weight'], {}), '(self.wq.weight)\n', (5244, 5260), 
False, 'from torch import nn\n'), ((5269, 5307), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wk.weight'], {}), '(self.wk.weight)\n', (5291, 5307), False, 'from torch import nn\n'), ((5316, 5354), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wv.weight'], {}), '(self.wv.weight)\n', (5338, 5354), False, 'from torch import nn\n'), ((5483, 5502), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (5493, 5502), False, 'from torch import nn\n'), ((5523, 5540), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_x'], {}), '(d_x)\n', (5535, 5540), False, 'from torch import nn\n'), ((5568, 5598), 'torch.nn.Linear', 'nn.Linear', (['(num_head * d_k)', 'd_x'], {}), '(num_head * d_k, d_x)\n', (5577, 5598), False, 'from torch import nn\n'), ((5607, 5645), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wo.weight'], {}), '(self.wo.weight)\n', (5629, 5645), False, 'from torch import nn\n'), ((7575, 7605), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (7584, 7605), False, 'from torch import nn\n'), ((7624, 7654), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (7633, 7654), False, 'from torch import nn\n'), ((7673, 7703), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_k)'], {}), '(d_x, num_head * d_k)\n', (7682, 7703), False, 'from torch import nn\n'), ((7726, 7757), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_kl)'], {}), '(d_x, num_head * d_kl)\n', (7735, 7757), False, 'from torch import nn\n'), ((7777, 7808), 'torch.nn.Linear', 'nn.Linear', (['d_x', '(num_head * d_kl)'], {}), '(d_x, num_head * d_kl)\n', (7786, 7808), False, 'from torch import nn\n'), ((7862, 7900), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wq.weight'], {}), '(self.wq.weight)\n', (7884, 7900), False, 'from torch import nn\n'), ((7909, 7947), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', 
(['self.wk.weight'], {}), '(self.wk.weight)\n', (7931, 7947), False, 'from torch import nn\n'), ((7956, 7994), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wv.weight'], {}), '(self.wv.weight)\n', (7978, 7994), False, 'from torch import nn\n'), ((8003, 8042), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wkl.weight'], {}), '(self.wkl.weight)\n', (8025, 8042), False, 'from torch import nn\n'), ((8051, 8090), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wvl.weight'], {}), '(self.wvl.weight)\n', (8073, 8090), False, 'from torch import nn\n'), ((8335, 8354), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (8345, 8354), False, 'from torch import nn\n'), ((8375, 8392), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_x'], {}), '(d_x)\n', (8387, 8392), False, 'from torch import nn\n'), ((8420, 8450), 'torch.nn.Linear', 'nn.Linear', (['(num_head * d_k)', 'd_x'], {}), '(num_head * d_k, d_x)\n', (8429, 8450), False, 'from torch import nn\n'), ((8459, 8497), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.wo.weight'], {}), '(self.wo.weight)\n', (8481, 8497), False, 'from torch import nn\n'), ((12184, 12207), 'torch.nn.Conv1d', 'nn.Conv1d', (['d_x', 'd_ff', '(1)'], {}), '(d_x, d_ff, 1)\n', (12193, 12207), False, 'from torch import nn\n'), ((12226, 12249), 'torch.nn.Conv1d', 'nn.Conv1d', (['d_ff', 'd_x', '(1)'], {}), '(d_ff, d_x, 1)\n', (12235, 12249), False, 'from torch import nn\n'), ((12259, 12297), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.w1.weight'], {}), '(self.w1.weight)\n', (12281, 12297), False, 'from torch import nn\n'), ((12306, 12344), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.w2.weight'], {}), '(self.w2.weight)\n', (12328, 12344), False, 'from torch import nn\n'), ((12369, 12388), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (12379, 12388), False, 'from torch import nn\n'), ((12409, 12426), 
'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_x'], {}), '(d_x)\n', (12421, 12426), False, 'from torch import nn\n'), ((665, 693), 'torch.from_numpy', 'torch.from_numpy', (['embeddings'], {}), '(embeddings)\n', (681, 693), False, 'import torch\n'), ((2187, 2203), 'torch.cat', 'torch.cat', (['batch'], {}), '(batch)\n', (2196, 2203), False, 'import torch\n'), ((3101, 3131), 'torch.from_numpy', 'torch.from_numpy', (['position_enc'], {}), '(position_enc)\n', (3117, 3131), False, 'import torch\n'), ((2825, 2842), 'numpy.zeros', 'np.zeros', (['emb_dim'], {}), '(emb_dim)\n', (2833, 2842), True, 'import numpy as np\n'), ((2127, 2145), 'numpy.array', 'np.array', (['word_pos'], {}), '(word_pos)\n', (2135, 2145), True, 'import numpy as np\n'), ((2731, 2770), 'numpy.power', 'np.power', (['(10000)', '(2 * (j // 2) / emb_dim)'], {}), '(10000, 2 * (j // 2) / emb_dim)\n', (2739, 2770), True, 'import numpy as np\n'), ((4646, 4669), 'torch.abs', 'torch.abs', (['(left * right)'], {}), '(left * right)\n', (4655, 4669), False, 'import torch\n')] |
# -*- coding:utf-8 -*-
from flask import Blueprint
from flask import current_app
from flask_sqlalchemy import get_debug_queries
from model.permission import Permission
blog = Blueprint('blog', __name__)
@blog.app_context_processor
def inject_permissions():
return dict(Permission=Permission)
@blog.after_app_request
def after_request(response):
for query in get_debug_queries():
if query.duration >= current_app.config['MISSOURI_SLOW_DB_QUERY_TIME']:
current_app.logger.warning(
'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
% (query.statement, query.nParameters, query.duration,
query.context)
)
return response
from . import views
| [
"flask.current_app.logger.warning",
"flask.Blueprint",
"flask_sqlalchemy.get_debug_queries"
] | [((176, 203), 'flask.Blueprint', 'Blueprint', (['"""blog"""', '__name__'], {}), "('blog', __name__)\n", (185, 203), False, 'from flask import Blueprint\n'), ((371, 390), 'flask_sqlalchemy.get_debug_queries', 'get_debug_queries', ([], {}), '()\n', (388, 390), False, 'from flask_sqlalchemy import get_debug_queries\n'), ((484, 654), 'flask.current_app.logger.warning', 'current_app.logger.warning', (['("""Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n""" % (query.\n statement, query.nParameters, query.duration, query.context))'], {}), '(\n """Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n""" % (query\n .statement, query.nParameters, query.duration, query.context))\n', (510, 654), False, 'from flask import current_app\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as cfg_fixture
from designate import exceptions
from designate import objects
from designate import scheduler
from designate import tests
DEFAULT_POOL_ID = BRONZE_POOL_ID = '67d71c2a-645c-4dde-a6b8-60a172c9ede8'
SILVER_POOL_ID = '5fabcd37-262c-4cf3-8625-7f419434b6df'
GOLD_POOL_ID = '24702e43-8a52-440f-ab74-19fc16048860'
def build_test_pools():
pools = objects.PoolList.from_list(
[
{'id': DEFAULT_POOL_ID},
{'id': SILVER_POOL_ID},
{'id': GOLD_POOL_ID},
]
)
# Pool 0 is also the default pool.
pool_0_attributes = objects.PoolAttributeList.from_list([
{
'key': 'service_tier',
'value': 'bronze'
},
])
pool_1_attributes = objects.PoolAttributeList.from_list([
{
'key': 'service_tier',
'value': 'silver'
},
])
pool_2_attributes = objects.PoolAttributeList.from_list([
{
'key': 'service_tier',
'value': 'gold'
},
])
pools[0].attributes = pool_0_attributes
pools[1].attributes = pool_1_attributes
pools[2].attributes = pool_2_attributes
return pools
class AttributeSchedulerPermutationsTest(tests.TestCase):
def setUp(self):
super(AttributeSchedulerPermutationsTest, self).setUp()
self.CONF = self.useFixture(cfg_fixture.Config(cfg.CONF)).conf
self.context = self.get_context()
self.CONF.set_override(
'scheduler_filters', ['attribute'], 'service:central'
)
self.CONF.set_override(
'default_pool_id', DEFAULT_POOL_ID, 'service:central'
)
attrs = {
'find_pools.return_value': build_test_pools()
}
mock_storage = mock.Mock(**attrs)
self.scheduler = scheduler.get_scheduler(storage=mock_storage)
def test_get_gold_tier(self):
zone = objects.Zone(
name='example.com.',
type='PRIMARY',
email='<EMAIL>',
attributes=objects.ZoneAttributeList.from_list(
[
{
'key': 'service_tier',
'value': 'gold'
},
]
)
)
result = self.scheduler.schedule_zone(self.context, zone)
self.assertEqual(GOLD_POOL_ID, result)
def test_get_silver_tier(self):
zone = objects.Zone(
name='example.com.',
type='PRIMARY',
email='<EMAIL>',
attributes=objects.ZoneAttributeList.from_list(
[
{
'key': 'service_tier',
'value': 'silver'
},
]
)
)
result = self.scheduler.schedule_zone(self.context, zone)
self.assertEqual(SILVER_POOL_ID, result)
def test_get_bronze_tier(self):
zone = objects.Zone(
name='example.com.',
type='PRIMARY',
email='<EMAIL>',
attributes=objects.ZoneAttributeList.from_list(
[
{
'key': 'service_tier',
'value': 'bronze'
},
]
)
)
result = self.scheduler.schedule_zone(self.context, zone)
self.assertEqual(BRONZE_POOL_ID, result)
def test_tier_not_found_raises_exception(self):
zone = objects.Zone(
name='example.com.',
type='PRIMARY',
email='<EMAIL>',
attributes=objects.ZoneAttributeList.from_list(
[
{
'key': 'service_tier',
'value': 'blue'
},
]
)
)
self.assertRaises(
exceptions.NoValidPoolFound,
self.scheduler.schedule_zone, self.context, zone
)
def test_no_tier_raises_exception(self):
zone = objects.Zone(
name='example.com.',
type='PRIMARY',
email='<EMAIL>',
attributes=objects.ZoneAttributeList.from_list(
[]
)
)
# When no attribute is requested it will return all available pools.
# NOTE(eandersson): This is probably not intended behavior.
# We probably want this to return NoValidPoolFound,
# so that we can use a fallback filter with the
# attribute filter.
self.assertRaises(
exceptions.MultiplePoolsFound,
self.scheduler.schedule_zone, self.context, zone
)
class DefaultSchedulerPermutationsTest(tests.TestCase):
    """Exercise the scheduler with only the default_pool filter enabled."""

    def setUp(self):
        super(DefaultSchedulerPermutationsTest, self).setUp()
        self.CONF = self.useFixture(cfg_fixture.Config(cfg.CONF)).conf
        self.context = self.get_context()
        self.CONF.set_override(
            'scheduler_filters', ['default_pool'], 'service:central'
        )
        self.CONF.set_override(
            'default_pool_id', DEFAULT_POOL_ID, 'service:central'
        )
        # Storage is mocked so the filter sees the canned pool list.
        storage = mock.Mock(**{'find_pools.return_value': build_test_pools()})
        self.scheduler = scheduler.get_scheduler(storage=storage)

    def test_get_default_pool(self):
        """With no attributes, the zone is scheduled into the default pool."""
        zone = objects.Zone(
            name='example.com.',
            type='PRIMARY',
            email='<EMAIL>',
        )
        self.assertEqual(
            DEFAULT_POOL_ID,
            self.scheduler.schedule_zone(self.context, zone)
        )
class FallbackSchedulerPermutationsTest(tests.TestCase):
    """Exercise the scheduler with the attribute + fallback filter chain."""

    def setUp(self):
        super(FallbackSchedulerPermutationsTest, self).setUp()
        self.CONF = self.useFixture(cfg_fixture.Config(cfg.CONF)).conf
        self.context = self.get_context()
        self.CONF.set_override(
            'scheduler_filters', ['attribute', 'fallback'], 'service:central'
        )
        self.CONF.set_override(
            'default_pool_id', DEFAULT_POOL_ID, 'service:central'
        )
        # Storage is mocked so the filters see the canned pool list.
        storage = mock.Mock(**{'find_pools.return_value': build_test_pools()})
        self.scheduler = scheduler.get_scheduler(storage=storage)

    def test_tier_not_found_return_default(self):
        """An unmatched tier falls back to the configured default pool."""
        tier_attrs = objects.ZoneAttributeList.from_list(
            [{'key': 'service_tier', 'value': 'that does not exist'}]
        )
        zone = objects.Zone(
            name='example.com.',
            type='PRIMARY',
            email='<EMAIL>',
            attributes=tier_attrs,
        )
        self.assertEqual(
            DEFAULT_POOL_ID,
            self.scheduler.schedule_zone(self.context, zone)
        )
| [
"designate.objects.PoolAttributeList.from_list",
"designate.objects.ZoneAttributeList.from_list",
"unittest.mock.Mock",
"oslo_config.fixture.Config",
"designate.objects.PoolList.from_list",
"designate.objects.Zone",
"designate.scheduler.get_scheduler"
] | [((994, 1097), 'designate.objects.PoolList.from_list', 'objects.PoolList.from_list', (["[{'id': DEFAULT_POOL_ID}, {'id': SILVER_POOL_ID}, {'id': GOLD_POOL_ID}]"], {}), "([{'id': DEFAULT_POOL_ID}, {'id': SILVER_POOL_ID},\n {'id': GOLD_POOL_ID}])\n", (1020, 1097), False, 'from designate import objects\n'), ((1220, 1305), 'designate.objects.PoolAttributeList.from_list', 'objects.PoolAttributeList.from_list', (["[{'key': 'service_tier', 'value': 'bronze'}]"], {}), "([{'key': 'service_tier', 'value':\n 'bronze'}])\n", (1255, 1305), False, 'from designate import objects\n'), ((1375, 1460), 'designate.objects.PoolAttributeList.from_list', 'objects.PoolAttributeList.from_list', (["[{'key': 'service_tier', 'value': 'silver'}]"], {}), "([{'key': 'service_tier', 'value':\n 'silver'}])\n", (1410, 1460), False, 'from designate import objects\n'), ((1530, 1609), 'designate.objects.PoolAttributeList.from_list', 'objects.PoolAttributeList.from_list', (["[{'key': 'service_tier', 'value': 'gold'}]"], {}), "([{'key': 'service_tier', 'value': 'gold'}])\n", (1565, 1609), False, 'from designate import objects\n'), ((2395, 2413), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '(**attrs)\n', (2404, 2413), False, 'from unittest import mock\n'), ((2440, 2485), 'designate.scheduler.get_scheduler', 'scheduler.get_scheduler', ([], {'storage': 'mock_storage'}), '(storage=mock_storage)\n', (2463, 2485), False, 'from designate import scheduler\n'), ((5962, 5980), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '(**attrs)\n', (5971, 5980), False, 'from unittest import mock\n'), ((6007, 6052), 'designate.scheduler.get_scheduler', 'scheduler.get_scheduler', ([], {'storage': 'mock_storage'}), '(storage=mock_storage)\n', (6030, 6052), False, 'from designate import scheduler\n'), ((6106, 6172), 'designate.objects.Zone', 'objects.Zone', ([], {'name': '"""example.com."""', 'type': '"""PRIMARY"""', 'email': '"""<EMAIL>"""'}), "(name='example.com.', type='PRIMARY', email='<EMAIL>')\n", (6118, 6172), False, 
'from designate import objects\n'), ((6933, 6951), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '(**attrs)\n', (6942, 6951), False, 'from unittest import mock\n'), ((6978, 7023), 'designate.scheduler.get_scheduler', 'scheduler.get_scheduler', ([], {'storage': 'mock_storage'}), '(storage=mock_storage)\n', (7001, 7023), False, 'from designate import scheduler\n'), ((1991, 2019), 'oslo_config.fixture.Config', 'cfg_fixture.Config', (['cfg.CONF'], {}), '(cfg.CONF)\n', (2009, 2019), True, 'from oslo_config import fixture as cfg_fixture\n'), ((2663, 2742), 'designate.objects.ZoneAttributeList.from_list', 'objects.ZoneAttributeList.from_list', (["[{'key': 'service_tier', 'value': 'gold'}]"], {}), "([{'key': 'service_tier', 'value': 'gold'}])\n", (2698, 2742), False, 'from designate import objects\n'), ((3186, 3271), 'designate.objects.ZoneAttributeList.from_list', 'objects.ZoneAttributeList.from_list', (["[{'key': 'service_tier', 'value': 'silver'}]"], {}), "([{'key': 'service_tier', 'value':\n 'silver'}])\n", (3221, 3271), False, 'from designate import objects\n'), ((3713, 3798), 'designate.objects.ZoneAttributeList.from_list', 'objects.ZoneAttributeList.from_list', (["[{'key': 'service_tier', 'value': 'bronze'}]"], {}), "([{'key': 'service_tier', 'value':\n 'bronze'}])\n", (3748, 3798), False, 'from designate import objects\n'), ((4256, 4335), 'designate.objects.ZoneAttributeList.from_list', 'objects.ZoneAttributeList.from_list', (["[{'key': 'service_tier', 'value': 'blue'}]"], {}), "([{'key': 'service_tier', 'value': 'blue'}])\n", (4291, 4335), False, 'from designate import objects\n'), ((4813, 4852), 'designate.objects.ZoneAttributeList.from_list', 'objects.ZoneAttributeList.from_list', (['[]'], {}), '([])\n', (4848, 4852), False, 'from designate import objects\n'), ((5555, 5583), 'oslo_config.fixture.Config', 'cfg_fixture.Config', (['cfg.CONF'], {}), '(cfg.CONF)\n', (5573, 5583), True, 'from oslo_config import fixture as cfg_fixture\n'), ((6517, 6545), 
'oslo_config.fixture.Config', 'cfg_fixture.Config', (['cfg.CONF'], {}), '(cfg.CONF)\n', (6535, 6545), True, 'from oslo_config import fixture as cfg_fixture\n'), ((7217, 7315), 'designate.objects.ZoneAttributeList.from_list', 'objects.ZoneAttributeList.from_list', (["[{'key': 'service_tier', 'value': 'that does not exist'}]"], {}), "([{'key': 'service_tier', 'value':\n 'that does not exist'}])\n", (7252, 7315), False, 'from designate import objects\n')] |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import textwrap
from apache.aurora.client.cli import EXIT_API_ERROR, EXIT_OK, Noun, Verb
from apache.aurora.client.cli.context import AuroraCommandContext
from apache.aurora.client.cli.options import (
ALL_INSTANCES,
BIND_OPTION,
BROWSER_OPTION,
CONFIG_ARGUMENT,
HEALTHCHECK_OPTION,
INSTANCES_SPEC_ARGUMENT,
JOBSPEC_ARGUMENT,
JSON_READ_OPTION,
STRICT_OPTION
)
class StartUpdate(Verb):
  """Verb implementing `aurora update start`: kicks off a scheduler-driven
  rolling update of a running job.
  """
  @property
  def name(self):
    # Sub-command name as typed on the CLI.
    return 'start'
  def get_options(self):
    # Options and positional arguments accepted by `aurora update start`.
    return [
        BIND_OPTION, BROWSER_OPTION, JSON_READ_OPTION, HEALTHCHECK_OPTION, STRICT_OPTION,
        INSTANCES_SPEC_ARGUMENT, CONFIG_ARGUMENT
    ]
  def help(self):
    return textwrap.dedent("""\
        Start a scheduler-driven rolling upgrade on a running job, using the update
        configuration within the config file as a control for update velocity and failure
        tolerance.
        The updater only takes action on instances in a job that have changed, meaning
        that changing a single instance will only induce a restart on the changed task instance.
        You may want to consider using the 'aurora job diff' subcommand before updating,
        to preview what changes will take effect.
        """)
  # TODO(mchucarroll): consider adding an "aurora update preview"?
  def execute(self, context):
    """Send a start_job_update RPC for the requested job/instances."""
    job = context.options.instance_spec.jobkey
    # ALL_INSTANCES is represented as None when talking to the API.
    instances = (None if context.options.instance_spec.instance == ALL_INSTANCES else
        context.options.instance_spec.instance)
    if instances is not None and context.options.strict:
      # --strict: fail early if any requested shard id is invalid for the job.
      context.verify_shards_option_validity(job, instances)
    config = context.get_job_config(job, context.options.config_file)
    api = context.get_api(config.cluster())
    resp = api.start_job_update(config, instances)
    context.check_and_log_response(resp, err_code=EXIT_API_ERROR,
        err_msg="Failed to start scheduler-driven update; see log for details.")
    context.print_out("Scheduler-driven update of job %s has started." % job)
    return EXIT_OK
class PauseUpdate(Verb):
  """Verb implementing `aurora update pause`."""
  @property
  def name(self):
    return 'pause'
  def get_options(self):
    return [JOBSPEC_ARGUMENT]
  def help(self):
    return """Pause a scheduler-driven rolling update."""
  def execute(self, context):
    """Ask the scheduler to pause the active update for the given job key."""
    jobkey = context.options.jobspec
    api = context.get_api(jobkey.cluster)
    context.check_and_log_response(
        api.pause_job_update(jobkey),
        err_code=EXIT_API_ERROR,
        err_msg="Failed to pause scheduler-driven update; see log for details")
    context.print_out("Scheduler-driven update of job %s has been paused." % jobkey)
    return EXIT_OK
class ResumeUpdate(Verb):
  """Verb implementing `aurora update resume`."""
  @property
  def name(self):
    return 'resume'
  def get_options(self):
    return [JOBSPEC_ARGUMENT]
  def help(self):
    return """Resume a paused scheduler-driven rolling update."""
  def execute(self, context):
    """Ask the scheduler to resume a paused update for the given job key."""
    jobkey = context.options.jobspec
    api = context.get_api(jobkey.cluster)
    context.check_and_log_response(
        api.resume_job_update(jobkey),
        err_code=EXIT_API_ERROR,
        err_msg="Failed to resume scheduler-driven update; see log for details")
    context.print_out("Scheduler-driven update of job %s has been resumed." % jobkey)
    return EXIT_OK
class AbortUpdate(Verb):
  """Verb implementing `aurora update abort`."""
  @property
  def name(self):
    # Sub-command name as typed on the CLI.
    return 'abort'
  def get_options(self):
    return [
        JOBSPEC_ARGUMENT
    ]
  def help(self):
    # Typo fix in user-facing help: "in-pregress" -> "in-progress".
    return """Abort an in-progress scheduler-driven rolling update."""
  def execute(self, context):
    """Ask the scheduler to abort the active update for the given job key."""
    jobkey = context.options.jobspec
    api = context.get_api(jobkey.cluster)
    resp = api.abort_job_update(jobkey)
    context.check_and_log_response(resp, err_code=EXIT_API_ERROR,
        err_msg="Failed to abort scheduler-driven update; see log for details")
    context.print_out("Scheduler-driven update of job %s has been aborted." % jobkey)
    return EXIT_OK
class Update(Noun):
  """Top-level noun grouping the scheduler-driven update verbs."""
  @property
  def name(self):
    return "update"
  @property
  def help(self):
    return "Interact with the aurora update service."
  @classmethod
  def create_context(cls):
    return AuroraCommandContext()
  def __init__(self):
    super(Update, self).__init__()
    # Register every update sub-command with this noun.
    for verb in (StartUpdate(), PauseUpdate(), ResumeUpdate(), AbortUpdate()):
      self.register_verb(verb)
| [
"textwrap.dedent",
"apache.aurora.client.cli.context.AuroraCommandContext"
] | [((1271, 1830), 'textwrap.dedent', 'textwrap.dedent', (['""" Start a scheduler-driven rolling upgrade on a running job, using the update\n configuration within the config file as a control for update velocity and failure\n tolerance.\n\n The updater only takes action on instances in a job that have changed, meaning\n that changing a single instance will only induce a restart on the changed task instance.\n\n You may want to consider using the \'aurora job diff\' subcommand before updating,\n to preview what changes will take effect.\n """'], {}), '(\n """ Start a scheduler-driven rolling upgrade on a running job, using the update\n configuration within the config file as a control for update velocity and failure\n tolerance.\n\n The updater only takes action on instances in a job that have changed, meaning\n that changing a single instance will only induce a restart on the changed task instance.\n\n You may want to consider using the \'aurora job diff\' subcommand before updating,\n to preview what changes will take effect.\n """\n )\n', (1286, 1830), False, 'import textwrap\n'), ((4723, 4745), 'apache.aurora.client.cli.context.AuroraCommandContext', 'AuroraCommandContext', ([], {}), '()\n', (4743, 4745), False, 'from apache.aurora.client.cli.context import AuroraCommandContext\n')] |
# -*- coding: utf-8 -*-
import urwid
from blinker import signal
from plait.app.base import PlaitApp
from plait.frame import ConsoleFrame
from plait.tabs import VerticalTabs
class WorkerLog(urwid.ListBox):
    """Scrolling list of text lines used as the per-host log pane."""
    def __init__(self):
        urwid.ListBox.__init__(self, urwid.SimpleListWalker([]))
    def _scroll_to_end(self):
        """Keep the newest entry in view; ignore focus errors (e.g. empty list)."""
        try:
            self.body.set_focus(self.body.focus + 1)
        except: pass
    def write(self, text):
        """Append *text* as a log line, stripping embedded newlines."""
        if text != "\n" and isinstance(text, basestring):
            text = text.replace("\n", "")
        self.body.append(urwid.Text(text))
        self._scroll_to_end()
    def add(self, content):
        """Append *content* (plain text or urwid markup) as a new log entry."""
        self.body.append(urwid.Text(content))
        self._scroll_to_end()
class ConsoleApp(PlaitApp):
    """Urwid console UI: one tab (with a scrolling WorkerLog) per remote
    host, recolored as workers connect, finish or fail.
    """
    # Palette entry used for highlighted (task header) lines.
    default_palette = (
        ('reversed', 'standout', ''),
    )
    def __init__(self, title="plait"):
        self.tabs = VerticalTabs()
        self.root = ConsoleFrame(title)
        # Read input from the controlling tty so stdin can remain piped.
        self.screen = urwid.raw_display.Screen(input=open('/dev/tty', 'r'))
        self.loop = urwid.MainLoop(
            self.root, self.default_palette,
            screen=self.screen,
            handle_mouse=False, unhandled_input=self.unhandled_input,
            event_loop=urwid.TwistedEventLoop())
        self.loop.screen.set_terminal_properties(colors=256)
        self.show(self.tabs, "Remote task hosts")
        # Workers that reported a failure; lets on_worker_finish keep the
        # failure color instead of painting the tab green.
        self.failed_workers = []
        super(ConsoleApp, self).__init__()
    def run(self, runner):
        # Start the runner, create one log tab per host, then enter the UI loop.
        runner.run()
        for host in runner.hosts:
            self.tabs.addTab(host, WorkerLog())
        self.loop.run()
    def stop(self):
        # Exits the urwid main loop.
        raise urwid.ExitMainLoop
    def unhandled_input(self, key):
        # 'q'/'Q' quits; every key is passed through after a redraw.
        if key.lower() == 'q':
            self.stop()
        self.loop.draw_screen()
        return key
    def show(self, w, header_text=""):
        # Display widget *w* in the root frame with the given header text.
        self.root.show(w, header_text=header_text)
    def on_worker_stdout(self, worker, data=None):
        # Append each stdout line to the worker's tab.
        tab = self.tabs.tabs[worker.label]
        for line in data.split("\n"):
            tab.content.write(line)
        self.loop.draw_screen()
    def on_worker_stderr(self, worker, data=None):
        # Append stderr output to the worker's tab as-is.
        tab = self.tabs.tabs[worker.label]
        tab.content.write(data)
        self.loop.draw_screen()
    def on_worker_connect(self, worker):
        # Cyan marks a connected (running) worker.
        tab = self.tabs.tabs[worker.label]
        tab.set_cyan()
        self.loop.draw_screen()
    def on_worker_finish(self, worker):
        # Green marks success, but never overrides an earlier failure color.
        tab = self.tabs.tabs[worker.label]
        if worker not in self.failed_workers:
            tab.set_green()
        self.loop.draw_screen()
    def on_worker_failure(self, worker, failure=None):
        # Unknown labels are remapped to "localhost" before reporting.
        if worker.label not in self.tabs.tabs:
            worker.label = "localhost"
        tab = self.tabs.tabs[worker.label]
        tab.content.write(repr(failure))
        tab.set_red()
        self.failed_workers.append(worker)
        self.loop.draw_screen()
    def on_task_start(self, worker, task=None):
        tab = self.tabs.tabs[worker.label]
        # Highlighted header line for the task (py2: encode the unicode arrow).
        task_template = u"↪ {task.tag}".format(task=task).encode('utf8')
        task_header = ('reversed', task_template)
        tab.content.write(task_header)
        self.loop.draw_screen()
    def on_task_failure(self, worker, task=None, failure=None):
        # Orange marks a task-level failure (worker itself may still run on).
        tab = self.tabs.tabs[worker.label]
        tab.content.write(str(failure))
        tab.set_orange()
        self.failed_workers.append(worker)
        self.loop.draw_screen()
    def on_task_finish(self, worker, task=None, result=None):
        # Only non-empty results are echoed to the tab.
        if result:
            tab = self.tabs.tabs[worker.label]
            tab.content.write(str(result))
            self.loop.draw_screen()
| [
"plait.frame.ConsoleFrame",
"urwid.ListBox.__init__",
"urwid.TwistedEventLoop",
"urwid.SimpleListWalker",
"plait.tabs.VerticalTabs",
"urwid.Text"
] | [((250, 276), 'urwid.SimpleListWalker', 'urwid.SimpleListWalker', (['[]'], {}), '([])\n', (272, 276), False, 'import urwid\n'), ((285, 321), 'urwid.ListBox.__init__', 'urwid.ListBox.__init__', (['self', 'walker'], {}), '(self, walker)\n', (307, 321), False, 'import urwid\n'), ((469, 485), 'urwid.Text', 'urwid.Text', (['text'], {}), '(text)\n', (479, 485), False, 'import urwid\n'), ((928, 942), 'plait.tabs.VerticalTabs', 'VerticalTabs', ([], {}), '()\n', (940, 942), False, 'from plait.tabs import VerticalTabs\n'), ((963, 982), 'plait.frame.ConsoleFrame', 'ConsoleFrame', (['title'], {}), '(title)\n', (975, 982), False, 'from plait.frame import ConsoleFrame\n'), ((662, 681), 'urwid.Text', 'urwid.Text', (['content'], {}), '(content)\n', (672, 681), False, 'import urwid\n'), ((1265, 1289), 'urwid.TwistedEventLoop', 'urwid.TwistedEventLoop', ([], {}), '()\n', (1287, 1289), False, 'import urwid\n')] |
# ------------------------------------------------------------------
#
# Copyright (C) 2015 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License published by the Free Software Foundation.
#
# ------------------------------------------------------------------
from __future__ import print_function # needed in py2 for print('...', file=sys.stderr)
import cgitb
import os
import sys
import tempfile
import traceback
from pyronia.common import error
#
# Exception handling
#
def handle_exception(*exc_info):
    '''Used as exception handler in the aa-* tools.

    For AppArmorException (used for profile syntax errors etc.), print only the
    exception's value because a backtrace is superfluous and would confuse users.

    For other exceptions, print a backtrace and save detailed information in a
    file in /tmp/ (including variable content etc.) to make debugging easier.
    '''
    (ex_cls, ex, tb) = exc_info
    if ex_cls.__name__ == 'AppArmorException':
        # Compared by name: I didn't find a way to get this working with isinstance() :-/
        print('', file=sys.stderr)
        error(ex.value)
    else:
        (fd, path) = tempfile.mkstemp(prefix='apparmor-bugreport-', suffix='.txt')
        report = os.fdopen(fd, 'w')
        # report = open_file_write(path) # writes everything converted to utf8 - not sure if we want this...
        try:
            # cgitb writes a very detailed report: backtrace plus variable content.
            cgitb_hook = cgitb.Hook(display=1, file=report, format='text', context=10)
            cgitb_hook.handle(exc_info)
            report.write('Please consider reporting a bug at https://bugs.launchpad.net/apparmor/\n')
            report.write('and attach this file.\n')
        finally:
            # Bug fix: the original never closed the report file handle.
            report.close()
        print(''.join(traceback.format_exception(*exc_info)), file=sys.stderr)
        print('', file=sys.stderr)
        print('An unexpected error occurred!', file=sys.stderr)  # typo fix: "occoured"
        print('', file=sys.stderr)
        print('For details, see %s' % path, file=sys.stderr)
        print('Please consider reporting a bug at https://bugs.launchpad.net/apparmor/', file=sys.stderr)
        print('and attach this file.', file=sys.stderr)
def enable_aa_exception_handler():
    '''Install handle_exception() as the global handler for uncaught
    exceptions (sys.excepthook).'''
    sys.excepthook = handle_exception
| [
"cgitb.Hook",
"traceback.format_exception",
"pyronia.common.error",
"os.fdopen",
"tempfile.mkstemp"
] | [((1206, 1221), 'pyronia.common.error', 'error', (['ex.value'], {}), '(ex.value)\n', (1211, 1221), False, 'from pyronia.common import error\n'), ((1253, 1314), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""apparmor-bugreport-"""', 'suffix': '""".txt"""'}), "(prefix='apparmor-bugreport-', suffix='.txt')\n", (1269, 1314), False, 'import tempfile\n'), ((1330, 1348), 'os.fdopen', 'os.fdopen', (['fd', '"""w"""'], {}), "(fd, 'w')\n", (1339, 1348), False, 'import os\n'), ((1478, 1537), 'cgitb.Hook', 'cgitb.Hook', ([], {'display': '(1)', 'file': 'file', 'format': '"""text"""', 'context': '(10)'}), "(display=1, file=file, format='text', context=10)\n", (1488, 1537), False, 'import cgitb\n'), ((1740, 1777), 'traceback.format_exception', 'traceback.format_exception', (['*exc_info'], {}), '(*exc_info)\n', (1766, 1777), False, 'import traceback\n')] |
import requests
import cool_utils
from typing import Union
cache = {}
cool_utils.JSON.open("config")
class HTTPClient:
    """Small JSON-over-HTTP client for the configured server.

    The base URL is read from the config JSON (key ``server``) via
    cool_utils.  Session credentials set by :meth:`create_session` are stored
    on the class so the classmethod helpers can reuse them.
    """

    # Class-level state shared by the classmethods below.  Bug fix: the
    # original only set these attributes on instances, so ``HTTPClient.BASE``
    # (and friends) raised AttributeError when the classmethods ran.
    username = None
    token = None
    BASE = None

    def __init__(self):
        self.username = None
        self.token = None
        self.BASE = cool_utils.JSON.get_data("server")

    @classmethod
    def _base(cls):
        """Return the server base URL, resolving and caching it lazily."""
        if cls.BASE is None:
            cls.BASE = cool_utils.JSON.get_data("server")
        return cls.BASE

    @classmethod
    def create_request(
        cls,
        type: str,
        route: str,
        payload: dict = None,
        args: dict = None
    ):
        """Issue a GET/POST/DELETE request against *route*.

        ``payload`` is sent as the JSON body; ``args`` (GET only) becomes the
        query string.  POST/DELETE bodies are augmented with the stored
        username/token.  Returns the ``requests`` response, or None for an
        unknown *type*.
        """
        if not route.startswith("/"):
            route = "/" + route
        if type == "GET":
            # Bug fix: iterate dict items (the original unpacked bare keys,
            # which raises ValueError) and tolerate ``args`` being omitted.
            query = "&".join(
                "%s=%s" % (key, value) for key, value in (args or {}).items()
            )
            return requests.get(cls._base() + route + "?" + query, json=payload)
        elif type == "POST" or type == "DELETE":
            # Bug fix: the original crashed when no payload was supplied
            # (create_session()/delete_session() pass none); also copy the
            # dict instead of mutating the caller's argument.
            body = dict(payload) if payload else {}
            body['username'] = cls.username
            if cls.token is not None:
                body['token'] = cls.token
            sender = requests.post if type == "POST" else requests.delete
            return sender(cls._base() + route, json=body)

    @classmethod
    def create_session(cls, username):
        """Open a session for *username* and remember the returned token."""
        cls.username = username
        token = cls.create_request("POST", "create-session").json()['token']
        cls.token = token
        return token
@staticmethod
def delete_session():
return HTTPClient.create_request("DELETE", "delete-session") | [
"requests.post",
"cool_utils.JSON.open",
"requests.get",
"requests.delete",
"cool_utils.JSON.get_data"
] | [((71, 101), 'cool_utils.JSON.open', 'cool_utils.JSON.open', (['"""config"""'], {}), "('config')\n", (91, 101), False, 'import cool_utils\n'), ((220, 254), 'cool_utils.JSON.get_data', 'cool_utils.JSON.get_data', (['"""server"""'], {}), "('server')\n", (244, 254), False, 'import cool_utils\n'), ((775, 838), 'requests.get', 'requests.get', (["(HTTPClient.BASE + route + '?' + arg)"], {'json': 'payload'}), "(HTTPClient.BASE + route + '?' + arg, json=payload)\n", (787, 838), False, 'import requests\n'), ((1160, 1212), 'requests.post', 'requests.post', (['(HTTPClient.BASE + route)'], {'json': 'payload'}), '(HTTPClient.BASE + route, json=payload)\n', (1173, 1212), False, 'import requests\n'), ((1524, 1578), 'requests.delete', 'requests.delete', (['(HTTPClient.BASE + route)'], {'json': 'payload'}), '(HTTPClient.BASE + route, json=payload)\n', (1539, 1578), False, 'import requests\n')] |
''' Python program to create email list from master. '''
import os
import csv


def _add_master_row(dic, label, row):
    """File one master.csv row into *dic*, keyed by email.

    Bug fix: the original detected duplicate emails (and printed a MATCH
    warning) but then overwrote the existing record anyway; the other input
    loops below all skip duplicates, so this now does too.
    """
    email = row[10]
    if email in dic:
        print('EMAIL %s MATCH: ' % label, email)
        return
    dic[email] = {
        'company': row[0],
        'first': row[2],
        'last': row[3],
        'phone': row[9],
    }


def load_master(path):
    """Read master.csv and split rows into (compressor, tool, unsure) dicts.

    Column layout: 0=company, 1=compressor flag ('True'/'False'/other),
    2=first name, 3=last name, 9=phone, 10=email.
    """
    comp_dic, tool_dic, unsure_dic = {}, {}, {}
    with open(path, 'r') as handle:
        reader = csv.reader(handle, delimiter=',')
        next(reader, None)  # skip the header row
        for row in reader:
            # Require an email plus at least a company or a first/last name.
            if row[10] == '':
                continue
            if row[0] == '' and row[2] == '' and row[3] == '':
                continue
            if row[1] == 'True':
                _add_master_row(comp_dic, 'COMP', row)
            elif row[1] == 'False':
                _add_master_row(tool_dic, 'TOOL', row)
            else:
                _add_master_row(unsure_dic, 'UNSURE', row)
    return comp_dic, tool_dic, unsure_dic


def load_lead_data(path, dic, label, stop_on_blank=False):
    """Merge a WebsiteLeadData CSV (email in col 4, full name in col 3).

    Existing emails are skipped with a warning.  When *stop_on_blank* is set,
    reading stops at the first blank email (TOOLS sheet behaviour).
    """
    with open(path, 'r') as handle:
        reader = csv.reader(handle, delimiter=',')
        next(reader, None)  # skip the header row
        for row in reader:
            email = row[4]
            if stop_on_blank and email == '':
                break
            if email in dic:
                print('EMAIL Lead Data %s MATCH: ' % label, email)
                print(email, dic[email])
                continue
            dic[email] = {}
            # Split "First Last" into separate, title-cased fields; single
            # names are left unset, matching the original behaviour.
            name_parts = row[3].split()
            if len(name_parts) > 1:
                dic[email]['last'] = name_parts[1].title()
                dic[email]['first'] = name_parts[0].title()


def load_repeats(path, dic):
    """Merge repeats.csv (email, first, last) into *dic*.

    Reading stops at the 'ABREO' marker row; the '<EMAIL>' placeholder and
    already-known emails are skipped.
    """
    with open(path, 'r') as handle:
        reader = csv.reader(handle, delimiter=',')
        next(reader, None)  # skip the header row
        for row in reader:
            if row[0] == 'ABREO':
                break
            email = row[0]
            if email == '<EMAIL>':
                continue
            if email in dic:
                print('EMAIL Repeat UNSURE MATCH: ', email)
                print(email, dic[email])
                continue
            dic[email] = {'first': row[1], 'last': row[2]}


def write_contacts(path, dic):
    """Write one contact dict to *path* with the standard header row."""
    with open(path, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
        writer.writerow(['Company Name', 'First Name', 'Last Name',
                         'Email', 'Phone'])
        for email, info in dic.items():
            writer.writerow([
                info.get('company', None),
                info.get('first', None),
                info.get('last', None),
                email,
                info.get('phone', None),
            ])
    print('Finished %s' % path)


def main():
    """Build the compressor / tool / unsure email CSVs from the inputs."""
    base = os.getcwd()
    comp_dic, tool_dic, unsure_dic = load_master(
        os.path.join(base, 'master.csv'))
    # Bug fix: the original concatenated the cwd with an absolute-path join
    # result ("pathName += os.path.join(pathName, '/...')"), which only
    # worked by accident.  Build the directory explicitly instead.
    lead_dir = os.path.join(base, 'mailing-lists', 'CSV', 'mailing_only')
    load_lead_data(
        os.path.join(lead_dir, 'WebsiteLeadData-COMPRESSORS.csv'),
        comp_dic, 'COMP')
    load_lead_data(
        os.path.join(lead_dir, 'WebsiteLeadData-TOOLS.csv'),
        tool_dic, 'TOOL', stop_on_blank=True)
    load_repeats(os.path.join(base, 'repeats.csv'), unsure_dic)
    write_contacts('email-comp.csv', comp_dic)
    write_contacts('email-tool.csv', tool_dic)
    write_contacts('email-unsure.csv', unsure_dic)


if __name__ == '__main__':
    main()
"os.path.join",
"csv.writer",
"csv.reader",
"os.getcwd"
] | [((92, 103), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (101, 103), False, 'import os\n'), ((214, 245), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (224, 245), False, 'import csv\n'), ((1533, 1544), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1542, 1544), False, 'import os\n'), ((1557, 1615), 'os.path.join', 'os.path.join', (['pathName', '"""/mailing-lists/CSV/mailing_only/"""'], {}), "(pathName, '/mailing-lists/CSV/mailing_only/')\n", (1569, 1615), False, 'import os\n'), ((1703, 1734), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (1713, 1734), False, 'import csv\n'), ((2182, 2213), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (2192, 2213), False, 'import csv\n'), ((2643, 2654), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2652, 2654), False, 'import os\n'), ((2721, 2752), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (2731, 2752), False, 'import csv\n'), ((110, 147), 'os.path.join', 'os.path.join', (['pathName', '"""/master.csv"""'], {}), "(pathName, '/master.csv')\n", (122, 147), False, 'import os\n'), ((162, 198), 'os.path.join', 'os.path.join', (['pathName', '"""master.csv"""'], {}), "(pathName, 'master.csv')\n", (174, 198), False, 'import os\n'), ((1630, 1687), 'os.path.join', 'os.path.join', (['pathName', '"""WebsiteLeadData-COMPRESSORS.csv"""'], {}), "(pathName, 'WebsiteLeadData-COMPRESSORS.csv')\n", (1642, 1687), False, 'import os\n'), ((2115, 2166), 'os.path.join', 'os.path.join', (['pathName', '"""WebsiteLeadData-TOOLS.csv"""'], {}), "(pathName, 'WebsiteLeadData-TOOLS.csv')\n", (2127, 2166), False, 'import os\n'), ((2668, 2705), 'os.path.join', 'os.path.join', (['pathName', '"""repeats.csv"""'], {}), "(pathName, 'repeats.csv')\n", (2680, 2705), False, 'import os\n'), ((3254, 3296), 'csv.writer', 'csv.writer', (['csvfile'], {'quoting': 'csv.QUOTE_ALL'}), 
'(csvfile, quoting=csv.QUOTE_ALL)\n', (3264, 3296), False, 'import csv\n'), ((3788, 3830), 'csv.writer', 'csv.writer', (['csvfile'], {'quoting': 'csv.QUOTE_ALL'}), '(csvfile, quoting=csv.QUOTE_ALL)\n', (3798, 3830), False, 'import csv\n'), ((4325, 4367), 'csv.writer', 'csv.writer', (['csvfile'], {'quoting': 'csv.QUOTE_ALL'}), '(csvfile, quoting=csv.QUOTE_ALL)\n', (4335, 4367), False, 'import csv\n')] |
from argparse import ArgumentParser
from os import listdir
from os.path import isfile, isdir
from zmigrate.config import load as load_config
from zmigrate.range import Range
from zmigrate.dir import Dir
from zmigrate.drivers import SUPPORTED_DRIVERS
def str_to_bool(v):
    """Coerce *v* to a bool; accepts real bools and yes/y/no/n strings."""
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'y'):
        return True
    if lowered in ('no', 'n'):
        return False
    raise Exception('Invalid value: %s' % v)
def file_validator(value):
    """argparse-style validator: return *value* if it names an existing file."""
    if not isfile(value):
        raise Exception("%s doesn't exist" % value)
    return value
def dir_validator(value):
    """argparse-style validator: return *value* if it names a directory."""
    if not isdir(value):
        raise Exception("%s isn't a valid directory path" % value)
    return value
def main():
    """Parse CLI options (with config-file defaults), validate the requested
    revision range and run the migrations in the requested direction."""
    # Default config file name is config.json, so it needs not be specified in our case.
    cfg = load_config()
    parser = ArgumentParser()
    parser.add_argument(
        '-d',
        '--direction',
        default=cfg.direction,
        type=str,
        choices=('up', 'down')
    )
    # --seed / --skip-missing accept bare flags (const=True) or yes/no values.
    parser.add_argument(
        '-s',
        '--seed',
        default=cfg.seed,
        nargs='?',
        const=True,
        type=str_to_bool
    )
    parser.add_argument(
        '-S',
        '--skip-missing',
        default=cfg.skip_missing,
        nargs='?',
        const=True,
        type=str_to_bool
    )
    parser.add_argument(
        '-m',
        '--migration-dir',
        default=cfg.migration_dir,
        type=dir_validator
    )
    parser.add_argument(
        '--driver',
        default=cfg.driver,
        choices=SUPPORTED_DRIVERS.keys()
    )
    # Database connection parameters.
    parser.add_argument(
        '-u',
        '--user',
        default=cfg.user,
        type=str
    )
    parser.add_argument(
        '-p',
        '--password',
        default=cfg.password,
        type=str
    )
    parser.add_argument(
        '-H',
        '--host',
        default=cfg.host,
        type=str
    )
    parser.add_argument(
        '-D',
        '--database',
        default=cfg.database,
        type=str
    )
    parser.add_argument(
        '-r',
        '--range',
        default=Range(),
        type=Range
    )
    args = parser.parse_args()
    # Sanity-check that the range runs in the same direction as the
    # migration (first <= last for 'up', first >= last for 'down').
    if args.range.first and args.range.last:
        if args.direction == 'up' and args.range.last.toInt() < args.range.first.toInt():
            raise Exception('Invalid range: %s > %s' % (args.range.first, args.range.last))
        if args.direction == 'down' and args.range.first.toInt() < args.range.last.toInt():
            raise Exception('Invalid range: %s < %s' % (args.range.first, args.range.last))
    # Apply directories in ascending order for 'up', descending for 'down'.
    dirs = sorted([Dir(dir) for dir in listdir(args.migration_dir)], key=lambda x: x.toInt(), reverse=args.direction == 'down')
    migrate(args, dirs)
def upgrade(args, dir, db):
    """Apply one migration directory (and optional seed) if not yet applied.

    Skips directories outside the requested range or already recorded in the
    ``migrations`` table; echoes the directory's readme, runs its scripts and
    records the revision on success.  Raises if a script is missing and
    ``--skip-missing`` was not given.
    """
    scripts = ['up.sql']
    if args.seed:
        scripts.append('seed.sql')
    # Honour the requested revision range (inclusive bounds).
    if args.range.first and dir.toInt() < args.range.first.toInt():
        return
    if args.range.last and dir.toInt() > args.range.last.toInt():
        return
    migInfo = db.get_rows('migrations', '*', 1, revision="'%s'" % dir)
    if len(migInfo) > 0:
        print('%s is already migrated. Skipping' % dir)
        return
    print('Migrating', dir)
    readmePath = '%s/%s/readme' % (args.migration_dir, dir)
    if isfile(readmePath):
        # Echo the migration's readme so the operator sees what is applied.
        # Bug fix: close the file handle instead of leaking it.
        with open(readmePath) as readme:
            for line in readme.read().strip().split('\n'):
                line = line.strip()
                if line:
                    print('|-', line)
    for script in scripts:
        scriptPath = '%s/%s/%s' % (args.migration_dir, dir, script)
        if not isfile(scriptPath):
            if args.skip_missing:
                continue
            raise Exception('Missing %s' % scriptPath)
        print('Executing', scriptPath)
        # Bug fix: close the script file promptly instead of leaking it.
        with open(scriptPath) as fh:
            db.execute_script(fh.read().strip())
    db.insert_row('migrations', revision="'%s'" % dir)
def downgrade(args, dir, db):
    """Revert a single migration by running its down.sql and removing the
    revision row from `migrations`.

    Skips the directory when it falls outside the requested --range or was
    never applied. The range comparisons are inverted relative to upgrade()
    because downgrades walk the revisions in descending order.
    """
    if args.range.first and dir.toInt() > args.range.first.toInt():
        return
    if args.range.last and dir.toInt() < args.range.last.toInt():
        return
    migInfo = db.get_rows('migrations', '*', 1, revision="'%s'" % dir)
    if not migInfo:
        print('%s not migrated. No downgrading needed' % dir)
        return
    print('Downgrading', dir)
    scriptPath = '%s/%s/down.sql' % (args.migration_dir, dir)
    if not isfile(scriptPath):
        if not args.skip_missing:
            raise Exception('Missing %s' % scriptPath)
    else:
        print('Executing', scriptPath)
        with open(scriptPath) as fh:  # close the handle instead of leaking it
            db.execute_script(fh.read().strip())
    # Intentional: the revision row is removed even when the script was
    # skipped via --skip-missing, so the revision is no longer considered
    # applied.
    db.delete_row("migrations", "revision = '%s'" % dir)
def migrate(args, dirs):
    """Run the requested migration direction over every migration directory.

    Instantiates the configured database driver, ensures the bookkeeping
    table exists, then applies upgrade() or downgrade() to each directory
    in the given order.
    """
    driver_cls = SUPPORTED_DRIVERS[args.driver]
    db = driver_cls(args)
    # Bookkeeping table: one row per applied revision. Kept minimal so new
    # columns can be added later without breaking older installations.
    migration_columns = [
        {
            'name': 'id',
            'type': 'SERIAL',
            'constraints': 'PRIMARY KEY'
        },
        {
            'name': 'revision',
            'type': 'TEXT',
            'constraints': 'NOT NULL UNIQUE',
        }
    ]
    db.create_table('migrations', migration_columns)
    # Dispatch once on the direction, then walk the directories.
    step = upgrade if args.direction == 'up' else downgrade
    for dir in dirs:
        step(args, dir, db)
| [
"os.listdir",
"argparse.ArgumentParser",
"zmigrate.dir.Dir",
"os.path.isfile",
"zmigrate.config.load",
"zmigrate.range.Range",
"os.path.isdir",
"zmigrate.drivers.SUPPORTED_DRIVERS.keys"
] | [((505, 518), 'os.path.isfile', 'isfile', (['value'], {}), '(value)\n', (511, 518), False, 'from os.path import isfile, isdir\n'), ((623, 635), 'os.path.isdir', 'isdir', (['value'], {}), '(value)\n', (628, 635), False, 'from os.path import isfile, isdir\n'), ((833, 846), 'zmigrate.config.load', 'load_config', ([], {}), '()\n', (844, 846), True, 'from zmigrate.config import load as load_config\n'), ((860, 876), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (874, 876), False, 'from argparse import ArgumentParser\n'), ((3279, 3297), 'os.path.isfile', 'isfile', (['readmePath'], {}), '(readmePath)\n', (3285, 3297), False, 'from os.path import isfile, isdir\n'), ((4352, 4370), 'os.path.isfile', 'isfile', (['scriptPath'], {}), '(scriptPath)\n', (4358, 4370), False, 'from os.path import isfile, isdir\n'), ((1570, 1594), 'zmigrate.drivers.SUPPORTED_DRIVERS.keys', 'SUPPORTED_DRIVERS.keys', ([], {}), '()\n', (1592, 1594), False, 'from zmigrate.drivers import SUPPORTED_DRIVERS\n'), ((2109, 2116), 'zmigrate.range.Range', 'Range', ([], {}), '()\n', (2114, 2116), False, 'from zmigrate.range import Range\n'), ((2606, 2614), 'zmigrate.dir.Dir', 'Dir', (['dir'], {}), '(dir)\n', (2609, 2614), False, 'from zmigrate.dir import Dir\n'), ((3592, 3610), 'os.path.isfile', 'isfile', (['scriptPath'], {}), '(scriptPath)\n', (3598, 3610), False, 'from os.path import isfile, isdir\n'), ((2626, 2653), 'os.listdir', 'listdir', (['args.migration_dir'], {}), '(args.migration_dir)\n', (2633, 2653), False, 'from os import listdir\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName : core_recorder.py
# @Time : 2020/9/25 12:29
# @Author : 陈嘉昕
# @Demand : 声音复杂记录
import threading
import logging
import wave
from pyaudio import PyAudio, paInt16
import numpy as np
import queue
import time
class CoreRecorder(threading.Thread):
    """Background thread that records mono 16-bit audio from the default
    input device and pushes individual samples into a thread-safe queue.

    Attributes:
        buffer: queue.Queue of int16 samples, filled by run().
        start_time: wall-clock time when recording started (set in run()).
    """

    def __init__(self,
                 whole_time=None,  # How much time to the end (None = until stop())
                 sr=20000,  # Sample rate
                 batch_num=600,  # Batch size (how much data for a single fetch)
                 frames_per_buffer=600
                 ):
        threading.Thread.__init__(self)
        self.time = whole_time
        self.sr = sr
        self.batch_num = batch_num
        self.data_alter = threading.Lock()
        self.frames_per_buffer = frames_per_buffer
        self.logger = logging.getLogger(__name__ + '.CoreRecorder')
        self.buffer = queue.Queue()
        self.start_time = None
        # Cleared by stop() to make the run() loop exit.
        self.__running = threading.Event()
        self.__running.set()

    def run(self):
        """Record audio until the time budget is spent or stop() is called."""
        self.logger.debug("Start to recording...")
        self.logger.debug(" Time = %s" % self.time)
        self.logger.debug(" Sample Rate = %s" % self.sr)
        self.start_time = time.time()
        pa = PyAudio()
        stream = pa.open(format=paInt16, channels=1, rate=self.sr, input=True,
                         frames_per_buffer=self.frames_per_buffer)
        try:
            count = 0
            if self.time is None:
                total_count = 1e10  # effectively "run until stop()"
            else:
                total_count = self.time * self.sr / self.batch_num
            while count < total_count and self.__running.is_set():
                datawav = stream.read(self.batch_num, exception_on_overflow=True)
                # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
                datause = np.frombuffer(datawav, dtype=np.short)
                for w in datause:
                    self.buffer.put(w)
                count += 1
        finally:
            # Release the audio device even if the read loop raises.
            stream.stop_stream()
            stream.close()
            pa.terminate()

    def save_wave_file(self, filename, data):
        """Write an iterable of raw sample byte-strings to a mono 16-bit WAV file."""
        with wave.open(filename, 'wb') as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)
            wf.setframerate(self.sr)
            wf.writeframes(b"".join(data))

    def stop(self):
        """Request the recording loop to exit after the current batch."""
        self.__running.clear()
| [
"logging.getLogger",
"threading.Thread.__init__",
"wave.open",
"threading.Lock",
"numpy.fromstring",
"threading.Event",
"queue.Queue",
"pyaudio.PyAudio",
"time.time"
] | [((591, 622), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (616, 622), False, 'import threading\n'), ((736, 752), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (750, 752), False, 'import threading\n'), ((826, 871), 'logging.getLogger', 'logging.getLogger', (["(__name__ + '.CoreRecorder')"], {}), "(__name__ + '.CoreRecorder')\n", (843, 871), False, 'import logging\n'), ((894, 907), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (905, 907), False, 'import queue\n'), ((964, 981), 'threading.Event', 'threading.Event', ([], {}), '()\n', (979, 981), False, 'import threading\n'), ((1219, 1230), 'time.time', 'time.time', ([], {}), '()\n', (1228, 1230), False, 'import time\n'), ((1244, 1253), 'pyaudio.PyAudio', 'PyAudio', ([], {}), '()\n', (1251, 1253), False, 'from pyaudio import PyAudio, paInt16\n'), ((1923, 1948), 'wave.open', 'wave.open', (['filename', '"""wb"""'], {}), "(filename, 'wb')\n", (1932, 1948), False, 'import wave\n'), ((1713, 1751), 'numpy.fromstring', 'np.fromstring', (['datawav'], {'dtype': 'np.short'}), '(datawav, dtype=np.short)\n', (1726, 1751), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# Routes.py file
##### Imports #####
from flask import Flask, render_template, request
import requests, random
from application import app, db
from application.models import Character
from sqlalchemy import desc
##### Routes #####
@app.route('/', methods=['GET','POST'])
def index():
    """Roll a random character from the race/class/weapon services, persist
    it, and render it together with the three most recent characters.
    """
    # Service hostnames below are for the Docker network; when running
    # outside Docker use localhost with ports 5001-5003 instead.
    race = requests.get("http://service2:5001/race")
    char_class = requests.get("http://service3:5002/class")
    weapon = requests.post(
        "http://service4:5003/weapon",
        json={"character_race": race.text, "character_class": char_class.text},
    )
    rolled = Character(
        character_race=race.text,
        character_class=char_class.text,
        weapon=weapon.text,
    )
    db.session.add(rolled)
    db.session.commit()
    recent = Character.query.order_by(desc("Id")).limit(3).all()
    return render_template(
        "index.html",
        character_race=race.text,
        character_class=char_class.text,
        weapon=weapon.text,
        old_characters=recent,
    )
"flask.render_template",
"requests.post",
"application.db.session.add",
"application.db.session.commit",
"requests.get",
"sqlalchemy.desc",
"application.app.route",
"application.models.Character"
] | [((277, 316), 'application.app.route', 'app.route', (['"""/"""'], {'methods': "['GET', 'POST']"}), "('/', methods=['GET', 'POST'])\n", (286, 316), False, 'from application import app, db\n'), ((756, 797), 'requests.get', 'requests.get', (['"""http://service2:5001/race"""'], {}), "('http://service2:5001/race')\n", (768, 797), False, 'import requests, random\n'), ((829, 871), 'requests.get', 'requests.get', (['"""http://service3:5002/class"""'], {}), "('http://service3:5002/class')\n", (841, 871), False, 'import requests, random\n'), ((894, 1051), 'requests.post', 'requests.post', (['"""http://service4:5003/weapon"""'], {'json': "{'character_race': character_race_response.text, 'character_class':\n character_class_response.text}"}), "('http://service4:5003/weapon', json={'character_race':\n character_race_response.text, 'character_class':\n character_class_response.text})\n", (907, 1051), False, 'import requests, random\n'), ((1062, 1197), 'application.models.Character', 'Character', ([], {'character_race': 'character_race_response.text', 'character_class': 'character_class_response.text', 'weapon': 'weapon_response.text'}), '(character_race=character_race_response.text, character_class=\n character_class_response.text, weapon=weapon_response.text)\n', (1071, 1197), False, 'from application.models import Character\n'), ((1195, 1224), 'application.db.session.add', 'db.session.add', (['new_character'], {}), '(new_character)\n', (1209, 1224), False, 'from application import app, db\n'), ((1229, 1248), 'application.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1246, 1248), False, 'from application import app, db\n'), ((1335, 1525), 'flask.render_template', 'render_template', (['"""index.html"""'], {'character_race': 'character_race_response.text', 'character_class': 'character_class_response.text', 'weapon': 'weapon_response.text', 'old_characters': 'old_characters'}), "('index.html', character_race=character_race_response.text,\n 
character_class=character_class_response.text, weapon=weapon_response.\n text, old_characters=old_characters)\n", (1350, 1525), False, 'from flask import Flask, render_template, request\n'), ((1296, 1306), 'sqlalchemy.desc', 'desc', (['"""Id"""'], {}), "('Id')\n", (1300, 1306), False, 'from sqlalchemy import desc\n')] |
# import pandas and matplotlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from connection import setConnection
def liabilityasset():
    """Plot yearly income against yearly expenditure as a grouped bar chart.

    Reads (label, income, expenditure) rows from the 'profitloss' view;
    NULL values are treated as 0.
    """
    rows = setConnection(["profitloss"])
    labels = []
    income_per_year = []
    expenditure_per_year = []
    for row in rows:
        # Replace NULLs with 0 so the int() conversions below cannot fail.
        record = [0 if value is None else value for value in row]
        labels.append(record[0])
        income_per_year.append(int(record[1]))
        expenditure_per_year.append(int(record[2]))
    positions = np.arange(len(labels))  # the label locations
    width = 0.35  # the width of the bars
    fig, ax = plt.subplots()
    rects1 = ax.bar(positions - width / 2, income_per_year, width, label='Yearly Income')
    rects2 = ax.bar(positions + width / 2, expenditure_per_year, width, label='Yearly Expenditure')
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel('Amount in Rs')
    ax.set_title('LIABILITY OR ASSET', bbox={'facecolor': '0.8', 'pad': 5})
    ax.set_xticks(positions)
    ax.set_xticklabels(labels)
    ax.legend()

    def autolabel(rects):
        """Attach a text label above each bar in *rects*, displaying its height."""
        for rect in rects:
            height = rect.get_height()
            ax.annotate('{}'.format(height),
                        xy=(rect.get_x() + rect.get_width() / 2, height),
                        xytext=(0, 3),  # 3 points vertical offset
                        textcoords="offset points",
                        ha='center', va='bottom')

    autolabel(rects1)
    autolabel(rects2)
    fig.tight_layout()
    plt.show()
def Maintenance():
    """Render a pie chart of the cost share per maintenance category."""
    rows = setConnection(["maintenanceCategory"])
    categories = []
    amounts = []
    # First column is the category name, second is its total cost.
    for row in rows:
        categories.append(row[0])
        amounts.append(row[1])
    colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b"]
    plt.pie(amounts, labels=categories, colors=colors, autopct='%1.1f%%', shadow=True, startangle=90)
    plt.title("MAINTENANCE", bbox={'facecolor': '0.8', 'pad': 5})
    plt.show()
def Expenditure():
    """Render a grouped bar chart of the four expense amounts per record."""
    rows = setConnection(["expenseView"])
    index = []
    amounts = []
    # Single pass over the result set: the first column is the row label,
    # the remaining columns are the expense amounts.
    for row in rows:
        values = list(row)
        index.append(values[0])
        amounts.append([int(value) for value in values[1:]])
    df = pd.DataFrame(amounts, columns = ['Insurance_Amt','Tax_Amt','Permit_Amt','Maintenance_Amt'], index=index)
    df.plot.bar()
    plt.show()
"matplotlib.pyplot.pie",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"connection.setConnection",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((218, 247), 'connection.setConnection', 'setConnection', (["['profitloss']"], {}), "(['profitloss'])\n", (231, 247), False, 'from connection import setConnection\n'), ((757, 771), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (769, 771), True, 'import matplotlib.pyplot as plt\n'), ((1739, 1749), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1747, 1749), True, 'import matplotlib.pyplot as plt\n'), ((1778, 1816), 'connection.setConnection', 'setConnection', (["['maintenanceCategory']"], {}), "(['maintenanceCategory'])\n", (1791, 1816), False, 'from connection import setConnection\n'), ((2014, 2115), 'matplotlib.pyplot.pie', 'plt.pie', (['amount'], {'labels': 'Maintenance', 'colors': 'colors', 'autopct': '"""%1.1f%%"""', 'shadow': '(True)', 'startangle': '(90)'}), "(amount, labels=Maintenance, colors=colors, autopct='%1.1f%%',\n shadow=True, startangle=90)\n", (2021, 2115), True, 'import matplotlib.pyplot as plt\n'), ((2115, 2176), 'matplotlib.pyplot.title', 'plt.title', (['"""MAINTENANCE"""'], {'bbox': "{'facecolor': '0.8', 'pad': 5}"}), "('MAINTENANCE', bbox={'facecolor': '0.8', 'pad': 5})\n", (2124, 2176), True, 'import matplotlib.pyplot as plt\n'), ((2179, 2189), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2187, 2189), True, 'import matplotlib.pyplot as plt\n'), ((2232, 2262), 'connection.setConnection', 'setConnection', (["['expenseView']"], {}), "(['expenseView'])\n", (2245, 2262), False, 'from connection import setConnection\n'), ((2693, 2806), 'pandas.DataFrame', 'pd.DataFrame', (['dataListNum'], {'columns': "['Insurance_Amt', 'Tax_Amt', 'Permit_Amt', 'Maintenance_Amt']", 'index': 'index'}), "(dataListNum, columns=['Insurance_Amt', 'Tax_Amt', 'Permit_Amt',\n 'Maintenance_Amt'], index=index)\n", (2705, 2806), True, 'import pandas as pd\n'), ((2835, 2845), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2843, 2845), True, 'import matplotlib.pyplot as plt\n')] |
from datetime import timedelta
import uuid
import factory
from django.utils import timezone
from assopy.tests.factories.user import AssopyUserFactory
import conference.models
class CreditCardOrderFactory(factory.django.DjangoModelFactory):
    """Factory for assopy.Order instances paid by credit card."""

    class Meta:
        model = 'assopy.Order'

    # Each order is owned by a freshly created AssopyUser.
    user = factory.SubFactory(AssopyUserFactory)
    payment = 'cc'  # cc because stripe is a credit card
    # NOTE(review): mutable class-level default — the same list object is the
    # declared default for every order built here; confirm this is intended.
    items = []
class CouponFactory(factory.django.DjangoModelFactory):
    """Factory for assopy.Coupon: a 10% discount code valid for one day."""

    class Meta:
        model = 'assopy.Coupon'

    # Cycle through the existing Conference objects in creation order.
    conference = factory.Iterator(conference.models.Conference.objects.all())
    value = '10%'
    # Random hex string so generated coupon codes never collide.
    code = factory.LazyAttribute(lambda _: uuid.uuid4().hex)
    # Valid from today until tomorrow; dates are evaluated at build time.
    start_validity = factory.LazyAttribute(lambda _: timezone.now().date())
    end_validity = factory.LazyAttribute(lambda _: timezone.now().date() + timedelta(days=1))

    @factory.post_generation
    def fares(self, create, extracted, **kwargs):
        # Post-generation hook: restrict the coupon to the fares passed in
        # via CouponFactory(fares=[...]); with no explicit fares, attach
        # every existing Fare.
        if not create:
            return
        if extracted:
            for fare in extracted:
                self.fares.add(fare)
        else:
            self.fares.add(*conference.models.Fare.objects.all())
| [
"django.utils.timezone.now",
"factory.SubFactory",
"datetime.timedelta",
"uuid.uuid4"
] | [((304, 341), 'factory.SubFactory', 'factory.SubFactory', (['AssopyUserFactory'], {}), '(AssopyUserFactory)\n', (322, 341), False, 'import factory\n'), ((660, 672), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (670, 672), False, 'import uuid\n'), ((829, 846), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (838, 846), False, 'from datetime import timedelta\n'), ((731, 745), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (743, 745), False, 'from django.utils import timezone\n'), ((805, 819), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (817, 819), False, 'from django.utils import timezone\n')] |
from pygame.sprite import ( Sprite )
from src.entitys.mushroom import ( Mushroom )
from math import sin
class LootBlock( Sprite ):
    """Loot-block sprite: cycles an idle animation while 'normal', then shows
    its spent frame, bounces on a sine arc, and spawns its loot (currently
    only mushrooms) once it becomes 'activated'."""

    def __init__( self, group_sprite, sheet, position, loot ):
        self.groups = group_sprite
        Sprite.__init__( self, group_sprite )
        # ### STRING VARIABLES ###
        self.id = 'lootblock'
        # 'normal' until the block is hit, then 'activated'.
        self.state = 'normal'
        # loot is a (kind, color) pair, e.g. ('mushroom', 'red').
        self.what_loot, self.color = loot
        self.mushrooms = []  # Mushroom instances spawned by this block
        # ### INT/FLOAT VARIABLES ###
        self.frame = 0  # fractional animation frame counter
        self.step = 0
        self.dt = 0  # last delta-time received in update()
        self.t = 0  # time parameter driving the bounce animation
        self.image = sheet[ 0 ].subsurface((0, 0), (16, 16))
        self.sheet = sheet
        self.animation()
        # ### RECT VARIABLES ###
        self.rect = self.image.get_rect()
        self.rect.x = position[ 0 ]
        self.rect.y = position[ 1 ]
        self.y_init = position[ 1 ]  # resting y, baseline for the bounce
        self.mushroom_x = position[ 0 ]
        self.mushroom_y = position[ 1 ] + 17  # loot appears just below the block
        self.spawn = False  # guards against spawning the loot twice

    def animation( self ):
        # 'normal': cycle through the first four 16x16 frames on the sheet.
        if self.state == 'normal':
            self.image = self.sheet[ 0 ].subsurface( (int( self.frame%4 )*16, 0), (16, 16) )
            self.frame += (.1*self.dt)
        # 'activated': show the spent frame (index 4) and bounce on sin(t).
        elif self.state == 'activated':
            self.t += (.1*self.dt)
            self.image = self.sheet[ 0 ].subsurface( (4*16, 0), (16, 16) )
            if int(self.t**2) + self.y_init != self.y_init:
                self.rect.y = 10*sin(self.t) + self.y_init

    def update( self, dt ):
        self.dt = dt
        self.animation()
        # Spawn the mushroom exactly once, on the first update after activation.
        if all([ self.state == 'activated', self.what_loot == 'mushroom', self.spawn is False ]):
            self.mushrooms.append(Mushroom( self.groups, self.sheet[ 1 ], [ self.mushroom_x, self.mushroom_y ], 'red' if self.color == 'red' else 'green' ))
            self.spawn = True
| [
"pygame.sprite.Sprite.__init__",
"math.sin",
"src.entitys.mushroom.Mushroom"
] | [((242, 277), 'pygame.sprite.Sprite.__init__', 'Sprite.__init__', (['self', 'group_sprite'], {}), '(self, group_sprite)\n', (257, 277), False, 'from pygame.sprite import Sprite\n'), ((1653, 1773), 'src.entitys.mushroom.Mushroom', 'Mushroom', (['self.groups', 'self.sheet[1]', '[self.mushroom_x, self.mushroom_y]', "('red' if self.color == 'red' else 'green')"], {}), "(self.groups, self.sheet[1], [self.mushroom_x, self.mushroom_y], \n 'red' if self.color == 'red' else 'green')\n", (1661, 1773), False, 'from src.entitys.mushroom import Mushroom\n'), ((1418, 1429), 'math.sin', 'sin', (['self.t'], {}), '(self.t)\n', (1421, 1429), False, 'from math import sin\n')] |
import logging
import os
from insights.core import archives
from insights.core.archives import COMPRESSION_TYPES
from insights.core.context import ClusterArchiveContext, JDRContext, HostArchiveContext, SosArchiveContext
log = logging.getLogger(__name__)
def get_all_files(path):
    """Return the regular (non-symlink) files beneath *path*."""
    return [entry for entry in archives.get_all_files(path)
            if os.path.isfile(entry) and not os.path.islink(entry)]
def determine_context(common_path, files):
    """Pick the archive context class matching the extracted content.

    Falls back to HostArchiveContext when no marker path is recognised.
    """
    # Compressed members directly under the root mean an archive of archives.
    if any(entry.endswith(COMPRESSION_TYPES) for entry in os.listdir(common_path)):
        return ClusterArchiveContext
    for name in files:
        if "insights_commands" in name:
            return HostArchiveContext
        if "sos_commands" in name:
            return SosArchiveContext
        if "JBOSS_HOME" in name:
            return JDRContext
    return HostArchiveContext
def create_context(path, context=None):
    """Build an archive context for the extracted archive rooted at *path*.

    When *context* is not supplied it is inferred from the archive layout.
    """
    all_files = get_all_files(path)
    # dirname() trims any partial final path component left by commonprefix(),
    # which operates on characters rather than path segments.
    common_path = os.path.dirname(os.path.commonprefix(all_files))
    ctx_cls = context or determine_context(common_path, all_files)
    return ctx_cls(common_path, all_files=all_files)
| [
"logging.getLogger",
"os.listdir",
"os.path.isfile",
"insights.core.archives.get_all_files",
"os.path.commonprefix",
"os.path.islink"
] | [((228, 255), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (245, 255), False, 'import logging\n'), ((315, 343), 'insights.core.archives.get_all_files', 'archives.get_all_files', (['path'], {}), '(path)\n', (337, 343), False, 'from insights.core import archives\n'), ((984, 1015), 'os.path.commonprefix', 'os.path.commonprefix', (['all_files'], {}), '(all_files)\n', (1004, 1015), False, 'import os\n'), ((356, 373), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (370, 373), False, 'import os\n'), ((382, 399), 'os.path.islink', 'os.path.islink', (['f'], {}), '(f)\n', (396, 399), False, 'import os\n'), ((549, 572), 'os.listdir', 'os.listdir', (['common_path'], {}), '(common_path)\n', (559, 572), False, 'import os\n')] |
import featext.feature_processing
from featext.mfcc import Mfcc
import numpy as np
import system.gmm_em as gmm
import system.ivector as ivector
import system.backend as backend
# UPDATE THIS FOLDER (folder to spoken digit dataset recordings):
data_folder = '/home/ville/files/recordings/'
# Recordings are expected to be named '<digit>_<speaker>_<session>.wav'
# (see the filename format string in the extraction loop below).
speakers = ['jackson', 'nicolas', 'theo']
n_speakers = len(speakers)
n_digits = 10 # 0 - 9
n_sessions = 50 # 0 - 49
#### Feature extraction:
# Let us train the spoken digit recognition system with speakers Jackson and Nicolas and test with Theo.
mfcc = Mfcc()
# MFCC front-end configuration: 25 ms frames with 10 ms hop, base
# coefficients + energy + deltas + double deltas, CMVN enabled.
mfcc.frame_duration = 0.025
mfcc.frame_overlap_duration = 0.01
mfcc.sad_threshold = 60
mfcc.include_deltas = 1
mfcc.include_double_deltas = 1
mfcc.include_base_coeffs = 1
mfcc.include_energy = 1
mfcc.n_coeffs = 20
mfcc.rasta_coeff = 0
mfcc.pre_emphasis = 0
mfcc.cmvn = 1
mfcc.initialize()
# all_features[speaker, digit, session] holds one feature matrix per recording.
all_features = np.empty((n_speakers, n_digits, n_sessions), dtype=object)
for speaker in range(n_speakers):
    for digit in range(n_digits):
        for session in range(n_sessions):
            filename = '{}{}_{}_{}.wav'.format(data_folder, digit, speakers[speaker], session)
            all_features[speaker, digit, session] = featext.feature_processing.extract_features_from_file(filename, mfcc)
feature_dim = all_features[0, 0, 0].shape[0]
#### Train GMM for every digit:
n_components = 64
digit_models = []
# One GMM per digit, trained on the first two speakers (Jackson, Nicolas).
for digit in range(n_digits):
    model = gmm.GMM(ndim=feature_dim, nmix=n_components, ds_factor=1, final_niter=10, nworkers=2)
    model.fit(np.reshape(all_features[0:2, digit, :], (-1,)))
    digit_models.append(model)
### Scoring (Based on GMM log likelihoods):
# Test on the held-out speaker (index 2, Theo): classify each utterance as
# the digit whose GMM yields the highest mean frame log-likelihood.
test_features = np.reshape(all_features[2, :, :], (-1))
n_tests = test_features.size
true_labels = np.repeat(np.arange(n_digits), n_sessions)
scores = np.zeros((n_digits, n_tests))
for test_segment in range(n_tests):
    for digit in range(n_digits):
        scores[digit, test_segment] = np.mean(digit_models[digit].compute_log_lik(test_features[test_segment]))
classifications = np.argmax(scores, axis=0)
n_correct = sum(classifications == true_labels)
print('Correct classifications: {} / {} ({:.1f} %)\n'.format(n_correct, n_tests, n_correct / n_tests * 100))
# EXERCISE: Implement GMM-based scoring with universal background model (UBM)
#### Universal background model (UBM) training:
# The UBM is a single GMM trained on all digits of the training speakers.
ubm = gmm.GMM(ndim=feature_dim, nmix=n_components, ds_factor=1, final_niter=10, nworkers=2)
ubm.fit(np.reshape(all_features[0:2, :, :], -1))
#### GMM adaptation
# MAP-adapt the UBM to each digit's training data.
relevance_factor = 1
#digit_models = np.empty((n_digits,), dtype=object)
digit_models = []
for i in range(n_digits):
    digit_models.append(ubm.adapt(np.reshape(all_features[0:2, i, :], (-1,)), relevance_factor))
#### Scoring trials (all test files vs. all models):
# Same evaluation protocol as above, now with the adapted models.
test_features = np.reshape(all_features[2, :, :], (-1))
n_tests = test_features.size
true_labels = np.repeat(np.arange(n_digits), n_sessions)
scores = np.zeros((n_digits, n_tests))
for test_segment in range(n_tests):
    for digit in range(n_digits):
        scores[digit, test_segment] = np.mean(digit_models[digit].compute_log_lik(test_features[test_segment]))
classifications = np.argmax(scores, axis=0)
n_correct = sum(classifications == true_labels)
print('Correct classifications: {} / {} ({:.1f} %)\n'.format(n_correct, n_tests, n_correct / n_tests * 100))
###### I-vector / PLDA system
#### Sufficient statistics (Baum-Welch statistics) extraction:
# N, F are the zeroth and centered first-order statistics w.r.t. the UBM.
all_stats = np.empty((n_speakers, n_digits, n_sessions), dtype=object)
for speaker in range(n_speakers):
    for digit in range(n_digits):
        for session in range(n_sessions):
            N, F = ubm.compute_centered_stats(all_features[speaker, digit, session])
            all_stats[speaker, digit, session] = (N, F)
#### Total variability matrix training:
ivector_dim = 50;
tMatrix = ivector.TMatrix(ivector_dim, feature_dim, n_components, niter=5, nworkers=2)
tMatrix.train(np.reshape(all_stats[0:2, :, :], (-1,)), ubm)
#### I-vector extraction:
extractor = ivector.Ivector(ivector_dim, feature_dim, n_components)
extractor.initialize(ubm, tMatrix.Tm)
ivectors = np.empty((ivector_dim, n_speakers, n_digits, n_sessions))
for speaker in range(n_speakers):
    for digit in range(n_digits):
        for session in range(n_sessions):
            ivectors[:, speaker, digit, session] = extractor.extract(*all_stats[speaker, digit, session])
#### I-vector processing:
# Training vectors come from speakers 0-1; each digit model is the mean of
# that digit's training i-vectors (averaged over speaker and session axes).
training_vectors = np.reshape(ivectors[:, 0:2, :, :], (ivector_dim, -1), order='F')
training_labels = np.tile(np.arange(n_digits).repeat(2), n_sessions)
model_vectors = np.reshape(np.mean(ivectors[:, 0:2, :, :], (1, 3)), (ivector_dim, -1), order='F')
test_vectors = np.reshape(ivectors[:, 2, :, :], (ivector_dim, -1), order='F')
true_labels = np.tile(np.arange(n_digits), n_sessions)
# Center and whiten all i-vectors using training-set statistics only.
center = backend.compute_mean(training_vectors)
w = backend.calc_white_mat(np.cov(training_vectors))
training_vectors = backend.preprocess(training_vectors, center, w)
model_vectors = backend.preprocess(model_vectors, center, w)
test_vectors = backend.preprocess(test_vectors, center, w)
#### PLDA training:
#### (probabilistic linear discriminant analysis)
latent_dim = 40;
plda = backend.GPLDA(ivector_dim, latent_dim, niter=20)
plda.train_em(training_vectors, training_labels)
#### Scoring:
scores = plda.score_trials(model_vectors, test_vectors)
# scores = backend.cosine_similarity(modelVectors, testVectors)
classifications = np.argmax(scores, axis=0)
n_correct = sum(classifications == true_labels)
print('Correct classifications: {} / {} ({:.1f} %)\n'.format(n_correct, n_tests, n_correct / n_tests * 100))
| [
"featext.mfcc.Mfcc",
"numpy.mean",
"numpy.reshape",
"system.ivector.TMatrix",
"system.backend.GPLDA",
"system.gmm_em.GMM",
"numpy.argmax",
"system.backend.preprocess",
"system.backend.compute_mean",
"numpy.zeros",
"numpy.empty",
"numpy.cov",
"system.ivector.Ivector",
"numpy.arange"
] | [((551, 557), 'featext.mfcc.Mfcc', 'Mfcc', ([], {}), '()\n', (555, 557), False, 'from featext.mfcc import Mfcc\n'), ((863, 921), 'numpy.empty', 'np.empty', (['(n_speakers, n_digits, n_sessions)'], {'dtype': 'object'}), '((n_speakers, n_digits, n_sessions), dtype=object)\n', (871, 921), True, 'import numpy as np\n'), ((1647, 1684), 'numpy.reshape', 'np.reshape', (['all_features[2, :, :]', '(-1)'], {}), '(all_features[2, :, :], -1)\n', (1657, 1684), True, 'import numpy as np\n'), ((1783, 1812), 'numpy.zeros', 'np.zeros', (['(n_digits, n_tests)'], {}), '((n_digits, n_tests))\n', (1791, 1812), True, 'import numpy as np\n'), ((2015, 2040), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (2024, 2040), True, 'import numpy as np\n'), ((2333, 2422), 'system.gmm_em.GMM', 'gmm.GMM', ([], {'ndim': 'feature_dim', 'nmix': 'n_components', 'ds_factor': '(1)', 'final_niter': '(10)', 'nworkers': '(2)'}), '(ndim=feature_dim, nmix=n_components, ds_factor=1, final_niter=10,\n nworkers=2)\n', (2340, 2422), True, 'import system.gmm_em as gmm\n'), ((2776, 2813), 'numpy.reshape', 'np.reshape', (['all_features[2, :, :]', '(-1)'], {}), '(all_features[2, :, :], -1)\n', (2786, 2813), True, 'import numpy as np\n'), ((2912, 2941), 'numpy.zeros', 'np.zeros', (['(n_digits, n_tests)'], {}), '((n_digits, n_tests))\n', (2920, 2941), True, 'import numpy as np\n'), ((3143, 3168), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (3152, 3168), True, 'import numpy as np\n'), ((3435, 3493), 'numpy.empty', 'np.empty', (['(n_speakers, n_digits, n_sessions)'], {'dtype': 'object'}), '((n_speakers, n_digits, n_sessions), dtype=object)\n', (3443, 3493), True, 'import numpy as np\n'), ((3815, 3891), 'system.ivector.TMatrix', 'ivector.TMatrix', (['ivector_dim', 'feature_dim', 'n_components'], {'niter': '(5)', 'nworkers': '(2)'}), '(ivector_dim, feature_dim, n_components, niter=5, nworkers=2)\n', (3830, 3891), True, 'import system.ivector as 
ivector\n'), ((3993, 4048), 'system.ivector.Ivector', 'ivector.Ivector', (['ivector_dim', 'feature_dim', 'n_components'], {}), '(ivector_dim, feature_dim, n_components)\n', (4008, 4048), True, 'import system.ivector as ivector\n'), ((4098, 4155), 'numpy.empty', 'np.empty', (['(ivector_dim, n_speakers, n_digits, n_sessions)'], {}), '((ivector_dim, n_speakers, n_digits, n_sessions))\n', (4106, 4155), True, 'import numpy as np\n'), ((4419, 4483), 'numpy.reshape', 'np.reshape', (['ivectors[:, 0:2, :, :]', '(ivector_dim, -1)'], {'order': '"""F"""'}), "(ivectors[:, 0:2, :, :], (ivector_dim, -1), order='F')\n", (4429, 4483), True, 'import numpy as np\n'), ((4666, 4728), 'numpy.reshape', 'np.reshape', (['ivectors[:, 2, :, :]', '(ivector_dim, -1)'], {'order': '"""F"""'}), "(ivectors[:, 2, :, :], (ivector_dim, -1), order='F')\n", (4676, 4728), True, 'import numpy as np\n'), ((4794, 4832), 'system.backend.compute_mean', 'backend.compute_mean', (['training_vectors'], {}), '(training_vectors)\n', (4814, 4832), True, 'import system.backend as backend\n'), ((4905, 4952), 'system.backend.preprocess', 'backend.preprocess', (['training_vectors', 'center', 'w'], {}), '(training_vectors, center, w)\n', (4923, 4952), True, 'import system.backend as backend\n'), ((4969, 5013), 'system.backend.preprocess', 'backend.preprocess', (['model_vectors', 'center', 'w'], {}), '(model_vectors, center, w)\n', (4987, 5013), True, 'import system.backend as backend\n'), ((5029, 5072), 'system.backend.preprocess', 'backend.preprocess', (['test_vectors', 'center', 'w'], {}), '(test_vectors, center, w)\n', (5047, 5072), True, 'import system.backend as backend\n'), ((5168, 5216), 'system.backend.GPLDA', 'backend.GPLDA', (['ivector_dim', 'latent_dim'], {'niter': '(20)'}), '(ivector_dim, latent_dim, niter=20)\n', (5181, 5216), True, 'import system.backend as backend\n'), ((5420, 5445), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (5429, 5445), True, 'import numpy as 
np\n'), ((1407, 1496), 'system.gmm_em.GMM', 'gmm.GMM', ([], {'ndim': 'feature_dim', 'nmix': 'n_components', 'ds_factor': '(1)', 'final_niter': '(10)', 'nworkers': '(2)'}), '(ndim=feature_dim, nmix=n_components, ds_factor=1, final_niter=10,\n nworkers=2)\n', (1414, 1496), True, 'import system.gmm_em as gmm\n'), ((1740, 1759), 'numpy.arange', 'np.arange', (['n_digits'], {}), '(n_digits)\n', (1749, 1759), True, 'import numpy as np\n'), ((2427, 2466), 'numpy.reshape', 'np.reshape', (['all_features[0:2, :, :]', '(-1)'], {}), '(all_features[0:2, :, :], -1)\n', (2437, 2466), True, 'import numpy as np\n'), ((2869, 2888), 'numpy.arange', 'np.arange', (['n_digits'], {}), '(n_digits)\n', (2878, 2888), True, 'import numpy as np\n'), ((3907, 3946), 'numpy.reshape', 'np.reshape', (['all_stats[0:2, :, :]', '(-1,)'], {}), '(all_stats[0:2, :, :], (-1,))\n', (3917, 3946), True, 'import numpy as np\n'), ((4580, 4619), 'numpy.mean', 'np.mean', (['ivectors[:, 0:2, :, :]', '(1, 3)'], {}), '(ivectors[:, 0:2, :, :], (1, 3))\n', (4587, 4619), True, 'import numpy as np\n'), ((4751, 4770), 'numpy.arange', 'np.arange', (['n_digits'], {}), '(n_digits)\n', (4760, 4770), True, 'import numpy as np\n'), ((4860, 4884), 'numpy.cov', 'np.cov', (['training_vectors'], {}), '(training_vectors)\n', (4866, 4884), True, 'import numpy as np\n'), ((1507, 1553), 'numpy.reshape', 'np.reshape', (['all_features[0:2, digit, :]', '(-1,)'], {}), '(all_features[0:2, digit, :], (-1,))\n', (1517, 1553), True, 'import numpy as np\n'), ((2642, 2684), 'numpy.reshape', 'np.reshape', (['all_features[0:2, i, :]', '(-1,)'], {}), '(all_features[0:2, i, :], (-1,))\n', (2652, 2684), True, 'import numpy as np\n'), ((4510, 4529), 'numpy.arange', 'np.arange', (['n_digits'], {}), '(n_digits)\n', (4519, 4529), True, 'import numpy as np\n')] |
import functools
import itertools
import operator
import numpy as np
from qecsim.model import StabilizerCode, cli_description
from qecsim.models.rotatedplanar import RotatedPlanarPauli
@cli_description('Rotated planar (rows INT >= 3, cols INT >= 3)')
class RotatedPlanarCode(StabilizerCode):
r"""
Implements a rotated planar mixed boundary code defined by its lattice size.
In addition to the members defined in :class:`qecsim.model.StabilizerCode`, it provides several lattice methods as
described below.
Lattice methods:
* Get size: :meth:`size`.
* Get plaquette type: :meth:`is_x_plaquette`, :meth:`is_z_plaquette`, :meth:`is_virtual_plaquette`.
* Get and test bounds: :meth:`site_bounds`, :meth:`is_in_site_bounds`, :meth:`is_in_plaquette_bounds`.
* Resolve a syndrome to plaquettes: :meth:`syndrome_to_plaquette_indices`.
* Construct a Pauli operator on the lattice: :meth:`new_pauli`.
Indices:
* Indices are in the format (x, y).
* Qubit sites (vertices) are indexed by (x, y) coordinates with the origin at the lower left qubit.
* Stabilizer plaquettes are indexed by (x, y) coordinates such that the lower left corner of the plaquette is on the
qubit site at (x, y).
* X-type stabilizer plaquette indices satisfy (x-y) % 2 == 1.
* Z-type stabilizer plaquette indices satisfy (x-y) % 2 == 0.
For example, qubit site indices on a 3 x 3 lattice:
::
(0,2)-----(1,2)-----(2,2)
| | |
| | |
| | |
(0,1)-----(1,1)-----(2,1)
| | |
| | |
| | |
(0,0)-----(1,0)-----(2,0)
For example, stabilizer plaquette types and indices on a 3 x 3 lattice:
::
-------
/ Z \
| (0,2) |
+---------+---------+-----
| X | Z | X \
| (0,1) | (1,1) |(2,1) |
| | | /
-----+---------+---------+-----
/ X | Z | X |
|(-1,0)| (0,0) | (1,0) |
\ | | |
-----+---------+---------+
| Z |
\ (1,-1)/
-------
"""
MIN_SIZE = (3, 3)
def __init__(self, rows, columns):
"""
Initialise new rotated planar code.
:param rows: Number of rows in lattice.
:type rows: int
:param columns: Number of columns in lattice.
:type columns: int
:raises ValueError: if (rows, columns) smaller than (3, 3) in either dimension.
:raises TypeError: if any parameter is of an invalid type.
"""
min_rows, min_cols = self.MIN_SIZE
try: # paranoid checking for CLI. (operator.index ensures the parameter can be treated as an int)
if operator.index(rows) < min_rows or operator.index(columns) < min_cols:
raise ValueError('{} minimum size is {}.'.format(type(self).__name__, self.MIN_SIZE))
except TypeError as ex:
raise TypeError('{} invalid parameter type'.format(type(self).__name__)) from ex
self._size = rows, columns
# < StabilizerCode interface methods >
@property
@functools.lru_cache()
def n_k_d(self):
"""See :meth:`qecsim.model.StabilizerCode.n_k_d`"""
# n = r*c, k = 1, d = min(r, c)
rows, cols = self.size
return rows * cols, 1, min(rows, cols)
@property
def label(self):
"""See :meth:`qecsim.model.StabilizerCode.label`"""
return 'Rotated planar {}x{}'.format(*self.size)
@property
@functools.lru_cache()
def stabilizers(self):
"""See :meth:`qecsim.model.StabilizerCode.stabilizers`"""
return np.array([self.new_pauli().plaquette(i).to_bsf() for i in self._plaquette_indices])
@property
@functools.lru_cache()
def logical_xs(self):
"""See :meth:`qecsim.model.StabilizerCode.logical_xs`"""
return np.array([self.new_pauli().logical_x().to_bsf()])
@property
@functools.lru_cache()
def logical_zs(self):
"""See :meth:`qecsim.model.StabilizerCode.logical_zs`"""
return np.array([self.new_pauli().logical_z().to_bsf()])
# </ StabilizerCode interface methods >
@property
def size(self):
"""
Size of the lattice in format (rows, columns), e.g. (5, 5).
:rtype: 2-tuple of int
"""
return self._size
@classmethod
def is_x_plaquette(cls, index):
"""
Return True if the plaquette index specifies an X-type plaquette, irrespective of lattice bounds.
:param index: Index in the format (x, y).
:type index: 2-tuple of int
:return: If the index specifies an X-type plaquette.
:rtype: bool
"""
x, y = index
return (x - y) % 2 == 1
@classmethod
def is_z_plaquette(cls, index):
"""
Return True if the plaquette index specifies an Z-type plaquette, irrespective of lattice bounds.
:param index: Index in the format (x, y).
:type index: 2-tuple of int
:return: If the index specifies an Z-type plaquette.
:rtype: bool
"""
return not cls.is_x_plaquette(index)
@property
def site_bounds(self):
"""
Maximum x and y value that an index coordinate can take.
:rtype: 2-tuple of int
"""
# max_row, max_col
rows, cols = self.size
return cols - 1, rows - 1 # max_x, max_y
def is_in_site_bounds(self, index):
"""
Return True if the site index is within lattice bounds inclusive.
:param index: Index in the format (x, y).
:type index: 2-tuple of int
:return: If the index is within lattice bounds inclusive.
:rtype: bool
"""
x, y = index
max_site_x, max_site_y = self.site_bounds
return 0 <= x <= max_site_x and 0 <= y <= max_site_y
@functools.lru_cache(maxsize=2 ** 14) # O(n) per code, so for 101x101 code
def is_in_plaquette_bounds(self, index):
"""
Return True if the plaquette index is within lattice bounds inclusive.
:param index: Index in the format (x, y).
:type index: 2-tuple of int
:return: If the index is within lattice bounds inclusive.
:rtype: bool
"""
x, y = index
max_site_x, max_site_y = self.site_bounds
# derive min and max x bounds allowing for boundary plaquettes
min_x = -1 if y % 2 == 0 else 0
if max_site_x % 2 == 0: # even max_site_x (i.e. odd number of columns)
max_x = max_site_x - 1 if y % 2 == 0 else max_site_x
else:
max_x = max_site_x if y % 2 == 0 else max_site_x - 1
# derive min and max y bounds allowing for boundary plaquettes
min_y = 0 if x % 2 == 0 else -1
if max_site_y % 2 == 0: # even max_site_y (i.e. odd number of rows)
max_y = max_site_y if x % 2 == 0 else max_site_y - 1
else: # odd max_site_y (i.e. even number of rows)
max_y = max_site_y - 1 if x % 2 == 0 else max_site_y
# evaluate in bounds
return min_x <= x <= max_x and min_y <= y <= max_y
def is_virtual_plaquette(self, index):
"""
Return True if the plaquette index specifies a virtual plaquette
(i.e. index is on the boundary but not within lattice bounds).
:param index: Index in the format (x, y).
:type index: 2-tuple of int
:return: If the index specifies a virtual plaquette.
:rtype: bool
"""
x, y = index
max_site_x, max_site_y = self.site_bounds
# index is on boundary but not within lattice bounds.
return (x == -1 or x == max_site_x or y == -1 or y == max_site_y) and not self.is_in_plaquette_bounds(index)
@property
@functools.lru_cache()
def _plaquette_indices(self):
"""
Return a list of the plaquette indices of the lattice.
Notes:
* Each index is in the format (x, y).
* Indices are in order of increasing type, y, x. (Z-type first)
:return: List of indices in the format (x, y).
:rtype: list of 2-tuple of int
"""
max_site_x, max_site_y = self.site_bounds
z_plaquette_indices, x_plaquette_indices = [], []
for y in range(-1, max_site_y + 2):
for x in range(-1, max_site_x + 2):
index = x, y
if self.is_in_plaquette_bounds(index):
if self.is_z_plaquette(index):
z_plaquette_indices.append(index)
else:
x_plaquette_indices.append(index)
return list(itertools.chain(z_plaquette_indices, x_plaquette_indices))
def syndrome_to_plaquette_indices(self, syndrome):
"""
Returns the indices of the plaquettes associated with the non-commuting stabilizers identified by the syndrome.
:param syndrome: Binary vector identifying commuting and non-commuting stabilizers by 0 and 1 respectively.
:type syndrome: numpy.array (1d)
:return: Set of plaquette indices.
:rtype: set of 2-tuple of int
"""
return set(tuple(index) for index in np.array(self._plaquette_indices)[syndrome.nonzero()])
def __eq__(self, other):
if type(other) is type(self):
return self._size == other._size
return NotImplemented
def __hash__(self):
return hash(self._size)
def __repr__(self):
return '{}({!r}, {!r})'.format(type(self).__name__, *self.size)
def ascii_art(self, syndrome=None, pauli=None, plaquette_labels=None, site_labels=None):
"""
Return ASCII art style lattice showing primal lattice lines with syndrome bits and Pauli operators as given.
Notes:
* Optional plaquette_labels override syndrome.
* Optional site_labels override pauli.
:param syndrome: Syndrome (optional) as binary vector.
:type syndrome: numpy.array (1d)
:param pauli: Rotated planar Pauli (optional)
:type pauli: RotatedPlanarPauli
:param plaquette_labels: Dictionary of plaquette indices as (x, y) to single-character labels (optional).
:type plaquette_labels: dict of (int, int) to char
:param site_labels: Dictionary of site indices as (x, y) to single-character labels (optional).
:type site_labels: dict of (int, int) to char
:return: ASCII art style lattice.
:rtype: str
"""
# See https://unicode-table.com/en/blocks/box-drawing/ for box-drawing unicode characters
max_site_x, max_site_y = self.site_bounds
syndrome_indices = set() if syndrome is None else self.syndrome_to_plaquette_indices(syndrome)
pauli = self.new_pauli() if pauli is None else pauli
plaquette_labels = {} if plaquette_labels is None else plaquette_labels
site_labels = {} if site_labels is None else site_labels
# Build row templates
# e.g. (where @=plaquette, o=site, .=virtual_plaquette):
#
# . /-@-\ . /-@-\ . . :plaquette_row_top_even
# o---o---o---o---o-\ :site_row_top_even
# . |#@#| @ |#@#| @ |#@ :plaquette_row_odd
# /-o---o---o---o---o-/ :site_row_odd
# @#| @ |#@#| @ |#@#| . :plaquette_row_even
# \-o---o---o---o---o-\ :t_site_row_even
# . |#@#| @ |#@#| @ |#@ :plaquette_row_odd
# /-o---o---o---o---o-/ :site_row_odd
# @#| @ |#@#| @ |#@#| . :plaquette_row_even
# \-o---o---o---o---o :site_row_bottom
# . . \-@-/ . \-@-/ . :plaquette_row_bottom
#
# e.g (if top row odd):
#
# . . /-@-\ . /-@-\ . :plaquette_row_top_odd
# /-o---o---o---o---o :site_row_top_odd
#
# Common chars
c_dot = chr(0x00B7)
c_dash = chr(0x2500)
c_bar = chr(0x2502)
c_angle_nw = chr(0x250C)
c_angle_ne = chr(0x2510)
c_angle_sw = chr(0x2514)
c_angle_se = chr(0x2518)
c_shade = chr(0x2591)
# Common char sequences
cs_pn = c_angle_nw + c_dash + '{}' + c_dash + c_angle_ne # '/-{}-\'
cs_pnw = c_angle_nw + c_dash # '/-'
cs_pw = '{}' + c_shade # ' #'
cs_psw = c_angle_sw + c_dash # '\-'
cs_pne = c_dash + c_angle_ne # '-\'
cs_pe = c_shade + '{}' # '# '
cs_pse = c_dash + c_angle_se # '-/'
cs_ps = c_angle_sw + c_dash + '{}' + c_dash + c_angle_se # '\-{}-/'
cs_pbulkx = c_bar + c_shade + '{}' + c_shade # '|#{}#'
cs_pbulkz = c_bar + ' {} ' # '| {} '
cs_sbulk = '{}' + c_dash * 3 # '{}---'
# booleans to control placement of boundary plaquettes
odd_rows = max_site_y % 2 == 0
odd_cols = max_site_x % 2 == 0
if odd_rows:
# . /-@-\ . /-@-\ . .
t_plaquette_row_top = ('{} ' + cs_pn + ' ') * ((max_site_x + 1) // 2) + ('{} {}' if odd_cols else '{}')
# o---o---o---o---o-\
t_site_row_top = ' ' + cs_sbulk * max_site_x + '{}' + (cs_pne if odd_cols else ' ')
else:
# . . /-@-\ . /-@-\ .
t_plaquette_row_top = '{} {}' + (' ' + cs_pn + ' {}') * (max_site_x // 2) + ('' if odd_cols else ' {}')
# /-o---o---o---o---o
t_site_row_top = cs_pnw + cs_sbulk * max_site_x + '{}' + (cs_pne if not odd_cols else ' ')
# |#@#| @ |#@#| @ |#@
t_plaquette_row_odd = ('{} ' + ''.join(([cs_pbulkx, cs_pbulkz] * max_site_x)[:max_site_x])
+ c_bar + (cs_pe if odd_cols else ' {}'))
# /-o---o---o---o---o-/
t_site_row_odd = cs_pnw + cs_sbulk * max_site_x + '{}' + (cs_pse if odd_cols else cs_pne)
# @#| @ |#@#| @ |#@#| .
t_plaquette_row_even = (cs_pw + ''.join(([cs_pbulkz, cs_pbulkx] * max_site_x)[:max_site_x])
+ c_bar + (cs_pe if not odd_cols else ' {}'))
# \-o---o---o---o---o-\
t_site_row_even = cs_psw + cs_sbulk * max_site_x + '{}' + (cs_pne if odd_cols else cs_pse)
# \-o---o---o---o---o
t_site_row_bottom = cs_psw + cs_sbulk * max_site_x + '{}' + (cs_pse if not odd_cols else ' ')
# . . \-@-/ . \-@-/ .
t_plaquette_row_bottom = '{} {}' + (' ' + cs_ps + ' {}') * (max_site_x // 2) + ('' if odd_cols else ' {}')
# Parameter extraction functions
def _site_parameters(y):
indices = [i for i in ((x, y) for x in range(max_site_x + 1))]
parameters = []
for i in indices:
if i in site_labels:
parameters.append(site_labels[i])
else:
op = pauli.operator(i)
parameters.append(c_dot if op == 'I' else op)
return parameters
def _plaquette_parameters(y):
indices = [i for i in ((x, y) for x in range(-1, max_site_x + 1))]
parameters = []
for i in indices:
is_z_plaquette = self.is_z_plaquette(i)
is_virtual_plaquette = self.is_virtual_plaquette(i)
if is_virtual_plaquette:
parameters.append(plaquette_labels.get(i, ' '))
elif i in plaquette_labels:
parameters.append(plaquette_labels[i])
elif i in syndrome_indices:
parameters.append('Z' if is_z_plaquette else 'X')
elif i[0] == -1 or i[0] == max_site_x:
parameters.append(c_bar)
elif i[1] == -1 or i[1] == max_site_y:
parameters.append(c_dash)
else:
parameters.append(' ' if is_z_plaquette else c_shade)
return parameters
# Append templates to text with parameters
text = []
# top rows
text.append(t_plaquette_row_top.format(*_plaquette_parameters(max_site_y)))
text.append(t_site_row_top.format(*_site_parameters(max_site_y)))
# middle rows
for y in range(max_site_y - 1, 0, -1):
if y % 2 == 0:
text.append(t_plaquette_row_even.format(*_plaquette_parameters(y)))
text.append(t_site_row_even.format(*_site_parameters(y)))
else:
text.append(t_plaquette_row_odd.format(*_plaquette_parameters(y)))
text.append(t_site_row_odd.format(*_site_parameters(y)))
# bottom rows
text.append(t_plaquette_row_even.format(*_plaquette_parameters(0)))
text.append(t_site_row_bottom.format(*_site_parameters(0)))
text.append(t_plaquette_row_bottom.format(*_plaquette_parameters(-1)))
return '\n'.join(text)
def new_pauli(self, bsf=None):
"""
Convenience constructor of planar Pauli for this code.
Notes:
* For performance reasons, the new Pauli is a view of the given bsf. Modifying one will modify the other.
:param bsf: Binary symplectic representation of Pauli. (Optional. Defaults to identity.)
:type bsf: numpy.array (1d)
:return: Rotated planar Pauli
:rtype: RotatedPlanarPauli
"""
return RotatedPlanarPauli(self, bsf)
| [
"itertools.chain",
"qecsim.model.cli_description",
"operator.index",
"numpy.array",
"functools.lru_cache",
"qecsim.models.rotatedplanar.RotatedPlanarPauli"
] | [((190, 254), 'qecsim.model.cli_description', 'cli_description', (['"""Rotated planar (rows INT >= 3, cols INT >= 3)"""'], {}), "('Rotated planar (rows INT >= 3, cols INT >= 3)')\n", (205, 254), False, 'from qecsim.model import StabilizerCode, cli_description\n'), ((3425, 3446), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (3444, 3446), False, 'import functools\n'), ((3819, 3840), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (3838, 3840), False, 'import functools\n'), ((4053, 4074), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (4072, 4074), False, 'import functools\n'), ((4251, 4272), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (4270, 4272), False, 'import functools\n'), ((6185, 6221), 'functools.lru_cache', 'functools.lru_cache', ([], {'maxsize': '(2 ** 14)'}), '(maxsize=2 ** 14)\n', (6204, 6221), False, 'import functools\n'), ((8104, 8125), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (8123, 8125), False, 'import functools\n'), ((17584, 17613), 'qecsim.models.rotatedplanar.RotatedPlanarPauli', 'RotatedPlanarPauli', (['self', 'bsf'], {}), '(self, bsf)\n', (17602, 17613), False, 'from qecsim.models.rotatedplanar import RotatedPlanarPauli\n'), ((8974, 9031), 'itertools.chain', 'itertools.chain', (['z_plaquette_indices', 'x_plaquette_indices'], {}), '(z_plaquette_indices, x_plaquette_indices)\n', (8989, 9031), False, 'import itertools\n'), ((3028, 3048), 'operator.index', 'operator.index', (['rows'], {}), '(rows)\n', (3042, 3048), False, 'import operator\n'), ((3063, 3086), 'operator.index', 'operator.index', (['columns'], {}), '(columns)\n', (3077, 3086), False, 'import operator\n'), ((9517, 9550), 'numpy.array', 'np.array', (['self._plaquette_indices'], {}), '(self._plaquette_indices)\n', (9525, 9550), True, 'import numpy as np\n')] |
from sklearn.base import BaseEstimator, TransformerMixin
from autogluon.features.generators import OneHotEncoderFeatureGenerator
class OheFeaturesGenerator(BaseEstimator, TransformerMixin):
def __init__(self):
self._feature_names = []
self._encoder = None
def fit(self, X, y=None):
self._encoder = OneHotEncoderFeatureGenerator(max_levels=10000, verbosity=0)
self._encoder.fit(X)
self._feature_names = self._encoder.features_out
return self
def transform(self, X, y=None):
return self._encoder.transform_ohe(X)
def get_feature_names(self):
return self._feature_names
class NlpDataPreprocessor(BaseEstimator, TransformerMixin):
def __init__(self, nlp_cols):
self.nlp_cols = nlp_cols
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X = X[self.nlp_cols].copy()
for c in self.nlp_cols:
X[c] = X[c].astype(str).fillna(' ')
X = X.apply(' '.join, axis=1).str.replace('[ ]+', ' ', regex=True)
return X.values.tolist()
| [
"autogluon.features.generators.OneHotEncoderFeatureGenerator"
] | [((334, 394), 'autogluon.features.generators.OneHotEncoderFeatureGenerator', 'OneHotEncoderFeatureGenerator', ([], {'max_levels': '(10000)', 'verbosity': '(0)'}), '(max_levels=10000, verbosity=0)\n', (363, 394), False, 'from autogluon.features.generators import OneHotEncoderFeatureGenerator\n')] |
# Multi Indexing a Tensor
import tensorflow as tf
rank_2 = tf.constant(
[[1, 2],[3, 4],[5, 6]], dtype=tf.float16)
print(rank_2[1, 1].numpy())
# 4.0
print("Second row:", rank_2[1, :].numpy())
# Second row: [3. 4.]
print("Second column:", rank_2[:, 1].numpy())
# Second column: [2. 4. 6.]
# Skip first row :
rank_2[1:, :].numpy()
# [[3. 4.] [5. 6.]] | [
"tensorflow.constant"
] | [((59, 114), 'tensorflow.constant', 'tf.constant', (['[[1, 2], [3, 4], [5, 6]]'], {'dtype': 'tf.float16'}), '([[1, 2], [3, 4], [5, 6]], dtype=tf.float16)\n', (70, 114), True, 'import tensorflow as tf\n')] |
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import lrn
fX = theano.config.floatX
def ground_truth_normalizer(bc01, k, n, alpha, beta):
"""
This code is adapted from pylearn2.
https://github.com/lisa-lab/pylearn2/blob/master/LICENSE.txt
"""
def ground_truth_normalize_row(row, k, n, alpha, beta):
assert row.ndim == 1
out = np.zeros(row.shape)
for i in range(row.shape[0]):
s = k
tot = 0
for j in range(max(0, i - n // 2),
min(row.shape[0], i + n // 2 + 1)):
tot += 1
sq = row[j] ** 2.
assert sq > 0.
assert s >= k
assert alpha > 0.
s += alpha * sq
assert s >= k
assert tot <= n
assert s >= k
s = s ** beta
out[i] = row[i] / s
return out
c01b = bc01.transpose(1, 2, 3, 0)
out = np.zeros(c01b.shape)
for r in range(out.shape[1]):
for c in range(out.shape[2]):
for x in range(out.shape[3]):
out[:, r, c, x] = ground_truth_normalize_row(
row=c01b[:, r, c, x],
k=k, n=n, alpha=alpha, beta=beta)
out_bc01 = out.transpose(3, 0, 1, 2)
return out_bc01
def _test_localresponse_normalization_fn(fn, shape=(3, 4, 5, 6), **kwargs):
vw = treeano.VariableWrapper("foo", variable=T.tensor4(), shape=shape)
new_kwargs = dict(
# use a big value of alpha so mistakes involving alpha show up strong
alpha=1.5,
k=2,
beta=0.75,
n=5,
)
new_kwargs.update(kwargs)
fn = theano.function([vw.variable], [fn(vw, **new_kwargs)])
x = np.random.randn(*shape).astype(fX)
res, = fn(x)
ans = ground_truth_normalizer(x, **new_kwargs)
np.testing.assert_allclose(ans, res, rtol=1e-5)
def test_local_response_normalization_2d_v1():
_test_localresponse_normalization_fn(
lrn.local_response_normalization_2d_v1)
def test_local_response_normalization_2d_v2():
_test_localresponse_normalization_fn(
lrn.local_response_normalization_2d_v2)
def test_local_response_normalization_2d_pool():
_test_localresponse_normalization_fn(
lrn.local_response_normalization_2d_pool)
def test_local_response_normalization_2d_pool():
_test_localresponse_normalization_fn(
lrn.local_response_normalization_pool)
def test_local_response_normalization_2d_node_shape():
shape = (3, 4, 5, 6)
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=shape),
lrn.LocalResponseNormalization2DNode("lrn")]
).network()
fn = network.function(["i"], ["s"])
x = np.random.randn(*shape).astype(fX)
res = fn(x)[0].shape
np.testing.assert_equal(shape, res)
def test_local_response_normalization_node_shape():
for ndim in [2, 3, 4, 5, 6]:
shape = (3,) * ndim
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=shape),
lrn.LocalResponseNormalizationNode("lrn")]
).network()
fn = network.function(["i"], ["s"])
x = np.random.randn(*shape).astype(fX)
res = fn(x)[0].shape
np.testing.assert_equal(shape, res)
| [
"treeano.nodes.InputNode",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"numpy.zeros",
"treeano.sandbox.nodes.lrn.LocalResponseNormalizationNode",
"treeano.sandbox.nodes.lrn.LocalResponseNormalization2DNode",
"theano.tensor.tensor4",
"numpy.random.randn"
] | [((1075, 1095), 'numpy.zeros', 'np.zeros', (['c01b.shape'], {}), '(c01b.shape)\n', (1083, 1095), True, 'import numpy as np\n'), ((1963, 2011), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ans', 'res'], {'rtol': '(1e-05)'}), '(ans, res, rtol=1e-05)\n', (1989, 2011), True, 'import numpy as np\n'), ((2923, 2958), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['shape', 'res'], {}), '(shape, res)\n', (2946, 2958), True, 'import numpy as np\n'), ((473, 492), 'numpy.zeros', 'np.zeros', (['row.shape'], {}), '(row.shape)\n', (481, 492), True, 'import numpy as np\n'), ((3377, 3412), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['shape', 'res'], {}), '(shape, res)\n', (3400, 3412), True, 'import numpy as np\n'), ((1557, 1568), 'theano.tensor.tensor4', 'T.tensor4', ([], {}), '()\n', (1566, 1568), True, 'import theano.tensor as T\n'), ((1856, 1879), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1871, 1879), True, 'import numpy as np\n'), ((2859, 2882), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2874, 2882), True, 'import numpy as np\n'), ((3305, 3328), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (3320, 3328), True, 'import numpy as np\n'), ((2709, 2739), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""i"""'], {'shape': 'shape'}), "('i', shape=shape)\n", (2721, 2739), True, 'import treeano.nodes as tn\n'), ((2750, 2793), 'treeano.sandbox.nodes.lrn.LocalResponseNormalization2DNode', 'lrn.LocalResponseNormalization2DNode', (['"""lrn"""'], {}), "('lrn')\n", (2786, 2793), False, 'from treeano.sandbox.nodes import lrn\n'), ((3141, 3171), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""i"""'], {'shape': 'shape'}), "('i', shape=shape)\n", (3153, 3171), True, 'import treeano.nodes as tn\n'), ((3186, 3227), 'treeano.sandbox.nodes.lrn.LocalResponseNormalizationNode', 'lrn.LocalResponseNormalizationNode', (['"""lrn"""'], {}), "('lrn')\n", (3220, 3227), 
False, 'from treeano.sandbox.nodes import lrn\n')] |
from pathlib import Path
from musurgia.pdf.line import HorizontalLineSegment, HorizontalSegmentedLine, VerticalSegmentedLine
from musurgia.pdf.pdf import Pdf
from musurgia.unittest import TestCase
path = Path(__file__)
class TestMarkLineLabels(TestCase):
def setUp(self) -> None:
self.pdf = Pdf()
self.ls = HorizontalLineSegment(length=20)
def test_draw_above(self):
ml = self.ls.start_mark_line
ml.add_text_label('first text label above')
ml.add_text_label('second text label above')
ml.add_text_label('third text label above')
self.ls.relative_x = 10
self.ls.relative_y = 20
with self.file_path(path, 'draw_above', 'pdf') as pdf_path:
self.pdf.translate_page_margins()
self.pdf.draw_ruler('h')
self.pdf.draw_ruler('v')
self.ls.draw(self.pdf)
self.pdf.write(pdf_path)
def test_draw_one_above(self):
ml = self.ls.start_mark_line
ml.length = 20
ml.add_text_label('first text label above')
with self.file_path(path, 'draw_one_above', 'pdf') as pdf_path:
self.pdf.translate_page_margins()
# self.pdf.draw_ruler('h')
# self.pdf.draw_ruler('v')
self.pdf.translate(30, 30)
self.ls.draw(self.pdf)
self.pdf.write(pdf_path)
def test_draw_below(self):
ml = self.ls.start_mark_line
ml.add_text_label('first text label below', placement='below')
ml.add_text_label('second text label below', placement='below')
self.ls.relative_x = 10
self.ls.relative_y = 20
with self.file_path(path, 'draw_below', 'pdf') as pdf_path:
self.pdf.translate_page_margins()
self.pdf.draw_ruler('h')
self.pdf.draw_ruler('v')
self.ls.draw(self.pdf)
self.pdf.write(pdf_path)
def test_draw_left(self):
ml = self.ls.start_mark_line
ml.add_text_label('first text label left', placement='left')
ml.add_text_label('second text label left left left', placement='left')
ml.add_text_label('third text label left left left', placement='left')
ml.left_text_labels[1].font.size = 8
self.ls.relative_x = 40
self.ls.relative_y = 10
with self.file_path(path, 'draw_left', 'pdf') as pdf_path:
self.pdf.translate_page_margins()
self.pdf.draw_ruler('h')
self.pdf.draw_ruler('v')
self.ls.draw(self.pdf)
self.pdf.write(pdf_path)
def test_different_sizes(self):
ml = self.ls.start_mark_line
ml.add_text_label('first text label above', font_size=7)
ml.add_text_label('second text label above', font_size=8)
ml.add_text_label('third text label above', font_size=9)
with self.file_path(path, 'different_sizes', 'pdf') as pdf_path:
self.pdf.translate_page_margins()
self.pdf.draw_ruler('h')
self.pdf.draw_ruler('v')
self.pdf.translate(10, 20)
self.ls.draw(self.pdf)
self.pdf.write(pdf_path)
def test_font_size_8(self):
ml = self.ls.start_mark_line
ml.add_text_label('first text label above', font_size=8, bottom_margin=2)
ml.add_text_label('second text label above', font_size=8, bottom_margin=4)
ml.add_text_label('third text label above', font_size=8)
with self.file_path(path, 'font_size_8', 'pdf') as pdf_path:
self.pdf.translate_page_margins()
self.pdf.draw_ruler('h')
self.pdf.draw_ruler('v')
self.pdf.translate(10, 20)
self.ls.draw(self.pdf)
self.pdf.write(pdf_path)
def test_below_with_different_mark_line_lengths(self):
hsl = HorizontalSegmentedLine(lengths=[10, 15, 20])
hsl.segments[0].start_mark_line.length = 6
hsl.segments[0].start_mark_line.add_label(1, placement='below', font_size=8)
hsl.segments[1].start_mark_line.add_label(2, placement='below', font_size=8)
hsl.segments[2].start_mark_line.add_label(3, placement='below', font_size=8)
with self.file_path(path, 'below_with_different_mark_line_lengths', 'pdf') as pdf_path:
self.pdf.translate_page_margins()
self.pdf.draw_ruler('h')
self.pdf.draw_ruler('v')
self.pdf.translate(10, 10)
hsl.draw(self.pdf)
self.pdf.write(pdf_path)
def test_vertical_with_left_text_labels(self):
vsl = VerticalSegmentedLine(lengths=[10, 15, 20])
vsl.segments[0].start_mark_line.length = 6
vsl.segments[0].start_mark_line.add_label(1, placement='left', font_size=8)
vsl.segments[1].start_mark_line.add_label(2, placement='left', font_size=8)
vsl.segments[2].start_mark_line.add_label(3, placement='left', font_size=8)
with self.file_path(path, 'vertical_with_left_text_labels', 'pdf') as pdf_path:
self.pdf.translate_page_margins()
self.pdf.draw_ruler('h')
self.pdf.draw_ruler('v')
self.pdf.translate(10, 10)
vsl.draw(self.pdf)
self.pdf.write(pdf_path)
def test_draw_above_with_different_bottom_margins(self):
ml = self.ls.start_mark_line
ml.add_text_label('first text label above', bottom_margin=2)
ml.add_text_label('second text label above', bottom_margin=4)
ml.add_text_label('third text label above', bottom_margin=15)
self.ls.relative_x = 10
self.ls.relative_y = 40
with self.file_path(path, 'draw_above_with_different_bottom_margins', 'pdf') as pdf_path:
self.pdf.translate_page_margins()
self.pdf.draw_ruler('h')
self.pdf.draw_ruler('v')
self.ls.draw(self.pdf)
self.pdf.write(pdf_path)
def test_left_position(self):
ml = self.ls.start_mark_line
# print(ml.get_relative_position())
# print(ml.get_height())
left_l = ml.add_text_label('left one', placement='left')
# print(left_l.get_relative_position())
left_l = ml.add_text_label('left two', placement='left')
left_l = ml.add_text_label('left three', placement='left')
left_l = ml.add_text_label('left four', placement='left')
with self.file_path(path, 'left_position', 'pdf') as pdf_path:
self.pdf.translate_page_margins()
self.pdf.draw_ruler('h')
self.pdf.draw_ruler('v')
self.pdf.translate(30, 30)
self.ls.draw(self.pdf)
self.pdf.write(pdf_path)
| [
"pathlib.Path",
"musurgia.pdf.pdf.Pdf",
"musurgia.pdf.line.HorizontalSegmentedLine",
"musurgia.pdf.line.VerticalSegmentedLine",
"musurgia.pdf.line.HorizontalLineSegment"
] | [((206, 220), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (210, 220), False, 'from pathlib import Path\n'), ((307, 312), 'musurgia.pdf.pdf.Pdf', 'Pdf', ([], {}), '()\n', (310, 312), False, 'from musurgia.pdf.pdf import Pdf\n'), ((331, 363), 'musurgia.pdf.line.HorizontalLineSegment', 'HorizontalLineSegment', ([], {'length': '(20)'}), '(length=20)\n', (352, 363), False, 'from musurgia.pdf.line import HorizontalLineSegment, HorizontalSegmentedLine, VerticalSegmentedLine\n'), ((3820, 3865), 'musurgia.pdf.line.HorizontalSegmentedLine', 'HorizontalSegmentedLine', ([], {'lengths': '[10, 15, 20]'}), '(lengths=[10, 15, 20])\n', (3843, 3865), False, 'from musurgia.pdf.line import HorizontalLineSegment, HorizontalSegmentedLine, VerticalSegmentedLine\n'), ((4561, 4604), 'musurgia.pdf.line.VerticalSegmentedLine', 'VerticalSegmentedLine', ([], {'lengths': '[10, 15, 20]'}), '(lengths=[10, 15, 20])\n', (4582, 4604), False, 'from musurgia.pdf.line import HorizontalLineSegment, HorizontalSegmentedLine, VerticalSegmentedLine\n')] |
""" Script to send prediction request.
Usage:
python predict.py --url=YOUR_KF_HOST/models/coco --input_image=YOUR_LOCAL_IMAGE
--output_image=OUTPUT_IMAGE_NAME.
This will save the prediction result as OUTPUT_IMAGE_NAME.
The output image is the input image with the detected bounding boxes.
"""
import argparse
import json
import requests
import numpy as np
from PIL import Image
import visualization_utils as vis_util
WIDTH = 1024
HEIGHT = 768
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--url", help='The url to send the request')
parser.add_argument("--input_image", default='image1.jpg')
parser.add_argument("--output_image", default='output.jpg')
args = parser.parse_args()
img = Image.open(args.input_image)
img = img.resize((WIDTH, HEIGHT), Image.ANTIALIAS)
img_np = np.array(img)
res = requests.post(
args.url,
data=json.dumps({"instances": [{"inputs": img_np.tolist()}]}))
if res.status_code != 200:
print('Failed: {}'.format(res.text))
return
output_dict = json.loads(res.text).get('predictions')[0]
vis_util.visualize_boxes_and_labels_on_image_array(
img_np,
np.array(output_dict['detection_boxes']),
map(int, output_dict['detection_classes']),
output_dict['detection_scores'],
{},
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
output_image = Image.fromarray(img_np)
output_image.save(args.output_image)
if __name__ == '__main__':
main()
| [
"PIL.Image.fromarray",
"PIL.Image.open",
"json.loads",
"argparse.ArgumentParser",
"numpy.array"
] | [((477, 502), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (500, 502), False, 'import argparse\n'), ((731, 759), 'PIL.Image.open', 'Image.open', (['args.input_image'], {}), '(args.input_image)\n', (741, 759), False, 'from PIL import Image\n'), ((824, 837), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (832, 837), True, 'import numpy as np\n'), ((1422, 1445), 'PIL.Image.fromarray', 'Image.fromarray', (['img_np'], {}), '(img_np)\n', (1437, 1445), False, 'from PIL import Image\n'), ((1155, 1195), 'numpy.array', 'np.array', (["output_dict['detection_boxes']"], {}), "(output_dict['detection_boxes'])\n", (1163, 1195), True, 'import numpy as np\n'), ((1041, 1061), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (1051, 1061), False, 'import json\n')] |
"""
Estimate luminosity function in COSMOS from interferometric follow-up of
Miettinen+ 2014, Younger+ 2007, and Younger+2009.
"""
import numpy
import matplotlib.pyplot as plt
from pylab import savefig
from astropy.table import Table
import matplotlib
def MonteCarloCounts(fluxes, errors):
    """Bootstrap binned number counts from fluxes and their 1-sigma errors.

    Each source's flux is redrawn 1000 times from a Gaussian centered on
    its measured value, and a histogram (with bin edges fixed by the
    measured fluxes themselves) is accumulated per realization.

    Note: draws from the global numpy random state, so repeated calls
    give different results unless the caller fixes the seed.

    Returns:
        tuple: (mean counts per bin, rms of counts per bin, bin edges)
    """
    _, bin_edges = numpy.histogram(fluxes)
    nbins = bin_edges.size - 1
    nsource = fluxes.size
    nsim = 1000
    # Simulated flux table: one row per source, one column per realization.
    obsPDF = numpy.zeros([nsource, nsim])
    for row, (mu, sigma) in enumerate(zip(fluxes, errors)):
        obsPDF[row, :] = numpy.random.normal(loc=mu, scale=sigma, size=nsim)
    # Histogram each realization against the fixed bin edges.
    histPDF = numpy.zeros([nbins, nsim])
    for isim in range(nsim):
        histPDF[:, isim], _ = numpy.histogram(obsPDF[:, isim], bins=bin_edges)
    return numpy.mean(histPDF, axis=1), numpy.std(histPDF, axis=1), bin_edges
# ---------------------------------------------------------------------------
# COSMOS interferometric sample.
# S_1100: AzTEC 1.1 mm fluxes; S_890: 890 um fluxes -- presumably the
# Younger+ 2007/2009 SMA follow-up (see module docstring); TODO confirm.
# Uncertainties are inflated by a factor 1.2 -- presumably a calibration
# error term; confirm against the source papers.
# ---------------------------------------------------------------------------
S_1100 = numpy.array([10.7, 9.0, 7.6, 6.8, 7.6, 7.9, 8.3, 5.5, 5.8, 4.7, 4.7,
        4.5, 4.4, 4.3, 4.3, 4.2])
S_890 = numpy.array([15.6, 12.4, 8.7, 14.4, 9.3, 8.6, 12.0, 19.7, 9.0, 5.3,
        14.4, 13.5, 8.2, 5.0, 3.9, 4.4])
e_S_890 = numpy.array([1.1, 1.0, 1.5, 1.9, 1.3, 1.3, 1.5, 1.8, 2.2, 1.0, 2.9,
        1.8, 1.8, 1.0, 1.0, 1.0]) * 1.2

plt.clf()
plt.plot(S_1100, S_890 / S_1100, 'o')
# This plot illustrates that the typical correction factor from total 1.1mm
# flux density to total 890um flux density is ~1.5

# Miettinen+ 2014 ("Oskari") 1.3 mm fluxes, scaled to 890 um by a factor
# 2.5; their uncertainties are additionally inflated by 1.5.
Oskari890 = numpy.array([2.07, 2.15, 1.58, 1.53, 1.78, 1.04, 4.82, 5.72, 1.85,
        3.37, 2.19, 1.27, 1.82, 0.99, 1.41, 1.79, 1.72, 2.85, 0.98, 0.90,
        3.36, 2.38, 2.45, 9.01, 1.53]) * 2.5
e_Oskari890 = numpy.array([0.62, 0.63, 0.43, 0.46, 0.54, 0.36, 1.33, 1.85,
        0.49, 1.03, 0.83, 0.40, 0.59, 0.29, 0.42, 0.53, 0.53, 0.78, 0.36,
        0.28, 0.97, 0.77, 0.67, 2.39, 0.45]) * 2.5 * 1.5

# Combined COSMOS 890 um sample (SMA plus scaled ALMA fluxes).
cosmos890 = numpy.append(S_890, Oskari890)
e_cosmos890 = numpy.append(e_S_890, e_Oskari890)

# Per-bin completeness correction; length matches the 10 default histogram
# bins produced inside MonteCarloCounts.
completeness = numpy.array([0.28, 0.5, 0.8, 0.9, 0.99, 1.0, 1.0, 1.0, 1.0, 1.0])

# AzTEC coverage in COSMOS is 0.15 deg^2, centered on z=0.7 overdensity
hist890mean, hist890rms, bin_edges = MonteCarloCounts(cosmos890, e_cosmos890)
bin_width = bin_edges[1] - bin_edges[0]
area_aztec = 0.15
norm890 = hist890mean / area_aztec
e_norm890 = hist890rms / area_aztec
nbins = norm890.size
# Bin centers (vectorized midpoints) and cumulative counts N(>S).
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
cum890 = numpy.zeros(nbins)
for ibin in range(nbins):
    cum890[ibin] = norm890[ibin:].sum()
# Differential counts per mJy per deg^2, completeness-corrected.
diff890 = norm890 / bin_width / completeness #/ bin_centers
e_diff890 = e_norm890 / bin_width / completeness #/ bin_centers
# ---------------------------------------------------------------------------
# Barger catalog (860 um); uncertainties inflated by 1.2 as for COSMOS.
# ---------------------------------------------------------------------------
bargerloc = '../Data/barger_catalog.txt'
bargercat = Table.read(bargerloc, format='ascii')
bargerfluxes = bargercat['S860']
e_bargerfluxes = bargercat['e_S860'] * 1.2
barger890mean, barger890rms, bin_edges = MonteCarloCounts(
    bargerfluxes, e_bargerfluxes)
bin_width = bin_edges[1] - bin_edges[0]
area_barger = 0.09  # survey area in deg^2 -- TODO confirm against paper
barger890 = barger890mean / area_barger
e_barger890 = barger890rms / area_barger
# NOTE: no completeness (or bin-center) correction is applied here, unlike
# the COSMOS counts above.
diffbarger890 = barger890 / bin_width
e_diffbarger890 = e_barger890 / bin_width
nbins = barger890.size
barger_bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2

# ---------------------------------------------------------------------------
# Smolcic catalog (1.3 mm), scaled to 890 um by 2.5; errors inflated by 1.5.
# ---------------------------------------------------------------------------
smolcicloc = '../Data/smolcic_catalog.txt'
smolciccat = Table.read(smolcicloc, format='ascii')
smolcicfluxes = smolciccat['S1300'] * 2.5
e_smolcicfluxes = smolciccat['e_S1300'] * 2.5 * 1.5
smolcic890mean, smolcic890rms, bin_edges = MonteCarloCounts(
    smolcicfluxes, e_smolcicfluxes)
bin_width = bin_edges[1] - bin_edges[0]
area_smolcic = 0.7 / 3.5  # effective area in deg^2 -- TODO confirm
smolcic890 = smolcic890mean / area_smolcic
e_smolcic890 = smolcic890rms / area_smolcic
diffsmolcic890 = smolcic890 / bin_width / completeness #/ bin_centers
e_diffsmolcic890 = e_smolcic890 / bin_width / completeness #/ bin_centers
nbins = smolcic890.size
smolcic_bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
# ---------------------------------------------------------------------------
# Literature number counts for comparison.
# ---------------------------------------------------------------------------

# ALESS number counts from Karim et al. 2013
alesscounts = [52.3, 32.3, 24.9, 15.6, 1.6]
e_alesscounts = [18.2, 13.6, 7.9, 12.2, 7.2]
alessfluxes = [4.8, 5.9, 7.5, 8.8, 9.7]

# SHADES counts; flux scale divided by 1.5.
shadescounts = numpy.array([831, 240, 106, 41, 17, 8.8, 3.9, 1.8, 1.0, 0.6])
shadesfluxes = numpy.array(
    [2.77, 4.87, 6.90, 8.93, 10.94, 12.95, 14.96, 16.96, 18.96, 20.97]) / 1.5

# Aretxaga luminosity function
true_centers = numpy.array([1.41, 2.44, 3.44, 4.45, 5.45, 6.46, 7.46, 8.46,
    9.46, 10.46, 11.46])
# Edges bracket the published bin centers (width 1.0 mJy).
true_edges = numpy.append(true_centers - 0.5, true_centers[-1] + 0.5)
true_diffaretxaga = numpy.array([394, 269, 176, 99.5, 49.9, 22.3, 10.3, 5.83,
    4.07, 2.94, 1.87])

# Re-derive the Aretxaga counts from the catalog itself.
aretxaga = Table.read('../Data/aretxagacatalog.fits')
aretxaga_S1100 = aretxaga['S1_1mm']
hist_aretxaga, edge_aretxaga = numpy.histogram(aretxaga_S1100, bins=true_edges)
nbins = hist_aretxaga.size
aretxaga_centers = (edge_aretxaga[:-1] + edge_aretxaga[1:]) / 2
area_aretxaga = 0.71
normaretxaga = hist_aretxaga / area_aretxaga
aretxaga_completeness = numpy.array([0.5, 0.85, 0.92, 0.95, 0.97, 0.98, 0.99,
    1.0, 1.0, 1.0, 1.0])
diffaretxaga = normaretxaga / aretxaga_completeness #/ aretxaga_centers
# ---------------------------------------------------------------------------
# Figure: differential number counts with model curves and literature data.
# ---------------------------------------------------------------------------
# set font properties
font = {'family': 'Arial',
        'weight': 'normal',
        'size': 12}
matplotlib.rc('font', **font)
matplotlib.rcParams['axes.linewidth'] = 1.5

fig = plt.figure(figsize=(5.0, 4.5))
plt.clf()

# Schechter-function model (kept for reference; plot line commented out).
Sstar = 7.
nstar = 424.
alpha = 1.9
Svector = numpy.arange(1e3) / 10  # flux grid: 0 to ~100 mJy in 0.1 mJy steps
dndS1 = nstar / Sstar
dndS2 = (Svector / Sstar) ** (-alpha)
dndS3 = numpy.exp(-Svector / Sstar)
dndS = dndS1 * dndS2 * dndS3
#line1, = plt.plot(Svector, dndS, color='blue', label='Schechter')

# Broken power law with the break at Sstar = 8 mJy (Karim+ 2013).
Sstar = 8.
Nstar = 20.
beta1 = 2.0
beta2 = 6.9
dndS1 = Nstar * (Svector / Sstar) ** (-beta1)
dndS2 = Nstar * (Svector / Sstar) ** (-beta2)
dndS = dndS1
high = Svector > Sstar
dndS[high] = dndS2[high]
#line2 = plt.plot(Svector, dndS, color='black', lw=1.5, label='Karim+ 2013')
line2 = plt.plot(Svector, dndS, color='magenta', lw=1.5, label='Karim+ 2013')

# Same broken power law with the break moved out to 15 mJy.
Sstar = 15.
Nstar = 5.
beta1 = 2.0
beta2 = 6.9
dndS1 = Nstar * (Svector / Sstar) ** (-beta1)
dndS2 = Nstar * (Svector / Sstar) ** (-beta2)
dndS = dndS1
high = Svector > Sstar
dndS[high] = dndS2[high]
line3, = plt.plot(Svector, dndS, color='blue', lw=1.5,
        label=r'PL, $S_\star = 15\,{\rm mJy}$')

data1, = plt.plot(bin_centers, diff890, 'o', label='COSMOS', color='black')
plt.errorbar(bin_centers, diff890, yerr=e_diff890, fmt='o',
        ecolor='gray', capsize=0, color='black')
data2, = plt.plot(alessfluxes, alesscounts, 'D', label='ALESS', color='pink')
plt.errorbar(alessfluxes, alesscounts, yerr=e_alesscounts, fmt='D',
        ecolor='gray', capsize=0, color='pink')
#data3, = plt.plot(barger_bin_centers, diffbarger890, 's', label='Barger',
#        color='orange')
#plt.errorbar(barger_bin_centers, diffbarger890, yerr=e_diffbarger890,
#        fmt='s', ecolor='gray', capsize=0, color='orange')
#data4, = plt.plot(smolcic_bin_centers, diffsmolcic890, 's', label='Smolcic',
#        color='orange')
#plt.errorbar(smolcic_bin_centers, diffsmolcic890, yerr=e_diffsmolcic890,
#        fmt='s', ecolor='gray', capsize=0, color='orange')
#plt.plot(shadesfluxes, shadescounts, 's', label='SHADES')

plt.yscale('log', nonposy='clip')
# BUG FIX: the x-axis scale takes 'nonposx', not 'nonposy'; the original
# keyword was ignored, so non-positive x values were never clipped.
plt.xscale('log', nonposx='clip')
plt.minorticks_on()
plt.tick_params(width=1.2, which='both')
plt.tick_params(length=2, which='minor')
plt.tick_params(length=4, which='major')
plt.axis([1., 120, .001, 300])

plt.legend(loc='lower left', numpoints=1, handletextpad=0.35,
        borderpad=0.4, labelspacing=0.18, handlelength=1.0)

plt.subplots_adjust(left=0.15, right=0.98, top=0.97, bottom=0.13, wspace=0.39)
plt.ylabel(r'$dN/dS\;{\rm (mJy}^{-1} \, {\rm deg}^{-2})$', fontsize='large')
plt.xlabel(r'$S_{870}\;{\rm (mJy)}$', fontsize='large')

savefig('../Figures/DifferentialNumberCounts.pdf')

# Deliberate drop into the debugger for interactive inspection of the
# results; remove if running non-interactively.
import pdb; pdb.set_trace()
| [
"matplotlib.pyplot.ylabel",
"pylab.savefig",
"numpy.array",
"matplotlib.rc",
"matplotlib.pyplot.errorbar",
"numpy.arange",
"numpy.mean",
"numpy.histogram",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.minorticks_on",
"numpy.exp",
"matplotlib.pyplot.axis",
"matpl... | [((1029, 1048), 'numpy.array', 'numpy.array', (['S_1100'], {}), '(S_1100)\n', (1040, 1048), False, 'import numpy\n'), ((1261, 1279), 'numpy.array', 'numpy.array', (['S_890'], {}), '(S_890)\n', (1272, 1279), False, 'import numpy\n'), ((1318, 1327), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1325, 1327), True, 'import matplotlib.pyplot as plt\n'), ((1328, 1365), 'matplotlib.pyplot.plot', 'plt.plot', (['S_1100', '(S_890 / S_1100)', '"""o"""'], {}), "(S_1100, S_890 / S_1100, 'o')\n", (1336, 1365), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1993), 'numpy.append', 'numpy.append', (['S_890', 'Oskari890'], {}), '(S_890, Oskari890)\n', (1975, 1993), False, 'import numpy\n'), ((2008, 2042), 'numpy.append', 'numpy.append', (['e_S_890', 'e_Oskari890'], {}), '(e_S_890, e_Oskari890)\n', (2020, 2042), False, 'import numpy\n'), ((2514, 2532), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (2525, 2532), False, 'import numpy\n'), ((2547, 2565), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (2558, 2565), False, 'import numpy\n'), ((2896, 2933), 'astropy.table.Table.read', 'Table.read', (['bargerloc'], {'format': '"""ascii"""'}), "(bargerloc, format='ascii')\n", (2906, 2933), False, 'from astropy.table import Table\n'), ((3464, 3482), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (3475, 3482), False, 'import numpy\n'), ((3504, 3522), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (3515, 3522), False, 'import numpy\n'), ((3699, 3737), 'astropy.table.Table.read', 'Table.read', (['smolcicloc'], {'format': '"""ascii"""'}), "(smolcicloc, format='ascii')\n", (3709, 3737), False, 'from astropy.table import Table\n'), ((4305, 4323), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (4316, 4323), False, 'import numpy\n'), ((4346, 4364), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (4357, 4364), False, 'import numpy\n'), ((4825, 4850), 'numpy.array', 'numpy.array', (['shadescounts'], 
{}), '(shadescounts)\n', (4836, 4850), False, 'import numpy\n'), ((5096, 5121), 'numpy.array', 'numpy.array', (['true_centers'], {}), '(true_centers)\n', (5107, 5121), False, 'import numpy\n'), ((5167, 5215), 'numpy.append', 'numpy.append', (['true_edges', '(true_centers[-1] + 0.5)'], {}), '(true_edges, true_centers[-1] + 0.5)\n', (5179, 5215), False, 'import numpy\n'), ((5328, 5358), 'numpy.array', 'numpy.array', (['true_diffaretxaga'], {}), '(true_diffaretxaga)\n', (5339, 5358), False, 'import numpy\n'), ((5371, 5413), 'astropy.table.Table.read', 'Table.read', (['"""../Data/aretxagacatalog.fits"""'], {}), "('../Data/aretxagacatalog.fits')\n", (5381, 5413), False, 'from astropy.table import Table\n'), ((5481, 5529), 'numpy.histogram', 'numpy.histogram', (['aretxaga_S1100'], {'bins': 'true_edges'}), '(aretxaga_S1100, bins=true_edges)\n', (5496, 5529), False, 'import numpy\n'), ((5606, 5624), 'numpy.zeros', 'numpy.zeros', (['nbins'], {}), '(nbins)\n', (5617, 5624), False, 'import numpy\n'), ((5958, 5992), 'numpy.array', 'numpy.array', (['aretxaga_completeness'], {}), '(aretxaga_completeness)\n', (5969, 5992), False, 'import numpy\n'), ((6168, 6197), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (6181, 6197), False, 'import matplotlib\n'), ((6249, 6279), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5.0, 4.5)'}), '(figsize=(5.0, 4.5))\n', (6259, 6279), True, 'import matplotlib.pyplot as plt\n'), ((6281, 6290), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6288, 6290), True, 'import matplotlib.pyplot as plt\n'), ((6518, 6545), 'numpy.exp', 'numpy.exp', (['(-Svector / Sstar)'], {}), '(-Svector / Sstar)\n', (6527, 6545), False, 'import numpy\n'), ((6930, 6999), 'matplotlib.pyplot.plot', 'plt.plot', (['Svector', 'dndS'], {'color': '"""magenta"""', 'lw': '(1.5)', 'label': '"""Karim+ 2013"""'}), "(Svector, dndS, color='magenta', lw=1.5, label='Karim+ 2013')\n", (6938, 6999), True, 'import matplotlib.pyplot as plt\n'), 
((7211, 7303), 'matplotlib.pyplot.plot', 'plt.plot', (['Svector', 'dndS'], {'color': '"""blue"""', 'lw': '(1.5)', 'label': '"""PL, $S_\\\\star = 15\\\\,{\\\\rm mJy}$"""'}), "(Svector, dndS, color='blue', lw=1.5, label=\n 'PL, $S_\\\\star = 15\\\\,{\\\\rm mJy}$')\n", (7219, 7303), True, 'import matplotlib.pyplot as plt\n'), ((7315, 7381), 'matplotlib.pyplot.plot', 'plt.plot', (['bin_centers', 'diff890', '"""o"""'], {'label': '"""COSMOS"""', 'color': '"""black"""'}), "(bin_centers, diff890, 'o', label='COSMOS', color='black')\n", (7323, 7381), True, 'import matplotlib.pyplot as plt\n'), ((7382, 7486), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['bin_centers', 'diff890'], {'yerr': 'e_diff890', 'fmt': '"""o"""', 'ecolor': '"""gray"""', 'capsize': '(0)', 'color': '"""black"""'}), "(bin_centers, diff890, yerr=e_diff890, fmt='o', ecolor='gray',\n capsize=0, color='black')\n", (7394, 7486), True, 'import matplotlib.pyplot as plt\n'), ((7500, 7568), 'matplotlib.pyplot.plot', 'plt.plot', (['alessfluxes', 'alesscounts', '"""D"""'], {'label': '"""ALESS"""', 'color': '"""pink"""'}), "(alessfluxes, alesscounts, 'D', label='ALESS', color='pink')\n", (7508, 7568), True, 'import matplotlib.pyplot as plt\n'), ((7569, 7681), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['alessfluxes', 'alesscounts'], {'yerr': 'e_alesscounts', 'fmt': '"""D"""', 'ecolor': '"""gray"""', 'capsize': '(0)', 'color': '"""pink"""'}), "(alessfluxes, alesscounts, yerr=e_alesscounts, fmt='D', ecolor=\n 'gray', capsize=0, color='pink')\n", (7581, 7681), True, 'import matplotlib.pyplot as plt\n'), ((8417, 8450), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {'nonposy': '"""clip"""'}), "('log', nonposy='clip')\n", (8427, 8450), True, 'import matplotlib.pyplot as plt\n'), ((8451, 8484), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {'nonposy': '"""clip"""'}), "('log', nonposy='clip')\n", (8461, 8484), True, 'import matplotlib.pyplot as plt\n'), ((8486, 8505), 
'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (8503, 8505), True, 'import matplotlib.pyplot as plt\n'), ((8506, 8546), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'width': '(1.2)', 'which': '"""both"""'}), "(width=1.2, which='both')\n", (8521, 8546), True, 'import matplotlib.pyplot as plt\n'), ((8547, 8587), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'length': '(2)', 'which': '"""minor"""'}), "(length=2, which='minor')\n", (8562, 8587), True, 'import matplotlib.pyplot as plt\n'), ((8588, 8628), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'length': '(4)', 'which': '"""major"""'}), "(length=4, which='major')\n", (8603, 8628), True, 'import matplotlib.pyplot as plt\n'), ((8630, 8662), 'matplotlib.pyplot.axis', 'plt.axis', (['[1.0, 120, 0.001, 300]'], {}), '([1.0, 120, 0.001, 300])\n', (8638, 8662), True, 'import matplotlib.pyplot as plt\n'), ((8677, 8794), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""', 'numpoints': '(1)', 'handletextpad': '(0.35)', 'borderpad': '(0.4)', 'labelspacing': '(0.18)', 'handlelength': '(1.0)'}), "(loc='lower left', numpoints=1, handletextpad=0.35, borderpad=0.4,\n labelspacing=0.18, handlelength=1.0)\n", (8687, 8794), True, 'import matplotlib.pyplot as plt\n'), ((9015, 9093), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'right': '(0.98)', 'top': '(0.97)', 'bottom': '(0.13)', 'wspace': '(0.39)'}), '(left=0.15, right=0.98, top=0.97, bottom=0.13, wspace=0.39)\n', (9034, 9093), True, 'import matplotlib.pyplot as plt\n'), ((9095, 9174), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$dN/dS\\\\;{\\\\rm (mJy}^{-1} \\\\, {\\\\rm deg}^{-2})$"""'], {'fontsize': '"""large"""'}), "('$dN/dS\\\\;{\\\\rm (mJy}^{-1} \\\\, {\\\\rm deg}^{-2})$', fontsize='large')\n", (9105, 9174), True, 'import matplotlib.pyplot as plt\n'), ((9172, 9228), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$S_{870}\\\\;{\\\\rm (mJy)}$"""'], 
{'fontsize': '"""large"""'}), "('$S_{870}\\\\;{\\\\rm (mJy)}$', fontsize='large')\n", (9182, 9228), True, 'import matplotlib.pyplot as plt\n'), ((9228, 9278), 'pylab.savefig', 'savefig', (['"""../Figures/DifferentialNumberCounts.pdf"""'], {}), "('../Figures/DifferentialNumberCounts.pdf')\n", (9235, 9278), False, 'from pylab import savefig\n'), ((9291, 9306), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (9304, 9306), False, 'import pdb\n'), ((321, 344), 'numpy.histogram', 'numpy.histogram', (['fluxes'], {}), '(fluxes)\n', (336, 344), False, 'import numpy\n'), ((432, 460), 'numpy.zeros', 'numpy.zeros', (['[nsource, nsim]'], {}), '([nsource, nsim])\n', (443, 460), False, 'import numpy\n'), ((633, 659), 'numpy.zeros', 'numpy.zeros', (['[nbins, nsim]'], {}), '([nbins, nsim])\n', (644, 659), False, 'import numpy\n'), ((810, 837), 'numpy.mean', 'numpy.mean', (['histPDF'], {'axis': '(1)'}), '(histPDF, axis=1)\n', (820, 837), False, 'import numpy\n'), ((852, 878), 'numpy.std', 'numpy.std', (['histPDF'], {'axis': '(1)'}), '(histPDF, axis=1)\n', (861, 878), False, 'import numpy\n'), ((1290, 1310), 'numpy.array', 'numpy.array', (['e_S_890'], {}), '(e_S_890)\n', (1301, 1310), False, 'import numpy\n'), ((1868, 1891), 'numpy.array', 'numpy.array', (['Oskari1300'], {}), '(Oskari1300)\n', (1879, 1891), False, 'import numpy\n'), ((4866, 4944), 'numpy.array', 'numpy.array', (['[2.77, 4.87, 6.9, 8.93, 10.94, 12.95, 14.96, 16.96, 18.96, 20.97]'], {}), '([2.77, 4.87, 6.9, 8.93, 10.94, 12.95, 14.96, 16.96, 18.96, 20.97])\n', (4877, 4944), False, 'import numpy\n'), ((6429, 6449), 'numpy.arange', 'numpy.arange', (['(1000.0)'], {}), '(1000.0)\n', (6441, 6449), False, 'import numpy\n'), ((564, 617), 'numpy.random.normal', 'numpy.random.normal', ([], {'loc': 'imean', 'scale': 'irms', 'size': 'nsim'}), '(loc=imean, scale=irms, size=nsim)\n', (583, 617), False, 'import numpy\n'), ((713, 761), 'numpy.histogram', 'numpy.histogram', (['obsPDF[:, isim]'], {'bins': 'bin_edges'}), '(obsPDF[:, 
isim], bins=bin_edges)\n', (728, 761), False, 'import numpy\n'), ((1912, 1937), 'numpy.array', 'numpy.array', (['e_Oskari1300'], {}), '(e_Oskari1300)\n', (1923, 1937), False, 'import numpy\n'), ((8806, 8815), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8813, 8815), True, 'import matplotlib.pyplot as plt\n')] |
"""
byceps.blueprints.admin.news.forms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
import re
from flask_babel import lazy_gettext
from wtforms import FileField, StringField, TextAreaField
from wtforms.fields.html5 import DateField, TimeField
from wtforms.validators import InputRequired, Length, Optional, Regexp
from ....util.l10n import LocalizedForm
# Valid slugs consist of lowercase ASCII letters, digits, and dashes only.
SLUG_REGEX = re.compile('^[a-z0-9-]+$')
class ChannelCreateForm(LocalizedForm):
    """Admin form to create a news channel."""

    channel_id = StringField(
        lazy_gettext('ID'),
        validators=[InputRequired(), Length(min=1, max=40)],
    )
    url_prefix = StringField(
        lazy_gettext('URL prefix'),
        validators=[InputRequired(), Length(max=80)],
    )
class _ImageFormBase(LocalizedForm):
    """Fields shared by the image create and update forms."""

    alt_text = StringField(
        lazy_gettext('Alternative text'), validators=[InputRequired()]
    )
    caption = StringField(lazy_gettext('Caption'), validators=[Optional()])
    attribution = StringField(lazy_gettext('Source'), validators=[Optional()])
class ImageCreateForm(_ImageFormBase):
    """Create a news image from an uploaded file."""

    image = FileField(
        lazy_gettext('Image file'), validators=[InputRequired()]
    )
class ImageUpdateForm(_ImageFormBase):
    # Same editable fields as the base form; no new file upload on update.
    pass
class ItemCreateForm(LocalizedForm):
    """Admin form to create a news item."""

    slug = StringField(
        lazy_gettext('Slug'),
        validators=[
            InputRequired(),
            Length(max=100),
            Regexp(
                SLUG_REGEX,
                message=lazy_gettext(
                    'Lowercase letters, digits, and dash are allowed.'
                ),
            ),
        ],
    )
    title = StringField(
        lazy_gettext('Title'),
        validators=[InputRequired(), Length(max=100)],
    )
    body = TextAreaField(lazy_gettext('Text'), validators=[InputRequired()])
    image_url_path = StringField(
        lazy_gettext('Image URL path'),
        validators=[Optional(), Length(max=100)],
    )
class ItemUpdateForm(ItemCreateForm):
    # Editing a news item uses the same fields and validation as creation.
    pass
class ItemPublishLaterForm(LocalizedForm):
    """Schedule publication of a news item for a later date and time."""

    publish_on = DateField(
        lazy_gettext('Date'), validators=[InputRequired()]
    )
    publish_at = TimeField(
        lazy_gettext('Time'), validators=[InputRequired()]
    )
| [
"re.compile",
"wtforms.validators.Length",
"wtforms.validators.Optional",
"flask_babel.lazy_gettext",
"wtforms.validators.InputRequired"
] | [((451, 477), 're.compile', 're.compile', (['"""^[a-z0-9-]+$"""'], {}), "('^[a-z0-9-]+$')\n", (461, 477), False, 'import re\n'), ((558, 576), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""ID"""'], {}), "('ID')\n", (570, 576), False, 'from flask_babel import lazy_gettext\n'), ((674, 700), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""URL prefix"""'], {}), "('URL prefix')\n", (686, 700), False, 'from flask_babel import lazy_gettext\n'), ((808, 840), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Alternative text"""'], {}), "('Alternative text')\n", (820, 840), False, 'from flask_babel import lazy_gettext\n'), ((887, 910), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Caption"""'], {}), "('Caption')\n", (899, 910), False, 'from flask_babel import lazy_gettext\n'), ((956, 978), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Source"""'], {}), "('Source')\n", (968, 978), False, 'from flask_babel import lazy_gettext\n'), ((1057, 1083), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Image file"""'], {}), "('Image file')\n", (1069, 1083), False, 'from flask_babel import lazy_gettext\n'), ((1225, 1245), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Slug"""'], {}), "('Slug')\n", (1237, 1245), False, 'from flask_babel import lazy_gettext\n'), ((1556, 1577), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Title"""'], {}), "('Title')\n", (1568, 1577), False, 'from flask_babel import lazy_gettext\n'), ((1645, 1665), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Text"""'], {}), "('Text')\n", (1657, 1665), False, 'from flask_babel import lazy_gettext\n'), ((1728, 1758), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Image URL path"""'], {}), "('Image URL path')\n", (1740, 1758), False, 'from flask_babel import lazy_gettext\n'), ((1917, 1937), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Date"""'], {}), "('Date')\n", (1929, 1937), False, 'from flask_babel import lazy_gettext\n'), ((1985, 2005), 'flask_babel.lazy_gettext', 'lazy_gettext', 
(['"""Time"""'], {}), "('Time')\n", (1997, 2005), False, 'from flask_babel import lazy_gettext\n'), ((703, 718), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (716, 718), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((720, 734), 'wtforms.validators.Length', 'Length', ([], {'max': '(80)'}), '(max=80)\n', (726, 734), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((843, 858), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (856, 858), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((913, 923), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (921, 923), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((981, 991), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (989, 991), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((1086, 1101), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (1099, 1101), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((1269, 1284), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (1282, 1284), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((1298, 1313), 'wtforms.validators.Length', 'Length', ([], {'max': '(100)'}), '(max=100)\n', (1304, 1313), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((1580, 1595), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (1593, 1595), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((1597, 1612), 'wtforms.validators.Length', 'Length', ([], {'max': '(100)'}), '(max=100)\n', (1603, 1612), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((1668, 1683), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), 
'()\n', (1681, 1683), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((1761, 1771), 'wtforms.validators.Optional', 'Optional', ([], {}), '()\n', (1769, 1771), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((1773, 1788), 'wtforms.validators.Length', 'Length', ([], {'max': '(100)'}), '(max=100)\n', (1779, 1788), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((1940, 1955), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (1953, 1955), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((2008, 2023), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (2021, 2023), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((590, 605), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (603, 605), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((607, 628), 'wtforms.validators.Length', 'Length', ([], {'min': '(1)', 'max': '(40)'}), '(min=1, max=40)\n', (613, 628), False, 'from wtforms.validators import InputRequired, Length, Optional, Regexp\n'), ((1387, 1451), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Lowercase letters, digits, and dash are allowed."""'], {}), "('Lowercase letters, digits, and dash are allowed.')\n", (1399, 1451), False, 'from flask_babel import lazy_gettext\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import subprocess
from mephisto.operations.operator import Operator
from mephisto.operations.utils import get_root_dir
from mephisto.tools.scripts import load_db_and_process_config
from mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (
SharedStaticTaskState,
)
import hydra
from omegaconf import DictConfig
from dataclasses import dataclass, field
from typing import List, Any
import json
# Absolute path to the directory containing this script.
TASK_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
# Hydra defaults list: this schema plus the "conf/example" config group.
defaults = ["_self_", {"conf": "example"}]
from mephisto.operations.hydra_config import RunScriptConfig, register_script_config
@dataclass
class TestScriptConfig(RunScriptConfig):
    # Structured Hydra config for this run script: layers the
    # "conf/example" config group on top of this schema and points the
    # task at this script's directory.
    defaults: List[Any] = field(default_factory=lambda: defaults)
    task_dir: str = TASK_DIRECTORY
# Register the schema with Mephisto/Hydra under the name "scriptconfig".
register_script_config(name="scriptconfig", module=TestScriptConfig)
# TODO it would be nice if this was automated in the way that it
# is for ParlAI custom frontend tasks
def build_task(task_dir):
    """Rebuild the frontend bundle for this task.

    Runs ``npm install`` and ``npm run dev`` inside the task's ``webapp``
    directory after clearing any previous ``build`` output.

    Args:
        task_dir: root directory of the task (contains ``webapp/``).

    Raises:
        Exception: if npm is unavailable or the webpack build fails.
    """
    frontend_source_dir = os.path.join(task_dir, "webapp")
    frontend_build_dir = os.path.join(frontend_source_dir, "build")
    return_dir = os.getcwd()
    os.chdir(frontend_source_dir)
    try:
        if os.path.exists(frontend_build_dir):
            shutil.rmtree(frontend_build_dir)
        packages_installed = subprocess.call(["npm", "install"])
        if packages_installed != 0:
            raise Exception(
                "please make sure npm is installed, otherwise view "
                "the above error for more info."
            )
        webpack_complete = subprocess.call(["npm", "run", "dev"])
        if webpack_complete != 0:
            raise Exception(
                "Webpack appears to have failed to build your "
                "frontend. See the above error for more information."
            )
    finally:
        # BUG FIX: restore the caller's working directory even when the
        # build raises partway through (previously it was left changed).
        os.chdir(return_dir)
import os
import json
def load_data(data_dir):
    """Load meeting transcripts and segment them for annotation.

    Reads every ``*.jsonl`` file in *data_dir*; each line must be a JSON
    meeting record with ``meeting_transcripts``, ``general_query_list``
    and ``topic_list`` keys.  Each topic's relevant turn span is chunked
    into segments of 2-7 turns and roughly 200-500 words.

    Returns:
        list[dict]: one dict per segment with ``abstract``, ``turns``,
        ``topic`` and ``section`` keys.
    """
    all_data = []
    for file in os.listdir(data_dir):
        full_path = os.path.join(data_dir, file)
        if not full_path.endswith('jsonl'):
            continue
        with open(full_path) as f:
            for line in f:
                all_data.append(json.loads(line))

    def clean_data(text):
        # Strip transcript noise markers and normalize spelled-out acronyms.
        text = text.replace('{ vocalsound } ', '')
        text = text.replace('{ vocalsound }', '')
        text = text.replace('{ disfmarker } ', '')
        # BUG FIX: this line previously duplicated the one above; the
        # no-trailing-space variant was never removed.
        text = text.replace('{ disfmarker }', '')
        text = text.replace('{disfmarker}', '')
        text = text.replace("{vocalsound}", '')
        text = text.replace('a_m_i_', 'ami')
        text = text.replace('l_c_d_', 'lcd')
        text = text.replace('p_m_s', 'pms')
        text = text.replace('t_v_', 'tv')
        text = text.replace('{ pause } ', '')
        text = text.replace('{pause}', '')
        text = text.replace('{ nonvocalsound } ', '')
        text = text.replace('{nonvocalsound}', '')
        text = text.replace('{ gap } ', '')
        text = text.replace('{gap}', '')
        return text

    data_mturk = []
    # Segment size limits: 2-7 turns and roughly 200-500 words each.
    min_turns_per_segment = 2
    max_turns_per_segment = 7
    max_words_per_segment = 500
    min_words_per_segment = 200
    for item in all_data:
        turns = item['meeting_transcripts']
        turns_formatted = [turn['speaker'] + ': ' + clean_data(turn['content']) for turn in turns]
        overall_summ = item['general_query_list'][0]['answer']
        for topic_seg in item['topic_list']:
            topic = topic_seg['topic']
            for span in topic_seg['relevant_text_span']:
                start, end = int(span[0]), int(span[1])
                curr_segment, curr_word_cnt = [], 0
                for turn_idx in range(start, end + 1):
                    single_turn = turns_formatted[turn_idx]
                    # skip empty turn
                    if len(single_turn.split(': ')[1].strip()) == 0:
                        continue
                    # Flush the running segment when adding this turn would
                    # exceed the word limit (with enough turns collected) or
                    # the turn limit (with enough words collected).
                    if (curr_word_cnt + len(single_turn.split()) > max_words_per_segment and len(curr_segment) >= min_turns_per_segment) \
                            or (len(curr_segment) + 1 > max_turns_per_segment and curr_word_cnt >= min_words_per_segment):
                        data_mturk.append(
                            {
                                'abstract': overall_summ,
                                'turns': curr_segment,
                                'topic': topic,
                                'section': ' '.join(curr_segment)
                            }
                        )
                        curr_segment = [single_turn]
                        curr_word_cnt = len(single_turn.split())
                        continue
                    curr_segment.append(single_turn)
                    curr_word_cnt += len(single_turn.split())
                # Emit any trailing partial segment.
                if curr_segment:
                    data_mturk.append(
                        {
                            'abstract': overall_summ,
                            'turns': curr_segment,
                            'topic': topic,
                            'section': ' '.join(curr_segment)
                        }
                    )
    return data_mturk
@hydra.main(config_path="hydra_configs", config_name="scriptconfig")
def main(cfg: DictConfig) -> None:
    """Build the frontend and launch the static annotation task."""
    records = load_data(cfg.mephisto.task.data_dir)

    def _accept_onboarding(onboarding_data):
        # A worker passes onboarding iff they answered "1".
        return onboarding_data['outputs']['answer'] == "1"

    # Wrap the speaker prefix of each turn in <strong> tags for display.
    # NOTE(review): str.replace swaps every ':' in the turn, not only the
    # speaker delimiter -- presumably turn text rarely contains colons.
    for record in records:
        record['turns'] = [
            '<strong>' + turn.replace(':', ":</strong>") for turn in record['turns']
        ]

    shared_state = SharedStaticTaskState(
        static_task_data=records[:200],
        validate_onboarding=_accept_onboarding,
    )
    build_task(cfg.task_dir)

    db, cfg = load_db_and_process_config(cfg)
    operator = Operator(db)
    operator.validate_and_run_config(cfg.mephisto, shared_state)
    operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)


if __name__ == "__main__":
    main()
| [
"os.path.exists",
"json.loads",
"os.listdir",
"hydra.main",
"mephisto.tools.scripts.load_db_and_process_config",
"os.path.join",
"os.getcwd",
"os.chdir",
"shutil.rmtree",
"mephisto.abstractions.blueprints.abstract.static_task.static_blueprint.SharedStaticTaskState",
"subprocess.call",
"mephist... | [((995, 1063), 'mephisto.operations.hydra_config.register_script_config', 'register_script_config', ([], {'name': '"""scriptconfig"""', 'module': 'TestScriptConfig'}), "(name='scriptconfig', module=TestScriptConfig)\n", (1017, 1063), False, 'from mephisto.operations.hydra_config import RunScriptConfig, register_script_config\n'), ((5507, 5574), 'hydra.main', 'hydra.main', ([], {'config_path': '"""hydra_configs"""', 'config_name': '"""scriptconfig"""'}), "(config_path='hydra_configs', config_name='scriptconfig')\n", (5517, 5574), False, 'import hydra\n'), ((682, 707), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (697, 707), False, 'import os\n'), ((919, 959), 'dataclasses.field', 'field', ([], {'default_factory': '(lambda : defaults)'}), '(default_factory=lambda : defaults)\n', (924, 959), False, 'from dataclasses import dataclass, field\n'), ((1267, 1299), 'os.path.join', 'os.path.join', (['task_dir', '"""webapp"""'], {}), "(task_dir, 'webapp')\n", (1279, 1299), False, 'import os\n'), ((1325, 1367), 'os.path.join', 'os.path.join', (['frontend_source_dir', '"""build"""'], {}), "(frontend_source_dir, 'build')\n", (1337, 1367), False, 'import os\n'), ((1386, 1397), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1395, 1397), False, 'import os\n'), ((1402, 1431), 'os.chdir', 'os.chdir', (['frontend_source_dir'], {}), '(frontend_source_dir)\n', (1410, 1431), False, 'import os\n'), ((1439, 1473), 'os.path.exists', 'os.path.exists', (['frontend_build_dir'], {}), '(frontend_build_dir)\n', (1453, 1473), False, 'import os\n'), ((1542, 1577), 'subprocess.call', 'subprocess.call', (["['npm', 'install']"], {}), "(['npm', 'install'])\n", (1557, 1577), False, 'import subprocess\n'), ((1779, 1817), 'subprocess.call', 'subprocess.call', (["['npm', 'run', 'dev']"], {}), "(['npm', 'run', 'dev'])\n", (1794, 1817), False, 'import subprocess\n'), ((2013, 2033), 'os.chdir', 'os.chdir', (['return_dir'], {}), '(return_dir)\n', (2021, 2033), False, 
'import os\n'), ((2116, 2136), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (2126, 2136), False, 'import os\n'), ((5973, 6077), 'mephisto.abstractions.blueprints.abstract.static_task.static_blueprint.SharedStaticTaskState', 'SharedStaticTaskState', ([], {'static_task_data': 'raw_data[:200]', 'validate_onboarding': 'onboarding_always_valid'}), '(static_task_data=raw_data[:200], validate_onboarding=\n onboarding_always_valid)\n', (5994, 6077), False, 'from mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import SharedStaticTaskState\n'), ((6141, 6172), 'mephisto.tools.scripts.load_db_and_process_config', 'load_db_and_process_config', (['cfg'], {}), '(cfg)\n', (6167, 6172), False, 'from mephisto.tools.scripts import load_db_and_process_config\n'), ((6188, 6200), 'mephisto.operations.operator.Operator', 'Operator', (['db'], {}), '(db)\n', (6196, 6200), False, 'from mephisto.operations.operator import Operator\n'), ((1483, 1516), 'shutil.rmtree', 'shutil.rmtree', (['frontend_build_dir'], {}), '(frontend_build_dir)\n', (1496, 1516), False, 'import shutil\n'), ((2158, 2186), 'os.path.join', 'os.path.join', (['data_dir', 'file'], {}), '(data_dir, file)\n', (2170, 2186), False, 'import os\n'), ((2347, 2363), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2357, 2363), False, 'import json\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9b1 on 2015-11-12 03:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the Place -> Location foreign key and restricts Person.gender to a
    # fixed set of choices.
    dependencies = [
        ('arcfire', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='place',
            name='location',
            # Optional (blank/null) link to a Location; deleting the Location
            # cascades to the Place rows; reverse accessor is `places_primary`.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='places_primary', to='arcfire.Location'),
        ),
        migrations.AlterField(
            model_name='person',
            name='gender',
            # Blank allowed; stored values are limited to the four choice keys.
            field=models.CharField(blank=True, choices=[('f', 'Female'), ('m', 'Male'), ('none', 'None'), ('other', 'Other')], max_length=10),
        ),
    ]
| [
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((421, 565), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""places_primary"""', 'to': '"""arcfire.Location"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='places_primary', to='arcfire.Location')\n", (438, 565), False, 'from django.db import migrations, models\n'), ((682, 810), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('f', 'Female'), ('m', 'Male'), ('none', 'None'), ('other', 'Other')]", 'max_length': '(10)'}), "(blank=True, choices=[('f', 'Female'), ('m', 'Male'), (\n 'none', 'None'), ('other', 'Other')], max_length=10)\n", (698, 810), False, 'from django.db import migrations, models\n')] |
from django.urls import path
from django.conf.urls import url
from . import views
from django.views.generic.base import RedirectView
from rest_framework.urlpatterns import format_suffix_patterns
app_name = 'userprofile'
# REST endpoints for user profiles. Format suffixes (e.g. ".json") are added
# to these patterns further down via format_suffix_patterns.
urlpatterns = [
    path('profile/<int:pk>/', views.OtherUserDetail.as_view()), #includes favorites, and friends' recipes for the feed
    path('profile/<int:pk>/favorites/', views.UserFavoriteList.as_view()),
    path('profile/<int:pk>/friends/', views. UserFriendsList.as_view()),
    path('profile/search/<str:query>/', views. UserSearchList.as_view())
]
urlpatterns = format_suffix_patterns(urlpatterns) | [
"rest_framework.urlpatterns.format_suffix_patterns"
] | [((595, 630), 'rest_framework.urlpatterns.format_suffix_patterns', 'format_suffix_patterns', (['urlpatterns'], {}), '(urlpatterns)\n', (617, 630), False, 'from rest_framework.urlpatterns import format_suffix_patterns\n')] |
#!/usr/bin/env python
"""
analyse Elasticsearch query
"""
import json
from elasticsearch import Elasticsearch
from elasticsearch import logger as es_logger
from collections import defaultdict, Counter
import re
import os
from datetime import datetime
# Preprocess terms for TF-IDF
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
# progress bar
from tqdm import tqdm
# ploting
import matplotlib.pyplot as plt
# LOG
import logging
from logging.handlers import RotatingFileHandler
# Word embedding for evaluation
from sentence_transformers import SentenceTransformer
from sklearn.manifold import TSNE
import seaborn as sns
from sklearn.cluster import KMeans, AgglomerativeClustering
from sklearn.metrics.pairwise import cosine_similarity
from scipy import sparse
import scipy.spatial as sp
# Spatial entity as descriptor :
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
# venn
from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud
import operator
# Global var on Levels on spatial and temporal axis
spatialLevels = ['city', 'state', 'country']
temporalLevels = ['day', 'week', 'month', 'period']
def elasticsearch_query(query_fname, logger):
    """
    Run the Elasticsearch query stored in a file and group tweets by locality.

    :param query_fname: path of a file containing the JSON body of the ES query
    :param logger: logger of the main program
    :return: tweetsByCityAndDate, a defaultdict(list) mapping
        "city_state_country" keys to lists of
        {"tweet": cleaned full_text, "created_at": datetime} dicts
    """
    # Elastic search credentials
    client = Elasticsearch("http://localhost:9200")
    es_logger.setLevel(logging.WARNING)
    index = "twitter"
    # Define a Query
    query = open(query_fname, "r").read()
    result = Elasticsearch.search(client, index=index, body=query, scroll='2m', size=5000)
    # Append all pages form scroll search : avoid the 10k limitation of ElasticSearch
    results = avoid10kquerylimitation(result, client, logger)
    # Initiate a dict for each city append all Tweets content
    tweetsByCityAndDate = defaultdict(list)
    for hits in results:
        # parse Java date : EEE MMM dd HH:mm:ss Z yyyy
        inDate = hits["_source"]["created_at"]
        parseDate = datetime.strptime(inDate, "%a %b %d %H:%M:%S %z %Y")
        try:# geodocing may be bad
            geocoding = hits["_source"]["rest"]["features"][0]["properties"]
        except:
            continue # skip this iteraction
        if "country" in hits["_source"]["rest"]["features"][0]["properties"]:
            # locaties do not necessarily have an associated stated
            # Fallback chain for the grouping key:
            # city_state_country -> city_none_country -> none_none_country -> none_none_none
            try:
                cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
                                   str(hits["_source"]["rest"]["features"][0]["properties"]["state"]) + "_" + \
                                   str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
            except: # there is no state in geocoding
                try:
                    logger.debug(hits["_source"]["rest"]["features"][0]["properties"]["city"] + " has no state")
                    cityStateCountry = str(hits["_source"]["rest"]["features"][0]["properties"]["city"]) + "_" + \
                                       str("none") + "_" + \
                                       str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
                except: # there is no city as well : only country
                    # print(json.dumps(hits["_source"], indent=4))
                    try: #
                        cityStateCountry = str("none") + "_" + \
                                           str("none") + "_" + \
                                           str(hits["_source"]["rest"]["features"][0]["properties"]["country"])
                    except:
                        cityStateCountry = str("none") + "_" + \
                                           str("none") + "_" + \
                                           str("none")
        try:
            tweetsByCityAndDate[cityStateCountry].append(
                {
                    "tweet": preprocessTweets(hits["_source"]["full_text"]),
                    "created_at": parseDate
                }
            )
        except:
            # could not build an entry for this hit; dump it for inspection
            print(json.dumps(hits["_source"], indent=4))
    # biotexInputBuilder(tweetsByCityAndDate)
    # pprint(tweetsByCityAndDate)
    return tweetsByCityAndDate
def avoid10kquerylimitation(result, client, logger):
    """
    Page through an Elasticsearch scroll context and collect every hit.

    A single ES search response is capped at 10 000 documents; this helper
    keeps calling the scroll API until a page comes back empty (or a call
    fails) and concatenates all pages.

    Behaviour note (kept from the original): the scroll id is always read
    from the *initial* response object, and the hits already present in that
    initial response are not included in the returned list.

    :param result: response dict of the initial Elasticsearch query
    :param client: Elasticsearch client used to issue the scroll calls
    :param logger: logger of the main program
    :return: list of all scrolled hits
    """
    remaining = result['hits']['total']["value"]
    logger.info("Number of elasticsearch scroll: " + str(remaining))
    collected = []
    # Progress bar
    progress = tqdm(total=remaining)
    while remaining > 0:
        try:
            page = client.scroll(scroll_id=result['_scroll_id'], scroll='60s')
            page_hits = page['hits']['hits']
            collected.extend(page_hits)
            remaining = len(page_hits)
            progress.update(remaining)
        except:
            progress.close()
            logger.error("elasticsearch search scroll failed")
            break
    progress.close()
    return collected
def preprocessTweets(text):
    """
    Strip URLs from a tweet's full_text.

    Two passes are applied: one pattern catching "www." / "http(s)://"
    style links, then a broader "http..." catch-all. Usernames and
    hashtags are deliberately left untouched (see the disabled lines below).

    :param text: raw tweet text
    :return: the text with every URL removed (surrounding spaces are kept)
    """
    link_pattern = re.compile('((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))')
    catch_all_pattern = re.compile(r'http\S+')
    without_links = link_pattern.sub('', text)
    without_links = catch_all_pattern.sub('', without_links)
    # Username / hashtag removal kept disabled, as in the original pipeline:
    # re.sub('@[^\s]+', '', ...)        # remove usernames
    # re.sub(r'#([^\s]+)', r'\1', ...)  # remove the # in #hashtag
    return without_links
def matrixOccurenceBuilder(tweetsofcity, matrixAggDay_fout, matrixOccurence_fout, save_intermediaire_files, logger):
    """
    Create a matrix of :
        - line : (city,day)
        - column : terms
        - value of cells : TF (term frequency)
    Help found here :
    http://www.xavierdupre.fr/app/papierstat/helpsphinx/notebooks/artificiel_tokenize_features.html
    https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76
    :param tweetsofcity: dict mapping "city_state_country" to a list of
        {"tweet": text, "created_at": datetime} dicts
    :param matrixAggDay_fout: file to save the (city, day) aggregated documents
    :param matrixOccurence_fout: file to save the term-occurrence matrix
    :param save_intermediaire_files: if True, write both CSV files above
    :param logger: logger of the main program
    :return: matrixOccurence, a DataFrame indexed by "city_date" labels with
        one column per term and raw counts as values
    """
    # initiate matrix of tweets aggregate by day
    # col = ['city', 'day', 'tweetsList', 'bow']
    col = ['city', 'day', 'tweetsList']
    # BUGFIX / PERF: rows are accumulated in plain Python lists and the
    # DataFrame is built once at the end. The previous row-by-row
    # DataFrame.append() is deprecated and quadratic, and -- because
    # cityDayList was appended *before* the try/except around append() --
    # a failed append left cityDayList longer than the matrix, breaking the
    # row/index alignment of matrixOccurence below.
    aggregated_rows = []
    cityDayList = []
    logger.info("start full_text concatenation for city & day")
    pbar = tqdm(total=len(tweetsofcity))
    for city in tweetsofcity:
        # create a table with 2 columns : tweet and created_at for a specific city
        matrix = pd.DataFrame(tweetsofcity[city])
        # Aggregate list of tweets by single day for specifics cities
        ## Loop on days for a city
        period = matrix['created_at'].dt.date
        period = period.unique()
        period.sort()
        for day in period:
            # aggregate city and date document
            document = '. \n'.join(matrix.loc[matrix['created_at'].dt.date == day]['tweet'].tolist())
            cityDayList.append(city + "_" + str(day))
            aggregated_rows.append({
                'city': city,
                'day': day,
                'tweetsList': document
            })
        pbar.update(1)
    pbar.close()
    # Build the aggregate table in one shot (keeps columns in `col` order)
    matrixAggDay = pd.DataFrame(aggregated_rows, columns=col)
    if save_intermediaire_files:
        logger.info("Saving file: matrix of full_text concatenated by day & city: "+str(matrixAggDay_fout))
        matrixAggDay.to_csv(matrixAggDay_fout)
    # Count terms with sci-kit learn
    cd = CountVectorizer(
        stop_words='english',
        #preprocessor=sklearn_vectorizer_no_number_preprocessor,
        #min_df=2, # token at least present in 2 cities : reduce size of matrix
        max_features=25000,
        ngram_range=(1, 1),
        token_pattern='[a-zA-Z0-9#@]+', #remove user name, i.e term starting with @ for personnal data issue
        # strip_accents= "ascii" # remove token with special character (trying to keep only english word)
    )
    cd.fit(matrixAggDay['tweetsList'])
    res = cd.transform(matrixAggDay["tweetsList"])
    countTerms = res.todense()
    # create matrix
    ## get terms :
    # voc = cd.vocabulary_
    # listOfTerms = {term for term, index in sorted(voc.items(), key=lambda item: item[1])}
    listOfTerms = cd.get_feature_names()
    ##initiate matrix with count for each terms
    matrixOccurence = pd.DataFrame(data=countTerms[0:, 0:], index=cityDayList, columns=listOfTerms)
    # save to file
    if save_intermediaire_files:
        logger.info("Saving file: occurence of term: "+str(matrixOccurence_fout))
        matrixOccurence.to_csv(matrixOccurence_fout)
    return matrixOccurence
def spatiotemporelFilter(matrix, listOfcities='all', spatialLevel='city', period='all', temporalLevel='day'):
    """
    Keep only the matrix rows matching a set of cities and/or a period.

    Row labels are expected to start with "<city>_" and to embed a
    "%Y-%m-%d" date somewhere in the label.

    :param matrix: DataFrame whose index holds "<city>_..." labels
    :param listOfcities: 'all' (no spatial filter) or an iterable of city names
    :param spatialLevel: must be one of the module-level spatialLevels
    :param period: 'all' (no temporal filter) or an iterable of date objects
    :param temporalLevel: must be one of the module-level temporalLevels
    :return: the filtered matrix, or 1 when a level name is invalid
    """
    if spatialLevel not in spatialLevels or temporalLevel not in temporalLevels:
        print("wrong level, please double check")
        return 1
    # Spatial filter: keep rows whose label starts with one of the cities.
    # The boolean masks are OR-ed together via += on a bool array.
    if listOfcities != 'all':
        keep_city = np.zeros((1, len(matrix.index)), dtype=bool)[0]
        for city in listOfcities:
            keep_city += matrix.index.str.startswith(str(city) + "_")
        matrix = matrix.loc[keep_city]
    # Temporal filter: keep rows whose label contains one of the dates
    if str(period) != 'all':
        keep_date = np.zeros((1, len(matrix.index)), dtype=bool)[0]
        for date in period:
            keep_date += matrix.index.str.contains(date.strftime('%Y-%m-%d'))
        matrix = matrix.loc[keep_date]
    return matrix
def HTFIDF(matrixOcc, matrixHTFIDF_fname, biggestHTFIDFscore_fname, listOfcities='all', spatialLevel='city',
           period='all', temporalLevel='day'):
    """
    Aggregate on spatial and temporel and then compute TF-IDF (H-TFIDF)

    :param matrixOcc: Matrix with TF already compute (rows "<city>_<date>",
        one column per term)
    :param matrixHTFIDF_fname: output CSV for the full H-TFIDF matrix
    :param biggestHTFIDFscore_fname: output CSV for the top-500 terms per row
    :param listOfcities: filter on this cities
    :param spatialLevel: city / state / country / world
    :param period: Filter on this period
    :param temporalLevel: day / week (month have to be implemented)
    :return: None (results are written into the two CSV files)
    """
    matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfcities,
                                     spatialLevel='state', period=period)
    # Aggregate by level
    ## Create 4 new columns : city, State, Country and date
    def splitindex(row):
        return row.split("_")
    matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \
        zip(*matrixOcc.index.map(splitindex))
    if temporalLevel == 'day':
        ## In space
        if spatialLevel == 'city':
            # do nothing
            pass
        elif spatialLevel == 'state' and temporalLevel == 'day':
            matrixOcc = matrixOcc.groupby("state").sum()
        elif spatialLevel == 'country' and temporalLevel == 'day':
            matrixOcc = matrixOcc.groupby("country").sum()
    elif temporalLevel == "week":
        matrixOcc.date = pd.to_datetime((matrixOcc.date)) - pd.to_timedelta(7, unit='d')# convert date into datetime
        ## in space and time
        if spatialLevel == 'country':
            matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="W")]).sum()
        elif spatialLevel == 'state':
            matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="W")]).sum()
        elif spatialLevel == 'city':
            matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="W")]).sum()
    # Compute TF-IDF
    ## compute TF : for each doc, devide count by Sum of all count
    ### Sum fo all count by row
    matrixOcc['sumCount'] = matrixOcc.sum(axis=1)
    ### Devide each cell by these sums
    listOfTerms = matrixOcc.keys()
    matrixOcc = matrixOcc.loc[:, listOfTerms].div(matrixOcc['sumCount'], axis=0)
    ## Compute IDF : create a vector of length = nb of termes with IDF value
    idf = pd.Series(index=matrixOcc.keys(), dtype=float)
    ### N : nb of doucments <=> nb of rows :
    N = matrixOcc.shape[0]
    ### DFt : nb of document that contains the term
    DFt = matrixOcc.astype(bool).sum(axis=0) # Tip : convert all value in boolean. float O,O will be False, other True
    #### Not a Number when value 0 because otherwise log is infinite
    DFt.replace(0, np.nan, inplace=True)
    ### compute log(N/DFt)
    idf = np.log10(N / (DFt))
    # idf = np.log10( N / (DFt * 10))
    ## compute TF-IDF
    matrixTFIDF = matrixOcc * idf
    # matrixTFIDF = matrixOcc * idf * idf
    ## remove terms if for all documents value are Nan
    matrixTFIDF.dropna(axis=1, how='all', inplace=True)
    # Save file
    matrixTFIDF.to_csv(matrixHTFIDF_fname)
    # Export N biggest TF-IDF score:
    top_n = 500
    extractBiggest = pd.DataFrame(index=matrixTFIDF.index, columns=range(0, top_n))
    for row in matrixTFIDF.index:
        try:
            row_without_zero = matrixTFIDF.loc[row]# we remove term with a score = 0
            row_without_zero = row_without_zero[ row_without_zero !=0 ]
            try:
                extractBiggest.loc[row] = row_without_zero.nlargest(top_n).keys()
            except:
                extractBiggest.loc[row] = row_without_zero.nlargest(len(row_without_zero)).keys()
        except:
            # BUGFIX: `logger` is not defined in this function's scope, so the
            # previous logger.debug(...) call raised a NameError from inside
            # this exception handler instead of logging. Use the root logger
            # (configured by logsetup) instead.
            logging.getLogger().debug("H-TFIDF: city "+str(matrixTFIDF.loc[row].name)+ "not enough terms")
    extractBiggest.to_csv(biggestHTFIDFscore_fname+".old.csv")
    # Transpose this table in order to share the same structure with TF-IDF classifical biggest score :
    hbt = pd.DataFrame()
    extractBiggest = extractBiggest.reset_index()
    for index, row in extractBiggest.iterrows():
        hbtrow = pd.DataFrame(row.drop([spatialLevel, "date"]).values, columns=["terms"])
        hbtrow[spatialLevel] = row[spatialLevel]
        hbtrow["date"] = row["date"]
        hbt = hbt.append(hbtrow, ignore_index=True)
    hbt.to_csv(biggestHTFIDFscore_fname)
def TFIDF_TF_with_corpus_state(elastic_query_fname, logger, save_intermediaire_files, nb_biggest_terms=500, path_for_filesaved="./",
                               spatial_hiearchy="country", temporal_period='all', listOfCities='all'):
    """
    Compute TFIDF and TF from an elastic query file
    1 doc = 1 tweet
    Corpus = by hiearchy level, i.e. : state or country
    :param elastic_query_fname: filename and path of the elastic query
    :param logger: logger of the main program
    :param save_intermediaire_files: if True, save one TF-IDF CSV per locality
    :param nb_biggest_terms: How many biggest term are to keep
    :param path_for_filesaved: directory where output CSV files are written
    :param spatial_hiearchy: define the size of the corpus : state or country
    :param temporal_period: 'all' or a date range used as a filter
    :param listOfCities: If you want to filter out some cities, you can
    :return: None (top-score tables are written to CSV files)
    """
    # tfidfStartDate = date(2020, 1, 23)
    # tfidfEndDate = date(2020, 1, 30)
    # temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate)
    # listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
    # listOfState = ["England", "Scotland", "Northern Ireland", "Wales"]
    tweets = elasticsearch_query(elastic_query_fname, logger)
    if listOfCities == 'all':
        # build the city/state/country lists from the "city_state_country" keys
        listOfCities = []
        listOfStates = []
        listOfCountry = []
        for triple in tweets:
            splitted = triple.split("_")
            listOfCities.append(splitted[0])
            listOfStates.append(splitted[1])
            listOfCountry.append(splitted[2])
        listOfCities = list(set(listOfCities))
        listOfStates = list(set(listOfStates))
        listOfCountry = list(set(listOfCountry))
    # reorganie tweets (dict : tweets by cities) into dataframe (city and date)
    matrixAllTweets = pd.DataFrame()
    for tweetByCity in tweets.keys():
        # Filter cities :
        city = str(tweetByCity).split("_")[0]
        state = str(tweetByCity).split("_")[1]
        country = str(tweetByCity).split("_")[2]
        if city in listOfCities:
            matrix = pd.DataFrame(tweets[tweetByCity])
            matrix['city'] = city
            matrix['state'] = state
            matrix['country'] = country
            matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True)
    # Split datetime into date and time
    matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']]
    matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']]
    # Filter by a period
    if temporal_period != "all":
        mask = ((matrixAllTweets["date"] >= temporal_period.min()) & (matrixAllTweets["date"] <= temporal_period.max()))
        matrixAllTweets = matrixAllTweets.loc[mask]
    # Compute TF-IDF and TF by state
    extractBiggestTF_allstates = pd.DataFrame()
    extractBiggestTFIDF_allstates = pd.DataFrame()
    if spatial_hiearchy == "country":
        listOfLocalities = listOfCountry
    elif spatial_hiearchy == "state":
        listOfLocalities = listOfStates
    elif spatial_hiearchy == "city":
        listOfLocalities = listOfCities
    for locality in listOfLocalities:
        matrix_by_locality = matrixAllTweets[matrixAllTweets[spatial_hiearchy] == locality]
        vectorizer = TfidfVectorizer(
            stop_words='english',
            min_df=0.001,
            # max_features=50000,
            ngram_range=(1, 1),
            # NOTE(review): '[<KEY>' looks like a scrubbed/placeholder pattern
            # (cf. '[a-zA-Z0-9#]+' used in TFIDF_TF_on_whole_corpus) --
            # confirm against the original source before relying on it.
            token_pattern='[<KEY>',
        )
        # logger.info("Compute TF-IDF on corpus = "+spatial_hiearchy)
        try:
            vectors = vectorizer.fit_transform(matrix_by_locality['tweet'])
            feature_names = vectorizer.get_feature_names()
            dense = vectors.todense()
            denselist = dense.tolist()
        except:
            logger.info("Impossible to compute TF-IDF on: "+locality)
            continue
        ## matrixTFIDF
        TFIDFClassical = pd.DataFrame(denselist, columns=feature_names)
        locality_format = locality.replace("/", "_")
        locality_format = locality_format.replace(" ", "_")
        if save_intermediaire_files:
            logger.info("saving TF-IDF File: "+path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv")
            TFIDFClassical.to_csv(path_for_filesaved+"/tfidf_on_"+locality_format+"_corpus.csv")
        ## Extract N TOP ranking score
        extractBiggest = TFIDFClassical.max().nlargest(nb_biggest_terms)
        extractBiggest = extractBiggest.to_frame()
        extractBiggest = extractBiggest.reset_index()
        extractBiggest.columns = ['terms', 'score']
        extractBiggest[spatial_hiearchy] = locality
        extractBiggestTFIDF_allstates = extractBiggestTFIDF_allstates.append(extractBiggest, ignore_index=True)
        # The TF computation below is disabled (kept in a string literal), so
        # extractBiggestTF_allstates stays empty and its CSV has no rows.
        """
        # Compute TF
        tf = CountVectorizer(
            stop_words='english',
            min_df=2,
            ngram_range=(1,2),
            token_pattern='[a-zA-Z0-9@#]+',
        )
        try:
            tf.fit(matrix_by_locality['tweet'])
            tf_res = tf.transform(matrix_by_locality['tweet'])
            listOfTermsTF = tf.get_feature_names()
            countTerms = tf_res.todense()
        except:# locality does not have enough different term
            logger.info("Impossible to compute TF on: "+locality)
            continue
        ## matrixTF
        TFClassical = pd.DataFrame(countTerms.tolist(), columns=listOfTermsTF)
        ### save in file
        logger.info("saving TF File: "+path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv")
        TFClassical.to_csv(path_for_filesaved+"/tf_on_"+locality.replace("/", "_")+"_corpus.csv")
        ## Extract N TOP ranking score
        extractBiggestTF = TFClassical.max().nlargest(nb_biggest_terms)
        extractBiggestTF = extractBiggestTF.to_frame()
        extractBiggestTF = extractBiggestTF.reset_index()
        extractBiggestTF.columns = ['terms', 'score']
        extractBiggestTF[spatial_hiearchy] = locality
        extractBiggestTF_allstates = extractBiggestTF_allstates.append(extractBiggestTF, ignore_index=True)
        """
    logger.info("saving TF and TF-IDF top"+str(nb_biggest_terms)+" biggest score")
    extractBiggestTF_allstates.to_csv(path_for_filesaved+"/TF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv")
    extractBiggestTFIDF_allstates.to_csv(path_for_filesaved+"/TF-IDF_BiggestScore_on_"+spatial_hiearchy+"_corpus.csv")
def TFIDF_TF_on_whole_corpus(elastic_query_fname, logger, save_intermediaire_files, path_for_filesaved="./",
                             temporal_period='all', listOfCities='all'):
    """
    Compute TFIDF and TF from an elastic query file
    1 doc = 1 tweet
    Corpus = on the whole elastic query (with filter out cities that are not in listOfCities
    :param elastic_query_fname: filename and path of the elastic query
    :param logger: logger of the main program
    :param save_intermediaire_files: if True, also save the full TF-IDF matrix
    :param path_for_filesaved: directory where output CSV files are written
    :param temporal_period: 'all' or a date range used as a filter
    :param listOfCities: If you want to filter out some cities, you can
    :return: None (terms whose max TF-IDF score equals 1.0 are saved to CSV;
        the cut-off is 1.0 because a lot of terms have 1.0 as the score)
    """
    # tfidfStartDate = date(2020, 1, 23)
    # tfidfEndDate = date(2020, 1, 30)
    # temporal_period = pd.date_range(tfidfStartDate, tfidfEndDate)
    # listOfCity = ['London', 'Glasgow', 'Belfast', 'Cardiff']
    # listOfState = ["England", "Scotland", "Northern Ireland", "Wales"]
    # Query Elasticsearch to get all tweets from UK
    tweets = elasticsearch_query(elastic_query_fname, logger)
    if listOfCities == 'all':
        # build the city/state/country lists from the "city_state_country" keys
        listOfCities = []
        listOfStates = []
        listOfCountry = []
        for triple in tweets:
            splitted = triple.split("_")
            listOfCities.append(splitted[0])
            listOfStates.append(splitted[1])
            listOfCountry.append(splitted[2])
        listOfCities = list(set(listOfCities))
        listOfStates = list(set(listOfStates))
        listOfCountry = list(set(listOfCountry))
    # reorganie tweets (dict : tweets by cities) into dataframe (city and date)
    matrixAllTweets = pd.DataFrame()
    for tweetByCity in tweets.keys():
        # Filter cities :
        city = str(tweetByCity).split("_")[0]
        state = str(tweetByCity).split("_")[1]
        country = str(tweetByCity).split("_")[2]
        if city in listOfCities:
            matrix = pd.DataFrame(tweets[tweetByCity])
            matrix["country"] = country
            matrixAllTweets = matrixAllTweets.append(matrix, ignore_index=True)
    # Split datetime into date and time
    matrixAllTweets["date"] = [d.date() for d in matrixAllTweets['created_at']]
    matrixAllTweets["time"] = [d.time() for d in matrixAllTweets['created_at']]
    # Filter by a period
    if temporal_period != "all":
        mask = ((matrixAllTweets["date"] >= temporal_period.min()) & (matrixAllTweets["date"] <= temporal_period.max()))
        matrixAllTweets = matrixAllTweets.loc[mask]
    vectorizer = TfidfVectorizer(
        stop_words='english',
        min_df=0.001,
        # max_features=50000,
        ngram_range=(1, 1),
        token_pattern='[a-zA-Z0-9#]+', #remove user name, i.e term starting with @ for personnal data issue
    )
    try:
        vectors = vectorizer.fit_transform(matrixAllTweets['tweet'])
        feature_names = vectorizer.get_feature_names()
        dense = vectors.todense()
        denselist = dense.tolist()
    except:
        logger.info("Impossible to compute TF-IDF")
        exit(-1)
    ## matrixTFIDF
    TFIDFClassical = pd.DataFrame(denselist, columns=feature_names)
    TFIDFClassical["country"] = matrixAllTweets["country"]
    if save_intermediaire_files:
        logger.info("saving TF-IDF File: "+path_for_filesaved+"/tfidf_on_whole_corpus.csv")
        TFIDFClassical.to_csv(path_for_filesaved+"/tfidf_on_whole_corpus.csv")
    extractBiggest = pd.DataFrame()
    # For every term keep its best score and the country of the tweet where
    # that best score was reached.
    for term in TFIDFClassical.keys():
        try:
            index = TFIDFClassical[term].idxmax()
            score = TFIDFClassical[term].max()
            country = TFIDFClassical.iloc[index]["country"]
            row = {
                'terms': term,
                'score': score,
                'country': country
            }
            extractBiggest = extractBiggest.append(row, ignore_index=True)
        except:
            logger.info(term+' : '+str(index)+" : "+str(score)+" : "+country)
    ## Extract N TOP ranking score
    # extractBiggest = TFIDFClassical.max()
    extractBiggest = extractBiggest[extractBiggest['score'] == 1] # we keep only term with high score TF-IDF, i.e 1.0
    # extractBiggest = extractBiggest.to_frame()
    # extractBiggest = extractBiggest.reset_index()
    # extractBiggest.columns = ['terms', 'score', 'country']
    logger.info("saving TF-IDF top"+str(extractBiggest['terms'].size)+" biggest score")
    extractBiggest.to_csv(path_for_filesaved+"/TFIDF_BiggestScore_on_whole_corpus.csv")
def logsetup(log_fname):
    """
    Configure and return the root logger.

    Two handlers are attached:
      - a rotating file handler (DEBUG level) writing to
        "<log_fname>_<timestamp>.log", rotating around 1 MB with 1 backup
      - a stream handler printing INFO-and-above to the screen

    :param log_fname: prefix (path + basename) of the log file
    :return: the configured root logger object
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    log_format = logging.Formatter('%(asctime)s :: %(levelname)s :: %(funcName)20s() ::%(message)s')
    stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    to_file = RotatingFileHandler(log_fname + "_" + stamp + ".log", 'a', 1000000, 1)
    to_file.setLevel(logging.DEBUG)
    to_file.setFormatter(log_format)
    root_logger.addHandler(to_file)
    # Only display on screen INFO
    to_screen = logging.StreamHandler()
    to_screen.setLevel(logging.INFO)
    root_logger.addHandler(to_screen)
    return root_logger
def t_SNE_bert_embedding_visualization(biggest_score, logger, listOfLocalities="all", spatial_hieararchy="country",
                                       plotname="colored by country", paht2save="./"):
    """
    Plot t-SNE representation of terms by country
    ressources:
        + https://colab.research.google.com/drive/1FmREx0O4BDeogldyN74_7Lur5NeiOVye?usp=sharing#scrollTo=Fbq5MAv0jkft
        + https://github.com/UKPLab/sentence-transformers
    :param biggest_score: dataframe with a 'terms' column and a locality column
    :param listOfLocalities: localities to keep
    :param spatial_hieararchy: column name used for filtering and colouring
    :param plotname: title suffix, also used in the saved file name
    :param paht2save: directory where the figure is written
    :return: None (figure saved to disk; an agglomerative clustering of the
        embeddings is also computed but its result is currently unused)
    """
    modelSentenceTransformer = SentenceTransformer('distilbert-base-nli-mean-tokens')
    # filter by localities
    # NOTE(review): unlike bert_embedding_filtred there is no `!= "all"` guard
    # here, so leaving listOfLocalities as the default string "all" makes this
    # a substring membership test and will drop nearly every row -- callers
    # appear expected to always pass an explicit list; verify.
    for locality in biggest_score[spatial_hieararchy].unique():
        if locality not in listOfLocalities:
            biggest_score = biggest_score.drop(biggest_score[biggest_score[spatial_hieararchy] == locality].index)
    embeddings = modelSentenceTransformer.encode(biggest_score['terms'].to_list(), show_progress_bar=True)
    # embeddings.tofile(paht2save+"/tsne_bert-embeddings_"+plotname+"_matrix-embeddig")
    modelTSNE = TSNE(n_components=2) # n_components means the lower dimension
    low_dim_data = modelTSNE.fit_transform(embeddings)
    label_tsne = biggest_score[spatial_hieararchy]
    # Style Plots a bit
    sns.set_style('darkgrid')
    sns.set_palette('muted')
    sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 2.5})
    plt.rcParams['figure.figsize'] = (20, 14)
    tsne_df = pd.DataFrame(low_dim_data, label_tsne)
    tsne_df.columns = ['x', 'y']
    ax = sns.scatterplot(data=tsne_df, x='x', y='y', hue=tsne_df.index)
    plt.setp(ax.get_legend().get_texts(), fontsize='40') # for legend text
    plt.setp(ax.get_legend().get_title(), fontsize='50') # for legend title
    plt.ylim(-100,100)
    plt.xlim(-100, 100)
    #ax.set_title('T-SNE BERT Sentence Embeddings for '+plotname)
    plt.savefig(paht2save+"/tsne_bert-embeddings_"+plotname)
    logger.info("file: "+paht2save+"/tsne_bert-embeddings_"+plotname+" has been saved.")
    #plt.show()
    plt.close()
    # Perform kmean clustering
    # num_clusters = 5
    # clustering_model = KMeans(n_clusters=num_clusters)
    # clustering_model.fit(embeddings)
    # cluster_assignment = clustering_model.labels_
    # Normalize the embeddings to unit length
    corpus_embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    # Perform kmean clustering
    clustering_model = AgglomerativeClustering(n_clusters=None,
                                               distance_threshold=1.5) # , affinity='cosine', linkage='average', distance_threshold=0.4)
    clustering_model.fit(corpus_embeddings)
    cluster_assignment = clustering_model.labels_
    # clustered_sentences = [[] for i in range(num_clusters)]
    # for sentence_id, cluster_id in enumerate(cluster_assignment):
    #     clustered_sentences[cluster_id].append(biggest_score['terms'].iloc[sentence_id])
    # Group terms by assigned cluster id (result is built but never used/returned)
    clustered_sentences = {}
    for sentence_id, cluster_id in enumerate(cluster_assignment):
        if cluster_id not in clustered_sentences:
            clustered_sentences[cluster_id] = []
        clustered_sentences[cluster_id].append(biggest_score['terms'].iloc[sentence_id])
    #for i, cluster in enumerate(clustered_sentences):
    # for i, cluster in clustered_sentences.items():
    #     print("Cluster ", i+1)
    #     print(cluster)
    #     print("")
def bert_embedding_filtred(biggest_score, listOfLocalities="all", spatial_hieararchy="country"):
    """
    Encode the terms of a score matrix into BERT sentence embeddings,
    optionally restricted to a list of localities.

    :param biggest_score: pd.Datraframe with columns : [terms, country/state/city]
    :param listOfLocalities: "all" (keep everything) or the localities to keep
    :param spatial_hieararchy: column name used for the locality filter
    :return: array of sentence embeddings, one row per kept term
    """
    encoder = SentenceTransformer('distilbert-base-nli-mean-tokens')
    # filter by localities
    if listOfLocalities != "all":
        for locality in biggest_score[spatial_hieararchy].unique():
            if locality not in listOfLocalities:
                unwanted_rows = biggest_score[biggest_score[spatial_hieararchy] == locality].index
                biggest_score = biggest_score.drop(unwanted_rows)
    return encoder.encode(biggest_score['terms'].to_list(), show_progress_bar=True)
def similarity_intra_matrix_pairwise(matrix):
    """
    Compute pairwise cosine similarity on the rows of a Matrix and retrieve unique score by pair.
    indeed, cosine_similarity pairwise retrive a matrix with duplication : let's take an exemple :
    Number of terms : 4, cosine similarity :
            w1    w2    w3     w4
            +---+---+----+--+
        w1  | 1 |   |    |  |
        w2  |   | 1 |    |  |
        w3  |   |   | 1  |  |
        w4  |   |   |    | 1|
            +---+---+----+--+
    (w1, w2) = (w2, w1), so we have to keep only : (number_of_terms)^2/2 - (number_of_terms)/2
    for nb_term = 4 :
        4*4/2 - 4/2 = 16/2 - 4/2 = 6 => we have 6 unique scores

    :param matrix: 2-D array, one embedding per row
    :return: 1-D array of the unique pairwise similarity scores
    """
    similarity = cosine_similarity(sparse.csr_matrix(matrix))
    # PERF: take the strict upper triangle in one indexing operation.
    # triu_indices_from walks the triangle row by row, which yields exactly
    # the same values in the same order as the previous per-row
    # np.append(row[i+1:]) loop, but without reallocating the output array
    # on every iteration (the old loop did O(n^2) copies).
    return similarity[np.triu_indices_from(similarity, k=1)]
def similarity_inter_matrix(matrix1, matrix2):
    """
    Pairwise cosine similarity between the rows of two matrices.

    :param matrix1: 2-D array, one embedding per row
    :param matrix2: 2-D array, one embedding per row
    :return: array of shape (len(matrix1), len(matrix2)) where cell (i, j)
        is the cosine similarity between matrix1[i] and matrix2[j]
    """
    # cdist returns cosine *distances* (1 - cos); invert to similarities
    cosine_distances = sp.distance.cdist(matrix1, matrix2, 'cosine')
    return 1 - cosine_distances
def clustering_terms(biggest, logger, cluster_f_out, listOfLocalities="all", spatial_hieararchy="country", method="kmeans"):
    """
    Cluster the BERT embeddings of a term list and dump the groups as JSON.

    :param biggest: dataframe with a 'terms' column and a locality column
    :param logger: logger of the main program
    :param cluster_f_out: output JSON file mapping cluster id -> list of terms
    :param listOfLocalities: "all" or the localities to keep before clustering
    :param spatial_hieararchy: column name used for the locality filter
    :param method: "kmeans" (fixed 5 clusters) or "agglomerative_clustering"
        (unit-normalised embeddings, distance threshold 1.5)
    :return: -1 when the method name is unknown, otherwise None
    """
    if method not in ("kmeans", "agglomerative_clustering"):
        logger.error("This method is not implemented for clustering: "+str(method))
        return -1
    # filter by localities
    if listOfLocalities != "all":
        for locality in biggest[spatial_hieararchy].unique():
            if locality not in listOfLocalities:
                biggest = biggest.drop(biggest[biggest[spatial_hieararchy] == locality].index)
    embeddings = bert_embedding_filtred(biggest)
    if method == "kmeans":
        # Perform kmean clustering
        model = KMeans(n_clusters=5)
        model.fit(embeddings)
    else:  # agglomerative_clustering
        # Normalize the embeddings to unit length before grouping
        unit_embeddings = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
        model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5)
        model.fit(unit_embeddings)
    labels = model.labels_
    # Group the terms by the cluster they were assigned to
    clustered_sentences = {}
    for position, label in enumerate(labels):
        clustered_sentences.setdefault(str(label), []).append(biggest['terms'].iloc[position])
    with open(cluster_f_out, "w") as outfile:
        json.dump(clustered_sentences, outfile)
    logger.info("file " + cluster_f_out + " has been saved")
def geocoding_token(biggest, listOfLocality, spatial_hieararchy, logger):
    """
    Find and geocode Spatial entity with OSM data (nominatim).

    Nominatim usage terms are respected:
        - a dedicated user agent ("h-tfidf-evaluation") is declared
        - queries are rate-limited to one per second
          (https://geopy.readthedocs.io/en/stable/#module-geopy.extra.rate_limiter)
        - a 10 s timeout is set on each request

    :param biggest: dataframe with a 'terms' column (and optionally a
        locality column used for filtering)
    :param listOfLocality: "all" or the localities to keep
    :param spatial_hieararchy: column name holding the locality
    :param logger: logger of the main program
    :return: biggest with one extra "geocode" column
    """
    try:
        if listOfLocality != "all":
            for locality in biggest[spatial_hieararchy].unique():
                if locality not in listOfLocality:
                    rows_to_drop = biggest[biggest[spatial_hieararchy] == locality].index
                    biggest = biggest.drop(rows_to_drop)
    except:
        logger.info("could not filter, certainly because there is no spatial hiearchy on biggest score")
    nominatim = Nominatim(user_agent="h-tfidf-evaluation", timeout=10)
    rate_limited_geocode = RateLimiter(nominatim.geocode, min_delay_seconds=1)
    tqdm.pandas()
    biggest["geocode"] = biggest["terms"].progress_apply(rate_limited_geocode)
    return biggest
def post_traitement_flood(biggest, logger, spatialLevel, ratio_of_flood=0.5):
    """
    Flag terms coming from "flooding" users: return the same dataframe with one
    more column "user_flooding".

    With the default ratio_of_flood: if a single Twitter user accounts for at
    least 50% of the occurrences of a term (for the term's locality), the term
    is considered flooded.
    :param biggest: dataframe of terms to process (needs a "terms" column and a
        column named after spatialLevel holding the locality)
    :param logger: logger instance
    :param spatialLevel: work on Country / State / City
    :param ratio_of_flood: share of a term's occurrences from one user above
        which the term is flagged
    :return: same dataframe with 1 more column "user_flooding":
        1 = flooded, 0 = not flooded, "not_in_es" = term not found in
        Elasticsearch, NaN = query failed
    """
    ratio_of_flood_global = ratio_of_flood
    # silence the verbose elasticsearch client logger
    es_logger.setLevel(logging.WARNING)
    # pre-build elastic query for spatialLevel :
    rest_user_osm_level = ""
    if spatialLevel == "country":
        rest_user_osm_level = "rest_user_osm.country"
    elif spatialLevel == "state":
        rest_user_osm_level = "rest.features.properties.state"
    elif spatialLevel == "city":
        rest_user_osm_level = "rest.features.properties.city"
    # Helper: query ES for one (term, locality) pair and decide whether a single
    # user dominates the term's occurrences.
    # NOTE(review): assumes a local ES instance with a "twitter" index — confirm.
    def is_an_user_flooding(term, locality):
        client = Elasticsearch("http://localhost:9200")
        index = "twitter"
        # Query :
        ## Retrieve only user name where in full_text = term and rest_user_osm.country = locality
        if term is not np.NAN:
            query = {"_source": "user.name","query":{"bool":{"filter":[{"bool":{"should":[{"match_phrase":{"full_text":term}}],"minimum_should_match":1}},
                  {"bool":{"should":[{"match_phrase":{rest_user_osm_level:locality}}],"minimum_should_match":1}}]}}}
            try:
                result = Elasticsearch.search(client, index=index, body=query)
                list_of_user = []
                if len(result["hits"]["hits"]) != 0:
                    for hit in result["hits"]["hits"]:
                        user = hit["_source"]["user"]["name"]
                        list_of_user.append(user)
                    dict_user_nbtweet = dict(Counter(list_of_user))
                    # keep only users holding at least ratio_of_flood of the hits
                    d = dict((k, v) for k, v in dict_user_nbtweet.items() if v >= (ratio_of_flood_global * len(list_of_user)))
                    if len(d) > 0 : # there is a flood on this term:
                        return 1
                    else:
                        return 0
                else: # not found in ES why ?
                    return "not_in_es"
            except:
                logger.info("There is a trouble with this term: " + str(term))
                return np.NAN
        else:
            return 0
    logger.debug("start remove terms if they coming from a flooding user, ie, terms in "+str(ratio_of_flood_global*100)+"% of tweets from an unique user over tweets with this words")
    tqdm.pandas()
    # one ES round-trip per row (term, locality) — progress bar via tqdm
    biggest["user_flooding"] = biggest.progress_apply(lambda t: is_an_user_flooding(t.terms, t[spatialLevel]), axis=1)
    return biggest
def venn(biggest, logger, spatial_level, result_path, locality):
    """
    Build a Venn diagram (as word clouds) of the top-100 H-TFIDF terms per week
    for one locality, and save the figure in result_path.

    Discussion about font size:
    In each subset (common or specific), the font size of a term is related to
    the H-TFIDF rank inside the subset.
    :param biggest: dataframe with columns "terms", "date", "user_flooding" and
        the spatial_level column
    :param logger: logger instance
    :param spatial_level: column used to select the locality ("country", ...)
    :param result_path: directory where the figure is saved
    :param locality: locality to plot
    :return: None (figure saved as a side effect)
    """
    # Post-traitement : keep only terms NOT flagged as coming from a flooding user
    biggest = biggest[biggest["user_flooding"] == "0"]
    # Select locality
    biggest = biggest[biggest[spatial_level] == locality]
    # select week
    weeks = biggest['date'].unique()
    # Bugfix: word_frequency must exist before branching — the original only
    # defined it in the >=3 weeks branch, so the 2-weeks path raised NameError
    # at the sorted()/log call below.
    word_frequency = {}  # for font-size of wordcloud : based on H-TFIDF Rank
    if len(weeks) == 2:
        sets = []
        weeks_list = []
        for week in weeks:
            sets.append(set(biggest[biggest["date"] == week].terms[0:100]))
            weeks_list.append(week)
        try:
            venn_fig = venn2_wordcloud(sets, set_labels=weeks_list, wordcloud_kwargs=dict(min_font_size=10),)
        except Exception:
            logger.info("Can't build venn for: "+locality)
    elif len(weeks) >= 3:
        sets = []
        weeks_list = []
        # only the 3 most recent weeks are compared
        for nb, week in enumerate(weeks[-3:]):
            sets.append(set(biggest[biggest["date"] == week].terms[0:100]))
            weeks_list.append(week)
            for rank, term in enumerate(biggest[biggest["date"] == week].terms[0:100]):
                if term not in word_frequency:
                    word_frequency[term] = (100 - rank)
        try:
            venn_fig = venn3_wordcloud(sets, set_labels=weeks_list, word_to_frequency=word_frequency,
                                       wordcloud_kwargs=dict(min_font_size=4,),)
        except Exception:
            logger.info("Can't build venn for: "+locality)
    sorted_word_frequency = dict(sorted(word_frequency.items(), key=operator.itemgetter(1), reverse=True))
    logger.info(locality + ": " + str(sorted_word_frequency))
    plt.savefig(result_path + "/venn_" + locality)
def frequent_terms_by_level(matrixOcc, logger, most_frequent_terms_fpath, listOfLocalities='all', spatialLevel='country'):
    """
    Extract the most frequent terms aggregated by spatial level.

    The matrix index is expected to hold "<city>_<state>_<country>_<date>"
    strings; it is split into 4 helper columns, occurrences are summed per
    locality and year, and the top-500 terms of each locality are reshaped into
    a long dataframe with columns ["terms", <spatialLevel>, "date"], which is
    also saved as CSV.
    :param matrixOcc: occurrence matrix (one row per "<city>_<state>_<country>_<date>",
        one column per term)
    :param logger: logger instance
    :param most_frequent_terms_fpath: CSV output path
    :param listOfLocalities: unused here (kept for interface compatibility)
    :param spatialLevel: 'city' | 'state' | 'country'
    :return: long-format dataframe of the most frequent terms by locality
    """
    #matrixOcc = spatiotemporelFilter(matrix=matrixOcc, listOfcities=listOfLocalities,
    #                                 spatialLevel=spatialLevel, period='all')
    # Aggregate by level
    ## Create 4 new columns : city, State, Country and date
    def splitindex(row):
        return row.split("_")
    matrixOcc["city"], matrixOcc["state"], matrixOcc["country"], matrixOcc["date"] = \
        zip(*matrixOcc.index.map(splitindex))
    matrixOcc.date = pd.to_datetime((matrixOcc.date))  # convert date into datetime
    # Fix: numeric_only=True so only the term-count columns are summed — on
    # pandas >= 2 the default would also "sum" (concatenate) the helper string
    # columns created above, which later breaks Series.nlargest.
    if spatialLevel == 'city':
        matrixOcc = matrixOcc.groupby(["city", pd.Grouper(key="date", freq="Y")]).sum(numeric_only=True)
    elif spatialLevel == 'state':
        matrixOcc = matrixOcc.groupby(["state", pd.Grouper(key="date", freq="Y")]).sum(numeric_only=True)
    elif spatialLevel == 'country':
        matrixOcc = matrixOcc.groupby(["country", pd.Grouper(key="date", freq="Y")]).sum(numeric_only=True)
    # Export N biggest TF-IDF score:
    top_n = 500
    extractBiggest = pd.DataFrame(index=matrixOcc.index, columns=range(0, top_n))
    for row in matrixOcc.index:
        try:
            row_without_zero = matrixOcc.loc[row]  # we remove term with a score = 0
            row_without_zero = row_without_zero[row_without_zero != 0]
            try:
                extractBiggest.loc[row] = row_without_zero.nlargest(top_n).keys()
            except:
                extractBiggest.loc[row] = row_without_zero.nlargest(len(row_without_zero)).keys()
        except:
            logger.debug("H-TFIDF: city " + str(matrixOcc.loc[row].name) + "not enough terms")
    # Transpose this table in order to share the same structure with TF-IDF classical biggest score :
    hbt = pd.DataFrame()
    extractBiggest = extractBiggest.reset_index()
    for index, row in extractBiggest.iterrows():
        hbtrow = pd.DataFrame(row.drop([spatialLevel, "date"]).values, columns=["terms"])
        hbtrow[spatialLevel] = row[spatialLevel]
        hbtrow["date"] = row["date"]
        # Fix: DataFrame.append was removed in pandas 2.0 — use pd.concat instead
        hbt = pd.concat([hbt, hbtrow], ignore_index=True)
    # save file
    logger.info("saving file: "+most_frequent_terms_fpath)
    hbt.to_csv(most_frequent_terms_fpath)
    return hbt
def comparison_htfidf_tfidf_frequentterms(htfidf_f, tfidf_corpus_country_f, frequent_terms, logger, plot_f_out, listOfCountries="all"):
    """
    Compare H-TFIDF and per-country TF-IDF top terms with the most frequent
    terms of each country.

    For nb_terms in {100, 200, 500}: for every country, computes the percentage
    of overlap (shared words of venn word-clouds) between
      - the weekly H-TFIDF top terms and the country's most frequent terms
        (averaged over weeks), and
      - the country-corpus TF-IDF top terms and the most frequent terms,
    then saves one bar chart per nb_terms under plot_f_out. Finally builds a
    3-set Venn word-cloud (H-TFIDF / TF-IDF / frequent terms) for one
    hard-coded country and week, and prints a LaTeX table of the top ranks.
    :param htfidf_f: CSV path of the H-TFIDF biggest scores
    :param tfidf_corpus_country_f: CSV path of the TF-IDF (corpus by country) biggest scores
    :param frequent_terms: dataframe of most frequent terms by country
    :param logger: logger instance
    :param plot_f_out: output path prefix for the figures
    :param listOfCountries: iterable of country names to compare
    """
    # Open dataframes
    htfidf = pd.read_csv(htfidf_f, index_col=0)
    tfidf = pd.read_csv(tfidf_corpus_country_f, index_col=0)
    for nb_terms in [100, 200, 500]:
        # barchart building
        barchart_df_col = ["country", "h-tfidf", "tf-idf"]
        barchart_df = pd.DataFrame(columns=barchart_df_col, index=range(len(listOfCountries)))
        # loop on countries
        for country in listOfCountries:
            htfidf_country = htfidf[htfidf["country"] == country]
            tfidf_country = tfidf[tfidf["country"] == country]
            frequent_terms_country = frequent_terms[frequent_terms["country"] == country]
            # loop on weeks
            htfidf_overlap_per_week_df = pd.DataFrame(index=range(1))
            for week in htfidf_country.date.unique():
                htfidf_country_week = htfidf_country[htfidf_country["date"] == week]
                # build on venn comparison H-TFIDF with Frequent terms
                sets = []
                sets.append(set(htfidf_country_week.terms[0:nb_terms]))
                sets.append(set(frequent_terms_country.terms[0:nb_terms]))
                # '11' = words present in both sets of the venn diagram
                try:
                    venn_htfidf = venn2_wordcloud(sets)
                    htfidf_overlap_per_week_df[week] = len(venn_htfidf.get_words_by_id('11'))
                except:
                    htfidf_overlap_per_week_df[week] = np.NAN
            # mean value for all weeks :
            mean_htfidf_overlap_per_week_df = htfidf_overlap_per_week_df.mean(axis=1).iloc[0] * 100 / nb_terms
            # Compute TF-IDF overlap with Frequent termes
            sets = []
            sets.append(set(tfidf_country.terms[0:nb_terms]))
            sets.append(set(frequent_terms_country.terms[0:nb_terms]))
            logger.info(country)
            venn_tfidf = venn2_wordcloud(sets)
            plt.close('all')
            # barchart_df['TFIDF_' + country] = len(venn_tfidf.get_words_by_id('11'))
            tfidf_overlap = len(venn_tfidf.get_words_by_id('11')) * 100 / nb_terms
            # build the row for barchart
            if country == "Ἑλλάς":
                country = "Greece"
            row = {"country": country, "h-tfidf": mean_htfidf_overlap_per_week_df, "tf-idf": tfidf_overlap}
            # NOTE(review): DataFrame.append was removed in pandas 2.0 — this needs
            # pd.concat on modern pandas; confirm the pinned pandas version.
            barchart_df = barchart_df.append(row, ignore_index=True)
        # Plot bar chart
        barchart_df = barchart_df.set_index("country")
        barchart_df = barchart_df.dropna()
        barchart_df.plot.bar(figsize=(8,6))
        plt.subplots_adjust(bottom=0.27)
        plt.ylabel("% overlap between H-TFIDF / TF-IDF with most frequent terms")
        plt.savefig(plot_f_out + "_" + str(nb_terms) + ".png")
    # build venn diagramm
    ## Choose a country
    country = "United Kingdom"
    nb_terms = 100
    week = "2020-01-26"
    ## Filtering matrix to keep TOP15 terms without term with 1 caracter or digital number
    htfidf_country = htfidf[(htfidf["country"] == country) & (htfidf["date"] == week)]
    tfidf_country = tfidf[tfidf["country"] == country]
    frequent_terms_country = frequent_terms[frequent_terms["country"] == country]
    htfidf_country = htfidf_country[htfidf_country["terms"].map(len) > 3]
    tfidf_country = tfidf_country[tfidf_country["terms"].map(len) > 3]
    frequent_terms_country = frequent_terms_country[frequent_terms_country["terms"].map(len) > 3]
    ### Remove number
    htfidf_country_terms = htfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
    tfidf_country_terms = tfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
    frequent_terms_country_terms = frequent_terms_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
    columns_name = []
    latex_table_nb_terms = 30
    for i in range(latex_table_nb_terms):
        columns_name.append("rank "+str(i))
    latex_table = pd.DataFrame(index=range(3), columns=columns_name)
    latex_table.loc["H-TFIDF"] = htfidf_country_terms.head(latex_table_nb_terms).values
    latex_table.loc["TF-IDF"] = tfidf_country_terms.head(latex_table_nb_terms).values
    latex_table.loc["Frequent terms"] = frequent_terms_country_terms.head(latex_table_nb_terms).values
    print(latex_table.T[["H-TFIDF", "TF-IDF", "Frequent terms"]].to_latex(index=False))
    sets = []
    sets.append(set(htfidf_country_terms))
    sets.append(set(tfidf_country_terms))
    sets.append(set(frequent_terms_country_terms))
    fig, ax = plt.subplots(figsize=(8, 6))
    venn_3 = venn3_wordcloud(sets, set_labels=["H-TFIDF", "TF-IDF", "Frequent terms"], ax=ax)
    plt.savefig(plot_f_out + "_"+ country + "venn3.png")
    plt.show()
def comparison_htfidf_tfidfwhole_frequentterms(htfidf_f, tfidf_whole_f, frequent_terms, logger, plot_f_out, listOfCountries="all"):
    """
    Same comparison as comparison_htfidf_tfidf_frequentterms, but against the
    TF-IDF computed on the whole corpus instead of per-country corpora.

    Country names are aliased (native name OR English name, e.g. "Deutschland"
    or "Germany") when filtering, because the two result files do not use the
    same naming. Saves one bar chart per nb_terms in {100, 200, 500} under
    plot_f_out, then builds a 3-set Venn word-cloud for one hard-coded country
    and week and prints a LaTeX table of the top ranks.
    :param htfidf_f: CSV path of the H-TFIDF biggest scores
    :param tfidf_whole_f: CSV path of the TF-IDF (whole corpus) biggest scores
    :param frequent_terms: dataframe of most frequent terms by country
    :param logger: logger instance
    :param plot_f_out: output path prefix for the figures
    :param listOfCountries: iterable of country names to compare
    """
    # Open dataframes
    htfidf = pd.read_csv(htfidf_f, index_col=0)
    tfidf = pd.read_csv(tfidf_whole_f, index_col=0)
    for nb_terms in [100, 200, 500]:
        # barchart building
        barchart_df_col = ["country", "h-tfidf", "tf-idf"]
        barchart_df = pd.DataFrame(columns=barchart_df_col, index=range(len(listOfCountries)))
        # loop on countries
        for country in listOfCountries:
            # build_compare_measures_localities = ["Ἑλλάς", "Deutschland", "España", "France", "Italia", "Portugal", "United Kingdom"]
            if country == "Ἑλλάς":
                htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Greece")]
                tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Greece")]
            elif country == "Deutschland":
                htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Germany")]
                tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Germany")]
            elif country == "España":
                htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Spain")]
                tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Spain")]
            elif country == "Italia":
                htfidf_country = htfidf[(htfidf["country"] == country) | (htfidf["country"] == "Italy")]
                tfidf_country = tfidf[(tfidf["country"] == country) | (tfidf["country"] == "Italy")]
            else:
                htfidf_country = htfidf[htfidf["country"] == country]
                tfidf_country = tfidf[tfidf["country"] == country]
            frequent_terms_country = frequent_terms[frequent_terms["country"] == country]
            # loop on weeks
            htfidf_overlap_per_week_df = pd.DataFrame(index=range(1))
            for week in htfidf_country.date.unique():
                htfidf_country_week = htfidf_country[htfidf_country["date"] == week]
                # build on venn comparison H-TFIDF with Frequent terms
                sets = []
                sets.append(set(htfidf_country_week.terms[0:nb_terms]))
                sets.append(set(frequent_terms_country.terms[0:nb_terms]))
                # '11' = words present in both sets of the venn diagram
                try:
                    venn_htfidf = venn2_wordcloud(sets)
                    htfidf_overlap_per_week_df[week] = len(venn_htfidf.get_words_by_id('11'))
                except:
                    htfidf_overlap_per_week_df[week] = np.NAN
            # mean value for all weeks :
            mean_htfidf_overlap_per_week_df = htfidf_overlap_per_week_df.mean(axis=1).iloc[0] * 100 / nb_terms
            # Compute TF-IDF overlap with Frequent termes
            sets = []
            sets.append(set(tfidf_country.terms[0:nb_terms]))
            sets.append(set(frequent_terms_country.terms[0:nb_terms]))
            logger.info(country)
            try :
                venn_tfidf = venn2_wordcloud(sets)
                plt.close('all')
                # barchart_df['TFIDF_' + country] = len(venn_tfidf.get_words_by_id('11'))
                tfidf_overlap = len(venn_tfidf.get_words_by_id('11')) * 100 / nb_terms
            except:
                logger.info("No terms in biggest score for TF-IDF - country: " + country)
                tfidf_overlap = 0.0
            # build the row for barchart
            if country == "Ἑλλάς":
                country = "Greece"
            row = {"country": country, "h-tfidf": mean_htfidf_overlap_per_week_df, "tf-idf": tfidf_overlap}
            # NOTE(review): DataFrame.append was removed in pandas 2.0 — this needs
            # pd.concat on modern pandas; confirm the pinned pandas version.
            barchart_df = barchart_df.append(row, ignore_index=True)
        # Plot bar chart
        barchart_df = barchart_df.set_index("country")
        barchart_df = barchart_df.dropna()
        barchart_df.plot.bar(figsize=(8,6))
        plt.subplots_adjust(bottom=0.27)
        plt.ylabel("% overlap between H-TFIDF / TF-IDF with most frequent terms")
        plt.savefig(plot_f_out + "_" + str(nb_terms) + ".png")
    # build venn diagramm
    ## Choose a country
    country = "Germany"
    nb_terms = 100
    week = "2020-01-26"
    ## Filtering matrix to keep TOP15 terms without term with 1 caracter or digital number
    htfidf_country = htfidf[(htfidf["country"] == country) & (htfidf["date"] == week)]
    tfidf_country = tfidf[tfidf["country"] == country]
    frequent_terms_country = frequent_terms[frequent_terms["country"] == country]
    htfidf_country = htfidf_country[htfidf_country["terms"].map(len) > 3]
    tfidf_country = tfidf_country[tfidf_country["terms"].map(len) > 3]
    frequent_terms_country = frequent_terms_country[frequent_terms_country["terms"].map(len) > 3]
    ### Remove number
    htfidf_country_terms = htfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
    tfidf_country_terms = tfidf_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
    frequent_terms_country_terms = frequent_terms_country["terms"].replace("^\d+", np.nan, regex=True).dropna().head(nb_terms)
    columns_name = []
    latex_table_nb_terms = 15
    for i in range(latex_table_nb_terms):
        columns_name.append("rank "+str(i))
    latex_table = pd.DataFrame(index=range(3), columns=columns_name)
    latex_table.loc["H-TFIDF"] = htfidf_country_terms.head(latex_table_nb_terms).values
    latex_table.loc["TF-IDF"] = tfidf_country_terms.head(latex_table_nb_terms).values
    latex_table.loc["Frequent terms"] = frequent_terms_country_terms.head(latex_table_nb_terms).values
    print(latex_table.T[["H-TFIDF", "TF-IDF", "Frequent terms"]].to_latex(index=False))
    sets = []
    sets.append(set(htfidf_country_terms))
    sets.append(set(tfidf_country_terms))
    sets.append(set(frequent_terms_country_terms))
    fig, ax = plt.subplots(figsize=(8, 6))
    venn_3 = venn3_wordcloud(sets, set_labels=["H-TFIDF", "TF-IDF", "Frequent terms"], ax=ax)
    plt.savefig(plot_f_out + "_"+ country + "venn3.png")
    plt.show()
if __name__ == '__main__':
    # Global parameters :
    ## Spatial level hierarchy :
    # spatialLevels = ['country', 'state', 'city']
    spatialLevels = ['country', 'state']
    ## Time level hierarchy :
    timeLevel = "week"
    ## List of countries to work on :
    listOfLocalities = ["Deutschland", "España", "France", "Italia", "United Kingdom"]
    ## elastic query :
    query_fname = "elasticsearch/analyse/nldb21/elastic-query/nldb21_europeBySpatialExtent_en_february.txt"
    ## Path to results :
    period_extent = "feb_tfidf_whole"
    f_path_result = "elasticsearch/analyse/nldb21/results/" + period_extent + "_" + timeLevel
    if not os.path.exists(f_path_result):
        os.makedirs(f_path_result)
    # Workflow parameters : boolean switches below enable/disable each stage
    ## Rebuild H-TFIDF (with Matrix Occurence)
    build_htfidf = False
    build_htfidf_save_intermediaire_files = True
    ## eval 1 : Comparison with classical TF-IDF
    build_classical_tfidf = False
    build_classical_tfidf_save_intermediaire_files = False
    ## eval 2 : Use word_embedding with t-SNE
    build_tsne = False
    build_tsne_spatial_level = "country"
    ## eval 3 : Use word_embedding with box plot to show disparity
    build_boxplot = False
    build_boxplot_spatial_level = "country"
    ## eval 4 : Compare H-TFIDF and TF-IDF with most frequent terms by level
    build_compare_measures = True
    build_compare_measures_build_intermedate_files = False
    build_compare_measures_level = "country"
    build_compare_measures_localities = ["Ἑλλάς", "Deutschland", "España", "France", "Italia", "Portugal", "United Kingdom"]
    ## post-traitement 1 : geocode terms
    build_posttraitement_geocode = False
    ## post-traitement 2 : remove terms from a flooding user
    build_posttraitement_flooding = False
    build_posttraitement_flooding_spatial_levels = spatialLevels
    ## Analyse H-TFIDF for epidemiology 1 : clustering
    build_clustering = False
    build_clustering_spatial_levels = ['country', 'state']
    build_clustering_list_hierachical_locality = {
        "country": ["France", "Deutschland", "España", "Italia", "United Kingdom"],
        'state': ["Lombardia", "Lazio"],
        # "city": ["London"]
    }
    ## Venn diagram
    build_venn = False
    build_venn_spatial_level = "country"
    # initialize a logger :
    log_fname = "elasticsearch/analyse/nldb21/logs/nldb21_"
    logger = logsetup(log_fname)
    logger.info("H-TFIDF expirements starts")
    # Stage 1: query Elasticsearch, build the occurrence matrix, compute H-TFIDF
    if build_htfidf:
        # start the elastic query
        query = open(query_fname, "r").read()
        logger.debug("elasticsearch : start quering")
        tweetsByCityAndDate = elasticsearch_query(query_fname, logger)
        logger.debug("elasticsearch : stop quering")
        # Build a matrix of occurence for each terms in document aggregate by city and day
        ## prepare tree for file in commun for all spatial level :
        f_path_result_common = f_path_result+"/common"
        if not os.path.exists(f_path_result_common):
            os.makedirs(f_path_result_common)
        ## Define file path
        matrixAggDay_fpath = f_path_result_common + "/matrixAggDay.csv"
        matrixOccurence_fpath = f_path_result_common + "/matrixOccurence.csv"
        logger.debug("Build matrix of occurence : start")
        matrixOccurence = matrixOccurenceBuilder(tweetsByCityAndDate, matrixAggDay_fpath, matrixOccurence_fpath, build_htfidf_save_intermediaire_files, logger)
        logger.debug("Build matrix of occurence : stop")
        ## import matrixOccurence if you don't want to re-build it
        # matrixOccurence = pd.read_csv('elasticsearch/analyse/matrixOccurence.csv', index_col=0)
        # compute and save H-TFIDF once per configured spatial level
        for spatialLevel in spatialLevels:
            logger.info("H-TFIDF on: "+spatialLevel)
            f_path_result_level = f_path_result+"/"+spatialLevel
            if not os.path.exists(f_path_result_level):
                os.makedirs(f_path_result_level)
            ## Compute H-TFIDF
            matrixHTFIDF_fname = f_path_result_level + "/matrix_H-TFIDF.csv"
            biggestHTFIDFscore_fname = f_path_result_level + "/h-tfidf-Biggest-score.csv"
            logger.debug("H-TFIDF : start to compute")
            HTFIDF(matrixOcc=matrixOccurence,
                   matrixHTFIDF_fname=matrixHTFIDF_fname,
                   biggestHTFIDFscore_fname=biggestHTFIDFscore_fname,
                   spatialLevel=spatialLevel,
                   temporalLevel=timeLevel,
                   )
    logger.info("H-TFIDF : stop to compute for all spatial levels")
    # Stage 2 (eval 1): classical TF-IDF, on the whole corpus and per country
    ## Comparison with TF-IDF
    f_path_result_tfidf = f_path_result + "/tf-idf-classical"
    f_path_result_tfidf_by_locality = f_path_result_tfidf + "/tfidf-tf-corpus-country"
    if build_classical_tfidf :
        if not os.path.exists(f_path_result_tfidf):
            os.makedirs(f_path_result_tfidf)
        if not os.path.exists(f_path_result_tfidf_by_locality):
            os.makedirs(f_path_result_tfidf_by_locality)
        ### On whole corpus
        TFIDF_TF_on_whole_corpus(elastic_query_fname=query_fname,
                                 logger=logger,
                                 save_intermediaire_files=build_classical_tfidf_save_intermediaire_files,
                                 path_for_filesaved=f_path_result_tfidf)
        ### By Country
        TFIDF_TF_with_corpus_state(elastic_query_fname=query_fname,
                                   logger=logger,
                                   save_intermediaire_files=build_classical_tfidf_save_intermediaire_files,
                                   nb_biggest_terms=500,
                                   path_for_filesaved=f_path_result_tfidf_by_locality,
                                   spatial_hiearchy="country",
                                   temporal_period='all')
    # Stage 3 (eval 4): compare H-TFIDF and TF-IDF against the most frequent terms
    if build_compare_measures:
        f_path_result_compare_meassures_dir = f_path_result+"/common"
        f_path_result_compare_meassures_file = \
            f_path_result_compare_meassures_dir + "/most_frequent_terms_by_" + build_compare_measures_level + ".csv"
        f_path_result_compare_meassures_plot = \
            f_path_result_compare_meassures_dir + "/most_frequent_terms_by_" + build_compare_measures_level
        if not os.path.exists(f_path_result_compare_meassures_dir):
            os.makedirs(f_path_result_compare_meassures_dir)
        # open Matrix of occurence:
        try:
            matrixOccurence = pd.read_csv(f_path_result_compare_meassures_dir + '/matrixOccurence.csv', index_col=0)
        except:
            logger.error("File: " + f_path_result_compare_meassures_dir + '/matrixOccurence.csv' + "doesn't exist. You may need to save intermediate file for H-TFIDF")
        logger.info("Retrieve frequent terms per country")
        # either recompute the frequent terms, or reuse the previously saved CSV
        if build_compare_measures_build_intermedate_files:
            ft = frequent_terms_by_level(matrixOccurence, logger, f_path_result_compare_meassures_file, build_compare_measures_localities, build_compare_measures_level)
        else:
            ft = pd.read_csv(f_path_result_compare_meassures_file)
        # files_path
        htfidf_f = f_path_result + "/country/h-tfidf-Biggest-score.csv"
        tfidf_corpus_whole_f = f_path_result + "/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv"
        comparison_htfidf_tfidfwhole_frequentterms(htfidf_f, tfidf_corpus_whole_f, ft, logger,
                                                   f_path_result_compare_meassures_plot,
                                                   listOfCountries=build_compare_measures_localities)
    # Stage 4 (eval 2): t-SNE visualisation of BERT embeddings of the top terms
    if build_tsne :
        f_path_result_tsne = f_path_result+"/tsne"
        if not os.path.exists(f_path_result_tsne):
            os.makedirs(f_path_result_tsne)
        biggest_TFIDF_country = pd.read_csv(f_path_result+"/tf-idf-classical/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_country_corpus.csv", index_col=0)
        biggest_TFIDF_whole = pd.read_csv(f_path_result+"/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv")
        biggest_H_TFIDF = pd.read_csv(f_path_result+"/"+build_tsne_spatial_level+'/h-tfidf-Biggest-score.csv', index_col=0)
        # t-SNE visualisation
        t_SNE_bert_embedding_visualization(biggest_TFIDF_country, logger, listOfLocalities=listOfLocalities,
                                           plotname="TF-IDF on corpus by Country",
                                           paht2save=f_path_result_tsne)
        t_SNE_bert_embedding_visualization(biggest_H_TFIDF, logger, listOfLocalities=listOfLocalities,
                                           plotname="H-TFIDF", paht2save=f_path_result_tsne)
    # Stage 5 (eval 3): pairwise-similarity box plots of BERT embeddings
    if build_boxplot :
        # dir path to save :
        f_path_result_boxplot = f_path_result+"/pairwise-similarity-boxplot"
        if not os.path.exists(f_path_result_boxplot):
            os.makedirs(f_path_result_boxplot)
        # open result from mesures :
        biggest_TFIDF_country = pd.read_csv(f_path_result_tfidf_by_locality+"/TF-IDF_BiggestScore_on_country_corpus.csv", index_col=0)
        biggest_TFIDF_whole = pd.read_csv(f_path_result_tfidf+"/TFIDF_BiggestScore_on_whole_corpus.csv")
        biggest_H_TFIDF = pd.read_csv(f_path_result+"/"+build_boxplot_spatial_level+'/h-tfidf-Biggest-score.csv', index_col=0)
        # Retrieve embedding :
        htfidf_embeddings = bert_embedding_filtred(biggest_H_TFIDF, listOfLocalities=listOfLocalities)
        tfidf_country_embeddings = bert_embedding_filtred(biggest_TFIDF_country, listOfLocalities=listOfLocalities)
        tfidf_whole_embeddings = bert_embedding_filtred(biggest_TFIDF_whole)
        # Compute similarity :
        ## Distribution of similarities between terms extracted from a measure
        htidf_similarity = similarity_intra_matrix_pairwise(htfidf_embeddings)
        tfidf_country_similarity = similarity_intra_matrix_pairwise(tfidf_country_embeddings)
        tfidf_whole_similarity = similarity_intra_matrix_pairwise(tfidf_whole_embeddings)
        # 3 side-by-side box plots: one per measure
        plt.subplot(131)
        plt.boxplot(htidf_similarity)
        plt.title("H-TFIDF")
        plt.ylim(0,1)
        plt.subplot(132)
        plt.boxplot(tfidf_country_similarity)
        plt.title("TFIDF with corpus by country")
        plt.ylim(0, 1)
        plt.subplot(133)
        plt.boxplot(tfidf_whole_similarity)
        plt.title("TFIDF on the whole corpus")
        plt.ylim(0, 1)
        plt.tight_layout()
        plt.subplots_adjust(wspace=0.3)
        plt.suptitle("Distribution of similarity values among the extracted terms pairs of a measure")
        plt.savefig(f_path_result_boxplot+"/pairwise-similarity-boxplot.png")
        # plt.show()
        plt.close()
        ## Distribution of similarities between the terms of a country extracted from a measure
        ### H-TFIDF
        # NOTE(review): the [i*500:(i+1)*500-1] slices assume 500 embeddings per
        # country, in listOfLocalities order — confirm against bert_embedding_filtred.
        fig2, axs2 = plt.subplots(1, 5)
        for i, country in enumerate(listOfLocalities):
            axs2[i].boxplot(similarity_intra_matrix_pairwise(htfidf_embeddings[i*500:(i+1)*500-1]))
            axs2[i].set_title(country, fontsize=40)
            axs2[i].set_ylim(0, 1)
        # fig2.suptitle("Distribution of similarity by pairs for H-TF-IDF")
        plt.savefig(f_path_result_boxplot + "/pairwise-similarity-boxplot_HTFIDF-country.png")
        # plt.show()
        plt.close(fig2)
        ### TF-IDF by corpus = country
        fig3, axs3 = plt.subplots(1, 5)
        for i, country in enumerate(listOfLocalities):
            axs3[i].boxplot(similarity_intra_matrix_pairwise(tfidf_country_embeddings[i*500:(i+1)*500-1]))
            axs3[i].set_title(country, fontsize=40)
            axs3[i].set_ylim(0, 1)
        # fig3.suptitle("Distribution of similarity by pairs for TF-IDF focus on each country")
        plt.savefig(f_path_result_boxplot + "/pairwise-similarity-boxplot_TFIDF-country.png")
        # plt.show()
        plt.close(fig3)
        ## Distribution of similarities between the set of terms of 2 measures
        ### H-TF-IDF with TF-IDF on whole corpus and TF-IDF country with TF-IDF on whole corpus
        fig_compare_TFIDF_whole, ax4 = plt.subplots(1,2)
        similarity_between_htfidf_tfidf_whole = similarity_inter_matrix(htfidf_embeddings, tfidf_whole_embeddings)
        similarity_between_tfidfcountry_tfidf_whole = similarity_inter_matrix(tfidf_country_embeddings, tfidf_whole_embeddings)
        similarity_between_htfidf_tfidf_whole_1D = np.array([])
        similarity_between_tfidfcountry_tfidf_whole_1D = np.array([])
        for i, row in enumerate(similarity_between_htfidf_tfidf_whole):
            similarity_between_htfidf_tfidf_whole_1D = np.append(similarity_between_htfidf_tfidf_whole_1D, row[i+1:]) # We remove duplicate pairwise value
        for i, row in enumerate(similarity_between_tfidfcountry_tfidf_whole):
            similarity_between_tfidfcountry_tfidf_whole_1D = np.append(similarity_between_tfidfcountry_tfidf_whole_1D,
                                                                       row[i + 1:])
        ax4[0].boxplot(similarity_between_htfidf_tfidf_whole_1D)
        ax4[0].set_ylim(0, 1)
        ax4[0].set_title("H-TFIDF")
        ax4[1].boxplot(similarity_between_tfidfcountry_tfidf_whole_1D)
        ax4[1].set_ylim(0, 1)
        ax4[1].set_title("TFIDF on country")
        fig_compare_TFIDF_whole.suptitle("Distribution of similarity between H-TFIDF and TF-IDF on whole corpus")
        plt.savefig(f_path_result_boxplot + "/pairwise-similarity-boxplot_between_TFIDF-whole.png")
        # plt.show()
        plt.close(fig_compare_TFIDF_whole)
        ## Distribution of similarities between sub-set terms by country compared by country pair
    # Post-traitement 1: geocode the extracted terms with Nominatim/OSM
    if build_posttraitement_geocode:
        # Geocode terms :
        ## Comments : over geocode even on non spatial entities
        spatial_level = "country"
        listOfLocalities = ["France", "Deutschland", "España", "Italia", "United Kingdom"]
        f_path_result = "elasticsearch/analyse/nldb21/results/4thfeb_country"
        biggest_TFIDF_country = pd.read_csv(
            f_path_result+"/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_"+spatial_level+"_corpus.csv", index_col=0)
        biggest_TFIDF_whole = pd.read_csv(f_path_result+"/TFIDF_BiggestScore_on_whole_corpus.csv")
        biggest_H_TFIDF = pd.read_csv(f_path_result+'/h-tfidf-Biggest-score.csv', index_col=0)
        # geocode each of the 3 measures and save the augmented CSVs
        biggest_H_TFIDF_gepocode = geocoding_token(biggest_H_TFIDF,
                                                   listOfLocality=listOfLocalities,
                                                   spatial_hieararchy=spatial_level,
                                                   logger=logger)
        biggest_H_TFIDF_gepocode.to_csv(f_path_result+"/h-tfidf-Biggest-score-geocode.csv")
        biggest_TFIDF_country_gepocode = geocoding_token(biggest_TFIDF_country,
                                                         listOfLocality=listOfLocalities,
                                                         spatial_hieararchy=spatial_level,
                                                         logger=logger)
        biggest_TFIDF_country_gepocode.to_csv(f_path_result+"/TF-IDF_BiggestScore_on_"+spatial_level+"_corpus_geocode.csv")
        biggest_TFIDF_whole_gepocode = geocoding_token(biggest_TFIDF_whole,
                                                       listOfLocality=listOfLocalities,
                                                       spatial_hieararchy=spatial_level,
                                                       logger=logger)
        biggest_TFIDF_whole_gepocode.to_csv(f_path_result+"/TFIDF_BiggestScore_on_whole_corpus_geocode.csv")
    # Post-traitement 2: flag terms dominated by a single ("flooding") user
    if build_posttraitement_flooding:
        # Post traitement : remove terms coming from user who flood
        for spatial_level_flood in build_posttraitement_flooding_spatial_levels:
            logger.info("post-traitement flooding on: " + spatial_level_flood)
            f_path_result_flood = f_path_result + "/" + spatial_level_flood
            biggest_H_TFIDF = pd.read_csv(f_path_result_flood + '/h-tfidf-Biggest-score.csv', index_col=0)
            biggest_H_TFIDF_with_flood = post_traitement_flood(biggest_H_TFIDF, logger, spatialLevel=spatial_level_flood)
            biggest_H_TFIDF_with_flood.to_csv(f_path_result_flood + "/h-tfidf-Biggest-score-flooding.csv")
    # Analysis: cluster the H-TFIDF terms per locality with both methods
    if build_clustering:
        # Create clustering
        # method="agglomerative_clustering"
        method_list = ["kmeans", "agglomerative_clustering"]
        for spatial_level in build_clustering_spatial_levels:
            f_path_result_flood = f_path_result + "/" + spatial_level
            f_path_result_clustering = f_path_result + "/" + spatial_level + "/clustering"
            if not os.path.exists(f_path_result_clustering):
                os.makedirs(f_path_result_clustering)
            # open result post_traited
            try:
                biggest_H_TFIDF = pd.read_csv(f_path_result_flood + "/h-tfidf-Biggest-score-flooding.csv", index_col=0)
            except:
                logger.error("Clustering: file biggest score doesn't exist")
            # drop token from flooding user and drop ngram not in the same sentence (see post_traitement)
            biggest = biggest_H_TFIDF[biggest_H_TFIDF["user_flooding"] == str(0)]
            # one output JSON per (locality, method) pair
            for method in method_list:
                for locality in build_clustering_list_hierachical_locality[spatial_level]:
                    f_path = f_path_result_clustering + "/" + locality + "_" + method + ".json"
                    try:
                        clustering_terms(biggest, logger, f_path,
                                         listOfLocalities=locality,
                                         spatial_hieararchy=spatial_level,
                                         method=method)
                    except:
                        logger.error("Impossible to cluster for " + spatial_level + "with method: "+method)
    # Analysis: Venn diagrams of weekly H-TFIDF terms per locality
    if build_venn:
        f_path_result_venn = f_path_result + "/venn"
        if not os.path.exists(f_path_result_venn):
            os.makedirs(f_path_result_venn)
        # open result post_traited
        try:
            biggest_H_TFIDF = pd.read_csv(f_path_result + "/" + build_venn_spatial_level + "/h-tfidf-Biggest-score-flooding.csv", index_col=0)
        except:
            logger.error("Venn: file biggest score doesn't exist")
        for locality in listOfLocalities:
            venn(biggest_H_TFIDF, logger, build_venn_spatial_level, f_path_result_venn, locality)
    logger.info("H-TFIDF expirements stops")
| [
"logging.getLogger",
"matplotlib.pyplot.boxplot",
"numpy.log10",
"logging.StreamHandler",
"geopy.extra.rate_limiter.RateLimiter",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"pandas.to_timedelta",
"pandas.Grouper",
"seaborn.set_style",
"numpy.array",
"seaborn.scatterplot",
"numpy.linalg.n... | [((1424, 1462), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['"""http://localhost:9200"""'], {}), "('http://localhost:9200')\n", (1437, 1462), False, 'from elasticsearch import Elasticsearch\n'), ((1467, 1502), 'elasticsearch.logger.setLevel', 'es_logger.setLevel', (['logging.WARNING'], {}), '(logging.WARNING)\n', (1485, 1502), True, 'from elasticsearch import logger as es_logger\n'), ((1601, 1678), 'elasticsearch.Elasticsearch.search', 'Elasticsearch.search', (['client'], {'index': 'index', 'body': 'query', 'scroll': '"""2m"""', 'size': '(5000)'}), "(client, index=index, body=query, scroll='2m', size=5000)\n", (1621, 1678), False, 'from elasticsearch import Elasticsearch\n'), ((1917, 1934), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1928, 1934), False, 'from collections import defaultdict, Counter\n'), ((4781, 4804), 'tqdm.tqdm', 'tqdm', ([], {'total': 'scroll_size'}), '(total=scroll_size)\n', (4785, 4804), False, 'from tqdm import tqdm\n'), ((5598, 5672), 're.sub', 're.sub', (['"""((www\\\\.[^\\\\s]+)|(https?://[^\\\\s]+)|(http?://[^\\\\s]+))"""', '""""""', 'text'], {}), "('((www\\\\.[^\\\\s]+)|(https?://[^\\\\s]+)|(http?://[^\\\\s]+))', '', text)\n", (5604, 5672), False, 'import re\n'), ((5685, 5718), 're.sub', 're.sub', (['"""http\\\\S+"""', '""""""', 'textclean'], {}), "('http\\\\S+', '', textclean)\n", (5691, 5718), False, 'import re\n'), ((6676, 6701), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'col'}), '(columns=col)\n', (6688, 6701), True, 'import pandas as pd\n'), ((8174, 8288), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'stop_words': '"""english"""', 'max_features': '(25000)', 'ngram_range': '(1, 1)', 'token_pattern': '"""[a-zA-Z0-9#@]+"""'}), "(stop_words='english', max_features=25000, ngram_range=(1, 1\n ), token_pattern='[a-zA-Z0-9#@]+')\n", (8189, 8288), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((9033, 
9110), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'countTerms[0:, 0:]', 'index': 'cityDayList', 'columns': 'listOfTerms'}), '(data=countTerms[0:, 0:], index=cityDayList, columns=listOfTerms)\n', (9045, 9110), True, 'import pandas as pd\n'), ((13276, 13293), 'numpy.log10', 'np.log10', (['(N / DFt)'], {}), '(N / DFt)\n', (13284, 13293), True, 'import numpy as np\n'), ((14450, 14464), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (14462, 14464), True, 'import pandas as pd\n'), ((16494, 16508), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (16506, 16508), True, 'import pandas as pd\n'), ((17497, 17511), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (17509, 17511), True, 'import pandas as pd\n'), ((17548, 17562), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (17560, 17562), True, 'import pandas as pd\n'), ((22919, 22933), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (22931, 22933), True, 'import pandas as pd\n'), ((23799, 23905), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'stop_words': '"""english"""', 'min_df': '(0.001)', 'ngram_range': '(1, 1)', 'token_pattern': '"""[a-zA-Z0-9#]+"""'}), "(stop_words='english', min_df=0.001, ngram_range=(1, 1),\n token_pattern='[a-zA-Z0-9#]+')\n", (23814, 23905), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((24363, 24409), 'pandas.DataFrame', 'pd.DataFrame', (['denselist'], {'columns': 'feature_names'}), '(denselist, columns=feature_names)\n', (24375, 24409), True, 'import pandas as pd\n'), ((24695, 24709), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (24707, 24709), True, 'import pandas as pd\n'), ((25943, 25962), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (25960, 25962), False, 'import logging\n'), ((26013, 26101), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s :: %(levelname)s :: %(funcName)20s() ::%(message)s"""'], {}), "(\n '%(asctime)s :: %(levelname)s :: 
%(funcName)20s() ::%(message)s')\n", (26030, 26101), False, 'import logging\n'), ((26107, 26121), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (26119, 26121), False, 'from datetime import datetime\n'), ((26379, 26402), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (26400, 26402), False, 'import logging\n'), ((27171, 27225), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""distilbert-base-nli-mean-tokens"""'], {}), "('distilbert-base-nli-mean-tokens')\n", (27190, 27225), False, 'from sentence_transformers import SentenceTransformer\n'), ((27691, 27711), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (27695, 27711), False, 'from sklearn.manifold import TSNE\n'), ((27890, 27915), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (27903, 27915), True, 'import seaborn as sns\n'), ((27920, 27944), 'seaborn.set_palette', 'sns.set_palette', (['"""muted"""'], {}), "('muted')\n", (27935, 27944), True, 'import seaborn as sns\n'), ((27949, 28019), 'seaborn.set_context', 'sns.set_context', (['"""notebook"""'], {'font_scale': '(1)', 'rc': "{'lines.linewidth': 2.5}"}), "('notebook', font_scale=1, rc={'lines.linewidth': 2.5})\n", (27964, 28019), True, 'import seaborn as sns\n'), ((28082, 28120), 'pandas.DataFrame', 'pd.DataFrame', (['low_dim_data', 'label_tsne'], {}), '(low_dim_data, label_tsne)\n', (28094, 28120), True, 'import pandas as pd\n'), ((28163, 28225), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'tsne_df', 'x': '"""x"""', 'y': '"""y"""', 'hue': 'tsne_df.index'}), "(data=tsne_df, x='x', y='y', hue=tsne_df.index)\n", (28178, 28225), True, 'import seaborn as sns\n'), ((28382, 28401), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-100)', '(100)'], {}), '(-100, 100)\n', (28390, 28401), True, 'import matplotlib.pyplot as plt\n'), ((28405, 28424), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-100)', '(100)'], {}), '(-100, 100)\n', (28413, 
28424), True, 'import matplotlib.pyplot as plt\n'), ((28495, 28555), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(paht2save + '/tsne_bert-embeddings_' + plotname)"], {}), "(paht2save + '/tsne_bert-embeddings_' + plotname)\n", (28506, 28555), True, 'import matplotlib.pyplot as plt\n'), ((28661, 28672), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (28670, 28672), True, 'import matplotlib.pyplot as plt\n'), ((29065, 29129), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'None', 'distance_threshold': '(1.5)'}), '(n_clusters=None, distance_threshold=1.5)\n', (29088, 29129), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((30425, 30479), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""distilbert-base-nli-mean-tokens"""'], {}), "('distilbert-base-nli-mean-tokens')\n", (30444, 30479), False, 'from sentence_transformers import SentenceTransformer\n'), ((31854, 31866), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (31862, 31866), True, 'import numpy as np\n'), ((35127, 35181), 'geopy.geocoders.Nominatim', 'Nominatim', ([], {'user_agent': '"""h-tfidf-evaluation"""', 'timeout': '(10)'}), "(user_agent='h-tfidf-evaluation', timeout=10)\n", (35136, 35181), False, 'from geopy.geocoders import Nominatim\n'), ((35197, 35249), 'geopy.extra.rate_limiter.RateLimiter', 'RateLimiter', (['geolocator.geocode'], {'min_delay_seconds': '(1)'}), '(geolocator.geocode, min_delay_seconds=1)\n', (35208, 35249), False, 'from geopy.extra.rate_limiter import RateLimiter\n'), ((35255, 35268), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (35266, 35268), False, 'from tqdm import tqdm\n'), ((35964, 35999), 'elasticsearch.logger.setLevel', 'es_logger.setLevel', (['logging.WARNING'], {}), '(logging.WARNING)\n', (35982, 35999), True, 'from elasticsearch import logger as es_logger\n'), ((38102, 38115), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (38113, 38115), False, 'from 
tqdm import tqdm\n'), ((40199, 40245), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(result_path + '/venn_' + locality)"], {}), "(result_path + '/venn_' + locality)\n", (40210, 40245), True, 'import matplotlib.pyplot as plt\n'), ((40977, 41007), 'pandas.to_datetime', 'pd.to_datetime', (['matrixOcc.date'], {}), '(matrixOcc.date)\n', (40991, 41007), True, 'import pandas as pd\n'), ((42187, 42201), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (42199, 42201), True, 'import pandas as pd\n'), ((42835, 42869), 'pandas.read_csv', 'pd.read_csv', (['htfidf_f'], {'index_col': '(0)'}), '(htfidf_f, index_col=0)\n', (42846, 42869), True, 'import pandas as pd\n'), ((42882, 42930), 'pandas.read_csv', 'pd.read_csv', (['tfidf_corpus_country_f'], {'index_col': '(0)'}), '(tfidf_corpus_country_f, index_col=0)\n', (42893, 42930), True, 'import pandas as pd\n'), ((47247, 47275), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (47259, 47275), True, 'import matplotlib.pyplot as plt\n'), ((47289, 47374), 'matplotlib_venn_wordcloud.venn3_wordcloud', 'venn3_wordcloud', (['sets'], {'set_labels': "['H-TFIDF', 'TF-IDF', 'Frequent terms']", 'ax': 'ax'}), "(sets, set_labels=['H-TFIDF', 'TF-IDF', 'Frequent terms'], ax=ax\n )\n", (47304, 47374), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((47374, 47427), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plot_f_out + '_' + country + 'venn3.png')"], {}), "(plot_f_out + '_' + country + 'venn3.png')\n", (47385, 47427), True, 'import matplotlib.pyplot as plt\n'), ((47431, 47441), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (47439, 47441), True, 'import matplotlib.pyplot as plt\n'), ((47611, 47645), 'pandas.read_csv', 'pd.read_csv', (['htfidf_f'], {'index_col': '(0)'}), '(htfidf_f, index_col=0)\n', (47622, 47645), True, 'import pandas as pd\n'), ((47658, 47697), 'pandas.read_csv', 'pd.read_csv', (['tfidf_whole_f'], {'index_col': '(0)'}), 
'(tfidf_whole_f, index_col=0)\n', (47669, 47697), True, 'import pandas as pd\n'), ((53332, 53360), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (53344, 53360), True, 'import matplotlib.pyplot as plt\n'), ((53374, 53459), 'matplotlib_venn_wordcloud.venn3_wordcloud', 'venn3_wordcloud', (['sets'], {'set_labels': "['H-TFIDF', 'TF-IDF', 'Frequent terms']", 'ax': 'ax'}), "(sets, set_labels=['H-TFIDF', 'TF-IDF', 'Frequent terms'], ax=ax\n )\n", (53389, 53459), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((53459, 53512), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(plot_f_out + '_' + country + 'venn3.png')"], {}), "(plot_f_out + '_' + country + 'venn3.png')\n", (53470, 53512), True, 'import matplotlib.pyplot as plt\n'), ((53516, 53526), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (53524, 53526), True, 'import matplotlib.pyplot as plt\n'), ((2082, 2134), 'datetime.datetime.strptime', 'datetime.strptime', (['inDate', '"""%a %b %d %H:%M:%S %z %Y"""'], {}), "(inDate, '%a %b %d %H:%M:%S %z %Y')\n", (2099, 2134), False, 'from datetime import datetime\n'), ((6959, 6991), 'pandas.DataFrame', 'pd.DataFrame', (['tweetsofcity[city]'], {}), '(tweetsofcity[city])\n', (6971, 6991), True, 'import pandas as pd\n'), ((17950, 18049), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'stop_words': '"""english"""', 'min_df': '(0.001)', 'ngram_range': '(1, 1)', 'token_pattern': '"""[<KEY>"""'}), "(stop_words='english', min_df=0.001, ngram_range=(1, 1),\n token_pattern='[<KEY>')\n", (17965, 18049), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((18589, 18635), 'pandas.DataFrame', 'pd.DataFrame', (['denselist'], {'columns': 'feature_names'}), '(denselist, columns=feature_names)\n', (18601, 18635), True, 'import pandas as pd\n'), ((28960, 29009), 'numpy.linalg.norm', 'np.linalg.norm', (['embeddings'], {'axis': '(1)', 
'keepdims': '(True)'}), '(embeddings, axis=1, keepdims=True)\n', (28974, 29009), True, 'import numpy as np\n'), ((31807, 31832), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['matrix'], {}), '(matrix)\n', (31824, 31832), False, 'from scipy import sparse\n'), ((31932, 31969), 'numpy.append', 'np.append', (['similarity_1D', 'row[i + 1:]'], {}), '(similarity_1D, row[i + 1:])\n', (31941, 31969), True, 'import numpy as np\n'), ((32169, 32214), 'scipy.spatial.distance.cdist', 'sp.distance.cdist', (['matrix1', 'matrix2', '"""cosine"""'], {}), "(matrix1, matrix2, 'cosine')\n", (32186, 32214), True, 'import scipy.spatial as sp\n'), ((33058, 33089), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_clusters'}), '(n_clusters=num_clusters)\n', (33064, 33089), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((34075, 34114), 'json.dump', 'json.dump', (['clustered_sentences', 'outfile'], {}), '(clustered_sentences, outfile)\n', (34084, 34114), False, 'import json\n'), ((36422, 36460), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['"""http://localhost:9200"""'], {}), "('http://localhost:9200')\n", (36435, 36460), False, 'from elasticsearch import Elasticsearch\n'), ((45281, 45313), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.27)'}), '(bottom=0.27)\n', (45300, 45313), True, 'import matplotlib.pyplot as plt\n'), ((45322, 45395), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% overlap between H-TFIDF / TF-IDF with most frequent terms"""'], {}), "('% overlap between H-TFIDF / TF-IDF with most frequent terms')\n", (45332, 45395), True, 'import matplotlib.pyplot as plt\n'), ((51373, 51405), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.27)'}), '(bottom=0.27)\n', (51392, 51405), True, 'import matplotlib.pyplot as plt\n'), ((51414, 51487), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""% overlap between H-TFIDF / TF-IDF with most frequent terms"""'], {}), "('% overlap 
between H-TFIDF / TF-IDF with most frequent terms')\n", (51424, 51487), True, 'import matplotlib.pyplot as plt\n'), ((54184, 54213), 'os.path.exists', 'os.path.exists', (['f_path_result'], {}), '(f_path_result)\n', (54198, 54213), False, 'import os\n'), ((54223, 54249), 'os.makedirs', 'os.makedirs', (['f_path_result'], {}), '(f_path_result)\n', (54234, 54249), False, 'import os\n'), ((61285, 61421), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result +\n '/tf-idf-classical/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_country_corpus.csv'\n )"], {'index_col': '(0)'}), "(f_path_result +\n '/tf-idf-classical/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_country_corpus.csv'\n , index_col=0)\n", (61296, 61421), True, 'import pandas as pd\n'), ((61441, 61532), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv')"], {}), "(f_path_result +\n '/tf-idf-classical/TFIDF_BiggestScore_on_whole_corpus.csv')\n", (61452, 61532), True, 'import pandas as pd\n'), ((61553, 61660), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/' + build_tsne_spatial_level + '/h-tfidf-Biggest-score.csv')"], {'index_col': '(0)'}), "(f_path_result + '/' + build_tsne_spatial_level +\n '/h-tfidf-Biggest-score.csv', index_col=0)\n", (61564, 61660), True, 'import pandas as pd\n'), ((62439, 62547), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result_tfidf_by_locality + '/TF-IDF_BiggestScore_on_country_corpus.csv'\n )"], {'index_col': '(0)'}), "(f_path_result_tfidf_by_locality +\n '/TF-IDF_BiggestScore_on_country_corpus.csv', index_col=0)\n", (62450, 62547), True, 'import pandas as pd\n'), ((62572, 62648), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result_tfidf + '/TFIDF_BiggestScore_on_whole_corpus.csv')"], {}), "(f_path_result_tfidf + '/TFIDF_BiggestScore_on_whole_corpus.csv')\n", (62583, 62648), True, 'import pandas as pd\n'), ((62673, 62783), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/' + build_boxplot_spatial_level +\n 
'/h-tfidf-Biggest-score.csv')"], {'index_col': '(0)'}), "(f_path_result + '/' + build_boxplot_spatial_level +\n '/h-tfidf-Biggest-score.csv', index_col=0)\n", (62684, 62783), True, 'import pandas as pd\n'), ((63483, 63499), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (63494, 63499), True, 'import matplotlib.pyplot as plt\n'), ((63508, 63537), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['htidf_similarity'], {}), '(htidf_similarity)\n', (63519, 63537), True, 'import matplotlib.pyplot as plt\n'), ((63546, 63566), 'matplotlib.pyplot.title', 'plt.title', (['"""H-TFIDF"""'], {}), "('H-TFIDF')\n", (63555, 63566), True, 'import matplotlib.pyplot as plt\n'), ((63575, 63589), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (63583, 63589), True, 'import matplotlib.pyplot as plt\n'), ((63597, 63613), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (63608, 63613), True, 'import matplotlib.pyplot as plt\n'), ((63622, 63659), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['tfidf_country_similarity'], {}), '(tfidf_country_similarity)\n', (63633, 63659), True, 'import matplotlib.pyplot as plt\n'), ((63668, 63709), 'matplotlib.pyplot.title', 'plt.title', (['"""TFIDF with corpus by country"""'], {}), "('TFIDF with corpus by country')\n", (63677, 63709), True, 'import matplotlib.pyplot as plt\n'), ((63718, 63732), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (63726, 63732), True, 'import matplotlib.pyplot as plt\n'), ((63741, 63757), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (63752, 63757), True, 'import matplotlib.pyplot as plt\n'), ((63766, 63801), 'matplotlib.pyplot.boxplot', 'plt.boxplot', (['tfidf_whole_similarity'], {}), '(tfidf_whole_similarity)\n', (63777, 63801), True, 'import matplotlib.pyplot as plt\n'), ((63810, 63848), 'matplotlib.pyplot.title', 'plt.title', (['"""TFIDF on the whole corpus"""'], {}), "('TFIDF on the whole corpus')\n", 
(63819, 63848), True, 'import matplotlib.pyplot as plt\n'), ((63857, 63871), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (63865, 63871), True, 'import matplotlib.pyplot as plt\n'), ((63880, 63898), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (63896, 63898), True, 'import matplotlib.pyplot as plt\n'), ((63907, 63938), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.3)'}), '(wspace=0.3)\n', (63926, 63938), True, 'import matplotlib.pyplot as plt\n'), ((63947, 64051), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Distribution of similarity values among the extracted terms pairs of a measure"""'], {}), "(\n 'Distribution of similarity values among the extracted terms pairs of a measure'\n )\n", (63959, 64051), True, 'import matplotlib.pyplot as plt\n'), ((64050, 64121), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(f_path_result_boxplot + '/pairwise-similarity-boxplot.png')"], {}), "(f_path_result_boxplot + '/pairwise-similarity-boxplot.png')\n", (64061, 64121), True, 'import matplotlib.pyplot as plt\n'), ((64149, 64160), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (64158, 64160), True, 'import matplotlib.pyplot as plt\n'), ((64298, 64316), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(5)'], {}), '(1, 5)\n', (64310, 64316), True, 'import matplotlib.pyplot as plt\n'), ((64643, 64733), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(f_path_result_boxplot + '/pairwise-similarity-boxplot_HTFIDF-country.png')"], {}), "(f_path_result_boxplot +\n '/pairwise-similarity-boxplot_HTFIDF-country.png')\n", (64654, 64733), True, 'import matplotlib.pyplot as plt\n'), ((64759, 64774), 'matplotlib.pyplot.close', 'plt.close', (['fig2'], {}), '(fig2)\n', (64768, 64774), True, 'import matplotlib.pyplot as plt\n'), ((64835, 64853), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(5)'], {}), '(1, 5)\n', (64847, 64853), True, 'import matplotlib.pyplot as plt\n'), 
((65207, 65296), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(f_path_result_boxplot + '/pairwise-similarity-boxplot_TFIDF-country.png')"], {}), "(f_path_result_boxplot +\n '/pairwise-similarity-boxplot_TFIDF-country.png')\n", (65218, 65296), True, 'import matplotlib.pyplot as plt\n'), ((65322, 65337), 'matplotlib.pyplot.close', 'plt.close', (['fig3'], {}), '(fig3)\n', (65331, 65337), True, 'import matplotlib.pyplot as plt\n'), ((65552, 65570), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (65564, 65570), True, 'import matplotlib.pyplot as plt\n'), ((65864, 65876), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (65872, 65876), True, 'import numpy as np\n'), ((65934, 65946), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (65942, 65946), True, 'import numpy as np\n'), ((66848, 66943), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(f_path_result_boxplot + '/pairwise-similarity-boxplot_between_TFIDF-whole.png'\n )"], {}), "(f_path_result_boxplot +\n '/pairwise-similarity-boxplot_between_TFIDF-whole.png')\n", (66859, 66943), True, 'import matplotlib.pyplot as plt\n'), ((66969, 67003), 'matplotlib.pyplot.close', 'plt.close', (['fig_compare_TFIDF_whole'], {}), '(fig_compare_TFIDF_whole)\n', (66978, 67003), True, 'import matplotlib.pyplot as plt\n'), ((67465, 67597), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_' +\n spatial_level + '_corpus.csv')"], {'index_col': '(0)'}), "(f_path_result +\n '/tfidf-tf-corpus-country/TF-IDF_BiggestScore_on_' + spatial_level +\n '_corpus.csv', index_col=0)\n", (67476, 67597), True, 'import pandas as pd\n'), ((67627, 67697), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/TFIDF_BiggestScore_on_whole_corpus.csv')"], {}), "(f_path_result + '/TFIDF_BiggestScore_on_whole_corpus.csv')\n", (67638, 67697), True, 'import pandas as pd\n'), ((67722, 67792), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + 
'/h-tfidf-Biggest-score.csv')"], {'index_col': '(0)'}), "(f_path_result + '/h-tfidf-Biggest-score.csv', index_col=0)\n", (67733, 67792), True, 'import pandas as pd\n'), ((16769, 16802), 'pandas.DataFrame', 'pd.DataFrame', (['tweets[tweetByCity]'], {}), '(tweets[tweetByCity])\n', (16781, 16802), True, 'import pandas as pd\n'), ((23194, 23227), 'pandas.DataFrame', 'pd.DataFrame', (['tweets[tweetByCity]'], {}), '(tweets[tweetByCity])\n', (23206, 23227), True, 'import pandas as pd\n'), ((33443, 33507), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'None', 'distance_threshold': '(1.5)'}), '(n_clusters=None, distance_threshold=1.5)\n', (33466, 33507), False, 'from sklearn.cluster import KMeans, AgglomerativeClustering\n'), ((44598, 44619), 'matplotlib_venn_wordcloud.venn2_wordcloud', 'venn2_wordcloud', (['sets'], {}), '(sets)\n', (44613, 44619), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((44632, 44648), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (44641, 44648), True, 'import matplotlib.pyplot as plt\n'), ((56496, 56532), 'os.path.exists', 'os.path.exists', (['f_path_result_common'], {}), '(f_path_result_common)\n', (56510, 56532), False, 'import os\n'), ((56546, 56579), 'os.makedirs', 'os.makedirs', (['f_path_result_common'], {}), '(f_path_result_common)\n', (56557, 56579), False, 'import os\n'), ((58301, 58336), 'os.path.exists', 'os.path.exists', (['f_path_result_tfidf'], {}), '(f_path_result_tfidf)\n', (58315, 58336), False, 'import os\n'), ((58350, 58382), 'os.makedirs', 'os.makedirs', (['f_path_result_tfidf'], {}), '(f_path_result_tfidf)\n', (58361, 58382), False, 'import os\n'), ((58398, 58445), 'os.path.exists', 'os.path.exists', (['f_path_result_tfidf_by_locality'], {}), '(f_path_result_tfidf_by_locality)\n', (58412, 58445), False, 'import os\n'), ((58459, 58503), 'os.makedirs', 'os.makedirs', (['f_path_result_tfidf_by_locality'], {}), 
'(f_path_result_tfidf_by_locality)\n', (58470, 58503), False, 'import os\n'), ((59779, 59830), 'os.path.exists', 'os.path.exists', (['f_path_result_compare_meassures_dir'], {}), '(f_path_result_compare_meassures_dir)\n', (59793, 59830), False, 'import os\n'), ((59844, 59892), 'os.makedirs', 'os.makedirs', (['f_path_result_compare_meassures_dir'], {}), '(f_path_result_compare_meassures_dir)\n', (59855, 59892), False, 'import os\n'), ((59972, 60062), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result_compare_meassures_dir + '/matrixOccurence.csv')"], {'index_col': '(0)'}), "(f_path_result_compare_meassures_dir + '/matrixOccurence.csv',\n index_col=0)\n", (59983, 60062), True, 'import pandas as pd\n'), ((60561, 60610), 'pandas.read_csv', 'pd.read_csv', (['f_path_result_compare_meassures_file'], {}), '(f_path_result_compare_meassures_file)\n', (60572, 60610), True, 'import pandas as pd\n'), ((61173, 61207), 'os.path.exists', 'os.path.exists', (['f_path_result_tsne'], {}), '(f_path_result_tsne)\n', (61187, 61207), False, 'import os\n'), ((61221, 61252), 'os.makedirs', 'os.makedirs', (['f_path_result_tsne'], {}), '(f_path_result_tsne)\n', (61232, 61252), False, 'import os\n'), ((62284, 62321), 'os.path.exists', 'os.path.exists', (['f_path_result_boxplot'], {}), '(f_path_result_boxplot)\n', (62298, 62321), False, 'import os\n'), ((62335, 62369), 'os.makedirs', 'os.makedirs', (['f_path_result_boxplot'], {}), '(f_path_result_boxplot)\n', (62346, 62369), False, 'import os\n'), ((66074, 66138), 'numpy.append', 'np.append', (['similarity_between_htfidf_tfidf_whole_1D', 'row[i + 1:]'], {}), '(similarity_between_htfidf_tfidf_whole_1D, row[i + 1:])\n', (66083, 66138), True, 'import numpy as np\n'), ((66313, 66383), 'numpy.append', 'np.append', (['similarity_between_tfidfcountry_tfidf_whole_1D', 'row[i + 1:]'], {}), '(similarity_between_tfidfcountry_tfidf_whole_1D, row[i + 1:])\n', (66322, 66383), True, 'import numpy as np\n'), ((69428, 69504), 'pandas.read_csv', 'pd.read_csv', 
(["(f_path_result_flood + '/h-tfidf-Biggest-score.csv')"], {'index_col': '(0)'}), "(f_path_result_flood + '/h-tfidf-Biggest-score.csv', index_col=0)\n", (69439, 69504), True, 'import pandas as pd\n'), ((71432, 71466), 'os.path.exists', 'os.path.exists', (['f_path_result_venn'], {}), '(f_path_result_venn)\n', (71446, 71466), False, 'import os\n'), ((71480, 71511), 'os.makedirs', 'os.makedirs', (['f_path_result_venn'], {}), '(f_path_result_venn)\n', (71491, 71511), False, 'import os\n'), ((71590, 71706), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result + '/' + build_venn_spatial_level +\n '/h-tfidf-Biggest-score-flooding.csv')"], {'index_col': '(0)'}), "(f_path_result + '/' + build_venn_spatial_level +\n '/h-tfidf-Biggest-score-flooding.csv', index_col=0)\n", (71601, 71706), True, 'import pandas as pd\n'), ((11910, 11940), 'pandas.to_datetime', 'pd.to_datetime', (['matrixOcc.date'], {}), '(matrixOcc.date)\n', (11924, 11940), True, 'import pandas as pd\n'), ((11945, 11973), 'pandas.to_timedelta', 'pd.to_timedelta', (['(7)'], {'unit': '"""d"""'}), "(7, unit='d')\n", (11960, 11973), True, 'import pandas as pd\n'), ((33323, 33372), 'numpy.linalg.norm', 'np.linalg.norm', (['embeddings'], {'axis': '(1)', 'keepdims': '(True)'}), '(embeddings, axis=1, keepdims=True)\n', (33337, 33372), True, 'import numpy as np\n'), ((37001, 37054), 'elasticsearch.Elasticsearch.search', 'Elasticsearch.search', (['client'], {'index': 'index', 'body': 'query'}), '(client, index=index, body=query)\n', (37021, 37054), False, 'from elasticsearch import Elasticsearch\n'), ((50532, 50553), 'matplotlib_venn_wordcloud.venn2_wordcloud', 'venn2_wordcloud', (['sets'], {}), '(sets)\n', (50547, 50553), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((50570, 50586), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (50579, 50586), True, 'import matplotlib.pyplot as plt\n'), ((57379, 57414), 'os.path.exists', 'os.path.exists', 
(['f_path_result_level'], {}), '(f_path_result_level)\n', (57393, 57414), False, 'import os\n'), ((57432, 57464), 'os.makedirs', 'os.makedirs', (['f_path_result_level'], {}), '(f_path_result_level)\n', (57443, 57464), False, 'import os\n'), ((70135, 70175), 'os.path.exists', 'os.path.exists', (['f_path_result_clustering'], {}), '(f_path_result_clustering)\n', (70149, 70175), False, 'import os\n'), ((70193, 70230), 'os.makedirs', 'os.makedirs', (['f_path_result_clustering'], {}), '(f_path_result_clustering)\n', (70204, 70230), False, 'import os\n'), ((70321, 70410), 'pandas.read_csv', 'pd.read_csv', (["(f_path_result_flood + '/h-tfidf-Biggest-score-flooding.csv')"], {'index_col': '(0)'}), "(f_path_result_flood + '/h-tfidf-Biggest-score-flooding.csv',\n index_col=0)\n", (70332, 70410), True, 'import pandas as pd\n'), ((4161, 4198), 'json.dumps', 'json.dumps', (["hits['_source']"], {'indent': '(4)'}), "(hits['_source'], indent=4)\n", (4171, 4198), False, 'import json\n'), ((43973, 43994), 'matplotlib_venn_wordcloud.venn2_wordcloud', 'venn2_wordcloud', (['sets'], {}), '(sets)\n', (43988, 43994), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((49885, 49906), 'matplotlib_venn_wordcloud.venn2_wordcloud', 'venn2_wordcloud', (['sets'], {}), '(sets)\n', (49900, 49906), False, 'from matplotlib_venn_wordcloud import venn2_wordcloud, venn3_wordcloud\n'), ((37354, 37375), 'collections.Counter', 'Counter', (['list_of_user'], {}), '(list_of_user)\n', (37361, 37375), False, 'from collections import defaultdict, Counter\n'), ((40091, 40113), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (40110, 40113), False, 'import operator\n'), ((41118, 41150), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""Y"""'}), "(key='date', freq='Y')\n", (41128, 41150), True, 'import pandas as pd\n'), ((41241, 41273), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""Y"""'}), "(key='date', 
freq='Y')\n", (41251, 41273), True, 'import pandas as pd\n'), ((12123, 12155), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""W"""'}), "(key='date', freq='W')\n", (12133, 12155), True, 'import pandas as pd\n'), ((41368, 41400), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""Y"""'}), "(key='date', freq='Y')\n", (41378, 41400), True, 'import pandas as pd\n'), ((12254, 12286), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""W"""'}), "(key='date', freq='W')\n", (12264, 12286), True, 'import pandas as pd\n'), ((12383, 12415), 'pandas.Grouper', 'pd.Grouper', ([], {'key': '"""date"""', 'freq': '"""W"""'}), "(key='date', freq='W')\n", (12393, 12415), True, 'import pandas as pd\n')] |
from bbpyp.common.exception.bbpyp_value_error import BbpypValueError
from bbpyp.common.model.queue_type import QueueType
class QueueFactory:
    """Callable factory that instantiates a queue for a given QueueType.

    The concrete factories for each supported queue flavour are injected at
    construction time; calling the instance dispatches on the requested type.
    """

    def __init__(self, fifo_queue_factory, sequence_queue_factory):
        self._fifo_queue_factory = fifo_queue_factory
        self._sequence_queue_factory = sequence_queue_factory

    def __call__(self, queue_type):
        """Build and return a queue of the requested type.

        Raises:
            BbpypValueError: if *queue_type* is not a supported QueueType.
        """
        if queue_type == QueueType.FIFO:
            return self._fifo_queue_factory()
        if queue_type == QueueType.SEQUENCE:
            return self._sequence_queue_factory()
        raise BbpypValueError("queue_type", queue_type, "unsupported queue type")
| [
"bbpyp.common.exception.bbpyp_value_error.BbpypValueError"
] | [((603, 670), 'bbpyp.common.exception.bbpyp_value_error.BbpypValueError', 'BbpypValueError', (['"""queue_type"""', 'queue_type', '"""unsupported queue type"""'], {}), "('queue_type', queue_type, 'unsupported queue type')\n", (618, 670), False, 'from bbpyp.common.exception.bbpyp_value_error import BbpypValueError\n')] |
from distutils.core import setup
# Packaging metadata for the pymetal library (metal-archives / darklyrics API).
# NOTE(review): distutils is deprecated and removed in Python 3.12 — consider
# migrating to setuptools.setup; verify against the supported Python versions.
setup(
    name='pymetal',
    version='0.5.0',
    packages=[],  # NOTE(review): empty — confirm the package modules are meant to be omitted
    install_requires=["requests", "bs4", "requests_cache",
                      "random-user-agent", "lxml"],
    url='https://www.github.com/OpenJarbas/pymetal',
    license='Apache2.0',
    author='jarbasAi',
    author_email='<EMAIL>',
    description='metal archives, dark lyrics api'
)
| [
"distutils.core.setup"
] | [((34, 349), 'distutils.core.setup', 'setup', ([], {'name': '"""pymetal"""', 'version': '"""0.5.0"""', 'packages': '[]', 'install_requires': "['requests', 'bs4', 'requests_cache', 'random-user-agent', 'lxml']", 'url': '"""https://www.github.com/OpenJarbas/pymetal"""', 'license': '"""Apache2.0"""', 'author': '"""jarbasAi"""', 'author_email': '"""<EMAIL>"""', 'description': '"""metal archives, dark lyrics api"""'}), "(name='pymetal', version='0.5.0', packages=[], install_requires=[\n 'requests', 'bs4', 'requests_cache', 'random-user-agent', 'lxml'], url=\n 'https://www.github.com/OpenJarbas/pymetal', license='Apache2.0',\n author='jarbasAi', author_email='<EMAIL>', description=\n 'metal archives, dark lyrics api')\n", (39, 349), False, 'from distutils.core import setup\n')] |
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
from .net_sphere import *
class ResCBAMLayer(nn.Module):
    """Residual CBAM-style attention block for 5-D (N, C, D, H, W) inputs.

    Channel attention (global avg/max pool -> shared 2-layer MLP -> softmax)
    is applied first, then spatial attention (channel-wise max/mean -> 3x3x3
    conv -> softmax), and the attended features are added back to the input
    as a residual connection.
    """

    def __init__(self, in_planes, feature_size):
        super(ResCBAMLayer, self).__init__()
        self.in_planes = in_planes
        self.feature_size = feature_size
        # Channel-attention branch: global pooling + bottleneck MLP.
        self.ch_AvgPool = nn.AvgPool3d(feature_size, feature_size)
        self.ch_MaxPool = nn.MaxPool3d(feature_size, feature_size)
        self.ch_Linear1 = nn.Linear(in_planes, in_planes // 4, bias=False)
        self.ch_Linear2 = nn.Linear(in_planes // 4, in_planes, bias=False)
        self.ch_Softmax = nn.Softmax(1)
        # Spatial-attention branch: 2-channel (max, mean) conv to 1 channel.
        self.sp_Conv = nn.Conv3d(2, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.sp_Softmax = nn.Softmax(1)

    def forward(self, x):
        batch = x.size(0)
        # --- channel attention ---
        avg_feat = self.ch_AvgPool(x).view(batch, -1)
        max_feat = self.ch_MaxPool(x).view(batch, -1)
        avg_excite = self.ch_Linear2(self.ch_Linear1(avg_feat))
        max_excite = self.ch_Linear2(self.ch_Linear1(max_feat))
        ch_weight = self.ch_Softmax(avg_excite + max_excite)
        ch_out = ch_weight.view(batch, self.in_planes, 1, 1, 1) * x
        # --- spatial attention ---
        sp_max = torch.max(ch_out, 1, keepdim=True)[0]
        sp_avg = torch.sum(ch_out, 1, keepdim=True) / self.in_planes
        sp_feat = self.sp_Conv(torch.cat([sp_max, sp_avg], dim=1))
        sp_weight = self.sp_Softmax(sp_feat.view(batch, -1)).view(
            batch, 1, x.size(2), x.size(3), x.size(4))
        # Residual connection.
        return sp_weight * x + x
def make_conv3d(in_channels: int, out_channels: int, kernel_size: typing.Union[int, tuple], stride: int,
padding: int, dilation=1, groups=1,
bias=True) -> nn.Module:
"""
produce a Conv3D with Batch Normalization and ReLU
:param in_channels: num of in in channels
:param out_channels: num of out channels
:param kernel_size: size of kernel int or tuple
:param stride: num of stride
:param padding: num of padding
:param bias: bias
:param groups: groups
:param dilation: dilation
:return: conv3d module
"""
module = nn.Sequential(
nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups,
bias=bias),
nn.BatchNorm3d(out_channels),
nn.ReLU())
return module
def conv3d_same_size(in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1,
bias=True):
"""
keep the w,h of inputs same as the outputs
:param in_channels: num of in in channels
:param out_channels: num of out channels
:param kernel_size: size of kernel int or tuple
:param stride: num of stride
:param dilation: Spacing between kernel elements
:param groups: Number of blocked connections from input channels to output channels.
:param bias: If True, adds a learnable bias to the output
:return: conv3d
"""
padding = kernel_size // 2
return make_conv3d(in_channels, out_channels, kernel_size, stride,
padding, dilation, groups,
bias)
def conv3d_pooling(in_channels, kernel_size, stride=1,
dilation=1, groups=1,
bias=False):
"""
pooling with convolution
:param in_channels:
:param kernel_size:
:param stride:
:param dilation:
:param groups:
:param bias:
:return: pooling-convolution
"""
padding = kernel_size // 2
return make_conv3d(in_channels, in_channels, kernel_size, stride,
padding, dilation, groups,
bias)
class ResidualBlock(nn.Module):
"""
a simple residual block
"""
def __init__(self, in_channels, out_channels):
super(ResidualBlock, self).__init__()
self.my_conv1 = make_conv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.my_conv2 = make_conv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.conv3 = make_conv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, inputs):
out1 = self.conv3(inputs)
out = self.my_conv1(inputs)
out = self.my_conv2(out)
out = out + out1
return out
| [
"torch.nn.ReLU",
"torch.nn.Softmax",
"torch.nn.AvgPool3d",
"torch.nn.MaxPool3d",
"torch.max",
"torch.sum",
"torch.nn.Linear",
"torch.nn.BatchNorm3d",
"torch.cat",
"torch.nn.Conv3d"
] | [((373, 413), 'torch.nn.AvgPool3d', 'nn.AvgPool3d', (['feature_size', 'feature_size'], {}), '(feature_size, feature_size)\n', (385, 413), True, 'import torch.nn as nn\n'), ((440, 480), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', (['feature_size', 'feature_size'], {}), '(feature_size, feature_size)\n', (452, 480), True, 'import torch.nn as nn\n'), ((507, 555), 'torch.nn.Linear', 'nn.Linear', (['in_planes', '(in_planes // 4)'], {'bias': '(False)'}), '(in_planes, in_planes // 4, bias=False)\n', (516, 555), True, 'import torch.nn as nn\n'), ((582, 630), 'torch.nn.Linear', 'nn.Linear', (['(in_planes // 4)', 'in_planes'], {'bias': '(False)'}), '(in_planes // 4, in_planes, bias=False)\n', (591, 630), True, 'import torch.nn as nn\n'), ((657, 670), 'torch.nn.Softmax', 'nn.Softmax', (['(1)'], {}), '(1)\n', (667, 670), True, 'import torch.nn as nn\n'), ((694, 757), 'torch.nn.Conv3d', 'nn.Conv3d', (['(2)', '(1)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(2, 1, kernel_size=3, stride=1, padding=1, bias=False)\n', (703, 757), True, 'import torch.nn as nn\n'), ((784, 797), 'torch.nn.Softmax', 'nn.Softmax', (['(1)'], {}), '(1)\n', (794, 797), True, 'import torch.nn as nn\n'), ((1462, 1510), 'torch.cat', 'torch.cat', (['[x_sp_max_pool, x_sp_avg_pool]'], {'dim': '(1)'}), '([x_sp_max_pool, x_sp_avg_pool], dim=1)\n', (1471, 1510), False, 'import torch\n'), ((2337, 2479), 'torch.nn.Conv3d', 'nn.Conv3d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'groups': 'groups', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias)\n', (2346, 2479), True, 'import torch.nn as nn\n'), ((2521, 2549), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['out_channels'], {}), '(out_channels)\n', (2535, 2549), True, 'import torch.nn as nn\n'), ((2559, 2568), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), 
'()\n', (2566, 2568), True, 'import torch.nn as nn\n'), ((1329, 1363), 'torch.max', 'torch.max', (['ch_out', '(1)'], {'keepdim': '(True)'}), '(ch_out, 1, keepdim=True)\n', (1338, 1363), False, 'import torch\n'), ((1391, 1425), 'torch.sum', 'torch.sum', (['ch_out', '(1)'], {'keepdim': '(True)'}), '(ch_out, 1, keepdim=True)\n', (1400, 1425), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import collections
import json
import re
punct = re.compile(r'(\w+)')
PATH_FOR_TXT_FILE = "base.txt"
PATH_FOR_1_GRAM_JSON = '1-grams_count_dictionary.json'
PATH_FOR_2_GRAM_JSON = '2-grams_count_dictionary.json'
#str(input("1-gram json file name withOUT .json")) + ".json"# to take the file names from user.
"""
def three_grams():
c3grams = collections.defaultdict()
c3grams_list = []
de3grams = collections.deque()
wordsdeq3 = collections.deque()
f = open(PATH_FOR_TXT_FILE,"r")
for line in f:
tokenized = [m.group() for m in punct.finditer(line)]
line = ' '.join(tokenized)
line = line.strip("\n").lower()
theline = line.split(" ")
new_line = []
for e in theline:
if not e :
continue
else:
new_line.append(e)
theline = new_line
del new_line
if len(theline) < 3:
continue
for word in theline:
if word:
wordsdeq3.append(word)
#wordsdeq3.appendleft("<S>")
#wordsdeq3.append("<E>")
for i in range(2):
de3grams.append(wordsdeq3.popleft())
while wordsdeq3:
newWord = wordsdeq3.popleft()
de3grams.append(newWord)
g3 = " ".join(de3grams)
#c3grams[g3] = c3grams.get(g3, 0) + 1
c3grams_list.append(g3)
de3grams.popleft()
de3grams.clear()
f.close()
del wordsdeq3
del de3grams
f3 = open("3-grams_count_dictionary.json","x")
c3grams = dict(sorted(c3grams.items(), key=lambda x: x[1], reverse= True))
f3.write(json.dumps(c3grams))
f3 = open("3-grams.txt","x")
for line in c3grams_list:
f3.write(line + "\n")
"""
def two_grams():
c2grams = collections.defaultdict()
c2grams_list = []
de2grams = collections.deque()
wordsdeq2 = collections.deque()
f = open(PATH_FOR_TXT_FILE,"r")
for line in f:
tokenized = [m.group() for m in punct.finditer(line)]
line = ' '.join(tokenized)
line = line.strip("\n").lower()
theline = line.split(" ")
new_line = []
for e in theline:
if not e:
continue
else:
new_line.append(e)
theline = new_line
del new_line
if len(theline) < 2:
continue
for word in theline:
if word:
wordsdeq2.append(word)
wordsdeq2.appendleft("<S>")
wordsdeq2.append("<E>")
for i in range(1):
de2grams.append(wordsdeq2.popleft())
while wordsdeq2:
newWord = wordsdeq2.popleft()
de2grams.append(newWord)
g2 = " ".join(de2grams)
c2grams[g2] = c2grams.get(g2, 0) + 1
c2grams_list.append(g2)
de2grams.popleft()
de2grams.clear()
f.close()
del wordsdeq2
del de2grams
f2 = open(PATH_FOR_2_GRAM_JSON, "w+")
c2grams = dict(sorted(c2grams.items(), key=lambda x: x[1], reverse=True))
f2.write(json.dumps(c2grams))
"""
f2 = open("2-grams.txt","+w")
for line in c2grams_list:
f2.write(line + "\n")
"""
def one_grams():
c1grams = collections.defaultdict()
c1grams_list = []
de1grams = collections.deque()
wordsdeq1 = collections.deque()
f = open(PATH_FOR_TXT_FILE,"r")
for line in f:
tokenized = [m.group() for m in punct.finditer(line)]
line = ' '.join(tokenized)
line = line.strip("\n").lower()
theline = line.split(" ")
new_line = []
for e in theline:
if not e:
continue
else:
new_line.append(e)
theline = new_line
del new_line
if len(theline) < 1:
continue
for word in theline:
if word:
wordsdeq1.append(word)
wordsdeq1.appendleft("<S>")
wordsdeq1.append("<E>")
for i in range(0):
de1grams.append(wordsdeq1.popleft())
while wordsdeq1:
newWord = wordsdeq1.popleft()
de1grams.append(newWord)
g1 = de1grams[0]
c1grams[g1] = c1grams.get(g1, 0) + 1
c1grams_list.append(g1)
de1grams.popleft()
de1grams.clear()
f.close()
del wordsdeq1
del de1grams
f1 = open(PATH_FOR_1_GRAM_JSON, "w+")
c1grams = dict(sorted(c1grams.items(), key=lambda x: x[1], reverse=True))
f1.write(json.dumps(c1grams))
"""
f1 = open("1-grams.txt","+w")
for line in c1grams_list:
f1.write(line + "\n")
"""
| [
"collections.deque",
"json.dumps",
"collections.defaultdict",
"re.compile"
] | [((74, 94), 're.compile', 're.compile', (['"""(\\\\w+)"""'], {}), "('(\\\\w+)')\n", (84, 94), False, 'import re\n'), ((1844, 1869), 'collections.defaultdict', 'collections.defaultdict', ([], {}), '()\n', (1867, 1869), False, 'import collections\n'), ((1907, 1926), 'collections.deque', 'collections.deque', ([], {}), '()\n', (1924, 1926), False, 'import collections\n'), ((1943, 1962), 'collections.deque', 'collections.deque', ([], {}), '()\n', (1960, 1962), False, 'import collections\n'), ((3292, 3317), 'collections.defaultdict', 'collections.defaultdict', ([], {}), '()\n', (3315, 3317), False, 'import collections\n'), ((3355, 3374), 'collections.deque', 'collections.deque', ([], {}), '()\n', (3372, 3374), False, 'import collections\n'), ((3391, 3410), 'collections.deque', 'collections.deque', ([], {}), '()\n', (3408, 3410), False, 'import collections\n'), ((3146, 3165), 'json.dumps', 'json.dumps', (['c2grams'], {}), '(c2grams)\n', (3156, 3165), False, 'import json\n'), ((4586, 4605), 'json.dumps', 'json.dumps', (['c1grams'], {}), '(c1grams)\n', (4596, 4605), False, 'import json\n')] |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
"""
Generates the booleans to determine card visibility,
based on dates in either the current, next, or previous term.
https://docs.google.com/document/d/14q26auOLPU34KFtkUmC_bkoo5dAwegRzgpwmZEQMhaU
"""
import logging
import traceback
from datetime import datetime, timedelta
from myuw.dao import log_err
from myuw.dao.term import get_comparison_datetime,\
get_current_quarter, get_next_quarter, get_previous_quarter,\
get_term_after, is_in_summer_quarter,\
is_in_summer_b_term, get_bod_current_term_class_start,\
get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction,\
get_eod_7d_after_class_start, get_eod_current_term_last_final_exam
from myuw.dao.term import get_bod_class_start_quarter_after as\
get_bod_quarter_after
from myuw.dao.iasystem import in_coursevel_fetch_window
logger = logging.getLogger(__name__)
def in_show_grades_period(term, request):
return (term is not None and request is not None and
get_comparison_datetime(request) < get_bod_quarter_after(term))
def get_card_visibilty_date_values(request=None):
values = get_values_by_date(get_comparison_datetime(request),
request)
set_js_overrides(request, values)
return values
def get_values_by_date(now, request):
"""
now is a datetime object of 1 second after the beginning of the day.
"""
reg_data = get_reg_data(now, request)
data = {
"is_after_7d_before_last_instruction":
is_after_7d_before_last_instruction(now, request),
"is_after_grade_submission_deadline":
is_before_bof_term(now, request),
"is_after_last_day_of_classes":
not is_before_last_day_of_classes(now, request),
"is_after_start_of_registration_display_period":
reg_data["after_start"],
"is_after_start_of_summer_reg_display_period1":
reg_data["after_summer1_start"],
"is_after_start_of_summer_reg_display_periodA":
reg_data["after_summerA_start"],
"is_before_eof_7days_of_term":
is_before_eof_7d_after_class_start(now, request),
"is_before_end_of_finals_week":
is_before_eof_finals_week(now, request),
"is_before_end_of_registration_display_period":
reg_data["after_start"],
"is_before_end_of_summer_reg_display_periodA":
reg_data["after_summerA_start"],
"is_before_end_of_summer_reg_display_period1":
reg_data["after_summer1_start"],
"is_before_first_day_of_term":
is_before_bof_term(now, request),
"is_before_last_day_of_classes":
is_before_last_day_of_classes(now, request),
"myplan_peak_load": during_myplan_peak_load(now, request),
"reg_period1_started": reg_data["period1_started"],
"is_summer": is_in_summer_quarter(request),
"is_after_summer_b": is_in_summer_b_term(request),
"in_coursevel_fetch_window": in_coursevel_fetch_window(request),
"comparison_date": get_comparison_datetime(request)
}
try:
last_term = get_previous_quarter(request)
data["current_summer_term"] = "{},summer".format(last_term.year)
data["last_term"] = "{},{}".format(last_term.year, last_term.quarter)
except Exception:
log_err(logger, "get_previous_quarter", traceback, request)
return data
def is_before_bof_term(now, request):
"""
The term switches after the grade submission deadline.
@return true if it is before the begining of the 1st day of instruction
"""
logger.debug("{} is_before_bof_term {} ==> {}".format(
now, get_bod_current_term_class_start(request),
now < get_bod_current_term_class_start(request)))
return now < get_bod_current_term_class_start(request)
def is_before_eof_7d_after_class_start(now, request):
"""
@return true if it is before the end of the 7 days
after the instruction start day
"""
logger.debug("{} is_before_eof_7d_after_class_start {} ==> {}".format(
now, get_eod_7d_after_class_start(request),
now < get_eod_7d_after_class_start(request)))
return now < get_eod_7d_after_class_start(request)
def is_after_7d_before_last_instruction(now, request):
"""
@return true if it is after the begining of 7 days
before instruction end
"""
logger.debug("{} is_after_7d_before_last_instruction {} ==> {}".format(
now, get_bod_7d_before_last_instruction(request),
now > get_bod_7d_before_last_instruction(request)))
return now > get_bod_7d_before_last_instruction(request)
def is_before_last_day_of_classes(now, request):
"""
@return true if it is before the end of the last day of classes
"""
logger.debug("{} is_before_last_day_of_classes {} ==> {}".format(
now, get_eod_current_term_last_instruction(request),
now < get_eod_current_term_last_instruction(request)))
return now < get_eod_current_term_last_instruction(request)
def is_before_eof_finals_week(now, request):
"""
@return true if it is before the end of the last day of finalsweek
"""
logger.debug("{} is_before_eof_finals_week {} ==> {}".format(
now, get_eod_current_term_last_final_exam(request),
now < get_eod_current_term_last_final_exam(request)))
return now < get_eod_current_term_last_final_exam(request)
def during_myplan_peak_load(now, request):
reg_data = get_reg_data(now, request)
logger.debug("{} myplan_peak_load ==> {}".format(
now, reg_data["myplan_peak_load"]))
return reg_data["myplan_peak_load"]
def get_reg_data(now, request):
"""
now is the second after mid-night
"""
if hasattr(request, "myuw_reg_data"):
return request.myuw_reg_data
term_reg_data = {
"after_start": False,
"after_summer1_start": False,
"after_summerA_start": False,
"period1_started": False,
"myplan_peak_load": False
}
next_term = get_next_quarter(request)
get_term_reg_data(now, next_term, term_reg_data)
# We need to show this term's registration stuff, because
# the period 2 stretches past the grade submission deadline
current_term = get_current_quarter(request)
get_term_reg_data(now, current_term, term_reg_data)
# We also need to be able to show the term after next, in spring quarter
term_after_next = get_term_after(next_term)
get_term_reg_data(now, term_after_next, term_reg_data)
request.myuw_reg_data = term_reg_data
return term_reg_data
def is_term_myplan_peak(now, term, data):
now_date = now.date()
if (now_date >= term.registration_period1_start and
now_date <= term.registration_period1_end):
peak_start_time = datetime(now.year, now.month, now.day, 5, 30, 0)
peak_end_time = datetime(now.year, now.month, now.day, 6, 30, 0)
if (now >= peak_start_time and now <= peak_end_time):
return True
return False
def get_term_reg_data(now, term, data):
if term.registration_period1_start is None:
data["myplan_peak_load"] = False
return
if not (data["myplan_peak_load"] is True):
data["myplan_peak_load"] = is_term_myplan_peak(now, term, data)
now = now.date()
if term.quarter == "summer":
if now >= term.registration_period1_start - timedelta(days=7) and\
now < term.registration_period1_start + timedelta(days=7):
data["after_summerA_start"] = True
data["before_summerA_end"] = True
if now >= term.registration_period1_start:
data["period1_started"] = True
elif now >= term.registration_period1_start + timedelta(days=7) and\
now < term.registration_period2_start + timedelta(days=7):
data["after_summer1_start"] = True
data["before_summer1_end"] = True
if now >= term.registration_period1_start:
data["period1_started"] = True
else:
if now >= term.registration_period1_start - timedelta(days=14) and\
now < term.registration_period2_start + timedelta(days=7):
data["after_start"] = True
data["before_end"] = True
if now >= term.registration_period1_start:
data["period1_started"] = True
def set_js_overrides(request, values):
after_reg = 'is_after_start_of_registration_display_period'
before_reg = 'is_before_end_of_registration_display_period'
MAP = {'myuw_after_submission': 'is_after_grade_submission_deadline',
'myuw_after_last_day': 'is_after_last_day_of_classes',
'myuw_after_reg': after_reg,
'myuw_before_finals_end': 'is_before_end_of_finals_week',
'myuw_before_last_day': 'is_before_last_day_of_classes',
'myuw_before_end_of_reg_display': before_reg,
'myuw_before_first_day': 'is_before_first_day_of_term',
'myuw_before_end_of_first_week': 'is_before_eof_7days_of_term',
'myuw_after_eval_start': 'is_after_7d_before_last_instruction',
'myplan_peak_load': 'myplan_peak_load',
'myuw_in_coursevel_fetch_window': 'in_coursevel_fetch_window'
}
for key in MAP:
if key in request.session:
values[MAP[key]] = request.session[key]
| [
"logging.getLogger",
"myuw.dao.term.get_previous_quarter",
"myuw.dao.term.get_eod_current_term_last_instruction",
"datetime.datetime",
"myuw.dao.term.get_bod_class_start_quarter_after",
"myuw.dao.term.get_comparison_datetime",
"myuw.dao.term.get_eod_7d_after_class_start",
"myuw.dao.term.is_in_summer_q... | [((923, 950), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (940, 950), False, 'import logging\n'), ((6109, 6134), 'myuw.dao.term.get_next_quarter', 'get_next_quarter', (['request'], {}), '(request)\n', (6125, 6134), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((6333, 6361), 'myuw.dao.term.get_current_quarter', 'get_current_quarter', (['request'], {}), '(request)\n', (6352, 6361), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((6517, 6542), 'myuw.dao.term.get_term_after', 'get_term_after', (['next_term'], {}), '(next_term)\n', (6531, 6542), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((1212, 1244), 'myuw.dao.term.get_comparison_datetime', 'get_comparison_datetime', (['request'], {}), '(request)\n', (1235, 1244), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, 
get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((2944, 2973), 'myuw.dao.term.is_in_summer_quarter', 'is_in_summer_quarter', (['request'], {}), '(request)\n', (2964, 2973), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((3004, 3032), 'myuw.dao.term.is_in_summer_b_term', 'is_in_summer_b_term', (['request'], {}), '(request)\n', (3023, 3032), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((3071, 3105), 'myuw.dao.iasystem.in_coursevel_fetch_window', 'in_coursevel_fetch_window', (['request'], {}), '(request)\n', (3096, 3105), False, 'from myuw.dao.iasystem import in_coursevel_fetch_window\n'), ((3134, 3166), 'myuw.dao.term.get_comparison_datetime', 'get_comparison_datetime', (['request'], {}), '(request)\n', (3157, 3166), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((3202, 3231), 'myuw.dao.term.get_previous_quarter', 'get_previous_quarter', (['request'], {}), '(request)\n', (3222, 3231), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, 
get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((3870, 3911), 'myuw.dao.term.get_bod_current_term_class_start', 'get_bod_current_term_class_start', (['request'], {}), '(request)\n', (3902, 3911), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((4273, 4310), 'myuw.dao.term.get_eod_7d_after_class_start', 'get_eod_7d_after_class_start', (['request'], {}), '(request)\n', (4301, 4310), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((4677, 4720), 'myuw.dao.term.get_bod_7d_before_last_instruction', 'get_bod_7d_before_last_instruction', (['request'], {}), '(request)\n', (4711, 4720), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((5067, 5113), 'myuw.dao.term.get_eod_current_term_last_instruction', 'get_eod_current_term_last_instruction', (['request'], {}), '(request)\n', (5104, 5113), False, 'from myuw.dao.term import 
get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((5453, 5498), 'myuw.dao.term.get_eod_current_term_last_final_exam', 'get_eod_current_term_last_final_exam', (['request'], {}), '(request)\n', (5489, 5498), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((6877, 6925), 'datetime.datetime', 'datetime', (['now.year', 'now.month', 'now.day', '(5)', '(30)', '(0)'], {}), '(now.year, now.month, now.day, 5, 30, 0)\n', (6885, 6925), False, 'from datetime import datetime, timedelta\n'), ((6950, 6998), 'datetime.datetime', 'datetime', (['now.year', 'now.month', 'now.day', '(6)', '(30)', '(0)'], {}), '(now.year, now.month, now.day, 6, 30, 0)\n', (6958, 6998), False, 'from datetime import datetime, timedelta\n'), ((1064, 1096), 'myuw.dao.term.get_comparison_datetime', 'get_comparison_datetime', (['request'], {}), '(request)\n', (1087, 1096), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((1099, 1126), 'myuw.dao.term.get_bod_class_start_quarter_after', 'get_bod_quarter_after', (['term'], {}), '(term)\n', (1120, 1126), True, 'from myuw.dao.term import get_bod_class_start_quarter_after as 
get_bod_quarter_after\n'), ((3413, 3472), 'myuw.dao.log_err', 'log_err', (['logger', '"""get_previous_quarter"""', 'traceback', 'request'], {}), "(logger, 'get_previous_quarter', traceback, request)\n", (3420, 3472), False, 'from myuw.dao import log_err\n'), ((3752, 3793), 'myuw.dao.term.get_bod_current_term_class_start', 'get_bod_current_term_class_start', (['request'], {}), '(request)\n', (3784, 3793), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((4163, 4200), 'myuw.dao.term.get_eod_7d_after_class_start', 'get_eod_7d_after_class_start', (['request'], {}), '(request)\n', (4191, 4200), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((4555, 4598), 'myuw.dao.term.get_bod_7d_before_last_instruction', 'get_bod_7d_before_last_instruction', (['request'], {}), '(request)\n', (4589, 4598), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((4939, 4985), 'myuw.dao.term.get_eod_current_term_last_instruction', 'get_eod_current_term_last_instruction', (['request'], {}), '(request)\n', (4976, 4985), False, 'from myuw.dao.term import get_comparison_datetime, 
get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((5327, 5372), 'myuw.dao.term.get_eod_current_term_last_final_exam', 'get_eod_current_term_last_final_exam', (['request'], {}), '(request)\n', (5363, 5372), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((3809, 3850), 'myuw.dao.term.get_bod_current_term_class_start', 'get_bod_current_term_class_start', (['request'], {}), '(request)\n', (3841, 3850), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((4216, 4253), 'myuw.dao.term.get_eod_7d_after_class_start', 'get_eod_7d_after_class_start', (['request'], {}), '(request)\n', (4244, 4253), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((4614, 4657), 'myuw.dao.term.get_bod_7d_before_last_instruction', 'get_bod_7d_before_last_instruction', (['request'], {}), '(request)\n', (4648, 4657), False, 'from 
myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((5001, 5047), 'myuw.dao.term.get_eod_current_term_last_instruction', 'get_eod_current_term_last_instruction', (['request'], {}), '(request)\n', (5038, 5047), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((5388, 5433), 'myuw.dao.term.get_eod_current_term_last_final_exam', 'get_eod_current_term_last_final_exam', (['request'], {}), '(request)\n', (5424, 5433), False, 'from myuw.dao.term import get_comparison_datetime, get_current_quarter, get_next_quarter, get_previous_quarter, get_term_after, is_in_summer_quarter, is_in_summer_b_term, get_bod_current_term_class_start, get_eod_current_term_last_instruction, get_bod_7d_before_last_instruction, get_eod_7d_after_class_start, get_eod_current_term_last_final_exam\n'), ((7475, 7492), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (7484, 7492), False, 'from datetime import datetime, timedelta\n'), ((7554, 7571), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (7563, 7571), False, 'from datetime import datetime, timedelta\n'), ((8178, 8196), 'datetime.timedelta', 'timedelta', ([], {'days': '(14)'}), '(days=14)\n', (8187, 8196), False, 'from datetime import datetime, timedelta\n'), ((8258, 8275), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (8267, 8275), False, 'from datetime import datetime, 
timedelta\n'), ((7823, 7840), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (7832, 7840), False, 'from datetime import datetime, timedelta\n'), ((7902, 7919), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (7911, 7919), False, 'from datetime import datetime, timedelta\n')] |
import tensorflow as tf
class Encoder(tf.keras.Model):
    def __init__(self, dim, **kwargs):
        """
        Encoder model: two stacked dense layers mapping inputs to a latent
        code of size z_dim.
        :param dim: hyperparameters of the model [h_dim, z_dim]
        :param kwargs: Keras parameters (Optional)
        """
        h_dim = dim[0]  # hidden layer width
        z_dim = dim[1]  # latent (output) dimension
        super(Encoder, self).__init__(**kwargs)
        # Layers are linear here; the ReLU non-linearity is applied in call().
        self.fc1 = tf.keras.layers.Dense(h_dim)
        self.fc2 = tf.keras.layers.Dense(z_dim)

    def call(self, inputs, training=None, mask=None):
        """
        Forward pass (invoked via __call__).
        :param inputs: input data
        :param training: (Not used)
        :param mask: (Not used)
        :return: latent code z = fc2(relu(fc1(inputs)))
        """
        h = tf.nn.relu(self.fc1(inputs))
        z = self.fc2(h)
        return z
"tensorflow.keras.layers.Dense"
] | [((416, 444), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['h_dim'], {}), '(h_dim)\n', (437, 444), True, 'import tensorflow as tf\n'), ((464, 492), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['z_dim'], {}), '(z_dim)\n', (485, 492), True, 'import tensorflow as tf\n')] |
#!/usr/bin/python
from __future__ import print_function
import GeoIP

# Open the GeoIP "Domain" database in standard (disk-read) mode and print
# the organisation/domain record for a sample address.
gi = GeoIP.open("/usr/local/share/GeoIP/GeoIPDomain.dat", GeoIP.GEOIP_STANDARD)
print(gi.org_by_addr("24.24.24.24"))
| [
"GeoIP.open"
] | [((77, 151), 'GeoIP.open', 'GeoIP.open', (['"""/usr/local/share/GeoIP/GeoIPDomain.dat"""', 'GeoIP.GEOIP_STANDARD'], {}), "('/usr/local/share/GeoIP/GeoIPDomain.dat', GeoIP.GEOIP_STANDARD)\n", (87, 151), False, 'import GeoIP\n')] |
from keras.models import Sequential
from keras.layers.core import Dense, Activation
def XOR():
    """Build a small MLP for learning the XOR function.

    Architecture: 2 inputs -> Dense(8) + tanh -> Dense(1) + sigmoid.
    :return: an uncompiled Keras ``Sequential`` model.
    """
    net = Sequential()
    layers = (
        Dense(8, input_dim=2),
        Activation('tanh'),
        Dense(1),
        Activation('sigmoid'),
    )
    for layer in layers:
        net.add(layer)
    return net
| [
"keras.layers.core.Dense",
"keras.layers.core.Activation",
"keras.models.Sequential"
] | [((108, 120), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (118, 120), False, 'from keras.models import Sequential\n'), ((135, 156), 'keras.layers.core.Dense', 'Dense', (['(8)'], {'input_dim': '(2)'}), '(8, input_dim=2)\n', (140, 156), False, 'from keras.layers.core import Dense, Activation\n'), ((172, 190), 'keras.layers.core.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (182, 190), False, 'from keras.layers.core import Dense, Activation\n'), ((206, 214), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (211, 214), False, 'from keras.layers.core import Dense, Activation\n'), ((230, 251), 'keras.layers.core.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (240, 251), False, 'from keras.layers.core import Dense, Activation\n')] |
"""
Utility functions for working with DataFrames
"""
import pandas
import numpy as np
TEST_DF = pandas.DataFrame([1,2,3])
class O:
    """
    A square shaped block for my PyTetris game.
    """

    def __init__(self):
        self.type = "O"
        self.color = (255, 255, 0)
        # 24x10 playfield mask with the 2x2 square placed near the top centre.
        shape = np.zeros([24, 10])
        shape[1:3, 4:6] = 1  # rows 1-2, columns 4-5 hold the piece
        self.position = shape
        # The O piece looks identical under rotation, so all four rotation
        # states share the same mask.
        self.position = [self.position] * 4
| [
"pandas.DataFrame",
"numpy.zeros"
] | [((99, 126), 'pandas.DataFrame', 'pandas.DataFrame', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (115, 126), False, 'import pandas\n'), ((297, 315), 'numpy.zeros', 'np.zeros', (['[24, 10]'], {}), '([24, 10])\n', (305, 315), True, 'import numpy as np\n')] |
from django.db import models
import api.json_worker as json_worker
"""
ABSTRACT MODEL
Class BibliographyTemplateModel
Handles all fields that are in our bibliography databases.
"""
class BibliographyTemplateModel(models.Model):
    """Abstract base model declaring every field of a bibliography record."""
    # FIELDS GO HERE
    id = models.IntegerField(primary_key=True) #1 - record ID (integer)
    book_author = models.CharField(default = "Bez autora", max_length=512, verbose_name = "Autor książki") #2.5 - Author (book author)
    co_authors = models.CharField(max_length=256, default = "Bez współtwórcy.", verbose_name = "Współtwórca") #3 - Co-author (string)
    editor = models.CharField(max_length = 256, default = "Bez redaktora.", verbose_name = "Redaktor") #4 - Editor (string)
    title = models.TextField(default = "Brak Tytułu.", verbose_name = "Tytuł") #5 - Title (string)
    subtitle = models.TextField(default = "Bez podtytułu", verbose_name = "Podtytuł") #6 - Subtitle (string)
    original_edition = models.TextField(default = "Bez wydania oryginalnego.", verbose_name = "Wydanie oryginalne") #7 - Original edition (string)
    series = models.TextField(default = "Bez numeru serii.", verbose_name = "Numer serii") #8 - Series number (string?)
    publication_date = models.TextField(default = "Brak roku wydania.", verbose_name = "Rok wydania") #9 - Publication year (TextField, string)
    publication = models.TextField(default = "Bez wydania.", verbose_name = "Wydanie") #10 - Edition (string)
    publication_place = models.TextField(default = "Bez miejsca wydania.", verbose_name = "Miejsce wydania") #11 - Place of publication (string)
    publisher = models.TextField(default = "Bez wydawcy.", verbose_name = "Wydawca") #12 - Publisher (string)
    source = models.TextField(default = "Bez źródła.", verbose_name = "Źródło") #13 - Source (string)
    number = models.TextField(default = "Bez numeru.", verbose_name = "Numer") #14 - Number (string)
    notebook = models.TextField(default = "Bez zeszytu.", verbose_name = "Zeszyt") #15 - Issue/notebook (string)
    pages = models.TextField(default = "0", verbose_name = "Ilość stron") #16 - Page count (TextField, string)
    language = models.TextField(default = "Bez języka.", verbose_name = "Język") #17 - Language (string)
    isbn_or_issn_number = models.TextField(default = "Bez numeru ISBN/ISSN.", verbose_name = "Numer ISBN/ISSN") #18 - ISBN/ISSN number (string)
    doi_number = models.TextField(default = "Bez numeru DOI.", verbose_name = "Numer DOI") #19 - DOI number (string)
    link = models.URLField(max_length=1024, verbose_name = "Link/Załącznik") #20 - Link/attachment (URLField)
    keywords_and_content = models.TextField(default = "Bez słów kluczowych/zawratości.", verbose_name = "Słowa kluczowe") #21 - Keywords, content (string)
    comments = models.TextField(default = "Bez komentarzy.", verbose_name = "Komentarze") #22 - Comments (string)

    def __str__(self):
        """Human-readable representation: the record's title."""
        return self.title

    class Meta:
        abstract = True  # abstract: no table is created for this model itself
        verbose_name_plural = "Test"
verbose_name_plural = "Test"
"""
Class NewBibliographyDynamicModel
Class handle method to CREATE new dynamic model from BibliographyTemplateModel
"""
class NewBibliographyDynamicModel(object):
    """Factory/cache for dynamically created model classes.

    Calling ``NewBibliographyDynamicModel(base_cls, tb_name)`` does NOT
    return an instance of this class: ``__new__`` returns a dynamically
    built subclass of ``base_cls`` bound to the table ``tb_name``.
    Results are cached, so repeated calls with the same table name
    return the very same class object.
    """
    _instance = dict()

    def __new__(cls, base_cls, tb_name):
        """
        Build (or fetch from cache) the dynamic model class.
        :param base_cls: base model class to subclass
        :param tb_name: table name, also used as the new class name
        :return: the dynamic subclass of ``base_cls``
        """
        cache_key = tb_name
        if cache_key in cls._instance:
            return cls._instance[cache_key]
        # Point the (shared) Meta of the base class at the requested table.
        meta_cls = base_cls.Meta
        meta_cls.db_table = tb_name
        class_attrs = {
            '__tablename__': tb_name,
            'Meta': meta_cls,
            '__module__': cls.__module__,
        }
        dynamic_cls = type(str(cache_key), (base_cls,), class_attrs)
        cls._instance[cache_key] = dynamic_cls
        return dynamic_cls
"""
Model to save meta data about the models available
"""
class MetaDBInfo(models.Model):
    """Bookkeeping model describing each available bibliography database."""
    #we need id, db_name, db_name to show, name of the Author
    id = models.AutoField(primary_key=True) #1 - record ID (integer)
    db_name = models.CharField(max_length=200)  # presumably the display name — confirm against usage
    real_db_name = models.CharField(max_length=200)  # presumably the actual table name — confirm
    author = models.CharField(max_length=50)  # author of the database
    def __str__(self) -> str:
        # NOTE(review): returns a fixed description rather than per-row data.
        return "Model to save meta data about existing databases"
"""
Initialise all models - initialise dynamic models
"""
meta_info = MetaDBInfo()
# NOTE(review): the next line rebinds `models`, shadowing the `models`
# module imported from django.db above — any later use of `models.<Field>`
# in this module would break. It only works because no model classes are
# declared below this point.
models = list() # List handles all models loaded, and pass to admin.py & serializers.py, it's really important list
models_from_json = json_worker.get_models("/data/models.json") # IMPORTANT: Function from json_worker.py
# models_from_json["models"] = sorted(models_from_json["models"]) # Keep the correct order in models, even when someone makes a typo and creates a new model at the end of models.json with "a" on start
for model in models_from_json["models"]:
    model = NewBibliographyDynamicModel(BibliographyTemplateModel, model) # Initialise new DynamicModel
    model._meta.verbose_name_plural = model._meta.db_table # IMPORTANT: Set name of table in Django Admin Panel to table name - remove extra "s" from name.
    models.append(model) # Append new dynamic model to list, to pass it to admin.py
| [
"api.json_worker.get_models",
"django.db.models.append",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.AutoField",
"django.db.models.URLField",
"django.db.models.CharField"
] | [((4466, 4509), 'api.json_worker.get_models', 'json_worker.get_models', (['"""/data/models.json"""'], {}), "('/data/models.json')\n", (4488, 4509), True, 'import api.json_worker as json_worker\n'), ((261, 298), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (280, 298), False, 'from django.db import models\n'), ((343, 432), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Bez autora"""', 'max_length': '(512)', 'verbose_name': '"""Autor książki"""'}), "(default='Bez autora', max_length=512, verbose_name=\n 'Autor książki')\n", (359, 432), False, 'from django.db import models\n'), ((479, 572), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'default': '"""Bez współtwórcy."""', 'verbose_name': '"""Współtwórca"""'}), "(max_length=256, default='Bez współtwórcy.', verbose_name=\n 'Współtwórca')\n", (495, 572), False, 'from django.db import models\n'), ((611, 699), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)', 'default': '"""Bez redaktora."""', 'verbose_name': '"""Redaktor"""'}), "(max_length=256, default='Bez redaktora.', verbose_name=\n 'Redaktor')\n", (627, 699), False, 'from django.db import models\n'), ((736, 798), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Brak Tytułu."""', 'verbose_name': '"""Tytuł"""'}), "(default='Brak Tytułu.', verbose_name='Tytuł')\n", (752, 798), False, 'from django.db import models\n'), ((838, 904), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez podtytułu"""', 'verbose_name': '"""Podtytuł"""'}), "(default='Bez podtytułu', verbose_name='Podtytuł')\n", (854, 904), False, 'from django.db import models\n'), ((955, 1048), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez wydania oryginalnego."""', 'verbose_name': '"""Wydanie oryginalne"""'}), "(default='Bez wydania oryginalnego.', verbose_name=\n 'Wydanie 
oryginalne')\n", (971, 1048), False, 'from django.db import models\n'), ((1093, 1166), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez numeru serii."""', 'verbose_name': '"""Numer serii"""'}), "(default='Bez numeru serii.', verbose_name='Numer serii')\n", (1109, 1166), False, 'from django.db import models\n'), ((1221, 1295), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Brak roku wydania."""', 'verbose_name': '"""Rok wydania"""'}), "(default='Brak roku wydania.', verbose_name='Rok wydania')\n", (1237, 1295), False, 'from django.db import models\n'), ((1355, 1419), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez wydania."""', 'verbose_name': '"""Wydanie"""'}), "(default='Bez wydania.', verbose_name='Wydanie')\n", (1371, 1419), False, 'from django.db import models\n'), ((1471, 1556), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez miejsca wydania."""', 'verbose_name': '"""Miejsce wydania"""'}), "(default='Bez miejsca wydania.', verbose_name='Miejsce wydania'\n )\n", (1487, 1556), False, 'from django.db import models\n'), ((1603, 1667), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez wydawcy."""', 'verbose_name': '"""Wydawca"""'}), "(default='Bez wydawcy.', verbose_name='Wydawca')\n", (1619, 1667), False, 'from django.db import models\n'), ((1708, 1770), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez źródła."""', 'verbose_name': '"""Źródło"""'}), "(default='Bez źródła.', verbose_name='Źródło')\n", (1724, 1770), False, 'from django.db import models\n'), ((1810, 1871), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez numeru."""', 'verbose_name': '"""Numer"""'}), "(default='Bez numeru.', verbose_name='Numer')\n", (1826, 1871), False, 'from django.db import models\n'), ((1912, 1975), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez zeszytu."""', 'verbose_name': 
'"""Zeszyt"""'}), "(default='Bez zeszytu.', verbose_name='Zeszyt')\n", (1928, 1975), False, 'from django.db import models\n'), ((2014, 2071), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""0"""', 'verbose_name': '"""Ilość stron"""'}), "(default='0', verbose_name='Ilość stron')\n", (2030, 2071), False, 'from django.db import models\n'), ((2126, 2187), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez języka."""', 'verbose_name': '"""Język"""'}), "(default='Bez języka.', verbose_name='Język')\n", (2142, 2187), False, 'from django.db import models\n'), ((2239, 2325), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez numeru ISBN/ISSN."""', 'verbose_name': '"""Numer ISBN/ISSN"""'}), "(default='Bez numeru ISBN/ISSN.', verbose_name=\n 'Numer ISBN/ISSN')\n", (2255, 2325), False, 'from django.db import models\n'), ((2373, 2442), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez numeru DOI."""', 'verbose_name': '"""Numer DOI"""'}), "(default='Bez numeru DOI.', verbose_name='Numer DOI')\n", (2389, 2442), False, 'from django.db import models\n'), ((2482, 2545), 'django.db.models.URLField', 'models.URLField', ([], {'max_length': '(1024)', 'verbose_name': '"""Link/Załącznik"""'}), "(max_length=1024, verbose_name='Link/Załącznik')\n", (2497, 2545), False, 'from django.db import models\n'), ((2597, 2692), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez słów kluczowych/zawratości."""', 'verbose_name': '"""Słowa kluczowe"""'}), "(default='Bez słów kluczowych/zawratości.', verbose_name=\n 'Słowa kluczowe')\n", (2613, 2692), False, 'from django.db import models\n'), ((2748, 2818), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Bez komentarzy."""', 'verbose_name': '"""Komentarze"""'}), "(default='Bez komentarzy.', verbose_name='Komentarze')\n", (2764, 2818), False, 'from django.db import models\n'), ((3940, 3974), 
'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (3956, 3974), False, 'from django.db import models\n'), ((4016, 4048), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (4032, 4048), False, 'from django.db import models\n'), ((4068, 4100), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (4084, 4100), False, 'from django.db import models\n'), ((4114, 4145), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (4130, 4145), False, 'from django.db import models\n'), ((5059, 5079), 'django.db.models.append', 'models.append', (['model'], {}), '(model)\n', (5072, 5079), False, 'from django.db import models\n')] |
import pytest
from yapic.di import Injector, InjectError, VALUE, FACTORY, SCOPED_SINGLETON, SINGLETON
def test_strategy_value():
    """VALUE strategy must hand back the provided object itself."""
    injector = Injector()
    provided = "VALUE"
    injector.provide("V", provided, VALUE)
    assert injector["V"] == "VALUE"
    assert injector["V"] is provided  # identity, not just equality
    # provide() returns a resolvable that yields the same value.
    assert injector.provide("V", provided, VALUE)(injector) == "VALUE"
def test_strategy_custom():
    """A user-supplied strategy callable can implement its own caching."""
    memo = dict()

    def memoizing_strategy(injectable, injector):
        # Construct once per injectable, then always return the cached object.
        if injectable not in memo:
            memo[injectable] = injectable(injector)
        return memo[injectable]

    class A:
        pass

    injector = Injector()
    injector.provide(A, A, memoizing_strategy)
    assert isinstance(injector[A], A)
    assert injector[A] is injector[A]
def test_strategy_scoped_singleton():
    """SCOPED_SINGLETON: repeated lookups in one injector yield one instance."""
    class A:
        pass
    injector = Injector()
    injector.provide(A, A, SCOPED_SINGLETON)
    assert isinstance(injector[A], A)
    assert injector[A] is injector[A]
    assert injector[A] is injector[A]
def test_strategy_singleton():
    """SINGLETON: repeated lookups yield the same single instance."""
    class A:
        pass
    injector = Injector()
    injector.provide(A, A, SINGLETON)
    assert isinstance(injector[A], A)
    assert injector[A] is injector[A]
    assert injector[A] is injector[A]
| [
"yapic.di.Injector"
] | [((146, 156), 'yapic.di.Injector', 'Injector', ([], {}), '()\n', (154, 156), False, 'from yapic.di import Injector, InjectError, VALUE, FACTORY, SCOPED_SINGLETON, SINGLETON\n'), ((672, 682), 'yapic.di.Injector', 'Injector', ([], {}), '()\n', (680, 682), False, 'from yapic.di import Injector, InjectError, VALUE, FACTORY, SCOPED_SINGLETON, SINGLETON\n'), ((886, 896), 'yapic.di.Injector', 'Injector', ([], {}), '()\n', (894, 896), False, 'from yapic.di import Injector, InjectError, VALUE, FACTORY, SCOPED_SINGLETON, SINGLETON\n'), ((1132, 1142), 'yapic.di.Injector', 'Injector', ([], {}), '()\n', (1140, 1142), False, 'from yapic.di import Injector, InjectError, VALUE, FACTORY, SCOPED_SINGLETON, SINGLETON\n')] |
import re
import sys
from notebook.notebookapp import main
from qulab.utils import ShutdownBlocker

if __name__ == '__main__':
    # Strip the setuptools console-script suffix ("-script.py[w]" / ".exe")
    # from argv[0] so the notebook sees a plain program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    # ShutdownBlocker presumably keeps the OS from shutting down while the
    # server runs — confirm in qulab.utils.
    with ShutdownBlocker('jupyter-notebook'):
        sys.exit(main())
| [
"re.sub",
"notebook.notebookapp.main",
"qulab.utils.ShutdownBlocker"
] | [((146, 198), 're.sub', 're.sub', (['"""(-script\\\\.pyw?|\\\\.exe)?$"""', '""""""', 'sys.argv[0]'], {}), "('(-script\\\\.pyw?|\\\\.exe)?$', '', sys.argv[0])\n", (152, 198), False, 'import re\n'), ((207, 242), 'qulab.utils.ShutdownBlocker', 'ShutdownBlocker', (['"""jupyter-notebook"""'], {}), "('jupyter-notebook')\n", (222, 242), False, 'from qulab.utils import ShutdownBlocker\n'), ((261, 267), 'notebook.notebookapp.main', 'main', ([], {}), '()\n', (265, 267), False, 'from notebook.notebookapp import main\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0436-Find-Right-Interval.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-05-20
=================================================================="""
import sys
import time
from typing import List
import bisect
# import functools
"""
LeetCode - 0436 - (Medium) - Find Right Interval
https://leetcode.com/problems/find-right-interval/
Description & Requirement:
You are given an array of intervals, where intervals[i] = [start_i, end_i] and each start_i is unique.
The right interval for an interval i is an interval j such that start_j >= end_i and start_j is minimized.
Note that i may equal j.
Return an array of right interval indices for each interval i.
If no right interval exists for interval i, then put -1 at index i.
Example 1:
Input: intervals = [[1,2]]
Output: [-1]
Explanation: There is only one interval in the collection, so it outputs -1.
Example 2:
Input: intervals = [[3,4],[2,3],[1,2]]
Output: [-1,0,1]
Explanation: There is no right interval for [3,4].
The right interval for [2,3] is [3,4] since start0 = 3 is the smallest start that is >= end1 = 3.
The right interval for [1,2] is [2,3] since start1 = 2 is the smallest start that is >= end2 = 2.
Example 3:
Input: intervals = [[1,4],[2,3],[3,4]]
Output: [-1,2,-1]
Explanation: There is no right interval for [1,4] and [3,4].
The right interval for [2,3] is [3,4] since start2 = 3 is the smallest start that is >= end1 = 3.
Constraints:
1 <= intervals.length <= 2 * 10^4
intervals[i].length == 2
-10^6 <= start_i <= end_i <= 10^6
The start point of each interval is unique.
"""
class Solution:
    def findRightInterval(self, intervals: List[List[int]]) -> List[int]:
        """Public entry point: validate input, then delegate.

        :param intervals: list of [start, end] pairs; starts are unique.
        :return: for each interval i, the index of the interval with the
            smallest start >= end_i, or -1 if no such interval exists.
        """
        # exception case
        assert isinstance(intervals, list) and len(intervals) >= 1
        # main method: (sort & binary search)
        return self._findRightInterval(intervals)

    def _findRightInterval(self, intervals: List[List[int]]) -> List[int]:
        """Sort the starts once, then binary-search each end. O(n log n).

        Fix vs. the previous version: the caller's list is no longer
        mutated (it used to append an index to every interval and sort
        the input in place).
        """
        num_intervals = len(intervals)
        if num_intervals == 1:
            return [-1]
        # (start, original index) pairs, sorted by start value.
        start_index_pairs = sorted(
            (interval[0], idx) for idx, interval in enumerate(intervals))
        start_values = [start for start, _ in start_index_pairs]
        res = [-1] * num_intervals
        for idx, (_start, _end) in enumerate(intervals):
            # Leftmost start that is >= this interval's end.
            bs_idx = bisect.bisect_left(start_values, _end)
            if bs_idx < num_intervals:
                res[idx] = start_index_pairs[bs_idx][1]
        return res
def main():
    """Run the example case through Solution and report CPU time."""
    # Example 1: Output: [-1]
    # intervals = [[1, 2]]
    # Example 2: Output: [-1,0,1]
    # intervals = [[3, 4], [2, 3], [1, 2]]
    # Example 3: Output: [-1,2,-1]
    intervals = [[1, 4], [2, 3], [3, 4]]
    solver = Solution()
    # time the solver call
    t_start = time.process_time()
    answer = solver.findRightInterval(intervals)
    t_end = time.process_time()
    # report the result and the elapsed CPU time
    print('\nAnswer:')
    print(answer)
    print('Running Time: %.5f ms' % ((t_end - t_start) * 1000))
if __name__ == "__main__":
sys.exit(main())
| [
"time.process_time",
"bisect.bisect_left"
] | [((3317, 3336), 'time.process_time', 'time.process_time', ([], {}), '()\n', (3334, 3336), False, 'import time\n'), ((3395, 3414), 'time.process_time', 'time.process_time', ([], {}), '()\n', (3412, 3414), False, 'import time\n'), ((2862, 2899), 'bisect.bisect_left', 'bisect.bisect_left', (['intervals', '[_end]'], {}), '(intervals, [_end])\n', (2880, 2899), False, 'import bisect\n')] |
from numpy import log, pi, arange, exp
from scipy.optimize import brentq
import matplotlib.pyplot as plot
from matplotlib import rc
import equation
def diagram_sum(x, d):
    """Two-body diagram sum: 4*pi / log(2 * x * d**2)."""
    log_term = log(d**2 * 2. * x)
    return 4. * pi / log_term
def diagram_sum_3body(x, d):
    """Two-body diagram sum plus the three-body contribution g3.

    g3 is obtained by solving the integral equation in the ``equation``
    module at 3*x (remaining arguments presumably: geometry '2D', cutoff
    20., step 0.1, bound-state size d — confirm against equation.py).
    """
    point=equation.equation(3.*x,'2D',20.,0.1,d)
    point.solve()
    g3=point.g3
    del point  # release the solver before returning
    return 4.*pi/log(d**2 *2.*x) + g3
# Two-body curve on a fine grid of bound-state sizes d.
drange=arange(0.6,5.,0.05)
xx=[d for d in drange]
ee=[1/d**2 for d in drange]  # bound-state energy parameter 1/d^2
# Solve mu = diagram_sum(mu, d) for each d; the bracket presumably sits
# just above the logarithm's singularity at mu = 0.5/d^2 — confirm.
yy=[brentq(lambda mu:mu - diagram_sum(mu,d),(0.5+0.01)/(d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-3) for d in drange]
# Three-body-corrected points on a coarse grid (each solve is expensive).
drange=arange(0.6,5.6,1.0)
zx=[d for d in drange]
ze=[1/d**2 for d in drange]
zz=[brentq(lambda mu:mu - diagram_sum_3body(mu,d),(1+0.01)/(2.*d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-2) for d in drange]
# Extra three-body points at intermediate d values...
drange=arange(0.7,1.5,0.1)
wx=[d for d in drange]
we=[1/d**2 for d in drange]
wz=[brentq(lambda mu:mu - diagram_sum_3body(mu,d),(1+0.01)/(2.*d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-2) for d in drange]
# ...and near the lower end of the range.
drange=arange(0.6,0.7,0.025)
fx=[d for d in drange]
fe=[1/d**2 for d in drange]
fz=[brentq(lambda mu:mu - diagram_sum_3body(mu,d),(1+0.01)/(2.*d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-2) for d in drange]
# Plot mu versus the size parameter d (line: two-body; dots: three-body).
plot.plot(xx,yy)
plot.plot(zx,zz,'o')
plot.plot(wx,wz,'o')
plot.plot(fx,fz,'o')
plot.xlabel('d, bound state size parameter')
plot.ylabel(r'$\mu$, self-consistent potential')
plot.savefig('results/potential_self-consistent.pdf')
plot.close()
# Plot mu versus the energy parameter 1/d^2.
plot.plot(ee,yy)
plot.plot(ze,zz,'o')
plot.plot(we,wz,'o')
plot.plot(fe,fz,'o')
rc('text', usetex=True)
plot.xlabel(r'$\frac{1}{d^2}$, bound state energy')
plot.ylabel(r'$\mu$, self-consistent potential')
plot.savefig('results/potential_energy_parameter.pdf')
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"equation.equation",
"matplotlib.pyplot.close",
"numpy.exp",
"matplotlib.rc",
"numpy.arange"
] | [((379, 401), 'numpy.arange', 'arange', (['(0.6)', '(5.0)', '(0.05)'], {}), '(0.6, 5.0, 0.05)\n', (385, 401), False, 'from numpy import log, pi, arange, exp\n'), ((578, 599), 'numpy.arange', 'arange', (['(0.6)', '(5.6)', '(1.0)'], {}), '(0.6, 5.6, 1.0)\n', (584, 599), False, 'from numpy import log, pi, arange, exp\n'), ((784, 805), 'numpy.arange', 'arange', (['(0.7)', '(1.5)', '(0.1)'], {}), '(0.7, 1.5, 0.1)\n', (790, 805), False, 'from numpy import log, pi, arange, exp\n'), ((990, 1013), 'numpy.arange', 'arange', (['(0.6)', '(0.7)', '(0.025)'], {}), '(0.6, 0.7, 0.025)\n', (996, 1013), False, 'from numpy import log, pi, arange, exp\n'), ((1192, 1209), 'matplotlib.pyplot.plot', 'plot.plot', (['xx', 'yy'], {}), '(xx, yy)\n', (1201, 1209), True, 'import matplotlib.pyplot as plot\n'), ((1209, 1231), 'matplotlib.pyplot.plot', 'plot.plot', (['zx', 'zz', '"""o"""'], {}), "(zx, zz, 'o')\n", (1218, 1231), True, 'import matplotlib.pyplot as plot\n'), ((1230, 1252), 'matplotlib.pyplot.plot', 'plot.plot', (['wx', 'wz', '"""o"""'], {}), "(wx, wz, 'o')\n", (1239, 1252), True, 'import matplotlib.pyplot as plot\n'), ((1251, 1273), 'matplotlib.pyplot.plot', 'plot.plot', (['fx', 'fz', '"""o"""'], {}), "(fx, fz, 'o')\n", (1260, 1273), True, 'import matplotlib.pyplot as plot\n'), ((1272, 1316), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""d, bound state size parameter"""'], {}), "('d, bound state size parameter')\n", (1283, 1316), True, 'import matplotlib.pyplot as plot\n'), ((1317, 1365), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""$\\\\mu$, self-consistent potential"""'], {}), "('$\\\\mu$, self-consistent potential')\n", (1328, 1365), True, 'import matplotlib.pyplot as plot\n'), ((1366, 1419), 'matplotlib.pyplot.savefig', 'plot.savefig', (['"""results/potential_self-consistent.pdf"""'], {}), "('results/potential_self-consistent.pdf')\n", (1378, 1419), True, 'import matplotlib.pyplot as plot\n'), ((1420, 1432), 'matplotlib.pyplot.close', 'plot.close', ([], {}), '()\n', 
(1430, 1432), True, 'import matplotlib.pyplot as plot\n'), ((1434, 1451), 'matplotlib.pyplot.plot', 'plot.plot', (['ee', 'yy'], {}), '(ee, yy)\n', (1443, 1451), True, 'import matplotlib.pyplot as plot\n'), ((1451, 1473), 'matplotlib.pyplot.plot', 'plot.plot', (['ze', 'zz', '"""o"""'], {}), "(ze, zz, 'o')\n", (1460, 1473), True, 'import matplotlib.pyplot as plot\n'), ((1472, 1494), 'matplotlib.pyplot.plot', 'plot.plot', (['we', 'wz', '"""o"""'], {}), "(we, wz, 'o')\n", (1481, 1494), True, 'import matplotlib.pyplot as plot\n'), ((1493, 1515), 'matplotlib.pyplot.plot', 'plot.plot', (['fe', 'fz', '"""o"""'], {}), "(fe, fz, 'o')\n", (1502, 1515), True, 'import matplotlib.pyplot as plot\n'), ((1514, 1537), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (1516, 1537), False, 'from matplotlib import rc\n'), ((1538, 1589), 'matplotlib.pyplot.xlabel', 'plot.xlabel', (['"""$\\\\frac{1}{d^2}$, bound state energy"""'], {}), "('$\\\\frac{1}{d^2}$, bound state energy')\n", (1549, 1589), True, 'import matplotlib.pyplot as plot\n'), ((1590, 1638), 'matplotlib.pyplot.ylabel', 'plot.ylabel', (['"""$\\\\mu$, self-consistent potential"""'], {}), "('$\\\\mu$, self-consistent potential')\n", (1601, 1638), True, 'import matplotlib.pyplot as plot\n'), ((1639, 1693), 'matplotlib.pyplot.savefig', 'plot.savefig', (['"""results/potential_energy_parameter.pdf"""'], {}), "('results/potential_energy_parameter.pdf')\n", (1651, 1693), True, 'import matplotlib.pyplot as plot\n'), ((245, 291), 'equation.equation', 'equation.equation', (['(3.0 * x)', '"""2D"""', '(20.0)', '(0.1)', 'd'], {}), "(3.0 * x, '2D', 20.0, 0.1, d)\n", (262, 291), False, 'import equation\n'), ((189, 210), 'numpy.log', 'log', (['(d ** 2 * 2.0 * x)'], {}), '(d ** 2 * 2.0 * x)\n', (192, 210), False, 'from numpy import log, pi, arange, exp\n'), ((349, 370), 'numpy.log', 'log', (['(d ** 2 * 2.0 * x)'], {}), '(d ** 2 * 2.0 * x)\n', (352, 370), False, 'from numpy import log, pi, arange, 
exp\n'), ((522, 542), 'numpy.exp', 'exp', (['(8 * pi * d ** 2)'], {}), '(8 * pi * d ** 2)\n', (525, 542), False, 'from numpy import log, pi, arange, exp\n'), ((728, 748), 'numpy.exp', 'exp', (['(8 * pi * d ** 2)'], {}), '(8 * pi * d ** 2)\n', (731, 748), False, 'from numpy import log, pi, arange, exp\n'), ((934, 954), 'numpy.exp', 'exp', (['(8 * pi * d ** 2)'], {}), '(8 * pi * d ** 2)\n', (937, 954), False, 'from numpy import log, pi, arange, exp\n'), ((1142, 1162), 'numpy.exp', 'exp', (['(8 * pi * d ** 2)'], {}), '(8 * pi * d ** 2)\n', (1145, 1162), False, 'from numpy import log, pi, arange, exp\n')] |
# Simulates a network with nodes, where each node can be either a
# transmitter or receiver (but not both) at any time step. The simulation
# examines the coverage based on the signal-to-interference ratio (SINR).
# The network has a random medium access control (MAC) scheme based on a
# determinantal point process, as outlined in the paper[1] by
# B\laszczyszyn, Brochard and Keeler. This code validates by simulation
# Propositions IV.1 and IV.2 in the paper[1]. This result gives the
# probability of coverage based on the SINR value of a transmitter-receiver
# pair in a non-random network of transmitter-or-receiver nodes such as a
# realization of a random point process.
#
# More specifically, the code estimates the probability of x and y being
# connected (ie SINR(x,y)>tau) given that x is transmitting and
# y isn't.
#
# The simulation section estimates the empirical probability of SINR-based
# coverage. For a large enough number of simulations, this empirical result
# will agree with the analytic results given in the paper[1].
#
# By coverage, it is assumed that the SINR of the transmitter is larger
# than some threshold at the corresponding receiver.
#
# Probabilities for other events are calculated/estimated including:
#
# Event A=SINR(x,y) > tau
# Event B=Transmitter exists
# Event C=Receiver exists
#
# This code was originally written by <NAME> for the paper by
# B\laszczyszyn, Brochard and Keeler[1].
#
# If you use this code in published research, please cite paper[1].
#
# References:
#
# [1] B\laszczyszyn, Brochard and Keeler, "Coverage probability in
# wireless networks with determinantal scheduling", 2020.
#
# Author: <NAME>, 2020.
from funProbCovTXRXDet import funProbCovTXRXDet
import numpy as np # NumPy package for arrays, random number generation, etc
import matplotlib.pyplot as plt # for plotting
# simulate determinantal point process
from funSimSimpleDPP import funSimSimpleDPP
from funPalmK import funPalmK # find Palm distribution (for a single point)
from funLtoK import funLtoK # convert L kernel to a (normalized) K kernel
plt.close("all") # close all figures
#set random seed for reproducibility
np.random.seed(1)
###START -- Parameters -- START###
choiceExample = 1 # 1 or 2 for a random (uniform) or deterministic example
numbSim = 10**4 # number of simulations
numbNodes = 10 # number of pairs
indexTrans = 0 # index for transmitter
indexRec = 1 # index for receiver
#above indices are bounded by numbNodes
#fading model
muFading = 1/3 # Rayleigh fading average
#path loss model
betaPath = 2 # pathloss exponent
kappaPath = 1 # rescaling constant for pathloss function
thresholdSINR = 0.1 # SINR threshold value
constNoise = 0 # noise constant
#Determinantal kernel parameters
choiceKernel = 1 # 1 for Gaussian (ie squared exponetial );2 for Cauchy
#3 for independent (ie binomial) model
sigma = 1 # parameter for Gaussian and Cauchy kernel
alpha = 1 # parameter for Cauchy kernel
pAloha = 0.5 # parameter for independent kernel (ie proportion transmitting)
#Simulation window parameters
xMin = -1
xMax = 1 # x dimensions
yMin = -1
yMax = 1 # y dimensions
xDelta = xMax-xMin # rectangle width
yDelta = yMax-yMin # rectangle height
###END -- Parameters -- END###
#Simulate a random point process for the network configuration
#interferer section
if (choiceExample == 1):
#random (uniform) x/y coordinates
#transmitters or receivers
xx = xDelta*(np.random.rand(numbNodes))+xMin
yy = yDelta*(np.random.rand(numbNodes))+yMin
else:
#non-random x/y coordinates
#transmitters or receivers
t = 2*np.pi*np.linspace(0, (numbNodes-1)/numbNodes, numbNodes)
xx = (1+np.cos(5*t+1))/2
yy = (1+np.sin(3*t+2))/2
#transmitter location
xxTX = xx[indexTrans]
yyTX = yy[indexTrans]
#Receiver location
xxRX = xx[indexRec]
yyRX = yy[indexRec]
# START -- CREATE L matrix -- START
sizeL = numbNodes
#Calculate Gaussian or Cauchy kernel based on grid x/y values
#all squared distances of x/y difference pairs
xxDiff = np.outer(xx, np.ones((sizeL,)))-np.outer(np.ones((sizeL,)), xx)
yyDiff = np.outer(yy, np.ones((sizeL,)))-np.outer(np.ones((sizeL,)), yy)
rrDiffSquared = (xxDiff**2+yyDiff**2)
if choiceKernel == 1:
#Gaussian/squared exponential kernel
L = np.exp(-(rrDiffSquared)/sigma**2)
elif choiceKernel == 2:
#Cauchy kernel
L = 1/(1+rrDiffSquared/sigma**2)**(alpha+1/2)
else:
raise Exception('choiceKernel has to be equal to 1 or 2.')
L = 10*L # scale matrix up (increases the eigenvalues ie number of points)
# END-- CREATE L matrix -- # END
#Eigen decomposition
eigenValL, eigenVecL = np.linalg.eig(L)
#Helper functions
def funPathloss(r):
return (kappaPath*(1+r))**(-betaPath) # pathloss function
#Functions for the proability of being connected
def fun_h(s, r):
return (1/(thresholdSINR*(funPathloss(s)/funPathloss(r))+1))
def fun_w(r):
return (np.exp(-(thresholdSINR/muFading)*constNoise/funPathloss(r)))
#initialize boolean vectors/arrays for collecting statistics
booleA = np.zeros(numbSim, dtype=bool) # transmitter is connected
booleB = np.zeros(numbSim, dtype=bool) # transmitter exists
booleC = np.zeros(numbSim, dtype=bool) # receiver exists
#loop through all simulations
for ss in range(numbSim):
#DPP for active transmitter nodes
indexDPP = funSimSimpleDPP(eigenVecL, eigenValL)
booleB[ss] = any(indexDPP == indexTrans) # if transmitter is in subset
booleC[ss] = all(indexDPP != indexRec) # if receiver is not in subset
#if transmitter is in the determinantal subset, calculate its SINR
if booleB[ss]:
#create Boolean variable for active interferers
booleInter = np.zeros(numbNodes, dtype=bool)
booleInter[indexDPP] = True
booleInter[indexTrans] = False # exclude transmitter
#x/y values of interfering nodes
xxInter = xx[booleInter]
yyInter = yy[booleInter]
#number of interferers
numbInter = np.sum(booleInter)
#simulate signal for interferers
fadeRandInter = np.random.exponential(muFading, numbInter) # fading
distPathInter = np.hypot(xxInter-xxRX, yyInter-yyRX) # path distance
proplossInter = fadeRandInter*funPathloss(distPathInter) # pathloss
#simulate signal for transmitter
fadeRandSig = np.random.exponential(muFading) # fading
distPathSig = np.hypot(xxTX-xxRX, yyTX-yyRX) # path distance
proplossSig = fadeRandSig*funPathloss(distPathSig) # pathloss
#Calculate the SINR
SINR = proplossSig/(np.sum(proplossInter)+constNoise)
#see if transmitter is connected
booleA[ss] = (SINR > thresholdSINR)
booleBandC = booleB & booleC # transmitter-receiver pair exists
booleNotC = ~booleC # receiver does not exist
booleBandNotC = booleB & booleNotC # transmitter exists, receiver does not
###START Create kernels and Palm kernels START###
K = funLtoK(L) # caclulate K kernel from kernel L
sizeK = K.shape[0] # number of columns/rows in kernel matrix K
#Calculate all respective distances (based on random network configuration)
#from all transmitters to receiver
dist_ji_xx = np.outer(xx, np.ones((sizeK,)))-np.outer(np.ones((sizeK,)), xxRX)
dist_ji_yy = np.outer(yy, np.ones((sizeK,)))-np.outer(np.ones((sizeK,)), yyRX)
dist_ji = np.hypot(dist_ji_xx, dist_ji_yy) # Euclidean distances
#transmitters to receivers
dist_ii_xx = xxTX-xxRX
dist_ii_yy = yyTX-yyRX
dist_ii = np.hypot(dist_ii_xx, dist_ii_yy) # Euclidean distances
# repeat cols for element-wise evaluation
dist_ii = np.tile(dist_ii, (sizeK, 1))
#apply functions
hMatrix = fun_h(dist_ji, dist_ii) # matrix H for all h_{x_i}(x_j) values
W_x = fun_w(np.hypot(xx-xxRX, yy-yyRX)) # noise factor
##create h matrix corresponding to transmitter
booleAll = np.ones(sizeK, dtype=bool)
booleReduced = booleAll
booleReduced[indexTrans] = False # remove transmitter
#choose transmitter-receiver row
hVectorReduced = hMatrix[booleReduced, indexTrans]
#repeat vector hVectorReduced as rows
hMatrixReduced = np.tile(hVectorReduced, (sizeK-1, 1))
hMatrixReduced = hMatrixReduced.transpose()
#create Palm kernels conditioned on transmitter existing
KPalmReducedTX, KPalmTX = funPalmK(K, indexTrans)
#create Palm kernels conditioned on receiver existing
KPalmRXReduced, KPalmRX = funPalmK(K, indexRec)
#create Palm kernels conditioned on transmitter AND receiver existing
_, KPalmTXRX = funPalmK(KPalmTX, indexRec)
#create reduced (by transmitter) Palm kernel conditioned on transmitter
#AND receiver existing
indexReduced = np.arange(sizeK)[booleReduced]
KPalmSemiReducedTXRX = np.eye(sizeK-1)
for i in range(KPalmTXRX.shape[0]-1):
KPalmSemiReducedTXRX[:, i] = KPalmTXRX[indexReduced, indexReduced[i]]
#calculate final kernels
#for transmitter
KReduced_hTX = np.sqrt(1-hMatrixReduced.transpose()) * \
KPalmReducedTX*np.sqrt(1-hMatrixReduced)
##for reciever and transmitter
KReduced_hRX = np.sqrt(1-hMatrixReduced.transpose()) * \
KPalmSemiReducedTXRX*np.sqrt(1-hMatrixReduced)
###END Create kernels and Palm kernels END###
###START Connection Proability (ie SINR>thresholdConst) START###
#calculate probabiliity for the event that transmitter's
#signal at the receiver has an SINR>thresholdConst, given the pair is
# active (ie trasnmitting and receiving); see Section IV in paper[1].
#probability transmitter exists (ie transmitter at indexTrans) - event B
probB = K[indexTrans, indexTrans]
probB_Emp = np.mean(booleB)
#probability receiver exists (ie no transmitter at indexRec) - event C
probC = 1-K[indexRec, indexRec]
probC_Emp = np.mean(booleC)
#probability transmitter but no receiver
indexPair = np.array([indexTrans, indexRec])
probBNotC = np.linalg.det(K[indexPair, :][:, indexPair])
probBNotC_Emp = np.mean(booleBandNotC)
#
#probability transmitter and receiver existing
probBandC = probB-probBNotC
probBandC_Emp = np.mean(booleBandC)
#probability of SINR>threshold (ie transmiter is connected ) given B
probA_GivenB = np.linalg.det(np.eye(sizeK-1)-KReduced_hTX)*W_x[indexTrans]
probA_GivenB_Emp = np.mean(booleA[booleB])
#probability of SINR>threshold (ie transmiter is connected ) given B and C
probA_GivenBNotC = np.linalg.det(np.eye(sizeK-1)-KReduced_hRX)*W_x[indexTrans]
probA_GivenBNotC_Emp = np.mean(booleA[booleNotC])
#probability B given NOT C (ie a transmitter exists at indexRec)
probB_GivenNotC = KPalmRX[indexTrans, indexTrans]
probB_GivenNotC_Emp = np.mean(booleB[booleNotC])
#probability B given C
probB_GivenC = (probB-(1-probC)*probB_GivenNotC)/probC
probB_GivenC_Emp = np.mean(booleB[booleC])
#probability NOT C (ie a transmitter exists at indexRec) given B
probNotC_GivenB = KPalmTX[indexRec, indexRec]
probNotC_GivenB_Emp = np.mean(booleNotC[booleB])
#probability C given B
probC_GivenB_Emp = np.mean(booleC[booleB])
probC_GivenB = 1-probNotC_GivenB
print('Conditional coverage probability (ie A given B and C).')
#coverage probability ie probability of A given B and C
probA_GivenBandC = (probA_GivenB-probNotC_GivenB*probA_GivenBNotC)/probC_GivenB
print('probA_GivenBandC = ', probA_GivenBandC)
#Estimate empirical probability two different ways
#Directly
probA_GivenBandC_Emp1 = np.mean(booleA[booleBandC])
print('probA_GivenBandC_Emp1 = ', probA_GivenBandC_Emp1)
#Indirectly
probA_GivenBandC_Emp2 = (probA_GivenB_Emp-probNotC_GivenB_Emp*probA_GivenBNotC_Emp)\
/ probC_GivenB_Emp
print('Coverage probability (ie A given B and C).')
#connection probability
probCov = probA_GivenBandC*probBandC
print('probCov = ', probCov)
probCov_Emp1 = np.mean(booleA & booleB & booleC)
print('probCov_Emp1 = ', probCov_Emp1)
#probCov_Emp2=probA_GivenBandC_Emp2*probBandC_Emp
#probCovCond=probA_GivenBandC #conditional coverage probability
#probTXRX=probBandC #probability of pair existing
#connection probability
#probCov=probCovCond*probTXRX
###END Connection Proability (ie SINR>thresholdConst) END###
#TEST
probCov, probTXRX, probCovCond = funProbCovTXRXDet(
xx, yy, fun_h, fun_w, L, indexTrans, indexRec)
if indexDPP.size > 0:
### START -- Plotting -- START ###
markerSize = 13
#random color vector
vectorColor = np.random.rand(3) # random vector for colors of marker
#Plot point process
plt.plot(xx, yy, 'ko', markerfacecolor="None", markersize=markerSize)
#Plot determinantally-thinned point process
plt.plot(xx[indexDPP], yy[indexDPP], 'k.', markerfacecolor=vectorColor,
markersize=1.1*markerSize, markeredgecolor='none')
plt.axis('equal')
plt.axis('off')
plt.legend(('Original point process', 'Determinantal subset'))
### END -- Plotting -- END ###
#end
| [
"numpy.sqrt",
"numpy.random.rand",
"numpy.random.exponential",
"numpy.array",
"funPalmK.funPalmK",
"numpy.sin",
"numpy.arange",
"numpy.mean",
"funProbCovTXRXDet.funProbCovTXRXDet",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.linspace",
"numpy.random.seed",
"n... | [((2079, 2095), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (2088, 2095), True, 'import matplotlib.pyplot as plt\n'), ((2155, 2172), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2169, 2172), True, 'import numpy as np\n'), ((4619, 4635), 'numpy.linalg.eig', 'np.linalg.eig', (['L'], {}), '(L)\n', (4632, 4635), True, 'import numpy as np\n'), ((5032, 5061), 'numpy.zeros', 'np.zeros', (['numbSim'], {'dtype': 'bool'}), '(numbSim, dtype=bool)\n', (5040, 5061), True, 'import numpy as np\n'), ((5099, 5128), 'numpy.zeros', 'np.zeros', (['numbSim'], {'dtype': 'bool'}), '(numbSim, dtype=bool)\n', (5107, 5128), True, 'import numpy as np\n'), ((5160, 5189), 'numpy.zeros', 'np.zeros', (['numbSim'], {'dtype': 'bool'}), '(numbSim, dtype=bool)\n', (5168, 5189), True, 'import numpy as np\n'), ((6928, 6938), 'funLtoK.funLtoK', 'funLtoK', (['L'], {}), '(L)\n', (6935, 6938), False, 'from funLtoK import funLtoK\n'), ((7319, 7351), 'numpy.hypot', 'np.hypot', (['dist_ji_xx', 'dist_ji_yy'], {}), '(dist_ji_xx, dist_ji_yy)\n', (7327, 7351), True, 'import numpy as np\n'), ((7458, 7490), 'numpy.hypot', 'np.hypot', (['dist_ii_xx', 'dist_ii_yy'], {}), '(dist_ii_xx, dist_ii_yy)\n', (7466, 7490), True, 'import numpy as np\n'), ((7566, 7594), 'numpy.tile', 'np.tile', (['dist_ii', '(sizeK, 1)'], {}), '(dist_ii, (sizeK, 1))\n', (7573, 7594), True, 'import numpy as np\n'), ((7802, 7828), 'numpy.ones', 'np.ones', (['sizeK'], {'dtype': 'bool'}), '(sizeK, dtype=bool)\n', (7809, 7828), True, 'import numpy as np\n'), ((8048, 8087), 'numpy.tile', 'np.tile', (['hVectorReduced', '(sizeK - 1, 1)'], {}), '(hVectorReduced, (sizeK - 1, 1))\n', (8055, 8087), True, 'import numpy as np\n'), ((8214, 8237), 'funPalmK.funPalmK', 'funPalmK', (['K', 'indexTrans'], {}), '(K, indexTrans)\n', (8222, 8237), False, 'from funPalmK import funPalmK\n'), ((8318, 8339), 'funPalmK.funPalmK', 'funPalmK', (['K', 'indexRec'], {}), '(K, indexRec)\n', (8326, 8339), False, 'from 
funPalmK import funPalmK\n'), ((8426, 8453), 'funPalmK.funPalmK', 'funPalmK', (['KPalmTX', 'indexRec'], {}), '(KPalmTX, indexRec)\n', (8434, 8453), False, 'from funPalmK import funPalmK\n'), ((8618, 8635), 'numpy.eye', 'np.eye', (['(sizeK - 1)'], {}), '(sizeK - 1)\n', (8624, 8635), True, 'import numpy as np\n'), ((9459, 9474), 'numpy.mean', 'np.mean', (['booleB'], {}), '(booleB)\n', (9466, 9474), True, 'import numpy as np\n'), ((9591, 9606), 'numpy.mean', 'np.mean', (['booleC'], {}), '(booleC)\n', (9598, 9606), True, 'import numpy as np\n'), ((9661, 9693), 'numpy.array', 'np.array', (['[indexTrans, indexRec]'], {}), '([indexTrans, indexRec])\n', (9669, 9693), True, 'import numpy as np\n'), ((9706, 9750), 'numpy.linalg.det', 'np.linalg.det', (['K[indexPair, :][:, indexPair]'], {}), '(K[indexPair, :][:, indexPair])\n', (9719, 9750), True, 'import numpy as np\n'), ((9767, 9789), 'numpy.mean', 'np.mean', (['booleBandNotC'], {}), '(booleBandNotC)\n', (9774, 9789), True, 'import numpy as np\n'), ((9883, 9902), 'numpy.mean', 'np.mean', (['booleBandC'], {}), '(booleBandC)\n', (9890, 9902), True, 'import numpy as np\n'), ((10067, 10090), 'numpy.mean', 'np.mean', (['booleA[booleB]'], {}), '(booleA[booleB])\n', (10074, 10090), True, 'import numpy as np\n'), ((10269, 10295), 'numpy.mean', 'np.mean', (['booleA[booleNotC]'], {}), '(booleA[booleNotC])\n', (10276, 10295), True, 'import numpy as np\n'), ((10434, 10460), 'numpy.mean', 'np.mean', (['booleB[booleNotC]'], {}), '(booleB[booleNotC])\n', (10441, 10460), True, 'import numpy as np\n'), ((10559, 10582), 'numpy.mean', 'np.mean', (['booleB[booleC]'], {}), '(booleB[booleC])\n', (10566, 10582), True, 'import numpy as np\n'), ((10717, 10743), 'numpy.mean', 'np.mean', (['booleNotC[booleB]'], {}), '(booleNotC[booleB])\n', (10724, 10743), True, 'import numpy as np\n'), ((10787, 10810), 'numpy.mean', 'np.mean', (['booleC[booleB]'], {}), '(booleC[booleB])\n', (10794, 10810), True, 'import numpy as np\n'), ((11178, 11205), 
'numpy.mean', 'np.mean', (['booleA[booleBandC]'], {}), '(booleA[booleBandC])\n', (11185, 11205), True, 'import numpy as np\n'), ((11542, 11575), 'numpy.mean', 'np.mean', (['(booleA & booleB & booleC)'], {}), '(booleA & booleB & booleC)\n', (11549, 11575), True, 'import numpy as np\n'), ((11936, 12000), 'funProbCovTXRXDet.funProbCovTXRXDet', 'funProbCovTXRXDet', (['xx', 'yy', 'fun_h', 'fun_w', 'L', 'indexTrans', 'indexRec'], {}), '(xx, yy, fun_h, fun_w, L, indexTrans, indexRec)\n', (11953, 12000), False, 'from funProbCovTXRXDet import funProbCovTXRXDet\n'), ((4266, 4301), 'numpy.exp', 'np.exp', (['(-rrDiffSquared / sigma ** 2)'], {}), '(-rrDiffSquared / sigma ** 2)\n', (4272, 4301), True, 'import numpy as np\n'), ((5318, 5355), 'funSimSimpleDPP.funSimSimpleDPP', 'funSimSimpleDPP', (['eigenVecL', 'eigenValL'], {}), '(eigenVecL, eigenValL)\n', (5333, 5355), False, 'from funSimSimpleDPP import funSimSimpleDPP\n'), ((7699, 7729), 'numpy.hypot', 'np.hypot', (['(xx - xxRX)', '(yy - yyRX)'], {}), '(xx - xxRX, yy - yyRX)\n', (7707, 7729), True, 'import numpy as np\n'), ((8564, 8580), 'numpy.arange', 'np.arange', (['sizeK'], {}), '(sizeK)\n', (8573, 8580), True, 'import numpy as np\n'), ((8865, 8892), 'numpy.sqrt', 'np.sqrt', (['(1 - hMatrixReduced)'], {}), '(1 - hMatrixReduced)\n', (8872, 8892), True, 'import numpy as np\n'), ((9004, 9031), 'numpy.sqrt', 'np.sqrt', (['(1 - hMatrixReduced)'], {}), '(1 - hMatrixReduced)\n', (9011, 9031), True, 'import numpy as np\n'), ((12131, 12148), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (12145, 12148), True, 'import numpy as np\n'), ((12215, 12284), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'yy', '"""ko"""'], {'markerfacecolor': '"""None"""', 'markersize': 'markerSize'}), "(xx, yy, 'ko', markerfacecolor='None', markersize=markerSize)\n", (12223, 12284), True, 'import matplotlib.pyplot as plt\n'), ((12337, 12465), 'matplotlib.pyplot.plot', 'plt.plot', (['xx[indexDPP]', 'yy[indexDPP]', '"""k."""'], 
{'markerfacecolor': 'vectorColor', 'markersize': '(1.1 * markerSize)', 'markeredgecolor': '"""none"""'}), "(xx[indexDPP], yy[indexDPP], 'k.', markerfacecolor=vectorColor,\n markersize=1.1 * markerSize, markeredgecolor='none')\n", (12345, 12465), True, 'import matplotlib.pyplot as plt\n'), ((12477, 12494), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (12485, 12494), True, 'import matplotlib.pyplot as plt\n'), ((12499, 12514), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (12507, 12514), True, 'import matplotlib.pyplot as plt\n'), ((12519, 12581), 'matplotlib.pyplot.legend', 'plt.legend', (["('Original point process', 'Determinantal subset')"], {}), "(('Original point process', 'Determinantal subset'))\n", (12529, 12581), True, 'import matplotlib.pyplot as plt\n'), ((3610, 3664), 'numpy.linspace', 'np.linspace', (['(0)', '((numbNodes - 1) / numbNodes)', 'numbNodes'], {}), '(0, (numbNodes - 1) / numbNodes, numbNodes)\n', (3621, 3664), True, 'import numpy as np\n'), ((4032, 4049), 'numpy.ones', 'np.ones', (['(sizeL,)'], {}), '((sizeL,))\n', (4039, 4049), True, 'import numpy as np\n'), ((4060, 4077), 'numpy.ones', 'np.ones', (['(sizeL,)'], {}), '((sizeL,))\n', (4067, 4077), True, 'import numpy as np\n'), ((4105, 4122), 'numpy.ones', 'np.ones', (['(sizeL,)'], {}), '((sizeL,))\n', (4112, 4122), True, 'import numpy as np\n'), ((4133, 4150), 'numpy.ones', 'np.ones', (['(sizeL,)'], {}), '((sizeL,))\n', (4140, 4150), True, 'import numpy as np\n'), ((5676, 5707), 'numpy.zeros', 'np.zeros', (['numbNodes'], {'dtype': 'bool'}), '(numbNodes, dtype=bool)\n', (5684, 5707), True, 'import numpy as np\n'), ((5966, 5984), 'numpy.sum', 'np.sum', (['booleInter'], {}), '(booleInter)\n', (5972, 5984), True, 'import numpy as np\n'), ((6051, 6093), 'numpy.random.exponential', 'np.random.exponential', (['muFading', 'numbInter'], {}), '(muFading, numbInter)\n', (6072, 6093), True, 'import numpy as np\n'), ((6128, 6168), 'numpy.hypot', 
'np.hypot', (['(xxInter - xxRX)', '(yyInter - yyRX)'], {}), '(xxInter - xxRX, yyInter - yyRX)\n', (6136, 6168), True, 'import numpy as np\n'), ((6323, 6354), 'numpy.random.exponential', 'np.random.exponential', (['muFading'], {}), '(muFading)\n', (6344, 6354), True, 'import numpy as np\n'), ((6387, 6421), 'numpy.hypot', 'np.hypot', (['(xxTX - xxRX)', '(yyTX - yyRX)'], {}), '(xxTX - xxRX, yyTX - yyRX)\n', (6395, 6421), True, 'import numpy as np\n'), ((7177, 7194), 'numpy.ones', 'np.ones', (['(sizeK,)'], {}), '((sizeK,))\n', (7184, 7194), True, 'import numpy as np\n'), ((7205, 7222), 'numpy.ones', 'np.ones', (['(sizeK,)'], {}), '((sizeK,))\n', (7212, 7222), True, 'import numpy as np\n'), ((7256, 7273), 'numpy.ones', 'np.ones', (['(sizeK,)'], {}), '((sizeK,))\n', (7263, 7273), True, 'import numpy as np\n'), ((7284, 7301), 'numpy.ones', 'np.ones', (['(sizeK,)'], {}), '((sizeK,))\n', (7291, 7301), True, 'import numpy as np\n'), ((3443, 3468), 'numpy.random.rand', 'np.random.rand', (['numbNodes'], {}), '(numbNodes)\n', (3457, 3468), True, 'import numpy as np\n'), ((3492, 3517), 'numpy.random.rand', 'np.random.rand', (['numbNodes'], {}), '(numbNodes)\n', (3506, 3517), True, 'import numpy as np\n'), ((3673, 3690), 'numpy.cos', 'np.cos', (['(5 * t + 1)'], {}), '(5 * t + 1)\n', (3679, 3690), True, 'import numpy as np\n'), ((3702, 3719), 'numpy.sin', 'np.sin', (['(3 * t + 2)'], {}), '(3 * t + 2)\n', (3708, 3719), True, 'import numpy as np\n'), ((10002, 10019), 'numpy.eye', 'np.eye', (['(sizeK - 1)'], {}), '(sizeK - 1)\n', (10008, 10019), True, 'import numpy as np\n'), ((10200, 10217), 'numpy.eye', 'np.eye', (['(sizeK - 1)'], {}), '(sizeK - 1)\n', (10206, 10217), True, 'import numpy as np\n'), ((6563, 6584), 'numpy.sum', 'np.sum', (['proplossInter'], {}), '(proplossInter)\n', (6569, 6584), True, 'import numpy as np\n')] |
from setuptools import setup
with open("qondor/include/VERSION", "r") as f:
version = f.read().strip()
setup(
name="qondor",
version=version,
license="BSD 3-Clause License",
description="Description text",
url="https://github.com/tklijnsma/qondor.git",
author="<NAME>",
author_email="<EMAIL>",
packages=["qondor"],
zip_safe=False,
scripts=[
"bin/qondor-submit",
"bin/qondor-resubmit",
"bin/qondor-status",
"bin/qondor-make-cmssw-tarball",
"bin/qondor-version",
],
install_requires=["seutils"],
include_package_data=True,
)
| [
"setuptools.setup"
] | [((109, 548), 'setuptools.setup', 'setup', ([], {'name': '"""qondor"""', 'version': 'version', 'license': '"""BSD 3-Clause License"""', 'description': '"""Description text"""', 'url': '"""https://github.com/tklijnsma/qondor.git"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'packages': "['qondor']", 'zip_safe': '(False)', 'scripts': "['bin/qondor-submit', 'bin/qondor-resubmit', 'bin/qondor-status',\n 'bin/qondor-make-cmssw-tarball', 'bin/qondor-version']", 'install_requires': "['seutils']", 'include_package_data': '(True)'}), "(name='qondor', version=version, license='BSD 3-Clause License',\n description='Description text', url=\n 'https://github.com/tklijnsma/qondor.git', author='<NAME>',\n author_email='<EMAIL>', packages=['qondor'], zip_safe=False, scripts=[\n 'bin/qondor-submit', 'bin/qondor-resubmit', 'bin/qondor-status',\n 'bin/qondor-make-cmssw-tarball', 'bin/qondor-version'],\n install_requires=['seutils'], include_package_data=True)\n", (114, 548), False, 'from setuptools import setup\n')] |
"""
任务:
1、就是普通函数
2、该函数必须通过celery的实例对象的tasks装饰其装饰
3、该任务需要让celery实例对象自动检测
4、任务(函数)需要使用任务名(函数名).delay() 进行调用
"""
from libs.yuntongxun.sms import CCP
from celery_tasks.main import app
@app.task
def send_sms_code(mobile,sms_code):
ccp = CCP()
ccp.send_template_sms(mobile, [sms_code, 5], 1)
| [
"libs.yuntongxun.sms.CCP"
] | [((257, 262), 'libs.yuntongxun.sms.CCP', 'CCP', ([], {}), '()\n', (260, 262), False, 'from libs.yuntongxun.sms import CCP\n')] |
#!/usr/bin/python
#coding=utf-8
from simplified_scrapy.simplified_main import SimplifiedMain
SimplifiedMain.startThread()
| [
"simplified_scrapy.simplified_main.SimplifiedMain.startThread"
] | [((93, 121), 'simplified_scrapy.simplified_main.SimplifiedMain.startThread', 'SimplifiedMain.startThread', ([], {}), '()\n', (119, 121), False, 'from simplified_scrapy.simplified_main import SimplifiedMain\n')] |
#!/usr/bin/env python3
# -- coding: utf-8 --
import datetime
from dateutil import parser, tz
from lxml import etree
from flask import Flask, request, abort, make_response, Response
from flask_appconfig import AppConfig
import requests
def create_app(configfile=None):
app = Flask("delayrss")
AppConfig(app, configfile)
return app
app = create_app()
@app.route('/')
def delay_rss():
url = request.args.get("url", "")
timedelta = datetime.timedelta(
minutes=int(request.args.get("minutes", "0")),
hours=int(request.args.get("hours", "0")),
days=int(request.args.get("days", "0")),
weeks=int(request.args.get("weeks", "0"))
)
if not url:
abort(make_response("Missing url parameter.", 400))
if not timedelta:
abort(make_response("Missing a timedelta parameter (minutes, hours, days and/or weeks).", 400))
page = requests.get(request.args["url"])
root = etree.fromstring(page.content, base_url=url)
for article in root.xpath("//*[local-name() = 'item']"):
date = article.xpath("./*[local-name() = 'date']/text()") or article.xpath("./*[local-name() = 'pubDate']/text()")
pub_date = parser.parse(date[0])
if pub_date + timedelta >= datetime.datetime.now(tz.tzlocal()):
article.getparent().remove(article)
return Response(etree.tostring(root), mimetype='text/xml')
if __name__ == '__main__':
app.run(host=app.config.get("HOST", "127.0.0.1"), debug=app.config.get("DEBUG", False))
| [
"flask.request.args.get",
"dateutil.parser.parse",
"dateutil.tz.tzlocal",
"flask.Flask",
"requests.get",
"lxml.etree.fromstring",
"flask.make_response",
"flask_appconfig.AppConfig",
"lxml.etree.tostring"
] | [((282, 299), 'flask.Flask', 'Flask', (['"""delayrss"""'], {}), "('delayrss')\n", (287, 299), False, 'from flask import Flask, request, abort, make_response, Response\n'), ((304, 330), 'flask_appconfig.AppConfig', 'AppConfig', (['app', 'configfile'], {}), '(app, configfile)\n', (313, 330), False, 'from flask_appconfig import AppConfig\n'), ((411, 438), 'flask.request.args.get', 'request.args.get', (['"""url"""', '""""""'], {}), "('url', '')\n", (427, 438), False, 'from flask import Flask, request, abort, make_response, Response\n'), ((900, 933), 'requests.get', 'requests.get', (["request.args['url']"], {}), "(request.args['url'])\n", (912, 933), False, 'import requests\n'), ((945, 989), 'lxml.etree.fromstring', 'etree.fromstring', (['page.content'], {'base_url': 'url'}), '(page.content, base_url=url)\n', (961, 989), False, 'from lxml import etree\n'), ((1194, 1215), 'dateutil.parser.parse', 'parser.parse', (['date[0]'], {}), '(date[0])\n', (1206, 1215), False, 'from dateutil import parser, tz\n'), ((1357, 1377), 'lxml.etree.tostring', 'etree.tostring', (['root'], {}), '(root)\n', (1371, 1377), False, 'from lxml import etree\n'), ((716, 760), 'flask.make_response', 'make_response', (['"""Missing url parameter."""', '(400)'], {}), "('Missing url parameter.', 400)\n", (729, 760), False, 'from flask import Flask, request, abort, make_response, Response\n'), ((798, 891), 'flask.make_response', 'make_response', (['"""Missing a timedelta parameter (minutes, hours, days and/or weeks)."""', '(400)'], {}), "(\n 'Missing a timedelta parameter (minutes, hours, days and/or weeks).', 400)\n", (811, 891), False, 'from flask import Flask, request, abort, make_response, Response\n'), ((495, 527), 'flask.request.args.get', 'request.args.get', (['"""minutes"""', '"""0"""'], {}), "('minutes', '0')\n", (511, 527), False, 'from flask import Flask, request, abort, make_response, Response\n'), ((548, 578), 'flask.request.args.get', 'request.args.get', (['"""hours"""', '"""0"""'], {}), 
"('hours', '0')\n", (564, 578), False, 'from flask import Flask, request, abort, make_response, Response\n'), ((598, 627), 'flask.request.args.get', 'request.args.get', (['"""days"""', '"""0"""'], {}), "('days', '0')\n", (614, 627), False, 'from flask import Flask, request, abort, make_response, Response\n'), ((648, 678), 'flask.request.args.get', 'request.args.get', (['"""weeks"""', '"""0"""'], {}), "('weeks', '0')\n", (664, 678), False, 'from flask import Flask, request, abort, make_response, Response\n'), ((1273, 1285), 'dateutil.tz.tzlocal', 'tz.tzlocal', ([], {}), '()\n', (1283, 1285), False, 'from dateutil import parser, tz\n')] |
import unittest
from django.core.exceptions import ValidationError
from petstagram.common.validators import MaxFileSizeInMbValidator
class FakeFile:
size = 5
class FakeImage:
file = FakeFile()
class MaxFileSizeInMbValidatorTests(unittest.TestCase):
def test_when_file_is_bigger__expect_to_raise(self):
validator = MaxFileSizeInMbValidator(0.000001)
file = FakeImage()
with self.assertRaises(ValidationError) as context:
validator(file)
self.assertIsNotNone(context.exception)
def test_when_file_size_is_valid__expect_to_do_nothing(self):
validator = MaxFileSizeInMbValidator(1)
file = FakeImage()
validator(file)
| [
"petstagram.common.validators.MaxFileSizeInMbValidator"
] | [((341, 372), 'petstagram.common.validators.MaxFileSizeInMbValidator', 'MaxFileSizeInMbValidator', (['(1e-06)'], {}), '(1e-06)\n', (365, 372), False, 'from petstagram.common.validators import MaxFileSizeInMbValidator\n'), ((629, 656), 'petstagram.common.validators.MaxFileSizeInMbValidator', 'MaxFileSizeInMbValidator', (['(1)'], {}), '(1)\n', (653, 656), False, 'from petstagram.common.validators import MaxFileSizeInMbValidator\n')] |
import os
import csv
import sys
from sklearn.model_selection import train_test_split
sys.path.append("..")
from training_config import RANDOM_SEED, ALLOWED_CLASSES, DATA_DIR
def stratified_split(X, y, test_size=0.2, validate_size=0.2, random_state=42):
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, test_size=test_size,
random_state=random_state)
new_validate_size = validate_size / (1 - test_size)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, stratify=y_train, test_size=new_validate_size,
random_state=random_state)
return X_train, X_test, X_val, y_train, y_test, y_val
def populate(X, Z, y):
y_mod = []
X_list = []
for i, key in enumerate(Z):
for file_name in X[key]:
X_list.append(file_name)
y_mod.append(y[i])
return X_list, y_mod
def create_dataset(X, y, file_name):
with open(os.path.join(DATA_DIR, file_name), 'w', newline='') as csvfile:
dataset_writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for i in range(len(X)):
dataset_writer.writerow((X[i], y[i]))
if __name__ == "__main__":
X = {}
y = []
Z = []
for class_name in ALLOWED_CLASSES:
class_path = os.path.join(DATA_DIR, class_name)
files = []
for file_name in os.listdir(class_path):
if not file_name.endswith(".pkl"):
continue
files.append(file_name)
splitted_terms = file_name.split("_")
patient_id = splitted_terms[0]
value = class_name + "_" + patient_id
if value not in X.keys():
X[value] = []
y.append(class_name)
Z.append(value)
X[value].append(os.path.join(class_path, file_name))
Z_train, Z_test, Z_val, y_train, y_test, y_val = stratified_split(Z, y, test_size=0.2, validate_size=0.2,
random_state=RANDOM_SEED)
X_train, y_train = populate(X, Z_train, y_train)
X_test, y_test = populate(X, Z_test, y_test)
X_val, y_val = populate(X, Z_val, y_val)
print("Train size: {}".format(len(X_train)))
print("Test size: {}".format(len(X_test)))
print("Val size: {}".format(len(X_val)))
create_dataset(X_train, y_train, 'train.csv')
create_dataset(X_test, y_test, 'test.csv')
create_dataset(X_val, y_val, 'val.csv')
| [
"os.listdir",
"sklearn.model_selection.train_test_split",
"csv.writer",
"os.path.join",
"sys.path.append"
] | [((85, 106), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (100, 106), False, 'import sys\n'), ((294, 381), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'stratify': 'y', 'test_size': 'test_size', 'random_state': 'random_state'}), '(X, y, stratify=y, test_size=test_size, random_state=\n random_state)\n', (310, 381), False, 'from sklearn.model_selection import train_test_split\n'), ((526, 639), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'stratify': 'y_train', 'test_size': 'new_validate_size', 'random_state': 'random_state'}), '(X_train, y_train, stratify=y_train, test_size=\n new_validate_size, random_state=random_state)\n', (542, 639), False, 'from sklearn.model_selection import train_test_split\n'), ((1104, 1180), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n", (1114, 1180), False, 'import csv\n'), ((1385, 1419), 'os.path.join', 'os.path.join', (['DATA_DIR', 'class_name'], {}), '(DATA_DIR, class_name)\n', (1397, 1419), False, 'import os\n'), ((1464, 1486), 'os.listdir', 'os.listdir', (['class_path'], {}), '(class_path)\n', (1474, 1486), False, 'import os\n'), ((1015, 1048), 'os.path.join', 'os.path.join', (['DATA_DIR', 'file_name'], {}), '(DATA_DIR, file_name)\n', (1027, 1048), False, 'import os\n'), ((1907, 1942), 'os.path.join', 'os.path.join', (['class_path', 'file_name'], {}), '(class_path, file_name)\n', (1919, 1942), False, 'import os\n')] |
import os
def delete_log():
if os.path.exists('pyfibre.log'):
os.remove('pyfibre.log')
| [
"os.path.exists",
"os.remove"
] | [((37, 66), 'os.path.exists', 'os.path.exists', (['"""pyfibre.log"""'], {}), "('pyfibre.log')\n", (51, 66), False, 'import os\n'), ((76, 100), 'os.remove', 'os.remove', (['"""pyfibre.log"""'], {}), "('pyfibre.log')\n", (85, 100), False, 'import os\n')] |
import requests
def get_programming_joke() -> str:
joke = "// This line doesn't actually do anything, but the code stops working when I delete it."
response = requests.get(
"https://sv443.net/jokeapi/v2/joke/Programming?format=txt&type=single")
if response.status_code == 200:
joke = response.text
return joke
| [
"requests.get"
] | [((169, 258), 'requests.get', 'requests.get', (['"""https://sv443.net/jokeapi/v2/joke/Programming?format=txt&type=single"""'], {}), "(\n 'https://sv443.net/jokeapi/v2/joke/Programming?format=txt&type=single')\n", (181, 258), False, 'import requests\n')] |
from copy import deepcopy
import pytest
from catenets.datasets import load
from catenets.experiment_utils.tester import evaluate_treatments_model
from catenets.models.jax import FLEXTE_NAME, OFFSET_NAME, FlexTENet, OffsetNet
LAYERS_OUT = 2
LAYERS_R = 3
PENALTY_L2 = 0.01 / 100
PENALTY_ORTHOGONAL_IHDP = 0
MODEL_PARAMS = {
"n_layers_out": LAYERS_OUT,
"n_layers_r": LAYERS_R,
"penalty_l2": PENALTY_L2,
"penalty_orthogonal": PENALTY_ORTHOGONAL_IHDP,
"n_layers_out_t": LAYERS_OUT,
"n_layers_r_t": LAYERS_R,
"penalty_l2_t": PENALTY_L2,
}
PARAMS_DEPTH: dict = {"n_layers_r": 2, "n_layers_out": 2}
PARAMS_DEPTH_2: dict = {
"n_layers_r": 2,
"n_layers_out": 2,
"n_layers_r_t": 2,
"n_layers_out_t": 2,
}
PENALTY_DIFF = 0.01
PENALTY_ORTHOGONAL = 0.1
ALL_MODELS = {
OFFSET_NAME: OffsetNet(penalty_l2_p=PENALTY_DIFF, **PARAMS_DEPTH),
FLEXTE_NAME: FlexTENet(
penalty_orthogonal=PENALTY_ORTHOGONAL, penalty_l2_p=PENALTY_DIFF, **PARAMS_DEPTH
),
}
models = list(ALL_MODELS.keys())
@pytest.mark.slow
@pytest.mark.parametrize("dataset, pehe_threshold", [("twins", 0.4), ("ihdp", 3)])
@pytest.mark.parametrize("model_name", models)
def test_model_sanity(dataset: str, pehe_threshold: float, model_name: str) -> None:
model = deepcopy(ALL_MODELS[model_name])
X_train, W_train, Y_train, Y_train_full, X_test, Y_test = load(dataset)
score = evaluate_treatments_model(model, X_train, Y_train, Y_train_full, W_train)
print(f"Evaluation for model jax.{model_name} on {dataset} = {score['str']}")
assert score["raw"]["pehe"][0] < pehe_threshold
def test_model_score() -> None:
model = OffsetNet()
X_train, W_train, Y_train, Y_train_full, X_test, Y_test = load("ihdp")
model.fit(X_train[:10], Y_train[:10], W_train[:10])
result = model.score(X_test, Y_test)
assert result > 0
with pytest.raises(ValueError):
model.score(X_train, Y_train) # Y_train has just one outcome
| [
"catenets.models.jax.OffsetNet",
"pytest.mark.parametrize",
"catenets.models.jax.FlexTENet",
"catenets.experiment_utils.tester.evaluate_treatments_model",
"pytest.raises",
"copy.deepcopy",
"catenets.datasets.load"
] | [((1055, 1140), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dataset, pehe_threshold"""', "[('twins', 0.4), ('ihdp', 3)]"], {}), "('dataset, pehe_threshold', [('twins', 0.4), ('ihdp',\n 3)])\n", (1078, 1140), False, 'import pytest\n'), ((1138, 1183), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model_name"""', 'models'], {}), "('model_name', models)\n", (1161, 1183), False, 'import pytest\n'), ((820, 872), 'catenets.models.jax.OffsetNet', 'OffsetNet', ([], {'penalty_l2_p': 'PENALTY_DIFF'}), '(penalty_l2_p=PENALTY_DIFF, **PARAMS_DEPTH)\n', (829, 872), False, 'from catenets.models.jax import FLEXTE_NAME, OFFSET_NAME, FlexTENet, OffsetNet\n'), ((891, 986), 'catenets.models.jax.FlexTENet', 'FlexTENet', ([], {'penalty_orthogonal': 'PENALTY_ORTHOGONAL', 'penalty_l2_p': 'PENALTY_DIFF'}), '(penalty_orthogonal=PENALTY_ORTHOGONAL, penalty_l2_p=PENALTY_DIFF,\n **PARAMS_DEPTH)\n', (900, 986), False, 'from catenets.models.jax import FLEXTE_NAME, OFFSET_NAME, FlexTENet, OffsetNet\n'), ((1281, 1313), 'copy.deepcopy', 'deepcopy', (['ALL_MODELS[model_name]'], {}), '(ALL_MODELS[model_name])\n', (1289, 1313), False, 'from copy import deepcopy\n'), ((1377, 1390), 'catenets.datasets.load', 'load', (['dataset'], {}), '(dataset)\n', (1381, 1390), False, 'from catenets.datasets import load\n'), ((1404, 1477), 'catenets.experiment_utils.tester.evaluate_treatments_model', 'evaluate_treatments_model', (['model', 'X_train', 'Y_train', 'Y_train_full', 'W_train'], {}), '(model, X_train, Y_train, Y_train_full, W_train)\n', (1429, 1477), False, 'from catenets.experiment_utils.tester import evaluate_treatments_model\n'), ((1658, 1669), 'catenets.models.jax.OffsetNet', 'OffsetNet', ([], {}), '()\n', (1667, 1669), False, 'from catenets.models.jax import FLEXTE_NAME, OFFSET_NAME, FlexTENet, OffsetNet\n'), ((1733, 1745), 'catenets.datasets.load', 'load', (['"""ihdp"""'], {}), "('ihdp')\n", (1737, 1745), False, 'from catenets.datasets import load\n'), ((1878, 1903), 
'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1891, 1903), False, 'import pytest\n')] |
# -*- coding: UTF-8 -*-
"""TK版文件下载
技术要点:1 自定义事件;2 UI线程和子线程数据通信
"""
from Tkinter import *
import sys,os
import urllib
import threading
import Queue
import tkMessageBox
class Event(object):
REFLASH = '<<Reflash>>'
class MWindow(Frame):
def __init__(self):
Frame.__init__(self)
self.master.title('Download Demo')
self.master.geometry('500x400')
self.master.resizable(False, False)
self.grid()
#下载地址标签、输入框
self.download_url = StringVar()
Label(self, text='下载地址').grid(row=0, column=0, columnspan=3, sticky = W+E+N+S)
Entry(self, textvariable=self.download_url).grid(row=0,column=3, columnspan=7, sticky = W+E+N+S)
#另存为标签、输入框
Label(self, text='另存为').grid(row=1, column=0, columnspan=3, sticky = W+E+N+S)
self.file_name = StringVar()
Entry(self, textvariable=self.file_name).grid(row=1, column=3, columnspan=7, sticky = W+E+N+S)
#进度条
self.scale=Scale(self, from_=0,to=100,orient=HORIZONTAL)
self.scale.set(0)
self.scale.grid(row=2, column=0, columnspan=10, sticky = W+E+N+S)
#按钮
Button(self, text='下载', command=self.download).grid(row=3, column=3, columnspan=4, sticky = W+E+N+S)
self.bind(Event.REFLASH,self.on_processing)
def on_processing(self,event):
self.scale.set(cq.get())
def download(self):
url = self.download_url.get()
fileName = self.file_name.get()
if not url or not fileName:
tkMessageBox.showerror('错误','请填写完整')
return 0
self.downThread = threading.Thread(target=downloadTask,args=(url,fileName))
self.downThread.start()
mw = MWindow()
cq = Queue.Queue()
def downloadTask(url,file_name):
urllib.urlretrieve(url, fileName, file_name)
def putPercent(downloaded, data_size,file_size):
"""
downloaded,已下载的数据块
data_size,数据块的大小
file_size,远程文件的大小
"""
perc = 100.0 * downloaded * data_size/file_size
if 100 < perc:
perc = 100
cq.put(perc)
try:
mw.event_generate(Event.REFLASH, when='tail')
except TclError:
pass
def main():
mw.mainloop()
if __name__ == '__main__':
main()
| [
"urllib.urlretrieve",
"threading.Thread",
"Queue.Queue",
"tkMessageBox.showerror"
] | [((1747, 1760), 'Queue.Queue', 'Queue.Queue', ([], {}), '()\n', (1758, 1760), False, 'import Queue\n'), ((1802, 1846), 'urllib.urlretrieve', 'urllib.urlretrieve', (['url', 'fileName', 'file_name'], {}), '(url, fileName, file_name)\n', (1820, 1846), False, 'import urllib\n'), ((1628, 1687), 'threading.Thread', 'threading.Thread', ([], {'target': 'downloadTask', 'args': '(url, fileName)'}), '(target=downloadTask, args=(url, fileName))\n', (1644, 1687), False, 'import threading\n'), ((1544, 1581), 'tkMessageBox.showerror', 'tkMessageBox.showerror', (['"""错误"""', '"""请填写完整"""'], {}), "('错误', '请填写完整')\n", (1566, 1581), False, 'import tkMessageBox\n')] |
from flask import Flask, render_template, redirect
import requests
import json
app: Flask = Flask( __name__ )
@app.route( "/" )
def index():
cotacao = requests.get("https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL")
cotacao = cotacao.json()
cotacao_bit = cotacao['BTCBRL']['bid']
cotacao_euro = cotacao['EURBRL']['bid']
cotacao_real = cotacao['USDBRL']['bid']
return render_template('index.html', bit=cotacao_bit, euro=cotacao_euro,
real=cotacao_real)
if __name__ != "__name__":
app.run()
| [
"flask.render_template",
"requests.get",
"flask.Flask"
] | [((93, 108), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (98, 108), False, 'from flask import Flask, render_template, redirect\n'), ((158, 237), 'requests.get', 'requests.get', (['"""https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL"""'], {}), "('https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL')\n", (170, 237), False, 'import requests\n'), ((410, 499), 'flask.render_template', 'render_template', (['"""index.html"""'], {'bit': 'cotacao_bit', 'euro': 'cotacao_euro', 'real': 'cotacao_real'}), "('index.html', bit=cotacao_bit, euro=cotacao_euro, real=\n cotacao_real)\n", (425, 499), False, 'from flask import Flask, render_template, redirect\n')] |
import cv2
class VideoStream:
def __init__(self, path='/dev/video0', size=(640, 480)):
self.path = path
self.size = size
self.cap = self.capture_stream()
def __del__(self):
cap = getattr(self, 'cap', None)
if cap is not None:
cap.release()
def capture_stream(self):
cap = cv2.VideoCapture(self.path)
if self.path.startswith('/dev/'):
width, height = self.size
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
return cap
def read(self):
success, frame = self.cap.read()
if not success:
return
return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
@staticmethod
def draw_box(frame, box, color=(0, 255, 0)):
xmin, ymin, xmax, ymax = box
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, thickness=2)
@staticmethod
def draw_text(frame, text, anchor=None, color=(0, 255, 0)):
if anchor is None:
height = frame.shape[0]
anchor = (5, height - 5)
cv2.putText(frame, text, anchor, cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1, color=color, thickness=2)
@staticmethod
def show(frame, title=None, size=None):
if frame is None:
return
if size is not None:
frame = cv2.resize(frame, size)
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
cv2.namedWindow(title, cv2.WINDOW_NORMAL)
cv2.imshow(title, frame)
@staticmethod
def save(frame, filename):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
cv2.imwrite(filename, frame)
@staticmethod
def wait_key(timeout=1):
return chr(cv2.waitKey(timeout) & 0xFF)
@staticmethod
def close_windows():
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.imwrite",
"cv2.putText",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.VideoWriter_fourcc",
"cv2.resize",
"cv2.waitKey",
"cv2.namedWindow"
] | [((350, 377), 'cv2.VideoCapture', 'cv2.VideoCapture', (['self.path'], {}), '(self.path)\n', (366, 377), False, 'import cv2\n'), ((831, 869), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (843, 869), False, 'import cv2\n'), ((983, 1051), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(xmin, ymin)', '(xmax, ymax)', 'color'], {'thickness': '(2)'}), '(frame, (xmin, ymin), (xmax, ymax), color, thickness=2)\n', (996, 1051), False, 'import cv2\n'), ((1245, 1346), 'cv2.putText', 'cv2.putText', (['frame', 'text', 'anchor', 'cv2.FONT_HERSHEY_SIMPLEX'], {'fontScale': '(1)', 'color': 'color', 'thickness': '(2)'}), '(frame, text, anchor, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,\n color=color, thickness=2)\n', (1256, 1346), False, 'import cv2\n'), ((1563, 1601), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), '(frame, cv2.COLOR_RGB2BGR)\n', (1575, 1601), False, 'import cv2\n'), ((1610, 1651), 'cv2.namedWindow', 'cv2.namedWindow', (['title', 'cv2.WINDOW_NORMAL'], {}), '(title, cv2.WINDOW_NORMAL)\n', (1625, 1651), False, 'import cv2\n'), ((1660, 1684), 'cv2.imshow', 'cv2.imshow', (['title', 'frame'], {}), '(title, frame)\n', (1670, 1684), False, 'import cv2\n'), ((1751, 1789), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), '(frame, cv2.COLOR_RGB2BGR)\n', (1763, 1789), False, 'import cv2\n'), ((1798, 1826), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'frame'], {}), '(filename, frame)\n', (1809, 1826), False, 'import cv2\n'), ((1975, 1998), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1996, 1998), False, 'import cv2\n'), ((1522, 1545), 'cv2.resize', 'cv2.resize', (['frame', 'size'], {}), '(frame, size)\n', (1532, 1545), False, 'import cv2\n'), ((608, 639), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MJPG'"], {}), "(*'MJPG')\n", (630, 639), False, 'import cv2\n'), ((1894, 1914), 'cv2.waitKey', 'cv2.waitKey', (['timeout'], {}), '(timeout)\n', 
(1905, 1914), False, 'import cv2\n')] |
from django.contrib import admin
from ..abstract.admin import AbstractObservation, ObservableObjectAdmin
from .models import (
Asteroid, AsteroidObservation,
Comet, CometObservation,
Planet, PlanetObservation,
MoonObservation,
MeteorShower,
)
class AsteroidObservationAdmin(AbstractObservation):
model = AsteroidObservation
class CometObservationAdmin(AbstractObservation):
model = CometObservation
class PlanetObservationAdmin(AbstractObservation):
model = PlanetObservation
class MoonObservationAdmin(AbstractObservation):
model = MoonObservation
class MeteorShowerAdmin(admin.ModelAdmin):
model = MeteorShower
list_display = ['pk', 'name', 'radiant_ra', 'radiant_dec', 'start_date', 'peak_date', 'end_date', 'zhr']
search_fields = ['name']
fieldsets = (
(None, {
'fields': [
('name', 'slug'),
('start_date', 'peak_date', 'end_date'),
('radiant_ra', 'radiant_dec', 'longitude'),
('speed', 'zhr', 'parent_body'),
'notes',
]
}),
)
class PlanetAdmin(ObservableObjectAdmin):
model = Planet
list_display = [
'pk', 'name', 'diameter', 'load', 'moon_list', 'n_obs', 'obs_date'
]
list_display_links = ['pk', 'name']
readonly_fields = ['moon_list',]
inlines = [PlanetObservationAdmin]
save_on_top = True
class AsteroidAdmin(ObservableObjectAdmin):
model = Asteroid
list_display = ['number', 'name', 'diameter', 'est_brightest', 'h', 'n_obs', 'obs_date']
list_display_links = ['number', 'name']
inlines = [AsteroidObservationAdmin]
save_on_top = True
class CometAdmin(ObservableObjectAdmin):
model = Comet
list_display = ['pk', 'name', 'status', 'n_obs', 'obs_date']
list_display_links = ['pk', 'name']
inlines = [CometObservationAdmin]
save_on_top = True
admin.site.register(MeteorShower, MeteorShowerAdmin)
admin.site.register(Planet, PlanetAdmin)
admin.site.register(Asteroid, AsteroidAdmin)
admin.site.register(Comet, CometAdmin)
admin.site.register(MoonObservation) | [
"django.contrib.admin.site.register"
] | [((1916, 1968), 'django.contrib.admin.site.register', 'admin.site.register', (['MeteorShower', 'MeteorShowerAdmin'], {}), '(MeteorShower, MeteorShowerAdmin)\n', (1935, 1968), False, 'from django.contrib import admin\n'), ((1969, 2009), 'django.contrib.admin.site.register', 'admin.site.register', (['Planet', 'PlanetAdmin'], {}), '(Planet, PlanetAdmin)\n', (1988, 2009), False, 'from django.contrib import admin\n'), ((2010, 2054), 'django.contrib.admin.site.register', 'admin.site.register', (['Asteroid', 'AsteroidAdmin'], {}), '(Asteroid, AsteroidAdmin)\n', (2029, 2054), False, 'from django.contrib import admin\n'), ((2055, 2093), 'django.contrib.admin.site.register', 'admin.site.register', (['Comet', 'CometAdmin'], {}), '(Comet, CometAdmin)\n', (2074, 2093), False, 'from django.contrib import admin\n'), ((2094, 2130), 'django.contrib.admin.site.register', 'admin.site.register', (['MoonObservation'], {}), '(MoonObservation)\n', (2113, 2130), False, 'from django.contrib import admin\n')] |
# Copyright (C) 2013-2020, <NAME>
# and ftputil contributors (see `doc/contributors.txt`)
# See the file LICENSE for licensing terms.
"""
tool.py - helper code
"""
import os
__all__ = ["same_string_type_as", "as_str", "as_str_path"]
# Encoding to convert between byte string and unicode string. This is
# a "lossless" encoding: Strings can be encoded/decoded back and forth
# without information loss or causing encoding-related errors. The
# `ftplib` module under Python 3 also uses the "latin1" encoding
# internally. It's important to use the same encoding here, so that users who
# used `ftplib` to create FTP items with non-ASCII characters can access them
# in the same way with ftputil.
LOSSLESS_ENCODING = "latin1"
def same_string_type_as(type_source, string):
"""
Return a string of the same type as `type_source` with the content from
`string`.
If the `type_source` and `string` don't have the same type, use
`LOSSLESS_ENCODING` above to encode or decode, whatever operation is needed.
"""
if isinstance(type_source, bytes) and isinstance(string, str):
return string.encode(LOSSLESS_ENCODING)
elif isinstance(type_source, str) and isinstance(string, bytes):
return string.decode(LOSSLESS_ENCODING)
else:
return string
def as_str(string):
"""
Return the argument `string` converted to a unicode string if it's
a `bytes` object. Otherwise just return the string.
If `string` is neither `str` nor `bytes`, raise a `TypeError`.
"""
if isinstance(string, bytes):
return string.decode(LOSSLESS_ENCODING)
elif isinstance(string, str):
return string
else:
raise TypeError("`as_str` argument must be `bytes` or `str`")
def as_str_path(path):
"""
Return the argument `path` converted to a unicode string if it's
a `bytes` object. Otherwise just return the string.
Instead of passing a `bytes` or `str` object for `path`, you can
pass a `PathLike` object that can be converted to a `bytes` or
`str` object.
If the `path` can't be converted to a `bytes` or `str`, a `TypeError`
is raised.
"""
path = os.fspath(path)
return as_str(path)
| [
"os.fspath"
] | [((2170, 2185), 'os.fspath', 'os.fspath', (['path'], {}), '(path)\n', (2179, 2185), False, 'import os\n')] |
# Generated by Django 3.2.7 on 2022-01-06 03:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data', '0070_taibifcode'),
]
operations = [
migrations.RenameField(
model_name='taibifcode',
old_name='code_id',
new_name='objid',
),
]
| [
"django.db.migrations.RenameField"
] | [((216, 305), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""taibifcode"""', 'old_name': '"""code_id"""', 'new_name': '"""objid"""'}), "(model_name='taibifcode', old_name='code_id',\n new_name='objid')\n", (238, 305), False, 'from django.db import migrations\n')] |
# Copyright (C) 2022 National Center for Atmospheric Research and National Oceanic and Atmospheric Administration
# SPDX-License-Identifier: Apache-2.0
#
import numpy as np
from pandas.api.types import is_float_dtype
def write_ncf(dset, output_name, title=''):
"""Function to write netcdf4 files with some compression for floats
Parameters
----------
dset : type
Description of parameter `dset`.
output_name : type
Description of parameter `output_name`.
Returns
-------
type
Description of returned object.
"""
import pandas as pd
print('Writing:', output_name)
comp = dict(zlib=True, complevel=7)
encoding = {}
for i in dset.data_vars.keys():
if is_float_dtype(dset[i]): # (dset[i].dtype != 'object') & (i != 'time') & (i != 'time_local') :
print("Compressing: {}, original_dtype: {}".format(i, dset[i].dtype))
dset[i] = compress_variable(dset[i])
encoding[i] = comp
dset.attrs['title'] = title
dset.attrs['format'] = 'NetCDF-4'
dset.attrs['date_created'] = pd.to_datetime('today').strftime('%Y-%m-%d')
dset.to_netcdf(output_name, encoding=encoding)
def compute_scale_and_offset(mn, mx, n, dtype=np.float32):
"""Calculates the scale and offset to be used for a variable
Parameters
----------
mn : float
minimum value.
mx : float
maximum value.
n : number of bits
default is 32bit.
dtype : numpy dtype
default is numpy.float32.
Returns
-------
type
Description of returned object.
"""
"""
min is the minimum of the values
max is the maximum of the values
n is the integer bit length (ie 32 for np.int32 or 16 for np.int16)
"""
# stretch/compress data to the available packed range
scale_factor = (mx - mn) / (2 ** n - 1)
# translate the range to be symmetric about zero
add_offset = mn + 2 ** (n - 1) * scale_factor
return (scale_factor.astype(dtype), add_offset.astype(dtype))
def pack_value(values, scale_factor, offset, dtype):
"""Values to pack the array with scale factors from a float to integers
Parameters
----------
values : type
Description of parameter `values`.
scale_factor : type
Description of parameter `scale_factor`.
offset : type
Description of parameter `offset`.
dtype : type
Description of parameter `dtype`.
Returns
-------
type
Description of returned object.
"""
return ((values - offset) / scale_factor).astype(dtype)
def get_min_max(da):
"""Function to return the maximum and minimum value
Parameters
----------
da : type
Description of parameter `da`.
Returns
-------
type
Description of returned object.
"""
return (da.min().compute(), da.max().compute())
def compress_variable(da):
"""Function to compress a variable from a float to integer and adds netcdf attributes for CF convention.
Parameters
----------
da : type
Description of parameter `da`.
Returns
-------
type
Description of returned object.
"""
da = da.fillna(-1)
mn, mx = get_min_max(da)
scale_factor, offset = compute_scale_and_offset(mn, mx, 32, dtype=da.dtype)
da.data = pack_value(da, scale_factor, offset, dtype=np.int32).data
da.attrs['scale_factor'] = scale_factor.values
da.attrs['add_offset'] = offset.values
da.attrs['_FillValue'] = -1
da.attrs['missing_value'] = -1
return da
| [
"pandas.to_datetime",
"pandas.api.types.is_float_dtype"
] | [((742, 765), 'pandas.api.types.is_float_dtype', 'is_float_dtype', (['dset[i]'], {}), '(dset[i])\n', (756, 765), False, 'from pandas.api.types import is_float_dtype\n'), ((1099, 1122), 'pandas.to_datetime', 'pd.to_datetime', (['"""today"""'], {}), "('today')\n", (1113, 1122), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('shorter', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='urlslug',
name='is_used',
field=models.BooleanField(default=False, help_text='checked if was used as slug', db_index=True, verbose_name='Used'),
),
]
| [
"django.db.models.BooleanField"
] | [((343, 458), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""checked if was used as slug"""', 'db_index': '(True)', 'verbose_name': '"""Used"""'}), "(default=False, help_text='checked if was used as slug',\n db_index=True, verbose_name='Used')\n", (362, 458), False, 'from django.db import models, migrations\n')] |
""" Tests for functions in imaging module
Run at the project directory with:
nosetests code/utils/tests/test_imaging.py
"""
# Loading modules.
from __future__ import absolute_import, division, print_function
import numpy as np
import nibabel as nib
import os
import sys
from numpy.testing import assert_almost_equal, assert_array_equal, assert_equal
# Add path to functions to the system path.
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
# Load our visualization functions.
from Image_Visualizing import present_3d, make_mask,present_3d_options
# all tests of present are looking at the output sizes of the 2d arrays
def test_present():
# Read in the image data.
data = np.arange(100000)
data = data.reshape((100,100,10))
full=present_3d(data)
assert full.shape == (400,300)
def test_present_options_2():
data = np.arange(100000)
data = data.reshape((100,100,10))
full=present_3d_options(data,axis=2)
first=np.ceil(np.sqrt(10))
second=np.ceil(10/first)
assert full.shape == (100*first,100*second)
def test_present_options_1():
data = np.arange(100000)
data = data.reshape((100,100,10))
full=present_3d_options(data,axis=1)
assert full.shape == (10*10,100*10)
def test_present_options_0():
data = np.arange(100000)
data = data.reshape((100,100,10))
full=present_3d_options(data,axis=0)
assert full.shape == (10*10,100*10)
def test_mask():
# example from http://www.jarrodmillman.com/rcsds/lectures/glm_intro.html
# it should be pointed out that hypothesis just looks at simple linear regression
data = np.arange(1000000)
data = data.reshape((100,100,100))
mask1 = np.ones((100,100,100))
mask2 = np.zeros((100,100,100))
mask3 = np.ones((200,200,100))
assert_equal(make_mask(data, mask1), data)
assert_equal(make_mask(data,mask2), mask2)
assert_equal(make_mask(data,mask3,fit=True).shape, data.shape)
x= False
try:
make_mask(data,mask3,fit=False)
except ValueError:
x=True
assert(x==True)
| [
"Image_Visualizing.present_3d",
"numpy.ceil",
"Image_Visualizing.make_mask",
"numpy.sqrt",
"numpy.ones",
"os.path.dirname",
"numpy.zeros",
"Image_Visualizing.present_3d_options",
"numpy.arange"
] | [((717, 734), 'numpy.arange', 'np.arange', (['(100000)'], {}), '(100000)\n', (726, 734), True, 'import numpy as np\n'), ((787, 803), 'Image_Visualizing.present_3d', 'present_3d', (['data'], {}), '(data)\n', (797, 803), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((886, 903), 'numpy.arange', 'np.arange', (['(100000)'], {}), '(100000)\n', (895, 903), True, 'import numpy as np\n'), ((956, 988), 'Image_Visualizing.present_3d_options', 'present_3d_options', (['data'], {'axis': '(2)'}), '(data, axis=2)\n', (974, 988), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((1035, 1054), 'numpy.ceil', 'np.ceil', (['(10 / first)'], {}), '(10 / first)\n', (1042, 1054), True, 'import numpy as np\n'), ((1144, 1161), 'numpy.arange', 'np.arange', (['(100000)'], {}), '(100000)\n', (1153, 1161), True, 'import numpy as np\n'), ((1214, 1246), 'Image_Visualizing.present_3d_options', 'present_3d_options', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (1232, 1246), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((1328, 1345), 'numpy.arange', 'np.arange', (['(100000)'], {}), '(100000)\n', (1337, 1345), True, 'import numpy as np\n'), ((1398, 1430), 'Image_Visualizing.present_3d_options', 'present_3d_options', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1416, 1430), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((1670, 1688), 'numpy.arange', 'np.arange', (['(1000000)'], {}), '(1000000)\n', (1679, 1688), True, 'import numpy as np\n'), ((1740, 1764), 'numpy.ones', 'np.ones', (['(100, 100, 100)'], {}), '((100, 100, 100))\n', (1747, 1764), True, 'import numpy as np\n'), ((1775, 1800), 'numpy.zeros', 'np.zeros', (['(100, 100, 100)'], {}), '((100, 100, 100))\n', (1783, 1800), True, 'import numpy as np\n'), ((1811, 1835), 'numpy.ones', 'np.ones', (['(200, 200, 100)'], {}), '((200, 200, 100))\n', (1818, 1835), True, 'import numpy as 
np\n'), ((429, 454), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (444, 454), False, 'import os\n'), ((1011, 1022), 'numpy.sqrt', 'np.sqrt', (['(10)'], {}), '(10)\n', (1018, 1022), True, 'import numpy as np\n'), ((1856, 1878), 'Image_Visualizing.make_mask', 'make_mask', (['data', 'mask1'], {}), '(data, mask1)\n', (1865, 1878), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((1904, 1926), 'Image_Visualizing.make_mask', 'make_mask', (['data', 'mask2'], {}), '(data, mask2)\n', (1913, 1926), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((2036, 2069), 'Image_Visualizing.make_mask', 'make_mask', (['data', 'mask3'], {'fit': '(False)'}), '(data, mask3, fit=False)\n', (2045, 2069), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n'), ((1951, 1983), 'Image_Visualizing.make_mask', 'make_mask', (['data', 'mask3'], {'fit': '(True)'}), '(data, mask3, fit=True)\n', (1960, 1983), False, 'from Image_Visualizing import present_3d, make_mask, present_3d_options\n')] |
import re
from datetime import datetime
from typing import List, Optional, Match, AnyStr, Dict
import logging
import pandas as pd
from PyPDF2 import PdfFileReader
from nltk.tokenize import word_tokenize
from pandas import DataFrame
from avicena.models.MergeAddress import MergeAddress
from avicena.models.RevenueRate import RevenueRate
from avicena.util.ParserUtil import standardize_trip_df
log = logging.getLogger(__name__)
def _load_pdf_content(pdf: PdfFileReader) -> str:
"""
#################################
## PDF Load content ##
#################################
:param pdf: PDFFileReader object that has loaded the PDF path
:return: the raw text from the PDF
"""
# Discerning the number of pages will allow us to parse through all #the pages
num_pages = pdf.numPages
count = 0
text = ""
# The while loop will read each page
while count < num_pages:
pageObj = pdf.getPage(count)
count += 1
text += pageObj.extractText()
# This if statement exists to check if the above library returned #words. It's done because PyPDF2 cannot read scanned files.
if text != "":
text = text
# If the above returns as False, we run the OCR library textract to #convert scanned/image based PDF files into text
else:
log.error('cannot read scanned images.')
return text
def _tokenize_text(text: str) -> (List[str], int):
"""
#################################
## Tokenize content ##
#################################
# The word_tokenize() function will break our text phrases into #individual words
:param text: Parsed Text
:return: Break up the text into a list of words and return the total number of trips
"""
def _remove_adjacent(nums):
result = []
for num in nums:
if len(result) == 0 or num != result[-1]:
result.append(num)
return result
trip_count = text.count('Age:')
tokens = word_tokenize(text)
for index, x in enumerate(tokens):
if (x == '--') & (tokens[index - 1] == '--'):
tokens[index] = 'newline'
tokens = _remove_adjacent(tokens)
tokens.append('newline')
return tokens, trip_count
def _initialize_df(tokens: List[str]) -> DataFrame:
"""
#################################
## Split content into rows ##
#################################
:param tokens: list of string tokens extracted from PDF
:return: A dataframe of rows filled with a single column "raw_data" for each trip to be completed
"""
list1 = []
list2 = []
for index, x in enumerate(tokens):
if x != 'newline':
list1.append(x)
elif x == 'newline':
list1 = ' '.join(list1)
list2.append(list1)
list1 = []
se = pd.Series(list2)
df = pd.DataFrame()
df['raw_data'] = se.values
df = df[df["raw_data"] != '--']
for index, row in df.iterrows():
searchString = row['raw_data']
if "LogistiCare" in searchString:
y = searchString.find("LogistiCare")
row['raw_data'] = row['raw_data'][:y]
searchString = row['raw_data']
searchString = searchString.replace('-- ', '')
row['raw_data'] = searchString
cols = df.columns.tolist()
cols = cols[-1:] + cols[:-1]
df = df[cols]
df = df.iloc[1:]
return df
def _split_it(raw_data) -> Optional[Match[AnyStr]]:
"""
Apply Regex to the raw data column and split it into groups
:param raw_data: raw data string
:return: A Regex Match
"""
return re.search(
r"(.*?) \*\* (.*?) \*\* (.*?) - (.*?) (.*?) Age : (.*?) (\d{2}:\d{2}) PU (.*?) Phy : (.{16}) (.*?) (\d{2}:\d{2}) DO (.*?) Phy : (.{16}) (.*?) LOS : (\S+) (.*?(?=CPay))CPay : (.*?) PCA : (.*?) AEsc : (.*?) CEsc : (.*?) Seats : (.*?) Miles : (\d*)(.*$)",
raw_data)
def _clean_address(addr: str) -> str:
"""
Remove clobbering tokens from input address
:param addr: Address with unneeded tokens
:return: cleandup address string
"""
replacements = {'No Gc': ' ', '*': ' ', '\s*Apt. ': ' ', '//': ' ', 'Bldg .': ' ', 'Aust ': 'Austin, TX ',
' B ': 'Blvd ', 'Doorcode :': ' '}
for to_replace, replace_with in replacements.items():
pattern = re.compile(r"\s*" + re.escape(to_replace) + r"\s*")
addr = re.sub(pattern, replace_with, addr)
return addr
def _parse_raw_data(df: DataFrame, tokens: List[str]) -> None:
"""
Split the 'raw_data' column in the dataframe and populate it with individual details of each column
:param df: DataFrame with `raw_data` column parsed from PDF
:param tokens: Raw Tokens from the PDF
"""
#################################
## Split raw_data into columns ##
#################################
df['trip_id'] = df['raw_data'].apply(lambda x: _split_it(x).group(1))
df['trip_status'] = df['raw_data'].apply(lambda x: _split_it(x).group(2))
df['trip_reg'] = df['raw_data'].apply(lambda x: _split_it(x).group(3))
df['trip_county'] = df['raw_data'].apply(lambda x: _split_it(x).group(4))
df['customer_name'] = df['raw_data'].apply(lambda x: _split_it(x).group(5))
df['customer_age'] = df['raw_data'].apply(lambda x: _split_it(x).group(6))
df['trip_pickup_time'] = df['raw_data'].apply(lambda x: _split_it(x).group(7))
df['trip_pickup_name'] = df['raw_data'].apply(lambda x: _split_it(x).group(8))
df['trip_pickup_phone'] = df['raw_data'].apply(lambda x: _split_it(x).group(9))
df['trip_pickup_address'] = df['raw_data'].apply(lambda x: _clean_address(_split_it(x).group(10)))
df['trip_dropoff_time'] = df['raw_data'].apply(lambda x: _split_it(x).group(11))
df['trip_dropoff_name'] = df['raw_data'].apply(lambda x: _split_it(x).group(12))
df['trip_dropoff_phone'] = df['raw_data'].apply(lambda x: _split_it(x).group(13))
df['trip_dropoff_address'] = df['raw_data'].apply(lambda x: _clean_address(_split_it(x).group(14)))
df['trip_los'] = df['raw_data'].apply(lambda x: _split_it(x).group(15))
df['trip_daysofweek'] = df['raw_data'].apply(lambda x: _split_it(x).group(16))
df['trip_cpay'] = df['raw_data'].apply(lambda x: _split_it(x).group(17))
df['trip_pca'] = df['raw_data'].apply(lambda x: _split_it(x).group(18))
df['trip_aesc'] = df['raw_data'].apply(lambda x: _split_it(x).group(19))
df['trip_cesc'] = df['raw_data'].apply(lambda x: _split_it(x).group(20))
df['trip_seats'] = df['raw_data'].apply(lambda x: _split_it(x).group(21))
df['trip_miles'] = df['raw_data'].apply(lambda x: _split_it(x).group(22))
df['trip_notes'] = df['raw_data'].apply(lambda x: _split_it(x).group(23))
s = (tokens[10] + ' ' + tokens[11] + tokens[12] + ' ' + tokens[13])
d = datetime.strptime(s, '%B %d, %Y')
filedate = d.strftime('%m-%d-%y')
df['trip_date'] = filedate
def _store_raw_data(df: DataFrame, output_directory: str, name: str, trip_count: int) -> None:
    """
    Persist the parsed raw-data DataFrame as a CSV file.

    :param df: DataFrame with the raw data split into columns
    :param output_directory: directory where the parsed DataFrame is written as CSV
    :param name: name of the model run (becomes part of the file name)
    :param trip_count: total number of trips found in the PDF
    """
    # Date suffix comes from the first row, with dashes swapped for underscores.
    date_suffix = df['trip_date'].iloc[0].replace('-', '_')
    out_path = f"{output_directory}{name}{date_suffix}.csv"
    log.info(f"{len(df)}/{trip_count} trips parsed.")
    df.to_csv(out_path, encoding='utf-8', index=False)
    log.info(f"PDF file converted to {out_path}")
def parse_trips_to_df(trips_file: str, merge_details: Dict[str, MergeAddress],
                      revenue_table: Dict[str, List[RevenueRate]], output_directory: str) -> DataFrame:
    """
    Parse the input PDF into a DataFrame with the trip details extracted.

    :param trips_file: Path to PDF trips file
    :param merge_details: dictionary mapping address substring to actual MergeAddress object
    :param revenue_table: dictionary mapping level of service to a list of associated revenue rates
    :param output_directory: directory where intermediate parsed files will be stored
    :return: DataFrame with parsed Trip Details, excluding canceled trips
    """
    # Model-run name = the PDF file name without directory or ".pdf" extension.
    z = trips_file.find(".pdf")
    name = trips_file[trips_file.rfind('/') + 1:z]
    # BUG FIX: the original opened the PDF and never closed it; a context
    # manager guarantees the handle is released even if parsing raises.
    with open(trips_file, 'rb') as pdf_file_obj:
        loaded_pdf = PdfFileReader(pdf_file_obj)
        text = _load_pdf_content(loaded_pdf)
    tokens, trip_count = _tokenize_text(text)
    df = _initialize_df(tokens)
    _parse_raw_data(df, tokens)
    _store_raw_data(df, output_directory, name, trip_count)
    standardize_trip_df(df, merge_details, revenue_table)
    # Drop intermediate and personally-identifying columns not needed
    # downstream.  ('trip_notes' was listed twice in the original; once here.)
    df.drop(['raw_data', 'trip_notes', 'trip_reg', 'trip_county', 'customer_name', 'customer_age',
             'trip_pickup_name', 'trip_pickup_phone', 'trip_dropoff_name', 'trip_dropoff_phone',
             'trip_daysofweek', 'trip_cpay', 'trip_pca', 'trip_aesc', 'trip_cesc', 'trip_seats'],
            axis='columns', inplace=True)
    # Canceled trips are excluded from the returned DataFrame.
    return df[df['trip_status'] != "CANCELED"]
| [
"logging.getLogger",
"pandas.Series",
"re.escape",
"avicena.util.ParserUtil.standardize_trip_df",
"datetime.datetime.strptime",
"nltk.tokenize.word_tokenize",
"pandas.DataFrame",
"re.sub",
"PyPDF2.PdfFileReader",
"re.search"
] | [((400, 427), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (417, 427), False, 'import logging\n'), ((2015, 2034), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (2028, 2034), False, 'from nltk.tokenize import word_tokenize\n'), ((2863, 2879), 'pandas.Series', 'pd.Series', (['list2'], {}), '(list2)\n', (2872, 2879), True, 'import pandas as pd\n'), ((2889, 2903), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2901, 2903), True, 'import pandas as pd\n'), ((3647, 3938), 're.search', 're.search', (['"""(.*?) \\\\*\\\\* (.*?) \\\\*\\\\* (.*?) - (.*?) (.*?) Age : (.*?) (\\\\d{2}:\\\\d{2}) PU (.*?) Phy : (.{16}) (.*?) (\\\\d{2}:\\\\d{2}) DO (.*?) Phy : (.{16}) (.*?) LOS : (\\\\S+) (.*?(?=CPay))CPay : (.*?) PCA : (.*?) AEsc : (.*?) CEsc : (.*?) Seats : (.*?) Miles : (\\\\d*)(.*$)"""', 'raw_data'], {}), "(\n '(.*?) \\\\*\\\\* (.*?) \\\\*\\\\* (.*?) - (.*?) (.*?) Age : (.*?) (\\\\d{2}:\\\\d{2}) PU (.*?) Phy : (.{16}) (.*?) (\\\\d{2}:\\\\d{2}) DO (.*?) Phy : (.{16}) (.*?) LOS : (\\\\S+) (.*?(?=CPay))CPay : (.*?) PCA : (.*?) AEsc : (.*?) CEsc : (.*?) Seats : (.*?) 
Miles : (\\\\d*)(.*$)'\n , raw_data)\n", (3656, 3938), False, 'import re\n'), ((6848, 6881), 'datetime.datetime.strptime', 'datetime.strptime', (['s', '"""%B %d, %Y"""'], {}), "(s, '%B %d, %Y')\n", (6865, 6881), False, 'from datetime import datetime\n'), ((8445, 8470), 'PyPDF2.PdfFileReader', 'PdfFileReader', (['pdfFileObj'], {}), '(pdfFileObj)\n', (8458, 8470), False, 'from PyPDF2 import PdfFileReader\n'), ((8686, 8739), 'avicena.util.ParserUtil.standardize_trip_df', 'standardize_trip_df', (['df', 'merge_details', 'revenue_table'], {}), '(df, merge_details, revenue_table)\n', (8705, 8739), False, 'from avicena.util.ParserUtil import standardize_trip_df\n'), ((4433, 4468), 're.sub', 're.sub', (['pattern', 'replace_with', 'addr'], {}), '(pattern, replace_with, addr)\n', (4439, 4468), False, 'import re\n'), ((4386, 4407), 're.escape', 're.escape', (['to_replace'], {}), '(to_replace)\n', (4395, 4407), False, 'import re\n')] |
# Copyright (c) 2014, <NAME>. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Actions performed using the analyzer's responses.
"""
import sublime
import os
from Dart.lib.analyzer.api.types import AnalysisErrorSeverity
from Dart.lib.analyzer.api.types import Location
from Dart.sublime_plugin_lib import PluginLogger
from Dart.sublime_plugin_lib.panels import OutputPanel
# Module-level logger for this plugin.
_logger = PluginLogger(__name__)
# Region-drawing flags shared by all severities: squiggly underline only,
# with no fill and no outline.
_flags = (sublime.DRAW_SQUIGGLY_UNDERLINE | sublime.DRAW_NO_FILL |
sublime.DRAW_NO_OUTLINE)
def show_errors(errors):
    '''Show errors in the ui.

    Marks each analysis error in the active view (gutter icon + squiggly
    underline per severity) and writes a machine-readable summary to the
    'dart.analyzer' output panel.

    @errors
      An instance of `ErrorInfoCollection`.
    '''
    v = sublime.active_window().active_view()

    # TODO(guillermooo): Use tokens to identify requests:file.
    # todo (pp): notifications don't have id; process all
    # Only decorate the view the errors actually belong to.
    if os.path.realpath(errors.file) != os.path.realpath(v.file_name()):
        _logger.debug('different view active - aborting')
        return

    analysis_errs = list(errors.errors)
    # BUG FIX: the original tested `analysis_errs == 0`, comparing a list to
    # an int, which is never true — so stale error marks were never cleared
    # when a file became error-free.
    if not analysis_errs:
        clear_ui()
        return

    # Partition by severity so each class gets its own scope and gutter icon.
    infos = [ae for ae in analysis_errs if (ae.severity == AnalysisErrorSeverity.INFO)]
    warns = [ae for ae in analysis_errs if (ae.severity == AnalysisErrorSeverity.WARNING)]
    erros = [ae for ae in analysis_errs if (ae.severity == AnalysisErrorSeverity.ERROR)]

    def error_to_region(view, error):
        '''Converts location data to region data.
        '''
        loc = Location(error.location)
        # Analyzer positions are 1-based; Sublime text points are 0-based.
        pt = view.text_point(loc.startLine - 1,
                             loc.startColumn - 1)
        return sublime.Region(pt, pt + loc.length)

    info_regs = [error_to_region(v, item) for item in infos]
    warn_regs = [error_to_region(v, item) for item in warns]
    errs_regs = [error_to_region(v, item) for item in erros]

    _logger.debug('displaying errors to the user')

    v.add_regions('dart.infos', info_regs,
                  scope='dartlint.mark.info',
                  icon="Packages/Dart/gutter/dartlint-simple-info.png",
                  flags=_flags)

    v.add_regions('dart.warnings', warn_regs,
                  scope='dartlint.mark.warning',
                  icon="Packages/Dart/gutter/dartlint-simple-warning.png",
                  flags=_flags)

    v.add_regions('dart.errors', errs_regs,
                  scope='dartlint.mark.error',
                  icon='Packages/Dart/gutter/dartlint-simple-error.png',
                  flags=_flags)

    def to_compact_text(error):
        # One pipe-separated line per error, matching `errors_pattern` below.
        return ("{error.severity}|{error.type}|{loc.file}|"
                "{loc.startLine}|{loc.startColumn}|{error.message}").format(
                    error=error, loc=Location(error.location))

    info_patts = [to_compact_text(item) for item in infos]
    warn_patts = [to_compact_text(item) for item in warns]
    errs_patts = [to_compact_text(item) for item in erros]

    all_errs = set(errs_patts + warn_patts + info_patts)

    panel = OutputPanel('dart.analyzer')
    # Regex lets Sublime turn panel lines into clickable file:line:col links.
    errors_pattern = r'^\w+\|\w+\|(.+)\|(\d+)\|(\d+)\|(.+)$'
    panel.set('result_file_regex', errors_pattern)
    panel.write('\n'.join(all_errs))
def clear_ui():
    '''Remove UI decoration.
    '''
    _logger.debug('erasing errors from view')
    view = sublime.active_window().active_view()
    # Erase every severity's region set in one pass.
    for region_key in ('dart.errors', 'dart.warnings', 'dart.infos'):
        view.erase_regions(region_key)
| [
"sublime.active_window",
"Dart.sublime_plugin_lib.PluginLogger",
"Dart.sublime_plugin_lib.panels.OutputPanel",
"os.path.realpath",
"sublime.Region",
"Dart.lib.analyzer.api.types.Location"
] | [((508, 530), 'Dart.sublime_plugin_lib.PluginLogger', 'PluginLogger', (['__name__'], {}), '(__name__)\n', (520, 530), False, 'from Dart.sublime_plugin_lib import PluginLogger\n'), ((2980, 3008), 'Dart.sublime_plugin_lib.panels.OutputPanel', 'OutputPanel', (['"""dart.analyzer"""'], {}), "('dart.analyzer')\n", (2991, 3008), False, 'from Dart.sublime_plugin_lib.panels import OutputPanel\n'), ((931, 960), 'os.path.realpath', 'os.path.realpath', (['errors.file'], {}), '(errors.file)\n', (947, 960), False, 'import os\n'), ((1556, 1580), 'Dart.lib.analyzer.api.types.Location', 'Location', (['error.location'], {}), '(error.location)\n', (1564, 1580), False, 'from Dart.lib.analyzer.api.types import Location\n'), ((1694, 1729), 'sublime.Region', 'sublime.Region', (['pt', '(pt + loc.length)'], {}), '(pt, pt + loc.length)\n', (1708, 1729), False, 'import sublime\n'), ((765, 788), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (786, 788), False, 'import sublime\n'), ((3267, 3290), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (3288, 3290), False, 'import sublime\n'), ((2705, 2729), 'Dart.lib.analyzer.api.types.Location', 'Location', (['error.location'], {}), '(error.location)\n', (2713, 2729), False, 'from Dart.lib.analyzer.api.types import Location\n')] |
from djangocms_versioning.exceptions import ConditionFailed
from djangocms_versioning.models import Version
from djangocms_versioning_filer.models import FileGrouper
from djangocms_versioning_filer.monkeypatch.models import (
is_file_content_valid_for_discard,
is_file_content_valid_for_revert,
)
from .base import BaseFilerVersioningTestCase
class FilerVersioningChecksTestCase(BaseFilerVersioningTestCase):
    """Tests for the monkeypatched discard/revert validity checks on filer file content."""

    def test_checks_only_filer_models(self):
        """Non-filer content (a page draft version) passes both checks with no error."""
        # Dont expect to raise any error
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(
            is_file_content_valid_for_discard(self.page_draft_version, self.superuser),
            None,
        )
        self.assertEquals(
            is_file_content_valid_for_revert(self.page_draft_version, self.superuser),
            None,
        )

    def test_check_is_file_content_valid_for_discard(self):
        """Exercise is_file_content_valid_for_discard with two grouper setups:
        the first must raise ConditionFailed, the second must return None.
        """
        # Scenario 1: grouper holding one draft file and one published file;
        # discarding the published file's version is expected to fail.
        file_grouper = FileGrouper.objects.create()
        self.create_file_obj(
            original_filename='name.docx',
            grouper=file_grouper,
            publish=False,
        )
        discard_file = self.create_file_obj(
            original_filename='name2.docx',
            grouper=file_grouper,
            publish=True,
        )
        # Unrelated draft file (own grouper) — should not affect the check.
        self.create_file_obj(
            original_filename='name.docx',
            publish=False,
        )
        with self.assertRaises(ConditionFailed):
            is_file_content_valid_for_discard(
                Version.objects.get_for_content(discard_file),
                self.superuser,
            )

        # Scenario 2: grouper inside a folder with two drafts created before
        # the published file; discarding the published file's version is
        # expected to pass (returns None).
        file_grouper = FileGrouper.objects.create()
        self.create_file_obj(
            original_filename='name.docx',
            folder=self.folder,
            grouper=file_grouper,
            publish=False,
        )
        self.create_file_obj(
            original_filename='name23.docx',
            folder=self.folder,
            grouper=file_grouper,
            publish=False,
        )
        discard_file = self.create_file_obj(
            original_filename='name2.docx',
            folder=self.folder,
            grouper=file_grouper,
            publish=True,
        )
        self.create_file_obj(
            original_filename='name.docx',
            folder=self.folder,
            publish=False,
        )
        self.assertEquals(
            is_file_content_valid_for_discard(
                Version.objects.get_for_content(discard_file),
                self.superuser,
            ),
            None,
        )

    def test_check_is_file_content_valid_for_revert(self):
        """Exercise is_file_content_valid_for_revert with two grouper setups:
        the first must raise ConditionFailed, the second must return None.
        """
        # Scenario 1: reverting the draft file's version while the grouper
        # also holds a published file is expected to fail.
        file_grouper = FileGrouper.objects.create()
        revert_file = self.create_file_obj(
            original_filename='name.docx',
            grouper=file_grouper,
            publish=False,
        )
        self.create_file_obj(
            original_filename='name2.docx',
            grouper=file_grouper,
            publish=True,
        )
        # Unrelated draft file (own grouper) — should not affect the check.
        self.create_file_obj(
            original_filename='name.docx',
            publish=False,
        )
        with self.assertRaises(ConditionFailed):
            is_file_content_valid_for_revert(
                Version.objects.get_for_content(revert_file),
                self.superuser,
            )

        # Scenario 2: same names inside a folder, draft created before the
        # published file; reverting the draft's version is expected to pass.
        file_grouper = FileGrouper.objects.create()
        revert_file = self.create_file_obj(
            original_filename='name2.docx',
            folder=self.folder,
            grouper=file_grouper,
            publish=False,
        )
        self.create_file_obj(
            original_filename='name2.docx',
            folder=self.folder,
            grouper=file_grouper,
            publish=True,
        )
        self.create_file_obj(
            original_filename='name.docx',
            folder=self.folder,
            publish=False,
        )
        self.assertEquals(
            is_file_content_valid_for_revert(
                Version.objects.get_for_content(revert_file),
                self.superuser,
            ),
            None,
        )
| [
"djangocms_versioning.models.Version.objects.get_for_content",
"djangocms_versioning_filer.monkeypatch.models.is_file_content_valid_for_revert",
"djangocms_versioning_filer.monkeypatch.models.is_file_content_valid_for_discard",
"djangocms_versioning_filer.models.FileGrouper.objects.create"
] | [((877, 905), 'djangocms_versioning_filer.models.FileGrouper.objects.create', 'FileGrouper.objects.create', ([], {}), '()\n', (903, 905), False, 'from djangocms_versioning_filer.models import FileGrouper\n'), ((1548, 1576), 'djangocms_versioning_filer.models.FileGrouper.objects.create', 'FileGrouper.objects.create', ([], {}), '()\n', (1574, 1576), False, 'from djangocms_versioning_filer.models import FileGrouper\n'), ((2559, 2587), 'djangocms_versioning_filer.models.FileGrouper.objects.create', 'FileGrouper.objects.create', ([], {}), '()\n', (2585, 2587), False, 'from djangocms_versioning_filer.models import FileGrouper\n'), ((3227, 3255), 'djangocms_versioning_filer.models.FileGrouper.objects.create', 'FileGrouper.objects.create', ([], {}), '()\n', (3253, 3255), False, 'from djangocms_versioning_filer.models import FileGrouper\n'), ((547, 621), 'djangocms_versioning_filer.monkeypatch.models.is_file_content_valid_for_discard', 'is_file_content_valid_for_discard', (['self.page_draft_version', 'self.superuser'], {}), '(self.page_draft_version, self.superuser)\n', (580, 621), False, 'from djangocms_versioning_filer.monkeypatch.models import is_file_content_valid_for_discard, is_file_content_valid_for_revert\n'), ((690, 763), 'djangocms_versioning_filer.monkeypatch.models.is_file_content_valid_for_revert', 'is_file_content_valid_for_revert', (['self.page_draft_version', 'self.superuser'], {}), '(self.page_draft_version, self.superuser)\n', (722, 763), False, 'from djangocms_versioning_filer.monkeypatch.models import is_file_content_valid_for_discard, is_file_content_valid_for_revert\n'), ((1431, 1476), 'djangocms_versioning.models.Version.objects.get_for_content', 'Version.objects.get_for_content', (['discard_file'], {}), '(discard_file)\n', (1462, 1476), False, 'from djangocms_versioning.models import Version\n'), ((2354, 2399), 'djangocms_versioning.models.Version.objects.get_for_content', 'Version.objects.get_for_content', (['discard_file'], {}), 
'(discard_file)\n', (2385, 2399), False, 'from djangocms_versioning.models import Version\n'), ((3111, 3155), 'djangocms_versioning.models.Version.objects.get_for_content', 'Version.objects.get_for_content', (['revert_file'], {}), '(revert_file)\n', (3142, 3155), False, 'from djangocms_versioning.models import Version\n'), ((3854, 3898), 'djangocms_versioning.models.Version.objects.get_for_content', 'Version.objects.get_for_content', (['revert_file'], {}), '(revert_file)\n', (3885, 3898), False, 'from djangocms_versioning.models import Version\n')] |
from django.http import HttpRequest, HttpResponse
from django.utils.deprecation import MiddlewareMixin
from typing import Optional
import logging
class QuickpayMiddleware(MiddlewareMixin):
    """Divert the first cartridge checkout step to the Quickpay checkout view."""

    def process_view(self, request: HttpRequest, view_func, view_args, view_kwargs) -> Optional[HttpResponse]:
        """
        Intercept a POST to cartridge's checkout_steps view at the first
        checkout step and return the Quickpay checkout response instead.
        Returns None (i.e. lets Django proceed normally) in every other case.
        """
        # Imported here rather than at module level — presumably to avoid
        # circular imports at Django startup; TODO confirm.
        from cartridge.shop.views import checkout_steps
        from cartridge.shop.checkout import CHECKOUT_STEP_FIRST
        # IDIOM FIX: pass lazy %-style arguments instead of eagerly
        # formatting with str.format, so the message is only built when
        # DEBUG logging is actually enabled.
        logging.debug("Quickpay.process_view: method=%s, at checkout=%s, step=%s",
                      request.method, view_func is checkout_steps, request.POST.get('step', 0))
        # Non-numeric or missing 'step' values are treated as step 0.
        step_str = request.POST.get('step', '0')
        step = int(step_str) if step_str.isdigit() else 0
        if (request.method == 'POST'
                and view_func is checkout_steps
                and step == CHECKOUT_STEP_FIRST):
            logging.debug("Quickpay.process_view: Making QP checkout view")
            from .views import quickpay_checkout
            return quickpay_checkout(request)
        else:
            return None
| [
"logging.debug"
] | [((863, 926), 'logging.debug', 'logging.debug', (['"""Quickpay.process_view: Making QP checkout view"""'], {}), "('Quickpay.process_view: Making QP checkout view')\n", (876, 926), False, 'import logging\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import scipy.misc
import sys
import re
import fnmatch
import datetime
from PIL import Image
import numpy as np
'''
srun --mem 10000 python lib/datasets/wider/convert_face_to_coco.py --dataset cs6-train-det
'''
def add_path(path):
    """Prepend *path* to ``sys.path`` unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
# Make this script's directory and the repo root (two levels up) importable
# so the local `utils` package resolves when the script is run directly.
this_dir = os.path.dirname(__file__)
add_path(this_dir)
# print(this_dir)
add_path(os.path.join(this_dir, '..', '..'))
import utils
import utils.boxes as bboxs_util
import utils.face_utils as face_util
def parse_args():
    """Define and evaluate the command-line interface for the converter."""
    parser = argparse.ArgumentParser(description='Convert dataset')
    parser.add_argument('--dataset', help="wider", default='wider', type=str)
    parser.add_argument('--outdir', help="output dir for json files", default='', type=str)
    parser.add_argument('--datadir', help="data dir for annotations to be converted", default='', type=str)
    parser.add_argument('--imdir', help="root directory for loading dataset images", default='', type=str)
    parser.add_argument('--annotfile', help="directly specify the annotations file", default='', type=str)
    parser.add_argument('--thresh', help="specify the confidence threshold on detections", default=-1, type=float)
    return parser.parse_args()
def convert_wider_annots(data_dir, out_dir, data_set='WIDER', conf_thresh=0.5):
    """Convert from WIDER FDDB-style format to COCO bounding box.

    Reads 'wider_face_train_annot.txt' from *data_dir* and writes a COCO-style
    JSON annotation file into *out_dir*.
    COCO bbox format (http://cocodataset.org/#format-data): [x, y, width, height].
    """
    json_name = 'wider_face_train_annot_coco_style.json'
    img_id = 0
    ann_id = 0
    cat_id = 1  # single 'face' category

    print('Starting %s' % data_set)
    ann_dict = {}
    categories = [{"id": 1, "name": 'face'}]
    images = []
    annotations = []
    ann_file = os.path.join(data_dir, 'wider_face_train_annot.txt')
    wider_annot_dict = face_util.parse_wider_gt(ann_file)  # [im-file] = [[x,y,w,h], ...]

    for filename in wider_annot_dict.keys():
        if len(images) % 50 == 0:
            print("Processed %s images, %s annotations" % (
                len(images), len(annotations)))

        image = {}
        image['id'] = img_id
        img_id += 1
        # BUG FIX: the original assigned im.height to 'width' and im.width to
        # 'height'.  COCO expects the true pixel dimensions.  The context
        # manager also ensures the image file handle is closed.
        with Image.open(os.path.join(data_dir, filename)) as im:
            image['width'] = im.width
            image['height'] = im.height
        image['file_name'] = filename
        images.append(image)

        for gt_bbox in wider_annot_dict[filename]:
            ann = {}
            ann['id'] = ann_id
            ann_id += 1
            ann['image_id'] = image['id']
            ann['segmentation'] = []
            ann['category_id'] = cat_id  # 1:"face" for WIDER
            ann['iscrowd'] = 0
            ann['area'] = gt_bbox[2] * gt_bbox[3]
            ann['bbox'] = gt_bbox
            annotations.append(ann)

    ann_dict['images'] = images
    ann_dict['categories'] = categories
    ann_dict['annotations'] = annotations
    print("Num categories: %s" % len(categories))
    print("Num images: %s" % len(images))
    print("Num annotations: %s" % len(annotations))
    with open(os.path.join(out_dir, json_name), 'w', encoding='utf8') as outfile:
        outfile.write(json.dumps(ann_dict))
def convert_cs6_annots(ann_file, im_dir, out_dir, data_set='CS6-subset', conf_thresh=0.5):
    """Convert from WIDER FDDB-style format to COCO bounding box.

    Reads annotations from *ann_file*, image files from *im_dir*, and writes a
    COCO-style JSON file (name chosen by *data_set*) into *out_dir*.
    For 'score' variants, images whose detections all fall below *conf_thresh*
    are skipped entirely.
    """
    # cs6 subsets: each variant writes a differently-named JSON file.
    if data_set=='CS6-subset':
        json_name = 'cs6-subset_face_train_annot_coco_style.json'

    elif data_set=='CS6-subset-score':
        # include "scores" as soft-labels
        json_name = 'cs6-subset_face_train_score-annot_coco_style.json'

    elif data_set=='CS6-subset-gt':
        json_name = 'cs6-subset-gt_face_train_annot_coco_style.json'

    elif data_set=='CS6-train-gt':
        # full train set of CS6 (86 videos)
        json_name = 'cs6-train-gt.json'

    elif data_set=='CS6-train-det-score':
        # soft-labels used in distillation
        json_name = 'cs6-train-det-score_face_train_annot_coco_style.json'

    elif data_set=='CS6-train-det-score-0.5':
        # soft-labels used in distillation, keeping dets with score > 0.5
        json_name = 'cs6-train-det-score-0.5_face_train_annot_coco_style.json'
        conf_thresh = 0.5

    elif data_set=='CS6-train-det':
        json_name = 'cs6-train-det_face_train_annot_coco_style.json'

    elif data_set=='CS6-train-det-0.5':
        json_name = 'cs6-train-det-0.5_face_train_annot_coco_style.json'

    elif data_set=='CS6-train-easy-hp':
        json_name = 'cs6-train-easy-hp.json'

    elif data_set=='CS6-train-easy-gt':
        json_name = 'cs6-train-easy-gt.json'

    elif data_set=='CS6-train-easy-det':
        json_name = 'cs6-train-easy-det.json'

    elif data_set=='CS6-train-hp':
        json_name = 'cs6-train-hp.json'

    else:
        raise NotImplementedError

    img_id = 0
    ann_id = 0
    cat_id = 1  # single 'face' category

    print('Starting %s' % data_set)
    ann_dict = {}
    categories = [{"id": 1, "name": 'face'}]
    images = []
    annotations = []
    wider_annot_dict = face_util.parse_wider_gt(ann_file)  # [im-file] = [[x,y,w,h], ...]

    for filename in wider_annot_dict.keys():
        if len(images) % 50 == 0:
            print("Processed %s images, %s annotations" % (
                len(images), len(annotations)))

        if 'score' in data_set:
            # Skip the whole image when no detection clears the threshold.
            dets = np.array(wider_annot_dict[filename])
            if not any(dets[:,4] > conf_thresh):
                continue

        image = {}
        image['id'] = img_id
        img_id += 1
        # BUG FIX: the original assigned im.height to 'width' and im.width to
        # 'height'.  COCO expects the true pixel dimensions.  The context
        # manager also ensures the image file handle is closed.
        with Image.open(os.path.join(im_dir, filename)) as im:
            image['width'] = im.width
            image['height'] = im.height
        image['file_name'] = filename
        images.append(image)

        for gt_bbox in wider_annot_dict[filename]:
            ann = {}
            ann['id'] = ann_id
            ann_id += 1
            ann['image_id'] = image['id']
            ann['segmentation'] = []
            ann['category_id'] = cat_id  # 1:"face" for WIDER
            ann['iscrowd'] = 0
            ann['area'] = gt_bbox[2] * gt_bbox[3]
            ann['bbox'] = gt_bbox[:4]
            ann['dataset'] = data_set
            # NOTE(review): assumes every annotation row carries a confidence
            # at index 4 (and an annot source at index 5 for HP variants) —
            # TODO confirm for GT-style files.
            score = gt_bbox[4]
            if score < conf_thresh:
                continue
            if 'hp' in data_set:
                ann['score'] = score  # for soft-label distillation
                ann['source'] = gt_bbox[5]  # annot source: {1: detection, 2:tracker}
            if data_set=='CS6-train-easy-det':
                if gt_bbox[5] != 1:
                    continue  # ignore if annot source is not detection (i.e. skip HP)
            annotations.append(ann)

    ann_dict['images'] = images
    ann_dict['categories'] = categories
    ann_dict['annotations'] = annotations
    print("Num categories: %s" % len(categories))
    print("Num images: %s" % len(images))
    print("Num annotations: %s" % len(annotations))
    with open(os.path.join(out_dir, json_name), 'w', encoding='utf8') as outfile:
        outfile.write(json.dumps(ann_dict, indent=2))
if __name__ == '__main__':
    args = parse_args()

    # Dispatch on --dataset: each branch optionally fills in default paths
    # and then runs the matching conversion.
    if args.dataset == "wider":
        convert_wider_annots(args.datadir, args.outdir)

    # --------------------------------------------------------------------------
    #   CS6 Train GT
    # --------------------------------------------------------------------------
    elif args.dataset == "cs6-subset":
        convert_cs6_annots(args.annotfile, args.imdir,
            args.outdir, data_set='CS6-subset')

    elif args.dataset == "cs6-subset-score":
        convert_cs6_annots(args.annotfile, args.imdir,
            args.outdir, data_set='CS6-subset-score')

    elif args.dataset == "cs6-subset-gt":
        convert_cs6_annots(args.annotfile, args.imdir,
            args.outdir, data_set='CS6-subset-gt')

    elif args.dataset == "cs6-train-gt":
        # set defaults if inputs args are empty
        if not args.annotfile:
            args.annotfile = 'data/CS6_annot/annot-format-GT/cs6_gt_annot_train.txt'
        if not args.imdir:
            args.imdir = 'data/CS6_annot'
        if not args.outdir:
            args.outdir = 'data/CS6_annot'
        convert_cs6_annots(args.annotfile, args.imdir,
            args.outdir, data_set='CS6-train-gt')

    # Distillation scores for CS6-Train detections (conf 0.25)
    # FIXME(review): this branch only sets a default annotfile and never sets
    # imdir/outdir nor calls convert_cs6_annots — the conversion silently
    # does nothing for --dataset cs6-train-det-score.
    elif args.dataset == "cs6-train-det-score":
        # set defaults if inputs args are empty
        if not args.annotfile:
            args.annotfile = 'data/CS6_annot/annot-format-GT/cs6_det_annot_train_scores.txt'

    # --------------------------------------------------------------------------
    #   CS6 Train unlabeled
    # --------------------------------------------------------------------------
    # Pseudo-labels from CS6-Train
    elif args.dataset == "cs6-train-det":
        # set defaults if inputs args are empty
        if not args.annotfile:
            args.annotfile = 'data/CS6_annot/annot-format-GT/cs6_det_annot_train_conf-0.25.txt'
        if not args.imdir:
            args.imdir = 'data/CS6_annot'
        if not args.outdir:
            args.outdir = 'data/CS6_annot'
        convert_cs6_annots(args.annotfile, args.imdir,
            args.outdir, data_set='CS6-train-det')

    elif args.dataset == "cs6-train-det-0.5":
        # set defaults if inputs args are empty
        if not args.annotfile:
            args.annotfile = 'data/CS6_annot/annot-format-GT/cs6_det_annot_train_conf-0.50.txt'
        if not args.imdir:
            args.imdir = 'data/CS6_annot'
        if not args.outdir:
            args.outdir = 'data/CS6_annot'
        convert_cs6_annots(args.annotfile, args.imdir,
            args.outdir, data_set='CS6-train-det-0.5')

    # Hard positives from CS6-Train
    elif args.dataset == "cs6-train-hp":
        # set defaults if inputs args are empty
        if not args.annotfile:
            args.annotfile = 'Outputs/tracklets/hp-res-cs6/hp_cs6_train.txt'
        if not args.imdir:
            args.imdir = 'data/CS6_annot'
        if not args.outdir:
            args.outdir = 'data/CS6_annot'
        convert_cs6_annots(args.annotfile, args.imdir,
            args.outdir, data_set='CS6-train-hp', conf_thresh=0.5)

    # --------------------------------------------------------------------------
    #   CS6 "EASY" set
    # --------------------------------------------------------------------------
    elif args.dataset == "cs6-train-easy-hp":
        # set defaults if inputs args are empty
        if not args.annotfile:
            args.annotfile = 'Outputs/tracklets/hp-res-cs6/hp_cs6_easy.txt'
        if not args.imdir:
            args.imdir = 'data/CS6_annot'
        if not args.outdir:
            args.outdir = 'data/CS6_annot'
        convert_cs6_annots(args.annotfile, args.imdir,
            args.outdir, data_set='CS6-train-easy-hp')

    elif args.dataset == "cs6-train-easy-gt":
        # set defaults if inputs args are empty
        if not args.annotfile:
            args.annotfile = 'data/CS6_annot/annot-format-GT/cs6_gt_annot_train-easy.txt'
        if not args.imdir:
            args.imdir = 'data/CS6_annot'
        if not args.outdir:
            args.outdir = 'data/CS6_annot'
        convert_cs6_annots(args.annotfile, args.imdir,
            args.outdir, data_set='CS6-train-easy-gt')

    elif args.dataset == "cs6-train-easy-det":
        # set defaults if inputs args are empty
        if not args.annotfile:
            args.annotfile = 'Outputs/tracklets/hp-res-cs6/hp_cs6_train_easy.txt'
        if not args.imdir:
            args.imdir = 'data/CS6_annot'
        if not args.outdir:
            args.outdir = 'data/CS6_annot'
        convert_cs6_annots(args.annotfile, args.imdir,
            args.outdir, data_set='CS6-train-easy-det')

    else:
        print("Dataset not supported: %s" % args.dataset)
| [
"sys.path.insert",
"argparse.ArgumentParser",
"json.dumps",
"os.path.join",
"utils.face_utils.parse_wider_gt",
"os.path.dirname",
"numpy.array"
] | [((512, 537), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (527, 537), False, 'import os\n'), ((584, 618), 'os.path.join', 'os.path.join', (['this_dir', '""".."""', '""".."""'], {}), "(this_dir, '..', '..')\n", (596, 618), False, 'import os\n'), ((738, 792), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert dataset"""'}), "(description='Convert dataset')\n", (761, 792), False, 'import argparse\n'), ((2075, 2127), 'os.path.join', 'os.path.join', (['data_dir', '"""wider_face_train_annot.txt"""'], {}), "(data_dir, 'wider_face_train_annot.txt')\n", (2087, 2127), False, 'import os\n'), ((2151, 2185), 'utils.face_utils.parse_wider_gt', 'face_util.parse_wider_gt', (['ann_file'], {}), '(ann_file)\n', (2175, 2185), True, 'import utils.face_utils as face_util\n'), ((5343, 5377), 'utils.face_utils.parse_wider_gt', 'face_util.parse_wider_gt', (['ann_file'], {}), '(ann_file)\n', (5367, 5377), True, 'import utils.face_utils as face_util\n'), ((475, 499), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (490, 499), False, 'import sys\n'), ((2498, 2530), 'os.path.join', 'os.path.join', (['data_dir', 'filename'], {}), '(data_dir, filename)\n', (2510, 2530), False, 'import os\n'), ((3362, 3394), 'os.path.join', 'os.path.join', (['out_dir', 'json_name'], {}), '(out_dir, json_name)\n', (3374, 3394), False, 'import os\n'), ((3452, 3472), 'json.dumps', 'json.dumps', (['ann_dict'], {}), '(ann_dict)\n', (3462, 3472), False, 'import json\n'), ((5653, 5689), 'numpy.array', 'np.array', (['wider_annot_dict[filename]'], {}), '(wider_annot_dict[filename])\n', (5661, 5689), True, 'import numpy as np\n'), ((5859, 5889), 'os.path.join', 'os.path.join', (['im_dir', 'filename'], {}), '(im_dir, filename)\n', (5871, 5889), False, 'import os\n'), ((7221, 7253), 'os.path.join', 'os.path.join', (['out_dir', 'json_name'], {}), '(out_dir, json_name)\n', (7233, 7253), False, 'import os\n'), ((7311, 7341), 
'json.dumps', 'json.dumps', (['ann_dict'], {'indent': '(2)'}), '(ann_dict, indent=2)\n', (7321, 7341), False, 'import json\n')] |
"""
=================================================
Deterministic Tracking with EuDX on Tensor Fields
=================================================
In this example we do deterministic fiber tracking on Tensor fields with EuDX
[Garyfallidis12]_.
This example requires to import example `reconst_dti.py` to run. EuDX was
primarily made with cpu efficiency in mind. Therefore, it should be useful to
give you a quick overview of your reconstruction results with the help of
tracking.
"""
import os
import numpy as np
import nibabel as nib
if not os.path.exists('tensor_fa.nii.gz'):
import reconst_dti
"""
EuDX will use the directions (eigen vectors) of the Tensors to propagate
streamlines from voxel to voxel and fractional anisotropy to stop tracking.
"""
fa_img = nib.load('tensor_fa.nii.gz')
FA = fa_img.get_data()
evecs_img = nib.load('tensor_evecs.nii.gz')
evecs = evecs_img.get_data()
"""
In the background of the image the fitting will not be accurate because there all
measured signal is mostly noise and possibly we will find FA values with nans
(not a number). We can easily remove these in the following way.
"""
FA[np.isnan(FA)] = 0
"""
EuDX takes as input discretized voxel directions on a unit sphere. Therefore,
it is necessary to discretize the eigen vectors before feeding them in EuDX.
For the discretization procedure we use an evenly distributed sphere of 724
points which we can access using the get_sphere function.
"""
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
"""
We use quantize_evecs (evecs here stands for eigen vectors) to apply the
discretization.
"""
from dipy.reconst.dti import quantize_evecs
peak_indices = quantize_evecs(evecs, sphere.vertices)
"""
EuDX is the fiber tracking algorithm that we use in this example.
The most important parameters are the first one which represents the
magnitude of the peak of a scalar anisotropic function, the
second which represents the indices of the discretized directions of
the peaks and odf_vertices are the vertices of the input sphere.
"""
from dipy.tracking.eudx import EuDX
from dipy.tracking.streamline import Streamlines
eu = EuDX(FA.astype('f8'), peak_indices, seeds=50000,
odf_vertices=sphere.vertices, a_low=0.2)
tensor_streamlines = Streamlines(eu)
"""
We can now save the results in the disk. For this purpose we can use the
TrackVis format (``*.trk``). First, we need to import ``save_trk`` function.
"""
from dipy.io.streamline import save_trk
"""
Save the streamlines.
"""
ten_sl_fname = 'tensor_streamlines.trk'
save_trk(ten_sl_fname, tensor_streamlines,
affine=np.eye(4),
vox_size=fa_img.header.get_zooms()[:3],
shape=FA.shape)
"""
If you don't want to use Trackvis to visualize the file you can use our
lightweight `dipy.viz` module.
"""
try:
from dipy.viz import window, actor
except ImportError:
raise ImportError('Python fury module is not installed')
import sys
sys.exit()
"""
Create a scene.
"""
ren = window.Renderer()
"""
Every streamline will be coloured according to its orientation
"""
from dipy.viz import colormap as cmap
"""
`actor.line` creates a streamline actor for streamline visualization
and `ren.add` adds this actor to the scene
"""
ren.add(actor.streamtube(tensor_streamlines,
cmap.line_colors(tensor_streamlines)))
print('Saving illustration as tensor_tracks.png')
ren.SetBackground(1, 1, 1)
window.record(ren, out_path='tensor_tracks.png', size=(600, 600))
# Enables/disables interactive visualization
interactive = False
if interactive:
window.show(ren)
"""
.. figure:: tensor_tracks.png
:align: center
Deterministic streamlines with EuDX on a Tensor Field.
References
----------
.. [Garyfallidis12] Garyfallidis E., "Towards an accurate brain tractography",
PhD thesis, University of Cambridge, 2012.
.. include:: ../links_names.inc
"""
| [
"dipy.reconst.dti.quantize_evecs",
"os.path.exists",
"numpy.eye",
"dipy.tracking.streamline.Streamlines",
"nibabel.load",
"dipy.data.get_sphere",
"numpy.isnan",
"dipy.viz.colormap.line_colors",
"sys.exit",
"dipy.viz.window.show",
"dipy.viz.window.record",
"dipy.viz.window.Renderer"
] | [((782, 810), 'nibabel.load', 'nib.load', (['"""tensor_fa.nii.gz"""'], {}), "('tensor_fa.nii.gz')\n", (790, 810), True, 'import nibabel as nib\n'), ((846, 877), 'nibabel.load', 'nib.load', (['"""tensor_evecs.nii.gz"""'], {}), "('tensor_evecs.nii.gz')\n", (854, 877), True, 'import nibabel as nib\n'), ((1506, 1532), 'dipy.data.get_sphere', 'get_sphere', (['"""symmetric724"""'], {}), "('symmetric724')\n", (1516, 1532), False, 'from dipy.data import get_sphere\n'), ((1692, 1730), 'dipy.reconst.dti.quantize_evecs', 'quantize_evecs', (['evecs', 'sphere.vertices'], {}), '(evecs, sphere.vertices)\n', (1706, 1730), False, 'from dipy.reconst.dti import quantize_evecs\n'), ((2283, 2298), 'dipy.tracking.streamline.Streamlines', 'Streamlines', (['eu'], {}), '(eu)\n', (2294, 2298), False, 'from dipy.tracking.streamline import Streamlines\n'), ((3016, 3033), 'dipy.viz.window.Renderer', 'window.Renderer', ([], {}), '()\n', (3031, 3033), False, 'from dipy.viz import window, actor\n'), ((3455, 3520), 'dipy.viz.window.record', 'window.record', (['ren'], {'out_path': '"""tensor_tracks.png"""', 'size': '(600, 600)'}), "(ren, out_path='tensor_tracks.png', size=(600, 600))\n", (3468, 3520), False, 'from dipy.viz import window, actor\n'), ((555, 589), 'os.path.exists', 'os.path.exists', (['"""tensor_fa.nii.gz"""'], {}), "('tensor_fa.nii.gz')\n", (569, 589), False, 'import os\n'), ((1145, 1157), 'numpy.isnan', 'np.isnan', (['FA'], {}), '(FA)\n', (1153, 1157), True, 'import numpy as np\n'), ((3606, 3622), 'dipy.viz.window.show', 'window.show', (['ren'], {}), '(ren)\n', (3617, 3622), False, 'from dipy.viz import window, actor\n'), ((2631, 2640), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2637, 2640), True, 'import numpy as np\n'), ((2973, 2983), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2981, 2983), False, 'import sys\n'), ((3337, 3373), 'dipy.viz.colormap.line_colors', 'cmap.line_colors', (['tensor_streamlines'], {}), '(tensor_streamlines)\n', (3353, 3373), True, 'from dipy.viz 
import colormap as cmap\n')] |
#!/usr/bin/env python3
import sqlite3
import argparse
###############################################################################
#
# Super simple (and probably not very efficient) way to query sqlite file and
# produce a python list of dictionaries with the result of that query
#
###############################################################################
def do_query(sqlite_file, query_string):
    """Run *query_string* against the sqlite database at *sqlite_file*.

    Returns a list of dictionaries, one per result row, keyed by column
    name.  The connection is always closed, even when the query raises
    (the original leaked the connection on error).
    """
    con = sqlite3.connect(sqlite_file)
    try:
        # Connection.execute() is a shortcut that creates and returns a cursor.
        cursor = con.execute(query_string)
        colname = [d[0] for d in cursor.description]
        return [dict(zip(colname, row)) for row in cursor.fetchall()]
    finally:
        con.close()
###############################################################################
#
# The list of NVTX ranges
#
###############################################################################
class MarkerTable:
    """The list of NVTX ranges recorded in the capture.

    Loads every marker (range push/pop) event from the sqlite file, ordered
    by timestamp, then replays them incrementally via update_time() so that
    the stack of currently-open ranges can be rendered for any point in time.
    """
    def __init__(self, sqlite_filename):
        self.filename = sqlite_filename
        self.names = [] # map marker.id => marker string value
        self.tids = [] # map marker.id => marker thread id
        self.stack = [] # ids of currently-open ranges, in push order
        self.next_idx = int(0) # index of the next unconsumed event in self.list
        self.list = do_query(self.filename,
            '''
            select
               marker.timestamp,
               marker.flags,
               marker.id,
               StringTable.value,
               marker.objectId
            from cupti_activity_kind_marker as marker
            inner join StringTable
            on marker.name = StringTable._id_
            order by timestamp''')
        #######################################################################
        # we only want to see kernels invoked during the range of times between
        # when the user started and stopped using nvtx ranges:
        #######################################################################
        self.first_time = self.list[0]['timestamp']
        self.last_time = self.list[-1]['timestamp']
    ###########################################################################
    # pretty print the currently active nvtx ranges
    ###########################################################################
    def cvt_stack_to_string(self, tid_printing):
        """Return 'name(id):name(id):...' for the ranges open on one thread."""
        output = ''
        for m in self.stack:
            if self.tids[m] == tid_printing: # only elements for this thread
                output = output + self.names[m]+'('+str(m)+'):'
        return output
    ###########################################################################
    # Iterate through marker events for ranges until we're at the state
    # corresponding to input 'tm'
    ###########################################################################
    def update_time(self, tm, tid_printing):
        """Consume marker events up to time `tm` and return the stack string
        for thread `tid_printing`.

        NOTE(review): assumes `tm` never exceeds the last marker timestamp
        (callers only query kernels within [first_time, last_time]); otherwise
        the while condition would index past the end of self.list.
        """
        while self.list[self.next_idx]['timestamp'] <= tm:
            timestamp = self.list[self.next_idx]['timestamp']
            flags = self.list[self.next_idx]['flags']
            rangeid = self.list[self.next_idx]['id']
            nm = self.list[self.next_idx]['value']
            # as per cupti_activity.h: CUpti_ActivityObjectKindId is a 12 byte
            # union, where the threadId (in this case) sits in the middle 4
            # bytes.
            threadid = int.from_bytes(self.list[self.next_idx]['objectId'][4:8],
                                      byteorder='little', signed=True)
            if flags == 2: # a range starts
                # Range start command gives the name of the range. We want
                # self.names[rangeid]=nm, but need to fill in any holes because
                # Python arrays are dense and start at 0
                while rangeid >= len(self.names):
                    self.names.append(None)
                    self.tids.append(None)
                self.names[rangeid] = nm[:31]  # keep names short for one-line output
                self.tids[rangeid] = threadid
                self.stack.append(rangeid)
            elif flags == 4: # a range ends
                if rangeid in self.stack:
                    self.stack.pop(self.stack.index(rangeid))
                    if self.tids[rangeid] != threadid:
                        print("error: popping marker from different thread than pushed????\n")
                else:
                    print("error: popping non-existing marker????\n")
            self.next_idx = self.next_idx+1
        return self.cvt_stack_to_string(tid_printing)
def print_grids(gridX, gridY, gridZ, blockX, blockY, blockZ):
    """Print the grid and block dimensions as two tab-separated triples.

    Output is followed by a trailing tab so further columns can be appended
    on the same line.
    """
    grid_part = "({},{},{})".format(gridX, gridY, gridZ)
    block_part = "({},{},{})".format(blockX, blockY, blockZ)
    print(grid_part + "\t" + block_part, end='\t')
def print_place_ids(streamId, threadId):
    """Print the stream id and the thread id (rendered in hex), tab separated.

    Ends with a trailing tab so further columns can follow on the same line.
    """
    fields = [str(streamId), hex(threadId)]
    print("\t".join(fields), end='\t')
def print_times(apiStart, apiLatency, gpuStart, gpuLatency):
    """Print the four timing columns, tab separated, with a trailing tab."""
    values = (apiStart, apiLatency, gpuStart, gpuLatency)
    print("\t".join(str(v) for v in values), end='\t')
###############################################################################
#
# The list of kernel calls
#
###############################################################################
class KernelTable:
    """The list of kernel launches in the capture window.

    Each row joins a concurrent-kernel record with the runtime API call that
    launched it (on correlationId), restricted to [start_time, end_time].
    """
    def __init__(self, sqlite_filename, markers, start_time, end_time, args):
        self.filename = sqlite_filename
        self.markers = markers  # MarkerTable or None when no NVTX data exists
        self.start_time = start_time
        self.end_time = end_time
        self.args = args  # parsed command-line args (reads args.also_markers)
        # We join the concurrent_kernel table with the runtime api call table
        # on correlationId. We only select records between the start and end
        # of the NVTX __start_profile and __stop_profile
        self.list = do_query(self.filename,
            '''
            select
               kernels._id_,
               kernels.registersPerThread,
               kernels.start as kernelStart,
               kernels.completed as kernelEnd,
               kernels.deviceId,
               kernels.contextId,
               kernels.streamId,
               kernels.gridX,
               kernels.gridY,
               kernels.gridZ,
               kernels.blockX,
               kernels.blockY,
               kernels.blockZ,
               kernels.staticSharedMemory,
               kernels.correlationId,
               api_calls.start as apiStart,
               api_calls.end as apiEnd,
               api_calls.processId,
               api_calls.threadId,
               StringTable.value as kernelName
            from cupti_activity_kind_concurrent_kernel as kernels
            inner join cupti_activity_kind_runtime as api_calls
            on kernels.correlationId = api_calls.correlationId
            inner join StringTable
            on kernels.name = StringTable._id_
            where apiStart > {start_val} and apiStart < {end_val}
            order by apiStart'''.format(
                start_val = self.start_time,
                end_val = self.end_time))
    ###########################################################################
    # here we're doing the work that couldn't be done using a simple sql join
    ###########################################################################
    def process_list(self):
        """Print one tab-separated line per kernel launch.

        Columns: grid dims, block dims, stream id, thread id, api start,
        api latency, gpu start, gpu execution time, [open NVTX ranges,]
        kernel name.  Times are relative to self.start_time.
        """
        for kernel_call in self.list:
            call_time = kernel_call['apiStart']
            rel_start_time = (kernel_call['kernelStart']-self.start_time)
            execution_time = (kernel_call['kernelEnd'] -
                              kernel_call['kernelStart'])
            thread_id = kernel_call['threadId']
            print_grids(kernel_call['gridX'],
                        kernel_call['gridY'],
                        kernel_call['gridZ'],
                        kernel_call['blockX'],
                        kernel_call['blockY'],
                        kernel_call['blockZ'])
            print_place_ids(kernel_call['streamId'], thread_id)
            print_times(call_time-self.start_time,
                        kernel_call['apiEnd']-call_time,
                        rel_start_time,
                        execution_time)
            # print the markers from just before the kernel's api timestamp
            if self.args.also_markers:
                if self.markers is not None:
                    print(self.markers.update_time(call_time, thread_id), end='\t')
                else:
                    print('', end='\t')
            print(kernel_call['kernelName'])
###############################################################################
#
# The list of memcpys
#
###############################################################################
class MemcpyTable:
    """The list of memcpy operations in the capture window.

    Joins each memcpy record with its launching runtime API call
    (on correlationId), restricted to [start_time, end_time].
    """
    # enums from cupti_activity.h (index == the integer stored in the db)
    memory_kinds = ['UNKNOWN', 'PAGEABLE', 'PINNED', 'DEVICE',
                    'ARRAY', 'MANAGED', 'DEVICE_STATIC', 'MANAGED_STATIC']
    copy_kinds = ['UNKNOWN', 'HTOD', 'DTOH', 'HTOA', 'ATOH', 'ATOA', 'ATOD',
                  'DTOA', 'DTOD', 'HTOH', 'PTOP']
    def __init__(self, sqlite_filename, start_time, end_time, args):
        self.start_time = start_time
        self.end_time = end_time
        # We join the memcpy table with the runtime api call table
        # on correlationId. We only select records between the start and end
        # of the NVTX __start_profile and __stop_profile
        self.list = do_query(sqlite_filename,
            '''
            select
               memcpy._id_,
               memcpy.copyKind,
               memcpy.srcKind,
               memcpy.dstKind,
               memcpy.flags,
               memcpy.bytes,
               memcpy.start as cpyStart,
               memcpy.end as cpyEnd,
               memcpy.correlationId,
               memcpy.streamId,
               api_calls.start as apiStart,
               api_calls.end as apiEnd,
               api_calls.processId,
               api_calls.threadId
            from cupti_activity_kind_memcpy as memcpy
            inner join cupti_activity_kind_runtime as api_calls
            on memcpy.correlationId = api_calls.correlationId
            where apiStart > {start_val} and apiStart < {end_val}
            order by apiStart'''.format(
                start_val = start_time,
                end_val = end_time))
    ###########################################################################
    # here we're doing the work that couldn't be trivially done using a simple sql join
    ###########################################################################
    def process_list(self):
        """Print one tab-separated line per memcpy.

        The grid/block columns are reused to hold copy kind, src/dst memory
        kinds and flags; times are relative to self.start_time.
        """
        for memcpy in self.list:
            call_time = memcpy['apiStart']
            rel_start_time = (memcpy['cpyStart']-self.start_time)
            execution_time = (memcpy['cpyEnd'] -
                              memcpy['cpyStart'])
            print_grids(MemcpyTable.copy_kinds[memcpy['copyKind']],
                        MemcpyTable.memory_kinds[memcpy['srcKind']],
                        MemcpyTable.memory_kinds[memcpy['dstKind']],
                        memcpy['flags'],
                        '', '')
            print_place_ids(memcpy['streamId'], memcpy['threadId'])
            print_times(call_time-self.start_time,
                        memcpy['apiEnd']-call_time,
                        rel_start_time,
                        execution_time)
            # no marker, but memcpy bytes
            print('\tmemcpy - {}'.format(memcpy['bytes']))
###############################################################################
#
# The list of memsets
#
###############################################################################
class MemsetTable:
    """The list of memset operations in the capture window.

    Joins each memset record with its launching runtime API call
    (on correlationId), restricted to [start_time, end_time].
    """
    # enums from cupti_activity.h (index == the integer stored in the db)
    memory_kinds = ['UNKNOWN', 'PAGEABLE', 'PINNED', 'DEVICE',
                    'ARRAY', 'MANAGED', 'DEVICE_STATIC', 'MANAGED_STATIC']
    # NOTE(review): copy_kinds is unused in this class (copied from MemcpyTable)
    copy_kinds = ['UNKNOWN', 'HTOD', 'DTOH', 'HTOA', 'ATOH', 'ATOA', 'ATOD',
                  'DTOA', 'DTOD', 'HTOH', 'PTOP']
    def __init__(self, sqlite_filename, start_time, end_time, args):
        self.start_time = start_time
        self.end_time = end_time
        # We join the memset table with the runtime api call table
        # on correlationId. We only select records between the start and end
        # of the NVTX __start_profile and __stop_profile
        self.list = do_query(sqlite_filename,
            '''
            select
               memset._id_,
               memset.value,
               memset.memoryKind as dstKind,
               memset.flags,
               memset.bytes,
               memset.start as dvcStart,
               memset.end as dvcEnd,
               memset.correlationId,
               memset.streamId,
               api_calls.start as apiStart,
               api_calls.end as apiEnd,
               api_calls.processId,
               api_calls.threadId
            from cupti_activity_kind_memset as memset
            inner join cupti_activity_kind_runtime as api_calls
            on memset.correlationId = api_calls.correlationId
            where apiStart > {start_val} and apiStart < {end_val}
            order by apiStart'''.format(
                start_val = start_time,
                end_val = end_time))
    ###########################################################################
    # here we're doing the work that couldn't be trivially done using a simple sql join
    ###########################################################################
    def process_list(self):
        """Print one tab-separated line per memset.

        The grid/block columns are reused to hold fill value, destination
        memory kind and flags; times are relative to self.start_time.
        """
        for memset in self.list:
            call_time = memset['apiStart']
            rel_start_time = (memset['dvcStart']-self.start_time)
            execution_time = (memset['dvcEnd'] -
                              memset['dvcStart'])
            print_grids('',
                        memset['value'],
                        MemsetTable.memory_kinds[memset['dstKind']],
                        memset['flags'],
                        '', '')
            print_place_ids(memset['streamId'], memset['threadId'])
            print_times(call_time-self.start_time,
                        memset['apiEnd']-call_time,
                        rel_start_time,
                        execution_time)
            # no marker, but memset bytes
            print('\tmemset - {}'.format(memset['bytes']))
###############################################################################
#
# The list of synchronizations
#
###############################################################################
class SyncTable:
    """The list of synchronization events in the capture window.

    Joins each synchronization record with its launching runtime API call
    (on correlationId), restricted to [start_time, end_time].
    """
    # enums from cupti_activity.h (index == the integer stored in the db)
    sync_kinds = ['UNKNOWN', 'EVENT_SYNC', 'STREAM_WAIT_EVENT',
                  'STREAM_SYNC', 'CONTEXT_SYNC']
    def __init__(self, sqlite_filename, start_time, end_time, args):
        self.start_time = start_time
        self.end_time = end_time
        # We join the sync table with the runtime api call table
        # on correlationId. We only select records between the start and end
        # of the NVTX __start_profile and __stop_profile
        self.list = do_query(sqlite_filename,
            '''
            select
               sync._id_,
               sync.type,
               sync.start as dvcStart,
               sync.end as dvcEnd,
               sync.correlationId,
               sync.streamId,
               api_calls.start as apiStart,
               api_calls.end as apiEnd,
               api_calls.processId,
               api_calls.threadId
            from cupti_activity_kind_synchronization as sync
            inner join cupti_activity_kind_runtime as api_calls
            on sync.correlationId = api_calls.correlationId
            where apiStart > {start_val} and apiStart < {end_val}
            order by apiStart'''.format(
                start_val = start_time,
                end_val = end_time))
    ###########################################################################
    # here we're doing the work that couldn't be trivially done using a simple sql join
    ###########################################################################
    def process_list(self):
        """Print one tab-separated line per synchronization event.

        The third grid column is reused to hold the sync kind name; times
        are relative to self.start_time.
        """
        for sync in self.list:
            call_time = sync['apiStart']
            rel_start_time = (sync['dvcStart']-self.start_time)
            execution_time = (sync['dvcEnd'] -
                              sync['dvcStart'])
            print_grids('', '', SyncTable.sync_kinds[sync['type']],
                        '', '', '')
            print_place_ids(sync['streamId'], sync['threadId'])
            print_times(call_time-self.start_time,
                        sync['apiEnd']-call_time,
                        rel_start_time,
                        execution_time)
            # no marker
            print('\tsync')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Process the output of nvprof --profile-api-trace all.')
    parser.add_argument('input_file', nargs=1,
                        help='.nvvp file to process')
    parser.add_argument('--also-markers', action='store_true',
                        help='print nvtx marker info')
    args = parser.parse_args()
    try:
        markers = MarkerTable(args.input_file[0])
        start_time = markers.first_time
        end_time = markers.last_time
    except Exception:
        # No NVTX marker table in the capture (or it is empty): fall back to
        # the widest possible time window.  Catch Exception rather than a
        # bare `except:` so Ctrl-C (KeyboardInterrupt) and SystemExit still
        # terminate the program.
        markers = None
        start_time = 0
        end_time = 9223372036854775806
    kernels = KernelTable(args.input_file[0], markers, start_time, end_time, args)
    kernels.process_list()
    memcpys = MemcpyTable(args.input_file[0], start_time, end_time, args)
    memcpys.process_list()
    memsets = MemsetTable(args.input_file[0], start_time, end_time, args)
    memsets.process_list()
    syncs = SyncTable(args.input_file[0], start_time, end_time, args)
    syncs.process_list()
| [
"sqlite3.connect",
"argparse.ArgumentParser"
] | [((419, 447), 'sqlite3.connect', 'sqlite3.connect', (['sqlite_file'], {}), '(sqlite_file)\n', (434, 447), False, 'import sqlite3\n'), ((17522, 17619), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process the output of nvprof --profile-api-trace all."""'}), "(description=\n 'Process the output of nvprof --profile-api-trace all.')\n", (17545, 17619), False, 'import argparse\n')] |
import threading
import time
import mod_log
import mod_measure_list
import mod_sense_hat
# Threads management class
class ThreadManager(object):
    """Owns one background acquisition thread for a single channel.

    The thread periodically reads the channel's source and appends
    timestamped samples to the shared measure list until
    stop_acquisition() is called.
    """

    def __init__(self, log_mgr, channel, delay, source, measure_list):
        self.log_mgr = log_mgr              # logger module
        self.channel = channel              # acquisition channel id
        self.delay = delay                  # acquisition period (seconds — passed to time.sleep)
        self.source = source                # object exposing read_channel()
        self.measure_list = measure_list    # shared measure list (add_details)
        self.exit_flag = False              # set True to terminate the thread loop
        self.log_mgr.info(self.__class__.__name__, "Initialized for channel <" + str(self.channel) + ">")

    # Start acquisition thread
    def start_acquisition(self):
        """Spawn the acquisition thread and start sampling."""
        self.acq_thread = threading.Thread(target = self.acquisition_thread)
        self.acq_thread.start()

    # Acquisition thread definition
    def acquisition_thread(self):
        """Thread body: sample the source every `delay` seconds until exit_flag is set."""
        self.log_mgr.info(self.__class__.__name__, "Started for channel <" + str(self.channel) + ">")
        while (self.exit_flag == False):
            # Get timestamp
            ts = time.time()
            # Add to measure list
            self.measure_list.add_details(self.channel, self.source.read_channel(), ts)
            time.sleep(self.delay)

    # Stop acquisition thread
    def stop_acquisition (self):
        """Request termination; the loop exits after its current sleep."""
        self.log_mgr.info(self.__class__.__name__, "Stopped for channel <" + str(self.channel) + ">")
        self.exit_flag = True

    def stopped_acquisition(self):
        """Return True once the acquisition thread has actually finished.

        Bug fix: the original evaluated `not self.acq_thread.isAlive`, i.e.
        the truthiness of the bound method object itself (always True, so the
        result was always False) — and `isAlive` was removed in Python 3.9.
        The method must be *called*: `is_alive()`.
        """
        return not self.acq_thread.is_alive()

    def get_channel(self):
        """Return this manager's channel id."""
        return self.channel
| [
"threading.Thread",
"time.time",
"time.sleep"
] | [((850, 898), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.acquisition_thread'}), '(target=self.acquisition_thread)\n', (866, 898), False, 'import threading\n'), ((1194, 1205), 'time.time', 'time.time', ([], {}), '()\n', (1203, 1205), False, 'import time\n'), ((1342, 1364), 'time.sleep', 'time.sleep', (['self.delay'], {}), '(self.delay)\n', (1352, 1364), False, 'import time\n')] |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from DAO import ServidoresDAO
from Tkinter import *
import ConfigParser
import os
class Application:
    """Tkinter GUI (Python 2) for cleaning JBoss tmp/log/deploy directories.

    Builds the widget tree, pre-fills the path field from conf/config.ini,
    and delegates the actual cleaning to DAO.ServidoresDAO.
    """
    def __init__(self, master=None):
        self.fonte = ("Verdana", "8")
        # Six stacked containers: title, path row, checkboxes, and one
        # status label per cleaning target.
        self.container1 = Frame(master)
        self.container1["pady"] = 10
        self.container1.pack()
        self.container2 = Frame(master)
        self.container2["padx"] = 10
        self.container2["pady"] = 5
        self.container2.pack()
        self.container3 = Frame(master)
        self.container3["padx"] = 20
        self.container3["pady"] = 5
        self.container3.pack()
        self.container4 = Frame(master)
        self.container4["pady"] = 15
        self.container4.pack()
        self.container5 = Frame(master)
        self.container5["pady"] = 15
        self.container5.pack()
        self.container6 = Frame(master)
        self.container6["pady"] = 15
        self.container6.pack()
        self.titulo = Label(self.container1, text="Informe os dados")
        self.titulo["font"] = ("Calibri", "9", "bold")
        self.titulo.pack ()
        self.lblpath = Label(self.container2, text="Path:", font=self.fonte, width=10)
        self.lblpath.pack(side=LEFT)
        self.txtpath = Entry(self.container2)
        self.txtpath["width"] = 40
        self.txtpath["font"] = self.fonte
        self.txtpath.pack(side=LEFT)
        self.btnLimpar = Button(self.container2, text="Limpar", font=self.fonte, width=10)
        self.btnLimpar["command"] = self.clear
        self.btnLimpar.pack(side=RIGHT)
        # NOTE(review): self.chkTmp is rebound for each of the three
        # checkboxes below, so only the last widget remains referenced;
        # all three still display because pack() was already called.
        self.tmp = BooleanVar()
        self.chkTmp = Checkbutton(self.container3, text="Tmp", variable=self.tmp)
        self.chkTmp.pack (side=LEFT)
        self.tmp.set(True)
        self.log = BooleanVar()
        self.chkTmp = Checkbutton(self.container3, text="Log", variable=self.log)
        self.chkTmp.pack (side=LEFT)
        self.log.set(True)
        self.deploy = BooleanVar()
        self.chkTmp = Checkbutton(self.container3, text="Deploy", variable=self.deploy)
        self.chkTmp.pack (side=LEFT)
        self.deploy.set(True)
        self.lblmsgTmp = Label(self.container4, text="")
        self.lblmsgTmp["font"] = ("Verdana", "9", "italic")
        self.lblmsgTmp.pack()
        self.lblmsgLog = Label(self.container5, text="")
        self.lblmsgLog["font"] = ("Verdana", "9", "italic")
        self.lblmsgLog.pack()
        self.lblmsgDeploy = Label(self.container6, text="")
        self.lblmsgDeploy["font"] = ("Verdana", "9", "italic")
        self.lblmsgDeploy.pack()
        self.buscarPreferencial()
    ## Clean the JBoss directories
    def clear(self):
        """Clean the selected targets and show each result in its status label."""
        dao = ServidoresDAO()
        dao.fullpath = self.txtpath.get()
        ## TEMP FILES
        if self.tmp.get():
            self.lblmsgTmp["text"] = dao.clearTmps()
        else:
            self.lblmsgTmp["text"] = ""
        ## LOGS
        if self.log.get():
            self.lblmsgLog["text"] = dao.clearLogs()
        else:
            self.lblmsgLog["text"] = ""
        ##DEPLOYS
        if self.deploy.get():
            self.lblmsgDeploy["text"] = dao.clearDeploys()
        else:
            self.lblmsgDeploy["text"] = ""
    ## Start with the directory configured as DEFAULT in the config.ini file
    def buscarPreferencial(self):
        """Pre-fill the path entry from the [dir] default key of conf/config.ini."""
        cfg = ConfigParser.ConfigParser()
        cfg.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'conf', 'config.ini'))
        pathDefault = cfg.get('dir', 'default')
        self.txtpath.delete(0, END)
        self.txtpath.insert(INSERT, pathDefault)
# Launch the GUI only when executed as a script, not when imported
# (the original ran unconditionally at import time).
if __name__ == '__main__':
    root = Tk()
    root.wm_title("Clean JBoss - @RodolfoCruzTI")
    Application(root)
    root.mainloop()
| [
"os.path.dirname",
"DAO.ServidoresDAO",
"ConfigParser.ConfigParser"
] | [((2590, 2605), 'DAO.ServidoresDAO', 'ServidoresDAO', ([], {}), '()\n', (2603, 2605), False, 'from DAO import ServidoresDAO\n'), ((3271, 3298), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (3296, 3298), False, 'import ConfigParser\n'), ((3351, 3376), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3366, 3376), False, 'import os\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy2 Experiment Builder (v1.84.2),
on February 14, 2019, at 13:04
If you publish work using this script please cite the PsychoPy publications:
<NAME> (2007) PsychoPy - Psychophysics software in Python.
Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy.
Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
#relative files (condition files and stimuli pictures, in C:\Users\MiaoLi\Desktop\SCALab\Programming\Crowding_and_numerosity\setupExp_psychopy\Psychopybuilder\Crowding\Miao_exp_lilleLab\Exp1_short_olderPsychopy)
from __future__ import absolute_import, division
from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
os.chdir(_thisDir)
# Store info about the experiment session
expName = 'Crwdng_Nmrsty_older_runOnLab1' # from the Builder filename that created this script
expInfo = {u'handedness': ['Right handed', 'Left handed'],
u'participant': u'',
u'age': u'',
u'blockOrder': u'',
u'sex': ['Female','Male'],
u'group': ['1','2']}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data_Crwdng_Nmrsty1/group_%s_participant_%s_date_%s' % (expInfo['group'], expInfo['participant'], expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath=None,
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
# Start Code - component code to be run before the window creation
# Setup the Window
#win = visual.Window(
# size=(1024, 768), fullscr=True, screen=0,
# allowGUI=False, allowStencil=False,
# monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
# blendMode='avg', useFBO=True)
myMonitor= monitors.Monitor('CRT_Lille', width = 57, distance = 40.5)#TODO
myMonitor.setSizePix([1024, 768])
win = visual.Window(monitor=myMonitor,
size = [1024, 768],
screen =1,
units='pix',
fullscr = False,
allowGUI = False,
winType = 'pyglet',
color = (0,0,0))
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 100.0 # could not measure, so guess
#print(expInfo['frameRate'])
# Initialize components for Routine "instr1"
instr1Clock = core.Clock()
# Initialize components for Routine "fixation"
fixationClock = core.Clock()
# Initialize components for Routine "practice"
practiceClock = core.Clock()
p_img = visual.ImageStim(
win=win, name='p_img',
image='sin', mask=None,
ori=0, pos=(0, 0), size=None,
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
# Initialize components for Routine "endP"
endPClock = core.Clock()
# Initialize components for Routine "instr2"
instr2Clock = core.Clock()
# Initialize components for Routine "fixation"
fixationClock = core.Clock()
# Initialize components for Routine "trial"
trialClock = core.Clock()
image = visual.ImageStim(
win=win, name='image',
image='sin', mask=None,
ori=0, pos=(0, 0), size=None,
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
# Initialize components for Routine "break_3"
break_3Clock = core.Clock()
# Initialize components for Routine "thanks"
thanksClock = core.Clock()
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "instr1"-------
t = 0
instr1Clock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
# keep track of which components have finished
instr1Components = []
for thisComponent in instr1Components:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "instr1"-------
while continueRoutine:
# get current time
t = instr1Clock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
message1 = visual.TextStim(win, pos=[0,+30],units = 'pix')
message1.setText('Welcome to our experiment.')
message2 = visual.TextStim(win, pos=[0, 0],units = 'pix')
message2.setText('Please give your best esimation.')
message3 = visual.TextStim(win, pos=[0, -30], units = 'pix')
message3.setText('Hit spacebar to start practice.')
message1.draw()
message2.draw()
message3.draw()
win.flip()
event.waitKeys(keyList = ['space'])
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in instr1Components:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "instr1"-------
for thisComponent in instr1Components:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "instr1" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
p_trials = data.TrialHandler(nReps=5, method='random',
extraInfo=expInfo, originPath=-1,
trialList=[None],
seed=None, name='p_trials')
thisExp.addLoop(p_trials) # add the loop to the experiment
thisP_trial = p_trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisP_trial.rgb)
if thisP_trial != None:
for paramName in thisP_trial.keys():
exec(paramName + '= thisP_trial.' + paramName)
for thisP_trial in p_trials:
currentLoop = p_trials
# abbreviate parameter names if possible (e.g. rgb = thisP_trial.rgb)
if thisP_trial != None:
for paramName in thisP_trial.keys():
exec(paramName + '= thisP_trial.' + paramName)
# ------Prepare to start Routine "fixation"-------
t = 0
fixationClock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
# keep track of which components have finished
fixationComponents = []
for thisComponent in fixationComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "fixation"-------
while continueRoutine:
# get current time
t = fixationClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
fixation = visual.TextStim(win, color = (-1, -1, -1), bold = True, units = 'pix')
fixation.setText('+')
fixation.draw()
win.flip()
event.waitKeys(keyList = ['space'])
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in fixationComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "fixation"-------
for thisComponent in fixationComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "fixation" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "practice"-------
t = 0
practiceClock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
p_img.setImage(u'2_c_2_f_100_wS_0.4_eS_0.15811388300841897_0.15811388300841897_33.png')
key_resp_2 = event.BuilderKeyResponse()
# keep track of which components have finished
practiceComponents = [p_img, key_resp_2]
for thisComponent in practiceComponents:
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# -------Start Routine "practice"-------
while continueRoutine:
# get current time
t = practiceClock.getTime()
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *p_img* updates
if t >= 0.0 and p_img.status == NOT_STARTED:
# keep track of start time/frame for later
p_img.tStart = t
p_img.frameNStart = frameN # exact frame index
p_img.setAutoDraw(True)
frameRemains = 0.0 + 0.15- win.monitorFramePeriod * 0.75 # most of one frame period left
if p_img.status == STARTED and t >= frameRemains:
p_img.setAutoDraw(False)
# *key_resp_2* updates
if t >= 0.15 and key_resp_2.status == NOT_STARTED:
# keep track of start time/frame for later
key_resp_2.tStart = t
key_resp_2.frameNStart = frameN # exact frame index
key_resp_2.status = STARTED
# keyboard checking is just starting
win.callOnFlip(key_resp_2.clock.reset) # t=0 on next screen flip
event.clearEvents(eventType='keyboard')
if key_resp_2.status == STARTED:
# theseKeys = event.getKeys()
ptext = visual.TextStim(win, pos = [0, 0])
# theseKeys = event.getKeys(keyList=['y', 'n', 'left', 'right', 'space'])
theseKeysP = event.getKeys(keyList=['1','2','3','4','5','6','7','8','9','0','return', 'backspace','num_1','num_2','num_3','num_4','num_5','num_6','num_7','num_8','num_9','num_0'])
# check for quit:
if "escape" in theseKeysP:
endExpNow = True
if len(theseKeysP) > 0: # at least one key was pressed
if "backspace" in theseKeysP:
key_resp_2.keys=key_resp_2.keys[:-1]
key_resp_2.keys.extend([key for key in theseKeysP if key != "return" and key != "backspace"])
for n, i in enumerate(key_resp_2.keys):
if i =='num_1':
key_resp_2.keys[n] = '1'
elif i =='num_2':
key_resp_2.keys[n] = '2'
elif i =='num_3':
key_resp_2.keys[n] = '3'
elif i =='num_4':
key_resp_2.keys[n] = '4'
elif i =='num_5':
key_resp_2.keys[n] = '5'
elif i =='num_6':
key_resp_2.keys[n] = '6'
elif i =='num_7':
key_resp_2.keys[n] = '7'
elif i =='num_8':
key_resp_2.keys[n] = '8'
elif i =='num_9':
key_resp_2.keys[n] = '9'
elif i =='num_0':
key_resp_2.keys[n] = '0'
# Atext.setText("".join(key_resp_3.keys))
# convert the list of strings into a single string
key_str2 = "".join(key_resp_2.keys)
ptext.setText(key_str2)
ptext.draw()
win.flip()
# # event.waitKeys(5,keyList = ['return'])
core.wait(0.5)
if len(key_str2) !=0:
# then convert the string to a number
key_num2 = int(key_str2)
if "return" in theseKeysP:
# ptext.setText('')
# ptext.draw()
# win.flip()
# core.wait(0.5)
continueRoutine=False
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in practiceComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# check for quit (the Esc key)
if endExpNow or event.getKeys(keyList=["escape"]):
core.quit()
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "practice"-------
for thisComponent in practiceComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# check responses
if key_resp_2.keys in ['', [], None]: # No response was made
key_resp_2.keys=None
p_trials.addData('key_resp_2.keys',key_resp_2.keys)
if key_resp_2.keys != None: # we had a response
p_trials.addData('key_resp_2.rt', key_resp_2.rt)
# the Routine "practice" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
thisExp.nextEntry()
# completed 5 repeats of 'p_trials'
# ------Prepare to start Routine "endP"-------
# "endP" shows the end-of-practice message and waits for the spacebar
# before the real experiment begins.
t = 0
endPClock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
# keep track of which components have finished
endPComponents = []  # no Builder components; the text below is drawn manually
for thisComponent in endPComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# -------Start Routine "endP"-------
while continueRoutine:
    # get current time
    t = endPClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    # NOTE(review): these TextStims are re-created on every pass of the loop;
    # event.waitKeys below blocks, so in practice this body runs once per
    # routine, but hoisting the constructors above the loop would be cleaner.
    end_practice1 = visual.TextStim(win, pos=[0,+35],units = 'pix')
    end_practice1.setText('This is the end of practice')
    end_practice2 = visual.TextStim(win, pos=[0, 0], units = 'pix')
    end_practice2.setText('There are 10 blocks of the real experiment, you will see 3 reference images before each block.')
    end_practice3 = visual.TextStim(win, pos=[0, -35], units = 'pix')
    end_practice3.setText('Hit spacebar to start the real experiment.')
    end_practice1.draw()
    end_practice2.draw()
    end_practice3.draw()
    win.flip()
    event.waitKeys(keyList = ['space'])  # blocks until spacebar
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in endPComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "endP"-------
for thisComponent in endPComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "endP" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
# Blocks run in the fixed order listed in the participant-specific
# blockOrder CSV (method='sequential', one repeat).
blocks = data.TrialHandler(nReps=1, method='sequential',
    extraInfo=expInfo, originPath=-1,
    trialList=data.importConditions("blockOrder"+expInfo['blockOrder']+".csv"),
    seed=None, name='blocks')
thisExp.addLoop(blocks)  # add the loop to the experiment
thisBlock = blocks.trialList[0]  # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisBlock.rgb)
if thisBlock != None:
    for paramName in thisBlock.keys():
        exec(paramName + '= thisBlock.' + paramName)
for thisBlock in blocks:
    currentLoop = blocks
    # abbreviate parameter names if possible (e.g. rgb = thisBlock.rgb)
    # NOTE: exec() injects each condition column of the block file (e.g.
    # winsize, ref_image1..3, Number1..3) as a module-level variable that
    # the routines below read directly. Builder-generated pattern.
    if thisBlock != None:
        for paramName in thisBlock.keys():
            exec(paramName + '= thisBlock.' + paramName)
    # ------Prepare to start Routine "instr2"-------
    # "instr2" shows the three reference displays (each flashed for 150 ms)
    # together with their disk counts before this block's trials start.
    t = 0
    instr2Clock.reset() # clock
    frameN = -1
    continueRoutine = True
    # update component parameters for each repeat
    # keep track of which components have finished
    instr2Components = []  # no Builder components; everything is drawn manually
    for thisComponent in instr2Components:
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED
    # -------Start Routine "instr2"-------
    while continueRoutine:
        # get current time
        t = instr2Clock.getTime()
        frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame
        fix = visual.TextStim(win, pos = [0, 0], bold = True, units = 'pix')
        block_text = visual.TextStim(win, pos=[0, 0], units = 'pix')
        block_text.setText('Fixate to the center of screen and press spacebar to see the reference display.')
        block_text.draw()
        win.flip()
        event.waitKeys(keyList = ['space'])
        fix.setText('+')
        fix.setColor(u'black')
        fix.draw()
        win.flip()
        event.waitKeys(keyList = ['space'])
        # reference display 1: flash the image for 150 ms, then report its count
        image_ref = visual.ImageStim(win, image = ref_image1, units = 'pix')
        image_ref.draw()
        win.flip()
        core.wait(0.15)
        image_ref_text = visual.TextStim(win, pos=[0, 15], units ='pix')
        image_ref_text2 = visual.TextStim(win, pos=[0, -15], units = 'pix')
        image_ref_text3 = visual.TextStim(win, pos=[0, 0], units = 'pix')
        image_ref_text.setText('The number of the reference disks is %s:' %(int(Number1)))
        image_ref_text2.setText('Press C to continue')
        image_ref_text.draw()
        image_ref_text2.draw()
        win.flip()
        event.waitKeys(keyList = ['c'])
        # image_ref_text2.setText(Number1)
        image_ref_text3.setText('Fixate to the center and press spacebar to see another reference display.')
        # image_ref_text.draw()
        # image_ref_text2.draw()
        image_ref_text3.draw()
        win.flip()
        event.waitKeys(keyList = ['space'])
        # block_text.setText('+')
        # block_text.setColor(u'black')
        # block_text.draw()
        fix.draw()
        win.flip()
        event.waitKeys(keyList = ['space'])
image_ref2 = visual.ImageStim(win, image = ref_image2, units = 'pix')
image_ref.draw()
win.flip()
core.wait(0.15)
        image_ref_text.setText('The number of the reference disks is %s:' %(int(Number2)))
        # image_ref_text2.setText(Number2)
        # image_ref_text2.setText('Press spacebar to continue')
        image_ref_text.draw()
        image_ref_text2.draw()
        win.flip()
        event.waitKeys(keyList = ['c'])
        image_ref_text3.draw()
        win.flip()
        event.waitKeys(keyList = ['space'])
        fix.draw()
        win.flip()
        event.waitKeys(keyList = ['space'])
        # reference display 3: flash the image for 150 ms, then report its count
        image_ref3 = visual.ImageStim(win, image = ref_image3, units = 'pix')
        image_ref3.draw()
        win.flip()
        core.wait(0.15)
        image_ref_text.setText('The number of the reference disks is %s:' %(int(Number3)))
        image_ref_text.draw()
        image_ref_text2.draw()
        win.flip()
        event.waitKeys(keyList = ['c'])
        image_ref_text3.setText('Press spacebar to start the real experiment.')
        image_ref_text3.draw()
        win.flip()
        event.waitKeys(keyList = ['space'])
        # check if all components have finished
        if not continueRoutine: # a component has requested a forced-end of Routine
            break
        continueRoutine = False # will revert to True if at least one component still running
        for thisComponent in instr2Components:
            if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                continueRoutine = True
                break # at least one component has not yet finished
        # check for quit (the Esc key)
        if endExpNow or event.getKeys(keyList=["escape"]):
            core.quit()
        # refresh the screen
        if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
            win.flip()
    # -------Ending Routine "instr2"-------
    for thisComponent in instr2Components:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
    # the Routine "instr2" was not non-slip safe, so reset the non-slip timer
    routineTimer.reset()
    # set up handler to look after randomisation of conditions etc
    # One randomised pass through the condition file named by this block's
    # 'winsize' parameter (injected from the block file via exec above).
    trials = data.TrialHandler(nReps=1, method='random',
        extraInfo=expInfo, originPath=-1,
        trialList=data.importConditions(winsize),
        seed=None, name='trials')
    thisExp.addLoop(trials)  # add the loop to the experiment
    thisTrial = trials.trialList[0]  # so we can initialise stimuli with some values
    # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
    if thisTrial != None:
        for paramName in thisTrial.keys():
            exec(paramName + '= thisTrial.' + paramName)
    for thisTrial in trials:
        currentLoop = trials
        # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
        # NOTE: exec() injects each trial's condition columns (e.g. imageFile)
        # as module-level variables read by the routines below.
        if thisTrial != None:
            for paramName in thisTrial.keys():
                exec(paramName + '= thisTrial.' + paramName)
        # ------Prepare to start Routine "fixation"-------
        # Self-paced fixation: draw a '+' and wait for the spacebar before
        # the trial's stimulus appears.
        t = 0
        fixationClock.reset() # clock
        frameN = -1
        continueRoutine = True
        # update component parameters for each repeat
        # keep track of which components have finished
        fixationComponents = []  # no Builder components; the '+' is drawn manually
        for thisComponent in fixationComponents:
            if hasattr(thisComponent, 'status'):
                thisComponent.status = NOT_STARTED
        # -------Start Routine "fixation"-------
        while continueRoutine:
            # get current time
            t = fixationClock.getTime()
            frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
            # update/draw components on each frame
            fixation = visual.TextStim(win, color = (-1, -1, -1), bold = True, units = 'pix')
            fixation.setText('+')
            fixation.draw()
            win.flip()
            event.waitKeys(keyList = ['space'])  # blocks until spacebar
            # check if all components have finished
            if not continueRoutine: # a component has requested a forced-end of Routine
                break
            continueRoutine = False # will revert to True if at least one component still running
            for thisComponent in fixationComponents:
                if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                    continueRoutine = True
                    break # at least one component has not yet finished
            # check for quit (the Esc key)
            if endExpNow or event.getKeys(keyList=["escape"]):
                core.quit()
            # refresh the screen
            if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
                win.flip()
        # -------Ending Routine "fixation"-------
        for thisComponent in fixationComponents:
            if hasattr(thisComponent, "setAutoDraw"):
                thisComponent.setAutoDraw(False)
        # the Routine "fixation" was not non-slip safe, so reset the non-slip timer
        routineTimer.reset()
        # ------Prepare to start Routine "trial"-------
        # The trial shows the stimulus image for 150 ms, then collects a
        # typed numeric estimate (handled in the key-response section below).
        t = 0
        trialClock.reset() # clock
        frameN = -1
        continueRoutine = True
        # update component parameters for each repeat
        image.setImage(imageFile)  # imageFile comes from this trial's condition row
        key_resp_3 = event.BuilderKeyResponse()  # fresh key buffer each trial
        # keep track of which components have finished
        trialComponents = [image, key_resp_3]
        for thisComponent in trialComponents:
            if hasattr(thisComponent, 'status'):
                thisComponent.status = NOT_STARTED
        # -------Start Routine "trial"-------
        while continueRoutine:
            # get current time
            t = trialClock.getTime()
            frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
            # update/draw components on each frame
            # *image* updates: shown from t=0 for 0.15 s
            if t >= 0.0 and image.status == NOT_STARTED:
                # keep track of start time/frame for later
                image.tStart = t
                image.frameNStart = frameN # exact frame index
                image.setAutoDraw(True)
            frameRemains = 0.0 + 0.15- win.monitorFramePeriod * 0.75 # most of one frame period left
            if image.status == STARTED and t >= frameRemains:
                image.setAutoDraw(False)
            # *key_resp_3* updates: keyboard opens once the image is gone (t >= 0.15)
            if t >= 0.15 and key_resp_3.status == NOT_STARTED:
                # keep track of start time/frame for later
                key_resp_3.tStart = t
                key_resp_3.frameNStart = frameN # exact frame index
                key_resp_3.status = STARTED
                # keyboard checking is just starting
                win.callOnFlip(key_resp_3.clock.reset) # t=0 on next screen flip
                event.clearEvents(eventType='keyboard')
            if key_resp_3.status == STARTED:
                # theseKeys = event.getKeys()
                # Text stim used to echo the digits typed so far back to the
                # participant.
                Atext=visual.TextStim(win)
                # only digits, numpad digits, return and backspace are collected
                theseKeys = event.getKeys(keyList=['1','2','3','4','5','6','7','8','9','0','return', 'backspace','num_1','num_2','num_3','num_4','num_5','num_6','num_7','num_8','num_9','num_0'])
                # check for quit:
                # NOTE(review): this branch is unreachable — 'escape' is not in
                # the keyList above, so it can never appear in theseKeys. The
                # working escape check is the endExpNow/getKeys test further down.
                if "escape" in theseKeys:
                    endExpNow = True
                if len(theseKeys) > 0: # at least one key was pressed
                    if "backspace" in theseKeys:
                        key_resp_3.keys=key_resp_3.keys[:-1]  # delete last digit
                    #key_resp_3.rt = key_resp_3.clock.getTime()
                    key_resp_3.keys.extend([key for key in theseKeys if key != "return" and key != "backspace"])
                    # map numpad key names onto their plain-digit equivalents
                    for n, i in enumerate(key_resp_3.keys):
                        if i =='num_1':
                            key_resp_3.keys[n] = '1'
                        elif i =='num_2':
                            key_resp_3.keys[n] = '2'
                        elif i =='num_3':
                            key_resp_3.keys[n] = '3'
                        elif i =='num_4':
                            key_resp_3.keys[n] = '4'
                        elif i =='num_5':
                            key_resp_3.keys[n] = '5'
                        elif i =='num_6':
                            key_resp_3.keys[n] = '6'
                        elif i =='num_7':
                            key_resp_3.keys[n] = '7'
                        elif i =='num_8':
                            key_resp_3.keys[n] = '8'
                        elif i =='num_9':
                            key_resp_3.keys[n] = '9'
                        elif i =='num_0':
                            key_resp_3.keys[n] = '0'
                    # Atext.setText("".join(key_resp_3.keys))
                    # convert the list of strings into a single string
                    key_str = "".join(key_resp_3.keys)
                    Atext.setText(key_str)
                    Atext.draw()
                    win.flip()
                    # # event.waitKeys(5,keyList = ['return'])
                    # NOTE(review): this blocks the frame loop for 0.5 s on
                    # every keypress — confirm that delay is intentional.
                    core.wait(0.5)
                    if len(key_str) !=0:
                        # then convert the string to a number
                        key_num = int(key_str)
                    if "return" in theseKeys:
                        key_resp_3.rt = key_resp_3.clock.getTime()
                        Atext.setText('')
                        Atext.draw()
                        # NOTE(review): no win.flip() after the draw, so the
                        # cleared text is never actually presented — confirm.
                        core.wait(0.5)
                        continueRoutine=False
            # check if all components have finished
            if not continueRoutine: # a component has requested a forced-end of Routine
                break
            continueRoutine = False # will revert to True if at least one component still running
            for thisComponent in trialComponents:
                if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                    continueRoutine = True
                    break # at least one component has not yet finished
            # check for quit (the Esc key)
            if endExpNow or event.getKeys(keyList=["escape"]):
                core.quit()
            # refresh the screen
            if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
                win.flip()
        # -------Ending Routine "trial"-------
        for thisComponent in trialComponents:
            if hasattr(thisComponent, "setAutoDraw"):
                thisComponent.setAutoDraw(False)
# check responses
if key_resp_3.keys in ['', [], None]: # No response was made
key_resp_3.keys=None
trials.addData('key_resp_3.keys',key_num)
if key_resp_3.keys != None: # we had a response
trials.addData('key_resp_3.rt', key_resp_3.rt)
# the Routine "trial" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
        # ------Prepare to start Routine "break_3"-------
        # Optional rest screen; only shown after the trial with index 24
        # (i.e. the 25th trial of the block).
        t = 0
        break_3Clock.reset() # clock
        frameN = -1
        continueRoutine = True
        # update component parameters for each repeat
        if trials.thisN != 24: #TODO
            continueRoutine = False  # skip the break on all other trials
        # keep track of which components have finished
        break_3Components = []
        for thisComponent in break_3Components:
            if hasattr(thisComponent, 'status'):
                thisComponent.status = NOT_STARTED
        # -------Start Routine "break_3"-------
        while continueRoutine:
            # get current time
            t = break_3Clock.getTime()
            frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
            # update/draw components on each frame
            break_text2 = visual.TextStim(win, text = 'Take a short break. Press spacebar to continue.', pos=[0, 0],units = 'pix')
            break_text2.draw()
            win.flip()
            event.waitKeys(keyList = ['space'])  # blocks until spacebar
            # check if all components have finished
            if not continueRoutine: # a component has requested a forced-end of Routine
                break
            continueRoutine = False # will revert to True if at least one component still running
            for thisComponent in break_3Components:
                if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                    continueRoutine = True
                    break # at least one component has not yet finished
            # check for quit (the Esc key)
            if endExpNow or event.getKeys(keyList=["escape"]):
                core.quit()
            # refresh the screen
            if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
                win.flip()
        # -------Ending Routine "break_3"-------
        for thisComponent in break_3Components:
            if hasattr(thisComponent, "setAutoDraw"):
                thisComponent.setAutoDraw(False)
        # the Routine "break_3" was not non-slip safe, so reset the non-slip timer
        routineTimer.reset()
        thisExp.nextEntry()
    # completed 1 repeats of 'trials'
    thisExp.nextEntry()
# completed 1 repeats of 'blocks'
# ------Prepare to start Routine "thanks"-------
# Final screen: thank the participant and wait for the 'n' key to exit.
t = 0
thanksClock.reset() # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
# keep track of which components have finished
thanksComponents = []  # no Builder components; the text below is drawn manually
for thisComponent in thanksComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED
# -------Start Routine "thanks"-------
while continueRoutine:
    # get current time
    t = thanksClock.getTime()
    frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame
    thankmesg1 = visual.TextStim(win, pos=[0,+35],units = 'pix')
    thankmesg1.setText('This is the end of the experiment.')
    thankmesg2 = visual.TextStim(win, pos=[0, 0], units = 'pix')
    thankmesg2.setText('Thank you for your participation.')
    thankmesg1.draw()
    thankmesg2.draw()
    win.flip()
    event.waitKeys(keyList = ['n'])  # experimenter presses 'n' to dismiss
    # check if all components have finished
    if not continueRoutine: # a component has requested a forced-end of Routine
        break
    continueRoutine = False # will revert to True if at least one component still running
    for thisComponent in thanksComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break # at least one component has not yet finished
    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()
    # refresh the screen
    if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
        win.flip()
# -------Ending Routine "thanks"-------
for thisComponent in thanksComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "thanks" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# Save the data explicitly, flush the log, and tear everything down.
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort() # or data files will save again on exit
win.close()
core.quit()
| [
"psychopy.core.quit",
"psychopy.monitors.Monitor",
"psychopy.core.wait",
"psychopy.data.TrialHandler",
"psychopy.logging.console.setLevel",
"psychopy.gui.DlgFromDict",
"sys.getfilesystemencoding",
"psychopy.data.importConditions",
"psychopy.logging.LogFile",
"psychopy.event.waitKeys",
"psychopy.... | [((1516, 1534), 'os.chdir', 'os.chdir', (['_thisDir'], {}), '(_thisDir)\n', (1524, 1534), False, 'import os\n'), ((1900, 1950), 'psychopy.gui.DlgFromDict', 'gui.DlgFromDict', ([], {'dictionary': 'expInfo', 'title': 'expName'}), '(dictionary=expInfo, title=expName)\n', (1915, 1950), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((2028, 2045), 'psychopy.data.getDateStr', 'data.getDateStr', ([], {}), '()\n', (2043, 2045), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((2408, 2577), 'psychopy.data.ExperimentHandler', 'data.ExperimentHandler', ([], {'name': 'expName', 'version': '""""""', 'extraInfo': 'expInfo', 'runtimeInfo': 'None', 'originPath': 'None', 'savePickle': '(True)', 'saveWideText': '(True)', 'dataFileName': 'filename'}), "(name=expName, version='', extraInfo=expInfo,\n runtimeInfo=None, originPath=None, savePickle=True, saveWideText=True,\n dataFileName=filename)\n", (2430, 2577), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((2638, 2691), 'psychopy.logging.LogFile', 'logging.LogFile', (["(filename + '.log')"], {'level': 'logging.EXP'}), "(filename + '.log', level=logging.EXP)\n", (2653, 2691), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((2690, 2731), 'psychopy.logging.console.setLevel', 'logging.console.setLevel', (['logging.WARNING'], {}), '(logging.WARNING)\n', (2714, 2731), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((3154, 3208), 'psychopy.monitors.Monitor', 'monitors.Monitor', (['"""CRT_Lille"""'], {'width': '(57)', 'distance': '(40.5)'}), "('CRT_Lille', width=57, distance=40.5)\n", (3170, 3208), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((3258, 3401), 
'psychopy.visual.Window', 'visual.Window', ([], {'monitor': 'myMonitor', 'size': '[1024, 768]', 'screen': '(1)', 'units': '"""pix"""', 'fullscr': '(False)', 'allowGUI': '(False)', 'winType': '"""pyglet"""', 'color': '(0, 0, 0)'}), "(monitor=myMonitor, size=[1024, 768], screen=1, units='pix',\n fullscr=False, allowGUI=False, winType='pyglet', color=(0, 0, 0))\n", (3271, 3401), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((3890, 3902), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (3900, 3902), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((3968, 3980), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (3978, 3980), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((4046, 4058), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (4056, 4058), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((4067, 4289), 'psychopy.visual.ImageStim', 'visual.ImageStim', ([], {'win': 'win', 'name': '"""p_img"""', 'image': '"""sin"""', 'mask': 'None', 'ori': '(0)', 'pos': '(0, 0)', 'size': 'None', 'color': '[1, 1, 1]', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'flipHoriz': '(False)', 'flipVert': '(False)', 'texRes': '(128)', 'interpolate': '(True)', 'depth': '(0.0)'}), "(win=win, name='p_img', image='sin', mask=None, ori=0, pos=\n (0, 0), size=None, color=[1, 1, 1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False, texRes=128, interpolate=True, depth=0.0)\n", (4083, 4289), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((4360, 4372), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (4370, 4372), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((4434, 4446), 'psychopy.core.Clock', 'core.Clock', ([], {}), 
'()\n', (4444, 4446), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((4512, 4524), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (4522, 4524), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((4584, 4596), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (4594, 4596), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((4605, 4827), 'psychopy.visual.ImageStim', 'visual.ImageStim', ([], {'win': 'win', 'name': '"""image"""', 'image': '"""sin"""', 'mask': 'None', 'ori': '(0)', 'pos': '(0, 0)', 'size': 'None', 'color': '[1, 1, 1]', 'colorSpace': '"""rgb"""', 'opacity': '(1)', 'flipHoriz': '(False)', 'flipVert': '(False)', 'texRes': '(128)', 'interpolate': '(True)', 'depth': '(0.0)'}), "(win=win, name='image', image='sin', mask=None, ori=0, pos=\n (0, 0), size=None, color=[1, 1, 1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False, texRes=128, interpolate=True, depth=0.0)\n", (4621, 4827), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((4904, 4916), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (4914, 4916), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((4978, 4990), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (4988, 4990), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((5034, 5046), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (5044, 5046), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((5108, 5129), 'psychopy.core.CountdownTimer', 'core.CountdownTimer', ([], {}), '()\n', (5127, 5129), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((7304, 7432), 
'psychopy.data.TrialHandler', 'data.TrialHandler', ([], {'nReps': '(5)', 'method': '"""random"""', 'extraInfo': 'expInfo', 'originPath': '(-1)', 'trialList': '[None]', 'seed': 'None', 'name': '"""p_trials"""'}), "(nReps=5, method='random', extraInfo=expInfo, originPath=-\n 1, trialList=[None], seed=None, name='p_trials')\n", (7321, 7432), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((36926, 36941), 'psychopy.logging.flush', 'logging.flush', ([], {}), '()\n', (36939, 36941), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((37049, 37060), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (37058, 37060), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((1487, 1514), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ([], {}), '()\n', (1512, 1514), False, 'import sys\n'), ((1975, 1986), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (1984, 1986), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((5798, 5845), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, +30]', 'units': '"""pix"""'}), "(win, pos=[0, +30], units='pix')\n", (5813, 5845), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((5912, 5957), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, 0]', 'units': '"""pix"""'}), "(win, pos=[0, 0], units='pix')\n", (5927, 5957), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((6031, 6078), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, -30]', 'units': '"""pix"""'}), "(win, pos=[0, -30], units='pix')\n", (6046, 6078), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((6216, 6249), 
'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (6230, 6249), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((10328, 10354), 'psychopy.event.BuilderKeyResponse', 'event.BuilderKeyResponse', ([], {}), '()\n', (10352, 10354), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((16320, 16367), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, +35]', 'units': '"""pix"""'}), "(win, pos=[0, +35], units='pix')\n", (16335, 16367), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((16445, 16490), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, 0]', 'units': '"""pix"""'}), "(win, pos=[0, 0], units='pix')\n", (16460, 16490), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((16637, 16684), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, -35]', 'units': '"""pix"""'}), "(win, pos=[0, -35], units='pix')\n", (16652, 16684), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((16853, 16886), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (16867, 16886), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((35478, 35525), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, +35]', 'units': '"""pix"""'}), "(win, pos=[0, +35], units='pix')\n", (35493, 35525), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((35604, 35649), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, 0]', 'units': '"""pix"""'}), "(win, pos=[0, 0], units='pix')\n", (35619, 35649), False, 'from psychopy import locale_setup, 
gui, visual, core, data, event, logging, sound, monitors\n'), ((35780, 35809), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['n']"}), "(keyList=['n'])\n", (35794, 35809), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((6772, 6805), 'psychopy.event.getKeys', 'event.getKeys', ([], {'keyList': "['escape']"}), "(keyList=['escape'])\n", (6785, 6805), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((6815, 6826), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (6824, 6826), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((8748, 8812), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'color': '(-1, -1, -1)', 'bold': '(True)', 'units': '"""pix"""'}), "(win, color=(-1, -1, -1), bold=True, units='pix')\n", (8763, 8812), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((8900, 8933), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (8914, 8933), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((17407, 17440), 'psychopy.event.getKeys', 'event.getKeys', ([], {'keyList': "['escape']"}), "(keyList=['escape'])\n", (17420, 17440), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((17450, 17461), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (17459, 17461), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((18032, 18100), 'psychopy.data.importConditions', 'data.importConditions', (["('blockOrder' + expInfo['blockOrder'] + '.csv')"], {}), "('blockOrder' + expInfo['blockOrder'] + '.csv')\n", (18053, 18100), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, 
monitors\n'), ((19390, 19446), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, 0]', 'bold': '(True)', 'units': '"""pix"""'}), "(win, pos=[0, 0], bold=True, units='pix')\n", (19405, 19446), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((19474, 19519), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, 0]', 'units': '"""pix"""'}), "(win, pos=[0, 0], units='pix')\n", (19489, 19519), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((19685, 19718), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (19699, 19718), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((19832, 19865), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (19846, 19865), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((19897, 19949), 'psychopy.visual.ImageStim', 'visual.ImageStim', (['win'], {'image': 'ref_image1', 'units': '"""pix"""'}), "(win, image=ref_image1, units='pix')\n", (19913, 19949), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((20006, 20021), 'psychopy.core.wait', 'core.wait', (['(0.15)'], {}), '(0.15)\n', (20015, 20021), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((20065, 20111), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, 15]', 'units': '"""pix"""'}), "(win, pos=[0, 15], units='pix')\n", (20080, 20111), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((20139, 20186), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, -15]', 'units': '"""pix"""'}), "(win, pos=[0, -15], units='pix')\n", (20154, 
20186), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((20215, 20260), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, 0]', 'units': '"""pix"""'}), "(win, pos=[0, 0], units='pix')\n", (20230, 20260), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((20497, 20526), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['c']"}), "(keyList=['c'])\n", (20511, 20526), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((20804, 20837), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (20818, 20837), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((20988, 21021), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (21002, 21021), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((21063, 21115), 'psychopy.visual.ImageStim', 'visual.ImageStim', (['win'], {'image': 'ref_image2', 'units': '"""pix"""'}), "(win, image=ref_image2, units='pix')\n", (21079, 21115), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((21172, 21187), 'psychopy.core.wait', 'core.wait', (['(0.15)'], {}), '(0.15)\n', (21181, 21187), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((21492, 21521), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['c']"}), "(keyList=['c'])\n", (21506, 21521), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((21591, 21624), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (21605, 21624), False, 'from psychopy import locale_setup, 
gui, visual, core, data, event, logging, sound, monitors\n'), ((21682, 21715), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (21696, 21715), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((21748, 21800), 'psychopy.visual.ImageStim', 'visual.ImageStim', (['win'], {'image': 'ref_image3', 'units': '"""pix"""'}), "(win, image=ref_image3, units='pix')\n", (21764, 21800), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((21858, 21873), 'psychopy.core.wait', 'core.wait', (['(0.15)'], {}), '(0.15)\n', (21867, 21873), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((22062, 22091), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['c']"}), "(keyList=['c'])\n", (22076, 22091), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((22241, 22274), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (22255, 22274), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((26689, 26715), 'psychopy.event.BuilderKeyResponse', 'event.BuilderKeyResponse', ([], {}), '()\n', (26713, 26715), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((36332, 36365), 'psychopy.event.getKeys', 'event.getKeys', ([], {'keyList': "['escape']"}), "(keyList=['escape'])\n", (36345, 36365), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((36375, 36386), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (36384, 36386), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((1453, 1478), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), 
'(__file__)\n', (1468, 1478), False, 'import os\n'), ((9506, 9539), 'psychopy.event.getKeys', 'event.getKeys', ([], {'keyList': "['escape']"}), "(keyList=['escape'])\n", (9519, 9539), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((9553, 9564), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (9562, 9564), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((11752, 11791), 'psychopy.event.clearEvents', 'event.clearEvents', ([], {'eventType': '"""keyboard"""'}), "(eventType='keyboard')\n", (11769, 11791), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((11894, 11926), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'pos': '[0, 0]'}), '(win, pos=[0, 0])\n', (11909, 11926), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((12040, 12234), 'psychopy.event.getKeys', 'event.getKeys', ([], {'keyList': "['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'return', 'backspace',\n 'num_1', 'num_2', 'num_3', 'num_4', 'num_5', 'num_6', 'num_7', 'num_8',\n 'num_9', 'num_0']"}), "(keyList=['1', '2', '3', '4', '5', '6', '7', '8', '9', '0',\n 'return', 'backspace', 'num_1', 'num_2', 'num_3', 'num_4', 'num_5',\n 'num_6', 'num_7', 'num_8', 'num_9', 'num_0'])\n", (12053, 12234), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((14855, 14888), 'psychopy.event.getKeys', 'event.getKeys', ([], {'keyList': "['escape']"}), "(keyList=['escape'])\n", (14868, 14888), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((14902, 14913), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (14911, 14913), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((22845, 22878), 'psychopy.event.getKeys', 
'event.getKeys', ([], {'keyList': "['escape']"}), "(keyList=['escape'])\n", (22858, 22878), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((22892, 22903), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (22901, 22903), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((23544, 23574), 'psychopy.data.importConditions', 'data.importConditions', (['winsize'], {}), '(winsize)\n', (23565, 23574), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((25025, 25089), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'color': '(-1, -1, -1)', 'bold': '(True)', 'units': '"""pix"""'}), "(win, color=(-1, -1, -1), bold=True, units='pix')\n", (25040, 25089), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((25193, 25226), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (25207, 25226), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((33331, 33436), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'text': '"""Take a short break. Press spacebar to continue."""', 'pos': '[0, 0]', 'units': '"""pix"""'}), "(win, text='Take a short break. 
Press spacebar to continue.',\n pos=[0, 0], units='pix')\n", (33346, 33436), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((33502, 33535), 'psychopy.event.waitKeys', 'event.waitKeys', ([], {'keyList': "['space']"}), "(keyList=['space'])\n", (33516, 33535), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((13906, 13920), 'psychopy.core.wait', 'core.wait', (['(0.5)'], {}), '(0.5)\n', (13915, 13920), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((25847, 25880), 'psychopy.event.getKeys', 'event.getKeys', ([], {'keyList': "['escape']"}), "(keyList=['escape'])\n", (25860, 25880), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((25898, 25909), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (25907, 25909), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((28229, 28268), 'psychopy.event.clearEvents', 'event.clearEvents', ([], {'eventType': '"""keyboard"""'}), "(eventType='keyboard')\n", (28246, 28268), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((28381, 28401), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {}), '(win)\n', (28396, 28401), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((28430, 28624), 'psychopy.event.getKeys', 'event.getKeys', ([], {'keyList': "['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'return', 'backspace',\n 'num_1', 'num_2', 'num_3', 'num_4', 'num_5', 'num_6', 'num_7', 'num_8',\n 'num_9', 'num_0']"}), "(keyList=['1', '2', '3', '4', '5', '6', '7', '8', '9', '0',\n 'return', 'backspace', 'num_1', 'num_2', 'num_3', 'num_4', 'num_5',\n 'num_6', 'num_7', 'num_8', 'num_9', 'num_0'])\n", (28443, 28624), False, 'from psychopy import 
locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((31615, 31648), 'psychopy.event.getKeys', 'event.getKeys', ([], {'keyList': "['escape']"}), "(keyList=['escape'])\n", (31628, 31648), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((31666, 31677), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (31675, 31677), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((34155, 34188), 'psychopy.event.getKeys', 'event.getKeys', ([], {'keyList': "['escape']"}), "(keyList=['escape'])\n", (34168, 34188), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((34206, 34217), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (34215, 34217), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((30552, 30566), 'psychopy.core.wait', 'core.wait', (['(0.5)'], {}), '(0.5)\n', (30561, 30566), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n'), ((30939, 30953), 'psychopy.core.wait', 'core.wait', (['(0.5)'], {}), '(0.5)\n', (30948, 30953), False, 'from psychopy import locale_setup, gui, visual, core, data, event, logging, sound, monitors\n')] |
# Flask entry point for the "dbtify" music service: wires Flask to a local
# MySQL server and opens one shared connection/cursor used by every route.
from flask import Flask, render_template, request, redirect, url_for
from flaskext.mysql import MySQL
app = Flask(__name__)
mysql = MySQL()
# NOTE(review): credentials are hardcoded; '<PASSWORD>' looks like a scrubbed
# placeholder -- these should come from environment/config, not source.
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = '<PASSWORD>'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
app.config['MYSQL_DATABASE_DB'] = 'dbtify'
mysql.init_app(app)
# Single module-level connection and cursor shared by every request handler.
# NOTE(review): a shared global cursor is not safe under concurrent requests.
conn = mysql.connect()
cursor = conn.cursor()
def create_DB():
    """Create the ``dbtify`` database if it is missing, then select it.

    Runs two statements against the shared module-level cursor and commits
    after each: ``CREATE DATABASE IF NOT EXISTS`` followed by ``USE``.
    """
    for statement in ("CREATE DATABASE IF NOT EXISTS dbtify;", "USE dbtify;"):
        cursor.execute(statement)
        conn.commit()
# Tables that make up the dbtify schema; each has a matching DDL script
# under ./procedures/<table>.sql.
table_names = ["listener", "artist", "album", "song", "song_artist", "listener_song", "listener_album"]

def create_tables():
    """Create every schema table that does not already exist.

    For each table name, probe ``information_schema.tables``; when the probe
    returns no rows, run the table's DDL script and commit.
    """
    schema = app.config['MYSQL_DATABASE_DB']
    probe = "SELECT * FROM information_schema.tables WHERE table_schema = '{}' AND table_name = '{}' LIMIT 1;"
    for table in table_names:
        if cursor.execute(probe.format(schema, table)) != 0:
            continue  # table already present
        with open("./procedures/{}.sql".format(table), "r") as ddl:
            cursor.execute(ddl.read())
            conn.commit()
def delete_DB():
    """Drop the entire ``dbtify`` database if it exists (destructive reset)."""
    cursor.execute("DROP DATABASE IF EXISTS dbtify;")
    conn.commit()
@app.route("/")
def main():
    """Root URL: redirect the visitor to the user-type chooser page."""
    target = url_for('usertype')
    return redirect(target)
@app.route('/initDB')
def initDB():
    """Ensure all schema tables exist, then render the user-type page."""
    create_tables()
    return render_template("userType.html")
@app.route('/login')
def login():
    """Placeholder login route.

    NOTE(review): not implemented -- the view currently returns ``None``,
    which Flask rejects at request time. Intended (per the comment below)
    to collect DB credentials into ``app.config``.
    """
    #get db username (typically root) and password and place into app.config
    pass
@app.route('/usertype')
def usertype():
    """Render the page where the visitor picks the artist or listener role."""
    page = 'userType.html'
    return render_template(page)
@app.route('/artist', methods=["POST","GET"])
def artist():
    """Artist landing page: list all artists and register new ones.

    GET renders the artist list. POST reads ``name`` and ``surname`` from
    the form and inserts the artist unless that exact pair already exists,
    in which case the page is re-rendered with ``duplicate=True``.
    """
    cursor.execute("SELECT * FROM dbtify.artist")
    artist_list = cursor.fetchall()
    name = ""
    surname = ""
    if request.method == "POST":
        name = request.form["name"]
        surname = request.form["surname"]
        # Form input goes through parameterized placeholders, never string
        # formatting, to prevent SQL injection.
        cursor.execute(
            "SELECT EXISTS(SELECT * FROM dbtify.artist WHERE name = %s AND surname = %s)",
            (name, surname))
        if cursor.fetchone()[0] > 0:  # artist already registered
            return render_template('artistMainPage.html', artist_list = artist_list, name = name, surname=surname, duplicate = True)
        else:  # new artist: insert and refresh the listing
            cursor.execute("INSERT INTO artist (name, surname) VALUES (%s, %s);", (name, surname))
            conn.commit()
            cursor.execute("SELECT * FROM dbtify.artist")
            artist_list = cursor.fetchall()
            return render_template('artistMainPage.html', artist_list = artist_list, name=name, surname=surname, duplicate=False)
    return render_template('artistMainPage.html', artist_list = artist_list)
@app.route('/artistProfile', methods=["POST", "GET"])
def artistProfile():
    """Artist dashboard: create/delete albums and songs, list the catalog.

    The artist is identified by the ``artist`` query parameter, formatted
    ``name_surname``. POST requests dispatch on which form fields are
    present: ``genre`` -> new album, ``song_album`` -> new song (with an
    optional '%'-separated contributor list), ``deleteAlbum`` /
    ``deleteSong`` -> deletions.

    NOTE(review): request values are interpolated directly into SQL via
    ``str.format`` throughout -- switch to parameterized queries.
    """
    nameSurname = request.args.get('artist')
    name = nameSurname.split("_")[0]
    surname = nameSurname.split("_")[1]
    cursor.execute("SELECT id FROM dbtify.artist WHERE name = '{}' AND surname = '{}' ".format(name, surname))
    artist_id = cursor.fetchone()
    if request.method == "GET":
        pass
    if request.method == "POST": # If a new song or album is created
        if "genre" in request.form: # If a new album is created
            album_id = request.form["ID"]
            title = request.form["title"]
            genre = request.form["genre"]
            cursor.execute( "SELECT EXISTS(SELECT * FROM dbtify.album WHERE id = '{}')".format(album_id) )
            if cursor.fetchone()[0] > 0: # check if album is already exists
                #return render_template('artistMainPage.html', artist_list = artist_list, name = name, surname=surname, duplicate = True)
                # NOTE(review): 'id' here is the builtin function, not
                # album_id -- the message prints the builtin's repr.
                print("album {} already exists".format(id))
            else: # if not exists in db then add it
                cursor.execute("INSERT INTO album (id, genre, title, artist_id) VALUES ('{}', '{}', '{}', '{}');".format(album_id, genre, title, artist_id[0]))
                conn.commit()
        elif "song_album" in request.form: # If a new song is created
            song_id = request.form["ID"]
            title = request.form["title"]
            album = request.form["song_album"]
            # cont_list is '%'-separated "Name Surname%..." pairs; the final
            # split element is empty, hence [:-1].
            contributors = request.form["cont_list"].split("%")[:-1]
            cursor.execute( "SELECT EXISTS(SELECT * FROM dbtify.song WHERE id = '{}' )".format(song_id) )
            if cursor.fetchone()[0] > 0: # check if song is already exists
                #return render_template('artistMainPage.html', artist_list = artist_list, name = name, surname=surname, duplicate = True)
                print("song {} already exists".format(song_id))
            else: # if not exists in db then add it
                cursor.execute("INSERT INTO song (id, title, album) VALUES ('{}', '{}', '{}');".format(song_id, title, album))
                conn.commit()
                # The owning artist is always linked to the song first.
                cursor.execute("INSERT INTO song_artist (song_id, artist_id) VALUES ('{}', '{}');".format(song_id, artist_id[0]))
                conn.commit()
                for contributor in contributors: # add contributors
                    cont_name = contributor.split(" ")[0]
                    cont_surname = contributor.split(" ")[1]
                    cursor.execute("SELECT id FROM dbtify.artist WHERE name = '{}' AND surname = '{}';".format(cont_name, cont_surname))
                    cont_id = cursor.fetchone()
                    cursor.execute("INSERT INTO song_artist (song_id, artist_id) VALUES ('{}', '{}');".format(song_id, cont_id[0]))
                    conn.commit()
        elif "deleteAlbum" in request.form: # If an album is deleted
            album_id = request.form["album_id"]
            cursor.execute("DELETE FROM album WHERE id = '{}' ".format(album_id))
            conn.commit()
        elif "deleteSong" in request.form: # If a song is deleted
            song_id = request.form["song_id"]
            print(song_id)
            cursor.execute("DELETE FROM song WHERE id = '{}'".format(song_id))
            conn.commit()
        else:
            print("Neither song nor album with POST method. ERROR!")
    else:
        print("Error! Couldn't get any form request!")
    # Reload the page data: this artist's albums, all other artists
    # (potential contributors), and this artist's songs.
    cursor.execute("SELECT * FROM dbtify.album WHERE artist_id = '{}' ".format(artist_id[0]))
    albums = cursor.fetchall()
    cursor.execute("SELECT * FROM dbtify.artist WHERE id NOT IN(SELECT id FROM dbtify.artist WHERE id = {})".format(artist_id[0]))
    artist_list = cursor.fetchall()
    cursor.execute("SELECT song_id , title, album FROM dbtify.song INNER JOIN dbtify.song_artist ON song.id = song_artist.song_id WHERE artist_id = {}".format(artist_id[0]))
    songs = cursor.fetchall()
    return render_template('artistProfile.html', name = name, surname = surname, artist_list = artist_list, album_list = albums, song_list = songs)
@app.route("/viewArtist", methods =["POST", "GET"])
def viewArtist():
    """Listener view of one artist: albums, songs, contributors, and likes.

    Identified by query params ``listener`` (``id_username``) and ``artist``
    (an id). POST handles liking an album (also liking all its songs when
    the album was already liked) or liking a single song; duplicates re-render
    with ``duplicate=True``.

    NOTE(review): request/DB values are interpolated into SQL with
    ``str.format`` -- switch to parameterized queries.
    """
    listener_info = request.args.get("listener").split("_")
    listener_id = listener_info[0]
    listener_username = listener_info[1]
    artist_id = request.args.get("artist")
    cursor.execute("SELECT * FROM dbtify.album WHERE artist_id = '{}'".format(artist_id))
    album_list = cursor.fetchall()
    cursor.execute("SELECT * FROM dbtify.song \
                    INNER JOIN dbtify.album ON song.album = album.id \
                    INNER JOIN dbtify.song_artist ON song.id = song_artist.song_id \
                    INNER JOIN dbtify.artist ON song_artist.artist_id = artist.id \
                    WHERE artist.id = '{}'".format(artist_id) )
    song_list = cursor.fetchall()
    cursor.execute("SELECT name, surname, id FROM dbtify.artist WHERE id = '{}'".format(artist_id))
    artist_info = cursor.fetchone()
    cursor.execute("call get_contributors('{}', '{}');".format(artist_info[0], artist_info[1])) #stored procedure
    # Procedure returns one '#'-separated string of "(song_id, artist_id)"
    # tuple literals; the trailing split element is empty, hence [:-1].
    contributors_list = cursor.fetchone()[0].split("#")[:-1]
    cont_dict = {} # parse string into a dict
    for contribution in contributors_list:
        # NOTE(review): eval() on DB-sourced text is dangerous -- this should
        # be ast.literal_eval (the strings look like plain tuple literals).
        pair = eval(contribution)
        p_song_id = pair[0]
        p_artist_id = pair[1]
        cursor.execute("select id, title from song where id ='{}';".format(p_song_id))
        p_song_info = cursor.fetchone()
        p_song_info = str(p_song_info[0]) + " - " + str(p_song_info[1])
        cursor.execute("select name, surname from artist where id ='{}';".format(p_artist_id))
        p_artist_info = cursor.fetchone()
        p_artist_info = str(p_artist_info[0]) + " " + str(p_artist_info[1])
        # Map "id - title" -> list of "name surname" contributor strings.
        if p_song_info in cont_dict:
            cont_dict[p_song_info].append(p_artist_info)
        else:
            cont_dict[p_song_info] = [p_artist_info]
    if request.method == "POST":
        liked_one = ""
        if "album" in request.form:
            # Form value is "albumId_albumTitle".
            liked_album = request.form["album"].split("_")
            liked_album_id = int(liked_album[0])
            liked_album_title = liked_album[1]
            liked_one = liked_album_title
            cursor.execute( "SELECT EXISTS(SELECT * FROM listener_album WHERE album_id = '{}' AND listener_id = '{}')".format(liked_album_id, listener_id) )
            if cursor.fetchone()[0] > 0: # check if album is already liked
                # Album already liked: still like any of its songs that are
                # not yet liked, then report the duplicate.
                cursor.execute("SELECT id FROM song WHERE album = '{}'".format(liked_album_id))
                song_of_album = cursor.fetchall()
                song_of_album = list(map(list,song_of_album))
                for song in song_of_album:
                    cursor.execute( "SELECT EXISTS(SELECT * FROM listener_song WHERE song_id = '{}' AND listener_id = '{}')".format(song[0], listener_id) )
                    if cursor.fetchone()[0] == 0: # check if song is already liked
                        cursor.execute("INSERT INTO listener_song (listener_id, song_id) VALUES ('{}', '{}');".format(listener_id, song[0]))
                        conn.commit()
                return render_template("viewArtist.html", album_list = album_list, song_list = song_list, id = listener_id, username = listener_username, artist = artist_info, cont_dict = cont_dict, success=False, duplicate = True, liked_one = liked_one)
            else:
                cursor.execute("INSERT INTO listener_album (listener_id, album_id) VALUES ('{}', '{}');".format(listener_id, liked_album_id))
                conn.commit()
                """cursor.execute("SELECT id FROM song WHERE album = '{}'".format(liked_album_id))
                song_of_album = cursor.fetchall()
                song_of_album = list(map(list,song_of_album))
                for song in song_of_album: # like the songs of liked album
                    cursor.execute( "SELECT EXISTS(SELECT * FROM listener_song WHERE song_id = '{}' AND listener_id = '{}')".format(song[0], listener_id) )
                    if cursor.fetchone()[0] == 0: # check if song is already liked
                        cursor.execute("INSERT INTO listener_song (listener_id, song_id) VALUES ('{}', '{}');".format(listener_id, song[0]))
                        conn.commit()"""
        elif "song" in request.form:
            # Form value is "songId_songTitle".
            liked_song = request.form["song"].split("_")
            liked_song_id = int(liked_song[0])
            liked_song_title = liked_song[1]
            liked_one = liked_song_title
            cursor.execute( "SELECT EXISTS(SELECT * FROM listener_song WHERE song_id = '{}' AND listener_id = '{}')".format(liked_song_id, listener_id) )
            if cursor.fetchone()[0] > 0: # check if song is already liked
                return render_template("viewArtist.html", album_list = album_list, song_list = song_list, id = listener_id, username = listener_username, artist = artist_info, cont_dict = cont_dict, success=False, duplicate = True, liked_one = liked_one)
            else:
                cursor.execute("INSERT INTO listener_song (listener_id, song_id) VALUES ('{}', '{}');".format(listener_id, liked_song_id))
                conn.commit()
        else:
            print("")
        return render_template("viewArtist.html", album_list = album_list, song_list = song_list, id = listener_id, username = listener_username, artist = artist_info, cont_dict = cont_dict, success=True, duplicate = False, liked_one = liked_one)
    return render_template("viewArtist.html", album_list = album_list, song_list = song_list, id = listener_id, username = listener_username, artist = artist_info, cont_dict = cont_dict, success=False)
@app.route('/albumPage', methods = ["GET","POST"])
def albumPage():
    """Album edit page for an artist.

    GET shows the album (id from the ``album`` query parameter) and its
    songs. POST updates the album's title and/or genre from the form, then
    re-renders with ``flag=True`` to signal the update.
    """
    album_id = request.args.get('album').strip()
    # All values derived from the request are bound via placeholders --
    # never interpolated into the SQL text (SQL-injection risk).
    cursor.execute("SELECT title, genre FROM dbtify.album WHERE id = %s", (album_id,))
    titleGenre = cursor.fetchone()
    title = titleGenre[0].strip()
    genre = titleGenre[1].strip()
    cursor.execute("SELECT id, title FROM dbtify.song WHERE album = %s", (album_id,))
    songs = cursor.fetchall()
    artist_name = request.args.get("artist").split("_")[0]
    artist_surname = request.args.get("artist").split("_")[1]
    flag = False  # True once the album row has been modified
    if request.method == "POST":
        if "title" in request.form:
            title = request.form["title"].strip()
            flag = True
        if "genre" in request.form:
            genre = request.form["genre"].strip()
            flag = True
        cursor.execute("UPDATE dbtify.album SET title = %s , genre = %s WHERE id = %s", (title, genre, album_id))
        conn.commit()
    return render_template("album.html", id=album_id, title=title, genre=genre, songs=songs, flag = flag, artist_name= artist_name, artist_surname= artist_surname)
@app.route('/songPage', methods = ["GET", "POST"])
def songPage():
    """Song edit page for an artist: rename the song and reset contributors.

    Query params: ``song`` (song id) and ``artist`` (``name_surname`` of the
    owning artist). POST replaces the title (if given) and rebuilds the
    song_artist links: owner first, then each '%'-separated contributor.

    NOTE(review): request values are interpolated into SQL with
    ``str.format`` -- switch to parameterized queries.
    """
    song_id = request.args.get('song').strip()
    artist = request.args.get("artist").strip().split("_")
    artist_name = artist[0]
    artist_surname = artist[1]
    cursor.execute("SELECT id FROM dbtify.artist WHERE name='{}' AND surname='{}'".format(artist_name, artist_surname))
    artist_id = cursor.fetchone()
    cursor.execute("SELECT title, album FROM dbtify.song WHERE id = {}".format(song_id))
    titleAlbum = cursor.fetchone()
    title = titleAlbum[0].strip()
    album_id = titleAlbum[1]
    cursor.execute("SELECT artist_id FROM dbtify.song_artist WHERE song_id = '{}'".format(song_id))
    cont_list = cursor.fetchall()
    first = lambda x: x[0]
    # Contributor ids = all linked artists minus the owner.
    cont_ids = list(map(first, list(cont_list) ) )
    cont_ids.remove(artist_id[0])
    cursor.execute("SELECT * FROM dbtify.artist")
    artist_list = cursor.fetchall()
    artist_list = list(map(list, artist_list))
    # Remove the owner row; relies on exact [id, name, surname] equality.
    artist_list.remove([artist_id[0], str(artist_name), str(artist_surname)])
    artist_ids = map(first, artist_list)
    flag = False
    if request.method == "POST":
        if "title" in request.form:
            title = request.form["title"].strip()
            flag = True
        contributors = request.form["cont_list"].split("%")[:-1]
        # Rebuild all song_artist links from scratch: owner first, then
        # each submitted contributor looked up by name/surname.
        cursor.execute("DELETE FROM dbtify.song_artist WHERE song_id = '{}'".format(song_id))
        conn.commit()
        cursor.execute("INSERT INTO song_artist (song_id, artist_id) VALUES ('{}', '{}');".format(song_id, artist_id[0]))
        conn.commit()
        for contributor in contributors: # add contributors
            cont_name = contributor.split(" ")[0]
            cont_surname = contributor.split(" ")[1]
            cursor.execute("SELECT id FROM dbtify.artist WHERE name = '{}' AND surname = '{}';".format(cont_name, cont_surname))
            cont_id = cursor.fetchone()
            cursor.execute("INSERT INTO song_artist (song_id, artist_id) VALUES ('{}', '{}');".format(song_id, cont_id[0]))
            conn.commit()
    cursor.execute("UPDATE dbtify.song SET title = '{}' WHERE id = '{}'".format(title, song_id))
    conn.commit()
    return render_template("song.html", id=song_id, title=title, flag = flag, artist_name= artist_name, artist_surname= artist_surname, album=album_id, cont_ids = cont_ids, artists=artist_list)
    # NOTE(review): the bare `return` below is unreachable dead code.
    return
@app.route('/listener', methods=["POST", "GET"])
def listener():
    """Listener landing page: list all listeners and register new ones.

    GET renders the listener list. POST reads ``username`` and ``email``
    from the form and inserts the listener unless either value is already
    taken, in which case the page re-renders with ``duplicate=True``.
    """
    cursor.execute("SELECT * FROM dbtify.listener")
    listener_list = cursor.fetchall()
    username = ""
    email = ""
    if request.method == "POST": # if a new listener is created
        username = request.form["username"]
        email = request.form["email"]
        # Form input is bound via placeholders, never interpolated into the
        # SQL text (SQL-injection risk).
        cursor.execute(
            "SELECT EXISTS(SELECT * FROM dbtify.listener WHERE username = %s OR email = %s)",
            (username, email))
        if cursor.fetchone()[0] > 0: # username or email already taken
            return render_template('listenerMainPage.html', listener_list = listener_list, username = username, email=email, duplicate = True)
        else: # new listener: insert and refresh the listing
            cursor.execute("INSERT INTO dbtify.listener (username, email) VALUES (%s, %s);", (username, email))
            conn.commit()
            cursor.execute("SELECT * FROM dbtify.listener")
            listener_list = cursor.fetchall()
            return render_template('listenerMainPage.html', listener_list = listener_list, username = username, email=email, duplicate=False)
    return render_template('listenerMainPage.html', listener_list = listener_list)
@app.route('/listenerProfile')
def listenerProfile():
    """Listener dashboard: liked songs, other listeners' likes, and rankings.

    The listener is identified by the ``listener`` query parameter,
    formatted ``id_username``. Gathers: this listener's liked songs, every
    other listener's liked songs, the other listeners, like counts per
    artist, all artists, per-song like counts (via the ``songs_per_like``
    stored procedure), and the full song catalog.

    NOTE(review): ``listener_id`` is interpolated into SQL with
    ``str.format`` -- switch to parameterized queries.
    """
    listener_info = request.args.get("listener").split("_")
    listener_id = listener_info[0]
    listener_username = listener_info[1]
    # Songs this listener has liked, joined with song/artist details.
    cursor.execute("SELECT * FROM dbtify.listener_song \
                    INNER JOIN dbtify.song ON song.id = listener_song.song_id\
                    INNER JOIN dbtify.song_artist ON song_artist.song_id = song.id\
                    INNER JOIN dbtify.artist ON song_artist.artist_id = artist.id\
                    WHERE listener_id = '{}' order by title".format(listener_id))
    liked_songs = cursor.fetchall()
    # Songs liked by every OTHER listener.
    cursor.execute("SELECT listener_id, username, title, name, surname FROM dbtify.listener_song \
                    INNER JOIN dbtify.song ON song.id = listener_song.song_id\
                    INNER JOIN dbtify.song_artist ON song_artist.song_id = song.id\
                    INNER JOIN dbtify.artist ON song_artist.artist_id = artist.id\
                    INNER JOIN dbtify.listener ON listener_song.listener_id = listener.id\
                    WHERE NOT listener_id = '{}' order by title".format(listener_id))
    all_liked_songs = cursor.fetchall()
    all_liked_songs = list(map(list, all_liked_songs))
    cursor.execute("SELECT id, username FROM listener WHERE NOT id = '{}'".format(listener_id))
    listener_list = cursor.fetchall()
    listener_list = list(map(list, listener_list))
    # Like counts aggregated per artist, most-liked first.
    cursor.execute("select count(listener_song.song_id), artist_id, name, surname from listener_song \
                    inner join song_artist on song_artist.song_id = listener_song.song_id \
                    inner join artist on artist.id = song_artist.artist_id \
                    group by artist_id order by count(listener_song.song_id) DESC;")
    n_likes_per_artist = cursor.fetchall()
    n_likes_per_artist = list(map(list, n_likes_per_artist))
    liked_artists = list(artist[1] for artist in n_likes_per_artist)
    cursor.execute("select * from artist order by name asc")
    artists = cursor.fetchall()
    cursor.execute("call songs_per_like()")
    #likes title artist_id name surname
    #song_id title likes artist_id name surname
    songs_per_like = cursor.fetchall()
    songs_per_like = list(map(list, songs_per_like))
    liked_song_ids = list(song[0] for song in songs_per_like)
    # Full catalog of songs with their artists.
    cursor.execute("select song.id, song.title, song_artist.artist_id, artist.name, artist.surname from song \
                    inner join song_artist on song_artist.song_id = song.id \
                    inner join artist on song_artist.artist_id = artist.id\
                    order by song.title asc")
    songs = cursor.fetchall()
    # NOTE(review): leftover debug prints -- remove or switch to logging.
    print(songs_per_like)
    print(liked_song_ids)
    print(songs)
    return render_template('listenerProfile.html', id = listener_id, username = listener_username, liked_songs = liked_songs, \
        all_liked_songs = all_liked_songs, listener_list = listener_list, \
        likes_per_artist = n_likes_per_artist, artists = artists, liked_artists = liked_artists , songs = songs, songs_per_like = songs_per_like, liked_song_ids = liked_song_ids)
@app.route("/viewAll", methods = ["POST", "GET"])
def viewAll():
    """Browse the whole catalog (artists, albums, songs) as a listener.

    The listener is identified by the ``listener`` query parameter
    (``id_username``). POST likes a single song submitted as
    ``songId_songTitle``; a duplicate like re-renders with
    ``duplicate=True``.

    NOTE(review): request values are interpolated into SQL with
    ``str.format`` -- switch to parameterized queries.
    """
    listener_info = request.args.get("listener").split("_")
    listener_id = listener_info[0]
    listener_username = listener_info[1]
    cursor.execute("SELECT * FROM dbtify.artist")
    artist_list = cursor.fetchall()
    cursor.execute("SELECT * FROM dbtify.album")
    album_list = cursor.fetchall()
    cursor.execute("SELECT * FROM dbtify.song INNER JOIN dbtify.album ON song.album = album.id ")
    song_list = cursor.fetchall()
    if request.method == "POST":
        liked_one = ""
        if "song" in request.form:
            liked_song = request.form["song"].split("_")
            liked_song_id = int(liked_song[0])
            liked_song_title = liked_song[1]
            liked_one = liked_song_title
            cursor.execute( "SELECT EXISTS(SELECT * FROM listener_song WHERE song_id = '{}' AND listener_id = '{}')".format(liked_song_id, listener_id) )
            if cursor.fetchone()[0] > 0: # check if song is already liked
                return render_template("viewAll.html", id = listener_id, username = listener_username, artist_list = artist_list, album_list = album_list, song_list = song_list, success=False, duplicate = True, liked_one = liked_one)
            else:
                cursor.execute("INSERT INTO listener_song (listener_id, song_id) VALUES ('{}', '{}');".format(listener_id, liked_song_id))
                conn.commit()
        return render_template("viewAll.html", id = listener_id, username = listener_username, artist_list = artist_list, album_list = album_list, song_list = song_list, success=True, duplicate = False, liked_one = liked_one)
    return render_template("viewAll.html", id = listener_id, username = listener_username, artist_list = artist_list, album_list = album_list, song_list = song_list, success = False)
@app.route("/viewAlbum", methods = ["POST", "GET"])
def viewAlbum():
    """Listener view of a single album with its songs; POST likes the album.

    Query params: ``listener`` (``id_username``) and ``album`` (id). When
    the album is already liked, any of its not-yet-liked songs still get
    liked and the page re-renders with ``duplicate=True``.

    NOTE(review): request values are interpolated into SQL with
    ``str.format`` -- switch to parameterized queries.
    """
    listener_info = request.args.get("listener").split("_")
    listener_id = listener_info[0]
    listener_username = listener_info[1]
    album_id = request.args.get("album")
    cursor.execute("select album.id, genre, title, name, surname from album inner join artist on artist.id = album.artist_id where album.id = '{}';".format(album_id))
    album = cursor.fetchone()
    album = list(album)
    cursor.execute("select id, title from song where album = '{}'".format(album_id))
    songs = cursor.fetchall()
    songs = list(map(list, songs))
    if request.method == "POST": # liking album
        # Form value is "albumId_albumTitle".
        liked_album = request.form["album"].split("_")
        liked_album_id = int(liked_album[0])
        liked_album_title = liked_album[1]
        liked_one = liked_album_title
        cursor.execute( "SELECT EXISTS(SELECT * FROM listener_album WHERE album_id = '{}' AND listener_id = '{}')".format(liked_album_id, listener_id) )
        if cursor.fetchone()[0] > 0: # check if album is already liked
            cursor.execute("SELECT id FROM song WHERE album = '{}'".format(liked_album_id)) # check if album contains non-liked songs
            song_of_album = cursor.fetchall()
            song_of_album = list(map(list,song_of_album))
            for song in song_of_album:
                cursor.execute( "SELECT EXISTS(SELECT * FROM listener_song WHERE song_id = '{}' AND listener_id = '{}')".format(song[0], listener_id) )
                if cursor.fetchone()[0] == 0: # check if song is not already liked
                    cursor.execute("INSERT INTO listener_song (listener_id, song_id) VALUES ('{}', '{}');".format(listener_id, song[0]))
                    conn.commit()
            return render_template("viewAlbum.html", id = listener_id, username = listener_username, album = album, songs = songs, like = False, duplicate = True, liked_one = liked_one)
        else:
            cursor.execute("INSERT INTO listener_album (listener_id, album_id) VALUES ('{}', '{}');".format(listener_id, liked_album_id))
            conn.commit()
            """cursor.execute("SELECT id FROM song WHERE album = '{}'".format(liked_album_id)) # like all songs of that album
            song_of_album = cursor.fetchall()
            song_of_album = list(map(list,song_of_album))
            for song in song_of_album:
                cursor.execute( "SELECT EXISTS(SELECT * FROM listener_song WHERE song_id = '{}' AND listener_id = '{}')".format(song[0], listener_id) )
                if cursor.fetchone()[0] == 0: # check if song is already liked
                    cursor.execute("INSERT INTO listener_song (listener_id, song_id) VALUES ('{}', '{}');".format(listener_id, song[0]))
                    conn.commit()"""
        return render_template("viewAlbum.html", id = listener_id, username = listener_username, album = album, songs = songs, like = True, liked_one = liked_one)
    return render_template("viewAlbum.html", id = listener_id, username = listener_username, album = album, songs = songs, like = False, duplicate = False)
@app.route("/exploreSongs", methods = ["POST", "GET"])
def exploreSongs():
    """Song-exploration page for a listener.

    Reads ``listener`` ("<id>_<username>") from the query string, lists every
    album genre, every song joined with its album genre, and — when a
    ``keyword`` query parameter is present — songs whose title contains it.
    """
    listener_info = request.args.get("listener").split("_")
    listener_id = listener_info[0]
    listener_username = listener_info[1]
    cursor.execute("select genre from album group by genre;")
    genres = cursor.fetchall()
    cursor.execute("select song.id, song.title, album.genre from song inner join album where song.album = album.id;")
    songs_by_genre = cursor.fetchall()
    searched_songs = []
    if "keyword" in request.args:
        # SECURITY: "keyword" is user-controlled; the previous str.format()
        # interpolation was an SQL-injection vector. Use a parameterized
        # query and build the LIKE pattern in Python instead.
        cursor.execute("select id, title from song where title like %s",
                       ("%" + request.args.get("keyword") + "%",))
        searched_songs = cursor.fetchall()
    return render_template("exploreSongs.html", id = listener_id, username = listener_username, genres = genres, songs = songs_by_genre, searched_songs = searched_songs)
if __name__ == "__main__":
    # Select the MySQL schema used by flask-mysql before starting the server.
    app.config['MYSQL_DATABASE_DB'] = 'dbtify'
    # Development server only; debug=True enables the reloader and debugger.
    app.run(debug=True)
| [
"flask.render_template",
"flask.request.args.get",
"flask.Flask",
"flask.url_for",
"flaskext.mysql.MySQL"
] | [((109, 124), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (114, 124), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((133, 140), 'flaskext.mysql.MySQL', 'MySQL', ([], {}), '()\n', (138, 140), False, 'from flaskext.mysql import MySQL\n'), ((1653, 1685), 'flask.render_template', 'render_template', (['"""userType.html"""'], {}), "('userType.html')\n", (1668, 1685), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((1860, 1892), 'flask.render_template', 'render_template', (['"""userType.html"""'], {}), "('userType.html')\n", (1875, 1892), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((3010, 3073), 'flask.render_template', 'render_template', (['"""artistMainPage.html"""'], {'artist_list': 'artist_list'}), "('artistMainPage.html', artist_list=artist_list)\n", (3025, 3073), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((3176, 3202), 'flask.request.args.get', 'request.args.get', (['"""artist"""'], {}), "('artist')\n", (3192, 3202), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((7118, 7248), 'flask.render_template', 'render_template', (['"""artistProfile.html"""'], {'name': 'name', 'surname': 'surname', 'artist_list': 'artist_list', 'album_list': 'albums', 'song_list': 'songs'}), "('artistProfile.html', name=name, surname=surname,\n artist_list=artist_list, album_list=albums, song_list=songs)\n", (7133, 7248), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((7483, 7509), 'flask.request.args.get', 'request.args.get', (['"""artist"""'], {}), "('artist')\n", (7499, 7509), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((12708, 12896), 'flask.render_template', 'render_template', (['"""viewArtist.html"""'], {'album_list': 'album_list', 'song_list': 'song_list', 'id': 'listener_id', 'username': 'listener_username', 
'artist': 'artist_info', 'cont_dict': 'cont_dict', 'success': '(False)'}), "('viewArtist.html', album_list=album_list, song_list=\n song_list, id=listener_id, username=listener_username, artist=\n artist_info, cont_dict=cont_dict, success=False)\n", (12723, 12896), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((13902, 14055), 'flask.render_template', 'render_template', (['"""album.html"""'], {'id': 'album_id', 'title': 'title', 'genre': 'genre', 'songs': 'songs', 'flag': 'flag', 'artist_name': 'artist_name', 'artist_surname': 'artist_surname'}), "('album.html', id=album_id, title=title, genre=genre, songs=\n songs, flag=flag, artist_name=artist_name, artist_surname=artist_surname)\n", (13917, 14055), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((16267, 16451), 'flask.render_template', 'render_template', (['"""song.html"""'], {'id': 'song_id', 'title': 'title', 'flag': 'flag', 'artist_name': 'artist_name', 'artist_surname': 'artist_surname', 'album': 'album_id', 'cont_ids': 'cont_ids', 'artists': 'artist_list'}), "('song.html', id=song_id, title=title, flag=flag,\n artist_name=artist_name, artist_surname=artist_surname, album=album_id,\n cont_ids=cont_ids, artists=artist_list)\n", (16282, 16451), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((17675, 17744), 'flask.render_template', 'render_template', (['"""listenerMainPage.html"""'], {'listener_list': 'listener_list'}), "('listenerMainPage.html', listener_list=listener_list)\n", (17690, 17744), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((20352, 20700), 'flask.render_template', 'render_template', (['"""listenerProfile.html"""'], {'id': 'listener_id', 'username': 'listener_username', 'liked_songs': 'liked_songs', 'all_liked_songs': 'all_liked_songs', 'listener_list': 'listener_list', 'likes_per_artist': 'n_likes_per_artist', 'artists': 'artists', 'liked_artists': 
'liked_artists', 'songs': 'songs', 'songs_per_like': 'songs_per_like', 'liked_song_ids': 'liked_song_ids'}), "('listenerProfile.html', id=listener_id, username=\n listener_username, liked_songs=liked_songs, all_liked_songs=\n all_liked_songs, listener_list=listener_list, likes_per_artist=\n n_likes_per_artist, artists=artists, liked_artists=liked_artists, songs\n =songs, songs_per_like=songs_per_like, liked_song_ids=liked_song_ids)\n", (20367, 20700), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((22457, 22624), 'flask.render_template', 'render_template', (['"""viewAll.html"""'], {'id': 'listener_id', 'username': 'listener_username', 'artist_list': 'artist_list', 'album_list': 'album_list', 'song_list': 'song_list', 'success': '(False)'}), "('viewAll.html', id=listener_id, username=listener_username,\n artist_list=artist_list, album_list=album_list, song_list=song_list,\n success=False)\n", (22472, 22624), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((22852, 22877), 'flask.request.args.get', 'request.args.get', (['"""album"""'], {}), "('album')\n", (22868, 22877), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((25688, 25825), 'flask.render_template', 'render_template', (['"""viewAlbum.html"""'], {'id': 'listener_id', 'username': 'listener_username', 'album': 'album', 'songs': 'songs', 'like': '(False)', 'duplicate': '(False)'}), "('viewAlbum.html', id=listener_id, username=\n listener_username, album=album, songs=songs, like=False, duplicate=False)\n", (25703, 25825), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((26528, 26686), 'flask.render_template', 'render_template', (['"""exploreSongs.html"""'], {'id': 'listener_id', 'username': 'listener_username', 'genres': 'genres', 'songs': 'songs_by_genre', 'searched_songs': 'searched_songs'}), "('exploreSongs.html', id=listener_id, username=\n listener_username, 
genres=genres, songs=songs_by_genre, searched_songs=\n searched_songs)\n", (26543, 26686), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((1530, 1549), 'flask.url_for', 'url_for', (['"""usertype"""'], {}), "('usertype')\n", (1537, 1549), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((12463, 12692), 'flask.render_template', 'render_template', (['"""viewArtist.html"""'], {'album_list': 'album_list', 'song_list': 'song_list', 'id': 'listener_id', 'username': 'listener_username', 'artist': 'artist_info', 'cont_dict': 'cont_dict', 'success': '(True)', 'duplicate': '(False)', 'liked_one': 'liked_one'}), "('viewArtist.html', album_list=album_list, song_list=\n song_list, id=listener_id, username=listener_username, artist=\n artist_info, cont_dict=cont_dict, success=True, duplicate=False,\n liked_one=liked_one)\n", (12478, 12692), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((22234, 22438), 'flask.render_template', 'render_template', (['"""viewAll.html"""'], {'id': 'listener_id', 'username': 'listener_username', 'artist_list': 'artist_list', 'album_list': 'album_list', 'song_list': 'song_list', 'success': '(True)', 'duplicate': '(False)', 'liked_one': 'liked_one'}), "('viewAll.html', id=listener_id, username=listener_username,\n artist_list=artist_list, album_list=album_list, song_list=song_list,\n success=True, duplicate=False, liked_one=liked_one)\n", (22249, 22438), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((2420, 2531), 'flask.render_template', 'render_template', (['"""artistMainPage.html"""'], {'artist_list': 'artist_list', 'name': 'name', 'surname': 'surname', 'duplicate': '(True)'}), "('artistMainPage.html', artist_list=artist_list, name=name,\n surname=surname, duplicate=True)\n", (2435, 2531), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((2882, 2994), 'flask.render_template', 
'render_template', (['"""artistMainPage.html"""'], {'artist_list': 'artist_list', 'name': 'name', 'surname': 'surname', 'duplicate': '(False)'}), "('artistMainPage.html', artist_list=artist_list, name=name,\n surname=surname, duplicate=False)\n", (2897, 2994), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((7348, 7376), 'flask.request.args.get', 'request.args.get', (['"""listener"""'], {}), "('listener')\n", (7364, 7376), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((12984, 13009), 'flask.request.args.get', 'request.args.get', (['"""album"""'], {}), "('album')\n", (13000, 13009), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((14138, 14162), 'flask.request.args.get', 'request.args.get', (['"""song"""'], {}), "('song')\n", (14154, 14162), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((17044, 17165), 'flask.render_template', 'render_template', (['"""listenerMainPage.html"""'], {'listener_list': 'listener_list', 'username': 'username', 'email': 'email', 'duplicate': '(True)'}), "('listenerMainPage.html', listener_list=listener_list,\n username=username, email=email, duplicate=True)\n", (17059, 17165), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((17535, 17657), 'flask.render_template', 'render_template', (['"""listenerMainPage.html"""'], {'listener_list': 'listener_list', 'username': 'username', 'email': 'email', 'duplicate': '(False)'}), "('listenerMainPage.html', listener_list=listener_list,\n username=username, email=email, duplicate=False)\n", (17550, 17657), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((17824, 17852), 'flask.request.args.get', 'request.args.get', (['"""listener"""'], {}), "('listener')\n", (17840, 17852), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((20811, 20839), 'flask.request.args.get', 
'request.args.get', (['"""listener"""'], {}), "('listener')\n", (20827, 20839), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((22720, 22748), 'flask.request.args.get', 'request.args.get', (['"""listener"""'], {}), "('listener')\n", (22736, 22748), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((24441, 24602), 'flask.render_template', 'render_template', (['"""viewAlbum.html"""'], {'id': 'listener_id', 'username': 'listener_username', 'album': 'album', 'songs': 'songs', 'like': '(False)', 'duplicate': '(True)', 'liked_one': 'liked_one'}), "('viewAlbum.html', id=listener_id, username=\n listener_username, album=album, songs=songs, like=False, duplicate=True,\n liked_one=liked_one)\n", (24456, 24602), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((25523, 25668), 'flask.render_template', 'render_template', (['"""viewAlbum.html"""'], {'id': 'listener_id', 'username': 'listener_username', 'album': 'album', 'songs': 'songs', 'like': '(True)', 'liked_one': 'liked_one'}), "('viewAlbum.html', id=listener_id, username=\n listener_username, album=album, songs=songs, like=True, liked_one=liked_one\n )\n", (25538, 25668), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((25930, 25958), 'flask.request.args.get', 'request.args.get', (['"""listener"""'], {}), "('listener')\n", (25946, 25958), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((10308, 10537), 'flask.render_template', 'render_template', (['"""viewArtist.html"""'], {'album_list': 'album_list', 'song_list': 'song_list', 'id': 'listener_id', 'username': 'listener_username', 'artist': 'artist_info', 'cont_dict': 'cont_dict', 'success': '(False)', 'duplicate': '(True)', 'liked_one': 'liked_one'}), "('viewArtist.html', album_list=album_list, song_list=\n song_list, id=listener_id, username=listener_username, artist=\n artist_info, cont_dict=cont_dict, 
success=False, duplicate=True,\n liked_one=liked_one)\n", (10323, 10537), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((13357, 13383), 'flask.request.args.get', 'request.args.get', (['"""artist"""'], {}), "('artist')\n", (13373, 13383), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((13419, 13445), 'flask.request.args.get', 'request.args.get', (['"""artist"""'], {}), "('artist')\n", (13435, 13445), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((21786, 21990), 'flask.render_template', 'render_template', (['"""viewAll.html"""'], {'id': 'listener_id', 'username': 'listener_username', 'artist_list': 'artist_list', 'album_list': 'album_list', 'song_list': 'song_list', 'success': '(False)', 'duplicate': '(True)', 'liked_one': 'liked_one'}), "('viewAll.html', id=listener_id, username=listener_username,\n artist_list=artist_list, album_list=album_list, song_list=song_list,\n success=False, duplicate=True, liked_one=liked_one)\n", (21801, 21990), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((26443, 26470), 'flask.request.args.get', 'request.args.get', (['"""keyword"""'], {}), "('keyword')\n", (26459, 26470), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((11964, 12193), 'flask.render_template', 'render_template', (['"""viewArtist.html"""'], {'album_list': 'album_list', 'song_list': 'song_list', 'id': 'listener_id', 'username': 'listener_username', 'artist': 'artist_info', 'cont_dict': 'cont_dict', 'success': '(False)', 'duplicate': '(True)', 'liked_one': 'liked_one'}), "('viewArtist.html', album_list=album_list, song_list=\n song_list, id=listener_id, username=listener_username, artist=\n artist_info, cont_dict=cont_dict, success=False, duplicate=True,\n liked_one=liked_one)\n", (11979, 12193), False, 'from flask import Flask, render_template, request, redirect, url_for\n'), ((14189, 14215), 
'flask.request.args.get', 'request.args.get', (['"""artist"""'], {}), "('artist')\n", (14205, 14215), False, 'from flask import Flask, render_template, request, redirect, url_for\n')] |
from setuptools import setup, find_namespace_packages
from os import path
# Read the README that sits next to this file so it can be used as the
# long description rendered on PyPI.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
# Core runtime dependencies installed together with the package.
requirements = [
    "numpy",
    "pandas",
    "vedo>=2020.3.3",
    "k3d==2.7.4",
    "msgpack",
    "vtk<9.0.0",
    "allensdk",
    "pyyaml>=5.3",
    "scikit-image",
    "brainio>=0.0.19",
    "sklearn",
    "morphapi>=0.1.1.3",
    "requests",
    "rich",
    "bg-atlasapi>=0.0.7",
]
setup(
    name="brainrender",
    version="1.0.0.1rc2",
    description="Python scripts to use Allen Brain Map data for analysis "
    "and rendering",
    long_description=long_description,
    long_description_content_type="text/markdown",
    install_requires=requirements,
    # Optional dependency groups: "nb" for notebook rendering, "dev" for
    # testing / linting / pre-commit tooling.
    extras_require={
        "nb": ["jupyter", "k3d"],
        "dev": [
            "pytest-cov",
            "pytest",
            "pytest-sugar",
            "coveralls",
            "coverage<=4.5.4",
            "pre-commit",
            "opencv-python",
            "jupyter",
        ],
    },
    # NOTE(review): upper bound excludes Python 3.8+ — confirm this is intentional.
    python_requires=">=3.6, <3.8",
    packages=find_namespace_packages(
        exclude=("Installation", "Meshes", "Metadata", "Screenshots")
    ),
    include_package_data=True,
    url="https://github.com/BrancoLab/brainrender",
    author="<NAME>",
    zip_safe=False,
    # Installs the "brainrender" command-line entry point.
    entry_points={"console_scripts": ["brainrender = brainrender.cli:main"]},
)
| [
"setuptools.find_namespace_packages",
"os.path.dirname",
"os.path.join"
] | [((105, 127), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (117, 127), False, 'from os import path\n'), ((139, 177), 'os.path.join', 'path.join', (['this_directory', '"""README.md"""'], {}), "(this_directory, 'README.md')\n", (148, 177), False, 'from os import path\n'), ((1153, 1243), 'setuptools.find_namespace_packages', 'find_namespace_packages', ([], {'exclude': "('Installation', 'Meshes', 'Metadata', 'Screenshots')"}), "(exclude=('Installation', 'Meshes', 'Metadata',\n 'Screenshots'))\n", (1176, 1243), False, 'from setuptools import setup, find_namespace_packages\n')] |
import pandas as pd
import numpy as np
import seaborn as sns
import os
import matplotlib.pyplot as plt
# Load the per-dataset ranking results produced by the fairness experiments.
df = pd.read_csv( os.path.join( "fairness-2021", "simple-rank-res.csv" ) )
# Normalise preprocessor / parameter-tuning names and build a single
# "technique" label of the form PP+RF+PT (e.g. "FS+RF+RS").
df["pp"] = [ "fs" if x == 'fairsmote' else x for x in df["pp"] ]
df["pt"] = [ "rs" if x == 'random' else x for x in df["pt"] ]
df["tech"] = [ x + "+rf+" + y for x, y in zip( df["pp"], df["pt"] ) ]
df["tech"] = [ x.replace("none+","").upper() for x in df["tech"] ]
df["tech"] = [ x.replace("+DEFAULT","").upper() for x in df["tech"] ]
# Keep only the three distance-to-heaven metrics and give them readable names.
df = df[ [x in ["d2h1", "d2h2", "d2h3"] for x in df["m"]] ]
df["d2h"] = [ "overall" if x == "d2h1" else "classification" if x == "d2h2" else "fairness" for x in df["m"] ]
ds_list = df["ds"].unique()
m_list = ["d2h1", "d2h2", "d2h3"]
mn_list = ["Prediction", "Fairness", "Overall"]
tech_list = df["tech"].unique()
for ds in ds_list:
    # First pass: compute a shared y-axis range for this dataset from the
    # Tukey whiskers (quartile +/- 1.5*IQR) across all metrics and techniques,
    # so the three plots per dataset are directly comparable.
    top, bottom = [], []
    for m in m_list:
        for t in tech_list:
            sub_df = df[ np.logical_and(np.logical_and(df["ds"]==ds, df["m"]==m), df["tech"]==t) ]
            val = sub_df["val"]
            iqr = val.quantile(0.75) - val.quantile(0.25)
            bottom += [val.quantile(0.25) - 1.5*iqr]
            top += [val.quantile(0.75) + 1.5*iqr]
    top, bottom = max(top), min(bottom)
    # Second pass: one box plot per metric, ordered by rank, saved to disk.
    for m, mn in zip( m_list, mn_list ):
        sub_df = df[ np.logical_and(df["ds"]==ds, df["m"]==m) ]
        # One (tech, rank) row per technique, sorted so the x-axis is rank-ordered.
        ranks = pd.DataFrame([ sub_df[ sub_df["tech"] == x ].iloc[0][["tech", "rank"]] for x in tech_list ])
        ranks = ranks.sort_values(by=['rank'])
        plt.clf()
        fig, ax = plt.subplots(figsize=(5, 8.5))
        plt.ylim((bottom, top))
        plt.tight_layout(pad=2.25)
        ax.tick_params(axis='x', rotation=25)
        g = sns.boxplot( x = "tech", y = "val", hue = "rank",
            data = sub_df, dodge = False, order = list(ranks["tech"]),
            showfliers = False,
            palette = sns.cubehelix_palette(start=1.5, rot=0.4,
                dark=0.35, light=1, reverse=True)
        ).set(
            xlabel='Model',
            ylabel='Distance to heaven',
            title=f'{mn}',
        )
        # Rank is encoded by the palette ordering, so the legend is redundant.
        ax.get_legend().remove()
        fig.savefig( os.path.join( "fairness-2021", "box", f"box-{ds}-{m}.png" ) )
        plt.close()
| [
"seaborn.cubehelix_palette",
"numpy.logical_and",
"matplotlib.pyplot.clf",
"os.path.join",
"matplotlib.pyplot.close",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots"
] | [((122, 174), 'os.path.join', 'os.path.join', (['"""fairness-2021"""', '"""simple-rank-res.csv"""'], {}), "('fairness-2021', 'simple-rank-res.csv')\n", (134, 174), False, 'import os\n'), ((1416, 1508), 'pandas.DataFrame', 'pd.DataFrame', (["[sub_df[sub_df['tech'] == x].iloc[0][['tech', 'rank']] for x in tech_list]"], {}), "([sub_df[sub_df['tech'] == x].iloc[0][['tech', 'rank']] for x in\n tech_list])\n", (1428, 1508), True, 'import pandas as pd\n'), ((1573, 1582), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1580, 1582), True, 'import matplotlib.pyplot as plt\n'), ((1601, 1631), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 8.5)'}), '(figsize=(5, 8.5))\n', (1613, 1631), True, 'import matplotlib.pyplot as plt\n'), ((1640, 1663), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(bottom, top)'], {}), '((bottom, top))\n', (1648, 1663), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1698), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(2.25)'}), '(pad=2.25)\n', (1688, 1698), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2362), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2360, 2362), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1392), 'numpy.logical_and', 'np.logical_and', (["(df['ds'] == ds)", "(df['m'] == m)"], {}), "(df['ds'] == ds, df['m'] == m)\n", (1362, 1392), True, 'import numpy as np\n'), ((2281, 2338), 'os.path.join', 'os.path.join', (['"""fairness-2021"""', '"""box"""', 'f"""box-{ds}-{m}.png"""'], {}), "('fairness-2021', 'box', f'box-{ds}-{m}.png')\n", (2293, 2338), False, 'import os\n'), ((975, 1019), 'numpy.logical_and', 'np.logical_and', (["(df['ds'] == ds)", "(df['m'] == m)"], {}), "(df['ds'] == ds, df['m'] == m)\n", (989, 1019), True, 'import numpy as np\n'), ((1957, 2032), 'seaborn.cubehelix_palette', 'sns.cubehelix_palette', ([], {'start': '(1.5)', 'rot': '(0.4)', 'dark': '(0.35)', 'light': '(1)', 'reverse': '(True)'}), '(start=1.5, rot=0.4, dark=0.35, light=1, 
reverse=True)\n', (1978, 2032), True, 'import seaborn as sns\n')] |
# License: MIT
'''
:author: <NAME> (<EMAIL>)
:organization: ETS
'''
import ctypes as c
import logging
import os
class Tagger(object):
    """The ZPar English POS Tagger.

    Thin ctypes wrapper around the zpar shared library's tagger entry
    points; tags single sentences or whole files.
    """

    def __init__(self, modelpath, libptr, zpar_session_obj):
        """Load the tagger model for the given zpar session.

        Args:
            modelpath: Path to the directory containing the tagger model.
            libptr: Loaded zpar shared library (a ctypes library handle).
            zpar_session_obj: Opaque zpar session pointer.

        Raises:
            OSError: If no model can be loaded from ``modelpath``.
        """
        super(Tagger, self).__init__()
        # save the zpar session object
        self._zpar_session_obj = zpar_session_obj
        # set up a logger
        self.logger = logging.getLogger(__name__)
        # get the library method that loads the tagger models
        self._load_tagger = libptr.load_tagger
        self._load_tagger.restype = c.c_int
        self._load_tagger.argtypes = [c.c_void_p, c.c_char_p]
        # get the library methods that tag sentences and files
        self._tag_sentence = libptr.tag_sentence
        self._tag_sentence.restype = c.c_char_p
        self._tag_sentence.argtypes = [c.c_void_p, c.c_char_p, c.c_bool]
        self._tag_file = libptr.tag_file
        self._tag_file.restype = None
        self._tag_file.argtypes = [c.c_void_p, c.c_char_p, c.c_char_p, c.c_bool]
        # A non-zero return value from the C loader signals failure.
        if self._load_tagger(self._zpar_session_obj, modelpath.encode('utf-8')):
            raise OSError('Cannot find tagger model at {}\n'.format(modelpath))

    def tag_sentence(self, sentence, tokenize=True):
        """Tag a single sentence and return the tagged text as a string.

        Returns the empty string for blank/whitespace-only input.
        (A duplicated, unreachable ``return ans`` at the end of the original
        implementation has been removed.)
        """
        if not sentence.strip():
            # return empty string if the input is empty
            ans = ""
        else:
            # zpar expects the sentence terminated by a newline and a space
            zpar_compatible_sentence = sentence.strip() + "\n "
            zpar_compatible_sentence = zpar_compatible_sentence.encode('utf-8')
            tagged_sent = self._tag_sentence(self._zpar_session_obj, zpar_compatible_sentence, tokenize)
            ans = tagged_sent.decode('utf-8')
        return ans

    def tag_file(self, inputfile, outputfile, tokenize=True):
        """Tag every sentence of ``inputfile``, writing results to ``outputfile``.

        Raises:
            OSError: If ``inputfile`` does not exist.
        """
        if os.path.exists(inputfile):
            self._tag_file(self._zpar_session_obj, inputfile.encode('utf-8'), outputfile.encode('utf-8'), tokenize)
        else:
            raise OSError('File {} does not exist.'.format(inputfile))

    def cleanup(self):
        """Drop all references to the shared library and the zpar session."""
        self._load_tagger = None
        self._tag_sentence = None
        self._tag_file = None
        self._zpar_session_obj = None
| [
"logging.getLogger",
"os.path.exists"
] | [((414, 441), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (431, 441), False, 'import logging\n'), ((1805, 1830), 'os.path.exists', 'os.path.exists', (['inputfile'], {}), '(inputfile)\n', (1819, 1830), False, 'import os\n')] |
# ------------------------------------------------------------------------------
# Global imports
# ------------------------------------------------------------------------------
# *** Type hints *** #
from typing import Any
from typing import Optional
from typing import Union
# *** Enumerations *** #
import enum
# *** PyTorch & numerical libs *** #
import torch as pt
import torch.nn.functional as ptf
from torch.distributions.one_hot_categorical import OneHotCategorical
import numpy as np
# *** Plotting *** #
import matplotlib.cm as cm
import matplotlib.pyplot as plt
# Module-level display tweaks: wider tensor printing and larger default figures.
pt.set_printoptions(linewidth=200)
plt.rcParams["figure.figsize"] = [12, 12]
# *** Images & video *** #
import cv2 as cv
class Source:
    """
    Basic image operations over a static image or a video capture source.

    Wraps either a path to an image file or a ``cv.VideoCapture`` object and
    serves frames as ``torch`` tensors, optionally resized with a preserved
    aspect ratio and flattened to a single channel via a random per-pixel
    colour-sampling mask.
    """

    def __init__(
        self,
        _src: Union[str, cv.VideoCapture],
        _height: Optional[int] = None,
        _width: Optional[int] = None,
        _mode: Optional[enum.Enum] = cv.COLOR_RGB2GRAY,
    ):
        """Probe the source, record its dimensions and prepare resizing/masking.

        Args:
            _src: Image file path or an opened ``cv.VideoCapture``.
            _height: Optional target output height (width derived, AR kept).
            _width: Optional target output width (height derived, AR kept).
            _mode: Optional OpenCV colour-conversion code applied per frame.

        Raises:
            ValueError: If both ``_height`` and ``_width`` are given.
            TypeError: If ``_src`` is neither a path nor a VideoCapture.
        """
        if _height is not None and _width is not None:
            raise ValueError("Please provide the new height *or* width, but not both.")

        self.source = _src
        self.frame = None
        self.op = None
        self.processing = True
        self.mode = _mode

        # Unified frame getter: (processing_flag, frame) for both source kinds.
        if isinstance(_src, str):
            self.op = lambda processing, src: (processing, cv.imread(_src))
        elif isinstance(_src, cv.VideoCapture):
            self.op = lambda processing, src: src.read()
        else:
            raise TypeError("Invalid input source.")

        # Grab one frame just to learn the source's shape.
        self._get_frame(_probe=True)
        shape = tuple(self.frame.shape)

        # Image dimensions
        self.source_height = shape[0]
        self.source_width = shape[1]
        # NOTE: Perhaps add transparency as well
        self.source_depth = shape[2] if len(self.frame.shape) > 2 else 1

        self.output_height = self.source_height
        self.output_width = self.source_width
        self.output_depth = self.source_depth

        self.resize = None
        if (_width, _height) != (None, None):
            if _height is not None:
                # Fixed height, calculate the width with the same AR
                pct = _height / float(self.source_height)
                _width = int((float(self.source_width) * pct))
            elif _width is not None:
                # Fixed width, calculate the height with the same AR
                pct = _width / float(self.source_width)
                _height = int((float(self.source_height) * pct))
            self.resize = (_width, _height)
            self.output_height = _height
            self.output_width = _width

        # Create a flatmask
        self._make_flatmask()

        # Display some useful info
        print(f"==[ Press ESC to quit.")

    def __del__(self):
        """Release the capture device (if any) and close all OpenCV windows."""
        if isinstance(self.source, cv.VideoCapture):
            self.source.release()
        cv.destroyAllWindows()

    def _make_flatmask(self):
        """
        Create a mask that will can be used to obtain a flattened
        version of the original image with colour channel sampling.

        Each pixel samples exactly one colour channel (one-hot), weighted by
        the channel probabilities below; the sampled one-hot mask is then
        scaled by those probabilities again before being applied in read().
        """
        self.flatmask = None
        if self.source_depth == 1:
            # The image is already flat
            return
        print(f"==[ Creating flatmask...")
        probs = pt.zeros((self.output_height, self.output_width, self.output_depth))
        r_prob = 0.475
        g_prob = 0.475
        b_prob = 0.05
        probs[:, :, 0] = r_prob  # R channel
        probs[:, :, 1] = g_prob  # G channel
        probs[:, :, 2] = b_prob  # B channel
        ohc = OneHotCategorical(probs)
        self.flatmask = ohc.sample()
        self.flatmask[:, :, 0] *= r_prob  # R channel
        self.flatmask[:, :, 1] *= g_prob  # G channel
        self.flatmask[:, :, 2] *= b_prob  # B channel
        # Masked frames collapse to a single channel.
        self.output_depth = 1

    def show(
        self,
        *_frames,
    ):
        """Display the given frames side by side; with no arguments, show the
        most recently read frame. Pressing ESC clears ``self.processing``."""
        if not _frames:
            # BUGFIX: ``*_frames`` is always a tuple (never None), so the
            # original ``if _frames is None`` check could never trigger the
            # fallback to the current frame.
            _frames = [self.frame]
        cv.imshow(
            "Result",
            np.hstack([_img.numpy() for _img in _frames]).astype(np.uint8),
        )
        # Press ESC to quit
        self.processing &= cv.waitKey(10) != 27

    def _get_frame(
        self,
        _center: Optional[pt.Tensor] = None,
        _probe: Optional[bool] = False,
    ):
        """Fetch the next frame, convert colour, resize and wrap in a tensor.

        Args:
            _center: Currently unused; kept for interface stability.
            _probe: When True, only fetch the raw frame (for shape probing)
                and leave ``self.processing`` set.
        """
        self.processing, self.frame = self.op(self.processing, self.source)
        if self.mode is not None:
            self.frame = cv.cvtColor(self.frame, self.mode)
        if _probe:
            self.processing = True
            return
        if self.resize is not None:
            self.frame = cv.resize(self.frame, self.resize, interpolation=cv.INTER_AREA)
        self.frame = pt.from_numpy(self.frame).float()

    def read(
        self,
        center: Optional[pt.Tensor] = None,
    ) -> pt.Tensor:
        """
        Read a frame and flatten it (remove all channel information).
        """
        self._get_frame(center)
        if self.source_depth == 1:
            # The image is already flattened
            return self.frame
        # Per-pixel single-channel sampling via the precomputed flatmask.
        self.frame = self.flatmask * self.frame
        return self.frame.sum(axis=2)

    def scale(
        self,
        _frame: pt.Tensor,
        _min: Optional[float] = 0.0,
        _max: Optional[float] = 255.0,
    ) -> pt.Tensor:
        """
        Min-max normalised version of the frame, mapped to [_min, _max].
        """
        fmin = float(pt.min(_frame))
        fmax = float(pt.max(_frame))
        return _min + (_max - _min) * (_frame - fmin) / (fmax - fmin)

    def stretch(
        self,
        _frame: Optional[pt.Tensor] = None,
    ) -> pt.Tensor:
        """
        Return a 2D image stretched into a 1D (Nx1) column vector,
        in column-major order (transpose first, then flatten).
        """
        if _frame is None:
            _frame = self.frame
        return _frame.t().flatten().reshape(_frame.numel(), 1)

    def fold(
        self,
        _frame: Optional[pt.Tensor] = None,
    ) -> pt.Tensor:
        """
        Return a 2D image from a 1D vector (inverse of stretch()).
        """
        if _frame is None:
            _frame = self.frame
        # BUGFIX: a stray ``return _frame`` here made the reshape below
        # unreachable, so fold() returned the flat vector unchanged.
        return _frame.reshape(self.output_width, self.output_height).t()
| [
"torch.set_printoptions",
"torch.max",
"torch.from_numpy",
"torch.distributions.one_hot_categorical.OneHotCategorical",
"torch.min",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.resize",
"cv2.waitKey",
"torch.zeros",
"cv2.imread"
] | [((579, 613), 'torch.set_printoptions', 'pt.set_printoptions', ([], {'linewidth': '(200)'}), '(linewidth=200)\n', (598, 613), True, 'import torch as pt\n'), ((2896, 2918), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (2916, 2918), True, 'import cv2 as cv\n'), ((3294, 3362), 'torch.zeros', 'pt.zeros', (['(self.output_height, self.output_width, self.output_depth)'], {}), '((self.output_height, self.output_width, self.output_depth))\n', (3302, 3362), True, 'import torch as pt\n'), ((3583, 3607), 'torch.distributions.one_hot_categorical.OneHotCategorical', 'OneHotCategorical', (['probs'], {}), '(probs)\n', (3600, 3607), False, 'from torch.distributions.one_hot_categorical import OneHotCategorical\n'), ((4414, 4428), 'cv2.waitKey', 'cv.waitKey', (['(10)'], {}), '(10)\n', (4424, 4428), True, 'import cv2 as cv\n'), ((4699, 4733), 'cv2.cvtColor', 'cv.cvtColor', (['self.frame', 'self.mode'], {}), '(self.frame, self.mode)\n', (4710, 4733), True, 'import cv2 as cv\n'), ((4870, 4933), 'cv2.resize', 'cv.resize', (['self.frame', 'self.resize'], {'interpolation': 'cv.INTER_AREA'}), '(self.frame, self.resize, interpolation=cv.INTER_AREA)\n', (4879, 4933), True, 'import cv2 as cv\n'), ((5658, 5672), 'torch.min', 'pt.min', (['_frame'], {}), '(_frame)\n', (5664, 5672), True, 'import torch as pt\n'), ((5695, 5709), 'torch.max', 'pt.max', (['_frame'], {}), '(_frame)\n', (5701, 5709), True, 'import torch as pt\n'), ((4956, 4981), 'torch.from_numpy', 'pt.from_numpy', (['self.frame'], {}), '(self.frame)\n', (4969, 4981), True, 'import torch as pt\n'), ((1349, 1364), 'cv2.imread', 'cv.imread', (['_src'], {}), '(_src)\n', (1358, 1364), True, 'import cv2 as cv\n')] |
# Copyright 2020-2021 eBay Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import defaultdict
# Configure root logging once at import time so module loggers emit INFO.
logging.basicConfig(level=logging.INFO)
import numpy as np
import tqdm
from xfraud.glib.graph_loader import GraphData, NaiveHetGraph
from xfraud.glib.utils import timeit
def feature_mock(layer_data, graph, feature_dim=16):
    """Build zero-valued placeholder features for each sampled node type.

    Args:
        layer_data: Mapping of node type -> {node_id: (index, timestamp)}.
        graph: Unused; kept for interface compatibility with real
            feature extractors.
        feature_dim: Width of the zero feature matrix per node.

    Returns:
        Tuple ``(feature, times, indxs, texts)`` where the first three are
        per-node-type dicts (zeros matrix, timestamps, node ids) and
        ``texts`` is always an empty list.
    """
    feature = {}
    times = {}
    indxs = {}
    texts = []
    for ntype, node_map in layer_data.items():
        if node_map:
            node_ids = np.array(list(node_map.keys()))
            payload = np.array(list(node_map.values()))
            feature[ntype] = np.zeros((len(node_ids), feature_dim))
            # Second column of each payload tuple is the timestamp.
            times[ntype] = payload[:, 1]
            indxs[ntype] = node_ids
    return feature, times, indxs, texts
def create_naive_het_graph_from_edges(df):
    """Build a NaiveHetGraph from an edge DataFrame.

    Expects columns src, dst, ts, src_type, dst_type, src_label and seed;
    a 'graph_edge_type' column is added with value 'default' when missing.
    Edges are made symmetric by appending the reversed direction.
    """
    logger = logging.getLogger('factory-naive-het-graph')
    logger.setLevel(logging.INFO)
    with timeit(logger, 'node-type-init'):
        node_ts = {
            node: ts
            for node, ts in df[['src', 'ts']].drop_duplicates().itertuples(
                index=False)}
        # Source and destination nodes both contribute type information.
        node_type = {}
        for cols in (['src', 'src_type'], ['dst', 'dst_type']):
            for node, tp in df[cols].drop_duplicates().itertuples(index=False):
                node_type[node] = tp
    if 'graph_edge_type' not in df:
        df['graph_edge_type'] = 'default'
    with timeit(logger, 'edge-list-init'):
        edge_list = list(
            df[['src', 'dst', 'graph_edge_type']]
            .drop_duplicates().itertuples(index=False))
        # Append reversed edges to make the graph undirected.
        edge_list.extend([(dst, src, etype) for src, dst, etype in edge_list])
        seed_rows = df.loc[df['seed'] > 0,
                           ['src', 'src_label']].drop_duplicates()
        seed_label = {node: label
                      for node, label in seed_rows.itertuples(index=False)}
    return NaiveHetGraph(node_type, edge_list,
                         seed_label=seed_label, node_ts=node_ts)
def create_graph_data_from_edges(df):
    """Build a GraphData object from an edge DataFrame.

    Expects columns src, dst, ts, src_type, dst_type and src_label; a
    'graph_edge_type' column is added with value 'default' when missing.
    Adjacency is grouped per (src_type, edge_type, dst_type) triple and is
    symmetric within each group.
    """
    node_ts = {
        node: ts
        for node, ts in df[['src', 'ts']].drop_duplicates().itertuples(
            index=False)}
    # Both endpoints contribute node-type information.
    node_type = {}
    for cols in (['src', 'src_type'], ['dst', 'dst_type']):
        for node, tp in df[cols].drop_duplicates().itertuples(index=False):
            node_type[node] = tp
    node_label = {
        node: lbl
        for node, lbl in df[['src', 'src_label']].drop_duplicates().itertuples(
            index=False)}
    if 'graph_edge_type' not in df:
        df['graph_edge_type'] = 'default'
    type_adj = {}
    node_gtypes = defaultdict(set)
    graph_edge_type = {}
    for (stype, etype, dtype), group in df.groupby(
            ['src_type', 'graph_edge_type', 'dst_type']):
        gtype = stype, etype, dtype
        neighbors = defaultdict(set)
        for u, v in group[['src', 'dst']].itertuples(index=False):
            node_gtypes[u].add(gtype)
            node_gtypes[v].add(gtype)
            neighbors[u].add(v)
            neighbors[v].add(u)
        # Freeze neighbor sets into tuples for the final adjacency map.
        type_adj[gtype] = {node: tuple(adj) for node, adj in neighbors.items()}
        graph_edge_type[gtype] = etype
    return GraphData(
        type_adj=type_adj,
        node_gtypes=node_gtypes,
        node_ts=node_ts, node_type=node_type,
        graph_edge_type=graph_edge_type,
        node_label=node_label)
def create_naive_het_homo_graph_from_edges(df):
    """Build a homogeneous NaiveHetGraph from an edge DataFrame.

    Every source node receives the single type 'node_link_id' and every
    edge the type 'default'. Expects columns src, dst, src_ts, src_label
    and src_seed.
    """
    logger = logging.getLogger('factory-naive-het-homo-graph')
    logger.setLevel(logging.INFO)
    with timeit(logger, 'node-type-init'):
        node_ts = {
            node: ts
            for node, ts in df[['src', 'src_ts']].drop_duplicates().itertuples(
                index=False)}
        # Homogeneous graph: one shared type for all source nodes.
        node_type = {
            row[0]: 'node_link_id'
            for row in df[['src']].drop_duplicates().itertuples(index=False)}
    with timeit(logger, 'node-seed-init'):
        seed_rows = df.loc[df['src_seed'] > 0,
                           ['src', 'src_label']].drop_duplicates()
        seed_label = {node: label
                      for node, label in seed_rows.itertuples(index=False)}
    with timeit(logger, 'edge-list-init'):
        edge_view = df[['src', 'dst']].drop_duplicates()
        edge_view['graph_edge_type'] = 'default'
        edge_list = edge_view.to_numpy().tolist()
    return NaiveHetGraph(node_type, edge_list,
                        seed_label=seed_label, node_ts=node_ts)
| [
"logging.basicConfig",
"logging.getLogger",
"xfraud.glib.graph_loader.GraphData",
"xfraud.glib.utils.timeit",
"collections.defaultdict",
"xfraud.glib.graph_loader.NaiveHetGraph"
] | [((627, 666), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (646, 666), False, 'import logging\n'), ((1331, 1375), 'logging.getLogger', 'logging.getLogger', (['"""factory-naive-het-graph"""'], {}), "('factory-naive-het-graph')\n", (1348, 1375), False, 'import logging\n'), ((2407, 2482), 'xfraud.glib.graph_loader.NaiveHetGraph', 'NaiveHetGraph', (['node_type', 'edge_list'], {'seed_label': 'seed_label', 'node_ts': 'node_ts'}), '(node_type, edge_list, seed_label=seed_label, node_ts=node_ts)\n', (2420, 2482), False, 'from xfraud.glib.graph_loader import GraphData, NaiveHetGraph\n'), ((3296, 3312), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (3307, 3312), False, 'from collections import defaultdict\n'), ((3827, 3983), 'xfraud.glib.graph_loader.GraphData', 'GraphData', ([], {'type_adj': 'type_adj', 'node_gtypes': 'node_gtypes', 'node_ts': 'node_ts', 'node_type': 'node_type', 'graph_edge_type': 'graph_edge_type', 'node_label': 'node_label'}), '(type_adj=type_adj, node_gtypes=node_gtypes, node_ts=node_ts,\n node_type=node_type, graph_edge_type=graph_edge_type, node_label=node_label\n )\n', (3836, 3983), False, 'from xfraud.glib.graph_loader import GraphData, NaiveHetGraph\n'), ((4095, 4144), 'logging.getLogger', 'logging.getLogger', (['"""factory-naive-het-homo-graph"""'], {}), "('factory-naive-het-homo-graph')\n", (4112, 4144), False, 'import logging\n'), ((5309, 5384), 'xfraud.glib.graph_loader.NaiveHetGraph', 'NaiveHetGraph', (['node_type', 'edge_list'], {'seed_label': 'seed_label', 'node_ts': 'node_ts'}), '(node_type, edge_list, seed_label=seed_label, node_ts=node_ts)\n', (5322, 5384), False, 'from xfraud.glib.graph_loader import GraphData, NaiveHetGraph\n'), ((1420, 1452), 'xfraud.glib.utils.timeit', 'timeit', (['logger', '"""node-type-init"""'], {}), "(logger, 'node-type-init')\n", (1426, 1452), False, 'from xfraud.glib.utils import timeit\n'), ((2018, 2050), 
'xfraud.glib.utils.timeit', 'timeit', (['logger', '"""edge-list-init"""'], {}), "(logger, 'edge-list-init')\n", (2024, 2050), False, 'from xfraud.glib.utils import timeit\n'), ((3496, 3512), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (3507, 3512), False, 'from collections import defaultdict\n'), ((4189, 4221), 'xfraud.glib.utils.timeit', 'timeit', (['logger', '"""node-type-init"""'], {}), "(logger, 'node-type-init')\n", (4195, 4221), False, 'from xfraud.glib.utils import timeit\n'), ((4587, 4619), 'xfraud.glib.utils.timeit', 'timeit', (['logger', '"""node-seed-init"""'], {}), "(logger, 'node-seed-init')\n", (4593, 4619), False, 'from xfraud.glib.utils import timeit\n'), ((4808, 4840), 'xfraud.glib.utils.timeit', 'timeit', (['logger', '"""edge-list-init"""'], {}), "(logger, 'edge-list-init')\n", (4814, 4840), False, 'from xfraud.glib.utils import timeit\n')] |
"""This module defines the Scene related interfaces.
Attributes:
SCENE_RE: Python regular expression that matches SmolDM's adventure scene
SCENE_TITLE_RE: Python regular expression that matches SmolDM's adventure
scene title
SCENE_OPTION_RE: Python regular expression that matches SmolDM's option
:copyright: <NAME> 2018-2019
:license: MIT, see license for details
"""
import re
from dataclasses import dataclass
from typing import List, Dict, Sequence
from loguru import logger
SCENE_RE = r"^\#\s(?:.|\n)*?\={5}$"
SCENE_TITLE_RE = r"^\#.*\n"
SCENE_OPTION_RE = r"^(\d+).\s(.*)\${(.*)}"
@dataclass
class Option:
    """A single choice the player can take within a scene."""

    option_id: int  # numeric label shown before the option text
    destination: List[int]  # candidate destination scene ids (from "${a/b}")
    description: str  # option text displayed to the player
@dataclass
class Scene:
    """One adventure scene: its title, body text and player options."""

    scene_id: int  # 1-based position of the scene in the adventure file
    title: str  # title line (starts with '#'), including trailing newline
    lines: List[str]  # body lines that are neither title nor options
    options: List[Option]  # choices parsed from numbered option lines
def _parse_option(option_param_seq: Sequence) -> Option:
    """Build an Option from an (id, description, destinations) match tuple.

    The third element holds '/'-separated destination scene ids.
    """
    raw_destinations = option_param_seq[2]
    destinations = [int(dest) for dest in raw_destinations.split("/")]
    return Option(option_param_seq[0], destinations, option_param_seq[1])
def _parse_scene(scene_content: str, scene_num: int) -> Scene:
    """Parse one raw scene block into a Scene object.

    Args:
        scene_content: text of a single scene (title line, body, options)
        scene_num: identifier assigned to this scene

    Returns:
        Scene carrying the title, body lines and parsed options.
    """
    # Fix: ``title`` was previously assigned only when a title line matched,
    # so a scene without a '#' line raised UnboundLocalError on return.
    title = ""
    lines = []
    options = []
    for scene_att in scene_content.splitlines(True):
        title_match = re.match(SCENE_TITLE_RE, scene_att)
        if title_match:
            title = title_match.group()
            continue
        option_match = re.match(SCENE_OPTION_RE, scene_att)
        if option_match:
            options.append(_parse_option(option_match.groups()))
            continue
        lines.append(scene_att)
    return Scene(scene_num, title, lines, options)
def load_scenes(scene_file_path: str) -> Dict[int, Scene]:
    """Load scenes from file path.

    Args:
        scene_file_path: file with scene definitions

    Returns:
        scenes: Dictionary with numbered keys and Scene object values,
            starting from 1
    """
    scenes = dict()
    logger.info("Attempting to read adventure file content.")
    # Fix: use a context manager so the file is closed even when read()
    # raises (the previous open/read/close sequence leaked on error).
    with open(scene_file_path, "r") as scene_file:
        scene_file_content = scene_file.read()
    logger.info("Loading scenes....")
    matches = re.finditer(SCENE_RE, scene_file_content, re.MULTILINE)
    for scene_num, scene_match in enumerate(matches, start=1):
        logger.debug(f"Loading scene {scene_num}...")
        scenes[scene_num] = _parse_scene(scene_match.group(), scene_num)
    return scenes
| [
"loguru.logger.debug",
"re.match",
"re.finditer",
"loguru.logger.info"
] | [((2011, 2067), 'loguru.logger.info', 'logger.info', (['"""Attemping to read adventure file content."""'], {}), "('Attemping to read adventure file content.')\n", (2022, 2067), False, 'from loguru import logger\n'), ((2138, 2171), 'loguru.logger.info', 'logger.info', (['"""Loading scenes...."""'], {}), "('Loading scenes....')\n", (2149, 2171), False, 'from loguru import logger\n'), ((2186, 2241), 're.finditer', 're.finditer', (['SCENE_RE', 'scene_file_content', 're.MULTILINE'], {}), '(SCENE_RE, scene_file_content, re.MULTILINE)\n', (2197, 2241), False, 'import re\n'), ((1300, 1335), 're.match', 're.match', (['SCENE_TITLE_RE', 'scene_att'], {}), '(SCENE_TITLE_RE, scene_att)\n', (1308, 1335), False, 'import re\n'), ((1444, 1480), 're.match', 're.match', (['SCENE_OPTION_RE', 'scene_att'], {}), '(SCENE_OPTION_RE, scene_att)\n', (1452, 1480), False, 'import re\n'), ((2313, 2358), 'loguru.logger.debug', 'logger.debug', (['f"""Loading scene {scene_num}..."""'], {}), "(f'Loading scene {scene_num}...')\n", (2325, 2358), False, 'from loguru import logger\n')] |
import random
from task_widgets.task_base.intro_hint import IntroHint
from utils import import_kv
from .calculation import ModeOperandsCalculation
import_kv(__file__)
class IntroHintNumbersCalculation(IntroHint):
    """Intro/hint widget shown for the numbers-calculation task."""
    pass
class NumbersCalculation(ModeOperandsCalculation):
    """Mental-arithmetic task: a three-digit addition with one unknown
    (the first operand, the second operand or the sum, chosen by mode)."""

    FROM = 101
    TO = 899
    TASK_KEY = "numbers_calculation"
    INTRO_HINT_CLASS = IntroHintNumbersCalculation

    def calculate_operands(self):
        """Fill any missing operand so that first + second == result."""
        if not self.first:
            self.first = random.randint(self.FROM, self.TO - 100)
        if not self.result:
            # Keep at least 100 between the first operand and the sum.
            self.result = random.randint(self.first + 100, self.TO)
        if not self.second:
            self.second = self.result - self.first

    def build_text(self):
        """Render the equation with '?' at the slot selected by mode and
        remember the hidden value as the correct answer."""
        text = None
        if self.mode == 0:
            self.correct_answer = self.result
            text = "%s + %s = ?" % (self.first, self.second)
        elif self.mode == 1:
            self.correct_answer = self.first
            text = "? + %s = %s" % (self.second, self.result)
        elif self.mode == 2:
            self.correct_answer = self.second
            text = "%s + ? = %s" % (self.first, self.result)
        return text

    def get_next_variant(self):
        """Produce a distractor answer offset by a multiple of ten."""
        offset = random.randint(-10, +10)
        return self.correct_answer + 10 * offset
| [
"utils.import_kv",
"random.randint"
] | [((149, 168), 'utils.import_kv', 'import_kv', (['__file__'], {}), '(__file__)\n', (158, 168), False, 'from utils import import_kv\n'), ((465, 505), 'random.randint', 'random.randint', (['self.FROM', '(self.TO - 100)'], {}), '(self.FROM, self.TO - 100)\n', (479, 505), False, 'import random\n'), ((543, 584), 'random.randint', 'random.randint', (['(self.first + 100)', 'self.TO'], {}), '(self.first + 100, self.TO)\n', (557, 584), False, 'import random\n'), ((1194, 1218), 'random.randint', 'random.randint', (['(-10)', '(+10)'], {}), '(-10, +10)\n', (1208, 1218), False, 'import random\n')] |
import warnings
import numpy as np
import pandas as pd
import sklearn
from sklearn import metrics
class MetricCatalog:
    """Registry of evaluation metric definitions keyed by name.

    Each catalog entry maps a metric name to a dict with:
        func: callable(y_true, y_pred_or_prob, **params) -> float
        params: extra keyword arguments forwarded to ``func``
        require_score: True when the metric consumes probabilities/scores
            instead of hard label predictions
        binary / multi / regression: task types the metric supports
    "neg_*" entries negate a loss so that larger is always better.
    """
    catalog_dict = {
        'accuracy': {
            'func': metrics.accuracy_score,
            'params': {},
            'require_score': False,
            'binary': True,
            'multi': True},
        # AP is not straightfoward to apply to multiclass
        'average_precision': {
            'func': metrics.average_precision_score,
            'params': {},
            'require_score': True,
            'binary': True,
            'multi': False},
        # Default configuration only handles binary classification
        'f1': {
            'func': metrics.f1_score,
            'params': {'average': 'macro'},
            'require_score': False,
            'binary': True,
            'multi': True},
        'f1_micro': {
            'func': metrics.f1_score,
            'params': {'average': 'micro'},
            'require_score': False,
            'binary': True,
            'multi': True},
        'f1_macro': {
            'func': metrics.f1_score,
            'params': {'average': 'macro'},
            'require_score': False,
            'binary': True,
            'multi': True},
        # Note: log_loss returns "loss" value
        'neg_log_loss': {
            'func': lambda y_true, y_pred: - metrics.log_loss(y_true,
                                                              y_pred),
            'params': {},
            'require_score': True,
            'binary': True,
            'multi': True},
        # Same problem as f1_score
        'precision': {
            'func': metrics.precision_score,
            'params': {'average': 'macro'},
            'require_score': False,
            'binary': True,
            'multi': True},
        'precision_micro': {
            'func': metrics.precision_score,
            'params': {'average': 'micro'},
            'require_score': False,
            'binary': True,
            'multi': True},
        'precision_macro': {
            'func': metrics.precision_score,
            'params': {'average': 'macro'},
            'require_score': False,
            'binary': True,
            'multi': True},
        # Same problem as f1_score
        'recall': {
            'func': metrics.recall_score,
            'params': {'average': 'macro'},
            'require_score': False,
            'binary': True,
            'multi': True},
        'recall_micro': {
            'func': metrics.recall_score,
            'params': {'average': 'micro'},
            'require_score': False,
            'binary': True,
            'multi': True},
        'recall_macro': {
            'func': metrics.recall_score,
            'params': {'average': 'macro'},
            'require_score': False,
            'binary': True,
            'multi': True},
        'roc_auc': {
            'func': metrics.roc_auc_score,
            'params': {},
            'require_score': True,
            'binary': True,
            'multi': False},
        # Regression metrics
        'explained_variance': {
            'func': metrics.explained_variance_score,
            'params': {},
            'require_score': False,
            'regression': True},
        'neg_mean_absolute_error': {
            'func': lambda y_true, y_pred: - metrics.mean_absolute_error(
                y_true, y_pred),
            'params': {},
            'require_score': False,
            'regression': True},
        'neg_mean_squared_error': {
            'func': lambda y_true, y_pred: - metrics.mean_squared_error(
                y_true, y_pred),
            'params': {},
            'require_score': False,
            'regression': True},
        'neg_median_absolute_error': {
            'func': lambda y_true, y_pred: - metrics.median_absolute_error(
                y_true, y_pred),
            'params': {},
            'require_score': False,
            'regression': True},
        'r2': {
            'func': metrics.r2_score,
            'params': {},
            'require_score': False,
            'regression': True}}

    @classmethod
    def get_basic_metrics(cls,
                          task_type="classification"):
        """Return a small default subset of the catalog for a task type.

        Args:
            task_type (str): "classification"/"binary"/"multi" or
                "regression"/"reg".

        Returns:
            dict: metric name -> catalog entry. Implicitly returns None
                for an unrecognized task_type.
        """
        if task_type in ["classification",
                         "binary",
                         "multi"]:
            return dict(
                filter(lambda x: x[0] in ["accuracy",
                                          "precision",
                                          "recall"],
                       cls.catalog_dict.items()))
        elif task_type in ["regression", "reg"]:
            return dict(
                filter(lambda x: x[0] in ["neg_mean_absolute_error",
                                          "neg_mean_squared_error",
                                          "r2"],
                       cls.catalog_dict.items()))
class ErrorSummary(object):
    """Container for the artifacts produced by an error analysis run."""

    def __init__(self,
                 error_dist=None,
                 diversity=None,
                 errors=None):
        """Store the error-analysis results.

        Args:
            error_dist (pd.DataFrame): Error distribution table
            diversity (pd.DataFrame): Diversity metric table
            errors (pd.DataFrame): Misclassified examples
        """
        # Plain attribute storage; all three default to None.
        self.errors = errors
        self.diversity = diversity
        self.error_dist = error_dist
class Evaluate():
def __init__(self,
alearn,
ac=None,
feature_names=None,
random_state=7):
"""Data evaluation class
Args:
alearn (AutoLearn or sklearn classifier instance):
Trained model instance
ac (AutoConverter instance): Autoconverter for converting column
data to feature matrix
feature_names (list): List of feature names (str)
If ac is given, the parameter will be disregarded.
If not, feature_names becomes mandatory.
random_state (int): random seed for pandas.sample. Default: 7
"""
if ac is None:
if feature_names is None:
raise ValueError("Either AutoConverter or feature_names must",
"be given.")
self.feature_names = feature_names
self.ac = None
else:
self.ac = ac
if feature_names is not None:
warnings.warn("AutoConverter instance is given so",
"feature_names will be discarded.")
self.feature_names = None
# TODO(Yoshi): Need to modify when it incorporates regression type
assert hasattr(alearn, "predict")
assert hasattr(alearn, "predict_proba")
if alearn.__class__.__name__ == "AutoLearn":
assert alearn.trained
else:
# scikit-learn classifiers do not have "fitted" flag
# A solution would be calling predict()/predict_proba()
# to see if it returns exception.
pass
self.alearn = alearn
self.rs = random_state
self.orig_eval_s = None
def _task_type(self):
"""Extract task_type from alearn (could be sklearn clf) instance."""
if hasattr(self.alearn, 'task'):
# AutoLearn instance passed
if self.alearn.task == 'regression':
task_type = 'regression'
elif hasattr(self.alearn.learner, "task_type"):
task_type = self.alearn.learner.task_type
else:
raise ValueError("wrong task_type passed to evaluate")
else:
# in this case we have scikit-learn classifier passed
if isinstance(self.alearn, sklearn.base.ClassifierMixin):
if len(self.alearn.classes_) == 2:
task_type = "binary"
else:
task_type = "multi"
elif isinstance(self.alearn, sklearn.base.RegressorMixin):
task_type = "regression"
else:
raise ValueError("Unknown instance type: {}".format(
type(self.alearn)))
return task_type
def _pos_label(self):
if hasattr(self.alearn, "pos_label"):
return self.alearn.pos_label
else:
# Assume that the second index is positive
return 1
def get_feature_indexes(self):
"""Returns di
Returns:
table_colname_pos_dict =
{"main..Ticket": [0, 20], "main..Age": [21, 30], ...}
"""
if self.ac is not None:
all_feature_names = self.ac.feature_names
else:
all_feature_names = self.feature_names
# table_feature_names_cols =
# ["main..Ticket", "main..Ticket", ...]
table_feature_name_cols = list(map(
lambda x: x.split('..')[0] + ".." + x.split('..')[1].split('.')[0],
all_feature_names))
table_colname_pos_dict = {}
begin = 0
table_colname = table_feature_name_cols[0]
counter = 0
for i, feature_name in enumerate(table_feature_name_cols):
if feature_name == table_colname:
counter += 1
else:
# end is not included to the interval
table_colname_pos_dict[table_colname] = [begin, i]
begin = i
counter = 1
table_colname = feature_name
table_colname_pos_dict[table_colname] = [begin,
len(table_feature_name_cols)]
return table_colname_pos_dict
@classmethod
def run_metric_functions(cls,
y,
y_pred,
y_prob,
metric_func_dict,
task_type):
"""Run metric functions
Args:
y (np.ndarray): True label vector
y_pred (np.ndarray): Predicted label vector
y_prob (np.ndarray): Probability vector
None if task_type == "regression"
metric_func_dict (dict): metric func dictionary
see MetricCatalog for details
task_type (str): task type {"binary", "multi", "regression"}
Returns:
orig_eval_s (pd.Series)
"""
if task_type not in ["binary", "multi", "regression"]:
raise ValueError('task_type must be {"binary", "multi",'
'"regression"}')
if task_type == "regression" and y_prob is not None:
warnings.warn("y_prob will be disregarded for"
"task_type=regression")
# Only use evaluation metric that supports task_type
sorted_metric_names = sorted(
filter(lambda x: (task_type in metric_func_dict[x] and
metric_func_dict[x][task_type]),
metric_func_dict.keys()))
# Evaluate prediction
eval_list = []
for metric_name in sorted_metric_names:
metric_info = metric_func_dict[metric_name]
metric_func = metric_info['func']
metric_params = metric_info['params']
assert metric_info[task_type]
if metric_info["require_score"]:
score = metric_func(y, y_prob, **metric_params)
else:
# Evaluation metrics for regression use y_pred
score = metric_func(y, y_pred, **metric_params)
eval_list.append(score)
orig_eval_s = pd.Series(eval_list, index=sorted_metric_names)
return orig_eval_s
def evaluate_performance(self,
X=None,
y=None,
metric_func_dict=None):
"""Evaluate prediction performance.
Args:
df (pd.DataFrame): Main table
X (np.array): Test feature matrix
y (np.array): Test label vector
metric_func_dict (dict): if None, it will use MetricCatalog
{"metric_name": {"func": func,
"params": {},
"require_score": True,
"binary": True,
"multi": True}}
Returns:
orig_eval_s (pd.Series): Evaluation values
"""
if metric_func_dict is None:
metric_func_dict = MetricCatalog.catalog_dict
if (X is None) or (y is None):
if self.ac is None:
raise ValueError(
"X and y are missing since AutoConverter instance was not",
"given.")
if not self.ac.hasdata:
raise RuntimeError(
"AutoConverter instance does not store X and y.")
X = self.ac.X
y = self.ac.y
# 1. pure prediction
y_pred = self.alearn.predict(X)
if self._task_type() in ["binary", "multi"]:
y_prob = self.alearn.predict_proba(X)
if self._task_type() == "binary":
y_prob = y_prob[:, self._pos_label()]
else:
# y_prob is empty for regression
y_prob = None
# y_pred, y_prob, metric_func_dict
self.orig_eval_s = Evaluate.run_metric_functions(y,
y_pred,
y_prob,
metric_func_dict,
self._task_type())
return self.orig_eval_s
def calculate_column_importance(self,
X=None,
y=None,
target=None,
metric_func_dict=None):
"""Evaluate column importance scores
Args:
X (np.array): Test feature matrix
y (np.array): Test label vector
column_importance (bool): Calculate column importance if True
Default=True,
metric_func_dict (dict): if None, it will use MetricCatalog
{"metric_name": {"func": func,
"params": {},
"require_score": True,
"binary": True,
"multi": True}}
Returns:
col_imp_df (pd.DataFrame):
accuracy average_precision f1 ...
tablename colname
main Age 0.012240 0.007844 0.013407 ...
Cabin 0.040392 0.024465 0.044803 ...
Embarked 0.008568 0.006306 0.009215 ...
Fare 0.009792 0.002827 0.010472 ...
Name 0.046512 0.057124 0.050983 ...
Parch 0.000000 0.000600 0.000127 ...
Pclass 0.029376 0.027463 0.031666 ...
Sex 0.227662 0.236873 0.244964 ...
SibSp 0.006120 0.006541 0.006973 ...
Ticket 0.055080 0.072796 0.058413 ...
"""
if metric_func_dict is None:
metric_func_dict = MetricCatalog.catalog_dict
if (X is None) or (y is None):
if self.ac is None:
raise ValueError(
"X and y must be given since it has no AutoConverter",
"instance.")
if not self.ac.hasdata:
raise RuntimeError(
"AutoConverter instance does not store X and y.")
X = self.ac.X
y = self.ac.y
if self.ac is None:
if target is None:
raise ValueError("target parameter must be given since",
"it has no AutoConverter instance.")
else:
target = self.ac.target
if target is not None:
warnings.warn("Give target will be discarded.")
if self.orig_eval_s is None:
self.evaluate_performance(X=X,
y=y,
metric_func_dict=metric_func_dict)
assert self.orig_eval_s is not None
# feature_indexes_dict[table_colname] = [begin, end]
feature_indexes_dict = self.get_feature_indexes()
# Only use evaluation metric that supports task_type
sorted_metric_names = sorted(
filter(lambda x: (self._task_type() in metric_func_dict[x] and
metric_func_dict[x][self._task_type()]),
metric_func_dict.keys()))
# Column importance
col_importance_list = []
col_imp_index_list = []
for table_colname in sorted(feature_indexes_dict.keys()):
tablename, colname = table_colname.split('..')
if tablename == 'main' and colname == target:
continue
col_imp_index_list.append(table_colname)
# Get needed feature columns range and spoil them
beg_idx, end_idx = feature_indexes_dict[table_colname]
X_shuf = X.copy()
np.random.shuffle(X_shuf[:, beg_idx:end_idx])
# Permuted prediction
y_shuf_pred = self.alearn.predict(X_shuf)
if self._task_type() in ["binary", "multi"]:
y_shuf_prob = self.alearn.predict_proba(X_shuf)
if self._task_type() == 'binary':
y_shuf_prob = y_shuf_prob[:, self._pos_label()]
# Calculate evaluation
metric_list = []
for metric_name in sorted_metric_names:
metric_info = metric_func_dict[metric_name]
metric_func = metric_info['func']
metric_params = metric_info['params']
assert metric_info[self._task_type()]
if metric_info["require_score"]:
# orig_score = metric_func(y, y_prob)
orig_score = self.orig_eval_s[metric_name]
shuf_score = metric_func(y, y_shuf_prob, **metric_params)
else:
# orig_score = metric_func(y, y_pred)
orig_score = self.orig_eval_s[metric_name]
shuf_score = metric_func(y, y_shuf_pred, **metric_params)
# TODO(Yoshi): Double check if there is no problem
# for neg_log_loss
if orig_score == 0:
metric_list.append(0.0)
else:
metric_list.append((orig_score - shuf_score) / orig_score)
col_importance_list.append(metric_list)
col_imp_df = pd.DataFrame(col_importance_list)
col_imp_df.columns = sorted_metric_names
tablename_list = list(map(lambda x: x.split('..')[0],
col_imp_index_list))
colname_list = list(map(lambda x: x.split('..')[1],
col_imp_index_list))
assert len(tablename_list) == len(col_imp_df)
assert len(tablename_list) == len(colname_list)
assert "tablename" not in sorted_metric_names
assert "colname" not in sorted_metric_names
col_imp_df["tablename"] = tablename_list
col_imp_df["colname"] = colname_list
col_imp_df.set_index(["tablename", "colname"], inplace=True)
return col_imp_df
def get_top_columns(self, n=3):
"""Returns n most important columns in the DataFrame
Args:
n (integer): number of columns returned
Returns:
list of [tablename..columname, ...] of most
important columns, sorted in descending order
"""
col_imp_df = self.calculate_column_importance()
if self._task_type() == 'binary':
metric = 'roc_auc'
else:
metric = 'neg_log_loss'
new_df = col_imp_df[metric].sort_values(ascending=False).head(n)
return list(map(lambda x: x[0] + '..' + x[1], new_df.index.values))
def get_mispredictions(self, df):
"""Get mispredicted examples based on the classifier
Args:
df (pd.DateFrame): dataset to evaluate.
Returns:
mispred_df (pd.DataFrame):
TODO(Yoshi): subtable support
"""
# Assume AutoConverter is mandatory for the function
if self.ac is None:
raise ValueError("AutoConverter instance is required to call",
"get_mispredictions()")
# TODO(Yoshi): This is not accurate.
# AutoConverter also should have "fitted" flag or something like that.
assert self.ac.hasdata
X, y = self.ac.transform(df)
pred_y = self.alearn.predict(X)
# TODO(Yoshi): Add some columns such as ==prediction== column,
# ==confidence==. To be disccused and will be another ticket.
return df.ix[y != pred_y]
    def stratify_errors(self,
                        df,
                        max_numcat=5):
        """Stratify mispredicted examples.

        Groups the misclassified rows by each known low-cardinality column
        (at most ``max_numcat`` distinct values) and reports per-group
        error counts/rates plus an entropy-based diversity score per
        column.

        TODO(Yoshi): Will avoid hand-crafted configuration

        Args:
            df (pd.DataFrame): dataset to analyze; must be transformable by
                the stored AutoConverter.
            max_numcat (int): maximum number of distinct values a column
                may have to be included in the grouping.

        Returns:
            es (ErrorSummary): error distribution, diversity table and the
                misclassified rows; None when no column qualifies.

        Raises:
            ValueError: if no AutoConverter instance is available.
        """
        # Assume AutoConverter is mandatory for the function
        if self.ac is None:
            raise ValueError("AutoConverter instance is required to call",
                             "stratify_errors()")

        def calc_diversity(s):
            """Calculate entropy as a diversity metric."""
            probs = s / s.sum()
            return (probs * np.log(1.0 / probs)).sum()

        assert self.ac.hasdata
        error_df = self.get_mispredictions(df)
        # Conduct for loop for each column
        colname_list = []
        error_dist_df_list = []
        diversity_list = []
        sorted_colnames = sorted(error_df.columns.tolist())
        for colname in sorted_colnames:
            # Only columns known to the converter are considered.
            if colname not in self.ac.colname_type_dict:
                continue
            error_count_s = error_df[colname].value_counts()
            total_count_s = df[colname].value_counts()
            error_dist_df = pd.concat([error_count_s, total_count_s], axis=1)
            error_dist_df.columns = ["error_count", "total_count"]
            error_dist_df["error_rate"] = (error_dist_df["error_count"] /
                                           error_dist_df["total_count"])
            # Skip high-cardinality columns; grouping them is not useful.
            if len(error_dist_df) > max_numcat:
                continue
            error_dist_df.index.name = "group"
            error_dist_df = error_dist_df.reset_index()
            # Calculate diversity score
            diversity_score = calc_diversity(error_dist_df["error_rate"])
            error_dist_df.loc[:, 'colname'] = colname
            error_dist_df_list.append(error_dist_df)
            diversity_list.append(diversity_score)
            colname_list.append(colname)
        if len(error_dist_df_list) < 1:
            # No grouped result found
            # TODO(Yoshi): Output any message?
            return None
        error_dist_concat_df = pd.concat(error_dist_df_list, axis=0)
        error_dist_concat_df.set_index(["colname", "group"], inplace=True)
        diversity_df = pd.DataFrame({"diversity": diversity_list},
                                    index=colname_list)
        return ErrorSummary(error_dist=error_dist_concat_df,
                            diversity=diversity_df,
                            errors=error_df)
def get_explanations(self,
test_df,
X=None,
topk=3,
max_candidates=10,
num_sampling=10,
spoil_method='random'):
"""Returns explanations (previously known as reason codes)
V1 simply calculates the average difference of class probabilities
no matter whether binary or multiclass
Args:
test_df (pd.DataFrame): Original DataFrame
X (np.array): Test feature matrix
topk (int): select top-k colnames for explanations
max_candidates (int): At most <max_candidates> columns will be
used for explanations (Default 10)
num_sampling (int): Number of sampling iterations
(Default 10)
spoil_method (str): {"random"}
Returns:
"""
# Assume AutoConverter is mandatory for the function
if self.ac is None:
raise ValueError("AutoConverter instance is required to call",
"get_explanations()")
# TODO(Yoshi): spoil_method should be improved
top_colnames = self.get_top_columns(n=max_candidates)
# TODO(Yoshi): it's not straightforward to visualize representative
# values for subtables. Only focus on main table for now
top_colnames = list(filter(lambda x: x.split('..')[0] == 'main',
top_colnames))
assert len(top_colnames) > 0
table_colname_feature_pos_dict = self.get_feature_indexes()
if X is None:
assert self.ac.hasdata
X = self.ac.X
all_pred = self.alearn.predict_proba(X)
table_colname_impact_dict = {}
for table_colname in top_colnames:
abs_diff_probs = np.zeros_like(all_pred)
beg_idx, end_idx = table_colname_feature_pos_dict[table_colname]
for _ in range(num_sampling):
X_shuf = X.copy()
np.random.shuffle(X_shuf[:, beg_idx:end_idx])
all_pred_shuf = self.alearn.predict_proba(X_shuf)
abs_diff_probs += np.abs(all_pred - all_pred_shuf)
# <num_sample>-dimensional vector
impact_scores = np.mean(abs_diff_probs, axis=1)
table_colname_impact_dict[table_colname] = impact_scores
impact_df = pd.DataFrame(table_colname_impact_dict)
assert len(impact_df) == len(test_df)
impact_df.index = test_df.index
all_explanation_list = []
for index, row in impact_df.iterrows():
top_s = row.sort_values(ascending=False).head(topk)
top_colnames = top_s.index.tolist()
cur_explanation_list = []
for table_colname in top_colnames:
# split colanme in to tablename and colname
tablename, colname = table_colname.split("..")
val = test_df.ix[index][colname]
cur_explanation_list.append((colname, val))
all_explanation_list.append(cur_explanation_list)
explain_df = pd.DataFrame({"explanations": all_explanation_list})
assert len(explain_df) == len(test_df)
explain_df.index = test_df.index
return explain_df
| [
"pandas.Series",
"numpy.mean",
"numpy.abs",
"pandas.DataFrame",
"sklearn.metrics.median_absolute_error",
"numpy.log",
"numpy.zeros_like",
"sklearn.metrics.mean_squared_error",
"sklearn.metrics.log_loss",
"warnings.warn",
"sklearn.metrics.mean_absolute_error",
"pandas.concat",
"numpy.random.s... | [((11681, 11728), 'pandas.Series', 'pd.Series', (['eval_list'], {'index': 'sorted_metric_names'}), '(eval_list, index=sorted_metric_names)\n', (11690, 11728), True, 'import pandas as pd\n'), ((19146, 19179), 'pandas.DataFrame', 'pd.DataFrame', (['col_importance_list'], {}), '(col_importance_list)\n', (19158, 19179), True, 'import pandas as pd\n'), ((23608, 23645), 'pandas.concat', 'pd.concat', (['error_dist_df_list'], {'axis': '(0)'}), '(error_dist_df_list, axis=0)\n', (23617, 23645), True, 'import pandas as pd\n'), ((23744, 23807), 'pandas.DataFrame', 'pd.DataFrame', (["{'diversity': diversity_list}"], {'index': 'colname_list'}), "({'diversity': diversity_list}, index=colname_list)\n", (23756, 23807), True, 'import pandas as pd\n'), ((26494, 26533), 'pandas.DataFrame', 'pd.DataFrame', (['table_colname_impact_dict'], {}), '(table_colname_impact_dict)\n', (26506, 26533), True, 'import pandas as pd\n'), ((27215, 27267), 'pandas.DataFrame', 'pd.DataFrame', (["{'explanations': all_explanation_list}"], {}), "({'explanations': all_explanation_list})\n", (27227, 27267), True, 'import pandas as pd\n'), ((10700, 10767), 'warnings.warn', 'warnings.warn', (['"""y_prob will be disregarded fortask_type=regression"""'], {}), "('y_prob will be disregarded fortask_type=regression')\n", (10713, 10767), False, 'import warnings\n'), ((17609, 17654), 'numpy.random.shuffle', 'np.random.shuffle', (['X_shuf[:, beg_idx:end_idx]'], {}), '(X_shuf[:, beg_idx:end_idx])\n', (17626, 17654), True, 'import numpy as np\n'), ((22671, 22720), 'pandas.concat', 'pd.concat', (['[error_count_s, total_count_s]'], {'axis': '(1)'}), '([error_count_s, total_count_s], axis=1)\n', (22680, 22720), True, 'import pandas as pd\n'), ((25927, 25950), 'numpy.zeros_like', 'np.zeros_like', (['all_pred'], {}), '(all_pred)\n', (25940, 25950), True, 'import numpy as np\n'), ((26373, 26404), 'numpy.mean', 'np.mean', (['abs_diff_probs'], {'axis': '(1)'}), '(abs_diff_probs, axis=1)\n', (26380, 26404), 
True, 'import numpy as np\n'), ((6420, 6511), 'warnings.warn', 'warnings.warn', (['"""AutoConverter instance is given so"""', '"""feature_names will be discarded."""'], {}), "('AutoConverter instance is given so',\n 'feature_names will be discarded.')\n", (6433, 6511), False, 'import warnings\n'), ((16382, 16429), 'warnings.warn', 'warnings.warn', (['"""Give target will be discarded."""'], {}), "('Give target will be discarded.')\n", (16395, 16429), False, 'import warnings\n'), ((26120, 26165), 'numpy.random.shuffle', 'np.random.shuffle', (['X_shuf[:, beg_idx:end_idx]'], {}), '(X_shuf[:, beg_idx:end_idx])\n', (26137, 26165), True, 'import numpy as np\n'), ((26266, 26298), 'numpy.abs', 'np.abs', (['(all_pred - all_pred_shuf)'], {}), '(all_pred - all_pred_shuf)\n', (26272, 26298), True, 'import numpy as np\n'), ((1353, 1385), 'sklearn.metrics.log_loss', 'metrics.log_loss', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1369, 1385), False, 'from sklearn import metrics\n'), ((3278, 3321), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3305, 3321), False, 'from sklearn import metrics\n'), ((3516, 3558), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3542, 3558), False, 'from sklearn import metrics\n'), ((3756, 3801), 'sklearn.metrics.median_absolute_error', 'metrics.median_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3785, 3801), False, 'from sklearn import metrics\n'), ((22109, 22128), 'numpy.log', 'np.log', (['(1.0 / probs)'], {}), '(1.0 / probs)\n', (22115, 22128), True, 'import numpy as np\n')] |