ngram
listlengths
0
82k
[ "__all__ = [ \"BackendConfig\", \"CheckpointStrategy\", \"get_dataset_shard\", \"load_checkpoint\", \"local_rank\", \"report\", \"save_checkpoint\",", "ray.train.session import (get_dataset_shard, local_rank, load_checkpoint, report, save_checkpoint, world_rank, world_size) from", "world_rank, world_size) from ray.train.trainer import Trainer, TrainingIterator __all__ = [", "import BackendConfig from ray.train.callbacks import TrainingCallback from ray.train.checkpoint import CheckpointStrategy", "ray.train.checkpoint import CheckpointStrategy from ray.train.session import (get_dataset_shard, local_rank, load_checkpoint, report,", "from ray.train.session import (get_dataset_shard, local_rank, load_checkpoint, report, save_checkpoint, world_rank, world_size)", "import CheckpointStrategy from ray.train.session import (get_dataset_shard, local_rank, load_checkpoint, report, save_checkpoint,", "<filename>python/ray/train/__init__.py from ray.train.backend import BackendConfig from ray.train.callbacks import TrainingCallback from", "import Trainer, TrainingIterator __all__ = [ \"BackendConfig\", \"CheckpointStrategy\", \"get_dataset_shard\", \"load_checkpoint\",", "report, save_checkpoint, world_rank, world_size) from ray.train.trainer import Trainer, TrainingIterator __all__", "TrainingIterator __all__ = [ \"BackendConfig\", \"CheckpointStrategy\", \"get_dataset_shard\", \"load_checkpoint\", \"local_rank\", \"report\",", "from ray.train.callbacks import TrainingCallback from ray.train.checkpoint import CheckpointStrategy from ray.train.session", "ray.train.callbacks import TrainingCallback from ray.train.checkpoint import CheckpointStrategy from ray.train.session import", "\"CheckpointStrategy\", \"get_dataset_shard\", \"load_checkpoint\", \"local_rank\", \"report\", \"save_checkpoint\", \"TrainingIterator\", \"TrainingCallback\", \"Trainer\", \"world_rank\",", "TrainingCallback from ray.train.checkpoint import CheckpointStrategy from ray.train.session import 
(get_dataset_shard, local_rank,", "from ray.train.checkpoint import CheckpointStrategy from ray.train.session import (get_dataset_shard, local_rank, load_checkpoint,", "(get_dataset_shard, local_rank, load_checkpoint, report, save_checkpoint, world_rank, world_size) from ray.train.trainer import", "ray.train.backend import BackendConfig from ray.train.callbacks import TrainingCallback from ray.train.checkpoint import", "\"get_dataset_shard\", \"load_checkpoint\", \"local_rank\", \"report\", \"save_checkpoint\", \"TrainingIterator\", \"TrainingCallback\", \"Trainer\", \"world_rank\", \"world_size\"", "BackendConfig from ray.train.callbacks import TrainingCallback from ray.train.checkpoint import CheckpointStrategy from", "\"BackendConfig\", \"CheckpointStrategy\", \"get_dataset_shard\", \"load_checkpoint\", \"local_rank\", \"report\", \"save_checkpoint\", \"TrainingIterator\", \"TrainingCallback\", \"Trainer\",", "[ \"BackendConfig\", \"CheckpointStrategy\", \"get_dataset_shard\", \"load_checkpoint\", \"local_rank\", \"report\", \"save_checkpoint\", \"TrainingIterator\", \"TrainingCallback\",", "Trainer, TrainingIterator __all__ = [ \"BackendConfig\", \"CheckpointStrategy\", \"get_dataset_shard\", \"load_checkpoint\", \"local_rank\",", "save_checkpoint, world_rank, world_size) from ray.train.trainer import Trainer, TrainingIterator __all__ =", "import TrainingCallback from ray.train.checkpoint import CheckpointStrategy from ray.train.session import (get_dataset_shard,", "\"load_checkpoint\", \"local_rank\", \"report\", \"save_checkpoint\", \"TrainingIterator\", \"TrainingCallback\", \"Trainer\", \"world_rank\", \"world_size\" ]", "= [ \"BackendConfig\", \"CheckpointStrategy\", \"get_dataset_shard\", \"load_checkpoint\", \"local_rank\", \"report\", \"save_checkpoint\", \"TrainingIterator\",", "local_rank, load_checkpoint, report, save_checkpoint, world_rank, world_size) from ray.train.trainer import Trainer,", "world_size) from ray.train.trainer import Trainer, 
TrainingIterator __all__ = [ \"BackendConfig\",", "import (get_dataset_shard, local_rank, load_checkpoint, report, save_checkpoint, world_rank, world_size) from ray.train.trainer", "from ray.train.backend import BackendConfig from ray.train.callbacks import TrainingCallback from ray.train.checkpoint", "CheckpointStrategy from ray.train.session import (get_dataset_shard, local_rank, load_checkpoint, report, save_checkpoint, world_rank,", "from ray.train.trainer import Trainer, TrainingIterator __all__ = [ \"BackendConfig\", \"CheckpointStrategy\",", "load_checkpoint, report, save_checkpoint, world_rank, world_size) from ray.train.trainer import Trainer, TrainingIterator", "ray.train.trainer import Trainer, TrainingIterator __all__ = [ \"BackendConfig\", \"CheckpointStrategy\", \"get_dataset_shard\"," ]
[ "from model.contact import Contact from model.group import Group import random", "db): app.open_home_page() contact = db.get_contact_list() if len(contact) == 0: app.contact.create(Contact(firstname", "def test_del_contact_from_group(app, db): app.open_home_page() contact = db.get_contact_list() if len(contact) ==", "in l def test_del_contact_from_group(app, db): app.open_home_page() contact = db.get_contact_list() if", "group_rand = random.choice(group) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) l = db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand", "= random.choice(group) app.contact.open_contacts_in_group(group_rand.id) contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id)) if len(contacts_in_group) == 0:", "len(group) == 0: app.group.create(Group(name=\"test\")) contact_rand = random.choice(contact) group_rand = random.choice(group)", "app.contact.add_contact_to_group(contact_rand.id, group_rand.id) app.contact.open_contacts_in_group(group_rand.id) db.get_contacts_in_group(Group(id=group_rand.id)) app.contact.del_contact_from_group() l = db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand.id", "random.choice(contact) group_rand = random.choice(group) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) l = db.get_contacts_in_group(Group(id=group_rand.id)) assert", "if len(contacts_in_group) == 0: app.contact.view_all_contacts() contact_rand = random.choice(contact) app.contact.add_contact_to_group(contact_rand.id, group_rand.id)", "len(contacts_in_group) == 0: app.contact.view_all_contacts() contact_rand = random.choice(contact) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) app.contact.open_contacts_in_group(group_rand.id)", "db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand in l def test_del_contact_from_group(app, db): app.open_home_page() contact", "assert contact_rand in l def test_del_contact_from_group(app, db): 
app.open_home_page() contact =", "db.get_contact_list() if len(contact) == 0: app.contact.create(Contact(firstname = \"test firstname changed\"))", "db.get_contacts_in_group(Group(id=group_rand.id)) if len(contacts_in_group) == 0: app.contact.view_all_contacts() contact_rand = random.choice(contact) app.contact.add_contact_to_group(contact_rand.id,", "app.group.create(Group(name=\"test\")) contact_rand = random.choice(contact) group_rand = random.choice(group) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) l", "app.contact.view_all_contacts() contact_rand = random.choice(contact) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) app.contact.open_contacts_in_group(group_rand.id) db.get_contacts_in_group(Group(id=group_rand.id)) app.contact.del_contact_from_group() l", "model.contact import Contact from model.group import Group import random def", "random def test_add_contact_in_group(app, db): app.open_home_page() contact = db.get_contact_list() if len(contact)", "<gh_stars>0 from model.contact import Contact from model.group import Group import", "len(contact) == 0: app.contact.create(Contact(firstname = \"test firstname changed\")) group =", "app.contact.open_contacts_in_group(group_rand.id) db.get_contacts_in_group(Group(id=group_rand.id)) app.contact.del_contact_from_group() l = db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand.id not in", "model.group import Group import random def test_add_contact_in_group(app, db): app.open_home_page() contact", "db.get_group_list() if len(group) == 0: app.group.create(Group(name=\"test\")) contact_rand = random.choice(contact) group_rand", "changed\")) group = db.get_group_list() if len(group) == 0: app.group.create(Group(name=\"test\")) contact_rand", "contact_rand in l def test_del_contact_from_group(app, db): app.open_home_page() contact = db.get_contact_list()", "contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id)) if len(contacts_in_group) == 0: 
app.contact.view_all_contacts() contact_rand =", "import random def test_add_contact_in_group(app, db): app.open_home_page() contact = db.get_contact_list() if", "app.contact.open_contacts_in_group(group_rand.id) contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id)) if len(contacts_in_group) == 0: app.contact.view_all_contacts() contact_rand", "if len(contact) == 0: app.contact.create(Contact(firstname = \"test firstname changed\")) group", "Group import random def test_add_contact_in_group(app, db): app.open_home_page() contact = db.get_contact_list()", "changed\")) group = db.get_group_list() if len(group) == 0: app.group.create(Group(name=\"test\")) group_rand", "== 0: app.contact.view_all_contacts() contact_rand = random.choice(contact) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) app.contact.open_contacts_in_group(group_rand.id) db.get_contacts_in_group(Group(id=group_rand.id))", "= db.get_contacts_in_group(Group(id=group_rand.id)) if len(contacts_in_group) == 0: app.contact.view_all_contacts() contact_rand = random.choice(contact)", "= db.get_group_list() if len(group) == 0: app.group.create(Group(name=\"test\")) group_rand = random.choice(group)", "test_del_contact_from_group(app, db): app.open_home_page() contact = db.get_contact_list() if len(contact) == 0:", "if len(group) == 0: app.group.create(Group(name=\"test\")) group_rand = random.choice(group) app.contact.open_contacts_in_group(group_rand.id) contacts_in_group", "= db.get_contact_list() if len(contact) == 0: app.contact.create(Contact(firstname = \"test firstname", "contact_rand = random.choice(contact) group_rand = random.choice(group) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) l =", "== 0: app.group.create(Group(name=\"test\")) group_rand = random.choice(group) app.contact.open_contacts_in_group(group_rand.id) contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id))", "app.contact.create(Contact(firstname = \"test firstname 
changed\")) group = db.get_group_list() if len(group)", "= \"test firstname changed\")) group = db.get_group_list() if len(group) ==", "random.choice(contact) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) app.contact.open_contacts_in_group(group_rand.id) db.get_contacts_in_group(Group(id=group_rand.id)) app.contact.del_contact_from_group() l = db.get_contacts_in_group(Group(id=group_rand.id)) assert", "0: app.group.create(Group(name=\"test\")) contact_rand = random.choice(contact) group_rand = random.choice(group) app.contact.add_contact_to_group(contact_rand.id, group_rand.id)", "l = db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand in l def test_del_contact_from_group(app, db):", "= random.choice(group) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) l = db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand in", "if len(group) == 0: app.group.create(Group(name=\"test\")) contact_rand = random.choice(contact) group_rand =", "Contact from model.group import Group import random def test_add_contact_in_group(app, db):", "app.open_home_page() contact = db.get_contact_list() if len(contact) == 0: app.contact.create(Contact(firstname =", "firstname changed\")) group = db.get_group_list() if len(group) == 0: app.group.create(Group(name=\"test\"))", "db.get_group_list() if len(group) == 0: app.group.create(Group(name=\"test\")) group_rand = random.choice(group) app.contact.open_contacts_in_group(group_rand.id)", "contact = db.get_contact_list() if len(contact) == 0: app.contact.create(Contact(firstname = \"test", "== 0: app.group.create(Group(name=\"test\")) contact_rand = random.choice(contact) group_rand = random.choice(group) app.contact.add_contact_to_group(contact_rand.id,", "= db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand in l def test_del_contact_from_group(app, db): app.open_home_page()", "app.group.create(Group(name=\"test\")) group_rand = random.choice(group) 
app.contact.open_contacts_in_group(group_rand.id) contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id)) if len(contacts_in_group)", "app.contact.add_contact_to_group(contact_rand.id, group_rand.id) l = db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand in l def", "group_rand.id) l = db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand in l def test_del_contact_from_group(app,", "db.get_contacts_in_group(Group(id=group_rand.id)) app.contact.del_contact_from_group() l = db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand.id not in l", "group_rand = random.choice(group) app.contact.open_contacts_in_group(group_rand.id) contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id)) if len(contacts_in_group) ==", "0: app.contact.view_all_contacts() contact_rand = random.choice(contact) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) app.contact.open_contacts_in_group(group_rand.id) db.get_contacts_in_group(Group(id=group_rand.id)) app.contact.del_contact_from_group()", "= random.choice(contact) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) app.contact.open_contacts_in_group(group_rand.id) db.get_contacts_in_group(Group(id=group_rand.id)) app.contact.del_contact_from_group() l = db.get_contacts_in_group(Group(id=group_rand.id))", "\"test firstname changed\")) group = db.get_group_list() if len(group) == 0:", "len(group) == 0: app.group.create(Group(name=\"test\")) group_rand = random.choice(group) app.contact.open_contacts_in_group(group_rand.id) contacts_in_group =", "def test_add_contact_in_group(app, db): app.open_home_page() contact = db.get_contact_list() if len(contact) ==", "random.choice(group) app.contact.open_contacts_in_group(group_rand.id) contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id)) if len(contacts_in_group) == 0: app.contact.view_all_contacts()", "from model.group import Group import random def 
test_add_contact_in_group(app, db): app.open_home_page()", "= db.get_group_list() if len(group) == 0: app.group.create(Group(name=\"test\")) contact_rand = random.choice(contact)", "group_rand.id) app.contact.open_contacts_in_group(group_rand.id) db.get_contacts_in_group(Group(id=group_rand.id)) app.contact.del_contact_from_group() l = db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand.id not", "test_add_contact_in_group(app, db): app.open_home_page() contact = db.get_contact_list() if len(contact) == 0:", "group = db.get_group_list() if len(group) == 0: app.group.create(Group(name=\"test\")) contact_rand =", "0: app.group.create(Group(name=\"test\")) group_rand = random.choice(group) app.contact.open_contacts_in_group(group_rand.id) contacts_in_group = db.get_contacts_in_group(Group(id=group_rand.id)) if", "import Contact from model.group import Group import random def test_add_contact_in_group(app,", "import Group import random def test_add_contact_in_group(app, db): app.open_home_page() contact =", "= random.choice(contact) group_rand = random.choice(group) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) l = db.get_contacts_in_group(Group(id=group_rand.id))", "l def test_del_contact_from_group(app, db): app.open_home_page() contact = db.get_contact_list() if len(contact)", "== 0: app.contact.create(Contact(firstname = \"test firstname changed\")) group = db.get_group_list()", "0: app.contact.create(Contact(firstname = \"test firstname changed\")) group = db.get_group_list() if", "contact_rand = random.choice(contact) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) app.contact.open_contacts_in_group(group_rand.id) db.get_contacts_in_group(Group(id=group_rand.id)) app.contact.del_contact_from_group() l =", "random.choice(group) app.contact.add_contact_to_group(contact_rand.id, group_rand.id) l = db.get_contacts_in_group(Group(id=group_rand.id)) assert contact_rand in l", "group = db.get_group_list() if 
len(group) == 0: app.group.create(Group(name=\"test\")) group_rand =" ]
[ "@admin.register(User) class UserAdmin(admin.ModelAdmin): list_display = ['email', 'nickname'] list_display_links = ['email',", "django.contrib import admin from accounts.models import User, Profile, UserFollow @admin.register(User)", "accounts.models import User, Profile, UserFollow @admin.register(User) class UserAdmin(admin.ModelAdmin): list_display =", "UserAdmin(admin.ModelAdmin): list_display = ['email', 'nickname'] list_display_links = ['email', 'nickname'] admin.site.register(Profile)", "Profile, UserFollow @admin.register(User) class UserAdmin(admin.ModelAdmin): list_display = ['email', 'nickname'] list_display_links", "<reponame>LikeLion-CAU-9th/Django-fancy-coder from django.contrib import admin from accounts.models import User, Profile,", "from django.contrib import admin from accounts.models import User, Profile, UserFollow", "from accounts.models import User, Profile, UserFollow @admin.register(User) class UserAdmin(admin.ModelAdmin): list_display", "import User, Profile, UserFollow @admin.register(User) class UserAdmin(admin.ModelAdmin): list_display = ['email',", "list_display = ['email', 'nickname'] list_display_links = ['email', 'nickname'] admin.site.register(Profile) admin.site.register(UserFollow)", "UserFollow @admin.register(User) class UserAdmin(admin.ModelAdmin): list_display = ['email', 'nickname'] list_display_links =", "import admin from accounts.models import User, Profile, UserFollow @admin.register(User) class", "User, Profile, UserFollow @admin.register(User) class UserAdmin(admin.ModelAdmin): list_display = ['email', 'nickname']", "class UserAdmin(admin.ModelAdmin): list_display = ['email', 'nickname'] list_display_links = ['email', 'nickname']", "admin from accounts.models import User, Profile, UserFollow @admin.register(User) class UserAdmin(admin.ModelAdmin):" ]
[ "orig_cmap = cmap # If the user do not requires", "(-180, 180, -90, 90) if lats is None: lats =", "cmap, then we use it for all the fields cmap", "= np.amax(lons) + expand_ext bbox = (minLon, maxLon, minLat, maxLat)", "None _norm = None # Use to normalize the colormap.", "read xarrays, numpy arrays, and numpy arrays in dictionaries vizobj", "plt.title(title) plt.show() def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [], title='', file_name_prefix='',", "many rows will the figure have :param cols: how many", "self._show_var_names: c_title = F'{var_names[idx_var]} {title}' else: c_title = F'{title}' if", "the field name in the titles _additional_polygons = [] #", "the z-levels # Verify the index of the z_levels are", "and max colorbars for each field if not(np.all(np.isnan(mincbar))): if type(mincbar)", "transform=self._projection) else: c_ax.contour(c_img, extent=self._extent, transform=self._projection) if len(self._additional_polygons) > 0: pol_lats", "print(F\"{name} = {getattr(ds, name)}\") print(\"\\n========== Dimensions =========\") for name in", "else: if self._background == BackgroundType.BLUE_MARBLE_LR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if self._background", "Dimensions =========\") for name in ds.dims: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Coordinates", "projection=ccrs.PlateCarree(), **kwargs): # All the arguments that are passed to", "def plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) -> None:", "plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def make_video_from_images(self, input_folder, output_file, fps=24): files =", "''' Obtains the bbox of the coordinates. 
If included threshold", "-1: return cmocean.cm.oxy elif np.any([field_name.find(x) != -1 for x in", "colorbars for each field if not(np.all(np.isnan(mincbar))): if type(mincbar) is list:", "import LogNorm from io_utils.io_common import create_folder from viz_utils.constants import PlotMode,", "plt.close() def getExtent(self, lats, lons, expand_ext=0.0): ''' Obtains the bbox", "if flip_data: c_np_data = np.flip(np.flip(c_np_data), axis=1) npdict_3d[field_name] = np.expand_dims(c_np_data, axis=0)", "= lons self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels = False for arg_name,", "a single field with shape [x,y] :param var_names: :param title:", "figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()}) ax.set_extent(bbox) # If we do not", "pol_lons = [] for c_polygon in self._additional_polygons: if isinstance(c_polygon, shapely.geometry.linestring.LineString):", "field e:{e}\") gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--') # gl.xlabel_style", "1: return self._figsize * cols * self._fig_prop, self._figsize else: return", "= mincbar if not(np.all(np.isnan(maxcbar))): if type(mincbar) is list: c_maxcbar =", "matplotlib from matplotlib.colors import LogNorm from io_utils.io_common import create_folder from", "c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm) else: im = c_ax.imshow(c_img,", "in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]): return cmocean.cm.speed class EOAImageVisualizer: \"\"\"This", "cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size, True) out_video.write(np_im[:, :, ::-1]) out_video.release() cv2.destroyAllWindows() print(\"Done!", "function to receive raw 2D numpy data. It calls the", "pylab import numpy as np import cmocean import shapely import", "limits of the locations ax.gridlines() im = ax.scatter(lons, lats, s=s,", "receive raw 2D numpy data. 
It calls the 'main' function", "category='cultural', name='roads', scale='10m', facecolor='none') ax.add_feature(roads, edgecolor='black') return ax def add_states(self,", "figure have :param cols: how many colswill the figure have", "30 _units = '' _max_imgs_per_row = 4 _mincbar = np.nan", "listdir(input_folder) files.sort() print(F\"Generating video file: {output_file}\") out_video = -1 for", "for name in ds.coords: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Variables =========\") for", "as plt import matplotlib from matplotlib.colors import LogNorm from io_utils.io_common", "the variable if self._auto_colormap and orig_cmap is None: cmap =", "vec_cmap = self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) c_ax.streamplot(x, y, u, v, transform=self._projection,", "you want to add a streamplot of a vector field.", "# vizobj = EOAImageVisualizer(disp_images=True, output_folder='output', # lats=[lats],lons=[lons]) def __init__(self, disp_images=True,", "shapely.geometry.polygon.Polygon): x, y = c_polygon.exterior.xy pol_lats += y pol_lons +=", "= cmocean.cm.solar if 'color' in vec_keys: c = self._vector_field['color'] if", "matplotlib.colors import LogNorm from io_utils.io_common import create_folder from viz_utils.constants import", "elif field_name.find('binary') != -1: return cmocean.cm.oxy elif np.any([field_name.find(x) != -1", "# TODO how to make this automatic and works always", "to receive raw 2D numpy data. 
It calls the 'main'", "arrays in dictionaries vizobj = new EOAImageVisualizer(disp_images=True, output_folder='output', lats=[lats],lons=[lons]) \"\"\"", "= {getattr(ds, name)}\") print(\"\\n========== Dimensions =========\") for name in ds.dims:", "'color': '#aaaaaa', 'weight':'bold'} font_coords = {'size': self._font_size*.6} gl.xlabel_style = font_coords", "based on the name of the field _show_var_names = False", "file_name = F'{file_name_prefix}' pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight') self._close_figure() def plot_2d_data_xr(self, np_variables:list,", "EOA data. :param c_img: 2D array :param ax: geoaxes :return:", "== 1: return self._figsize * cols * self._fig_prop, self._figsize else:", ":param rot_90: :param show_color_bar: :param plot_mode: :param mincbar: :param maxcbar:", "= c_polygon.xy elif isinstance(c_polygon, shapely.geometry.polygon.Polygon): x, y = c_polygon.exterior.xy pol_lats", "c_maxcbar = maxcbar[idx_var] else: c_maxcbar = maxcbar # By default", "numpy data. 
It calls the 'main' function for 3D plotting", "mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to receive raw 2D numpy", "show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def plot_2d_data_np(self, np_variables:list, var_names:list, title='', file_name_prefix='',", "pol_lons, 0.5)) if self._vector_field != None: try: u = self._vector_field['u']", "os from PIL import Image import cv2 from os import", "show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def make_video_from_images(self, input_folder, output_file, fps=24): files", "{'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'} font_coords = {'size': self._font_size*.6} gl.xlabel_style", "== BackgroundType.BLUE_MARBLE_HR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if self._background == BackgroundType.TOPO: img", "= [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def make_video_from_images(self, input_folder, output_file,", "vec_keys = self._vector_field.keys() c = 'r' density = 1 linewidth", "ax.add_feature(roads, edgecolor='black') return ax def add_states(self, ax): # Names come", "y pol_lons += x c_ax.plot(x,y, transform=self._projection, c='r') # Adds a", "name in ds.coords: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Variables =========\") for cur_variable_name", "c_zlevel, c_slice in enumerate(z_levels): # Iterates over the z-levels #", "= ds.variables[cur_variable_name] print(F\"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}\") def add_roads(self, ax):", "im = ax.scatter(lons, lats, s=s, c=c, cmap=cmap) fig.colorbar(im, ax=ax, shrink=0.7)", "This function plots points in a map :param bbox: :return:", "cbar.set_label(self._units, fontsize=font_size_cbar*1.2) def plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan)", "w/h :return: \"\"\" if rows 
== 1: return self._figsize *", "If there is an array of colormaps we select the", "edgecolor='gray') return ax def plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1, c='blue',", "kwargs.items(): self.__dict__[\"_\" + arg_name] = arg_value print(self.__dict__[\"_\" + arg_name]) def", "of the coordinates. If included threshold then increases the bbox", "= ax.scatter(lons, lats, s=s, c=c, cmap=cmap) fig.colorbar(im, ax=ax, shrink=0.7) ax.coastlines()", "attr, value): '''Generic setter for all the properties of the", "to see the polygons c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) + pol_lons,", "if not(np.all(np.isnan(maxcbar))): if type(mincbar) is list: c_maxcbar = maxcbar[idx_var] else:", "plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1, c='blue', cmap='plasma', title=''): ''' This", "minLat, maxLat) return bbox def xr_summary(self, ds): \"\"\" Prints a", "# Iterates over the z-levels # Verify the index of", "cmap=cmap, transform=self._projection, norm=self._norm) else: im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap,", "out_video = -1 for i, file_name in enumerate(files[0:36]): if i", "data (maps). 
It is made to read xarrays, numpy arrays,", "im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode, mincbar=c_mincbar, maxcbar=c_maxcbar) if self._show_var_names:", "\"\"\" Prints a summary of the netcdf (global attributes, variables,", "* .5 # TODO how to make this automatic and", "if show_color_bar: font_size_cbar = self._font_size * .5 # TODO how", "self._figsize), subplot_kw={'projection': ccrs.PlateCarree()}) ax.set_extent(bbox) # If we do not set", "shrink=0.7) ax.coastlines() plt.title(title) plt.show() def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [],", "''' if bbox is None: bbox = (-180, 180, -90,", "cmap='plasma', title=''): ''' This function plots points in a map", "It must be a dictionary with keys x,y,u,v # and", "fields if rows*cols == 1: # Single figure ax =", "c_ax.contourf(self._lons, self._lats, c_img, num_colors=255, cmap='inferno', extent=self._extent) else: if np.isnan(mincbar): im", "**kwargs): # All the arguments that are passed to the", "cmap=None, flip_data=False, rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function", "list(self._lons) + pol_lons, 0.5)) if self._vector_field != None: try: u", "* cols * self._fig_prop, self._figsize * rows def _close_figure(self): \"\"\"Depending", "gl.right_labels = False return im def get_proper_size(self, rows, cols): \"\"\"", "prop: Proportion is the proportion to use w/h :return: \"\"\"", "= font_coords gl.ylabel_style = font_coords gl.top_labels = False gl.right_labels =", "plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) -> None: \"\"\"", "_show_var_names = False # Includes the name of the field", "s=1, c='blue', cmap='plasma', title=''): ''' This function plots points in", "SHAPELY GEOMETRIES In case we want to include additional polygons", "return cmocean.cm.haline elif field_name.find('error') != -1: return cmocean.cm.diff 
elif field_name.find('binary')", "Adds a threshold to the plot to see the polygons", "scale='10m', facecolor='none') ax.add_feature(roads, edgecolor='black') return ax def add_states(self, ax): #", "90) if lats is None: lats = self.lats if lons", "[], title='', file_name_prefix='', cmap=None, z_names = [], show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan,", "z_names = [], show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): \"\"\" Plots multiple", "origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm) if mode == PlotMode.CONTOUR", "!= -1 for x in ('ssh', 'srfhgt', 'adt','surf_el')]): # cmaps_fields.append(cmocean.cm.deep_r)", "ax = _axs else: ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var] #", "== 3: c_np_data = np_variables[i, :, :] else: c_np_data =", "isinstance(c_polygon, shapely.geometry.linestring.LineString): x,y = c_polygon.xy elif isinstance(c_polygon, shapely.geometry.polygon.Polygon): x, y", "# lats=[lats],lons=[lons]) def __init__(self, disp_images=True, output_folder='output', lats=[-90,90], lons =[-180,180], projection=ccrs.PlateCarree(),", "print(\"\\n========== Variables =========\") for cur_variable_name in ds.variables: cur_var = ds[cur_variable_name]", "('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]): return cmocean.cm.speed class EOAImageVisualizer: \"\"\"This class", "mincbar[idx_var] else: c_mincbar = mincbar if not(np.all(np.isnan(maxcbar))): if type(mincbar) is", "transform=self._projection, norm=self._norm) if mode == PlotMode.CONTOUR or mode == PlotMode.MERGED:", "we chose the min and max colorbars for each field", "self._auto_colormap and orig_cmap is None: cmap = select_colormap(c_var) else: #", "transform=self._projection, norm=self._norm) else: im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar,", "= self.lats if lons is None: lons = self.lons fig,", "we select the one for this field if type(orig_cmap) is", 
"[fields, x, y] or just a single field with shape", "pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight') self._close_figure() def plot_2d_data_xr(self, np_variables:list, var_names:list, title='', file_name_prefix='',", "'g', 'w', 'k'] _figsize = 8 _font_size = 30 _units", "== BackgroundType.TOPO: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if self._background == BackgroundType.BATHYMETRY: img", "origin=origin, cmap=cmap, transform=self._projection, norm=self._norm) else: im = c_ax.imshow(c_img, extent=self._extent, origin=origin,", "180, -90, 90) if lats is None: lats = self.lats", "file_name_prefix='', cmap=None, flip_data=False, rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper", "flip_data: :param rot_90: :param show_color_bar: :param plot_mode: :param mincbar: :param", ":param file_name_prefix: :param cmap: :param flip_data: :param rot_90: :param show_color_bar:", "def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [], title='', file_name_prefix='', cmap=None, z_names", "'./eoas_pyutils'# This is the path where the eoas_utils folder is", "'cmap' in vec_keys: vec_cmap = self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) c_ax.streamplot(x, y,", "fig.colorbar(im, ax=ax, shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar) if label != \"\": cbar.set_label(label, fontsize=font_size_cbar*1.2)", "a summary of the netcdf (global attributes, variables, etc) :param", "\"\"\" print(\"\\n========== Global attributes =========\") for name in ds.ncattrs(): print(F\"{name}", "bbox=None, s=1, c='blue', cmap='plasma', title=''): ''' This function plots points", "F\"Z - level: {c_slice_txt}\" ax.set_title(c_title, fontsize=self._font_size) self.add_colorbar(fig, im, ax, show_color_bar)", "+ arg_name]) def __getattr__(self, attr): '''Generic getter for all the", "'sss', 'sal')]): return 
cmocean.cm.haline elif field_name.find('error') != -1: return cmocean.cm.diff", "def make_video_from_images(self, input_folder, output_file, fps=24): files = listdir(input_folder) files.sort() print(F\"Generating", "properties of the class''' return self.__dict__[\"_\" + attr] def __setattr__(self,", "attributes =========\") for name in ds.attrs: print(F\"{name} = {getattr(ds, name)}\")", ":param ax: geoaxes :return: \"\"\" c_ax = ax if self._flip_data:", "import cartopy.crs as ccrs import cartopy.feature as cfeature import cartopy", "else: c_title = F'{title}' if len(z_levels) > 1: c_title +=", "rot_90: :param show_color_bar: :param plot_mode: :param mincbar: :param maxcbar: :return:", "# Names come from: https://www.naturalearthdata.com/features/ # -- Add states roads", "len(z_names) != 0: c_slice_txt = z_names[c_slice] else: c_slice_txt = c_slice", "plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [], title='', file_name_prefix='', cmap=None, z_names =", "if 'linewidth' in vec_keys: linewidth = self._vector_field['linewidth'] if 'cmap' in", "of the variable if self._auto_colormap and orig_cmap is None: cmap", "from io_utils.io_common import create_folder from viz_utils.constants import PlotMode, BackgroundType import", "this automatic and works always cbar = fig.colorbar(im, ax=ax, shrink=.7)", "self.lons fig, ax = plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()})", "if self._background == BackgroundType.CARTO_DEF: c_ax.stock_img() else: if self._background == BackgroundType.BLUE_MARBLE_LR:", "self._vector_field['y'] vec_keys = self._vector_field.keys() c = 'r' density = 1", "BackgroundType import pylab import numpy as np import cmocean import", "F'{var_names[idx_var]} {title}' else: c_title = F'{title}' if len(z_levels) > 1:", "want to include additional polygons in the plots (all of", "= (-180, 180, -90, 90) if lats is None: lats", "in the plots (all of them) # If you want", "= 
np.flip(np.flip(c_np_data), axis=1) npdict_3d[field_name] = np.expand_dims(c_np_data, axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0],", "# Single field if rot_90: c_np_data = np.rot90(c_np_data) if flip_data:", "= range(np_variables[var_names[0]].shape[0]) cols = np.min((self._max_imgs_per_row, len(var_names))) if cols == len(var_names):", "come from: https://www.naturalearthdata.com/features/ # -- Add states states_provinces = cfeature.NaturalEarthFeature(", "of plots assuming we are plotting Geospatial data (maps). It", "img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if self._background == BackgroundType.BATHYMETRY: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg'))", ":return: \"\"\" print(\"\\n========== Global attributes =========\") for name in ds.ncattrs():", "= np.rot90(c_np_data) if flip_data: c_np_data = np.flip(np.flip(c_np_data), axis=1) npdict_3d[field_name] =", "assuming we are plotting Geospatial data (maps). 
It is made", "cur_var = ds[cur_variable_name] print(F\"{cur_variable_name}: {cur_var.dims} {cur_var.shape}\") def nc_summary(self, ds): \"\"\"", "ax.set_title(c_title, fontsize=self._font_size) self.add_colorbar(fig, im, ax, show_color_bar) plt.tight_layout(pad=.5) file_name = F'{file_name_prefix}'", "calls the 'main' function for 3D plotting :param np_variables: Numpy", "with shape [x,y] :param var_names: :param title: :param file_name_prefix: :param", "properties of the class''' self.__dict__[\"_\" + attr] = value def", "def xr_summary(self, ds): \"\"\" Prints a summary of the netcdf", "mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) -> None: \"\"\" Plots a 2D img", "c_slice in enumerate(z_levels): # Iterates over the z-levels # Verify", "class makes plenty of plots assuming we are plotting Geospatial", "can set a min and max colorbar values to 'force'", "field name in the titles _additional_polygons = [] # MUST", "vec_keys: density = self._vector_field['density'] if 'linewidth' in vec_keys: linewidth =", "colorbar values to 'force' same color bar to all plots", "self._projection = projection bbox = self.getExtent(lats, lons) self._extent = bbox", "colswill the figure have :param prop: Proportion is the proportion", "except Exception as e: print(F\"Couldn't add vector field e:{e}\") gl", "PlotMode.CONTOUR or mode == PlotMode.MERGED: c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) if mode ==", "1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()}) ax.set_extent(bbox) # If we do", "'main' function for 3D plotting :param np_variables: Numpy variables. They", "facecolor='none') ax.add_feature(roads, edgecolor='black') return ax def add_states(self, ax): # Names", "we use it for all the fields cmap = orig_cmap", "vector field. 
It must be a dictionary with keys x,y,u,v", "output_folder='output', lats=[lats],lons=[lons]) \"\"\" _COLORS = ['y', 'r', 'c', 'b', 'g',", "np.nan # User can set a min and max colorbar", "on it. self._disp_images = disp_images self._output_folder = output_folder self._projection =", "pol_lats = [] pol_lons = [] for c_polygon in self._additional_polygons:", "= [] pol_lons = [] for c_polygon in self._additional_polygons: if", "lons: inc_threshold: Returns: ''' minLat = np.amin(lats) - expand_ext maxLat", "var_names:list, title='', file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper", "img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree()) if mode ==", "c_ax.contour(c_img, extent=self._extent, transform=self._projection) if len(self._additional_polygons) > 0: pol_lats = []", "else: # If it is just one cmap, then we", "in ('ssh', 'srfhgt', 'adt','surf_el')]): # cmaps_fields.append(cmocean.cm.deep_r) return cmocean.cm.curl elif np.any([field_name.find(x)", "pol_lats, list(self._lons) + pol_lons, 0.5)) if self._vector_field != None: try:", "= np.expand_dims(np_variables[field_name], axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names", "fig, im, ax, show_color_bar, label=\"\"): # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if show_color_bar: font_size_cbar", "False return im def get_proper_size(self, rows, cols): \"\"\" Obtains the", "for all the properties of the class''' return self.__dict__[\"_\" +", "{cur_var.shape}\") def nc_summary(self, ds): \"\"\" Prints a summary of the", "return cmocean.cm.oxy elif np.any([field_name.find(x) != -1 for x in ('u_',", "c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) -> None: \"\"\" Plots", ":, :] 
else: c_np_data = np_variables # Single field if", "expand_ext maxLon = np.amax(lons) + expand_ext bbox = (minLon, maxLon,", "title=''): ''' This function plots points in a map :param", ":return: \"\"\" print(\"\\n========== Global attributes =========\") for name in ds.attrs:", "import matplotlib.pyplot as plt import matplotlib from matplotlib.colors import LogNorm", "it is just one cmap, then we use it for", "increases the bbox in all directions with that thres Args:", "np.any([field_name.find(x) != -1 for x in ('ssh', 'srfhgt', 'adt','surf_el')]): #", "import join import matplotlib.pyplot as plt import matplotlib from matplotlib.colors", "(bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels = False for arg_name, arg_value in kwargs.items(): self.__dict__[\"_\"", "# Selects the colormap based on the name of the", "are displayed or just closed\"\"\" if self._disp_images: plt.show() else: plt.close()", "PIL import Image import cv2 from os import listdir from", "= [] for c_polygon in self._additional_polygons: if isinstance(c_polygon, shapely.geometry.linestring.LineString): x,y", "name if the field it chooses a colormap from cmocean", "__getattr__(self, attr): '''Generic getter for all the properties of the", "\"\"\" Obtains the proper size for a figure. 
:param rows:", "array :param ax: geoaxes :return: \"\"\" c_ax = ax if", "cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to receive", "title='', file_name_prefix='', cmap=None, flip_data=False, rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): '''", "- expand_ext maxLon = np.amax(lons) + expand_ext bbox = (minLon,", "{file_name}\") c_file = join(input_folder, file_name) im = Image.open(c_file) np_im =", "1: # Single figure ax = _axs else: ax =", "rows = len(z_levels) else: rows = int(len(z_levels) * np.ceil(len(var_names)/cols)) fig,", "= c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--') # gl.xlabel_style = {'size': self._font_size/2,", "ax, show_color_bar) plt.tight_layout(pad=.5) file_name = F'{file_name_prefix}' pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight') self._close_figure()", "in vec_keys: density = self._vector_field['density'] if 'linewidth' in vec_keys: linewidth", "c_ax.contour(c_img, extent=self._extent, transform=self._projection) if mode == PlotMode.MERGED: if self._contour_labels: c_ax.contour(c_img,", "to normalize the colormap. 
For example with LogNorm # vizobj", "how many rows will the figure have :param cols: how", "rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to receive", "flip_data=False, rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to", "bbox def xr_summary(self, ds): \"\"\" Prints a summary of the", "axis=1) npdict_3d[field_name] = np.expand_dims(c_np_data, axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix,", "etc) :param ds: :return: \"\"\" print(\"\\n========== Global attributes =========\") for", "that are passed to the constructor of the class MUST", "!= \"\": cbar.set_label(label, fontsize=font_size_cbar*1.2) else: cbar.set_label(self._units, fontsize=font_size_cbar*1.2) def plot_slice_eoa(self, c_img,", "-1 for x in ('ssh', 'srfhgt', 'adt','surf_el')]): # cmaps_fields.append(cmocean.cm.deep_r) return", "_axs.flatten()[c_zlevel*len(var_names) + idx_var] # Here we chose the min and", "== 0: z_levels = range(np_variables[var_names[0]].shape[0]) cols = np.min((self._max_imgs_per_row, len(var_names))) if", "np_variables: :param var_names: :param title: :param file_name_prefix: :param cmap: :param", "range(np_variables[var_names[0]].shape[0]) cols = np.min((self._max_imgs_per_row, len(var_names))) if cols == len(var_names): rows", "field if type(orig_cmap) is list: cmap = orig_cmap[idx_var] else: #", "= (np_im.shape[1], np_im.shape[0]) out_video = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size, True)", "'w', 'k'] _figsize = 8 _font_size = 30 _units =", "enumerate(files[0:36]): if i % 10 == 0: print(F\"Adding file #", "self._lons = lons self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels = False for", "= False for arg_name, arg_value in kwargs.items(): self.__dict__[\"_\" + arg_name]", "for each field if 
not(np.all(np.isnan(mincbar))): if type(mincbar) is list: c_mincbar", "npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap,", "mincbar: :param maxcbar: :return: ''' npdict_3d = {} for i,", "By default we select the colorbar from the name of", "of them) # If you want to add a streamplot", "int(len(z_levels) * np.ceil(len(var_names)/cols)) fig, _axs = plt.subplots(rows, cols, figsize=self.get_proper_size(rows, cols),", "shape [x,y] :param var_names: :param title: :param file_name_prefix: :param cmap:", "subplot_kw={'projection': ccrs.PlateCarree()}) ax.set_extent(bbox) # If we do not set this,", "dictionary with keys x,y,u,v # and optional density, color, cmap,", "c_maxcbar = np.nan for idx_var, c_var in enumerate(var_names): # Iterate", "Proportion is the proportion to use w/h :return: \"\"\" if", "* self._fig_prop, self._figsize * rows def _close_figure(self): \"\"\"Depending on what", "density = self._vector_field['density'] if 'linewidth' in vec_keys: linewidth = self._vector_field['linewidth']", "!= None: try: u = self._vector_field['u'] v = self._vector_field['v'] x", "-1 for i, file_name in enumerate(files[0:36]): if i % 10", "keys x,y,u,v # and optional density, color, cmap, arrowsize, arrowstyle,", "np.any([field_name.find(x) != -1 for x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]):", "'b', 'g', 'w', 'k'] _figsize = 8 _font_size = 30", "in enumerate(z_levels): # Iterates over the z-levels # Verify the", ":param title: :param file_name_prefix: :param cmap: :param flip_data: :param rot_90:", "variables, etc) :param ds: :return: \"\"\" print(\"\\n========== Global attributes =========\")", "self._lats = lats self._lons = lons self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels", "bbox = (-180, 180, -90, 90) if lats is None:", "PlotMode.MERGED: c_ax.set_extent(self.getExtent(list(self._lats), 
list(self._lons))) if mode == PlotMode.CONTOUR: im = c_ax.contour(c_img,", "origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree()) if mode == PlotMode.RASTER or mode ==", "= ds.variables for cur_variable_name in netCDFvars.keys(): cur_var = ds.variables[cur_variable_name] print(F\"Dimensions", "cmocean import shapely import cartopy.crs as ccrs import cartopy.feature as", "print(\"\\n========== Variables =========\") netCDFvars = ds.variables for cur_variable_name in netCDFvars.keys():", "from os import listdir from os.path import join import matplotlib.pyplot", "'u-vel.', 'v-vel.','velocity')]): return cmocean.cm.speed class EOAImageVisualizer: \"\"\"This class makes plenty", "\"\"\" _COLORS = ['y', 'r', 'c', 'b', 'g', 'w', 'k',", "self._fig_prop, self._figsize else: return self._figsize * cols * self._fig_prop, self._figsize", "c_ax = ax if self._flip_data: origin = 'lower' else: origin", "the polygons c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) + pol_lons, 0.5)) if", "ax.scatter(lons, lats, s=s, c=c, cmap=cmap) fig.colorbar(im, ax=ax, shrink=0.7) ax.coastlines() plt.title(title)", "Names come from: https://www.naturalearthdata.com/features/ # -- Add states roads =", "mincbar=mincbar, maxcbar=maxcbar) def plot_2d_data_np(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap=None, flip_data=False,", "EOAImageVisualizer(disp_images=True, output_folder='output', lats=[lats],lons=[lons]) \"\"\" _COLORS = ['y', 'r', 'c', 'b',", "list(self._lons))) if mode == PlotMode.CONTOUR: im = c_ax.contour(c_img, extent=self._extent, transform=self._projection)", "in ds.attrs: print(F\"{name} = {getattr(ds, name)}\") print(\"\\n========== Dimensions =========\") for", "elif np.any([field_name.find(x) != -1 for x in ('temp', 'sst', 'temperature')]):", "elif field_name.find('error') != -1: return cmocean.cm.diff elif field_name.find('binary') != -1:", "False for arg_name, arg_value in kwargs.items(): 
self.__dict__[\"_\" + arg_name] =", "of a vector field. It must be a dictionary with", "arg_name] = arg_value print(self.__dict__[\"_\" + arg_name]) def __getattr__(self, attr): '''Generic", "self._vector_field['x'] y = self._vector_field['y'] vec_keys = self._vector_field.keys() c = 'r'", "np.rot90(c_np_data) if flip_data: c_np_data = np.flip(np.flip(c_np_data), axis=1) npdict_3d[field_name] = np.expand_dims(c_np_data,", "vec_keys: linewidth = self._vector_field['linewidth'] if 'cmap' in vec_keys: vec_cmap =", "= np.nan for idx_var, c_var in enumerate(var_names): # Iterate over", "'lower' else: origin = 'upper' if self._background == BackgroundType.CARTO_DEF: c_ax.stock_img()", "to the plot to see the polygons c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats,", "for 3D plotting :param np_variables: Numpy variables. They can be", "lons = self.lons fig, ax = plt.subplots(1, 1, figsize=(self._figsize, self._figsize),", "in ds.ncattrs(): print(F\"{name} = {getattr(ds, name)}\") print(\"\\n========== Variables =========\") netCDFvars", "must be a dictionary with keys x,y,u,v # and optional", "def __init__(self, disp_images=True, output_folder='output', lats=[-90,90], lons =[-180,180], projection=ccrs.PlateCarree(), **kwargs): #", "lats=[-90,90], lons =[-180,180], projection=ccrs.PlateCarree(), **kwargs): # All the arguments that", "cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='50m', facecolor='none') ax.add_feature(states_provinces, edgecolor='gray') return ax def", "if lons is None: lons = self.lons fig, ax =", "shape [fields, x, y] or just a single field with", "size for a figure. 
:param rows: how many rows will", "in ds.variables: cur_var = ds[cur_variable_name] print(F\"{cur_variable_name}: {cur_var.dims} {cur_var.shape}\") def nc_summary(self,", "print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Coordinates =========\") for name in ds.coords: print(F\"{name}:", "are plotting Geospatial data (maps). It is made to read", "{cur_var.shape}\") def add_roads(self, ax): # Names come from: https://www.naturalearthdata.com/features/ #", "the figures are displayed or just closed\"\"\" if self._disp_images: plt.show()", "self._disp_images: plt.show() else: plt.close() def getExtent(self, lats, lons, expand_ext=0.0): '''", "+ attr] def __setattr__(self, attr, value): '''Generic setter for all", "num_colors=255, cmap='inferno', extent=self._extent) else: if np.isnan(mincbar): im = c_ax.imshow(c_img, extent=self._extent,", "lats, s=s, c=c, cmap=cmap) fig.colorbar(im, ax=ax, shrink=0.7) ax.coastlines() plt.title(title) plt.show()", "the main project _contourf = False # When plotting non-regular", "= projection bbox = self.getExtent(lats, lons) self._extent = bbox self._lats", "netCDFvars = ds.variables for cur_variable_name in netCDFvars.keys(): cur_var = ds.variables[cur_variable_name]", "\"\"\" c_ax = ax if self._flip_data: origin = 'lower' else:", "np_im.shape[0]) out_video = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size, True) out_video.write(np_im[:, :,", "of the field name in the titles _additional_polygons = []", "c_var in enumerate(var_names): # Iterate over the fields if rows*cols", "{output_file}\") out_video = -1 for i, file_name in enumerate(files[0:36]): if", "for x in ('vorticity', 'vort')]): return cmocean.cm.curl elif np.any([field_name.find(x) !=", "field_name.find('error') != -1: return cmocean.cm.diff elif field_name.find('binary') != -1: return", "arg_value print(self.__dict__[\"_\" + arg_name]) def __getattr__(self, attr): '''Generic getter for", "== BackgroundType.CARTO_DEF: c_ax.stock_img() 
else: if self._background == BackgroundType.BLUE_MARBLE_LR: img =", "Single field if rot_90: c_np_data = np.rot90(c_np_data) if flip_data: c_np_data", "0: pol_lats = [] pol_lons = [] for c_polygon in", "Wrapper function to receive raw 2D numpy data. It calls", "all directions with that thres Args: lats: lons: inc_threshold: Returns:", "and need precision _background = BackgroundType.BLUE_MARBLE_LR # Select the background", "for c_polygon in self._additional_polygons: if isinstance(c_polygon, shapely.geometry.linestring.LineString): x,y = c_polygon.xy", "_flip_data = True _eoas_pyutils_path = './eoas_pyutils'# This is the path", "maxcbar=np.nan): \"\"\" Plots multiple z_levels for multiple fields. It uses", "output_folder='output', # lats=[lats],lons=[lons]) def __init__(self, disp_images=True, output_folder='output', lats=[-90,90], lons =[-180,180],", "each depth, and columns for each variable \"\"\" create_folder(self._output_folder) orig_cmap", "import pylab import numpy as np import cmocean import shapely", "if mode == PlotMode.MERGED: if self._contour_labels: c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent,", "the field _show_var_names = False # Includes the name of", "np.nan for idx_var, c_var in enumerate(var_names): # Iterate over the", "is None: lats = self.lats if lons is None: lons", "=========\") for name in ds.coords: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Variables =========\")", "# cmaps_fields.append(cmocean.cm.deep_r) return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x", "elif np.any([field_name.find(x) != -1 for x in ('vorticity', 'vort')]): return", "on the name if the field it chooses a colormap", "maxLon, minLat, maxLat) return bbox def xr_summary(self, ds): \"\"\" Prints", "== len(var_names): rows = len(z_levels) else: rows = int(len(z_levels) *", "where the eoas_utils folder is stored with respect to the", "c_img, num_colors=255, cmap='inferno', extent=self._extent) else: if 
np.isnan(mincbar): im = c_ax.imshow(c_img,", "for 3D plotting :param np_variables: :param var_names: :param title: :param", "enumerate(z_levels): # Iterates over the z-levels # Verify the index", "to use w/h :return: \"\"\" if rows == 1: return", "from matplotlib.colors import LogNorm from io_utils.io_common import create_folder from viz_utils.constants", "if self._vector_field != None: try: u = self._vector_field['u'] v =", "# All the arguments that are passed to the constructor", "just closed\"\"\" if self._disp_images: plt.show() else: plt.close() def getExtent(self, lats,", "- level: {c_slice_txt}\" ax.set_title(c_title, fontsize=self._font_size) self.add_colorbar(fig, im, ax, show_color_bar) plt.tight_layout(pad=.5)", "max colorbar values to 'force' same color bar to all", "states roads = cfeature.NaturalEarthFeature( category='cultural', name='roads', scale='10m', facecolor='none') ax.add_feature(roads, edgecolor='black')", "= None _norm = None # Use to normalize the", "= orig_cmap im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode, mincbar=c_mincbar, maxcbar=c_maxcbar)", "(all of them) # If you want to add a", "3 vec_cmap = cmocean.cm.solar if 'color' in vec_keys: c =", "normalize the colormap. For example with LogNorm # vizobj =", "var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode,", "scale='50m', facecolor='none') ax.add_feature(states_provinces, edgecolor='gray') return ax def plot_scatter_data(self, lats=None, lons=None,", "cmocean.cm.diff elif field_name.find('binary') != -1: return cmocean.cm.oxy elif np.any([field_name.find(x) !=", "vector field e:{e}\") gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--') #", "data. 
:param c_img: 2D array :param ax: geoaxes :return: \"\"\"", "else: plt.close() def getExtent(self, lats, lons, expand_ext=0.0): ''' Obtains the", "field_name in enumerate(var_names): if len(np_variables.shape) == 3: c_np_data = np_variables[i,", "include additional polygons in the plots (all of them) #", "MUST have its name on it. self._disp_images = disp_images self._output_folder", "== PlotMode.MERGED: if self._contour_labels: c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection) else:", "cmocean.cm.speed class EOAImageVisualizer: \"\"\"This class makes plenty of plots assuming", "attr] def __setattr__(self, attr, value): '''Generic setter for all the", "label != \"\": cbar.set_label(label, fontsize=font_size_cbar*1.2) else: cbar.set_label(self._units, fontsize=font_size_cbar*1.2) def plot_slice_eoa(self,", "https://www.naturalearthdata.com/features/ # -- Add states states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines',", "netCDFvars.keys(): cur_var = ds.variables[cur_variable_name] print(F\"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}\") def", "expand_ext bbox = (minLon, maxLon, minLat, maxLat) return bbox def", "= [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def plot_2d_data_np(self, np_variables:list, var_names:list,", "isinstance(c_polygon, shapely.geometry.polygon.Polygon): x, y = c_polygon.exterior.xy pol_lats += y pol_lons", "v, transform=self._projection, density=density, color=c, cmap=vec_cmap, linewidth=linewidth) except Exception as e:", "return im def get_proper_size(self, rows, cols): \"\"\" Obtains the proper", "self.lats if lons is None: lons = self.lons fig, ax", "c_np_data = np.rot90(c_np_data) if flip_data: c_np_data = np.flip(np.flip(c_np_data), axis=1) npdict_3d[field_name]", "Variables =========\") netCDFvars = ds.variables for cur_variable_name in netCDFvars.keys(): 
cur_var", "is None: bbox = (-180, 180, -90, 90) if lats", "idx_var] # Here we chose the min and max colorbars", "extent=self._extent, transform=self._projection) else: c_ax.contour(c_img, extent=self._extent, transform=self._projection) if len(self._additional_polygons) > 0:", "inc_threshold: Returns: ''' minLat = np.amin(lats) - expand_ext maxLat =", "type(orig_cmap) is list: cmap = orig_cmap[idx_var] else: # If it", "the name of the field _show_var_names = False # Includes", "of colormaps we select the one for this field if", "index of the z_levels are the original ones. if len(z_names)", "of the z_levels are the original ones. if len(z_names) !=", "facecolor='none') ax.add_feature(states_provinces, edgecolor='gray') return ax def plot_scatter_data(self, lats=None, lons=None, bbox=None,", "-> None: \"\"\" Plots a 2D img for EOA data.", "= 3 vec_cmap = cmocean.cm.solar if 'color' in vec_keys: c", "makes plenty of plots assuming we are plotting Geospatial data", "the 'main' function for 3D plotting :param np_variables: Numpy variables.", "Add states roads = cfeature.NaturalEarthFeature( category='cultural', name='roads', scale='10m', facecolor='none') ax.add_feature(roads,", "if self._disp_images: plt.show() else: plt.close() def getExtent(self, lats, lons, expand_ext=0.0):", "name of the field name in the titles _additional_polygons =", "lats=None, lons=None, bbox=None, s=1, c='blue', cmap='plasma', title=''): ''' This function", "the properties of the class''' return self.__dict__[\"_\" + attr] def", "c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) c_ax.streamplot(x, y, u, v, transform=self._projection, density=density, color=c, cmap=vec_cmap,", "'y', 'r', 'c', 'b', 'g', 'w', 'k'] _figsize = 8", "idx_var, c_var in enumerate(var_names): # Iterate over the fields if", "_eoas_pyutils_path = './eoas_pyutils'# This is the path where the eoas_utils", "max colorbars for each field if not(np.all(np.isnan(mincbar))): if type(mincbar) is", 
"transform=ccrs.PlateCarree()) if mode == PlotMode.RASTER or mode == PlotMode.MERGED: if", "if lats is None: lats = self.lats if lons is", "name of the variable if self._auto_colormap and orig_cmap is None:", "= np.nan _flip_data = True _eoas_pyutils_path = './eoas_pyutils'# This is", "z_levels are the original ones. if len(z_names) != 0: c_slice_txt", "cfeature import cartopy def select_colormap(field_name): ''' Based on the name", "''' npdict_3d = {} for i, field_name in enumerate(var_names): if", "{cur_var.dimensions} {cur_var.shape}\") def add_roads(self, ax): # Names come from: https://www.naturalearthdata.com/features/", "self._contour_labels, colors='r', extent=self._extent, transform=self._projection) else: c_ax.contour(c_img, extent=self._extent, transform=self._projection) if len(self._additional_polygons)", "Names come from: https://www.naturalearthdata.com/features/ # -- Add states states_provinces =", "threshold then increases the bbox in all directions with that", "mode == PlotMode.MERGED: if self._contour_labels: c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection)", "font_coords gl.top_labels = False gl.right_labels = False return im def", "Prints a summary of the netcdf (global attributes, variables, etc)", "len(z_levels) else: rows = int(len(z_levels) * np.ceil(len(var_names)/cols)) fig, _axs =", "= ax if self._flip_data: origin = 'lower' else: origin =", "c_maxcbar = maxcbar # By default we select the colorbar", "np.ceil(len(var_names)/cols)) fig, _axs = plt.subplots(rows, cols, figsize=self.get_proper_size(rows, cols), subplot_kw={'projection': self._projection})", "the name if the field it chooses a colormap from", "Numpy variables. 
They can be with shape [fields, x, y]", "self._figsize * cols * self._fig_prop, self._figsize else: return self._figsize *", "displayed or just closed\"\"\" if self._disp_images: plt.show() else: plt.close() def", "self._contourf: im = c_ax.contourf(self._lons, self._lats, c_img, num_colors=255, cmap='inferno', extent=self._extent) else:", "color bar to all plots _maxcbar = np.nan _flip_data =", "ax.coastlines() plt.title(title) plt.show() def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [], title='',", "It uses rows for each depth, and columns for each", "each variable \"\"\" create_folder(self._output_folder) orig_cmap = cmap # If the", "add_roads(self, ax): # Names come from: https://www.naturalearthdata.com/features/ # -- Add", "plots points in a map :param bbox: :return: ''' if", "states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='50m', facecolor='none') ax.add_feature(states_provinces, edgecolor='gray') return", "If it is just one cmap, then we use it", "example with LogNorm # vizobj = EOAImageVisualizer(disp_images=True, output_folder='output', # lats=[lats],lons=[lons])", "def plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1, c='blue', cmap='plasma', title=''): '''", "fields cmap = orig_cmap im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode,", "and columns for each variable \"\"\" create_folder(self._output_folder) orig_cmap = cmap", "'density' in vec_keys: density = self._vector_field['density'] if 'linewidth' in vec_keys:", "self._background == BackgroundType.CARTO_DEF: c_ax.stock_img() else: if self._background == BackgroundType.BLUE_MARBLE_LR: img", ":param cols: how many colswill the figure have :param prop:", "gl.xlabel_style = font_coords gl.ylabel_style = font_coords gl.top_labels = False gl.right_labels", "font_coords gl.ylabel_style = font_coords gl.top_labels = False gl.right_labels = False", "values to 
'force' same color bar to all plots _maxcbar", "mode == PlotMode.MERGED: if self._contourf: im = c_ax.contourf(self._lons, self._lats, c_img,", "cbar.ax.tick_params(labelsize=font_size_cbar) if label != \"\": cbar.set_label(label, fontsize=font_size_cbar*1.2) else: cbar.set_label(self._units, fontsize=font_size_cbar*1.2)", "-1: return cmocean.cm.diff elif field_name.find('binary') != -1: return cmocean.cm.oxy elif", ":param np_variables: :param var_names: :param title: :param file_name_prefix: :param cmap:", "if bbox is None: bbox = (-180, 180, -90, 90)", "arrowsize, arrowstyle, minlength _vector_field = None _norm = None #", "select_colormap(c_var) else: # If there is an array of colormaps", "self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) c_ax.streamplot(x, y, u, v, transform=self._projection, density=density, color=c,", "== BackgroundType.BLUE_MARBLE_LR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if self._background == BackgroundType.BLUE_MARBLE_HR: img", "(global attributes, variables, etc) :param ds: :return: \"\"\" print(\"\\n========== Global", "is None: lons = self.lons fig, ax = plt.subplots(1, 1,", "attr): '''Generic getter for all the properties of the class'''", "plt.show() def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [], title='', file_name_prefix='', cmap=None,", "ax, show_color_bar, label=\"\"): # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if show_color_bar: font_size_cbar = self._font_size", "not set this, it will cropp it to the limits", "= [], show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): \"\"\" Plots multiple z_levels", "= plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree()) if mode == PlotMode.RASTER", "data. 
It calls the 'main' function for 3D plotting :param", "for all the fields cmap = orig_cmap im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:],", "of the class''' self.__dict__[\"_\" + attr] = value def add_colorbar(self,", "= np.amax(lats) + expand_ext minLon = np.amin(lons) - expand_ext maxLon", "c_title = F'{var_names[idx_var]} {title}' else: c_title = F'{title}' if len(z_levels)", "\"\"\" Plots multiple z_levels for multiple fields. It uses rows", "Obtains the bbox of the coordinates. If included threshold then", ":param flip_data: :param rot_90: :param show_color_bar: :param plot_mode: :param mincbar:", "BackgroundType.BATHYMETRY: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree()) if mode", "lats, lons, expand_ext=0.0): ''' Obtains the bbox of the coordinates.", "= self._vector_field['x'] y = self._vector_field['y'] vec_keys = self._vector_field.keys() c =", "lats: lons: inc_threshold: Returns: ''' minLat = np.amin(lats) - expand_ext", "-- Add states roads = cfeature.NaturalEarthFeature( category='cultural', name='roads', scale='10m', facecolor='none')", "folder is stored with respect to the main project _contourf", "on what is disp_images, the figures are displayed or just", "get_proper_size(self, rows, cols): \"\"\" Obtains the proper size for a", "class''' self.__dict__[\"_\" + attr] = value def add_colorbar(self, fig, im,", "lats=[lats],lons=[lons]) def __init__(self, disp_images=True, output_folder='output', lats=[-90,90], lons =[-180,180], projection=ccrs.PlateCarree(), **kwargs):", "be a dictionary with keys x,y,u,v # and optional density,", "cfeature.NaturalEarthFeature( category='cultural', name='roads', scale='10m', facecolor='none') ax.add_feature(roads, edgecolor='black') return ax def", "and orig_cmap is None: cmap = select_colormap(c_var) else: # If", "self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], 
title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names = [], show_color_bar=show_color_bar,", "0.5)) if self._vector_field != None: try: u = self._vector_field['u'] v", "x in ('ssh', 'srfhgt', 'adt','surf_el')]): # cmaps_fields.append(cmocean.cm.deep_r) return cmocean.cm.curl elif", "cv2 from os import listdir from os.path import join import", "matplotlib.pyplot as plt import matplotlib from matplotlib.colors import LogNorm from", "# By default we select the colorbar from the name", "title='', file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function", "xr_summary(self, ds): \"\"\" Prints a summary of the netcdf (global", "just one cmap, then we use it for all the", "of the netcdf (global attributes, variables, etc) :param ds: :return:", "from: https://www.naturalearthdata.com/features/ # -- Add states roads = cfeature.NaturalEarthFeature( category='cultural',", "files.sort() print(F\"Generating video file: {output_file}\") out_video = -1 for i,", "we do not set this, it will cropp it to", "to read xarrays, numpy arrays, and numpy arrays in dictionaries", "plot_2d_data_xr(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):", "User can set a min and max colorbar values to", "the constructor of the class MUST have its name on", "# Adds a threshold to the plot to see the", "font_coords = {'size': self._font_size*.6} gl.xlabel_style = font_coords gl.ylabel_style = font_coords", "= _axs else: ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var] # Here", "i, field_name in enumerate(var_names): npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0) self.plot_3d_data_npdict(npdict_3d, var_names,", "try: u = self._vector_field['u'] v = self._vector_field['v'] x = self._vector_field['x']", "im = Image.open(c_file) np_im = np.asarray(im)[:, :, :3] if 
i", "# Names come from: https://www.naturalearthdata.com/features/ # -- Add states states_provinces", "LogNorm from io_utils.io_common import create_folder from viz_utils.constants import PlotMode, BackgroundType", "mode=plot_mode, mincbar=c_mincbar, maxcbar=c_maxcbar) if self._show_var_names: c_title = F'{var_names[idx_var]} {title}' else:", "the proportion to use w/h :return: \"\"\" if rows ==", "do not set this, it will cropp it to the", "listdir from os.path import join import matplotlib.pyplot as plt import", "> 1: c_title += F\"Z - level: {c_slice_txt}\" ax.set_title(c_title, fontsize=self._font_size)", "y] or just a single field with shape [x,y] :param", "or just a single field with shape [x,y] :param var_names:", "print(self.__dict__[\"_\" + arg_name]) def __getattr__(self, attr): '''Generic getter for all", "None # Use to normalize the colormap. For example with", "os import listdir from os.path import join import matplotlib.pyplot as", "= np_variables # Single field if rot_90: c_np_data = np.rot90(c_np_data)", "== PlotMode.CONTOUR or mode == PlotMode.MERGED: c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) if mode", "show_color_bar: :param plot_mode: :param mincbar: :param maxcbar: :return: ''' npdict_3d", "'v_', 'u-vel.', 'v-vel.','velocity')]): return cmocean.cm.speed class EOAImageVisualizer: \"\"\"This class makes", "mincbar if not(np.all(np.isnan(maxcbar))): if type(mincbar) is list: c_maxcbar = maxcbar[idx_var]", "= cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size, True) out_video.write(np_im[:, :, ::-1]) out_video.release()", "= output_folder self._projection = projection bbox = self.getExtent(lats, lons) self._extent", "in vec_keys: linewidth = self._vector_field['linewidth'] if 'cmap' in vec_keys: vec_cmap", "plt.show() else: plt.close() def getExtent(self, lats, lons, expand_ext=0.0): ''' Obtains", "norm=self._norm) if mode == PlotMode.CONTOUR or mode == PlotMode.MERGED: 
c_ax.set_extent(self.getExtent(list(self._lats),", "self._background == BackgroundType.TOPO: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if self._background == BackgroundType.BATHYMETRY:", "disp_images, the figures are displayed or just closed\"\"\" if self._disp_images:", "lons) self._extent = bbox self._lats = lats self._lons = lons", "np.amin(lats) - expand_ext maxLat = np.amax(lats) + expand_ext minLon =", "make_video_from_images(self, input_folder, output_file, fps=24): files = listdir(input_folder) files.sort() print(F\"Generating video", "else: # If there is an array of colormaps we", "extent=(-180,180,-90,90), transform=ccrs.PlateCarree()) if mode == PlotMode.RASTER or mode == PlotMode.MERGED:", "made to read xarrays, numpy arrays, and numpy arrays in", "roads = cfeature.NaturalEarthFeature( category='cultural', name='roads', scale='10m', facecolor='none') ax.add_feature(roads, edgecolor='black') return", "= np.nan c_maxcbar = np.nan for idx_var, c_var in enumerate(var_names):", "ax if self._flip_data: origin = 'lower' else: origin = 'upper'", "it will cropp it to the limits of the locations", "\"\"\"Depending on what is disp_images, the figures are displayed or", "= plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if self._background == BackgroundType.BLUE_MARBLE_HR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if", "video_size, True) out_video.write(np_im[:, :, ::-1]) out_video.release() cv2.destroyAllWindows() print(\"Done! 
yeah babe!\")", "field if rot_90: c_np_data = np.rot90(c_np_data) if flip_data: c_np_data =", "are passed to the constructor of the class MUST have", "mode == PlotMode.CONTOUR: im = c_ax.contour(c_img, extent=self._extent, transform=self._projection) if mode", "cols * self._fig_prop, self._figsize else: return self._figsize * cols *", "file: {output_file}\") out_video = -1 for i, file_name in enumerate(files[0:36]):", "min and max colorbar values to 'force' same color bar", "axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names = [],", "is just one cmap, then we use it for all", "If you want to add a streamplot of a vector", "=========\") netCDFvars = ds.variables for cur_variable_name in netCDFvars.keys(): cur_var =", "field. It must be a dictionary with keys x,y,u,v #", "and optional density, color, cmap, arrowsize, arrowstyle, minlength _vector_field =", "ax): # Names come from: https://www.naturalearthdata.com/features/ # -- Add states", "a 2D img for EOA data. :param c_img: 2D array", "z_levels for multiple fields. It uses rows for each depth,", "Plots a 2D img for EOA data. 
:param c_img: 2D", "BackgroundType.CARTO_DEF: c_ax.stock_img() else: if self._background == BackgroundType.BLUE_MARBLE_LR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png'))", "np import cmocean import shapely import cartopy.crs as ccrs import", "if len(z_names) != 0: c_slice_txt = z_names[c_slice] else: c_slice_txt =", "density = 1 linewidth = 3 vec_cmap = cmocean.cm.solar if", "then all are plotted if len(z_levels) == 0: z_levels =", "output_file, fps=24): files = listdir(input_folder) files.sort() print(F\"Generating video file: {output_file}\")", "cols, figsize=self.get_proper_size(rows, cols), subplot_kw={'projection': self._projection}) for c_zlevel, c_slice in enumerate(z_levels):", "name)}\") print(\"\\n========== Dimensions =========\") for name in ds.dims: print(F\"{name}: {ds[name].shape}\")", "case we want to include additional polygons in the plots", "mincbar=mincbar, maxcbar=maxcbar) def make_video_from_images(self, input_folder, output_file, fps=24): files = listdir(input_folder)", "= value def add_colorbar(self, fig, im, ax, show_color_bar, label=\"\"): #", "def add_colorbar(self, fig, im, ax, show_color_bar, label=\"\"): # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if", "Image import cv2 from os import listdir from os.path import", "value def add_colorbar(self, fig, im, ax, show_color_bar, label=\"\"): # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html", "!= -1 for x in ('vorticity', 'vort')]): return cmocean.cm.curl elif", "'r', 'c', 'b', 'g', 'w', 'k'] _figsize = 8 _font_size", "precision _background = BackgroundType.BLUE_MARBLE_LR # Select the background to use", "return ax def plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1, c='blue', cmap='plasma',", "elif np.any([field_name.find(x) != -1 for x in ('u_', 'v_', 'u-vel.',", "the 'main' function for 3D plotting :param np_variables: :param var_names:", "field _show_var_names = False # Includes the name of the", "= 
self._vector_field['linewidth'] if 'cmap' in vec_keys: vec_cmap = self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats),", "transform=self._projection) if mode == PlotMode.MERGED: if self._contour_labels: c_ax.contour(c_img, self._contour_labels, colors='r',", "self._projection}) for c_zlevel, c_slice in enumerate(z_levels): # Iterates over the", "cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm) if mode == PlotMode.CONTOUR or", "print(\"\\n========== Global attributes =========\") for name in ds.attrs: print(F\"{name} =", "_font_size = 30 _units = '' _max_imgs_per_row = 4 _mincbar", "[], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def make_video_from_images(self, input_folder, output_file, fps=24):", "if label != \"\": cbar.set_label(label, fontsize=font_size_cbar*1.2) else: cbar.set_label(self._units, fontsize=font_size_cbar*1.2) def", "threshold to the plot to see the polygons c_ax.set_extent(self.getExtent(list(self._lats) +", "self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels = False for arg_name, arg_value in", "requires any z-leve, then all are plotted if len(z_levels) ==", "plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()}) ax.set_extent(bbox) # If we", "!= -1: return cmocean.cm.oxy elif np.any([field_name.find(x) != -1 for x", "v = self._vector_field['v'] x = self._vector_field['x'] y = self._vector_field['y'] vec_keys", "automatic and works always cbar = fig.colorbar(im, ax=ax, shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar)", "default we select the colorbar from the name of the", "c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) if mode == PlotMode.CONTOUR: im = c_ax.contour(c_img, extent=self._extent,", "z_names[c_slice] else: c_slice_txt = c_slice c_mincbar = np.nan c_maxcbar =", "from viz_utils.constants import PlotMode, BackgroundType import pylab 
import numpy as", "from cmocean Args: field_name: Returns: ''' if np.any([field_name.find(x) != -1", "stored with respect to the main project _contourf = False", "original ones. if len(z_names) != 0: c_slice_txt = z_names[c_slice] else:", "cmap=vec_cmap, linewidth=linewidth) except Exception as e: print(F\"Couldn't add vector field", "{cur_variable_name}: {cur_var.dimensions} {cur_var.shape}\") def add_roads(self, ax): # Names come from:", "= {'size': self._font_size*.6} gl.xlabel_style = font_coords gl.ylabel_style = font_coords gl.top_labels", "are plotted if len(z_levels) == 0: z_levels = range(np_variables[var_names[0]].shape[0]) cols", "{ds[name].shape}\") print(\"\\n========== Coordinates =========\") for name in ds.coords: print(F\"{name}: {ds[name].shape}\")", "level: {c_slice_txt}\" ax.set_title(c_title, fontsize=self._font_size) self.add_colorbar(fig, im, ax, show_color_bar) plt.tight_layout(pad=.5) file_name", "len(z_levels) == 0: z_levels = range(np_variables[var_names[0]].shape[0]) cols = np.min((self._max_imgs_per_row, len(var_names)))", "value): '''Generic setter for all the properties of the class'''", "im, ax, show_color_bar) plt.tight_layout(pad=.5) file_name = F'{file_name_prefix}' pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight')", "function plots points in a map :param bbox: :return: '''", "list: cmap = orig_cmap[idx_var] else: # If it is just", "self._vector_field['v'] x = self._vector_field['x'] y = self._vector_field['y'] vec_keys = self._vector_field.keys()", "in ('salin', 'sss', 'sal')]): return cmocean.cm.haline elif field_name.find('error') != -1:", "lons, expand_ext=0.0): ''' Obtains the bbox of the coordinates. 
If", "c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree()) if mode == PlotMode.RASTER or mode", "have :param prop: Proportion is the proportion to use w/h", "out_video = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size, True) out_video.write(np_im[:, :, ::-1])", "all the properties of the class''' return self.__dict__[\"_\" + attr]", "c_slice_txt = c_slice c_mincbar = np.nan c_maxcbar = np.nan for", "Includes the name of the field name in the titles", "is disp_images, the figures are displayed or just closed\"\"\" if", "ds.attrs: print(F\"{name} = {getattr(ds, name)}\") print(\"\\n========== Dimensions =========\") for name", "import cmocean import shapely import cartopy.crs as ccrs import cartopy.feature", "= {'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'} font_coords = {'size': self._font_size*.6}", "file_name_prefix: :param cmap: :param flip_data: :param rot_90: :param show_color_bar: :param", "projection bbox = self.getExtent(lats, lons) self._extent = bbox self._lats =", "''' if np.any([field_name.find(x) != -1 for x in ('ssh', 'srfhgt',", "it chooses a colormap from cmocean Args: field_name: Returns: '''", "{} for i, field_name in enumerate(var_names): if len(np_variables.shape) == 3:", "not(np.all(np.isnan(maxcbar))): if type(mincbar) is list: c_maxcbar = maxcbar[idx_var] else: c_maxcbar", "c = self._vector_field['color'] if 'density' in vec_keys: density = self._vector_field['density']", "img for EOA data. :param c_img: 2D array :param ax:", "name='roads', scale='10m', facecolor='none') ax.add_feature(roads, edgecolor='black') return ax def add_states(self, ax):", "expand_ext=0.0): ''' Obtains the bbox of the coordinates. 
If included", "def select_colormap(field_name): ''' Based on the name if the field", "linewidth = self._vector_field['linewidth'] if 'cmap' in vec_keys: vec_cmap = self._vector_field['cmap']", "npdict_3d = {} for i, field_name in enumerate(var_names): if len(np_variables.shape)", "of the field _show_var_names = False # Includes the name", "density=density, color=c, cmap=vec_cmap, linewidth=linewidth) except Exception as e: print(F\"Couldn't add", "use _auto_colormap = True # Selects the colormap based on", "else: origin = 'upper' if self._background == BackgroundType.CARTO_DEF: c_ax.stock_img() else:", "is list: c_mincbar = mincbar[idx_var] else: c_mincbar = mincbar if", "select_colormap(field_name): ''' Based on the name if the field it", "= self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode, mincbar=c_mincbar, maxcbar=c_maxcbar) if self._show_var_names: c_title", "the field it chooses a colormap from cmocean Args: field_name:", "== PlotMode.MERGED: c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) if mode == PlotMode.CONTOUR: im =", "if len(z_levels) > 1: c_title += F\"Z - level: {c_slice_txt}\"", "+ pol_lats, list(self._lons) + pol_lons, 0.5)) if self._vector_field != None:", "colorbar from the name of the variable if self._auto_colormap and", "3D plotting :param np_variables: Numpy variables. 
They can be with", "numpy as np import cmocean import shapely import cartopy.crs as", "('vorticity', 'vort')]): return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x", "proportion to use w/h :return: \"\"\" if rows == 1:", "gl.top_labels = False gl.right_labels = False return im def get_proper_size(self,", "_units = '' _max_imgs_per_row = 4 _mincbar = np.nan #", "= self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) c_ax.streamplot(x, y, u, v, transform=self._projection, density=density,", "def getExtent(self, lats, lons, expand_ext=0.0): ''' Obtains the bbox of", "ax=ax, shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar) if label != \"\": cbar.set_label(label, fontsize=font_size_cbar*1.2) else:", "= _axs.flatten()[c_zlevel*len(var_names) + idx_var] # Here we chose the min", "(maps). It is made to read xarrays, numpy arrays, and", "cols), subplot_kw={'projection': self._projection}) for c_zlevel, c_slice in enumerate(z_levels): # Iterates", "img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if self._background == BackgroundType.BLUE_MARBLE_HR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg'))", "# https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if show_color_bar: font_size_cbar = self._font_size * .5 #", "print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Variables =========\") for cur_variable_name in ds.variables: cur_var", "= np.asarray(im)[:, :, :3] if i == 0: video_size =", "is the proportion to use w/h :return: \"\"\" if rows", "xarrays, numpy arrays, and numpy arrays in dictionaries vizobj =", "plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if self._background == BackgroundType.BATHYMETRY: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img, origin='upper',", "= font_coords gl.top_labels = False gl.right_labels = False return 
im", "ds[cur_variable_name] print(F\"{cur_variable_name}: {cur_var.dims} {cur_var.shape}\") def nc_summary(self, ds): \"\"\" Prints a", "mincbar=c_mincbar, maxcbar=c_maxcbar) if self._show_var_names: c_title = F'{var_names[idx_var]} {title}' else: c_title", "field_name.find('binary') != -1: return cmocean.cm.oxy elif np.any([field_name.find(x) != -1 for", "PlotMode.MERGED: if self._contour_labels: c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection) else: c_ax.contour(c_img,", "= plt.subplots(rows, cols, figsize=self.get_proper_size(rows, cols), subplot_kw={'projection': self._projection}) for c_zlevel, c_slice", "-- Add states states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='50m', facecolor='none')", "arg_name, arg_value in kwargs.items(): self.__dict__[\"_\" + arg_name] = arg_value print(self.__dict__[\"_\"", "= fig.colorbar(im, ax=ax, shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar) if label != \"\": cbar.set_label(label,", "plots assuming we are plotting Geospatial data (maps). 
It is", "ccrs.PlateCarree()}) ax.set_extent(bbox) # If we do not set this, it", "if self._contourf: im = c_ax.contourf(self._lons, self._lats, c_img, num_colors=255, cmap='inferno', extent=self._extent)", "BackgroundType.TOPO: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if self._background == BackgroundType.BATHYMETRY: img =", "= bbox self._lats = lats self._lons = lons self._fig_prop =", "np.expand_dims(np_variables[field_name], axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names =", "cmap # If the user do not requires any z-leve,", "the name of the variable if self._auto_colormap and orig_cmap is", "'v-vel.','velocity')]): return cmocean.cm.speed class EOAImageVisualizer: \"\"\"This class makes plenty of", "to include additional polygons in the plots (all of them)", "x in ('temp', 'sst', 'temperature')]): return cmocean.cm.thermal elif np.any([field_name.find(x) !=", "minlength _vector_field = None _norm = None # Use to", "numpy arrays in dictionaries vizobj = new EOAImageVisualizer(disp_images=True, output_folder='output', lats=[lats],lons=[lons])", "= plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if self._background == BackgroundType.TOPO: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if", "https://www.naturalearthdata.com/features/ # -- Add states roads = cfeature.NaturalEarthFeature( category='cultural', name='roads',", "= False return im def get_proper_size(self, rows, cols): \"\"\" Obtains", "np.min((self._max_imgs_per_row, len(var_names))) if cols == len(var_names): rows = len(z_levels) else:", "im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm)", "the bbox of the coordinates. 
If included threshold then increases", "cmaps_fields.append(cmocean.cm.deep_r) return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x in", "ax def plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1, c='blue', cmap='plasma', title=''):", "font_size_cbar = self._font_size * .5 # TODO how to make", "\"\"\"This class makes plenty of plots assuming we are plotting", "will the figure have :param cols: how many colswill the", "= listdir(input_folder) files.sort() print(F\"Generating video file: {output_file}\") out_video = -1", "('temp', 'sst', 'temperature')]): return cmocean.cm.thermal elif np.any([field_name.find(x) != -1 for", "all are plotted if len(z_levels) == 0: z_levels = range(np_variables[var_names[0]].shape[0])", "'force' same color bar to all plots _maxcbar = np.nan", "GEOMETRIES In case we want to include additional polygons in", "title='', file_name_prefix='', cmap=None, z_names = [], show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):", "vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm) if mode == PlotMode.CONTOUR or mode", "if self._show_var_names: c_title = F'{var_names[idx_var]} {title}' else: c_title = F'{title}'", "self._font_size * .5 # TODO how to make this automatic", "= self.getExtent(lats, lons) self._extent = bbox self._lats = lats self._lons", "maxcbar=np.nan): ''' Wrapper function to receive raw 2D numpy data.", "the figure have :param cols: how many colswill the figure", "input_folder, output_file, fps=24): files = listdir(input_folder) files.sort() print(F\"Generating video file:", "in the titles _additional_polygons = [] # MUST BE SHAPELY", "select the one for this field if type(orig_cmap) is list:", "figure have :param prop: Proportion is the proportion to use", "ax def add_states(self, ax): # Names come from: https://www.naturalearthdata.com/features/ #", "plt.tight_layout(pad=.5) file_name = F'{file_name_prefix}' pylab.savefig(join(self._output_folder, 
F'{file_name}.png'), bbox_inches='tight') self._close_figure() def plot_2d_data_xr(self,", "plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if self._background == BackgroundType.TOPO: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if self._background", "respect to the main project _contourf = False # When", "Iterates over the z-levels # Verify the index of the", "for multiple fields. It uses rows for each depth, and", "enumerate(var_names): npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix,", "# If we do not set this, it will cropp", "closed\"\"\" if self._disp_images: plt.show() else: plt.close() def getExtent(self, lats, lons,", "plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): \"\"\" Plots multiple z_levels for multiple fields.", "file_name in enumerate(files[0:36]): if i % 10 == 0: print(F\"Adding", "''' Based on the name if the field it chooses", "just a single field with shape [x,y] :param var_names: :param", "Args: lats: lons: inc_threshold: Returns: ''' minLat = np.amin(lats) -", "if i % 10 == 0: print(F\"Adding file # {i}:", "to use _auto_colormap = True # Selects the colormap based", "[], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def plot_2d_data_np(self, np_variables:list, var_names:list, title='',", "the figure have :param prop: Proportion is the proportion to", "= False # When plotting non-regular grids and need precision", "For example with LogNorm # vizobj = EOAImageVisualizer(disp_images=True, output_folder='output', #", "Add states states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='50m', facecolor='none') ax.add_feature(states_provinces,", "locations ax.gridlines() im = ax.scatter(lons, lats, s=s, c=c, cmap=cmap) fig.colorbar(im,", "= 
cfeature.NaturalEarthFeature( category='cultural', name='roads', scale='10m', facecolor='none') ax.add_feature(roads, edgecolor='black') return ax", "= lats self._lons = lons self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels =", "as ccrs import cartopy.feature as cfeature import cartopy def select_colormap(field_name):", "# If the user do not requires any z-leve, then", "- expand_ext maxLat = np.amax(lats) + expand_ext minLon = np.amin(lons)", "on the name of the field _show_var_names = False #", ":param plot_mode: :param mincbar: :param maxcbar: :return: ''' npdict_3d =", "self._font_size*.6} gl.xlabel_style = font_coords gl.ylabel_style = font_coords gl.top_labels = False", "maxcbar: :return: ''' npdict_3d = {} for i, field_name in", "cmap=cmap, z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def make_video_from_images(self,", "self._vector_field['u'] v = self._vector_field['v'] x = self._vector_field['x'] y = self._vector_field['y']", "ds): \"\"\" Prints a summary of the netcdf (global attributes,", "= select_colormap(c_var) else: # If there is an array of", "False # Includes the name of the field name in", "fig, _axs = plt.subplots(rows, cols, figsize=self.get_proper_size(rows, cols), subplot_kw={'projection': self._projection}) for", "= BackgroundType.BLUE_MARBLE_LR # Select the background to use _auto_colormap =", "expand_ext maxLat = np.amax(lats) + expand_ext minLon = np.amin(lons) -", "plotting non-regular grids and need precision _background = BackgroundType.BLUE_MARBLE_LR #", "c_ax.plot(x,y, transform=self._projection, c='r') # Adds a threshold to the plot", "self._fig_prop, self._figsize * rows def _close_figure(self): \"\"\"Depending on what is", "= 4 _mincbar = np.nan # User can set a", "enumerate(var_names): # Iterate over the fields if rows*cols == 1:", "'w', 'k', 'y', 'r', 'c', 'b', 'g', 'w', 'k'] _figsize", "else: return self._figsize * cols * self._fig_prop, self._figsize 
* rows", "is stored with respect to the main project _contourf =", "the name of the field name in the titles _additional_polygons", "transform=self._projection, density=density, color=c, cmap=vec_cmap, linewidth=linewidth) except Exception as e: print(F\"Couldn't", "figures are displayed or just closed\"\"\" if self._disp_images: plt.show() else:", "np.any([field_name.find(x) != -1 for x in ('temp', 'sst', 'temperature')]): return", "name on it. self._disp_images = disp_images self._output_folder = output_folder self._projection", "= c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm) else: im =", "# Verify the index of the z_levels are the original", "bbox is None: bbox = (-180, 180, -90, 90) if", "the netcdf (global attributes, variables, etc) :param ds: :return: \"\"\"", ":param show_color_bar: :param plot_mode: :param mincbar: :param maxcbar: :return: '''", ":param maxcbar: :return: ''' npdict_3d = {} for i, field_name", "len(self._additional_polygons) > 0: pol_lats = [] pol_lons = [] for", "print(F\"{name} = {getattr(ds, name)}\") print(\"\\n========== Variables =========\") netCDFvars = ds.variables", "+ pol_lons, 0.5)) if self._vector_field != None: try: u =", "_COLORS = ['y', 'r', 'c', 'b', 'g', 'w', 'k', 'y',", "cmap=None, z_names = [], show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): \"\"\" Plots", "0: video_size = (np_im.shape[1], np_im.shape[0]) out_video = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps,", "set this, it will cropp it to the limits of", "else: if np.isnan(mincbar): im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection,", "plotted if len(z_levels) == 0: z_levels = range(np_variables[var_names[0]].shape[0]) cols =", "+= y pol_lons += x c_ax.plot(x,y, transform=self._projection, c='r') # Adds", "= Image.open(c_file) np_im = np.asarray(im)[:, :, :3] if i ==", "path where the eoas_utils 
folder is stored with respect to", "cmap = orig_cmap[idx_var] else: # If it is just one", "plenty of plots assuming we are plotting Geospatial data (maps).", "category='cultural', name='admin_1_states_provinces_lines', scale='50m', facecolor='none') ax.add_feature(states_provinces, edgecolor='gray') return ax def plot_scatter_data(self,", "# User can set a min and max colorbar values", "if self._background == BackgroundType.TOPO: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if self._background ==", "cbar = fig.colorbar(im, ax=ax, shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar) if label != \"\":", "not(np.all(np.isnan(mincbar))): if type(mincbar) is list: c_mincbar = mincbar[idx_var] else: c_mincbar", "of the class MUST have its name on it. self._disp_images", "lons =[-180,180], projection=ccrs.PlateCarree(), **kwargs): # All the arguments that are", "thres Args: lats: lons: inc_threshold: Returns: ''' minLat = np.amin(lats)", "main project _contourf = False # When plotting non-regular grids", "cmap='inferno', extent=self._extent) else: if np.isnan(mincbar): im = c_ax.imshow(c_img, extent=self._extent, origin=origin,", ":return: \"\"\" if rows == 1: return self._figsize * cols", "file_name_prefix='', cmap=None, z_names = [], show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): \"\"\"", "fontsize=font_size_cbar*1.2) else: cbar.set_label(self._units, fontsize=font_size_cbar*1.2) def plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER,", "them) # If you want to add a streamplot of", "extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm) if mode ==", ":return: ''' npdict_3d = {} for i, field_name in enumerate(var_names):", "_additional_polygons = [] # MUST BE SHAPELY GEOMETRIES In case", "== 0: video_size = (np_im.shape[1], np_im.shape[0]) out_video = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'),", 
"getExtent(self, lats, lons, expand_ext=0.0): ''' Obtains the bbox of the", "= np.amin(lons) - expand_ext maxLon = np.amax(lons) + expand_ext bbox", "cmap=cmap, z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def plot_2d_data_np(self,", "return cmocean.cm.diff elif field_name.find('binary') != -1: return cmocean.cm.oxy elif np.any([field_name.find(x)", "Global attributes =========\") for name in ds.attrs: print(F\"{name} = {getattr(ds,", "''' This function plots points in a map :param bbox:", "to add a streamplot of a vector field. It must", "maxLat) return bbox def xr_summary(self, ds): \"\"\" Prints a summary", "= ds[cur_variable_name] print(F\"{cur_variable_name}: {cur_var.dims} {cur_var.shape}\") def nc_summary(self, ds): \"\"\" Prints", "=========\") for name in ds.attrs: print(F\"{name} = {getattr(ds, name)}\") print(\"\\n==========", "'k', 'y', 'r', 'c', 'b', 'g', 'w', 'k'] _figsize =", "colormap. For example with LogNorm # vizobj = EOAImageVisualizer(disp_images=True, output_folder='output',", "if isinstance(c_polygon, shapely.geometry.linestring.LineString): x,y = c_polygon.xy elif isinstance(c_polygon, shapely.geometry.polygon.Polygon): x,", "return bbox def xr_summary(self, ds): \"\"\" Prints a summary of", "== BackgroundType.BATHYMETRY: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree()) if", "* rows def _close_figure(self): \"\"\"Depending on what is disp_images, the", "new EOAImageVisualizer(disp_images=True, output_folder='output', lats=[lats],lons=[lons]) \"\"\" _COLORS = ['y', 'r', 'c',", "= int(len(z_levels) * np.ceil(len(var_names)/cols)) fig, _axs = plt.subplots(rows, cols, figsize=self.get_proper_size(rows,", "for name in ds.attrs: print(F\"{name} = {getattr(ds, name)}\") print(\"\\n========== Dimensions", "'k'] _figsize = 8 _font_size = 30 _units = ''", "elif 
np.any([field_name.find(x) != -1 for x in ('salin', 'sss', 'sal')]):", "self._vector_field != None: try: u = self._vector_field['u'] v = self._vector_field['v']", "cols == len(var_names): rows = len(z_levels) else: rows = int(len(z_levels)", "im, ax, show_color_bar, label=\"\"): # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if show_color_bar: font_size_cbar =", "def add_states(self, ax): # Names come from: https://www.naturalearthdata.com/features/ # --", "def plot_2d_data_xr(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan,", "c_mincbar = np.nan c_maxcbar = np.nan for idx_var, c_var in", "if the field it chooses a colormap from cmocean Args:", "to make this automatic and works always cbar = fig.colorbar(im,", "= join(input_folder, file_name) im = Image.open(c_file) np_im = np.asarray(im)[:, :,", "cartopy.feature as cfeature import cartopy def select_colormap(field_name): ''' Based on", ":return: \"\"\" c_ax = ax if self._flip_data: origin = 'lower'", "ax.add_feature(states_provinces, edgecolor='gray') return ax def plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1,", "the one for this field if type(orig_cmap) is list: cmap", "def add_roads(self, ax): # Names come from: https://www.naturalearthdata.com/features/ # --", "fontsize=font_size_cbar*1.2) def plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) ->", "ds.variables[cur_variable_name] print(F\"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}\") def add_roads(self, ax): #", "single field with shape [x,y] :param var_names: :param title: :param", "= disp_images self._output_folder = output_folder self._projection = projection bbox =", "the properties of the class''' self.__dict__[\"_\" + attr] = value", "as e: print(F\"Couldn't add vector field e:{e}\") gl = c_ax.gridlines(draw_labels=True,", "return ax def 
add_states(self, ax): # Names come from: https://www.naturalearthdata.com/features/", "+ expand_ext bbox = (minLon, maxLon, minLat, maxLat) return bbox", "grids and need precision _background = BackgroundType.BLUE_MARBLE_LR # Select the", "Exception as e: print(F\"Couldn't add vector field e:{e}\") gl =", "linewidth = 3 vec_cmap = cmocean.cm.solar if 'color' in vec_keys:", "for each depth, and columns for each variable \"\"\" create_folder(self._output_folder)", "x,y,u,v # and optional density, color, cmap, arrowsize, arrowstyle, minlength", "to 'force' same color bar to all plots _maxcbar =", "If the user do not requires any z-leve, then all", "rows*cols == 1: # Single figure ax = _axs else:", "print(F\"Generating video file: {output_file}\") out_video = -1 for i, file_name", "background to use _auto_colormap = True # Selects the colormap", "maxcbar=c_maxcbar) if self._show_var_names: c_title = F'{var_names[idx_var]} {title}' else: c_title =", ":3] if i == 0: video_size = (np_im.shape[1], np_im.shape[0]) out_video", "= self._vector_field['density'] if 'linewidth' in vec_keys: linewidth = self._vector_field['linewidth'] if", "fig.colorbar(im, ax=ax, shrink=0.7) ax.coastlines() plt.title(title) plt.show() def plot_3d_data_npdict(self, np_variables:list, var_names:list,", "works always cbar = fig.colorbar(im, ax=ax, shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar) if label", "= self._vector_field['u'] v = self._vector_field['v'] x = self._vector_field['x'] y =", "the colorbar from the name of the variable if self._auto_colormap", "np.expand_dims(c_np_data, axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names =", "a map :param bbox: :return: ''' if bbox is None:", "of the class''' return self.__dict__[\"_\" + attr] def __setattr__(self, attr,", "2D array :param ax: geoaxes :return: \"\"\" c_ax = ax", "Single figure ax = _axs else: ax = _axs.flatten()[c_zlevel*len(var_names) +", 
"!= -1 for x in ('salin', 'sss', 'sal')]): return cmocean.cm.haline", "else: im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection,", "summary of the netcdf (global attributes, variables, etc) :param ds:", "= EOAImageVisualizer(disp_images=True, output_folder='output', # lats=[lats],lons=[lons]) def __init__(self, disp_images=True, output_folder='output', lats=[-90,90],", "file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to", "return cmocean.cm.thermal elif np.any([field_name.find(x) != -1 for x in ('vorticity',", "name in the titles _additional_polygons = [] # MUST BE", "''' npdict_3d = {} for i, field_name in enumerate(var_names): npdict_3d[field_name]", "= self._vector_field['y'] vec_keys = self._vector_field.keys() c = 'r' density =", "plotting Geospatial data (maps). It is made to read xarrays,", "z_levels = range(np_variables[var_names[0]].shape[0]) cols = np.min((self._max_imgs_per_row, len(var_names))) if cols ==", "im = c_ax.contour(c_img, extent=self._extent, transform=self._projection) if mode == PlotMode.MERGED: if", "import os from PIL import Image import cv2 from os", "# If there is an array of colormaps we select", "= z_names[c_slice] else: c_slice_txt = c_slice c_mincbar = np.nan c_maxcbar", "cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x in ('temp', 'sst',", "add_colorbar(self, fig, im, ax, show_color_bar, label=\"\"): # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if show_color_bar:", "all plots _maxcbar = np.nan _flip_data = True _eoas_pyutils_path =", "to the main project _contourf = False # When plotting", "the eoas_utils folder is stored with respect to the main", "if rot_90: c_np_data = np.rot90(c_np_data) if flip_data: c_np_data = np.flip(np.flip(c_np_data),", "def _close_figure(self): \"\"\"Depending on what is disp_images, the figures are", "elif 
isinstance(c_polygon, shapely.geometry.polygon.Polygon): x, y = c_polygon.exterior.xy pol_lats += y", "need precision _background = BackgroundType.BLUE_MARBLE_LR # Select the background to", "c_ax.stock_img() else: if self._background == BackgroundType.BLUE_MARBLE_LR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if", "BE SHAPELY GEOMETRIES In case we want to include additional", "# Single figure ax = _axs else: ax = _axs.flatten()[c_zlevel*len(var_names)", "for i, field_name in enumerate(var_names): if len(np_variables.shape) == 3: c_np_data", "-1 for x in ('temp', 'sst', 'temperature')]): return cmocean.cm.thermal elif", "0: c_slice_txt = z_names[c_slice] else: c_slice_txt = c_slice c_mincbar =", "a dictionary with keys x,y,u,v # and optional density, color,", "cmocean.cm.thermal elif np.any([field_name.find(x) != -1 for x in ('vorticity', 'vort')]):", "i, file_name in enumerate(files[0:36]): if i % 10 == 0:", "None: bbox = (-180, 180, -90, 90) if lats is", "+ arg_name] = arg_value print(self.__dict__[\"_\" + arg_name]) def __getattr__(self, attr):", "None: lons = self.lons fig, ax = plt.subplots(1, 1, figsize=(self._figsize,", "for x in ('temp', 'sst', 'temperature')]): return cmocean.cm.thermal elif np.any([field_name.find(x)", "file # {i}: {file_name}\") c_file = join(input_folder, file_name) im =", "_contourf = False # When plotting non-regular grids and need", "cropp it to the limits of the locations ax.gridlines() im", "{getattr(ds, name)}\") print(\"\\n========== Dimensions =========\") for name in ds.dims: print(F\"{name}:", "None: cmap = select_colormap(c_var) else: # If there is an", "# If it is just one cmap, then we use", "if mode == PlotMode.RASTER or mode == PlotMode.MERGED: if self._contourf:", "video file: {output_file}\") out_video = -1 for i, file_name in", "bbox of the coordinates. 
If included threshold then increases the", "Variables =========\") for cur_variable_name in ds.variables: cur_var = ds[cur_variable_name] print(F\"{cur_variable_name}:", "for arg_name, arg_value in kwargs.items(): self.__dict__[\"_\" + arg_name] = arg_value", "= mincbar[idx_var] else: c_mincbar = mincbar if not(np.all(np.isnan(maxcbar))): if type(mincbar)", "= c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm) if", "from the name of the variable if self._auto_colormap and orig_cmap", "if type(orig_cmap) is list: cmap = orig_cmap[idx_var] else: # If", "an array of colormaps we select the one for this", "'' _max_imgs_per_row = 4 _mincbar = np.nan # User can", "= {} for i, field_name in enumerate(var_names): npdict_3d[field_name] = np.expand_dims(np_variables[field_name],", "self._disp_images = disp_images self._output_folder = output_folder self._projection = projection bbox", "= cmap # If the user do not requires any", "\"\"\" Plots a 2D img for EOA data. :param c_img:", "!= -1: return cmocean.cm.diff elif field_name.find('binary') != -1: return cmocean.cm.oxy", "in enumerate(var_names): if len(np_variables.shape) == 3: c_np_data = np_variables[i, :,", "'g', 'w', 'k', 'y', 'r', 'c', 'b', 'g', 'w', 'k']", "\"\"\" print(\"\\n========== Global attributes =========\") for name in ds.attrs: print(F\"{name}", "project _contourf = False # When plotting non-regular grids and", "the class''' return self.__dict__[\"_\" + attr] def __setattr__(self, attr, value):", "# Use to normalize the colormap. 
For example with LogNorm", "in ds.dims: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Coordinates =========\") for name in", "if self._auto_colormap and orig_cmap is None: cmap = select_colormap(c_var) else:", "array of colormaps we select the one for this field", "= False gl.right_labels = False return im def get_proper_size(self, rows,", "EOAImageVisualizer: \"\"\"This class makes plenty of plots assuming we are", "show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): \"\"\" Plots multiple z_levels for multiple", "(np_im.shape[1], np_im.shape[0]) out_video = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size, True) out_video.write(np_im[:,", "name='admin_1_states_provinces_lines', scale='50m', facecolor='none') ax.add_feature(states_provinces, edgecolor='gray') return ax def plot_scatter_data(self, lats=None,", "or mode == PlotMode.MERGED: c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) if mode == PlotMode.CONTOUR:", "import cartopy def select_colormap(field_name): ''' Based on the name if", "field_name: Returns: ''' if np.any([field_name.find(x) != -1 for x in", "= np_variables[i, :, :] else: c_np_data = np_variables # Single", "in a map :param bbox: :return: ''' if bbox is", "np_variables: Numpy variables. They can be with shape [fields, x,", "EOAImageVisualizer(disp_images=True, output_folder='output', # lats=[lats],lons=[lons]) def __init__(self, disp_images=True, output_folder='output', lats=[-90,90], lons", "-1 for x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]): return cmocean.cm.speed", "extent=self._extent) else: if np.isnan(mincbar): im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap,", "= len(z_levels) else: rows = int(len(z_levels) * np.ceil(len(var_names)/cols)) fig, _axs", "print(F\"Adding file # {i}: {file_name}\") c_file = join(input_folder, file_name) im", "cols): \"\"\" Obtains the proper size for a figure. 
:param", "function for 3D plotting :param np_variables: Numpy variables. They can", "True _eoas_pyutils_path = './eoas_pyutils'# This is the path where the", "if self._contour_labels: c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection) else: c_ax.contour(c_img, extent=self._extent,", "vec_keys: vec_cmap = self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) c_ax.streamplot(x, y, u, v,", "add_states(self, ax): # Names come from: https://www.naturalearthdata.com/features/ # -- Add", "= {getattr(ds, name)}\") print(\"\\n========== Variables =========\") netCDFvars = ds.variables for", "== PlotMode.MERGED: if self._contourf: im = c_ax.contourf(self._lons, self._lats, c_img, num_colors=255,", "+= x c_ax.plot(x,y, transform=self._projection, c='r') # Adds a threshold to", "1 linewidth = 3 vec_cmap = cmocean.cm.solar if 'color' in", "and max colorbar values to 'force' same color bar to", "then increases the bbox in all directions with that thres", "var_names: :param title: :param file_name_prefix: :param cmap: :param flip_data: :param", "if mode == PlotMode.CONTOUR: im = c_ax.contour(c_img, extent=self._extent, transform=self._projection) if", "for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}\") def add_roads(self, ax): # Names come", "transform=self._projection) if len(self._additional_polygons) > 0: pol_lats = [] pol_lons =", "z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def make_video_from_images(self, input_folder,", "ccrs import cartopy.feature as cfeature import cartopy def select_colormap(field_name): '''", "the class MUST have its name on it. 
self._disp_images =", "else: cbar.set_label(self._units, fontsize=font_size_cbar*1.2) def plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan,", "im = c_ax.contourf(self._lons, self._lats, c_img, num_colors=255, cmap='inferno', extent=self._extent) else: if", "'sst', 'temperature')]): return cmocean.cm.thermal elif np.any([field_name.find(x) != -1 for x", "for this field if type(orig_cmap) is list: cmap = orig_cmap[idx_var]", "its name on it. self._disp_images = disp_images self._output_folder = output_folder", "('salin', 'sss', 'sal')]): return cmocean.cm.haline elif field_name.find('error') != -1: return", "do not requires any z-leve, then all are plotted if", "[], show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): \"\"\" Plots multiple z_levels for", "if mode == PlotMode.CONTOUR or mode == PlotMode.MERGED: c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons)))", "ax.set_extent(bbox) # If we do not set this, it will", "many colswill the figure have :param prop: Proportion is the", "title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar)", "They can be with shape [fields, x, y] or just", "c_slice c_mincbar = np.nan c_maxcbar = np.nan for idx_var, c_var", "constructor of the class MUST have its name on it.", "import cartopy.feature as cfeature import cartopy def select_colormap(field_name): ''' Based", "'adt','surf_el')]): # cmaps_fields.append(cmocean.cm.deep_r) return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for", "import Image import cv2 from os import listdir from os.path", "plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree()) if mode == PlotMode.RASTER or", "It calls the 'main' function for 3D plotting :param np_variables:", "=[-180,180], 
projection=ccrs.PlateCarree(), **kwargs): # All the arguments that are passed", "x, y] or just a single field with shape [x,y]", "orig_cmap is None: cmap = select_colormap(c_var) else: # If there", "# Select the background to use _auto_colormap = True #", "the index of the z_levels are the original ones. if", "cmocean Args: field_name: Returns: ''' if np.any([field_name.find(x) != -1 for", "maxcbar[idx_var] else: c_maxcbar = maxcbar # By default we select", "= self._font_size * .5 # TODO how to make this", "else: c_slice_txt = c_slice c_mincbar = np.nan c_maxcbar = np.nan", "= np.min((self._max_imgs_per_row, len(var_names))) if cols == len(var_names): rows = len(z_levels)", "bbox_inches='tight') self._close_figure() def plot_2d_data_xr(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap='viridis', show_color_bar=True,", "for EOA data. :param c_img: 2D array :param ax: geoaxes", "from PIL import Image import cv2 from os import listdir", "lons is None: lons = self.lons fig, ax = plt.subplots(1,", "for c_zlevel, c_slice in enumerate(z_levels): # Iterates over the z-levels", "len(var_names))) if cols == len(var_names): rows = len(z_levels) else: rows", "-90, 90) if lats is None: lats = self.lats if", "video_size = (np_im.shape[1], np_im.shape[0]) out_video = cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size,", "nc_summary(self, ds): \"\"\" Prints a summary of the netcdf (global", "'sal')]): return cmocean.cm.haline elif field_name.find('error') != -1: return cmocean.cm.diff elif", "c_polygon in self._additional_polygons: if isinstance(c_polygon, shapely.geometry.linestring.LineString): x,y = c_polygon.xy elif", "% 10 == 0: print(F\"Adding file # {i}: {file_name}\") c_file", "lats is None: lats = self.lats if lons is None:", "list: c_mincbar = mincbar[idx_var] else: c_mincbar = mincbar if not(np.all(np.isnan(maxcbar))):", "use w/h :return: \"\"\" if rows == 1: return self._figsize", "c_file = join(input_folder, 
file_name) im = Image.open(c_file) np_im = np.asarray(im)[:,", "= True # Selects the colormap based on the name", "c_mincbar = mincbar[idx_var] else: c_mincbar = mincbar if not(np.all(np.isnan(maxcbar))): if", "same color bar to all plots _maxcbar = np.nan _flip_data", "plotting :param np_variables: :param var_names: :param title: :param file_name_prefix: :param", "with that thres Args: lats: lons: inc_threshold: Returns: ''' minLat", "self._vector_field['linewidth'] if 'cmap' in vec_keys: vec_cmap = self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons)))", "self._vector_field['color'] if 'density' in vec_keys: density = self._vector_field['density'] if 'linewidth'", "Coordinates =========\") for name in ds.coords: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Variables", "with keys x,y,u,v # and optional density, color, cmap, arrowsize,", "color=c, cmap=vec_cmap, linewidth=linewidth) except Exception as e: print(F\"Couldn't add vector", "depth, and columns for each variable \"\"\" create_folder(self._output_folder) orig_cmap =", "print(\"\\n========== Dimensions =========\") for name in ds.dims: print(F\"{name}: {ds[name].shape}\") print(\"\\n==========", "field it chooses a colormap from cmocean Args: field_name: Returns:", "np_variables:list, var_names:list, title='', file_name_prefix='', cmap=None, flip_data=False, rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan,", "want to add a streamplot of a vector field. It", "class EOAImageVisualizer: \"\"\"This class makes plenty of plots assuming we", "if type(mincbar) is list: c_mincbar = mincbar[idx_var] else: c_mincbar =", "c_polygon.exterior.xy pol_lats += y pol_lons += x c_ax.plot(x,y, transform=self._projection, c='r')", ":param np_variables: Numpy variables. They can be with shape [fields,", "figure. 
:param rows: how many rows will the figure have", "+ idx_var] # Here we chose the min and max", "3D plotting :param np_variables: :param var_names: :param title: :param file_name_prefix:", "orig_cmap[idx_var] else: # If it is just one cmap, then", "lats = self.lats if lons is None: lons = self.lons", "add vector field e:{e}\") gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--')", "def get_proper_size(self, rows, cols): \"\"\" Obtains the proper size for", "Selects the colormap based on the name of the field", "4 _mincbar = np.nan # User can set a min", "self._vector_field['density'] if 'linewidth' in vec_keys: linewidth = self._vector_field['linewidth'] if 'cmap'", "getter for all the properties of the class''' return self.__dict__[\"_\"", "plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def plot_2d_data_np(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap=None,", "= (minLon, maxLon, minLat, maxLat) return bbox def xr_summary(self, ds):", "cmap=cmap, mode=plot_mode, mincbar=c_mincbar, maxcbar=c_maxcbar) if self._show_var_names: c_title = F'{var_names[idx_var]} {title}'", "points in a map :param bbox: :return: ''' if bbox", "np.nan _flip_data = True _eoas_pyutils_path = './eoas_pyutils'# This is the", "as np import cmocean import shapely import cartopy.crs as ccrs", "('ssh', 'srfhgt', 'adt','surf_el')]): # cmaps_fields.append(cmocean.cm.deep_r) return cmocean.cm.curl elif np.any([field_name.find(x) !=", "lons self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels = False for arg_name, arg_value", "if rows == 1: return self._figsize * cols * self._fig_prop,", "import cv2 from os import listdir from os.path import join", ":param ds: :return: \"\"\" print(\"\\n========== Global attributes =========\") for name", "maxcbar=maxcbar) def make_video_from_images(self, input_folder, output_file, fps=24): files = listdir(input_folder) files.sort()", "[] for c_polygon in self._additional_polygons: 
if isinstance(c_polygon, shapely.geometry.linestring.LineString): x,y =", "class MUST have its name on it. self._disp_images = disp_images", "for cur_variable_name in ds.variables: cur_var = ds[cur_variable_name] print(F\"{cur_variable_name}: {cur_var.dims} {cur_var.shape}\")", "how many colswill the figure have :param prop: Proportion is", "=========\") for name in ds.dims: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Coordinates =========\")", "dictionaries vizobj = new EOAImageVisualizer(disp_images=True, output_folder='output', lats=[lats],lons=[lons]) \"\"\" _COLORS =", "= c_polygon.exterior.xy pol_lats += y pol_lons += x c_ax.plot(x,y, transform=self._projection,", "file_name) im = Image.open(c_file) np_im = np.asarray(im)[:, :, :3] if", "{i}: {file_name}\") c_file = join(input_folder, file_name) im = Image.open(c_file) np_im", "np.nan c_maxcbar = np.nan for idx_var, c_var in enumerate(var_names): #", "= self.lons fig, ax = plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection':", "self.__dict__[\"_\" + arg_name] = arg_value print(self.__dict__[\"_\" + arg_name]) def __getattr__(self,", "* self._fig_prop, self._figsize else: return self._figsize * cols * self._fig_prop,", "3: c_np_data = np_variables[i, :, :] else: c_np_data = np_variables", "plot_2d_data_np(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap=None, flip_data=False, rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER,", "'temperature')]): return cmocean.cm.thermal elif np.any([field_name.find(x) != -1 for x in", "[x,y] :param var_names: :param title: :param file_name_prefix: :param cmap: :param", "color, cmap, arrowsize, arrowstyle, minlength _vector_field = None _norm =", "show_color_bar: font_size_cbar = self._font_size * .5 # TODO how to", "0: print(F\"Adding file # {i}: {file_name}\") c_file = join(input_folder, file_name)", "cmocean.cm.haline elif field_name.find('error') != -1: return cmocean.cm.diff elif 
field_name.find('binary') !=", "maxcbar=np.nan) -> None: \"\"\" Plots a 2D img for EOA", "user do not requires any z-leve, then all are plotted", "= F'{file_name_prefix}' pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight') self._close_figure() def plot_2d_data_xr(self, np_variables:list, var_names:list,", "it for all the fields cmap = orig_cmap im =", "what is disp_images, the figures are displayed or just closed\"\"\"", "Here we chose the min and max colorbars for each", "rows will the figure have :param cols: how many colswill", "z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar,", "ax, cmap=cmap, mode=plot_mode, mincbar=c_mincbar, maxcbar=c_maxcbar) if self._show_var_names: c_title = F'{var_names[idx_var]}", "maxLat = np.amax(lats) + expand_ext minLon = np.amin(lons) - expand_ext", "import PlotMode, BackgroundType import pylab import numpy as np import", "over the fields if rows*cols == 1: # Single figure", "pol_lons += x c_ax.plot(x,y, transform=self._projection, c='r') # Adds a threshold", "np.any([field_name.find(x) != -1 for x in ('vorticity', 'vort')]): return cmocean.cm.curl", "F'{file_name}.png'), bbox_inches='tight') self._close_figure() def plot_2d_data_xr(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap='viridis',", "shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar) if label != \"\": cbar.set_label(label, fontsize=font_size_cbar*1.2) else: cbar.set_label(self._units,", "vmax=maxcbar, transform=self._projection, norm=self._norm) if mode == PlotMode.CONTOUR or mode ==", "maxcbar # By default we select the colorbar from the", "in vec_keys: vec_cmap = self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) c_ax.streamplot(x, y, u,", "c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection) else: 
c_ax.contour(c_img, extent=self._extent, transform=self._projection) if", "self.add_colorbar(fig, im, ax, show_color_bar) plt.tight_layout(pad=.5) file_name = F'{file_name_prefix}' pylab.savefig(join(self._output_folder, F'{file_name}.png'),", "then we use it for all the fields cmap =", "name in ds.dims: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Coordinates =========\") for name", "= orig_cmap[idx_var] else: # If it is just one cmap,", "origin = 'lower' else: origin = 'upper' if self._background ==", "the background to use _auto_colormap = True # Selects the", "+ expand_ext minLon = np.amin(lons) - expand_ext maxLon = np.amax(lons)", ":param rows: how many rows will the figure have :param", "z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def plot_2d_data_np(self, np_variables:list,", "each field if not(np.all(np.isnan(mincbar))): if type(mincbar) is list: c_mincbar =", "np.amax(lats) + expand_ext minLon = np.amin(lons) - expand_ext maxLon =", "BackgroundType.BLUE_MARBLE_HR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if self._background == BackgroundType.TOPO: img =", "_auto_colormap = True # Selects the colormap based on the", "# Here we chose the min and max colorbars for", "= maxcbar[idx_var] else: c_maxcbar = maxcbar # By default we", "output_folder='output', lats=[-90,90], lons =[-180,180], projection=ccrs.PlateCarree(), **kwargs): # All the arguments", "'color' in vec_keys: c = self._vector_field['color'] if 'density' in vec_keys:", "cols: how many colswill the figure have :param prop: Proportion", "= 1 linewidth = 3 vec_cmap = cmocean.cm.solar if 'color'", "= ['y', 'r', 'c', 'b', 'g', 'w', 'k', 'y', 'r',", "in ds.coords: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Variables =========\") for cur_variable_name in", "plt import matplotlib from matplotlib.colors import LogNorm from io_utils.io_common import", "ax: geoaxes :return: 
\"\"\" c_ax = ax if self._flip_data: origin", "=========\") for name in ds.ncattrs(): print(F\"{name} = {getattr(ds, name)}\") print(\"\\n==========", "\"\": cbar.set_label(label, fontsize=font_size_cbar*1.2) else: cbar.set_label(self._units, fontsize=font_size_cbar*1.2) def plot_slice_eoa(self, c_img, ax,", "the colormap based on the name of the field _show_var_names", "how to make this automatic and works always cbar =", "over the z-levels # Verify the index of the z_levels", "files = listdir(input_folder) files.sort() print(F\"Generating video file: {output_file}\") out_video =", "self._background == BackgroundType.BLUE_MARBLE_LR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if self._background == BackgroundType.BLUE_MARBLE_HR:", "= False # Includes the name of the field name", "self._figsize * cols * self._fig_prop, self._figsize * rows def _close_figure(self):", "subplot_kw={'projection': self._projection}) for c_zlevel, c_slice in enumerate(z_levels): # Iterates over", "cmap: :param flip_data: :param rot_90: :param show_color_bar: :param plot_mode: :param", "the user do not requires any z-leve, then all are", "field if not(np.all(np.isnan(mincbar))): if type(mincbar) is list: c_mincbar = mincbar[idx_var]", "file_name_prefix=file_name_prefix, cmap=cmap, z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def", "all the fields cmap = orig_cmap im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax,", "variable \"\"\" create_folder(self._output_folder) orig_cmap = cmap # If the user", "c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm) if mode", "= True _eoas_pyutils_path = './eoas_pyutils'# This is the path where", "arrowstyle, minlength _vector_field = None _norm = None # Use", "F'{title}' if len(z_levels) > 1: c_title += F\"Z - level:", "uses rows for each depth, and columns for each 
variable", "'srfhgt', 'adt','surf_el')]): # cmaps_fields.append(cmocean.cm.deep_r) return cmocean.cm.curl elif np.any([field_name.find(x) != -1", "c_title = F'{title}' if len(z_levels) > 1: c_title += F\"Z", "if self._background == BackgroundType.BLUE_MARBLE_HR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if self._background ==", "mode == PlotMode.RASTER or mode == PlotMode.MERGED: if self._contourf: im", "if rows*cols == 1: # Single figure ax = _axs", "attributes =========\") for name in ds.ncattrs(): print(F\"{name} = {getattr(ds, name)}\")", "and numpy arrays in dictionaries vizobj = new EOAImageVisualizer(disp_images=True, output_folder='output',", "= 'lower' else: origin = 'upper' if self._background == BackgroundType.CARTO_DEF:", "join import matplotlib.pyplot as plt import matplotlib from matplotlib.colors import", "PlotMode.MERGED: if self._contourf: im = c_ax.contourf(self._lons, self._lats, c_img, num_colors=255, cmap='inferno',", "a threshold to the plot to see the polygons c_ax.set_extent(self.getExtent(list(self._lats)", "if self._background == BackgroundType.BATHYMETRY: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90),", "multiple z_levels for multiple fields. 
It uses rows for each", "that thres Args: lats: lons: inc_threshold: Returns: ''' minLat =", "to the limits of the locations ax.gridlines() im = ax.scatter(lons,", "y = self._vector_field['y'] vec_keys = self._vector_field.keys() c = 'r' density", "always cbar = fig.colorbar(im, ax=ax, shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar) if label !=", "and works always cbar = fig.colorbar(im, ax=ax, shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar) if", "name in ds.attrs: print(F\"{name} = {getattr(ds, name)}\") print(\"\\n========== Dimensions =========\")", "maxcbar=maxcbar) def plot_2d_data_np(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap=None, flip_data=False, rot_90=False,", "plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to receive raw 2D", "plotting :param np_variables: Numpy variables. They can be with shape", "we want to include additional polygons in the plots (all", "ds.coords: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Variables =========\") for cur_variable_name in ds.variables:", "lons=None, bbox=None, s=1, c='blue', cmap='plasma', title=''): ''' This function plots", "self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'} font_coords = {'size': self._font_size*.6} gl.xlabel_style =", "is made to read xarrays, numpy arrays, and numpy arrays", "chooses a colormap from cmocean Args: field_name: Returns: ''' if", "def plot_2d_data_np(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap=None, flip_data=False, rot_90=False, show_color_bar=True,", "any z-leve, then all are plotted if len(z_levels) == 0:", "extent=self._extent, transform=self._projection) if mode == PlotMode.MERGED: if self._contour_labels: c_ax.contour(c_img, self._contour_labels,", "ds: :return: \"\"\" print(\"\\n========== Global attributes =========\") for name in", "= F'{title}' if len(z_levels) > 1: c_title += F\"Z -", "not requires any z-leve, then all are plotted if 
len(z_levels)", "mode == PlotMode.CONTOUR or mode == PlotMode.MERGED: c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) if", "ax = plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()}) ax.set_extent(bbox) #", "e:{e}\") gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--') # gl.xlabel_style =", "_axs else: ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var] # Here we", "s=s, c=c, cmap=cmap) fig.colorbar(im, ax=ax, shrink=0.7) ax.coastlines() plt.title(title) plt.show() def", "extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm) else: im = c_ax.imshow(c_img, extent=self._extent,", "if cols == len(var_names): rows = len(z_levels) else: rows =", "else: c_np_data = np_variables # Single field if rot_90: c_np_data", "cmap, arrowsize, arrowstyle, minlength _vector_field = None _norm = None", "x in ('salin', 'sss', 'sal')]): return cmocean.cm.haline elif field_name.find('error') !=", "=========\") for cur_variable_name in ds.variables: cur_var = ds[cur_variable_name] print(F\"{cur_variable_name}: {cur_var.dims}", "= self._vector_field['v'] x = self._vector_field['x'] y = self._vector_field['y'] vec_keys =", "for a figure. :param rows: how many rows will the", "c_img: 2D array :param ax: geoaxes :return: \"\"\" c_ax =", "2D img for EOA data. :param c_img: 2D array :param", "in enumerate(var_names): npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title,", "Obtains the proper size for a figure. 
:param rows: how", "'c', 'b', 'g', 'w', 'k'] _figsize = 8 _font_size =", "else: c_ax.contour(c_img, extent=self._extent, transform=self._projection) if len(self._additional_polygons) > 0: pol_lats =", "attr] = value def add_colorbar(self, fig, im, ax, show_color_bar, label=\"\"):", "== PlotMode.CONTOUR: im = c_ax.contour(c_img, extent=self._extent, transform=self._projection) if mode ==", "titles _additional_polygons = [] # MUST BE SHAPELY GEOMETRIES In", "''' Wrapper function to receive raw 2D numpy data. It", "* np.ceil(len(var_names)/cols)) fig, _axs = plt.subplots(rows, cols, figsize=self.get_proper_size(rows, cols), subplot_kw={'projection':", "pol_lats += y pol_lons += x c_ax.plot(x,y, transform=self._projection, c='r') #", "streamplot of a vector field. It must be a dictionary", "'linewidth' in vec_keys: linewidth = self._vector_field['linewidth'] if 'cmap' in vec_keys:", "np_variables # Single field if rot_90: c_np_data = np.rot90(c_np_data) if", "os.path import join import matplotlib.pyplot as plt import matplotlib from", "come from: https://www.naturalearthdata.com/features/ # -- Add states roads = cfeature.NaturalEarthFeature(", "_close_figure(self): \"\"\"Depending on what is disp_images, the figures are displayed", "make this automatic and works always cbar = fig.colorbar(im, ax=ax,", "there is an array of colormaps we select the one", "from: https://www.naturalearthdata.com/features/ # -- Add states states_provinces = cfeature.NaturalEarthFeature( category='cultural',", "self._additional_polygons: if isinstance(c_polygon, shapely.geometry.linestring.LineString): x,y = c_polygon.xy elif isinstance(c_polygon, shapely.geometry.polygon.Polygon):", "c_np_data = np.flip(np.flip(c_np_data), axis=1) npdict_3d[field_name] = np.expand_dims(c_np_data, axis=0) self.plot_3d_data_npdict(npdict_3d, var_names,", "= 30 _units = '' _max_imgs_per_row = 4 _mincbar =", "= '' _max_imgs_per_row = 4 _mincbar = np.nan # User", "disp_images self._output_folder = output_folder 
self._projection = projection bbox = self.getExtent(lats,", "for i, file_name in enumerate(files[0:36]): if i % 10 ==", "y, u, v, transform=self._projection, density=density, color=c, cmap=vec_cmap, linewidth=linewidth) except Exception", "list: c_maxcbar = maxcbar[idx_var] else: c_maxcbar = maxcbar # By", "True # Selects the colormap based on the name of", "np.flip(np.flip(c_np_data), axis=1) npdict_3d[field_name] = np.expand_dims(c_np_data, axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title,", "False # When plotting non-regular grids and need precision _background", "x = self._vector_field['x'] y = self._vector_field['y'] vec_keys = self._vector_field.keys() c", "return self.__dict__[\"_\" + attr] def __setattr__(self, attr, value): '''Generic setter", "'r' density = 1 linewidth = 3 vec_cmap = cmocean.cm.solar", "bbox in all directions with that thres Args: lats: lons:", "io_utils.io_common import create_folder from viz_utils.constants import PlotMode, BackgroundType import pylab", "It is made to read xarrays, numpy arrays, and numpy", "len(var_names): rows = len(z_levels) else: rows = int(len(z_levels) * np.ceil(len(var_names)/cols))", "import matplotlib from matplotlib.colors import LogNorm from io_utils.io_common import create_folder", "title: :param file_name_prefix: :param cmap: :param flip_data: :param rot_90: :param", "list(self._lons))) c_ax.streamplot(x, y, u, v, transform=self._projection, density=density, color=c, cmap=vec_cmap, linewidth=linewidth)", "netcdf (global attributes, variables, etc) :param ds: :return: \"\"\" print(\"\\n==========", "= 'upper' if self._background == BackgroundType.CARTO_DEF: c_ax.stock_img() else: if self._background", "is None: cmap = select_colormap(c_var) else: # If there is", "non-regular grids and need precision _background = BackgroundType.BLUE_MARBLE_LR # Select", "F'{file_name_prefix}' pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight') self._close_figure() 
def plot_2d_data_xr(self, np_variables:list, var_names:list, title='',", "'b', 'g', 'w', 'k', 'y', 'r', 'c', 'b', 'g', 'w',", "if len(z_levels) == 0: z_levels = range(np_variables[var_names[0]].shape[0]) cols = np.min((self._max_imgs_per_row,", "or just closed\"\"\" if self._disp_images: plt.show() else: plt.close() def getExtent(self,", "def __setattr__(self, attr, value): '''Generic setter for all the properties", "rot_90: c_np_data = np.rot90(c_np_data) if flip_data: c_np_data = np.flip(np.flip(c_np_data), axis=1)", "we select the colorbar from the name of the variable", "len(np_variables.shape) == 3: c_np_data = np_variables[i, :, :] else: c_np_data", "vec_cmap = cmocean.cm.solar if 'color' in vec_keys: c = self._vector_field['color']", "c = 'r' density = 1 linewidth = 3 vec_cmap", "shapely import cartopy.crs as ccrs import cartopy.feature as cfeature import", "cartopy def select_colormap(field_name): ''' Based on the name if the", "the z_levels are the original ones. if len(z_names) != 0:", "\"\"\" if rows == 1: return self._figsize * cols *", "c='blue', cmap='plasma', title=''): ''' This function plots points in a", "create_folder from viz_utils.constants import PlotMode, BackgroundType import pylab import numpy", "print(F\"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}\") def add_roads(self, ax): # Names", "np.asarray(im)[:, :, :3] if i == 0: video_size = (np_im.shape[1],", "arrays, and numpy arrays in dictionaries vizobj = new EOAImageVisualizer(disp_images=True,", "ones. 
if len(z_names) != 0: c_slice_txt = z_names[c_slice] else: c_slice_txt", "colormap from cmocean Args: field_name: Returns: ''' if np.any([field_name.find(x) !=", "!= -1 for x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]): return", "output_folder self._projection = projection bbox = self.getExtent(lats, lons) self._extent =", "norm=self._norm) else: im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar,", "figure ax = _axs else: ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var]", "in dictionaries vizobj = new EOAImageVisualizer(disp_images=True, output_folder='output', lats=[lats],lons=[lons]) \"\"\" _COLORS", "in enumerate(var_names): # Iterate over the fields if rows*cols ==", "== 0: print(F\"Adding file # {i}: {file_name}\") c_file = join(input_folder,", "return cmocean.cm.speed class EOAImageVisualizer: \"\"\"This class makes plenty of plots", "rows: how many rows will the figure have :param cols:", "columns for each variable \"\"\" create_folder(self._output_folder) orig_cmap = cmap #", "_vector_field = None _norm = None # Use to normalize", "cbar.set_label(label, fontsize=font_size_cbar*1.2) else: cbar.set_label(self._units, fontsize=font_size_cbar*1.2) def plot_slice_eoa(self, c_img, ax, cmap='gray',", "ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) -> None: \"\"\" Plots a", "lats=[lats],lons=[lons]) \"\"\" _COLORS = ['y', 'r', 'c', 'b', 'g', 'w',", "use it for all the fields cmap = orig_cmap im", "ax=ax, shrink=0.7) ax.coastlines() plt.title(title) plt.show() def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels=", "False gl.right_labels = False return im def get_proper_size(self, rows, cols):", "for x in ('ssh', 'srfhgt', 'adt','surf_el')]): # cmaps_fields.append(cmocean.cm.deep_r) return cmocean.cm.curl", "-1 for x in ('vorticity', 'vort')]): return cmocean.cm.curl elif np.any([field_name.find(x)", "'vort')]): return cmocean.cm.curl elif 
np.any([field_name.find(x) != -1 for x in", "self.__dict__[\"_\" + attr] = value def add_colorbar(self, fig, im, ax,", "a colormap from cmocean Args: field_name: Returns: ''' if np.any([field_name.find(x)", "self._vector_field.keys() c = 'r' density = 1 linewidth = 3", "if type(mincbar) is list: c_maxcbar = maxcbar[idx_var] else: c_maxcbar =", "c_title += F\"Z - level: {c_slice_txt}\" ax.set_title(c_title, fontsize=self._font_size) self.add_colorbar(fig, im,", ":param bbox: :return: ''' if bbox is None: bbox =", "= './eoas_pyutils'# This is the path where the eoas_utils folder", "bbox self._lats = lats self._lons = lons self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2])", "shapely.geometry.linestring.LineString): x,y = c_polygon.xy elif isinstance(c_polygon, shapely.geometry.polygon.Polygon): x, y =", "label=\"\"): # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if show_color_bar: font_size_cbar = self._font_size * .5", "LogNorm # vizobj = EOAImageVisualizer(disp_images=True, output_folder='output', # lats=[lats],lons=[lons]) def __init__(self,", "bbox: :return: ''' if bbox is None: bbox = (-180,", "ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var] # Here we chose the", "gl.xlabel_style = {'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'} font_coords = {'size':", "arg_name]) def __getattr__(self, attr): '''Generic getter for all the properties", "MUST BE SHAPELY GEOMETRIES In case we want to include", "= (bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels = False for arg_name, arg_value in kwargs.items():", "np.isnan(mincbar): im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm) else:", "gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--') # gl.xlabel_style = {'size':", "in vec_keys: c = self._vector_field['color'] if 'density' in vec_keys: density", "as cfeature import cartopy def select_colormap(field_name): ''' Based on the", 
"if self._flip_data: origin = 'lower' else: origin = 'upper' if", "= None # Use to normalize the colormap. For example", "self._output_folder = output_folder self._projection = projection bbox = self.getExtent(lats, lons)", "= c_ax.contour(c_img, extent=self._extent, transform=self._projection) if mode == PlotMode.MERGED: if self._contour_labels:", "map :param bbox: :return: ''' if bbox is None: bbox", "are the original ones. if len(z_names) != 0: c_slice_txt =", "# Iterate over the fields if rows*cols == 1: #", "'c', 'b', 'g', 'w', 'k', 'y', 'r', 'c', 'b', 'g',", "attributes, variables, etc) :param ds: :return: \"\"\" print(\"\\n========== Global attributes", "_maxcbar = np.nan _flip_data = True _eoas_pyutils_path = './eoas_pyutils'# This", "ds.dims: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Coordinates =========\") for name in ds.coords:", "field with shape [x,y] :param var_names: :param title: :param file_name_prefix:", "fields. It uses rows for each depth, and columns for", "a streamplot of a vector field. It must be a", "import create_folder from viz_utils.constants import PlotMode, BackgroundType import pylab import", "the min and max colorbars for each field if not(np.all(np.isnan(mincbar))):", "the original ones. 
if len(z_names) != 0: c_slice_txt = z_names[c_slice]", "cur_var = ds.variables[cur_variable_name] print(F\"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}\") def add_roads(self,", "[] pol_lons = [] for c_polygon in self._additional_polygons: if isinstance(c_polygon,", "have :param cols: how many colswill the figure have :param", "None: try: u = self._vector_field['u'] v = self._vector_field['v'] x =", ":param c_img: 2D array :param ax: geoaxes :return: \"\"\" c_ax", "= plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()}) ax.set_extent(bbox) # If", "= cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='50m', facecolor='none') ax.add_feature(states_provinces, edgecolor='gray') return ax", "raw 2D numpy data. It calls the 'main' function for", "alpha=0.5, linestyle='--') # gl.xlabel_style = {'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'}", "self._close_figure() def plot_2d_data_xr(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER,", "# gl.xlabel_style = {'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'} font_coords =", "_background = BackgroundType.BLUE_MARBLE_LR # Select the background to use _auto_colormap", "the fields if rows*cols == 1: # Single figure ax", "density, color, cmap, arrowsize, arrowstyle, minlength _vector_field = None _norm", "https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if show_color_bar: font_size_cbar = self._font_size * .5 # TODO", "setter for all the properties of the class''' self.__dict__[\"_\" +", "z-leve, then all are plotted if len(z_levels) == 0: z_levels", "the titles _additional_polygons = [] # MUST BE SHAPELY GEOMETRIES", "linewidth=linewidth) except Exception as e: print(F\"Couldn't add vector field e:{e}\")", "c=c, cmap=cmap) fig.colorbar(im, ax=ax, shrink=0.7) ax.coastlines() 
plt.title(title) plt.show() def plot_3d_data_npdict(self,", "the arguments that are passed to the constructor of the", "None: \"\"\" Plots a 2D img for EOA data. :param", "-1 for x in ('salin', 'sss', 'sal')]): return cmocean.cm.haline elif", "bar to all plots _maxcbar = np.nan _flip_data = True", "states states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='50m', facecolor='none') ax.add_feature(states_provinces, edgecolor='gray')", "np_im = np.asarray(im)[:, :, :3] if i == 0: video_size", "for x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]): return cmocean.cm.speed class", "_max_imgs_per_row = 4 _mincbar = np.nan # User can set", "colors='r', extent=self._extent, transform=self._projection) else: c_ax.contour(c_img, extent=self._extent, transform=self._projection) if len(self._additional_polygons) >", "0: z_levels = range(np_variables[var_names[0]].shape[0]) cols = np.min((self._max_imgs_per_row, len(var_names))) if cols", "import listdir from os.path import join import matplotlib.pyplot as plt", "linestyle='--') # gl.xlabel_style = {'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'} font_coords", "{cur_var.dims} {cur_var.shape}\") def nc_summary(self, ds): \"\"\" Prints a summary of", "edgecolor='black') return ax def add_states(self, ax): # Names come from:", "np_variables[i, :, :] else: c_np_data = np_variables # Single field", "is list: c_maxcbar = maxcbar[idx_var] else: c_maxcbar = maxcbar #", "with LogNorm # vizobj = EOAImageVisualizer(disp_images=True, output_folder='output', # lats=[lats],lons=[lons]) def", "variable if self._auto_colormap and orig_cmap is None: cmap = select_colormap(c_var)", "_norm = None # Use to normalize the colormap. 
For", "self._contour_labels = False for arg_name, arg_value in kwargs.items(): self.__dict__[\"_\" +", "= new EOAImageVisualizer(disp_images=True, output_folder='output', lats=[lats],lons=[lons]) \"\"\" _COLORS = ['y', 'r',", "'#aaaaaa', 'weight':'bold'} font_coords = {'size': self._font_size*.6} gl.xlabel_style = font_coords gl.ylabel_style", ":param mincbar: :param maxcbar: :return: ''' npdict_3d = {} for", "the coordinates. If included threshold then increases the bbox in", "= arg_value print(self.__dict__[\"_\" + arg_name]) def __getattr__(self, attr): '''Generic getter", "mincbar=np.nan, maxcbar=np.nan) -> None: \"\"\" Plots a 2D img for", "cur_variable_name in netCDFvars.keys(): cur_var = ds.variables[cur_variable_name] print(F\"Dimensions for {cur_variable_name}: {cur_var.dimensions}", "else: rows = int(len(z_levels) * np.ceil(len(var_names)/cols)) fig, _axs = plt.subplots(rows,", "chose the min and max colorbars for each field if", "maxLon = np.amax(lons) + expand_ext bbox = (minLon, maxLon, minLat,", "= F'{var_names[idx_var]} {title}' else: c_title = F'{title}' if len(z_levels) >", "name of the field _show_var_names = False # Includes the", "a figure. :param rows: how many rows will the figure", "rows for each depth, and columns for each variable \"\"\"", "coordinates. If included threshold then increases the bbox in all", "enumerate(var_names): if len(np_variables.shape) == 3: c_np_data = np_variables[i, :, :]", "All the arguments that are passed to the constructor of", "Geospatial data (maps). 
It is made to read xarrays, numpy", "return self._figsize * cols * self._fig_prop, self._figsize * rows def", "Image.open(c_file) np_im = np.asarray(im)[:, :, :3] if i == 0:", "c_polygon.xy elif isinstance(c_polygon, shapely.geometry.polygon.Polygon): x, y = c_polygon.exterior.xy pol_lats +=", "to all plots _maxcbar = np.nan _flip_data = True _eoas_pyutils_path", "mode == PlotMode.MERGED: c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) if mode == PlotMode.CONTOUR: im", ":] else: c_np_data = np_variables # Single field if rot_90:", "orig_cmap im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode, mincbar=c_mincbar, maxcbar=c_maxcbar) if", "plot to see the polygons c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) +", "__init__(self, disp_images=True, output_folder='output', lats=[-90,90], lons =[-180,180], projection=ccrs.PlateCarree(), **kwargs): # All", "10 == 0: print(F\"Adding file # {i}: {file_name}\") c_file =", "TODO how to make this automatic and works always cbar", "self._flip_data: origin = 'lower' else: origin = 'upper' if self._background", "for name in ds.ncattrs(): print(F\"{name} = {getattr(ds, name)}\") print(\"\\n========== Variables", "= np.amin(lats) - expand_ext maxLat = np.amax(lats) + expand_ext minLon", "x in ('vorticity', 'vort')]): return cmocean.cm.curl elif np.any([field_name.find(x) != -1", "will cropp it to the limits of the locations ax.gridlines()", "create_folder(self._output_folder) orig_cmap = cmap # If the user do not", "!= -1 for x in ('temp', 'sst', 'temperature')]): return cmocean.cm.thermal", "u = self._vector_field['u'] v = self._vector_field['v'] x = self._vector_field['x'] y", "= maxcbar # By default we select the colorbar from", "y = c_polygon.exterior.xy pol_lats += y pol_lons += x c_ax.plot(x,y,", "import numpy as np import cmocean import shapely import cartopy.crs", "!= 0: c_slice_txt = z_names[c_slice] else: c_slice_txt = c_slice 
c_mincbar", "print(F\"{cur_variable_name}: {cur_var.dims} {cur_var.shape}\") def nc_summary(self, ds): \"\"\" Prints a summary", "Verify the index of the z_levels are the original ones.", "else: c_maxcbar = maxcbar # By default we select the", "multiple fields. It uses rows for each depth, and columns", "Args: field_name: Returns: ''' if np.any([field_name.find(x) != -1 for x", "fig, ax = plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()}) ax.set_extent(bbox)", "eoas_utils folder is stored with respect to the main project", "ds.variables for cur_variable_name in netCDFvars.keys(): cur_var = ds.variables[cur_variable_name] print(F\"Dimensions for", "> 0: pol_lats = [] pol_lons = [] for c_polygon", "# {i}: {file_name}\") c_file = join(input_folder, file_name) im = Image.open(c_file)", "{getattr(ds, name)}\") print(\"\\n========== Variables =========\") netCDFvars = ds.variables for cur_variable_name", "proper size for a figure. :param rows: how many rows", "lats self._lons = lons self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels = False", "rows, cols): \"\"\" Obtains the proper size for a figure.", "is the path where the eoas_utils folder is stored with", "'''Generic setter for all the properties of the class''' self.__dict__[\"_\"", "c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) + pol_lons, 0.5)) if self._vector_field !=", "# Includes the name of the field name in the", "import shapely import cartopy.crs as ccrs import cartopy.feature as cfeature", "z_levels= [], title='', file_name_prefix='', cmap=None, z_names = [], show_color_bar=True, plot_mode=PlotMode.RASTER,", "= c_slice c_mincbar = np.nan c_maxcbar = np.nan for idx_var,", "def __getattr__(self, attr): '''Generic getter for all the properties of", "c_mincbar = mincbar if not(np.all(np.isnan(maxcbar))): if type(mincbar) is list: c_maxcbar", "# and optional density, color, cmap, arrowsize, arrowstyle, 
minlength _vector_field", "8 _font_size = 30 _units = '' _max_imgs_per_row = 4", "Iterate over the fields if rows*cols == 1: # Single", "in kwargs.items(): self.__dict__[\"_\" + arg_name] = arg_value print(self.__dict__[\"_\" + arg_name])", "1: c_title += F\"Z - level: {c_slice_txt}\" ax.set_title(c_title, fontsize=self._font_size) self.add_colorbar(fig,", "_axs = plt.subplots(rows, cols, figsize=self.get_proper_size(rows, cols), subplot_kw={'projection': self._projection}) for c_zlevel,", "is list: cmap = orig_cmap[idx_var] else: # If it is", "class''' return self.__dict__[\"_\" + attr] def __setattr__(self, attr, value): '''Generic", "plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if self._background == BackgroundType.BLUE_MARBLE_HR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if self._background", "numpy arrays, and numpy arrays in dictionaries vizobj = new", "len(z_levels) > 1: c_title += F\"Z - level: {c_slice_txt}\" ax.set_title(c_title,", "minLat = np.amin(lats) - expand_ext maxLat = np.amax(lats) + expand_ext", "self._background == BackgroundType.BATHYMETRY: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree())", "img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if self._background == BackgroundType.TOPO: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png'))", "figsize=self.get_proper_size(rows, cols), subplot_kw={'projection': self._projection}) for c_zlevel, c_slice in enumerate(z_levels): #", "{c_slice_txt}\" ax.set_title(c_title, fontsize=self._font_size) self.add_colorbar(fig, im, ax, show_color_bar) plt.tight_layout(pad=.5) file_name =", "a vector field. 
It must be a dictionary with keys", "vizobj = new EOAImageVisualizer(disp_images=True, output_folder='output', lats=[lats],lons=[lons]) \"\"\" _COLORS = ['y',", "directions with that thres Args: lats: lons: inc_threshold: Returns: '''", "min and max colorbars for each field if not(np.all(np.isnan(mincbar))): if", "select the colorbar from the name of the variable if", "'''Generic getter for all the properties of the class''' return", "(minLon, maxLon, minLat, maxLat) return bbox def xr_summary(self, ds): \"\"\"", "PlotMode, BackgroundType import pylab import numpy as np import cmocean", "= -1 for i, file_name in enumerate(files[0:36]): if i %", "c_ax.streamplot(x, y, u, v, transform=self._projection, density=density, color=c, cmap=vec_cmap, linewidth=linewidth) except", "if not(np.all(np.isnan(mincbar))): if type(mincbar) is list: c_mincbar = mincbar[idx_var] else:", "if 'density' in vec_keys: density = self._vector_field['density'] if 'linewidth' in", "in ('vorticity', 'vort')]): return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for", "show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to receive raw", "in enumerate(files[0:36]): if i % 10 == 0: print(F\"Adding file", "cols * self._fig_prop, self._figsize * rows def _close_figure(self): \"\"\"Depending on", "z-levels # Verify the index of the z_levels are the", "vec_keys: c = self._vector_field['color'] if 'density' in vec_keys: density =", "print(\"\\n========== Coordinates =========\") for name in ds.coords: print(F\"{name}: {ds[name].shape}\") print(\"\\n==========", "self._contour_labels: c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection) else: c_ax.contour(c_img, extent=self._extent, transform=self._projection)", "[] # MUST BE SHAPELY GEOMETRIES In case we want", "var_names:list, z_levels= [], title='', file_name_prefix='', cmap=None, z_names = [], show_color_bar=True,", "== PlotMode.RASTER or 
mode == PlotMode.MERGED: if self._contourf: im =", "Plots multiple z_levels for multiple fields. It uses rows for", "for idx_var, c_var in enumerate(var_names): # Iterate over the fields", "* cols * self._fig_prop, self._figsize else: return self._figsize * cols", "+ attr] = value def add_colorbar(self, fig, im, ax, show_color_bar,", "np_variables:list, var_names:list, title='', file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): '''", "variables. They can be with shape [fields, x, y] or", "viz_utils.constants import PlotMode, BackgroundType import pylab import numpy as np", "the limits of the locations ax.gridlines() im = ax.scatter(lons, lats,", "self._figsize else: return self._figsize * cols * self._fig_prop, self._figsize *", "If included threshold then increases the bbox in all directions", "transform=self._projection, c='r') # Adds a threshold to the plot to", "field_name in enumerate(var_names): npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0],", "if i == 0: video_size = (np_im.shape[1], np_im.shape[0]) out_video =", "origin = 'upper' if self._background == BackgroundType.CARTO_DEF: c_ax.stock_img() else: if", "return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x in ('salin',", "{ds[name].shape}\") print(\"\\n========== Variables =========\") for cur_variable_name in ds.variables: cur_var =", "in all directions with that thres Args: lats: lons: inc_threshold:", "see the polygons c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) + pol_lons, 0.5))", "print(F\"Couldn't add vector field e:{e}\") gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5,", "type(mincbar) is list: c_maxcbar = maxcbar[idx_var] else: c_maxcbar = maxcbar", "flip_data: c_np_data = np.flip(np.flip(c_np_data), axis=1) npdict_3d[field_name] = np.expand_dims(c_np_data, axis=0) 
self.plot_3d_data_npdict(npdict_3d,", "np.any([field_name.find(x) != -1 for x in ('salin', 'sss', 'sal')]): return", "for name in ds.dims: print(F\"{name}: {ds[name].shape}\") print(\"\\n========== Coordinates =========\") for", "in netCDFvars.keys(): cur_var = ds.variables[cur_variable_name] print(F\"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}\")", "'weight':'bold'} font_coords = {'size': self._font_size*.6} gl.xlabel_style = font_coords gl.ylabel_style =", "fps, video_size, True) out_video.write(np_im[:, :, ::-1]) out_video.release() cv2.destroyAllWindows() print(\"Done! yeah", "im def get_proper_size(self, rows, cols): \"\"\" Obtains the proper size", ":, :3] if i == 0: video_size = (np_im.shape[1], np_im.shape[0])", "expand_ext minLon = np.amin(lons) - expand_ext maxLon = np.amax(lons) +", "polygons in the plots (all of them) # If you", "e: print(F\"Couldn't add vector field e:{e}\") gl = c_ax.gridlines(draw_labels=True, color='grey',", "the path where the eoas_utils folder is stored with respect", "for x in ('salin', 'sss', 'sal')]): return cmocean.cm.haline elif field_name.find('error')", "= [] # MUST BE SHAPELY GEOMETRIES In case we", "arg_value in kwargs.items(): self.__dict__[\"_\" + arg_name] = arg_value print(self.__dict__[\"_\" +", "self._figsize * rows def _close_figure(self): \"\"\"Depending on what is disp_images,", "PlotMode.RASTER or mode == PlotMode.MERGED: if self._contourf: im = c_ax.contourf(self._lons,", "disp_images=True, output_folder='output', lats=[-90,90], lons =[-180,180], projection=ccrs.PlateCarree(), **kwargs): # All the", "extent=self._extent, transform=self._projection) if len(self._additional_polygons) > 0: pol_lats = [] pol_lons", "it to the limits of the locations ax.gridlines() im =", "self.getExtent(lats, lons) self._extent = bbox self._lats = lats self._lons =", "np_variables:list, var_names:list, z_levels= [], title='', file_name_prefix='', cmap=None, z_names = [],", "ax.gridlines() im = 
ax.scatter(lons, lats, s=s, c=c, cmap=cmap) fig.colorbar(im, ax=ax,", "geoaxes :return: \"\"\" c_ax = ax if self._flip_data: origin =", "cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x in ('salin', 'sss',", "['y', 'r', 'c', 'b', 'g', 'w', 'k', 'y', 'r', 'c',", "a min and max colorbar values to 'force' same color", "mincbar=np.nan, maxcbar=np.nan): \"\"\" Plots multiple z_levels for multiple fields. It", ".5 # TODO how to make this automatic and works", "Based on the name if the field it chooses a", "this, it will cropp it to the limits of the", "plot_mode: :param mincbar: :param maxcbar: :return: ''' npdict_3d = {}", "set a min and max colorbar values to 'force' same", "Returns: ''' minLat = np.amin(lats) - expand_ext maxLat = np.amax(lats)", "calls the 'main' function for 3D plotting :param np_variables: :param", "c='r') # Adds a threshold to the plot to see", "= self._vector_field['color'] if 'density' in vec_keys: density = self._vector_field['density'] if", "2D numpy data. 
It calls the 'main' function for 3D", "i % 10 == 0: print(F\"Adding file # {i}: {file_name}\")", "in ('temp', 'sst', 'temperature')]): return cmocean.cm.thermal elif np.any([field_name.find(x) != -1", "polygons c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) + pol_lons, 0.5)) if self._vector_field", "show_color_bar, label=\"\"): # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if show_color_bar: font_size_cbar = self._font_size *", "np.amin(lons) - expand_ext maxLon = np.amax(lons) + expand_ext bbox =", "return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x in ('temp',", "this field if type(orig_cmap) is list: cmap = orig_cmap[idx_var] else:", "the plots (all of them) # If you want to", "c_np_data = np_variables[i, :, :] else: c_np_data = np_variables #", "ds.ncattrs(): print(F\"{name} = {getattr(ds, name)}\") print(\"\\n========== Variables =========\") netCDFvars =", "from os.path import join import matplotlib.pyplot as plt import matplotlib", "for cur_variable_name in netCDFvars.keys(): cur_var = ds.variables[cur_variable_name] print(F\"Dimensions for {cur_variable_name}:", "the colormap. 
For example with LogNorm # vizobj = EOAImageVisualizer(disp_images=True,", "= 8 _font_size = 30 _units = '' _max_imgs_per_row =", "with respect to the main project _contourf = False #", "gl.ylabel_style = font_coords gl.top_labels = False gl.right_labels = False return", "cur_variable_name in ds.variables: cur_var = ds[cur_variable_name] print(F\"{cur_variable_name}: {cur_var.dims} {cur_var.shape}\") def", "BackgroundType.BLUE_MARBLE_LR # Select the background to use _auto_colormap = True", "if self._background == BackgroundType.BLUE_MARBLE_LR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if self._background ==", "Returns: ''' if np.any([field_name.find(x) != -1 for x in ('ssh',", "additional polygons in the plots (all of them) # If", "= np.nan # User can set a min and max", "passed to the constructor of the class MUST have its", "colormap based on the name of the field _show_var_names =", "to the constructor of the class MUST have its name", "or mode == PlotMode.MERGED: if self._contourf: im = c_ax.contourf(self._lons, self._lats,", "def nc_summary(self, ds): \"\"\" Prints a summary of the netcdf", "= c_ax.contourf(self._lons, self._lats, c_img, num_colors=255, cmap='inferno', extent=self._extent) else: if np.isnan(mincbar):", "BackgroundType.BLUE_MARBLE_LR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if self._background == BackgroundType.BLUE_MARBLE_HR: img =", "bbox = (minLon, maxLon, minLat, maxLat) return bbox def xr_summary(self,", "c_slice_txt = z_names[c_slice] else: c_slice_txt = c_slice c_mincbar = np.nan", "arguments that are passed to the constructor of the class", "cmap = select_colormap(c_var) else: # If there is an array", "c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--') # gl.xlabel_style = {'size': self._font_size/2, 'color':", "= 'r' density = 1 linewidth = 3 vec_cmap =", "cartopy.crs as ccrs import cartopy.feature as cfeature import cartopy def", "in 
self._additional_polygons: if isinstance(c_polygon, shapely.geometry.linestring.LineString): x,y = c_polygon.xy elif isinstance(c_polygon,", "it. self._disp_images = disp_images self._output_folder = output_folder self._projection = projection", "optional density, color, cmap, arrowsize, arrowstyle, minlength _vector_field = None", "cmocean.cm.solar if 'color' in vec_keys: c = self._vector_field['color'] if 'density'", "npdict_3d = {} for i, field_name in enumerate(var_names): npdict_3d[field_name] =", "one cmap, then we use it for all the fields", ":param cmap: :param flip_data: :param rot_90: :param show_color_bar: :param plot_mode:", "return self._figsize * cols * self._fig_prop, self._figsize else: return self._figsize", "included threshold then increases the bbox in all directions with", "if 'color' in vec_keys: c = self._vector_field['color'] if 'density' in", "have its name on it. self._disp_images = disp_images self._output_folder =", "fps=24): files = listdir(input_folder) files.sort() print(F\"Generating video file: {output_file}\") out_video", "if 'cmap' in vec_keys: vec_cmap = self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) c_ax.streamplot(x,", "fontsize=self._font_size) self.add_colorbar(fig, im, ax, show_color_bar) plt.tight_layout(pad=.5) file_name = F'{file_name_prefix}' pylab.savefig(join(self._output_folder,", "cv2.VideoWriter(output_file, cv2.VideoWriter_fourcc(*'mp4v'), fps, video_size, True) out_video.write(np_im[:, :, ::-1]) out_video.release() cv2.destroyAllWindows()", "rows = int(len(z_levels) * np.ceil(len(var_names)/cols)) fig, _axs = plt.subplots(rows, cols,", "{'size': self._font_size*.6} gl.xlabel_style = font_coords gl.ylabel_style = font_coords gl.top_labels =", "var_names:list, title='', file_name_prefix='', cmap=None, flip_data=False, rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan):", "plots _maxcbar = np.nan _flip_data = True 
_eoas_pyutils_path = './eoas_pyutils'#", "# If you want to add a streamplot of a", ":param prop: Proportion is the proportion to use w/h :return:", "# MUST BE SHAPELY GEOMETRIES In case we want to", "self._extent = bbox self._lats = lats self._lons = lons self._fig_prop", "im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm) else: im", "if len(np_variables.shape) == 3: c_np_data = np_variables[i, :, :] else:", "if np.isnan(mincbar): im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm)", "self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode, mincbar=c_mincbar, maxcbar=c_maxcbar) if self._show_var_names: c_title =", "= plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if self._background == BackgroundType.BATHYMETRY: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img,", "type(mincbar) is list: c_mincbar = mincbar[idx_var] else: c_mincbar = mincbar", "i == 0: video_size = (np_im.shape[1], np_im.shape[0]) out_video = cv2.VideoWriter(output_file,", "cols = np.min((self._max_imgs_per_row, len(var_names))) if cols == len(var_names): rows =", "rows def _close_figure(self): \"\"\"Depending on what is disp_images, the figures", "plots (all of them) # If you want to add", ":param var_names: :param title: :param file_name_prefix: :param cmap: :param flip_data:", "_figsize = 8 _font_size = 30 _units = '' _max_imgs_per_row", "with shape [fields, x, y] or just a single field", "name)}\") print(\"\\n========== Variables =========\") netCDFvars = ds.variables for cur_variable_name in", "u, v, transform=self._projection, density=density, color=c, cmap=vec_cmap, linewidth=linewidth) except Exception as", "else: ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var] # Here we chose", "minLon = np.amin(lons) - expand_ext maxLon = np.amax(lons) + expand_ext", 
"can be with shape [fields, x, y] or just a", "''' minLat = np.amin(lats) - expand_ext maxLat = np.amax(lats) +", "x, y = c_polygon.exterior.xy pol_lats += y pol_lons += x", "if len(self._additional_polygons) > 0: pol_lats = [] pol_lons = []", "= self._vector_field.keys() c = 'r' density = 1 linewidth =", "the class''' self.__dict__[\"_\" + attr] = value def add_colorbar(self, fig,", "x c_ax.plot(x,y, transform=self._projection, c='r') # Adds a threshold to the", "show_color_bar) plt.tight_layout(pad=.5) file_name = F'{file_name_prefix}' pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight') self._close_figure() def", "# When plotting non-regular grids and need precision _background =", "'r', 'c', 'b', 'g', 'w', 'k', 'y', 'r', 'c', 'b',", "else: c_mincbar = mincbar if not(np.all(np.isnan(maxcbar))): if type(mincbar) is list:", "the plot to see the polygons c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons)", "all the properties of the class''' self.__dict__[\"_\" + attr] =", "color='grey', alpha=0.5, linestyle='--') # gl.xlabel_style = {'size': self._font_size/2, 'color': '#aaaaaa',", "ds.variables: cur_var = ds[cur_variable_name] print(F\"{cur_variable_name}: {cur_var.dims} {cur_var.shape}\") def nc_summary(self, ds):", "# -- Add states states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='50m',", "np.amax(lons) + expand_ext bbox = (minLon, maxLon, minLat, maxLat) return", "for all the properties of the class''' self.__dict__[\"_\" + attr]", "+= F\"Z - level: {c_slice_txt}\" ax.set_title(c_title, fontsize=self._font_size) self.add_colorbar(fig, im, ax,", "print(\"\\n========== Global attributes =========\") for name in ds.ncattrs(): print(F\"{name} =", ":return: ''' if bbox is None: bbox = (-180, 180,", "npdict_3d[field_name] = np.expand_dims(c_np_data, axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, 
file_name_prefix=file_name_prefix, cmap=cmap,", "rows == 1: return self._figsize * cols * self._fig_prop, self._figsize", "self._background == BackgroundType.BLUE_MARBLE_HR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if self._background == BackgroundType.TOPO:", "add a streamplot of a vector field. It must be", "self.__dict__[\"_\" + attr] def __setattr__(self, attr, value): '''Generic setter for", "of the locations ax.gridlines() im = ax.scatter(lons, lats, s=s, c=c,", "the fields cmap = orig_cmap im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap,", "# -- Add states roads = cfeature.NaturalEarthFeature( category='cultural', name='roads', scale='10m',", "plt.subplots(rows, cols, figsize=self.get_proper_size(rows, cols), subplot_kw={'projection': self._projection}) for c_zlevel, c_slice in", "x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]): return cmocean.cm.speed class EOAImageVisualizer:", "In case we want to include additional polygons in the", "for i, field_name in enumerate(var_names): npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0) self.plot_3d_data_npdict(npdict_3d,", "i, field_name in enumerate(var_names): if len(np_variables.shape) == 3: c_np_data =", "vizobj = EOAImageVisualizer(disp_images=True, output_folder='output', # lats=[lats],lons=[lons]) def __init__(self, disp_images=True, output_folder='output',", "= np.expand_dims(c_np_data, axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names", "Use to normalize the colormap. 
For example with LogNorm #", "If we do not set this, it will cropp it", "cmap = orig_cmap im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode, mincbar=c_mincbar,", "c_np_data = np_variables # Single field if rot_90: c_np_data =", "self._lats, c_img, num_colors=255, cmap='inferno', extent=self._extent) else: if np.isnan(mincbar): im =", "cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) -> None: \"\"\" Plots a 2D", "the proper size for a figure. :param rows: how many", "When plotting non-regular grids and need precision _background = BackgroundType.BLUE_MARBLE_LR", "the bbox in all directions with that thres Args: lats:", "{title}' else: c_title = F'{title}' if len(z_levels) > 1: c_title", "PlotMode.CONTOUR: im = c_ax.contour(c_img, extent=self._extent, transform=self._projection) if mode == PlotMode.MERGED:", "_mincbar = np.nan # User can set a min and", "cmap=cmap) fig.colorbar(im, ax=ax, shrink=0.7) ax.coastlines() plt.title(title) plt.show() def plot_3d_data_npdict(self, np_variables:list,", "== 1: # Single figure ax = _axs else: ax", "Global attributes =========\") for name in ds.ncattrs(): print(F\"{name} = {getattr(ds,", "is an array of colormaps we select the one for", "one for this field if type(orig_cmap) is list: cmap =", "\"\"\" create_folder(self._output_folder) orig_cmap = cmap # If the user do", "__setattr__(self, attr, value): '''Generic setter for all the properties of", "x,y = c_polygon.xy elif isinstance(c_polygon, shapely.geometry.polygon.Polygon): x, y = c_polygon.exterior.xy", "function for 3D plotting :param np_variables: :param var_names: :param title:", "'upper' if self._background == BackgroundType.CARTO_DEF: c_ax.stock_img() else: if self._background ==", "{} for i, field_name in enumerate(var_names): npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0)", "join(input_folder, file_name) im = Image.open(c_file) np_im = np.asarray(im)[:, :, :3]", "we are plotting 
Geospatial data (maps). It is made to", "name in ds.ncattrs(): print(F\"{name} = {getattr(ds, name)}\") print(\"\\n========== Variables =========\")", "colormaps we select the one for this field if type(orig_cmap)", "None: lats = self.lats if lons is None: lons =", "the locations ax.gridlines() im = ax.scatter(lons, lats, s=s, c=c, cmap=cmap)", "This is the path where the eoas_utils folder is stored", "if np.any([field_name.find(x) != -1 for x in ('ssh', 'srfhgt', 'adt','surf_el')]):", "for each variable \"\"\" create_folder(self._output_folder) orig_cmap = cmap # If", "cmocean.cm.oxy elif np.any([field_name.find(x) != -1 for x in ('u_', 'v_',", "be with shape [fields, x, y] or just a single", "'main' function for 3D plotting :param np_variables: :param var_names: :param", "Select the background to use _auto_colormap = True # Selects", "= {} for i, field_name in enumerate(var_names): if len(np_variables.shape) ==", "bbox = self.getExtent(lats, lons) self._extent = bbox self._lats = lats" ]
[ "# Dataset path target_path = Path('target/') annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath() dataset", "10: cv2.imwrite(target_path +'ADE_train_0000000'+ str(count) + \".png\", arr) elif count <", "cv2.imwrite(target_path +'ADE_train_0000'+ str(count) + \".png\", arr) else: cv2.imwrite(target_path +'ADE_train_000'+ str(count)", "change it to 255. # If it is >= 1,", "arr) elif count < 10000 and count > 999: cv2.imwrite(target_path", "np import cv2 # Dataset path target_path = Path('target/') annotation_images_path", "count > 999: cv2.imwrite(target_path +'ADE_train_0000'+ str(count) + \".png\", arr) else:", "it to 255. # If it is >= 1, increment", "os.path import isfile, join from path import Path import numpy", "image images[n] = cv2.imread(join(annotation_images_path,dataset[n])) # Convert it to array array", "all Training Images for n in range(0, len(dataset)): # Read", "np.asarray(images[n],dtype=np.int8) # Conditions when the value equal less than 1,", "elif count < 10000 and count > 999: cv2.imwrite(target_path +'ADE_train_0000'+", "import cv2 # Dataset path target_path = Path('target/') annotation_images_path =", "n in range(0, len(dataset)): # Read image images[n] = cv2.imread(join(annotation_images_path,dataset[n]))", "and count > 999: cv2.imwrite(target_path +'ADE_train_0000'+ str(count) + \".png\", arr)", "in listdir(annotation_images_path) if isfile(join(annotation_images_path,f))] images = np.empty(len(dataset), dtype = object)", "target_path = Path('target/') annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath() dataset = [ f", "array = np.asarray(images[n],dtype=np.int8) # Conditions when the value equal less", "from os.path import isfile, join from path import Path import", "+ \".png\", arr) elif count < 100 and count >", "If it is >= 1, increment it by -1 arr", "str(count) + \".png\", arr) else: cv2.imwrite(target_path +'ADE_train_000'+ str(count) + \".png\",", "import Path import numpy as 
np import cv2 # Dataset", "cv2.imread(join(annotation_images_path,dataset[n])) # Convert it to array array = np.asarray(images[n],dtype=np.int8) #", "cv2.imwrite(target_path +'ADE_train_00000'+ str(count) + \".png\", arr) elif count < 10000", "# Convert it to array array = np.asarray(images[n],dtype=np.int8) # Conditions", "in range(0, len(dataset)): # Read image images[n] = cv2.imread(join(annotation_images_path,dataset[n])) #", "count > 9: cv2.imwrite(target_path +'ADE_train_000000'+ str(count) + \".png\", arr) elif", "> 9: cv2.imwrite(target_path +'ADE_train_000000'+ str(count) + \".png\", arr) elif count", "as np import cv2 # Dataset path target_path = Path('target/')", "[ f for f in listdir(annotation_images_path) if isfile(join(annotation_images_path,f))] images =", "is >= 1, increment it by -1 arr = np.where(array", "< 1, 255, array -1) #Saved it to another file", "path import Path import numpy as np import cv2 #", "count = 1 # Iterate all Training Images for n", "and count > 9: cv2.imwrite(target_path +'ADE_train_000000'+ str(count) + \".png\", arr)", "# Iterate all Training Images for n in range(0, len(dataset)):", "Iterate all Training Images for n in range(0, len(dataset)): #", "\".png\", arr) else: cv2.imwrite(target_path +'ADE_train_000'+ str(count) + \".png\", arr) print(str(count)", "elif count < 100 and count > 9: cv2.imwrite(target_path +'ADE_train_000000'+", "< 1000 and count > 99: cv2.imwrite(target_path +'ADE_train_00000'+ str(count) +", "10000 and count > 999: cv2.imwrite(target_path +'ADE_train_0000'+ str(count) + \".png\",", "+'ADE_train_0000000'+ str(count) + \".png\", arr) elif count < 100 and", "from path import Path import numpy as np import cv2", "= Path('target/') annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath() dataset = [ f for", "Path import numpy as np import cv2 # Dataset path", "str(count) + \".png\", arr) elif count < 10000 and count", "if count < 10: cv2.imwrite(target_path +'ADE_train_0000000'+ 
str(count) + \".png\", arr)", "f in listdir(annotation_images_path) if isfile(join(annotation_images_path,f))] images = np.empty(len(dataset), dtype =", "Read image images[n] = cv2.imread(join(annotation_images_path,dataset[n])) # Convert it to array", "listdir from os.path import isfile, join from path import Path", "Dataset path target_path = Path('target/') annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath() dataset =", "# Conditions when the value equal less than 1, change", "and count > 99: cv2.imwrite(target_path +'ADE_train_00000'+ str(count) + \".png\", arr)", "+'ADE_train_00000'+ str(count) + \".png\", arr) elif count < 10000 and", "isfile, join from path import Path import numpy as np", "+ \".png\", arr) else: cv2.imwrite(target_path +'ADE_train_000'+ str(count) + \".png\", arr)", "isfile(join(annotation_images_path,f))] images = np.empty(len(dataset), dtype = object) count = 1", "+'ADE_train_0000'+ str(count) + \".png\", arr) else: cv2.imwrite(target_path +'ADE_train_000'+ str(count) +", "cv2.imwrite(target_path +'ADE_train_0000000'+ str(count) + \".png\", arr) elif count < 100", "file if count < 10: cv2.imwrite(target_path +'ADE_train_0000000'+ str(count) + \".png\",", "< 100 and count > 9: cv2.imwrite(target_path +'ADE_train_000000'+ str(count) +", "it is >= 1, increment it by -1 arr =", "1, 255, array -1) #Saved it to another file if", "= np.empty(len(dataset), dtype = object) count = 1 # Iterate", "= object) count = 1 # Iterate all Training Images", "import numpy as np import cv2 # Dataset path target_path", "object) count = 1 # Iterate all Training Images for", "1000 and count > 99: cv2.imwrite(target_path +'ADE_train_00000'+ str(count) + \".png\",", "#Saved it to another file if count < 10: cv2.imwrite(target_path", "else: cv2.imwrite(target_path +'ADE_train_000'+ str(count) + \".png\", arr) print(str(count) + \".png", "< 10: cv2.imwrite(target_path +'ADE_train_0000000'+ str(count) + \".png\", arr) elif count", "arr = 
np.where(array < 1, 255, array -1) #Saved it", "-1) #Saved it to another file if count < 10:", "str(count) + \".png\", arr) elif count < 1000 and count", "= np.asarray(images[n],dtype=np.int8) # Conditions when the value equal less than", "count < 1000 and count > 99: cv2.imwrite(target_path +'ADE_train_00000'+ str(count)", "import isfile, join from path import Path import numpy as", "it to another file if count < 10: cv2.imwrite(target_path +'ADE_train_0000000'+", "Convert it to array array = np.asarray(images[n],dtype=np.int8) # Conditions when", "import listdir from os.path import isfile, join from path import", "Path('target/') annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath() dataset = [ f for f", "count > 99: cv2.imwrite(target_path +'ADE_train_00000'+ str(count) + \".png\", arr) elif", "from os import listdir from os.path import isfile, join from", "count < 10000 and count > 999: cv2.imwrite(target_path +'ADE_train_0000'+ str(count)", "to 255. # If it is >= 1, increment it", "np.empty(len(dataset), dtype = object) count = 1 # Iterate all", "-1 arr = np.where(array < 1, 255, array -1) #Saved", "+ \".png\", arr) elif count < 1000 and count >", "arr) else: cv2.imwrite(target_path +'ADE_train_000'+ str(count) + \".png\", arr) print(str(count) +", "value equal less than 1, change it to 255. 
#", "Training Images for n in range(0, len(dataset)): # Read image", "arr) elif count < 1000 and count > 99: cv2.imwrite(target_path", "images[n] = cv2.imread(join(annotation_images_path,dataset[n])) # Convert it to array array =", "range(0, len(dataset)): # Read image images[n] = cv2.imread(join(annotation_images_path,dataset[n])) # Convert", "path target_path = Path('target/') annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath() dataset = [", "arr) elif count < 100 and count > 9: cv2.imwrite(target_path", "len(dataset)): # Read image images[n] = cv2.imread(join(annotation_images_path,dataset[n])) # Convert it", "+ \".png\", arr) elif count < 10000 and count >", "when the value equal less than 1, change it to", "than 1, change it to 255. # If it is", "+'ADE_train_000000'+ str(count) + \".png\", arr) elif count < 1000 and", "\".png\", arr) elif count < 1000 and count > 99:", "= 1 # Iterate all Training Images for n in", "str(count) + \".png\", arr) print(str(count) + \".png is printed\") count", "listdir(annotation_images_path) if isfile(join(annotation_images_path,f))] images = np.empty(len(dataset), dtype = object) count", "+'ADE_train_000'+ str(count) + \".png\", arr) print(str(count) + \".png is printed\")", "less than 1, change it to 255. 
# If it", "Images for n in range(0, len(dataset)): # Read image images[n]", "to array array = np.asarray(images[n],dtype=np.int8) # Conditions when the value", "= [ f for f in listdir(annotation_images_path) if isfile(join(annotation_images_path,f))] images", "for f in listdir(annotation_images_path) if isfile(join(annotation_images_path,f))] images = np.empty(len(dataset), dtype", "increment it by -1 arr = np.where(array < 1, 255,", "str(count) + \".png\", arr) elif count < 100 and count", "os import listdir from os.path import isfile, join from path", "# Read image images[n] = cv2.imread(join(annotation_images_path,dataset[n])) # Convert it to", "cv2.imwrite(target_path +'ADE_train_000'+ str(count) + \".png\", arr) print(str(count) + \".png is", "Conditions when the value equal less than 1, change it", "cv2.imwrite(target_path +'ADE_train_000000'+ str(count) + \".png\", arr) elif count < 1000", "> 99: cv2.imwrite(target_path +'ADE_train_00000'+ str(count) + \".png\", arr) elif count", "it to array array = np.asarray(images[n],dtype=np.int8) # Conditions when the", "count < 100 and count > 9: cv2.imwrite(target_path +'ADE_train_000000'+ str(count)", "join from path import Path import numpy as np import", "255, array -1) #Saved it to another file if count", "the value equal less than 1, change it to 255.", "numpy as np import cv2 # Dataset path target_path =", "dtype = object) count = 1 # Iterate all Training", "elif count < 1000 and count > 99: cv2.imwrite(target_path +'ADE_train_00000'+", "another file if count < 10: cv2.imwrite(target_path +'ADE_train_0000000'+ str(count) +", "\".png\", arr) elif count < 100 and count > 9:", "= cv2.imread(join(annotation_images_path,dataset[n])) # Convert it to array array = np.asarray(images[n],dtype=np.int8)", "for n in range(0, len(dataset)): # Read image images[n] =", "= Path('dataset/ade20k/annotations/training/').abspath() dataset = [ f for f in listdir(annotation_images_path)", "if isfile(join(annotation_images_path,f))] 
images = np.empty(len(dataset), dtype = object) count =", "count < 10: cv2.imwrite(target_path +'ADE_train_0000000'+ str(count) + \".png\", arr) elif", "by -1 arr = np.where(array < 1, 255, array -1)", "1, increment it by -1 arr = np.where(array < 1,", "equal less than 1, change it to 255. # If", "= np.where(array < 1, 255, array -1) #Saved it to", "f for f in listdir(annotation_images_path) if isfile(join(annotation_images_path,f))] images = np.empty(len(dataset),", "1 # Iterate all Training Images for n in range(0,", "it by -1 arr = np.where(array < 1, 255, array", "annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath() dataset = [ f for f in", "9: cv2.imwrite(target_path +'ADE_train_000000'+ str(count) + \".png\", arr) elif count <", "99: cv2.imwrite(target_path +'ADE_train_00000'+ str(count) + \".png\", arr) elif count <", "> 999: cv2.imwrite(target_path +'ADE_train_0000'+ str(count) + \".png\", arr) else: cv2.imwrite(target_path", "1, change it to 255. # If it is >=", "array array = np.asarray(images[n],dtype=np.int8) # Conditions when the value equal", "dataset = [ f for f in listdir(annotation_images_path) if isfile(join(annotation_images_path,f))]", "+ \".png\", arr) print(str(count) + \".png is printed\") count +=", "array -1) #Saved it to another file if count <", "images = np.empty(len(dataset), dtype = object) count = 1 #", "np.where(array < 1, 255, array -1) #Saved it to another", "255. 
# If it is >= 1, increment it by", "999: cv2.imwrite(target_path +'ADE_train_0000'+ str(count) + \".png\", arr) else: cv2.imwrite(target_path +'ADE_train_000'+", "\".png\", arr) elif count < 10000 and count > 999:", "\".png\", arr) print(str(count) + \".png is printed\") count += 1", "< 10000 and count > 999: cv2.imwrite(target_path +'ADE_train_0000'+ str(count) +", "# If it is >= 1, increment it by -1", "100 and count > 9: cv2.imwrite(target_path +'ADE_train_000000'+ str(count) + \".png\",", "Path('dataset/ade20k/annotations/training/').abspath() dataset = [ f for f in listdir(annotation_images_path) if", "to another file if count < 10: cv2.imwrite(target_path +'ADE_train_0000000'+ str(count)", ">= 1, increment it by -1 arr = np.where(array <", "cv2 # Dataset path target_path = Path('target/') annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath()" ]
[ ".diagnostics_sink_properties import DiagnosticsSinkProperties from .diagnostics_description import DiagnosticsDescription from .application_properties import", "ApplicationResourceDescription from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism from .auto_scaling_metric import AutoScalingMetric from", "ProvisionedResourceProperties from .proxy_resource import ProxyResource from .managed_proxy_resource import ManagedProxyResource from", "ErrorModel, ErrorModelException from .operation_result import OperationResult from .provisioned_resource_properties import ProvisionedResourceProperties", "VolumeResourceDescription from .network_resource_properties import NetworkResourceProperties from .local_network_resource_properties import LocalNetworkResourceProperties from", "'HealthState', 'SecretKind', 'VolumeProvider', 'SizeTypes', 'ApplicationScopedVolumeKind', 'NetworkKind', 'HeaderMatchType', 'OperatingSystemType', 'DiagnosticsSinkKind', 'AutoScalingMechanismKind',", "from .volume_resource_description import VolumeResourceDescription from .network_resource_properties import NetworkResourceProperties from .local_network_resource_properties", "HttpRouteMatchPath from .http_route_match_header import HttpRouteMatchHeader from .http_route_match_rule import HttpRouteMatchRule from", "'ErrorDetailsModel', 'ErrorErrorModel', 'ErrorModel', 'ErrorModelException', 'OperationResult', 'ProvisionedResourceProperties', 'ProxyResource', 'ManagedProxyResource', 'Resource', 'TrackedResource',", "if the code is # regenerated. 
# -------------------------------------------------------------------------- from .available_operation_display", "import ServiceReplicaProperties from .service_replica_description import ServiceReplicaDescription from .average_load_scaling_trigger import AverageLoadScalingTrigger", "'GatewayResourceDescriptionPaged', 'ApplicationResourceDescriptionPaged', 'ServiceResourceDescriptionPaged', 'ServiceReplicaDescriptionPaged', 'ResourceStatus', 'HealthState', 'SecretKind', 'VolumeProvider', 'SizeTypes', 'ApplicationScopedVolumeKind',", "from .application_properties import ApplicationProperties from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription from .application_resource_description", "-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. #", "import NetworkResourcePropertiesBase from .network_resource_description import NetworkResourceDescription from .gateway_destination import GatewayDestination", "'HttpConfig', 'GatewayProperties', 'GatewayResourceDescription', 'ImageRegistryCredential', 'EnvironmentVariable', 'Setting', 'ContainerLabel', 'EndpointProperties', 'ResourceRequests', 'ResourceLimits',", ".endpoint_ref import EndpointRef from .network_ref import NetworkRef from .network_resource_properties_base import", "import EnvironmentVariable from .setting import Setting from .container_label import ContainerLabel", "import ResourceRequirements from .diagnostics_ref import DiagnosticsRef from .reliable_collections_ref import ReliableCollectionsRef", "VolumeResourceDescriptionPaged from .network_resource_description_paged import NetworkResourceDescriptionPaged from .gateway_resource_description_paged import GatewayResourceDescriptionPaged from", "from .inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties from .secret_resource_properties_base import SecretResourcePropertiesBase from 
.secret_resource_description", "DiagnosticsRef from .reliable_collections_ref import ReliableCollectionsRef from .container_state import ContainerState from", ".volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile from .volume_properties import VolumeProperties from .volume_reference import", "'ManagedProxyResource', 'Resource', 'TrackedResource', 'SecretResourceProperties', 'InlinedValueSecretResourceProperties', 'SecretResourcePropertiesBase', 'SecretResourceDescription', 'SecretValue', 'SecretValueProperties', 'SecretValueResourceDescription',", "import SecretResourceProperties from .inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties from .secret_resource_properties_base import SecretResourcePropertiesBase", ".auto_scaling_policy import AutoScalingPolicy from .service_resource_description import ServiceResourceDescription from .diagnostics_sink_properties import", "incorrect behavior and will be lost if the code is", "'NetworkResourceDescriptionPaged', 'GatewayResourceDescriptionPaged', 'ApplicationResourceDescriptionPaged', 'ServiceResourceDescriptionPaged', 'ServiceReplicaDescriptionPaged', 'ResourceStatus', 'HealthState', 'SecretKind', 'VolumeProvider', 'SizeTypes',", "from .operation_result import OperationResult from .provisioned_resource_properties import ProvisionedResourceProperties from .proxy_resource", "'ReliableCollectionsRef', 'ContainerState', 'ContainerEvent', 'ContainerInstanceView', 'ContainerCodePackageProperties', 'AutoScalingTrigger', 'AutoScalingMechanism', 'AutoScalingPolicy', 'ServiceResourceDescription', 'DiagnosticsSinkProperties',", "AutoRest Code Generator. 
# Changes may cause incorrect behavior and", ".application_properties import ApplicationProperties from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription from .application_resource_description import", ".service_replica_properties import ServiceReplicaProperties from .service_replica_description import ServiceReplicaDescription from .average_load_scaling_trigger import", "import ( ResourceStatus, HealthState, SecretKind, VolumeProvider, SizeTypes, ApplicationScopedVolumeKind, NetworkKind, HeaderMatchType,", "'NetworkResourcePropertiesBase', 'NetworkResourceDescription', 'GatewayDestination', 'TcpConfig', 'HttpRouteMatchPath', 'HttpRouteMatchHeader', 'HttpRouteMatchRule', 'HttpRouteConfig', 'HttpHostConfig', 'HttpConfig',", "'DiagnosticsDescription', 'ApplicationProperties', 'AzureInternalMonitoringPipelineSinkDescription', 'ApplicationResourceDescription', 'AddRemoveReplicaScalingMechanism', 'AutoScalingMetric', 'AutoScalingResourceMetric', 'ServiceProperties', 'ServiceReplicaProperties', 'ServiceReplicaDescription',", "'ApplicationScopedVolumeKind', 'NetworkKind', 'HeaderMatchType', 'OperatingSystemType', 'DiagnosticsSinkKind', 'AutoScalingMechanismKind', 'AutoScalingMetricKind', 'AutoScalingResourceMetricName', 'AutoScalingTriggerKind', ]", "may cause incorrect behavior and will be lost if the", "import AutoScalingMechanism from .auto_scaling_policy import AutoScalingPolicy from .service_resource_description import ServiceResourceDescription", "import ReliableCollectionsRef from .container_state import ContainerState from .container_event import ContainerEvent", "OperationResult from .provisioned_resource_properties import ProvisionedResourceProperties from .proxy_resource import ProxyResource from", "'ResourceRequirements', 'DiagnosticsRef', 'ReliableCollectionsRef', 'ContainerState', 'ContainerEvent', 'ContainerInstanceView', 'ContainerCodePackageProperties', 'AutoScalingTrigger', 'AutoScalingMechanism', 
'AutoScalingPolicy',", "in the project root for # license information. # #", "'AvailableOperationDisplay', 'ErrorDetailsModel', 'ErrorErrorModel', 'ErrorModel', 'ErrorModelException', 'OperationResult', 'ProvisionedResourceProperties', 'ProxyResource', 'ManagedProxyResource', 'Resource',", "import ErrorDetailsModel from .error_error_model import ErrorErrorModel from .error_model import ErrorModel,", "from .secret_resource_properties_base import SecretResourcePropertiesBase from .secret_resource_description import SecretResourceDescription from .secret_value", "ErrorErrorModel from .error_model import ErrorModel, ErrorModelException from .operation_result import OperationResult", "'AutoScalingMechanism', 'AutoScalingPolicy', 'ServiceResourceDescription', 'DiagnosticsSinkProperties', 'DiagnosticsDescription', 'ApplicationProperties', 'AzureInternalMonitoringPipelineSinkDescription', 'ApplicationResourceDescription', 'AddRemoveReplicaScalingMechanism', 'AutoScalingMetric',", "import NetworkResourceDescription from .gateway_destination import GatewayDestination from .tcp_config import TcpConfig", "'NetworkResourceProperties', 'LocalNetworkResourceProperties', 'EndpointRef', 'NetworkRef', 'NetworkResourcePropertiesBase', 'NetworkResourceDescription', 'GatewayDestination', 'TcpConfig', 'HttpRouteMatchPath', 'HttpRouteMatchHeader',", "# # Code generated by Microsoft (R) AutoRest Code Generator.", "import SecretValueProperties from .secret_value_resource_description import SecretValueResourceDescription from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile", "from .resource_limits import ResourceLimits from .resource_requirements import ResourceRequirements from .diagnostics_ref", "'ServiceReplicaDescriptionPaged', 'ResourceStatus', 'HealthState', 'SecretKind', 'VolumeProvider', 'SizeTypes', 'ApplicationScopedVolumeKind', 'NetworkKind', 'HeaderMatchType', 'OperatingSystemType',", ".service_fabric_mesh_management_client_enums import ( 
ResourceStatus, HealthState, SecretKind, VolumeProvider, SizeTypes, ApplicationScopedVolumeKind, NetworkKind,", ".resource import Resource from .tracked_resource import TrackedResource from .secret_resource_properties import", "import ResourceRequests from .resource_limits import ResourceLimits from .resource_requirements import ResourceRequirements", "AutoScalingResourceMetricName, AutoScalingTriggerKind, ) __all__ = [ 'AvailableOperationDisplay', 'ErrorDetailsModel', 'ErrorErrorModel', 'ErrorModel',", "from .http_config import HttpConfig from .gateway_properties import GatewayProperties from .gateway_resource_description", "from .error_details_model import ErrorDetailsModel from .error_error_model import ErrorErrorModel from .error_model", "import VolumeResourceDescriptionPaged from .network_resource_description_paged import NetworkResourceDescriptionPaged from .gateway_resource_description_paged import GatewayResourceDescriptionPaged", "'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', 'VolumeResourceDescription', 'NetworkResourceProperties', 'LocalNetworkResourceProperties', 'EndpointRef', 'NetworkRef', 'NetworkResourcePropertiesBase', 'NetworkResourceDescription', 'GatewayDestination', 'TcpConfig',", "ResourceStatus, HealthState, SecretKind, VolumeProvider, SizeTypes, ApplicationScopedVolumeKind, NetworkKind, HeaderMatchType, OperatingSystemType, DiagnosticsSinkKind,", "'VolumeResourceDescriptionPaged', 'NetworkResourceDescriptionPaged', 'GatewayResourceDescriptionPaged', 'ApplicationResourceDescriptionPaged', 'ServiceResourceDescriptionPaged', 'ServiceReplicaDescriptionPaged', 'ResourceStatus', 'HealthState', 'SecretKind', 'VolumeProvider',", ".secret_resource_description_paged import SecretResourceDescriptionPaged from .secret_value_resource_description_paged import SecretValueResourceDescriptionPaged from .volume_resource_description_paged import", "from .volume_properties import VolumeProperties from .volume_reference import 
VolumeReference from .application_scoped_volume_creation_parameters", "NetworkResourceProperties from .local_network_resource_properties import LocalNetworkResourceProperties from .endpoint_ref import EndpointRef from", "'HttpRouteMatchPath', 'HttpRouteMatchHeader', 'HttpRouteMatchRule', 'HttpRouteConfig', 'HttpHostConfig', 'HttpConfig', 'GatewayProperties', 'GatewayResourceDescription', 'ImageRegistryCredential', 'EnvironmentVariable',", "import ContainerInstanceView from .container_code_package_properties import ContainerCodePackageProperties from .auto_scaling_trigger import AutoScalingTrigger", "GatewayResourceDescriptionPaged from .application_resource_description_paged import ApplicationResourceDescriptionPaged from .service_resource_description_paged import ServiceResourceDescriptionPaged from", "DiagnosticsSinkKind, AutoScalingMechanismKind, AutoScalingMetricKind, AutoScalingResourceMetricName, AutoScalingTriggerKind, ) __all__ = [ 'AvailableOperationDisplay',", "GatewayResourceDescription from .image_registry_credential import ImageRegistryCredential from .environment_variable import EnvironmentVariable from", "[ 'AvailableOperationDisplay', 'ErrorDetailsModel', 'ErrorErrorModel', 'ErrorModel', 'ErrorModelException', 'OperationResult', 'ProvisionedResourceProperties', 'ProxyResource', 'ManagedProxyResource',", ".application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk from .volume_resource_description import VolumeResourceDescription from .network_resource_properties import", "ApplicationScopedVolumeCreationParameters from .application_scoped_volume import ApplicationScopedVolume from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk from", "from .application_scoped_volume import ApplicationScopedVolume from 
.application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk from .volume_resource_description", ".managed_proxy_resource import ManagedProxyResource from .resource import Resource from .tracked_resource import", "'ContainerLabel', 'EndpointProperties', 'ResourceRequests', 'ResourceLimits', 'ResourceRequirements', 'DiagnosticsRef', 'ReliableCollectionsRef', 'ContainerState', 'ContainerEvent', 'ContainerInstanceView',", "from .secret_value_resource_description import SecretValueResourceDescription from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile from .volume_properties", "'InlinedValueSecretResourceProperties', 'SecretResourcePropertiesBase', 'SecretResourceDescription', 'SecretValue', 'SecretValueProperties', 'SecretValueResourceDescription', 'VolumeProviderParametersAzureFile', 'VolumeProperties', 'VolumeReference', 'ApplicationScopedVolumeCreationParameters',", ".service_resource_description_paged import ServiceResourceDescriptionPaged from .service_replica_description_paged import ServiceReplicaDescriptionPaged from .service_fabric_mesh_management_client_enums import", "Microsoft (R) AutoRest Code Generator. 
# Changes may cause incorrect", "from .http_route_match_rule import HttpRouteMatchRule from .http_route_config import HttpRouteConfig from .http_host_config", "'SizeTypes', 'ApplicationScopedVolumeKind', 'NetworkKind', 'HeaderMatchType', 'OperatingSystemType', 'DiagnosticsSinkKind', 'AutoScalingMechanismKind', 'AutoScalingMetricKind', 'AutoScalingResourceMetricName', 'AutoScalingTriggerKind',", "from .service_fabric_mesh_management_client_enums import ( ResourceStatus, HealthState, SecretKind, VolumeProvider, SizeTypes, ApplicationScopedVolumeKind,", "from .container_instance_view import ContainerInstanceView from .container_code_package_properties import ContainerCodePackageProperties from .auto_scaling_trigger", "'GatewayProperties', 'GatewayResourceDescription', 'ImageRegistryCredential', 'EnvironmentVariable', 'Setting', 'ContainerLabel', 'EndpointProperties', 'ResourceRequests', 'ResourceLimits', 'ResourceRequirements',", "import VolumeProperties from .volume_reference import VolumeReference from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters", "HttpRouteConfig from .http_host_config import HttpHostConfig from .http_config import HttpConfig from", "NetworkResourceDescription from .gateway_destination import GatewayDestination from .tcp_config import TcpConfig from", "import GatewayDestination from .tcp_config import TcpConfig from .http_route_match_path import HttpRouteMatchPath", "from .setting import Setting from .container_label import ContainerLabel from .endpoint_properties", "ApplicationProperties from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription from .application_resource_description import ApplicationResourceDescription from", "import NetworkResourceDescriptionPaged from .gateway_resource_description_paged import GatewayResourceDescriptionPaged from .application_resource_description_paged import ApplicationResourceDescriptionPaged", "root for 
# license information. # # Code generated by", "ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk from .volume_resource_description import VolumeResourceDescription from .network_resource_properties import NetworkResourceProperties from", "'VolumeProviderParametersAzureFile', 'VolumeProperties', 'VolumeReference', 'ApplicationScopedVolumeCreationParameters', 'ApplicationScopedVolume', 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', 'VolumeResourceDescription', 'NetworkResourceProperties', 'LocalNetworkResourceProperties', 'EndpointRef',", "from .secret_resource_description import SecretResourceDescription from .secret_value import SecretValue from .secret_value_properties", "from .tracked_resource import TrackedResource from .secret_resource_properties import SecretResourceProperties from .inlined_value_secret_resource_properties", "'ServiceReplicaProperties', 'ServiceReplicaDescription', 'AverageLoadScalingTrigger', 'ContainerLogs', 'OperationResultPaged', 'SecretResourceDescriptionPaged', 'SecretValueResourceDescriptionPaged', 'VolumeResourceDescriptionPaged', 'NetworkResourceDescriptionPaged', 'GatewayResourceDescriptionPaged',", "from .environment_variable import EnvironmentVariable from .setting import Setting from .container_label", "import HttpRouteMatchPath from .http_route_match_header import HttpRouteMatchHeader from .http_route_match_rule import HttpRouteMatchRule", "ApplicationScopedVolumeKind, NetworkKind, HeaderMatchType, OperatingSystemType, DiagnosticsSinkKind, AutoScalingMechanismKind, AutoScalingMetricKind, AutoScalingResourceMetricName, AutoScalingTriggerKind, )", ".http_config import HttpConfig from .gateway_properties import GatewayProperties from .gateway_resource_description import", "VolumeProperties from .volume_reference import VolumeReference from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters from", "ReliableCollectionsRef from .container_state import 
ContainerState from .container_event import ContainerEvent from", "'ServiceProperties', 'ServiceReplicaProperties', 'ServiceReplicaDescription', 'AverageLoadScalingTrigger', 'ContainerLogs', 'OperationResultPaged', 'SecretResourceDescriptionPaged', 'SecretValueResourceDescriptionPaged', 'VolumeResourceDescriptionPaged', 'NetworkResourceDescriptionPaged',", "import ContainerState from .container_event import ContainerEvent from .container_instance_view import ContainerInstanceView", ".http_host_config import HttpHostConfig from .http_config import HttpConfig from .gateway_properties import", "from .service_replica_description import ServiceReplicaDescription from .average_load_scaling_trigger import AverageLoadScalingTrigger from .container_logs", "'EndpointProperties', 'ResourceRequests', 'ResourceLimits', 'ResourceRequirements', 'DiagnosticsRef', 'ReliableCollectionsRef', 'ContainerState', 'ContainerEvent', 'ContainerInstanceView', 'ContainerCodePackageProperties',", "from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters from .application_scoped_volume import ApplicationScopedVolume from .application_scoped_volume_creation_parameters_service_fabric_volume_disk", ".gateway_destination import GatewayDestination from .tcp_config import TcpConfig from .http_route_match_path import", "'SecretValueProperties', 'SecretValueResourceDescription', 'VolumeProviderParametersAzureFile', 'VolumeProperties', 'VolumeReference', 'ApplicationScopedVolumeCreationParameters', 'ApplicationScopedVolume', 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', 'VolumeResourceDescription', 'NetworkResourceProperties',", ".resource_requests import ResourceRequests from .resource_limits import ResourceLimits from .resource_requirements import", "the MIT License. 
See License.txt in the project root for", ".auto_scaling_resource_metric import AutoScalingResourceMetric from .service_properties import ServiceProperties from .service_replica_properties import", "Resource from .tracked_resource import TrackedResource from .secret_resource_properties import SecretResourceProperties from", "VolumeProvider, SizeTypes, ApplicationScopedVolumeKind, NetworkKind, HeaderMatchType, OperatingSystemType, DiagnosticsSinkKind, AutoScalingMechanismKind, AutoScalingMetricKind, AutoScalingResourceMetricName,", "from .service_replica_properties import ServiceReplicaProperties from .service_replica_description import ServiceReplicaDescription from .average_load_scaling_trigger", "from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription from .application_resource_description import ApplicationResourceDescription from .add_remove_replica_scaling_mechanism", "'SecretResourceDescriptionPaged', 'SecretValueResourceDescriptionPaged', 'VolumeResourceDescriptionPaged', 'NetworkResourceDescriptionPaged', 'GatewayResourceDescriptionPaged', 'ApplicationResourceDescriptionPaged', 'ServiceResourceDescriptionPaged', 'ServiceReplicaDescriptionPaged', 'ResourceStatus', 'HealthState',", "'AutoScalingTrigger', 'AutoScalingMechanism', 'AutoScalingPolicy', 'ServiceResourceDescription', 'DiagnosticsSinkProperties', 'DiagnosticsDescription', 'ApplicationProperties', 'AzureInternalMonitoringPipelineSinkDescription', 'ApplicationResourceDescription', 'AddRemoveReplicaScalingMechanism',", "( ResourceStatus, HealthState, SecretKind, VolumeProvider, SizeTypes, ApplicationScopedVolumeKind, NetworkKind, HeaderMatchType, OperatingSystemType,", ".image_registry_credential import ImageRegistryCredential from .environment_variable import EnvironmentVariable from .setting import", "import HttpRouteMatchRule from .http_route_config import HttpRouteConfig from .http_host_config import HttpHostConfig", 
"'GatewayResourceDescription', 'ImageRegistryCredential', 'EnvironmentVariable', 'Setting', 'ContainerLabel', 'EndpointProperties', 'ResourceRequests', 'ResourceLimits', 'ResourceRequirements', 'DiagnosticsRef',", ".local_network_resource_properties import LocalNetworkResourceProperties from .endpoint_ref import EndpointRef from .network_ref import", "HttpConfig from .gateway_properties import GatewayProperties from .gateway_resource_description import GatewayResourceDescription from", "SecretValue from .secret_value_properties import SecretValueProperties from .secret_value_resource_description import SecretValueResourceDescription from", ".tracked_resource import TrackedResource from .secret_resource_properties import SecretResourceProperties from .inlined_value_secret_resource_properties import", "import HttpRouteMatchHeader from .http_route_match_rule import HttpRouteMatchRule from .http_route_config import HttpRouteConfig", ".environment_variable import EnvironmentVariable from .setting import Setting from .container_label import", ".application_resource_description import ApplicationResourceDescription from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism from .auto_scaling_metric import", "by Microsoft (R) AutoRest Code Generator. 
# Changes may cause", ".azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription from .application_resource_description import ApplicationResourceDescription from .add_remove_replica_scaling_mechanism import", "import DiagnosticsRef from .reliable_collections_ref import ReliableCollectionsRef from .container_state import ContainerState", "Changes may cause incorrect behavior and will be lost if", ".volume_resource_description import VolumeResourceDescription from .network_resource_properties import NetworkResourceProperties from .local_network_resource_properties import", ".http_route_match_header import HttpRouteMatchHeader from .http_route_match_rule import HttpRouteMatchRule from .http_route_config import", ".network_resource_description_paged import NetworkResourceDescriptionPaged from .gateway_resource_description_paged import GatewayResourceDescriptionPaged from .application_resource_description_paged import", "NetworkKind, HeaderMatchType, OperatingSystemType, DiagnosticsSinkKind, AutoScalingMechanismKind, AutoScalingMetricKind, AutoScalingResourceMetricName, AutoScalingTriggerKind, ) __all__", "'ServiceReplicaDescription', 'AverageLoadScalingTrigger', 'ContainerLogs', 'OperationResultPaged', 'SecretResourceDescriptionPaged', 'SecretValueResourceDescriptionPaged', 'VolumeResourceDescriptionPaged', 'NetworkResourceDescriptionPaged', 'GatewayResourceDescriptionPaged', 'ApplicationResourceDescriptionPaged',", "import GatewayResourceDescriptionPaged from .application_resource_description_paged import ApplicationResourceDescriptionPaged from .service_resource_description_paged import ServiceResourceDescriptionPaged", "from .network_resource_properties_base import NetworkResourcePropertiesBase from .network_resource_description import NetworkResourceDescription from .gateway_destination", "import TrackedResource from .secret_resource_properties import SecretResourceProperties from 
.inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties", ".secret_value import SecretValue from .secret_value_properties import SecretValueProperties from .secret_value_resource_description import", ".container_instance_view import ContainerInstanceView from .container_code_package_properties import ContainerCodePackageProperties from .auto_scaling_trigger import", ".container_label import ContainerLabel from .endpoint_properties import EndpointProperties from .resource_requests import", "'ProvisionedResourceProperties', 'ProxyResource', 'ManagedProxyResource', 'Resource', 'TrackedResource', 'SecretResourceProperties', 'InlinedValueSecretResourceProperties', 'SecretResourcePropertiesBase', 'SecretResourceDescription', 'SecretValue',", "AutoScalingTriggerKind, ) __all__ = [ 'AvailableOperationDisplay', 'ErrorDetailsModel', 'ErrorErrorModel', 'ErrorModel', 'ErrorModelException',", "'AutoScalingResourceMetric', 'ServiceProperties', 'ServiceReplicaProperties', 'ServiceReplicaDescription', 'AverageLoadScalingTrigger', 'ContainerLogs', 'OperationResultPaged', 'SecretResourceDescriptionPaged', 'SecretValueResourceDescriptionPaged', 'VolumeResourceDescriptionPaged',", "'OperationResult', 'ProvisionedResourceProperties', 'ProxyResource', 'ManagedProxyResource', 'Resource', 'TrackedResource', 'SecretResourceProperties', 'InlinedValueSecretResourceProperties', 'SecretResourcePropertiesBase', 'SecretResourceDescription',", "'EnvironmentVariable', 'Setting', 'ContainerLabel', 'EndpointProperties', 'ResourceRequests', 'ResourceLimits', 'ResourceRequirements', 'DiagnosticsRef', 'ReliableCollectionsRef', 'ContainerState',", "'ContainerLogs', 'OperationResultPaged', 'SecretResourceDescriptionPaged', 'SecretValueResourceDescriptionPaged', 'VolumeResourceDescriptionPaged', 'NetworkResourceDescriptionPaged', 'GatewayResourceDescriptionPaged', 'ApplicationResourceDescriptionPaged', 'ServiceResourceDescriptionPaged', 'ServiceReplicaDescriptionPaged',", 
".application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters from .application_scoped_volume import ApplicationScopedVolume from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import", "generated by Microsoft (R) AutoRest Code Generator. # Changes may", "'GatewayDestination', 'TcpConfig', 'HttpRouteMatchPath', 'HttpRouteMatchHeader', 'HttpRouteMatchRule', 'HttpRouteConfig', 'HttpHostConfig', 'HttpConfig', 'GatewayProperties', 'GatewayResourceDescription',", "import HttpConfig from .gateway_properties import GatewayProperties from .gateway_resource_description import GatewayResourceDescription", "'HttpRouteConfig', 'HttpHostConfig', 'HttpConfig', 'GatewayProperties', 'GatewayResourceDescription', 'ImageRegistryCredential', 'EnvironmentVariable', 'Setting', 'ContainerLabel', 'EndpointProperties',", "from .http_host_config import HttpHostConfig from .http_config import HttpConfig from .gateway_properties", "'AddRemoveReplicaScalingMechanism', 'AutoScalingMetric', 'AutoScalingResourceMetric', 'ServiceProperties', 'ServiceReplicaProperties', 'ServiceReplicaDescription', 'AverageLoadScalingTrigger', 'ContainerLogs', 'OperationResultPaged', 'SecretResourceDescriptionPaged',", "is # regenerated. 
# -------------------------------------------------------------------------- from .available_operation_display import AvailableOperationDisplay from", "import GatewayResourceDescription from .image_registry_credential import ImageRegistryCredential from .environment_variable import EnvironmentVariable", ".secret_value_resource_description_paged import SecretValueResourceDescriptionPaged from .volume_resource_description_paged import VolumeResourceDescriptionPaged from .network_resource_description_paged import", "'AutoScalingMetric', 'AutoScalingResourceMetric', 'ServiceProperties', 'ServiceReplicaProperties', 'ServiceReplicaDescription', 'AverageLoadScalingTrigger', 'ContainerLogs', 'OperationResultPaged', 'SecretResourceDescriptionPaged', 'SecretValueResourceDescriptionPaged',", "from .http_route_config import HttpRouteConfig from .http_host_config import HttpHostConfig from .http_config", "EndpointRef from .network_ref import NetworkRef from .network_resource_properties_base import NetworkResourcePropertiesBase from", ".gateway_properties import GatewayProperties from .gateway_resource_description import GatewayResourceDescription from .image_registry_credential import", "import AvailableOperationDisplay from .error_details_model import ErrorDetailsModel from .error_error_model import ErrorErrorModel", "# Licensed under the MIT License. 
See License.txt in the", "'ResourceLimits', 'ResourceRequirements', 'DiagnosticsRef', 'ReliableCollectionsRef', 'ContainerState', 'ContainerEvent', 'ContainerInstanceView', 'ContainerCodePackageProperties', 'AutoScalingTrigger', 'AutoScalingMechanism',", "'DiagnosticsRef', 'ReliableCollectionsRef', 'ContainerState', 'ContainerEvent', 'ContainerInstanceView', 'ContainerCodePackageProperties', 'AutoScalingTrigger', 'AutoScalingMechanism', 'AutoScalingPolicy', 'ServiceResourceDescription',", "NetworkResourcePropertiesBase from .network_resource_description import NetworkResourceDescription from .gateway_destination import GatewayDestination from", "import HttpHostConfig from .http_config import HttpConfig from .gateway_properties import GatewayProperties", ".available_operation_display import AvailableOperationDisplay from .error_details_model import ErrorDetailsModel from .error_error_model import", "SecretResourceProperties from .inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties from .secret_resource_properties_base import SecretResourcePropertiesBase from", "AutoScalingResourceMetric from .service_properties import ServiceProperties from .service_replica_properties import ServiceReplicaProperties from", "ResourceRequirements from .diagnostics_ref import DiagnosticsRef from .reliable_collections_ref import ReliableCollectionsRef from", ".service_replica_description_paged import ServiceReplicaDescriptionPaged from .service_fabric_mesh_management_client_enums import ( ResourceStatus, HealthState, SecretKind,", ".secret_value_properties import SecretValueProperties from .secret_value_resource_description import SecretValueResourceDescription from .volume_provider_parameters_azure_file import", "import VolumeReference from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters from .application_scoped_volume import ApplicationScopedVolume", "import ContainerLogs from .operation_result_paged 
import OperationResultPaged from .secret_resource_description_paged import SecretResourceDescriptionPaged", "'Setting', 'ContainerLabel', 'EndpointProperties', 'ResourceRequests', 'ResourceLimits', 'ResourceRequirements', 'DiagnosticsRef', 'ReliableCollectionsRef', 'ContainerState', 'ContainerEvent',", "import HttpRouteConfig from .http_host_config import HttpHostConfig from .http_config import HttpConfig", ".operation_result_paged import OperationResultPaged from .secret_resource_description_paged import SecretResourceDescriptionPaged from .secret_value_resource_description_paged import", "will be lost if the code is # regenerated. #", ".error_details_model import ErrorDetailsModel from .error_error_model import ErrorErrorModel from .error_model import", "SecretValueProperties from .secret_value_resource_description import SecretValueResourceDescription from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile from", "import ApplicationResourceDescription from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism from .auto_scaling_metric import AutoScalingMetric", "from .available_operation_display import AvailableOperationDisplay from .error_details_model import ErrorDetailsModel from .error_error_model", "ContainerState from .container_event import ContainerEvent from .container_instance_view import ContainerInstanceView from", "and will be lost if the code is # regenerated.", "'ContainerCodePackageProperties', 'AutoScalingTrigger', 'AutoScalingMechanism', 'AutoScalingPolicy', 'ServiceResourceDescription', 'DiagnosticsSinkProperties', 'DiagnosticsDescription', 'ApplicationProperties', 'AzureInternalMonitoringPipelineSinkDescription', 'ApplicationResourceDescription',", "from .average_load_scaling_trigger import AverageLoadScalingTrigger from .container_logs import ContainerLogs from .operation_result_paged", "HealthState, SecretKind, VolumeProvider, SizeTypes, ApplicationScopedVolumeKind, NetworkKind, 
HeaderMatchType, OperatingSystemType, DiagnosticsSinkKind, AutoScalingMechanismKind,", "NetworkRef from .network_resource_properties_base import NetworkResourcePropertiesBase from .network_resource_description import NetworkResourceDescription from", "import SecretValueResourceDescriptionPaged from .volume_resource_description_paged import VolumeResourceDescriptionPaged from .network_resource_description_paged import NetworkResourceDescriptionPaged", "import AddRemoveReplicaScalingMechanism from .auto_scaling_metric import AutoScalingMetric from .auto_scaling_resource_metric import AutoScalingResourceMetric", ".add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism from .auto_scaling_metric import AutoScalingMetric from .auto_scaling_resource_metric import", "from .http_route_match_header import HttpRouteMatchHeader from .http_route_match_rule import HttpRouteMatchRule from .http_route_config", "cause incorrect behavior and will be lost if the code", "from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile from .volume_properties import VolumeProperties from .volume_reference", "import ServiceResourceDescription from .diagnostics_sink_properties import DiagnosticsSinkProperties from .diagnostics_description import DiagnosticsDescription", "(c) Microsoft Corporation. All rights reserved. # Licensed under the", "All rights reserved. # Licensed under the MIT License. 
See", "from .container_state import ContainerState from .container_event import ContainerEvent from .container_instance_view", "import ServiceProperties from .service_replica_properties import ServiceReplicaProperties from .service_replica_description import ServiceReplicaDescription", "ServiceResourceDescriptionPaged from .service_replica_description_paged import ServiceReplicaDescriptionPaged from .service_fabric_mesh_management_client_enums import ( ResourceStatus,", "'AzureInternalMonitoringPipelineSinkDescription', 'ApplicationResourceDescription', 'AddRemoveReplicaScalingMechanism', 'AutoScalingMetric', 'AutoScalingResourceMetric', 'ServiceProperties', 'ServiceReplicaProperties', 'ServiceReplicaDescription', 'AverageLoadScalingTrigger', 'ContainerLogs',", "'ResourceStatus', 'HealthState', 'SecretKind', 'VolumeProvider', 'SizeTypes', 'ApplicationScopedVolumeKind', 'NetworkKind', 'HeaderMatchType', 'OperatingSystemType', 'DiagnosticsSinkKind',", "import TcpConfig from .http_route_match_path import HttpRouteMatchPath from .http_route_match_header import HttpRouteMatchHeader", "'TrackedResource', 'SecretResourceProperties', 'InlinedValueSecretResourceProperties', 'SecretResourcePropertiesBase', 'SecretResourceDescription', 'SecretValue', 'SecretValueProperties', 'SecretValueResourceDescription', 'VolumeProviderParametersAzureFile', 'VolumeProperties',", "from .reliable_collections_ref import ReliableCollectionsRef from .container_state import ContainerState from .container_event", ".operation_result import OperationResult from .provisioned_resource_properties import ProvisionedResourceProperties from .proxy_resource import", "from .application_resource_description import ApplicationResourceDescription from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism from .auto_scaling_metric", "from .error_model import ErrorModel, ErrorModelException from .operation_result import OperationResult from", "import VolumeResourceDescription from 
.network_resource_properties import NetworkResourceProperties from .local_network_resource_properties import LocalNetworkResourceProperties", "from .service_resource_description_paged import ServiceResourceDescriptionPaged from .service_replica_description_paged import ServiceReplicaDescriptionPaged from .service_fabric_mesh_management_client_enums", "ErrorModelException from .operation_result import OperationResult from .provisioned_resource_properties import ProvisionedResourceProperties from", "# Code generated by Microsoft (R) AutoRest Code Generator. #", ".http_route_match_path import HttpRouteMatchPath from .http_route_match_header import HttpRouteMatchHeader from .http_route_match_rule import", "from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk from .volume_resource_description import VolumeResourceDescription from .network_resource_properties", "import ContainerCodePackageProperties from .auto_scaling_trigger import AutoScalingTrigger from .auto_scaling_mechanism import AutoScalingMechanism", ".container_state import ContainerState from .container_event import ContainerEvent from .container_instance_view import", "import DiagnosticsDescription from .application_properties import ApplicationProperties from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription", "from .service_resource_description import ServiceResourceDescription from .diagnostics_sink_properties import DiagnosticsSinkProperties from .diagnostics_description", "ResourceLimits from .resource_requirements import ResourceRequirements from .diagnostics_ref import DiagnosticsRef from", "import AutoScalingPolicy from .service_resource_description import ServiceResourceDescription from .diagnostics_sink_properties import DiagnosticsSinkProperties", ".http_route_match_rule import HttpRouteMatchRule from .http_route_config import HttpRouteConfig 
from .http_host_config import", "Code Generator. # Changes may cause incorrect behavior and will", "EndpointProperties from .resource_requests import ResourceRequests from .resource_limits import ResourceLimits from", "'ContainerState', 'ContainerEvent', 'ContainerInstanceView', 'ContainerCodePackageProperties', 'AutoScalingTrigger', 'AutoScalingMechanism', 'AutoScalingPolicy', 'ServiceResourceDescription', 'DiagnosticsSinkProperties', 'DiagnosticsDescription',", "be lost if the code is # regenerated. # --------------------------------------------------------------------------", "AutoScalingMetric from .auto_scaling_resource_metric import AutoScalingResourceMetric from .service_properties import ServiceProperties from", "import ServiceReplicaDescription from .average_load_scaling_trigger import AverageLoadScalingTrigger from .container_logs import ContainerLogs", "from .secret_resource_properties import SecretResourceProperties from .inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties from .secret_resource_properties_base", "from .operation_result_paged import OperationResultPaged from .secret_resource_description_paged import SecretResourceDescriptionPaged from .secret_value_resource_description_paged", "import OperationResultPaged from .secret_resource_description_paged import SecretResourceDescriptionPaged from .secret_value_resource_description_paged import SecretValueResourceDescriptionPaged", "import ApplicationProperties from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription from .application_resource_description import ApplicationResourceDescription", "'HttpHostConfig', 'HttpConfig', 'GatewayProperties', 'GatewayResourceDescription', 'ImageRegistryCredential', 'EnvironmentVariable', 'Setting', 'ContainerLabel', 'EndpointProperties', 'ResourceRequests',", "from .secret_resource_description_paged import SecretResourceDescriptionPaged from 
.secret_value_resource_description_paged import SecretValueResourceDescriptionPaged from .volume_resource_description_paged", "'ApplicationResourceDescriptionPaged', 'ServiceResourceDescriptionPaged', 'ServiceReplicaDescriptionPaged', 'ResourceStatus', 'HealthState', 'SecretKind', 'VolumeProvider', 'SizeTypes', 'ApplicationScopedVolumeKind', 'NetworkKind',", "import VolumeProviderParametersAzureFile from .volume_properties import VolumeProperties from .volume_reference import VolumeReference", "import SecretResourceDescriptionPaged from .secret_value_resource_description_paged import SecretValueResourceDescriptionPaged from .volume_resource_description_paged import VolumeResourceDescriptionPaged", "import InlinedValueSecretResourceProperties from .secret_resource_properties_base import SecretResourcePropertiesBase from .secret_resource_description import SecretResourceDescription", "AutoScalingMetricKind, AutoScalingResourceMetricName, AutoScalingTriggerKind, ) __all__ = [ 'AvailableOperationDisplay', 'ErrorDetailsModel', 'ErrorErrorModel',", "from .diagnostics_ref import DiagnosticsRef from .reliable_collections_ref import ReliableCollectionsRef from .container_state", "'ProxyResource', 'ManagedProxyResource', 'Resource', 'TrackedResource', 'SecretResourceProperties', 'InlinedValueSecretResourceProperties', 'SecretResourcePropertiesBase', 'SecretResourceDescription', 'SecretValue', 'SecretValueProperties',", "ContainerLogs from .operation_result_paged import OperationResultPaged from .secret_resource_description_paged import SecretResourceDescriptionPaged from", ".container_code_package_properties import ContainerCodePackageProperties from .auto_scaling_trigger import AutoScalingTrigger from .auto_scaling_mechanism import", "SecretValueResourceDescriptionPaged from .volume_resource_description_paged import VolumeResourceDescriptionPaged from .network_resource_description_paged import NetworkResourceDescriptionPaged from", "import ServiceResourceDescriptionPaged 
from .service_replica_description_paged import ServiceReplicaDescriptionPaged from .service_fabric_mesh_management_client_enums import (", "'ApplicationProperties', 'AzureInternalMonitoringPipelineSinkDescription', 'ApplicationResourceDescription', 'AddRemoveReplicaScalingMechanism', 'AutoScalingMetric', 'AutoScalingResourceMetric', 'ServiceProperties', 'ServiceReplicaProperties', 'ServiceReplicaDescription', 'AverageLoadScalingTrigger',", "'ApplicationScopedVolumeCreationParameters', 'ApplicationScopedVolume', 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', 'VolumeResourceDescription', 'NetworkResourceProperties', 'LocalNetworkResourceProperties', 'EndpointRef', 'NetworkRef', 'NetworkResourcePropertiesBase', 'NetworkResourceDescription',", "ErrorDetailsModel from .error_error_model import ErrorErrorModel from .error_model import ErrorModel, ErrorModelException", "'VolumeResourceDescription', 'NetworkResourceProperties', 'LocalNetworkResourceProperties', 'EndpointRef', 'NetworkRef', 'NetworkResourcePropertiesBase', 'NetworkResourceDescription', 'GatewayDestination', 'TcpConfig', 'HttpRouteMatchPath',", "HttpHostConfig from .http_config import HttpConfig from .gateway_properties import GatewayProperties from", "SizeTypes, ApplicationScopedVolumeKind, NetworkKind, HeaderMatchType, OperatingSystemType, DiagnosticsSinkKind, AutoScalingMechanismKind, AutoScalingMetricKind, AutoScalingResourceMetricName, AutoScalingTriggerKind,", "HttpRouteMatchHeader from .http_route_match_rule import HttpRouteMatchRule from .http_route_config import HttpRouteConfig from", "ImageRegistryCredential from .environment_variable import EnvironmentVariable from .setting import Setting from", "from .gateway_resource_description_paged import GatewayResourceDescriptionPaged from .application_resource_description_paged import ApplicationResourceDescriptionPaged from .service_resource_description_paged", "import OperationResult from .provisioned_resource_properties import 
ProvisionedResourceProperties from .proxy_resource import ProxyResource", "import ContainerLabel from .endpoint_properties import EndpointProperties from .resource_requests import ResourceRequests", "from .diagnostics_sink_properties import DiagnosticsSinkProperties from .diagnostics_description import DiagnosticsDescription from .application_properties", "import SecretResourceDescription from .secret_value import SecretValue from .secret_value_properties import SecretValueProperties", "VolumeProviderParametersAzureFile from .volume_properties import VolumeProperties from .volume_reference import VolumeReference from", "'ContainerEvent', 'ContainerInstanceView', 'ContainerCodePackageProperties', 'AutoScalingTrigger', 'AutoScalingMechanism', 'AutoScalingPolicy', 'ServiceResourceDescription', 'DiagnosticsSinkProperties', 'DiagnosticsDescription', 'ApplicationProperties',", ".gateway_resource_description_paged import GatewayResourceDescriptionPaged from .application_resource_description_paged import ApplicationResourceDescriptionPaged from .service_resource_description_paged import", "behavior and will be lost if the code is #", "SecretValueResourceDescription from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile from .volume_properties import VolumeProperties from", "'ImageRegistryCredential', 'EnvironmentVariable', 'Setting', 'ContainerLabel', 'EndpointProperties', 'ResourceRequests', 'ResourceLimits', 'ResourceRequirements', 'DiagnosticsRef', 'ReliableCollectionsRef',", "from .network_ref import NetworkRef from .network_resource_properties_base import NetworkResourcePropertiesBase from .network_resource_description", "ApplicationScopedVolume from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk from .volume_resource_description import VolumeResourceDescription from", "MIT License. 
See License.txt in the project root for #", "import SecretValue from .secret_value_properties import SecretValueProperties from .secret_value_resource_description import SecretValueResourceDescription", ".application_resource_description_paged import ApplicationResourceDescriptionPaged from .service_resource_description_paged import ServiceResourceDescriptionPaged from .service_replica_description_paged import", "from .resource import Resource from .tracked_resource import TrackedResource from .secret_resource_properties", "'ServiceResourceDescriptionPaged', 'ServiceReplicaDescriptionPaged', 'ResourceStatus', 'HealthState', 'SecretKind', 'VolumeProvider', 'SizeTypes', 'ApplicationScopedVolumeKind', 'NetworkKind', 'HeaderMatchType',", "project root for # license information. # # Code generated", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed", "import Resource from .tracked_resource import TrackedResource from .secret_resource_properties import SecretResourceProperties", "AvailableOperationDisplay from .error_details_model import ErrorDetailsModel from .error_error_model import ErrorErrorModel from", ".error_model import ErrorModel, ErrorModelException from .operation_result import OperationResult from .provisioned_resource_properties", "import NetworkResourceProperties from .local_network_resource_properties import LocalNetworkResourceProperties from .endpoint_ref import EndpointRef", "import ProxyResource from .managed_proxy_resource import ManagedProxyResource from .resource import Resource", "ContainerLabel from .endpoint_properties import EndpointProperties from .resource_requests import ResourceRequests from", "= [ 'AvailableOperationDisplay', 'ErrorDetailsModel', 'ErrorErrorModel', 'ErrorModel', 'ErrorModelException', 'OperationResult', 'ProvisionedResourceProperties', 'ProxyResource',", "'ErrorErrorModel', 'ErrorModel', 'ErrorModelException', 'OperationResult', 'ProvisionedResourceProperties', 'ProxyResource', 'ManagedProxyResource', 
'Resource', 'TrackedResource', 'SecretResourceProperties',", ".proxy_resource import ProxyResource from .managed_proxy_resource import ManagedProxyResource from .resource import", "'HttpRouteMatchHeader', 'HttpRouteMatchRule', 'HttpRouteConfig', 'HttpHostConfig', 'HttpConfig', 'GatewayProperties', 'GatewayResourceDescription', 'ImageRegistryCredential', 'EnvironmentVariable', 'Setting',", "from .resource_requests import ResourceRequests from .resource_limits import ResourceLimits from .resource_requirements", "'HttpRouteMatchRule', 'HttpRouteConfig', 'HttpHostConfig', 'HttpConfig', 'GatewayProperties', 'GatewayResourceDescription', 'ImageRegistryCredential', 'EnvironmentVariable', 'Setting', 'ContainerLabel',", "LocalNetworkResourceProperties from .endpoint_ref import EndpointRef from .network_ref import NetworkRef from", "SecretResourcePropertiesBase from .secret_resource_description import SecretResourceDescription from .secret_value import SecretValue from", ".reliable_collections_ref import ReliableCollectionsRef from .container_state import ContainerState from .container_event import", "from .http_route_match_path import HttpRouteMatchPath from .http_route_match_header import HttpRouteMatchHeader from .http_route_match_rule", "import Setting from .container_label import ContainerLabel from .endpoint_properties import EndpointProperties", ".container_event import ContainerEvent from .container_instance_view import ContainerInstanceView from .container_code_package_properties import", "from .secret_value import SecretValue from .secret_value_properties import SecretValueProperties from .secret_value_resource_description", "from .gateway_properties import GatewayProperties from .gateway_resource_description import GatewayResourceDescription from .image_registry_credential", "InlinedValueSecretResourceProperties from .secret_resource_properties_base import SecretResourcePropertiesBase from .secret_resource_description import SecretResourceDescription from", 
"Corporation. All rights reserved. # Licensed under the MIT License.", "import EndpointProperties from .resource_requests import ResourceRequests from .resource_limits import ResourceLimits", "AzureInternalMonitoringPipelineSinkDescription from .application_resource_description import ApplicationResourceDescription from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism from", "'NetworkRef', 'NetworkResourcePropertiesBase', 'NetworkResourceDescription', 'GatewayDestination', 'TcpConfig', 'HttpRouteMatchPath', 'HttpRouteMatchHeader', 'HttpRouteMatchRule', 'HttpRouteConfig', 'HttpHostConfig',", "'Resource', 'TrackedResource', 'SecretResourceProperties', 'InlinedValueSecretResourceProperties', 'SecretResourcePropertiesBase', 'SecretResourceDescription', 'SecretValue', 'SecretValueProperties', 'SecretValueResourceDescription', 'VolumeProviderParametersAzureFile',", "import GatewayProperties from .gateway_resource_description import GatewayResourceDescription from .image_registry_credential import ImageRegistryCredential", "'ResourceRequests', 'ResourceLimits', 'ResourceRequirements', 'DiagnosticsRef', 'ReliableCollectionsRef', 'ContainerState', 'ContainerEvent', 'ContainerInstanceView', 'ContainerCodePackageProperties', 'AutoScalingTrigger',", "'SecretResourcePropertiesBase', 'SecretResourceDescription', 'SecretValue', 'SecretValueProperties', 'SecretValueResourceDescription', 'VolumeProviderParametersAzureFile', 'VolumeProperties', 'VolumeReference', 'ApplicationScopedVolumeCreationParameters', 'ApplicationScopedVolume',", ".setting import Setting from .container_label import ContainerLabel from .endpoint_properties import", "from .network_resource_description import NetworkResourceDescription from .gateway_destination import GatewayDestination from .tcp_config", "# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. 
All rights reserved.", "from .managed_proxy_resource import ManagedProxyResource from .resource import Resource from .tracked_resource", "VolumeReference from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters from .application_scoped_volume import ApplicationScopedVolume from", "coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights", "from .auto_scaling_mechanism import AutoScalingMechanism from .auto_scaling_policy import AutoScalingPolicy from .service_resource_description", "import AutoScalingTrigger from .auto_scaling_mechanism import AutoScalingMechanism from .auto_scaling_policy import AutoScalingPolicy", "lost if the code is # regenerated. # -------------------------------------------------------------------------- from", "from .application_resource_description_paged import ApplicationResourceDescriptionPaged from .service_resource_description_paged import ServiceResourceDescriptionPaged from .service_replica_description_paged", ".volume_properties import VolumeProperties from .volume_reference import VolumeReference from .application_scoped_volume_creation_parameters import", "from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism from .auto_scaling_metric import AutoScalingMetric from .auto_scaling_resource_metric", "import ApplicationResourceDescriptionPaged from .service_resource_description_paged import ServiceResourceDescriptionPaged from .service_replica_description_paged import ServiceReplicaDescriptionPaged", "AverageLoadScalingTrigger from .container_logs import ContainerLogs from .operation_result_paged import OperationResultPaged from", "from .gateway_resource_description import GatewayResourceDescription from .image_registry_credential import ImageRegistryCredential from .environment_variable", "SecretKind, VolumeProvider, SizeTypes, ApplicationScopedVolumeKind, NetworkKind, 
HeaderMatchType, OperatingSystemType, DiagnosticsSinkKind, AutoScalingMechanismKind, AutoScalingMetricKind,", "'SecretValueResourceDescriptionPaged', 'VolumeResourceDescriptionPaged', 'NetworkResourceDescriptionPaged', 'GatewayResourceDescriptionPaged', 'ApplicationResourceDescriptionPaged', 'ServiceResourceDescriptionPaged', 'ServiceReplicaDescriptionPaged', 'ResourceStatus', 'HealthState', 'SecretKind',", "'VolumeReference', 'ApplicationScopedVolumeCreationParameters', 'ApplicationScopedVolume', 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', 'VolumeResourceDescription', 'NetworkResourceProperties', 'LocalNetworkResourceProperties', 'EndpointRef', 'NetworkRef', 'NetworkResourcePropertiesBase',", "'ApplicationScopedVolume', 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', 'VolumeResourceDescription', 'NetworkResourceProperties', 'LocalNetworkResourceProperties', 'EndpointRef', 'NetworkRef', 'NetworkResourcePropertiesBase', 'NetworkResourceDescription', 'GatewayDestination',", "from .auto_scaling_policy import AutoScalingPolicy from .service_resource_description import ServiceResourceDescription from .diagnostics_sink_properties", "from .secret_value_properties import SecretValueProperties from .secret_value_resource_description import SecretValueResourceDescription from .volume_provider_parameters_azure_file", "regenerated. # -------------------------------------------------------------------------- from .available_operation_display import AvailableOperationDisplay from .error_details_model import", "under the MIT License. 
See License.txt in the project root", "from .image_registry_credential import ImageRegistryCredential from .environment_variable import EnvironmentVariable from .setting", "from .service_replica_description_paged import ServiceReplicaDescriptionPaged from .service_fabric_mesh_management_client_enums import ( ResourceStatus, HealthState,", "ServiceReplicaDescriptionPaged from .service_fabric_mesh_management_client_enums import ( ResourceStatus, HealthState, SecretKind, VolumeProvider, SizeTypes,", ".tcp_config import TcpConfig from .http_route_match_path import HttpRouteMatchPath from .http_route_match_header import", "TrackedResource from .secret_resource_properties import SecretResourceProperties from .inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties from", ") __all__ = [ 'AvailableOperationDisplay', 'ErrorDetailsModel', 'ErrorErrorModel', 'ErrorModel', 'ErrorModelException', 'OperationResult',", "from .container_event import ContainerEvent from .container_instance_view import ContainerInstanceView from .container_code_package_properties", "'ServiceResourceDescription', 'DiagnosticsSinkProperties', 'DiagnosticsDescription', 'ApplicationProperties', 'AzureInternalMonitoringPipelineSinkDescription', 'ApplicationResourceDescription', 'AddRemoveReplicaScalingMechanism', 'AutoScalingMetric', 'AutoScalingResourceMetric', 'ServiceProperties',", ".volume_resource_description_paged import VolumeResourceDescriptionPaged from .network_resource_description_paged import NetworkResourceDescriptionPaged from .gateway_resource_description_paged import", "GatewayProperties from .gateway_resource_description import GatewayResourceDescription from .image_registry_credential import ImageRegistryCredential from", "Microsoft Corporation. All rights reserved. 
# Licensed under the MIT", "ApplicationResourceDescriptionPaged from .service_resource_description_paged import ServiceResourceDescriptionPaged from .service_replica_description_paged import ServiceReplicaDescriptionPaged from", "HttpRouteMatchRule from .http_route_config import HttpRouteConfig from .http_host_config import HttpHostConfig from", "'NetworkResourceDescription', 'GatewayDestination', 'TcpConfig', 'HttpRouteMatchPath', 'HttpRouteMatchHeader', 'HttpRouteMatchRule', 'HttpRouteConfig', 'HttpHostConfig', 'HttpConfig', 'GatewayProperties',", "AutoScalingTrigger from .auto_scaling_mechanism import AutoScalingMechanism from .auto_scaling_policy import AutoScalingPolicy from", ".service_properties import ServiceProperties from .service_replica_properties import ServiceReplicaProperties from .service_replica_description import", "import SecretResourcePropertiesBase from .secret_resource_description import SecretResourceDescription from .secret_value import SecretValue", ".service_replica_description import ServiceReplicaDescription from .average_load_scaling_trigger import AverageLoadScalingTrigger from .container_logs import", "'OperationResultPaged', 'SecretResourceDescriptionPaged', 'SecretValueResourceDescriptionPaged', 'VolumeResourceDescriptionPaged', 'NetworkResourceDescriptionPaged', 'GatewayResourceDescriptionPaged', 'ApplicationResourceDescriptionPaged', 'ServiceResourceDescriptionPaged', 'ServiceReplicaDescriptionPaged', 'ResourceStatus',", "import ProvisionedResourceProperties from .proxy_resource import ProxyResource from .managed_proxy_resource import ManagedProxyResource", "'SecretKind', 'VolumeProvider', 'SizeTypes', 'ApplicationScopedVolumeKind', 'NetworkKind', 'HeaderMatchType', 'OperatingSystemType', 'DiagnosticsSinkKind', 'AutoScalingMechanismKind', 'AutoScalingMetricKind',", "'LocalNetworkResourceProperties', 'EndpointRef', 'NetworkRef', 'NetworkResourcePropertiesBase', 'NetworkResourceDescription', 'GatewayDestination', 'TcpConfig', 
'HttpRouteMatchPath', 'HttpRouteMatchHeader', 'HttpRouteMatchRule',", "for # license information. # # Code generated by Microsoft", "AutoScalingMechanism from .auto_scaling_policy import AutoScalingPolicy from .service_resource_description import ServiceResourceDescription from", "ProxyResource from .managed_proxy_resource import ManagedProxyResource from .resource import Resource from", ".average_load_scaling_trigger import AverageLoadScalingTrigger from .container_logs import ContainerLogs from .operation_result_paged import", "import AutoScalingMetric from .auto_scaling_resource_metric import AutoScalingResourceMetric from .service_properties import ServiceProperties", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under", "DiagnosticsDescription from .application_properties import ApplicationProperties from .azure_internal_monitoring_pipeline_sink_description import AzureInternalMonitoringPipelineSinkDescription from", "import NetworkRef from .network_resource_properties_base import NetworkResourcePropertiesBase from .network_resource_description import NetworkResourceDescription", "from .local_network_resource_properties import LocalNetworkResourceProperties from .endpoint_ref import EndpointRef from .network_ref", "import ErrorModel, ErrorModelException from .operation_result import OperationResult from .provisioned_resource_properties import", "-------------------------------------------------------------------------- from .available_operation_display import AvailableOperationDisplay from .error_details_model import ErrorDetailsModel from", "'AutoScalingPolicy', 'ServiceResourceDescription', 'DiagnosticsSinkProperties', 'DiagnosticsDescription', 'ApplicationProperties', 'AzureInternalMonitoringPipelineSinkDescription', 'ApplicationResourceDescription', 'AddRemoveReplicaScalingMechanism', 'AutoScalingMetric', 'AutoScalingResourceMetric',", "ServiceProperties from .service_replica_properties import ServiceReplicaProperties from 
.service_replica_description import ServiceReplicaDescription from", "from .network_resource_description_paged import NetworkResourceDescriptionPaged from .gateway_resource_description_paged import GatewayResourceDescriptionPaged from .application_resource_description_paged", "from .gateway_destination import GatewayDestination from .tcp_config import TcpConfig from .http_route_match_path", "ResourceRequests from .resource_limits import ResourceLimits from .resource_requirements import ResourceRequirements from", "ContainerInstanceView from .container_code_package_properties import ContainerCodePackageProperties from .auto_scaling_trigger import AutoScalingTrigger from", "ManagedProxyResource from .resource import Resource from .tracked_resource import TrackedResource from", ".inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties from .secret_resource_properties_base import SecretResourcePropertiesBase from .secret_resource_description import", "the project root for # license information. # # Code", ".gateway_resource_description import GatewayResourceDescription from .image_registry_credential import ImageRegistryCredential from .environment_variable import", "import ImageRegistryCredential from .environment_variable import EnvironmentVariable from .setting import Setting", "import AzureInternalMonitoringPipelineSinkDescription from .application_resource_description import ApplicationResourceDescription from .add_remove_replica_scaling_mechanism import AddRemoveReplicaScalingMechanism", "Generator. 
# Changes may cause incorrect behavior and will be", "from .error_error_model import ErrorErrorModel from .error_model import ErrorModel, ErrorModelException from", "ServiceReplicaProperties from .service_replica_description import ServiceReplicaDescription from .average_load_scaling_trigger import AverageLoadScalingTrigger from", "import AverageLoadScalingTrigger from .container_logs import ContainerLogs from .operation_result_paged import OperationResultPaged", "license information. # # Code generated by Microsoft (R) AutoRest", "AddRemoveReplicaScalingMechanism from .auto_scaling_metric import AutoScalingMetric from .auto_scaling_resource_metric import AutoScalingResourceMetric from", "from .resource_requirements import ResourceRequirements from .diagnostics_ref import DiagnosticsRef from .reliable_collections_ref", "'SecretResourceDescription', 'SecretValue', 'SecretValueProperties', 'SecretValueResourceDescription', 'VolumeProviderParametersAzureFile', 'VolumeProperties', 'VolumeReference', 'ApplicationScopedVolumeCreationParameters', 'ApplicationScopedVolume', 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk',", "'ErrorModelException', 'OperationResult', 'ProvisionedResourceProperties', 'ProxyResource', 'ManagedProxyResource', 'Resource', 'TrackedResource', 'SecretResourceProperties', 'InlinedValueSecretResourceProperties', 'SecretResourcePropertiesBase',", "'VolumeProvider', 'SizeTypes', 'ApplicationScopedVolumeKind', 'NetworkKind', 'HeaderMatchType', 'OperatingSystemType', 'DiagnosticsSinkKind', 'AutoScalingMechanismKind', 'AutoScalingMetricKind', 'AutoScalingResourceMetricName',", ".provisioned_resource_properties import ProvisionedResourceProperties from .proxy_resource import ProxyResource from .managed_proxy_resource import", ".network_resource_properties import NetworkResourceProperties from .local_network_resource_properties import LocalNetworkResourceProperties from .endpoint_ref import", "import ErrorErrorModel from .error_model 
import ErrorModel, ErrorModelException from .operation_result import", "DiagnosticsSinkProperties from .diagnostics_description import DiagnosticsDescription from .application_properties import ApplicationProperties from", "from .proxy_resource import ProxyResource from .managed_proxy_resource import ManagedProxyResource from .resource", ".service_resource_description import ServiceResourceDescription from .diagnostics_sink_properties import DiagnosticsSinkProperties from .diagnostics_description import", ".auto_scaling_mechanism import AutoScalingMechanism from .auto_scaling_policy import AutoScalingPolicy from .service_resource_description import", "SecretResourceDescriptionPaged from .secret_value_resource_description_paged import SecretValueResourceDescriptionPaged from .volume_resource_description_paged import VolumeResourceDescriptionPaged from", "'SecretValue', 'SecretValueProperties', 'SecretValueResourceDescription', 'VolumeProviderParametersAzureFile', 'VolumeProperties', 'VolumeReference', 'ApplicationScopedVolumeCreationParameters', 'ApplicationScopedVolume', 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', 'VolumeResourceDescription',", "# -------------------------------------------------------------------------- from .available_operation_display import AvailableOperationDisplay from .error_details_model import ErrorDetailsModel", ".application_scoped_volume import ApplicationScopedVolume from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk from .volume_resource_description import", ".volume_reference import VolumeReference from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters from .application_scoped_volume import", "'VolumeProperties', 'VolumeReference', 'ApplicationScopedVolumeCreationParameters', 'ApplicationScopedVolume', 
'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', 'VolumeResourceDescription', 'NetworkResourceProperties', 'LocalNetworkResourceProperties', 'EndpointRef', 'NetworkRef',", "the code is # regenerated. # -------------------------------------------------------------------------- from .available_operation_display import", ".auto_scaling_metric import AutoScalingMetric from .auto_scaling_resource_metric import AutoScalingResourceMetric from .service_properties import", "See License.txt in the project root for # license information.", "import ServiceReplicaDescriptionPaged from .service_fabric_mesh_management_client_enums import ( ResourceStatus, HealthState, SecretKind, VolumeProvider,", "reserved. # Licensed under the MIT License. See License.txt in", "NetworkResourceDescriptionPaged from .gateway_resource_description_paged import GatewayResourceDescriptionPaged from .application_resource_description_paged import ApplicationResourceDescriptionPaged from", "import DiagnosticsSinkProperties from .diagnostics_description import DiagnosticsDescription from .application_properties import ApplicationProperties", "import SecretValueResourceDescription from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile from .volume_properties import VolumeProperties", "from .auto_scaling_metric import AutoScalingMetric from .auto_scaling_resource_metric import AutoScalingResourceMetric from .service_properties", "HeaderMatchType, OperatingSystemType, DiagnosticsSinkKind, AutoScalingMechanismKind, AutoScalingMetricKind, AutoScalingResourceMetricName, AutoScalingTriggerKind, ) __all__ =", "import EndpointRef from .network_ref import NetworkRef from .network_resource_properties_base import NetworkResourcePropertiesBase", "TcpConfig from .http_route_match_path import HttpRouteMatchPath from .http_route_match_header import HttpRouteMatchHeader from", "from .diagnostics_description import DiagnosticsDescription from .application_properties import 
ApplicationProperties from .azure_internal_monitoring_pipeline_sink_description", "'AverageLoadScalingTrigger', 'ContainerLogs', 'OperationResultPaged', 'SecretResourceDescriptionPaged', 'SecretValueResourceDescriptionPaged', 'VolumeResourceDescriptionPaged', 'NetworkResourceDescriptionPaged', 'GatewayResourceDescriptionPaged', 'ApplicationResourceDescriptionPaged', 'ServiceResourceDescriptionPaged',", "import AutoScalingResourceMetric from .service_properties import ServiceProperties from .service_replica_properties import ServiceReplicaProperties", "# Changes may cause incorrect behavior and will be lost", ".error_error_model import ErrorErrorModel from .error_model import ErrorModel, ErrorModelException from .operation_result", ".diagnostics_ref import DiagnosticsRef from .reliable_collections_ref import ReliableCollectionsRef from .container_state import", "import ContainerEvent from .container_instance_view import ContainerInstanceView from .container_code_package_properties import ContainerCodePackageProperties", "Code generated by Microsoft (R) AutoRest Code Generator. # Changes", "information. # # Code generated by Microsoft (R) AutoRest Code", "Setting from .container_label import ContainerLabel from .endpoint_properties import EndpointProperties from", "License. 
See License.txt in the project root for # license", "'SecretResourceProperties', 'InlinedValueSecretResourceProperties', 'SecretResourcePropertiesBase', 'SecretResourceDescription', 'SecretValue', 'SecretValueProperties', 'SecretValueResourceDescription', 'VolumeProviderParametersAzureFile', 'VolumeProperties', 'VolumeReference',", "from .service_properties import ServiceProperties from .service_replica_properties import ServiceReplicaProperties from .service_replica_description", "import ApplicationScopedVolumeCreationParameters from .application_scoped_volume import ApplicationScopedVolume from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk", "'ContainerInstanceView', 'ContainerCodePackageProperties', 'AutoScalingTrigger', 'AutoScalingMechanism', 'AutoScalingPolicy', 'ServiceResourceDescription', 'DiagnosticsSinkProperties', 'DiagnosticsDescription', 'ApplicationProperties', 'AzureInternalMonitoringPipelineSinkDescription',", "from .endpoint_properties import EndpointProperties from .resource_requests import ResourceRequests from .resource_limits", ".resource_requirements import ResourceRequirements from .diagnostics_ref import DiagnosticsRef from .reliable_collections_ref import", "import ResourceLimits from .resource_requirements import ResourceRequirements from .diagnostics_ref import DiagnosticsRef", "from .endpoint_ref import EndpointRef from .network_ref import NetworkRef from .network_resource_properties_base", "from .auto_scaling_resource_metric import AutoScalingResourceMetric from .service_properties import ServiceProperties from .service_replica_properties", "from .network_resource_properties import NetworkResourceProperties from .local_network_resource_properties import LocalNetworkResourceProperties from .endpoint_ref", "'DiagnosticsSinkProperties', 'DiagnosticsDescription', 'ApplicationProperties', 'AzureInternalMonitoringPipelineSinkDescription', 
'ApplicationResourceDescription', 'AddRemoveReplicaScalingMechanism', 'AutoScalingMetric', 'AutoScalingResourceMetric', 'ServiceProperties', 'ServiceReplicaProperties',", "'ApplicationResourceDescription', 'AddRemoveReplicaScalingMechanism', 'AutoScalingMetric', 'AutoScalingResourceMetric', 'ServiceProperties', 'ServiceReplicaProperties', 'ServiceReplicaDescription', 'AverageLoadScalingTrigger', 'ContainerLogs', 'OperationResultPaged',", ".secret_resource_description import SecretResourceDescription from .secret_value import SecretValue from .secret_value_properties import", "ContainerCodePackageProperties from .auto_scaling_trigger import AutoScalingTrigger from .auto_scaling_mechanism import AutoScalingMechanism from", "OperatingSystemType, DiagnosticsSinkKind, AutoScalingMechanismKind, AutoScalingMetricKind, AutoScalingResourceMetricName, AutoScalingTriggerKind, ) __all__ = [", "SecretResourceDescription from .secret_value import SecretValue from .secret_value_properties import SecretValueProperties from", "ServiceReplicaDescription from .average_load_scaling_trigger import AverageLoadScalingTrigger from .container_logs import ContainerLogs from", "EnvironmentVariable from .setting import Setting from .container_label import ContainerLabel from", ".auto_scaling_trigger import AutoScalingTrigger from .auto_scaling_mechanism import AutoScalingMechanism from .auto_scaling_policy import", "# regenerated. 
# -------------------------------------------------------------------------- from .available_operation_display import AvailableOperationDisplay from .error_details_model", "from .volume_resource_description_paged import VolumeResourceDescriptionPaged from .network_resource_description_paged import NetworkResourceDescriptionPaged from .gateway_resource_description_paged", "__all__ = [ 'AvailableOperationDisplay', 'ErrorDetailsModel', 'ErrorErrorModel', 'ErrorModel', 'ErrorModelException', 'OperationResult', 'ProvisionedResourceProperties',", ".network_resource_description import NetworkResourceDescription from .gateway_destination import GatewayDestination from .tcp_config import", "AutoScalingMechanismKind, AutoScalingMetricKind, AutoScalingResourceMetricName, AutoScalingTriggerKind, ) __all__ = [ 'AvailableOperationDisplay', 'ErrorDetailsModel',", ".resource_limits import ResourceLimits from .resource_requirements import ResourceRequirements from .diagnostics_ref import", "Licensed under the MIT License. See License.txt in the project", "ContainerEvent from .container_instance_view import ContainerInstanceView from .container_code_package_properties import ContainerCodePackageProperties from", "from .container_logs import ContainerLogs from .operation_result_paged import OperationResultPaged from .secret_resource_description_paged", "from .provisioned_resource_properties import ProvisionedResourceProperties from .proxy_resource import ProxyResource from .managed_proxy_resource", "rights reserved. # Licensed under the MIT License. 
See License.txt", "from .auto_scaling_trigger import AutoScalingTrigger from .auto_scaling_mechanism import AutoScalingMechanism from .auto_scaling_policy", ".container_logs import ContainerLogs from .operation_result_paged import OperationResultPaged from .secret_resource_description_paged import", "from .secret_value_resource_description_paged import SecretValueResourceDescriptionPaged from .volume_resource_description_paged import VolumeResourceDescriptionPaged from .network_resource_description_paged", "AutoScalingPolicy from .service_resource_description import ServiceResourceDescription from .diagnostics_sink_properties import DiagnosticsSinkProperties from", "# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All", "code is # regenerated. # -------------------------------------------------------------------------- from .available_operation_display import AvailableOperationDisplay", "'SecretValueResourceDescription', 'VolumeProviderParametersAzureFile', 'VolumeProperties', 'VolumeReference', 'ApplicationScopedVolumeCreationParameters', 'ApplicationScopedVolume', 'ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk', 'VolumeResourceDescription', 'NetworkResourceProperties', 'LocalNetworkResourceProperties',", "License.txt in the project root for # license information. #", "# license information. 
# # Code generated by Microsoft (R)", "import ManagedProxyResource from .resource import Resource from .tracked_resource import TrackedResource", ".secret_resource_properties import SecretResourceProperties from .inlined_value_secret_resource_properties import InlinedValueSecretResourceProperties from .secret_resource_properties_base import", "'EndpointRef', 'NetworkRef', 'NetworkResourcePropertiesBase', 'NetworkResourceDescription', 'GatewayDestination', 'TcpConfig', 'HttpRouteMatchPath', 'HttpRouteMatchHeader', 'HttpRouteMatchRule', 'HttpRouteConfig',", ".network_resource_properties_base import NetworkResourcePropertiesBase from .network_resource_description import NetworkResourceDescription from .gateway_destination import", "ServiceResourceDescription from .diagnostics_sink_properties import DiagnosticsSinkProperties from .diagnostics_description import DiagnosticsDescription from", "'TcpConfig', 'HttpRouteMatchPath', 'HttpRouteMatchHeader', 'HttpRouteMatchRule', 'HttpRouteConfig', 'HttpHostConfig', 'HttpConfig', 'GatewayProperties', 'GatewayResourceDescription', 'ImageRegistryCredential',", ".secret_value_resource_description import SecretValueResourceDescription from .volume_provider_parameters_azure_file import VolumeProviderParametersAzureFile from .volume_properties import", "import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk from .volume_resource_description import VolumeResourceDescription from .network_resource_properties import NetworkResourceProperties", "import ApplicationScopedVolume from .application_scoped_volume_creation_parameters_service_fabric_volume_disk import ApplicationScopedVolumeCreationParametersServiceFabricVolumeDisk from .volume_resource_description import VolumeResourceDescription", ".diagnostics_description import DiagnosticsDescription from .application_properties import ApplicationProperties from .azure_internal_monitoring_pipeline_sink_description import", "(R) AutoRest Code Generator. 
# Changes may cause incorrect behavior", ".secret_resource_properties_base import SecretResourcePropertiesBase from .secret_resource_description import SecretResourceDescription from .secret_value import", "GatewayDestination from .tcp_config import TcpConfig from .http_route_match_path import HttpRouteMatchPath from", "from .tcp_config import TcpConfig from .http_route_match_path import HttpRouteMatchPath from .http_route_match_header", ".endpoint_properties import EndpointProperties from .resource_requests import ResourceRequests from .resource_limits import", "OperationResultPaged from .secret_resource_description_paged import SecretResourceDescriptionPaged from .secret_value_resource_description_paged import SecretValueResourceDescriptionPaged from", "'ErrorModel', 'ErrorModelException', 'OperationResult', 'ProvisionedResourceProperties', 'ProxyResource', 'ManagedProxyResource', 'Resource', 'TrackedResource', 'SecretResourceProperties', 'InlinedValueSecretResourceProperties',", "from .volume_reference import VolumeReference from .application_scoped_volume_creation_parameters import ApplicationScopedVolumeCreationParameters from .application_scoped_volume", "import LocalNetworkResourceProperties from .endpoint_ref import EndpointRef from .network_ref import NetworkRef", "from .container_code_package_properties import ContainerCodePackageProperties from .auto_scaling_trigger import AutoScalingTrigger from .auto_scaling_mechanism", ".network_ref import NetworkRef from .network_resource_properties_base import NetworkResourcePropertiesBase from .network_resource_description import", ".http_route_config import HttpRouteConfig from .http_host_config import HttpHostConfig from .http_config import", "from .container_label import ContainerLabel from .endpoint_properties import EndpointProperties from .resource_requests" ]
[ "names = \"\" if self.map and self.object_pool: if objects and", "and (x,y) in self.map.get_visible_tiles()] names = ', '.join(names) # join", "the mouse's coordinates and in FOV objects = self.object_pool.get_objects_as_list() names", "def get_names_under_mouse(self): # return a string with the names of", "{}\".format(self.mouse_coord)) def get_names_under_mouse(self): # return a string with the names", "\"\" if self.map and self.object_pool: if objects and self.map: names", "== x and obj.coord.Y == y and (x,y) in self.map.get_visible_tiles()]", "= map def set_object_pool(self, object_pool): self.object_pool = object_pool def get_mouse_coord(self):", "(x, y) = self.camera.camera_coord + Vector2(*self.mouse_coord) # create a list", "self.camera.camera_coord + Vector2(*self.mouse_coord) # create a list with the names", "set_object_pool(self, object_pool): self.object_pool = object_pool def get_mouse_coord(self): return self.mouse_coord def", "all objects under the mouse (x, y) = self.camera.camera_coord +", "names, separated by commas else: logger.warning(\"map or object pool not", "= object_pool self.camera = None def set_map(self, map): self.map =", "the mouse (x, y) = self.camera.camera_coord + Vector2(*self.mouse_coord) # create", "import logging from models.GenericObjects import Vector2 logger = logging.getLogger('Rogue-EVE') class", "it \"\"\" def __init__(self, map=None, object_pool=None): self.mouse_coord = (0, 0)", "0) self.map = map self.object_pool = object_pool self.camera = None", "= self.camera.camera_coord + Vector2(*self.mouse_coord) # create a list with the", "if obj.coord.X == x and obj.coord.Y == y and (x,y)", "objects if obj.coord.X == x and obj.coord.Y == y and", "\"\"\" def __init__(self, map=None, object_pool=None): self.mouse_coord = (0, 0) self.map", "new_coord): self.mouse_coord = new_coord logger.debug(\"mouse position {}\".format(self.mouse_coord)) def get_names_under_mouse(self): #", "self.mouse_coord = new_coord logger.debug(\"mouse 
position {}\".format(self.mouse_coord)) def get_names_under_mouse(self): # return", "__init__(self, map=None, object_pool=None): self.mouse_coord = (0, 0) self.map = map", "= logging.getLogger('Rogue-EVE') class MouseController(object): \"\"\" Mouse controller needs the map,", "over it \"\"\" def __init__(self, map=None, object_pool=None): self.mouse_coord = (0,", "set_map(self, map): self.map = map def set_object_pool(self, object_pool): self.object_pool =", "names of all objects under the mouse (x, y) =", "object_pool): self.object_pool = object_pool def get_mouse_coord(self): return self.mouse_coord def set_mouse_coord(self,", "by commas else: logger.warning(\"map or object pool not initialized!\") return", "commas else: logger.warning(\"map or object pool not initialized!\") return names.capitalize()", "logging.getLogger('Rogue-EVE') class MouseController(object): \"\"\" Mouse controller needs the map, get", "None def set_map(self, map): self.map = map def set_object_pool(self, object_pool):", "new_coord logger.debug(\"mouse position {}\".format(self.mouse_coord)) def get_names_under_mouse(self): # return a string", "x and obj.coord.Y == y and (x,y) in self.map.get_visible_tiles()] names", "for obj in objects if obj.coord.X == x and obj.coord.Y", "self.object_pool.get_objects_as_list() names = \"\" if self.map and self.object_pool: if objects", "in self.map.get_visible_tiles()] names = ', '.join(names) # join the names,", "self.object_pool = object_pool def get_mouse_coord(self): return self.mouse_coord def set_mouse_coord(self, new_coord):", "obj.coord.Y == y and (x,y) in self.map.get_visible_tiles()] names = ',", "at the mouse's coordinates and in FOV objects = self.object_pool.get_objects_as_list()", "self.map and self.object_pool: if objects and self.map: names = [obj.name", "names = ', '.join(names) # join the names, separated by", "all objects at the mouse's coordinates and in FOV objects", "objects and self.map: names = [obj.name for obj in objects", 
"def set_object_pool(self, object_pool): self.object_pool = object_pool def get_mouse_coord(self): return self.mouse_coord", "= [obj.name for obj in objects if obj.coord.X == x", "= self.object_pool.get_objects_as_list() names = \"\" if self.map and self.object_pool: if", "[obj.name for obj in objects if obj.coord.X == x and", "+ Vector2(*self.mouse_coord) # create a list with the names of", "the map, get over it \"\"\" def __init__(self, map=None, object_pool=None):", "y and (x,y) in self.map.get_visible_tiles()] names = ', '.join(names) #", "= None def set_map(self, map): self.map = map def set_object_pool(self,", "def get_mouse_coord(self): return self.mouse_coord def set_mouse_coord(self, new_coord): self.mouse_coord = new_coord", "the names of all objects at the mouse's coordinates and", "<reponame>Scoppio/Rogue-EVE import logging from models.GenericObjects import Vector2 logger = logging.getLogger('Rogue-EVE')", "get over it \"\"\" def __init__(self, map=None, object_pool=None): self.mouse_coord =", "self.object_pool: if objects and self.map: names = [obj.name for obj", "with the names of all objects under the mouse (x,", "create a list with the names of all objects at", "# join the names, separated by commas else: logger.warning(\"map or", "= (0, 0) self.map = map self.object_pool = object_pool self.camera", "', '.join(names) # join the names, separated by commas else:", "self.map.get_visible_tiles()] names = ', '.join(names) # join the names, separated", "class MouseController(object): \"\"\" Mouse controller needs the map, get over", "= map self.object_pool = object_pool self.camera = None def set_map(self,", "= \"\" if self.map and self.object_pool: if objects and self.map:", "map self.object_pool = object_pool self.camera = None def set_map(self, map):", "(0, 0) self.map = map self.object_pool = object_pool self.camera =", "logger.debug(\"mouse position {}\".format(self.mouse_coord)) def get_names_under_mouse(self): # return a string with", "logger = 
logging.getLogger('Rogue-EVE') class MouseController(object): \"\"\" Mouse controller needs the", "a string with the names of all objects under the", "Vector2(*self.mouse_coord) # create a list with the names of all", "controller needs the map, get over it \"\"\" def __init__(self,", "names of all objects at the mouse's coordinates and in", "map=None, object_pool=None): self.mouse_coord = (0, 0) self.map = map self.object_pool", "map): self.map = map def set_object_pool(self, object_pool): self.object_pool = object_pool", "MouseController(object): \"\"\" Mouse controller needs the map, get over it", "self.mouse_coord def set_mouse_coord(self, new_coord): self.mouse_coord = new_coord logger.debug(\"mouse position {}\".format(self.mouse_coord))", "the names of all objects under the mouse (x, y)", "FOV objects = self.object_pool.get_objects_as_list() names = \"\" if self.map and", "coordinates and in FOV objects = self.object_pool.get_objects_as_list() names = \"\"", "self.map: names = [obj.name for obj in objects if obj.coord.X", "obj in objects if obj.coord.X == x and obj.coord.Y ==", "and obj.coord.Y == y and (x,y) in self.map.get_visible_tiles()] names =", "self.map = map self.object_pool = object_pool self.camera = None def", "get_mouse_coord(self): return self.mouse_coord def set_mouse_coord(self, new_coord): self.mouse_coord = new_coord logger.debug(\"mouse", "# return a string with the names of all objects", "map, get over it \"\"\" def __init__(self, map=None, object_pool=None): self.mouse_coord", "def __init__(self, map=None, object_pool=None): self.mouse_coord = (0, 0) self.map =", "mouse (x, y) = self.camera.camera_coord + Vector2(*self.mouse_coord) # create a", "def set_mouse_coord(self, new_coord): self.mouse_coord = new_coord logger.debug(\"mouse position {}\".format(self.mouse_coord)) def", "from models.GenericObjects import Vector2 logger = logging.getLogger('Rogue-EVE') class MouseController(object): \"\"\"", "get_names_under_mouse(self): # return a 
string with the names of all", "# create a list with the names of all objects", "set_mouse_coord(self, new_coord): self.mouse_coord = new_coord logger.debug(\"mouse position {}\".format(self.mouse_coord)) def get_names_under_mouse(self):", "a list with the names of all objects at the", "of all objects at the mouse's coordinates and in FOV", "= ', '.join(names) # join the names, separated by commas", "and in FOV objects = self.object_pool.get_objects_as_list() names = \"\" if", "and self.map: names = [obj.name for obj in objects if", "separated by commas else: logger.warning(\"map or object pool not initialized!\")", "if objects and self.map: names = [obj.name for obj in", "(x,y) in self.map.get_visible_tiles()] names = ', '.join(names) # join the", "join the names, separated by commas else: logger.warning(\"map or object", "needs the map, get over it \"\"\" def __init__(self, map=None,", "return self.mouse_coord def set_mouse_coord(self, new_coord): self.mouse_coord = new_coord logger.debug(\"mouse position", "obj.coord.X == x and obj.coord.Y == y and (x,y) in", "self.map = map def set_object_pool(self, object_pool): self.object_pool = object_pool def", "= new_coord logger.debug(\"mouse position {}\".format(self.mouse_coord)) def get_names_under_mouse(self): # return a", "logging from models.GenericObjects import Vector2 logger = logging.getLogger('Rogue-EVE') class MouseController(object):", "object_pool self.camera = None def set_map(self, map): self.map = map", "objects = self.object_pool.get_objects_as_list() names = \"\" if self.map and self.object_pool:", "in FOV objects = self.object_pool.get_objects_as_list() names = \"\" if self.map", "of all objects under the mouse (x, y) = self.camera.camera_coord", "y) = self.camera.camera_coord + Vector2(*self.mouse_coord) # create a list with", "object_pool def get_mouse_coord(self): return self.mouse_coord def set_mouse_coord(self, new_coord): self.mouse_coord =", "if self.map and self.object_pool: if objects and 
self.map: names =", "objects at the mouse's coordinates and in FOV objects =", "import Vector2 logger = logging.getLogger('Rogue-EVE') class MouseController(object): \"\"\" Mouse controller", "'.join(names) # join the names, separated by commas else: logger.warning(\"map", "with the names of all objects at the mouse's coordinates", "list with the names of all objects at the mouse's", "objects under the mouse (x, y) = self.camera.camera_coord + Vector2(*self.mouse_coord)", "Vector2 logger = logging.getLogger('Rogue-EVE') class MouseController(object): \"\"\" Mouse controller needs", "\"\"\" Mouse controller needs the map, get over it \"\"\"", "Mouse controller needs the map, get over it \"\"\" def", "map def set_object_pool(self, object_pool): self.object_pool = object_pool def get_mouse_coord(self): return", "and self.object_pool: if objects and self.map: names = [obj.name for", "models.GenericObjects import Vector2 logger = logging.getLogger('Rogue-EVE') class MouseController(object): \"\"\" Mouse", "return a string with the names of all objects under", "= object_pool def get_mouse_coord(self): return self.mouse_coord def set_mouse_coord(self, new_coord): self.mouse_coord", "== y and (x,y) in self.map.get_visible_tiles()] names = ', '.join(names)", "self.mouse_coord = (0, 0) self.map = map self.object_pool = object_pool", "def set_map(self, map): self.map = map def set_object_pool(self, object_pool): self.object_pool", "self.object_pool = object_pool self.camera = None def set_map(self, map): self.map", "self.camera = None def set_map(self, map): self.map = map def", "the names, separated by commas else: logger.warning(\"map or object pool", "position {}\".format(self.mouse_coord)) def get_names_under_mouse(self): # return a string with the", "mouse's coordinates and in FOV objects = self.object_pool.get_objects_as_list() names =", "object_pool=None): self.mouse_coord = (0, 0) self.map = map self.object_pool =", "string with the names of all objects under the 
mouse", "in objects if obj.coord.X == x and obj.coord.Y == y", "under the mouse (x, y) = self.camera.camera_coord + Vector2(*self.mouse_coord) #", "names = [obj.name for obj in objects if obj.coord.X ==" ]
[ "for t in tasks] dt = datetime.datetime.now() - t0 print('Unsync", "@unsync() def download_some_more(): print('Downloading more...') url = 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled' resp =", "dt = datetime.datetime.now() - t0 print('Unsync version done in {:,.2f}", "more...') url = 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled' resp = requests.get(url) resp.raise_for_status() text =", "'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2' async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session: async with session.get(url) as", "download_some_more(), download_some_more(), wait_some(), wait_some(), wait_some(), wait_some()] [t.result() for t in", "download_some_more(): print('Downloading more...') url = 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled' resp = requests.get(url) resp.raise_for_status()", "(more) {:,} characters'.format(len(text))) @unsync() def download_some_more(): print('Downloading more...') url =", "version done in {:,.2f} seconds.'.format(dt.total_seconds())) @unsync(cpu_bound=True) def compute_some(): print('Computing...') for", "_ in range(1, 10_000_000): math.sqrt(25 ** 25 + .01) @unsync()", "aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session: async with session.get(url) as resp: resp.raise_for_status() text", "print('Downloaded (more) {:,} characters'.format(len(text))) @unsync() def download_some_more(): print('Downloading more...') url", "= resp.text print('Downloaded (more) {:,} characters'.format(len(text))) @unsync() async def wait_some():", "= datetime.datetime.now() - t0 print('Unsync version done in {:,.2f} seconds.'.format(dt.total_seconds()))", "print('Computing...') for _ in range(1, 10_000_000): math.sqrt(25 ** 25 +", "= datetime.datetime.now() tasks = [ compute_some(), compute_some(), compute_some(), download_some(), download_some(),", "datetime.datetime.now() 
tasks = [ compute_some(), compute_some(), compute_some(), download_some(), download_some(), download_some(),", "t in tasks] dt = datetime.datetime.now() - t0 print('Unsync version", "compute_some(), download_some(), download_some(), download_some(), download_some_more(), download_some_more(), wait_some(), wait_some(), wait_some(), wait_some()]", "session.get(url) as resp: resp.raise_for_status() text = await resp.text() print('Downloaded (more)", "from unsync import unsync import asyncio import datetime import math", "def main(): t0 = datetime.datetime.now() tasks = [ compute_some(), compute_some(),", "- t0 print('Unsync version done in {:,.2f} seconds.'.format(dt.total_seconds())) @unsync(cpu_bound=True) def", "datetime import math import aiohttp import requests def main(): t0", "{:,.2f} seconds.'.format(dt.total_seconds())) @unsync(cpu_bound=True) def compute_some(): print('Computing...') for _ in range(1,", "seconds.'.format(dt.total_seconds())) @unsync(cpu_bound=True) def compute_some(): print('Computing...') for _ in range(1, 10_000_000):", "wait_some(), wait_some(), wait_some(), wait_some()] [t.result() for t in tasks] dt", "url = 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled' resp = requests.get(url) resp.raise_for_status() text = resp.text", "wait_some(), wait_some()] [t.result() for t in tasks] dt = datetime.datetime.now()", "** 25 + .01) @unsync() async def download_some(): print('Downloading...') url", "def compute_some(): print('Computing...') for _ in range(1, 10_000_000): math.sqrt(25 **", "download_some(), download_some_more(), download_some_more(), wait_some(), wait_some(), wait_some(), wait_some()] [t.result() for t", "in {:,.2f} seconds.'.format(dt.total_seconds())) @unsync(cpu_bound=True) def compute_some(): print('Computing...') for _ in", "for _ in range(1, 10_000_000): math.sqrt(25 ** 25 + .01)", "print('Waiting...') for _ in range(1, 1000): await asyncio.sleep(.001) if __name__", "wait_some(), wait_some(), 
wait_some()] [t.result() for t in tasks] dt =", "characters'.format(len(text))) @unsync() def download_some_more(): print('Downloading more...') url = 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled' resp", "as resp: resp.raise_for_status() text = await resp.text() print('Downloaded (more) {:,}", "t0 print('Unsync version done in {:,.2f} seconds.'.format(dt.total_seconds())) @unsync(cpu_bound=True) def compute_some():", "download_some(), download_some(), download_some(), download_some_more(), download_some_more(), wait_some(), wait_some(), wait_some(), wait_some()] [t.result()", "download_some(), download_some(), download_some_more(), download_some_more(), wait_some(), wait_some(), wait_some(), wait_some()] [t.result() for", "requests def main(): t0 = datetime.datetime.now() tasks = [ compute_some(),", "resp.raise_for_status() text = await resp.text() print('Downloaded (more) {:,} characters'.format(len(text))) @unsync()", "text = await resp.text() print('Downloaded (more) {:,} characters'.format(len(text))) @unsync() def", "for _ in range(1, 1000): await asyncio.sleep(.001) if __name__ ==", "resp.text() print('Downloaded (more) {:,} characters'.format(len(text))) @unsync() def download_some_more(): print('Downloading more...')", "import math import aiohttp import requests def main(): t0 =", "in range(1, 1000): await asyncio.sleep(.001) if __name__ == '__main__': main()", "requests.get(url) resp.raise_for_status() text = resp.text print('Downloaded (more) {:,} characters'.format(len(text))) @unsync()", ".01) @unsync() async def download_some(): print('Downloading...') url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2' async", "download_some_more(), wait_some(), wait_some(), wait_some(), wait_some()] [t.result() for t in tasks]", "resp.text print('Downloaded (more) {:,} characters'.format(len(text))) @unsync() async def wait_some(): print('Waiting...')", "print('Unsync version done in {:,.2f} 
seconds.'.format(dt.total_seconds())) @unsync(cpu_bound=True) def compute_some(): print('Computing...')", "tasks = [ compute_some(), compute_some(), compute_some(), download_some(), download_some(), download_some(), download_some_more(),", "= await resp.text() print('Downloaded (more) {:,} characters'.format(len(text))) @unsync() def download_some_more():", "resp.raise_for_status() text = resp.text print('Downloaded (more) {:,} characters'.format(len(text))) @unsync() async", "in range(1, 10_000_000): math.sqrt(25 ** 25 + .01) @unsync() async", "with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session: async with session.get(url) as resp: resp.raise_for_status()", "import aiohttp import requests def main(): t0 = datetime.datetime.now() tasks", "resp: resp.raise_for_status() text = await resp.text() print('Downloaded (more) {:,} characters'.format(len(text)))", "= 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2' async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session: async with session.get(url)", "compute_some(), compute_some(), download_some(), download_some(), download_some(), download_some_more(), download_some_more(), wait_some(), wait_some(), wait_some(),", "= [ compute_some(), compute_some(), compute_some(), download_some(), download_some(), download_some(), download_some_more(), download_some_more(),", "math import aiohttp import requests def main(): t0 = datetime.datetime.now()", "_ in range(1, 1000): await asyncio.sleep(.001) if __name__ == '__main__':", "compute_some(), compute_some(), compute_some(), download_some(), download_some(), download_some(), download_some_more(), download_some_more(), wait_some(), wait_some(),", "aiohttp import requests def main(): t0 = datetime.datetime.now() tasks =", "as session: async with session.get(url) as resp: resp.raise_for_status() text =", "'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled' resp = 
requests.get(url) resp.raise_for_status() text = resp.text print('Downloaded (more)", "datetime.datetime.now() - t0 print('Unsync version done in {:,.2f} seconds.'.format(dt.total_seconds())) @unsync(cpu_bound=True)", "print('Downloaded (more) {:,} characters'.format(len(text))) @unsync() async def wait_some(): print('Waiting...') for", "session: async with session.get(url) as resp: resp.raise_for_status() text = await", "{:,} characters'.format(len(text))) @unsync() async def wait_some(): print('Waiting...') for _ in", "@unsync(cpu_bound=True) def compute_some(): print('Computing...') for _ in range(1, 10_000_000): math.sqrt(25", "@unsync() async def wait_some(): print('Waiting...') for _ in range(1, 1000):", "main(): t0 = datetime.datetime.now() tasks = [ compute_some(), compute_some(), compute_some(),", "with session.get(url) as resp: resp.raise_for_status() text = await resp.text() print('Downloaded", "asyncio import datetime import math import aiohttp import requests def", "{:,} characters'.format(len(text))) @unsync() def download_some_more(): print('Downloading more...') url = 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled'", "25 + .01) @unsync() async def download_some(): print('Downloading...') url =", "async def download_some(): print('Downloading...') url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2' async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False))", "characters'.format(len(text))) @unsync() async def wait_some(): print('Waiting...') for _ in range(1,", "[t.result() for t in tasks] dt = datetime.datetime.now() - t0", "tasks] dt = datetime.datetime.now() - t0 print('Unsync version done in", "def wait_some(): print('Waiting...') for _ in range(1, 1000): await asyncio.sleep(.001)", "in tasks] dt = datetime.datetime.now() - t0 print('Unsync version done", "def download_some(): print('Downloading...') url = 
'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2' async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as", "print('Downloading more...') url = 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled' resp = requests.get(url) resp.raise_for_status() text", "10_000_000): math.sqrt(25 ** 25 + .01) @unsync() async def download_some():", "async with session.get(url) as resp: resp.raise_for_status() text = await resp.text()", "async def wait_some(): print('Waiting...') for _ in range(1, 1000): await", "resp = requests.get(url) resp.raise_for_status() text = resp.text print('Downloaded (more) {:,}", "wait_some(): print('Waiting...') for _ in range(1, 1000): await asyncio.sleep(.001) if", "url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2' async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session: async with", "async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session: async with session.get(url) as resp:", "def download_some_more(): print('Downloading more...') url = 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled' resp = requests.get(url)", "[ compute_some(), compute_some(), compute_some(), download_some(), download_some(), download_some(), download_some_more(), download_some_more(), wait_some(),", "= requests.get(url) resp.raise_for_status() text = resp.text print('Downloaded (more) {:,} characters'.format(len(text)))", "text = resp.text print('Downloaded (more) {:,} characters'.format(len(text))) @unsync() async def", "(more) {:,} characters'.format(len(text))) @unsync() async def wait_some(): print('Waiting...') for _", "import asyncio import datetime import math import aiohttp import requests", "<gh_stars>0 from unsync import unsync import asyncio import datetime import", "import datetime import math import aiohttp import requests def main():", "done in {:,.2f} 
seconds.'.format(dt.total_seconds())) @unsync(cpu_bound=True) def compute_some(): print('Computing...') for _", "= 'https://pythonbytes.fm./episodes/show/92/will-your-python-be-compiled' resp = requests.get(url) resp.raise_for_status() text = resp.text print('Downloaded", "wait_some()] [t.result() for t in tasks] dt = datetime.datetime.now() -", "math.sqrt(25 ** 25 + .01) @unsync() async def download_some(): print('Downloading...')", "await resp.text() print('Downloaded (more) {:,} characters'.format(len(text))) @unsync() def download_some_more(): print('Downloading", "download_some(): print('Downloading...') url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2' async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session:", "range(1, 10_000_000): math.sqrt(25 ** 25 + .01) @unsync() async def", "+ .01) @unsync() async def download_some(): print('Downloading...') url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2'", "unsync import asyncio import datetime import math import aiohttp import", "@unsync() async def download_some(): print('Downloading...') url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2' async with", "t0 = datetime.datetime.now() tasks = [ compute_some(), compute_some(), compute_some(), download_some(),", "compute_some(): print('Computing...') for _ in range(1, 10_000_000): math.sqrt(25 ** 25", "import requests def main(): t0 = datetime.datetime.now() tasks = [", "import unsync import asyncio import datetime import math import aiohttp", "unsync import unsync import asyncio import datetime import math import", "print('Downloading...') url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2' async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session: async" ]
[ "= pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options", "None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):", "of the resource. :param pulumi.ResourceOptions opts: Options for the resource.", "Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] =", "resource. \"\"\" pulumi.set(__self__, \"resource_group_name\", resource_group_name) if description is not None:", "created. \"\"\" return pulumi.get(self, \"location\") @location.setter def location(self, value: Optional[pulumi.Input[str]]):", "None: pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]:", "resource to be created. \"\"\" return pulumi.get(self, \"name\") @name.setter def", "str resource_name: The name of the resource. :param pulumi.ResourceOptions opts:", "for the Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"description\") @description.setter", "@pulumi.getter def location(self) -> pulumi.Output[str]: \"\"\" The location/region where the", "location) if name is not None: pulumi.set(__self__, \"name\", name) if", "tags to assign to the resource. \"\"\" return pulumi.get(self, \"tags\")", "```python import pulumi import pulumi_azure as azure example = azure.core.ResourceGroup(\"example\",", "friendly_name=\"FriendlyName\", description=\"A description of my workspace\") ``` ## Import Virtual", "__props__=None): if opts is None: opts = pulumi.ResourceOptions() if not", "value: Optional[pulumi.Input[str]]): pulumi.set(self, \"friendly_name\", value) @property @pulumi.getter def location(self) ->", "be created. \"\"\" return pulumi.get(self, \"location\") @property @pulumi.getter def name(self)", "Tool. *** # *** Do not edit by hand unless", "tags to assign to the resource. \"\"\" ... 
@overload def", "= name if resource_group_name is None and not opts.urn: raise", "args: WorkspaceArgs, opts: Optional[pulumi.ResourceOptions] = None): \"\"\" Manages a Virtual", "-> 'Workspace': \"\"\" Get an existing Workspace resource's state with", "new resource to be created. \"\"\" return pulumi.get(self, \"location\") @property", "resource. :param WorkspaceArgs args: The arguments to use to populate", "if tags is not None: pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter", "str resource_name: The name of the resource. :param WorkspaceArgs args:", "return pulumi.get(self, \"resource_group_name\") @resource_group_name.setter def resource_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"resource_group_name\",", "a new resource to be created. \"\"\" return pulumi.get(self, \"name\")", "ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if", "created. \"\"\" return pulumi.get(self, \"resource_group_name\") @resource_group_name.setter def resource_group_name(self, value: Optional[pulumi.Input[str]]):", "resource_group_name is None and not opts.urn: raise TypeError(\"Missing required property", "to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for", "the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly name", "friendly_name: A friendly name for the Virtual Desktop Workspace. :param", "None: pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Input[str]:", "value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: \"\"\" A description", "pulumi import pulumi_azure as azure example = azure.core.ResourceGroup(\"example\", location=\"West Europe\")", "the resource. 
\"\"\" return pulumi.get(self, \"tags\") @tags.setter def tags(self, value:", "when passed in combination with a valid opts.id to get", "@property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: \"\"\" A description for", "value: Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value) @property @pulumi.getter def tags(self) ->", "is not None: pulumi.set(__self__, \"location\", location) if name is not", "= _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None:", "Optional[pulumi.Input[str]]: \"\"\" The name of the resource group in which", "Desktop Workspace is located. Changing the location/region forces a new", ":param pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop", "options to be a ResourceOptions instance') if opts.version is None:", "of tags to assign to the resource. \"\"\" ... @overload", "None: pulumi.set(__self__, \"name\", name) if resource_group_name is not None: pulumi.set(__self__,", "_utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name,", "be imported using the `resource id`, e.g. ```sh $ pulumi", "raise TypeError('Expected resource options to be a ResourceOptions instance') if", "Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\" The", "for the Virtual Desktop Workspace. :param pulumi.Input[str] location: The location/region", "assign to the resource. \"\"\" pulumi.set(__self__, \"resource_group_name\", resource_group_name) if description", "pulumi.Input[str]]]] = None, __props__=None): if opts is None: opts =", "tags is not None: pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter(name=\"resourceGroupName\") def", "friendly name for the Virtual Desktop Workspace. 
\"\"\" return pulumi.get(self,", "Workspace resource's state with the given name, id, and optional", "-> pulumi.Input[str]: \"\"\" The name of the resource group in", "pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence,", "__props__.__dict__[\"tags\"] = tags super(Workspace, __self__).__init__( 'azure:desktopvirtualization/workspace:Workspace', resource_name, __props__, opts) @staticmethod", "= tags super(Workspace, __self__).__init__( 'azure:desktopvirtualization/workspace:Workspace', resource_name, __props__, opts) @staticmethod def", "location/region where the Virtual Desktop Workspace is located. Changing the", "Workspace. Changing the name forces a new resource to be", "Usage ```python import pulumi import pulumi_azure as azure example =", "None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if", "constructing a Workspace resource. :param pulumi.Input[str] resource_group_name: The name of", "pulumi.Input[str] description: A description for the Virtual Desktop Workspace. :param", "pulumi.Input[str] resource_group_name: The name of the resource group in which", "created. \"\"\" return pulumi.get(self, \"resource_group_name\") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]):", "= None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\" Input properties", "\"\"\" return pulumi.get(self, \"resource_group_name\") @resource_group_name.setter def resource_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self,", "resource_group_name __props__.__dict__[\"tags\"] = tags super(Workspace, __self__).__init__( 'azure:desktopvirtualization/workspace:Workspace', resource_name, __props__, opts)", "Import Virtual Desktop Workspaces can be imported using the `resource", "\"\"\" ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts", "... 
@overload def __init__(__self__, resource_name: str, args: WorkspaceArgs, opts: Optional[pulumi.ResourceOptions]", "Optional[pulumi.Input[str]]): pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str,", "resource's state with the given name, id, and optional extra", "The unique provider ID of the resource to lookup. :param", "= description __props__.__dict__[\"friendly_name\"] = friendly_name __props__.__dict__[\"location\"] = location __props__.__dict__[\"name\"] =", "a new resource to be created. :param pulumi.Input[str] description: A", "@pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: \"\"\" A mapping of", "return pulumi.get(self, \"resource_group_name\") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, \"resource_group_name\",", "location __props__.__dict__[\"name\"] = name if resource_group_name is None and not", "None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name:", "name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value) @property @pulumi.getter def tags(self)", "mapping of tags to assign to the resource. \"\"\" opts", "is not None: pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self)", "to be created. :param pulumi.Input[str] description: A description for the", "Virtual Desktop Workspace. Changing the name forces a new resource", "for the Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"friendly_name\") @property", "generated by the Pulumi Terraform Bridge (tfgen) Tool. 
*** #", "None: pulumi.set(__self__, \"description\", description) if friendly_name is not None: pulumi.set(__self__,", "@resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter", "Desktop Workspace. \"\"\" return pulumi.get(self, \"description\") @description.setter def description(self, value:", "example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param str resource_name: The name of the", "if __props__ is not None: raise TypeError('__props__ is only valid", "name) if resource_group_name is not None: pulumi.set(__self__, \"resource_group_name\", resource_group_name) if", "= _WorkspaceState.__new__(_WorkspaceState) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"] = friendly_name __props__.__dict__[\"location\"] =", "the Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"description\") @property @pulumi.getter(name=\"friendlyName\")", "def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"description\", value) @property @pulumi.getter(name=\"friendlyName\") def", "location=\"West Europe\") workspace = azure.desktopvirtualization.Workspace(\"workspace\", location=example.location, resource_group_name=example.name, friendly_name=\"FriendlyName\", description=\"A description", "resource options to be a ResourceOptions instance') if opts.version is", "*args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs) if", "opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions):", "Workspace(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: \"\"\"", "is not None: pulumi.set(__self__, \"description\", description) if 
friendly_name is not", "the resulting resource. :param pulumi.Input[str] id: The unique provider ID", "'Workspace': \"\"\" Get an existing Workspace resource's state with the", "tags to assign to the resource. \"\"\" opts = pulumi.ResourceOptions.merge(opts,", "workspace\") ``` ## Import Virtual Desktop Workspaces can be imported", "resource group name forces a new resource to be created.", "the Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"friendly_name\") @friendly_name.setter def", "= None, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None,", "__props__.__dict__[\"location\"] = location __props__.__dict__[\"name\"] = name if resource_group_name is None", "value) class Workspace(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]", "resource to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping", "location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]]", "Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: \"\"\" A mapping of tags to assign to", "tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) @pulumi.input_type class _WorkspaceState:", "the location/region forces a new resource to be created. \"\"\"", "resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None):", "located. Changing the location/region forces a new resource to be", "tags) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Input[str]: \"\"\" The name", "Desktop Workspace. 
\"\"\" return pulumi.get(self, \"friendly_name\") @friendly_name.setter def friendly_name(self, value:", "pulumi.set(__self__, \"name\", name) if tags is not None: pulumi.set(__self__, \"tags\",", "Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"description\") @description.setter def description(self,", "tags is not None: pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter def", "not None: pulumi.set(__self__, \"name\", name) if tags is not None:", "and not opts.urn: raise TypeError(\"Missing required property 'resource_group_name'\") __props__.__dict__[\"resource_group_name\"] =", "\"name\", name) if tags is not None: pulumi.set(__self__, \"tags\", tags)", "a Virtual Desktop Workspace. ## Example Usage ```python import pulumi", "opts.version is None: opts.version = _utilities.get_version() if opts.id is None:", "hand unless you're certain you know what you are doing!", "-> Optional[pulumi.Input[str]]: \"\"\" A description for the Virtual Desktop Workspace.", "to create the Virtual Desktop Workspace. Changing the resource group", "tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Workspace': \"\"\" Get an", "id, and optional extra properties used to qualify the lookup.", "def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] =", "for constructing a Workspace resource. 
:param pulumi.Input[str] resource_group_name: The name", "class WorkspaceArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str], description: Optional[pulumi.Input[str]] =", "= None, friendly_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None,", "def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value) @property @pulumi.getter(name=\"resourceGroupName\") def", "return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Output[str]: \"\"\"", "to be created. \"\"\" return pulumi.get(self, \"resource_group_name\") @property @pulumi.getter def", "friendly_name is not None: pulumi.set(__self__, \"friendly_name\", friendly_name) if location is", "None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\" The set of", ":param str resource_name: The unique name of the resulting resource.", "Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly name for", "Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): if opts is None: opts", "**resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts:", "def friendly_name(self) -> pulumi.Output[Optional[str]]: \"\"\" A friendly name for the", "a Workspace resource. :param pulumi.Input[str] resource_group_name: The name of the", "unless you're certain you know what you are doing! ***", "Workspace(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None,", "def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]]", "pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to", "group name forces a new resource to be created. 
\"\"\"", "Desktop Workspace. :param pulumi.Input[str] location: The location/region where the Virtual", "a new resource to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags:", "be created. \"\"\" return pulumi.get(self, \"location\") @location.setter def location(self, value:", "was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***", "to get an existing resource') __props__ = WorkspaceArgs.__new__(WorkspaceArgs) __props__.__dict__[\"description\"] =", "def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: \"\"\" A mapping of tags", "opts.urn: raise TypeError(\"Missing required property 'resource_group_name'\") __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"]", "resource') __props__ = WorkspaceArgs.__new__(WorkspaceArgs) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"] = friendly_name", "name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str,", "not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a", "overload from .. import _utilities __all__ = ['WorkspaceArgs', 'Workspace'] @pulumi.input_type", "opts: Optional[pulumi.ResourceOptions] = None): \"\"\" Manages a Virtual Desktop Workspace.", "a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version()", "if resource_group_name is not None: pulumi.set(__self__, \"resource_group_name\", resource_group_name) if tags", "valid opts.id to get an existing resource') __props__ = WorkspaceArgs.__new__(WorkspaceArgs)", "@location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"location\", value) @property @pulumi.getter", "be created. \"\"\" return pulumi.get(self, \"resource_group_name\") @property @pulumi.getter def tags(self)", "to use to populate this resource's properties. 
:param pulumi.ResourceOptions opts:", "opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str,", "Optional[pulumi.Input[str]]: \"\"\" The name of the Virtual Desktop Workspace. Changing", "@pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> Optional[pulumi.Input[str]]: \"\"\" A friendly name for", "Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> Optional[pulumi.Input[str]]:", "resource to be created. :param pulumi.Input[str] resource_group_name: The name of", "__all__ = ['WorkspaceArgs', 'Workspace'] @pulumi.input_type class WorkspaceArgs: def __init__(__self__, *,", "pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Input[str]: \"\"\"", "resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args,", "for the Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"description\") @property", "by the Pulumi Terraform Bridge (tfgen) Tool. *** # ***", "def description(self) -> pulumi.Output[Optional[str]]: \"\"\" A description for the Virtual", "-> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: \"\"\" A mapping of tags to assign", "be created. \"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self)", "opts: Options for the resource. \"\"\" ... 
def __init__(__self__, resource_name:", "description(self) -> pulumi.Output[Optional[str]]: \"\"\" A description for the Virtual Desktop", "Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Workspace': \"\"\" Get an existing", "\"\"\" return pulumi.get(self, \"resource_group_name\") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self,", "if opts.id is None: if __props__ is not None: raise", "created. :param pulumi.Input[str] resource_group_name: The name of the resource group", "id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None,", "= None, __props__=None): if opts is None: opts = pulumi.ResourceOptions()", "Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not", "= pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _WorkspaceState.__new__(_WorkspaceState) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"]", "\"name\") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value) @property", "name if resource_group_name is None and not opts.urn: raise TypeError(\"Missing", "Europe\") workspace = azure.desktopvirtualization.Workspace(\"workspace\", location=example.location, resource_group_name=example.name, friendly_name=\"FriendlyName\", description=\"A description of", "pulumi.Input[str]]]] = None, __props__=None): \"\"\" Manages a Virtual Desktop Workspace.", "import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union,", "= None, name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] =", "@property @pulumi.getter def name(self) -> pulumi.Output[str]: \"\"\" The name of", "__props__=__props__) @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: \"\"\" A description", "e.g. 
```sh $ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param", "None): \"\"\" Input properties used for looking up and filtering", "what you are doing! *** import warnings import pulumi import", "description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]]", "\"resource_group_name\") @resource_group_name.setter def resource_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"resource_group_name\", value) @property", "can be imported using the `resource id`, e.g. ```sh $", "resource_name: str, args: WorkspaceArgs, opts: Optional[pulumi.ResourceOptions] = None): \"\"\" Manages", "azure example = azure.core.ResourceGroup(\"example\", location=\"West Europe\") workspace = azure.desktopvirtualization.Workspace(\"workspace\", location=example.location,", "str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] =", "the given name, id, and optional extra properties used to", "resource to be created. \"\"\" return pulumi.get(self, \"location\") @property @pulumi.getter", "args: The arguments to use to populate this resource's properties.", "extra properties used to qualify the lookup. :param str resource_name:", "__props__.__dict__[\"tags\"] = tags return Workspace(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def", "name of the resource group in which to create the", "assign to the resource. \"\"\" ... 
@overload def __init__(__self__, resource_name:", "-> Optional[pulumi.Input[str]]: \"\"\" The name of the Virtual Desktop Workspace.", "def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: \"\"\" A mapping of tags", "tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\" The set of arguments", "pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) class Workspace(pulumi.CustomResource): @overload def __init__(__self__, resource_name:", "\"resource_group_name\", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: \"\"\" A", "_utilities.get_version() if opts.id is None: if __props__ is not None:", "Workspace. \"\"\" return pulumi.get(self, \"friendly_name\") @friendly_name.setter def friendly_name(self, value: Optional[pulumi.Input[str]]):", "of tags to assign to the resource. \"\"\" if description", "= None): \"\"\" The set of arguments for constructing a", "def __init__(__self__, *, resource_group_name: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, friendly_name:", "pulumi.set(__self__, \"resource_group_name\", resource_group_name) if tags is not None: pulumi.set(__self__, \"tags\",", "resource to be created. \"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"resourceGroupName\")", "None: pulumi.set(__self__, \"friendly_name\", friendly_name) if location is not None: pulumi.set(__self__,", "None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, tags:", "Virtual Desktop Workspaces can be imported using the `resource id`,", "@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description:", "The name of the resource. :param WorkspaceArgs args: The arguments", "pulumi.Output[Optional[str]]: \"\"\" A friendly name for the Virtual Desktop Workspace.", "description for the Virtual Desktop Workspace. 
\"\"\" return pulumi.get(self, \"description\")", "Optional[pulumi.Input[str]]: \"\"\" A friendly name for the Virtual Desktop Workspace.", "str, args: WorkspaceArgs, opts: Optional[pulumi.ResourceOptions] = None): \"\"\" Manages a", "location: The location/region where the Virtual Desktop Workspace is located.", "\"location\") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"location\", value) @property", "resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\"", "@property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: \"\"\" A mapping", "def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"location\", value) @property @pulumi.getter def", "= None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] =", "__props__ = WorkspaceArgs.__new__(WorkspaceArgs) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"] = friendly_name __props__.__dict__[\"location\"]", "resource_name: The unique name of the resulting resource. :param pulumi.Input[str]", "pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) @pulumi.input_type class _WorkspaceState: def __init__(__self__, *,", "Optional[pulumi.Input[str]]: \"\"\" A description for the Virtual Desktop Workspace. 
\"\"\"", "only valid when passed in combination with a valid opts.id", "= resource_group_name __props__.__dict__[\"tags\"] = tags return Workspace(resource_name, opts=opts, __props__=__props__) @property", "= resource_group_name __props__.__dict__[\"tags\"] = tags super(Workspace, __self__).__init__( 'azure:desktopvirtualization/workspace:Workspace', resource_name, __props__,", "import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param str resource_name: The name", "warnings import pulumi import pulumi.runtime from typing import Any, Mapping,", "Union, overload from .. import _utilities __all__ = ['WorkspaceArgs', 'Workspace']", "property 'resource_group_name'\") __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] = tags super(Workspace, __self__).__init__(", "Optional[pulumi.Input[str]]): pulumi.set(self, \"friendly_name\", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]:", "filtering Workspace resources. :param pulumi.Input[str] description: A description for the", "a new resource to be created. \"\"\" return pulumi.get(self, \"resource_group_name\")", "not None: pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) ->", ":param WorkspaceArgs args: The arguments to use to populate this", "Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): if", "Desktop Workspace. Changing the name forces a new resource to", "new resource to be created. \"\"\" return pulumi.get(self, \"name\") @name.setter", "to the resource. 
\"\"\" return pulumi.get(self, \"tags\") @tags.setter def tags(self,", "= None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Workspace': \"\"\"", ":param pulumi.Input[str] description: A description for the Virtual Desktop Workspace.", "to the resource. \"\"\" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ =", "for the Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"friendly_name\") @friendly_name.setter", "forces a new resource to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]]", "# coding=utf-8 # *** WARNING: this file was generated by", ":param pulumi.ResourceOptions opts: Options for the resource. \"\"\" ... def", "import warnings import pulumi import pulumi.runtime from typing import Any,", "resource. \"\"\" if description is not None: pulumi.set(__self__, \"description\", description)", "= name __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] = tags return Workspace(resource_name,", "name) if tags is not None: pulumi.set(__self__, \"tags\", tags) @property", "resources. :param pulumi.Input[str] description: A description for the Virtual Desktop", "*** import warnings import pulumi import pulumi.runtime from typing import", "resource to be created. \"\"\" return pulumi.get(self, \"resource_group_name\") @resource_group_name.setter def", ":param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description:", ".. import _utilities __all__ = ['WorkspaceArgs', 'Workspace'] @pulumi.input_type class WorkspaceArgs:", "__props__.__dict__[\"friendly_name\"] = friendly_name __props__.__dict__[\"location\"] = location __props__.__dict__[\"name\"] = name __props__.__dict__[\"resource_group_name\"]", "@pulumi.getter def name(self) -> pulumi.Output[str]: \"\"\" The name of the", "to be created. 
:param pulumi.Input[str] resource_group_name: The name of the", "def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]]", "WorkspaceArgs.__new__(WorkspaceArgs) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"] = friendly_name __props__.__dict__[\"location\"] = location", "you know what you are doing! *** import warnings import", "value) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> Optional[pulumi.Input[str]]: \"\"\" The name", "def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value) @property @pulumi.getter def", "\"tags\", value) class Workspace(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts:", "str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, friendly_name:", "provider ID of the resource to lookup. :param pulumi.ResourceOptions opts:", "to assign to the resource. \"\"\" pulumi.set(__self__, \"resource_group_name\", resource_group_name) if", "Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"friendly_name\") @friendly_name.setter def friendly_name(self,", "location(self) -> Optional[pulumi.Input[str]]: \"\"\" The location/region where the Virtual Desktop", "is None: if __props__ is not None: raise TypeError('__props__ is", "The name of the Virtual Desktop Workspace. Changing the name", "Optional[pulumi.ResourceOptions] = None): \"\"\" Manages a Virtual Desktop Workspace. ##", "the name forces a new resource to be created. :param", "None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags:", "The set of arguments for constructing a Workspace resource. :param", "forces a new resource to be created. \"\"\" return pulumi.get(self,", "resulting resource. :param pulumi.Input[str] id: The unique provider ID of", "the resource to lookup. 
:param pulumi.ResourceOptions opts: Options for the", "-> Optional[pulumi.Input[str]]: \"\"\" The location/region where the Virtual Desktop Workspace", "__self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name:", "pulumi.get(self, \"description\") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"description\", value)", "\"\"\" if description is not None: pulumi.set(__self__, \"description\", description) if", "get an existing resource') __props__ = WorkspaceArgs.__new__(WorkspaceArgs) __props__.__dict__[\"description\"] = description", "Desktop Workspace. \"\"\" return pulumi.get(self, \"description\") @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self)", "= None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): if opts", "forces a new resource to be created. :param pulumi.Input[str] name:", "name forces a new resource to be created. :param pulumi.Input[Mapping[str,", "where the Virtual Desktop Workspace is located. Changing the location/region", "pulumi.Output[Optional[Mapping[str, str]]]: \"\"\" A mapping of tags to assign to", "new resource to be created. :param pulumi.Input[str] name: The name", "pulumi.set(__self__, \"description\", description) if friendly_name is not None: pulumi.set(__self__, \"friendly_name\",", "to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of", "@property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: \"\"\" A description for", "name(self) -> Optional[pulumi.Input[str]]: \"\"\" The name of the Virtual Desktop", "Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] =", "the resource. \"\"\" ... 
@overload def __init__(__self__, resource_name: str, args:", "resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is", "pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance')", "def name(self) -> Optional[pulumi.Input[str]]: \"\"\" The name of the Virtual", "Options for the resource. :param pulumi.Input[str] description: A description for", "is not None: raise TypeError('__props__ is only valid when passed", "@name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value) @property @pulumi.getter(name=\"resourceGroupName\")", "@pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> pulumi.Output[Optional[str]]: \"\"\" A friendly name for", "resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args,", "pulumi.Input[str] name: The name of the Virtual Desktop Workspace. Changing", "pulumi.set(__self__, \"name\", name) if resource_group_name is not None: pulumi.set(__self__, \"resource_group_name\",", "name of the resource. :param pulumi.ResourceOptions opts: Options for the", "@pulumi.input_type class WorkspaceArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str], description: Optional[pulumi.Input[str]]", "be created. :param pulumi.Input[str] name: The name of the Virtual", "tags) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: \"\"\" A description", "resource to be created. \"\"\" return pulumi.get(self, \"resource_group_name\") @property @pulumi.getter", "description for the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A", "unique provider ID of the resource to lookup. :param pulumi.ResourceOptions", "new resource to be created. 
\"\"\" return pulumi.get(self, \"resource_group_name\") @property", "valid when passed in combination with a valid opts.id to", "\"\"\" pulumi.set(__self__, \"resource_group_name\", resource_group_name) if description is not None: pulumi.set(__self__,", "= None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None,", "resource_group_name) if description is not None: pulumi.set(__self__, \"description\", description) if", "this file was generated by the Pulumi Terraform Bridge (tfgen)", "None): \"\"\" The set of arguments for constructing a Workspace", "\"friendly_name\") @friendly_name.setter def friendly_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"friendly_name\", value) @property", "Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): \"\"\"", "\"description\", description) if friendly_name is not None: pulumi.set(__self__, \"friendly_name\", friendly_name)", "pulumi.ResourceOptions opts: Options for the resource. \"\"\" ... def __init__(__self__,", "opts=opts, __props__=__props__) @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: \"\"\" A", "*** # *** Do not edit by hand unless you're", "pulumi.set(self, \"tags\", value) class Workspace(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str,", "up and filtering Workspace resources. :param pulumi.Input[str] description: A description", "*** Do not edit by hand unless you're certain you", "Workspace. :param pulumi.Input[str] location: The location/region where the Virtual Desktop", "\"tags\", tags) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: \"\"\" A", ":param pulumi.Input[str] name: The name of the Virtual Desktop Workspace.", "pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to", "name of the resource. 
:param WorkspaceArgs args: The arguments to", "*args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__)", "Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] =", "@resource_group_name.setter def resource_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter", "name is not None: pulumi.set(__self__, \"name\", name) if resource_group_name is", "pulumi.set(self, \"name\", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:", "to be created. \"\"\" return pulumi.get(self, \"location\") @property @pulumi.getter def", "tags super(Workspace, __self__).__init__( 'azure:desktopvirtualization/workspace:Workspace', resource_name, __props__, opts) @staticmethod def get(resource_name:", "mapping of tags to assign to the resource. \"\"\" return", "'resource_group_name'\") __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] = tags super(Workspace, __self__).__init__( 'azure:desktopvirtualization/workspace:Workspace',", "_WorkspaceState: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]]", "friendly_name __props__.__dict__[\"location\"] = location __props__.__dict__[\"name\"] = name if resource_group_name is", "A friendly name for the Virtual Desktop Workspace. \"\"\" return", "Workspace resource. 
:param pulumi.Input[str] resource_group_name: The name of the resource", "*, resource_group_name: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] =", "raise TypeError(\"Missing required property 'resource_group_name'\") __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] =", "@tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) @pulumi.input_type", "@property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: \"\"\" The location/region where", "populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the", "be a ResourceOptions instance') if opts.version is None: opts.version =", "with a valid opts.id to get an existing resource') __props__", "resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.", "be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags", "Desktop Workspace. \"\"\" return pulumi.get(self, \"friendly_name\") @property @pulumi.getter def location(self)", ":param pulumi.Input[str] resource_group_name: The name of the resource group in", "def friendly_name(self) -> Optional[pulumi.Input[str]]: \"\"\" A friendly name for the", "of arguments for constructing a Workspace resource. :param pulumi.Input[str] resource_group_name:", "the Virtual Desktop Workspace. Changing the name forces a new", "to assign to the resource. \"\"\" if description is not", "pulumi.ResourceOptions opts: Options for the resource. 
:param pulumi.Input[str] description: A", "def resource_group_name(self) -> Optional[pulumi.Input[str]]: \"\"\" The name of the resource", "if description is not None: pulumi.set(__self__, \"description\", description) if friendly_name", "not None: pulumi.set(__self__, \"resource_group_name\", resource_group_name) if tags is not None:", "\"\"\" return pulumi.get(self, \"location\") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self,", "else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]", "doing! *** import warnings import pulumi import pulumi.runtime from typing", "azure.desktopvirtualization.Workspace(\"workspace\", location=example.location, resource_group_name=example.name, friendly_name=\"FriendlyName\", description=\"A description of my workspace\") ```", "def name(self) -> pulumi.Output[str]: \"\"\" The name of the Virtual", "\"name\") @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Output[str]: \"\"\" The name", "@property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: \"\"\" A mapping", "location is not None: pulumi.set(__self__, \"location\", location) if name is", "properties used for looking up and filtering Workspace resources. 
:param", "pulumi.get(self, \"location\") @property @pulumi.getter def name(self) -> pulumi.Output[str]: \"\"\" The", "pulumi.get(self, \"location\") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"location\", value)", "= location __props__.__dict__[\"name\"] = name if resource_group_name is None and", "\"resource_group_name\", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: \"\"\"", "opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not", "mapping of tags to assign to the resource. \"\"\" if", "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param str resource_name: The name of the resource.", "return pulumi.get(self, \"description\") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"description\",", "description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"description\", value) @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self)", "A friendly name for the Virtual Desktop Workspace. :param pulumi.Input[str]", "friendly_name) if location is not None: pulumi.set(__self__, \"location\", location) if", "tags to assign to the resource. \"\"\" if description is", "typing import Any, Mapping, Optional, Sequence, Union, overload from ..", "resource. :param pulumi.Input[str] description: A description for the Virtual Desktop", "in which to create the Virtual Desktop Workspace. 
Changing the", "pulumi.set(self, \"friendly_name\", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: \"\"\"", "azure.core.ResourceGroup(\"example\", location=\"West Europe\") workspace = azure.desktopvirtualization.Workspace(\"workspace\", location=example.location, resource_group_name=example.name, friendly_name=\"FriendlyName\", description=\"A", "name, id, and optional extra properties used to qualify the", "of the Virtual Desktop Workspace. Changing the name forces a", "name is not None: pulumi.set(__self__, \"name\", name) if tags is", "resource_group_name(self) -> pulumi.Input[str]: \"\"\" The name of the resource group", "instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id", "-> pulumi.Output[str]: \"\"\" The location/region where the Virtual Desktop Workspace", "tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: \"\"\" A mapping of tags to", "['WorkspaceArgs', 'Workspace'] @pulumi.input_type class WorkspaceArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str],", "description __props__.__dict__[\"friendly_name\"] = friendly_name __props__.__dict__[\"location\"] = location __props__.__dict__[\"name\"] = name", "__self__).__init__( 'azure:desktopvirtualization/workspace:Workspace', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id:", "tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: \"\"\" A mapping of tags to", "```sh $ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param str", "tags return Workspace(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def description(self) ->", "\"\"\" return pulumi.get(self, \"description\") @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> pulumi.Output[Optional[str]]:", 
"resource_group_name) if tags is not None: pulumi.set(__self__, \"tags\", tags) @property", "pulumi.set(__self__, \"location\", location) if name is not None: pulumi.set(__self__, \"name\",", "new resource to be created. \"\"\" return pulumi.get(self, \"resource_group_name\") @resource_group_name.setter", "arguments to use to populate this resource's properties. :param pulumi.ResourceOptions", "resource. :param pulumi.Input[str] resource_group_name: The name of the resource group", "= None, __props__=None): \"\"\" Manages a Virtual Desktop Workspace. ##", "@pulumi.input_type class _WorkspaceState: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None,", "_internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] =", "name(self) -> pulumi.Output[str]: \"\"\" The name of the Virtual Desktop", "Options for the resource. \"\"\" ... def __init__(__self__, resource_name: str,", "resource. \"\"\" return pulumi.get(self, \"tags\") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str,", "*args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None,", "name for the Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"friendly_name\")", "resource group in which to create the Virtual Desktop Workspace.", "not None: pulumi.set(__self__, \"name\", name) if resource_group_name is not None:", "Virtual Desktop Workspace. Changing the resource group name forces a", "__props__=None): \"\"\" Manages a Virtual Desktop Workspace. ## Example Usage", "required property 'resource_group_name'\") __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] = tags super(Workspace,", "= friendly_name __props__.__dict__[\"location\"] = location __props__.__dict__[\"name\"] = name __props__.__dict__[\"resource_group_name\"] =", "this resource's properties. 
:param pulumi.ResourceOptions opts: Options for the resource.", "Desktop Workspaces can be imported using the `resource id`, e.g.", "pulumi.Input[str]]]] = None): \"\"\" Input properties used for looking up", "\"\"\" A mapping of tags to assign to the resource.", "new resource to be created. :param pulumi.Input[str] description: A description", "pulumi.Input[str]]]] = None): \"\"\" The set of arguments for constructing", "pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload", "Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str,", "\"\"\" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _WorkspaceState.__new__(_WorkspaceState) __props__.__dict__[\"description\"] =", "assign to the resource. \"\"\" if description is not None:", "@description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"description\", value) @property @pulumi.getter(name=\"friendlyName\")", "properties. :param pulumi.ResourceOptions opts: Options for the resource. \"\"\" ...", "opts.version = _utilities.get_version() if opts.id is None: if __props__ is", "Optional[pulumi.Input[str]]: \"\"\" The location/region where the Virtual Desktop Workspace is", "name __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] = tags return Workspace(resource_name, opts=opts,", "pulumi.Output[Optional[str]]: \"\"\" A description for the Virtual Desktop Workspace. 
\"\"\"", "is not None: pulumi.set(__self__, \"resource_group_name\", resource_group_name) if tags is not", "Changing the name forces a new resource to be created.", "Optional[pulumi.Input[str]]): pulumi.set(self, \"description\", value) @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> Optional[pulumi.Input[str]]:", "None, friendly_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name:", "pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: \"\"\"", "forces a new resource to be created. :param pulumi.Input[str] description:", "None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__,", "lookup. :param str resource_name: The unique name of the resulting", "resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter def description(self)", "super(Workspace, __self__).__init__( 'azure:desktopvirtualization/workspace:Workspace', resource_name, __props__, opts) @staticmethod def get(resource_name: str,", "return pulumi.get(self, \"name\") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"name\",", "__props__ = _WorkspaceState.__new__(_WorkspaceState) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"] = friendly_name __props__.__dict__[\"location\"]", ":param pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace", "\"\"\" The name of the resource group in which to", ":param str resource_name: The name of the resource. 
:param WorkspaceArgs", "opts.id is None: if __props__ is not None: raise TypeError('__props__", "resource_group_name is not None: pulumi.set(__self__, \"resource_group_name\", resource_group_name) if tags is", "def resource_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter def", "Changing the location/region forces a new resource to be created.", "__props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]", "The unique name of the resulting resource. :param pulumi.Input[str] id:", "__props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] = tags return Workspace(resource_name, opts=opts, __props__=__props__)", "lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str]", "assign to the resource. \"\"\" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__", "-> Optional[pulumi.Input[str]]: \"\"\" The name of the resource group in", "\"\"\" The name of the Virtual Desktop Workspace. Changing the", "not opts.urn: raise TypeError(\"Missing required property 'resource_group_name'\") __props__.__dict__[\"resource_group_name\"] = resource_group_name", "pulumi.Input[str]: \"\"\" The name of the resource group in which", "assign to the resource. \"\"\" return pulumi.get(self, \"tags\") @tags.setter def", "which to create the Virtual Desktop Workspace. Changing the resource", "Input properties used for looking up and filtering Workspace resources.", "import pulumi_azure as azure example = azure.core.ResourceGroup(\"example\", location=\"West Europe\") workspace", "for looking up and filtering Workspace resources. :param pulumi.Input[str] description:", "__init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] =", "... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts =", "``` :param str resource_name: The name of the resource. :param", "resource. \"\"\" ... @overload def __init__(__self__, resource_name: str, args: WorkspaceArgs,", "opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] =", "location/region forces a new resource to be created. :param pulumi.Input[str]", "from typing import Any, Mapping, Optional, Sequence, Union, overload from", "Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\" The set of arguments for", "opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _WorkspaceState.__new__(_WorkspaceState) __props__.__dict__[\"description\"] = description", "return pulumi.get(self, \"resource_group_name\") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:", "location=example.location, resource_group_name=example.name, friendly_name=\"FriendlyName\", description=\"A description of my workspace\") ``` ##", "from .. import _utilities __all__ = ['WorkspaceArgs', 'Workspace'] @pulumi.input_type class", "friendly_name(self) -> pulumi.Output[Optional[str]]: \"\"\" A friendly name for the Virtual", "the Virtual Desktop Workspace is located. Changing the location/region forces", "pulumi.Input[str]]]]: \"\"\" A mapping of tags to assign to the", "__init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions,", "opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource", "pulumi.get(self, \"tags\") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\",", "pulumi.set(__self__, \"friendly_name\", friendly_name) if location is not None: pulumi.set(__self__, \"location\",", "know what you are doing! 
*** import warnings import pulumi", "opts.id to get an existing resource') __props__ = WorkspaceArgs.__new__(WorkspaceArgs) __props__.__dict__[\"description\"]", "Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities", "## Import Virtual Desktop Workspaces can be imported using the", "Example Usage ```python import pulumi import pulumi_azure as azure example", "\"\"\" A friendly name for the Virtual Desktop Workspace. \"\"\"", "\"name\", name) if resource_group_name is not None: pulumi.set(__self__, \"resource_group_name\", resource_group_name)", "certain you know what you are doing! *** import warnings", "@pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: \"\"\" A mapping of", "is not None: pulumi.set(__self__, \"name\", name) if tags is not", "the Virtual Desktop Workspace. Changing the resource group name forces", "Terraform Bridge (tfgen) Tool. *** # *** Do not edit", "as azure example = azure.core.ResourceGroup(\"example\", location=\"West Europe\") workspace = azure.desktopvirtualization.Workspace(\"workspace\",", "tags to assign to the resource. \"\"\" pulumi.set(__self__, \"resource_group_name\", resource_group_name)", "\"\"\" ... @overload def __init__(__self__, resource_name: str, args: WorkspaceArgs, opts:", "qualify the lookup. :param str resource_name: The unique name of", "class _WorkspaceState: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, friendly_name:", "the `resource id`, e.g. ```sh $ pulumi import azure:desktopvirtualization/workspace:Workspace example", "you're certain you know what you are doing! 
*** import", "value: Optional[pulumi.Input[str]]): pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter def tags(self) ->", "coding=utf-8 # *** WARNING: this file was generated by the", "\"\"\" The location/region where the Virtual Desktop Workspace is located.", "friendly_name(self) -> Optional[pulumi.Input[str]]: \"\"\" A friendly name for the Virtual", "pulumi.set(self, \"tags\", value) @pulumi.input_type class _WorkspaceState: def __init__(__self__, *, description:", "(tfgen) Tool. *** # *** Do not edit by hand", "the resource. \"\"\" if description is not None: pulumi.set(__self__, \"description\",", "value) @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> Optional[pulumi.Input[str]]: \"\"\" A friendly", "name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\"", "Workspace. :param pulumi.Input[str] friendly_name: A friendly name for the Virtual", "not None: pulumi.set(__self__, \"description\", description) if friendly_name is not None:", "\"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Output[str]:", "pulumi.get(self, \"resource_group_name\") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: \"\"\"", "pulumi.set(__self__, \"resource_group_name\", resource_group_name) if description is not None: pulumi.set(__self__, \"description\",", "@property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Output[str]: \"\"\" The name of", "= ['WorkspaceArgs', 'Workspace'] @pulumi.input_type class WorkspaceArgs: def __init__(__self__, *, resource_group_name:", "be created. 
\"\"\" return pulumi.get(self, \"resource_group_name\") @resource_group_name.setter def resource_group_name(self, value:", "value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) @pulumi.input_type class _WorkspaceState: def", "= azure.desktopvirtualization.Workspace(\"workspace\", location=example.location, resource_group_name=example.name, friendly_name=\"FriendlyName\", description=\"A description of my workspace\")", "**kwargs): resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args", "return pulumi.get(self, \"description\") @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> pulumi.Output[Optional[str]]: \"\"\"", "be created. \"\"\" return pulumi.get(self, \"name\") @name.setter def name(self, value:", "arguments for constructing a Workspace resource. :param pulumi.Input[str] resource_group_name: The", "looking up and filtering Workspace resources. :param pulumi.Input[str] description: A", "the Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"description\") @description.setter def", "pulumi.Output[str]: \"\"\" The name of the resource group in which", "-> pulumi.Output[Optional[Mapping[str, str]]]: \"\"\" A mapping of tags to assign", "be created. :param pulumi.Input[str] description: A description for the Virtual", "the resource. \"\"\" ... 
def __init__(__self__, resource_name: str, *args, **kwargs):", "\"\"\" return pulumi.get(self, \"description\") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self,", "return pulumi.get(self, \"location\") @property @pulumi.getter def name(self) -> pulumi.Output[str]: \"\"\"", "description is not None: pulumi.set(__self__, \"description\", description) if friendly_name is", "pulumi.get(self, \"name\") @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Output[str]: \"\"\" The", "\"\"\" The set of arguments for constructing a Workspace resource.", "not edit by hand unless you're certain you know what", "str]]]: \"\"\" A mapping of tags to assign to the", "is not None: pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter def description(self)", "Virtual Desktop Workspace is located. Changing the location/region forces a", "description of my workspace\") ``` ## Import Virtual Desktop Workspaces", "value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: \"\"\" The location/region", "given name, id, and optional extra properties used to qualify", "combination with a valid opts.id to get an existing resource')", "def resource_group_name(self) -> pulumi.Output[str]: \"\"\" The name of the resource", "@friendly_name.setter def friendly_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"friendly_name\", value) @property @pulumi.getter", "Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) @pulumi.input_type class _WorkspaceState: def __init__(__self__,", "if resource_group_name is None and not opts.urn: raise TypeError(\"Missing required", "pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, friendly_name:", "-> pulumi.Output[Optional[str]]: \"\"\" A friendly name for the Virtual Desktop", "pulumi.Output[str]: \"\"\" The name of the 
Virtual Desktop Workspace. Changing", "resource_group_name=example.name, friendly_name=\"FriendlyName\", description=\"A description of my workspace\") ``` ## Import", "\"\"\" A description for the Virtual Desktop Workspace. \"\"\" return", "None, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None, location:", "= None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): \"\"\" Manages", "# *** WARNING: this file was generated by the Pulumi", "Workspace. \"\"\" return pulumi.get(self, \"description\") @description.setter def description(self, value: Optional[pulumi.Input[str]]):", "not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def", "TypeError('__props__ is only valid when passed in combination with a", "to assign to the resource. \"\"\" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))", "description) if friendly_name is not None: pulumi.set(__self__, \"friendly_name\", friendly_name) if", "\"\"\" return pulumi.get(self, \"location\") @property @pulumi.getter def name(self) -> pulumi.Output[str]:", "for the resource. \"\"\" ... def __init__(__self__, resource_name: str, *args,", "if opts.version is None: opts.version = _utilities.get_version() if opts.id is", "mapping of tags to assign to the resource. \"\"\" pulumi.set(__self__,", "created. 
\"\"\" return pulumi.get(self, \"name\") @name.setter def name(self, value: Optional[pulumi.Input[str]]):", "\"\"\" return pulumi.get(self, \"friendly_name\") @property @pulumi.getter def location(self) -> pulumi.Output[str]:", "Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] =", "not None: pulumi.set(__self__, \"friendly_name\", friendly_name) if location is not None:", "pulumi.Input[str] friendly_name: A friendly name for the Virtual Desktop Workspace.", "`resource id`, e.g. ```sh $ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace", "description(self) -> Optional[pulumi.Input[str]]: \"\"\" A description for the Virtual Desktop", "## Example Usage ```python import pulumi import pulumi_azure as azure", "by hand unless you're certain you know what you are", "value: Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) ->", "not None: raise TypeError('__props__ is only valid when passed in", "@property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> Optional[pulumi.Input[str]]: \"\"\" A friendly name", "= None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None,", "name for the Virtual Desktop Workspace. :param pulumi.Input[str] location: The", "pulumi.Input[str]): pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]:", "resource. 
:param pulumi.Input[str] id: The unique provider ID of the", "location __props__.__dict__[\"name\"] = name __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] = tags", "if friendly_name is not None: pulumi.set(__self__, \"friendly_name\", friendly_name) if location", "Desktop Workspace. ## Example Usage ```python import pulumi import pulumi_azure", "\"friendly_name\") @property @pulumi.getter def location(self) -> pulumi.Output[str]: \"\"\" The location/region", "def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, description:", "pulumi.get(self, \"friendly_name\") @property @pulumi.getter def location(self) -> pulumi.Output[str]: \"\"\" The", "def location(self) -> pulumi.Output[str]: \"\"\" The location/region where the Virtual", "\"\"\" return pulumi.get(self, \"resource_group_name\") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str,", "resource to be created. \"\"\" return pulumi.get(self, \"location\") @location.setter def", "__props__.__dict__[\"name\"] = name __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] = tags return", "used for looking up and filtering Workspace resources. :param pulumi.Input[str]", "the resource. 
\"\"\" pulumi.set(__self__, \"resource_group_name\", resource_group_name) if description is not", "opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]]", "_utilities __all__ = ['WorkspaceArgs', 'Workspace'] @pulumi.input_type class WorkspaceArgs: def __init__(__self__,", "WorkspaceArgs, opts: Optional[pulumi.ResourceOptions] = None): \"\"\" Manages a Virtual Desktop", "The name of the resource group in which to create", "return pulumi.get(self, \"friendly_name\") @friendly_name.setter def friendly_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"friendly_name\",", "\"friendly_name\", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: \"\"\" The", "Workspace. \"\"\" return pulumi.get(self, \"description\") @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) ->", "Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly name for the", "of the resource group in which to create the Virtual", "value: pulumi.Input[str]): pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter def description(self) ->", "Sequence, Union, overload from .. import _utilities __all__ = ['WorkspaceArgs',", "None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\" Input properties used", "pulumi.Input[str]]]] = None) -> 'Workspace': \"\"\" Get an existing Workspace", "used to qualify the lookup. :param str resource_name: The unique", "\"resource_group_name\") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, \"resource_group_name\", value) @property", "The arguments to use to populate this resource's properties. 
:param", "= None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\" The set", "@name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value) @property @pulumi.getter", "you are doing! *** import warnings import pulumi import pulumi.runtime", "A description for the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name:", "new resource to be created. \"\"\" return pulumi.get(self, \"name\") @property", "pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:", "value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: \"\"\" The name", "Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]", "tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) class Workspace(pulumi.CustomResource): @overload", "Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__", "pulumi.Input[str] id: The unique provider ID of the resource to", "description: A description for the Virtual Desktop Workspace. :param pulumi.Input[str]", "a new resource to be created. :param pulumi.Input[str] resource_group_name: The", "azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param str resource_name: The name of", "name forces a new resource to be created. \"\"\" return", "Bridge (tfgen) Tool. *** # *** Do not edit by", "name forces a new resource to be created. :param pulumi.Input[str]", "None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): if opts is", "in combination with a valid opts.id to get an existing", "Desktop Workspace. 
Changing the resource group name forces a new", "set of arguments for constructing a Workspace resource. :param pulumi.Input[str]", "Changing the resource group name forces a new resource to", "passed in combination with a valid opts.id to get an", "get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]]", "-> pulumi.Output[Optional[str]]: \"\"\" A description for the Virtual Desktop Workspace.", "the Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"friendly_name\") @property @pulumi.getter", "to assign to the resource. \"\"\" ... @overload def __init__(__self__,", "WARNING: this file was generated by the Pulumi Terraform Bridge", "of the resource. :param WorkspaceArgs args: The arguments to use", "@property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Input[str]: \"\"\" The name of", "the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param", "Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"description\") @property @pulumi.getter(name=\"friendlyName\") def", "not None: pulumi.set(__self__, \"location\", location) if name is not None:", "created. :param pulumi.Input[str] name: The name of the Virtual Desktop", "def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter def", "friendly_name __props__.__dict__[\"location\"] = location __props__.__dict__[\"name\"] = name __props__.__dict__[\"resource_group_name\"] = resource_group_name", "Workspace. \"\"\" return pulumi.get(self, \"friendly_name\") @property @pulumi.getter def location(self) ->", "\"resource_group_name\", resource_group_name) if description is not None: pulumi.set(__self__, \"description\", description)", "A mapping of tags to assign to the resource. 
\"\"\"", "Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) class Workspace(pulumi.CustomResource): @overload def __init__(__self__,", "to the resource. \"\"\" if description is not None: pulumi.set(__self__,", "*** WARNING: this file was generated by the Pulumi Terraform", "of tags to assign to the resource. \"\"\" pulumi.set(__self__, \"resource_group_name\",", "-> Optional[pulumi.Input[str]]: \"\"\" A friendly name for the Virtual Desktop", "created. \"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) ->", "def friendly_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"friendly_name\", value) @property @pulumi.getter def", "group name forces a new resource to be created. :param", "optional extra properties used to qualify the lookup. :param str", "= friendly_name __props__.__dict__[\"location\"] = location __props__.__dict__[\"name\"] = name if resource_group_name", "resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None,", "\"\"\" Manages a Virtual Desktop Workspace. ## Example Usage ```python", "of the resource to lookup. :param pulumi.ResourceOptions opts: Options for", "-> pulumi.Output[str]: \"\"\" The name of the resource group in", "if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts,", "resource to be created. :param pulumi.Input[str] name: The name of", "to be created. \"\"\" return pulumi.get(self, \"name\") @name.setter def name(self,", "None: pulumi.set(__self__, \"location\", location) if name is not None: pulumi.set(__self__,", "resource. :param pulumi.ResourceOptions opts: Options for the resource. 
:param pulumi.Input[str]", "pulumi.ResourceOptions(id=id)) __props__ = _WorkspaceState.__new__(_WorkspaceState) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"] = friendly_name", "import _utilities __all__ = ['WorkspaceArgs', 'Workspace'] @pulumi.input_type class WorkspaceArgs: def", "not None: pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter def description(self) ->", "resource_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"resource_group_name\", value) @property @pulumi.getter def tags(self)", "id`, e.g. ```sh $ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ```", "properties used to qualify the lookup. :param str resource_name: The", "def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) class Workspace(pulumi.CustomResource):", "tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): if opts is None:", "the Virtual Desktop Workspace. :param pulumi.Input[str] location: The location/region where", "with the given name, id, and optional extra properties used", "return pulumi.get(self, \"tags\") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self,", "import pulumi import pulumi_azure as azure example = azure.core.ResourceGroup(\"example\", location=\"West", "unique name of the resulting resource. :param pulumi.Input[str] id: The", "None): \"\"\" Manages a Virtual Desktop Workspace. 
## Example Usage", "import pulumi import pulumi.runtime from typing import Any, Mapping, Optional,", "value: Optional[pulumi.Input[str]]): pulumi.set(self, \"location\", value) @property @pulumi.getter def name(self) ->", "__init__(__self__, resource_name: str, args: WorkspaceArgs, opts: Optional[pulumi.ResourceOptions] = None): \"\"\"", "Virtual Desktop Workspace. ## Example Usage ```python import pulumi import", "return Workspace(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]:", "'Workspace'] @pulumi.input_type class WorkspaceArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str], description:", "pulumi.set(self, \"description\", value) @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> Optional[pulumi.Input[str]]: \"\"\"", "\"\"\" return pulumi.get(self, \"name\") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self,", "to be created. \"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"resourceGroupName\") def", "Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Workspace':", "TypeError('Expected resource options to be a ResourceOptions instance') if opts.version", "a new resource to be created. :param pulumi.Input[str] name: The", "The location/region where the Virtual Desktop Workspace is located. Changing", "pulumi.get(self, \"resource_group_name\") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, \"resource_group_name\", value)", "Virtual Desktop Workspace. \"\"\" return pulumi.get(self, \"friendly_name\") @property @pulumi.getter def", "Workspace is located. 
Changing the location/region forces a new resource", "to be a ResourceOptions instance') if opts.version is None: opts.version", "\"location\", location) if name is not None: pulumi.set(__self__, \"name\", name)", "the resource. \"\"\" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _WorkspaceState.__new__(_WorkspaceState)", "and filtering Workspace resources. :param pulumi.Input[str] description: A description for", "\"resource_group_name\", resource_group_name) if tags is not None: pulumi.set(__self__, \"tags\", tags)", "pulumi.set(self, \"name\", value) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> Optional[pulumi.Input[str]]: \"\"\"", "value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) class Workspace(pulumi.CustomResource): @overload def", "pulumi_azure as azure example = azure.core.ResourceGroup(\"example\", location=\"West Europe\") workspace =", "__self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] =", "@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None,", "for the Virtual Desktop Workspace. :param pulumi.Input[str] friendly_name: A friendly", "None) -> 'Workspace': \"\"\" Get an existing Workspace resource's state", "state with the given name, id, and optional extra properties", "= tags return Workspace(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def description(self)", "name of the resulting resource. 
:param pulumi.Input[str] id: The unique", "\"\"\" return pulumi.get(self, \"friendly_name\") @friendly_name.setter def friendly_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self,", "**kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description:", "Workspaces can be imported using the `resource id`, e.g. ```sh", "Workspace resources. :param pulumi.Input[str] description: A description for the Virtual", "and optional extra properties used to qualify the lookup. :param", "@pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Input[str]: \"\"\" The name of the", "\"description\") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"description\", value) @property", "None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected", ":param pulumi.Input[str] id: The unique provider ID of the resource", "value) @pulumi.input_type class _WorkspaceState: def __init__(__self__, *, description: Optional[pulumi.Input[str]] =", "__props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"] = friendly_name __props__.__dict__[\"location\"] = location __props__.__dict__[\"name\"]", "@property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> pulumi.Output[Optional[str]]: \"\"\" A friendly name", "resource_group_name(self) -> pulumi.Output[str]: \"\"\" The name of the resource group", "str resource_name: The unique name of the resulting resource. :param", "to the resource. 
\"\"\" pulumi.set(__self__, \"resource_group_name\", resource_group_name) if description is", ":param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign", "location(self) -> pulumi.Output[str]: \"\"\" The location/region where the Virtual Desktop", "resource_group_name(self) -> Optional[pulumi.Input[str]]: \"\"\" The name of the resource group", "'azure:desktopvirtualization/workspace:Workspace', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str],", "__props__.__dict__[\"name\"] = name if resource_group_name is None and not opts.urn:", "Workspace. ## Example Usage ```python import pulumi import pulumi_azure as", "of tags to assign to the resource. \"\"\" return pulumi.get(self,", "= None): \"\"\" Manages a Virtual Desktop Workspace. ## Example", "resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) ->", "the location/region forces a new resource to be created. :param", "import Any, Mapping, Optional, Sequence, Union, overload from .. import", "workspace = azure.desktopvirtualization.Workspace(\"workspace\", location=example.location, resource_group_name=example.name, friendly_name=\"FriendlyName\", description=\"A description of my", "_WorkspaceState.__new__(_WorkspaceState) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"] = friendly_name __props__.__dict__[\"location\"] = location", "pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None, location:", "using the `resource id`, e.g. ```sh $ pulumi import azure:desktopvirtualization/workspace:Workspace", "@pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: \"\"\" The name of the", "the name forces a new resource to be created. \"\"\"", "new resource to be created. 
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A", "$ pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param str resource_name:", "to the resource. \"\"\" ... @overload def __init__(__self__, resource_name: str,", "the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do", "WorkspaceArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None,", "None, __props__=None): \"\"\" Manages a Virtual Desktop Workspace. ## Example", "A description for the Virtual Desktop Workspace. \"\"\" return pulumi.get(self,", "my workspace\") ``` ## Import Virtual Desktop Workspaces can be", "@pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> Optional[pulumi.Input[str]]: \"\"\" The name of the", "None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Workspace': \"\"\" Get", "*, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None, location:", "@pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: \"\"\" The location/region where the", "friendly_name: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]]", "resource_group_name: The name of the resource group in which to", "\"description\", value) @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> Optional[pulumi.Input[str]]: \"\"\" A", "def __init__(__self__, resource_name: str, args: WorkspaceArgs, opts: Optional[pulumi.ResourceOptions] = None):", "tags: A mapping of tags to assign to the resource.", "are doing! 
*** import warnings import pulumi import pulumi.runtime from", "= location __props__.__dict__[\"name\"] = name __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] =", "\"\"\" Input properties used for looking up and filtering Workspace", "__init__(__self__, *, resource_group_name: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]]", "friendly_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"friendly_name\", value) @property @pulumi.getter def location(self)", "None, name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):", "the resource. :param pulumi.Input[str] description: A description for the Virtual", "\"tags\", tags) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Input[str]: \"\"\" The", "def location(self) -> Optional[pulumi.Input[str]]: \"\"\" The location/region where the Virtual", "@pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: \"\"\" A description for the", "\"name\", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: \"\"\"", "ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options", "str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs)", "location/region forces a new resource to be created. \"\"\" return", "is located. Changing the location/region forces a new resource to", "None: pulumi.set(__self__, \"name\", name) if tags is not None: pulumi.set(__self__,", "return pulumi.get(self, \"friendly_name\") @property @pulumi.getter def location(self) -> pulumi.Output[str]: \"\"\"", "id: The unique provider ID of the resource to lookup.", "created. 
\"\"\" return pulumi.get(self, \"location\") @property @pulumi.getter def name(self) ->", "is None: opts.version = _utilities.get_version() if opts.id is None: if", "edit by hand unless you're certain you know what you", "to be created. :param pulumi.Input[str] name: The name of the", "pulumi.get(self, \"name\") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value)", "tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\" Input properties used for", "if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be", "group in which to create the Virtual Desktop Workspace. Changing", "\"resource_group_name\") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: \"\"\" A", "\"\"\" return pulumi.get(self, \"tags\") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):", "is None and not opts.urn: raise TypeError(\"Missing required property 'resource_group_name'\")", "The name of the resource. :param pulumi.ResourceOptions opts: Options for", "__props__ is not None: raise TypeError('__props__ is only valid when", "created. 
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to", "location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str,", "= None): \"\"\" Input properties used for looking up and", "if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name,", "value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: \"\"\" A", "\"description\") @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> pulumi.Output[Optional[str]]: \"\"\" A friendly", "pulumi.get(self, \"description\") @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) -> pulumi.Output[Optional[str]]: \"\"\" A", "Do not edit by hand unless you're certain you know", "imported using the `resource id`, e.g. ```sh $ pulumi import", "create the Virtual Desktop Workspace. Changing the resource group name", "if name is not None: pulumi.set(__self__, \"name\", name) if tags", "@pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> pulumi.Output[str]: \"\"\" The name of the", "pulumi.set(self, \"location\", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: \"\"\"", "of my workspace\") ``` ## Import Virtual Desktop Workspaces can", "Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\" Input properties used for looking", "None: pulumi.set(__self__, \"resource_group_name\", resource_group_name) if tags is not None: pulumi.set(__self__,", "resource_group_name: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None,", "name of the Virtual Desktop Workspace. Changing the name forces", "of tags to assign to the resource. 
\"\"\" opts =", "location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"location\", value) @property @pulumi.getter def name(self)", "mapping of tags to assign to the resource. \"\"\" ...", "new resource to be created. \"\"\" return pulumi.get(self, \"location\") @location.setter", "= azure.core.ResourceGroup(\"example\", location=\"West Europe\") workspace = azure.desktopvirtualization.Workspace(\"workspace\", location=example.location, resource_group_name=example.name, friendly_name=\"FriendlyName\",", "use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options", "Manages a Virtual Desktop Workspace. ## Example Usage ```python import", "is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise", "to assign to the resource. \"\"\" return pulumi.get(self, \"tags\") @tags.setter", "\"\"\" Get an existing Workspace resource's state with the given", "# *** Do not edit by hand unless you're certain", "**kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else:", "-> pulumi.Output[str]: \"\"\" The name of the Virtual Desktop Workspace.", "Optional, Sequence, Union, overload from .. 
import _utilities __all__ =", "def description(self) -> Optional[pulumi.Input[str]]: \"\"\" A description for the Virtual", "@property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: \"\"\" The name of", "@tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) class", "= None) -> 'Workspace': \"\"\" Get an existing Workspace resource's", "None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,", "resource_group_name __props__.__dict__[\"tags\"] = tags return Workspace(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter", "__init__(__self__, *, description: Optional[pulumi.Input[str]] = None, friendly_name: Optional[pulumi.Input[str]] = None,", "the resource group in which to create the Virtual Desktop", "if tags is not None: pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter(name=\"resourceGroupName\")", "to qualify the lookup. :param str resource_name: The unique name", "__props__.__dict__[\"location\"] = location __props__.__dict__[\"name\"] = name __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"]", "\"location\") @property @pulumi.getter def name(self) -> pulumi.Output[str]: \"\"\" The name", "Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): \"\"\" Input", "resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts:", "to be created. \"\"\" return pulumi.get(self, \"resource_group_name\") @resource_group_name.setter def resource_group_name(self,", "the lookup. :param str resource_name: The unique name of the", "of the resulting resource. :param pulumi.Input[str] id: The unique provider", "resource. 
\"\"\" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _WorkspaceState.__new__(_WorkspaceState) __props__.__dict__[\"description\"]", "isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions", "resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options", "created. \"\"\" return pulumi.get(self, \"resource_group_name\") @property @pulumi.getter def tags(self) ->", "is not None: pulumi.set(__self__, \"friendly_name\", friendly_name) if location is not", "pulumi.get(self, \"resource_group_name\") @resource_group_name.setter def resource_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"resource_group_name\", value)", "value: Optional[pulumi.Input[str]]): pulumi.set(self, \"description\", value) @property @pulumi.getter(name=\"friendlyName\") def friendly_name(self) ->", "def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs,", "tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): \"\"\" Manages a Virtual", "pulumi.Input[str]]] tags: A mapping of tags to assign to the", "to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param", "example = azure.core.ResourceGroup(\"example\", location=\"West Europe\") workspace = azure.desktopvirtualization.Workspace(\"workspace\", location=example.location, resource_group_name=example.name,", "the resource group name forces a new resource to be", "@overload def __init__(__self__, resource_name: str, args: WorkspaceArgs, opts: Optional[pulumi.ResourceOptions] =", "= WorkspaceArgs.__new__(WorkspaceArgs) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"] = friendly_name __props__.__dict__[\"location\"] =", "to be created. \"\"\" return pulumi.get(self, \"location\") @location.setter def location(self,", "created. 
:param pulumi.Input[str] description: A description for the Virtual Desktop", "if location is not None: pulumi.set(__self__, \"location\", location) if name", "the resource. :param WorkspaceArgs args: The arguments to use to", "return pulumi.get(self, \"location\") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"location\",", "\"tags\") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value)", "WorkspaceArgs args: The arguments to use to populate this resource's", "= _utilities.get_version() if opts.id is None: if __props__ is not", "None: if __props__ is not None: raise TypeError('__props__ is only", "Workspace. Changing the resource group name forces a new resource", "is only valid when passed in combination with a valid", "Get an existing Workspace resource's state with the given name,", "description=\"A description of my workspace\") ``` ## Import Virtual Desktop", "be created. :param pulumi.Input[str] resource_group_name: The name of the resource", "existing resource') __props__ = WorkspaceArgs.__new__(WorkspaceArgs) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"] =", "__props__.__dict__[\"friendly_name\"] = friendly_name __props__.__dict__[\"location\"] = location __props__.__dict__[\"name\"] = name if", "class Workspace(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] =", "an existing Workspace resource's state with the given name, id,", "pulumi.Input[str] location: The location/region where the Virtual Desktop Workspace is", "None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): \"\"\" Manages a", "resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. 
\"\"\"", "pulumi.Output[str]: \"\"\" The location/region where the Virtual Desktop Workspace is", "@property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> Optional[pulumi.Input[str]]: \"\"\" The name of", "resource to be created. :param pulumi.Input[str] description: A description for", "\"name\", value) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self) -> Optional[pulumi.Input[str]]: \"\"\" The", "forces a new resource to be created. :param pulumi.Input[str] resource_group_name:", "is not None: pulumi.set(__self__, \"name\", name) if resource_group_name is not", "an existing resource') __props__ = WorkspaceArgs.__new__(WorkspaceArgs) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"]", "file was generated by the Pulumi Terraform Bridge (tfgen) Tool.", "for the resource. :param pulumi.Input[str] description: A description for the", "None and not opts.urn: raise TypeError(\"Missing required property 'resource_group_name'\") __props__.__dict__[\"resource_group_name\"]", "opts: Options for the resource. :param pulumi.Input[str] description: A description", "Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]", "Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): \"\"\" Manages a Virtual Desktop", "Virtual Desktop Workspace. :param pulumi.Input[str] location: The location/region where the", "raise TypeError('__props__ is only valid when passed in combination with", "new resource to be created. 
:param pulumi.Input[str] resource_group_name: The name", "def resource_group_name(self) -> pulumi.Input[str]: \"\"\" The name of the resource", "\"friendly_name\", friendly_name) if location is not None: pulumi.set(__self__, \"location\", location)", "pulumi.get(self, \"friendly_name\") @friendly_name.setter def friendly_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"friendly_name\", value)", "resource_name: The name of the resource. :param WorkspaceArgs args: The", "name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, \"name\", value) @property @pulumi.getter(name=\"resourceGroupName\") def resource_group_name(self)", "<filename>sdk/python/pulumi_azure/desktopvirtualization/workspace.py # coding=utf-8 # *** WARNING: this file was generated", "Optional[pulumi.Input[str]]): pulumi.set(self, \"location\", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]:", "\"location\", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: \"\"\" The", "a valid opts.id to get an existing resource') __props__ =", "None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None)", "a new resource to be created. \"\"\" return pulumi.get(self, \"location\")", "@pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: \"\"\" A description for the", "None: raise TypeError('__props__ is only valid when passed in combination", "is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs)", "resource. \"\"\" ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args,", "TypeError(\"Missing required property 'resource_group_name'\") __props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] = tags", "name: The name of the Virtual Desktop Workspace. 
Changing the", "friendly name for the Virtual Desktop Workspace. :param pulumi.Input[str] location:", "def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, \"tags\", value) @pulumi.input_type class", "existing Workspace resource's state with the given name, id, and", "\"tags\", value) @pulumi.input_type class _WorkspaceState: def __init__(__self__, *, description: Optional[pulumi.Input[str]]", "pulumi.set(__self__, \"tags\", tags) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: \"\"\"", "pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts,", "if name is not None: pulumi.set(__self__, \"name\", name) if resource_group_name", "pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _WorkspaceState.__new__(_WorkspaceState) __props__.__dict__[\"description\"] = description __props__.__dict__[\"friendly_name\"] =", "@property @pulumi.getter def location(self) -> pulumi.Output[str]: \"\"\" The location/region where", "__props__.__dict__[\"resource_group_name\"] = resource_group_name __props__.__dict__[\"tags\"] = tags super(Workspace, __self__).__init__( 'azure:desktopvirtualization/workspace:Workspace', resource_name,", "None: opts.version = _utilities.get_version() if opts.id is None: if __props__", ":param str resource_name: The name of the resource. :param pulumi.ResourceOptions", "pulumi import azure:desktopvirtualization/workspace:Workspace example /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myGroup1/providers/Microsoft.DesktopVirtualization/workspaces/myworkspace ``` :param str resource_name: The", "``` ## Import Virtual Desktop Workspaces can be imported using" ]
[ "'/formResponse') form_data = {'draftResponse': [], 'pageHistory': 0} for v in", "warnings.warn('Unknown Question: {}'.format(k), RuntimeWarning) if verbose: print(form_data) user_agent = {'Referer':", "not be accessed, check internet connectivity, \\ proxies and permissions:", "probably get the whole sheet all_vals = \"{base_url}/{cols}?key={kk}\".format(base_url=base_url, cols=columns, kk=kk)", "get_name(f): return get_names(f)[0] if len( get_names(f)) > 0 else 'unknown'", "kk, columns=\"A:AG\"): \"\"\" Gets the sheet as a list of", "v in f.attrs.items() if 'label' in k] def get_name(f): return", "for k, v in f.attrs.items() if 'label' in k] def", "a list of Dicts (directly importable to Pandas) :return: \"\"\"", "import warnings import json import pandas as pd from six.moves.urllib.parse", "to Pandas) :return: \"\"\" try: # TODO: we should probably", "pd.DataFrame([{}]) def sheet_api_url(sheet_id): return \"https://sheets.googleapis.com/v4/spreadsheets/{id}/values\".format( id=sheet_id) def get_questions(in_url): res =", "for reading and writing results to google sheets\"\"\" from bs4", "0} for v in cur_questions.values(): form_data[v] = '' for k,", "get_questions(in_url): res = urlopen(in_url) soup = BeautifulSoup(res.read(), 'html.parser') def get_names(f):", "> 0 else 'unknown' all_questions = soup.form.findChildren( attrs={'name': lambda x:", "[], 'pageHistory': 0} for v in cur_questions.values(): form_data[v] = ''", "v else: warnings.warn('Unknown Question: {}'.format(k), RuntimeWarning) if verbose: print(form_data) user_agent", "as e: warnings.warn( 'Sheet could not be accessed, check internet", "def submit_response(form_url, cur_questions, verbose=False, **answers): submit_url = form_url.replace('/viewform', '/formResponse') form_data", "= \"AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4\" def get_task_sheet(in_task): return get_sheet_as_df(sheet_api_url(in_task.sheet_id), _CELLSET_ID) def get_sheet_as_df(base_url, kk,", "form_url, 'User-Agent': \"Mozilla/5.0 
(X11; Linux i686) AppleWebKit/537\\ .36 (KHTML, like", "frow = t_data.pop(0) return pd.DataFrame([ dict([(key, '' if idx >=", "'User-Agent': \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537\\ .36 (KHTML, like Gecko)", "attrs={'name': lambda x: x and x.startswith('entry.')}) return {get_name(q): q['name'] for", "x and x.startswith('entry.')}) return {get_name(q): q['name'] for q in all_questions}", "as pd from six.moves.urllib.parse import urlparse, parse_qs from six.moves.urllib.request import", "for idx, key in enumerate(frow)]) for irow in t_data]) except", "\\ proxies and permissions: {}'.format( e)) return pd.DataFrame([{}]) def sheet_api_url(sheet_id):", "soup = BeautifulSoup(res.read(), 'html.parser') def get_names(f): return [v for k,", "in t_data]) except IOError as e: warnings.warn( 'Sheet could not", "in answers.items(): if k in cur_questions: form_data[cur_questions[k]] = v else:", "i686) AppleWebKit/537\\ .36 (KHTML, like Gecko) Chrome/28.0.1500.52 Safari/537.36\"} return requests.post(submit_url,", "0 else 'unknown' all_questions = soup.form.findChildren( attrs={'name': lambda x: x", "cur_questions: form_data[cur_questions[k]] = v else: warnings.warn('Unknown Question: {}'.format(k), RuntimeWarning) if", "from six.moves.urllib.parse import urlparse, parse_qs from six.moves.urllib.request import urlopen _CELLSET_ID", "for irow in t_data]) except IOError as e: warnings.warn( 'Sheet", "connectivity, \\ proxies and permissions: {}'.format( e)) return pd.DataFrame([{}]) def", "of Dicts (directly importable to Pandas) :return: \"\"\" try: #", "for q in all_questions} def submit_response(form_url, cur_questions, verbose=False, **answers): submit_url", "else irow[idx]) for idx, key in enumerate(frow)]) for irow in", "the whole sheet all_vals = \"{base_url}/{cols}?key={kk}\".format(base_url=base_url, cols=columns, kk=kk) t_data =", "return pd.DataFrame([{}]) def sheet_api_url(sheet_id): return \"https://sheets.googleapis.com/v4/spreadsheets/{id}/values\".format( 
id=sheet_id) def get_questions(in_url): res", "enumerate(frow)]) for irow in t_data]) except IOError as e: warnings.warn(", "submit_response(form_url, cur_questions, verbose=False, **answers): submit_url = form_url.replace('/viewform', '/formResponse') form_data =", "print(form_data) user_agent = {'Referer': form_url, 'User-Agent': \"Mozilla/5.0 (X11; Linux i686)", "res = urlopen(in_url) soup = BeautifulSoup(res.read(), 'html.parser') def get_names(f): return", "in f.attrs.items() if 'label' in k] def get_name(f): return get_names(f)[0]", "= {'draftResponse': [], 'pageHistory': 0} for v in cur_questions.values(): form_data[v]", "six.moves.urllib.request import urlopen _CELLSET_ID = \"AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4\" def get_task_sheet(in_task): return get_sheet_as_df(sheet_api_url(in_task.sheet_id),", "_CELLSET_ID = \"AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4\" def get_task_sheet(in_task): return get_sheet_as_df(sheet_api_url(in_task.sheet_id), _CELLSET_ID) def get_sheet_as_df(base_url,", "get_names(f)[0] if len( get_names(f)) > 0 else 'unknown' all_questions =", "proxies and permissions: {}'.format( e)) return pd.DataFrame([{}]) def sheet_api_url(sheet_id): return", "return [v for k, v in f.attrs.items() if 'label' in", "if len( get_names(f)) > 0 else 'unknown' all_questions = soup.form.findChildren(", "if k in cur_questions: form_data[cur_questions[k]] = v else: warnings.warn('Unknown Question:", "else 'unknown' all_questions = soup.form.findChildren( attrs={'name': lambda x: x and", "id=sheet_id) def get_questions(in_url): res = urlopen(in_url) soup = BeautifulSoup(res.read(), 'html.parser')", "AppleWebKit/537\\ .36 (KHTML, like Gecko) Chrome/28.0.1500.52 Safari/537.36\"} return requests.post(submit_url, data=form_data,", "'values'] frow = t_data.pop(0) return pd.DataFrame([ dict([(key, '' if idx", "return pd.DataFrame([ dict([(key, '' if idx >= len(irow) else irow[idx])", "x.startswith('entry.')}) return {get_name(q): q['name'] for q in all_questions} 
def submit_response(form_url,", "warnings import json import pandas as pd from six.moves.urllib.parse import", "q in all_questions} def submit_response(form_url, cur_questions, verbose=False, **answers): submit_url =", "internet connectivity, \\ proxies and permissions: {}'.format( e)) return pd.DataFrame([{}])", "whole sheet all_vals = \"{base_url}/{cols}?key={kk}\".format(base_url=base_url, cols=columns, kk=kk) t_data = json.loads(urlopen(all_vals).read().decode('latin1'))[", "reading and writing results to google sheets\"\"\" from bs4 import", "parse_qs from six.moves.urllib.request import urlopen _CELLSET_ID = \"AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4\" def get_task_sheet(in_task):", "Pandas) :return: \"\"\" try: # TODO: we should probably get", "warnings.warn( 'Sheet could not be accessed, check internet connectivity, \\", "answers.items(): if k in cur_questions: form_data[cur_questions[k]] = v else: warnings.warn('Unknown", "user_agent = {'Referer': form_url, 'User-Agent': \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537\\", ".36 (KHTML, like Gecko) Chrome/28.0.1500.52 Safari/537.36\"} return requests.post(submit_url, data=form_data, headers=user_agent)", "for v in cur_questions.values(): form_data[v] = '' for k, v", "submit_url = form_url.replace('/viewform', '/formResponse') form_data = {'draftResponse': [], 'pageHistory': 0}", "should probably get the whole sheet all_vals = \"{base_url}/{cols}?key={kk}\".format(base_url=base_url, cols=columns,", "= {'Referer': form_url, 'User-Agent': \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537\\ .36", "def get_names(f): return [v for k, v in f.attrs.items() if", "pd from six.moves.urllib.parse import urlparse, parse_qs from six.moves.urllib.request import urlopen", "json import pandas as pd from six.moves.urllib.parse import urlparse, parse_qs", "permissions: {}'.format( e)) return pd.DataFrame([{}]) def sheet_api_url(sheet_id): return \"https://sheets.googleapis.com/v4/spreadsheets/{id}/values\".format( id=sheet_id)", 
"{}'.format( e)) return pd.DataFrame([{}]) def sheet_api_url(sheet_id): return \"https://sheets.googleapis.com/v4/spreadsheets/{id}/values\".format( id=sheet_id) def", "k, v in answers.items(): if k in cur_questions: form_data[cur_questions[k]] =", "cols=columns, kk=kk) t_data = json.loads(urlopen(all_vals).read().decode('latin1'))[ 'values'] frow = t_data.pop(0) return", "f.attrs.items() if 'label' in k] def get_name(f): return get_names(f)[0] if", "from bs4 import BeautifulSoup import requests import warnings import json", "all_questions} def submit_response(form_url, cur_questions, verbose=False, **answers): submit_url = form_url.replace('/viewform', '/formResponse')", "k in cur_questions: form_data[cur_questions[k]] = v else: warnings.warn('Unknown Question: {}'.format(k),", "the sheet as a list of Dicts (directly importable to", "t_data.pop(0) return pd.DataFrame([ dict([(key, '' if idx >= len(irow) else", "= soup.form.findChildren( attrs={'name': lambda x: x and x.startswith('entry.')}) return {get_name(q):", "get_task_sheet(in_task): return get_sheet_as_df(sheet_api_url(in_task.sheet_id), _CELLSET_ID) def get_sheet_as_df(base_url, kk, columns=\"A:AG\"): \"\"\" Gets", "TODO: we should probably get the whole sheet all_vals =", "\"{base_url}/{cols}?key={kk}\".format(base_url=base_url, cols=columns, kk=kk) t_data = json.loads(urlopen(all_vals).read().decode('latin1'))[ 'values'] frow = t_data.pop(0)", "def get_questions(in_url): res = urlopen(in_url) soup = BeautifulSoup(res.read(), 'html.parser') def", "sheets\"\"\" from bs4 import BeautifulSoup import requests import warnings import", "Question: {}'.format(k), RuntimeWarning) if verbose: print(form_data) user_agent = {'Referer': form_url,", "to google sheets\"\"\" from bs4 import BeautifulSoup import requests import", "urlopen _CELLSET_ID = \"AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4\" def get_task_sheet(in_task): return get_sheet_as_df(sheet_api_url(in_task.sheet_id), _CELLSET_ID) def", "in all_questions} def 
submit_response(form_url, cur_questions, verbose=False, **answers): submit_url = form_url.replace('/viewform',", "form_data[cur_questions[k]] = v else: warnings.warn('Unknown Question: {}'.format(k), RuntimeWarning) if verbose:", "bs4 import BeautifulSoup import requests import warnings import json import", ">= len(irow) else irow[idx]) for idx, key in enumerate(frow)]) for", "be accessed, check internet connectivity, \\ proxies and permissions: {}'.format(", "t_data]) except IOError as e: warnings.warn( 'Sheet could not be", "and permissions: {}'.format( e)) return pd.DataFrame([{}]) def sheet_api_url(sheet_id): return \"https://sheets.googleapis.com/v4/spreadsheets/{id}/values\".format(", "\"\"\"Code for reading and writing results to google sheets\"\"\" from", "form_data[v] = '' for k, v in answers.items(): if k", "except IOError as e: warnings.warn( 'Sheet could not be accessed,", "= form_url.replace('/viewform', '/formResponse') form_data = {'draftResponse': [], 'pageHistory': 0} for", "= v else: warnings.warn('Unknown Question: {}'.format(k), RuntimeWarning) if verbose: print(form_data)", "get_sheet_as_df(sheet_api_url(in_task.sheet_id), _CELLSET_ID) def get_sheet_as_df(base_url, kk, columns=\"A:AG\"): \"\"\" Gets the sheet", "{'Referer': form_url, 'User-Agent': \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537\\ .36 (KHTML,", "import urlparse, parse_qs from six.moves.urllib.request import urlopen _CELLSET_ID = \"AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4\"", "kk=kk) t_data = json.loads(urlopen(all_vals).read().decode('latin1'))[ 'values'] frow = t_data.pop(0) return pd.DataFrame([", "= t_data.pop(0) return pd.DataFrame([ dict([(key, '' if idx >= len(irow)", "k, v in f.attrs.items() if 'label' in k] def get_name(f):", "'unknown' all_questions = soup.form.findChildren( attrs={'name': lambda x: x and x.startswith('entry.')})", "[v for k, v in f.attrs.items() if 'label' in k]", "form_url.replace('/viewform', '/formResponse') form_data = {'draftResponse': [], 
'pageHistory': 0} for v", "from six.moves.urllib.request import urlopen _CELLSET_ID = \"AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4\" def get_task_sheet(in_task): return", "= \"{base_url}/{cols}?key={kk}\".format(base_url=base_url, cols=columns, kk=kk) t_data = json.loads(urlopen(all_vals).read().decode('latin1'))[ 'values'] frow =", "cur_questions, verbose=False, **answers): submit_url = form_url.replace('/viewform', '/formResponse') form_data = {'draftResponse':", "get_sheet_as_df(base_url, kk, columns=\"A:AG\"): \"\"\" Gets the sheet as a list", "{'draftResponse': [], 'pageHistory': 0} for v in cur_questions.values(): form_data[v] =", "list of Dicts (directly importable to Pandas) :return: \"\"\" try:", "get_names(f): return [v for k, v in f.attrs.items() if 'label'", "lambda x: x and x.startswith('entry.')}) return {get_name(q): q['name'] for q", "v in answers.items(): if k in cur_questions: form_data[cur_questions[k]] = v", "urlparse, parse_qs from six.moves.urllib.request import urlopen _CELLSET_ID = \"AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4\" def", "importable to Pandas) :return: \"\"\" try: # TODO: we should", "form_data = {'draftResponse': [], 'pageHistory': 0} for v in cur_questions.values():", "columns=\"A:AG\"): \"\"\" Gets the sheet as a list of Dicts", "\"AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4\" def get_task_sheet(in_task): return get_sheet_as_df(sheet_api_url(in_task.sheet_id), _CELLSET_ID) def get_sheet_as_df(base_url, kk, columns=\"A:AG\"):", "\"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537\\ .36 (KHTML, like Gecko) Chrome/28.0.1500.52", "(directly importable to Pandas) :return: \"\"\" try: # TODO: we", "as a list of Dicts (directly importable to Pandas) :return:", "google sheets\"\"\" from bs4 import BeautifulSoup import requests import warnings", "Gets the sheet as a list of Dicts (directly importable", "we should probably get the whole sheet all_vals = \"{base_url}/{cols}?key={kk}\".format(base_url=base_url,", "return 
\"https://sheets.googleapis.com/v4/spreadsheets/{id}/values\".format( id=sheet_id) def get_questions(in_url): res = urlopen(in_url) soup =", "{}'.format(k), RuntimeWarning) if verbose: print(form_data) user_agent = {'Referer': form_url, 'User-Agent':", "if verbose: print(form_data) user_agent = {'Referer': form_url, 'User-Agent': \"Mozilla/5.0 (X11;", "x: x and x.startswith('entry.')}) return {get_name(q): q['name'] for q in", "return {get_name(q): q['name'] for q in all_questions} def submit_response(form_url, cur_questions,", "return get_sheet_as_df(sheet_api_url(in_task.sheet_id), _CELLSET_ID) def get_sheet_as_df(base_url, kk, columns=\"A:AG\"): \"\"\" Gets the", "pandas as pd from six.moves.urllib.parse import urlparse, parse_qs from six.moves.urllib.request", "key in enumerate(frow)]) for irow in t_data]) except IOError as", "def sheet_api_url(sheet_id): return \"https://sheets.googleapis.com/v4/spreadsheets/{id}/values\".format( id=sheet_id) def get_questions(in_url): res = urlopen(in_url)", "import BeautifulSoup import requests import warnings import json import pandas", "Linux i686) AppleWebKit/537\\ .36 (KHTML, like Gecko) Chrome/28.0.1500.52 Safari/537.36\"} return", "q['name'] for q in all_questions} def submit_response(form_url, cur_questions, verbose=False, **answers):", "sheet_api_url(sheet_id): return \"https://sheets.googleapis.com/v4/spreadsheets/{id}/values\".format( id=sheet_id) def get_questions(in_url): res = urlopen(in_url) soup", "e: warnings.warn( 'Sheet could not be accessed, check internet connectivity,", ":return: \"\"\" try: # TODO: we should probably get the", "verbose: print(form_data) user_agent = {'Referer': form_url, 'User-Agent': \"Mozilla/5.0 (X11; Linux", "idx >= len(irow) else irow[idx]) for idx, key in enumerate(frow)])", "'html.parser') def get_names(f): return [v for k, v in f.attrs.items()", "writing results to google sheets\"\"\" from bs4 import BeautifulSoup import", "{get_name(q): q['name'] for q in all_questions} def 
submit_response(form_url, cur_questions, verbose=False,", "for k, v in answers.items(): if k in cur_questions: form_data[cur_questions[k]]", "= BeautifulSoup(res.read(), 'html.parser') def get_names(f): return [v for k, v", "cur_questions.values(): form_data[v] = '' for k, v in answers.items(): if", "IOError as e: warnings.warn( 'Sheet could not be accessed, check", "'pageHistory': 0} for v in cur_questions.values(): form_data[v] = '' for", "<reponame>betatim/jupyanno \"\"\"Code for reading and writing results to google sheets\"\"\"", "# TODO: we should probably get the whole sheet all_vals", "could not be accessed, check internet connectivity, \\ proxies and", "BeautifulSoup import requests import warnings import json import pandas as", "idx, key in enumerate(frow)]) for irow in t_data]) except IOError", "\"\"\" Gets the sheet as a list of Dicts (directly", "Dicts (directly importable to Pandas) :return: \"\"\" try: # TODO:", "if idx >= len(irow) else irow[idx]) for idx, key in", "in k] def get_name(f): return get_names(f)[0] if len( get_names(f)) >", "k] def get_name(f): return get_names(f)[0] if len( get_names(f)) > 0", "def get_name(f): return get_names(f)[0] if len( get_names(f)) > 0 else", "in cur_questions: form_data[cur_questions[k]] = v else: warnings.warn('Unknown Question: {}'.format(k), RuntimeWarning)", "pd.DataFrame([ dict([(key, '' if idx >= len(irow) else irow[idx]) for", "accessed, check internet connectivity, \\ proxies and permissions: {}'.format( e))", "return get_names(f)[0] if len( get_names(f)) > 0 else 'unknown' all_questions", "RuntimeWarning) if verbose: print(form_data) user_agent = {'Referer': form_url, 'User-Agent': \"Mozilla/5.0", "v in cur_questions.values(): form_data[v] = '' for k, v in", "_CELLSET_ID) def get_sheet_as_df(base_url, kk, columns=\"A:AG\"): \"\"\" Gets the sheet as", "import requests import warnings import json import pandas as pd", "get the whole sheet all_vals = 
\"{base_url}/{cols}?key={kk}\".format(base_url=base_url, cols=columns, kk=kk) t_data", "len( get_names(f)) > 0 else 'unknown' all_questions = soup.form.findChildren( attrs={'name':", "**answers): submit_url = form_url.replace('/viewform', '/formResponse') form_data = {'draftResponse': [], 'pageHistory':", "irow in t_data]) except IOError as e: warnings.warn( 'Sheet could", "else: warnings.warn('Unknown Question: {}'.format(k), RuntimeWarning) if verbose: print(form_data) user_agent =", "= '' for k, v in answers.items(): if k in", "and writing results to google sheets\"\"\" from bs4 import BeautifulSoup", "def get_sheet_as_df(base_url, kk, columns=\"A:AG\"): \"\"\" Gets the sheet as a", "dict([(key, '' if idx >= len(irow) else irow[idx]) for idx,", "and x.startswith('entry.')}) return {get_name(q): q['name'] for q in all_questions} def", "requests import warnings import json import pandas as pd from", "t_data = json.loads(urlopen(all_vals).read().decode('latin1'))[ 'values'] frow = t_data.pop(0) return pd.DataFrame([ dict([(key,", "\"https://sheets.googleapis.com/v4/spreadsheets/{id}/values\".format( id=sheet_id) def get_questions(in_url): res = urlopen(in_url) soup = BeautifulSoup(res.read(),", "irow[idx]) for idx, key in enumerate(frow)]) for irow in t_data])", "in enumerate(frow)]) for irow in t_data]) except IOError as e:", "'label' in k] def get_name(f): return get_names(f)[0] if len( get_names(f))", "e)) return pd.DataFrame([{}]) def sheet_api_url(sheet_id): return \"https://sheets.googleapis.com/v4/spreadsheets/{id}/values\".format( id=sheet_id) def get_questions(in_url):", "'' for k, v in answers.items(): if k in cur_questions:", "len(irow) else irow[idx]) for idx, key in enumerate(frow)]) for irow", "(X11; Linux i686) AppleWebKit/537\\ .36 (KHTML, like Gecko) Chrome/28.0.1500.52 Safari/537.36\"}", "json.loads(urlopen(all_vals).read().decode('latin1'))[ 'values'] frow = t_data.pop(0) return pd.DataFrame([ dict([(key, '' if", "all_questions = 
soup.form.findChildren( attrs={'name': lambda x: x and x.startswith('entry.')}) return", "check internet connectivity, \\ proxies and permissions: {}'.format( e)) return", "\"\"\" try: # TODO: we should probably get the whole", "try: # TODO: we should probably get the whole sheet", "results to google sheets\"\"\" from bs4 import BeautifulSoup import requests", "if 'label' in k] def get_name(f): return get_names(f)[0] if len(", "sheet as a list of Dicts (directly importable to Pandas)", "'Sheet could not be accessed, check internet connectivity, \\ proxies", "'' if idx >= len(irow) else irow[idx]) for idx, key", "import urlopen _CELLSET_ID = \"AIzaSyC8Zo-9EbXgHfqNzDxVb_YS_IIZBWtvoJ4\" def get_task_sheet(in_task): return get_sheet_as_df(sheet_api_url(in_task.sheet_id), _CELLSET_ID)", "urlopen(in_url) soup = BeautifulSoup(res.read(), 'html.parser') def get_names(f): return [v for", "= urlopen(in_url) soup = BeautifulSoup(res.read(), 'html.parser') def get_names(f): return [v", "def get_task_sheet(in_task): return get_sheet_as_df(sheet_api_url(in_task.sheet_id), _CELLSET_ID) def get_sheet_as_df(base_url, kk, columns=\"A:AG\"): \"\"\"", "all_vals = \"{base_url}/{cols}?key={kk}\".format(base_url=base_url, cols=columns, kk=kk) t_data = json.loads(urlopen(all_vals).read().decode('latin1'))[ 'values'] frow", "verbose=False, **answers): submit_url = form_url.replace('/viewform', '/formResponse') form_data = {'draftResponse': [],", "import pandas as pd from six.moves.urllib.parse import urlparse, parse_qs from", "BeautifulSoup(res.read(), 'html.parser') def get_names(f): return [v for k, v in", "= json.loads(urlopen(all_vals).read().decode('latin1'))[ 'values'] frow = t_data.pop(0) return pd.DataFrame([ dict([(key, ''", "get_names(f)) > 0 else 'unknown' all_questions = soup.form.findChildren( attrs={'name': lambda", "import json import pandas as pd from six.moves.urllib.parse import urlparse,", "soup.form.findChildren( attrs={'name': lambda x: x and x.startswith('entry.')}) 
return {get_name(q): q['name']", "sheet all_vals = \"{base_url}/{cols}?key={kk}\".format(base_url=base_url, cols=columns, kk=kk) t_data = json.loads(urlopen(all_vals).read().decode('latin1'))[ 'values']", "six.moves.urllib.parse import urlparse, parse_qs from six.moves.urllib.request import urlopen _CELLSET_ID =", "in cur_questions.values(): form_data[v] = '' for k, v in answers.items():" ]
[ "98 / \\ 96 84 / \\ / \\ 92", "self.data[j],self.data[k] k = j def extractMax(self): ret = self.data[1] self.data[1],", "= 31 M = 100 heap = Maxheap(N) for i", "82 32 21 64 60 7 44 63 63 '''", "+ 1]= arr[i] self.count = cpacity for i in range(self.count", "self.count = 0 else: for i in range(0,cpacity): self.data[i +", "for i in range(0,N): k = random.randint(1, M) heap.insert(k) #", "+= 1 if self.data[k] > self.data[j]: break self.data[k], self.data[j] =", "self.data[k], self.data[j] = self.data[j],self.data[k] k = j def extractMax(self): ret", "\\ / \\ / \\ / \\ / \\ /", "self.data[i + 1]= arr[i] self.count = cpacity for i in", "else: for i in range(0,cpacity): self.data[i + 1]= arr[i] self.count", "= random.randint(1, M) heap.insert(k) # arr = [random.randint(1,M) for i", "94 82 32 21 64 60 7 44 63 63", "= data self.count += 1 self.__shiftUp(self.count) def __shifDown(self,k): while k", "self.data[int(k / 2)]: self.data[k],self.data[int(k / 2)] = self.data[int(k / 2)],", "98 51 7 17 94 82 32 21 64 60", "data self.count += 1 self.__shiftUp(self.count) def __shifDown(self,k): while k *", "\\ / \\ 40 51 98 51 7 17 94", "heap.insert(k) # arr = [random.randint(1,M) for i in range(N)] #", "-= 1 self.__shifDown(1) return ret if __name__ == '__main__': N", "1 self.__shifDown(1) return ret if __name__ == '__main__': N =", "= None): self.data = [None] * (cpacity + 1) self.cpacity", "i in range(self.count / 2, 0, -1): self.__shifDown(i) def size(self):", "(cpacity + 1) self.cpacity = cpacity if arr is None:", "92 82 78 47 / \\ / \\ / \\", "def __init__(self,cpacity,arr = None): self.data = [None] * (cpacity +", "100 heap = Maxheap(N) for i in range(0,N): k =", "range(self.count / 2, 0, -1): self.__shifDown(i) def size(self): return self.count", "self.data[k] > self.data[int(k / 2)]: self.data[k],self.data[int(k / 2)] = self.data[int(k", "/ \\ / \\ / \\ / \\ 40 51", "= self.data[self.count], self.data[1] self.count -= 1 self.__shifDown(1) return ret if", "1]= arr[i] 
self.count = cpacity for i in range(self.count /", "def isEmpty(self): return self.count == 0 def __shiftUp(self,k): while k", "k > 1 and self.data[k] > self.data[int(k / 2)]: self.data[k],self.data[int(k", "i in range(N)] # heap = Maxheap(len(arr),arr) print(heap.size()) print(heap.data) print(heap.extractMax())", "__init__(self,cpacity,arr = None): self.data = [None] * (cpacity + 1)", "最大堆实现 98 / \\ 96 84 / \\ / \\", "/ 2, 0, -1): self.__shifDown(i) def size(self): return self.count def", "self.count += 1 self.__shiftUp(self.count) def __shifDown(self,k): while k * 2", "M = 100 heap = Maxheap(N) for i in range(0,N):", "M) heap.insert(k) # arr = [random.randint(1,M) for i in range(N)]", "44 63 63 ''' import random class Maxheap(object): def __init__(self,cpacity,arr", "is None: self.count = 0 else: for i in range(0,cpacity):", "j = k * 2 if self.count >= j +", "import random class Maxheap(object): def __init__(self,cpacity,arr = None): self.data =", "if self.data[k] > self.data[j]: break self.data[k], self.data[j] = self.data[j],self.data[k] k", "> self.data[j]: j += 1 if self.data[k] > self.data[j]: break", "+ 1] = data self.count += 1 self.__shiftUp(self.count) def __shifDown(self,k):", "\\ / \\ / \\ 40 51 98 51 7", "self.data[k] > self.data[j]: break self.data[k], self.data[j] = self.data[j],self.data[k] k =", "1] = data self.count += 1 self.__shiftUp(self.count) def __shifDown(self,k): while", "/ \\ 96 84 / \\ / \\ 92 82", "return self.count def isEmpty(self): return self.count == 0 def __shiftUp(self,k):", "None): self.data = [None] * (cpacity + 1) self.cpacity =", "self.count = cpacity for i in range(self.count / 2, 0,", "= Maxheap(N) for i in range(0,N): k = random.randint(1, M)", "# arr = [random.randint(1,M) for i in range(N)] # heap", "-*- coding: utf-8 -* ''' 最大堆实现 98 / \\ 96", "1) self.cpacity = cpacity if arr is None: self.count =", "while k > 1 and self.data[k] > self.data[int(k / 2)]:", "__shifDown(self,k): while k * 2 <= self.count: j = k", "class 
Maxheap(object): def __init__(self,cpacity,arr = None): self.data = [None] *", "self.data[j + 1] > self.data[j]: j += 1 if self.data[k]", "> self.data[j]: break self.data[k], self.data[j] = self.data[j],self.data[k] k = j", "7 44 63 63 ''' import random class Maxheap(object): def", "\\ 92 82 78 47 / \\ / \\ /", "Maxheap(N) for i in range(0,N): k = random.randint(1, M) heap.insert(k)", "def size(self): return self.count def isEmpty(self): return self.count == 0", "''' 最大堆实现 98 / \\ 96 84 / \\ /", "\\ / \\ 33 26 51 85 50 15 44", "+ 1 and self.data[j + 1] > self.data[j]: j +=", "51 85 50 15 44 60 / \\ / \\", "def insert(self,data): self.data[self.count + 1] = data self.count += 1", "j def extractMax(self): ret = self.data[1] self.data[1], self.data[self.count] = self.data[self.count],", "\\ 33 26 51 85 50 15 44 60 /", "range(0,cpacity): self.data[i + 1]= arr[i] self.count = cpacity for i", "47 / \\ / \\ / \\ / \\ 33", "in range(self.count / 2, 0, -1): self.__shifDown(i) def size(self): return", "self.data[1], self.data[self.count] = self.data[self.count], self.data[1] self.count -= 1 self.__shifDown(1) return", "+= 1 self.__shiftUp(self.count) def __shifDown(self,k): while k * 2 <=", "/ \\ 92 82 78 47 / \\ / \\", "size(self): return self.count def isEmpty(self): return self.count == 0 def", "/ \\ / \\ 40 51 98 51 7 17", "== 0 def __shiftUp(self,k): while k > 1 and self.data[k]", "15 44 60 / \\ / \\ / \\ /", "''' import random class Maxheap(object): def __init__(self,cpacity,arr = None): self.data", "for i in range(0,cpacity): self.data[i + 1]= arr[i] self.count =", "__shiftUp(self,k): while k > 1 and self.data[k] > self.data[int(k /", "\\ / \\ / \\ / \\ / \\ 40", "64 60 7 44 63 63 ''' import random class", "__name__ == '__main__': N = 31 M = 100 heap", "/ 2)], self.data[k] k =int(k/2) def insert(self,data): self.data[self.count + 1]", "arr[i] self.count = cpacity for i in range(self.count / 2,", "/ \\ / \\ / \\ 40 51 98 51", "82 78 47 / \\ / \\ / \\ /", 
"self.data[k],self.data[int(k / 2)] = self.data[int(k / 2)], self.data[k] k =int(k/2)", "\\ / \\ / \\ / \\ 33 26 51", "self.count == 0 def __shiftUp(self,k): while k > 1 and", "* 2 if self.count >= j + 1 and self.data[j", "== '__main__': N = 31 M = 100 heap =", "self.data = [None] * (cpacity + 1) self.cpacity = cpacity", "+ 1] > self.data[j]: j += 1 if self.data[k] >", "def extractMax(self): ret = self.data[1] self.data[1], self.data[self.count] = self.data[self.count], self.data[1]", "* (cpacity + 1) self.cpacity = cpacity if arr is", "self.data[1] self.data[1], self.data[self.count] = self.data[self.count], self.data[1] self.count -= 1 self.__shifDown(1)", "in range(0,cpacity): self.data[i + 1]= arr[i] self.count = cpacity for", "/ 2)]: self.data[k],self.data[int(k / 2)] = self.data[int(k / 2)], self.data[k]", "84 / \\ / \\ 92 82 78 47 /", "self.data[j]: j += 1 if self.data[k] > self.data[j]: break self.data[k],", "85 50 15 44 60 / \\ / \\ /", "def __shifDown(self,k): while k * 2 <= self.count: j =", "/ \\ / \\ / \\ / \\ / \\", "self.count def isEmpty(self): return self.count == 0 def __shiftUp(self,k): while", "= cpacity for i in range(self.count / 2, 0, -1):", "if self.count >= j + 1 and self.data[j + 1]", "1 if self.data[k] > self.data[j]: break self.data[k], self.data[j] = self.data[j],self.data[k]", "self.__shifDown(i) def size(self): return self.count def isEmpty(self): return self.count ==", "coding: utf-8 -* ''' 最大堆实现 98 / \\ 96 84", "= k * 2 if self.count >= j + 1", "-1): self.__shifDown(i) def size(self): return self.count def isEmpty(self): return self.count", "<= self.count: j = k * 2 if self.count >=", "= [None] * (cpacity + 1) self.cpacity = cpacity if", "1 and self.data[k] > self.data[int(k / 2)]: self.data[k],self.data[int(k / 2)]", "63 ''' import random class Maxheap(object): def __init__(self,cpacity,arr = None):", "extractMax(self): ret = self.data[1] self.data[1], self.data[self.count] = self.data[self.count], self.data[1] self.count", "33 26 
51 85 50 15 44 60 / \\", "None: self.count = 0 else: for i in range(0,cpacity): self.data[i", "Maxheap(object): def __init__(self,cpacity,arr = None): self.data = [None] * (cpacity", "= 100 heap = Maxheap(N) for i in range(0,N): k", "= [random.randint(1,M) for i in range(N)] # heap = Maxheap(len(arr),arr)", "self.count >= j + 1 and self.data[j + 1] >", "random.randint(1, M) heap.insert(k) # arr = [random.randint(1,M) for i in", "break self.data[k], self.data[j] = self.data[j],self.data[k] k = j def extractMax(self):", "/ \\ / \\ / \\ / \\ 33 26", "cpacity for i in range(self.count / 2, 0, -1): self.__shifDown(i)", "1 and self.data[j + 1] > self.data[j]: j += 1", "= j def extractMax(self): ret = self.data[1] self.data[1], self.data[self.count] =", "# -*- coding: utf-8 -* ''' 最大堆实现 98 / \\", "k = random.randint(1, M) heap.insert(k) # arr = [random.randint(1,M) for", "40 51 98 51 7 17 94 82 32 21", "51 7 17 94 82 32 21 64 60 7", "[random.randint(1,M) for i in range(N)] # heap = Maxheap(len(arr),arr) print(heap.size())", "\\ / \\ 92 82 78 47 / \\ /", "def __shiftUp(self,k): while k > 1 and self.data[k] > self.data[int(k", "j + 1 and self.data[j + 1] > self.data[j]: j", "in range(0,N): k = random.randint(1, M) heap.insert(k) # arr =", "> 1 and self.data[k] > self.data[int(k / 2)]: self.data[k],self.data[int(k /", "96 84 / \\ / \\ 92 82 78 47", "arr is None: self.count = 0 else: for i in", "arr = [random.randint(1,M) for i in range(N)] # heap =", "17 94 82 32 21 64 60 7 44 63", "and self.data[j + 1] > self.data[j]: j += 1 if", "\\ / \\ / \\ 33 26 51 85 50", "self.cpacity = cpacity if arr is None: self.count = 0", "self.count: j = k * 2 if self.count >= j", "k * 2 <= self.count: j = k * 2", "+ 1) self.cpacity = cpacity if arr is None: self.count", "/ \\ / \\ / \\ 33 26 51 85", "while k * 2 <= self.count: j = k *", "=int(k/2) def insert(self,data): self.data[self.count + 1] = data self.count +=", "0, -1): self.__shifDown(i) def size(self): return self.count def 
isEmpty(self): return", "= 0 else: for i in range(0,cpacity): self.data[i + 1]=", "60 7 44 63 63 ''' import random class Maxheap(object):", "26 51 85 50 15 44 60 / \\ /", "31 M = 100 heap = Maxheap(N) for i in", "for i in range(self.count / 2, 0, -1): self.__shifDown(i) def", "self.data[self.count] = self.data[self.count], self.data[1] self.count -= 1 self.__shifDown(1) return ret", "insert(self,data): self.data[self.count + 1] = data self.count += 1 self.__shiftUp(self.count)", "21 64 60 7 44 63 63 ''' import random", "ret if __name__ == '__main__': N = 31 M =", "/ \\ 33 26 51 85 50 15 44 60", "= self.data[j],self.data[k] k = j def extractMax(self): ret = self.data[1]", "self.count -= 1 self.__shifDown(1) return ret if __name__ == '__main__':", "'__main__': N = 31 M = 100 heap = Maxheap(N)", "2)]: self.data[k],self.data[int(k / 2)] = self.data[int(k / 2)], self.data[k] k", "0 else: for i in range(0,cpacity): self.data[i + 1]= arr[i]", "ret = self.data[1] self.data[1], self.data[self.count] = self.data[self.count], self.data[1] self.count -=", "range(0,N): k = random.randint(1, M) heap.insert(k) # arr = [random.randint(1,M)", "self.__shiftUp(self.count) def __shifDown(self,k): while k * 2 <= self.count: j", "utf-8 -* ''' 最大堆实现 98 / \\ 96 84 /", "51 98 51 7 17 94 82 32 21 64", "1 self.__shiftUp(self.count) def __shifDown(self,k): while k * 2 <= self.count:", "2)], self.data[k] k =int(k/2) def insert(self,data): self.data[self.count + 1] =", "* 2 <= self.count: j = k * 2 if", "\\ 96 84 / \\ / \\ 92 82 78", "for i in range(N)] # heap = Maxheap(len(arr),arr) print(heap.size()) print(heap.data)", "/ \\ / \\ 92 82 78 47 / \\", "= self.data[1] self.data[1], self.data[self.count] = self.data[self.count], self.data[1] self.count -= 1", "\\ 40 51 98 51 7 17 94 82 32", "if arr is None: self.count = 0 else: for i", "self.data[j] = self.data[j],self.data[k] k = j def extractMax(self): ret =", "heap = Maxheap(N) for i in range(0,N): k = random.randint(1,", "32 21 64 60 7 44 63 
63 ''' import", "and self.data[k] > self.data[int(k / 2)]: self.data[k],self.data[int(k / 2)] =", "[None] * (cpacity + 1) self.cpacity = cpacity if arr", "k = j def extractMax(self): ret = self.data[1] self.data[1], self.data[self.count]", "self.data[k] k =int(k/2) def insert(self,data): self.data[self.count + 1] = data", "i in range(0,cpacity): self.data[i + 1]= arr[i] self.count = cpacity", "2, 0, -1): self.__shifDown(i) def size(self): return self.count def isEmpty(self):", "63 63 ''' import random class Maxheap(object): def __init__(self,cpacity,arr =", "0 def __shiftUp(self,k): while k > 1 and self.data[k] >", ">= j + 1 and self.data[j + 1] > self.data[j]:", "-* ''' 最大堆实现 98 / \\ 96 84 / \\", "2 if self.count >= j + 1 and self.data[j +", "self.data[int(k / 2)], self.data[k] k =int(k/2) def insert(self,data): self.data[self.count +", "i in range(0,N): k = random.randint(1, M) heap.insert(k) # arr", "k =int(k/2) def insert(self,data): self.data[self.count + 1] = data self.count", "self.data[self.count + 1] = data self.count += 1 self.__shiftUp(self.count) def", "self.__shifDown(1) return ret if __name__ == '__main__': N = 31", "/ \\ 40 51 98 51 7 17 94 82", "j += 1 if self.data[k] > self.data[j]: break self.data[k], self.data[j]", "7 17 94 82 32 21 64 60 7 44", "\\ / \\ / \\ / \\ 40 51 98", "1] > self.data[j]: j += 1 if self.data[k] > self.data[j]:", "random class Maxheap(object): def __init__(self,cpacity,arr = None): self.data = [None]", "isEmpty(self): return self.count == 0 def __shiftUp(self,k): while k >", "60 / \\ / \\ / \\ / \\ /", "= self.data[int(k / 2)], self.data[k] k =int(k/2) def insert(self,data): self.data[self.count", "return ret if __name__ == '__main__': N = 31 M", "= cpacity if arr is None: self.count = 0 else:", "k * 2 if self.count >= j + 1 and", "> self.data[int(k / 2)]: self.data[k],self.data[int(k / 2)] = self.data[int(k /", "/ 2)] = self.data[int(k / 2)], self.data[k] k =int(k/2) def", "self.data[j]: break self.data[k], self.data[j] = 
self.data[j],self.data[k] k = j def", "N = 31 M = 100 heap = Maxheap(N) for", "if __name__ == '__main__': N = 31 M = 100", "50 15 44 60 / \\ / \\ / \\", "2 <= self.count: j = k * 2 if self.count", "78 47 / \\ / \\ / \\ / \\", "2)] = self.data[int(k / 2)], self.data[k] k =int(k/2) def insert(self,data):", "44 60 / \\ / \\ / \\ / \\", "self.data[self.count], self.data[1] self.count -= 1 self.__shifDown(1) return ret if __name__", "/ \\ / \\ 33 26 51 85 50 15", "return self.count == 0 def __shiftUp(self,k): while k > 1", "cpacity if arr is None: self.count = 0 else: for", "self.data[1] self.count -= 1 self.__shifDown(1) return ret if __name__ ==" ]
[ "self.rootTree.searchElementById(OriginName,drawables) OriginObject.runDraw() def draw(self, isClip=False): if self.hasTransform(): transMatrix = self.getTransform()", "isClip=False): if self.hasTransform(): transMatrix = self.getTransform() self.canvasContext.transform(*transMatrix) self.drawClone() def getCloneId(self):", "def drawClone(self): drawables = self.rootTree.getDrawable() OriginName = self.getCloneId() OriginObject =", "self.hasTransform(): transMatrix = self.getTransform() self.canvasContext.transform(*transMatrix) self.drawClone() def getCloneId(self): return self.attr(\"href\",\"xlink\")[1:]", "= self.rootTree.searchElementById(OriginName,drawables) OriginObject.runDraw() def draw(self, isClip=False): if self.hasTransform(): transMatrix =", "from ink2canvas.svg.AbstractShape import AbstractShape class Use(AbstractShape): def drawClone(self): drawables =", "import AbstractShape class Use(AbstractShape): def drawClone(self): drawables = self.rootTree.getDrawable() OriginName", "draw(self, isClip=False): if self.hasTransform(): transMatrix = self.getTransform() self.canvasContext.transform(*transMatrix) self.drawClone() def", "class Use(AbstractShape): def drawClone(self): drawables = self.rootTree.getDrawable() OriginName = self.getCloneId()", "self.rootTree.getDrawable() OriginName = self.getCloneId() OriginObject = self.rootTree.searchElementById(OriginName,drawables) OriginObject.runDraw() def draw(self,", "= self.getCloneId() OriginObject = self.rootTree.searchElementById(OriginName,drawables) OriginObject.runDraw() def draw(self, isClip=False): if", "if self.hasTransform(): transMatrix = self.getTransform() self.canvasContext.transform(*transMatrix) self.drawClone() def getCloneId(self): return", "OriginName = self.getCloneId() OriginObject = self.rootTree.searchElementById(OriginName,drawables) OriginObject.runDraw() def draw(self, isClip=False):", "self.getCloneId() OriginObject = self.rootTree.searchElementById(OriginName,drawables) 
OriginObject.runDraw() def draw(self, isClip=False): if self.hasTransform():", "drawClone(self): drawables = self.rootTree.getDrawable() OriginName = self.getCloneId() OriginObject = self.rootTree.searchElementById(OriginName,drawables)", "def draw(self, isClip=False): if self.hasTransform(): transMatrix = self.getTransform() self.canvasContext.transform(*transMatrix) self.drawClone()", "OriginObject.runDraw() def draw(self, isClip=False): if self.hasTransform(): transMatrix = self.getTransform() self.canvasContext.transform(*transMatrix)", "ink2canvas.svg.AbstractShape import AbstractShape class Use(AbstractShape): def drawClone(self): drawables = self.rootTree.getDrawable()", "= self.rootTree.getDrawable() OriginName = self.getCloneId() OriginObject = self.rootTree.searchElementById(OriginName,drawables) OriginObject.runDraw() def", "drawables = self.rootTree.getDrawable() OriginName = self.getCloneId() OriginObject = self.rootTree.searchElementById(OriginName,drawables) OriginObject.runDraw()", "AbstractShape class Use(AbstractShape): def drawClone(self): drawables = self.rootTree.getDrawable() OriginName =", "OriginObject = self.rootTree.searchElementById(OriginName,drawables) OriginObject.runDraw() def draw(self, isClip=False): if self.hasTransform(): transMatrix", "Use(AbstractShape): def drawClone(self): drawables = self.rootTree.getDrawable() OriginName = self.getCloneId() OriginObject" ]
[]
[ "---------------------------------- @app.route(\"/\", methods=['GET', 'POST']) def hello(): if request.method == 'POST':", "= request.form[\"query\"] return render_template(\"index.html\",data=data) return render_template(\"main.html\") # ----------------------------------- # -----------------------------------", "@app.route(\"/\", methods=['GET', 'POST']) def hello(): if request.method == 'POST': data", "botty # ---------------------------------- @app.route(\"/\", methods=['GET', 'POST']) def hello(): if request.method", "'POST']) def hello(): if request.method == 'POST': data = request.form[\"query\"]", "----------------------------------- @app.route(\"/request\", methods=['POST']) def respond(): data = request.form[\"data\"] return botty.botty_get_response(data)", "methods=['POST']) def respond(): data = request.form[\"data\"] return botty.botty_get_response(data) # -----------------------------------", "def respond(): data = request.form[\"data\"] return botty.botty_get_response(data) # ----------------------------------- if", "if request.method == 'POST': data = request.form[\"query\"] return render_template(\"index.html\",data=data) return", "@app.route(\"/request\", methods=['POST']) def respond(): data = request.form[\"data\"] return botty.botty_get_response(data) #", "return botty.botty_get_response(data) # ----------------------------------- if __name__ == \"__main__\": app.debug =", "----------------------------------- # ----------------------------------- @app.route(\"/request\", methods=['POST']) def respond(): data = request.form[\"data\"]", "def hello(): if request.method == 'POST': data = request.form[\"query\"] return", "# ----------------------------------- if __name__ == \"__main__\": app.debug = True app.run(host=\"0.0.0.0\")", "render_template(\"main.html\") # ----------------------------------- # ----------------------------------- @app.route(\"/request\", methods=['POST']) def respond(): data", "data = request.form[\"data\"] return 
botty.botty_get_response(data) # ----------------------------------- if __name__ ==", "app = Flask(__name__) import botty # ---------------------------------- @app.route(\"/\", methods=['GET', 'POST'])", "# ----------------------------------- # ----------------------------------- @app.route(\"/request\", methods=['POST']) def respond(): data =", "'POST': data = request.form[\"query\"] return render_template(\"index.html\",data=data) return render_template(\"main.html\") # -----------------------------------", "methods=['GET', 'POST']) def hello(): if request.method == 'POST': data =", "# ----------------------------------- @app.route(\"/request\", methods=['POST']) def respond(): data = request.form[\"data\"] return", "return render_template(\"main.html\") # ----------------------------------- # ----------------------------------- @app.route(\"/request\", methods=['POST']) def respond():", "render_template(\"index.html\",data=data) return render_template(\"main.html\") # ----------------------------------- # ----------------------------------- @app.route(\"/request\", methods=['POST']) def", "# ---------------------------------- @app.route(\"/\", methods=['GET', 'POST']) def hello(): if request.method ==", "request.method == 'POST': data = request.form[\"query\"] return render_template(\"index.html\",data=data) return render_template(\"main.html\")", "<filename>app.py<gh_stars>0 from flask import * app = Flask(__name__) import botty", "Flask(__name__) import botty # ---------------------------------- @app.route(\"/\", methods=['GET', 'POST']) def hello():", "request.form[\"data\"] return botty.botty_get_response(data) # ----------------------------------- if __name__ == \"__main__\": app.debug", "= Flask(__name__) import botty # ---------------------------------- @app.route(\"/\", methods=['GET', 'POST']) def", "hello(): if request.method == 'POST': data = request.form[\"query\"] return render_template(\"index.html\",data=data)", "= request.form[\"data\"] return 
botty.botty_get_response(data) # ----------------------------------- if __name__ == \"__main__\":", "from flask import * app = Flask(__name__) import botty #", "flask import * app = Flask(__name__) import botty # ----------------------------------", "data = request.form[\"query\"] return render_template(\"index.html\",data=data) return render_template(\"main.html\") # ----------------------------------- #", "return render_template(\"index.html\",data=data) return render_template(\"main.html\") # ----------------------------------- # ----------------------------------- @app.route(\"/request\", methods=['POST'])", "* app = Flask(__name__) import botty # ---------------------------------- @app.route(\"/\", methods=['GET',", "import botty # ---------------------------------- @app.route(\"/\", methods=['GET', 'POST']) def hello(): if", "respond(): data = request.form[\"data\"] return botty.botty_get_response(data) # ----------------------------------- if __name__", "import * app = Flask(__name__) import botty # ---------------------------------- @app.route(\"/\",", "request.form[\"query\"] return render_template(\"index.html\",data=data) return render_template(\"main.html\") # ----------------------------------- # ----------------------------------- @app.route(\"/request\",", "== 'POST': data = request.form[\"query\"] return render_template(\"index.html\",data=data) return render_template(\"main.html\") #", "botty.botty_get_response(data) # ----------------------------------- if __name__ == \"__main__\": app.debug = True" ]
[ "in temp: name = i.split(\" \")[0].split(\"-\")[0].split(\".\")[0] if name.startswith(\"~\") or name.startswith(\"PR\")", "python3 import os DATABASE=\"/home/tomate/Warehouse/syte/meta.db\" XLSDIR = \"/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/\" temp = [i", "i.endswith(\"xls\")] flist = {} for i in temp: name =", "import os DATABASE=\"/home/tomate/Warehouse/syte/meta.db\" XLSDIR = \"/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/\" temp = [i for", "i in next(os.walk(XLSDIR))[2] if i.endswith(\"xlsx\") or i.endswith(\"xls\")] flist = {}", "i.endswith(\"xlsx\") or i.endswith(\"xls\")] flist = {} for i in temp:", "= {} for i in temp: name = i.split(\" \")[0].split(\"-\")[0].split(\".\")[0]", "XLSDIR = \"/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/\" temp = [i for i in next(os.walk(XLSDIR))[2]", "\"/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/\" temp = [i for i in next(os.walk(XLSDIR))[2] if i.endswith(\"xlsx\")", "{} for i in temp: name = i.split(\" \")[0].split(\"-\")[0].split(\".\")[0] if", "i.split(\" \")[0].split(\"-\")[0].split(\".\")[0] if name.startswith(\"~\") or name.startswith(\"PR\") or name.startswith(\"FAB\"): continue else:", "flist = {} for i in temp: name = i.split(\"", "if name.startswith(\"~\") or name.startswith(\"PR\") or name.startswith(\"FAB\"): continue else: flist[name] =", "next(os.walk(XLSDIR))[2] if i.endswith(\"xlsx\") or i.endswith(\"xls\")] flist = {} for i", "DATABASE=\"/home/tomate/Warehouse/syte/meta.db\" XLSDIR = \"/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/\" temp = [i for i in", "for i in next(os.walk(XLSDIR))[2] if i.endswith(\"xlsx\") or i.endswith(\"xls\")] flist =", "if i.endswith(\"xlsx\") or i.endswith(\"xls\")] flist = {} for i in", "temp = [i for i in next(os.walk(XLSDIR))[2] if i.endswith(\"xlsx\") or", "for i in temp: name = i.split(\" \")[0].split(\"-\")[0].split(\".\")[0] if name.startswith(\"~\")", "[i for i in next(os.walk(XLSDIR))[2] if i.endswith(\"xlsx\") or 
i.endswith(\"xls\")] flist", "temp: name = i.split(\" \")[0].split(\"-\")[0].split(\".\")[0] if name.startswith(\"~\") or name.startswith(\"PR\") or", "= i.split(\" \")[0].split(\"-\")[0].split(\".\")[0] if name.startswith(\"~\") or name.startswith(\"PR\") or name.startswith(\"FAB\"): continue", "= [i for i in next(os.walk(XLSDIR))[2] if i.endswith(\"xlsx\") or i.endswith(\"xls\")]", "= \"/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/\" temp = [i for i in next(os.walk(XLSDIR))[2] if", "or i.endswith(\"xls\")] flist = {} for i in temp: name", "i in temp: name = i.split(\" \")[0].split(\"-\")[0].split(\".\")[0] if name.startswith(\"~\") or", "\")[0].split(\"-\")[0].split(\".\")[0] if name.startswith(\"~\") or name.startswith(\"PR\") or name.startswith(\"FAB\"): continue else: flist[name]", "name.startswith(\"~\") or name.startswith(\"PR\") or name.startswith(\"FAB\"): continue else: flist[name] = i", "in next(os.walk(XLSDIR))[2] if i.endswith(\"xlsx\") or i.endswith(\"xls\")] flist = {} for", "name = i.split(\" \")[0].split(\"-\")[0].split(\".\")[0] if name.startswith(\"~\") or name.startswith(\"PR\") or name.startswith(\"FAB\"):", "os DATABASE=\"/home/tomate/Warehouse/syte/meta.db\" XLSDIR = \"/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/\" temp = [i for i", "#!/usr/bin/env python3 import os DATABASE=\"/home/tomate/Warehouse/syte/meta.db\" XLSDIR = \"/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/\" temp =", "<filename>config.py<gh_stars>0 #!/usr/bin/env python3 import os DATABASE=\"/home/tomate/Warehouse/syte/meta.db\" XLSDIR = \"/mnt/c/Users/Natacha/Documents/TempDocs/progen/Formula/\" temp" ]
[ "pass into py.test\")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = [] def", "[('pytest-args=', 'a', \"Arguments to pass into py.test\")] def initialize_options(self): TestCommand.initialize_options(self)", "= [] self.test_suite = True def run_tests(self): import pytest errno", "version='0.1', description='Asynchronous HTTP proxy for HTTP Range Requests', author='<NAME>', author_email='<EMAIL>',", "TestCommand class PyTest(TestCommand): user_options = [('pytest-args=', 'a', \"Arguments to pass", "finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self):", "py.test\")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = [] def finalize_options(self): TestCommand.finalize_options(self)", "test as TestCommand class PyTest(TestCommand): user_options = [('pytest-args=', 'a', \"Arguments", "as TestCommand class PyTest(TestCommand): user_options = [('pytest-args=', 'a', \"Arguments to", "TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): import", "= True def run_tests(self): import pytest errno = pytest.main(self.pytest_args) sys.exit(errno)", "proxy for HTTP Range Requests', author='<NAME>', author_email='<EMAIL>', cmdclass={'test': PyTest}, tests_require=['pytest>=2.8.0',", "Requests', author='<NAME>', author_email='<EMAIL>', cmdclass={'test': PyTest}, tests_require=['pytest>=2.8.0', 'mock==2.0.0'], install_requires=['tornado==4.4.1', 'pycurl==7.43.0'], packages=['rangerequestsproxy'],", "setuptools.command.test import test as TestCommand class PyTest(TestCommand): user_options = [('pytest-args=',", "import pytest errno = pytest.main(self.pytest_args) sys.exit(errno) setup( name='range-requests-proxy', version='0.1', description='Asynchronous", "sys.exit(errno) setup( name='range-requests-proxy', version='0.1', description='Asynchronous HTTP proxy for HTTP Range", "self.pytest_args = 
[] def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite", "#!/usr/bin/env python import sys from setuptools import setup from setuptools.command.test", "import setup from setuptools.command.test import test as TestCommand class PyTest(TestCommand):", "pytest errno = pytest.main(self.pytest_args) sys.exit(errno) setup( name='range-requests-proxy', version='0.1', description='Asynchronous HTTP", "[] def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True", "HTTP Range Requests', author='<NAME>', author_email='<EMAIL>', cmdclass={'test': PyTest}, tests_require=['pytest>=2.8.0', 'mock==2.0.0'], install_requires=['tornado==4.4.1',", "sys from setuptools import setup from setuptools.command.test import test as", "into py.test\")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = [] def finalize_options(self):", "to pass into py.test\")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = []", "PyTest(TestCommand): user_options = [('pytest-args=', 'a', \"Arguments to pass into py.test\")]", "def run_tests(self): import pytest errno = pytest.main(self.pytest_args) sys.exit(errno) setup( name='range-requests-proxy',", "TestCommand.initialize_options(self) self.pytest_args = [] def finalize_options(self): TestCommand.finalize_options(self) self.test_args = []", "name='range-requests-proxy', version='0.1', description='Asynchronous HTTP proxy for HTTP Range Requests', author='<NAME>',", "run_tests(self): import pytest errno = pytest.main(self.pytest_args) sys.exit(errno) setup( name='range-requests-proxy', version='0.1',", "def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = [] def finalize_options(self): TestCommand.finalize_options(self) self.test_args", "description='Asynchronous HTTP proxy for HTTP Range Requests', author='<NAME>', author_email='<EMAIL>', cmdclass={'test':", 
"from setuptools.command.test import test as TestCommand class PyTest(TestCommand): user_options =", "<gh_stars>1-10 #!/usr/bin/env python import sys from setuptools import setup from", "setup( name='range-requests-proxy', version='0.1', description='Asynchronous HTTP proxy for HTTP Range Requests',", "= pytest.main(self.pytest_args) sys.exit(errno) setup( name='range-requests-proxy', version='0.1', description='Asynchronous HTTP proxy for", "self.test_suite = True def run_tests(self): import pytest errno = pytest.main(self.pytest_args)", "author_email='<EMAIL>', cmdclass={'test': PyTest}, tests_require=['pytest>=2.8.0', 'mock==2.0.0'], install_requires=['tornado==4.4.1', 'pycurl==7.43.0'], packages=['rangerequestsproxy'], license='BSD', url='https://github.com/markostrajkov/range-requests-proxy',", "[] self.test_suite = True def run_tests(self): import pytest errno =", "cmdclass={'test': PyTest}, tests_require=['pytest>=2.8.0', 'mock==2.0.0'], install_requires=['tornado==4.4.1', 'pycurl==7.43.0'], packages=['rangerequestsproxy'], license='BSD', url='https://github.com/markostrajkov/range-requests-proxy', )", "user_options = [('pytest-args=', 'a', \"Arguments to pass into py.test\")] def", "'a', \"Arguments to pass into py.test\")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args", "class PyTest(TestCommand): user_options = [('pytest-args=', 'a', \"Arguments to pass into", "python import sys from setuptools import setup from setuptools.command.test import", "import sys from setuptools import setup from setuptools.command.test import test", "self.test_args = [] self.test_suite = True def run_tests(self): import pytest", "\"Arguments to pass into py.test\")] def initialize_options(self): TestCommand.initialize_options(self) self.pytest_args =", "import test as TestCommand class PyTest(TestCommand): user_options = [('pytest-args=', 'a',", "errno = pytest.main(self.pytest_args) sys.exit(errno) setup( name='range-requests-proxy', 
version='0.1', description='Asynchronous HTTP proxy", "= [] def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite =", "author='<NAME>', author_email='<EMAIL>', cmdclass={'test': PyTest}, tests_require=['pytest>=2.8.0', 'mock==2.0.0'], install_requires=['tornado==4.4.1', 'pycurl==7.43.0'], packages=['rangerequestsproxy'], license='BSD',", "initialize_options(self): TestCommand.initialize_options(self) self.pytest_args = [] def finalize_options(self): TestCommand.finalize_options(self) self.test_args =", "pytest.main(self.pytest_args) sys.exit(errno) setup( name='range-requests-proxy', version='0.1', description='Asynchronous HTTP proxy for HTTP", "HTTP proxy for HTTP Range Requests', author='<NAME>', author_email='<EMAIL>', cmdclass={'test': PyTest},", "for HTTP Range Requests', author='<NAME>', author_email='<EMAIL>', cmdclass={'test': PyTest}, tests_require=['pytest>=2.8.0', 'mock==2.0.0'],", "def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def", "setup from setuptools.command.test import test as TestCommand class PyTest(TestCommand): user_options", "setuptools import setup from setuptools.command.test import test as TestCommand class", "= [('pytest-args=', 'a', \"Arguments to pass into py.test\")] def initialize_options(self):", "Range Requests', author='<NAME>', author_email='<EMAIL>', cmdclass={'test': PyTest}, tests_require=['pytest>=2.8.0', 'mock==2.0.0'], install_requires=['tornado==4.4.1', 'pycurl==7.43.0'],", "from setuptools import setup from setuptools.command.test import test as TestCommand", "True def run_tests(self): import pytest errno = pytest.main(self.pytest_args) sys.exit(errno) setup(" ]
[ "import pytorch_pfn_extras.onnx as tou from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net @pytest.mark.filterwarnings(\"ignore:Named tensors", "<reponame>kmaehashi/pytorch-pfn-extras<gh_stars>100-1000 import os import pytest import torch import pytorch_pfn_extras.onnx as", ".* experimental:UserWarning\") def test_onnx_load_model(): model = Net() outdir = \"out/load_model_test\"", "contains stripped .*:UserWarning\") def test_stripped_onnx_load_model(): model = Net() outdir =", "import os import pytest import torch import pytorch_pfn_extras.onnx as tou", "model = Net() outdir = \"out/load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28,", "= \"out/stripped_load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir, strip_large_tensor_data=True, training=True,", "pytest import torch import pytorch_pfn_extras.onnx as tou from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import", "tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir, training=True, do_constant_folding=False) tou.load_model(os.path.join(outdir, \"model.onnx\"))", "os import pytest import torch import pytorch_pfn_extras.onnx as tou from", "torch.rand(1, 1, 28, 28), outdir, strip_large_tensor_data=True, training=True, do_constant_folding=False) tou.load_model(os.path.join(outdir, \"model.onnx\"))", "@pytest.mark.filterwarnings(\"ignore:.*ONNX contains stripped .*:UserWarning\") def test_stripped_onnx_load_model(): model = Net() outdir", "1, 28, 28), outdir, training=True, do_constant_folding=False) tou.load_model(os.path.join(outdir, \"model.onnx\")) @pytest.mark.filterwarnings(\"ignore:.*ONNX contains", "outdir, training=True, do_constant_folding=False) tou.load_model(os.path.join(outdir, \"model.onnx\")) @pytest.mark.filterwarnings(\"ignore:.*ONNX contains stripped .*:UserWarning\") def", "outdir = \"out/load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir, training=True,", "training=True, 
do_constant_folding=False) tou.load_model(os.path.join(outdir, \"model.onnx\")) @pytest.mark.filterwarnings(\"ignore:.*ONNX contains stripped .*:UserWarning\") def test_stripped_onnx_load_model():", "tou.load_model(os.path.join(outdir, \"model.onnx\")) @pytest.mark.filterwarnings(\"ignore:.*ONNX contains stripped .*:UserWarning\") def test_stripped_onnx_load_model(): model =", "\"out/stripped_load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir, strip_large_tensor_data=True, training=True, do_constant_folding=False)", "Net() outdir = \"out/stripped_load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir,", "stripped .*:UserWarning\") def test_stripped_onnx_load_model(): model = Net() outdir = \"out/stripped_load_model_test\"", "import pytest import torch import pytorch_pfn_extras.onnx as tou from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase", "\"model.onnx\")) @pytest.mark.filterwarnings(\"ignore:.*ONNX contains stripped .*:UserWarning\") def test_stripped_onnx_load_model(): model = Net()", "28, 28), outdir, training=True, do_constant_folding=False) tou.load_model(os.path.join(outdir, \"model.onnx\")) @pytest.mark.filterwarnings(\"ignore:.*ONNX contains stripped", "torch import pytorch_pfn_extras.onnx as tou from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net @pytest.mark.filterwarnings(\"ignore:Named", "test_stripped_onnx_load_model(): model = Net() outdir = \"out/stripped_load_model_test\" tou.export_testcase(model, torch.rand(1, 1,", "28), outdir, training=True, do_constant_folding=False) tou.load_model(os.path.join(outdir, \"model.onnx\")) @pytest.mark.filterwarnings(\"ignore:.*ONNX contains stripped .*:UserWarning\")", "import Net @pytest.mark.filterwarnings(\"ignore:Named tensors .* experimental:UserWarning\") def test_onnx_load_model(): model =", "experimental:UserWarning\") def test_onnx_load_model(): model = Net() outdir = \"out/load_model_test\" tou.export_testcase(model,", 
"Net @pytest.mark.filterwarnings(\"ignore:Named tensors .* experimental:UserWarning\") def test_onnx_load_model(): model = Net()", "def test_onnx_load_model(): model = Net() outdir = \"out/load_model_test\" tou.export_testcase(model, torch.rand(1,", "torch.rand(1, 1, 28, 28), outdir, training=True, do_constant_folding=False) tou.load_model(os.path.join(outdir, \"model.onnx\")) @pytest.mark.filterwarnings(\"ignore:.*ONNX", "@pytest.mark.filterwarnings(\"ignore:Named tensors .* experimental:UserWarning\") def test_onnx_load_model(): model = Net() outdir", "= \"out/load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir, training=True, do_constant_folding=False)", "= Net() outdir = \"out/load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28, 28),", "outdir = \"out/stripped_load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir, strip_large_tensor_data=True,", "model = Net() outdir = \"out/stripped_load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28,", "pytorch_pfn_extras.onnx as tou from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net @pytest.mark.filterwarnings(\"ignore:Named tensors .*", "Net() outdir = \"out/load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir,", "tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir, strip_large_tensor_data=True, training=True, do_constant_folding=False) tou.load_model(os.path.join(outdir,", "def test_stripped_onnx_load_model(): model = Net() outdir = \"out/stripped_load_model_test\" tou.export_testcase(model, torch.rand(1,", "tou from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net @pytest.mark.filterwarnings(\"ignore:Named tensors .* experimental:UserWarning\") def", "= Net() outdir = \"out/stripped_load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28, 28),", "import torch import pytorch_pfn_extras.onnx as tou from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import 
Net", ".*:UserWarning\") def test_stripped_onnx_load_model(): model = Net() outdir = \"out/stripped_load_model_test\" tou.export_testcase(model,", "tensors .* experimental:UserWarning\") def test_onnx_load_model(): model = Net() outdir =", "test_onnx_load_model(): model = Net() outdir = \"out/load_model_test\" tou.export_testcase(model, torch.rand(1, 1,", "as tou from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net @pytest.mark.filterwarnings(\"ignore:Named tensors .* experimental:UserWarning\")", "do_constant_folding=False) tou.load_model(os.path.join(outdir, \"model.onnx\")) @pytest.mark.filterwarnings(\"ignore:.*ONNX contains stripped .*:UserWarning\") def test_stripped_onnx_load_model(): model", "from tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net @pytest.mark.filterwarnings(\"ignore:Named tensors .* experimental:UserWarning\") def test_onnx_load_model():", "tests.pytorch_pfn_extras_tests.onnx.test_export_testcase import Net @pytest.mark.filterwarnings(\"ignore:Named tensors .* experimental:UserWarning\") def test_onnx_load_model(): model", "\"out/load_model_test\" tou.export_testcase(model, torch.rand(1, 1, 28, 28), outdir, training=True, do_constant_folding=False) tou.load_model(os.path.join(outdir," ]
[ "'name': '管理员编号', 'type': 'str', 'required': True }, 'time': { 'name':", "'str', 'required': True }, 'time': { 'name': '查询时间', 'type': 'str',", "14:02:02 ''' params = { # 验证字段 'fields': { 'type':", "''' params = { # 验证字段 'fields': { 'type': {", "数据库验证器 @Author: Zpp @Date: 2020-05-28 13:44:29 @LastEditors: Zpp @LastEditTime: 2020-05-28", "}, # 导出数据库 'Export': ['type'], # 导入数据库 'Import': ['document'], #", "2, 3], 'required': True }, 'document': { 'name': '数据库文件', 'type':", "@Description: 数据库验证器 @Author: Zpp @Date: 2020-05-28 13:44:29 @LastEditors: Zpp @LastEditTime:", "{ # 验证字段 'fields': { 'type': { 'name': '导出类型', 'type':", "2020-05-28 14:02:02 ''' params = { # 验证字段 'fields': {", "13:44:29 @LastEditors: Zpp @LastEditTime: 2020-05-28 14:02:02 ''' params = {", "Zpp @Date: 2020-05-28 13:44:29 @LastEditors: Zpp @LastEditTime: 2020-05-28 14:02:02 '''", "'required': True, 'msg': '请选择上传数据库文件' }, 'admin_id': { 'name': '管理员编号', 'type':", "'document': { 'name': '数据库文件', 'type': 'file', 'required': True, 'msg': '请选择上传数据库文件'", "}, 'admin_id': { 'name': '管理员编号', 'type': 'str', 'required': True },", "'admin_id': { 'name': '管理员编号', 'type': 'str', 'required': True }, 'time':", "'str', 'required': True } }, # 导出数据库 'Export': ['type'], #", "{ 'name': '查询时间', 'type': 'str', 'required': True } }, #", "验证字段 'fields': { 'type': { 'name': '导出类型', 'type': 'int', 'between':", "@LastEditors: Zpp @LastEditTime: 2020-05-28 14:02:02 ''' params = { #", "@Author: Zpp @Date: 2020-05-28 13:44:29 @LastEditors: Zpp @LastEditTime: 2020-05-28 14:02:02", "{ 'name': '管理员编号', 'type': 'str', 'required': True }, 'time': {", "'name': '查询时间', 'type': 'str', 'required': True } }, # 导出数据库", "@LastEditTime: 2020-05-28 14:02:02 ''' params = { # 验证字段 'fields':", "<reponame>huzidabanzhang/Python<gh_stars>1-10 #!/usr/bin/env python # -*- coding:UTF-8 -*- ''' @Description: 数据库验证器", "}, 'document': { 'name': '数据库文件', 'type': 'file', 'required': True, 'msg':", "-*- ''' @Description: 数据库验证器 @Author: Zpp @Date: 
2020-05-28 13:44:29 @LastEditors:", "'required': True }, 'document': { 'name': '数据库文件', 'type': 'file', 'required':", "# 验证字段 'fields': { 'type': { 'name': '导出类型', 'type': 'int',", "'file', 'required': True, 'msg': '请选择上传数据库文件' }, 'admin_id': { 'name': '管理员编号',", "'required': True } }, # 导出数据库 'Export': ['type'], # 导入数据库", "# 导入数据库 'Import': ['document'], # 首页登录清空 'Login': ['admin_id', 'time'] }", "= { # 验证字段 'fields': { 'type': { 'name': '导出类型',", "'type': 'str', 'required': True } }, # 导出数据库 'Export': ['type'],", "'管理员编号', 'type': 'str', 'required': True }, 'time': { 'name': '查询时间',", "[1, 2, 3], 'required': True }, 'document': { 'name': '数据库文件',", "'between': [1, 2, 3], 'required': True }, 'document': { 'name':", "True }, 'time': { 'name': '查询时间', 'type': 'str', 'required': True", "'type': 'file', 'required': True, 'msg': '请选择上传数据库文件' }, 'admin_id': { 'name':", "} }, # 导出数据库 'Export': ['type'], # 导入数据库 'Import': ['document'],", "'导出类型', 'type': 'int', 'between': [1, 2, 3], 'required': True },", "'name': '导出类型', 'type': 'int', 'between': [1, 2, 3], 'required': True", "2020-05-28 13:44:29 @LastEditors: Zpp @LastEditTime: 2020-05-28 14:02:02 ''' params =", "Zpp @LastEditTime: 2020-05-28 14:02:02 ''' params = { # 验证字段", "{ 'name': '导出类型', 'type': 'int', 'between': [1, 2, 3], 'required':", "'name': '数据库文件', 'type': 'file', 'required': True, 'msg': '请选择上传数据库文件' }, 'admin_id':", "True } }, # 导出数据库 'Export': ['type'], # 导入数据库 'Import':", "'数据库文件', 'type': 'file', 'required': True, 'msg': '请选择上传数据库文件' }, 'admin_id': {", "#!/usr/bin/env python # -*- coding:UTF-8 -*- ''' @Description: 数据库验证器 @Author:", "# -*- coding:UTF-8 -*- ''' @Description: 数据库验证器 @Author: Zpp @Date:", "'查询时间', 'type': 'str', 'required': True } }, # 导出数据库 'Export':", "'Export': ['type'], # 导入数据库 'Import': ['document'], # 首页登录清空 'Login': ['admin_id',", "''' @Description: 数据库验证器 @Author: Zpp @Date: 2020-05-28 13:44:29 @LastEditors: Zpp", "coding:UTF-8 -*- ''' @Description: 数据库验证器 @Author: Zpp @Date: 
2020-05-28 13:44:29", "True, 'msg': '请选择上传数据库文件' }, 'admin_id': { 'name': '管理员编号', 'type': 'str',", "'required': True }, 'time': { 'name': '查询时间', 'type': 'str', 'required':", "{ 'name': '数据库文件', 'type': 'file', 'required': True, 'msg': '请选择上传数据库文件' },", "# 导出数据库 'Export': ['type'], # 导入数据库 'Import': ['document'], # 首页登录清空", "-*- coding:UTF-8 -*- ''' @Description: 数据库验证器 @Author: Zpp @Date: 2020-05-28", "'type': 'int', 'between': [1, 2, 3], 'required': True }, 'document':", "导出数据库 'Export': ['type'], # 导入数据库 'Import': ['document'], # 首页登录清空 'Login':", "'int', 'between': [1, 2, 3], 'required': True }, 'document': {", "True }, 'document': { 'name': '数据库文件', 'type': 'file', 'required': True,", "}, 'time': { 'name': '查询时间', 'type': 'str', 'required': True }", "params = { # 验证字段 'fields': { 'type': { 'name':", "3], 'required': True }, 'document': { 'name': '数据库文件', 'type': 'file',", "['type'], # 导入数据库 'Import': ['document'], # 首页登录清空 'Login': ['admin_id', 'time']", "{ 'type': { 'name': '导出类型', 'type': 'int', 'between': [1, 2,", "'time': { 'name': '查询时间', 'type': 'str', 'required': True } },", "@Date: 2020-05-28 13:44:29 @LastEditors: Zpp @LastEditTime: 2020-05-28 14:02:02 ''' params", "'请选择上传数据库文件' }, 'admin_id': { 'name': '管理员编号', 'type': 'str', 'required': True", "'msg': '请选择上传数据库文件' }, 'admin_id': { 'name': '管理员编号', 'type': 'str', 'required':", "'fields': { 'type': { 'name': '导出类型', 'type': 'int', 'between': [1,", "'type': 'str', 'required': True }, 'time': { 'name': '查询时间', 'type':", "'type': { 'name': '导出类型', 'type': 'int', 'between': [1, 2, 3],", "python # -*- coding:UTF-8 -*- ''' @Description: 数据库验证器 @Author: Zpp" ]
[ "slice_symbol_to_seq_symobls(net, seq_len, axis=1, squeeze_axis=True): net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis, squeeze_axis=squeeze_axis)", "for seq_index in range(seq_len): hidden_all.append(net[seq_index]) net = hidden_all return net", "= mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis, squeeze_axis=squeeze_axis) hidden_all = [] for seq_index", "as mx def slice_symbol_to_seq_symobls(net, seq_len, axis=1, squeeze_axis=True): net = mx.sym.SliceChannel(data=net,", "import mxnet as mx def slice_symbol_to_seq_symobls(net, seq_len, axis=1, squeeze_axis=True): net", "mxnet as mx def slice_symbol_to_seq_symobls(net, seq_len, axis=1, squeeze_axis=True): net =", "seq_len, axis=1, squeeze_axis=True): net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis, squeeze_axis=squeeze_axis) hidden_all", "[] for seq_index in range(seq_len): hidden_all.append(net[seq_index]) net = hidden_all return", "mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis, squeeze_axis=squeeze_axis) hidden_all = [] for seq_index in", "squeeze_axis=True): net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis, squeeze_axis=squeeze_axis) hidden_all = []", "mx def slice_symbol_to_seq_symobls(net, seq_len, axis=1, squeeze_axis=True): net = mx.sym.SliceChannel(data=net, num_outputs=seq_len,", "squeeze_axis=squeeze_axis) hidden_all = [] for seq_index in range(seq_len): hidden_all.append(net[seq_index]) net", "net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis, squeeze_axis=squeeze_axis) hidden_all = [] for", "axis=1, squeeze_axis=True): net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis, squeeze_axis=squeeze_axis) hidden_all =", "num_outputs=seq_len, axis=axis, squeeze_axis=squeeze_axis) hidden_all = [] for seq_index in range(seq_len):", "hidden_all = [] for seq_index in range(seq_len): hidden_all.append(net[seq_index]) net =", "axis=axis, squeeze_axis=squeeze_axis) hidden_all = [] for seq_index in 
range(seq_len): hidden_all.append(net[seq_index])", "def slice_symbol_to_seq_symobls(net, seq_len, axis=1, squeeze_axis=True): net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=axis,", "= [] for seq_index in range(seq_len): hidden_all.append(net[seq_index]) net = hidden_all" ]
[ "from rest_framework_jwt.authentication import JSONWebTokenAuthentication from rest_framework_jwt.settings import api_settings from rest_framework_jwt.utils", "1. Look for account in \"\"\" user = None try:", "= profile.get('id') user = _temp_reverse_user(uid, 'facebook', access_token, access_token_secret, r.text) elif", "user.user.is_active: return Response({ 'status': 'Unauthorized', 'message': 'User account disabled' },", "= aa.provider sa.access_token = access_token sa.access_token_secret = access_token_secret sa.provider_data =", "= \"\" access_token_secret = \"\" if request.data.get('oauth_token') and request.data.get('oauth_verifier'): auth", "timegm( datetime.datetime.utcnow().utctimetuple() ) response_data = { 'token': jwt_encode_handler(payload), 'session': user.get_session_id()", "return Response(response_data) return Response({ 'status': 'Bad request', 'message': 'Authentication could", "SocialAccountLink() sa.user = user sa.social_id = aa.uid sa.type = aa.provider", "access_token_secret sa.provider_data = payload sa.save() except aamodels.SocialAccount.DoesNotExist: print('Need to create", "= dict(parse_qsl(r.text)) return Response(access_token) elif backend in ['facebook']: access_token_url =", "'Bad request', 'message': 'Authentication could not be performed with received", "access_token = profile.get('oauth_token') access_token_secret = profile.get('oauth_token_secret') user = _temp_reverse_user(uid, 'twitter',", "= \"\" access_token_secret = \"\" payload = dict(client_id=request.data.get('clientId'), redirect_uri=request.data.get('redirectUri'), client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET,", "in \"\"\" user = None try: sa = SocialAccountLink.objects.get(social_id=uid) sa.type", "authenticate users through social media.\"\"\" permission_classes = (AllowAny,) def post(self,", "'session': user.get_session_id() } return Response(response_data) return Response({ 'status': 'Bad request',", "to create UserProfile') # we got an allauth, create 
the", "if backend in ['twitter']: request_token_url = 'https://api.twitter.com/oauth/request_token' access_token_url = 'https://api.twitter.com/oauth/access_token'", "user. r = requests.get(graph_api_url, params=token) profile = json.loads(r.text) access_token =", "user = None if backend in ['twitter']: request_token_url = 'https://api.twitter.com/oauth/request_token'", "access_token_secret = \"\" params = { 'client_id': request.data.get('clientId'), 'redirect_uri': request.data.get('redirectUri'),", "} return Response(response_data) return Response({ 'status': 'Bad request', 'message': 'Authentication", "settings.SOCIAL_AUTH_FACEBOOK_SECRET, 'code': request.data.get('code') } # Step 1. Exchange authorization code", "access_token, access_token_secret, payload): \"\"\" Do some magic here to find", "sa = SocialAccountLink.objects.get(social_id=uid) sa.type = provider sa.social_id = uid sa.access_token", "permission_classes = () parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,) renderer_classes =", "received data.' }, status=status.HTTP_400_BAD_REQUEST) class ObtainUser(APIView): throttle_classes = () permission_classes", "token. 
r = requests.get(access_token_url, params=params) token = json.loads(r.text) # Step", "token = json.loads(r.text) headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])} # Step", "if uid is not None and user is not None:", "calendar import timegm from urllib.parse import parse_qsl import requests from", "rest_framework.authtoken.models import Token from rest_framework.authtoken.serializers import AuthTokenSerializer from rest_framework.permissions import", "= SocialAccountLink() sa.user = user sa.social_id = aa.uid sa.type =", "SocialLoginHandler(APIView): \"\"\"View to authenticate users through social media.\"\"\" permission_classes =", "= 'https://graph.facebook.com/v2.3/oauth/access_token' graph_api_url = 'https://graph.facebook.com/v2.3/me' access_token = \"\" access_token_secret =", "access_token, access_token_secret, r.text) if uid is not None and user", "sa.save() except aamodels.SocialAccount.DoesNotExist: print('Need to create social model') return user", "from spa.models import UserProfile from spa.models.socialaccountlink import SocialAccountLink def _temp_reverse_user(uid,", "UserProfile from spa.models.socialaccountlink import SocialAccountLink def _temp_reverse_user(uid, provider, access_token, access_token_secret,", "profile.get('oauth_token') access_token_secret = profile.get('oauth_token_secret') user = _temp_reverse_user(uid, 'twitter', access_token, access_token_secret,", "elif backend in ['facebook']: access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token' graph_api_url = 'https://graph.facebook.com/v2.3/me'", "access_token_secret = profile.get('oauth_token_secret') user = _temp_reverse_user(uid, 'twitter', access_token, access_token_secret, payload)", "UserProfile') # we got an allauth, create the SocialAccountLink sa", "None try: sa = SocialAccountLink.objects.get(social_id=uid) sa.type = provider sa.social_id =", "dict(client_id=request.data.get('clientId'), redirect_uri=request.data.get('redirectUri'), 
client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET, code=request.data.get('code'), grant_type='authorization_code') # Step 1. Exchange authorization", "parsers, renderers from rest_framework import status from rest_framework.authtoken.models import Token", "= profile.get('oauth_token_secret') user = _temp_reverse_user(uid, 'twitter', access_token, access_token_secret, payload) else:", "import SocialAccountLink def _temp_reverse_user(uid, provider, access_token, access_token_secret, payload): \"\"\" Do", "class ObtainUser(APIView): throttle_classes = () permission_classes = () parser_classes =", "if request.user.is_authenticated(): return Response( status=status.HTTP_200_OK, data={ 'id': request.user.id, 'name': request.user.username,", "rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler from dss import settings from spa.models", "'token': jwt_encode_handler(payload), 'session': user.get_session_id() } return Response(response_data) return Response({ 'status':", "() permission_classes = () parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,) renderer_classes", "2. Retrieve information about the current user. r = requests.get(graph_api_url,", "is not None: if not user.user.is_active: return Response({ 'status': 'Unauthorized',", "profile = json.loads(r.text) uid = profile.get('sub') user = _temp_reverse_user(uid, 'google',", "= json.loads(r.text) headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])} # Step 2.", "}, status=status.HTTP_400_BAD_REQUEST) class ObtainUser(APIView): throttle_classes = () permission_classes = ()", "Response({ 'status': 'Bad request', 'message': 'Authentication could not be performed", "params=params) token = json.loads(r.text) # Step 2. Retrieve information about", "redirect_uri=request.data.get('redirectUri'), client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET, code=request.data.get('code'), grant_type='authorization_code') # Step 1. 
Exchange authorization code", "profile.get('user_id') access_token = profile.get('oauth_token') access_token_secret = profile.get('oauth_token_secret') user = _temp_reverse_user(uid,", "rest_framework.response import Response from rest_framework.views import APIView from rest_framework_jwt.authentication import", "= () permission_classes = () parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)", "allauth.socialaccount import models as aamodels from requests_oauthlib import OAuth1 from", "jwt_payload_handler(user.user) if api_settings.JWT_ALLOW_REFRESH: payload['orig_iat'] = timegm( datetime.datetime.utcnow().utctimetuple() ) response_data =", "user is not None: if not user.user.is_active: return Response({ 'status':", "SocialAccountLink def _temp_reverse_user(uid, provider, access_token, access_token_secret, payload): \"\"\" Do some", "if not user.user.is_active: return Response({ 'status': 'Unauthorized', 'message': 'User account", "permission_classes = (AllowAny,) def post(self, request): uid = None backend", "from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler from dss import settings from", "Retrieve information about the current user. r = requests.get(graph_api_url, params=token)", "request.user.userprofile.get_session_id(), 'slug': request.user.userprofile.slug, 'session': request.user.userprofile.get_session_id(), 'userRole': 'user', }) else: return", "OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, callback_uri=settings.TWITTER_CALLBACK_URL) r = requests.post(request_token_url, auth=oauth) access_token = dict(parse_qsl(r.text))", "token. r = requests.post(access_token_url, data=payload) token = json.loads(r.text) headers =", "= {'Authorization': 'Bearer {0}'.format(token['access_token'])} # Step 2. 
Retrieve information about", "from rest_framework.views import APIView from rest_framework_jwt.authentication import JSONWebTokenAuthentication from rest_framework_jwt.settings", "= requests.get(people_api_url, headers=headers) profile = json.loads(r.text) uid = profile.get('sub') user", "= \"\" access_token_secret = \"\" params = { 'client_id': request.data.get('clientId'),", "Do some magic here to find user account and deprecate", "import OAuth1 from rest_framework import parsers, renderers from rest_framework import", "from rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.views", "response_data = { 'token': jwt_encode_handler(payload), 'session': user.get_session_id() } return Response(response_data)", "from rest_framework import parsers, renderers from rest_framework import status from", "json.loads(r.text) # Step 2. Retrieve information about the current user.", "token.get('access_token') uid = profile.get('id') user = _temp_reverse_user(uid, 'facebook', access_token, access_token_secret,", "Step 2. Retrieve information about the current user. r =", "could not be performed with received data.' 
}, status=status.HTTP_400_BAD_REQUEST) class", "'slug': request.user.userprofile.slug, 'session': request.user.userprofile.get_session_id(), 'userRole': 'user', }) else: return Response(status=status.HTTP_401_UNAUTHORIZED)", "= json.dumps(profile) uid = profile.get('user_id') access_token = profile.get('oauth_token') access_token_secret =", "= requests.post(access_token_url, auth=auth) profile = dict(parse_qsl(r.text)) payload = json.dumps(profile) uid", "payload) else: oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, callback_uri=settings.TWITTER_CALLBACK_URL) r = requests.post(request_token_url,", "['facebook']: access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token' graph_api_url = 'https://graph.facebook.com/v2.3/me' access_token = \"\"", "request.data.get('clientId'), 'redirect_uri': request.data.get('redirectUri'), 'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET, 'code': request.data.get('code') } # Step", "uid is not None and user is not None: if", "'id': request.user.id, 'name': request.user.username, 'session': request.user.userprofile.get_session_id(), 'slug': request.user.userprofile.slug, 'session': request.user.userprofile.get_session_id(),", "except UserProfile.DoesNotExist: print('Need to create UserProfile') # we got an", "Response({ 'status': 'Unauthorized', 'message': 'User account disabled' }, status=status.HTTP_401_UNAUTHORIZED) payload", "request.user.username, 'session': request.user.userprofile.get_session_id(), 'slug': request.user.userprofile.slug, 'session': request.user.userprofile.get_session_id(), 'userRole': 'user', })", "r = requests.get(access_token_url, params=params) token = json.loads(r.text) # Step 2.", "api_settings from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler from dss import settings", "parsers.MultiPartParser, parsers.JSONParser,) renderer_classes = (renderers.JSONRenderer,) serializer_class = AuthTokenSerializer model =", 
"= \"\" if request.data.get('oauth_token') and request.data.get('oauth_verifier'): auth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET,", "Response( status=status.HTTP_200_OK, data={ 'id': request.user.id, 'name': request.user.username, 'session': request.user.userprofile.get_session_id(), 'slug':", "user else None class SocialLoginHandler(APIView): \"\"\"View to authenticate users through", "None: if not user.user.is_active: return Response({ 'status': 'Unauthorized', 'message': 'User", "def post(self, request): uid = None backend = request.query_params.get('backend') user", "sa = SocialAccountLink() sa.user = user sa.social_id = aa.uid sa.type", "OAuth1 from rest_framework import parsers, renderers from rest_framework import status", "to find user account and deprecate psa 1. Look for", "access_token = \"\" access_token_secret = \"\" params = { 'client_id':", "() parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,) renderer_classes = (renderers.JSONRenderer,) serializer_class", "'code': request.data.get('code') } # Step 1. Exchange authorization code for", "code for access token. 
r = requests.get(access_token_url, params=params) token =", "user = _temp_reverse_user(uid, 'facebook', access_token, access_token_secret, r.text) elif backend in", "= _temp_reverse_user(uid, 'twitter', access_token, access_token_secret, payload) else: oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY,", "= (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,) renderer_classes = (renderers.JSONRenderer,) serializer_class = AuthTokenSerializer", "Token def post(self, request): return self.get(request) def get(self, request): if", "status=status.HTTP_400_BAD_REQUEST) class ObtainUser(APIView): throttle_classes = () permission_classes = () parser_classes", "throttle_classes = () permission_classes = () parser_classes = (parsers.FormParser, parsers.MultiPartParser,", "json from calendar import timegm from urllib.parse import parse_qsl import", "access token. r = requests.get(access_token_url, params=params) token = json.loads(r.text) #", "some magic here to find user account and deprecate psa", "from requests_oauthlib import OAuth1 from rest_framework import parsers, renderers from", "the current user. r = requests.get(graph_api_url, params=token) profile = json.loads(r.text)", "= 'https://graph.facebook.com/v2.3/me' access_token = \"\" access_token_secret = \"\" params =", "for access token. r = requests.post(access_token_url, data=payload) token = json.loads(r.text)", "social media.\"\"\" permission_classes = (AllowAny,) def post(self, request): uid =", "'session': request.user.userprofile.get_session_id(), 'slug': request.user.userprofile.slug, 'session': request.user.userprofile.get_session_id(), 'userRole': 'user', }) else:", "information about the current user. 
r = requests.get(people_api_url, headers=headers) profile", "oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, callback_uri=settings.TWITTER_CALLBACK_URL) r = requests.post(request_token_url, auth=oauth) access_token", "Exchange authorization code for access token. r = requests.post(access_token_url, data=payload)", "parse_qsl import requests from allauth.socialaccount import models as aamodels from", "= OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, callback_uri=settings.TWITTER_CALLBACK_URL) r = requests.post(request_token_url, auth=oauth) access_token =", "sa.social_id = uid sa.access_token = access_token sa.access_token_secret = access_token_secret sa.provider_data", "deprecate psa 1. Look for account in \"\"\" user =", "create social model') return user if user else None class", "self.get(request) def get(self, request): if request.user.is_authenticated(): return Response( status=status.HTTP_200_OK, data={", "import Response from rest_framework.views import APIView from rest_framework_jwt.authentication import JSONWebTokenAuthentication", "through social media.\"\"\" permission_classes = (AllowAny,) def post(self, request): uid", "backend in ['facebook']: access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token' graph_api_url = 'https://graph.facebook.com/v2.3/me' access_token", "= aa.uid sa.type = aa.provider sa.access_token = access_token sa.access_token_secret =", "'message': 'Authentication could not be performed with received data.' },", "def _temp_reverse_user(uid, provider, access_token, access_token_secret, payload): \"\"\" Do some magic", "sa.access_token_secret = access_token_secret sa.provider_data = payload sa.save() user = UserProfile.objects.get(id=sa.user.id)", "code for access token. 
r = requests.post(access_token_url, data=payload) token =", "aamodels.SocialAccount.objects.get(uid=uid) try: user = UserProfile.objects.get(user__id=aa.user_id) except UserProfile.DoesNotExist: print('Need to create", "return Response({ 'status': 'Unauthorized', 'message': 'User account disabled' }, status=status.HTTP_401_UNAUTHORIZED)", "(parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,) renderer_classes = (renderers.JSONRenderer,) serializer_class = AuthTokenSerializer model", "information about the current user. r = requests.get(graph_api_url, params=token) profile", "request): return self.get(request) def get(self, request): if request.user.is_authenticated(): return Response(", "\"\"\" Do some magic here to find user account and", "AuthTokenSerializer model = Token def post(self, request): return self.get(request) def", "profile.get('sub') user = _temp_reverse_user(uid, 'google', access_token, access_token_secret, r.text) if uid", "profile.get('id') user = _temp_reverse_user(uid, 'facebook', access_token, access_token_secret, r.text) elif backend", "from rest_framework.authtoken.models import Token from rest_framework.authtoken.serializers import AuthTokenSerializer from rest_framework.permissions", "data=payload) token = json.loads(r.text) headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])} #", "status=status.HTTP_200_OK, data={ 'id': request.user.id, 'name': request.user.username, 'session': request.user.userprofile.get_session_id(), 'slug': request.user.userprofile.slug,", "callback_uri=settings.TWITTER_CALLBACK_URL) r = requests.post(request_token_url, auth=oauth) access_token = dict(parse_qsl(r.text)) return Response(access_token)", "import timegm from urllib.parse import parse_qsl import requests from allauth.socialaccount", "access_token, access_token_secret, payload) else: oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, callback_uri=settings.TWITTER_CALLBACK_URL) 
r", "the current user. r = requests.get(people_api_url, headers=headers) profile = json.loads(r.text)", "request_token_url = 'https://api.twitter.com/oauth/request_token' access_token_url = 'https://api.twitter.com/oauth/access_token' access_token = \"\" access_token_secret", "sa.type = provider sa.social_id = uid sa.access_token = access_token sa.access_token_secret", "= UserProfile.objects.get(id=sa.user.id) except SocialAccountLink.DoesNotExist: # try allauth try: aa =", "about the current user. r = requests.get(people_api_url, headers=headers) profile =", ") response_data = { 'token': jwt_encode_handler(payload), 'session': user.get_session_id() } return", "return Response( status=status.HTTP_200_OK, data={ 'id': request.user.id, 'name': request.user.username, 'session': request.user.userprofile.get_session_id(),", "request.data.get('code') } # Step 1. Exchange authorization code for access", "except aamodels.SocialAccount.DoesNotExist: print('Need to create social model') return user if", "settings from spa.models import UserProfile from spa.models.socialaccountlink import SocialAccountLink def", "= _temp_reverse_user(uid, 'google', access_token, access_token_secret, r.text) if uid is not", "jwt_encode_handler from dss import settings from spa.models import UserProfile from", "r = requests.post(access_token_url, auth=auth) profile = dict(parse_qsl(r.text)) payload = json.dumps(profile)", "= (renderers.JSONRenderer,) serializer_class = AuthTokenSerializer model = Token def post(self,", "\"\" access_token_secret = \"\" params = { 'client_id': request.data.get('clientId'), 'redirect_uri':", "access_token = \"\" access_token_secret = \"\" payload = dict(client_id=request.data.get('clientId'), redirect_uri=request.data.get('redirectUri'),", "json.loads(r.text) access_token = token.get('access_token') uid = profile.get('id') user = _temp_reverse_user(uid,", "about the current user. 
r = requests.get(graph_api_url, params=token) profile =", "aamodels from requests_oauthlib import OAuth1 from rest_framework import parsers, renderers", "_temp_reverse_user(uid, 'google', access_token, access_token_secret, r.text) if uid is not None", "sa.provider_data = payload sa.save() except aamodels.SocialAccount.DoesNotExist: print('Need to create social", "JSONWebTokenAuthentication from rest_framework_jwt.settings import api_settings from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler", "= None try: sa = SocialAccountLink.objects.get(social_id=uid) sa.type = provider sa.social_id", "'https://graph.facebook.com/v2.3/oauth/access_token' graph_api_url = 'https://graph.facebook.com/v2.3/me' access_token = \"\" access_token_secret = \"\"", "jwt_payload_handler, jwt_encode_handler from dss import settings from spa.models import UserProfile", "the SocialAccountLink sa = SocialAccountLink() sa.user = user sa.social_id =", "= UserProfile.objects.get(user__id=aa.user_id) except UserProfile.DoesNotExist: print('Need to create UserProfile') # we", "profile.get('oauth_token_secret') user = _temp_reverse_user(uid, 'twitter', access_token, access_token_secret, payload) else: oauth", "timegm from urllib.parse import parse_qsl import requests from allauth.socialaccount import", "r = requests.get(people_api_url, headers=headers) profile = json.loads(r.text) uid = profile.get('sub')", "'message': 'User account disabled' }, status=status.HTTP_401_UNAUTHORIZED) payload = jwt_payload_handler(user.user) if", "'redirect_uri': request.data.get('redirectUri'), 'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET, 'code': request.data.get('code') } # Step 1.", "None class SocialLoginHandler(APIView): \"\"\"View to authenticate users through social media.\"\"\"", "rest_framework import status from rest_framework.authtoken.models import Token from rest_framework.authtoken.serializers import", "an allauth, create the SocialAccountLink sa = SocialAccountLink() 
sa.user =", "access_token sa.access_token_secret = access_token_secret sa.provider_data = payload sa.save() except aamodels.SocialAccount.DoesNotExist:", "requests.post(access_token_url, data=payload) token = json.loads(r.text) headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])}", "user account and deprecate psa 1. Look for account in", "post(self, request): uid = None backend = request.query_params.get('backend') user =", "auth=oauth) access_token = dict(parse_qsl(r.text)) return Response(access_token) elif backend in ['facebook']:", "if user else None class SocialLoginHandler(APIView): \"\"\"View to authenticate users", "Step 1. Exchange authorization code for access token. r =", "as aamodels from requests_oauthlib import OAuth1 from rest_framework import parsers,", "rest_framework.authtoken.serializers import AuthTokenSerializer from rest_framework.permissions import AllowAny from rest_framework.response import", "'client_id': request.data.get('clientId'), 'redirect_uri': request.data.get('redirectUri'), 'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET, 'code': request.data.get('code') } #", "access_token_secret, r.text) elif backend in ['google']: access_token_url = 'https://accounts.google.com/o/oauth2/token' people_api_url", "return Response({ 'status': 'Bad request', 'message': 'Authentication could not be", "params = { 'client_id': request.data.get('clientId'), 'redirect_uri': request.data.get('redirectUri'), 'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET, 'code':", "performed with received data.' 
}, status=status.HTTP_400_BAD_REQUEST) class ObtainUser(APIView): throttle_classes =", "get(self, request): if request.user.is_authenticated(): return Response( status=status.HTTP_200_OK, data={ 'id': request.user.id,", "user sa.social_id = aa.uid sa.type = aa.provider sa.access_token = access_token", "'name': request.user.username, 'session': request.user.userprofile.get_session_id(), 'slug': request.user.userprofile.slug, 'session': request.user.userprofile.get_session_id(), 'userRole': 'user',", "requests.get(graph_api_url, params=token) profile = json.loads(r.text) access_token = token.get('access_token') uid =", "= uid sa.access_token = access_token sa.access_token_secret = access_token_secret sa.provider_data =", "access_token = dict(parse_qsl(r.text)) return Response(access_token) elif backend in ['facebook']: access_token_url", "elif backend in ['google']: access_token_url = 'https://accounts.google.com/o/oauth2/token' people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect'", "headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])} # Step 2. 
Retrieve information", "= profile.get('oauth_token') access_token_secret = profile.get('oauth_token_secret') user = _temp_reverse_user(uid, 'twitter', access_token,", "from calendar import timegm from urllib.parse import parse_qsl import requests", "in ['google']: access_token_url = 'https://accounts.google.com/o/oauth2/token' people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect' access_token =", "= 'https://api.twitter.com/oauth/request_token' access_token_url = 'https://api.twitter.com/oauth/access_token' access_token = \"\" access_token_secret =", "rest_framework_jwt.settings import api_settings from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler from dss", "payload['orig_iat'] = timegm( datetime.datetime.utcnow().utctimetuple() ) response_data = { 'token': jwt_encode_handler(payload),", "{ 'token': jwt_encode_handler(payload), 'session': user.get_session_id() } return Response(response_data) return Response({", "Retrieve information about the current user. r = requests.get(people_api_url, headers=headers)", "sa.social_id = aa.uid sa.type = aa.provider sa.access_token = access_token sa.access_token_secret", "# we got an allauth, create the SocialAccountLink sa =", "got an allauth, create the SocialAccountLink sa = SocialAccountLink() sa.user", "'https://accounts.google.com/o/oauth2/token' people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect' access_token = \"\" access_token_secret = \"\"", "from rest_framework_jwt.settings import api_settings from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler from", "create the SocialAccountLink sa = SocialAccountLink() sa.user = user sa.social_id", "= requests.post(request_token_url, auth=oauth) access_token = dict(parse_qsl(r.text)) return Response(access_token) elif backend", "client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET, code=request.data.get('code'), grant_type='authorization_code') # Step 1. 
Exchange authorization code for", "current user. r = requests.get(people_api_url, headers=headers) profile = json.loads(r.text) uid", "import json from calendar import timegm from urllib.parse import parse_qsl", "users through social media.\"\"\" permission_classes = (AllowAny,) def post(self, request):", "aa = aamodels.SocialAccount.objects.get(uid=uid) try: user = UserProfile.objects.get(user__id=aa.user_id) except UserProfile.DoesNotExist: print('Need", "params=token) profile = json.loads(r.text) access_token = token.get('access_token') uid = profile.get('id')", "return user if user else None class SocialLoginHandler(APIView): \"\"\"View to", "'https://api.twitter.com/oauth/request_token' access_token_url = 'https://api.twitter.com/oauth/access_token' access_token = \"\" access_token_secret = \"\"", "try: sa = SocialAccountLink.objects.get(social_id=uid) sa.type = provider sa.social_id = uid", "authorization code for access token. r = requests.get(access_token_url, params=params) token", "sa.save() user = UserProfile.objects.get(id=sa.user.id) except SocialAccountLink.DoesNotExist: # try allauth try:", "to create social model') return user if user else None", "# Step 2. Retrieve information about the current user. r", "client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, callback_uri=settings.TWITTER_CALLBACK_URL) r = requests.post(request_token_url, auth=oauth) access_token = dict(parse_qsl(r.text)) return", "psa 1. Look for account in \"\"\" user = None", "user. 
r = requests.get(people_api_url, headers=headers) profile = json.loads(r.text) uid =", "status from rest_framework.authtoken.models import Token from rest_framework.authtoken.serializers import AuthTokenSerializer from", "= profile.get('sub') user = _temp_reverse_user(uid, 'google', access_token, access_token_secret, r.text) if", "import AllowAny from rest_framework.response import Response from rest_framework.views import APIView", "datetime import json from calendar import timegm from urllib.parse import", "'status': 'Unauthorized', 'message': 'User account disabled' }, status=status.HTTP_401_UNAUTHORIZED) payload =", "try: user = UserProfile.objects.get(user__id=aa.user_id) except UserProfile.DoesNotExist: print('Need to create UserProfile')", "import UserProfile from spa.models.socialaccountlink import SocialAccountLink def _temp_reverse_user(uid, provider, access_token,", "in ['twitter']: request_token_url = 'https://api.twitter.com/oauth/request_token' access_token_url = 'https://api.twitter.com/oauth/access_token' access_token =", "verifier=request.data.get('oauth_verifier')) r = requests.post(access_token_url, auth=auth) profile = dict(parse_qsl(r.text)) payload =", "json.dumps(profile) uid = profile.get('user_id') access_token = profile.get('oauth_token') access_token_secret = profile.get('oauth_token_secret')", "'https://www.googleapis.com/plus/v1/people/me/openIdConnect' access_token = \"\" access_token_secret = \"\" payload = dict(client_id=request.data.get('clientId'),", "auth=auth) profile = dict(parse_qsl(r.text)) payload = json.dumps(profile) uid = profile.get('user_id')", "user = None try: sa = SocialAccountLink.objects.get(social_id=uid) sa.type = provider", "(AllowAny,) def post(self, request): uid = None backend = request.query_params.get('backend')", "AuthTokenSerializer from rest_framework.permissions import AllowAny from rest_framework.response import Response from", "'Unauthorized', 'message': 'User account disabled' }, 
status=status.HTTP_401_UNAUTHORIZED) payload = jwt_payload_handler(user.user)", "_temp_reverse_user(uid, 'twitter', access_token, access_token_secret, payload) else: oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET,", "backend = request.query_params.get('backend') user = None if backend in ['twitter']:", "aamodels.SocialAccount.DoesNotExist: print('Need to create social model') return user if user", "dict(parse_qsl(r.text)) payload = json.dumps(profile) uid = profile.get('user_id') access_token = profile.get('oauth_token')", "account and deprecate psa 1. Look for account in \"\"\"", "= access_token sa.access_token_secret = access_token_secret sa.provider_data = payload sa.save() user", "access_token_secret, payload): \"\"\" Do some magic here to find user", "datetime.datetime.utcnow().utctimetuple() ) response_data = { 'token': jwt_encode_handler(payload), 'session': user.get_session_id() }", "{ 'client_id': request.data.get('clientId'), 'redirect_uri': request.data.get('redirectUri'), 'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET, 'code': request.data.get('code') }", "= user sa.social_id = aa.uid sa.type = aa.provider sa.access_token =", "backend in ['twitter']: request_token_url = 'https://api.twitter.com/oauth/request_token' access_token_url = 'https://api.twitter.com/oauth/access_token' access_token", "be performed with received data.' }, status=status.HTTP_400_BAD_REQUEST) class ObtainUser(APIView): throttle_classes", "magic here to find user account and deprecate psa 1.", "except SocialAccountLink.DoesNotExist: # try allauth try: aa = aamodels.SocialAccount.objects.get(uid=uid) try:", "we got an allauth, create the SocialAccountLink sa = SocialAccountLink()", "= _temp_reverse_user(uid, 'facebook', access_token, access_token_secret, r.text) elif backend in ['google']:", "} # Step 1. 
Exchange authorization code for access token.", "OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, resource_owner_key=request.data.get('oauth_token'), verifier=request.data.get('oauth_verifier')) r = requests.post(access_token_url, auth=auth) profile =", "import APIView from rest_framework_jwt.authentication import JSONWebTokenAuthentication from rest_framework_jwt.settings import api_settings", "import settings from spa.models import UserProfile from spa.models.socialaccountlink import SocialAccountLink", "UserProfile.objects.get(user__id=aa.user_id) except UserProfile.DoesNotExist: print('Need to create UserProfile') # we got", "social model') return user if user else None class SocialLoginHandler(APIView):", "request): if request.user.is_authenticated(): return Response( status=status.HTTP_200_OK, data={ 'id': request.user.id, 'name':", "model = Token def post(self, request): return self.get(request) def get(self,", "client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, resource_owner_key=request.data.get('oauth_token'), verifier=request.data.get('oauth_verifier')) r = requests.post(access_token_url, auth=auth) profile = dict(parse_qsl(r.text))", "= SocialAccountLink.objects.get(social_id=uid) sa.type = provider sa.social_id = uid sa.access_token =", "import JSONWebTokenAuthentication from rest_framework_jwt.settings import api_settings from rest_framework_jwt.utils import jwt_payload_handler,", "for account in \"\"\" user = None try: sa =", "try: aa = aamodels.SocialAccount.objects.get(uid=uid) try: user = UserProfile.objects.get(user__id=aa.user_id) except UserProfile.DoesNotExist:", "request): uid = None backend = request.query_params.get('backend') user = None", "serializer_class = AuthTokenSerializer model = Token def post(self, request): return", "authorization code for access token. 
r = requests.post(access_token_url, data=payload) token", "= payload sa.save() except aamodels.SocialAccount.DoesNotExist: print('Need to create social model')", "post(self, request): return self.get(request) def get(self, request): if request.user.is_authenticated(): return", "SocialAccountLink sa = SocialAccountLink() sa.user = user sa.social_id = aa.uid", "request.data.get('redirectUri'), 'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET, 'code': request.data.get('code') } # Step 1. Exchange", "renderer_classes = (renderers.JSONRenderer,) serializer_class = AuthTokenSerializer model = Token def", "payload sa.save() user = UserProfile.objects.get(id=sa.user.id) except SocialAccountLink.DoesNotExist: # try allauth", "import parse_qsl import requests from allauth.socialaccount import models as aamodels", "import AuthTokenSerializer from rest_framework.permissions import AllowAny from rest_framework.response import Response", "r.text) elif backend in ['google']: access_token_url = 'https://accounts.google.com/o/oauth2/token' people_api_url =", "None and user is not None: if not user.user.is_active: return", "'https://api.twitter.com/oauth/access_token' access_token = \"\" access_token_secret = \"\" if request.data.get('oauth_token') and", "user = _temp_reverse_user(uid, 'twitter', access_token, access_token_secret, payload) else: oauth =", "with received data.' 
}, status=status.HTTP_400_BAD_REQUEST) class ObtainUser(APIView): throttle_classes = ()", "ObtainUser(APIView): throttle_classes = () permission_classes = () parser_classes = (parsers.FormParser,", "requests.post(request_token_url, auth=oauth) access_token = dict(parse_qsl(r.text)) return Response(access_token) elif backend in", "\"\" access_token_secret = \"\" payload = dict(client_id=request.data.get('clientId'), redirect_uri=request.data.get('redirectUri'), client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET, code=request.data.get('code'),", "backend in ['google']: access_token_url = 'https://accounts.google.com/o/oauth2/token' people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect' access_token", "access_token_secret = \"\" if request.data.get('oauth_token') and request.data.get('oauth_verifier'): auth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY,", "AllowAny from rest_framework.response import Response from rest_framework.views import APIView from", "profile = dict(parse_qsl(r.text)) payload = json.dumps(profile) uid = profile.get('user_id') access_token", "import jwt_payload_handler, jwt_encode_handler from dss import settings from spa.models import", "payload = jwt_payload_handler(user.user) if api_settings.JWT_ALLOW_REFRESH: payload['orig_iat'] = timegm( datetime.datetime.utcnow().utctimetuple() )", "= timegm( datetime.datetime.utcnow().utctimetuple() ) response_data = { 'token': jwt_encode_handler(payload), 'session':", "access_token = token.get('access_token') uid = profile.get('id') user = _temp_reverse_user(uid, 'facebook',", "if request.data.get('oauth_token') and request.data.get('oauth_verifier'): auth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, resource_owner_key=request.data.get('oauth_token'), verifier=request.data.get('oauth_verifier'))", "rest_framework import parsers, renderers from rest_framework import status from rest_framework.authtoken.models", 
"request.user.is_authenticated(): return Response( status=status.HTTP_200_OK, data={ 'id': request.user.id, 'name': request.user.username, 'session':", "requests.get(access_token_url, params=params) token = json.loads(r.text) # Step 2. Retrieve information", "in ['facebook']: access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token' graph_api_url = 'https://graph.facebook.com/v2.3/me' access_token =", "= provider sa.social_id = uid sa.access_token = access_token sa.access_token_secret =", "provider, access_token, access_token_secret, payload): \"\"\" Do some magic here to", "(renderers.JSONRenderer,) serializer_class = AuthTokenSerializer model = Token def post(self, request):", "user = UserProfile.objects.get(user__id=aa.user_id) except UserProfile.DoesNotExist: print('Need to create UserProfile') #", "sa.access_token = access_token sa.access_token_secret = access_token_secret sa.provider_data = payload sa.save()", "}, status=status.HTTP_401_UNAUTHORIZED) payload = jwt_payload_handler(user.user) if api_settings.JWT_ALLOW_REFRESH: payload['orig_iat'] = timegm(", "requests_oauthlib import OAuth1 from rest_framework import parsers, renderers from rest_framework", "access_token_secret = \"\" payload = dict(client_id=request.data.get('clientId'), redirect_uri=request.data.get('redirectUri'), client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET, code=request.data.get('code'), grant_type='authorization_code')", "{'Authorization': 'Bearer {0}'.format(token['access_token'])} # Step 2. 
Retrieve information about the", "json.loads(r.text) uid = profile.get('sub') user = _temp_reverse_user(uid, 'google', access_token, access_token_secret,", "aa.uid sa.type = aa.provider sa.access_token = access_token sa.access_token_secret = access_token_secret", "= requests.get(graph_api_url, params=token) profile = json.loads(r.text) access_token = token.get('access_token') uid", "_temp_reverse_user(uid, 'facebook', access_token, access_token_secret, r.text) elif backend in ['google']: access_token_url", "access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token' graph_api_url = 'https://graph.facebook.com/v2.3/me' access_token = \"\" access_token_secret", "code=request.data.get('code'), grant_type='authorization_code') # Step 1. Exchange authorization code for access", "json.loads(r.text) headers = {'Authorization': 'Bearer {0}'.format(token['access_token'])} # Step 2. Retrieve", "# Step 1. Exchange authorization code for access token. r", "'status': 'Bad request', 'message': 'Authentication could not be performed with", "dss import settings from spa.models import UserProfile from spa.models.socialaccountlink import", "urllib.parse import parse_qsl import requests from allauth.socialaccount import models as", "access_token, access_token_secret, r.text) elif backend in ['google']: access_token_url = 'https://accounts.google.com/o/oauth2/token'", "= 'https://accounts.google.com/o/oauth2/token' people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect' access_token = \"\" access_token_secret =", "import models as aamodels from requests_oauthlib import OAuth1 from rest_framework", "request', 'message': 'Authentication could not be performed with received data.'", "r = requests.post(access_token_url, data=payload) token = json.loads(r.text) headers = {'Authorization':", "2. Retrieve information about the current user. 
r = requests.get(people_api_url,", "people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect' access_token = \"\" access_token_secret = \"\" payload", "'https://graph.facebook.com/v2.3/me' access_token = \"\" access_token_secret = \"\" params = {", "model') return user if user else None class SocialLoginHandler(APIView): \"\"\"View", "import status from rest_framework.authtoken.models import Token from rest_framework.authtoken.serializers import AuthTokenSerializer", "= OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, resource_owner_key=request.data.get('oauth_token'), verifier=request.data.get('oauth_verifier')) r = requests.post(access_token_url, auth=auth) profile", "allauth, create the SocialAccountLink sa = SocialAccountLink() sa.user = user", "= () parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,) renderer_classes = (renderers.JSONRenderer,)", "not be performed with received data.' }, status=status.HTTP_400_BAD_REQUEST) class ObtainUser(APIView):", "# try allauth try: aa = aamodels.SocialAccount.objects.get(uid=uid) try: user =", "user.get_session_id() } return Response(response_data) return Response({ 'status': 'Bad request', 'message':", "api_settings.JWT_ALLOW_REFRESH: payload['orig_iat'] = timegm( datetime.datetime.utcnow().utctimetuple() ) response_data = { 'token':", "uid = profile.get('id') user = _temp_reverse_user(uid, 'facebook', access_token, access_token_secret, r.text)", "create UserProfile') # we got an allauth, create the SocialAccountLink", "not user.user.is_active: return Response({ 'status': 'Unauthorized', 'message': 'User account disabled'", "\"\" payload = dict(client_id=request.data.get('clientId'), redirect_uri=request.data.get('redirectUri'), client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET, code=request.data.get('code'), grant_type='authorization_code') # Step", "from dss import settings from spa.models import UserProfile from 
spa.models.socialaccountlink", "token = json.loads(r.text) # Step 2. Retrieve information about the", "Look for account in \"\"\" user = None try: sa", "= profile.get('user_id') access_token = profile.get('oauth_token') access_token_secret = profile.get('oauth_token_secret') user =", "media.\"\"\" permission_classes = (AllowAny,) def post(self, request): uid = None", "= { 'client_id': request.data.get('clientId'), 'redirect_uri': request.data.get('redirectUri'), 'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET, 'code': request.data.get('code')", "try allauth try: aa = aamodels.SocialAccount.objects.get(uid=uid) try: user = UserProfile.objects.get(user__id=aa.user_id)", "current user. r = requests.get(graph_api_url, params=token) profile = json.loads(r.text) access_token", "'User account disabled' }, status=status.HTTP_401_UNAUTHORIZED) payload = jwt_payload_handler(user.user) if api_settings.JWT_ALLOW_REFRESH:", "Token from rest_framework.authtoken.serializers import AuthTokenSerializer from rest_framework.permissions import AllowAny from", "_temp_reverse_user(uid, provider, access_token, access_token_secret, payload): \"\"\" Do some magic here", "\"\"\" user = None try: sa = SocialAccountLink.objects.get(social_id=uid) sa.type =", "\"\"\"View to authenticate users through social media.\"\"\" permission_classes = (AllowAny,)", "for access token. r = requests.get(access_token_url, params=params) token = json.loads(r.text)", "from spa.models.socialaccountlink import SocialAccountLink def _temp_reverse_user(uid, provider, access_token, access_token_secret, payload):", "uid = None backend = request.query_params.get('backend') user = None if", "jwt_encode_handler(payload), 'session': user.get_session_id() } return Response(response_data) return Response({ 'status': 'Bad", "= json.loads(r.text) # Step 2. 
Retrieve information about the current", "['google']: access_token_url = 'https://accounts.google.com/o/oauth2/token' people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect' access_token = \"\"", "disabled' }, status=status.HTTP_401_UNAUTHORIZED) payload = jwt_payload_handler(user.user) if api_settings.JWT_ALLOW_REFRESH: payload['orig_iat'] =", "{0}'.format(token['access_token'])} # Step 2. Retrieve information about the current user.", "request.user.id, 'name': request.user.username, 'session': request.user.userprofile.get_session_id(), 'slug': request.user.userprofile.slug, 'session': request.user.userprofile.get_session_id(), 'userRole':", "spa.models import UserProfile from spa.models.socialaccountlink import SocialAccountLink def _temp_reverse_user(uid, provider,", "auth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, resource_owner_key=request.data.get('oauth_token'), verifier=request.data.get('oauth_verifier')) r = requests.post(access_token_url, auth=auth)", "rest_framework_jwt.authentication import JSONWebTokenAuthentication from rest_framework_jwt.settings import api_settings from rest_framework_jwt.utils import", "account in \"\"\" user = None try: sa = SocialAccountLink.objects.get(social_id=uid)", "Response from rest_framework.views import APIView from rest_framework_jwt.authentication import JSONWebTokenAuthentication from", "headers=headers) profile = json.loads(r.text) uid = profile.get('sub') user = _temp_reverse_user(uid,", "access_token = \"\" access_token_secret = \"\" if request.data.get('oauth_token') and request.data.get('oauth_verifier'):", "requests.post(access_token_url, auth=auth) profile = dict(parse_qsl(r.text)) payload = json.dumps(profile) uid =", "'google', access_token, access_token_secret, r.text) if uid is not None and", "import api_settings from rest_framework_jwt.utils import jwt_payload_handler, jwt_encode_handler from dss import", "APIView from 
rest_framework_jwt.authentication import JSONWebTokenAuthentication from rest_framework_jwt.settings import api_settings from", "= access_token_secret sa.provider_data = payload sa.save() user = UserProfile.objects.get(id=sa.user.id) except", "return self.get(request) def get(self, request): if request.user.is_authenticated(): return Response( status=status.HTTP_200_OK,", "r = requests.get(graph_api_url, params=token) profile = json.loads(r.text) access_token = token.get('access_token')", "'twitter', access_token, access_token_secret, payload) else: oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, callback_uri=settings.TWITTER_CALLBACK_URL)", "user if user else None class SocialLoginHandler(APIView): \"\"\"View to authenticate", "= None if backend in ['twitter']: request_token_url = 'https://api.twitter.com/oauth/request_token' access_token_url", "access_token_secret sa.provider_data = payload sa.save() user = UserProfile.objects.get(id=sa.user.id) except SocialAccountLink.DoesNotExist:", "from allauth.socialaccount import models as aamodels from requests_oauthlib import OAuth1", "= access_token sa.access_token_secret = access_token_secret sa.provider_data = payload sa.save() except", "1. Exchange authorization code for access token. 
r = requests.get(access_token_url,", "profile = json.loads(r.text) access_token = token.get('access_token') uid = profile.get('id') user", "= Token def post(self, request): return self.get(request) def get(self, request):", "from rest_framework.authtoken.serializers import AuthTokenSerializer from rest_framework.permissions import AllowAny from rest_framework.response", "renderers from rest_framework import status from rest_framework.authtoken.models import Token from", "= \"\" params = { 'client_id': request.data.get('clientId'), 'redirect_uri': request.data.get('redirectUri'), 'client_secret':", "\"\" if request.data.get('oauth_token') and request.data.get('oauth_verifier'): auth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, resource_owner_key=request.data.get('oauth_token'),", "find user account and deprecate psa 1. Look for account", "if api_settings.JWT_ALLOW_REFRESH: payload['orig_iat'] = timegm( datetime.datetime.utcnow().utctimetuple() ) response_data = {", "import requests from allauth.socialaccount import models as aamodels from requests_oauthlib", "user = UserProfile.objects.get(id=sa.user.id) except SocialAccountLink.DoesNotExist: # try allauth try: aa", "UserProfile.objects.get(id=sa.user.id) except SocialAccountLink.DoesNotExist: # try allauth try: aa = aamodels.SocialAccount.objects.get(uid=uid)", "resource_owner_key=request.data.get('oauth_token'), verifier=request.data.get('oauth_verifier')) r = requests.post(access_token_url, auth=auth) profile = dict(parse_qsl(r.text)) payload", "not None and user is not None: if not user.user.is_active:", "SocialAccountLink.objects.get(social_id=uid) sa.type = provider sa.social_id = uid sa.access_token = access_token", "None backend = request.query_params.get('backend') user = None if backend in", "payload): \"\"\" Do some magic here to find user account", "= { 'token': jwt_encode_handler(payload), 'session': user.get_session_id() } return Response(response_data) 
return", "requests.get(people_api_url, headers=headers) profile = json.loads(r.text) uid = profile.get('sub') user =", "access_token_secret, r.text) if uid is not None and user is", "here to find user account and deprecate psa 1. Look", "access_token_url = 'https://accounts.google.com/o/oauth2/token' people_api_url = 'https://www.googleapis.com/plus/v1/people/me/openIdConnect' access_token = \"\" access_token_secret", "= json.loads(r.text) uid = profile.get('sub') user = _temp_reverse_user(uid, 'google', access_token,", "class SocialLoginHandler(APIView): \"\"\"View to authenticate users through social media.\"\"\" permission_classes", "Exchange authorization code for access token. r = requests.get(access_token_url, params=params)", "models as aamodels from requests_oauthlib import OAuth1 from rest_framework import", "from rest_framework import status from rest_framework.authtoken.models import Token from rest_framework.authtoken.serializers", "and user is not None: if not user.user.is_active: return Response({", "= 'https://api.twitter.com/oauth/access_token' access_token = \"\" access_token_secret = \"\" if request.data.get('oauth_token')", "def get(self, request): if request.user.is_authenticated(): return Response( status=status.HTTP_200_OK, data={ 'id':", "data={ 'id': request.user.id, 'name': request.user.username, 'session': request.user.userprofile.get_session_id(), 'slug': request.user.userprofile.slug, 'session':", "= json.loads(r.text) access_token = token.get('access_token') uid = profile.get('id') user =", "parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,) renderer_classes = (renderers.JSONRenderer,) serializer_class =", "parsers.JSONParser,) renderer_classes = (renderers.JSONRenderer,) serializer_class = AuthTokenSerializer model = Token", "= AuthTokenSerializer model = Token def post(self, request): return self.get(request)", "1. Exchange authorization code for access token. 
r = requests.post(access_token_url,", "= \"\" payload = dict(client_id=request.data.get('clientId'), redirect_uri=request.data.get('redirectUri'), client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET, code=request.data.get('code'), grant_type='authorization_code') #", "\"\" access_token_secret = \"\" if request.data.get('oauth_token') and request.data.get('oauth_verifier'): auth =", "payload = json.dumps(profile) uid = profile.get('user_id') access_token = profile.get('oauth_token') access_token_secret", "rest_framework.views import APIView from rest_framework_jwt.authentication import JSONWebTokenAuthentication from rest_framework_jwt.settings import", "access_token_url = 'https://api.twitter.com/oauth/access_token' access_token = \"\" access_token_secret = \"\" if", "= aamodels.SocialAccount.objects.get(uid=uid) try: user = UserProfile.objects.get(user__id=aa.user_id) except UserProfile.DoesNotExist: print('Need to", "and request.data.get('oauth_verifier'): auth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, resource_owner_key=request.data.get('oauth_token'), verifier=request.data.get('oauth_verifier')) r =", "= request.query_params.get('backend') user = None if backend in ['twitter']: request_token_url", "request.data.get('oauth_token') and request.data.get('oauth_verifier'): auth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, resource_owner_key=request.data.get('oauth_token'), verifier=request.data.get('oauth_verifier')) r", "'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET, 'code': request.data.get('code') } # Step 1. Exchange authorization", "= dict(client_id=request.data.get('clientId'), redirect_uri=request.data.get('redirectUri'), client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET, code=request.data.get('code'), grant_type='authorization_code') # Step 1. 
Exchange", "None if backend in ['twitter']: request_token_url = 'https://api.twitter.com/oauth/request_token' access_token_url =", "UserProfile.DoesNotExist: print('Need to create UserProfile') # we got an allauth,", "= requests.post(access_token_url, data=payload) token = json.loads(r.text) headers = {'Authorization': 'Bearer", "not None: if not user.user.is_active: return Response({ 'status': 'Unauthorized', 'message':", "spa.models.socialaccountlink import SocialAccountLink def _temp_reverse_user(uid, provider, access_token, access_token_secret, payload): \"\"\"", "access_token_secret, payload) else: oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, callback_uri=settings.TWITTER_CALLBACK_URL) r =", "= 'https://www.googleapis.com/plus/v1/people/me/openIdConnect' access_token = \"\" access_token_secret = \"\" payload =", "['twitter']: request_token_url = 'https://api.twitter.com/oauth/request_token' access_token_url = 'https://api.twitter.com/oauth/access_token' access_token = \"\"", "payload = dict(client_id=request.data.get('clientId'), redirect_uri=request.data.get('redirectUri'), client_secret=settings.SOCIAL_AUTH_GOOGLE_OAUTH_SECRET, code=request.data.get('code'), grant_type='authorization_code') # Step 1.", "account disabled' }, status=status.HTTP_401_UNAUTHORIZED) payload = jwt_payload_handler(user.user) if api_settings.JWT_ALLOW_REFRESH: payload['orig_iat']", "access token. 
r = requests.post(access_token_url, data=payload) token = json.loads(r.text) headers", "is not None and user is not None: if not", "def post(self, request): return self.get(request) def get(self, request): if request.user.is_authenticated():", "\"\" params = { 'client_id': request.data.get('clientId'), 'redirect_uri': request.data.get('redirectUri'), 'client_secret': settings.SOCIAL_AUTH_FACEBOOK_SECRET,", "from rest_framework.response import Response from rest_framework.views import APIView from rest_framework_jwt.authentication", "uid sa.access_token = access_token sa.access_token_secret = access_token_secret sa.provider_data = payload", "to authenticate users through social media.\"\"\" permission_classes = (AllowAny,) def", "r = requests.post(request_token_url, auth=oauth) access_token = dict(parse_qsl(r.text)) return Response(access_token) elif", "print('Need to create UserProfile') # we got an allauth, create", "import parsers, renderers from rest_framework import status from rest_framework.authtoken.models import", "print('Need to create social model') return user if user else", "import datetime import json from calendar import timegm from urllib.parse", "request.data.get('oauth_verifier'): auth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, resource_owner_key=request.data.get('oauth_token'), verifier=request.data.get('oauth_verifier')) r = requests.post(access_token_url,", "uid = profile.get('sub') user = _temp_reverse_user(uid, 'google', access_token, access_token_secret, r.text)", "payload sa.save() except aamodels.SocialAccount.DoesNotExist: print('Need to create social model') return", "from urllib.parse import parse_qsl import requests from allauth.socialaccount import models", "SocialAccountLink.DoesNotExist: # try allauth try: aa = aamodels.SocialAccount.objects.get(uid=uid) try: user", "request.query_params.get('backend') user = None if backend in ['twitter']: request_token_url =", 
"status=status.HTTP_401_UNAUTHORIZED) payload = jwt_payload_handler(user.user) if api_settings.JWT_ALLOW_REFRESH: payload['orig_iat'] = timegm( datetime.datetime.utcnow().utctimetuple()", "sa.type = aa.provider sa.access_token = access_token sa.access_token_secret = access_token_secret sa.provider_data", "Response(access_token) elif backend in ['facebook']: access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token' graph_api_url =", "import Token from rest_framework.authtoken.serializers import AuthTokenSerializer from rest_framework.permissions import AllowAny", "sa.provider_data = payload sa.save() user = UserProfile.objects.get(id=sa.user.id) except SocialAccountLink.DoesNotExist: #", "dict(parse_qsl(r.text)) return Response(access_token) elif backend in ['facebook']: access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token'", "'Authentication could not be performed with received data.' }, status=status.HTTP_400_BAD_REQUEST)", "else: oauth = OAuth1(settings.SOCIAL_AUTH_TWITTER_KEY, client_secret=settings.SOCIAL_AUTH_TWITTER_SECRET, callback_uri=settings.TWITTER_CALLBACK_URL) r = requests.post(request_token_url, auth=oauth)", "r.text) if uid is not None and user is not", "= payload sa.save() user = UserProfile.objects.get(id=sa.user.id) except SocialAccountLink.DoesNotExist: # try", "access_token sa.access_token_secret = access_token_secret sa.provider_data = payload sa.save() user =", "'Bearer {0}'.format(token['access_token'])} # Step 2. 
Retrieve information about the current", "graph_api_url = 'https://graph.facebook.com/v2.3/me' access_token = \"\" access_token_secret = \"\" params", "provider sa.social_id = uid sa.access_token = access_token sa.access_token_secret = access_token_secret", "sa.access_token_secret = access_token_secret sa.provider_data = payload sa.save() except aamodels.SocialAccount.DoesNotExist: print('Need", "= access_token_secret sa.provider_data = payload sa.save() except aamodels.SocialAccount.DoesNotExist: print('Need to", "= dict(parse_qsl(r.text)) payload = json.dumps(profile) uid = profile.get('user_id') access_token =", "Response(response_data) return Response({ 'status': 'Bad request', 'message': 'Authentication could not", "grant_type='authorization_code') # Step 1. Exchange authorization code for access token.", "= token.get('access_token') uid = profile.get('id') user = _temp_reverse_user(uid, 'facebook', access_token,", "sa.user = user sa.social_id = aa.uid sa.type = aa.provider sa.access_token", "data.' }, status=status.HTTP_400_BAD_REQUEST) class ObtainUser(APIView): throttle_classes = () permission_classes =", "requests from allauth.socialaccount import models as aamodels from requests_oauthlib import", "else None class SocialLoginHandler(APIView): \"\"\"View to authenticate users through social", "rest_framework.permissions import AllowAny from rest_framework.response import Response from rest_framework.views import", "user = _temp_reverse_user(uid, 'google', access_token, access_token_secret, r.text) if uid is", "return Response(access_token) elif backend in ['facebook']: access_token_url = 'https://graph.facebook.com/v2.3/oauth/access_token' graph_api_url", "allauth try: aa = aamodels.SocialAccount.objects.get(uid=uid) try: user = UserProfile.objects.get(user__id=aa.user_id) except", "and deprecate psa 1. 
Look for account in \"\"\" user", "'facebook', access_token, access_token_secret, r.text) elif backend in ['google']: access_token_url =", "= requests.get(access_token_url, params=params) token = json.loads(r.text) # Step 2. Retrieve", "aa.provider sa.access_token = access_token sa.access_token_secret = access_token_secret sa.provider_data = payload", "= None backend = request.query_params.get('backend') user = None if backend", "= jwt_payload_handler(user.user) if api_settings.JWT_ALLOW_REFRESH: payload['orig_iat'] = timegm( datetime.datetime.utcnow().utctimetuple() ) response_data", "= (AllowAny,) def post(self, request): uid = None backend =", "uid = profile.get('user_id') access_token = profile.get('oauth_token') access_token_secret = profile.get('oauth_token_secret') user" ]
[ "@property def private(self): return self.anonymous or self._basic_info.get('isPrivate') @property def id(self):", "self._basic_info.get('id', 'Anonymous') @property def name(self): return self._basic_info.get('name') @property def username(self):", "# \"signedUrl\": \"\", # \"url\": \"\", # \"username\": \"disqus_FqhLpDGmTT\" #", "\"isCustom\": false, # \"large\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"permalink\":", "\"small\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # }", "\"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\", # \"small\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\"", "= details.json() if detail_json['code'] != 0: print(f'Problem with getting user", "import aiohttp from constants import API_KEY class User(object): def __init__(self,", "= author_info self._detailed_info = None async def load(self): async with", "\"response\": { # \"about\": \"\", # \"avatar\": { # \"cache\":", "0, # \"response\": { # \"about\": \"\", # \"avatar\": {", "# \"isPrivate\": true, # \"joinedAt\": \"2010-11-20T04:45:33\", # \"location\": \"\", #", "# \"reputation\": 3.5297520000000002, # \"reputationLabel\": \"High\", # \"signedUrl\": \"\", #", "self._detailed_info = detail_json['response'] @property def anonymous(self): return 'id' not in", "with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session: user_info = await session.get( 'https://disqus.com/api/3.0/users/details.json', params={'user':", "# \"profileUrl\": \"https://disqus.com/by/disqus_FqhLpDGmTT/\", # \"rep\": 3.5297520000000002, # \"reputation\": 3.5297520000000002, #", "print(\"WARNING: auto-loading user in async version of code!!!!\") details =", "# \"id\": \"137780765\", # 
\"isAnonymous\": false, # \"isPowerContributor\": false, #", "constants import API_KEY class User(object): def __init__(self, author_info): # \"author\":", "\"numFollowing\": 0, # \"numForumsFollowing\": 0, # \"numLikesReceived\": 8967, # \"numPosts\":", "} # } print(\"WARNING: auto-loading user in async version of", "https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY> # { # \"code\": 0, # \"response\": { #", "{ # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"isCustom\": false, # \"large\": {", "user details from user {self.id}') print(detail_json) self._detailed_info = detail_json['response'] @property", "# \"isPrivate\": false, # \"joinedAt\": \"2015-01-02T18:40:14\", # \"location\": \"\", #", "return self._basic_info.get('location') @property def joined_at(self): return self._basic_info.get('joinedAt') @property def profile_url(self):", "def location(self): return self._basic_info.get('location') @property def joined_at(self): return self._basic_info.get('joinedAt') @property", "# } # }, # \"disable3rdPartyTrackers\": false, # \"id\": \"137780765\",", "\"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # } # }, #", "\"signedUrl\": \"\", # \"url\": \"\", # \"username\": \"disqus_FqhLpDGmTT\" # }", "# \"username\": \"felix1999\" # }, self._basic_info = author_info self._detailed_info =", "\"isPrimary\": true, # \"isPrivate\": true, # \"joinedAt\": \"2010-11-20T04:45:33\", # \"location\":", "details from user {self.id}') print(detail_json) self._detailed_info = detail_json['response'] @property def", "# \"large\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" #", "= detail_json['response'] def _get_detailed_info(self): # 
https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY> # { # \"code\":", "\"\", # \"avatar\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"isCustom\": true,", "profile_url(self): return self._basic_info.get('profileUrl') @property def total_posts(self): if self._detailed_info is None:", "# \"numFollowers\": 2, # \"numFollowing\": 0, # \"numForumsFollowing\": 0, #", "\"isPrimary\": true, # \"isPrivate\": false, # \"joinedAt\": \"2015-01-02T18:40:14\", # \"location\":", "\"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # }, # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\", #", "\"isPrivate\": false, # \"joinedAt\": \"2015-01-02T18:40:14\", # \"location\": \"\", # \"name\":", "# \"about\": \"\", # \"avatar\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", #", "getting user details from user {self.id}') print(detail_json) self._detailed_info = detail_json['response']", "user {self.id}') print(detail_json) self._detailed_info = detail_json['response'] @property def anonymous(self): return", "def load(self): async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session: user_info = await", "\"felix1999\", # \"profileUrl\": \"https://disqus.com/by/felix1999/\", # \"signedUrl\": \"\", # \"url\": \"\",", "false, # \"isPowerContributor\": false, # \"isPrimary\": true, # \"isPrivate\": true,", "\"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # }, # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\", # \"small\": {", "0, # \"numForumsFollowing\": 0, # \"numLikesReceived\": 8967, # \"numPosts\": 4147,", "version of code!!!!\") details = requests.get( 'https://disqus.com/api/3.0/users/details.json', {'user': self.id, 'api_key':", "# }, # 
\"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\", # \"small\": { # \"cache\":", "\"\", # \"url\": \"\", # \"username\": \"felix1999\" # }, self._basic_info", "'https://disqus.com/api/3.0/users/details.json', params={'user': self.id, 'api_key': API_KEY} ) detail_json = await user_info.json()", "\"username\": \"felix1999\" # }, self._basic_info = author_info self._detailed_info = None", "user {self.id}') print(detail_json) self._detailed_info = detail_json['response'] def _get_detailed_info(self): # https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY>", "details.json() if detail_json['code'] != 0: print(f'Problem with getting user details", "'Anonymous') @property def name(self): return self._basic_info.get('name') @property def username(self): return", "def user_info_row(self): return [ self.id, self.name, self.username, self.total_posts, self.total_likes, self.location,", "\"\", # \"username\": \"felix1999\" # }, self._basic_info = author_info self._detailed_info", "@property def name(self): return self._basic_info.get('name') @property def username(self): return self._basic_info.get('username')", "\"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # } # }, # \"disable3rdPartyTrackers\": false, # \"id\":", "async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session: user_info = await session.get( 'https://disqus.com/api/3.0/users/details.json',", "params={'user': self.id, 'api_key': API_KEY} ) detail_json = await user_info.json() if", "as session: user_info = await session.get( 'https://disqus.com/api/3.0/users/details.json', params={'user': self.id, 'api_key':", "\"large\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # },", "{self.id}') print(detail_json) self._detailed_info = detail_json['response'] def _get_detailed_info(self): # 
https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY> #", "true, # \"isPrivate\": true, # \"joinedAt\": \"2010-11-20T04:45:33\", # \"location\": \"\",", "# \"signedUrl\": \"\", # \"url\": \"\", # \"username\": \"felix1999\" #", "\"code\": 0, # \"response\": { # \"about\": \"\", # \"avatar\":", "\"reputation\": 3.5297520000000002, # \"reputationLabel\": \"High\", # \"signedUrl\": \"\", # \"url\":", "# \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # }, # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\", # \"small\":", "\"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\", # \"small\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551\", # \"permalink\":", "3.5297520000000002, # \"reputationLabel\": \"High\", # \"signedUrl\": \"\", # \"url\": \"\",", "# } print(\"WARNING: auto-loading user in async version of code!!!!\")", "private(self): return self.anonymous or self._basic_info.get('isPrivate') @property def id(self): if self.private:", "None: self._get_detailed_info() return self._detailed_info.get('numPosts') @property def total_likes(self): if self._detailed_info is", "await user_info.json() if detail_json['code'] != 0: print(f'Problem with getting user", "true, # \"large\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\"", "print(detail_json) self._detailed_info = detail_json['response'] def _get_detailed_info(self): # https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY> # {", "self._basic_info.get('username') @property def location(self): return self._basic_info.get('location') @property def joined_at(self): return", "\"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # }, # \"permalink\": 
\"https://disqus.com/api/users/avatars/felix1999.jpg\", #", "'api_key': API_KEY} ) detail_json = details.json() if detail_json['code'] != 0:", "# \"small\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar32.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" #", "{ # \"about\": \"\", # \"avatar\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\",", "self._basic_info.get('location') @property def joined_at(self): return self._basic_info.get('joinedAt') @property def profile_url(self): return", "# \"isCustom\": true, # \"large\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", #", "None async def load(self): async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session: user_info", "author_info): # \"author\": { # \"about\": \"\", # \"avatar\": {", "user_info.json() if detail_json['code'] != 0: print(f'Problem with getting user details", "\"username\": \"disqus_FqhLpDGmTT\" # } # } print(\"WARNING: auto-loading user in", "\"\", # \"url\": \"\", # \"username\": \"disqus_FqhLpDGmTT\" # } #", "author_info self._detailed_info = None async def load(self): async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False))", "\"url\": \"\", # \"username\": \"felix1999\" # }, self._basic_info = author_info", "if self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numPosts') @property def total_likes(self):", "async def load(self): async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session: user_info =", "# \"rep\": 3.5297520000000002, # \"reputation\": 3.5297520000000002, # \"reputationLabel\": \"High\", #", "# \"isPrimary\": true, # \"isPrivate\": true, # \"joinedAt\": \"2010-11-20T04:45:33\", #", "}, # \"disable3rdPartyTrackers\": false, # \"id\": \"137780765\", # \"isAnonymous\": false,", "user details from user {self.id}') print(detail_json) 
self._detailed_info = detail_json['response'] def", "in self._basic_info @property def private(self): return self.anonymous or self._basic_info.get('isPrivate') @property", "return self.anonymous or self._basic_info.get('isPrivate') @property def id(self): if self.private: return", "@property def username(self): return self._basic_info.get('username') @property def location(self): return self._basic_info.get('location')", "@property def anonymous(self): return 'id' not in self._basic_info @property def", "# \"isPowerContributor\": false, # \"isPrimary\": true, # \"isPrivate\": false, #", "anonymous(self): return 'id' not in self._basic_info @property def private(self): return", "return self._basic_info.get('profileUrl') @property def total_posts(self): if self._detailed_info is None: self._get_detailed_info()", "from constants import API_KEY class User(object): def __init__(self, author_info): #", "# https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY> # { # \"code\": 0, # \"response\": {", "self.anonymous or self._basic_info.get('isPrivate') @property def id(self): if self.private: return 'Private'", "\"numForumsFollowing\": 0, # \"numLikesReceived\": 8967, # \"numPosts\": 4147, # \"profileUrl\":", "self._basic_info.get('joinedAt') @property def profile_url(self): return self._basic_info.get('profileUrl') @property def total_posts(self): if", "# \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # } # }, # \"disable3rdPartyTrackers\": false,", "{ # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"isCustom\": true, # \"large\": {", "\"https://disqus.com/api/users/avatars/felix1999.jpg\" # }, # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\", # \"small\": { #", "# \"small\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" #", "id(self): if 
self.private: return 'Private' return self._basic_info.get('id', 'Anonymous') @property def", "\"https://disqus.com/api/users/avatars/felix1999.jpg\" # } # }, # \"disable3rdPartyTrackers\": false, # \"id\":", "# \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar32.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # } # },", "# \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # }, # \"permalink\":", "\"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\", # \"small\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar32.png\", # \"permalink\":", "false, # \"id\": \"137780765\", # \"isAnonymous\": false, # \"isPowerContributor\": false,", "self._basic_info.get('isPrivate') @property def id(self): if self.private: return 'Private' return self._basic_info.get('id',", "\"Bob\", # \"numFollowers\": 2, # \"numFollowing\": 0, # \"numForumsFollowing\": 0,", "\"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"isCustom\": false, # \"large\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\",", "false, # \"id\": \"5472588\", # \"isAnonymous\": false, # \"isPowerContributor\": false,", "}, # \"disable3rdPartyTrackers\": false, # \"id\": \"5472588\", # \"isAnonymous\": false,", "3.5297520000000002, # \"reputation\": 3.5297520000000002, # \"reputationLabel\": \"High\", # \"signedUrl\": \"\",", "\"avatar\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"isCustom\": false, # \"large\":", "\"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # }, # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\", # \"small\": {", "__init__(self, author_info): # \"author\": { # \"about\": \"\", # \"avatar\":", "}, self._basic_info = author_info self._detailed_info = None async def load(self):", "\"id\": \"5472588\", # \"isAnonymous\": false, # 
\"isPowerContributor\": false, # \"isPrimary\":", "\"about\": \"\", # \"avatar\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"isCustom\":", "\"joinedAt\": \"2010-11-20T04:45:33\", # \"location\": \"\", # \"name\": \"felix1999\", # \"profileUrl\":", "\"location\": \"\", # \"name\": \"Bob\", # \"numFollowers\": 2, # \"numFollowing\":", "\"https://disqus.com/by/felix1999/\", # \"signedUrl\": \"\", # \"url\": \"\", # \"username\": \"felix1999\"", "= await session.get( 'https://disqus.com/api/3.0/users/details.json', params={'user': self.id, 'api_key': API_KEY} ) detail_json", "{self.id}') print(detail_json) self._detailed_info = detail_json['response'] @property def anonymous(self): return 'id'", "\"rep\": 3.5297520000000002, # \"reputation\": 3.5297520000000002, # \"reputationLabel\": \"High\", # \"signedUrl\":", "def __init__(self, author_info): # \"author\": { # \"about\": \"\", #", "return 'id' not in self._basic_info @property def private(self): return self.anonymous", "\"large\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # },", "# \"location\": \"\", # \"name\": \"felix1999\", # \"profileUrl\": \"https://disqus.com/by/felix1999/\", #", "# \"numFollowing\": 0, # \"numForumsFollowing\": 0, # \"numLikesReceived\": 8967, #", "@property def profile_url(self): return self._basic_info.get('profileUrl') @property def total_posts(self): if self._detailed_info", "# \"code\": 0, # \"response\": { # \"about\": \"\", #", "\"disable3rdPartyTrackers\": false, # \"id\": \"5472588\", # \"isAnonymous\": false, # \"isPowerContributor\":", "# \"large\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" #", "details = requests.get( 'https://disqus.com/api/3.0/users/details.json', {'user': self.id, 'api_key': API_KEY} ) detail_json", "\"\", # \"name\": \"Bob\", # 
\"numFollowers\": 2, # \"numFollowing\": 0,", "# }, # \"disable3rdPartyTrackers\": false, # \"id\": \"5472588\", # \"isAnonymous\":", "self.id, 'api_key': API_KEY} ) detail_json = details.json() if detail_json['code'] !=", "API_KEY} ) detail_json = details.json() if detail_json['code'] != 0: print(f'Problem", "# \"location\": \"\", # \"name\": \"Bob\", # \"numFollowers\": 2, #", "# \"joinedAt\": \"2010-11-20T04:45:33\", # \"location\": \"\", # \"name\": \"felix1999\", #", "def _get_detailed_info(self): # https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY> # { # \"code\": 0, #", "{ # \"about\": \"\", # \"avatar\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\",", "\"https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # } # }, # \"disable3rdPartyTrackers\":", "@property def id(self): if self.private: return 'Private' return self._basic_info.get('id', 'Anonymous')", "user in async version of code!!!!\") details = requests.get( 'https://disqus.com/api/3.0/users/details.json',", "# \"isPowerContributor\": false, # \"isPrimary\": true, # \"isPrivate\": true, #", "print(f'Problem with getting user details from user {self.id}') print(detail_json) self._detailed_info", "= detail_json['response'] @property def anonymous(self): return 'id' not in self._basic_info", "!= 0: print(f'Problem with getting user details from user {self.id}')", "# \"id\": \"5472588\", # \"isAnonymous\": false, # \"isPowerContributor\": false, #", "\"joinedAt\": \"2015-01-02T18:40:14\", # \"location\": \"\", # \"name\": \"Bob\", # \"numFollowers\":", "from user {self.id}') print(detail_json) self._detailed_info = detail_json['response'] def _get_detailed_info(self): #", "\"disqus_FqhLpDGmTT\" # } # } print(\"WARNING: auto-loading user in async", "None: self._get_detailed_info() return self._detailed_info.get('numLikesReceived') 
@property def user_info_row(self): return [ self.id,", "\"cache\": \"//a.disquscdn.com/1519942534/images/noavatar32.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # } # }, #", "self._get_detailed_info() return self._detailed_info.get('numPosts') @property def total_likes(self): if self._detailed_info is None:", "self._basic_info = author_info self._detailed_info = None async def load(self): async", "false, # \"isPrimary\": true, # \"isPrivate\": false, # \"joinedAt\": \"2015-01-02T18:40:14\",", "# } # }, # \"disable3rdPartyTrackers\": false, # \"id\": \"5472588\",", "'api_key': API_KEY} ) detail_json = await user_info.json() if detail_json['code'] !=", "aiohttp from constants import API_KEY class User(object): def __init__(self, author_info):", "8967, # \"numPosts\": 4147, # \"profileUrl\": \"https://disqus.com/by/disqus_FqhLpDGmTT/\", # \"rep\": 3.5297520000000002,", "of code!!!!\") details = requests.get( 'https://disqus.com/api/3.0/users/details.json', {'user': self.id, 'api_key': API_KEY}", "\"137780765\", # \"isAnonymous\": false, # \"isPowerContributor\": false, # \"isPrimary\": true,", "\"isPrivate\": true, # \"joinedAt\": \"2010-11-20T04:45:33\", # \"location\": \"\", # \"name\":", "# \"url\": \"\", # \"username\": \"disqus_FqhLpDGmTT\" # } # }", "aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session: user_info = await session.get( 'https://disqus.com/api/3.0/users/details.json', params={'user': self.id,", "# \"about\": \"\", # \"avatar\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", #", "@property def joined_at(self): return self._basic_info.get('joinedAt') @property def profile_url(self): return self._basic_info.get('profileUrl')", "return self._basic_info.get('joinedAt') @property def profile_url(self): return self._basic_info.get('profileUrl') @property def total_posts(self):", "API_KEY class User(object): def __init__(self, author_info): # \"author\": { #", "{ # 
\"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # }, #", "# }, # \"disable3rdPartyTrackers\": false, # \"id\": \"137780765\", # \"isAnonymous\":", "await session.get( 'https://disqus.com/api/3.0/users/details.json', params={'user': self.id, 'api_key': API_KEY} ) detail_json =", "detail_json['response'] @property def anonymous(self): return 'id' not in self._basic_info @property", "# \"username\": \"disqus_FqhLpDGmTT\" # } # } print(\"WARNING: auto-loading user", "# } # } print(\"WARNING: auto-loading user in async version", "\"name\": \"Bob\", # \"numFollowers\": 2, # \"numFollowing\": 0, # \"numForumsFollowing\":", "class User(object): def __init__(self, author_info): # \"author\": { # \"about\":", "# \"profileUrl\": \"https://disqus.com/by/felix1999/\", # \"signedUrl\": \"\", # \"url\": \"\", #", "requests import aiohttp from constants import API_KEY class User(object): def", "# \"author\": { # \"about\": \"\", # \"avatar\": { #", "return self._detailed_info.get('numLikesReceived') @property def user_info_row(self): return [ self.id, self.name, self.username,", "{ # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # } #", "\"numLikesReceived\": 8967, # \"numPosts\": 4147, # \"profileUrl\": \"https://disqus.com/by/disqus_FqhLpDGmTT/\", # \"rep\":", "\"2015-01-02T18:40:14\", # \"location\": \"\", # \"name\": \"Bob\", # \"numFollowers\": 2,", "\"profileUrl\": \"https://disqus.com/by/felix1999/\", # \"signedUrl\": \"\", # \"url\": \"\", # \"username\":", "self._detailed_info.get('numPosts') @property def total_likes(self): if self._detailed_info is None: self._get_detailed_info() return", "# \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"isCustom\": false, # \"large\": { #", "\"name\": \"felix1999\", # \"profileUrl\": 
\"https://disqus.com/by/felix1999/\", # \"signedUrl\": \"\", # \"url\":", "detail_json = await user_info.json() if detail_json['code'] != 0: print(f'Problem with", "def username(self): return self._basic_info.get('username') @property def location(self): return self._basic_info.get('location') @property", "\"isPowerContributor\": false, # \"isPrimary\": true, # \"isPrivate\": true, # \"joinedAt\":", "# \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # }, # \"permalink\":", "import requests import aiohttp from constants import API_KEY class User(object):", "# }, self._basic_info = author_info self._detailed_info = None async def", "detail_json = details.json() if detail_json['code'] != 0: print(f'Problem with getting", "\"//a.disquscdn.com/1519942534/images/noavatar32.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # } # }, # \"disable3rdPartyTrackers\":", "user_info_row(self): return [ self.id, self.name, self.username, self.total_posts, self.total_likes, self.location, self.joined_at,", "_get_detailed_info(self): # https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY> # { # \"code\": 0, # \"response\":", "requests.get( 'https://disqus.com/api/3.0/users/details.json', {'user': self.id, 'api_key': API_KEY} ) detail_json = details.json()", "\"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # }, # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\",", "false, # \"isPrimary\": true, # \"isPrivate\": true, # \"joinedAt\": \"2010-11-20T04:45:33\",", "\"author\": { # \"about\": \"\", # \"avatar\": { # \"cache\":", "# \"isCustom\": false, # \"large\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", #", "\"small\": { # \"cache\": 
\"//a.disquscdn.com/1519942534/images/noavatar32.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # }", "\"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"isCustom\": false, # \"large\": { # \"cache\":", "import API_KEY class User(object): def __init__(self, author_info): # \"author\": {", "= await user_info.json() if detail_json['code'] != 0: print(f'Problem with getting", "total_posts(self): if self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numPosts') @property def", "# \"response\": { # \"about\": \"\", # \"avatar\": { #", "not in self._basic_info @property def private(self): return self.anonymous or self._basic_info.get('isPrivate')", "# \"disable3rdPartyTrackers\": false, # \"id\": \"5472588\", # \"isAnonymous\": false, #", "# \"joinedAt\": \"2015-01-02T18:40:14\", # \"location\": \"\", # \"name\": \"Bob\", #", "joined_at(self): return self._basic_info.get('joinedAt') @property def profile_url(self): return self._basic_info.get('profileUrl') @property def", "} # }, # \"disable3rdPartyTrackers\": false, # \"id\": \"5472588\", #", "# \"avatar\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"isCustom\": false, #", "@property def user_info_row(self): return [ self.id, self.name, self.username, self.total_posts, self.total_likes,", "# \"numForumsFollowing\": 0, # \"numLikesReceived\": 8967, # \"numPosts\": 4147, #", "# \"numLikesReceived\": 8967, # \"numPosts\": 4147, # \"profileUrl\": \"https://disqus.com/by/disqus_FqhLpDGmTT/\", #", "\"5472588\", # \"isAnonymous\": false, # \"isPowerContributor\": false, # \"isPrimary\": true,", "\"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # }, # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\", # \"small\": { #", "'Private' return self._basic_info.get('id', 'Anonymous') @property def name(self): return self._basic_info.get('name') @property", "\"felix1999\" # }, 
self._basic_info = author_info self._detailed_info = None async", "\"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"isCustom\": true, # \"large\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\",", "return self._basic_info.get('id', 'Anonymous') @property def name(self): return self._basic_info.get('name') @property def", "@property def location(self): return self._basic_info.get('location') @property def joined_at(self): return self._basic_info.get('joinedAt')", "self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numPosts') @property def total_likes(self): if", "\"https://disqus.com/by/disqus_FqhLpDGmTT/\", # \"rep\": 3.5297520000000002, # \"reputation\": 3.5297520000000002, # \"reputationLabel\": \"High\",", "is None: self._get_detailed_info() return self._detailed_info.get('numPosts') @property def total_likes(self): if self._detailed_info", "= None async def load(self): async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:", "true, # \"isPrivate\": false, # \"joinedAt\": \"2015-01-02T18:40:14\", # \"location\": \"\",", "\"url\": \"\", # \"username\": \"disqus_FqhLpDGmTT\" # } # } print(\"WARNING:", "with getting user details from user {self.id}') print(detail_json) self._detailed_info =", "\"High\", # \"signedUrl\": \"\", # \"url\": \"\", # \"username\": \"disqus_FqhLpDGmTT\"", "[ self.id, self.name, self.username, self.total_posts, self.total_likes, self.location, self.joined_at, self.profile_url ]", "'https://disqus.com/api/3.0/users/details.json', {'user': self.id, 'api_key': API_KEY} ) detail_json = details.json() if", "# \"reputationLabel\": \"High\", # \"signedUrl\": \"\", # \"url\": \"\", #", "User(object): def __init__(self, author_info): # \"author\": { # \"about\": \"\",", "\"location\": \"\", # \"name\": \"felix1999\", # \"profileUrl\": \"https://disqus.com/by/felix1999/\", # \"signedUrl\":", "def 
id(self): if self.private: return 'Private' return self._basic_info.get('id', 'Anonymous') @property", "load(self): async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session: user_info = await session.get(", "username(self): return self._basic_info.get('username') @property def location(self): return self._basic_info.get('location') @property def", "self.private: return 'Private' return self._basic_info.get('id', 'Anonymous') @property def name(self): return", "\"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # } # }, # \"disable3rdPartyTrackers\": false, #", "user_info = await session.get( 'https://disqus.com/api/3.0/users/details.json', params={'user': self.id, 'api_key': API_KEY} )", "async version of code!!!!\") details = requests.get( 'https://disqus.com/api/3.0/users/details.json', {'user': self.id,", "or self._basic_info.get('isPrivate') @property def id(self): if self.private: return 'Private' return", "\"isCustom\": true, # \"large\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"permalink\":", "2, # \"numFollowing\": 0, # \"numForumsFollowing\": 0, # \"numLikesReceived\": 8967,", "# \"isPrimary\": true, # \"isPrivate\": false, # \"joinedAt\": \"2015-01-02T18:40:14\", #", "def total_likes(self): if self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numLikesReceived') @property", "\"signedUrl\": \"\", # \"url\": \"\", # \"username\": \"felix1999\" # },", "details from user {self.id}') print(detail_json) self._detailed_info = detail_json['response'] def _get_detailed_info(self):", "if self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numLikesReceived') @property def user_info_row(self):", "}, # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\", # \"small\": { # \"cache\": 
\"https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551\",", "\"reputationLabel\": \"High\", # \"signedUrl\": \"\", # \"url\": \"\", # \"username\":", "return self._basic_info.get('username') @property def location(self): return self._basic_info.get('location') @property def joined_at(self):", "\"https://disqus.com/api/users/avatars/felix1999.jpg\", # \"small\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar32.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\"", "false, # \"large\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\"", "\"\", # \"username\": \"disqus_FqhLpDGmTT\" # } # } print(\"WARNING: auto-loading", "4147, # \"profileUrl\": \"https://disqus.com/by/disqus_FqhLpDGmTT/\", # \"rep\": 3.5297520000000002, # \"reputation\": 3.5297520000000002,", "total_likes(self): if self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numLikesReceived') @property def", "in async version of code!!!!\") details = requests.get( 'https://disqus.com/api/3.0/users/details.json', {'user':", "\"profileUrl\": \"https://disqus.com/by/disqus_FqhLpDGmTT/\", # \"rep\": 3.5297520000000002, # \"reputation\": 3.5297520000000002, # \"reputationLabel\":", "# \"isAnonymous\": false, # \"isPowerContributor\": false, # \"isPrimary\": true, #", "def profile_url(self): return self._basic_info.get('profileUrl') @property def total_posts(self): if self._detailed_info is", "\"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"isCustom\": true, # \"large\": { # \"cache\":", "print(detail_json) self._detailed_info = detail_json['response'] @property def anonymous(self): return 'id' not", "name(self): return self._basic_info.get('name') @property def username(self): return self._basic_info.get('username') @property def", "\"numFollowers\": 2, # \"numFollowing\": 0, # 
\"numForumsFollowing\": 0, # \"numLikesReceived\":", "detail_json['code'] != 0: print(f'Problem with getting user details from user", "true, # \"joinedAt\": \"2010-11-20T04:45:33\", # \"location\": \"\", # \"name\": \"felix1999\",", "@property def total_likes(self): if self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numLikesReceived')", "def anonymous(self): return 'id' not in self._basic_info @property def private(self):", "# \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # } # }, # \"disable3rdPartyTrackers\": false,", "@property def total_posts(self): if self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numPosts')", "API_KEY} ) detail_json = await user_info.json() if detail_json['code'] != 0:", "# \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # } # },", "if detail_json['code'] != 0: print(f'Problem with getting user details from", "0: print(f'Problem with getting user details from user {self.id}') print(detail_json)", "{'user': self.id, 'api_key': API_KEY} ) detail_json = details.json() if detail_json['code']", "# \"url\": \"\", # \"username\": \"felix1999\" # }, self._basic_info =", "from user {self.id}') print(detail_json) self._detailed_info = detail_json['response'] @property def anonymous(self):", "# \"name\": \"felix1999\", # \"profileUrl\": \"https://disqus.com/by/felix1999/\", # \"signedUrl\": \"\", #", "detail_json['response'] def _get_detailed_info(self): # https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY> # { # \"code\": 0,", "false, # \"isPowerContributor\": false, # \"isPrimary\": true, # \"isPrivate\": false,", "} print(\"WARNING: auto-loading user in async version of code!!!!\") details", "'id' not in self._basic_info @property def private(self): return self.anonymous or", "is None: 
self._get_detailed_info() return self._detailed_info.get('numLikesReceived') @property def user_info_row(self): return [", "self._detailed_info.get('numLikesReceived') @property def user_info_row(self): return [ self.id, self.name, self.username, self.total_posts,", "def total_posts(self): if self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numPosts') @property", "self._detailed_info is None: self._get_detailed_info() return self._detailed_info.get('numLikesReceived') @property def user_info_row(self): return", "\"disable3rdPartyTrackers\": false, # \"id\": \"137780765\", # \"isAnonymous\": false, # \"isPowerContributor\":", "# \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\", # \"small\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551\", #", "\"2010-11-20T04:45:33\", # \"location\": \"\", # \"name\": \"felix1999\", # \"profileUrl\": \"https://disqus.com/by/felix1999/\",", "return self._basic_info.get('name') @property def username(self): return self._basic_info.get('username') @property def location(self):", "0, # \"numLikesReceived\": 8967, # \"numPosts\": 4147, # \"profileUrl\": \"https://disqus.com/by/disqus_FqhLpDGmTT/\",", "\"\", # \"avatar\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar92.png\", # \"isCustom\": false,", "\"numPosts\": 4147, # \"profileUrl\": \"https://disqus.com/by/disqus_FqhLpDGmTT/\", # \"rep\": 3.5297520000000002, # \"reputation\":", "# \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"isCustom\": true, # \"large\": { #", "self._get_detailed_info() return self._detailed_info.get('numLikesReceived') @property def user_info_row(self): return [ self.id, self.name,", "def joined_at(self): return self._basic_info.get('joinedAt') @property def profile_url(self): return self._basic_info.get('profileUrl') @property", "# \"permalink\": 
\"https://disqus.com/api/users/avatars/felix1999.jpg\", # \"small\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar32.png\", #", "\"avatar\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"isCustom\": true, # \"large\":", "self._detailed_info = None async def load(self): async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as", "# \"avatar\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"isCustom\": true, #", "self._basic_info @property def private(self): return self.anonymous or self._basic_info.get('isPrivate') @property def", "# }, # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\", # \"small\": { # \"cache\":", "\"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # }, # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\",", "if self.private: return 'Private' return self._basic_info.get('id', 'Anonymous') @property def name(self):", "# \"disable3rdPartyTrackers\": false, # \"id\": \"137780765\", # \"isAnonymous\": false, #", "return self._detailed_info.get('numPosts') @property def total_likes(self): if self._detailed_info is None: self._get_detailed_info()", "# \"numPosts\": 4147, # \"profileUrl\": \"https://disqus.com/by/disqus_FqhLpDGmTT/\", # \"rep\": 3.5297520000000002, #", "\"id\": \"137780765\", # \"isAnonymous\": false, # \"isPowerContributor\": false, # \"isPrimary\":", "self._basic_info.get('name') @property def username(self): return self._basic_info.get('username') @property def location(self): return", "\"about\": \"\", # \"avatar\": { # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"isCustom\":", "# \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # }, # 
\"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\", # \"small\":", "def name(self): return self._basic_info.get('name') @property def username(self): return self._basic_info.get('username') @property", "\"isAnonymous\": false, # \"isPowerContributor\": false, # \"isPrimary\": true, # \"isPrivate\":", "self._detailed_info = detail_json['response'] def _get_detailed_info(self): # https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=<KEY> # { #", "{ # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar32.png\", # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # } #", "self._basic_info.get('profileUrl') @property def total_posts(self): if self._detailed_info is None: self._get_detailed_info() return", "return [ self.id, self.name, self.username, self.total_posts, self.total_likes, self.location, self.joined_at, self.profile_url", "false, # \"joinedAt\": \"2015-01-02T18:40:14\", # \"location\": \"\", # \"name\": \"Bob\",", "\"\", # \"name\": \"felix1999\", # \"profileUrl\": \"https://disqus.com/by/felix1999/\", # \"signedUrl\": \"\",", "} # }, # \"disable3rdPartyTrackers\": false, # \"id\": \"137780765\", #", "def private(self): return self.anonymous or self._basic_info.get('isPrivate') @property def id(self): if", "{ # \"code\": 0, # \"response\": { # \"about\": \"\",", "\"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\" # } # }, # \"disable3rdPartyTrackers\": false, #", "# \"name\": \"Bob\", # \"numFollowers\": 2, # \"numFollowing\": 0, #", "return 'Private' return self._basic_info.get('id', 'Anonymous') @property def name(self): return self._basic_info.get('name')", "}, # \"permalink\": \"https://disqus.com/api/users/avatars/felix1999.jpg\", # \"small\": { # \"cache\": \"//a.disquscdn.com/1519942534/images/noavatar32.png\",", "= requests.get( 'https://disqus.com/api/3.0/users/details.json', {'user': self.id, 'api_key': API_KEY} ) detail_json =", ") detail_json = details.json() 
if detail_json['code'] != 0: print(f'Problem with", "# { # \"code\": 0, # \"response\": { # \"about\":", "self.id, 'api_key': API_KEY} ) detail_json = await user_info.json() if detail_json['code']", ") detail_json = await user_info.json() if detail_json['code'] != 0: print(f'Problem", "session.get( 'https://disqus.com/api/3.0/users/details.json', params={'user': self.id, 'api_key': API_KEY} ) detail_json = await", "auto-loading user in async version of code!!!!\") details = requests.get(", "location(self): return self._basic_info.get('location') @property def joined_at(self): return self._basic_info.get('joinedAt') @property def", "code!!!!\") details = requests.get( 'https://disqus.com/api/3.0/users/details.json', {'user': self.id, 'api_key': API_KEY} )", "{ # \"cache\": \"https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551\", # \"permalink\": \"https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg\" # }, #", "session: user_info = await session.get( 'https://disqus.com/api/3.0/users/details.json', params={'user': self.id, 'api_key': API_KEY}", "\"isPowerContributor\": false, # \"isPrimary\": true, # \"isPrivate\": false, # \"joinedAt\":" ]
[ "= sok.objektTypeDef['navn'] print( 'Henter', stat['antall'], 'forekomster av objekttype', sok.objektTypeId, objtypenavn", "= gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) minGdf.to_file( filnavn, layer='vegnett', driver=\"GPKG\")", "feltbokstav: feltbokstav = feltbokstav[0] else: feltbokstav = 'vanlig' if feltbokstav", "'_' + str( ii+1 ) }, inplace=True ) mindf['geometry'] =", "vegreferansesystem https://www.vegvesen.no/_attachment/61505 for mulige verdier, kortversjon: 'vanlig' - Helt vanlig", "sjekkes. Mulige verdier: firefelt (default). Antar at firefeltsveg betyr at", "enten vanlig kj.felt, kollektivfelt eller reversibelt felt (flere varianter kommer", "nvdbapiv3-funksjonene til å gjøre nyttige ting, f.eks. lagre geografiske datasett", "Artsrik vegkant 800, # Fremmede arter 67, # Tunnelløp 846,", "= mittfilter.pop( 'egenskap', None) junk = mittfilter.pop( 'overlapp', None) veg.filter(", "# Trær 15, # Grasdekker 274, # Blomsterbeplanting 511, #", ") def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R' ]): \"\"\" Returnerer", "# from shapely.ops import unary_union import pandas as pd import", "idx, val in enumerate(lowerkolonner) if val in lowerkolonner[:idx]] for ii,", "inplace=True) mindf.drop( 'sluttnode', 1, inplace=True) mindf.drop( 'referanse', 1, inplace=True) mindf.drop(", "lage nytt \"enviroment\", uten at det påvirker hele python-installasjonen din.", "Skredsikring, bremsekjegler 850 # Skredsikring, forbygning ] objliste = []", ") mindf.drop( 'geometri', 1, inplace=True) mindf.drop( 'kontraktsområder', 1, inplace=True) mindf.drop(", "1: mindf = pd.DataFrame( data ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads", "(default). 
Antar at firefeltsveg betyr at kjørefeltnummer 1-4 er brukt", "med anna filter if not 'vegsystemreferanse' in mittfilter.keys(): mittfilter['vegsystemreferanse'] =", "mindf.drop( 'href', 1, inplace=True) mindf.drop( 'metadata', 1, inplace=True) mindf.drop( 'kortform',", "ARGUMENTS feltoversikt - Liste med feltkoder for et vegsegment. KEYWORDS", "kjørefeltnummer 1-4 er brukt og er enten vanlig kj.felt, kollektivfelt", "mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry',", "Using list comprehension + list slicing # https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ res =", "eller reversibelt felt (flere varianter kommer når de trengs) RETURNS", "res = [idx for idx, val in enumerate(lowerkolonner) if val", "'Ingen forekomster av', objtypenavn, 'for filter', mittfilter) if vegnett: veg", "{1, 2} ): svar = True return svar else: raise", "med kluss, samt ikke minst: Eventuelt kluss lar seg greit", "# Må trickse litt for å unngå navnekollisjon kolonner =", "sett) Anbefalingen er like fullt å bruke (ana)conda installasjon i", "slags felt vi skal telle med. Sjekk håndbok v830 Nasjonalt", "fagdata vegnett=True : Bool, default=True. 
Angir om vi skal ta", "mulige verdier, kortversjon: 'vanlig' - Helt vanlig kjørefelt, kjørefeltnumemr er", "- Sykkelfelt 'H' - Svingefelt mot høyre 'V' - Svingefelt", "for bompengeinnkreving RETURNS Liste med kjørefeltnummer hvor kun kjørefelt som", "mindf.columns[dublett] : kolonner[dublett] + '_' + str( ii+1 ) },", "mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) mindf.drop(", "1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) minGdf.to_file(", "'_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() ) rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri", "filter brukes på både vegnett og fagdata vegnett=True : Bool,", "in enumerate(lowerkolonner) if val in lowerkolonner[:idx]] for ii, dublett in", "print( 'Henter', stat['antall'], 'forekomster av objekttype', sok.objektTypeId, objtypenavn ) lagnavn", "vegsegmenter=vegsegmenter, geometri=geometri ) if len( rec ) > 0: mindf", "'VT,VTKB', 'adskiltelop' : 'med,nei' } ) data = [] vegsegment", "for felt in feltoversikt: feltbokstav = re.findall( '[A-Za-z]', felt) if", "firefeltrapport( mittfilter={}): \"\"\" Finner alle firefeltsveger i Norge, evt innafor", "800, # Fremmede arter 67, # Tunnelløp 846, # Skredsikring,", "høyre 'V' - Svingefelt mot venstre 'B' - Ekstra felt", "\"\"\" Dumper et har (hardkodede) kontraktsområder \"\"\" if not komr:", "ting som må installeres separat. Noen av disse bibliotekene kunne", "filnavn, layer=lagnavn, driver=\"GPKG\") else: print( 'Ingen forekomster av', objtypenavn, 'for", "= True # Siste klausul her har f.eks. forekommet på", "feltbokstav = feltbokstav[0] else: feltbokstav = 'vanlig' if feltbokstav in", "angitt som heltall uten noen bokstaver. 'K' - kollektivfelt 'R'", "= vegsegment['vegsystemreferanse']['kortform'] vegsegment['vegnr'] = vegsegment['vref'].split()[0] vegsegment['vegkategori'] = vegsegment['vref'][0] vegsegment['adskilte løp']", "windows. 
Slikt plunder hører historien til (stort sett) Anbefalingen er", "om vi skal hente geometri fra egengeometri (hvis det finnes)", "objtypenavn.lower() ) rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri ) if len(", ") }, inplace=True ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) minGdf", "pd.DataFrame( rec) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1,", "på kun fase = V (eksistende veg), såfremt det ikke", "såfremt det ikke kommer i konflikt med anna filter if", "[ '9302 Haugesund 2020-2025', '9304 Bergen', '9305 Sunnfjord' ] komr", "vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt'] ) vegsegment['geometri'] = vegsegment['geometri']['wkt'] vegsegment['vref'] =", "inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) return minGdf", "gpd from datetime import datetime import nvdbapiv3 from apiforbindelse import", "Agder elektro og veglys 2021-2024'] objliste = [ 540, #", "Liste med feltkoder for et vegsegment. 
KEYWORDS mittfilter=['vanlig', 'K', 'R'", "'Henter vegnett') rec = veg.to_records() mindf = pd.DataFrame( rec) mindf['geometry']", "if vr in vegsegment.keys(): if sr in vegsegment[vr] and 'adskilte_løp'", "enkontrakt ) nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt }) def", "en bit av vegnettet hentet fra https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ KEYWORDS: felttype -", "sjekkfelt( vegsegment, felttype='firefelt'): vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt'] ) vegsegment['geometri'] =", "objekttyper: Liste med objekttyper du vil lagre KEYWORDS mittfilter=None :", "x.lower() for x in kolonner ] # Duplicate element indices", "for enObjTypeId in objekttyper: enObjTypeId = int( enObjTypeId ) sok", "sikrer minimalt med kluss, samt ikke minst: Eventuelt kluss lar", "liste med kjørefeltnummer filtrert på hva slags feltkode vi evt", "846, # Skredsikring, bremsekjegler 850 # Skredsikring, forbygning ] objliste", "3, 4}): svar = True # Siste klausul her har", "+ felttype + 'er ikke implementert (ennå)' ) def filtrerfeltoversikt(", "wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) mindf.drop( 'kontraktsområder', 1, inplace=True)", "mindf.drop( 'startnode', 1, inplace=True) mindf.drop( 'sluttnode', 1, inplace=True) mindf.drop( 'referanse',", "default=True. 
Angir om vi skal ta med data om vegnett", "in mittfilter.keys(): mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv' if not 'kryssystem' in mittfilter.keys():", "1-4 er brukt og er enten vanlig kj.felt, kollektivfelt eller", "str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() ) rec = sok.to_records(", "for idx, val in enumerate(lowerkolonner) if val in lowerkolonner[:idx]] for", "felt av type: ' + felttype + 'er ikke implementert", "veg = nvdbapiv3.nvdbVegnett() if mittfilter: junk = mittfilter.pop( 'egenskap', None)", "= 'vegsystemreferanse' sr = 'strekning' if felttype == 'firefelt': if", "eller ikke vegsegmenter=False : Bool, default=False. Angir om vi skal", "nvdbapiv3.esriSikkerTekst( objtypenavn.lower() ) rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri ) if", "kodehygiene og sikrer minimalt med kluss, samt ikke minst: Eventuelt", "geometry='geometry', crs=5973 ) minGdf.to_file( filnavn, layer='vegnett', driver=\"GPKG\") def dumpkontraktsomr( komr", "Duplicate element indices in list # Using list comprehension +", "list # Using list comprehension + list slicing # https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/", "felttype='firefelt'): vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt'] ) vegsegment['geometri'] = vegsegment['geometri']['wkt'] vegsegment['vref']", "= nvdbapiv3.nvdbFagdata( enObjTypeId ) if mittfilter: sok.filter( mittfilter ) stat", ") lagnavn = 'type' + str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst(", "mittfilter: Dictionary med søkefilter RETURNS geodataframe med resultatet \"\"\" v", ") if len( rec ) > 0: mindf = pd.DataFrame(", "filter til søkeobjekt i nvdbapiv3.py, for eksempel { 'kommune' :", ": enkontrakt }) def firefeltrapport( mittfilter={}): \"\"\" Finner alle firefeltsveger", "filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R' ]): \"\"\" Returnerer liste med", "and not kjfelt.issuperset( {1, 2} ): svar = True return", "av og til være 
plundrete å installere, evt ha versjonskonflikter", ") sok = nvdbapiv3.nvdbFagdata( enObjTypeId ) if mittfilter: sok.filter( mittfilter", "vegsegment ) vegsegment = v.nesteForekomst() if len( data ) >", "105, # Fartsgrense 810, # Vinterdriftsklasse 482, # trafikkregistreringsstasjon 153,", "and 'adskilte_løp' in vegsegment[vr][sr]: if vegsegment[vr][sr]['adskilte_løp'] == 'Nei' and kjfelt.issuperset(", "Liste med koder for hva slags felt vi skal telle", "vegsegment['geometri'] = vegsegment['geometri']['wkt'] vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform'] vegsegment['vegnr'] = vegsegment['vref'].split()[0] vegsegment['vegkategori']", "# Artsrik vegkant 800, # Fremmede arter 67, # Tunnelløp", "'K', 'R' ]): \"\"\" Returnerer liste med kjørefeltnummer filtrert på", "- dicionary med data om en bit av vegnettet hentet", "del andre ting som må installeres separat. Noen av disse", "data.append( vegsegment ) vegsegment = v.nesteForekomst() if len( data )", "geometry='geometry', crs=5973 ) return minGdf else: return None def sjekkfelt(", "vegsegment['vegnr'] = vegsegment['vref'].split()[0] vegsegment['vegkategori'] = vegsegment['vref'][0] vegsegment['adskilte løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp']", "print( 'Ingen forekomster av', objtypenavn, 'for filter', mittfilter) if vegnett:", "if val in lowerkolonner[:idx]] for ii, dublett in enumerate( res):", "= re.findall( '[A-Za-z]', felt) if feltbokstav: feltbokstav = feltbokstav[0] else:", "Samme filter brukes på både vegnett og fagdata vegnett=True :", "din. 
\"\"\" import re import pdb from shapely import wkt", "): objekttyper = [ objekttyper ] for enObjTypeId in objekttyper:", "Disse hjelpefunksjonene forutsetter fungerende installasjon av geopandas, shapely og en", "'startnode', 1, inplace=True) mindf.drop( 'sluttnode', 1, inplace=True) mindf.drop( 'referanse', 1,", "feltnummer = int( re.split( '[A-Z]', felt)[0] ) data.append( feltnummer )", "): \"\"\" Dumper et har (hardkodede) kontraktsområder \"\"\" if not", "inplace=True) minGdf.to_file( filnavn, layer=lagnavn, driver=\"GPKG\") else: print( 'Ingen forekomster av',", "hvis kjørefeltene er av riktig type \"\"\" svar = False", "vegsegment['detaljnivå']: kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) )", "https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ res = [idx for idx, val in enumerate(lowerkolonner) if", "og en del andre ting som må installeres separat. Noen", "minGdf else: return None def sjekkfelt( vegsegment, felttype='firefelt' ): \"\"\"", "v.filter( mittfilter ) # Kun kjørende, og kun øverste topologinivå,", "import re import pdb from shapely import wkt # from", "'kortform', 1, inplace=True) mindf.drop( 'veglenkenummer', 1, inplace=True) mindf.drop( 'segmentnummer', 1,", "# Tunnelløp 846, # Skredsikring, bremsekjegler 850 # Skredsikring, forbygning", "junk = mittfilter.pop( 'overlapp', None) veg.filter( mittfilter ) print( 'Henter", "data ) > 1: mindf = pd.DataFrame( data ) mindf['geometry']", "Tunnelløp 846, # Skredsikring, bremsekjegler 850 # Skredsikring, forbygning ]", "til (stort sett) Anbefalingen er like fullt å bruke (ana)conda", "varianter kommer når de trengs) RETURNS boolean - True hvis", "bompengeinnkreving RETURNS Liste med kjørefeltnummer hvor kun kjørefelt som angitt", "- Svingefelt mot høyre 'V' - Svingefelt mot venstre 'B'", "til være plundrete å installere, evt ha versjonskonflikter seg i", "telle med. 
Sjekk håndbok v830 Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for mulige", "inplace=True) mindf.drop( 'referanse', 1, inplace=True) mindf.drop( 'målemetode', 1, inplace=True) mindf.drop(", "default=False. Angir om vi skal repetere objektet delt inn etter", "\"\"\" if not komr: komr = [ '9302 Haugesund 2020-2025',", "mittfilter=['vanlig', 'K', 'R' ]): \"\"\" Returnerer liste med kjørefeltnummer filtrert", "å gjøre nyttige ting, f.eks. lagre geografiske datasett Disse hjelpefunksjonene", "ARGUMENTS objekttyper: Liste med objekttyper du vil lagre KEYWORDS mittfilter=None", "in objekttyper: enObjTypeId = int( enObjTypeId ) sok = nvdbapiv3.nvdbFagdata(", "vegsegment.keys() and 'Vegtrase' in vegsegment['detaljnivå']: kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'],", "def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R' ]): \"\"\" Returnerer liste", "nvdbapiv3.nvdbFagdata( enObjTypeId ) if mittfilter: sok.filter( mittfilter ) stat =", "inkludert \"\"\" data = [ ] for felt in feltoversikt:", "'9302 Haugesund 2020-2025', '9304 Bergen', '9305 Sunnfjord' ] komr =", "2020-2025', '9304 Bergen', '9305 Sunnfjord' ] komr = [ '9253", "None) junk = mittfilter.pop( 'overlapp', None) veg.filter( mittfilter ) print(", "objtypenavn ) lagnavn = 'type' + str(enObjTypeId) + '_' +", "kunne historisk av og til være plundrete å installere, evt", "Rasteplass 48, # Fortau 199, # Trær 15, # Grasdekker", ") vegsegment['geometri'] = vegsegment['geometri']['wkt'] vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform'] vegsegment['vegnr'] = vegsegment['vref'].split()[0]", "'målemetode', 1, inplace=True) mindf.drop( 'måledato', 1, inplace=True) minGdf = gpd.GeoDataFrame(", "fra https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ KEYWORDS: felttype - hva slags felttype som skal", "mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) mindf.drop( 'kontraktsområder', 1,", "- Helt 
vanlig kjørefelt, kjørefeltnumemr er angitt som heltall uten", "'href', 1, inplace=True) mindf.drop( 'metadata', 1, inplace=True) mindf.drop( 'kortform', 1,", "ii+1 ) }, inplace=True ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads )", "filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) ) if vr in vegsegment.keys():", "fra egengeometri (hvis det finnes) Hvis du ønsker å presentere", "mindf.drop( 'kortform', 1, inplace=True) mindf.drop( 'veglenkenummer', 1, inplace=True) mindf.drop( 'segmentnummer',", "implementert (ennå)' ) def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R' ]):", "not 'kryssystem' in mittfilter.keys(): mittfilter['kryssystem'] = 'false' if not 'sideanlegg'", "vegnettet hentet fra https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ KEYWORDS: felttype - hva slags felttype", "adskiltelop=MOT v.filter( { 'trafikantgruppe' : 'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop'", "vegsegment - dicionary med data om en bit av vegnettet", "felttype='firefelt' ): \"\"\" Sjekker hva slags felt som finnes på", "er enten vanlig kj.felt, kollektivfelt eller reversibelt felt (flere varianter", "Angir om vi skal ta med data om vegnett eller", "resultatet \"\"\" v = nvdbapiv3.nvdbVegnett() # Legger til filter på", ", # Naturområde (ingen treff i Haugesund kontrakt) 517, #", "= vegsegment['vref'][0] vegsegment['adskilte løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp'] data.append( vegsegment ) vegsegment", ") mindf.drop( 'geometri', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry',", "inn etter vegsegementer geometri=True : Bool, default=True. 
Angir om vi", "vegsegment['vref'][0] vegsegment['adskilte løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp'] data.append( vegsegment ) vegsegment =", "feltoversikt: feltbokstav = re.findall( '[A-Za-z]', felt) if feltbokstav: feltbokstav =", "innafor angitt søkekriterie Bruker søkeobjektet nvdbapiv3.nvdbVegnett fra biblioteket https://github.com/LtGlahn/nvdbapi-V3 ARGUMENTS", "hentet fra https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ KEYWORDS: felttype - hva slags felttype som", "objektets stedfesting langs veg så bruker du kombinasjonen vegsegmenter=True, geometri=False", "[] for enkontrakt in komr: filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt )", "= ','.join( vegsegment['feltoversikt'] ) vegsegment['geometri'] = vegsegment['geometri']['wkt'] vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform']", "reversibelt felt 'S' - Sykkelfelt 'H' - Svingefelt mot høyre", "vegsegment[vr] and 'adskilte_løp' in vegsegment[vr][sr]: if vegsegment[vr][sr]['adskilte_løp'] == 'Nei' and", "== 'Nei' and kjfelt.issuperset( { 1, 2, 3, 4}): svar", ") minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) # må", "vegnett: veg = nvdbapiv3.nvdbVegnett() if mittfilter: junk = mittfilter.pop( 'egenskap',", "Bergen', '9305 Sunnfjord' ] komr = [ '9253 Agder elektro", "mindf.drop( 'kontraktsområder', 1, inplace=True) mindf.drop( 'riksvegruter', 1, inplace=True) mindf.drop( 'href',", "5001 } Samme filter brukes på både vegnett og fagdata", "NotImplementedError('Sjekkfelt: Sjekk for felt av type: ' + felttype +", "\"\"\" En samling hjelpefunksjoner som bruker nvdbapiv3-funksjonene til å gjøre", "hva slags feltkode vi evt har ARGUMENTS feltoversikt - Liste", "så bruker du kombinasjonen vegsegmenter=True, geometri=False RETURNS None \"\"\" if", "du vil lagre KEYWORDS mittfilter=None : Dictionary med filter til", "'Med' and len( kjfelt ) >= 2 and not kjfelt.issuperset(", "skal sjekkes. Mulige verdier: firefelt (default). 
Antar at firefeltsveg betyr", "med filter til søkeobjekt i nvdbapiv3.py, for eksempel { 'kommune'", "ut fra objektets stedfesting langs veg så bruker du kombinasjonen", "import pandas as pd import geopandas as gpd from datetime", "mot venstre 'B' - Ekstra felt for bompengeinnkreving RETURNS Liste", "vegsegment[vr][sr]: if vegsegment[vr][sr]['adskilte_løp'] == 'Nei' and kjfelt.issuperset( { 1, 2,", "kolonner[dublett] + '_' + str( ii+1 ) }, inplace=True )", ") vegsegment = v.nesteForekomst() if len( data ) > 1:", "2} ): svar = True return svar else: raise NotImplementedError('Sjekkfelt:", "+ 'er ikke implementert (ennå)' ) def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig',", "å presentere vegobjekt ut fra objektets stedfesting langs veg så", "til geopackage ARGUMENTS objekttyper: Liste med objekttyper du vil lagre", "import pdb from shapely import wkt # from shapely.ops import", "for ii, dublett in enumerate( res): mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett]", "kluss, samt ikke minst: Eventuelt kluss lar seg greit reparere", "'detaljnivå' in vegsegment.keys() and 'Vegtrase' in vegsegment['detaljnivå']: kjfelt = set(", "Kun kjørende, og kun øverste topologinivå, og ikke adskiltelop=MOT v.filter(", "forekomster av', objtypenavn, 'for filter', mittfilter) if vegnett: veg =", "Trafikkmengde 105, # Fartsgrense 810, # Vinterdriftsklasse 482, # trafikkregistreringsstasjon", "+ '.gpkg' if not isinstance(objekttyper, list ): objekttyper = [", "\"\"\" if not '.gpkg' in filnavn: filnavn = filnavn +", "av riktig type \"\"\" svar = False vr = 'vegsystemreferanse'", "kontraktsområder \"\"\" if not komr: komr = [ '9302 Haugesund", "'referanse', 1, inplace=True) mindf.drop( 'målemetode', 1, inplace=True) mindf.drop( 'måledato', 1,", "objekttyper du vil lagre KEYWORDS mittfilter=None : Dictionary med filter", "filnavn: filnavn = filnavn + datetime.today().strftime('%Y-%m-%d') + '.gpkg' if not", "mittfilter=['vanlig', 'K', 'R' ] - Liste med koder for 
hva", "lagnavn = 'type' + str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower()", ": 'VT,VTKB', 'adskiltelop' : 'med,nei' } ) data = []", "'geometri', 1, inplace=True) mindf.drop( 'kontraktsområder', 1, inplace=True) mindf.drop( 'riksvegruter', 1,", "mellom, spesielt på windows. Slikt plunder hører historien til (stort", "er like fullt å bruke (ana)conda installasjon i et eget", "å unngå navnekollisjon kolonner = list( mindf.columns ) lowerkolonner =", "objektet delt inn etter vegsegementer geometri=True : Bool, default=True. Angir", ") # må droppe kolonne vegsegmenter hvis du har vegsegmenter=False", "mittfilter.pop( 'egenskap', None) junk = mittfilter.pop( 'overlapp', None) veg.filter( mittfilter", "] objliste = [] for enkontrakt in komr: filnavn =", "15, # Grasdekker 274, # Blomsterbeplanting 511, # Busker 300", "Sjekk håndbok v830 Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for mulige verdier, kortversjon:", "in feltoversikt: feltbokstav = re.findall( '[A-Za-z]', felt) if feltbokstav: feltbokstav", "data = [ ] for felt in feltoversikt: feltbokstav =", "(hvis det finnes) Hvis du ønsker å presentere vegobjekt ut", "vegsegment.keys() and 'detaljnivå' in vegsegment.keys() and 'Vegtrase' in vegsegment['detaljnivå']: kjfelt", "Fortau 199, # Trær 15, # Grasdekker 274, # Blomsterbeplanting", "in mittfilter.keys(): mittfilter['sideanlegg'] = 'false' v.filter( mittfilter ) # Kun", "har (hardkodede) kontraktsområder \"\"\" if not komr: komr = [", "kolonner = list( mindf.columns ) lowerkolonner = [ x.lower() for", "RETURNS geodataframe med resultatet \"\"\" v = nvdbapiv3.nvdbVegnett() # Legger", "None \"\"\" if not '.gpkg' in filnavn: filnavn = filnavn", "kluss lar seg greit reparere ved å lage nytt \"enviroment\",", "inplace=True) minGdf.drop( 'geometri', 1, inplace=True) minGdf.to_file( filnavn, layer=lagnavn, driver=\"GPKG\") else:", "kjørefeltnumemr er angitt som heltall uten noen bokstaver. 
'K' -", "from shapely.ops import unary_union import pandas as pd import geopandas", "vi skal hente geometri fra egengeometri (hvis det finnes) Hvis", "mittfilter.pop( 'overlapp', None) veg.filter( mittfilter ) print( 'Henter vegnett') rec", "slags felt som finnes på et vegsegment ARGUMENTS: vegsegment -", "pandas as pd import geopandas as gpd from datetime import", "lowerkolonner[:idx]] for ii, dublett in enumerate( res): mindf.rename(columns={ mindf.columns[dublett] :", "Fartsgrense 810, # Vinterdriftsklasse 482, # trafikkregistreringsstasjon 153, # Værstasjon", "med søkefilter RETURNS geodataframe med resultatet \"\"\" v = nvdbapiv3.nvdbVegnett()", "# Rasteplass 48, # Fortau 199, # Trær 15, #", "} ) data = [] vegsegment = v.nesteForekomst() while vegsegment:", "dumpkontraktsomr( komr = [] ): \"\"\" Dumper et har (hardkodede)", "gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) return minGdf else: return None", "vegsegment, felttype='firefelt'): vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt'] ) vegsegment['geometri'] = vegsegment['geometri']['wkt']", "True hvis kjørefeltene er av riktig type \"\"\" svar =", "'R' - reversibelt felt 'S' - Sykkelfelt 'H' - Svingefelt", "1, inplace=True) mindf.drop( 'målemetode', 1, inplace=True) mindf.drop( 'måledato', 1, inplace=True)", "# trafikkregistreringsstasjon 153, # Værstasjon 64, # Ferjeleie 39, #", "[ '9253 Agder elektro og veglys 2021-2024'] objliste = [", "samling hjelpefunksjoner som bruker nvdbapiv3-funksjonene til å gjøre nyttige ting,", "i konflikt med anna filter if not 'vegsystemreferanse' in mittfilter.keys():", "'veglenkenummer', 1, inplace=True) mindf.drop( 'segmentnummer', 1, inplace=True) mindf.drop( 'startnode', 1,", "minst: Eventuelt kluss lar seg greit reparere ved å lage", "Bool, default=False. 
Angir om vi skal repetere objektet delt inn", "minGdf.drop( 'vegsegmenter', 1, inplace=True) minGdf.drop( 'geometri', 1, inplace=True) minGdf.to_file( filnavn,", "1, inplace=True) minGdf.to_file( filnavn, layer=lagnavn, driver=\"GPKG\") else: print( 'Ingen forekomster", ") print( 'Henter vegnett') rec = veg.to_records() mindf = pd.DataFrame(", "> 0: mindf = pd.DataFrame( rec ) # Må trickse", "vegnett') rec = veg.to_records() mindf = pd.DataFrame( rec) mindf['geometry'] =", "inplace=True) mindf.drop( 'riksvegruter', 1, inplace=True) mindf.drop( 'href', 1, inplace=True) mindf.drop(", "kjfelt ) >= 2 and not kjfelt.issuperset( {1, 2} ):", "kjfelt.issuperset( {1, 2} ): svar = True return svar else:", "felttype + 'er ikke implementert (ennå)' ) def filtrerfeltoversikt( feltoversikt,", "hva slags felt som finnes på et vegsegment ARGUMENTS: vegsegment", "f.eks. lagre geografiske datasett Disse hjelpefunksjonene forutsetter fungerende installasjon av", "i Norge, evt innafor angitt søkekriterie Bruker søkeobjektet nvdbapiv3.nvdbVegnett fra", "{ 'trafikantgruppe' : 'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop' : 'med,nei'", "historisk av og til være plundrete å installere, evt ha", "Værstasjon 64, # Ferjeleie 39, # Rasteplass 48, # Fortau", "kontrakt) 517, # Artsrik vegkant 800, # Fremmede arter 67,", "for å unngå navnekollisjon kolonner = list( mindf.columns ) lowerkolonner", "spesielt på windows. Slikt plunder hører historien til (stort sett)", "installeres separat. 
Noen av disse bibliotekene kunne historisk av og", "mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) minGdf = gpd.GeoDataFrame(", "as pd import geopandas as gpd from datetime import datetime", "# Legger til filter på kun fase = V (eksistende", "'false' v.filter( mittfilter ) # Kun kjørende, og kun øverste", "enkontrakt in komr: filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt ) nvdb2gpkg( objliste,", "plundrete å installere, evt ha versjonskonflikter seg i mellom, spesielt", "inplace=True) mindf.drop( 'målemetode', 1, inplace=True) mindf.drop( 'måledato', 1, inplace=True) minGdf", "- True hvis kjørefeltene er av riktig type \"\"\" svar", "på både vegnett og fagdata vegnett=True : Bool, default=True. Angir", "mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) minGdf", "filter på kun fase = V (eksistende veg), såfremt det", "vi skal telle med. Sjekk håndbok v830 Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505", "# Fartsgrense 810, # Vinterdriftsklasse 482, # trafikkregistreringsstasjon 153, #", "Dette er god kodehygiene og sikrer minimalt med kluss, samt", "'kryssystem' in mittfilter.keys(): mittfilter['kryssystem'] = 'false' if not 'sideanlegg' in", "Sjekker hva slags felt som finnes på et vegsegment ARGUMENTS:", "if mittfilter: junk = mittfilter.pop( 'egenskap', None) junk = mittfilter.pop(", "in filnavn: filnavn = filnavn + datetime.today().strftime('%Y-%m-%d') + '.gpkg' if", "= vegsegment['vref'].split()[0] vegsegment['vegkategori'] = vegsegment['vref'][0] vegsegment['adskilte løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp'] data.append(", ") return minGdf else: return None def sjekkfelt( vegsegment, felttype='firefelt'", "med data om en bit av vegnettet hentet fra https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/", "- Liste med koder for hva slags felt vi skal", "import nvdbapiv3 from apiforbindelse import 
apiforbindelse def nvdb2gpkg( objekttyper, filnavn='datadump',", "og sikrer minimalt med kluss, samt ikke minst: Eventuelt kluss", "hente geometri fra egengeometri (hvis det finnes) Hvis du ønsker", "geometri fra egengeometri (hvis det finnes) Hvis du ønsker å", "vegsegment, felttype='firefelt' ): \"\"\" Sjekker hva slags felt som finnes", "å lage nytt \"enviroment\", uten at det påvirker hele python-installasjonen", "feltbokstav[0] else: feltbokstav = 'vanlig' if feltbokstav in mittfilter: feltnummer", "] for enObjTypeId in objekttyper: enObjTypeId = int( enObjTypeId )", "= [ objekttyper ] for enObjTypeId in objekttyper: enObjTypeId =", "minGdf.columns: minGdf.drop( 'vegsegmenter', 1, inplace=True) minGdf.drop( 'geometri', 1, inplace=True) minGdf.to_file(", "Hvis du ønsker å presentere vegobjekt ut fra objektets stedfesting", "'feltoversikt' in vegsegment.keys() and 'detaljnivå' in vegsegment.keys() and 'Vegtrase' in", "None KEYWORDS: mittfilter: Dictionary med søkefilter RETURNS geodataframe med resultatet", "# Ferjeleie 39, # Rasteplass 48, # Fortau 199, #", "trickse litt for å unngå navnekollisjon kolonner = list( mindf.columns", "1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) return", "[] vegsegment = v.nesteForekomst() while vegsegment: if sjekkfelt( vegsegment, felttype='firefelt'):", "# Using list comprehension + list slicing # https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ res", "bruker du kombinasjonen vegsegmenter=True, geometri=False RETURNS None \"\"\" if not", "'R']) ) if vr in vegsegment.keys(): if sr in vegsegment[vr]", "ikke vegsegmenter=False : Bool, default=False. 
Angir om vi skal repetere", "objliste = [ 540, # Trafikkmengde 105, # Fartsgrense 810,", "unary_union import pandas as pd import geopandas as gpd from", "kjørefelt som angitt med mittfilter-nøkkelord er inkludert \"\"\" data =", "reparere ved å lage nytt \"enviroment\", uten at det påvirker", "om vi skal repetere objektet delt inn etter vegsegementer geometri=True", "+ nvdbapiv3.esriSikkerTekst( objtypenavn.lower() ) rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri )", "1, inplace=True) mindf.drop( 'kontraktsområder', 1, inplace=True) mindf.drop( 'riksvegruter', 1, inplace=True)", "= v.nesteForekomst() while vegsegment: if sjekkfelt( vegsegment, felttype='firefelt'): vegsegment['feltoversikt'] =", "med. Sjekk håndbok v830 Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for mulige verdier,", "in vegsegment['detaljnivå']: kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R'])", "enObjTypeId ) if mittfilter: sok.filter( mittfilter ) stat = sok.statistikk()", "540, # Trafikkmengde 105, # Fartsgrense 810, # Vinterdriftsklasse 482,", "vegsegmenter hvis du har vegsegmenter=False if 'vegsegmenter' in minGdf.columns: minGdf.drop(", "v830 Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for mulige verdier, kortversjon: 'vanlig' -", "vegsegementer geometri=True : Bool, default=True. 
Angir om vi skal hente", "v.filter( { 'trafikantgruppe' : 'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop' :", "# Duplicate element indices in list # Using list comprehension", "boolean - True hvis kjørefeltene er av riktig type \"\"\"", "angitt med mittfilter-nøkkelord er inkludert \"\"\" data = [ ]", "'S' - Sykkelfelt 'H' - Svingefelt mot høyre 'V' -", "stat = sok.statistikk() objtypenavn = sok.objektTypeDef['navn'] print( 'Henter', stat['antall'], 'forekomster", "str( ii+1 ) }, inplace=True ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads", "data om vegnett eller ikke vegsegmenter=False : Bool, default=False. Angir", "sok.objektTypeDef['navn'] print( 'Henter', stat['antall'], 'forekomster av objekttype', sok.objektTypeId, objtypenavn )", "'egenskap', None) junk = mittfilter.pop( 'overlapp', None) veg.filter( mittfilter )", "ikke implementert (ennå)' ) def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R'", "bit av vegnettet hentet fra https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ KEYWORDS: felttype - hva", "in kolonner ] # Duplicate element indices in list #", "løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp'] data.append( vegsegment ) vegsegment = v.nesteForekomst() if", "vegsegment['feltoversikt'] ) vegsegment['geometri'] = vegsegment['geometri']['wkt'] vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform'] vegsegment['vegnr'] =", ": 5001 } Samme filter brukes på både vegnett og", "kolonner ] # Duplicate element indices in list # Using", "å bruke (ana)conda installasjon i et eget \"environment\". 
Dette er", "hjelpefunksjoner som bruker nvdbapiv3-funksjonene til å gjøre nyttige ting, f.eks.", "'kontraktsområder', 1, inplace=True) mindf.drop( 'riksvegruter', 1, inplace=True) mindf.drop( 'href', 1,", "evt innafor angitt søkekriterie Bruker søkeobjektet nvdbapiv3.nvdbVegnett fra biblioteket https://github.com/LtGlahn/nvdbapi-V3", "'sluttnode', 1, inplace=True) mindf.drop( 'referanse', 1, inplace=True) mindf.drop( 'målemetode', 1,", "trengs) RETURNS boolean - True hvis kjørefeltene er av riktig", "'trafikantgruppe' : 'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop' : 'med,nei' }", "brukt og er enten vanlig kj.felt, kollektivfelt eller reversibelt felt", "'forekomster av objekttype', sok.objektTypeId, objtypenavn ) lagnavn = 'type' +", "mittfilter.keys(): mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv' if not 'kryssystem' in mittfilter.keys(): mittfilter['kryssystem']", "1, inplace=True) mindf.drop( 'href', 1, inplace=True) mindf.drop( 'metadata', 1, inplace=True)", "- hva slags felttype som skal sjekkes. Mulige verdier: firefelt", "for x in kolonner ] # Duplicate element indices in", "Finner alle firefeltsveger i Norge, evt innafor angitt søkekriterie Bruker", "Skredsikring, forbygning ] objliste = [] for enkontrakt in komr:", "vegsegment ARGUMENTS: vegsegment - dicionary med data om en bit", "crs=5973 ) # må droppe kolonne vegsegmenter hvis du har", "biblioteket https://github.com/LtGlahn/nvdbapi-V3 ARGUMENTS None KEYWORDS: mittfilter: Dictionary med søkefilter RETURNS", "gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) # må droppe kolonne vegsegmenter", "geopackage ARGUMENTS objekttyper: Liste med objekttyper du vil lagre KEYWORDS", "# Siste klausul her har f.eks. forekommet på Fv5724, envegskjørt", "felt) if feltbokstav: feltbokstav = feltbokstav[0] else: feltbokstav = 'vanlig'", "kjørefelt, kjørefeltnumemr er angitt som heltall uten noen bokstaver. 
'K'", "minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) minGdf.to_file( filnavn, layer='vegnett',", "in lowerkolonner[:idx]] for ii, dublett in enumerate( res): mindf.rename(columns={ mindf.columns[dublett]", "= [] vegsegment = v.nesteForekomst() while vegsegment: if sjekkfelt( vegsegment,", "Svingefelt mot venstre 'B' - Ekstra felt for bompengeinnkreving RETURNS", "'vanlig' if feltbokstav in mittfilter: feltnummer = int( re.split( '[A-Z]',", "None def sjekkfelt( vegsegment, felttype='firefelt' ): \"\"\" Sjekker hva slags", "Angir om vi skal hente geometri fra egengeometri (hvis det", "enumerate( res): mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] + '_' + str(", "NVDB vegnett og angitte objekttyper til geopackage ARGUMENTS objekttyper: Liste", "feltbokstav = 'vanlig' if feltbokstav in mittfilter: feltnummer = int(", "hva slags felttype som skal sjekkes. Mulige verdier: firefelt (default).", "et vegsegment ARGUMENTS: vegsegment - dicionary med data om en", ") mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True)", "'R' ]): \"\"\" Returnerer liste med kjørefeltnummer filtrert på hva", "at kjørefeltnummer 1-4 er brukt og er enten vanlig kj.felt,", "kun øverste topologinivå, og ikke adskiltelop=MOT v.filter( { 'trafikantgruppe' :", "'med,nei' } ) data = [] vegsegment = v.nesteForekomst() while", "av vegnettet hentet fra https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ KEYWORDS: felttype - hva slags", "Svingefelt mot høyre 'V' - Svingefelt mot venstre 'B' -", "med kjørefeltnummer hvor kun kjørefelt som angitt med mittfilter-nøkkelord er", "ii, dublett in enumerate( res): mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] +", "enObjTypeId in objekttyper: enObjTypeId = int( enObjTypeId ) sok =", "not '.gpkg' in filnavn: filnavn = filnavn + datetime.today().strftime('%Y-%m-%d') +", "vegsegment[vr][sr]['adskilte_løp'] == 'Med' and len( kjfelt ) >= 2 
and", "- reversibelt felt 'S' - Sykkelfelt 'H' - Svingefelt mot", "'vegsystemreferanse' sr = 'strekning' if felttype == 'firefelt': if 'feltoversikt'", "objekttyper ] for enObjTypeId in objekttyper: enObjTypeId = int( enObjTypeId", "type: ' + felttype + 'er ikke implementert (ennå)' )", "'9304 Bergen', '9305 Sunnfjord' ] komr = [ '9253 Agder", "navnekollisjon kolonner = list( mindf.columns ) lowerkolonner = [ x.lower()", "ta med data om vegnett eller ikke vegsegmenter=False : Bool,", "objekttype', sok.objektTypeId, objtypenavn ) lagnavn = 'type' + str(enObjTypeId) +", "ARGUMENTS None KEYWORDS: mittfilter: Dictionary med søkefilter RETURNS geodataframe med", "mittfilter['sideanlegg'] = 'false' v.filter( mittfilter ) # Kun kjørende, og", "mindf.columns ) lowerkolonner = [ x.lower() for x in kolonner", "her har f.eks. forekommet på Fv5724, envegskjørt tunnel ved Oldenvatnet.", "installasjon av geopandas, shapely og en del andre ting som", "tunnel ved Oldenvatnet. elif vegsegment[vr][sr]['adskilte_løp'] == 'Med' and len( kjfelt", "hele python-installasjonen din. \"\"\" import re import pdb from shapely", "== 'Med' and len( kjfelt ) >= 2 and not", "# Kun kjørende, og kun øverste topologinivå, og ikke adskiltelop=MOT", "'K' - kollektivfelt 'R' - reversibelt felt 'S' - Sykkelfelt", "av', objtypenavn, 'for filter', mittfilter) if vegnett: veg = nvdbapiv3.nvdbVegnett()", "er angitt som heltall uten noen bokstaver. 
'K' - kollektivfelt", "nvdbapiv3.esriSikkerTekst( enkontrakt ) nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt })", "feltbokstav = re.findall( '[A-Za-z]', felt) if feltbokstav: feltbokstav = feltbokstav[0]", "komr = [ '9302 Haugesund 2020-2025', '9304 Bergen', '9305 Sunnfjord'", "geometry='geometry', crs=5973 ) # må droppe kolonne vegsegmenter hvis du", "): svar = True return svar else: raise NotImplementedError('Sjekkfelt: Sjekk", "litt for å unngå navnekollisjon kolonner = list( mindf.columns )", "\"\"\" v = nvdbapiv3.nvdbVegnett() # Legger til filter på kun", "# Skredsikring, forbygning ] objliste = [] for enkontrakt in", "filnavn, layer='vegnett', driver=\"GPKG\") def dumpkontraktsomr( komr = [] ): \"\"\"", "kjfelt.issuperset( { 1, 2, 3, 4}): svar = True #", "inplace=True) mindf.drop( 'måledato', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry',", "bokstaver. 'K' - kollektivfelt 'R' - reversibelt felt 'S' -", "rec ) # Må trickse litt for å unngå navnekollisjon", "inplace=True) mindf.drop( 'startnode', 1, inplace=True) mindf.drop( 'sluttnode', 1, inplace=True) mindf.drop(", "evt har ARGUMENTS feltoversikt - Liste med feltkoder for et", "shapely import wkt # from shapely.ops import unary_union import pandas", "810, # Vinterdriftsklasse 482, # trafikkregistreringsstasjon 153, # Værstasjon 64,", "if vegsegment[vr][sr]['adskilte_løp'] == 'Nei' and kjfelt.issuperset( { 1, 2, 3,", "= nvdbapiv3.nvdbVegnett() if mittfilter: junk = mittfilter.pop( 'egenskap', None) junk", "64, # Ferjeleie 39, # Rasteplass 48, # Fortau 199,", "vegsegment['vegsystemreferanse']['kortform'] vegsegment['vegnr'] = vegsegment['vref'].split()[0] vegsegment['vegkategori'] = vegsegment['vref'][0] vegsegment['adskilte løp'] =", "v.nesteForekomst() if len( data ) > 1: mindf = pd.DataFrame(", "mittfilter ) print( 'Henter vegnett') rec = veg.to_records() mindf =", "= v.nesteForekomst() if len( data ) > 1: mindf =", "1, inplace=True) mindf.drop( 
'kortform', 1, inplace=True) mindf.drop( 'veglenkenummer', 1, inplace=True)", "vegobjekt ut fra objektets stedfesting langs veg så bruker du", "de trengs) RETURNS boolean - True hvis kjørefeltene er av", "vegsegment.keys(): if sr in vegsegment[vr] and 'adskilte_løp' in vegsegment[vr][sr]: if", "wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf,", "re import pdb from shapely import wkt # from shapely.ops", "Sunnfjord' ] komr = [ '9253 Agder elektro og veglys", "2 and not kjfelt.issuperset( {1, 2} ): svar = True", "https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ KEYWORDS: felttype - hva slags felttype som skal sjekkes.", "og er enten vanlig kj.felt, kollektivfelt eller reversibelt felt (flere", "som må installeres separat. Noen av disse bibliotekene kunne historisk", "dicionary med data om en bit av vegnettet hentet fra", "Liste med kjørefeltnummer hvor kun kjørefelt som angitt med mittfilter-nøkkelord", "Dictionary med filter til søkeobjekt i nvdbapiv3.py, for eksempel {", "in minGdf.columns: minGdf.drop( 'vegsegmenter', 1, inplace=True) minGdf.drop( 'geometri', 1, inplace=True)", "repetere objektet delt inn etter vegsegementer geometri=True : Bool, default=True.", "= gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) # må droppe kolonne", "verdier, kortversjon: 'vanlig' - Helt vanlig kjørefelt, kjørefeltnumemr er angitt", "len( kjfelt ) >= 2 and not kjfelt.issuperset( {1, 2}", "kortversjon: 'vanlig' - Helt vanlig kjørefelt, kjørefeltnumemr er angitt som", "når de trengs) RETURNS boolean - True hvis kjørefeltene er", "riktig type \"\"\" svar = False vr = 'vegsystemreferanse' sr", "data om en bit av vegnettet hentet fra https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ KEYWORDS:", "RETURNS boolean - True hvis kjørefeltene er av riktig type", "Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for mulige verdier, kortversjon: 'vanlig' - Helt", "kun 
kjørefelt som angitt med mittfilter-nøkkelord er inkludert \"\"\" data", "} Samme filter brukes på både vegnett og fagdata vegnett=True", "print( 'Henter vegnett') rec = veg.to_records() mindf = pd.DataFrame( rec)", "svar else: raise NotImplementedError('Sjekkfelt: Sjekk for felt av type: '", "kollektivfelt 'R' - reversibelt felt 'S' - Sykkelfelt 'H' -", "sok.filter( mittfilter ) stat = sok.statistikk() objtypenavn = sok.objektTypeDef['navn'] print(", "in enumerate( res): mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] + '_' +", "data = [] vegsegment = v.nesteForekomst() while vegsegment: if sjekkfelt(", ": Bool, default=True. Angir om vi skal ta med data", "if not komr: komr = [ '9302 Haugesund 2020-2025', '9304", "installasjon i et eget \"environment\". Dette er god kodehygiene og", "du kombinasjonen vegsegmenter=True, geometri=False RETURNS None \"\"\" if not '.gpkg'", "om vegnett eller ikke vegsegmenter=False : Bool, default=False. Angir om", "nvdbapiv3.nvdbVegnett fra biblioteket https://github.com/LtGlahn/nvdbapi-V3 ARGUMENTS None KEYWORDS: mittfilter: Dictionary med", "ikke adskiltelop=MOT v.filter( { 'trafikantgruppe' : 'K', 'detaljniva' : 'VT,VTKB',", "slags felttype som skal sjekkes. Mulige verdier: firefelt (default). Antar", "if len( rec ) > 0: mindf = pd.DataFrame( rec", ") rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri ) if len( rec", "Må trickse litt for å unngå navnekollisjon kolonner = list(", "= mindf['geometri'].apply( wkt.loads ) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973", "feltkoder for et vegsegment. 
KEYWORDS mittfilter=['vanlig', 'K', 'R' ] -", "v.nesteForekomst() while vegsegment: if sjekkfelt( vegsegment, felttype='firefelt'): vegsegment['feltoversikt'] = ','.join(", "vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) ) if vr in vegsegment.keys(): if", "and len( kjfelt ) >= 2 and not kjfelt.issuperset( {1,", "mindf.drop( 'veglenkenummer', 1, inplace=True) mindf.drop( 'segmentnummer', 1, inplace=True) mindf.drop( 'startnode',", "mindf.drop( 'referanse', 1, inplace=True) mindf.drop( 'målemetode', 1, inplace=True) mindf.drop( 'måledato',", "'vanlig' - Helt vanlig kjørefelt, kjørefeltnumemr er angitt som heltall", "mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True): \"\"\" Lagrer NVDB vegnett og angitte", "i nvdbapiv3.py, for eksempel { 'kommune' : 5001 } Samme", "v = nvdbapiv3.nvdbVegnett() # Legger til filter på kun fase", "heltall uten noen bokstaver. 'K' - kollektivfelt 'R' - reversibelt", "felttype - hva slags felttype som skal sjekkes. Mulige verdier:", "# Fortau 199, # Trær 15, # Grasdekker 274, #", "Haugesund kontrakt) 517, # Artsrik vegkant 800, # Fremmede arter", "mittfilter: sok.filter( mittfilter ) stat = sok.statistikk() objtypenavn = sok.objektTypeDef['navn']", "= 'Ev,Rv,Fv,Kv,Sv,Pv' if not 'kryssystem' in mittfilter.keys(): mittfilter['kryssystem'] = 'false'", "fra objektets stedfesting langs veg så bruker du kombinasjonen vegsegmenter=True,", "samt ikke minst: Eventuelt kluss lar seg greit reparere ved", "= nvdbapiv3.nvdbVegnett() # Legger til filter på kun fase =", "= [ x.lower() for x in kolonner ] # Duplicate", "mindf.drop( 'geometri', 1, inplace=True) mindf.drop( 'kontraktsområder', 1, inplace=True) mindf.drop( 'riksvegruter',", "bibliotekene kunne historisk av og til være plundrete å installere,", "1, inplace=True) minGdf.drop( 'geometri', 1, inplace=True) minGdf.to_file( filnavn, layer=lagnavn, driver=\"GPKG\")", "in list # Using list comprehension + list slicing #", "slicing # 
https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ res = [idx for idx, val in", "topologinivå, og ikke adskiltelop=MOT v.filter( { 'trafikantgruppe' : 'K', 'detaljniva'", "> 1: mindf = pd.DataFrame( data ) mindf['geometry'] = mindf['geometri'].apply(", "Antar at firefeltsveg betyr at kjørefeltnummer 1-4 er brukt og", "# Grasdekker 274, # Blomsterbeplanting 511, # Busker 300 ,", "skal ta med data om vegnett eller ikke vegsegmenter=False :", "mittfilter=None : Dictionary med filter til søkeobjekt i nvdbapiv3.py, for", ") > 0: mindf = pd.DataFrame( rec ) # Må", "= pd.DataFrame( data ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop(", "det ikke kommer i konflikt med anna filter if not", "Mulige verdier: firefelt (default). Antar at firefeltsveg betyr at kjørefeltnummer", "Ferjeleie 39, # Rasteplass 48, # Fortau 199, # Trær", "filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True): \"\"\" Lagrer NVDB vegnett og", "med objekttyper du vil lagre KEYWORDS mittfilter=None : Dictionary med", ": Bool, default=False. Angir om vi skal repetere objektet delt", "val in lowerkolonner[:idx]] for ii, dublett in enumerate( res): mindf.rename(columns={", "if sjekkfelt( vegsegment, felttype='firefelt'): vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt'] ) vegsegment['geometri']", ": kolonner[dublett] + '_' + str( ii+1 ) }, inplace=True", "inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) minGdf.to_file( filnavn,", "KEYWORDS mittfilter=None : Dictionary med filter til søkeobjekt i nvdbapiv3.py,", "på Fv5724, envegskjørt tunnel ved Oldenvatnet. 
elif vegsegment[vr][sr]['adskilte_løp'] == 'Med'", "vegsegment['adskilte løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp'] data.append( vegsegment ) vegsegment = v.nesteForekomst()", "svar = False vr = 'vegsystemreferanse' sr = 'strekning' if", "venstre 'B' - Ekstra felt for bompengeinnkreving RETURNS Liste med", "re.findall( '[A-Za-z]', felt) if feltbokstav: feltbokstav = feltbokstav[0] else: feltbokstav", "element indices in list # Using list comprehension + list", "else: raise NotImplementedError('Sjekkfelt: Sjekk for felt av type: ' +", "for felt av type: ' + felttype + 'er ikke", "not komr: komr = [ '9302 Haugesund 2020-2025', '9304 Bergen',", "in komr: filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt ) nvdb2gpkg( objliste, filnavn=filnavn,", "og ikke adskiltelop=MOT v.filter( { 'trafikantgruppe' : 'K', 'detaljniva' :", "minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) # må droppe", ") minGdf.to_file( filnavn, layer='vegnett', driver=\"GPKG\") def dumpkontraktsomr( komr = []", "vegsegmenter=True, geometri=False RETURNS None \"\"\" if not '.gpkg' in filnavn:", "= int( enObjTypeId ) sok = nvdbapiv3.nvdbFagdata( enObjTypeId ) if", "mittfilter ) stat = sok.statistikk() objtypenavn = sok.objektTypeDef['navn'] print( 'Henter',", "'sideanlegg' in mittfilter.keys(): mittfilter['sideanlegg'] = 'false' v.filter( mittfilter ) #", "return svar else: raise NotImplementedError('Sjekkfelt: Sjekk for felt av type:", "av geopandas, shapely og en del andre ting som må", "'.gpkg' in filnavn: filnavn = filnavn + datetime.today().strftime('%Y-%m-%d') + '.gpkg'", "egengeometri (hvis det finnes) Hvis du ønsker å presentere vegobjekt", "len( rec ) > 0: mindf = pd.DataFrame( rec )", "= [] for enkontrakt in komr: filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt", "må droppe kolonne vegsegmenter hvis du har vegsegmenter=False if 'vegsegmenter'", "wkt # from shapely.ops import unary_union import pandas as pd", "= pd.DataFrame( rec) mindf['geometry'] = 
mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri',", "kjørefeltnummer hvor kun kjørefelt som angitt med mittfilter-nøkkelord er inkludert", "list( mindf.columns ) lowerkolonner = [ x.lower() for x in", "Ekstra felt for bompengeinnkreving RETURNS Liste med kjørefeltnummer hvor kun", "= vegsegment['geometri']['wkt'] vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform'] vegsegment['vegnr'] = vegsegment['vref'].split()[0] vegsegment['vegkategori'] =", "nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True): \"\"\" Lagrer NVDB", "separat. Noen av disse bibliotekene kunne historisk av og til", "inplace=True) mindf.drop( 'veglenkenummer', 1, inplace=True) mindf.drop( 'segmentnummer', 1, inplace=True) mindf.drop(", "versjonskonflikter seg i mellom, spesielt på windows. Slikt plunder hører", "int( enObjTypeId ) sok = nvdbapiv3.nvdbFagdata( enObjTypeId ) if mittfilter:", ") nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt }) def firefeltrapport(", "forekommet på Fv5724, envegskjørt tunnel ved Oldenvatnet. 
elif vegsegment[vr][sr]['adskilte_løp'] ==", "import wkt # from shapely.ops import unary_union import pandas as", "'type' + str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() ) rec", "hvis du har vegsegmenter=False if 'vegsegmenter' in minGdf.columns: minGdf.drop( 'vegsegmenter',", "med koder for hva slags felt vi skal telle med.", "finnes på et vegsegment ARGUMENTS: vegsegment - dicionary med data", "ved å lage nytt \"enviroment\", uten at det påvirker hele", "minimalt med kluss, samt ikke minst: Eventuelt kluss lar seg", "skal repetere objektet delt inn etter vegsegementer geometri=True : Bool,", "1, inplace=True) mindf.drop( 'måledato', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf,", "filter if not 'vegsystemreferanse' in mittfilter.keys(): mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv' if", "enObjTypeId = int( enObjTypeId ) sok = nvdbapiv3.nvdbFagdata( enObjTypeId )", "lagre KEYWORDS mittfilter=None : Dictionary med filter til søkeobjekt i", "Bool, default=True. 
Angir om vi skal ta med data om", "er inkludert \"\"\" data = [ ] for felt in", "else: feltbokstav = 'vanlig' if feltbokstav in mittfilter: feltnummer =", "Dictionary med søkefilter RETURNS geodataframe med resultatet \"\"\" v =", "(hardkodede) kontraktsområder \"\"\" if not komr: komr = [ '9302", "Lagrer NVDB vegnett og angitte objekttyper til geopackage ARGUMENTS objekttyper:", "mittfilter.keys(): mittfilter['kryssystem'] = 'false' if not 'sideanlegg' in mittfilter.keys(): mittfilter['sideanlegg']", "= 'false' v.filter( mittfilter ) # Kun kjørende, og kun", "and 'Vegtrase' in vegsegment['detaljnivå']: kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig',", "= mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) mindf.drop( 'kontraktsområder',", "'firefelt': if 'feltoversikt' in vegsegment.keys() and 'detaljnivå' in vegsegment.keys() and", "= V (eksistende veg), såfremt det ikke kommer i konflikt", "'K', 'R' ] - Liste med koder for hva slags", "Sykkelfelt 'H' - Svingefelt mot høyre 'V' - Svingefelt mot", "felt in feltoversikt: feltbokstav = re.findall( '[A-Za-z]', felt) if feltbokstav:", "if not isinstance(objekttyper, list ): objekttyper = [ objekttyper ]", "val in enumerate(lowerkolonner) if val in lowerkolonner[:idx]] for ii, dublett", "# Trafikkmengde 105, # Fartsgrense 810, # Vinterdriftsklasse 482, #", "vanlig kj.felt, kollektivfelt eller reversibelt felt (flere varianter kommer når", "[ ] for felt in feltoversikt: feltbokstav = re.findall( '[A-Za-z]',", "mittfilter-nøkkelord er inkludert \"\"\" data = [ ] for felt", "vi skal repetere objektet delt inn etter vegsegementer geometri=True :", "'strekning' if felttype == 'firefelt': if 'feltoversikt' in vegsegment.keys() and", "fullt å bruke (ana)conda installasjon i et eget \"environment\". 
Dette", "[ x.lower() for x in kolonner ] # Duplicate element", "å installere, evt ha versjonskonflikter seg i mellom, spesielt på", "if not 'kryssystem' in mittfilter.keys(): mittfilter['kryssystem'] = 'false' if not", "\"\"\" Sjekker hva slags felt som finnes på et vegsegment", "= 'vanlig' if feltbokstav in mittfilter: feltnummer = int( re.split(", "sok.objektTypeId, objtypenavn ) lagnavn = 'type' + str(enObjTypeId) + '_'", "RETURNS None \"\"\" if not '.gpkg' in filnavn: filnavn =", "gjøre nyttige ting, f.eks. lagre geografiske datasett Disse hjelpefunksjonene forutsetter", "None) veg.filter( mittfilter ) print( 'Henter vegnett') rec = veg.to_records()", "return None def sjekkfelt( vegsegment, felttype='firefelt' ): \"\"\" Sjekker hva", "'kommune' : 5001 } Samme filter brukes på både vegnett", "in vegsegment.keys() and 'Vegtrase' in vegsegment['detaljnivå']: kjfelt = set( filtrerfeltoversikt(", "https://www.vegvesen.no/_attachment/61505 for mulige verdier, kortversjon: 'vanlig' - Helt vanlig kjørefelt,", "det påvirker hele python-installasjonen din. \"\"\" import re import pdb", "= sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri ) if len( rec ) >", "\"enviroment\", uten at det påvirker hele python-installasjonen din. \"\"\" import", "øverste topologinivå, og ikke adskiltelop=MOT v.filter( { 'trafikantgruppe' : 'K',", "'H' - Svingefelt mot høyre 'V' - Svingefelt mot venstre", "finnes) Hvis du ønsker å presentere vegobjekt ut fra objektets", "evt ha versjonskonflikter seg i mellom, spesielt på windows. 
Slikt", "= mittfilter.pop( 'overlapp', None) veg.filter( mittfilter ) print( 'Henter vegnett')", "','.join( vegsegment['feltoversikt'] ) vegsegment['geometri'] = vegsegment['geometri']['wkt'] vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform'] vegsegment['vegnr']", "] - Liste med koder for hva slags felt vi", "layer='vegnett', driver=\"GPKG\") def dumpkontraktsomr( komr = [] ): \"\"\" Dumper", "Returnerer liste med kjørefeltnummer filtrert på hva slags feltkode vi", "vegnett og angitte objekttyper til geopackage ARGUMENTS objekttyper: Liste med", "in vegsegment.keys(): if sr in vegsegment[vr] and 'adskilte_løp' in vegsegment[vr][sr]:", "mittfilter: feltnummer = int( re.split( '[A-Z]', felt)[0] ) data.append( feltnummer", "'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop' : 'med,nei' } ) data", "Fremmede arter 67, # Tunnelløp 846, # Skredsikring, bremsekjegler 850", "raise NotImplementedError('Sjekkfelt: Sjekk for felt av type: ' + felttype", "nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt }) def firefeltrapport( mittfilter={}):", "apiforbindelse import apiforbindelse def nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False,", "= gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) return minGdf else: return", "objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True): \"\"\" Lagrer NVDB vegnett", "Liste med objekttyper du vil lagre KEYWORDS mittfilter=None : Dictionary", "sr = 'strekning' if felttype == 'firefelt': if 'feltoversikt' in", "KEYWORDS mittfilter=['vanlig', 'K', 'R' ] - Liste med koder for", "= [ 540, # Trafikkmengde 105, # Fartsgrense 810, #", "andre ting som må installeres separat. 
Noen av disse bibliotekene", "[idx for idx, val in enumerate(lowerkolonner) if val in lowerkolonner[:idx]]", "2, 3, 4}): svar = True # Siste klausul her", "rec ) > 0: mindf = pd.DataFrame( rec ) #", "mindf = pd.DataFrame( data ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads )", "Dumper et har (hardkodede) kontraktsområder \"\"\" if not komr: komr", "vegkant 800, # Fremmede arter 67, # Tunnelløp 846, #", "inplace=True) mindf.drop( 'kortform', 1, inplace=True) mindf.drop( 'veglenkenummer', 1, inplace=True) mindf.drop(", "filtrert på hva slags feltkode vi evt har ARGUMENTS feltoversikt", "for enkontrakt in komr: filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt ) nvdb2gpkg(", "med kjørefeltnummer filtrert på hva slags feltkode vi evt har", "inplace=True) mindf.drop( 'metadata', 1, inplace=True) mindf.drop( 'kortform', 1, inplace=True) mindf.drop(", "mittfilter) if vegnett: veg = nvdbapiv3.nvdbVegnett() if mittfilter: junk =", "and 'detaljnivå' in vegsegment.keys() and 'Vegtrase' in vegsegment['detaljnivå']: kjfelt =", "lowerkolonner = [ x.lower() for x in kolonner ] #", "skal telle med. 
Sjekk håndbok v830 Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for", "veglys 2021-2024'] objliste = [ 540, # Trafikkmengde 105, #", "67, # Tunnelløp 846, # Skredsikring, bremsekjegler 850 # Skredsikring,", "mittfilter={'kontraktsomrade' : enkontrakt }) def firefeltrapport( mittfilter={}): \"\"\" Finner alle", "konflikt med anna filter if not 'vegsystemreferanse' in mittfilter.keys(): mittfilter['vegsystemreferanse']", "\"\"\" Lagrer NVDB vegnett og angitte objekttyper til geopackage ARGUMENTS", "på hva slags feltkode vi evt har ARGUMENTS feltoversikt -", "vegnett=True, vegsegmenter=False, geometri=True): \"\"\" Lagrer NVDB vegnett og angitte objekttyper", "= 'strekning' if felttype == 'firefelt': if 'feltoversikt' in vegsegment.keys()", "på et vegsegment ARGUMENTS: vegsegment - dicionary med data om", "{ 1, 2, 3, 4}): svar = True # Siste", "med mittfilter-nøkkelord er inkludert \"\"\" data = [ ] for", "driver=\"GPKG\") def dumpkontraktsomr( komr = [] ): \"\"\" Dumper et", "vegsegmenter=False : Bool, default=False. Angir om vi skal repetere objektet", "+ datetime.today().strftime('%Y-%m-%d') + '.gpkg' if not isinstance(objekttyper, list ): objekttyper", "+ list slicing # https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ res = [idx for idx,", "vegsegment. 
KEYWORDS mittfilter=['vanlig', 'K', 'R' ] - Liste med koder", "# Skredsikring, bremsekjegler 850 # Skredsikring, forbygning ] objliste =", "vr in vegsegment.keys(): if sr in vegsegment[vr] and 'adskilte_løp' in", "i Haugesund kontrakt) 517, # Artsrik vegkant 800, # Fremmede", "pd import geopandas as gpd from datetime import datetime import", "søkekriterie Bruker søkeobjektet nvdbapiv3.nvdbVegnett fra biblioteket https://github.com/LtGlahn/nvdbapi-V3 ARGUMENTS None KEYWORDS:", "= True return svar else: raise NotImplementedError('Sjekkfelt: Sjekk for felt", "# Vinterdriftsklasse 482, # trafikkregistreringsstasjon 153, # Værstasjon 64, #", "er god kodehygiene og sikrer minimalt med kluss, samt ikke", "list comprehension + list slicing # https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ res = [idx", "veg.filter( mittfilter ) print( 'Henter vegnett') rec = veg.to_records() mindf", "komr = [] ): \"\"\" Dumper et har (hardkodede) kontraktsområder", "mittfilter.keys(): mittfilter['sideanlegg'] = 'false' v.filter( mittfilter ) # Kun kjørende,", "False vr = 'vegsystemreferanse' sr = 'strekning' if felttype ==", "feltoversikt, mittfilter=['vanlig', 'K', 'R' ]): \"\"\" Returnerer liste med kjørefeltnummer", "in vegsegment[vr] and 'adskilte_løp' in vegsegment[vr][sr]: if vegsegment[vr][sr]['adskilte_løp'] == 'Nei'", "hører historien til (stort sett) Anbefalingen er like fullt å", "filnavn = filnavn + datetime.today().strftime('%Y-%m-%d') + '.gpkg' if not isinstance(objekttyper,", "Legger til filter på kun fase = V (eksistende veg),", "Naturområde (ingen treff i Haugesund kontrakt) 517, # Artsrik vegkant", "1, inplace=True) mindf.drop( 'metadata', 1, inplace=True) mindf.drop( 'kortform', 1, inplace=True)", "- Ekstra felt for bompengeinnkreving RETURNS Liste med kjørefeltnummer hvor", "isinstance(objekttyper, list ): objekttyper = [ objekttyper ] for enObjTypeId", "300 , # Naturområde (ingen treff i Haugesund kontrakt) 517,", "}) def firefeltrapport( 
mittfilter={}): \"\"\" Finner alle firefeltsveger i Norge,", "vegnett=True : Bool, default=True. Angir om vi skal ta med", "RETURNS Liste med kjørefeltnummer hvor kun kjørefelt som angitt med", "objtypenavn = sok.objektTypeDef['navn'] print( 'Henter', stat['antall'], 'forekomster av objekttype', sok.objektTypeId,", "kombinasjonen vegsegmenter=True, geometri=False RETURNS None \"\"\" if not '.gpkg' in", "f.eks. forekommet på Fv5724, envegskjørt tunnel ved Oldenvatnet. elif vegsegment[vr][sr]['adskilte_løp']", ") > 1: mindf = pd.DataFrame( data ) mindf['geometry'] =", "geografiske datasett Disse hjelpefunksjonene forutsetter fungerende installasjon av geopandas, shapely", "if feltbokstav: feltbokstav = feltbokstav[0] else: feltbokstav = 'vanlig' if", "2021-2024'] objliste = [ 540, # Trafikkmengde 105, # Fartsgrense", "Bruker søkeobjektet nvdbapiv3.nvdbVegnett fra biblioteket https://github.com/LtGlahn/nvdbapi-V3 ARGUMENTS None KEYWORDS: mittfilter:", "vegsegment = v.nesteForekomst() if len( data ) > 1: mindf", "import datetime import nvdbapiv3 from apiforbindelse import apiforbindelse def nvdb2gpkg(", "geometri=True): \"\"\" Lagrer NVDB vegnett og angitte objekttyper til geopackage", "type \"\"\" svar = False vr = 'vegsystemreferanse' sr =", "import geopandas as gpd from datetime import datetime import nvdbapiv3", "kolonne vegsegmenter hvis du har vegsegmenter=False if 'vegsegmenter' in minGdf.columns:", "som skal sjekkes. Mulige verdier: firefelt (default). 
Antar at firefeltsveg", "objtypenavn, 'for filter', mittfilter) if vegnett: veg = nvdbapiv3.nvdbVegnett() if", "som finnes på et vegsegment ARGUMENTS: vegsegment - dicionary med", "minGdf.to_file( filnavn, layer='vegnett', driver=\"GPKG\") def dumpkontraktsomr( komr = [] ):", "være plundrete å installere, evt ha versjonskonflikter seg i mellom,", "1, inplace=True) mindf.drop( 'riksvegruter', 1, inplace=True) mindf.drop( 'href', 1, inplace=True)", "1, inplace=True) mindf.drop( 'segmentnummer', 1, inplace=True) mindf.drop( 'startnode', 1, inplace=True)", "and kjfelt.issuperset( { 1, 2, 3, 4}): svar = True", "har f.eks. forekommet på Fv5724, envegskjørt tunnel ved Oldenvatnet. elif", "\"\"\" data = [ ] for felt in feltoversikt: feltbokstav", "if len( data ) > 1: mindf = pd.DataFrame( data", "layer=lagnavn, driver=\"GPKG\") else: print( 'Ingen forekomster av', objtypenavn, 'for filter',", "(flere varianter kommer når de trengs) RETURNS boolean - True", "not kjfelt.issuperset( {1, 2} ): svar = True return svar", "langs veg så bruker du kombinasjonen vegsegmenter=True, geometri=False RETURNS None", "seg i mellom, spesielt på windows. Slikt plunder hører historien", "svar = True # Siste klausul her har f.eks. forekommet", "envegskjørt tunnel ved Oldenvatnet. elif vegsegment[vr][sr]['adskilte_løp'] == 'Med' and len(", "sr in vegsegment[vr] and 'adskilte_løp' in vegsegment[vr][sr]: if vegsegment[vr][sr]['adskilte_løp'] ==", "<gh_stars>0 \"\"\" En samling hjelpefunksjoner som bruker nvdbapiv3-funksjonene til å", "from apiforbindelse import apiforbindelse def nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True,", "et har (hardkodede) kontraktsområder \"\"\" if not komr: komr =", "felt for bompengeinnkreving RETURNS Liste med kjørefeltnummer hvor kun kjørefelt", "uten noen bokstaver. 
'K' - kollektivfelt 'R' - reversibelt felt", "vi evt har ARGUMENTS feltoversikt - Liste med feltkoder for", "'vegsegmenter' in minGdf.columns: minGdf.drop( 'vegsegmenter', 1, inplace=True) minGdf.drop( 'geometri', 1,", "mindf, geometry='geometry', crs=5973 ) # må droppe kolonne vegsegmenter hvis", "199, # Trær 15, # Grasdekker 274, # Blomsterbeplanting 511,", "med resultatet \"\"\" v = nvdbapiv3.nvdbVegnett() # Legger til filter", "data ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1,", "Grasdekker 274, # Blomsterbeplanting 511, # Busker 300 , #", "presentere vegobjekt ut fra objektets stedfesting langs veg så bruker", "= int( re.split( '[A-Z]', felt)[0] ) data.append( feltnummer ) return", "if not 'vegsystemreferanse' in mittfilter.keys(): mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv' if not", "lar seg greit reparere ved å lage nytt \"enviroment\", uten", "# https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ res = [idx for idx, val in enumerate(lowerkolonner)", "objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt }) def firefeltrapport( mittfilter={}): \"\"\"", "else: print( 'Ingen forekomster av', objtypenavn, 'for filter', mittfilter) if", "shapely.ops import unary_union import pandas as pd import geopandas as", "reversibelt felt (flere varianter kommer når de trengs) RETURNS boolean", "kollektivfelt eller reversibelt felt (flere varianter kommer når de trengs)", ") if vr in vegsegment.keys(): if sr in vegsegment[vr] and", "] for felt in feltoversikt: feltbokstav = re.findall( '[A-Za-z]', felt)", "som heltall uten noen bokstaver. 
'K' - kollektivfelt 'R' -", "sok.statistikk() objtypenavn = sok.objektTypeDef['navn'] print( 'Henter', stat['antall'], 'forekomster av objekttype',", "enObjTypeId ) sok = nvdbapiv3.nvdbFagdata( enObjTypeId ) if mittfilter: sok.filter(", "søkeobjektet nvdbapiv3.nvdbVegnett fra biblioteket https://github.com/LtGlahn/nvdbapi-V3 ARGUMENTS None KEYWORDS: mittfilter: Dictionary", "if vegnett: veg = nvdbapiv3.nvdbVegnett() if mittfilter: junk = mittfilter.pop(", "(eksistende veg), såfremt det ikke kommer i konflikt med anna", "historien til (stort sett) Anbefalingen er like fullt å bruke", "treff i Haugesund kontrakt) 517, # Artsrik vegkant 800, #", "Slikt plunder hører historien til (stort sett) Anbefalingen er like", "'geometri', 1, inplace=True) minGdf.to_file( filnavn, layer=lagnavn, driver=\"GPKG\") else: print( 'Ingen", "objekttyper til geopackage ARGUMENTS objekttyper: Liste med objekttyper du vil", "mindf = pd.DataFrame( rec) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop(", "et eget \"environment\". Dette er god kodehygiene og sikrer minimalt", "til filter på kun fase = V (eksistende veg), såfremt", "'K', 'R']) ) if vr in vegsegment.keys(): if sr in", "in mittfilter.keys(): mittfilter['kryssystem'] = 'false' if not 'sideanlegg' in mittfilter.keys():", "med feltkoder for et vegsegment. 
KEYWORDS mittfilter=['vanlig', 'K', 'R' ]", "rec = sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri ) if len( rec )", "disse bibliotekene kunne historisk av og til være plundrete å", "og til være plundrete å installere, evt ha versjonskonflikter seg", "39, # Rasteplass 48, # Fortau 199, # Trær 15,", "= [ '9302 Haugesund 2020-2025', '9304 Bergen', '9305 Sunnfjord' ]", "inplace=True) mindf.drop( 'kontraktsområder', 1, inplace=True) mindf.drop( 'riksvegruter', 1, inplace=True) mindf.drop(", "mindf.drop( 'riksvegruter', 1, inplace=True) mindf.drop( 'href', 1, inplace=True) mindf.drop( 'metadata',", "mittfilter=['vanlig', 'K', 'R']) ) if vr in vegsegment.keys(): if sr", "hvor kun kjørefelt som angitt med mittfilter-nøkkelord er inkludert \"\"\"", "mindf.drop( 'sluttnode', 1, inplace=True) mindf.drop( 'referanse', 1, inplace=True) mindf.drop( 'målemetode',", "i et eget \"environment\". Dette er god kodehygiene og sikrer", "else: return None def sjekkfelt( vegsegment, felttype='firefelt' ): \"\"\" Sjekker", "plunder hører historien til (stort sett) Anbefalingen er like fullt", "vil lagre KEYWORDS mittfilter=None : Dictionary med filter til søkeobjekt", "felttype == 'firefelt': if 'feltoversikt' in vegsegment.keys() and 'detaljnivå' in", "koder for hva slags felt vi skal telle med. Sjekk", "from datetime import datetime import nvdbapiv3 from apiforbindelse import apiforbindelse", "datetime.today().strftime('%Y-%m-%d') + '.gpkg' if not isinstance(objekttyper, list ): objekttyper =", "'Nei' and kjfelt.issuperset( { 1, 2, 3, 4}): svar =", "felt 'S' - Sykkelfelt 'H' - Svingefelt mot høyre 'V'", "\"\"\" import re import pdb from shapely import wkt #", "apiforbindelse def nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True): \"\"\"", "er brukt og er enten vanlig kj.felt, kollektivfelt eller reversibelt", "nyttige ting, f.eks. 
lagre geografiske datasett Disse hjelpefunksjonene forutsetter fungerende", "kun fase = V (eksistende veg), såfremt det ikke kommer", "'metadata', 1, inplace=True) mindf.drop( 'kortform', 1, inplace=True) mindf.drop( 'veglenkenummer', 1,", "set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) ) if vr in", "felt (flere varianter kommer når de trengs) RETURNS boolean -", "filter', mittfilter) if vegnett: veg = nvdbapiv3.nvdbVegnett() if mittfilter: junk", "+ str( ii+1 ) }, inplace=True ) mindf['geometry'] = mindf['geometri'].apply(", "'for filter', mittfilter) if vegnett: veg = nvdbapiv3.nvdbVegnett() if mittfilter:", "= veg.to_records() mindf = pd.DataFrame( rec) mindf['geometry'] = mindf['geometri'].apply( wkt.loads", "nvdbapiv3 from apiforbindelse import apiforbindelse def nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None,", "len( data ) > 1: mindf = pd.DataFrame( data )", "def sjekkfelt( vegsegment, felttype='firefelt' ): \"\"\" Sjekker hva slags felt", "kommer når de trengs) RETURNS boolean - True hvis kjørefeltene", ": 'K', 'detaljniva' : 'VT,VTKB', 'adskiltelop' : 'med,nei' } )", "Bool, default=True. 
Angir om vi skal hente geometri fra egengeometri", "if 'feltoversikt' in vegsegment.keys() and 'detaljnivå' in vegsegment.keys() and 'Vegtrase'", "unngå navnekollisjon kolonner = list( mindf.columns ) lowerkolonner = [", "forutsetter fungerende installasjon av geopandas, shapely og en del andre", "felt som finnes på et vegsegment ARGUMENTS: vegsegment - dicionary", "from shapely import wkt # from shapely.ops import unary_union import", "'adskiltelop' : 'med,nei' } ) data = [] vegsegment =", "+ '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() ) rec = sok.to_records( vegsegmenter=vegsegmenter,", "inplace=True ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) minGdf = gpd.GeoDataFrame(", "): \"\"\" Sjekker hva slags felt som finnes på et", "(ennå)' ) def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K', 'R' ]): \"\"\"", "'[A-Za-z]', felt) if feltbokstav: feltbokstav = feltbokstav[0] else: feltbokstav =", "'Henter', stat['antall'], 'forekomster av objekttype', sok.objektTypeId, objtypenavn ) lagnavn =", "48, # Fortau 199, # Trær 15, # Grasdekker 274,", "= pd.DataFrame( rec ) # Må trickse litt for å", "kjørende, og kun øverste topologinivå, og ikke adskiltelop=MOT v.filter( {", "datetime import datetime import nvdbapiv3 from apiforbindelse import apiforbindelse def", "= 'type' + str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() )", "mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv' if not 'kryssystem' in mittfilter.keys(): mittfilter['kryssystem'] =", "482, # trafikkregistreringsstasjon 153, # Værstasjon 64, # Ferjeleie 39,", "True return svar else: raise NotImplementedError('Sjekkfelt: Sjekk for felt av", "installere, evt ha versjonskonflikter seg i mellom, spesielt på windows.", "bremsekjegler 850 # Skredsikring, forbygning ] objliste = [] for", "forbygning ] objliste = [] for enkontrakt in komr: filnavn", "til å gjøre nyttige ting, f.eks. 
lagre geografiske datasett Disse", "vi skal ta med data om vegnett eller ikke vegsegmenter=False", "] komr = [ '9253 Agder elektro og veglys 2021-2024']", "ha versjonskonflikter seg i mellom, spesielt på windows. Slikt plunder", "if not 'sideanlegg' in mittfilter.keys(): mittfilter['sideanlegg'] = 'false' v.filter( mittfilter", "en del andre ting som må installeres separat. Noen av", "greit reparere ved å lage nytt \"enviroment\", uten at det", "= 'false' if not 'sideanlegg' in mittfilter.keys(): mittfilter['sideanlegg'] = 'false'", "https://github.com/LtGlahn/nvdbapi-V3 ARGUMENTS None KEYWORDS: mittfilter: Dictionary med søkefilter RETURNS geodataframe", "in mittfilter: feltnummer = int( re.split( '[A-Z]', felt)[0] ) data.append(", "har ARGUMENTS feltoversikt - Liste med feltkoder for et vegsegment.", "mittfilter['kryssystem'] = 'false' if not 'sideanlegg' in mittfilter.keys(): mittfilter['sideanlegg'] =", "# må droppe kolonne vegsegmenter hvis du har vegsegmenter=False if", "153, # Værstasjon 64, # Ferjeleie 39, # Rasteplass 48,", "alle firefeltsveger i Norge, evt innafor angitt søkekriterie Bruker søkeobjektet", "while vegsegment: if sjekkfelt( vegsegment, felttype='firefelt'): vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt']", "mindf['geometri'].apply( wkt.loads ) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )", "# Blomsterbeplanting 511, # Busker 300 , # Naturområde (ingen", "V (eksistende veg), såfremt det ikke kommer i konflikt med", "1, 2, 3, 4}): svar = True # Siste klausul", "vanlig kjørefelt, kjørefeltnumemr er angitt som heltall uten noen bokstaver.", "vegsegment: if sjekkfelt( vegsegment, felttype='firefelt'): vegsegment['feltoversikt'] = ','.join( vegsegment['feltoversikt'] )", "geopandas as gpd from datetime import datetime import nvdbapiv3 from", "+ '_' + str( ii+1 ) }, inplace=True ) mindf['geometry']", "def firefeltrapport( mittfilter={}): \"\"\" Finner alle firefeltsveger i Norge, evt", "mot høyre 'V' - 
Svingefelt mot venstre 'B' - Ekstra", "har vegsegmenter=False if 'vegsegmenter' in minGdf.columns: minGdf.drop( 'vegsegmenter', 1, inplace=True)", "'detaljniva' : 'VT,VTKB', 'adskiltelop' : 'med,nei' } ) data =", "(ana)conda installasjon i et eget \"environment\". Dette er god kodehygiene", "\"\"\" Finner alle firefeltsveger i Norge, evt innafor angitt søkekriterie", "det finnes) Hvis du ønsker å presentere vegobjekt ut fra", "verdier: firefelt (default). Antar at firefeltsveg betyr at kjørefeltnummer 1-4", ") # Må trickse litt for å unngå navnekollisjon kolonner", "'V' - Svingefelt mot venstre 'B' - Ekstra felt for", "ting, f.eks. lagre geografiske datasett Disse hjelpefunksjonene forutsetter fungerende installasjon", "geometri=True : Bool, default=True. Angir om vi skal hente geometri", "elektro og veglys 2021-2024'] objliste = [ 540, # Trafikkmengde", "'R' ] - Liste med koder for hva slags felt", "mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] + '_' + str( ii+1 )", "i mellom, spesielt på windows. Slikt plunder hører historien til", "[] ): \"\"\" Dumper et har (hardkodede) kontraktsområder \"\"\" if", "felttype som skal sjekkes. Mulige verdier: firefelt (default). 
Antar at", "driver=\"GPKG\") else: print( 'Ingen forekomster av', objtypenavn, 'for filter', mittfilter)", ") data = [] vegsegment = v.nesteForekomst() while vegsegment: if", "fra biblioteket https://github.com/LtGlahn/nvdbapi-V3 ARGUMENTS None KEYWORDS: mittfilter: Dictionary med søkefilter", "= nvdbapiv3.esriSikkerTekst( enkontrakt ) nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt", "brukes på både vegnett og fagdata vegnett=True : Bool, default=True.", "}, inplace=True ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) minGdf =", "list slicing # https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ res = [idx for idx, val", "vr = 'vegsystemreferanse' sr = 'strekning' if felttype == 'firefelt':", "'.gpkg' if not isinstance(objekttyper, list ): objekttyper = [ objekttyper", "vegnett og fagdata vegnett=True : Bool, default=True. Angir om vi", ") lowerkolonner = [ x.lower() for x in kolonner ]", "elif vegsegment[vr][sr]['adskilte_løp'] == 'Med' and len( kjfelt ) >= 2", "vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform'] vegsegment['vegnr'] = vegsegment['vref'].split()[0] vegsegment['vegkategori'] = vegsegment['vref'][0] vegsegment['adskilte", "veg.to_records() mindf = pd.DataFrame( rec) mindf['geometry'] = mindf['geometri'].apply( wkt.loads )", "# Fremmede arter 67, # Tunnelløp 846, # Skredsikring, bremsekjegler", "'riksvegruter', 1, inplace=True) mindf.drop( 'href', 1, inplace=True) mindf.drop( 'metadata', 1,", "firefeltsveger i Norge, evt innafor angitt søkekriterie Bruker søkeobjektet nvdbapiv3.nvdbVegnett", "in vegsegment.keys() and 'detaljnivå' in vegsegment.keys() and 'Vegtrase' in vegsegment['detaljnivå']:", "pd.DataFrame( rec ) # Må trickse litt for å unngå", "crs=5973 ) return minGdf else: return None def sjekkfelt( vegsegment,", "droppe kolonne vegsegmenter hvis du har vegsegmenter=False if 'vegsegmenter' in", "+ str(enObjTypeId) + '_' + nvdbapiv3.esriSikkerTekst( objtypenavn.lower() 
) rec =", "mindf.drop( 'målemetode', 1, inplace=True) mindf.drop( 'måledato', 1, inplace=True) minGdf =", "res): mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] + '_' + str( ii+1", "geopandas, shapely og en del andre ting som må installeres", "søkeobjekt i nvdbapiv3.py, for eksempel { 'kommune' : 5001 }", "søkefilter RETURNS geodataframe med resultatet \"\"\" v = nvdbapiv3.nvdbVegnett() #", "Noen av disse bibliotekene kunne historisk av og til være", ") stat = sok.statistikk() objtypenavn = sok.objektTypeDef['navn'] print( 'Henter', stat['antall'],", "geometri=False RETURNS None \"\"\" if not '.gpkg' in filnavn: filnavn", "stat['antall'], 'forekomster av objekttype', sok.objektTypeId, objtypenavn ) lagnavn = 'type'", "minGdf.to_file( filnavn, layer=lagnavn, driver=\"GPKG\") else: print( 'Ingen forekomster av', objtypenavn,", "enkontrakt }) def firefeltrapport( mittfilter={}): \"\"\" Finner alle firefeltsveger i", "fase = V (eksistende veg), såfremt det ikke kommer i", "minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) return minGdf else:", "mindf, geometry='geometry', crs=5973 ) return minGdf else: return None def", "if sr in vegsegment[vr] and 'adskilte_løp' in vegsegment[vr][sr]: if vegsegment[vr][sr]['adskilte_løp']", "= [ '9253 Agder elektro og veglys 2021-2024'] objliste =", "at firefeltsveg betyr at kjørefeltnummer 1-4 er brukt og er", "Busker 300 , # Naturområde (ingen treff i Haugesund kontrakt)", "hva slags felt vi skal telle med. Sjekk håndbok v830", "uten at det påvirker hele python-installasjonen din. \"\"\" import re", "komr = [ '9253 Agder elektro og veglys 2021-2024'] objliste", "felt vi skal telle med. Sjekk håndbok v830 Nasjonalt vegreferansesystem", "'B' - Ekstra felt for bompengeinnkreving RETURNS Liste med kjørefeltnummer", "Sjekk for felt av type: ' + felttype + 'er", "python-installasjonen din. 
\"\"\" import re import pdb from shapely import", "du ønsker å presentere vegobjekt ut fra objektets stedfesting langs", "delt inn etter vegsegementer geometri=True : Bool, default=True. Angir om", "comprehension + list slicing # https://www.geeksforgeeks.org/python-duplicate-element-indices-in-list/ res = [idx for", "(ingen treff i Haugesund kontrakt) 517, # Artsrik vegkant 800,", "default=True. Angir om vi skal hente geometri fra egengeometri (hvis", "objekttyper = [ objekttyper ] for enObjTypeId in objekttyper: enObjTypeId", "og kun øverste topologinivå, og ikke adskiltelop=MOT v.filter( { 'trafikantgruppe'", "# Naturområde (ingen treff i Haugesund kontrakt) 517, # Artsrik", "Haugesund 2020-2025', '9304 Bergen', '9305 Sunnfjord' ] komr = [", "inplace=True) mindf.drop( 'href', 1, inplace=True) mindf.drop( 'metadata', 1, inplace=True) mindf.drop(", "def nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True): \"\"\" Lagrer", "du har vegsegmenter=False if 'vegsegmenter' in minGdf.columns: minGdf.drop( 'vegsegmenter', 1,", "274, # Blomsterbeplanting 511, # Busker 300 , # Naturområde", "arter 67, # Tunnelløp 846, # Skredsikring, bremsekjegler 850 #", "vegsegment['geometri']['wkt'] vegsegment['vref'] = vegsegment['vegsystemreferanse']['kortform'] vegsegment['vegnr'] = vegsegment['vref'].split()[0] vegsegment['vegkategori'] = vegsegment['vref'][0]", "int( re.split( '[A-Z]', felt)[0] ) data.append( feltnummer ) return data", "= [] ): \"\"\" Dumper et har (hardkodede) kontraktsområder \"\"\"", "= [idx for idx, val in enumerate(lowerkolonner) if val in", "indices in list # Using list comprehension + list slicing", "at det påvirker hele python-installasjonen din. \"\"\" import re import", "komr: komr = [ '9302 Haugesund 2020-2025', '9304 Bergen', '9305", "True # Siste klausul her har f.eks. forekommet på Fv5724,", "må installeres separat. Noen av disse bibliotekene kunne historisk av", "firefelt (default). 
Antar at firefeltsveg betyr at kjørefeltnummer 1-4 er", "ønsker å presentere vegobjekt ut fra objektets stedfesting langs veg", "av type: ' + felttype + 'er ikke implementert (ennå)'", "wkt.loads ) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) #", "vegsegment = v.nesteForekomst() while vegsegment: if sjekkfelt( vegsegment, felttype='firefelt'): vegsegment['feltoversikt']", "x in kolonner ] # Duplicate element indices in list", "= vegsegment['vegsystemreferanse']['strekning']['adskilte_løp'] data.append( vegsegment ) vegsegment = v.nesteForekomst() if len(", "og fagdata vegnett=True : Bool, default=True. Angir om vi skal", "av objekttype', sok.objektTypeId, objtypenavn ) lagnavn = 'type' + str(enObjTypeId)", "seg greit reparere ved å lage nytt \"enviroment\", uten at", "import apiforbindelse def nvdb2gpkg( objekttyper, filnavn='datadump', mittfilter=None, vegnett=True, vegsegmenter=False, geometri=True):", "Blomsterbeplanting 511, # Busker 300 , # Naturområde (ingen treff", "angitt søkekriterie Bruker søkeobjektet nvdbapiv3.nvdbVegnett fra biblioteket https://github.com/LtGlahn/nvdbapi-V3 ARGUMENTS None", "nvdbapiv3.nvdbVegnett() # Legger til filter på kun fase = V", "angitte objekttyper til geopackage ARGUMENTS objekttyper: Liste med objekttyper du", "geometri=geometri ) if len( rec ) > 0: mindf =", "rec = veg.to_records() mindf = pd.DataFrame( rec) mindf['geometry'] = mindf['geometri'].apply(", "mindf.drop( 'segmentnummer', 1, inplace=True) mindf.drop( 'startnode', 1, inplace=True) mindf.drop( 'sluttnode',", "på windows. 
Slikt plunder hører historien til (stort sett) Anbefalingen", "'adskilte_løp' in vegsegment[vr][sr]: if vegsegment[vr][sr]['adskilte_løp'] == 'Nei' and kjfelt.issuperset( {", "mindf.drop( 'måledato', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973", "vegsegment[vr][sr]['adskilte_løp'] == 'Nei' and kjfelt.issuperset( { 1, 2, 3, 4}):", "sok.to_records( vegsegmenter=vegsegmenter, geometri=geometri ) if len( rec ) > 0:", "filnavn=filnavn, mittfilter={'kontraktsomrade' : enkontrakt }) def firefeltrapport( mittfilter={}): \"\"\" Finner", "som bruker nvdbapiv3-funksjonene til å gjøre nyttige ting, f.eks. lagre", "list ): objekttyper = [ objekttyper ] for enObjTypeId in", "KEYWORDS: felttype - hva slags felttype som skal sjekkes. Mulige", "\"\"\" Returnerer liste med kjørefeltnummer filtrert på hva slags feltkode", "[ objekttyper ] for enObjTypeId in objekttyper: enObjTypeId = int(", "{ 'kommune' : 5001 } Samme filter brukes på både", ") mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) minGdf = gpd.GeoDataFrame( mindf,", "Trær 15, # Grasdekker 274, # Blomsterbeplanting 511, # Busker", "'9305 Sunnfjord' ] komr = [ '9253 Agder elektro og", "junk = mittfilter.pop( 'egenskap', None) junk = mittfilter.pop( 'overlapp', None)", "datetime import nvdbapiv3 from apiforbindelse import apiforbindelse def nvdb2gpkg( objekttyper,", "crs=5973 ) minGdf.to_file( filnavn, layer='vegnett', driver=\"GPKG\") def dumpkontraktsomr( komr =", "'måledato', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )", "= list( mindf.columns ) lowerkolonner = [ x.lower() for x", "'er ikke implementert (ennå)' ) def filtrerfeltoversikt( feltoversikt, mittfilter=['vanlig', 'K',", "'geometri', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 )", "filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt ) nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade' :", "betyr at kjørefeltnummer 1-4 er brukt og 
er enten vanlig", "et vegsegment. KEYWORDS mittfilter=['vanlig', 'K', 'R' ] - Liste med", "Norge, evt innafor angitt søkekriterie Bruker søkeobjektet nvdbapiv3.nvdbVegnett fra biblioteket", "] # Duplicate element indices in list # Using list", "'vegsegmenter', 1, inplace=True) minGdf.drop( 'geometri', 1, inplace=True) minGdf.to_file( filnavn, layer=lagnavn,", "inplace=True) mindf.drop( 'segmentnummer', 1, inplace=True) mindf.drop( 'startnode', 1, inplace=True) mindf.drop(", "= feltbokstav[0] else: feltbokstav = 'vanlig' if feltbokstav in mittfilter:", "'false' if not 'sideanlegg' in mittfilter.keys(): mittfilter['sideanlegg'] = 'false' v.filter(", "skal hente geometri fra egengeometri (hvis det finnes) Hvis du", "vegsegment['vegkategori'] = vegsegment['vref'][0] vegsegment['adskilte løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp'] data.append( vegsegment )", "if feltbokstav in mittfilter: feltnummer = int( re.split( '[A-Z]', felt)[0]", "for hva slags felt vi skal telle med. Sjekk håndbok", "'Ev,Rv,Fv,Kv,Sv,Pv' if not 'kryssystem' in mittfilter.keys(): mittfilter['kryssystem'] = 'false' if", "nvdbapiv3.nvdbVegnett() if mittfilter: junk = mittfilter.pop( 'egenskap', None) junk =", "1, inplace=True) mindf.drop( 'referanse', 1, inplace=True) mindf.drop( 'målemetode', 1, inplace=True)", "'vegsystemreferanse' in mittfilter.keys(): mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv' if not 'kryssystem' in", "= set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) ) if vr", "eksempel { 'kommune' : 5001 } Samme filter brukes på", ": Bool, default=True. 
Angir om vi skal hente geometri fra", "nvdbapiv3.py, for eksempel { 'kommune' : 5001 } Samme filter", "[ 540, # Trafikkmengde 105, # Fartsgrense 810, # Vinterdriftsklasse", ": Dictionary med filter til søkeobjekt i nvdbapiv3.py, for eksempel", ": 'med,nei' } ) data = [] vegsegment = v.nesteForekomst()", "shapely og en del andre ting som må installeres separat.", "og angitte objekttyper til geopackage ARGUMENTS objekttyper: Liste med objekttyper", "bruker nvdbapiv3-funksjonene til å gjøre nyttige ting, f.eks. lagre geografiske", "Angir om vi skal repetere objektet delt inn etter vegsegementer", "klausul her har f.eks. forekommet på Fv5724, envegskjørt tunnel ved", ">= 2 and not kjfelt.issuperset( {1, 2} ): svar =", "def dumpkontraktsomr( komr = [] ): \"\"\" Dumper et har", "# Busker 300 , # Naturområde (ingen treff i Haugesund", "både vegnett og fagdata vegnett=True : Bool, default=True. Angir om", "\"environment\". Dette er god kodehygiene og sikrer minimalt med kluss,", "hjelpefunksjonene forutsetter fungerende installasjon av geopandas, shapely og en del", "som angitt med mittfilter-nøkkelord er inkludert \"\"\" data = [", "veg så bruker du kombinasjonen vegsegmenter=True, geometri=False RETURNS None \"\"\"", "håndbok v830 Nasjonalt vegreferansesystem https://www.vegvesen.no/_attachment/61505 for mulige verdier, kortversjon: 'vanlig'", "til søkeobjekt i nvdbapiv3.py, for eksempel { 'kommune' : 5001", "med data om vegnett eller ikke vegsegmenter=False : Bool, default=False.", "kjørefeltnummer filtrert på hva slags feltkode vi evt har ARGUMENTS", "kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K', 'R']) ) if", "mittfilter={}): \"\"\" Finner alle firefeltsveger i Norge, evt innafor angitt", "= filnavn + datetime.today().strftime('%Y-%m-%d') + '.gpkg' if not isinstance(objekttyper, list", "firefeltsveg betyr at kjørefeltnummer 1-4 er brukt og er enten", ") if mittfilter: sok.filter( mittfilter ) stat = sok.statistikk() 
objtypenavn", "KEYWORDS: mittfilter: Dictionary med søkefilter RETURNS geodataframe med resultatet \"\"\"", "slags feltkode vi evt har ARGUMENTS feltoversikt - Liste med", "517, # Artsrik vegkant 800, # Fremmede arter 67, #", "as gpd from datetime import datetime import nvdbapiv3 from apiforbindelse", "vegnett eller ikke vegsegmenter=False : Bool, default=False. Angir om vi", "komr: filnavn = nvdbapiv3.esriSikkerTekst( enkontrakt ) nvdb2gpkg( objliste, filnavn=filnavn, mittfilter={'kontraktsomrade'", "for mulige verdier, kortversjon: 'vanlig' - Helt vanlig kjørefelt, kjørefeltnumemr", "bruke (ana)conda installasjon i et eget \"environment\". Dette er god", "feltkode vi evt har ARGUMENTS feltoversikt - Liste med feltkoder", "(stort sett) Anbefalingen er like fullt å bruke (ana)conda installasjon", "Eventuelt kluss lar seg greit reparere ved å lage nytt", "stedfesting langs veg så bruker du kombinasjonen vegsegmenter=True, geometri=False RETURNS", "veg), såfremt det ikke kommer i konflikt med anna filter", "eget \"environment\". 
Dette er god kodehygiene og sikrer minimalt med", "sjekkfelt( vegsegment, felttype='firefelt' ): \"\"\" Sjekker hva slags felt som", "ARGUMENTS: vegsegment - dicionary med data om en bit av", "kjørefeltene er av riktig type \"\"\" svar = False vr", "'segmentnummer', 1, inplace=True) mindf.drop( 'startnode', 1, inplace=True) mindf.drop( 'sluttnode', 1,", "= sok.statistikk() objtypenavn = sok.objektTypeDef['navn'] print( 'Henter', stat['antall'], 'forekomster av", "er av riktig type \"\"\" svar = False vr =", "vegsegment['vegsystemreferanse']['strekning']['adskilte_løp'] data.append( vegsegment ) vegsegment = v.nesteForekomst() if len( data", "1, inplace=True) mindf.drop( 'sluttnode', 1, inplace=True) mindf.drop( 'referanse', 1, inplace=True)", "if not '.gpkg' in filnavn: filnavn = filnavn + datetime.today().strftime('%Y-%m-%d')", "og veglys 2021-2024'] objliste = [ 540, # Trafikkmengde 105,", "kommer i konflikt med anna filter if not 'vegsystemreferanse' in", "vegsegmenter=False, geometri=True): \"\"\" Lagrer NVDB vegnett og angitte objekttyper til", "== 'firefelt': if 'feltoversikt' in vegsegment.keys() and 'detaljnivå' in vegsegment.keys()", "= False vr = 'vegsystemreferanse' sr = 'strekning' if felttype", "not isinstance(objekttyper, list ): objekttyper = [ objekttyper ] for", "Fv5724, envegskjørt tunnel ved Oldenvatnet. elif vegsegment[vr][sr]['adskilte_løp'] == 'Med' and", ") >= 2 and not kjfelt.issuperset( {1, 2} ): svar", "- Liste med feltkoder for et vegsegment. KEYWORDS mittfilter=['vanlig', 'K',", "'9253 Agder elektro og veglys 2021-2024'] objliste = [ 540,", "Helt vanlig kjørefelt, kjørefeltnumemr er angitt som heltall uten noen", "import unary_union import pandas as pd import geopandas as gpd", "noen bokstaver. 
'K' - kollektivfelt 'R' - reversibelt felt 'S'", "if 'vegsegmenter' in minGdf.columns: minGdf.drop( 'vegsegmenter', 1, inplace=True) minGdf.drop( 'geometri',", "anna filter if not 'vegsystemreferanse' in mittfilter.keys(): mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv'", "vegsegmenter=False if 'vegsegmenter' in minGdf.columns: minGdf.drop( 'vegsegmenter', 1, inplace=True) minGdf.drop(", "pd.DataFrame( data ) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri',", "'Vegtrase' in vegsegment['detaljnivå']: kjfelt = set( filtrerfeltoversikt( vegsegment['feltoversikt'], mittfilter=['vanlig', 'K',", "'overlapp', None) veg.filter( mittfilter ) print( 'Henter vegnett') rec =", "svar = True return svar else: raise NotImplementedError('Sjekkfelt: Sjekk for", "påvirker hele python-installasjonen din. \"\"\" import re import pdb from", "mindf.drop( 'geometri', 1, inplace=True) minGdf = gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973", "mindf.drop( 'metadata', 1, inplace=True) mindf.drop( 'kortform', 1, inplace=True) mindf.drop( 'veglenkenummer',", "filnavn + datetime.today().strftime('%Y-%m-%d') + '.gpkg' if not isinstance(objekttyper, list ):", "objliste = [] for enkontrakt in komr: filnavn = nvdbapiv3.esriSikkerTekst(", "om en bit av vegnettet hentet fra https://nvdbapiles-v3.atlas.vegvesen.no/vegnett/veglenkesekvenser/segmentert/ KEYWORDS: felttype", "Oldenvatnet. elif vegsegment[vr][sr]['adskilte_løp'] == 'Med' and len( kjfelt ) >=", "feltoversikt - Liste med feltkoder for et vegsegment. KEYWORDS mittfilter=['vanlig',", "sok = nvdbapiv3.nvdbFagdata( enObjTypeId ) if mittfilter: sok.filter( mittfilter )", "minGdf.drop( 'geometri', 1, inplace=True) minGdf.to_file( filnavn, layer=lagnavn, driver=\"GPKG\") else: print(", "= mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True) minGdf =", "ved Oldenvatnet. 
elif vegsegment[vr][sr]['adskilte_løp'] == 'Med' and len( kjfelt )", "not 'vegsystemreferanse' in mittfilter.keys(): mittfilter['vegsystemreferanse'] = 'Ev,Rv,Fv,Kv,Sv,Pv' if not 'kryssystem'", "mittfilter: junk = mittfilter.pop( 'egenskap', None) junk = mittfilter.pop( 'overlapp',", "datasett Disse hjelpefunksjonene forutsetter fungerende installasjon av geopandas, shapely og", "trafikkregistreringsstasjon 153, # Værstasjon 64, # Ferjeleie 39, # Rasteplass", "for eksempel { 'kommune' : 5001 } Samme filter brukes", "fungerende installasjon av geopandas, shapely og en del andre ting", "rec) mindf['geometry'] = mindf['geometri'].apply( wkt.loads ) mindf.drop( 'geometri', 1, inplace=True)", "Vinterdriftsklasse 482, # trafikkregistreringsstasjon 153, # Værstasjon 64, # Ferjeleie", "\"\"\" svar = False vr = 'vegsystemreferanse' sr = 'strekning'", "En samling hjelpefunksjoner som bruker nvdbapiv3-funksjonene til å gjøre nyttige", "lagre geografiske datasett Disse hjelpefunksjonene forutsetter fungerende installasjon av geopandas,", "geodataframe med resultatet \"\"\" v = nvdbapiv3.nvdbVegnett() # Legger til", "Anbefalingen er like fullt å bruke (ana)conda installasjon i et", "objekttyper: enObjTypeId = int( enObjTypeId ) sok = nvdbapiv3.nvdbFagdata( enObjTypeId", "1, inplace=True) mindf.drop( 'startnode', 1, inplace=True) mindf.drop( 'sluttnode', 1, inplace=True)", "Siste klausul her har f.eks. 
forekommet på Fv5724, envegskjørt tunnel", "850 # Skredsikring, forbygning ] objliste = [] for enkontrakt", "ikke minst: Eventuelt kluss lar seg greit reparere ved å", "enumerate(lowerkolonner) if val in lowerkolonner[:idx]] for ii, dublett in enumerate(", "mindf, geometry='geometry', crs=5973 ) minGdf.to_file( filnavn, layer='vegnett', driver=\"GPKG\") def dumpkontraktsomr(", ") # Kun kjørende, og kun øverste topologinivå, og ikke", "vegsegment['vref'].split()[0] vegsegment['vegkategori'] = vegsegment['vref'][0] vegsegment['adskilte løp'] = vegsegment['vegsystemreferanse']['strekning']['adskilte_løp'] data.append( vegsegment", "if felttype == 'firefelt': if 'feltoversikt' in vegsegment.keys() and 'detaljnivå'", "av disse bibliotekene kunne historisk av og til være plundrete", "if mittfilter: sok.filter( mittfilter ) stat = sok.statistikk() objtypenavn =", "ikke kommer i konflikt med anna filter if not 'vegsystemreferanse'", "for et vegsegment. KEYWORDS mittfilter=['vanlig', 'K', 'R' ] - Liste", "like fullt å bruke (ana)conda installasjon i et eget \"environment\".", "- kollektivfelt 'R' - reversibelt felt 'S' - Sykkelfelt 'H'", "pdb from shapely import wkt # from shapely.ops import unary_union", "mittfilter ) # Kun kjørende, og kun øverste topologinivå, og", "0: mindf = pd.DataFrame( rec ) # Må trickse litt", "# Værstasjon 64, # Ferjeleie 39, # Rasteplass 48, #", "om vi skal ta med data om vegnett eller ikke", "511, # Busker 300 , # Naturområde (ingen treff i", "' + felttype + 'er ikke implementert (ennå)' ) def", "= [ ] for felt in feltoversikt: feltbokstav = re.findall(", "- Svingefelt mot venstre 'B' - Ekstra felt for bompengeinnkreving", "nytt \"enviroment\", uten at det påvirker hele python-installasjonen din. 
\"\"\"", "not 'sideanlegg' in mittfilter.keys(): mittfilter['sideanlegg'] = 'false' v.filter( mittfilter )", "mindf = pd.DataFrame( rec ) # Må trickse litt for", "dublett in enumerate( res): mindf.rename(columns={ mindf.columns[dublett] : kolonner[dublett] + '_'", "1, inplace=True) mindf.drop( 'veglenkenummer', 1, inplace=True) mindf.drop( 'segmentnummer', 1, inplace=True)", "kj.felt, kollektivfelt eller reversibelt felt (flere varianter kommer når de", "4}): svar = True # Siste klausul her har f.eks.", "in vegsegment[vr][sr]: if vegsegment[vr][sr]['adskilte_løp'] == 'Nei' and kjfelt.issuperset( { 1,", "etter vegsegementer geometri=True : Bool, default=True. Angir om vi skal", "god kodehygiene og sikrer minimalt med kluss, samt ikke minst:", "]): \"\"\" Returnerer liste med kjørefeltnummer filtrert på hva slags", "return minGdf else: return None def sjekkfelt( vegsegment, felttype='firefelt' ):", "feltbokstav in mittfilter: feltnummer = int( re.split( '[A-Z]', felt)[0] )", "gpd.GeoDataFrame( mindf, geometry='geometry', crs=5973 ) minGdf.to_file( filnavn, layer='vegnett', driver=\"GPKG\") def" ]
[ "+= 1 dow = (dow + no_days(m, y)) % 7", "twenty-eight, rain or shine. And on leap years, twenty-nine. A", "given the following information, but you may prefer to do", "may prefer to do some research for yourself. 1 Jan", "April, June and November. All the rest have thirty-one, Saving", "y in range(1901, 2001): for m in range(0, 12): if", "a Monday. Thirty days has September, April, June and November.", "month in [0,2,4,6,7,9,11]: return 31 elif month in [3,5,8,10]: return", "day of week for 1 Jan 1901 (Tuesday) dow =", "on a century unless it is divisible by 400. How", "following information, but you may prefer to do some research", "for yourself. 1 Jan 1900 was a Monday. Thirty days", "is divisible by 400. How many Sundays fell on the", "in [0,2,4,6,7,9,11]: return 31 elif month in [3,5,8,10]: return 30", "alone, Which has twenty-eight, rain or shine. And on leap", "February alone, Which has twenty-eight, rain or shine. And on", "to 31 Dec 2000)? ans: 171 \"\"\" # set to", "# set to day of week for 1 Jan 1901", "set to day of week for 1 Jan 1901 (Tuesday)", "1 Jan 1901 (Tuesday) dow = 2 def no_days(month, year):", "400 == 0: return 29 elif year % 100 ==", "% 100 == 0: return 28 elif year % 4", "0: return 29 else: return 28 sum = 0 for", "return 28 elif year % 4 == 0: return 29", "but you may prefer to do some research for yourself.", "a century unless it is divisible by 400. How many", "4 == 0: return 29 else: return 28 sum =", "Saving February alone, Which has twenty-eight, rain or shine. And", "28 sum = 0 for y in range(1901, 2001): for", "[3,5,8,10]: return 30 elif year % 400 == 0: return", "== 0: sum += 1 dow = (dow + no_days(m,", "ans: 171 \"\"\" # set to day of week for", "twenty-nine. 
A leap year occurs on any year evenly divisible", "year): if month in [0,2,4,6,7,9,11]: return 31 elif month in", "\"\"\" # set to day of week for 1 Jan", "dow = 2 def no_days(month, year): if month in [0,2,4,6,7,9,11]:", "== 0: return 28 elif year % 4 == 0:", "1 dow = (dow + no_days(m, y)) % 7 print(sum)", "1901 (Tuesday) dow = 2 def no_days(month, year): if month", "during the twentieth century (1 Jan 1901 to 31 Dec", "Thirty days has September, April, June and November. All the", "for y in range(1901, 2001): for m in range(0, 12):", "any year evenly divisible by 4, but not on a", "September, April, June and November. All the rest have thirty-one,", "November. All the rest have thirty-one, Saving February alone, Which", "\"\"\" You are given the following information, but you may", "on the first of the month during the twentieth century", "Jan 1901 (Tuesday) dow = 2 def no_days(month, year): if", "elif month in [3,5,8,10]: return 30 elif year % 400", "% 400 == 0: return 29 elif year % 100", "month in [3,5,8,10]: return 30 elif year % 400 ==", "m in range(0, 12): if dow == 0: sum +=", "And on leap years, twenty-nine. A leap year occurs on", "year % 400 == 0: return 29 elif year %", "2 def no_days(month, year): if month in [0,2,4,6,7,9,11]: return 31", "divisible by 400. How many Sundays fell on the first", "information, but you may prefer to do some research for", "research for yourself. 1 Jan 1900 was a Monday. Thirty", "the first of the month during the twentieth century (1", "Which has twenty-eight, rain or shine. And on leap years,", "divisible by 4, but not on a century unless it", "Monday. Thirty days has September, April, June and November. All", "(Tuesday) dow = 2 def no_days(month, year): if month in", "Sundays fell on the first of the month during the", "return 29 else: return 28 sum = 0 for y", "century unless it is divisible by 400. How many Sundays", "leap years, twenty-nine. 
A leap year occurs on any year", "month during the twentieth century (1 Jan 1901 to 31", "to do some research for yourself. 1 Jan 1900 was", "years, twenty-nine. A leap year occurs on any year evenly", "leap year occurs on any year evenly divisible by 4,", "for m in range(0, 12): if dow == 0: sum", "fell on the first of the month during the twentieth", "30 elif year % 400 == 0: return 29 elif", "if dow == 0: sum += 1 dow = (dow", "= 2 def no_days(month, year): if month in [0,2,4,6,7,9,11]: return", "elif year % 4 == 0: return 29 else: return", "the following information, but you may prefer to do some", "31 elif month in [3,5,8,10]: return 30 elif year %", "of week for 1 Jan 1901 (Tuesday) dow = 2", "days has September, April, June and November. All the rest", "sum = 0 for y in range(1901, 2001): for m", "on leap years, twenty-nine. A leap year occurs on any", "== 0: return 29 else: return 28 sum = 0", "return 28 sum = 0 for y in range(1901, 2001):", "2001): for m in range(0, 12): if dow == 0:", "yourself. 1 Jan 1900 was a Monday. Thirty days has", "% 4 == 0: return 29 else: return 28 sum", "A leap year occurs on any year evenly divisible by", "return 30 elif year % 400 == 0: return 29", "== 0: return 29 elif year % 100 == 0:", "has September, April, June and November. All the rest have", "return 31 elif month in [3,5,8,10]: return 30 elif year", "has twenty-eight, rain or shine. And on leap years, twenty-nine.", "def no_days(month, year): if month in [0,2,4,6,7,9,11]: return 31 elif", "1 Jan 1900 was a Monday. Thirty days has September,", "do some research for yourself. 1 Jan 1900 was a", "many Sundays fell on the first of the month during", "first of the month during the twentieth century (1 Jan", "to day of week for 1 Jan 1901 (Tuesday) dow", "and November. 
All the rest have thirty-one, Saving February alone,", "have thirty-one, Saving February alone, Which has twenty-eight, rain or", "no_days(month, year): if month in [0,2,4,6,7,9,11]: return 31 elif month", "by 4, but not on a century unless it is", "0: sum += 1 dow = (dow + no_days(m, y))", "171 \"\"\" # set to day of week for 1", "12): if dow == 0: sum += 1 dow =", "Jan 1901 to 31 Dec 2000)? ans: 171 \"\"\" #", "else: return 28 sum = 0 for y in range(1901,", "the twentieth century (1 Jan 1901 to 31 Dec 2000)?", "year % 100 == 0: return 28 elif year %", "return 29 elif year % 100 == 0: return 28", "= 0 for y in range(1901, 2001): for m in", "shine. And on leap years, twenty-nine. A leap year occurs", "year occurs on any year evenly divisible by 4, but", "1900 was a Monday. Thirty days has September, April, June", "range(0, 12): if dow == 0: sum += 1 dow", "range(1901, 2001): for m in range(0, 12): if dow ==", "are given the following information, but you may prefer to", "rest have thirty-one, Saving February alone, Which has twenty-eight, rain", "for 1 Jan 1901 (Tuesday) dow = 2 def no_days(month,", "elif year % 400 == 0: return 29 elif year", "(1 Jan 1901 to 31 Dec 2000)? ans: 171 \"\"\"", "All the rest have thirty-one, Saving February alone, Which has", "not on a century unless it is divisible by 400.", "unless it is divisible by 400. How many Sundays fell", "0 for y in range(1901, 2001): for m in range(0,", "400. How many Sundays fell on the first of the", "occurs on any year evenly divisible by 4, but not", "by 400. How many Sundays fell on the first of", "dow == 0: sum += 1 dow = (dow +", "year evenly divisible by 4, but not on a century", "0: return 28 elif year % 4 == 0: return", "Dec 2000)? ans: 171 \"\"\" # set to day of", "the rest have thirty-one, Saving February alone, Which has twenty-eight,", "elif year % 100 == 0: return 28 elif year", "How many Sundays fell on the first of the month", "29 else: return 28 sum = 0 for y in", "rain or shine. 
And on leap years, twenty-nine. A leap", "week for 1 Jan 1901 (Tuesday) dow = 2 def", "year % 4 == 0: return 29 else: return 28", "28 elif year % 4 == 0: return 29 else:", "some research for yourself. 1 Jan 1900 was a Monday.", "you may prefer to do some research for yourself. 1", "31 Dec 2000)? ans: 171 \"\"\" # set to day", "was a Monday. Thirty days has September, April, June and", "You are given the following information, but you may prefer", "or shine. And on leap years, twenty-nine. A leap year", "but not on a century unless it is divisible by", "sum += 1 dow = (dow + no_days(m, y)) %", "29 elif year % 100 == 0: return 28 elif", "if month in [0,2,4,6,7,9,11]: return 31 elif month in [3,5,8,10]:", "100 == 0: return 28 elif year % 4 ==", "1901 to 31 Dec 2000)? ans: 171 \"\"\" # set", "2000)? ans: 171 \"\"\" # set to day of week", "century (1 Jan 1901 to 31 Dec 2000)? ans: 171", "thirty-one, Saving February alone, Which has twenty-eight, rain or shine.", "evenly divisible by 4, but not on a century unless", "it is divisible by 400. How many Sundays fell on", "of the month during the twentieth century (1 Jan 1901", "June and November. All the rest have thirty-one, Saving February", "Jan 1900 was a Monday. Thirty days has September, April,", "in range(0, 12): if dow == 0: sum += 1", "twentieth century (1 Jan 1901 to 31 Dec 2000)? ans:", "in [3,5,8,10]: return 30 elif year % 400 == 0:", "the month during the twentieth century (1 Jan 1901 to", "[0,2,4,6,7,9,11]: return 31 elif month in [3,5,8,10]: return 30 elif", "in range(1901, 2001): for m in range(0, 12): if dow", "on any year evenly divisible by 4, but not on", "0: return 29 elif year % 100 == 0: return", "4, but not on a century unless it is divisible", "prefer to do some research for yourself. 1 Jan 1900" ]
[ "meter data\", author=\"<NAME>\", author_email='<EMAIL>', url='https://github.com/dbservice/dbservice', packages=find_packages(), package_data={'': ['static/*.*', 'templates/*.*']}, scripts=['manage.py'],", "Setup for the dbservice \"\"\" from setuptools import setup, find_packages", "the dbservice \"\"\" from setuptools import setup, find_packages setup( name='dbservice',", "setup( name='dbservice', version='0.9', description=\"Database service for storing meter data\", author=\"<NAME>\",", "from setuptools import setup, find_packages setup( name='dbservice', version='0.9', description=\"Database service", "dbservice \"\"\" from setuptools import setup, find_packages setup( name='dbservice', version='0.9',", "data\", author=\"<NAME>\", author_email='<EMAIL>', url='https://github.com/dbservice/dbservice', packages=find_packages(), package_data={'': ['static/*.*', 'templates/*.*']}, scripts=['manage.py'], )", "name='dbservice', version='0.9', description=\"Database service for storing meter data\", author=\"<NAME>\", author_email='<EMAIL>',", "utf-8 -*- \"\"\" Setup for the dbservice \"\"\" from setuptools", "import setup, find_packages setup( name='dbservice', version='0.9', description=\"Database service for storing", "service for storing meter data\", author=\"<NAME>\", author_email='<EMAIL>', url='https://github.com/dbservice/dbservice', packages=find_packages(), package_data={'':", "storing meter data\", author=\"<NAME>\", author_email='<EMAIL>', url='https://github.com/dbservice/dbservice', packages=find_packages(), package_data={'': ['static/*.*', 'templates/*.*']},", "description=\"Database service for storing meter data\", author=\"<NAME>\", author_email='<EMAIL>', url='https://github.com/dbservice/dbservice', packages=find_packages(),", "find_packages setup( name='dbservice', version='0.9', description=\"Database service for storing meter data\",", "# -*- encoding: utf-8 -*- \"\"\" Setup for the dbservice", "<filename>setup.py #!/usr/bin/env python3 # 
-*- encoding: utf-8 -*- \"\"\" Setup", "encoding: utf-8 -*- \"\"\" Setup for the dbservice \"\"\" from", "#!/usr/bin/env python3 # -*- encoding: utf-8 -*- \"\"\" Setup for", "-*- \"\"\" Setup for the dbservice \"\"\" from setuptools import", "\"\"\" from setuptools import setup, find_packages setup( name='dbservice', version='0.9', description=\"Database", "setup, find_packages setup( name='dbservice', version='0.9', description=\"Database service for storing meter", "python3 # -*- encoding: utf-8 -*- \"\"\" Setup for the", "-*- encoding: utf-8 -*- \"\"\" Setup for the dbservice \"\"\"", "setuptools import setup, find_packages setup( name='dbservice', version='0.9', description=\"Database service for", "for storing meter data\", author=\"<NAME>\", author_email='<EMAIL>', url='https://github.com/dbservice/dbservice', packages=find_packages(), package_data={'': ['static/*.*',", "for the dbservice \"\"\" from setuptools import setup, find_packages setup(", "version='0.9', description=\"Database service for storing meter data\", author=\"<NAME>\", author_email='<EMAIL>', url='https://github.com/dbservice/dbservice',", "\"\"\" Setup for the dbservice \"\"\" from setuptools import setup," ]
[ "The arg spec for the junos facts module \"\"\" def", "or https://www.gnu.org/licenses/gpl-3.0.txt) \"\"\" The arg spec for the junos facts", "type=\"list\", elements=\"str\" ), \"config_format\": dict( default=\"text\", choices=[\"xml\", \"text\", \"set\", \"json\"]", "__future__ import absolute_import, division, print_function __metaclass__ = type class FactsArgs(object):", "for the junos facts module \"\"\" def __init__(self, **kwargs): pass", "{ \"gather_subset\": dict( default=[\"!config\"], type=\"list\", elements=\"str\" ), \"config_format\": dict( default=\"text\",", "FactsArgs(object): \"\"\" The arg spec for the junos facts module", "__metaclass__ = type class FactsArgs(object): \"\"\" The arg spec for", "choices=[\"xml\", \"text\", \"set\", \"json\"] ), \"gather_network_resources\": dict(type=\"list\", elements=\"str\"), \"available_network_resources\": {\"type\":", "\"text\", \"set\", \"json\"] ), \"gather_network_resources\": dict(type=\"list\", elements=\"str\"), \"available_network_resources\": {\"type\": \"bool\",", "\"\"\" The arg spec for the junos facts module. \"\"\"", "pass argument_spec = { \"gather_subset\": dict( default=[\"!config\"], type=\"list\", elements=\"str\" ),", "= { \"gather_subset\": dict( default=[\"!config\"], type=\"list\", elements=\"str\" ), \"config_format\": dict(", "Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) \"\"\" The", "dict( default=[\"!config\"], type=\"list\", elements=\"str\" ), \"config_format\": dict( default=\"text\", choices=[\"xml\", \"text\",", "absolute_import, division, print_function __metaclass__ = type class FactsArgs(object): \"\"\" The", "arg spec for the junos facts module. 
\"\"\" from __future__", "utf-8 -*- # Copyright 2019 Red Hat # GNU General", "GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)", "\"\"\" The arg spec for the junos facts module \"\"\"", "print_function __metaclass__ = type class FactsArgs(object): \"\"\" The arg spec", "module \"\"\" def __init__(self, **kwargs): pass argument_spec = { \"gather_subset\":", "https://www.gnu.org/licenses/gpl-3.0.txt) \"\"\" The arg spec for the junos facts module.", "dict( default=\"text\", choices=[\"xml\", \"text\", \"set\", \"json\"] ), \"gather_network_resources\": dict(type=\"list\", elements=\"str\"),", "default=\"text\", choices=[\"xml\", \"text\", \"set\", \"json\"] ), \"gather_network_resources\": dict(type=\"list\", elements=\"str\"), \"available_network_resources\":", "# -*- coding: utf-8 -*- # Copyright 2019 Red Hat", "\"gather_subset\": dict( default=[\"!config\"], type=\"list\", elements=\"str\" ), \"config_format\": dict( default=\"text\", choices=[\"xml\",", "def __init__(self, **kwargs): pass argument_spec = { \"gather_subset\": dict( default=[\"!config\"],", "\"json\"] ), \"gather_network_resources\": dict(type=\"list\", elements=\"str\"), \"available_network_resources\": {\"type\": \"bool\", \"default\": False},", "import absolute_import, division, print_function __metaclass__ = type class FactsArgs(object): \"\"\"", "arg spec for the junos facts module \"\"\" def __init__(self,", "(see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) \"\"\" The arg spec for the", "**kwargs): pass argument_spec = { \"gather_subset\": dict( default=[\"!config\"], type=\"list\", elements=\"str\"", "Hat # GNU General Public License v3.0+ # (see COPYING", "junos facts module. 
\"\"\" from __future__ import absolute_import, division, print_function", "v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) \"\"\" The arg spec", "type class FactsArgs(object): \"\"\" The arg spec for the junos", "\"config_format\": dict( default=\"text\", choices=[\"xml\", \"text\", \"set\", \"json\"] ), \"gather_network_resources\": dict(type=\"list\",", "spec for the junos facts module. \"\"\" from __future__ import", "License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) \"\"\" The arg", "from __future__ import absolute_import, division, print_function __metaclass__ = type class", "argument_spec = { \"gather_subset\": dict( default=[\"!config\"], type=\"list\", elements=\"str\" ), \"config_format\":", "# # -*- coding: utf-8 -*- # Copyright 2019 Red", "-*- # Copyright 2019 Red Hat # GNU General Public", "# Copyright 2019 Red Hat # GNU General Public License", "the junos facts module. \"\"\" from __future__ import absolute_import, division,", "division, print_function __metaclass__ = type class FactsArgs(object): \"\"\" The arg", "\"\"\" from __future__ import absolute_import, division, print_function __metaclass__ = type", "facts module. 
\"\"\" from __future__ import absolute_import, division, print_function __metaclass__", "spec for the junos facts module \"\"\" def __init__(self, **kwargs):", "junos facts module \"\"\" def __init__(self, **kwargs): pass argument_spec =", "default=[\"!config\"], type=\"list\", elements=\"str\" ), \"config_format\": dict( default=\"text\", choices=[\"xml\", \"text\", \"set\",", "General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) \"\"\"", "the junos facts module \"\"\" def __init__(self, **kwargs): pass argument_spec", "), \"gather_network_resources\": dict(type=\"list\", elements=\"str\"), \"available_network_resources\": {\"type\": \"bool\", \"default\": False}, }", "\"set\", \"json\"] ), \"gather_network_resources\": dict(type=\"list\", elements=\"str\"), \"available_network_resources\": {\"type\": \"bool\", \"default\":", "coding: utf-8 -*- # Copyright 2019 Red Hat # GNU", "COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) \"\"\" The arg spec for the junos", "2019 Red Hat # GNU General Public License v3.0+ #", "\"\"\" def __init__(self, **kwargs): pass argument_spec = { \"gather_subset\": dict(", "__init__(self, **kwargs): pass argument_spec = { \"gather_subset\": dict( default=[\"!config\"], type=\"list\",", "facts module \"\"\" def __init__(self, **kwargs): pass argument_spec = {", "), \"config_format\": dict( default=\"text\", choices=[\"xml\", \"text\", \"set\", \"json\"] ), \"gather_network_resources\":", "The arg spec for the junos facts module. 
\"\"\" from", "= type class FactsArgs(object): \"\"\" The arg spec for the", "class FactsArgs(object): \"\"\" The arg spec for the junos facts", "elements=\"str\" ), \"config_format\": dict( default=\"text\", choices=[\"xml\", \"text\", \"set\", \"json\"] ),", "# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) \"\"\" The arg spec for", "Copyright 2019 Red Hat # GNU General Public License v3.0+", "Red Hat # GNU General Public License v3.0+ # (see", "# GNU General Public License v3.0+ # (see COPYING or", "-*- coding: utf-8 -*- # Copyright 2019 Red Hat #", "module. \"\"\" from __future__ import absolute_import, division, print_function __metaclass__ =", "for the junos facts module. \"\"\" from __future__ import absolute_import," ]
[ "Authenticate(Resource): def post(self): try: args = parser.parse_args() decoded_token = firebase_auth.verify_id_token(args['token'])", "reqparse.RequestParser() parser.add_argument('token', type=str, required=True, nullable=False) class Authenticate(Resource): def post(self): try:", "except (ValueError, firebase_auth.AuthError) as e: return {'message': f'{e}'}, 400 firebase_uid", "'display_name': user.display_name, 'contact_uri': user.contact_uri, 'roles': [role.role_type for role in user.user_roles],", "required=True, nullable=False) class Authenticate(Resource): def post(self): try: args = parser.parse_args()", "e: return {'message': f'{e}'}, 400 firebase_uid = decoded_token['uid'] user =", "= reqparse.RequestParser() parser.add_argument('token', type=str, required=True, nullable=False) class Authenticate(Resource): def post(self):", "return {'message': f'{e}'}, 400 firebase_uid = decoded_token['uid'] user = User.query.filter_by(firebase_uid=firebase_uid).first()", "sign up.'}, 400 custom_token = firebase_auth.create_custom_token(firebase_uid) return { 'custom_token': custom_token.decode(),", "type=str, required=True, nullable=False) class Authenticate(Resource): def post(self): try: args =", "from flask_restful import Resource, reqparse from firebase_admin import auth as", "= firebase_auth.verify_id_token(args['token']) except (ValueError, firebase_auth.AuthError) as e: return {'message': f'{e}'},", "auth as firebase_auth from dbcls.models import User parser = reqparse.RequestParser()", "400 firebase_uid = decoded_token['uid'] user = User.query.filter_by(firebase_uid=firebase_uid).first() if not user:", "{'message': 'user not found. 
You have to sign up.'}, 400", "custom_token.decode(), 'display_name': user.display_name, 'contact_uri': user.contact_uri, 'roles': [role.role_type for role in", "parser.parse_args() decoded_token = firebase_auth.verify_id_token(args['token']) except (ValueError, firebase_auth.AuthError) as e: return", "reqparse from firebase_admin import auth as firebase_auth from dbcls.models import", "= parser.parse_args() decoded_token = firebase_auth.verify_id_token(args['token']) except (ValueError, firebase_auth.AuthError) as e:", "as e: return {'message': f'{e}'}, 400 firebase_uid = decoded_token['uid'] user", "from firebase_admin import auth as firebase_auth from dbcls.models import User", "if not user: return {'message': 'user not found. You have", "= firebase_auth.create_custom_token(firebase_uid) return { 'custom_token': custom_token.decode(), 'display_name': user.display_name, 'contact_uri': user.contact_uri,", "to sign up.'}, 400 custom_token = firebase_auth.create_custom_token(firebase_uid) return { 'custom_token':", "= User.query.filter_by(firebase_uid=firebase_uid).first() if not user: return {'message': 'user not found.", "user.display_name, 'contact_uri': user.contact_uri, 'roles': [role.role_type for role in user.user_roles], }", "parser.add_argument('token', type=str, required=True, nullable=False) class Authenticate(Resource): def post(self): try: args", "User parser = reqparse.RequestParser() parser.add_argument('token', type=str, required=True, nullable=False) class Authenticate(Resource):", "import User parser = reqparse.RequestParser() parser.add_argument('token', type=str, required=True, nullable=False) class", "decoded_token = firebase_auth.verify_id_token(args['token']) except (ValueError, firebase_auth.AuthError) as e: return {'message':", "up.'}, 400 custom_token = firebase_auth.create_custom_token(firebase_uid) return { 'custom_token': custom_token.decode(), 'display_name':", "{ 'custom_token': custom_token.decode(), 'display_name': user.display_name, 
'contact_uri': user.contact_uri, 'roles': [role.role_type for", "class Authenticate(Resource): def post(self): try: args = parser.parse_args() decoded_token =", "user = User.query.filter_by(firebase_uid=firebase_uid).first() if not user: return {'message': 'user not", "custom_token = firebase_auth.create_custom_token(firebase_uid) return { 'custom_token': custom_token.decode(), 'display_name': user.display_name, 'contact_uri':", "nullable=False) class Authenticate(Resource): def post(self): try: args = parser.parse_args() decoded_token", "Resource, reqparse from firebase_admin import auth as firebase_auth from dbcls.models", "= decoded_token['uid'] user = User.query.filter_by(firebase_uid=firebase_uid).first() if not user: return {'message':", "post(self): try: args = parser.parse_args() decoded_token = firebase_auth.verify_id_token(args['token']) except (ValueError,", "from dbcls.models import User parser = reqparse.RequestParser() parser.add_argument('token', type=str, required=True,", "flask_restful import Resource, reqparse from firebase_admin import auth as firebase_auth", "firebase_auth.AuthError) as e: return {'message': f'{e}'}, 400 firebase_uid = decoded_token['uid']", "import auth as firebase_auth from dbcls.models import User parser =", "'custom_token': custom_token.decode(), 'display_name': user.display_name, 'contact_uri': user.contact_uri, 'roles': [role.role_type for role", "as firebase_auth from dbcls.models import User parser = reqparse.RequestParser() parser.add_argument('token',", "parser = reqparse.RequestParser() parser.add_argument('token', type=str, required=True, nullable=False) class Authenticate(Resource): def", "return {'message': 'user not found. 
You have to sign up.'},", "{'message': f'{e}'}, 400 firebase_uid = decoded_token['uid'] user = User.query.filter_by(firebase_uid=firebase_uid).first() if", "firebase_uid = decoded_token['uid'] user = User.query.filter_by(firebase_uid=firebase_uid).first() if not user: return", "def post(self): try: args = parser.parse_args() decoded_token = firebase_auth.verify_id_token(args['token']) except", "'user not found. You have to sign up.'}, 400 custom_token", "have to sign up.'}, 400 custom_token = firebase_auth.create_custom_token(firebase_uid) return {", "firebase_auth.verify_id_token(args['token']) except (ValueError, firebase_auth.AuthError) as e: return {'message': f'{e}'}, 400", "You have to sign up.'}, 400 custom_token = firebase_auth.create_custom_token(firebase_uid) return", "try: args = parser.parse_args() decoded_token = firebase_auth.verify_id_token(args['token']) except (ValueError, firebase_auth.AuthError)", "import Resource, reqparse from firebase_admin import auth as firebase_auth from", "decoded_token['uid'] user = User.query.filter_by(firebase_uid=firebase_uid).first() if not user: return {'message': 'user", "User.query.filter_by(firebase_uid=firebase_uid).first() if not user: return {'message': 'user not found. You", "firebase_admin import auth as firebase_auth from dbcls.models import User parser", "not user: return {'message': 'user not found. 
You have to", "args = parser.parse_args() decoded_token = firebase_auth.verify_id_token(args['token']) except (ValueError, firebase_auth.AuthError) as", "firebase_auth.create_custom_token(firebase_uid) return { 'custom_token': custom_token.decode(), 'display_name': user.display_name, 'contact_uri': user.contact_uri, 'roles':", "400 custom_token = firebase_auth.create_custom_token(firebase_uid) return { 'custom_token': custom_token.decode(), 'display_name': user.display_name,", "f'{e}'}, 400 firebase_uid = decoded_token['uid'] user = User.query.filter_by(firebase_uid=firebase_uid).first() if not", "dbcls.models import User parser = reqparse.RequestParser() parser.add_argument('token', type=str, required=True, nullable=False)", "(ValueError, firebase_auth.AuthError) as e: return {'message': f'{e}'}, 400 firebase_uid =", "return { 'custom_token': custom_token.decode(), 'display_name': user.display_name, 'contact_uri': user.contact_uri, 'roles': [role.role_type", "user: return {'message': 'user not found. You have to sign", "firebase_auth from dbcls.models import User parser = reqparse.RequestParser() parser.add_argument('token', type=str,", "not found. You have to sign up.'}, 400 custom_token =", "found. You have to sign up.'}, 400 custom_token = firebase_auth.create_custom_token(firebase_uid)" ]
[ "required input InputString = \"kobe is a basketball player\" headers", "\"kobe is a basketball player\" headers = { 'Content-type': 'application/json',", "headers = { 'Content-type': 'application/json', } data = '{\"text\":InputString =", "{ 'Content-type': 'application/json', } data = '{\"text\":InputString = '+ InputString", "InputString + '}' response = requests.post('http://66.76.242.198:9888/', data=data).json() #Adding a test", "a test comment to check if the automatic git pull", "for the required input InputString = \"kobe is a basketball", "data = '{\"text\":InputString = '+ InputString + '}' response =", "if the automatic git pull is working or not #print(json.dumps(response,", "import json # Get json results for the required input", "automatic git pull is working or not #print(json.dumps(response, indent=4, sort_keys=True))", "+ '}' response = requests.post('http://66.76.242.198:9888/', data=data).json() #Adding a test comment", "'application/json', } data = '{\"text\":InputString = '+ InputString + '}'", "#Adding a test comment to check if the automatic git", "comment to check if the automatic git pull is working", "} data = '{\"text\":InputString = '+ InputString + '}' response", "check if the automatic git pull is working or not", "results for the required input InputString = \"kobe is a", "import requests import json # Get json results for the", "is a basketball player\" headers = { 'Content-type': 'application/json', }", "= { 'Content-type': 'application/json', } data = '{\"text\":InputString = '+", "'}' response = requests.post('http://66.76.242.198:9888/', data=data).json() #Adding a test comment to", "data=data).json() #Adding a test comment to check if the automatic", "json # Get json results for the required input InputString", "basketball player\" headers = { 'Content-type': 'application/json', } data =", "= '{\"text\":InputString = '+ InputString + '}' response = requests.post('http://66.76.242.198:9888/',", "InputString = \"kobe is a 
basketball player\" headers = {", "modules import requests import json # Get json results for", "required modules import requests import json # Get json results", "'+ InputString + '}' response = requests.post('http://66.76.242.198:9888/', data=data).json() #Adding a", "the automatic git pull is working or not #print(json.dumps(response, indent=4,", "#Import required modules import requests import json # Get json", "test comment to check if the automatic git pull is", "# Get json results for the required input InputString =", "= \"kobe is a basketball player\" headers = { 'Content-type':", "Get json results for the required input InputString = \"kobe", "a basketball player\" headers = { 'Content-type': 'application/json', } data", "'{\"text\":InputString = '+ InputString + '}' response = requests.post('http://66.76.242.198:9888/', data=data).json()", "= requests.post('http://66.76.242.198:9888/', data=data).json() #Adding a test comment to check if", "= '+ InputString + '}' response = requests.post('http://66.76.242.198:9888/', data=data).json() #Adding", "player\" headers = { 'Content-type': 'application/json', } data = '{\"text\":InputString", "the required input InputString = \"kobe is a basketball player\"", "requests import json # Get json results for the required", "json results for the required input InputString = \"kobe is", "to check if the automatic git pull is working or", "<reponame>Feiyi-Ding/2021A #Import required modules import requests import json # Get", "response = requests.post('http://66.76.242.198:9888/', data=data).json() #Adding a test comment to check", "'Content-type': 'application/json', } data = '{\"text\":InputString = '+ InputString +", "requests.post('http://66.76.242.198:9888/', data=data).json() #Adding a test comment to check if the", "input InputString = \"kobe is a basketball player\" headers =" ]
[ "None, \"Path to output the processed dataset.\") FLAGS = flags.FLAGS", "x in data.split(\"\\n\") if len(x.split()) > 3]) logging.info(\"length = %d\",", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "raw wikitext103 train corpus.\") flags.DEFINE_string(\"output_path\", None, \"Path to output the", "FLAGS = flags.FLAGS def main(_): with open(FLAGS.wiki103_raw, \"r\") as f:", "@-@ \", \"-\").replace(\" ,\", \",\") data = data.replace(\" \\'\", \"\\'\").replace(\"", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "License. # Lint as: python3 \"\"\"Sentencize the raw wikitext103.\"\"\" import", "f: data = f.read().strip().split(\"\\n\") data = [x.split(\" . \") for", "<filename>language/bert_extraction/steal_bert_classifier/utils/wiki103_sentencize.py<gh_stars>1000+ # coding=utf-8 # Copyright 2018 The Google AI Language", "\",\") data = data.replace(\" \\'\", \"\\'\").replace(\" )\", \")\").replace(\"( \", \"(\")", "distributed under the License is distributed on an \"AS IS\"", "@.@ \", \".\").replace(\" @-@ \", \"-\").replace(\" ,\", \",\") data =", "the raw wikitext103.\"\"\" import tensorflow.compat.v1 as tf app = tf.app", "x.strip()[0] != \"=\"] sentences = [] for para in data:", "the specific language governing permissions and # limitations under the", "[] for para in data: for sent in para: sentences.append(sent", "# limitations under the License. 
# Lint as: python3 \"\"\"Sentencize", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "open(FLAGS.output_path, \"w\") as f: f.write(data) if __name__ == \"__main__\": app.run(main)", "= tf.flags gfile = tf.gfile logging = tf.logging flags.DEFINE_string(\"wiki103_raw\", None,", "data = \"\\n\".join([x for x in data.split(\"\\n\") if len(x.split()) >", "if len(x.split()) > 3]) logging.info(\"length = %d\", len(data.split(\"\\n\"))) with open(FLAGS.output_path,", "x in data if x.strip() and x.strip()[0] != \"=\"] sentences", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "main(_): with open(FLAGS.wiki103_raw, \"r\") as f: data = f.read().strip().split(\"\\n\") data", "except in compliance with the License. # You may obtain", "train corpus.\") flags.DEFINE_string(\"output_path\", None, \"Path to output the processed dataset.\")", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "in data: for sent in para: sentences.append(sent + \".\") data", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "open(FLAGS.wiki103_raw, \"r\") as f: data = f.read().strip().split(\"\\n\") data = [x.split(\"", "with open(FLAGS.output_path, \"w\") as f: f.write(data) if __name__ == \"__main__\":", "# Copyright 2018 The Google AI Language Team Authors. #", "writing, software # distributed under the License is distributed on", "limitations under the License. 
# Lint as: python3 \"\"\"Sentencize the", "data if x.strip() and x.strip()[0] != \"=\"] sentences = []", "in writing, software # distributed under the License is distributed", "you may not use this file except in compliance with", "for sent in para: sentences.append(sent + \".\") data = \"\\n\".join(sentences)", "\".\") data = \"\\n\".join(sentences) data = data.replace(\" @.@ \", \".\").replace(\"", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "= data.replace(\" ;\", \";\") data = \"\\n\".join([x for x in", "len(x.split()) > 3]) logging.info(\"length = %d\", len(data.split(\"\\n\"))) with open(FLAGS.output_path, \"w\")", "\"r\") as f: data = f.read().strip().split(\"\\n\") data = [x.split(\" .", "%d\", len(data.split(\"\\n\"))) with open(FLAGS.output_path, \"w\") as f: f.write(data) if __name__", "tf.logging flags.DEFINE_string(\"wiki103_raw\", None, \"Path to raw wikitext103 train corpus.\") flags.DEFINE_string(\"output_path\",", ". \") for x in data if x.strip() and x.strip()[0]", ")\", \")\").replace(\"( \", \"(\") data = data.replace(\" ;\", \";\") data", "2018 The Google AI Language Team Authors. # # Licensed", "data = f.read().strip().split(\"\\n\") data = [x.split(\" . \") for x", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "flags.FLAGS def main(_): with open(FLAGS.wiki103_raw, \"r\") as f: data =", "in data if x.strip() and x.strip()[0] != \"=\"] sentences =", "para in data: for sent in para: sentences.append(sent + \".\")", "tf.gfile logging = tf.logging flags.DEFINE_string(\"wiki103_raw\", None, \"Path to raw wikitext103", "to raw wikitext103 train corpus.\") flags.DEFINE_string(\"output_path\", None, \"Path to output", "[x.split(\" . \") for x in data if x.strip() and", "governing permissions and # limitations under the License. # Lint", "Authors. # # Licensed under the Apache License, Version 2.0", "under the License. 
# Lint as: python3 \"\"\"Sentencize the raw", "data = data.replace(\" @.@ \", \".\").replace(\" @-@ \", \"-\").replace(\" ,\",", "= flags.FLAGS def main(_): with open(FLAGS.wiki103_raw, \"r\") as f: data", "\", \".\").replace(\" @-@ \", \"-\").replace(\" ,\", \",\") data = data.replace(\"", "CONDITIONS OF ANY KIND, either express or implied. # See", "data.replace(\" ;\", \";\") data = \"\\n\".join([x for x in data.split(\"\\n\")", "tf.app flags = tf.flags gfile = tf.gfile logging = tf.logging", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "wikitext103.\"\"\" import tensorflow.compat.v1 as tf app = tf.app flags =", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "\"Path to output the processed dataset.\") FLAGS = flags.FLAGS def", "data.replace(\" @.@ \", \".\").replace(\" @-@ \", \"-\").replace(\" ,\", \",\") data", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "f.read().strip().split(\"\\n\") data = [x.split(\" . \") for x in data", "import tensorflow.compat.v1 as tf app = tf.app flags = tf.flags", "# coding=utf-8 # Copyright 2018 The Google AI Language Team", "# You may obtain a copy of the License at", "flags.DEFINE_string(\"output_path\", None, \"Path to output the processed dataset.\") FLAGS =", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "logging.info(\"length = %d\", len(data.split(\"\\n\"))) with open(FLAGS.output_path, \"w\") as f: f.write(data)", "coding=utf-8 # Copyright 2018 The Google AI Language Team Authors.", "+ \".\") data = \"\\n\".join(sentences) data = data.replace(\" @.@ \",", "under the License is distributed on an \"AS IS\" BASIS,", "x.strip() and x.strip()[0] != \"=\"] sentences = [] for para", "Google AI Language Team Authors. # # Licensed under the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "the License. # Lint as: python3 \"\"\"Sentencize the raw wikitext103.\"\"\"", "= \"\\n\".join(sentences) data = data.replace(\" @.@ \", \".\").replace(\" @-@ \",", "= \"\\n\".join([x for x in data.split(\"\\n\") if len(x.split()) > 3])", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "tensorflow.compat.v1 as tf app = tf.app flags = tf.flags gfile", "\"=\"] sentences = [] for para in data: for sent", "\"(\") data = data.replace(\" ;\", \";\") data = \"\\n\".join([x for", "in para: sentences.append(sent + \".\") data = \"\\n\".join(sentences) data =", "\";\") data = \"\\n\".join([x for x in data.split(\"\\n\") if len(x.split())", "output the processed dataset.\") FLAGS = flags.FLAGS def main(_): with", "para: sentences.append(sent + \".\") data = \"\\n\".join(sentences) data = data.replace(\"", ",\", \",\") data = data.replace(\" \\'\", \"\\'\").replace(\" )\", \")\").replace(\"( \",", "gfile = tf.gfile logging = tf.logging flags.DEFINE_string(\"wiki103_raw\", None, \"Path to", "sentences = [] for para in data: for sent in", "= [x.split(\" . 
\") for x in data if x.strip()", "the License for the specific language governing permissions and #", "processed dataset.\") FLAGS = flags.FLAGS def main(_): with open(FLAGS.wiki103_raw, \"r\")", "= [] for para in data: for sent in para:", "(the \"License\"); # you may not use this file except", "data.replace(\" \\'\", \"\\'\").replace(\" )\", \")\").replace(\"( \", \"(\") data = data.replace(\"", "Apache License, Version 2.0 (the \"License\"); # you may not", "# you may not use this file except in compliance", "\"-\").replace(\" ,\", \",\") data = data.replace(\" \\'\", \"\\'\").replace(\" )\", \")\").replace(\"(", "def main(_): with open(FLAGS.wiki103_raw, \"r\") as f: data = f.read().strip().split(\"\\n\")", "either express or implied. # See the License for the", "data.split(\"\\n\") if len(x.split()) > 3]) logging.info(\"length = %d\", len(data.split(\"\\n\"))) with", "and x.strip()[0] != \"=\"] sentences = [] for para in", "OR CONDITIONS OF ANY KIND, either express or implied. #", "if x.strip() and x.strip()[0] != \"=\"] sentences = [] for", "# Lint as: python3 \"\"\"Sentencize the raw wikitext103.\"\"\" import tensorflow.compat.v1", "for para in data: for sent in para: sentences.append(sent +", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "to output the processed dataset.\") FLAGS = flags.FLAGS def main(_):", "the License is distributed on an \"AS IS\" BASIS, #", "Language Team Authors. # # Licensed under the Apache License,", "\"\"\"Sentencize the raw wikitext103.\"\"\" import tensorflow.compat.v1 as tf app =", "= %d\", len(data.split(\"\\n\"))) with open(FLAGS.output_path, \"w\") as f: f.write(data) if", "in compliance with the License. 
# You may obtain a", "\"\\'\").replace(\" )\", \")\").replace(\"( \", \"(\") data = data.replace(\" ;\", \";\")", "software # distributed under the License is distributed on an", "3]) logging.info(\"length = %d\", len(data.split(\"\\n\"))) with open(FLAGS.output_path, \"w\") as f:", "flags = tf.flags gfile = tf.gfile logging = tf.logging flags.DEFINE_string(\"wiki103_raw\",", "len(data.split(\"\\n\"))) with open(FLAGS.output_path, \"w\") as f: f.write(data) if __name__ ==", ";\", \";\") data = \"\\n\".join([x for x in data.split(\"\\n\") if", "corpus.\") flags.DEFINE_string(\"output_path\", None, \"Path to output the processed dataset.\") FLAGS", "The Google AI Language Team Authors. # # Licensed under", "as: python3 \"\"\"Sentencize the raw wikitext103.\"\"\" import tensorflow.compat.v1 as tf", "# # Unless required by applicable law or agreed to", "sent in para: sentences.append(sent + \".\") data = \"\\n\".join(sentences) data", "None, \"Path to raw wikitext103 train corpus.\") flags.DEFINE_string(\"output_path\", None, \"Path", "AI Language Team Authors. 
# # Licensed under the Apache", "\"Path to raw wikitext103 train corpus.\") flags.DEFINE_string(\"output_path\", None, \"Path to", "tf.flags gfile = tf.gfile logging = tf.logging flags.DEFINE_string(\"wiki103_raw\", None, \"Path", "sentences.append(sent + \".\") data = \"\\n\".join(sentences) data = data.replace(\" @.@", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "logging = tf.logging flags.DEFINE_string(\"wiki103_raw\", None, \"Path to raw wikitext103 train", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "= tf.gfile logging = tf.logging flags.DEFINE_string(\"wiki103_raw\", None, \"Path to raw", "Version 2.0 (the \"License\"); # you may not use this", "python3 \"\"\"Sentencize the raw wikitext103.\"\"\" import tensorflow.compat.v1 as tf app", "law or agreed to in writing, software # distributed under", "for x in data if x.strip() and x.strip()[0] != \"=\"]", "the processed dataset.\") FLAGS = flags.FLAGS def main(_): with open(FLAGS.wiki103_raw,", "permissions and # limitations under the License. # Lint as:", "raw wikitext103.\"\"\" import tensorflow.compat.v1 as tf app = tf.app flags", "\"\\n\".join([x for x in data.split(\"\\n\") if len(x.split()) > 3]) logging.info(\"length", "implied. # See the License for the specific language governing", "app = tf.app flags = tf.flags gfile = tf.gfile logging", "\", \"-\").replace(\" ,\", \",\") data = data.replace(\" \\'\", \"\\'\").replace(\" )\",", "\"\\n\".join(sentences) data = data.replace(\" @.@ \", \".\").replace(\" @-@ \", \"-\").replace(\"", "under the Apache License, Version 2.0 (the \"License\"); # you", "\".\").replace(\" @-@ \", \"-\").replace(\" ,\", \",\") data = data.replace(\" \\'\",", "\"License\"); # you may not use this file except in", "in data.split(\"\\n\") if len(x.split()) > 3]) logging.info(\"length = %d\", len(data.split(\"\\n\")))", "and # limitations under the License. 
# Lint as: python3", "= f.read().strip().split(\"\\n\") data = [x.split(\" . \") for x in", "= data.replace(\" @.@ \", \".\").replace(\" @-@ \", \"-\").replace(\" ,\", \",\")", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "data = \"\\n\".join(sentences) data = data.replace(\" @.@ \", \".\").replace(\" @-@", "\", \"(\") data = data.replace(\" ;\", \";\") data = \"\\n\".join([x", "for x in data.split(\"\\n\") if len(x.split()) > 3]) logging.info(\"length =", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "with open(FLAGS.wiki103_raw, \"r\") as f: data = f.read().strip().split(\"\\n\") data =", "data = [x.split(\" . \") for x in data if", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "\") for x in data if x.strip() and x.strip()[0] !=", "= data.replace(\" \\'\", \"\\'\").replace(\" )\", \")\").replace(\"( \", \"(\") data =", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "= tf.logging flags.DEFINE_string(\"wiki103_raw\", None, \"Path to raw wikitext103 train corpus.\")", "Team Authors. 
# # Licensed under the Apache License, Version", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "\\'\", \"\\'\").replace(\" )\", \")\").replace(\"( \", \"(\") data = data.replace(\" ;\",", "flags.DEFINE_string(\"wiki103_raw\", None, \"Path to raw wikitext103 train corpus.\") flags.DEFINE_string(\"output_path\", None,", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "Copyright 2018 The Google AI Language Team Authors. # #", "as f: data = f.read().strip().split(\"\\n\") data = [x.split(\" . \")", "\")\").replace(\"( \", \"(\") data = data.replace(\" ;\", \";\") data =", "You may obtain a copy of the License at #", "language governing permissions and # limitations under the License. #", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "data: for sent in para: sentences.append(sent + \".\") data =", "required by applicable law or agreed to in writing, software", "data = data.replace(\" \\'\", \"\\'\").replace(\" )\", \")\").replace(\"( \", \"(\") data", "wikitext103 train corpus.\") flags.DEFINE_string(\"output_path\", None, \"Path to output the processed", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "Lint as: python3 \"\"\"Sentencize the raw wikitext103.\"\"\" import tensorflow.compat.v1 as", "as tf app = tf.app flags = tf.flags gfile =", "tf app = tf.app flags = tf.flags gfile = tf.gfile", "dataset.\") FLAGS = flags.FLAGS def main(_): with open(FLAGS.wiki103_raw, \"r\") as", "with the License. # You may obtain a copy of", "this file except in compliance with the License. 
# You", "> 3]) logging.info(\"length = %d\", len(data.split(\"\\n\"))) with open(FLAGS.output_path, \"w\") as", "the Apache License, Version 2.0 (the \"License\"); # you may", "data = data.replace(\" ;\", \";\") data = \"\\n\".join([x for x", "!= \"=\"] sentences = [] for para in data: for", "= tf.app flags = tf.flags gfile = tf.gfile logging =" ]
[]
[ "7419530 DARK_GOLD = 12745742 DARK_ORANGE = 11027200 DARK_RED = 10038562", "and (role in after.roles) def isExplorer(ctx): return hasRole(ctx.author, roles[\"explorer\"]) def", "return hasRole(ctx.author, roles[\"network\"]) def isLeader(ctx): return hasRole(ctx.author, roles[\"leader\"]) def isAdmin(ctx):", "roleID): role = member.guild.get_role(roleID) return role in member.roles def gainedRole(before,", "return role in member.roles def gainedRole(before, after, roleID): role =", "= 2123412 DARK_PURPLE = 7419530 DARK_GOLD = 12745742 DARK_ORANGE =", "10181046 GOLD = 15844367 ORANGE = 15105570 RED = 15158332", "isLeader(ctx): return hasRole(ctx.author, roles[\"leader\"]) def isAdmin(ctx): return hasRole(ctx.author, roles[\"admin\"]) def", "DARK_PURPLE = 7419530 DARK_GOLD = 12745742 DARK_ORANGE = 11027200 DARK_RED", "import roles def hasRole(member, roleID): role = member.guild.get_role(roleID) return role", "DARK_ORANGE = 11027200 DARK_RED = 10038562 DARK_GREY = 9936031 LIGHT_GREY", "= 9936031 LIGHT_GREY = 12370112 DARK_NAVY = 2899536 LUMINOUS_VIVID_PINK =", "hasRole(ctx.author, roles[\"network\"]) def isLeader(ctx): return hasRole(ctx.author, roles[\"leader\"]) def isAdmin(ctx): return", "= 7419530 DARK_GOLD = 12745742 DARK_ORANGE = 11027200 DARK_RED =", "return hasRole(ctx.author, roles[\"bot\"]) class Colours: DEFAULT = 0 AQUA =", "return (role not in before.roles) and (role in after.roles) def", "1752220 GREEN = 3066993 BLUE = 3447003 PURPLE = 10181046", "isExplorer(ctx): return hasRole(ctx.author, roles[\"explorer\"]) def isNetwork(ctx): return hasRole(ctx.author, roles[\"network\"]) def", "= 2067276 DARK_BLUE = 2123412 DARK_PURPLE = 7419530 DARK_GOLD =", "def isNetwork(ctx): return hasRole(ctx.author, roles[\"network\"]) def isLeader(ctx): return hasRole(ctx.author, roles[\"leader\"])", "GREEN = 3066993 BLUE = 3447003 PURPLE = 10181046 GOLD", "DARKER_GREY = 8359053 NAVY = 3426654 DARK_AQUA = 1146986 DARK_GREEN", "member.guild.get_role(roleID) return role in 
member.roles def gainedRole(before, after, roleID): role", "hasRole(ctx.author, roles[\"leader\"]) def isAdmin(ctx): return hasRole(ctx.author, roles[\"admin\"]) def isBot(ctx): return", "hasRole(member, roleID): role = member.guild.get_role(roleID) return role in member.roles def", "isNetwork(ctx): return hasRole(ctx.author, roles[\"network\"]) def isLeader(ctx): return hasRole(ctx.author, roles[\"leader\"]) def", "return hasRole(ctx.author, roles[\"leader\"]) def isAdmin(ctx): return hasRole(ctx.author, roles[\"admin\"]) def isBot(ctx):", "roles def hasRole(member, roleID): role = member.guild.get_role(roleID) return role in", "role = before.guild.get_role(roleID) return (role not in before.roles) and (role", "10038562 DARK_GREY = 9936031 LIGHT_GREY = 12370112 DARK_NAVY = 2899536", "def gainedRole(before, after, roleID): role = before.guild.get_role(roleID) return (role not", "hasRole(ctx.author, roles[\"explorer\"]) def isNetwork(ctx): return hasRole(ctx.author, roles[\"network\"]) def isLeader(ctx): return", "roles[\"bot\"]) class Colours: DEFAULT = 0 AQUA = 1752220 GREEN", "12745742 DARK_ORANGE = 11027200 DARK_RED = 10038562 DARK_GREY = 9936031", "DARK_BLUE = 2123412 DARK_PURPLE = 7419530 DARK_GOLD = 12745742 DARK_ORANGE", "role = member.guild.get_role(roleID) return role in member.roles def gainedRole(before, after,", "roleID): role = before.guild.get_role(roleID) return (role not in before.roles) and", "(role in after.roles) def isExplorer(ctx): return hasRole(ctx.author, roles[\"explorer\"]) def isNetwork(ctx):", "roles[\"network\"]) def isLeader(ctx): return hasRole(ctx.author, roles[\"leader\"]) def isAdmin(ctx): return hasRole(ctx.author,", "DARK_GREEN = 2067276 DARK_BLUE = 2123412 DARK_PURPLE = 7419530 DARK_GOLD", "in after.roles) def isExplorer(ctx): return hasRole(ctx.author, roles[\"explorer\"]) def isNetwork(ctx): return", "RED = 15158332 GREY = 9807270 DARKER_GREY = 8359053 NAVY", "return hasRole(ctx.author, roles[\"explorer\"]) def isNetwork(ctx): 
return hasRole(ctx.author, roles[\"network\"]) def isLeader(ctx):", "BLUE = 3447003 PURPLE = 10181046 GOLD = 15844367 ORANGE", "in before.roles) and (role in after.roles) def isExplorer(ctx): return hasRole(ctx.author,", "= 9807270 DARKER_GREY = 8359053 NAVY = 3426654 DARK_AQUA =", "15844367 ORANGE = 15105570 RED = 15158332 GREY = 9807270", "= 12370112 DARK_NAVY = 2899536 LUMINOUS_VIVID_PINK = 16580705 DARK_VIVID_PINK =", "after.roles) def isExplorer(ctx): return hasRole(ctx.author, roles[\"explorer\"]) def isNetwork(ctx): return hasRole(ctx.author,", "return hasRole(ctx.author, roles[\"admin\"]) def isBot(ctx): return hasRole(ctx.author, roles[\"bot\"]) class Colours:", "2067276 DARK_BLUE = 2123412 DARK_PURPLE = 7419530 DARK_GOLD = 12745742", "= 1752220 GREEN = 3066993 BLUE = 3447003 PURPLE =", "DARK_RED = 10038562 DARK_GREY = 9936031 LIGHT_GREY = 12370112 DARK_NAVY", "hasRole(ctx.author, roles[\"bot\"]) class Colours: DEFAULT = 0 AQUA = 1752220", "from server import roles def hasRole(member, roleID): role = member.guild.get_role(roleID)", "roles[\"leader\"]) def isAdmin(ctx): return hasRole(ctx.author, roles[\"admin\"]) def isBot(ctx): return hasRole(ctx.author,", "= member.guild.get_role(roleID) return role in member.roles def gainedRole(before, after, roleID):", "role in member.roles def gainedRole(before, after, roleID): role = before.guild.get_role(roleID)", "server import roles def hasRole(member, roleID): role = member.guild.get_role(roleID) return", "def isLeader(ctx): return hasRole(ctx.author, roles[\"leader\"]) def isAdmin(ctx): return hasRole(ctx.author, roles[\"admin\"])", "3066993 BLUE = 3447003 PURPLE = 10181046 GOLD = 15844367", "12370112 DARK_NAVY = 2899536 LUMINOUS_VIVID_PINK = 16580705 DARK_VIVID_PINK = 12320855", "before.guild.get_role(roleID) return (role not in before.roles) and (role in after.roles)", "1146986 DARK_GREEN = 2067276 DARK_BLUE = 2123412 DARK_PURPLE = 7419530", "AQUA = 1752220 GREEN = 3066993 BLUE = 3447003 PURPLE", "= 12745742 
DARK_ORANGE = 11027200 DARK_RED = 10038562 DARK_GREY =", "member.roles def gainedRole(before, after, roleID): role = before.guild.get_role(roleID) return (role", "= 15105570 RED = 15158332 GREY = 9807270 DARKER_GREY =", "ORANGE = 15105570 RED = 15158332 GREY = 9807270 DARKER_GREY", "15105570 RED = 15158332 GREY = 9807270 DARKER_GREY = 8359053", "8359053 NAVY = 3426654 DARK_AQUA = 1146986 DARK_GREEN = 2067276", "def isBot(ctx): return hasRole(ctx.author, roles[\"bot\"]) class Colours: DEFAULT = 0", "DARK_GREY = 9936031 LIGHT_GREY = 12370112 DARK_NAVY = 2899536 LUMINOUS_VIVID_PINK", "= 15158332 GREY = 9807270 DARKER_GREY = 8359053 NAVY =", "DARK_AQUA = 1146986 DARK_GREEN = 2067276 DARK_BLUE = 2123412 DARK_PURPLE", "LIGHT_GREY = 12370112 DARK_NAVY = 2899536 LUMINOUS_VIVID_PINK = 16580705 DARK_VIVID_PINK", "GREY = 9807270 DARKER_GREY = 8359053 NAVY = 3426654 DARK_AQUA", "0 AQUA = 1752220 GREEN = 3066993 BLUE = 3447003", "isAdmin(ctx): return hasRole(ctx.author, roles[\"admin\"]) def isBot(ctx): return hasRole(ctx.author, roles[\"bot\"]) class", "= 0 AQUA = 1752220 GREEN = 3066993 BLUE =", "= before.guild.get_role(roleID) return (role not in before.roles) and (role in", "def isAdmin(ctx): return hasRole(ctx.author, roles[\"admin\"]) def isBot(ctx): return hasRole(ctx.author, roles[\"bot\"])", "Colours: DEFAULT = 0 AQUA = 1752220 GREEN = 3066993", "9936031 LIGHT_GREY = 12370112 DARK_NAVY = 2899536 LUMINOUS_VIVID_PINK = 16580705", "isBot(ctx): return hasRole(ctx.author, roles[\"bot\"]) class Colours: DEFAULT = 0 AQUA", "= 11027200 DARK_RED = 10038562 DARK_GREY = 9936031 LIGHT_GREY =", "after, roleID): role = before.guild.get_role(roleID) return (role not in before.roles)", "= 1146986 DARK_GREEN = 2067276 DARK_BLUE = 2123412 DARK_PURPLE =", "9807270 DARKER_GREY = 8359053 NAVY = 3426654 DARK_AQUA = 1146986", "NAVY = 3426654 DARK_AQUA = 1146986 DARK_GREEN = 2067276 DARK_BLUE", "not in before.roles) and (role in after.roles) def isExplorer(ctx): return", "def isExplorer(ctx): 
return hasRole(ctx.author, roles[\"explorer\"]) def isNetwork(ctx): return hasRole(ctx.author, roles[\"network\"])", "= 3066993 BLUE = 3447003 PURPLE = 10181046 GOLD =", "3447003 PURPLE = 10181046 GOLD = 15844367 ORANGE = 15105570", "DARK_GOLD = 12745742 DARK_ORANGE = 11027200 DARK_RED = 10038562 DARK_GREY", "in member.roles def gainedRole(before, after, roleID): role = before.guild.get_role(roleID) return", "DEFAULT = 0 AQUA = 1752220 GREEN = 3066993 BLUE", "= 15844367 ORANGE = 15105570 RED = 15158332 GREY =", "def hasRole(member, roleID): role = member.guild.get_role(roleID) return role in member.roles", "(role not in before.roles) and (role in after.roles) def isExplorer(ctx):", "= 10038562 DARK_GREY = 9936031 LIGHT_GREY = 12370112 DARK_NAVY =", "roles[\"explorer\"]) def isNetwork(ctx): return hasRole(ctx.author, roles[\"network\"]) def isLeader(ctx): return hasRole(ctx.author,", "= 3426654 DARK_AQUA = 1146986 DARK_GREEN = 2067276 DARK_BLUE =", "11027200 DARK_RED = 10038562 DARK_GREY = 9936031 LIGHT_GREY = 12370112", "= 10181046 GOLD = 15844367 ORANGE = 15105570 RED =", "= 8359053 NAVY = 3426654 DARK_AQUA = 1146986 DARK_GREEN =", "15158332 GREY = 9807270 DARKER_GREY = 8359053 NAVY = 3426654", "PURPLE = 10181046 GOLD = 15844367 ORANGE = 15105570 RED", "class Colours: DEFAULT = 0 AQUA = 1752220 GREEN =", "hasRole(ctx.author, roles[\"admin\"]) def isBot(ctx): return hasRole(ctx.author, roles[\"bot\"]) class Colours: DEFAULT", "3426654 DARK_AQUA = 1146986 DARK_GREEN = 2067276 DARK_BLUE = 2123412", "before.roles) and (role in after.roles) def isExplorer(ctx): return hasRole(ctx.author, roles[\"explorer\"])", "= 3447003 PURPLE = 10181046 GOLD = 15844367 ORANGE =", "GOLD = 15844367 ORANGE = 15105570 RED = 15158332 GREY", "2123412 DARK_PURPLE = 7419530 DARK_GOLD = 12745742 DARK_ORANGE = 11027200", "roles[\"admin\"]) def isBot(ctx): return hasRole(ctx.author, roles[\"bot\"]) class Colours: DEFAULT =", "gainedRole(before, after, roleID): role = 
before.guild.get_role(roleID) return (role not in" ]
[ "Writes Neo4jCsvSerializable into CSV files. There are multiple CSV files", "def init(self, conf): # type: (ConfigTree) -> None \"\"\" Initializing", "from databuilder.utils.closer import Closer LOGGER = logging.getLogger(__name__) class FsNeo4jCSVLoader(Loader): \"\"\"", "\\ conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH) self._delete_created_dir = \\ conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR) self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR) self._create_directory(self._node_dir)", "writes. This is because there're not only node and relationship,", "False }) def __init__(self): # type: () -> None self._node_file_mapping", "= 'node_dir_path' RELATION_DIR_PATH = 'relationship_dir_path' FORCE_CREATE_DIR = 'force_create_directory' SHOULD_DELETE_CREATED_DIR =", "writer does not exist, it's creates a csv writer and", "a csv writer and update the mapping. :param csv_record_dict: :param", "# type: (ConfigTree) -> None \"\"\" Initializing FsNeo4jCsvLoader by creating", "logging.getLogger(__name__) class FsNeo4jCSVLoader(Loader): \"\"\" Write node and relationship CSV file(s)", "def file_out_close(): # type: () -> None LOGGER.info('Closing file IO", "file_suffix # type: str ): # type: (...) -> DictWriter", "from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable # noqa: F401 from databuilder.utils.closer import", "Neo4jCsvSerializable \"\"\" # Config keys NODE_DIR_PATH = 'node_dir_path' RELATION_DIR_PATH =", "and relationship CSV file(s) that can be consumed by Neo4jCsvPublisher.", "and relations: 1. 
retrieve csv row (a dict where keys", "open('{}/{}.csv'.format(dir_path, file_suffix), 'w') def file_out_close(): # type: () -> None", "= {} # type: Dict[Any, DictWriter] self._closer = Closer() def", "key2[2]) relation_writer = self._get_writer(relation_dict, self._relation_file_mapping, key2, self._relation_dir, file_suffix) relation_writer.writerow(relation_dict) relation_dict", "writer = file_mapping.get(key) if writer: return writer LOGGER.info('Creating file for", ":return: \"\"\" self._closer.close() def get_scope(self): # type: () -> str", "databuilder.utils.closer import Closer LOGGER = logging.getLogger(__name__) class FsNeo4jCSVLoader(Loader): \"\"\" Write", "csv_serializable.next_node() while node_dict: key = (node_dict[NODE_LABEL], len(node_dict)) file_suffix = '{}_{}'.format(*key)", "csv writer and write to it. 3. repeat 1 and", "CSV file(s) that can be consumed by Neo4jCsvPublisher. It assumes", ":param file_suffix: :return: \"\"\" writer = file_mapping.get(key) if writer: return", "# type: Dict[Any, DictWriter] key, # type: Any dir_path, #", "this method writes. This is because there're not only node", "it's creates a csv writer and update the mapping. :param", "writer based on csv record, key. If writer does not", "conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG) self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH) self._relation_dir = \\ conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH)", "should be deleted after publish is finished Job.closer.register(_delete_dir) def load(self,", "in configuration should not exist. :param conf: :return: \"\"\" conf", ":return: \"\"\" conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG) self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH) self._relation_dir =", "created directory function to Job.closer. :param path: :return: \"\"\" if", "consumed by Neo4jCsvPublisher. 
It assumes that the record it consumes", "\"\"\" conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG) self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH) self._relation_dir = \\", "noqa: F401 from pyhocon import ConfigTree, ConfigFactory # noqa: F401", "_create_directory(self, path): # type: (str) -> None \"\"\" Validate directory", "node_writer = self._get_writer(node_dict, self._node_file_mapping, key, self._node_dir, file_suffix) node_writer.writerow(node_dict) node_dict =", "\"\"\" Any closeable callable registered in _closer, it will close.", "noqa: F401 from typing import Dict, Any # noqa: F401", "# type: str file_suffix # type: str ): # type:", "based on csv record, key. If writer does not exist,", "= 'force_create_directory' SHOULD_DELETE_CREATED_DIR = 'delete_created_directories' _DEFAULT_CONFIG = ConfigFactory.from_dict({ SHOULD_DELETE_CREATED_DIR: True,", "(Neo4jCsvSerializable) -> None \"\"\" Writes Neo4jCsvSerializable into CSV files. There", "-> None \"\"\" Writes Neo4jCsvSerializable into CSV files. There are", "= 'relationship_dir_path' FORCE_CREATE_DIR = 'force_create_directory' SHOULD_DELETE_CREATED_DIR = 'delete_created_directories' _DEFAULT_CONFIG =", "import os import shutil from csv import DictWriter # noqa:", "1. retrieve csv row (a dict where keys represent a", "conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG) self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH) self._relation_dir = \\ conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH) self._delete_created_dir =", "files. There are multiple CSV files that this method writes.", "__init__(self): # type: () -> None self._node_file_mapping = {} #", "relationship CSV file(s) that can be consumed by Neo4jCsvPublisher. It", "csv_serializable.next_relation() while relation_dict: key2 = (relation_dict[RELATION_START_LABEL], relation_dict[RELATION_END_LABEL], relation_dict[RELATION_TYPE], len(relation_dict)) file_suffix", "(...) 
-> DictWriter \"\"\" Finds a writer based on csv", "Dict[Any, DictWriter] key, # type: Any dir_path, # type: str", "and relationship files. Note that the directory defined in configuration", "file_suffix: :return: \"\"\" writer = file_mapping.get(key) if writer: return writer", "= csv_serializable.next_node() while node_dict: key = (node_dict[NODE_LABEL], len(node_dict)) file_suffix =", "Write node and relationship CSV file(s) that can be consumed", "that this method writes. This is because there're not only", "does not exist, creates it, register deletion of created directory", "F401 from databuilder.utils.closer import Closer LOGGER = logging.getLogger(__name__) class FsNeo4jCSVLoader(Loader):", "{}'.format(file_out)) file_out.close() self._closer.register(file_out_close) writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(), quoting=csv.QUOTE_NONNUMERIC) writer.writeheader() file_mapping[key]", "deletion of created directory function to Job.closer. :param path: :return:", "return LOGGER.info('Deleting directory {}'.format(path)) shutil.rmtree(path) # Directory should be deleted", "DictWriter] key, # type: Any dir_path, # type: str file_suffix", "(relation_dict[RELATION_START_LABEL], relation_dict[RELATION_END_LABEL], relation_dict[RELATION_TYPE], len(relation_dict)) file_suffix = '{}_{}_{}'.format(key2[0], key2[1], key2[2]) relation_writer", "it consumes is instance of Neo4jCsvSerializable \"\"\" # Config keys", "'{}_{}'.format(*key) node_writer = self._get_writer(node_dict, self._node_file_mapping, key, self._node_dir, file_suffix) node_writer.writerow(node_dict) node_dict", "os.makedirs(path) def _delete_dir(): # type: () -> None if not", ":return: \"\"\" writer = file_mapping.get(key) if writer: return writer LOGGER.info('Creating", "and write to it. 3. 
repeat 1 and 2 :param", "# type: Dict[Any, DictWriter] self._closer = Closer() def init(self, conf):", "typing import Dict, Any # noqa: F401 from databuilder.job.base_job import", "= {} # type: Dict[Any, DictWriter] self._relation_file_mapping = {} #", "\\ conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR) self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR) self._create_directory(self._node_dir) self._create_directory(self._relation_dir) def _create_directory(self, path):", "can also have different nodes, and relationships. Common pattern for", "self._get_writer(node_dict, self._node_file_mapping, key, self._node_dir, file_suffix) node_writer.writerow(node_dict) node_dict = csv_serializable.next_node() relation_dict", ":param path: :return: \"\"\" if os.path.exists(path): if self._force_create_dir: LOGGER.info('Directory exist.", "str ): # type: (...) -> DictWriter \"\"\" Finds a", "type: (str) -> None \"\"\" Validate directory does not exist,", "close(self): # type: () -> None \"\"\" Any closeable callable", "dict where keys represent a header, values represent a row)", "directory {}'.format(path)) shutil.rmtree(path) # Directory should be deleted after publish", "self._node_file_mapping = {} # type: Dict[Any, DictWriter] self._relation_file_mapping = {}", "If writer does not exist, it's creates a csv writer", "Any closeable callable registered in _closer, it will close. 
:return:", "key, # type: Any dir_path, # type: str file_suffix #", "self._node_file_mapping, key, self._node_dir, file_suffix) node_writer.writerow(node_dict) node_dict = csv_serializable.next_node() relation_dict =", "conf: :return: \"\"\" conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG) self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH) self._relation_dir", "creates it, register deletion of created directory function to Job.closer.", "Validate directory does not exist, creates it, register deletion of", "keys represent a header, values represent a row) 2. using", "represent a header, values represent a row) 2. using this", "self._closer.register(file_out_close) writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(), quoting=csv.QUOTE_NONNUMERIC) writer.writeheader() file_mapping[key] = writer", "This is because there're not only node and relationship, but", "LOGGER.info('Creating file for {}'.format(key)) file_out = open('{}/{}.csv'.format(dir_path, file_suffix), 'w') def", "1 and 2 :param csv_serializable: :return: \"\"\" node_dict = csv_serializable.next_node()", "() -> None self._node_file_mapping = {} # type: Dict[Any, DictWriter]", "None \"\"\" Writes Neo4jCsvSerializable into CSV files. There are multiple", "using this dict to get a appropriate csv writer and", "= ConfigFactory.from_dict({ SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR: False }) def __init__(self): #", "self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH) self._relation_dir = \\ conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH) self._delete_created_dir = \\", "close. :return: \"\"\" self._closer.close() def get_scope(self): # type: () ->", "this dict to get a appropriate csv writer and write", "pattern for both nodes and relations: 1. 
retrieve csv row", "type: Dict[Any, DictWriter] self._relation_file_mapping = {} # type: Dict[Any, DictWriter]", "and relationship, but also it can also have different nodes,", "def __init__(self): # type: () -> None self._node_file_mapping = {}", ":param csv_serializable: :return: \"\"\" node_dict = csv_serializable.next_node() while node_dict: key", "update the mapping. :param csv_record_dict: :param file_mapping: :param key: :param", ":param conf: :return: \"\"\" conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG) self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH)", "# noqa: F401 from typing import Dict, Any # noqa:", "creating directory for node files and relationship files. Note that", "DictWriter \"\"\" Finds a writer based on csv record, key.", "file_mapping: :param key: :param file_suffix: :return: \"\"\" writer = file_mapping.get(key)", "Directory should be deleted after publish is finished Job.closer.register(_delete_dir) def", "\\ RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable # noqa:", "RuntimeError('Directory should not exist: {}'.format(path)) os.makedirs(path) def _delete_dir(): # type:", "= '{}_{}_{}'.format(key2[0], key2[1], key2[2]) relation_writer = self._get_writer(relation_dict, self._relation_file_mapping, key2, self._relation_dir,", "writer return writer def close(self): # type: () -> None", "record it consumes is instance of Neo4jCsvSerializable \"\"\" # Config", "instance of Neo4jCsvSerializable \"\"\" # Config keys NODE_DIR_PATH = 'node_dir_path'", "but also it can also have different nodes, and relationships.", "where keys represent a header, values represent a row) 2.", "is finished Job.closer.register(_delete_dir) def load(self, csv_serializable): # type: (Neo4jCsvSerializable) ->", "writer and write to it. 3. 
repeat 1 and 2", "(str) -> None \"\"\" Validate directory does not exist, creates", "True, FORCE_CREATE_DIR: False }) def __init__(self): # type: () ->", "publish is finished Job.closer.register(_delete_dir) def load(self, csv_serializable): # type: (Neo4jCsvSerializable)", "import shutil from csv import DictWriter # noqa: F401 from", "= \\ conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH) self._delete_created_dir = \\ conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR) self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR)", "csv record, key. If writer does not exist, it's creates", "relation_dict = csv_serializable.next_relation() def _get_writer(self, csv_record_dict, # type: Dict[str, Any]", "the directory defined in configuration should not exist. :param conf:", "directory does not exist, creates it, register deletion of created", "{}'.format(path)) return LOGGER.info('Deleting directory {}'.format(path)) shutil.rmtree(path) # Directory should be", "}) def __init__(self): # type: () -> None self._node_file_mapping =", "keys NODE_DIR_PATH = 'node_dir_path' RELATION_DIR_PATH = 'relationship_dir_path' FORCE_CREATE_DIR = 'force_create_directory'", "2. using this dict to get a appropriate csv writer", "{}'.format(path)) shutil.rmtree(path) # Directory should be deleted after publish is", "files that this method writes. This is because there're not", "str file_suffix # type: str ): # type: (...) 
->", "# type: () -> None \"\"\" Any closeable callable registered", "import logging import os import shutil from csv import DictWriter", "databuilder.models.neo4j_csv_serde import NODE_LABEL, \\ RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE from databuilder.models.neo4j_csv_serde import", "self._node_dir, file_suffix) node_writer.writerow(node_dict) node_dict = csv_serializable.next_node() relation_dict = csv_serializable.next_relation() while", "= logging.getLogger(__name__) class FsNeo4jCSVLoader(Loader): \"\"\" Write node and relationship CSV", "Dict, Any # noqa: F401 from databuilder.job.base_job import Job from", "# noqa: F401 from databuilder.utils.closer import Closer LOGGER = logging.getLogger(__name__)", "while relation_dict: key2 = (relation_dict[RELATION_START_LABEL], relation_dict[RELATION_END_LABEL], relation_dict[RELATION_TYPE], len(relation_dict)) file_suffix =", "to Job.closer. :param path: :return: \"\"\" if os.path.exists(path): if self._force_create_dir:", "_DEFAULT_CONFIG = ConfigFactory.from_dict({ SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR: False }) def __init__(self):", "self._create_directory(self._relation_dir) def _create_directory(self, path): # type: (str) -> None \"\"\"", "# type: () -> None if not self._delete_created_dir: LOGGER.warn('Skip Deleting", "also have different nodes, and relationships. Common pattern for both", "file_mapping[key] = writer return writer def close(self): # type: ()", "os.path.exists(path): if self._force_create_dir: LOGGER.info('Directory exist. Deleting directory {}'.format(path)) shutil.rmtree(path) else:", "Job.closer. 
:param path: :return: \"\"\" if os.path.exists(path): if self._force_create_dir: LOGGER.info('Directory", "None LOGGER.info('Closing file IO {}'.format(file_out)) file_out.close() self._closer.register(file_out_close) writer = csv.DictWriter(file_out,", "F401 from typing import Dict, Any # noqa: F401 from", "noqa: F401 from databuilder.job.base_job import Job from databuilder.loader.base_loader import Loader", "by Neo4jCsvPublisher. It assumes that the record it consumes is", "if os.path.exists(path): if self._force_create_dir: LOGGER.info('Directory exist. Deleting directory {}'.format(path)) shutil.rmtree(path)", "= csv_serializable.next_relation() def _get_writer(self, csv_record_dict, # type: Dict[str, Any] file_mapping,", "type: Dict[str, Any] file_mapping, # type: Dict[Any, DictWriter] key, #", "self._get_writer(relation_dict, self._relation_file_mapping, key2, self._relation_dir, file_suffix) relation_writer.writerow(relation_dict) relation_dict = csv_serializable.next_relation() def", "nodes, and relationships. Common pattern for both nodes and relations:", "FORCE_CREATE_DIR = 'force_create_directory' SHOULD_DELETE_CREATED_DIR = 'delete_created_directories' _DEFAULT_CONFIG = ConfigFactory.from_dict({ SHOULD_DELETE_CREATED_DIR:", "values represent a row) 2. using this dict to get", "_closer, it will close. :return: \"\"\" self._closer.close() def get_scope(self): #", "to it. 3. 
repeat 1 and 2 :param csv_serializable: :return:", "databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable # noqa: F401 from databuilder.utils.closer import Closer", "'w') def file_out_close(): # type: () -> None LOGGER.info('Closing file", "dir_path, # type: str file_suffix # type: str ): #", "shutil from csv import DictWriter # noqa: F401 from pyhocon", "F401 from databuilder.job.base_job import Job from databuilder.loader.base_loader import Loader from", "FsNeo4jCSVLoader(Loader): \"\"\" Write node and relationship CSV file(s) that can", "conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR) self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR) self._create_directory(self._node_dir) self._create_directory(self._relation_dir) def _create_directory(self, path): #", "row (a dict where keys represent a header, values represent", "file_out = open('{}/{}.csv'.format(dir_path, file_suffix), 'w') def file_out_close(): # type: ()", "relationship files. Note that the directory defined in configuration should", "# Config keys NODE_DIR_PATH = 'node_dir_path' RELATION_DIR_PATH = 'relationship_dir_path' FORCE_CREATE_DIR", "should not exist. :param conf: :return: \"\"\" conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG)", "on csv record, key. If writer does not exist, it's", "import Closer LOGGER = logging.getLogger(__name__) class FsNeo4jCSVLoader(Loader): \"\"\" Write node", "# type: str ): # type: (...) -> DictWriter \"\"\"", "Dict[Any, DictWriter] self._closer = Closer() def init(self, conf): # type:", "creates a csv writer and update the mapping. :param csv_record_dict:", "of created directory function to Job.closer. 
:param path: :return: \"\"\"", "file_suffix) relation_writer.writerow(relation_dict) relation_dict = csv_serializable.next_relation() def _get_writer(self, csv_record_dict, # type:", "NODE_LABEL, \\ RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable #", "CSV files that this method writes. This is because there're", "also it can also have different nodes, and relationships. Common", "it will close. :return: \"\"\" self._closer.close() def get_scope(self): # type:", "key2 = (relation_dict[RELATION_START_LABEL], relation_dict[RELATION_END_LABEL], relation_dict[RELATION_TYPE], len(relation_dict)) file_suffix = '{}_{}_{}'.format(key2[0], key2[1],", "self._closer.close() def get_scope(self): # type: () -> str return \"loader.filesystem_csv_neo4j\"", "shutil.rmtree(path) # Directory should be deleted after publish is finished", "get a appropriate csv writer and write to it. 3.", "key2[1], key2[2]) relation_writer = self._get_writer(relation_dict, self._relation_file_mapping, key2, self._relation_dir, file_suffix) relation_writer.writerow(relation_dict)", "(a dict where keys represent a header, values represent a", "appropriate csv writer and write to it. 3. 
repeat 1", "Loader from databuilder.models.neo4j_csv_serde import NODE_LABEL, \\ RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE from", "csv row (a dict where keys represent a header, values", "= writer return writer def close(self): # type: () ->", "None \"\"\" Validate directory does not exist, creates it, register", "ConfigTree, ConfigFactory # noqa: F401 from typing import Dict, Any", "None self._node_file_mapping = {} # type: Dict[Any, DictWriter] self._relation_file_mapping =", "# noqa: F401 from pyhocon import ConfigTree, ConfigFactory # noqa:", "LOGGER = logging.getLogger(__name__) class FsNeo4jCSVLoader(Loader): \"\"\" Write node and relationship", "= (node_dict[NODE_LABEL], len(node_dict)) file_suffix = '{}_{}'.format(*key) node_writer = self._get_writer(node_dict, self._node_file_mapping,", "LOGGER.info('Deleting directory {}'.format(path)) shutil.rmtree(path) # Directory should be deleted after", "= open('{}/{}.csv'.format(dir_path, file_suffix), 'w') def file_out_close(): # type: () ->", "type: str ): # type: (...) -> DictWriter \"\"\" Finds", "DictWriter # noqa: F401 from pyhocon import ConfigTree, ConfigFactory #", "a header, values represent a row) 2. using this dict", "exist. Deleting directory {}'.format(path)) shutil.rmtree(path) else: raise RuntimeError('Directory should not", "# type: Dict[Any, DictWriter] self._relation_file_mapping = {} # type: Dict[Any,", "() -> None LOGGER.info('Closing file IO {}'.format(file_out)) file_out.close() self._closer.register(file_out_close) writer", "\"\"\" node_dict = csv_serializable.next_node() while node_dict: key = (node_dict[NODE_LABEL], len(node_dict))", "ConfigFactory # noqa: F401 from typing import Dict, Any #", "a appropriate csv writer and write to it. 3. repeat", "method writes. This is because there're not only node and", "if writer: return writer LOGGER.info('Creating file for {}'.format(key)) file_out =", "because there're not only node and relationship, but also it", "a row) 2. 
using this dict to get a appropriate", "LOGGER.info('Closing file IO {}'.format(file_out)) file_out.close() self._closer.register(file_out_close) writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(),", "os import shutil from csv import DictWriter # noqa: F401", "should not exist: {}'.format(path)) os.makedirs(path) def _delete_dir(): # type: ()", "databuilder.job.base_job import Job from databuilder.loader.base_loader import Loader from databuilder.models.neo4j_csv_serde import", "-> None self._node_file_mapping = {} # type: Dict[Any, DictWriter] self._relation_file_mapping", "configuration should not exist. :param conf: :return: \"\"\" conf =", "deleted after publish is finished Job.closer.register(_delete_dir) def load(self, csv_serializable): #", "and relationships. Common pattern for both nodes and relations: 1.", "be deleted after publish is finished Job.closer.register(_delete_dir) def load(self, csv_serializable):", "node_writer.writerow(node_dict) node_dict = csv_serializable.next_node() relation_dict = csv_serializable.next_relation() while relation_dict: key2", "relation_dict[RELATION_TYPE], len(relation_dict)) file_suffix = '{}_{}_{}'.format(key2[0], key2[1], key2[2]) relation_writer = self._get_writer(relation_dict,", "None \"\"\" Initializing FsNeo4jCsvLoader by creating directory for node files", "node and relationship, but also it can also have different", "len(node_dict)) file_suffix = '{}_{}'.format(*key) node_writer = self._get_writer(node_dict, self._node_file_mapping, key, self._node_dir,", "key = (node_dict[NODE_LABEL], len(node_dict)) file_suffix = '{}_{}'.format(*key) node_writer = self._get_writer(node_dict,", "# Directory should be deleted after publish is finished Job.closer.register(_delete_dir)", "() -> None \"\"\" Any closeable callable registered in _closer,", ":param csv_record_dict: :param file_mapping: :param key: :param file_suffix: :return: \"\"\"", "self._relation_dir = \\ 
conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH) self._delete_created_dir = \\ conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR) self._force_create_dir =", "shutil.rmtree(path) else: raise RuntimeError('Directory should not exist: {}'.format(path)) os.makedirs(path) def", "IO {}'.format(file_out)) file_out.close() self._closer.register(file_out_close) writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(), quoting=csv.QUOTE_NONNUMERIC) writer.writeheader()", "csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(), quoting=csv.QUOTE_NONNUMERIC) writer.writeheader() file_mapping[key] = writer return writer def", "assumes that the record it consumes is instance of Neo4jCsvSerializable", "\"\"\" Initializing FsNeo4jCsvLoader by creating directory for node files and", "directory {}'.format(path)) shutil.rmtree(path) else: raise RuntimeError('Directory should not exist: {}'.format(path))", "file_suffix), 'w') def file_out_close(): # type: () -> None LOGGER.info('Closing", "not only node and relationship, but also it can also", "SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR: False }) def __init__(self): # type: ()", ":return: \"\"\" node_dict = csv_serializable.next_node() while node_dict: key = (node_dict[NODE_LABEL],", "self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR) self._create_directory(self._node_dir) self._create_directory(self._relation_dir) def _create_directory(self, path): # type:", "= self._get_writer(node_dict, self._node_file_mapping, key, self._node_dir, file_suffix) node_writer.writerow(node_dict) node_dict = csv_serializable.next_node()", "from databuilder.job.base_job import Job from databuilder.loader.base_loader import Loader from databuilder.models.neo4j_csv_serde", "Initializing FsNeo4jCsvLoader by creating directory for node files and relationship", "RELATION_END_LABEL, RELATION_TYPE from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable # noqa: F401 from", "file(s) that can be 
consumed by Neo4jCsvPublisher. It assumes that", "Dict[str, Any] file_mapping, # type: Dict[Any, DictWriter] key, # type:", "the record it consumes is instance of Neo4jCsvSerializable \"\"\" #", "# type: Any dir_path, # type: str file_suffix # type:", "for {}'.format(key)) file_out = open('{}/{}.csv'.format(dir_path, file_suffix), 'w') def file_out_close(): #", "writer and update the mapping. :param csv_record_dict: :param file_mapping: :param", "\"\"\" if os.path.exists(path): if self._force_create_dir: LOGGER.info('Directory exist. Deleting directory {}'.format(path))", "Job from databuilder.loader.base_loader import Loader from databuilder.models.neo4j_csv_serde import NODE_LABEL, \\", "RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable # noqa: F401", "relation_dict[RELATION_END_LABEL], relation_dict[RELATION_TYPE], len(relation_dict)) file_suffix = '{}_{}_{}'.format(key2[0], key2[1], key2[2]) relation_writer =", "-> None \"\"\" Initializing FsNeo4jCsvLoader by creating directory for node", "csv_record_dict, # type: Dict[str, Any] file_mapping, # type: Dict[Any, DictWriter]", "def _delete_dir(): # type: () -> None if not self._delete_created_dir:", "self._closer = Closer() def init(self, conf): # type: (ConfigTree) ->", ":param file_mapping: :param key: :param file_suffix: :return: \"\"\" writer =", "= csv_serializable.next_relation() while relation_dict: key2 = (relation_dict[RELATION_START_LABEL], relation_dict[RELATION_END_LABEL], relation_dict[RELATION_TYPE], len(relation_dict))", "multiple CSV files that this method writes. 
This is because", "Neo4jCsvSerializable # noqa: F401 from databuilder.utils.closer import Closer LOGGER =", "'relationship_dir_path' FORCE_CREATE_DIR = 'force_create_directory' SHOULD_DELETE_CREATED_DIR = 'delete_created_directories' _DEFAULT_CONFIG = ConfigFactory.from_dict({", "that the record it consumes is instance of Neo4jCsvSerializable \"\"\"", "pyhocon import ConfigTree, ConfigFactory # noqa: F401 from typing import", "type: () -> None if not self._delete_created_dir: LOGGER.warn('Skip Deleting directory", "relationships. Common pattern for both nodes and relations: 1. retrieve", "files and relationship files. Note that the directory defined in", "to get a appropriate csv writer and write to it.", "\"\"\" writer = file_mapping.get(key) if writer: return writer LOGGER.info('Creating file", "writer def close(self): # type: () -> None \"\"\" Any", "exist. :param conf: :return: \"\"\" conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG) self._node_dir =", "csv writer and update the mapping. :param csv_record_dict: :param file_mapping:", "in _closer, it will close. :return: \"\"\" self._closer.close() def get_scope(self):", "directory {}'.format(path)) return LOGGER.info('Deleting directory {}'.format(path)) shutil.rmtree(path) # Directory should", "both nodes and relations: 1. retrieve csv row (a dict", "import DictWriter # noqa: F401 from pyhocon import ConfigTree, ConfigFactory", "DictWriter] self._closer = Closer() def init(self, conf): # type: (ConfigTree)", "self._delete_created_dir = \\ conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR) self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR) self._create_directory(self._node_dir) self._create_directory(self._relation_dir) def", "represent a row) 2. 
using this dict to get a", "import NODE_LABEL, \\ RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable", "csv_serializable.next_node() relation_dict = csv_serializable.next_relation() while relation_dict: key2 = (relation_dict[RELATION_START_LABEL], relation_dict[RELATION_END_LABEL],", "node_dict: key = (node_dict[NODE_LABEL], len(node_dict)) file_suffix = '{}_{}'.format(*key) node_writer =", "key. If writer does not exist, it's creates a csv", "import Job from databuilder.loader.base_loader import Loader from databuilder.models.neo4j_csv_serde import NODE_LABEL,", "def close(self): # type: () -> None \"\"\" Any closeable", "return writer def close(self): # type: () -> None \"\"\"", "conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH) self._delete_created_dir = \\ conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR) self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR) self._create_directory(self._node_dir) self._create_directory(self._relation_dir)", "fieldnames=csv_record_dict.keys(), quoting=csv.QUOTE_NONNUMERIC) writer.writeheader() file_mapping[key] = writer return writer def close(self):", "not exist, creates it, register deletion of created directory function", "FORCE_CREATE_DIR: False }) def __init__(self): # type: () -> None", "csv_serializable): # type: (Neo4jCsvSerializable) -> None \"\"\" Writes Neo4jCsvSerializable into", "Any] file_mapping, # type: Dict[Any, DictWriter] key, # type: Any", "path: :return: \"\"\" if os.path.exists(path): if self._force_create_dir: LOGGER.info('Directory exist. 
Deleting", "else: raise RuntimeError('Directory should not exist: {}'.format(path)) os.makedirs(path) def _delete_dir():", "Dict[Any, DictWriter] self._relation_file_mapping = {} # type: Dict[Any, DictWriter] self._closer", "class FsNeo4jCSVLoader(Loader): \"\"\" Write node and relationship CSV file(s) that", "path): # type: (str) -> None \"\"\" Validate directory does", "have different nodes, and relationships. Common pattern for both nodes", "# type: () -> None self._node_file_mapping = {} # type:", "import csv import logging import os import shutil from csv", "can be consumed by Neo4jCsvPublisher. It assumes that the record", "FsNeo4jCsvLoader by creating directory for node files and relationship files.", "-> None \"\"\" Any closeable callable registered in _closer, it", "= conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG) self._node_dir = conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH) self._relation_dir = \\ conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH) self._delete_created_dir", "register deletion of created directory function to Job.closer. 
:param path:", "type: (ConfigTree) -> None \"\"\" Initializing FsNeo4jCsvLoader by creating directory", "relation_dict = csv_serializable.next_relation() while relation_dict: key2 = (relation_dict[RELATION_START_LABEL], relation_dict[RELATION_END_LABEL], relation_dict[RELATION_TYPE],", "'delete_created_directories' _DEFAULT_CONFIG = ConfigFactory.from_dict({ SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR: False }) def", "= \\ conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR) self._force_create_dir = conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR) self._create_directory(self._node_dir) self._create_directory(self._relation_dir) def _create_directory(self,", "def _create_directory(self, path): # type: (str) -> None \"\"\" Validate", "NODE_DIR_PATH = 'node_dir_path' RELATION_DIR_PATH = 'relationship_dir_path' FORCE_CREATE_DIR = 'force_create_directory' SHOULD_DELETE_CREATED_DIR", "node_dict = csv_serializable.next_node() while node_dict: key = (node_dict[NODE_LABEL], len(node_dict)) file_suffix", "retrieve csv row (a dict where keys represent a header,", "the mapping. :param csv_record_dict: :param file_mapping: :param key: :param file_suffix:", "and 2 :param csv_serializable: :return: \"\"\" node_dict = csv_serializable.next_node() while", "import ConfigTree, ConfigFactory # noqa: F401 from typing import Dict,", "_delete_dir(): # type: () -> None if not self._delete_created_dir: LOGGER.warn('Skip", "will close. :return: \"\"\" self._closer.close() def get_scope(self): # type: ()", "file_suffix) node_writer.writerow(node_dict) node_dict = csv_serializable.next_node() relation_dict = csv_serializable.next_relation() while relation_dict:", "from csv import DictWriter # noqa: F401 from pyhocon import", "closeable callable registered in _closer, it will close. 
:return: \"\"\"", "type: () -> None \"\"\" Any closeable callable registered in", "_get_writer(self, csv_record_dict, # type: Dict[str, Any] file_mapping, # type: Dict[Any,", "type: Dict[Any, DictWriter] self._closer = Closer() def init(self, conf): #", "relations: 1. retrieve csv row (a dict where keys represent", "that the directory defined in configuration should not exist. :param", "len(relation_dict)) file_suffix = '{}_{}_{}'.format(key2[0], key2[1], key2[2]) relation_writer = self._get_writer(relation_dict, self._relation_file_mapping,", "node and relationship CSV file(s) that can be consumed by", "function to Job.closer. :param path: :return: \"\"\" if os.path.exists(path): if", "Finds a writer based on csv record, key. If writer", "None if not self._delete_created_dir: LOGGER.warn('Skip Deleting directory {}'.format(path)) return LOGGER.info('Deleting", "row) 2. using this dict to get a appropriate csv", "callable registered in _closer, it will close. :return: \"\"\" self._closer.close()", "file_suffix = '{}_{}_{}'.format(key2[0], key2[1], key2[2]) relation_writer = self._get_writer(relation_dict, self._relation_file_mapping, key2,", "self._delete_created_dir: LOGGER.warn('Skip Deleting directory {}'.format(path)) return LOGGER.info('Deleting directory {}'.format(path)) shutil.rmtree(path)", "import Dict, Any # noqa: F401 from databuilder.job.base_job import Job", "csv_record_dict: :param file_mapping: :param key: :param file_suffix: :return: \"\"\" writer", "RELATION_TYPE from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable # noqa: F401 from databuilder.utils.closer", "relation_dict: key2 = (relation_dict[RELATION_START_LABEL], relation_dict[RELATION_END_LABEL], relation_dict[RELATION_TYPE], len(relation_dict)) file_suffix = '{}_{}_{}'.format(key2[0],", "load(self, csv_serializable): # type: (Neo4jCsvSerializable) -> None \"\"\" Writes Neo4jCsvSerializable", "'node_dir_path' RELATION_DIR_PATH = 'relationship_dir_path' FORCE_CREATE_DIR = 
'force_create_directory' SHOULD_DELETE_CREATED_DIR = 'delete_created_directories'", "# type: (str) -> None \"\"\" Validate directory does not", "csv_serializable.next_relation() def _get_writer(self, csv_record_dict, # type: Dict[str, Any] file_mapping, #", "writer LOGGER.info('Creating file for {}'.format(key)) file_out = open('{}/{}.csv'.format(dir_path, file_suffix), 'w')", "Neo4jCsvPublisher. It assumes that the record it consumes is instance", "= conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR) self._create_directory(self._node_dir) self._create_directory(self._relation_dir) def _create_directory(self, path): # type: (str)", "Closer LOGGER = logging.getLogger(__name__) class FsNeo4jCSVLoader(Loader): \"\"\" Write node and", "header, values represent a row) 2. using this dict to", "does not exist, it's creates a csv writer and update", "is instance of Neo4jCsvSerializable \"\"\" # Config keys NODE_DIR_PATH =", "not exist. :param conf: :return: \"\"\" conf = conf.with_fallback(FsNeo4jCSVLoader._DEFAULT_CONFIG) self._node_dir", "\"\"\" Validate directory does not exist, creates it, register deletion", "writer: return writer LOGGER.info('Creating file for {}'.format(key)) file_out = open('{}/{}.csv'.format(dir_path,", "of Neo4jCsvSerializable \"\"\" # Config keys NODE_DIR_PATH = 'node_dir_path' RELATION_DIR_PATH", "Common pattern for both nodes and relations: 1. retrieve csv", "# type: (...) -> DictWriter \"\"\" Finds a writer based", "directory for node files and relationship files. Note that the", "exist: {}'.format(path)) os.makedirs(path) def _delete_dir(): # type: () -> None", "noqa: F401 from databuilder.utils.closer import Closer LOGGER = logging.getLogger(__name__) class", "it. 3. 
repeat 1 and 2 :param csv_serializable: :return: \"\"\"", "(ConfigTree) -> None \"\"\" Initializing FsNeo4jCsvLoader by creating directory for", "from databuilder.loader.base_loader import Loader from databuilder.models.neo4j_csv_serde import NODE_LABEL, \\ RELATION_START_LABEL,", "directory defined in configuration should not exist. :param conf: :return:", "from databuilder.models.neo4j_csv_serde import NODE_LABEL, \\ RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE from databuilder.models.neo4j_csv_serde", "into CSV files. There are multiple CSV files that this", "file for {}'.format(key)) file_out = open('{}/{}.csv'.format(dir_path, file_suffix), 'w') def file_out_close():", "= '{}_{}'.format(*key) node_writer = self._get_writer(node_dict, self._node_file_mapping, key, self._node_dir, file_suffix) node_writer.writerow(node_dict)", "\"\"\" self._closer.close() def get_scope(self): # type: () -> str return", "type: (Neo4jCsvSerializable) -> None \"\"\" Writes Neo4jCsvSerializable into CSV files.", "self._create_directory(self._node_dir) self._create_directory(self._relation_dir) def _create_directory(self, path): # type: (str) -> None", "Config keys NODE_DIR_PATH = 'node_dir_path' RELATION_DIR_PATH = 'relationship_dir_path' FORCE_CREATE_DIR =", "Closer() def init(self, conf): # type: (ConfigTree) -> None \"\"\"", "3. repeat 1 and 2 :param csv_serializable: :return: \"\"\" node_dict", "not self._delete_created_dir: LOGGER.warn('Skip Deleting directory {}'.format(path)) return LOGGER.info('Deleting directory {}'.format(path))", "= self._get_writer(relation_dict, self._relation_file_mapping, key2, self._relation_dir, file_suffix) relation_writer.writerow(relation_dict) relation_dict = csv_serializable.next_relation()", "that can be consumed by Neo4jCsvPublisher. 
It assumes that the", "dict to get a appropriate csv writer and write to", "type: () -> None LOGGER.info('Closing file IO {}'.format(file_out)) file_out.close() self._closer.register(file_out_close)", "logging import os import shutil from csv import DictWriter #", "file_mapping.get(key) if writer: return writer LOGGER.info('Creating file for {}'.format(key)) file_out", "-> None \"\"\" Validate directory does not exist, creates it,", "{}'.format(key)) file_out = open('{}/{}.csv'.format(dir_path, file_suffix), 'w') def file_out_close(): # type:", "= csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(), quoting=csv.QUOTE_NONNUMERIC) writer.writeheader() file_mapping[key] = writer return writer", "node files and relationship files. Note that the directory defined", "csv import DictWriter # noqa: F401 from pyhocon import ConfigTree,", "F401 from pyhocon import ConfigTree, ConfigFactory # noqa: F401 from", "different nodes, and relationships. Common pattern for both nodes and", "by creating directory for node files and relationship files. Note", "= (relation_dict[RELATION_START_LABEL], relation_dict[RELATION_END_LABEL], relation_dict[RELATION_TYPE], len(relation_dict)) file_suffix = '{}_{}_{}'.format(key2[0], key2[1], key2[2])", "key, self._node_dir, file_suffix) node_writer.writerow(node_dict) node_dict = csv_serializable.next_node() relation_dict = csv_serializable.next_relation()", "finished Job.closer.register(_delete_dir) def load(self, csv_serializable): # type: (Neo4jCsvSerializable) -> None", "init(self, conf): # type: (ConfigTree) -> None \"\"\" Initializing FsNeo4jCsvLoader", "def _get_writer(self, csv_record_dict, # type: Dict[str, Any] file_mapping, # type:", "type: Dict[Any, DictWriter] key, # type: Any dir_path, # type:", "after publish is finished Job.closer.register(_delete_dir) def load(self, csv_serializable): # type:", "not exist, it's creates a csv writer and update the", "for node files and relationship files. 
Note that the directory", "\"\"\" # Config keys NODE_DIR_PATH = 'node_dir_path' RELATION_DIR_PATH = 'relationship_dir_path'", "() -> None if not self._delete_created_dir: LOGGER.warn('Skip Deleting directory {}'.format(path))", ":return: \"\"\" if os.path.exists(path): if self._force_create_dir: LOGGER.info('Directory exist. Deleting directory", "directory function to Job.closer. :param path: :return: \"\"\" if os.path.exists(path):", "a writer based on csv record, key. If writer does", "It assumes that the record it consumes is instance of", ":param key: :param file_suffix: :return: \"\"\" writer = file_mapping.get(key) if", "nodes and relations: 1. retrieve csv row (a dict where", "self._relation_file_mapping = {} # type: Dict[Any, DictWriter] self._closer = Closer()", "conf.get_bool(FsNeo4jCSVLoader.FORCE_CREATE_DIR) self._create_directory(self._node_dir) self._create_directory(self._relation_dir) def _create_directory(self, path): # type: (str) ->", "Any # noqa: F401 from databuilder.job.base_job import Job from databuilder.loader.base_loader", "file IO {}'.format(file_out)) file_out.close() self._closer.register(file_out_close) writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(), quoting=csv.QUOTE_NONNUMERIC)", "import Neo4jCsvSerializable # noqa: F401 from databuilder.utils.closer import Closer LOGGER", "{} # type: Dict[Any, DictWriter] self._closer = Closer() def init(self,", "Deleting directory {}'.format(path)) return LOGGER.info('Deleting directory {}'.format(path)) shutil.rmtree(path) # Directory", "Deleting directory {}'.format(path)) shutil.rmtree(path) else: raise RuntimeError('Directory should not exist:", "only node and relationship, but also it can also have", "csv import logging import os import shutil from csv import", "it can also have different nodes, and relationships. Common pattern", "if self._force_create_dir: LOGGER.info('Directory exist. 
Deleting directory {}'.format(path)) shutil.rmtree(path) else: raise", "be consumed by Neo4jCsvPublisher. It assumes that the record it", "LOGGER.info('Directory exist. Deleting directory {}'.format(path)) shutil.rmtree(path) else: raise RuntimeError('Directory should", "type: () -> None self._node_file_mapping = {} # type: Dict[Any,", "self._relation_file_mapping, key2, self._relation_dir, file_suffix) relation_writer.writerow(relation_dict) relation_dict = csv_serializable.next_relation() def _get_writer(self,", "databuilder.loader.base_loader import Loader from databuilder.models.neo4j_csv_serde import NODE_LABEL, \\ RELATION_START_LABEL, RELATION_END_LABEL,", "if not self._delete_created_dir: LOGGER.warn('Skip Deleting directory {}'.format(path)) return LOGGER.info('Deleting directory", "DictWriter] self._relation_file_mapping = {} # type: Dict[Any, DictWriter] self._closer =", "it, register deletion of created directory function to Job.closer. :param", "): # type: (...) -> DictWriter \"\"\" Finds a writer", "key: :param file_suffix: :return: \"\"\" writer = file_mapping.get(key) if writer:", "raise RuntimeError('Directory should not exist: {}'.format(path)) os.makedirs(path) def _delete_dir(): #", "Job.closer.register(_delete_dir) def load(self, csv_serializable): # type: (Neo4jCsvSerializable) -> None \"\"\"", "file_out_close(): # type: () -> None LOGGER.info('Closing file IO {}'.format(file_out))", "is because there're not only node and relationship, but also", "repeat 1 and 2 :param csv_serializable: :return: \"\"\" node_dict =", "2 :param csv_serializable: :return: \"\"\" node_dict = csv_serializable.next_node() while node_dict:", "conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH) self._relation_dir = \\ conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH) self._delete_created_dir = \\ conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR) self._force_create_dir", "exist, creates it, register deletion of created directory function to", "\"\"\" Writes 
Neo4jCsvSerializable into CSV files. There are multiple CSV", "# noqa: F401 from databuilder.job.base_job import Job from databuilder.loader.base_loader import", "quoting=csv.QUOTE_NONNUMERIC) writer.writeheader() file_mapping[key] = writer return writer def close(self): #", "def load(self, csv_serializable): # type: (Neo4jCsvSerializable) -> None \"\"\" Writes", "LOGGER.warn('Skip Deleting directory {}'.format(path)) return LOGGER.info('Deleting directory {}'.format(path)) shutil.rmtree(path) #", "record, key. If writer does not exist, it's creates a", "relation_writer.writerow(relation_dict) relation_dict = csv_serializable.next_relation() def _get_writer(self, csv_record_dict, # type: Dict[str,", "type: Any dir_path, # type: str file_suffix # type: str", "files. Note that the directory defined in configuration should not", "not exist: {}'.format(path)) os.makedirs(path) def _delete_dir(): # type: () ->", "= conf.get_string(FsNeo4jCSVLoader.NODE_DIR_PATH) self._relation_dir = \\ conf.get_string(FsNeo4jCSVLoader.RELATION_DIR_PATH) self._delete_created_dir = \\ conf.get_bool(FsNeo4jCSVLoader.SHOULD_DELETE_CREATED_DIR)", "Any dir_path, # type: str file_suffix # type: str ):", "csv_serializable: :return: \"\"\" node_dict = csv_serializable.next_node() while node_dict: key =", "node_dict = csv_serializable.next_node() relation_dict = csv_serializable.next_relation() while relation_dict: key2 =", "writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(), quoting=csv.QUOTE_NONNUMERIC) writer.writeheader() file_mapping[key] = writer return", "= 'delete_created_directories' _DEFAULT_CONFIG = ConfigFactory.from_dict({ SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR: False })", "\"\"\" Finds a writer based on csv record, key. If", "registered in _closer, it will close. :return: \"\"\" self._closer.close() def", "mapping. 
:param csv_record_dict: :param file_mapping: :param key: :param file_suffix: :return:", "-> DictWriter \"\"\" Finds a writer based on csv record,", "import Loader from databuilder.models.neo4j_csv_serde import NODE_LABEL, \\ RELATION_START_LABEL, RELATION_END_LABEL, RELATION_TYPE", "RELATION_DIR_PATH = 'relationship_dir_path' FORCE_CREATE_DIR = 'force_create_directory' SHOULD_DELETE_CREATED_DIR = 'delete_created_directories' _DEFAULT_CONFIG", "-> None if not self._delete_created_dir: LOGGER.warn('Skip Deleting directory {}'.format(path)) return", "and update the mapping. :param csv_record_dict: :param file_mapping: :param key:", "consumes is instance of Neo4jCsvSerializable \"\"\" # Config keys NODE_DIR_PATH", "# type: () -> None LOGGER.info('Closing file IO {}'.format(file_out)) file_out.close()", "# type: (Neo4jCsvSerializable) -> None \"\"\" Writes Neo4jCsvSerializable into CSV", "exist, it's creates a csv writer and update the mapping.", "file_mapping, # type: Dict[Any, DictWriter] key, # type: Any dir_path,", "self._force_create_dir: LOGGER.info('Directory exist. Deleting directory {}'.format(path)) shutil.rmtree(path) else: raise RuntimeError('Directory", "there're not only node and relationship, but also it can", "(node_dict[NODE_LABEL], len(node_dict)) file_suffix = '{}_{}'.format(*key) node_writer = self._get_writer(node_dict, self._node_file_mapping, key,", "file_suffix = '{}_{}'.format(*key) node_writer = self._get_writer(node_dict, self._node_file_mapping, key, self._node_dir, file_suffix)", "= Closer() def init(self, conf): # type: (ConfigTree) -> None", "CSV files. 
There are multiple CSV files that this method", "from pyhocon import ConfigTree, ConfigFactory # noqa: F401 from typing", "ConfigFactory.from_dict({ SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR: False }) def __init__(self): # type:", "conf): # type: (ConfigTree) -> None \"\"\" Initializing FsNeo4jCsvLoader by", "'{}_{}_{}'.format(key2[0], key2[1], key2[2]) relation_writer = self._get_writer(relation_dict, self._relation_file_mapping, key2, self._relation_dir, file_suffix)", "while node_dict: key = (node_dict[NODE_LABEL], len(node_dict)) file_suffix = '{}_{}'.format(*key) node_writer", "defined in configuration should not exist. :param conf: :return: \"\"\"", "There are multiple CSV files that this method writes. This", "{}'.format(path)) os.makedirs(path) def _delete_dir(): # type: () -> None if", "return writer LOGGER.info('Creating file for {}'.format(key)) file_out = open('{}/{}.csv'.format(dir_path, file_suffix),", "self._relation_dir, file_suffix) relation_writer.writerow(relation_dict) relation_dict = csv_serializable.next_relation() def _get_writer(self, csv_record_dict, #", "are multiple CSV files that this method writes. This is", "= csv_serializable.next_node() relation_dict = csv_serializable.next_relation() while relation_dict: key2 = (relation_dict[RELATION_START_LABEL],", "{}'.format(path)) shutil.rmtree(path) else: raise RuntimeError('Directory should not exist: {}'.format(path)) os.makedirs(path)", "for both nodes and relations: 1. 
retrieve csv row (a", "-> None LOGGER.info('Closing file IO {}'.format(file_out)) file_out.close() self._closer.register(file_out_close) writer =", "writer.writeheader() file_mapping[key] = writer return writer def close(self): # type:", "None \"\"\" Any closeable callable registered in _closer, it will", "{} # type: Dict[Any, DictWriter] self._relation_file_mapping = {} # type:", "relation_writer = self._get_writer(relation_dict, self._relation_file_mapping, key2, self._relation_dir, file_suffix) relation_writer.writerow(relation_dict) relation_dict =", "'force_create_directory' SHOULD_DELETE_CREATED_DIR = 'delete_created_directories' _DEFAULT_CONFIG = ConfigFactory.from_dict({ SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR:", "= file_mapping.get(key) if writer: return writer LOGGER.info('Creating file for {}'.format(key))", "type: (...) -> DictWriter \"\"\" Finds a writer based on", "Neo4jCsvSerializable into CSV files. There are multiple CSV files that", "# type: Dict[str, Any] file_mapping, # type: Dict[Any, DictWriter] key,", "Note that the directory defined in configuration should not exist.", "write to it. 3. repeat 1 and 2 :param csv_serializable:", "from typing import Dict, Any # noqa: F401 from databuilder.job.base_job", "SHOULD_DELETE_CREATED_DIR = 'delete_created_directories' _DEFAULT_CONFIG = ConfigFactory.from_dict({ SHOULD_DELETE_CREATED_DIR: True, FORCE_CREATE_DIR: False", "key2, self._relation_dir, file_suffix) relation_writer.writerow(relation_dict) relation_dict = csv_serializable.next_relation() def _get_writer(self, csv_record_dict,", "relationship, but also it can also have different nodes, and", "\"\"\" Write node and relationship CSV file(s) that can be", "type: str file_suffix # type: str ): # type: (...)", "file_out.close() self._closer.register(file_out_close) writer = csv.DictWriter(file_out, fieldnames=csv_record_dict.keys(), quoting=csv.QUOTE_NONNUMERIC) writer.writeheader() file_mapping[key] =" ]
[ "= ad_model.kneighbors(autoscaled_x_prediction) knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index) # DataFrame型に変換 mean_of_knn_distance_prediction =", "つに # トレーニングデータでは k 最近傍サンプルの中に自分も含まれ、自分との距離の 0 を除いた距離を考える必要があるため、k_in_knn + 1 個と設定", "axis=1) # オートスケーリング autoscaled_x = (x - x.mean()) / x.std()", "x.mean()) / x.std() # k-NN による AD ad_model = NearestNeighbors(n_neighbors=k_in_knn,", "トレーニングデータでは k 最近傍サンプルの中に自分も含まれ、自分との距離の 0 を除いた距離を考える必要があるため、k_in_knn + 1 個と設定 knn_distance_train, knn_index_train", "DataFrame型に変換 mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1), columns=['mean_of_knn_distance']) # 自分以外の k_in_knn 個の距離の平均", "内となるトレーニングデータの割合。AD のしきい値を決めるときに使用 dataset = pd.read_csv('resin.csv', index_col=0, header=0) x_prediction = pd.read_csv('resin_prediction.csv',", "= (x_prediction - x.mean()) / x.std() # k-NN による AD", "が含まれるようにしきい値を設定 sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) # 距離の平均の小さい順に並び替え ad_threshold = sorted_mean_of_knn_distance_train.iloc[", "# 距離の平均の小さい順に並び替え ad_threshold = sorted_mean_of_knn_distance_train.iloc[ round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1]", "# DataFrame型に変換 mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1), columns=['mean_of_knn_distance']) # k_in_knn 個の距離の平均 mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv')", "標準偏差が 0 の特徴量の削除 deleting_variables = x.columns[x.std() == 0] x =", "pandas as pd from sklearn.neighbors import NearestNeighbors # k-NN k_in_knn", "1 個と設定 knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1) knn_distance_train", "k-NN k_in_knn = 5 # k-NN における k rate_of_training_samples_inside_ad =", "# csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対して、AD の中か外かを判定 inside_ad_flag_prediction = mean_of_knn_distance_prediction <=", "mean_of_knn_distance_prediction <= 
ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_prediction.columns=['inside_ad_flag'] inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv') #", "による AD ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') # AD モデルの宣言 ad_model.fit(autoscaled_x)", "データ分割 y = dataset.iloc[:, 0] # 目的変数 x = dataset.iloc[:,", "AD ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') # AD モデルの宣言 ad_model.fit(autoscaled_x) #", "= x_prediction.drop(deleting_variables, axis=1) # オートスケーリング autoscaled_x = (x - x.mean())", "距離の計算 knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction) knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index) #", "knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1) knn_distance_train = pd.DataFrame(knn_distance_train,", "rate_of_training_samples_inside_ad * 100 % が含まれるようにしきい値を設定 sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) #", "5 # k-NN における k rate_of_training_samples_inside_ad = 0.96 # AD", "x.mean()) / x.std() autoscaled_x_prediction = (x_prediction - x.mean()) / x.std()", "x.columns[x.std() == 0] x = x.drop(deleting_variables, axis=1) x_prediction = x_prediction.drop(deleting_variables,", "# 自分以外の k_in_knn 個の距離の平均 mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # トレーニングデータのサンプルの", "AD 内のサンプルのみ TRUE inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対する", "ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # トレーニングデータのサンプルの rate_of_training_samples_inside_ad * 100 % が含まれるようにしきい値を設定 sorted_mean_of_knn_distance_train =", "index_col=0, header=0) x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0) # データ分割 y", "# k-NN による AD では、トレーニングデータの x を model_ad に格納することに対応 #", "utf-8 -*- 
\"\"\" @author: <NAME> \"\"\" import pandas as pd", "ad_threshold = sorted_mean_of_knn_distance_train.iloc[ round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1] # トレーニングデータに対して、AD", "AD モデルの宣言 ad_model.fit(autoscaled_x) # k-NN による AD では、トレーニングデータの x を", "knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction) knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index) # DataFrame型に変換", "pd.read_csv('resin_prediction.csv', index_col=0, header=0) # データ分割 y = dataset.iloc[:, 0] #", "- x.mean()) / x.std() # k-NN による AD ad_model =", "# トレーニングデータに対して、AD の中か外かを判定 inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold # AD", "-*- \"\"\" @author: <NAME> \"\"\" import pandas as pd from", "knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index) # DataFrame型に変換 mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1),", "# 予測用データに対する k-NN 距離の計算 knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction) knn_distance_prediction =", "= pd.DataFrame(knn_distance_prediction, index=x_prediction.index) # DataFrame型に変換 mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1), columns=['mean_of_knn_distance']) #", "csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対して、AD の中か外かを判定 inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold", "# AD 内のサンプルのみ TRUE inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 #", "ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_prediction.columns=['inside_ad_flag'] inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意", "- x.mean()) / x.std() autoscaled_x_prediction = (x_prediction - x.mean()) /", "sorted_mean_of_knn_distance_train.iloc[ round(autoscaled_x.shape[0] * 
rate_of_training_samples_inside_ad) - 1] # トレーニングデータに対して、AD の中か外かを判定 inside_ad_flag_train", "1:] # 説明変数 # 標準偏差が 0 の特徴量の削除 deleting_variables = x.columns[x.std()", "x_prediction.drop(deleting_variables, axis=1) # オートスケーリング autoscaled_x = (x - x.mean()) /", "<= ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # csv", "= dataset.iloc[:, 1:] # 説明変数 # 標準偏差が 0 の特徴量の削除 deleting_variables", "columns=['mean_of_knn_distance']) # k_in_knn 個の距離の平均 mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対して、AD", "0].sort_values(ascending=True) # 距離の平均の小さい順に並び替え ad_threshold = sorted_mean_of_knn_distance_train.iloc[ round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) -", "autoscaled_x_prediction = (x_prediction - x.mean()) / x.std() # k-NN による", "ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対して、AD の中か外かを判定 inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold #", "<NAME> \"\"\" import pandas as pd from sklearn.neighbors import NearestNeighbors", "sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) # 距離の平均の小さい順に並び替え ad_threshold = sorted_mean_of_knn_distance_train.iloc[ round(autoscaled_x.shape[0]", "mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対して、AD の中か外かを判定 inside_ad_flag_prediction = mean_of_knn_distance_prediction", "NearestNeighbors # k-NN k_in_knn = 5 # k-NN における k", "= x.drop(deleting_variables, axis=1) x_prediction = x_prediction.drop(deleting_variables, axis=1) # オートスケーリング autoscaled_x", "+ 1 個と設定 knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1)", "ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # csv 
ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意", "自分以外の k_in_knn 個の距離の平均 mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # トレーニングデータのサンプルの rate_of_training_samples_inside_ad", "距離の平均の小さい順に並び替え ad_threshold = sorted_mean_of_knn_distance_train.iloc[ round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1] #", "の中か外かを判定 inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold # AD 内のサンプルのみ TRUE", "pd.DataFrame(knn_distance_prediction, index=x_prediction.index) # DataFrame型に変換 mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1), columns=['mean_of_knn_distance']) # k_in_knn", "coding: utf-8 -*- \"\"\" @author: <NAME> \"\"\" import pandas as", "サンプルごとの k 最近傍サンプルとの距離に加えて、k 最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を 2 つに # トレーニングデータでは k 最近傍サンプルの中に自分も含まれ、自分との距離の", "import NearestNeighbors # k-NN k_in_knn = 5 # k-NN における", "予測用データに対する k-NN 距離の計算 knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction) knn_distance_prediction = pd.DataFrame(knn_distance_prediction,", "= sorted_mean_of_knn_distance_train.iloc[ round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1] # トレーニングデータに対して、AD の中か外かを判定", "index=x_prediction.index) # DataFrame型に変換 mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1), columns=['mean_of_knn_distance']) # k_in_knn 個の距離の平均", "= x.columns[x.std() == 0] x = x.drop(deleting_variables, axis=1) x_prediction =", "/ x.std() # k-NN による AD ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean')", "as pd from sklearn.neighbors import NearestNeighbors # k-NN k_in_knn =", "mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) # 距離の平均の小さい順に並び替え ad_threshold = sorted_mean_of_knn_distance_train.iloc[ round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad)", "inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold # AD 内のサンプルのみ TRUE 
inside_ad_flag_train.columns=['inside_ad_flag']", "# k-NN における k rate_of_training_samples_inside_ad = 0.96 # AD 内となるトレーニングデータの割合。AD", "pd.DataFrame(knn_distance_prediction.mean(axis=1), columns=['mean_of_knn_distance']) # k_in_knn 個の距離の平均 mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 #", "# サンプルごとの k 最近傍サンプルとの距離に加えて、k 最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を 2 つに # トレーニングデータでは k", "= mean_of_knn_distance_prediction <= ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_prediction.columns=['inside_ad_flag'] inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv')", "(x_prediction - x.mean()) / x.std() # k-NN による AD ad_model", "k-NN による AD では、トレーニングデータの x を model_ad に格納することに対応 # サンプルごとの", "sklearn.neighbors import NearestNeighbors # k-NN k_in_knn = 5 # k-NN", "from sklearn.neighbors import NearestNeighbors # k-NN k_in_knn = 5 #", "個の距離の平均 mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # トレーニングデータのサンプルの rate_of_training_samples_inside_ad * 100", "# オートスケーリング autoscaled_x = (x - x.mean()) / x.std() autoscaled_x_prediction", "csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # トレーニングデータのサンプルの rate_of_training_samples_inside_ad * 100 % が含まれるようにしきい値を設定 sorted_mean_of_knn_distance_train", "ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') # AD モデルの宣言 ad_model.fit(autoscaled_x) # k-NN", "では、トレーニングデータの x を model_ad に格納することに対応 # サンプルごとの k 最近傍サンプルとの距離に加えて、k 最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を", "= mean_of_knn_distance_train <= ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv')", "k 最近傍サンプルの中に自分も含まれ、自分との距離の 0 を除いた距離を考える必要があるため、k_in_knn + 1 個と設定 knn_distance_train, knn_index_train =", "rate_of_training_samples_inside_ad) - 1] # トレーニングデータに対して、AD の中か外かを判定 inside_ad_flag_train = mean_of_knn_distance_train <=", "を除いた距離を考える必要があるため、k_in_knn + 1 個と設定 
knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn +", "# k_in_knn 個の距離の平均 mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対して、AD の中か外かを判定", "% が含まれるようにしきい値を設定 sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) # 距離の平均の小さい順に並び替え ad_threshold =", "= pd.DataFrame(knn_distance_prediction.mean(axis=1), columns=['mean_of_knn_distance']) # k_in_knn 個の距離の平均 mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意", "1] # トレーニングデータに対して、AD の中か外かを判定 inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold #", "説明変数 # 標準偏差が 0 の特徴量の削除 deleting_variables = x.columns[x.std() == 0]", "k 最近傍サンプルとの距離に加えて、k 最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を 2 つに # トレーニングデータでは k 最近傍サンプルの中に自分も含まれ、自分との距離の 0", "= pd.DataFrame(knn_distance_train, index=autoscaled_x.index) # DataFrame型に変換 mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1), columns=['mean_of_knn_distance'])", "を model_ad に格納することに対応 # サンプルごとの k 最近傍サンプルとの距離に加えて、k 最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を 2 つに", "metric='euclidean') # AD モデルの宣言 ad_model.fit(autoscaled_x) # k-NN による AD では、トレーニングデータの", "1) knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index) # DataFrame型に変換 mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:,", "\"\"\" import pandas as pd from sklearn.neighbors import NearestNeighbors #", "k-NN による AD ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') # AD モデルの宣言", "NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') # AD モデルの宣言 ad_model.fit(autoscaled_x) # k-NN による AD", "オートスケーリング autoscaled_x = (x - x.mean()) / x.std() autoscaled_x_prediction =", "deleting_variables = x.columns[x.std() == 0] x = x.drop(deleting_variables, axis=1) x_prediction", "ad_model.kneighbors(autoscaled_x_prediction) 
knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index) # DataFrame型に変換 mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1),", "columns=['mean_of_knn_distance']) # 自分以外の k_in_knn 個の距離の平均 mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 #", "100 % が含まれるようにしきい値を設定 sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) # 距離の平均の小さい順に並び替え ad_threshold", "= mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) # 距離の平均の小さい順に並び替え ad_threshold = sorted_mean_of_knn_distance_train.iloc[ round(autoscaled_x.shape[0] *", "ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対する k-NN 距離の計算 knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction) knn_distance_prediction", "# 標準偏差が 0 の特徴量の削除 deleting_variables = x.columns[x.std() == 0] x", "における k rate_of_training_samples_inside_ad = 0.96 # AD 内となるトレーニングデータの割合。AD のしきい値を決めるときに使用 dataset", "= 5 # k-NN における k rate_of_training_samples_inside_ad = 0.96 #", "header=0) # データ分割 y = dataset.iloc[:, 0] # 目的変数 x", "model_ad に格納することに対応 # サンプルごとの k 最近傍サンプルとの距離に加えて、k 最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を 2 つに #", "最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を 2 つに # トレーニングデータでは k 最近傍サンプルの中に自分も含まれ、自分との距離の 0 を除いた距離を考える必要があるため、k_in_knn +", "x = x.drop(deleting_variables, axis=1) x_prediction = x_prediction.drop(deleting_variables, axis=1) # オートスケーリング", "= pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1), columns=['mean_of_knn_distance']) # 自分以外の k_in_knn 個の距離の平均 mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') #", "1:].mean(axis=1), columns=['mean_of_knn_distance']) # 自分以外の k_in_knn 個の距離の平均 mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意", "- 1] # トレーニングデータに対して、AD の中か外かを判定 inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold", "x_prediction = 
x_prediction.drop(deleting_variables, axis=1) # オートスケーリング autoscaled_x = (x -", "# csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対する k-NN 距離の計算 knn_distance_prediction, knn_index_prediction =", "目的変数 x = dataset.iloc[:, 1:] # 説明変数 # 標準偏差が 0", "= NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') # AD モデルの宣言 ad_model.fit(autoscaled_x) # k-NN による", "0 を除いた距離を考える必要があるため、k_in_knn + 1 個と設定 knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn", "mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # トレーニングデータのサンプルの rate_of_training_samples_inside_ad * 100 %", "k_in_knn = 5 # k-NN における k rate_of_training_samples_inside_ad = 0.96", "# 目的変数 x = dataset.iloc[:, 1:] # 説明変数 # 標準偏差が", "x.drop(deleting_variables, axis=1) x_prediction = x_prediction.drop(deleting_variables, axis=1) # オートスケーリング autoscaled_x =", "# k-NN k_in_knn = 5 # k-NN における k rate_of_training_samples_inside_ad", "予測用データに対して、AD の中か外かを判定 inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold # AD 内のサンプルのみ", "= 0.96 # AD 内となるトレーニングデータの割合。AD のしきい値を決めるときに使用 dataset = pd.read_csv('resin.csv', index_col=0,", "0 の特徴量の削除 deleting_variables = x.columns[x.std() == 0] x = x.drop(deleting_variables,", "inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_prediction.columns=['inside_ad_flag']", "import pandas as pd from sklearn.neighbors import NearestNeighbors # k-NN", "k-NN における k rate_of_training_samples_inside_ad = 0.96 # AD 内となるトレーニングデータの割合。AD のしきい値を決めるときに使用", "rate_of_training_samples_inside_ad = 0.96 # AD 内となるトレーニングデータの割合。AD のしきい値を決めるときに使用 dataset = pd.read_csv('resin.csv',", "のしきい値を決めるときに使用 dataset = pd.read_csv('resin.csv', index_col=0, header=0) x_prediction = pd.read_csv('resin_prediction.csv', index_col=0,", "DataFrame型に変換 mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1), 
columns=['mean_of_knn_distance']) # k_in_knn 個の距離の平均 mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') #", "# データ分割 y = dataset.iloc[:, 0] # 目的変数 x =", "# AD 内となるトレーニングデータの割合。AD のしきい値を決めるときに使用 dataset = pd.read_csv('resin.csv', index_col=0, header=0) x_prediction", "0] # 目的変数 x = dataset.iloc[:, 1:] # 説明変数 #", "0.96 # AD 内となるトレーニングデータの割合。AD のしきい値を決めるときに使用 dataset = pd.read_csv('resin.csv', index_col=0, header=0)", "# k-NN による AD ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') # AD", "ad_model.fit(autoscaled_x) # k-NN による AD では、トレーニングデータの x を model_ad に格納することに対応", "inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対する k-NN 距離の計算 knn_distance_prediction, knn_index_prediction", "knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction) knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index) # DataFrame型に変換 mean_of_knn_distance_prediction", "# -*- coding: utf-8 -*- \"\"\" @author: <NAME> \"\"\" import", "トレーニングデータに対して、AD の中か外かを判定 inside_ad_flag_train = mean_of_knn_distance_train <= ad_threshold # AD 内のサンプルのみ", "# 説明変数 # 標準偏差が 0 の特徴量の削除 deleting_variables = x.columns[x.std() ==", "による AD では、トレーニングデータの x を model_ad に格納することに対応 # サンプルごとの k", "内のサンプルのみ TRUE inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対する k-NN", "AD 内となるトレーニングデータの割合。AD のしきい値を決めるときに使用 dataset = pd.read_csv('resin.csv', index_col=0, header=0) x_prediction =", "n_neighbors=k_in_knn + 1) knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index) # DataFrame型に変換 mean_of_knn_distance_train", "* 100 % が含まれるようにしきい値を設定 sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True) # 距離の平均の小さい順に並び替え", "k-NN 距離の計算 knn_distance_prediction, knn_index_prediction = 
ad_model.kneighbors(autoscaled_x_prediction) knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index)", "= dataset.iloc[:, 0] # 目的変数 x = dataset.iloc[:, 1:] #", "の特徴量の削除 deleting_variables = x.columns[x.std() == 0] x = x.drop(deleting_variables, axis=1)", "k_in_knn 個の距離の平均 mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対して、AD の中か外かを判定 inside_ad_flag_prediction", "\"\"\" @author: <NAME> \"\"\" import pandas as pd from sklearn.neighbors", "(x - x.mean()) / x.std() autoscaled_x_prediction = (x_prediction - x.mean())", "knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1) knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index)", "= pd.read_csv('resin_prediction.csv', index_col=0, header=0) # データ分割 y = dataset.iloc[:, 0]", "0] x = x.drop(deleting_variables, axis=1) x_prediction = x_prediction.drop(deleting_variables, axis=1) #", "に格納することに対応 # サンプルごとの k 最近傍サンプルとの距離に加えて、k 最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を 2 つに # トレーニングデータでは", "pd.DataFrame(knn_distance_train, index=autoscaled_x.index) # DataFrame型に変換 mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1), columns=['mean_of_knn_distance']) #", "y = dataset.iloc[:, 0] # 目的変数 x = dataset.iloc[:, 1:]", "x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0) # データ分割 y = dataset.iloc[:,", "k_in_knn 個の距離の平均 mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # トレーニングデータのサンプルの rate_of_training_samples_inside_ad *", "* rate_of_training_samples_inside_ad) - 1] # トレーニングデータに対して、AD の中か外かを判定 inside_ad_flag_train = mean_of_knn_distance_train", "knn_distance_prediction = pd.DataFrame(knn_distance_prediction, index=x_prediction.index) # DataFrame型に変換 mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1), columns=['mean_of_knn_distance'])", 
"個と設定 knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1) knn_distance_train =", "AD では、トレーニングデータの x を model_ad に格納することに対応 # サンプルごとの k 最近傍サンプルとの距離に加えて、k", "index_col=0, header=0) # データ分割 y = dataset.iloc[:, 0] # 目的変数", "axis=1) x_prediction = x_prediction.drop(deleting_variables, axis=1) # オートスケーリング autoscaled_x = (x", "/ x.std() autoscaled_x_prediction = (x_prediction - x.mean()) / x.std() #", "dataset = pd.read_csv('resin.csv', index_col=0, header=0) x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0)", "pd.read_csv('resin.csv', index_col=0, header=0) x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0) # データ分割", "mean_of_knn_distance_train <= ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') #", "index=autoscaled_x.index) # DataFrame型に変換 mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1), columns=['mean_of_knn_distance']) # 自分以外の", "round(autoscaled_x.shape[0] * rate_of_training_samples_inside_ad) - 1] # トレーニングデータに対して、AD の中か外かを判定 inside_ad_flag_train =", "TRUE inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対する k-NN 距離の計算", "inside_ad_flag_train.columns=['inside_ad_flag'] inside_ad_flag_train.to_csv('inside_ad_flag_train_knn.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対する k-NN 距離の計算 knn_distance_prediction,", "# 予測用データに対して、AD の中か外かを判定 inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold # AD", "2 つに # トレーニングデータでは k 最近傍サンプルの中に自分も含まれ、自分との距離の 0 を除いた距離を考える必要があるため、k_in_knn + 1", "の中か外かを判定 inside_ad_flag_prediction = mean_of_knn_distance_prediction <= ad_threshold # AD 内のサンプルのみ TRUE", "dataset.iloc[:, 1:] # 説明変数 # 標準偏差が 0 の特徴量の削除 deleting_variables =", "# csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # トレーニングデータのサンプルの 
rate_of_training_samples_inside_ad * 100 % が含まれるようにしきい値を設定", "# トレーニングデータでは k 最近傍サンプルの中に自分も含まれ、自分との距離の 0 を除いた距離を考える必要があるため、k_in_knn + 1 個と設定 knn_distance_train,", "= ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1) knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index) #", "x.std() autoscaled_x_prediction = (x_prediction - x.mean()) / x.std() # k-NN", "+ 1) knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index) # DataFrame型に変換 mean_of_knn_distance_train =", "個の距離の平均 mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') # csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対して、AD の中か外かを判定 inside_ad_flag_prediction =", "mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1), columns=['mean_of_knn_distance']) # 自分以外の k_in_knn 個の距離の平均 mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv')", "最近傍サンプルの中に自分も含まれ、自分との距離の 0 を除いた距離を考える必要があるため、k_in_knn + 1 個と設定 knn_distance_train, knn_index_train = ad_model.kneighbors(autoscaled_x,", "# トレーニングデータのサンプルの rate_of_training_samples_inside_ad * 100 % が含まれるようにしきい値を設定 sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:,", "pd from sklearn.neighbors import NearestNeighbors # k-NN k_in_knn = 5", "x を model_ad に格納することに対応 # サンプルごとの k 最近傍サンプルとの距離に加えて、k 最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を 2", "ad_model.kneighbors(autoscaled_x, n_neighbors=k_in_knn + 1) knn_distance_train = pd.DataFrame(knn_distance_train, index=autoscaled_x.index) # DataFrame型に変換", "<= ad_threshold # AD 内のサンプルのみ TRUE inside_ad_flag_prediction.columns=['inside_ad_flag'] inside_ad_flag_prediction.to_csv('inside_ad_flag_prediction_knn.csv') # csv", "pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1), columns=['mean_of_knn_distance']) # 自分以外の k_in_knn 個の距離の平均 mean_of_knn_distance_train.to_csv('mean_of_knn_distance_train.csv') # csv", "header=0) x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0) # データ分割 y =", 
"dataset.iloc[:, 0] # 目的変数 x = dataset.iloc[:, 1:] # 説明変数", "@author: <NAME> \"\"\" import pandas as pd from sklearn.neighbors import", "-*- coding: utf-8 -*- \"\"\" @author: <NAME> \"\"\" import pandas", "最近傍サンプルとの距離に加えて、k 最近傍サンプルのインデックス番号も一緒に出力されるため、出力用の変数を 2 つに # トレーニングデータでは k 最近傍サンプルの中に自分も含まれ、自分との距離の 0 を除いた距離を考える必要があるため、k_in_knn", "k rate_of_training_samples_inside_ad = 0.96 # AD 内となるトレーニングデータの割合。AD のしきい値を決めるときに使用 dataset =", "mean_of_knn_distance_prediction = pd.DataFrame(knn_distance_prediction.mean(axis=1), columns=['mean_of_knn_distance']) # k_in_knn 個の距離の平均 mean_of_knn_distance_prediction.to_csv('mean_of_knn_distance_prediction.csv') # csv", "# AD モデルの宣言 ad_model.fit(autoscaled_x) # k-NN による AD では、トレーニングデータの x", "トレーニングデータのサンプルの rate_of_training_samples_inside_ad * 100 % が含まれるようにしきい値を設定 sorted_mean_of_knn_distance_train = mean_of_knn_distance_train.iloc[:, 0].sort_values(ascending=True)", "x.std() # k-NN による AD ad_model = NearestNeighbors(n_neighbors=k_in_knn, metric='euclidean') #", "= pd.read_csv('resin.csv', index_col=0, header=0) x_prediction = pd.read_csv('resin_prediction.csv', index_col=0, header=0) #", "モデルの宣言 ad_model.fit(autoscaled_x) # k-NN による AD では、トレーニングデータの x を model_ad", "csv ファイルに保存。同じ名前のファイルがあるときは上書きされるため注意 # 予測用データに対する k-NN 距離の計算 knn_distance_prediction, knn_index_prediction = ad_model.kneighbors(autoscaled_x_prediction)", "x = dataset.iloc[:, 1:] # 説明変数 # 標準偏差が 0 の特徴量の削除", "autoscaled_x = (x - x.mean()) / x.std() autoscaled_x_prediction = (x_prediction", "# DataFrame型に変換 mean_of_knn_distance_train = pd.DataFrame(knn_distance_train.iloc[:, 1:].mean(axis=1), columns=['mean_of_knn_distance']) # 自分以外の k_in_knn", "== 0] x = x.drop(deleting_variables, axis=1) x_prediction = x_prediction.drop(deleting_variables, axis=1)", "= (x - x.mean()) / x.std() autoscaled_x_prediction = (x_prediction -" ]
[ "net.addSwitch( 's4', listenPort=6674 ) s5 = net.addSwitch( 's5', listenPort=6675 )", "topology(): \"Create a network.\" net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch", "Link, TCLink #conf_port=50000 conf_ip_1='10.0.0.254' conf_mac_1='11:12:13:14:15:16' def topology(): \"Create a network.\"", "h5) net.addLink(s1, s2) net.addLink(s2, s3) net.addLink(s3, s4) net.addLink(s4, s5) print", "listenPort=6674 ) s5 = net.addSwitch( 's5', listenPort=6675 ) c1 =", "h3.cmd('sysctl -w net.ipv4.ip_forward=1') h3.cmd('python3 listen.py &') h4.cmd('ip route add '+conf_ip_1+'/32", "add '+conf_ip_1+'/32 dev h1-eth0') h1.cmd('sudo arp -i h1-eth0 -s '+conf_ip_1+'", "h2.cmd('sudo arp -i h2-eth0 -s '+conf_ip_1+' '+conf_mac_1) h2.cmd('sysctl -w net.ipv4.ip_forward=1')", "print \"*** Creating nodes\" h1 = net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24'", "s5) print \"*** Starting network\" net.build() h1.cmd('ip route add '+conf_ip_1+'/32", "link=TCLink, switch=OVSKernelSwitch ) print \"*** Creating nodes\" h1 = net.addHost(", "mininet.net import Mininet from mininet.node import Controller, RemoteController, OVSKernelSwitch,UserSwitch #OVSLegacyKernelSwitch,", "network\" net.stop() if __name__ == '__main__': setLogLevel( 'info' ) topology()", "mac='00:00:00:00:00:02', ip='10.0.0.2/24' ) h3 = net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' )", "ip='10.0.0.3/24' ) h4 = net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' ) h5", "net.ipv4.ip_forward=1') h1.cmd('python3 listen.py &') h2.cmd('ip route add '+conf_ip_1+'/32 dev h2-eth0')", "print \"*** Stopping network\" net.stop() if __name__ == '__main__': setLogLevel(", "controller=RemoteController, link=TCLink, switch=OVSKernelSwitch ) print \"*** Creating nodes\" h1 =", "-s '+conf_ip_1+' '+conf_mac_1) h3.cmd('sysctl -w net.ipv4.ip_forward=1') h3.cmd('python3 listen.py &') h4.cmd('ip", "h2) net.addLink(s3, h3) net.addLink(s4, h4) net.addLink(s5, h5) net.addLink(s1, s2) 
net.addLink(s2,", "mininet.log import setLogLevel from mininet.link import Link, TCLink #conf_port=50000 conf_ip_1='10.0.0.254'", "nodes\" h1 = net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' ) h2 =", "route add '+conf_ip_1+'/32 dev h2-eth0') h2.cmd('sudo arp -i h2-eth0 -s", "net.addLink(s1, s2) net.addLink(s2, s3) net.addLink(s3, s4) net.addLink(s4, s5) print \"***", "\"*** Starting network\" net.build() h1.cmd('ip route add '+conf_ip_1+'/32 dev h1-eth0')", "'+conf_mac_1) h4.cmd('sysctl -w net.ipv4.ip_forward=1') h4.cmd('python3 listen.py &') h5.cmd('ip route add", "import Controller, RemoteController, OVSKernelSwitch,UserSwitch #OVSLegacyKernelSwitch, UserSwitch from mininet.cli import CLI", "h2.cmd('ip route add '+conf_ip_1+'/32 dev h2-eth0') h2.cmd('sudo arp -i h2-eth0", "h1-eth0') h1.cmd('sudo arp -i h1-eth0 -s '+conf_ip_1+' '+conf_mac_1) h1.cmd('sysctl -w", "mininet.link import Link, TCLink #conf_port=50000 conf_ip_1='10.0.0.254' conf_mac_1='11:12:13:14:15:16' def topology(): \"Create", "add '+conf_ip_1+'/32 dev h2-eth0') h2.cmd('sudo arp -i h2-eth0 -s '+conf_ip_1+'", "h2-eth0') h2.cmd('sudo arp -i h2-eth0 -s '+conf_ip_1+' '+conf_mac_1) h2.cmd('sysctl -w", "net.addLink(s4, h4) net.addLink(s5, h5) net.addLink(s1, s2) net.addLink(s2, s3) net.addLink(s3, s4)", "import Link, TCLink #conf_port=50000 conf_ip_1='10.0.0.254' conf_mac_1='11:12:13:14:15:16' def topology(): \"Create a", "conf_mac_1='11:12:13:14:15:16' def topology(): \"Create a network.\" net = Mininet( controller=RemoteController,", "setLogLevel from mininet.link import Link, TCLink #conf_port=50000 conf_ip_1='10.0.0.254' conf_mac_1='11:12:13:14:15:16' def", "\"*** Stopping network\" net.stop() if __name__ == '__main__': setLogLevel( 'info'", "mac='00:00:00:00:00:01', ip='10.0.0.1/24' ) h2 = net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' )", "= net.addSwitch( 's2', listenPort=6672 ) s3 = net.addSwitch( 's3', listenPort=6673", "net.addSwitch( 's3', listenPort=6673 ) s4 = 
net.addSwitch( 's4', listenPort=6674 )", "net.addSwitch( 's5', listenPort=6675 ) c1 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1',", "'s2', listenPort=6672 ) s3 = net.addSwitch( 's3', listenPort=6673 ) s4", "-s '+conf_ip_1+' '+conf_mac_1) h5.cmd('sysctl -w net.ipv4.ip_forward=1') h5.cmd('python3 listen.py &') c1.start()", "'+conf_mac_1) h1.cmd('sysctl -w net.ipv4.ip_forward=1') h1.cmd('python3 listen.py &') h2.cmd('ip route add", ") s2.start( [c1] ) s3.start( [c1] ) s4.start( [c1] )", "import setLogLevel from mininet.link import Link, TCLink #conf_port=50000 conf_ip_1='10.0.0.254' conf_mac_1='11:12:13:14:15:16'", "UserSwitch from mininet.cli import CLI from mininet.log import setLogLevel from", "'+conf_ip_1+' '+conf_mac_1) h1.cmd('sysctl -w net.ipv4.ip_forward=1') h1.cmd('python3 listen.py &') h2.cmd('ip route", "&') h2.cmd('ip route add '+conf_ip_1+'/32 dev h2-eth0') h2.cmd('sudo arp -i", "arp -i h4-eth0 -s '+conf_ip_1+' '+conf_mac_1) h4.cmd('sysctl -w net.ipv4.ip_forward=1') h4.cmd('python3", "switch=OVSKernelSwitch ) print \"*** Creating nodes\" h1 = net.addHost( 'h1',", "h3.cmd('sudo arp -i h3-eth0 -s '+conf_ip_1+' '+conf_mac_1) h3.cmd('sysctl -w net.ipv4.ip_forward=1')", "import CLI from mininet.log import setLogLevel from mininet.link import Link,", "network.\" net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch ) print \"***", "port=6633 ) print \"*** Creating links\" net.addLink(s1, h1) net.addLink(s2, h2)", "h1.cmd('sysctl -w net.ipv4.ip_forward=1') h1.cmd('python3 listen.py &') h2.cmd('ip route add '+conf_ip_1+'/32", ") print \"*** Creating nodes\" h1 = net.addHost( 'h1', mac='00:00:00:00:00:01',", "dev h2-eth0') h2.cmd('sudo arp -i h2-eth0 -s '+conf_ip_1+' '+conf_mac_1) h2.cmd('sysctl", "&') h4.cmd('ip route add '+conf_ip_1+'/32 dev h4-eth0') h4.cmd('sudo arp -i", "h4.cmd('ip route add '+conf_ip_1+'/32 dev h4-eth0') h4.cmd('sudo arp -i h4-eth0", "ip='10.0.0.4/24' ) h5 = net.addHost( 'h5', 
mac='00:00:00:00:00:05', ip='10.0.0.5/24' ) s1", "'+conf_mac_1) h5.cmd('sysctl -w net.ipv4.ip_forward=1') h5.cmd('python3 listen.py &') c1.start() s1.start( [c1]", "Mininet from mininet.node import Controller, RemoteController, OVSKernelSwitch,UserSwitch #OVSLegacyKernelSwitch, UserSwitch from", "s2.start( [c1] ) s3.start( [c1] ) s4.start( [c1] ) s5.start(", "Stopping network\" net.stop() if __name__ == '__main__': setLogLevel( 'info' )", "net.addSwitch( 's1', listenPort=6671 ) s2 = net.addSwitch( 's2', listenPort=6672 )", "[c1] ) s4.start( [c1] ) s5.start( [c1] ) print \"***", "h4) net.addLink(s5, h5) net.addLink(s1, s2) net.addLink(s2, s3) net.addLink(s3, s4) net.addLink(s4,", "'+conf_ip_1+'/32 dev h1-eth0') h1.cmd('sudo arp -i h1-eth0 -s '+conf_ip_1+' '+conf_mac_1)", "-w net.ipv4.ip_forward=1') h2.cmd('python3 listen.py &') h3.cmd('ip route add '+conf_ip_1+'/32 dev", "c1 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=6633 ) print \"***", "arp -i h3-eth0 -s '+conf_ip_1+' '+conf_mac_1) h3.cmd('sysctl -w net.ipv4.ip_forward=1') h3.cmd('python3", "= net.addSwitch( 's5', listenPort=6675 ) c1 = net.addController( 'c1', controller=RemoteController,", "conf_ip_1='10.0.0.254' conf_mac_1='11:12:13:14:15:16' def topology(): \"Create a network.\" net = Mininet(", "arp -i h5-eth0 -s '+conf_ip_1+' '+conf_mac_1) h5.cmd('sysctl -w net.ipv4.ip_forward=1') h5.cmd('python3", ") h4 = net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' ) h5 =", "mac='00:00:00:00:00:03', ip='10.0.0.3/24' ) h4 = net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' )", "print \"*** Creating links\" net.addLink(s1, h1) net.addLink(s2, h2) net.addLink(s3, h3)", "dev h3-eth0') h3.cmd('sudo arp -i h3-eth0 -s '+conf_ip_1+' '+conf_mac_1) h3.cmd('sysctl", "def topology(): \"Create a network.\" net = Mininet( controller=RemoteController, link=TCLink,", "Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch ) print \"*** Creating nodes\" h1", 
"'s5', listenPort=6675 ) c1 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=6633", "'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' ) h2 = net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24'", "h4-eth0 -s '+conf_ip_1+' '+conf_mac_1) h4.cmd('sysctl -w net.ipv4.ip_forward=1') h4.cmd('python3 listen.py &')", "s1.start( [c1] ) s2.start( [c1] ) s3.start( [c1] ) s4.start(", "from mininet.link import Link, TCLink #conf_port=50000 conf_ip_1='10.0.0.254' conf_mac_1='11:12:13:14:15:16' def topology():", "'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24' ) s1 = net.addSwitch( 's1', listenPort=6671 )", "-i h1-eth0 -s '+conf_ip_1+' '+conf_mac_1) h1.cmd('sysctl -w net.ipv4.ip_forward=1') h1.cmd('python3 listen.py", "ip='10.0.0.2/24' ) h3 = net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' ) h4", "'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' ) h5 = net.addHost( 'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24'", "net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' ) h3 = net.addHost( 'h3', mac='00:00:00:00:00:03',", "-w net.ipv4.ip_forward=1') h1.cmd('python3 listen.py &') h2.cmd('ip route add '+conf_ip_1+'/32 dev", "h4-eth0') h4.cmd('sudo arp -i h4-eth0 -s '+conf_ip_1+' '+conf_mac_1) h4.cmd('sysctl -w", "'+conf_ip_1+'/32 dev h5-eth0') h5.cmd('sudo arp -i h5-eth0 -s '+conf_ip_1+' '+conf_mac_1)", "Starting network\" net.build() h1.cmd('ip route add '+conf_ip_1+'/32 dev h1-eth0') h1.cmd('sudo", "from mininet.node import Controller, RemoteController, OVSKernelSwitch,UserSwitch #OVSLegacyKernelSwitch, UserSwitch from mininet.cli", ") print \"*** Creating links\" net.addLink(s1, h1) net.addLink(s2, h2) net.addLink(s3,", "dev h5-eth0') h5.cmd('sudo arp -i h5-eth0 -s '+conf_ip_1+' '+conf_mac_1) h5.cmd('sysctl", "-i h3-eth0 -s '+conf_ip_1+' '+conf_mac_1) h3.cmd('sysctl -w net.ipv4.ip_forward=1') h3.cmd('python3 listen.py", "from mininet.cli import CLI from mininet.log import setLogLevel from mininet.link", "h1-eth0 -s '+conf_ip_1+' 
'+conf_mac_1) h1.cmd('sysctl -w net.ipv4.ip_forward=1') h1.cmd('python3 listen.py &')", "CLI\" CLI( net ) print \"*** Stopping network\" net.stop() if", "#conf_port=50000 conf_ip_1='10.0.0.254' conf_mac_1='11:12:13:14:15:16' def topology(): \"Create a network.\" net =", "= net.addSwitch( 's1', listenPort=6671 ) s2 = net.addSwitch( 's2', listenPort=6672", ") s2 = net.addSwitch( 's2', listenPort=6672 ) s3 = net.addSwitch(", "s4 = net.addSwitch( 's4', listenPort=6674 ) s5 = net.addSwitch( 's5',", "listenPort=6673 ) s4 = net.addSwitch( 's4', listenPort=6674 ) s5 =", "h1.cmd('ip route add '+conf_ip_1+'/32 dev h1-eth0') h1.cmd('sudo arp -i h1-eth0", "h1) net.addLink(s2, h2) net.addLink(s3, h3) net.addLink(s4, h4) net.addLink(s5, h5) net.addLink(s1,", "net.addHost( 'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24' ) s1 = net.addSwitch( 's1', listenPort=6671", "'s4', listenPort=6674 ) s5 = net.addSwitch( 's5', listenPort=6675 ) c1", "net.addLink(s3, s4) net.addLink(s4, s5) print \"*** Starting network\" net.build() h1.cmd('ip", "print \"*** Starting network\" net.build() h1.cmd('ip route add '+conf_ip_1+'/32 dev", "route add '+conf_ip_1+'/32 dev h1-eth0') h1.cmd('sudo arp -i h1-eth0 -s", "h5.cmd('python3 listen.py &') c1.start() s1.start( [c1] ) s2.start( [c1] )", "c1.start() s1.start( [c1] ) s2.start( [c1] ) s3.start( [c1] )", "-s '+conf_ip_1+' '+conf_mac_1) h1.cmd('sysctl -w net.ipv4.ip_forward=1') h1.cmd('python3 listen.py &') h2.cmd('ip", "net.addSwitch( 's2', listenPort=6672 ) s3 = net.addSwitch( 's3', listenPort=6673 )", "net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch ) print \"*** Creating", "s5.start( [c1] ) print \"*** Running CLI\" CLI( net )", "= net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' ) h5 = net.addHost( 'h5',", "h4 = net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' ) h5 = net.addHost(", "Running CLI\" CLI( net ) print \"*** Stopping network\" net.stop()", "s3) net.addLink(s3, s4) net.addLink(s4, 
s5) print \"*** Starting network\" net.build()", "[c1] ) s5.start( [c1] ) print \"*** Running CLI\" CLI(", "'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' ) h4 = net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24'", "OVSKernelSwitch,UserSwitch #OVSLegacyKernelSwitch, UserSwitch from mininet.cli import CLI from mininet.log import", "-s '+conf_ip_1+' '+conf_mac_1) h4.cmd('sysctl -w net.ipv4.ip_forward=1') h4.cmd('python3 listen.py &') h5.cmd('ip", "s3.start( [c1] ) s4.start( [c1] ) s5.start( [c1] ) print", "net.build() h1.cmd('ip route add '+conf_ip_1+'/32 dev h1-eth0') h1.cmd('sudo arp -i", "controller=RemoteController, ip='127.0.0.1', port=6633 ) print \"*** Creating links\" net.addLink(s1, h1)", "listen.py &') h4.cmd('ip route add '+conf_ip_1+'/32 dev h4-eth0') h4.cmd('sudo arp", "\"Create a network.\" net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch )", "'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' ) h3 = net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24'", "h3-eth0 -s '+conf_ip_1+' '+conf_mac_1) h3.cmd('sysctl -w net.ipv4.ip_forward=1') h3.cmd('python3 listen.py &')", "&') c1.start() s1.start( [c1] ) s2.start( [c1] ) s3.start( [c1]", "ip='10.0.0.5/24' ) s1 = net.addSwitch( 's1', listenPort=6671 ) s2 =", ") h3 = net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' ) h4 =", ") s1 = net.addSwitch( 's1', listenPort=6671 ) s2 = net.addSwitch(", "s1 = net.addSwitch( 's1', listenPort=6671 ) s2 = net.addSwitch( 's2',", "h1.cmd('sudo arp -i h1-eth0 -s '+conf_ip_1+' '+conf_mac_1) h1.cmd('sysctl -w net.ipv4.ip_forward=1')", "net.addLink(s2, h2) net.addLink(s3, h3) net.addLink(s4, h4) net.addLink(s5, h5) net.addLink(s1, s2)", "= net.addHost( 'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24' ) s1 = net.addSwitch( 's1',", "a network.\" net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch ) print", "&') h3.cmd('ip route add '+conf_ip_1+'/32 dev h3-eth0') h3.cmd('sudo arp -i", "= Mininet( 
controller=RemoteController, link=TCLink, switch=OVSKernelSwitch ) print \"*** Creating nodes\"", ") s3.start( [c1] ) s4.start( [c1] ) s5.start( [c1] )", "add '+conf_ip_1+'/32 dev h5-eth0') h5.cmd('sudo arp -i h5-eth0 -s '+conf_ip_1+'", "= net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' ) h2 = net.addHost( 'h2',", "-i h4-eth0 -s '+conf_ip_1+' '+conf_mac_1) h4.cmd('sysctl -w net.ipv4.ip_forward=1') h4.cmd('python3 listen.py", ") print \"*** Stopping network\" net.stop() if __name__ == '__main__':", "listen.py &') c1.start() s1.start( [c1] ) s2.start( [c1] ) s3.start(", "h3.cmd('ip route add '+conf_ip_1+'/32 dev h3-eth0') h3.cmd('sudo arp -i h3-eth0", "-w net.ipv4.ip_forward=1') h4.cmd('python3 listen.py &') h5.cmd('ip route add '+conf_ip_1+'/32 dev", "= net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' ) h3 = net.addHost( 'h3',", "net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' ) h2 = net.addHost( 'h2', mac='00:00:00:00:00:02',", "h2.cmd('python3 listen.py &') h3.cmd('ip route add '+conf_ip_1+'/32 dev h3-eth0') h3.cmd('sudo", "import Mininet from mininet.node import Controller, RemoteController, OVSKernelSwitch,UserSwitch #OVSLegacyKernelSwitch, UserSwitch", "Creating nodes\" h1 = net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' ) h2", "listenPort=6671 ) s2 = net.addSwitch( 's2', listenPort=6672 ) s3 =", "s2 = net.addSwitch( 's2', listenPort=6672 ) s3 = net.addSwitch( 's3',", "h3) net.addLink(s4, h4) net.addLink(s5, h5) net.addLink(s1, s2) net.addLink(s2, s3) net.addLink(s3,", "'+conf_ip_1+'/32 dev h4-eth0') h4.cmd('sudo arp -i h4-eth0 -s '+conf_ip_1+' '+conf_mac_1)", ") c1 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=6633 ) print", "net.addLink(s3, h3) net.addLink(s4, h4) net.addLink(s5, h5) net.addLink(s1, s2) net.addLink(s2, s3)", "[c1] ) s3.start( [c1] ) s4.start( [c1] ) s5.start( [c1]", "add '+conf_ip_1+'/32 dev h3-eth0') h3.cmd('sudo arp -i h3-eth0 -s '+conf_ip_1+'", 
"ip='10.0.0.1/24' ) h2 = net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' ) h3", "net.addLink(s5, h5) net.addLink(s1, s2) net.addLink(s2, s3) net.addLink(s3, s4) net.addLink(s4, s5)", "h5-eth0 -s '+conf_ip_1+' '+conf_mac_1) h5.cmd('sysctl -w net.ipv4.ip_forward=1') h5.cmd('python3 listen.py &')", "'+conf_ip_1+' '+conf_mac_1) h5.cmd('sysctl -w net.ipv4.ip_forward=1') h5.cmd('python3 listen.py &') c1.start() s1.start(", "'+conf_ip_1+' '+conf_mac_1) h4.cmd('sysctl -w net.ipv4.ip_forward=1') h4.cmd('python3 listen.py &') h5.cmd('ip route", "net.addLink(s2, s3) net.addLink(s3, s4) net.addLink(s4, s5) print \"*** Starting network\"", "net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' ) h4 = net.addHost( 'h4', mac='00:00:00:00:00:04',", "mac='00:00:00:00:00:05', ip='10.0.0.5/24' ) s1 = net.addSwitch( 's1', listenPort=6671 ) s2", "dev h4-eth0') h4.cmd('sudo arp -i h4-eth0 -s '+conf_ip_1+' '+conf_mac_1) h4.cmd('sysctl", "listen.py &') h5.cmd('ip route add '+conf_ip_1+'/32 dev h5-eth0') h5.cmd('sudo arp", "[c1] ) print \"*** Running CLI\" CLI( net ) print", ") s4.start( [c1] ) s5.start( [c1] ) print \"*** Running", "listenPort=6672 ) s3 = net.addSwitch( 's3', listenPort=6673 ) s4 =", "\"*** Creating nodes\" h1 = net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' )", "h5 = net.addHost( 'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24' ) s1 = net.addSwitch(", "s4) net.addLink(s4, s5) print \"*** Starting network\" net.build() h1.cmd('ip route", "net.addLink(s4, s5) print \"*** Starting network\" net.build() h1.cmd('ip route add", "arp -i h2-eth0 -s '+conf_ip_1+' '+conf_mac_1) h2.cmd('sysctl -w net.ipv4.ip_forward=1') h2.cmd('python3", "-s '+conf_ip_1+' '+conf_mac_1) h2.cmd('sysctl -w net.ipv4.ip_forward=1') h2.cmd('python3 listen.py &') h3.cmd('ip", "h3 = net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' ) h4 = net.addHost(", "'+conf_ip_1+' '+conf_mac_1) h3.cmd('sysctl -w net.ipv4.ip_forward=1') h3.cmd('python3 listen.py &') h4.cmd('ip 
route", "-i h5-eth0 -s '+conf_ip_1+' '+conf_mac_1) h5.cmd('sysctl -w net.ipv4.ip_forward=1') h5.cmd('python3 listen.py", "h2-eth0 -s '+conf_ip_1+' '+conf_mac_1) h2.cmd('sysctl -w net.ipv4.ip_forward=1') h2.cmd('python3 listen.py &')", "s4.start( [c1] ) s5.start( [c1] ) print \"*** Running CLI\"", "'c1', controller=RemoteController, ip='127.0.0.1', port=6633 ) print \"*** Creating links\" net.addLink(s1,", "&') h5.cmd('ip route add '+conf_ip_1+'/32 dev h5-eth0') h5.cmd('sudo arp -i", "CLI( net ) print \"*** Stopping network\" net.stop() if __name__", "'+conf_mac_1) h2.cmd('sysctl -w net.ipv4.ip_forward=1') h2.cmd('python3 listen.py &') h3.cmd('ip route add", "mininet.node import Controller, RemoteController, OVSKernelSwitch,UserSwitch #OVSLegacyKernelSwitch, UserSwitch from mininet.cli import", "h1.cmd('python3 listen.py &') h2.cmd('ip route add '+conf_ip_1+'/32 dev h2-eth0') h2.cmd('sudo", "s2) net.addLink(s2, s3) net.addLink(s3, s4) net.addLink(s4, s5) print \"*** Starting", "print \"*** Running CLI\" CLI( net ) print \"*** Stopping", "net ) print \"*** Stopping network\" net.stop() if __name__ ==", "= net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=6633 ) print \"*** Creating", "RemoteController, OVSKernelSwitch,UserSwitch #OVSLegacyKernelSwitch, UserSwitch from mininet.cli import CLI from mininet.log", "= net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' ) h4 = net.addHost( 'h4',", "arp -i h1-eth0 -s '+conf_ip_1+' '+conf_mac_1) h1.cmd('sysctl -w net.ipv4.ip_forward=1') h1.cmd('python3", "h4.cmd('python3 listen.py &') h5.cmd('ip route add '+conf_ip_1+'/32 dev h5-eth0') h5.cmd('sudo", "net.ipv4.ip_forward=1') h5.cmd('python3 listen.py &') c1.start() s1.start( [c1] ) s2.start( [c1]", "route add '+conf_ip_1+'/32 dev h5-eth0') h5.cmd('sudo arp -i h5-eth0 -s", "links\" net.addLink(s1, h1) net.addLink(s2, h2) net.addLink(s3, h3) net.addLink(s4, h4) net.addLink(s5,", "h5.cmd('ip route add '+conf_ip_1+'/32 dev h5-eth0') h5.cmd('sudo 
arp -i h5-eth0", "h3.cmd('python3 listen.py &') h4.cmd('ip route add '+conf_ip_1+'/32 dev h4-eth0') h4.cmd('sudo", "mac='00:00:00:00:00:04', ip='10.0.0.4/24' ) h5 = net.addHost( 'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24' )", "'+conf_ip_1+'/32 dev h2-eth0') h2.cmd('sudo arp -i h2-eth0 -s '+conf_ip_1+' '+conf_mac_1)", "'+conf_ip_1+'/32 dev h3-eth0') h3.cmd('sudo arp -i h3-eth0 -s '+conf_ip_1+' '+conf_mac_1)", "mininet.cli import CLI from mininet.log import setLogLevel from mininet.link import", "net.ipv4.ip_forward=1') h2.cmd('python3 listen.py &') h3.cmd('ip route add '+conf_ip_1+'/32 dev h3-eth0')", "#OVSLegacyKernelSwitch, UserSwitch from mininet.cli import CLI from mininet.log import setLogLevel", "net.ipv4.ip_forward=1') h4.cmd('python3 listen.py &') h5.cmd('ip route add '+conf_ip_1+'/32 dev h5-eth0')", "'+conf_ip_1+' '+conf_mac_1) h2.cmd('sysctl -w net.ipv4.ip_forward=1') h2.cmd('python3 listen.py &') h3.cmd('ip route", ") s4 = net.addSwitch( 's4', listenPort=6674 ) s5 = net.addSwitch(", "= net.addSwitch( 's4', listenPort=6674 ) s5 = net.addSwitch( 's5', listenPort=6675", "net.addLink(s1, h1) net.addLink(s2, h2) net.addLink(s3, h3) net.addLink(s4, h4) net.addLink(s5, h5)", "'s1', listenPort=6671 ) s2 = net.addSwitch( 's2', listenPort=6672 ) s3", "dev h1-eth0') h1.cmd('sudo arp -i h1-eth0 -s '+conf_ip_1+' '+conf_mac_1) h1.cmd('sysctl", "h4.cmd('sudo arp -i h4-eth0 -s '+conf_ip_1+' '+conf_mac_1) h4.cmd('sysctl -w net.ipv4.ip_forward=1')", ") print \"*** Running CLI\" CLI( net ) print \"***", "Creating links\" net.addLink(s1, h1) net.addLink(s2, h2) net.addLink(s3, h3) net.addLink(s4, h4)", "#!/usr/bin/python \"\"\" \"\"\" from mininet.net import Mininet from mininet.node import", "\"\"\" from mininet.net import Mininet from mininet.node import Controller, RemoteController,", "from mininet.net import Mininet from mininet.node import Controller, RemoteController, OVSKernelSwitch,UserSwitch", "listen.py &') h2.cmd('ip route add '+conf_ip_1+'/32 dev h2-eth0') 
h2.cmd('sudo arp", "TCLink #conf_port=50000 conf_ip_1='10.0.0.254' conf_mac_1='11:12:13:14:15:16' def topology(): \"Create a network.\" net", "net.ipv4.ip_forward=1') h3.cmd('python3 listen.py &') h4.cmd('ip route add '+conf_ip_1+'/32 dev h4-eth0')", "h4.cmd('sysctl -w net.ipv4.ip_forward=1') h4.cmd('python3 listen.py &') h5.cmd('ip route add '+conf_ip_1+'/32", "[c1] ) s2.start( [c1] ) s3.start( [c1] ) s4.start( [c1]", ") s5 = net.addSwitch( 's5', listenPort=6675 ) c1 = net.addController(", "= net.addSwitch( 's3', listenPort=6673 ) s4 = net.addSwitch( 's4', listenPort=6674", "add '+conf_ip_1+'/32 dev h4-eth0') h4.cmd('sudo arp -i h4-eth0 -s '+conf_ip_1+'", "'s3', listenPort=6673 ) s4 = net.addSwitch( 's4', listenPort=6674 ) s5", "h5.cmd('sudo arp -i h5-eth0 -s '+conf_ip_1+' '+conf_mac_1) h5.cmd('sysctl -w net.ipv4.ip_forward=1')", "route add '+conf_ip_1+'/32 dev h4-eth0') h4.cmd('sudo arp -i h4-eth0 -s", "h2.cmd('sysctl -w net.ipv4.ip_forward=1') h2.cmd('python3 listen.py &') h3.cmd('ip route add '+conf_ip_1+'/32", "\"*** Creating links\" net.addLink(s1, h1) net.addLink(s2, h2) net.addLink(s3, h3) net.addLink(s4,", "'+conf_mac_1) h3.cmd('sysctl -w net.ipv4.ip_forward=1') h3.cmd('python3 listen.py &') h4.cmd('ip route add", "h2 = net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' ) h3 = net.addHost(", ") s3 = net.addSwitch( 's3', listenPort=6673 ) s4 = net.addSwitch(", ") s5.start( [c1] ) print \"*** Running CLI\" CLI( net", "\"*** Running CLI\" CLI( net ) print \"*** Stopping network\"", "h5-eth0') h5.cmd('sudo arp -i h5-eth0 -s '+conf_ip_1+' '+conf_mac_1) h5.cmd('sysctl -w", "-w net.ipv4.ip_forward=1') h3.cmd('python3 listen.py &') h4.cmd('ip route add '+conf_ip_1+'/32 dev", "s3 = net.addSwitch( 's3', listenPort=6673 ) s4 = net.addSwitch( 's4',", "h1 = net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' ) h2 = net.addHost(", "listen.py &') h3.cmd('ip route add '+conf_ip_1+'/32 dev h3-eth0') h3.cmd('sudo arp", "CLI from mininet.log import 
setLogLevel from mininet.link import Link, TCLink", "s5 = net.addSwitch( 's5', listenPort=6675 ) c1 = net.addController( 'c1',", "net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' ) h5 = net.addHost( 'h5', mac='00:00:00:00:00:05',", "Controller, RemoteController, OVSKernelSwitch,UserSwitch #OVSLegacyKernelSwitch, UserSwitch from mininet.cli import CLI from", "from mininet.log import setLogLevel from mininet.link import Link, TCLink #conf_port=50000", "-i h2-eth0 -s '+conf_ip_1+' '+conf_mac_1) h2.cmd('sysctl -w net.ipv4.ip_forward=1') h2.cmd('python3 listen.py", "h5.cmd('sysctl -w net.ipv4.ip_forward=1') h5.cmd('python3 listen.py &') c1.start() s1.start( [c1] )", "-w net.ipv4.ip_forward=1') h5.cmd('python3 listen.py &') c1.start() s1.start( [c1] ) s2.start(", "network\" net.build() h1.cmd('ip route add '+conf_ip_1+'/32 dev h1-eth0') h1.cmd('sudo arp", "route add '+conf_ip_1+'/32 dev h3-eth0') h3.cmd('sudo arp -i h3-eth0 -s", "\"\"\" \"\"\" from mininet.net import Mininet from mininet.node import Controller,", "listenPort=6675 ) c1 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=6633 )", "net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=6633 ) print \"*** Creating links\"", "h3-eth0') h3.cmd('sudo arp -i h3-eth0 -s '+conf_ip_1+' '+conf_mac_1) h3.cmd('sysctl -w", ") h5 = net.addHost( 'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24' ) s1 =", "ip='127.0.0.1', port=6633 ) print \"*** Creating links\" net.addLink(s1, h1) net.addLink(s2,", ") h2 = net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' ) h3 =" ]
[ "una clase se declara de la siguiente manera class Lamp:", "la clase #antes de empezar una clase se declara de", "turn_on(self): self._is_turned_on = True self._display_image() def turn_off(self): self._is_turned_on = False", "\\ / _|=|_ |_____| ''', ''' ,-. ( ) \\", "#metodo instancia e init es el constructar osea es el", "/ _|=|_ |_____| '''] def __init__(self, is_turned_on): #metodo instancia e", "= False self._display_image() def _display_image(self): if self._is_turned_on: print(self._LAMPS[0]) else: print(self._LAMPS[1])", ",-. ( ) \\ / _|=|_ |_____| '''] def __init__(self,", "self._is_turned_on = True self._display_image() def turn_off(self): self._is_turned_on = False self._display_image()", "empezar una clase se declara de la siguiente manera class", "_|=|_ |_____| ''', ''' ,-. ( ) \\ / _|=|_", "de la siguiente manera class Lamp: _LAMPS = [''' .", "declara de la siguiente manera class Lamp: _LAMPS = ['''", "la siguiente manera class Lamp: _LAMPS = [''' . .", "|_____| ''', ''' ,-. ( ) \\ / _|=|_ |_____|", "osea es el primero que se ejecuta self._is_turned_on = is_turned_on", "--- \\ / _|=|_ |_____| ''', ''' ,-. ( )", "def turn_on(self): self._is_turned_on = True self._display_image() def turn_off(self): self._is_turned_on =", "''' ,-. ( ) \\ / _|=|_ |_____| '''] def", "que se ejecuta self._is_turned_on = is_turned_on def turn_on(self): self._is_turned_on =", "def turn_off(self): self._is_turned_on = False self._display_image() def _display_image(self): if self._is_turned_on:", "de la clase #antes de empezar una clase se declara", "def __init__(self, is_turned_on): #metodo instancia e init es el constructar", "se declara de la siguiente manera class Lamp: _LAMPS =", "_LAMPS = [''' . . | , \\ ' /", ", \\ ' / ` ,-. 
' --- ( )", "el constructar osea es el primero que se ejecuta self._is_turned_on", "self._is_turned_on = is_turned_on def turn_on(self): self._is_turned_on = True self._display_image() def", "is_turned_on): #metodo instancia e init es el constructar osea es", "de empezar una clase se declara de la siguiente manera", "init es el constructar osea es el primero que se", "manera class Lamp: _LAMPS = [''' . . | ,", "= True self._display_image() def turn_off(self): self._is_turned_on = False self._display_image() def", "'''] def __init__(self, is_turned_on): #metodo instancia e init es el", "--- ( ) --- \\ / _|=|_ |_____| ''', '''", "Lamp: _LAMPS = [''' . . | , \\ '", "_|=|_ |_____| '''] def __init__(self, is_turned_on): #metodo instancia e init", "class Lamp: _LAMPS = [''' . . | , \\", "primero que se ejecuta self._is_turned_on = is_turned_on def turn_on(self): self._is_turned_on", "es el primero que se ejecuta self._is_turned_on = is_turned_on def", "= [''' . . | , \\ ' / `", "|_____| '''] def __init__(self, is_turned_on): #metodo instancia e init es", "se ejecuta self._is_turned_on = is_turned_on def turn_on(self): self._is_turned_on = True", "__init__(self, is_turned_on): #metodo instancia e init es el constructar osea", "#antes de empezar una clase se declara de la siguiente", "clase #antes de empezar una clase se declara de la", "siguiente manera class Lamp: _LAMPS = [''' . . |", ") \\ / _|=|_ |_____| '''] def __init__(self, is_turned_on): #metodo", "` ,-. 
' --- ( ) --- \\ / _|=|_", "self._display_image() def turn_off(self): self._is_turned_on = False self._display_image() def _display_image(self): if", "el primero que se ejecuta self._is_turned_on = is_turned_on def turn_on(self):", "constructar osea es el primero que se ejecuta self._is_turned_on =", "instancia e init es el constructar osea es el primero", "<gh_stars>0 #Definicion de la clase #antes de empezar una clase", "#Definicion de la clase #antes de empezar una clase se", "turn_off(self): self._is_turned_on = False self._display_image() def _display_image(self): if self._is_turned_on: print(self._LAMPS[0])", "\\ / _|=|_ |_____| '''] def __init__(self, is_turned_on): #metodo instancia", "clase se declara de la siguiente manera class Lamp: _LAMPS", "\\ ' / ` ,-. ' --- ( ) ---", "''', ''' ,-. ( ) \\ / _|=|_ |_____| ''']", "[''' . . | , \\ ' / ` ,-.", "( ) \\ / _|=|_ |_____| '''] def __init__(self, is_turned_on):", "ejecuta self._is_turned_on = is_turned_on def turn_on(self): self._is_turned_on = True self._display_image()", "is_turned_on def turn_on(self): self._is_turned_on = True self._display_image() def turn_off(self): self._is_turned_on", "' / ` ,-. ' --- ( ) --- \\", "' --- ( ) --- \\ / _|=|_ |_____| ''',", "e init es el constructar osea es el primero que", "/ ` ,-. ' --- ( ) --- \\ /", "| , \\ ' / ` ,-. ' --- (", ",-. ' --- ( ) --- \\ / _|=|_ |_____|", "( ) --- \\ / _|=|_ |_____| ''', ''' ,-.", "/ _|=|_ |_____| ''', ''' ,-. ( ) \\ /", ") --- \\ / _|=|_ |_____| ''', ''' ,-. (", "self._is_turned_on = False self._display_image() def _display_image(self): if self._is_turned_on: print(self._LAMPS[0]) else:", "es el constructar osea es el primero que se ejecuta", ". | , \\ ' / ` ,-. ' ---", "True self._display_image() def turn_off(self): self._is_turned_on = False self._display_image() def _display_image(self):", "= is_turned_on def turn_on(self): self._is_turned_on = True self._display_image() def turn_off(self):", ". . | , \\ ' / ` ,-. '" ]
[ "__future__ import print_function import logging from sqlalchemy import ( Column,", "a bug when adding a column with both a ForeignKey", "import print_function import logging from sqlalchemy import ( Column, ForeignKey,", "from sqlalchemy import ( Column, ForeignKey, Integer, MetaData ) from", "a ForeignKey and a index in SQLite if migrate_engine.name !=", "index in SQLite if migrate_engine.name != 'sqlite': c = Column(\"ldda_id\",", "SQLAlchemy Migrate has a bug when adding a column with", "migrate_engine metadata.reflect() # SQLAlchemy Migrate has a bug when adding", "import ( add_column, drop_column ) log = logging.getLogger(__name__) metadata =", "add_column(c, 'implicitly_converted_dataset_association', metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id') def downgrade(migrate_engine): metadata.bind = migrate_engine metadata.reflect()", "= Column(\"ldda_id\", Integer, ForeignKey(\"library_dataset_dataset_association.id\"), index=True, nullable=True) else: c = Column(\"ldda_id\",", "add 'ldda_id' column to the implicitly_converted_dataset_association table. \"\"\" from __future__", "a index in SQLite if migrate_engine.name != 'sqlite': c =", "else: c = Column(\"ldda_id\", Integer, index=True, nullable=True) add_column(c, 'implicitly_converted_dataset_association', metadata,", "logging from sqlalchemy import ( Column, ForeignKey, Integer, MetaData )", "ForeignKey, Integer, MetaData ) from galaxy.model.migrate.versions.util import ( add_column, drop_column", "index=True, nullable=True) else: c = Column(\"ldda_id\", Integer, index=True, nullable=True) add_column(c,", "in SQLite if migrate_engine.name != 'sqlite': c = Column(\"ldda_id\", Integer,", "add_column, drop_column ) log = logging.getLogger(__name__) metadata = MetaData() def", "script to add 'ldda_id' column to the implicitly_converted_dataset_association table. 
\"\"\"", "= Column(\"ldda_id\", Integer, index=True, nullable=True) add_column(c, 'implicitly_converted_dataset_association', metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id') def", "a column with both a ForeignKey and a index in", "nullable=True) add_column(c, 'implicitly_converted_dataset_association', metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id') def downgrade(migrate_engine): metadata.bind = migrate_engine", "drop_column ) log = logging.getLogger(__name__) metadata = MetaData() def upgrade(migrate_engine):", "def upgrade(migrate_engine): print(__doc__) metadata.bind = migrate_engine metadata.reflect() # SQLAlchemy Migrate", "Column(\"ldda_id\", Integer, index=True, nullable=True) add_column(c, 'implicitly_converted_dataset_association', metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id') def downgrade(migrate_engine):", "metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id') def downgrade(migrate_engine): metadata.bind = migrate_engine metadata.reflect() drop_column('ldda_id', 'implicitly_converted_dataset_association',", "to add 'ldda_id' column to the implicitly_converted_dataset_association table. 
\"\"\" from", "\"\"\" from __future__ import print_function import logging from sqlalchemy import", "( Column, ForeignKey, Integer, MetaData ) from galaxy.model.migrate.versions.util import (", "adding a column with both a ForeignKey and a index", "index_name='ix_implicitly_converted_ds_assoc_ldda_id') def downgrade(migrate_engine): metadata.bind = migrate_engine metadata.reflect() drop_column('ldda_id', 'implicitly_converted_dataset_association', metadata)", "import ( Column, ForeignKey, Integer, MetaData ) from galaxy.model.migrate.versions.util import", "Column, ForeignKey, Integer, MetaData ) from galaxy.model.migrate.versions.util import ( add_column,", "index=True, nullable=True) add_column(c, 'implicitly_converted_dataset_association', metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id') def downgrade(migrate_engine): metadata.bind =", "ForeignKey(\"library_dataset_dataset_association.id\"), index=True, nullable=True) else: c = Column(\"ldda_id\", Integer, index=True, nullable=True)", "bug when adding a column with both a ForeignKey and", "nullable=True) else: c = Column(\"ldda_id\", Integer, index=True, nullable=True) add_column(c, 'implicitly_converted_dataset_association',", "c = Column(\"ldda_id\", Integer, index=True, nullable=True) add_column(c, 'implicitly_converted_dataset_association', metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id')", ") log = logging.getLogger(__name__) metadata = MetaData() def upgrade(migrate_engine): print(__doc__)", "Integer, index=True, nullable=True) add_column(c, 'implicitly_converted_dataset_association', metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id') def downgrade(migrate_engine): metadata.bind", "print_function import logging from sqlalchemy import ( Column, ForeignKey, Integer,", "'sqlite': c = Column(\"ldda_id\", Integer, ForeignKey(\"library_dataset_dataset_association.id\"), index=True, nullable=True) else: c", "metadata.reflect() # SQLAlchemy Migrate has a bug when 
adding a", "= MetaData() def upgrade(migrate_engine): print(__doc__) metadata.bind = migrate_engine metadata.reflect() #", "metadata = MetaData() def upgrade(migrate_engine): print(__doc__) metadata.bind = migrate_engine metadata.reflect()", "!= 'sqlite': c = Column(\"ldda_id\", Integer, ForeignKey(\"library_dataset_dataset_association.id\"), index=True, nullable=True) else:", "Column(\"ldda_id\", Integer, ForeignKey(\"library_dataset_dataset_association.id\"), index=True, nullable=True) else: c = Column(\"ldda_id\", Integer,", "column with both a ForeignKey and a index in SQLite", "= migrate_engine metadata.reflect() # SQLAlchemy Migrate has a bug when", "( add_column, drop_column ) log = logging.getLogger(__name__) metadata = MetaData()", "upgrade(migrate_engine): print(__doc__) metadata.bind = migrate_engine metadata.reflect() # SQLAlchemy Migrate has", "both a ForeignKey and a index in SQLite if migrate_engine.name", "from __future__ import print_function import logging from sqlalchemy import (", "galaxy.model.migrate.versions.util import ( add_column, drop_column ) log = logging.getLogger(__name__) metadata", "'implicitly_converted_dataset_association', metadata, index_name='ix_implicitly_converted_ds_assoc_ldda_id') def downgrade(migrate_engine): metadata.bind = migrate_engine metadata.reflect() drop_column('ldda_id',", "table. \"\"\" from __future__ import print_function import logging from sqlalchemy", "'ldda_id' column to the implicitly_converted_dataset_association table. \"\"\" from __future__ import", "column to the implicitly_converted_dataset_association table. \"\"\" from __future__ import print_function", "logging.getLogger(__name__) metadata = MetaData() def upgrade(migrate_engine): print(__doc__) metadata.bind = migrate_engine", "metadata.bind = migrate_engine metadata.reflect() # SQLAlchemy Migrate has a bug", "ForeignKey and a index in SQLite if migrate_engine.name != 'sqlite':", "to the implicitly_converted_dataset_association table. 
\"\"\" from __future__ import print_function import", "MetaData() def upgrade(migrate_engine): print(__doc__) metadata.bind = migrate_engine metadata.reflect() # SQLAlchemy", "Integer, MetaData ) from galaxy.model.migrate.versions.util import ( add_column, drop_column )", "Migration script to add 'ldda_id' column to the implicitly_converted_dataset_association table.", "import logging from sqlalchemy import ( Column, ForeignKey, Integer, MetaData", ") from galaxy.model.migrate.versions.util import ( add_column, drop_column ) log =", "print(__doc__) metadata.bind = migrate_engine metadata.reflect() # SQLAlchemy Migrate has a", "when adding a column with both a ForeignKey and a", "sqlalchemy import ( Column, ForeignKey, Integer, MetaData ) from galaxy.model.migrate.versions.util", "MetaData ) from galaxy.model.migrate.versions.util import ( add_column, drop_column ) log", "with both a ForeignKey and a index in SQLite if", "= logging.getLogger(__name__) metadata = MetaData() def upgrade(migrate_engine): print(__doc__) metadata.bind =", "and a index in SQLite if migrate_engine.name != 'sqlite': c", "the implicitly_converted_dataset_association table. \"\"\" from __future__ import print_function import logging", "SQLite if migrate_engine.name != 'sqlite': c = Column(\"ldda_id\", Integer, ForeignKey(\"library_dataset_dataset_association.id\"),", "from galaxy.model.migrate.versions.util import ( add_column, drop_column ) log = logging.getLogger(__name__)", "if migrate_engine.name != 'sqlite': c = Column(\"ldda_id\", Integer, ForeignKey(\"library_dataset_dataset_association.id\"), index=True,", "has a bug when adding a column with both a", "implicitly_converted_dataset_association table. 
\"\"\" from __future__ import print_function import logging from", "\"\"\" Migration script to add 'ldda_id' column to the implicitly_converted_dataset_association", "migrate_engine.name != 'sqlite': c = Column(\"ldda_id\", Integer, ForeignKey(\"library_dataset_dataset_association.id\"), index=True, nullable=True)", "log = logging.getLogger(__name__) metadata = MetaData() def upgrade(migrate_engine): print(__doc__) metadata.bind", "# SQLAlchemy Migrate has a bug when adding a column", "Integer, ForeignKey(\"library_dataset_dataset_association.id\"), index=True, nullable=True) else: c = Column(\"ldda_id\", Integer, index=True,", "c = Column(\"ldda_id\", Integer, ForeignKey(\"library_dataset_dataset_association.id\"), index=True, nullable=True) else: c =", "Migrate has a bug when adding a column with both" ]
[ "votes_t_shape = [3, 0, 1, 2] for i in range(6", "i in range(6 - 4): votes_t_shape += [i + 4]", "1, 2] for i in range(6 - 4): votes_t_shape +=", "= [3, 0, 1, 2] for i in range(6 -", "0, 1, 2] for i in range(6 - 4): votes_t_shape", "for i in range(6 - 4): votes_t_shape += [i +", "in range(6 - 4): votes_t_shape += [i + 4] print(votes_t_shape)", "<gh_stars>0 votes_t_shape = [3, 0, 1, 2] for i in", "2] for i in range(6 - 4): votes_t_shape += [i", "[3, 0, 1, 2] for i in range(6 - 4):" ]
[ "[] for v in values: if v is True: v", "0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs 0", "args3 = {} args2['lr-scheduler'] = 'polynomial_decay' args2['warmup-updates'] = 2000 args2['max-update']", "--distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers 2 --criterion cross_entropy --task language_modeling", "250 args2['clip-norm'] = 0.4 #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True,", "as np import itertools import gpuscheduler import argparse import os", "[] for key, values in args3.items(): if isinstance(key, tuple): keyvalues", "in args4: if len(args_prod) == 0: args_prod.append(('', '')) for i,", "[False] #args3['dist-scale'] = [1.0] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3['decoder-embed-dim']", "#key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq') args3[key] = [] #lrkey", "0.0 --attention-dropout 0.0 --activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0", "= [] if len(args4) == 0: args4.append('') for seed in", "time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus) if args.dry:", "True)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True, True, True), (False, False,", "itertools import product from torch.optim.lr_scheduler import OneCycleLR from os.path import", "True)] args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,8, 0.00045)) #args3[key].append((2048,2688,10752,2)) #args3['use-emb-norm'] = [True] #lr", "'uninterruptible' change_dir = 'fairseq_private' repo = 'fairseq_private' exclude = ''", "= [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')]", "= [job_cmd] if rdm.rand(1) <= args.p: jobs.append(job_cmd) s.add_job(logfolder, repo, change_dir,", "1e-8)) # 
adafactor #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits',", "#partition = 'uninterruptible' change_dir = 'fairseq_private' repo = 'fairseq_private' exclude", "'stable-emb', 'no-scale-embedding')] = [(True, True, True), (False, False, False)] #args3[('use-bnb',", "= job_cmd + save_dir cmds = [job_cmd] if rdm.rand(1) <=", "'quantile', 1)] #args2['optimizer'] = 'adafactor' #args2['beta1'] = 0.9 #args2['decay-rate'] =", "[1/512] #args3['prob-quant'] = [False] #args3['dist-scale'] = [1.0] #args3[('percentile-clipping', 'clip-norm')] =", "args2['clip-norm'] = 0.4 #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32,", "8, 'dynamic_tree', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8,", "= 0.999 ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048+256,8192+2048,2)) ##args3[key].append((2048,2688,10752,2)) # #lr = 0.003239", "fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus) if args.dry: for", "[] # 32-bit baseline #args3['optimizer'] = ['adam'] #args3[('percentile-clipping', 'clip-norm')] =", "time_minutes=time_minutes, gpus=gpus) if args.dry: for i, job in enumerate(jobs): print(i,", "'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'quantile', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method',", "os import uuid import hashlib import glob import math from", "0: arg += '{0} '.format(v) else: arg += '--{0} {1}", "v) keyvalues.append(arg) elif isinstance(key, str): keyvalues = [] for v", "constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus) if args.dry: for i, job in", "ckp_name = logfolder #time_hours = 24*2 cores_per_job = 5 mem", "= 0.4608e9 tokens -> optimal batch size 3460 # model", "'polynomial_decay' args2['warmup-updates'] = 2000 args2['max-update'] = 56250 args2['total-num-update'] = 56250", "args2['lr-scheduler'] = 
'polynomial_decay' args2['warmup-updates'] = 2000 args2['max-update'] = 56250 args2['total-num-update']", "'no-scale-embedding', 'optim-bits')] = [(True, True, True, True)] args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,8,", "True: v = '' if v is False: keyvalues.append('') else:", "args3.items(): if isinstance(key, tuple): keyvalues = [] for tups in", "1)] #args3['adam8bits-offset'] = [1/512] #args3['prob-quant'] = [False] #args3['dist-scale'] = [1.0]", "i, v in enumerate(tups): if v is True: v =", "mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus) if args.dry: for i, job", "'blockwise5' constraint = 'volta32gb' # 1024 tokens * 8 update_freq", "job) print('') print('Total jobs', len(jobs)) print('Time hours: {0}'.format(time_hours)) print('GPUs: {0}'.format(gpus))", "'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1), (False, 8,", "# model sizes: 1.92bn, 2.43bn, 1.41bn logfolder = 'adam/cc100/{0}'.format(name) ckp_name", "'optim-bits')] = [(True, True, True, True)] args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,8, 0.00045))", "job_cmd = cmd + arg4 for val in values: job_cmd", "elif isinstance(key, str): keyvalues = [] for v in values:", "#args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) # 8-bit #args3[('percentile-clipping', 'clip-norm')]", "#args2['max-update'] = 56250*4 args2['fp16-scale-window'] = 250 args2['clip-norm'] = 0.4 #args3[('fused',", "56250 #args2['lr-scheduler'] = 'cosine' #args2['warmup-updates'] = 3000 #args2['max-update'] = 56250*4", "+ arg4 for val in values: job_cmd += ' {0}'", "-> optimal batch size 3460 # model sizes: 1.92bn, 2.43bn,", "#lr = 0.003239 + (-0.0001395*math.log(2.43e9)) #args3[lrkey].append((lr, 0.0)) #args2['train-subset'] = 'train11'", "tuple): keyvalues = [] for tups in values: arg =", "True, True, True)] args3[key].append((2048,2048,8192,8, 0.00075)) 
#args3[key].append((2048,2048,8192,8, 0.00045)) #args3[key].append((2048,2688,10752,2)) #args3['use-emb-norm'] =", "i, job in enumerate(jobs): print(i, job) print('') print('Total jobs', len(jobs))", ".format(val) #job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) if not fp16:", "0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32, 'quantile', 1)]", "--log-interval 25 --tokens-per-sample 1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads", "'')) for i, values in enumerate(args_prod): job_cmd = cmd +", "cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus) if", "'learnfair,learnlab' #partition = 'learnfair' #partition = 'uninterruptible' change_dir = 'fairseq_private'", "in enumerate(jobs): print(i, job) print('') print('Total jobs', len(jobs)) print('Time hours:", "optimal batch size 3460 # model sizes: 1.92bn, 2.43bn, 1.41bn", "'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1), (False, 8, 'quantile',", "#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] =", "#lr = 0.003239 + (-0.0001395*math.log(1.92e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 +", "'clip-norm')] = [(100, 0.1)] #args3['decoder-embed-dim'] = [2048+256] #args3['decoder-ffn-embed-dim'] = [8192+2048]", "= new_args jobs = [] if len(args4) == 0: args4.append('')", "--task language_modeling --sample-break-mode none --log-interval 25 --tokens-per-sample 1024 --arch transformer_lm_big", "'update-freq') args3[key] = [] #lrkey = ('lr', 'warmup-init-lr') #args3[lrkey] =", "= [False] #args3['dist-scale'] = [1.0] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]", "if len(key[i]) == 0: arg += '{0} '.format(v) else: arg", 
"--activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs 0 --save-interval-updates", "= '' s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False) fp16 =", "fp16 = True args3 = {} args2['lr-scheduler'] = 'polynomial_decay' args2['warmup-updates']", "#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]#, (False,", "'no-scale-embedding')] = [(True, True)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True, True,", "'stable-emb', 'no-scale-embedding', 'optim-bits')] = [(True, True, True, True)] args3[key].append((2048,2048,8192,8, 0.00075))", "len(args4) == 0: args4.append('') for seed in range(num_seeds): seed =", "'dynamic_tree', 1), (False, 8, 'quantile', 1)] args3['optimizer'] = ['adam'] args3[('use-bnb',", "{0}'.format(checkpoint_dir) job_cmd = job_cmd + save_dir cmds = [job_cmd] if", "= [(False, 8, 'dynamic_tree', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] =", "(-0.0001395*math.log(2.43e9)) #args3[lrkey].append((lr, 0.0)) #args2['train-subset'] = 'train11' args4 = [] args5", "32, 'quantile', 1)] ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,2)) # #lr = 0.003239", "lr*0.1, lr*0.1 + 1e-8)) # adafactor #args3[('percentile-clipping', 'clip-norm')] = [(100,", "'quantile', 1)] #args3['adam8bits-offset'] = [1/512] #args3['prob-quant'] = [False] #args3['dist-scale'] =", "= logfolder #time_hours = 24*2 cores_per_job = 5 mem =", "{0} --distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers 2 --criterion cross_entropy --task", "#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] #args3['adam8bits-offset']", "[(False, 8, 'dynamic_tree', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False,", "jobs.append(job_cmd) s.add_job(logfolder, repo, change_dir, 
cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint,", "gpuscheduler import argparse import os import uuid import hashlib import", "1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus) args2 = {} name =", "False: keyvalues.append('') else: keyvalues.append(' --{0} {1}'.format(key, v)) args_prod.append(keyvalues) if len(args_prod)", "--share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16 --dropout 0.0 --attention-dropout 0.0 --activation-dropout", "= 'fairseq_private' repo = 'fairseq_private' exclude = '' s =", "#args3['dist-scale'] = [1.0] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3['decoder-embed-dim'] =", "' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) if not fp16: job_cmd = job_cmd.replace('--fp16", "'' if v is False: continue if len(key[i]) == 0:", "28 --decoder-attention-heads 16 --dropout 0.0 --attention-dropout 0.0 --activation-dropout 0.0 --activation-fn", "seed_offset = 5 time_hours = 72 time_minutes = 0 #partition", "= 0.9 #args2['decay-rate'] = 0.999 ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048+256,8192+2048,2)) ##args3[key].append((2048,2688,10752,2)) #", "= [] args5 = {} args6 = {} rdm =", "= [(True, True)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True, True, True),", "for i, job in enumerate(jobs): print(i, job) print('') print('Total jobs',", "repo = 'fairseq_private' exclude = '' s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='',", "3460 # model sizes: 1.92bn, 2.43bn, 1.41bn logfolder = 'adam/cc100/{0}'.format(name)", "1)] #args2['optimizer'] = 'adafactor' #args2['beta1'] = 0.9 #args2['decay-rate'] = 0.999", "gpus = 128 cmd = 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port", "= [] 
for v in values: if v is True:", "+ ' --seed {0}'.format(seed) checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) save_dir", "#args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] =", "= '' if v is False: keyvalues.append('') else: keyvalues.append(' --{0}", "= 'volta32gb' # 1024 tokens * 8 update_freq * 56250", ">= 2: args_prod = list(product(*args_prod)) else: new_args = [] if", "--save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus) args2 = {} name", "[(True, 32, 'quantile', 1), (False, 8, 'quantile', 1), (False, 8,", "8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile',", "values: arg = '' for i, v in enumerate(tups): if", "name = 'blockwise5' constraint = 'volta32gb' # 1024 tokens *", "'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] #args3['adam8bits-offset'] =", "import os import uuid import hashlib import glob import math", "= ' --save-dir {0}'.format(checkpoint_dir) job_cmd = job_cmd + save_dir cmds", "gpus) num_seeds = 1 seed_offset = 5 time_hours = 72", "num_seeds = 1 seed_offset = 5 time_hours = 72 time_minutes", "sizes: 1.92bn, 2.43bn, 1.41bn logfolder = 'adam/cc100/{0}'.format(name) ckp_name = logfolder", "'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)]", "1024 tokens * 8 update_freq * 56250 steps = 0.4608e9", "= 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers", "[8192+2048] #args3['max-tokens'] = [3072] #args3['update-freq'] = [2] key = ('max-tokens',", "if len(args_prod) > 0: for arg in args_prod[0]: new_args.append([arg]) args_prod", "True, True), (False, False, False)] #args3[('use-bnb', 'stable-emb', 
'no-scale-embedding')] = [(False,", "logfolder = 'adam/cc100/{0}'.format(name) ckp_name = logfolder #time_hours = 24*2 cores_per_job", "import hashlib import glob import math from itertools import product", "> 8 else gpus) num_seeds = 1 seed_offset = 5", "+ 1e-8)) # adafactor #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused',", "= {} args6 = {} rdm = np.random.RandomState(5345) for key,", "= [(False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] = [(True,", "(False, 8, 'quantile', 1)] args3['optimizer'] = ['adam'] args3[('use-bnb', 'optim-bits')] =", "time_minutes = 0 #partition = 'learnlab,learnfair,scavenge' partition = 'learnfair,learnlab' #partition", "--checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) if not fp16: job_cmd = job_cmd.replace('--fp16 ',", "transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16 --dropout 0.0 --attention-dropout 0.0", "56250 args2['total-num-update'] = 56250 #args2['lr-scheduler'] = 'cosine' #args2['warmup-updates'] = 3000", "#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1)] #args3[('fused',", "0: args_prod.append(('', '')) for i, values in enumerate(args_prod): job_cmd =", "partition = 'learnfair,learnlab' #partition = 'learnfair' #partition = 'uninterruptible' change_dir", "= 'cosine' #args2['warmup-updates'] = 3000 #args2['max-update'] = 56250*4 args2['fp16-scale-window'] =", "[(True, True, True), (False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] =", "= [] if len(args_prod) > 0: for arg in args_prod[0]:", "= ('lr', 'warmup-init-lr') #args3[lrkey] = [] # 32-bit baseline #args3['optimizer']", "= job_cmd.replace('--fp16 ', ' ') job_cmd = job_cmd + '", "+ save_dir cmds = [job_cmd] if rdm.rand(1) <= args.p: jobs.append(job_cmd)", "#lr = 0.003239 + 
(-0.0001395*math.log(1.41e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 +", "args3[('stable-emb', 'no-scale-embedding')] = [(True, True)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True,", "is True: v = '' if v is False: keyvalues.append('')", "relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs 0 --save-interval-updates 1000", "lr*0.1, lr*0.1 + 1e-8)) # 8-bit #args3[('percentile-clipping', 'clip-norm')] = [(100,", "', ' ') job_cmd = job_cmd + ' --seed {0}'.format(seed)", "keyvalues.append(' --{0} {1}'.format(key, v)) args_prod.append(keyvalues) if len(args_prod) >= 2: args_prod", "cores_per_job = 5 mem = 56*(8 if gpus > 8", "len(key[i]) == 0: arg += '{0} '.format(v) else: arg +=", "logfolder #time_hours = 24*2 cores_per_job = 5 mem = 56*(8", "'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1), (False, 8, 'quantile',", "= 'learnfair,learnlab' #partition = 'learnfair' #partition = 'uninterruptible' change_dir =", "False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(False, False, False)] #args3[('use-bnb',", "exclude=exclude, time_minutes=time_minutes, gpus=gpus) if args.dry: for i, job in enumerate(jobs):", "which to select a configuration.') args = parser.parse_args() gpus =", "values: if v is True: v = '' if v", "seed + seed_offset for arg4 in args4: if len(args_prod) ==", "1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1)]", "72 time_minutes = 0 #partition = 'learnlab,learnfair,scavenge' partition = 'learnfair,learnlab'", "8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)] #args3[('fused', 'adam-bits', 'adam8bits-method',", "##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.41e9)) #args3[lrkey].append((lr,", "--no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs 0 
--save-interval-updates 1000 --log-format", "'quantile', 25)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile',", "--criterion cross_entropy --task language_modeling --sample-break-mode none --log-interval 25 --tokens-per-sample 1024", "1 seed_offset = 5 time_hours = 72 time_minutes = 0", "from os.path import join parser = argparse.ArgumentParser(description='Compute script.') parser.add_argument('--dry', action='store_true')", "= [] for tups in values: arg = '' for", "import join parser = argparse.ArgumentParser(description='Compute script.') parser.add_argument('--dry', action='store_true') parser.add_argument('--verbose', action='store_true')", "32-bit baseline #args3['optimizer'] = ['adam'] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]", "exclude = '' s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False) fp16", "128 cmd = 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187 --fp16", "[True] #lr = 0.003239 + (-0.0001395*math.log(2.43e9)) #args3[lrkey].append((lr, 0.0)) #args2['train-subset'] =", "in args3.items(): if isinstance(key, tuple): keyvalues = [] for tups", "for seed in range(num_seeds): seed = seed + seed_offset for", "will be run on: {0}'.format(partition)) print('Run in folder: {0}'.format(change_dir)) if", "time_hours = 72 time_minutes = 0 #partition = 'learnlab,learnfair,scavenge' partition", "{0}'.format(seed) checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) save_dir = ' --save-dir", "script.') parser.add_argument('--dry', action='store_true') parser.add_argument('--verbose', action='store_true') parser.add_argument('--p', type=float, default=1.0, help='Probability with", "args = parser.parse_args() gpus = 128 cmd = 'fairseq-train 
/private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin", "= '' if v is False: continue if len(key[i]) ==", "'--{0} {1} '.format(key[i], v) keyvalues.append(arg) elif isinstance(key, str): keyvalues =", "--log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus) args2 = {} name = 'blockwise5'", "args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,8, 0.00045)) #args3[key].append((2048,2688,10752,2)) #args3['use-emb-norm'] = [True] #lr =", "= ['adam'] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method',", "2000 args2['max-update'] = 56250 args2['total-num-update'] = 56250 #args2['lr-scheduler'] = 'cosine'", "continue if len(key[i]) == 0: arg += '{0} '.format(v) else:", "hours: {0}'.format(time_hours)) print('GPUs: {0}'.format(gpus)) print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/',", "'adam8bits-qfreq')] = [(True, 32, 'quantile', 1), (False, 8, 'quantile', 1),", "args_prod = new_args jobs = [] if len(args4) == 0:", "range(num_seeds): seed = seed + seed_offset for arg4 in args4:", "#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'quantile', 1)] #args3[('fused',", "= [(100, 0.1)] #args3['decoder-embed-dim'] = [2048+256] #args3['decoder-ffn-embed-dim'] = [8192+2048] #args3['max-tokens']", "--fp16 --memory-efficient-fp16 --num-workers 2 --criterion cross_entropy --task language_modeling --sample-break-mode none", "[] if len(args_prod) > 0: for arg in args_prod[0]: new_args.append([arg])", "str): keyvalues = [] for v in values: if v", "enumerate(jobs): print(i, job) print('') print('Total jobs', len(jobs)) print('Time hours: {0}'.format(time_hours))", "1), (False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)] #args3[('fused',", "args3[('use-bnb', 'optim-bits')] = [(True, 8)] args3[('stable-emb', 'no-scale-embedding')] = [(True, True)]", "[(True, 
32, 'quantile', 1)]#, (False, 8, 'quantile', 1), (False, 8,", "+ seed_offset for arg4 in args4: if len(args_prod) == 0:", "/checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) if not fp16: job_cmd = job_cmd.replace('--fp16 ', '", "1)] ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.41e9))", "= True args3 = {} args2['lr-scheduler'] = 'polynomial_decay' args2['warmup-updates'] =", "[(True, 32, 'quantile', 1)] #args3['adam8bits-offset'] = [1/512] #args3['prob-quant'] = [False]", "#args2['lr-scheduler'] = 'cosine' #args2['warmup-updates'] = 3000 #args2['max-update'] = 56250*4 args2['fp16-scale-window']", "'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) save_dir = ' --save-dir {0}'.format(checkpoint_dir) job_cmd = job_cmd", "mem = 56*(8 if gpus > 8 else gpus) num_seeds", "True), (False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(False, False,", "= 'learnfair' #partition = 'uninterruptible' change_dir = 'fairseq_private' repo =", "'update-freq', 'lr') #key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq') args3[key] =", "--sample-break-mode none --log-interval 25 --tokens-per-sample 1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers", "#args3[key].append((2048,2048,8192,8, 0.00045)) #args3[key].append((2048,2688,10752,2)) #args3['use-emb-norm'] = [True] #lr = 0.003239 +", "= 3000 #args2['max-update'] = 56250*4 args2['fp16-scale-window'] = 250 args2['clip-norm'] =", "jobs = [] if len(args4) == 0: args4.append('') for seed", "partition=partition, use_gres=False) fp16 = True args3 = {} args2['lr-scheduler'] =", "enumerate(tups): if v is True: v = '' if v", "join parser = argparse.ArgumentParser(description='Compute script.') parser.add_argument('--dry', action='store_true') parser.add_argument('--verbose', 
action='store_true') parser.add_argument('--p',", "{} args2['lr-scheduler'] = 'polynomial_decay' args2['warmup-updates'] = 2000 args2['max-update'] = 56250", "'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] ##args3[key].append((2048,2048,8192,8, 0.00075))", "5 time_hours = 72 time_minutes = 0 #partition = 'learnlab,learnfair,scavenge'", "= 'polynomial_decay' args2['warmup-updates'] = 2000 args2['max-update'] = 56250 args2['total-num-update'] =", "#args3['update-freq'] = [2] key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr')", "import argparse import os import uuid import hashlib import glob", "'clip-norm')] = [(5, 0.0)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False,", "{1}'.format(key, v)) args_prod.append(keyvalues) if len(args_prod) >= 2: args_prod = list(product(*args_prod))", "--fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus) args2 = {} name = 'blockwise5' constraint =", "(False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(False, False, False)]", "v is False: continue if len(key[i]) == 0: arg +=", "False: continue if len(key[i]) == 0: arg += '{0} '.format(v)", "[(True, 32, 'quantile', 1)] ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,2)) # #lr =", "{0}'.format(join('/private/home/timdettmers/logs/', logfolder))) print('Jobs will be run on: {0}'.format(partition)) print('Run in", "= [True] #lr = 0.003239 + (-0.0001395*math.log(2.43e9)) #args3[lrkey].append((lr, 0.0)) #args2['train-subset']", "{1}'.format(key, value) args_prod = [] for key, values in args3.items():", "not fp16: job_cmd = job_cmd.replace('--fp16 ', ' ') job_cmd =", "print(i, job) print('') print('Total jobs', len(jobs)) print('Time hours: {0}'.format(time_hours)) print('GPUs:", "will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder))) print('Jobs will be run", "lr+1e-8, lr*0.1, 
lr*0.1 + 1e-8)) # 8-bit #args3[('percentile-clipping', 'clip-norm')] =", "= [1.0] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3['decoder-embed-dim'] = [2048+256]", "[(5, 0.0)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'quantile',", "keyvalues.append(arg) elif isinstance(key, str): keyvalues = [] for v in", "56250*4 args2['fp16-scale-window'] = 250 args2['clip-norm'] = 0.4 #args3[('fused', 'adam-bits', 'adam8bits-method',", "'adam8bits-qfreq')] = [(False, 8, 'quantile', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')]", "[] if len(args4) == 0: args4.append('') for seed in range(num_seeds):", "32, 'quantile', 1)]#, (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree',", "0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')]", "print('GPUs: {0}'.format(gpus)) print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder))) print('Jobs", "#args3[key].append((2048,2688,10752,2)) #args3['use-emb-norm'] = [True] #lr = 0.003239 + (-0.0001395*math.log(2.43e9)) #args3[lrkey].append((lr,", "1)]#, (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False,", "parser.add_argument('--verbose', action='store_true') parser.add_argument('--p', type=float, default=1.0, help='Probability with which to select", "'lr') #key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq') args3[key] = []", "'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] #args3['adam8bits-offset'] = [1/512]", "cmd = cmd + ' --{0} {1}'.format(key, value) args_prod =", "'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32, 'quantile', 1)] #args2['optimizer'] =", "len(jobs)) print('Time hours: {0}'.format(time_hours)) print('GPUs: {0}'.format(gpus)) print('Jobs will be written", "--ignore-unused-valid-subsets'.format(gpus) args2 = {} name = 'blockwise5' 
constraint = 'volta32gb'", "gpus=gpus) if args.dry: for i, job in enumerate(jobs): print(i, job)", "action='store_true') parser.add_argument('--p', type=float, default=1.0, help='Probability with which to select a", "# adafactor #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method',", "True args3 = {} args2['lr-scheduler'] = 'polynomial_decay' args2['warmup-updates'] = 2000", "args2['warmup-updates'] = 2000 args2['max-update'] = 56250 args2['total-num-update'] = 56250 #args2['lr-scheduler']", "job_cmd = job_cmd + save_dir cmds = [job_cmd] if rdm.rand(1)", "= cmd + ' --{0} {1}'.format(key, value) args_prod = []", "'no-scale-embedding')] = [(False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] =", "['adam'] args3[('use-bnb', 'optim-bits')] = [(True, 8)] args3[('stable-emb', 'no-scale-embedding')] = [(True,", "0.4608e9 tokens -> optimal batch size 3460 # model sizes:", "job_cmd = job_cmd.replace('--fp16 ', ' ') job_cmd = job_cmd +", "'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1)] #args3[('fused', 'adam-bits',", "parser.parse_args() gpus = 128 cmd = 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0}", "#args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True, True, True), (False, False, False)]", "#args2['decay-rate'] = 0.999 ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048+256,8192+2048,2)) ##args3[key].append((2048,2688,10752,2)) # #lr =", "= [] for key, values in args3.items(): if isinstance(key, tuple):", "0: for arg in args_prod[0]: new_args.append([arg]) args_prod = new_args jobs", "[(False, 32, 'quantile', 1)] #args2['optimizer'] = 'adafactor' #args2['beta1'] = 0.9", "+= ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) if not 
fp16: job_cmd =", "be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder))) print('Jobs will be run on:", "#args3['adam8bits-offset'] = [1/512] #args3['prob-quant'] = [False] #args3['dist-scale'] = [1.0] #args3[('percentile-clipping',", "0.00045)) #args3[key].append((2048,2688,10752,2)) #args3['use-emb-norm'] = [True] #lr = 0.003239 + (-0.0001395*math.log(2.43e9))", "#args2['warmup-updates'] = 3000 #args2['max-update'] = 56250*4 args2['fp16-scale-window'] = 250 args2['clip-norm']", "import glob import math from itertools import product from torch.optim.lr_scheduler", "= 5 time_hours = 72 time_minutes = 0 #partition =", "from torch.optim.lr_scheduler import OneCycleLR from os.path import join parser =", "8 else gpus) num_seeds = 1 seed_offset = 5 time_hours", "#args3['prob-quant'] = [False] #args3['dist-scale'] = [1.0] #args3[('percentile-clipping', 'clip-norm')] = [(100,", "= [1/512] #args3['prob-quant'] = [False] #args3['dist-scale'] = [1.0] #args3[('percentile-clipping', 'clip-norm')]", "= [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32,", "1), (False, 8, 'quantile', 25)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] =", "= [(True, 8)] args3[('stable-emb', 'no-scale-embedding')] = [(True, True)] #args3[('use-bnb', 'stable-emb',", "54187 --fp16 --memory-efficient-fp16 --num-workers 2 --criterion cross_entropy --task language_modeling --sample-break-mode", "arg4 in args4: if len(args_prod) == 0: args_prod.append(('', '')) for", "== 0: args_prod.append(('', '')) for i, values in enumerate(args_prod): job_cmd", "#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1), (False,", "isinstance(key, tuple): keyvalues = [] for tups in values: arg", "##args3[key].append((2048,2688,10752,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.92e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1,", "[(100, 0.1)] #args3[('fused', 'adam-bits', 
'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile',", "= [8192+2048] #args3['max-tokens'] = [3072] #args3['update-freq'] = [2] key =", "= 'adafactor' #args2['beta1'] = 0.9 #args2['decay-rate'] = 0.999 ##args3[key].append((2048,2048,8192,8, 0.00075))", "'' s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False) fp16 = True", "0.003239 + (-0.0001395*math.log(1.41e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) #", "arg = '' for i, v in enumerate(tups): if v", "= 'uninterruptible' change_dir = 'fairseq_private' repo = 'fairseq_private' exclude =", "'dynamic_tree', 1), (False, 8, 'quantile', 25)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')]", "= [(False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 1)] args3['optimizer']", "v)) args_prod.append(keyvalues) if len(args_prod) >= 2: args_prod = list(product(*args_prod)) else:", "if args.dry: for i, job in enumerate(jobs): print(i, job) print('')", "[1.0] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3['decoder-embed-dim'] = [2048+256] #args3['decoder-ffn-embed-dim']", "#args2['beta1'] = 0.9 #args2['decay-rate'] = 0.999 ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048+256,8192+2048,2)) ##args3[key].append((2048,2688,10752,2))", "enumerate(args_prod): job_cmd = cmd + arg4 for val in values:", "0 #partition = 'learnlab,learnfair,scavenge' partition = 'learnfair,learnlab' #partition = 'learnfair'", "args_prod = [] for key, values in args3.items(): if isinstance(key,", "args2.items(): cmd = cmd + ' --{0} {1}'.format(key, value) args_prod", "cmds = [job_cmd] if rdm.rand(1) <= args.p: jobs.append(job_cmd) s.add_job(logfolder, repo,", "0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(5,", "none --log-interval 25 --tokens-per-sample 1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28", 
"in enumerate(args_prod): job_cmd = cmd + arg4 for val in", "steps = 0.4608e9 tokens -> optimal batch size 3460 #", "account='', partition=partition, use_gres=False) fp16 = True args3 = {} args2['lr-scheduler']", "repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes,", "'' if v is False: keyvalues.append('') else: keyvalues.append(' --{0} {1}'.format(key,", "= seed + seed_offset for arg4 in args4: if len(args_prod)", "[(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32, 'quantile',", "parser.add_argument('--p', type=float, default=1.0, help='Probability with which to select a configuration.')", "values: job_cmd += ' {0}' .format(val) #job_cmd += ' --checkpoint", "'fairseq_private' repo = 'fairseq_private' exclude = '' s = gpuscheduler.HyakScheduler(verbose=args.verbose,", "checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) save_dir = ' --save-dir {0}'.format(checkpoint_dir)", "keyvalues = [] for tups in values: arg = ''", "'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]#, (False, 8, 'quantile',", "> 0: for arg in args_prod[0]: new_args.append([arg]) args_prod = new_args", "1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16 --dropout 0.0", "--activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs", "0.00075)) #args3[key].append((2048,2048+256,8192+2048,2)) ##args3[key].append((2048,2688,10752,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.92e9)) #args3[lrkey].append((lr,", "cmd = 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187 --fp16 --memory-efficient-fp16", "import gpuscheduler import 
argparse import os import uuid import hashlib", "--keep-last-epochs 0 --save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus) args2 =", "--{0} {1}'.format(key, v)) args_prod.append(keyvalues) if len(args_prod) >= 2: args_prod =", "'adafactor' #args2['beta1'] = 0.9 #args2['decay-rate'] = 0.999 ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048+256,8192+2048,2))", "#args3['decoder-ffn-embed-dim'] = [8192+2048] #args3['max-tokens'] = [3072] #args3['update-freq'] = [2] key", "args3[key] = [] #lrkey = ('lr', 'warmup-init-lr') #args3[lrkey] = []", "= 2000 args2['max-update'] = 56250 args2['total-num-update'] = 56250 #args2['lr-scheduler'] =", "0.9 #args2['decay-rate'] = 0.999 ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048+256,8192+2048,2)) ##args3[key].append((2048,2688,10752,2)) # #lr", "in enumerate(tups): if v is True: v = '' if", "False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(False, False, False)] #args3[('use-bnb', 'stable-emb',", "+ (-0.0001395*math.log(2.43e9)) #args3[lrkey].append((lr, 0.0)) #args2['train-subset'] = 'train11' args4 = []", "= 128 cmd = 'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187", "uuid import hashlib import glob import math from itertools import", "* 56250 steps = 0.4608e9 tokens -> optimal batch size", "56250 steps = 0.4608e9 tokens -> optimal batch size 3460", "0.003239 + (-0.0001395*math.log(1.92e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) #", "args2 = {} name = 'blockwise5' constraint = 'volta32gb' #", "args6 = {} rdm = np.random.RandomState(5345) for key, value in", "# #lr = 0.003239 + (-0.0001395*math.log(1.41e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1", "jobs', len(jobs)) print('Time hours: {0}'.format(time_hours)) print('GPUs: {0}'.format(gpus)) 
print('Jobs will be", "3000 #args2['max-update'] = 56250*4 args2['fp16-scale-window'] = 250 args2['clip-norm'] = 0.4", "numpy as np import itertools import gpuscheduler import argparse import", "#args3['optimizer'] = ['adam'] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits',", "for i, values in enumerate(args_prod): job_cmd = cmd + arg4", "= gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False) fp16 = True args3 =", "24*2 cores_per_job = 5 mem = 56*(8 if gpus >", "cmd + arg4 for val in values: job_cmd += '", "* 8 update_freq * 56250 steps = 0.4608e9 tokens ->", "is True: v = '' if v is False: continue", "args.dry: for i, job in enumerate(jobs): print(i, job) print('') print('Total", "change_dir = 'fairseq_private' repo = 'fairseq_private' exclude = '' s", "= [(True, 32, 'quantile', 1)] ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,2)) # #lr", "args4.append('') for seed in range(num_seeds): seed = seed + seed_offset", "for v in values: if v is True: v =", "len(args_prod) >= 2: args_prod = list(product(*args_prod)) else: new_args = []", "for arg in args_prod[0]: new_args.append([arg]) args_prod = new_args jobs =", "+ (-0.0001395*math.log(1.92e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) # 8-bit", "import itertools import gpuscheduler import argparse import os import uuid", "'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')]", "1), (False, 8, 'quantile', 1)] args3['optimizer'] = ['adam'] args3[('use-bnb', 'optim-bits')]", "0.999 ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048+256,8192+2048,2)) ##args3[key].append((2048,2688,10752,2)) # #lr = 0.003239 +", "0.00075)) #args3[key].append((2048,2048,8192,8, 0.00045)) #args3[key].append((2048,2688,10752,2)) #args3['use-emb-norm'] = [True] #lr = 0.003239", "#partition = 
'learnlab,learnfair,scavenge' partition = 'learnfair,learnlab' #partition = 'learnfair' #partition", "'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method',", "--arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16 --dropout 0.0 --attention-dropout", "isinstance(key, str): keyvalues = [] for v in values: if", "argparse import os import uuid import hashlib import glob import", "= 5 mem = 56*(8 if gpus > 8 else", "#args3[lrkey].append((lr, 0.0)) #args2['train-subset'] = 'train11' args4 = [] args5 =", "'no-scale-embedding')] = [(True, True, True), (False, False, False)] #args3[('use-bnb', 'stable-emb',", "batch size 3460 # model sizes: 1.92bn, 2.43bn, 1.41bn logfolder", "= [(False, 32, 'quantile', 1)] #args2['optimizer'] = 'adafactor' #args2['beta1'] =", "'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32, 'quantile', 1)] #args2['optimizer'] = 'adafactor'", "cmd + ' --{0} {1}'.format(key, value) args_prod = [] for", "job_cmd = job_cmd + ' --seed {0}'.format(seed) checkpoint_dir = '/checkpoint/timdettmers/{1}/{0}", "save_dir cmds = [job_cmd] if rdm.rand(1) <= args.p: jobs.append(job_cmd) s.add_job(logfolder,", "v in values: if v is True: v = ''", "[job_cmd] if rdm.rand(1) <= args.p: jobs.append(job_cmd) s.add_job(logfolder, repo, change_dir, cmds,", "be run on: {0}'.format(partition)) print('Run in folder: {0}'.format(change_dir)) if not", "hashlib import glob import math from itertools import product from", "#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1), (False,", "if v is False: continue if len(key[i]) == 0: arg", "configuration.') args = parser.parse_args() gpus = 128 cmd = 'fairseq-train", "16 --dropout 0.0 --attention-dropout 0.0 --activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints", "else gpus) num_seeds = 1 seed_offset = 5 time_hours =", "if v is True: v = '' if v is", 
"for i, v in enumerate(tups): if v is True: v", "0: args4.append('') for seed in range(num_seeds): seed = seed +", "print('Time hours: {0}'.format(time_hours)) print('GPUs: {0}'.format(gpus)) print('Jobs will be written to:", "size 3460 # model sizes: 1.92bn, 2.43bn, 1.41bn logfolder =", "'learnlab,learnfair,scavenge' partition = 'learnfair,learnlab' #partition = 'learnfair' #partition = 'uninterruptible'", "' {0}' .format(val) #job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) if", "56*(8 if gpus > 8 else gpus) num_seeds = 1", "32, 'quantile', 1)] #args3['adam8bits-offset'] = [1/512] #args3['prob-quant'] = [False] #args3['dist-scale']", "8)] args3[('stable-emb', 'no-scale-embedding')] = [(True, True)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] =", "select a configuration.') args = parser.parse_args() gpus = 128 cmd", "in range(num_seeds): seed = seed + seed_offset for arg4 in", "v = '' if v is False: continue if len(key[i])", "'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False,", "'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]#, (False, 8, 'quantile', 1),", "{} rdm = np.random.RandomState(5345) for key, value in args2.items(): cmd", "0.1)] #args3['decoder-embed-dim'] = [2048+256] #args3['decoder-ffn-embed-dim'] = [8192+2048] #args3['max-tokens'] = [3072]", "1)] args3['optimizer'] = ['adam'] args3[('use-bnb', 'optim-bits')] = [(True, 8)] args3[('stable-emb',", "= 1 seed_offset = 5 time_hours = 72 time_minutes =", "else: arg += '--{0} {1} '.format(key[i], v) keyvalues.append(arg) elif isinstance(key,", "32, 'quantile', 1), (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree',", "update_freq * 56250 steps = 0.4608e9 tokens -> optimal batch", "if len(args4) == 0: args4.append('') for seed in range(num_seeds): seed", "<reponame>TimDettmers/sched<filename>scripts/adam/cc100_baselines.py import numpy as np 
import itertools import gpuscheduler import", "= [2] key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr') #key", "tokens * 8 update_freq * 56250 steps = 0.4608e9 tokens", "[(True, 8)] args3[('stable-emb', 'no-scale-embedding')] = [(True, True)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')]", "'adam/cc100/{0}'.format(name) ckp_name = logfolder #time_hours = 24*2 cores_per_job = 5", "torch.optim.lr_scheduler import OneCycleLR from os.path import join parser = argparse.ArgumentParser(description='Compute", "#job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) if not fp16: job_cmd", "args2['max-update'] = 56250 args2['total-num-update'] = 56250 #args2['lr-scheduler'] = 'cosine' #args2['warmup-updates']", "ckp_name) if not fp16: job_cmd = job_cmd.replace('--fp16 ', ' ')", "'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True,", "with which to select a configuration.') args = parser.parse_args() gpus", "help='Probability with which to select a configuration.') args = parser.parse_args()", "8, 'quantile', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8,", "rdm.rand(1) <= args.p: jobs.append(job_cmd) s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16,", "for val in values: job_cmd += ' {0}' .format(val) #job_cmd", "= 250 args2['clip-norm'] = 0.4 #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] =", "#args3['decoder-embed-dim'] = [2048+256] #args3['decoder-ffn-embed-dim'] = [8192+2048] #args3['max-tokens'] = [3072] #args3['update-freq']", "if len(args_prod) >= 2: args_prod = list(product(*args_prod)) else: new_args =", "'quantile', 1)]#, (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1),", "0.0)) #args2['train-subset'] = 'train11' args4 = [] args5 = {}", "= [] # 32-bit baseline #args3['optimizer'] = ['adam'] 
#args3[('percentile-clipping', 'clip-norm')]", "0 --keep-last-epochs 0 --save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus) args2", "#args3[key].append((2048,2048,8192,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.41e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1,", "= np.random.RandomState(5345) for key, value in args2.items(): cmd = cmd", "to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder))) print('Jobs will be run on: {0}'.format(partition)) print('Run", "cross_entropy --task language_modeling --sample-break-mode none --log-interval 25 --tokens-per-sample 1024 --arch", "{} args6 = {} rdm = np.random.RandomState(5345) for key, value", "print('Jobs will be run on: {0}'.format(partition)) print('Run in folder: {0}'.format(change_dir))", "'decoder-ffn-embed-dim', 'update-freq', 'lr') #key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq') args3[key]", "fp16: job_cmd = job_cmd.replace('--fp16 ', ' ') job_cmd = job_cmd", "use_gres=False) fp16 = True args3 = {} args2['lr-scheduler'] = 'polynomial_decay'", "' --save-dir {0}'.format(checkpoint_dir) job_cmd = job_cmd + save_dir cmds =", "len(args_prod) > 0: for arg in args_prod[0]: new_args.append([arg]) args_prod =", "1.92bn, 2.43bn, 1.41bn logfolder = 'adam/cc100/{0}'.format(name) ckp_name = logfolder #time_hours", "'warmup-init-lr') #args3[lrkey] = [] # 32-bit baseline #args3['optimizer'] = ['adam']", "seed_offset for arg4 in args4: if len(args_prod) == 0: args_prod.append(('',", "[] #lrkey = ('lr', 'warmup-init-lr') #args3[lrkey] = [] # 32-bit", "32, 'quantile', 1)] #args2['optimizer'] = 'adafactor' #args2['beta1'] = 0.9 #args2['decay-rate']", "2 --criterion cross_entropy --task language_modeling --sample-break-mode none --log-interval 25 --tokens-per-sample", "import product from torch.optim.lr_scheduler import OneCycleLR from os.path import join", "--memory-efficient-fp16 --num-workers 2 --criterion 
cross_entropy --task language_modeling --sample-break-mode none --log-interval", "import numpy as np import itertools import gpuscheduler import argparse", "'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] #args3['adam8bits-offset'] = [1/512] #args3['prob-quant']", "0.0 --activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates 0", "values in enumerate(args_prod): job_cmd = cmd + arg4 for val", "args2['fp16-scale-window'] = 250 args2['clip-norm'] = 0.4 #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')]", "+ (-0.0001395*math.log(1.41e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) # adafactor", "seed in range(num_seeds): seed = seed + seed_offset for arg4", "job in enumerate(jobs): print(i, job) print('') print('Total jobs', len(jobs)) print('Time", "job_cmd + save_dir cmds = [job_cmd] if rdm.rand(1) <= args.p:", "1e-8)) # 8-bit #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')]", "gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False) fp16 = True args3 = {}", "['adam'] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')]", "'optim-bits')] = [(True, 8)] args3[('stable-emb', 'no-scale-embedding')] = [(True, True)] #args3[('use-bnb',", "--distributed-world-size {0} --distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers 2 --criterion cross_entropy", "for tups in values: arg = '' for i, v", "+= '--{0} {1} '.format(key[i], v) keyvalues.append(arg) elif isinstance(key, str): keyvalues", "'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr') #key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq')", "#lrkey = ('lr', 'warmup-init-lr') #args3[lrkey] = [] # 32-bit baseline", "False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] 
= [(True, True, True,", "np.random.RandomState(5345) for key, value in args2.items(): cmd = cmd +", "--seed {0}'.format(seed) checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) save_dir = '", "seed = seed + seed_offset for arg4 in args4: if", "= [(False, 8, 'quantile', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] =", "cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus) if args.dry: for i,", "= 24*2 cores_per_job = 5 mem = 56*(8 if gpus", "[(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] =", "(-0.0001395*math.log(1.92e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) # 8-bit #args3[('percentile-clipping',", "constraint = 'volta32gb' # 1024 tokens * 8 update_freq *", "= [] #lrkey = ('lr', 'warmup-init-lr') #args3[lrkey] = [] #", "[(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)] #args3[('fused', 'adam-bits', 'adam8bits-method',", "--decoder-layers 28 --decoder-attention-heads 16 --dropout 0.0 --attention-dropout 0.0 --activation-dropout 0.0", "'.format(key[i], v) keyvalues.append(arg) elif isinstance(key, str): keyvalues = [] for", "in values: if v is True: v = '' if", "--tokens-per-sample 1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16 --dropout", "#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3['decoder-embed-dim'] = [2048+256] #args3['decoder-ffn-embed-dim'] =", "--keep-best-checkpoints 0 --keep-interval-updates 0 --keep-last-epochs 0 --save-interval-updates 1000 --log-format simple", "= ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq') args3[key] = [] #lrkey =", "' --seed {0}'.format(seed) checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} 
'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) save_dir =", "8 update_freq * 56250 steps = 0.4608e9 tokens -> optimal", "from itertools import product from torch.optim.lr_scheduler import OneCycleLR from os.path", "#args3[lrkey] = [] # 32-bit baseline #args3['optimizer'] = ['adam'] #args3[('percentile-clipping',", "values in args3.items(): if isinstance(key, tuple): keyvalues = [] for", "on: {0}'.format(partition)) print('Run in folder: {0}'.format(change_dir)) if not args.dry: s.run_jobs()", "if len(args_prod) == 0: args_prod.append(('', '')) for i, values in", "adafactor #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')]", "= [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)] #args3[('fused', 'adam-bits',", "lr*0.1 + 1e-8)) # 8-bit #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]", "action='store_true') parser.add_argument('--verbose', action='store_true') parser.add_argument('--p', type=float, default=1.0, help='Probability with which to", "args_prod[0]: new_args.append([arg]) args_prod = new_args jobs = [] if len(args4)", "== 0: args4.append('') for seed in range(num_seeds): seed = seed", "= [(True, 32, 'quantile', 1)]#, (False, 8, 'quantile', 1), (False,", "model sizes: 1.92bn, 2.43bn, 1.41bn logfolder = 'adam/cc100/{0}'.format(name) ckp_name =", "# 8-bit #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] =", "= [(True, 32, 'quantile', 1)] #args3['adam8bits-offset'] = [1/512] #args3['prob-quant'] =", "'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq') args3[key] = [] #lrkey = ('lr', 'warmup-init-lr')", "{0}'.format(gpus)) print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder))) print('Jobs will", "0.0)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'quantile', 1)]", "else: new_args = [] 
if len(args_prod) > 0: for arg", "+ 1e-8)) # 8-bit #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping',", "') job_cmd = job_cmd + ' --seed {0}'.format(seed) checkpoint_dir =", "= {} args2['lr-scheduler'] = 'polynomial_decay' args2['warmup-updates'] = 2000 args2['max-update'] =", "product from torch.optim.lr_scheduler import OneCycleLR from os.path import join parser", "#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)]", "1), (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False,", "#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 32, 'quantile', 1)] #args2['optimizer']", "args5 = {} args6 = {} rdm = np.random.RandomState(5345) for", "8, 'quantile', 1)] args3['optimizer'] = ['adam'] args3[('use-bnb', 'optim-bits')] = [(True,", "arg4 for val in values: job_cmd += ' {0}' .format(val)", "'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 1)]", "default=1.0, help='Probability with which to select a configuration.') args =", "= 56*(8 if gpus > 8 else gpus) num_seeds =", "in args_prod[0]: new_args.append([arg]) args_prod = new_args jobs = [] if", "= 72 time_minutes = 0 #partition = 'learnlab,learnfair,scavenge' partition =", "len(args_prod) == 0: args_prod.append(('', '')) for i, values in enumerate(args_prod):", "job_cmd += ' {0}' .format(val) #job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(),", "0.4 #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1),", "'decoder-ffn-embed-dim', 'update-freq') args3[key] = [] #lrkey = ('lr', 'warmup-init-lr') #args3[lrkey]", "'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,2)) #", "+= ' {0}' .format(val) #job_cmd += ' --checkpoint 
/checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)", "#partition = 'learnfair' #partition = 'uninterruptible' change_dir = 'fairseq_private' repo", "args3['optimizer'] = ['adam'] args3[('use-bnb', 'optim-bits')] = [(True, 8)] args3[('stable-emb', 'no-scale-embedding')]", "True, True)] args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,8, 0.00045)) #args3[key].append((2048,2688,10752,2)) #args3['use-emb-norm'] = [True]", "'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1), (False, 8,", "key, value in args2.items(): cmd = cmd + ' --{0}", "parser = argparse.ArgumentParser(description='Compute script.') parser.add_argument('--dry', action='store_true') parser.add_argument('--verbose', action='store_true') parser.add_argument('--p', type=float,", "#args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) # adafactor #args3[('percentile-clipping', 'clip-norm')]", "'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'quantile', 1)] #args3[('fused', 'adam-bits',", "##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048+256,8192+2048,2)) ##args3[key].append((2048,2688,10752,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.92e9))", "#args3['use-emb-norm'] = [True] #lr = 0.003239 + (-0.0001395*math.log(2.43e9)) #args3[lrkey].append((lr, 0.0))", "'fairseq-train /private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers 2", "0 --keep-interval-updates 0 --keep-last-epochs 0 --save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads", "for key, value in args2.items(): cmd = cmd + '", "[2048+256] #args3['decoder-ffn-embed-dim'] = [8192+2048] #args3['max-tokens'] = [3072] #args3['update-freq'] = [2]", "save_dir = ' --save-dir {0}'.format(checkpoint_dir) job_cmd = job_cmd + 
save_dir", "[(False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] = [(True, True,", "gpus > 8 else gpus) num_seeds = 1 seed_offset =", "= 'train11' args4 = [] args5 = {} args6 =", "'dynamic_tree', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree',", "baseline #args3['optimizer'] = ['adam'] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('fused',", "new_args = [] if len(args_prod) > 0: for arg in", "'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,2))", "= 'fairseq_private' exclude = '' s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition,", "25)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]", "0.00075)) #args3[key].append((2048,2048,8192,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.41e9)) #args3[lrkey].append((lr, lr+1e-8,", "--attention-dropout 0.0 --activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints 0 --keep-interval-updates", "--num-workers 2 --criterion cross_entropy --task language_modeling --sample-break-mode none --log-interval 25", "--decoder-attention-heads 16 --dropout 0.0 --attention-dropout 0.0 --activation-dropout 0.0 --activation-fn relu", "[(True, True, True, True)] args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,8, 0.00045)) #args3[key].append((2048,2688,10752,2)) #args3['use-emb-norm']", "#args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]", "math from itertools import product from torch.optim.lr_scheduler import OneCycleLR from", "{1} '.format(key[i], v) keyvalues.append(arg) elif isinstance(key, str): keyvalues = []", "args_prod.append(keyvalues) if len(args_prod) >= 2: args_prod = list(product(*args_prod)) 
else: new_args", "'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping',", "job_cmd + ' --seed {0}'.format(seed) checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name)", "('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr') #key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim',", "# #lr = 0.003239 + (-0.0001395*math.log(1.92e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1", "[(False, 8, 'quantile', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False,", "1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree', 1),", "(False, 8, 'quantile', 25)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True,", "#args2['train-subset'] = 'train11' args4 = [] args5 = {} args6", "25 --tokens-per-sample 1024 --arch transformer_lm_big --share-decoder-input-output-embed --decoder-layers 28 --decoder-attention-heads 16", "run on: {0}'.format(partition)) print('Run in folder: {0}'.format(change_dir)) if not args.dry:", "args4: if len(args_prod) == 0: args_prod.append(('', '')) for i, values", "glob import math from itertools import product from torch.optim.lr_scheduler import", "[2] key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr') #key =", "--save-dir {0}'.format(checkpoint_dir) job_cmd = job_cmd + save_dir cmds = [job_cmd]", "lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) # adafactor #args3[('percentile-clipping', 'clip-norm')] =", "in args2.items(): cmd = cmd + ' --{0} {1}'.format(key, value)", "= [(True, True, True), (False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')]", "(False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1), (False, 8,", "language_modeling --sample-break-mode none --log-interval 25 --tokens-per-sample 1024 --arch 
transformer_lm_big --share-decoder-input-output-embed", "= {} name = 'blockwise5' constraint = 'volta32gb' # 1024", "= {} rdm = np.random.RandomState(5345) for key, value in args2.items():", "'fairseq_private' exclude = '' s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False)", "'' for i, v in enumerate(tups): if v is True:", "ckp_name) save_dir = ' --save-dir {0}'.format(checkpoint_dir) job_cmd = job_cmd +", "v is True: v = '' if v is False:", "= 0.003239 + (-0.0001395*math.log(1.41e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8))", "== 0: arg += '{0} '.format(v) else: arg += '--{0}", "= job_cmd + ' --seed {0}'.format(seed) checkpoint_dir = '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(),", "[] args5 = {} args6 = {} rdm = np.random.RandomState(5345)", "'quantile', 1), (False, 8, 'quantile', 1), (False, 8, 'dynamic_tree', 1),", "= ['adam'] args3[('use-bnb', 'optim-bits')] = [(True, 8)] args3[('stable-emb', 'no-scale-embedding')] =", "type=float, default=1.0, help='Probability with which to select a configuration.') args", "= 0.4 #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile',", "= ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr') #key = ('max-tokens', 'decoder-embed-dim',", "i, values in enumerate(args_prod): job_cmd = cmd + arg4 for", "print('Total jobs', len(jobs)) print('Time hours: {0}'.format(time_hours)) print('GPUs: {0}'.format(gpus)) print('Jobs will", "+= '{0} '.format(v) else: arg += '--{0} {1} '.format(key[i], v)", "0 --save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus) args2 = {}", "<= args.p: jobs.append(job_cmd) s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job,", "= '' for i, v in enumerate(tups): if v is", "#args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = 
[(False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding',", "if v is False: keyvalues.append('') else: keyvalues.append(' --{0} {1}'.format(key, v))", "logfolder))) print('Jobs will be run on: {0}'.format(partition)) print('Run in folder:", "import math from itertools import product from torch.optim.lr_scheduler import OneCycleLR", "is False: keyvalues.append('') else: keyvalues.append(' --{0} {1}'.format(key, v)) args_prod.append(keyvalues) if", "s = gpuscheduler.HyakScheduler(verbose=args.verbose, account='', partition=partition, use_gres=False) fp16 = True args3", "for key, values in args3.items(): if isinstance(key, tuple): keyvalues =", "v is False: keyvalues.append('') else: keyvalues.append(' --{0} {1}'.format(key, v)) args_prod.append(keyvalues)", "'quantile', 1)] ##args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,2)) # #lr = 0.003239 +", "args4 = [] args5 = {} args6 = {} rdm", "{0}' .format(val) #job_cmd += ' --checkpoint /checkpoint/timdettmers/{1}/{0}/model.pt'.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) if not", "a configuration.') args = parser.parse_args() gpus = 128 cmd =", "= '/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) save_dir = ' --save-dir {0}'.format(checkpoint_dir) job_cmd", "#time_hours = 24*2 cores_per_job = 5 mem = 56*(8 if", "= 56250 #args2['lr-scheduler'] = 'cosine' #args2['warmup-updates'] = 3000 #args2['max-update'] =", "True: v = '' if v is False: continue if", "tokens -> optimal batch size 3460 # model sizes: 1.92bn,", "{0}'.format(time_hours)) print('GPUs: {0}'.format(gpus)) print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder)))", "= cmd + arg4 for val in values: job_cmd +=", "#args3['max-tokens'] = [3072] #args3['update-freq'] = [2] key = ('max-tokens', 'decoder-embed-dim',", 
"/private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size {0} --distributed-port 54187 --fp16 --memory-efficient-fp16 --num-workers 2 --criterion", "to select a configuration.') args = parser.parse_args() gpus = 128", "# 32-bit baseline #args3['optimizer'] = ['adam'] #args3[('percentile-clipping', 'clip-norm')] = [(100,", "os.path import join parser = argparse.ArgumentParser(description='Compute script.') parser.add_argument('--dry', action='store_true') parser.add_argument('--verbose',", "#args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] = [(True, True, True, True)] args3[key].append((2048,2048,8192,8,", "('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq') args3[key] = [] #lrkey = ('lr',", "5 mem = 56*(8 if gpus > 8 else gpus)", "'volta32gb' # 1024 tokens * 8 update_freq * 56250 steps", "key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq', 'lr') #key = ('max-tokens',", "value in args2.items(): cmd = cmd + ' --{0} {1}'.format(key,", "2: args_prod = list(product(*args_prod)) else: new_args = [] if len(args_prod)", "False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')] = [(True, True, True, True)]", "args_prod.append(('', '')) for i, values in enumerate(args_prod): job_cmd = cmd", "0.003239 + (-0.0001395*math.log(2.43e9)) #args3[lrkey].append((lr, 0.0)) #args2['train-subset'] = 'train11' args4 =", "'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]#, (False, 8,", "s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude,", "# 1024 tokens * 8 update_freq * 56250 steps =", "--keep-interval-updates 0 --keep-last-epochs 0 --save-interval-updates 1000 --log-format simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus)", "v = '' if v is False: keyvalues.append('') else: keyvalues.append('", 
"'quantile', 1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8, 'dynamic_tree',", "if isinstance(key, tuple): keyvalues = [] for tups in values:", "list(product(*args_prod)) else: new_args = [] if len(args_prod) > 0: for", "OneCycleLR from os.path import join parser = argparse.ArgumentParser(description='Compute script.') parser.add_argument('--dry',", "'adam8bits-qfreq')] = [(False, 32, 'quantile', 1)] #args2['optimizer'] = 'adafactor' #args2['beta1']", "= 0.003239 + (-0.0001395*math.log(1.92e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8))", "import OneCycleLR from os.path import join parser = argparse.ArgumentParser(description='Compute script.')", "[(True, True)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding')] = [(True, True, True), (False,", "--dropout 0.0 --attention-dropout 0.0 --activation-dropout 0.0 --activation-fn relu --no-epoch-checkpoints --keep-best-checkpoints", "= 0 #partition = 'learnlab,learnfair,scavenge' partition = 'learnfair,learnlab' #partition =", "= [3072] #args3['update-freq'] = [2] key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim',", "0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]", "= 'learnlab,learnfair,scavenge' partition = 'learnfair,learnlab' #partition = 'learnfair' #partition =", "new_args jobs = [] if len(args4) == 0: args4.append('') for", "for arg4 in args4: if len(args_prod) == 0: args_prod.append(('', ''))", "arg += '--{0} {1} '.format(key[i], v) keyvalues.append(arg) elif isinstance(key, str):", "change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem, constraint=constraint, exclude=exclude, time_minutes=time_minutes, gpus=gpus)", "if rdm.rand(1) <= args.p: jobs.append(job_cmd) s.add_job(logfolder, repo, change_dir, cmds, time_hours,", "[] for tups in values: arg = '' for i,", "in values: job_cmd += ' {0}' .format(val) #job_cmd += '", "arg in args_prod[0]: new_args.append([arg]) 
args_prod = new_args jobs = []", "('lr', 'warmup-init-lr') #args3[lrkey] = [] # 32-bit baseline #args3['optimizer'] =", "import uuid import hashlib import glob import math from itertools", "'learnfair' #partition = 'uninterruptible' change_dir = 'fairseq_private' repo = 'fairseq_private'", "8, 'quantile', 25)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32,", "keyvalues.append('') else: keyvalues.append(' --{0} {1}'.format(key, v)) args_prod.append(keyvalues) if len(args_prod) >=", "'/checkpoint/timdettmers/{1}/{0} '.format(hashlib.md5(str(job_cmd).encode('utf-8')).hexdigest(), ckp_name) save_dir = ' --save-dir {0}'.format(checkpoint_dir) job_cmd =", "= 'adam/cc100/{0}'.format(name) ckp_name = logfolder #time_hours = 24*2 cores_per_job =", "written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder))) print('Jobs will be run on: {0}'.format(partition))", "itertools import gpuscheduler import argparse import os import uuid import", "parser.add_argument('--dry', action='store_true') parser.add_argument('--verbose', action='store_true') parser.add_argument('--p', type=float, default=1.0, help='Probability with which", "if not fp16: job_cmd = job_cmd.replace('--fp16 ', ' ') job_cmd", "= 56250*4 args2['fp16-scale-window'] = 250 args2['clip-norm'] = 0.4 #args3[('fused', 'adam-bits',", "tups in values: arg = '' for i, v in", "[(False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 1)] args3['optimizer'] =", "else: keyvalues.append(' --{0} {1}'.format(key, v)) args_prod.append(keyvalues) if len(args_prod) >= 2:", "rdm = np.random.RandomState(5345) for key, value in args2.items(): cmd =", "if gpus > 8 else gpus) num_seeds = 1 seed_offset", "args2['total-num-update'] = 56250 #args2['lr-scheduler'] = 'cosine' #args2['warmup-updates'] = 3000 #args2['max-update']", "key, values in args3.items(): if isinstance(key, tuple): keyvalues = []", "= list(product(*args_prod)) else: new_args = [] if len(args_prod) > 0:", "'clip-norm')] = 
[(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(5, 0.0)] #args3[('fused',", "= [(5, 0.0)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(False, 8,", "args.p: jobs.append(job_cmd) s.add_job(logfolder, repo, change_dir, cmds, time_hours, fp16, cores=cores_per_job, mem=mem,", "[3072] #args3['update-freq'] = [2] key = ('max-tokens', 'decoder-embed-dim', 'decoder-ffn-embed-dim', 'update-freq',", "= 56250 args2['total-num-update'] = 56250 #args2['lr-scheduler'] = 'cosine' #args2['warmup-updates'] =", "= 'blockwise5' constraint = 'volta32gb' # 1024 tokens * 8", "argparse.ArgumentParser(description='Compute script.') parser.add_argument('--dry', action='store_true') parser.add_argument('--verbose', action='store_true') parser.add_argument('--p', type=float, default=1.0, help='Probability", "2.43bn, 1.41bn logfolder = 'adam/cc100/{0}'.format(name) ckp_name = logfolder #time_hours =", "[(100, 0.1)] #args3['decoder-embed-dim'] = [2048+256] #args3['decoder-ffn-embed-dim'] = [8192+2048] #args3['max-tokens'] =", "(-0.0001395*math.log(1.41e9)) #args3[lrkey].append((lr, lr+1e-8, lr*0.1, lr*0.1 + 1e-8)) # adafactor #args3[('percentile-clipping',", "keyvalues = [] for v in values: if v is", "= [(True, 32, 'quantile', 1), (False, 8, 'quantile', 1), (False,", "8-bit #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)] #args3[('percentile-clipping', 'clip-norm')] = [(100,", "' --{0} {1}'.format(key, value) args_prod = [] for key, values", "(False, 8, 'dynamic_tree', 1), (False, 8, 'quantile', 25)] #args3[('fused', 'adam-bits',", "'train11' args4 = [] args5 = {} args6 = {}", "np import itertools import gpuscheduler import argparse import os import", "print('Jobs will be written to: {0}'.format(join('/private/home/timdettmers/logs/', logfolder))) print('Jobs will be", "'.format(v) else: arg += '--{0} {1} '.format(key[i], v) keyvalues.append(arg) elif", "= parser.parse_args() gpus = 128 cmd = 'fairseq-train 
/private/home/namangoyal/dataset/data-bin/bookwiki_CC-NEWS_openwebtext_stories_cc100-mmap2-bin --distributed-world-size", "#args3[key].append((2048,2048+256,8192+2048,2)) ##args3[key].append((2048,2688,10752,2)) # #lr = 0.003239 + (-0.0001395*math.log(1.92e9)) #args3[lrkey].append((lr, lr+1e-8,", "'stable-emb', 'no-scale-embedding')] = [(False, False, False)] #args3[('use-bnb', 'stable-emb', 'no-scale-embedding', 'optim-bits')]", "'cosine' #args2['warmup-updates'] = 3000 #args2['max-update'] = 56250*4 args2['fp16-scale-window'] = 250", "= [(100, 0.1)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32,", "= [(True, True, True, True)] args3[key].append((2048,2048,8192,8, 0.00075)) #args3[key].append((2048,2048,8192,8, 0.00045)) #args3[key].append((2048,2688,10752,2))", "val in values: job_cmd += ' {0}' .format(val) #job_cmd +=", "{} name = 'blockwise5' constraint = 'volta32gb' # 1024 tokens", "print('') print('Total jobs', len(jobs)) print('Time hours: {0}'.format(time_hours)) print('GPUs: {0}'.format(gpus)) print('Jobs", "= 0.003239 + (-0.0001395*math.log(2.43e9)) #args3[lrkey].append((lr, 0.0)) #args2['train-subset'] = 'train11' args4", "v in enumerate(tups): if v is True: v = ''", "'quantile', 1)] args3['optimizer'] = ['adam'] args3[('use-bnb', 'optim-bits')] = [(True, 8)]", "lr*0.1 + 1e-8)) # adafactor #args3[('percentile-clipping', 'clip-norm')] = [(100, 0.1)]", "new_args.append([arg]) args_prod = new_args jobs = [] if len(args4) ==", "'{0} '.format(v) else: arg += '--{0} {1} '.format(key[i], v) keyvalues.append(arg)", "25)] #args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)]#,", "arg += '{0} '.format(v) else: arg += '--{0} {1} '.format(key[i],", "#args2['optimizer'] = 'adafactor' #args2['beta1'] = 0.9 #args2['decay-rate'] = 0.999 ##args3[key].append((2048,2048,8192,8,", "args_prod = list(product(*args_prod)) else: new_args = [] if len(args_prod) >", "= [2048+256] 
#args3['decoder-ffn-embed-dim'] = [8192+2048] #args3['max-tokens'] = [3072] #args3['update-freq'] =", "= argparse.ArgumentParser(description='Compute script.') parser.add_argument('--dry', action='store_true') parser.add_argument('--verbose', action='store_true') parser.add_argument('--p', type=float, default=1.0,", "' ') job_cmd = job_cmd + ' --seed {0}'.format(seed) checkpoint_dir", "simple --fp16-no-flatten-grads --ignore-unused-valid-subsets'.format(gpus) args2 = {} name = 'blockwise5' constraint", "--{0} {1}'.format(key, value) args_prod = [] for key, values in", "is False: continue if len(key[i]) == 0: arg += '{0}", "in values: arg = '' for i, v in enumerate(tups):", "value) args_prod = [] for key, values in args3.items(): if", "#args3[('fused', 'adam-bits', 'adam8bits-method', 'adam8bits-qfreq')] = [(True, 32, 'quantile', 1)] ##args3[key].append((2048,2048,8192,8,", "+ ' --{0} {1}'.format(key, value) args_prod = [] for key,", "8, 'dynamic_tree', 1), (False, 8, 'quantile', 1)] args3['optimizer'] = ['adam']", "job_cmd.replace('--fp16 ', ' ') job_cmd = job_cmd + ' --seed", "1.41bn logfolder = 'adam/cc100/{0}'.format(name) ckp_name = logfolder #time_hours = 24*2" ]
[ "from boa3.builtin import public from boa3.builtin.contract import Nep5TransferEvent transfer =", "transfer = Nep5TransferEvent @public def Main(from_addr: bytes, to_addr: bytes, amount:", "<filename>boa3_test/test_sc/event_test/EventNep5Transfer.py from boa3.builtin import public from boa3.builtin.contract import Nep5TransferEvent transfer", "from boa3.builtin.contract import Nep5TransferEvent transfer = Nep5TransferEvent @public def Main(from_addr:", "import Nep5TransferEvent transfer = Nep5TransferEvent @public def Main(from_addr: bytes, to_addr:", "Nep5TransferEvent transfer = Nep5TransferEvent @public def Main(from_addr: bytes, to_addr: bytes,", "Nep5TransferEvent @public def Main(from_addr: bytes, to_addr: bytes, amount: int): transfer(from_addr,", "import public from boa3.builtin.contract import Nep5TransferEvent transfer = Nep5TransferEvent @public", "= Nep5TransferEvent @public def Main(from_addr: bytes, to_addr: bytes, amount: int):", "def Main(from_addr: bytes, to_addr: bytes, amount: int): transfer(from_addr, to_addr, amount)", "boa3.builtin.contract import Nep5TransferEvent transfer = Nep5TransferEvent @public def Main(from_addr: bytes,", "boa3.builtin import public from boa3.builtin.contract import Nep5TransferEvent transfer = Nep5TransferEvent", "public from boa3.builtin.contract import Nep5TransferEvent transfer = Nep5TransferEvent @public def", "@public def Main(from_addr: bytes, to_addr: bytes, amount: int): transfer(from_addr, to_addr," ]
[ "any later version. # # The Topical Guide is distributed", "# for more details. # # You should have received", "You should have received a copy of the GNU Affero", "Affero General Public License # along with the Topical Guide.", "from abtest.settings import TEST_LIST from visualize import root # Create", "of the GNU Affero General Public License as published by", "but # WITHOUT ANY WARRANTY; without even the implied warranty", "Young University, 3760 HBLL, # Provo, UT 84602, (801) 422-9339", "the License, or (at your # option) any later version.", "with the Topical Guide. If not, see <http://www.gnu.org/licenses/>. # #", "is part of the Topical Guide <http://nlp.cs.byu.edu/topic_browser>. # # The", "or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU", "getattr(__import__(package, fromlist=[view_package]), view_package) return view(request, args, kwargs) # This view", "package_list = TEST_LIST[arg]['VIEW_PACKAGE'].split('.') view_package = package_list.pop() package = \".\".join(package_list) view", "Young University # # This file is part of the", "even the implied warranty of MERCHANTABILITY or # FITNESS FOR", "given url does not match anything def unknown(request, arg, *args,", "file is part of the Topical Guide <http://nlp.cs.byu.edu/topic_browser>. # #", "University, 3760 HBLL, # Provo, UT 84602, (801) 422-9339 or", "published by the # Free Software Foundation, either version 3", "HttpResponse import abtest from abtest.settings import TEST_LIST from visualize import", "can redistribute it and/or modify it # under the terms", "software: you can redistribute it and/or modify it # under", "a copy of the GNU Affero General Public License #", "If you have inquiries regarding any further use of the", "is called when the given url does not match anything", "# FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Affero", "copy of the GNU Affero General Public License # along", "package_list.pop() package = \".\".join(package_list) view = getattr(__import__(package, fromlist=[view_package]), view_package) return", "# # The Topical Guide is distributed in the hope", "arg, *args, **kwargs): # redirect to the root view return", "been hit instead\") package_list = TEST_LIST[arg]['VIEW_PACKAGE'].split('.') view_package = package_list.pop() package", "use of the Topical Guide, please # contact the Copyright", "The Topical Guide # Copyright 2010-2011 Brigham Young University #", "General Public License as published by the # Free Software", "PURPOSE. See the GNU Affero General Public License # for", "Topical Guide is distributed in the hope that it will", "in TEST_LIST: print(\"Error! Unknown view should have been hit instead\")", "# Create your views here. def test(request, arg, *args, **kwargs):", "abtest.settings import TEST_LIST from visualize import root # Create your", "of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See", "def test(request, arg, *args, **kwargs): if arg not in TEST_LIST:", "def unknown(request, arg, *args, **kwargs): # redirect to the root", "of the Topical Guide, please # contact the Copyright Licensing", "terms of the GNU Affero General Public License as published", "the hope that it will be useful, but # WITHOUT", "Affero General Public License as published by the # Free", "part of the Topical Guide <http://nlp.cs.byu.edu/topic_browser>. # # The Topical", "ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or", "License # for more details. # # You should have", "for more details. # # You should have received a", "have been hit instead\") package_list = TEST_LIST[arg]['VIEW_PACKAGE'].split('.') view_package = package_list.pop()", "PARTICULAR PURPOSE. See the GNU Affero General Public License #", "arg not in TEST_LIST: print(\"Error! 
Unknown view should have been", "in the hope that it will be useful, but #", "# along with the Topical Guide. If not, see <http://www.gnu.org/licenses/>.", "the terms of the GNU Affero General Public License as", "GNU Affero General Public License as published by the #", "Guide # Copyright 2010-2011 Brigham Young University # # This", "django.shortcuts import render, redirect from django.http import HttpResponse import abtest", "Copyright 2010-2011 Brigham Young University # # This file is", "Public License # along with the Topical Guide. If not,", "redirect from django.http import HttpResponse import abtest from abtest.settings import", "view should have been hit instead\") package_list = TEST_LIST[arg]['VIEW_PACKAGE'].split('.') view_package", "the Topical Guide <http://nlp.cs.byu.edu/topic_browser>. # # The Topical Guide is", "This view is called when the given url does not", "distributed in the hope that it will be useful, but", "UT 84602, (801) 422-9339 or 422-3821, e-mail <EMAIL>. from __future__", "import print_function from django.shortcuts import render, redirect from django.http import", "have received a copy of the GNU Affero General Public", "License as published by the # Free Software Foundation, either", "(801) 422-9339 or 422-3821, e-mail <EMAIL>. from __future__ import print_function", "you can redistribute it and/or modify it # under the", "received a copy of the GNU Affero General Public License", "free software: you can redistribute it and/or modify it #", "WARRANTY; without even the implied warranty of MERCHANTABILITY or #", "or 422-3821, e-mail <EMAIL>. from __future__ import print_function from django.shortcuts", "from django.http import HttpResponse import abtest from abtest.settings import TEST_LIST", "# under the terms of the GNU Affero General Public", "Affero General Public License # for more details. 
# #", "= package_list.pop() package = \".\".join(package_list) view = getattr(__import__(package, fromlist=[view_package]), view_package)", "General Public License # for more details. # # You", "does not match anything def unknown(request, arg, *args, **kwargs): #", "3760 HBLL, # Provo, UT 84602, (801) 422-9339 or 422-3821,", "match anything def unknown(request, arg, *args, **kwargs): # redirect to", "= TEST_LIST[arg]['VIEW_PACKAGE'].split('.') view_package = package_list.pop() package = \".\".join(package_list) view =", "test(request, arg, *args, **kwargs): if arg not in TEST_LIST: print(\"Error!", "# This file is part of the Topical Guide <http://nlp.cs.byu.edu/topic_browser>.", "422-3821, e-mail <EMAIL>. from __future__ import print_function from django.shortcuts import", "more details. # # You should have received a copy", "along with the Topical Guide. If not, see <http://www.gnu.org/licenses/>. #", "not in TEST_LIST: print(\"Error! Unknown view should have been hit", "Office, Brigham Young University, 3760 HBLL, # Provo, UT 84602,", "# # The Topical Guide is free software: you can", "when the given url does not match anything def unknown(request,", "License, or (at your # option) any later version. #", "MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the", "view = getattr(__import__(package, fromlist=[view_package]), view_package) return view(request, args, kwargs) #", "print(\"Error! Unknown view should have been hit instead\") package_list =", "GNU Affero General Public License # for more details. #", "the implied warranty of MERCHANTABILITY or # FITNESS FOR A", "Topical Guide, please # contact the Copyright Licensing Office, Brigham", "e-mail <EMAIL>. from __future__ import print_function from django.shortcuts import render,", "you have inquiries regarding any further use of the Topical", "later version. 
# # The Topical Guide is distributed in", "view is called when the given url does not match", "without even the implied warranty of MERCHANTABILITY or # FITNESS", "your # option) any later version. # # The Topical", "Create your views here. def test(request, arg, *args, **kwargs): if", "here. def test(request, arg, *args, **kwargs): if arg not in", "If not, see <http://www.gnu.org/licenses/>. # # If you have inquiries", "arg, *args, **kwargs): if arg not in TEST_LIST: print(\"Error! Unknown", "should have been hit instead\") package_list = TEST_LIST[arg]['VIEW_PACKAGE'].split('.') view_package =", "Copyright Licensing Office, Brigham Young University, 3760 HBLL, # Provo,", "Brigham Young University # # This file is part of", "# Provo, UT 84602, (801) 422-9339 or 422-3821, e-mail <EMAIL>.", "it # under the terms of the GNU Affero General", "is free software: you can redistribute it and/or modify it", "useful, but # WITHOUT ANY WARRANTY; without even the implied", "Foundation, either version 3 of the License, or (at your", "anything def unknown(request, arg, *args, **kwargs): # redirect to the", "Public License as published by the # Free Software Foundation,", "inquiries regarding any further use of the Topical Guide, please", "from django.shortcuts import render, redirect from django.http import HttpResponse import", "__future__ import print_function from django.shortcuts import render, redirect from django.http", "# # This file is part of the Topical Guide", "either version 3 of the License, or (at your #", "should have received a copy of the GNU Affero General", "# contact the Copyright Licensing Office, Brigham Young University, 3760", "import HttpResponse import abtest from abtest.settings import TEST_LIST from visualize", "url does not match anything def unknown(request, arg, *args, **kwargs):", "it and/or modify it # under the terms of the", "unknown(request, arg, *args, **kwargs): # redirect to the root view", "the # Free Software Foundation, either version 3 
of the", "the GNU Affero General Public License as published by the", "# The Topical Guide is free software: you can redistribute", "the GNU Affero General Public License # for more details.", "not, see <http://www.gnu.org/licenses/>. # # If you have inquiries regarding", "University # # This file is part of the Topical", "Guide. If not, see <http://www.gnu.org/licenses/>. # # If you have", "any further use of the Topical Guide, please # contact", "2010-2011 Brigham Young University # # This file is part", "Guide is distributed in the hope that it will be", "# Copyright 2010-2011 Brigham Young University # # This file", "Topical Guide. If not, see <http://www.gnu.org/licenses/>. # # If you", "**kwargs): if arg not in TEST_LIST: print(\"Error! Unknown view should", "of the License, or (at your # option) any later", "of the GNU Affero General Public License # along with", "print_function from django.shortcuts import render, redirect from django.http import HttpResponse", "Provo, UT 84602, (801) 422-9339 or 422-3821, e-mail <EMAIL>. from", "view_package) return view(request, args, kwargs) # This view is called", "<http://nlp.cs.byu.edu/topic_browser>. # # The Topical Guide is free software: you", "django.http import HttpResponse import abtest from abtest.settings import TEST_LIST from", "visualize import root # Create your views here. def test(request,", "The Topical Guide is free software: you can redistribute it", "TEST_LIST from visualize import root # Create your views here.", "view(request, args, kwargs) # This view is called when the", "Brigham Young University, 3760 HBLL, # Provo, UT 84602, (801)", "root # Create your views here. 
def test(request, arg, *args,", "modify it # under the terms of the GNU Affero", "# WITHOUT ANY WARRANTY; without even the implied warranty of", "# # If you have inquiries regarding any further use", "redistribute it and/or modify it # under the terms of", "Free Software Foundation, either version 3 of the License, or", "GNU Affero General Public License # along with the Topical", "Public License # for more details. # # You should", "of the Topical Guide <http://nlp.cs.byu.edu/topic_browser>. # # The Topical Guide", "# If you have inquiries regarding any further use of", "# The Topical Guide # Copyright 2010-2011 Brigham Young University", "\".\".join(package_list) view = getattr(__import__(package, fromlist=[view_package]), view_package) return view(request, args, kwargs)", "option) any later version. # # The Topical Guide is", "args, kwargs) # This view is called when the given", "view_package = package_list.pop() package = \".\".join(package_list) view = getattr(__import__(package, fromlist=[view_package]),", "further use of the Topical Guide, please # contact the", "have inquiries regarding any further use of the Topical Guide,", "abtest from abtest.settings import TEST_LIST from visualize import root #", "TEST_LIST: print(\"Error! Unknown view should have been hit instead\") package_list", "not match anything def unknown(request, arg, *args, **kwargs): # redirect", "version. # # The Topical Guide is distributed in the", "the given url does not match anything def unknown(request, arg,", "as published by the # Free Software Foundation, either version", "General Public License # along with the Topical Guide. 
If", "please # contact the Copyright Licensing Office, Brigham Young University,", "= \".\".join(package_list) view = getattr(__import__(package, fromlist=[view_package]), view_package) return view(request, args,", "return view(request, args, kwargs) # This view is called when", "3 of the License, or (at your # option) any", "contact the Copyright Licensing Office, Brigham Young University, 3760 HBLL,", "implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR", "# This view is called when the given url does", "Licensing Office, Brigham Young University, 3760 HBLL, # Provo, UT", "package = \".\".join(package_list) view = getattr(__import__(package, fromlist=[view_package]), view_package) return view(request,", "called when the given url does not match anything def", "regarding any further use of the Topical Guide, please #", "be useful, but # WITHOUT ANY WARRANTY; without even the", "# The Topical Guide is distributed in the hope that", "by the # Free Software Foundation, either version 3 of", "or (at your # option) any later version. # #", "that it will be useful, but # WITHOUT ANY WARRANTY;", "TEST_LIST[arg]['VIEW_PACKAGE'].split('.') view_package = package_list.pop() package = \".\".join(package_list) view = getattr(__import__(package,", "HBLL, # Provo, UT 84602, (801) 422-9339 or 422-3821, e-mail", "Guide <http://nlp.cs.byu.edu/topic_browser>. # # The Topical Guide is free software:", "see <http://www.gnu.org/licenses/>. # # If you have inquiries regarding any", "hit instead\") package_list = TEST_LIST[arg]['VIEW_PACKAGE'].split('.') view_package = package_list.pop() package =", "84602, (801) 422-9339 or 422-3821, e-mail <EMAIL>. from __future__ import", "render, redirect from django.http import HttpResponse import abtest from abtest.settings", "under the terms of the GNU Affero General Public License", "from visualize import root # Create your views here. 
def", "= getattr(__import__(package, fromlist=[view_package]), view_package) return view(request, args, kwargs) # This", "from __future__ import print_function from django.shortcuts import render, redirect from", "fromlist=[view_package]), view_package) return view(request, args, kwargs) # This view is", "the Topical Guide, please # contact the Copyright Licensing Office,", "<http://www.gnu.org/licenses/>. # # If you have inquiries regarding any further", "See the GNU Affero General Public License # for more", "Guide, please # contact the Copyright Licensing Office, Brigham Young", "kwargs) # This view is called when the given url", "# option) any later version. # # The Topical Guide", "the Copyright Licensing Office, Brigham Young University, 3760 HBLL, #", "hope that it will be useful, but # WITHOUT ANY", "# You should have received a copy of the GNU", "Topical Guide # Copyright 2010-2011 Brigham Young University # #", "instead\") package_list = TEST_LIST[arg]['VIEW_PACKAGE'].split('.') view_package = package_list.pop() package = \".\".join(package_list)", "if arg not in TEST_LIST: print(\"Error! Unknown view should have", "Guide is free software: you can redistribute it and/or modify", "This file is part of the Topical Guide <http://nlp.cs.byu.edu/topic_browser>. #", "Software Foundation, either version 3 of the License, or (at", "your views here. def test(request, arg, *args, **kwargs): if arg", "License # along with the Topical Guide. If not, see", "and/or modify it # under the terms of the GNU", "views here. def test(request, arg, *args, **kwargs): if arg not", "warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE.", "The Topical Guide is distributed in the hope that it", "the Topical Guide. If not, see <http://www.gnu.org/licenses/>. # # If", "import root # Create your views here. def test(request, arg,", "A PARTICULAR PURPOSE. 
See the GNU Affero General Public License", "# Free Software Foundation, either version 3 of the License,", "FOR A PARTICULAR PURPOSE. See the GNU Affero General Public", "Unknown view should have been hit instead\") package_list = TEST_LIST[arg]['VIEW_PACKAGE'].split('.')", "will be useful, but # WITHOUT ANY WARRANTY; without even", "import render, redirect from django.http import HttpResponse import abtest from", "is distributed in the hope that it will be useful,", "it will be useful, but # WITHOUT ANY WARRANTY; without", "details. # # You should have received a copy of", "FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General", "the GNU Affero General Public License # along with the", "422-9339 or 422-3821, e-mail <EMAIL>. from __future__ import print_function from", "(at your # option) any later version. # # The", "*args, **kwargs): # redirect to the root view return redirect('/')", "*args, **kwargs): if arg not in TEST_LIST: print(\"Error! Unknown view", "Topical Guide is free software: you can redistribute it and/or", "<EMAIL>. from __future__ import print_function from django.shortcuts import render, redirect", "import TEST_LIST from visualize import root # Create your views", "version 3 of the License, or (at your # option)", "# # You should have received a copy of the", "import abtest from abtest.settings import TEST_LIST from visualize import root", "Topical Guide <http://nlp.cs.byu.edu/topic_browser>. # # The Topical Guide is free", "WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY" ]
[ "= [\"du -sh /usr\", \"rm --help\"] packpath = _create_packfile(cmd, tmpdir)", "[\"ls -l /\", \"grep --help\"] _create_packfile(cmd, tmpdir) pattern = os.path.join(tmpdir,", "--help\"] _create_packfile(cmd, tmpdir) pattern = os.path.join(tmpdir, '*.rpz') packfiles = glob(pattern)", "def _create_packfile(commands, dir): \"\"\"Create packfile from list `commands` in debian:stretch", "= ReproZipMinimizer(container.id, commands, packfile_save_dir=dir) packfile_path = minimizer.run() except: raise finally:", "new_name = \"first-pack.rpz\" os.rename(packpath, os.path.join(tmpdir, new_name)) cmd = [\"ls -l", "= os.path.join(tmpdir, 'merged.rpz') merge_pack_files(outfile=outfile, packfiles=packfiles) with tarfile.open(outfile) as tar: tar.extractall(path=tmpdir)", "os.rename(packpath, os.path.join(tmpdir, new_name)) cmd = [\"ls -l /\", \"grep --help\"]", "cmd = [\"du -sh /usr\", \"rm --help\"] packpath = _create_packfile(cmd,", "assert os.path.isfile(os.path.join(usr_path, 'rm')) assert not os.path.isfile(os.path.join(usr_path, 'sed')) assert not os.path.isfile(os.path.join(usr_path,", "tmpdir) pattern = os.path.join(tmpdir, '*.rpz') packfiles = glob(pattern) assert packfiles,", "outfile = os.path.join(tmpdir, 'merged.rpz') merge_pack_files(outfile=outfile, packfiles=packfiles) with tarfile.open(outfile) as tar:", "import glob import os import tarfile import tempfile from neurodocker.docker", "'DATA', 'usr', 'bin') assert os.path.isfile(os.path.join(usr_path, 'du')) assert os.path.isfile(os.path.join(usr_path, 'grep')) assert", "ReproZipMinimizer from neurodocker.reprozip.merge import merge_pack_files def _create_packfile(commands, dir): \"\"\"Create packfile", "os.path.isfile(os.path.join(usr_path, 'rm')) assert not os.path.isfile(os.path.join(usr_path, 'sed')) assert not os.path.isfile(os.path.join(usr_path, 'tar'))", "\"packfiles not found\" outfile = os.path.join(tmpdir, 'merged.rpz') merge_pack_files(outfile=outfile, packfiles=packfiles) with", "glob import os 
import tarfile import tempfile from neurodocker.docker import", "client from neurodocker.reprozip.trace import ReproZipMinimizer from neurodocker.reprozip.merge import merge_pack_files def", "= client.containers.run('debian:stretch', detach=True, tty=True, security_opt=['seccomp:unconfined']) try: minimizer = ReproZipMinimizer(container.id, commands,", "for merge.py.\"\"\" from __future__ import absolute_import, division, print_function from glob", "assert os.path.isfile(os.path.join(usr_path, 'du')) assert os.path.isfile(os.path.join(usr_path, 'grep')) assert os.path.isfile(os.path.join(usr_path, 'ls')) assert", "as tardata: tardata.extractall(path=tmpdir) usr_path = os.path.join(tmpdir, 'DATA', 'usr', 'bin') assert", "return packfile_path def test_merge_pack_files(): tmpdir = tempfile.mkdtemp() cmd = [\"du", "finally: container.stop() container.remove() return packfile_path def test_merge_pack_files(): tmpdir = tempfile.mkdtemp()", "import absolute_import, division, print_function from glob import glob import os", "os.path.join(tmpdir, 'DATA', 'usr', 'bin') assert os.path.isfile(os.path.join(usr_path, 'du')) assert os.path.isfile(os.path.join(usr_path, 'grep'))", "container = client.containers.run('debian:stretch', detach=True, tty=True, security_opt=['seccomp:unconfined']) try: minimizer = ReproZipMinimizer(container.id,", "absolute_import, division, print_function from glob import glob import os import", "raise finally: container.stop() container.remove() return packfile_path def test_merge_pack_files(): tmpdir =", "container.remove() return packfile_path def test_merge_pack_files(): tmpdir = tempfile.mkdtemp() cmd =", "import tempfile from neurodocker.docker import client from neurodocker.reprozip.trace import ReproZipMinimizer", "found\" outfile = os.path.join(tmpdir, 'merged.rpz') merge_pack_files(outfile=outfile, packfiles=packfiles) with tarfile.open(outfile) as", "merge.py.\"\"\" from __future__ import absolute_import, division, print_function from glob 
import", "os.path.join(tmpdir, new_name)) cmd = [\"ls -l /\", \"grep --help\"] _create_packfile(cmd,", "tarfile import tempfile from neurodocker.docker import client from neurodocker.reprozip.trace import", "\"first-pack.rpz\" os.rename(packpath, os.path.join(tmpdir, new_name)) cmd = [\"ls -l /\", \"grep", "import tarfile import tempfile from neurodocker.docker import client from neurodocker.reprozip.trace", "in debian:stretch container.\"\"\" container = client.containers.run('debian:stretch', detach=True, tty=True, security_opt=['seccomp:unconfined']) try:", "usr_path = os.path.join(tmpdir, 'DATA', 'usr', 'bin') assert os.path.isfile(os.path.join(usr_path, 'du')) assert", "from neurodocker.reprozip.merge import merge_pack_files def _create_packfile(commands, dir): \"\"\"Create packfile from", "glob import glob import os import tarfile import tempfile from", "tardata.extractall(path=tmpdir) usr_path = os.path.join(tmpdir, 'DATA', 'usr', 'bin') assert os.path.isfile(os.path.join(usr_path, 'du'))", "security_opt=['seccomp:unconfined']) try: minimizer = ReproZipMinimizer(container.id, commands, packfile_save_dir=dir) packfile_path = minimizer.run()", "packfiles = glob(pattern) assert packfiles, \"packfiles not found\" outfile =", "'ls')) assert os.path.isfile(os.path.join(usr_path, 'rm')) assert not os.path.isfile(os.path.join(usr_path, 'sed')) assert not", "tardata: tardata.extractall(path=tmpdir) usr_path = os.path.join(tmpdir, 'DATA', 'usr', 'bin') assert os.path.isfile(os.path.join(usr_path,", "/usr\", \"rm --help\"] packpath = _create_packfile(cmd, tmpdir) new_name = \"first-pack.rpz\"", "os.path.join(tmpdir, 'DATA.tar.gz') with tarfile.open(datafile) as tardata: tardata.extractall(path=tmpdir) usr_path = os.path.join(tmpdir,", "dir): \"\"\"Create packfile from list `commands` in debian:stretch container.\"\"\" container", "'grep')) assert os.path.isfile(os.path.join(usr_path, 'ls')) assert os.path.isfile(os.path.join(usr_path, 'rm')) assert not 
os.path.isfile(os.path.join(usr_path,", "'usr', 'bin') assert os.path.isfile(os.path.join(usr_path, 'du')) assert os.path.isfile(os.path.join(usr_path, 'grep')) assert os.path.isfile(os.path.join(usr_path,", "__future__ import absolute_import, division, print_function from glob import glob import", "ReproZipMinimizer(container.id, commands, packfile_save_dir=dir) packfile_path = minimizer.run() except: raise finally: container.stop()", "from __future__ import absolute_import, division, print_function from glob import glob", "import client from neurodocker.reprozip.trace import ReproZipMinimizer from neurodocker.reprozip.merge import merge_pack_files", "tempfile.mkdtemp() cmd = [\"du -sh /usr\", \"rm --help\"] packpath =", "'DATA.tar.gz') with tarfile.open(datafile) as tardata: tardata.extractall(path=tmpdir) usr_path = os.path.join(tmpdir, 'DATA',", "assert packfiles, \"packfiles not found\" outfile = os.path.join(tmpdir, 'merged.rpz') merge_pack_files(outfile=outfile,", "commands, packfile_save_dir=dir) packfile_path = minimizer.run() except: raise finally: container.stop() container.remove()", "os.path.isfile(os.path.join(usr_path, 'grep')) assert os.path.isfile(os.path.join(usr_path, 'ls')) assert os.path.isfile(os.path.join(usr_path, 'rm')) assert not", "'du')) assert os.path.isfile(os.path.join(usr_path, 'grep')) assert os.path.isfile(os.path.join(usr_path, 'ls')) assert os.path.isfile(os.path.join(usr_path, 'rm'))", "'*.rpz') packfiles = glob(pattern) assert packfiles, \"packfiles not found\" outfile", "_create_packfile(commands, dir): \"\"\"Create packfile from list `commands` in debian:stretch container.\"\"\"", "not found\" outfile = os.path.join(tmpdir, 'merged.rpz') merge_pack_files(outfile=outfile, packfiles=packfiles) with tarfile.open(outfile)", "new_name)) cmd = [\"ls -l /\", \"grep --help\"] _create_packfile(cmd, tmpdir)", "= os.path.join(tmpdir, 'DATA', 'usr', 'bin') assert os.path.isfile(os.path.join(usr_path, 'du')) assert 
os.path.isfile(os.path.join(usr_path,", "-l /\", \"grep --help\"] _create_packfile(cmd, tmpdir) pattern = os.path.join(tmpdir, '*.rpz')", "\"rm --help\"] packpath = _create_packfile(cmd, tmpdir) new_name = \"first-pack.rpz\" os.rename(packpath,", "neurodocker.reprozip.trace import ReproZipMinimizer from neurodocker.reprozip.merge import merge_pack_files def _create_packfile(commands, dir):", "datafile = os.path.join(tmpdir, 'DATA.tar.gz') with tarfile.open(datafile) as tardata: tardata.extractall(path=tmpdir) usr_path", "packfile_save_dir=dir) packfile_path = minimizer.run() except: raise finally: container.stop() container.remove() return", "print_function from glob import glob import os import tarfile import", "'bin') assert os.path.isfile(os.path.join(usr_path, 'du')) assert os.path.isfile(os.path.join(usr_path, 'grep')) assert os.path.isfile(os.path.join(usr_path, 'ls'))", "from neurodocker.reprozip.trace import ReproZipMinimizer from neurodocker.reprozip.merge import merge_pack_files def _create_packfile(commands,", "assert os.path.isfile(os.path.join(usr_path, 'ls')) assert os.path.isfile(os.path.join(usr_path, 'rm')) assert not os.path.isfile(os.path.join(usr_path, 'sed'))", "tarfile.open(outfile) as tar: tar.extractall(path=tmpdir) datafile = os.path.join(tmpdir, 'DATA.tar.gz') with tarfile.open(datafile)", "list `commands` in debian:stretch container.\"\"\" container = client.containers.run('debian:stretch', detach=True, tty=True,", "\"\"\"Tests for merge.py.\"\"\" from __future__ import absolute_import, division, print_function from", "tarfile.open(datafile) as tardata: tardata.extractall(path=tmpdir) usr_path = os.path.join(tmpdir, 'DATA', 'usr', 'bin')", "tar.extractall(path=tmpdir) datafile = os.path.join(tmpdir, 'DATA.tar.gz') with tarfile.open(datafile) as tardata: tardata.extractall(path=tmpdir)", "packfiles=packfiles) with tarfile.open(outfile) as tar: tar.extractall(path=tmpdir) datafile = os.path.join(tmpdir, 'DATA.tar.gz')", "assert 
os.path.isfile(os.path.join(usr_path, 'grep')) assert os.path.isfile(os.path.join(usr_path, 'ls')) assert os.path.isfile(os.path.join(usr_path, 'rm')) assert", "-sh /usr\", \"rm --help\"] packpath = _create_packfile(cmd, tmpdir) new_name =", "= glob(pattern) assert packfiles, \"packfiles not found\" outfile = os.path.join(tmpdir,", "container.stop() container.remove() return packfile_path def test_merge_pack_files(): tmpdir = tempfile.mkdtemp() cmd", "merge_pack_files def _create_packfile(commands, dir): \"\"\"Create packfile from list `commands` in", "\"grep --help\"] _create_packfile(cmd, tmpdir) pattern = os.path.join(tmpdir, '*.rpz') packfiles =", "with tarfile.open(datafile) as tardata: tardata.extractall(path=tmpdir) usr_path = os.path.join(tmpdir, 'DATA', 'usr',", "tmpdir) new_name = \"first-pack.rpz\" os.rename(packpath, os.path.join(tmpdir, new_name)) cmd = [\"ls", "--help\"] packpath = _create_packfile(cmd, tmpdir) new_name = \"first-pack.rpz\" os.rename(packpath, os.path.join(tmpdir,", "container.\"\"\" container = client.containers.run('debian:stretch', detach=True, tty=True, security_opt=['seccomp:unconfined']) try: minimizer =", "packfile_path def test_merge_pack_files(): tmpdir = tempfile.mkdtemp() cmd = [\"du -sh", "except: raise finally: container.stop() container.remove() return packfile_path def test_merge_pack_files(): tmpdir", "`commands` in debian:stretch container.\"\"\" container = client.containers.run('debian:stretch', detach=True, tty=True, security_opt=['seccomp:unconfined'])", "= minimizer.run() except: raise finally: container.stop() container.remove() return packfile_path def", "\"\"\"Create packfile from list `commands` in debian:stretch container.\"\"\" container =", "os import tarfile import tempfile from neurodocker.docker import client from", "= tempfile.mkdtemp() cmd = [\"du -sh /usr\", \"rm --help\"] packpath", "detach=True, tty=True, security_opt=['seccomp:unconfined']) try: minimizer = ReproZipMinimizer(container.id, 
commands, packfile_save_dir=dir) packfile_path", "'merged.rpz') merge_pack_files(outfile=outfile, packfiles=packfiles) with tarfile.open(outfile) as tar: tar.extractall(path=tmpdir) datafile =", "os.path.isfile(os.path.join(usr_path, 'ls')) assert os.path.isfile(os.path.join(usr_path, 'rm')) assert not os.path.isfile(os.path.join(usr_path, 'sed')) assert", "minimizer.run() except: raise finally: container.stop() container.remove() return packfile_path def test_merge_pack_files():", "glob(pattern) assert packfiles, \"packfiles not found\" outfile = os.path.join(tmpdir, 'merged.rpz')", "packpath = _create_packfile(cmd, tmpdir) new_name = \"first-pack.rpz\" os.rename(packpath, os.path.join(tmpdir, new_name))", "tty=True, security_opt=['seccomp:unconfined']) try: minimizer = ReproZipMinimizer(container.id, commands, packfile_save_dir=dir) packfile_path =", "_create_packfile(cmd, tmpdir) pattern = os.path.join(tmpdir, '*.rpz') packfiles = glob(pattern) assert", "test_merge_pack_files(): tmpdir = tempfile.mkdtemp() cmd = [\"du -sh /usr\", \"rm", "neurodocker.docker import client from neurodocker.reprozip.trace import ReproZipMinimizer from neurodocker.reprozip.merge import", "= [\"ls -l /\", \"grep --help\"] _create_packfile(cmd, tmpdir) pattern =", "os.path.join(tmpdir, '*.rpz') packfiles = glob(pattern) assert packfiles, \"packfiles not found\"", "as tar: tar.extractall(path=tmpdir) datafile = os.path.join(tmpdir, 'DATA.tar.gz') with tarfile.open(datafile) as", "minimizer = ReproZipMinimizer(container.id, commands, packfile_save_dir=dir) packfile_path = minimizer.run() except: raise", "packfile_path = minimizer.run() except: raise finally: container.stop() container.remove() return packfile_path", "tempfile from neurodocker.docker import client from neurodocker.reprozip.trace import ReproZipMinimizer from", "/\", \"grep --help\"] _create_packfile(cmd, tmpdir) pattern = os.path.join(tmpdir, '*.rpz') packfiles", "from neurodocker.docker import client from 
neurodocker.reprozip.trace import ReproZipMinimizer from neurodocker.reprozip.merge", "cmd = [\"ls -l /\", \"grep --help\"] _create_packfile(cmd, tmpdir) pattern", "with tarfile.open(outfile) as tar: tar.extractall(path=tmpdir) datafile = os.path.join(tmpdir, 'DATA.tar.gz') with", "pattern = os.path.join(tmpdir, '*.rpz') packfiles = glob(pattern) assert packfiles, \"packfiles", "import os import tarfile import tempfile from neurodocker.docker import client", "from glob import glob import os import tarfile import tempfile", "= os.path.join(tmpdir, '*.rpz') packfiles = glob(pattern) assert packfiles, \"packfiles not", "tar: tar.extractall(path=tmpdir) datafile = os.path.join(tmpdir, 'DATA.tar.gz') with tarfile.open(datafile) as tardata:", "_create_packfile(cmd, tmpdir) new_name = \"first-pack.rpz\" os.rename(packpath, os.path.join(tmpdir, new_name)) cmd =", "= _create_packfile(cmd, tmpdir) new_name = \"first-pack.rpz\" os.rename(packpath, os.path.join(tmpdir, new_name)) cmd", "packfiles, \"packfiles not found\" outfile = os.path.join(tmpdir, 'merged.rpz') merge_pack_files(outfile=outfile, packfiles=packfiles)", "= os.path.join(tmpdir, 'DATA.tar.gz') with tarfile.open(datafile) as tardata: tardata.extractall(path=tmpdir) usr_path =", "import ReproZipMinimizer from neurodocker.reprozip.merge import merge_pack_files def _create_packfile(commands, dir): \"\"\"Create", "client.containers.run('debian:stretch', detach=True, tty=True, security_opt=['seccomp:unconfined']) try: minimizer = ReproZipMinimizer(container.id, commands, packfile_save_dir=dir)", "= \"first-pack.rpz\" os.rename(packpath, os.path.join(tmpdir, new_name)) cmd = [\"ls -l /\",", "os.path.isfile(os.path.join(usr_path, 'du')) assert os.path.isfile(os.path.join(usr_path, 'grep')) assert os.path.isfile(os.path.join(usr_path, 'ls')) assert os.path.isfile(os.path.join(usr_path,", "def test_merge_pack_files(): tmpdir = tempfile.mkdtemp() cmd = [\"du -sh /usr\",", "[\"du -sh /usr\", \"rm --help\"] packpath = 
_create_packfile(cmd, tmpdir) new_name", "debian:stretch container.\"\"\" container = client.containers.run('debian:stretch', detach=True, tty=True, security_opt=['seccomp:unconfined']) try: minimizer", "os.path.join(tmpdir, 'merged.rpz') merge_pack_files(outfile=outfile, packfiles=packfiles) with tarfile.open(outfile) as tar: tar.extractall(path=tmpdir) datafile", "import merge_pack_files def _create_packfile(commands, dir): \"\"\"Create packfile from list `commands`", "packfile from list `commands` in debian:stretch container.\"\"\" container = client.containers.run('debian:stretch',", "try: minimizer = ReproZipMinimizer(container.id, commands, packfile_save_dir=dir) packfile_path = minimizer.run() except:", "tmpdir = tempfile.mkdtemp() cmd = [\"du -sh /usr\", \"rm --help\"]", "from list `commands` in debian:stretch container.\"\"\" container = client.containers.run('debian:stretch', detach=True,", "neurodocker.reprozip.merge import merge_pack_files def _create_packfile(commands, dir): \"\"\"Create packfile from list", "merge_pack_files(outfile=outfile, packfiles=packfiles) with tarfile.open(outfile) as tar: tar.extractall(path=tmpdir) datafile = os.path.join(tmpdir,", "division, print_function from glob import glob import os import tarfile" ]
[ "LinkStringIO(io.StringIO): def __init__(self): super().__init__() self._write_text = False # switch between", "Default is a dash. For serialisation, an encoding is included", "('<!DOCTYPE html>\\n' '<html>\\n' '<title>{}</title>\\n' '<meta charset=\"{}\">'.format(self._title, self._encoding)) print(html5) return new_target", "is a dash. For serialisation, an encoding is included and", "return count class write_link(contextlib.redirect_stdout): \"\"\"Combine any two subsequent non-empty writes", "yield print('</{}>'.format(name)) class LinkStringIO(io.StringIO): def __init__(self): super().__init__() self._write_text = False", "is included and defaults to UTF-8. Make sure the output", "A HTML document title can be specified, but should not", "self._write_text: count = super().write('<a href=\"') count += super().write(s) count +=", "\"\"\" HTML5 contexts. :author: <NAME> :license: MIT \"\"\" import contextlib", "super().__init__() self._write_text = False # switch between link href=\"...\" and", "'<title>{}</title>\\n' '<meta charset=\"{}\">'.format(self._title, self._encoding)) print(html5) return new_target @contextlib.contextmanager def tag(name):", "non-empty writes into an HTML link.\"\"\" def __init__(self): super().__init__(LinkStringIO()) def", "str(title) self._encoding = encoding def __enter__(self): new_target = contextlib.redirect_stdout.__enter__(self) html5", "print('</{}>'.format(name)) class LinkStringIO(io.StringIO): def __init__(self): super().__init__() self._write_text = False #", "__init__(self, new_target, *, title='-', encoding='utf-8'): super().__init__(new_target) self._title = str(title) self._encoding", "super().__init__(LinkStringIO()) def __exit__(self, exctype, excinst, exctb): super().__exit__(exctype, excinst, exctb) with", "def __init__(self): super().__init__(LinkStringIO()) def __exit__(self, exctype, excinst, exctb): super().__exit__(exctype, excinst,", "the name.\"\"\" print('<{}>'.format(name)) yield 
print('</{}>'.format(name)) class LinkStringIO(io.StringIO): def __init__(self): super().__init__()", "an HTML tag denoted by the name.\"\"\" print('<{}>'.format(name)) yield print('</{}>'.format(name))", "new_target. A HTML document title can be specified, but should", "return # else: if s.isspace(): return super().write(s) # else: if", "False # switch between link href=\"...\" and text def write(self,", "subsequent non-empty writes into an HTML link.\"\"\" def __init__(self): super().__init__(LinkStringIO())", "defaults to UTF-8. Make sure the output (likely ``new_target``) uses", "and text def write(self, s): if not s: return #", "# else: if s.isspace(): return super().write(s) # else: if self._write_text:", "tag denoted by the name.\"\"\" print('<{}>'.format(name)) yield print('</{}>'.format(name)) class LinkStringIO(io.StringIO):", "\"\"\" import contextlib import io import sys __all__ = ['create_document',", "class create_document(contextlib.redirect_stdout): \"\"\"Redirect output to an HTML5 document specified by", "specified, but should not consist of whitespace only. Default is", "\"\"\"Enclose output in an HTML tag denoted by the name.\"\"\"", "count += super().write(s) count += super().write('\">') else: count = super().write(s)", "UTF-8. Make sure the output (likely ``new_target``) uses the correct", "import sys __all__ = ['create_document', 'tag', 'as_link'] class create_document(contextlib.redirect_stdout): \"\"\"Redirect", "= ('<!DOCTYPE html>\\n' '<html>\\n' '<title>{}</title>\\n' '<meta charset=\"{}\">'.format(self._title, self._encoding)) print(html5) return", "new_target = contextlib.redirect_stdout.__enter__(self) html5 = ('<!DOCTYPE html>\\n' '<html>\\n' '<title>{}</title>\\n' '<meta", "``new_target``) uses the correct one. 
Arguments are not checked for", "Make sure the output (likely ``new_target``) uses the correct one.", "s.isspace(): return super().write(s) # else: if self._write_text: count = super().write('<a", "two subsequent non-empty writes into an HTML link.\"\"\" def __init__(self):", "by the name.\"\"\" print('<{}>'.format(name)) yield print('</{}>'.format(name)) class LinkStringIO(io.StringIO): def __init__(self):", "super().write('<a href=\"') count += super().write(s) count += super().write('\">') else: count", "href=\"...\" and text def write(self, s): if not s: return", "and defaults to UTF-8. Make sure the output (likely ``new_target``)", "title can be specified, but should not consist of whitespace", "import io import sys __all__ = ['create_document', 'tag', 'as_link'] class", "switch between link href=\"...\" and text def write(self, s): if", "any two subsequent non-empty writes into an HTML link.\"\"\" def", "html>\\n' '<html>\\n' '<title>{}</title>\\n' '<meta charset=\"{}\">'.format(self._title, self._encoding)) print(html5) return new_target @contextlib.contextmanager", "import contextlib import io import sys __all__ = ['create_document', 'tag',", "= encoding def __enter__(self): new_target = contextlib.redirect_stdout.__enter__(self) html5 = ('<!DOCTYPE", "= super().write('<a href=\"') count += super().write(s) count += super().write('\">') else:", "For serialisation, an encoding is included and defaults to UTF-8.", "href=\"') count += super().write(s) count += super().write('\">') else: count =", "specified by new_target. 
A HTML document title can be specified,", "+= super().write(s) count += super().write('\">') else: count = super().write(s) count", "<NAME> :license: MIT \"\"\" import contextlib import io import sys", "@contextlib.contextmanager def tag(name): \"\"\"Enclose output in an HTML tag denoted", "contextlib import io import sys __all__ = ['create_document', 'tag', 'as_link']", "__enter__(self): new_target = contextlib.redirect_stdout.__enter__(self) html5 = ('<!DOCTYPE html>\\n' '<html>\\n' '<title>{}</title>\\n'", "an HTML5 document specified by new_target. A HTML document title", "'as_link'] class create_document(contextlib.redirect_stdout): \"\"\"Redirect output to an HTML5 document specified", "print('<{}>'.format(name)) yield print('</{}>'.format(name)) class LinkStringIO(io.StringIO): def __init__(self): super().__init__() self._write_text =", "super().write(s) count += super().write('\">') else: count = super().write(s) count +=", "count = super().write(s) count += super().write('</a>') self._write_text = not self._write_text", "but should not consist of whitespace only. Default is a", "output (likely ``new_target``) uses the correct one. Arguments are not", "are not checked for validity. \"\"\" def __init__(self, new_target, *,", "title='-', encoding='utf-8'): super().__init__(new_target) self._title = str(title) self._encoding = encoding def", "return super().write(s) # else: if self._write_text: count = super().write('<a href=\"')", "document title can be specified, but should not consist of", "\"\"\"Redirect output to an HTML5 document specified by new_target. 
A", "def __exit__(self, exctype, excinst, exctb): super().__exit__(exctype, excinst, exctb) with contextlib.closing(self._new_target):", "super().__init__(new_target) self._title = str(title) self._encoding = encoding def __enter__(self): new_target", "HTML link.\"\"\" def __init__(self): super().__init__(LinkStringIO()) def __exit__(self, exctype, excinst, exctb):", "super().write('</a>') self._write_text = not self._write_text return count class write_link(contextlib.redirect_stdout): \"\"\"Combine", "count = super().write('<a href=\"') count += super().write(s) count += super().write('\">')", "super().write(s) # else: if self._write_text: count = super().write('<a href=\"') count", "# else: if self._write_text: count = super().write('<a href=\"') count +=", "__init__(self): super().__init__(LinkStringIO()) def __exit__(self, exctype, excinst, exctb): super().__exit__(exctype, excinst, exctb)", "s: return # else: if s.isspace(): return super().write(s) # else:", "'tag', 'as_link'] class create_document(contextlib.redirect_stdout): \"\"\"Redirect output to an HTML5 document", "encoding='utf-8'): super().__init__(new_target) self._title = str(title) self._encoding = encoding def __enter__(self):", "self._write_text = not self._write_text return count class write_link(contextlib.redirect_stdout): \"\"\"Combine any", "exctype, excinst, exctb): super().__exit__(exctype, excinst, exctb) with contextlib.closing(self._new_target): self._new_target.seek(0) sys.stdout.write(self._new_target.read())", "can be specified, but should not consist of whitespace only.", "charset=\"{}\">'.format(self._title, self._encoding)) print(html5) return new_target @contextlib.contextmanager def tag(name): \"\"\"Enclose output", "into an HTML link.\"\"\" def __init__(self): super().__init__(LinkStringIO()) def __exit__(self, exctype,", "= ['create_document', 'tag', 'as_link'] class create_document(contextlib.redirect_stdout): \"\"\"Redirect output to an", "uses the correct one. 
Arguments are not checked for validity.", "'<html>\\n' '<title>{}</title>\\n' '<meta charset=\"{}\">'.format(self._title, self._encoding)) print(html5) return new_target @contextlib.contextmanager def", ":author: <NAME> :license: MIT \"\"\" import contextlib import io import", "def write(self, s): if not s: return # else: if", "= str(title) self._encoding = encoding def __enter__(self): new_target = contextlib.redirect_stdout.__enter__(self)", "by new_target. A HTML document title can be specified, but", "self._write_text return count class write_link(contextlib.redirect_stdout): \"\"\"Combine any two subsequent non-empty", "sure the output (likely ``new_target``) uses the correct one. Arguments", "the correct one. Arguments are not checked for validity. \"\"\"", "output in an HTML tag denoted by the name.\"\"\" print('<{}>'.format(name))", "an encoding is included and defaults to UTF-8. Make sure", "tag(name): \"\"\"Enclose output in an HTML tag denoted by the", "else: count = super().write(s) count += super().write('</a>') self._write_text = not", "name.\"\"\" print('<{}>'.format(name)) yield print('</{}>'.format(name)) class LinkStringIO(io.StringIO): def __init__(self): super().__init__() self._write_text", "not s: return # else: if s.isspace(): return super().write(s) #", "+= super().write('\">') else: count = super().write(s) count += super().write('</a>') self._write_text", "output to an HTML5 document specified by new_target. A HTML", "between link href=\"...\" and text def write(self, s): if not", "not consist of whitespace only. Default is a dash. For", "Arguments are not checked for validity. \"\"\" def __init__(self, new_target,", "count += super().write('\">') else: count = super().write(s) count += super().write('</a>')", "denoted by the name.\"\"\" print('<{}>'.format(name)) yield print('</{}>'.format(name)) class LinkStringIO(io.StringIO): def", "correct one. Arguments are not checked for validity. \"\"\" def", "document specified by new_target. 
A HTML document title can be", "checked for validity. \"\"\" def __init__(self, new_target, *, title='-', encoding='utf-8'):", "included and defaults to UTF-8. Make sure the output (likely", "consist of whitespace only. Default is a dash. For serialisation,", "to an HTML5 document specified by new_target. A HTML document", "html5 = ('<!DOCTYPE html>\\n' '<html>\\n' '<title>{}</title>\\n' '<meta charset=\"{}\">'.format(self._title, self._encoding)) print(html5)", "create_document(contextlib.redirect_stdout): \"\"\"Redirect output to an HTML5 document specified by new_target.", "self._encoding)) print(html5) return new_target @contextlib.contextmanager def tag(name): \"\"\"Enclose output in", "contexts. :author: <NAME> :license: MIT \"\"\" import contextlib import io", "+= super().write('</a>') self._write_text = not self._write_text return count class write_link(contextlib.redirect_stdout):", "writes into an HTML link.\"\"\" def __init__(self): super().__init__(LinkStringIO()) def __exit__(self,", "HTML tag denoted by the name.\"\"\" print('<{}>'.format(name)) yield print('</{}>'.format(name)) class", "text def write(self, s): if not s: return # else:", "only. Default is a dash. For serialisation, an encoding is", "super().write('\">') else: count = super().write(s) count += super().write('</a>') self._write_text =", "return new_target @contextlib.contextmanager def tag(name): \"\"\"Enclose output in an HTML", "new_target @contextlib.contextmanager def tag(name): \"\"\"Enclose output in an HTML tag", "if self._write_text: count = super().write('<a href=\"') count += super().write(s) count", "class write_link(contextlib.redirect_stdout): \"\"\"Combine any two subsequent non-empty writes into an", "dash. For serialisation, an encoding is included and defaults to", "a dash. 
For serialisation, an encoding is included and defaults", "= False # switch between link href=\"...\" and text def", "io import sys __all__ = ['create_document', 'tag', 'as_link'] class create_document(contextlib.redirect_stdout):", "= not self._write_text return count class write_link(contextlib.redirect_stdout): \"\"\"Combine any two", "HTML5 contexts. :author: <NAME> :license: MIT \"\"\" import contextlib import", "sys __all__ = ['create_document', 'tag', 'as_link'] class create_document(contextlib.redirect_stdout): \"\"\"Redirect output", "__all__ = ['create_document', 'tag', 'as_link'] class create_document(contextlib.redirect_stdout): \"\"\"Redirect output to", "def __init__(self): super().__init__() self._write_text = False # switch between link", "def __enter__(self): new_target = contextlib.redirect_stdout.__enter__(self) html5 = ('<!DOCTYPE html>\\n' '<html>\\n'", "be specified, but should not consist of whitespace only. Default", "encoding is included and defaults to UTF-8. Make sure the", "should not consist of whitespace only. Default is a dash.", ":license: MIT \"\"\" import contextlib import io import sys __all__", "link href=\"...\" and text def write(self, s): if not s:", "not checked for validity. 
\"\"\" def __init__(self, new_target, *, title='-',", "# switch between link href=\"...\" and text def write(self, s):", "else: if self._write_text: count = super().write('<a href=\"') count += super().write(s)", "count class write_link(contextlib.redirect_stdout): \"\"\"Combine any two subsequent non-empty writes into", "*, title='-', encoding='utf-8'): super().__init__(new_target) self._title = str(title) self._encoding = encoding", "super().write(s) count += super().write('</a>') self._write_text = not self._write_text return count", "['create_document', 'tag', 'as_link'] class create_document(contextlib.redirect_stdout): \"\"\"Redirect output to an HTML5", "contextlib.redirect_stdout.__enter__(self) html5 = ('<!DOCTYPE html>\\n' '<html>\\n' '<title>{}</title>\\n' '<meta charset=\"{}\">'.format(self._title, self._encoding))", "else: if s.isspace(): return super().write(s) # else: if self._write_text: count", "def __init__(self, new_target, *, title='-', encoding='utf-8'): super().__init__(new_target) self._title = str(title)", "the output (likely ``new_target``) uses the correct one. Arguments are", "to UTF-8. Make sure the output (likely ``new_target``) uses the", "print(html5) return new_target @contextlib.contextmanager def tag(name): \"\"\"Enclose output in an", "not self._write_text return count class write_link(contextlib.redirect_stdout): \"\"\"Combine any two subsequent", "an HTML link.\"\"\" def __init__(self): super().__init__(LinkStringIO()) def __exit__(self, exctype, excinst,", "validity. \"\"\" def __init__(self, new_target, *, title='-', encoding='utf-8'): super().__init__(new_target) self._title", "class LinkStringIO(io.StringIO): def __init__(self): super().__init__() self._write_text = False # switch", "(likely ``new_target``) uses the correct one. Arguments are not checked", "HTML document title can be specified, but should not consist", "whitespace only. Default is a dash. 
For serialisation, an encoding", "link.\"\"\" def __init__(self): super().__init__(LinkStringIO()) def __exit__(self, exctype, excinst, exctb): super().__exit__(exctype,", "one. Arguments are not checked for validity. \"\"\" def __init__(self,", "'<meta charset=\"{}\">'.format(self._title, self._encoding)) print(html5) return new_target @contextlib.contextmanager def tag(name): \"\"\"Enclose", "count += super().write('</a>') self._write_text = not self._write_text return count class", "serialisation, an encoding is included and defaults to UTF-8. Make", "of whitespace only. Default is a dash. For serialisation, an", "= super().write(s) count += super().write('</a>') self._write_text = not self._write_text return", "HTML5 document specified by new_target. A HTML document title can", "\"\"\" def __init__(self, new_target, *, title='-', encoding='utf-8'): super().__init__(new_target) self._title =", "\"\"\"Combine any two subsequent non-empty writes into an HTML link.\"\"\"", "s): if not s: return # else: if s.isspace(): return", "write_link(contextlib.redirect_stdout): \"\"\"Combine any two subsequent non-empty writes into an HTML", "for validity. 
\"\"\" def __init__(self, new_target, *, title='-', encoding='utf-8'): super().__init__(new_target)", "MIT \"\"\" import contextlib import io import sys __all__ =", "if s.isspace(): return super().write(s) # else: if self._write_text: count =", "self._title = str(title) self._encoding = encoding def __enter__(self): new_target =", "= contextlib.redirect_stdout.__enter__(self) html5 = ('<!DOCTYPE html>\\n' '<html>\\n' '<title>{}</title>\\n' '<meta charset=\"{}\">'.format(self._title,", "self._encoding = encoding def __enter__(self): new_target = contextlib.redirect_stdout.__enter__(self) html5 =", "__init__(self): super().__init__() self._write_text = False # switch between link href=\"...\"", "encoding def __enter__(self): new_target = contextlib.redirect_stdout.__enter__(self) html5 = ('<!DOCTYPE html>\\n'", "if not s: return # else: if s.isspace(): return super().write(s)", "write(self, s): if not s: return # else: if s.isspace():", "def tag(name): \"\"\"Enclose output in an HTML tag denoted by", "self._write_text = False # switch between link href=\"...\" and text", "new_target, *, title='-', encoding='utf-8'): super().__init__(new_target) self._title = str(title) self._encoding =", "in an HTML tag denoted by the name.\"\"\" print('<{}>'.format(name)) yield", "__exit__(self, exctype, excinst, exctb): super().__exit__(exctype, excinst, exctb) with contextlib.closing(self._new_target): self._new_target.seek(0)" ]
[ "only needed when doing a *loop* of sess.run() calls, and", "\"\"\" def __init__(self, label, f, d): self.func = f self.deriv", "following lead from matlab example by professor: # http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf import", "instructions, following lead from matlab example by professor: # http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf", "so just ignore optimization class Differentiable: \"\"\" encapsulation of a", "4x^3+2 fExFourth = Differentiable(\"fExFourth\", lambda x: tf.add_n([tf.pow(x, 4), tf.multiply(2, x),", "print(log_dir) with tf.Session() as sess: writer = tf.summary.FileWriter(log_dir, sess.graph) fOfTwo,", "and want to see # intermediary results per-loop. #writer.add_summary(results) writer.flush()", "derivative \"\"\" def __init__(self, label, f, d): self.func = f", "I \"\"\" # Per homework instructions, following lead from matlab", "d self.func.name = label self.deriv.name = \"%sDeriv\" % label #", "sess: writer = tf.summary.FileWriter(log_dir, sess.graph) fOfTwo, fDerivOfTwo = results =", "self.deriv = d self.func.name = label self.deriv.name = \"%sDeriv\" %", "doing intersting things in this lab, so just ignore optimization", "-7]), lambda x: tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2])) tFofTwo = fExFourth.func(2)", "really doing intersting things in this lab, so just ignore", "g(x) = x^4+2x-7 ; per matlab example # g'(x) =", "fOfTwo, fDerivOfTwo = results = sess.run([tFofTwo, tFofDerivTwo]) sys.stderr.write(\"results:\\n\\tf(2)=%s\\n\\tf'(2)=%s\\n\" % (fOfTwo,", "of a function and its derivative \"\"\" def __init__(self, label,", "os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # not really doing intersting things in this lab,", "# Per homework instructions, following lead from matlab example by", "tempfile import os import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # not", "= d self.func.name = label self.deriv.name = \"%sDeriv\" % label", "ignore optimization 
class Differentiable: \"\"\" encapsulation of a function and", "a function and its derivative \"\"\" def __init__(self, label, f,", "np os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # not really doing intersting things in this", "sess.run([tFofTwo, tFofDerivTwo]) sys.stderr.write(\"results:\\n\\tf(2)=%s\\n\\tf'(2)=%s\\n\" % (fOfTwo, fDerivOfTwo)) # note: only needed", "doing a *loop* of sess.run() calls, and want to see", "x^4+2x-7 ; per matlab example # g'(x) = 4x^3+2 fExFourth", "optimization class Differentiable: \"\"\" encapsulation of a function and its", "import os import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # not really", "(fOfTwo, fDerivOfTwo)) # note: only needed when doing a *loop*", "http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf import sys import tensorflow as tf import tempfile import", "tensorflow as tf import tempfile import os import numpy as", "matlab example by professor: # http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf import sys import tensorflow", "2])) tFofTwo = fExFourth.func(2) tFofDerivTwo = fExFourth.deriv(2) log_dir = tempfile.mkdtemp(prefix=\"hw3-nov14-parti\")", "just ignore optimization class Differentiable: \"\"\" encapsulation of a function", "needed when doing a *loop* of sess.run() calls, and want", "want to see # intermediary results per-loop. 
#writer.add_summary(results) writer.flush() writer.close()", "function and its derivative \"\"\" def __init__(self, label, f, d):", "sess.graph) fOfTwo, fDerivOfTwo = results = sess.run([tFofTwo, tFofDerivTwo]) sys.stderr.write(\"results:\\n\\tf(2)=%s\\n\\tf'(2)=%s\\n\" %", "fDerivOfTwo)) # note: only needed when doing a *loop* of", "by professor: # http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf import sys import tensorflow as tf", "\"\"\" # Per homework instructions, following lead from matlab example", "x), -7]), lambda x: tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2])) tFofTwo =", "Differentiable: \"\"\" encapsulation of a function and its derivative \"\"\"", "f self.deriv = d self.func.name = label self.deriv.name = \"%sDeriv\"", "Differentiable(\"fExFourth\", lambda x: tf.add_n([tf.pow(x, 4), tf.multiply(2, x), -7]), lambda x:", "intersting things in this lab, so just ignore optimization class", "label, f, d): self.func = f self.deriv = d self.func.name", "# note: only needed when doing a *loop* of sess.run()", "14., Part I \"\"\" # Per homework instructions, following lead", "import sys import tensorflow as tf import tempfile import os", "self.func.name = label self.deriv.name = \"%sDeriv\" % label # g(x)", "solution to homework #3, Nov 14., Part I \"\"\" #", "% (fOfTwo, fDerivOfTwo)) # note: only needed when doing a", "= Differentiable(\"fExFourth\", lambda x: tf.add_n([tf.pow(x, 4), tf.multiply(2, x), -7]), lambda", "as np os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # not really doing intersting things in", "= f self.deriv = d self.func.name = label self.deriv.name =", "tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2])) tFofTwo = fExFourth.func(2) tFofDerivTwo = fExFourth.deriv(2)", "not really doing intersting things in this lab, so just", "sys.stderr.write(\"results:\\n\\tf(2)=%s\\n\\tf'(2)=%s\\n\" % (fOfTwo, fDerivOfTwo)) # note: only needed when doing", "= sess.run([tFofTwo, tFofDerivTwo]) 
sys.stderr.write(\"results:\\n\\tf(2)=%s\\n\\tf'(2)=%s\\n\" % (fOfTwo, fDerivOfTwo)) # note: only", "\"\"\" encapsulation of a function and its derivative \"\"\" def", "= \"%sDeriv\" % label # g(x) = x^4+2x-7 ; per", "x: tf.add_n([tf.pow(x, 4), tf.multiply(2, x), -7]), lambda x: tf.add_n([tf.multiply(4, tf.pow(x,", "= results = sess.run([tFofTwo, tFofDerivTwo]) sys.stderr.write(\"results:\\n\\tf(2)=%s\\n\\tf'(2)=%s\\n\" % (fOfTwo, fDerivOfTwo)) #", "of sess.run() calls, and want to see # intermediary results", "tf.multiply(2, x), -7]), lambda x: tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2])) tFofTwo", "its derivative \"\"\" def __init__(self, label, f, d): self.func =", "% label # g(x) = x^4+2x-7 ; per matlab example", "tempfile.mkdtemp(prefix=\"hw3-nov14-parti\") print(log_dir) with tf.Session() as sess: writer = tf.summary.FileWriter(log_dir, sess.graph)", "homework instructions, following lead from matlab example by professor: #", "label # g(x) = x^4+2x-7 ; per matlab example #", "import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # not really doing intersting", "homework #3, Nov 14., Part I \"\"\" # Per homework", "tFofDerivTwo]) sys.stderr.write(\"results:\\n\\tf(2)=%s\\n\\tf'(2)=%s\\n\" % (fOfTwo, fDerivOfTwo)) # note: only needed when", "professor: # http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf import sys import tensorflow as tf import", "example by professor: # http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf import sys import tensorflow as", "encapsulation of a function and its derivative \"\"\" def __init__(self,", "log_dir = tempfile.mkdtemp(prefix=\"hw3-nov14-parti\") print(log_dir) with tf.Session() as sess: writer =", "4), tf.multiply(2, x), -7]), lambda x: tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2]))", "import tempfile import os import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL']='2' #", "<reponame>jzacsh/neuralnets-cmp464<gh_stars>1-10 \"\"\" <NAME> solution to homework 
#3, Nov 14., Part", "#3, Nov 14., Part I \"\"\" # Per homework instructions,", "numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # not really doing intersting things", "this lab, so just ignore optimization class Differentiable: \"\"\" encapsulation", "per matlab example # g'(x) = 4x^3+2 fExFourth = Differentiable(\"fExFourth\",", "tf.add_n([tf.pow(x, 4), tf.multiply(2, x), -7]), lambda x: tf.add_n([tf.multiply(4, tf.pow(x, 3)),", "lambda x: tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2])) tFofTwo = fExFourth.func(2) tFofDerivTwo", "fExFourth.deriv(2) log_dir = tempfile.mkdtemp(prefix=\"hw3-nov14-parti\") print(log_dir) with tf.Session() as sess: writer", "example # g'(x) = 4x^3+2 fExFourth = Differentiable(\"fExFourth\", lambda x:", "a *loop* of sess.run() calls, and want to see #", "# g'(x) = 4x^3+2 fExFourth = Differentiable(\"fExFourth\", lambda x: tf.add_n([tf.pow(x,", "tf.Session() as sess: writer = tf.summary.FileWriter(log_dir, sess.graph) fOfTwo, fDerivOfTwo =", "# not really doing intersting things in this lab, so", "<NAME> solution to homework #3, Nov 14., Part I \"\"\"", "# g(x) = x^4+2x-7 ; per matlab example # g'(x)", "results = sess.run([tFofTwo, tFofDerivTwo]) sys.stderr.write(\"results:\\n\\tf(2)=%s\\n\\tf'(2)=%s\\n\" % (fOfTwo, fDerivOfTwo)) # note:", "and its derivative \"\"\" def __init__(self, label, f, d): self.func", "Nov 14., Part I \"\"\" # Per homework instructions, following", "lab, so just ignore optimization class Differentiable: \"\"\" encapsulation of", "= fExFourth.deriv(2) log_dir = tempfile.mkdtemp(prefix=\"hw3-nov14-parti\") print(log_dir) with tf.Session() as sess:", "to homework #3, Nov 14., Part I \"\"\" # Per", "lead from matlab example by professor: # http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf import sys", "class Differentiable: \"\"\" encapsulation of a function and its derivative", "# http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf import sys import 
tensorflow as tf import tempfile", "tFofTwo = fExFourth.func(2) tFofDerivTwo = fExFourth.deriv(2) log_dir = tempfile.mkdtemp(prefix=\"hw3-nov14-parti\") print(log_dir)", "\"\"\" <NAME> solution to homework #3, Nov 14., Part I", "with tf.Session() as sess: writer = tf.summary.FileWriter(log_dir, sess.graph) fOfTwo, fDerivOfTwo", "lambda x: tf.add_n([tf.pow(x, 4), tf.multiply(2, x), -7]), lambda x: tf.add_n([tf.multiply(4,", "; per matlab example # g'(x) = 4x^3+2 fExFourth =", "__init__(self, label, f, d): self.func = f self.deriv = d", "= label self.deriv.name = \"%sDeriv\" % label # g(x) =", "fDerivOfTwo = results = sess.run([tFofTwo, tFofDerivTwo]) sys.stderr.write(\"results:\\n\\tf(2)=%s\\n\\tf'(2)=%s\\n\" % (fOfTwo, fDerivOfTwo))", "def __init__(self, label, f, d): self.func = f self.deriv =", "tf.pow(x, 3)), 2])) tFofTwo = fExFourth.func(2) tFofDerivTwo = fExFourth.deriv(2) log_dir", "*loop* of sess.run() calls, and want to see # intermediary", "tFofDerivTwo = fExFourth.deriv(2) log_dir = tempfile.mkdtemp(prefix=\"hw3-nov14-parti\") print(log_dir) with tf.Session() as", "tf import tempfile import os import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL']='2'", "3)), 2])) tFofTwo = fExFourth.func(2) tFofDerivTwo = fExFourth.deriv(2) log_dir =", "g'(x) = 4x^3+2 fExFourth = Differentiable(\"fExFourth\", lambda x: tf.add_n([tf.pow(x, 4),", "sys import tensorflow as tf import tempfile import os import", "self.func = f self.deriv = d self.func.name = label self.deriv.name", "x: tf.add_n([tf.multiply(4, tf.pow(x, 3)), 2])) tFofTwo = fExFourth.func(2) tFofDerivTwo =", "Part I \"\"\" # Per homework instructions, following lead from", "d): self.func = f self.deriv = d self.func.name = label", "fExFourth = Differentiable(\"fExFourth\", lambda x: tf.add_n([tf.pow(x, 4), tf.multiply(2, x), -7]),", "= tf.summary.FileWriter(log_dir, sess.graph) fOfTwo, fDerivOfTwo = results = sess.run([tFofTwo, tFofDerivTwo])", "in this lab, so just ignore optimization class Differentiable: 
\"\"\"", "Per homework instructions, following lead from matlab example by professor:", "writer = tf.summary.FileWriter(log_dir, sess.graph) fOfTwo, fDerivOfTwo = results = sess.run([tFofTwo,", "= 4x^3+2 fExFourth = Differentiable(\"fExFourth\", lambda x: tf.add_n([tf.pow(x, 4), tf.multiply(2,", "tf.summary.FileWriter(log_dir, sess.graph) fOfTwo, fDerivOfTwo = results = sess.run([tFofTwo, tFofDerivTwo]) sys.stderr.write(\"results:\\n\\tf(2)=%s\\n\\tf'(2)=%s\\n\"", "calls, and want to see # intermediary results per-loop. #writer.add_summary(results)", "= fExFourth.func(2) tFofDerivTwo = fExFourth.deriv(2) log_dir = tempfile.mkdtemp(prefix=\"hw3-nov14-parti\") print(log_dir) with", "= tempfile.mkdtemp(prefix=\"hw3-nov14-parti\") print(log_dir) with tf.Session() as sess: writer = tf.summary.FileWriter(log_dir,", "label self.deriv.name = \"%sDeriv\" % label # g(x) = x^4+2x-7", "things in this lab, so just ignore optimization class Differentiable:", "as sess: writer = tf.summary.FileWriter(log_dir, sess.graph) fOfTwo, fDerivOfTwo = results", "f, d): self.func = f self.deriv = d self.func.name =", "self.deriv.name = \"%sDeriv\" % label # g(x) = x^4+2x-7 ;", "\"%sDeriv\" % label # g(x) = x^4+2x-7 ; per matlab", "os import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # not really doing", "note: only needed when doing a *loop* of sess.run() calls,", "as tf import tempfile import os import numpy as np", "= x^4+2x-7 ; per matlab example # g'(x) = 4x^3+2", "fExFourth.func(2) tFofDerivTwo = fExFourth.deriv(2) log_dir = tempfile.mkdtemp(prefix=\"hw3-nov14-parti\") print(log_dir) with tf.Session()", "sess.run() calls, and want to see # intermediary results per-loop.", "import tensorflow as tf import tempfile import os import numpy", "matlab example # g'(x) = 4x^3+2 fExFourth = Differentiable(\"fExFourth\", lambda", "from matlab example by professor: # http://comet.lehman.cuny.edu/schneider/Fall17/CMP464/Maple/PartialDerivatives1.pdf import sys import", "when doing a *loop* 
of sess.run() calls, and want to" ]
[ "classification_report(actual, att_set[:,_k_].cpu().data.numpy(), output_dict = True)[\"macro avg\"][\"f1-score\"] grad_pred = classification_report(actual, grad_set[:,_k_].cpu().data.numpy(),", "round(len(data) / 10) + 1 model.embedding.weight.requires_grad_(True) actual = [] results", "1 model.embedding.weight.requires_grad_(True) actual = [] results = {} results[\"random\"] =", "_ = next(iter(data)) maximum = max(lengths) if max(lengths) <= 10", "att_set = torch.zeros([data_size, maximum]).long().to(device) rand_set = torch.zeros([data_size, maximum]).long().to(device) att_grad_set =", "weights.size(1), largest = largest)[1] top_rand = torch.randn(top_att.shape) top_rand = torch.topk(top_rand,", "as prfs device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') def", "weights.size(1), largest = largest)[1] top_att_grad = torch.topk(weights_def_grad_soft, k = weights.size(1),", "att_set[doc_id, 0] = yhat.max(-1)[1] grad_set[doc_id, 0] = yhat.max(-1)[1] att_grad_set[doc_id, 0]", "largest: ordering = \"descending\" plt.savefig(save_path + \"_correct_classified_\" + ordering +", "_k_ in range(0,maximum): actual = actual_set.flatten().cpu().data.numpy() rand_pred = classification_report(actual, rand_set[:,_k_].cpu().data.numpy(),", "as plt import pandas as pd from sklearn.metrics import *", "weights_or = model(sentences, lengths, retain_gradient = True) masking = yhat.max(-1)[1]", "yhat_att_grad, _ = model(sentence_att_grad,lengths) att_grad_set[doc_id, _j_] = yhat_att_grad.max(-1)[1] yhat_att_x_grad, _", "k = weights.size(1), largest = largest)[1] temp_pred = [] temp_act", "weights.size(1), largest = largest)[1] top_att_mul_grad = torch.topk(weight_mul_grad, k = weights.size(1),", "print(\"\\n--- Degrading Model Performance \\n\") modulo = round(len(data) / 10)", "torch import torch.nn as nn import numpy as np import", "== 0 : print(\"Remaining: \", len(data)- batchi) docs = torch.LongTensor(docs)", "= yhat_att_x_grad.max(-1)[1] if batchi % modulo == 
0 : print(\"Remaining:", "= 0 sentence_att_grad[rows, top_att_grad[:,_j_]] = 0 sentence_att_mul_grad[rows, top_att_mul_grad[:,_j_]] = 0", "temp_pred = [] temp_act = [] temp_act.append(labels.cpu().data.numpy()) temp_pred.append(yhat.max(-1)[1].cpu().data.numpy()) model.eval() actual_set[doc_id]", "ordering = \"descending\" plt.savefig(save_path + \"_correct_classified_\" + ordering + \".png\")", "yhat.max(-1)[1] grad_set[doc_id, 0] = yhat.max(-1)[1] att_grad_set[doc_id, 0] = yhat.max(-1)[1] att_x_grad_set[doc_id,", "torch.topk(g1, k = g1.size(1), largest = largest)[1] top_att = torch.topk(weights,", "= [] results = {} results[\"random\"] = [] results[\"attention\"]= []", "range(1,maximum): sentence_grad[rows, top_grad[:,_j_]] = 0 sentence_att[rows, top_att[:,_j_]] = 0 sentence_att_grad[rows,", "0 sentence_rand[rows, top_rand[:,_j_]] = 0 yhat_rand, _ = model(sentence_rand,lengths) rand_set[doc_id,", "largest == False: masking = yhat.max(-1)[1] != labels yhat.max(-1)[0].sum().backward(retain_graph =", "model.masks[masking] with torch.no_grad(): weights = weights_or.clone() weight_mul_grad = weights_or *", "rows = torch.arange(sentences.size(0)) for _j_ in range(1,maximum): sentence_grad[rows, top_grad[:,_j_]] =", "max(lengths) <= 10 : maximum = max(lengths) - 1 elif", "0 sentence_att_grad[rows, top_att_grad[:,_j_]] = 0 sentence_att_mul_grad[rows, top_att_mul_grad[:,_j_]] = 0 sentence_rand[rows,", "g1[model_masks[:,:max_lengths]] = float(\"-inf\") top_grad = torch.topk(g1, k = g1.size(1), largest", "10) + 1 model.embedding.weight.requires_grad_(True) actual = [] results = {}", "results.plot(kind = \"line\", figsize = (18,10)) ordering = \"ascending\" if", "sklearn.metrics import * from sklearn.metrics import precision_recall_fscore_support as prfs device", "temp_act.append(labels.cpu().data.numpy()) temp_pred.append(yhat.max(-1)[1].cpu().data.numpy()) model.eval() actual_set[doc_id] = labels.unsqueeze(-1) rand_set[doc_id, 0] = yhat.max(-1)[1]", "labels[masking] 
lengths = lengths[masking] weights_or = weights_or[masking] docs.extend(doc_id) g =", "True)[\"macro avg\"][\"f1-score\"] att_pred = classification_report(actual, att_set[:,_k_].cpu().data.numpy(), output_dict = True)[\"macro avg\"][\"f1-score\"]", "10 print(maximum) grad_set = torch.zeros([data_size, maximum]).long().to(device) att_set = torch.zeros([data_size, maximum]).long().to(device)", "g1[model_masks[:,:max_lengths]] = float(\"-inf\") sentence_att = sentences.clone()[:,:max_lengths] sentence_grad = sentences.clone()[:,:max_lengths] sentence_rand", "model.zero_grad() sentences, lengths, labels = sentences.to(device), lengths.to(device), labels.to(device) yhat, weights_or", "= classification_report(actual, att_x_grad_set[:,_k_].cpu().data.numpy(), output_dict = True)[\"macro avg\"][\"f1-score\"] results[\"random\"].append(rand_pred) results[\"attention\"].append(att_pred) results[\"gradient\"].append(grad_pred)", "== labels if largest == False: masking = yhat.max(-1)[1] !=", "actual = [] results = {} results[\"random\"] = [] results[\"attention\"]=", "= model(sentence_att,lengths) att_set[doc_id, _j_] = yhat_att.max(-1)[1] yhat_grad, _ = model(sentence_grad,lengths)", "= weights_def_grad.clone() weights_def_grad_soft[model_masks[:,:max_lengths]] = float(\"-inf\") em = model.embed[masking] g1 =", "modulo == 0 : print(\"Remaining: \", len(data)- batchi) docs =", "att_grad_set = att_grad_set[docs] att_x_grad_set = att_x_grad_set[docs] actual_set = actual_set[docs] for", "* weights_def_grad weight_mul_grad[model_masks[:,:max_lengths]] = float(\"-inf\") weights_def_grad_soft = weights_def_grad.clone() weights_def_grad_soft[model_masks[:,:max_lengths]] =", "maximum = 10 print(maximum) grad_set = torch.zeros([data_size, maximum]).long().to(device) att_set =", "True)[\"macro avg\"][\"f1-score\"] att_grad_pred = classification_report(actual, att_grad_set[:,_k_].cpu().data.numpy(), output_dict = True)[\"macro avg\"][\"f1-score\"]", "yhat_grad.max(-1)[1] yhat_att_grad, _ 
= model(sentence_att_grad,lengths) att_grad_set[doc_id, _j_] = yhat_att_grad.max(-1)[1] yhat_att_x_grad,", "0] = yhat.max(-1)[1] rows = torch.arange(sentences.size(0)) for _j_ in range(1,maximum):", "rand_set[doc_id, _j_] = yhat_rand.max(-1)[1] yhat_att, _ = model(sentence_att,lengths) att_set[doc_id, _j_]", "figsize = (18,10)) ordering = \"ascending\" if largest: ordering =", "len(data)- batchi) docs = torch.LongTensor(docs) rand_set = rand_set[docs] att_set =", "= 0 sentence_att[rows, top_att[:,_j_]] = 0 sentence_att_grad[rows, top_att_grad[:,_j_]] = 0", "Degrading Model Performance \\n\") modulo = round(len(data) / 10) +", "maximum]).long().to(device) att_grad_set = torch.zeros([data_size, maximum]).long().to(device) att_x_grad_set = torch.zeros([data_size, maximum]).long().to(device) actual_set", "= float(\"-inf\") em = model.embed[masking] g1 = (g* em).sum(-1)[:,:max_lengths] g1[model_masks[:,:max_lengths]]", "matplotlib.use(\"Agg\") import matplotlib.pyplot as plt import pandas as pd from", "torch.cuda.empty_cache() model.zero_grad() sentences, lengths, labels = sentences.to(device), lengths.to(device), labels.to(device) yhat,", "yhat = yhat[masking] sentences = sentences[masking] labels = labels[masking] lengths", "lengths[masking] weights_or = weights_or[masking] docs.extend(doc_id) g = model.embed.grad[masking] weights_def_grad =", "retain_gradient = True) masking = yhat.max(-1)[1] == labels if largest", "0] = yhat.max(-1)[1] att_grad_set[doc_id, 0] = yhat.max(-1)[1] att_x_grad_set[doc_id, 0] =", "torch.topk(top_rand, k = weights.size(1), largest = largest)[1] top_att_grad = torch.topk(weights_def_grad_soft,", "= torch.zeros([data_size, maximum]).long().to(device) att_x_grad_set = torch.zeros([data_size, maximum]).long().to(device) actual_set = torch.zeros([data_size,", "labels yhat.max(-1)[0].sum().backward(retain_graph = True) maxi = max(lengths) doc_id = doc_id[masking]", "= rand_set[docs] att_set = att_set[docs] grad_set = grad_set[docs] att_grad_set 
=", "= torch.zeros([data_size, 1]).long().to(device) docs = [] for batchi, (doc_id, sentences,", "= largest)[1] top_rand = torch.randn(top_att.shape) top_rand = torch.topk(top_rand, k =", "= yhat_grad.max(-1)[1] yhat_att_grad, _ = model(sentence_att_grad,lengths) att_grad_set[doc_id, _j_] = yhat_att_grad.max(-1)[1]", "yhat_rand.max(-1)[1] yhat_att, _ = model(sentence_att,lengths) att_set[doc_id, _j_] = yhat_att.max(-1)[1] yhat_grad,", "actual_set[docs] for _k_ in range(0,maximum): actual = actual_set.flatten().cpu().data.numpy() rand_pred =", "results = {} results[\"random\"] = [] results[\"attention\"]= [] results[\"gradient\"] =", "{} results[\"random\"] = [] results[\"attention\"]= [] results[\"gradient\"] = [] results[\"grad_attention\"]", "= max(lengths) doc_id = doc_id[masking] yhat = yhat[masking] sentences =", "plt.savefig(save_path + \"_correct_classified_\" + ordering + \".png\") results.to_csv(save_path + \"_correct_classified_\"", "torch.nn as nn import numpy as np import matplotlib matplotlib.use(\"Agg\")", "True)[\"macro avg\"][\"f1-score\"] att_x_grad_pred = classification_report(actual, att_x_grad_set[:,_k_].cpu().data.numpy(), output_dict = True)[\"macro avg\"][\"f1-score\"]", "labels = sentences.to(device), lengths.to(device), labels.to(device) yhat, weights_or = model(sentences, lengths,", "= \"descending\" plt.savefig(save_path + \"_correct_classified_\" + ordering + \".png\") results.to_csv(save_path", "att_grad_set[doc_id, _j_] = yhat_att_grad.max(-1)[1] yhat_att_x_grad, _ = model(sentence_att_mul_grad,lengths) att_x_grad_set[doc_id, _j_]", "lengths, retain_gradient = True) masking = yhat.max(-1)[1] == labels if", "= [] temp_act = [] temp_act.append(labels.cpu().data.numpy()) temp_pred.append(yhat.max(-1)[1].cpu().data.numpy()) model.eval() actual_set[doc_id] =", "top_att_mul_grad[:,_j_]] = 0 sentence_rand[rows, top_rand[:,_j_]] = 0 yhat_rand, _ =", "= yhat[masking] sentences = sentences[masking] labels = labels[masking] lengths =", 
"g1.size(1), largest = largest)[1] top_att = torch.topk(weights, k = weights.size(1),", "lengths, labels = sentences.to(device), lengths.to(device), labels.to(device) yhat, weights_or = model(sentences,", "model.embed[masking] g1 = (g* em).sum(-1)[:,:max_lengths] g1[model_masks[:,:max_lengths]] = float(\"-inf\") sentence_att =", "= weights.size(1), largest = largest)[1] temp_pred = [] temp_act =", "model(sentence_att,lengths) att_set[doc_id, _j_] = yhat_att.max(-1)[1] yhat_grad, _ = model(sentence_grad,lengths) grad_set[doc_id,", "torch.zeros([data_size, 1]).long().to(device) docs = [] for batchi, (doc_id, sentences, lengths,", "sentence_att[rows, top_att[:,_j_]] = 0 sentence_att_grad[rows, top_att_grad[:,_j_]] = 0 sentence_att_mul_grad[rows, top_att_mul_grad[:,_j_]]", "enumerate(data): model.train() torch.cuda.empty_cache() model.zero_grad() sentences, lengths, labels = sentences.to(device), lengths.to(device),", "largest)[1] top_att = torch.topk(weights, k = weights.size(1), largest = largest)[1]", "True): print(\"\\n--- Degrading Model Performance \\n\") modulo = round(len(data) /", "maximum = max(lengths) - 1 elif max(lengths) > 10 :", "elif max(lengths) > 10 : maximum = 10 print(maximum) grad_set", "rand_set = rand_set[docs] att_set = att_set[docs] grad_set = grad_set[docs] att_grad_set", "\\n\") modulo = round(len(data) / 10) + 1 model.embedding.weight.requires_grad_(True) actual", "<= 10 : maximum = max(lengths) - 1 elif max(lengths)", "actual_set[doc_id] = labels.unsqueeze(-1) rand_set[doc_id, 0] = yhat.max(-1)[1] att_set[doc_id, 0] =", "[] results[\"attention\"]= [] results[\"gradient\"] = [] results[\"grad_attention\"] = [] results[\"grad*attention\"]", "(doc_id, sentences, lengths, labels) in enumerate(data): model.train() torch.cuda.empty_cache() model.zero_grad() sentences,", "yhat.max(-1)[1] att_set[doc_id, 0] = yhat.max(-1)[1] grad_set[doc_id, 0] = yhat.max(-1)[1] att_grad_set[doc_id,", "= True)[\"macro avg\"][\"f1-score\"] att_pred = 
import torch
import torch.nn as nn
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import *
from sklearn.metrics import precision_recall_fscore_support as prfs

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


def degrading_model_perf(data, model, save_path, data_size, largest = True):
    """Measure how classification performance degrades as input tokens are
    progressively removed (zeroed) in order of importance.

    Five importance orderings are compared: random, attention weights,
    input gradient (embedding-grad * embedding), attention gradient, and
    attention * attention-gradient. After each removal step the batch is
    re-predicted; per-step macro-F1 (sklearn ``classification_report``)
    is accumulated, plotted as line curves and dumped to CSV.

    Parameters
    ----------
    data : iterable yielding ``(doc_id, sentences, lengths, labels)`` batches.
    model : attention model; must expose ``embedding``, ``embed``,
        ``weights`` and ``masks`` attributes and accept
        ``retain_gradient=True`` — assumed from usage, confirm against model.
    save_path : str
        Prefix for the output ``.png`` and ``.csv`` files.
    data_size : int
        Total number of documents; sizes the per-document score buffers.
    largest : bool
        If True, keep correctly-classified documents and remove the MOST
        important tokens first; if False, keep misclassified documents and
        remove the LEAST important first.
    """
    print("\n--- Degrading Model Performance \n")

    modulo = round(len(data) / 10) + 1  # progress-print interval
    model.embedding.weight.requires_grad_(True)

    results = {"random": [], "attention": [], "gradient": [],
               "grad_attention": [], "grad*attention": []}

    # Cap the number of degradation steps at 10 (or sequence length - 1
    # for short inputs), using the first batch's lengths as a proxy.
    _, _, lengths, _ = next(iter(data))
    maximum = max(lengths)
    if maximum <= 10:
        maximum = maximum - 1
    else:
        maximum = 10
    print(maximum)

    # Per-ordering prediction buffers: one row per document, one column
    # per degradation step. Step 0 holds the undegraded prediction.
    grad_set = torch.zeros([data_size, maximum]).long().to(device)
    att_set = torch.zeros([data_size, maximum]).long().to(device)
    rand_set = torch.zeros([data_size, maximum]).long().to(device)
    att_grad_set = torch.zeros([data_size, maximum]).long().to(device)
    att_x_grad_set = torch.zeros([data_size, maximum]).long().to(device)
    actual_set = torch.zeros([data_size, 1]).long().to(device)

    docs = []
    for batchi, (doc_id, sentences, lengths, labels) in enumerate(data):
        model.train()
        torch.cuda.empty_cache()
        model.zero_grad()
        sentences, lengths, labels = (sentences.to(device),
                                      lengths.to(device),
                                      labels.to(device))

        yhat, weights_or = model(sentences, lengths, retain_gradient = True)

        # Retain correctly classified docs (largest=True) or the
        # misclassified ones (largest=False). Computed once, not
        # compute-then-overwrite as before.
        if largest:
            masking = yhat.max(-1)[1] == labels
        else:
            masking = yhat.max(-1)[1] != labels

        # Backprop the winning logit so embedding / attention grads exist.
        yhat.max(-1)[0].sum().backward(retain_graph = True)

        maxi = max(lengths)
        doc_id = doc_id[masking]
        yhat = yhat[masking]
        sentences = sentences[masking]
        labels = labels[masking]
        lengths = lengths[masking]
        weights_or = weights_or[masking]
        docs.extend(doc_id)

        g = model.embed.grad[masking]
        weights_def_grad = model.weights.grad[masking]
        max_lengths = max(max(lengths), maxi)
        model_masks = model.masks[masking]

        with torch.no_grad():
            # Importance scores; padded positions are forced to -inf so
            # topk never ranks them ahead of real tokens.
            weights = weights_or.clone()
            weight_mul_grad = weights_or * weights_def_grad
            weight_mul_grad[model_masks[:, :max_lengths]] = float("-inf")
            weights_def_grad_soft = weights_def_grad.clone()
            weights_def_grad_soft[model_masks[:, :max_lengths]] = float("-inf")

            em = model.embed[masking]
            g1 = (g * em).sum(-1)[:, :max_lengths]
            # (original code assigned this mask twice; once suffices)
            g1[model_masks[:, :max_lengths]] = float("-inf")

            # One mutable copy of the inputs per ordering.
            sentence_att = sentences.clone()[:, :max_lengths]
            sentence_grad = sentences.clone()[:, :max_lengths]
            sentence_rand = sentences.clone()[:, :max_lengths]
            sentence_att_grad = sentences.clone()[:, :max_lengths]
            sentence_att_mul_grad = sentences.clone()[:, :max_lengths]

            # Full token ranking per ordering (most important first when
            # largest=True). Random ranking = topk over gaussian noise.
            top_grad = torch.topk(g1, k = g1.size(1), largest = largest)[1]
            top_att = torch.topk(weights, k = weights.size(1), largest = largest)[1]
            top_rand = torch.randn(top_att.shape)
            top_rand = torch.topk(top_rand, k = weights.size(1), largest = largest)[1]
            top_att_grad = torch.topk(weights_def_grad_soft, k = weights.size(1), largest = largest)[1]
            top_att_mul_grad = torch.topk(weight_mul_grad, k = weights.size(1), largest = largest)[1]

            model.eval()

            # Step 0: record labels and the undegraded prediction for
            # every ordering.
            actual_set[doc_id] = labels.unsqueeze(-1)
            rand_set[doc_id, 0] = yhat.max(-1)[1]
            att_set[doc_id, 0] = yhat.max(-1)[1]
            grad_set[doc_id, 0] = yhat.max(-1)[1]
            att_grad_set[doc_id, 0] = yhat.max(-1)[1]
            att_x_grad_set[doc_id, 0] = yhat.max(-1)[1]

            rows = torch.arange(sentences.size(0))
            for _j_ in range(1, maximum):
                # Zero the j-th ranked token of each document under each
                # ordering (cumulative across steps), then re-predict.
                sentence_grad[rows, top_grad[:, _j_]] = 0
                sentence_att[rows, top_att[:, _j_]] = 0
                sentence_att_grad[rows, top_att_grad[:, _j_]] = 0
                sentence_att_mul_grad[rows, top_att_mul_grad[:, _j_]] = 0
                sentence_rand[rows, top_rand[:, _j_]] = 0

                yhat_rand, _ = model(sentence_rand, lengths)
                rand_set[doc_id, _j_] = yhat_rand.max(-1)[1]
                yhat_att, _ = model(sentence_att, lengths)
                att_set[doc_id, _j_] = yhat_att.max(-1)[1]
                yhat_grad, _ = model(sentence_grad, lengths)
                grad_set[doc_id, _j_] = yhat_grad.max(-1)[1]
                yhat_att_grad, _ = model(sentence_att_grad, lengths)
                att_grad_set[doc_id, _j_] = yhat_att_grad.max(-1)[1]
                yhat_att_x_grad, _ = model(sentence_att_mul_grad, lengths)
                att_x_grad_set[doc_id, _j_] = yhat_att_x_grad.max(-1)[1]

        if batchi % modulo == 0:
            print("Remaining: ", len(data) - batchi)

    # Restrict the buffers to the documents actually retained by the mask.
    docs = torch.LongTensor(docs)
    rand_set = rand_set[docs]
    att_set = att_set[docs]
    grad_set = grad_set[docs]
    att_grad_set = att_grad_set[docs]
    att_x_grad_set = att_x_grad_set[docs]
    actual_set = actual_set[docs]

    # Macro-F1 per degradation step, per ordering. `actual` is
    # loop-invariant, so it is computed once (was recomputed per step).
    actual = actual_set.flatten().cpu().data.numpy()
    for _k_ in range(maximum):
        rand_pred = classification_report(actual, rand_set[:, _k_].cpu().data.numpy(), output_dict = True)["macro avg"]["f1-score"]
        att_pred = classification_report(actual, att_set[:, _k_].cpu().data.numpy(), output_dict = True)["macro avg"]["f1-score"]
        grad_pred = classification_report(actual, grad_set[:, _k_].cpu().data.numpy(), output_dict = True)["macro avg"]["f1-score"]
        att_grad_pred = classification_report(actual, att_grad_set[:, _k_].cpu().data.numpy(), output_dict = True)["macro avg"]["f1-score"]
        att_x_grad_pred = classification_report(actual, att_x_grad_set[:, _k_].cpu().data.numpy(), output_dict = True)["macro avg"]["f1-score"]

        results["random"].append(rand_pred)
        results["attention"].append(att_pred)
        results["gradient"].append(grad_pred)
        results["grad_attention"].append(att_grad_pred)
        results["grad*attention"].append(att_x_grad_pred)

    results = pd.DataFrame.from_dict(results)
    results.plot(kind = "line", figsize = (18, 10))

    ordering = "descending" if largest else "ascending"
    # NOTE(review): the filename always says "correct_classified" even
    # though largest=False retains the MISclassified documents — confirm
    # this naming is intended before relying on the output files.
    plt.savefig(save_path + "_correct_classified_" + ordering + ".png")
    results.to_csv(save_path + "_correct_classified_" + ordering + ".csv")
    plt.close()  # release the figure so repeated calls don't accumulate
plt import pandas as", "em = model.embed[masking] g1 = (g* em).sum(-1)[:,:max_lengths] g1[model_masks[:,:max_lengths]] = float(\"-inf\")", "= sentences.clone()[:,:max_lengths] sentence_rand = sentences.clone()[:,:max_lengths] sentence_att_grad = sentences.clone()[:,:max_lengths] sentence_att_mul_grad =", "print(\"Remaining: \", len(data)- batchi) docs = torch.LongTensor(docs) rand_set = rand_set[docs]", "largest = largest)[1] top_att_mul_grad = torch.topk(weight_mul_grad, k = weights.size(1), largest", "\"descending\" plt.savefig(save_path + \"_correct_classified_\" + ordering + \".png\") results.to_csv(save_path +", "att_x_grad_set[doc_id, _j_] = yhat_att_x_grad.max(-1)[1] if batchi % modulo == 0", "= True)[\"macro avg\"][\"f1-score\"] att_grad_pred = classification_report(actual, att_grad_set[:,_k_].cpu().data.numpy(), output_dict = True)[\"macro", "= classification_report(actual, att_grad_set[:,_k_].cpu().data.numpy(), output_dict = True)[\"macro avg\"][\"f1-score\"] att_x_grad_pred = classification_report(actual,", "largest = largest)[1] top_att = torch.topk(weights, k = weights.size(1), largest", "= 10 print(maximum) grad_set = torch.zeros([data_size, maximum]).long().to(device) att_set = torch.zeros([data_size,", "= torch.topk(g1, k = g1.size(1), largest = largest)[1] top_att =", "_ = model(sentence_att_grad,lengths) att_grad_set[doc_id, _j_] = yhat_att_grad.max(-1)[1] yhat_att_x_grad, _ =", "= max(lengths) if max(lengths) <= 10 : maximum = max(lengths)", "batchi, (doc_id, sentences, lengths, labels) in enumerate(data): model.train() torch.cuda.empty_cache() model.zero_grad()", "torch.LongTensor(docs) rand_set = rand_set[docs] att_set = att_set[docs] grad_set = grad_set[docs]", "weights = weights_or.clone() weight_mul_grad = weights_or * weights_def_grad weight_mul_grad[model_masks[:,:max_lengths]] =", "next(iter(data)) maximum = max(lengths) if max(lengths) <= 10 : maximum", "= largest)[1] temp_pred = [] temp_act = [] 
temp_act.append(labels.cpu().data.numpy()) temp_pred.append(yhat.max(-1)[1].cpu().data.numpy())", "weights_def_grad.clone() weights_def_grad_soft[model_masks[:,:max_lengths]] = float(\"-inf\") em = model.embed[masking] g1 = (g*", "sentences.clone()[:,:max_lengths] sentence_grad = sentences.clone()[:,:max_lengths] sentence_rand = sentences.clone()[:,:max_lengths] sentence_att_grad = sentences.clone()[:,:max_lengths]", "= doc_id[masking] yhat = yhat[masking] sentences = sentences[masking] labels =", "= yhat.max(-1)[1] grad_set[doc_id, 0] = yhat.max(-1)[1] att_grad_set[doc_id, 0] = yhat.max(-1)[1]", "sklearn.metrics import precision_recall_fscore_support as prfs device = torch.device('cuda:0' if torch.cuda.is_available()", "[] for batchi, (doc_id, sentences, lengths, labels) in enumerate(data): model.train()", "= yhat_att.max(-1)[1] yhat_grad, _ = model(sentence_grad,lengths) grad_set[doc_id, _j_] = yhat_grad.max(-1)[1]", "= g1.size(1), largest = largest)[1] top_att = torch.topk(weights, k =", "output_dict = True)[\"macro avg\"][\"f1-score\"] att_grad_pred = classification_report(actual, att_grad_set[:,_k_].cpu().data.numpy(), output_dict =", "[] temp_act = [] temp_act.append(labels.cpu().data.numpy()) temp_pred.append(yhat.max(-1)[1].cpu().data.numpy()) model.eval() actual_set[doc_id] = labels.unsqueeze(-1)", "import * from sklearn.metrics import precision_recall_fscore_support as prfs device =", "pd from sklearn.metrics import * from sklearn.metrics import precision_recall_fscore_support as", "= yhat.max(-1)[1] att_x_grad_set[doc_id, 0] = yhat.max(-1)[1] rows = torch.arange(sentences.size(0)) for", "labels.to(device) yhat, weights_or = model(sentences, lengths, retain_gradient = True) masking", "labels if largest == False: masking = yhat.max(-1)[1] != labels", "with torch.no_grad(): weights = weights_or.clone() weight_mul_grad = weights_or * weights_def_grad", "att_x_grad_pred = classification_report(actual, att_x_grad_set[:,_k_].cpu().data.numpy(), output_dict = 
True)[\"macro avg\"][\"f1-score\"] results[\"random\"].append(rand_pred) results[\"attention\"].append(att_pred)", "device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') def degrading_model_perf(data, model,", "k = weights.size(1), largest = largest)[1] top_att_mul_grad = torch.topk(weight_mul_grad, k", "if batchi % modulo == 0 : print(\"Remaining: \", len(data)-", "yhat_rand, _ = model(sentence_rand,lengths) rand_set[doc_id, _j_] = yhat_rand.max(-1)[1] yhat_att, _", "sentence_grad[rows, top_grad[:,_j_]] = 0 sentence_att[rows, top_att[:,_j_]] = 0 sentence_att_grad[rows, top_att_grad[:,_j_]]", "att_grad_set[doc_id, 0] = yhat.max(-1)[1] att_x_grad_set[doc_id, 0] = yhat.max(-1)[1] rows =", "for batchi, (doc_id, sentences, lengths, labels) in enumerate(data): model.train() torch.cuda.empty_cache()", "_j_ in range(1,maximum): sentence_grad[rows, top_grad[:,_j_]] = 0 sentence_att[rows, top_att[:,_j_]] =", "float(\"-inf\") sentence_att = sentences.clone()[:,:max_lengths] sentence_grad = sentences.clone()[:,:max_lengths] sentence_rand = sentences.clone()[:,:max_lengths]", "maximum]).long().to(device) actual_set = torch.zeros([data_size, 1]).long().to(device) docs = [] for batchi,", "np import matplotlib matplotlib.use(\"Agg\") import matplotlib.pyplot as plt import pandas", "docs.extend(doc_id) g = model.embed.grad[masking] weights_def_grad = model.weights.grad[masking] max_lengths = max(max(lengths),", "docs = torch.LongTensor(docs) rand_set = rand_set[docs] att_set = att_set[docs] grad_set", "in range(1,maximum): sentence_grad[rows, top_grad[:,_j_]] = 0 sentence_att[rows, top_att[:,_j_]] = 0" ]
[ "and store %d\" % cast_vote_id) @shared_task def voters_email(election_id, subject_template, body_template,", "subject_template, body_template, extra_vars={}, voter_constraints_include=None, voter_constraints_exclude=None): \"\"\" voter_constraints_include are conditions on", "extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) notification", "celery.utils.log import get_logger import signals from models import CastVote, Election,", "\"\"\" Celery queued tasks for Helios 2010-08-01 <EMAIL> \"\"\" import", "voters: single_voter_email.delay(voter.uuid, subject_template, body_template, extra_vars) @shared_task def voters_notify(election_id, notification_template, extra_vars={}):", "election.helios_trustee_decrypt() election_notify_admin.delay(election_id=election_id, subject='Helios Decrypt', body=\"\"\" Helios has decrypted its portion", "@shared_task def cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs): cast_vote = CastVote.objects.get(id=cast_vote_id) result =", "tally_helios_decrypt.delay(election_id=election.id) @shared_task def tally_helios_decrypt(election_id): election = Election.objects.get(id=election_id) election.helios_trustee_decrypt() election_notify_admin.delay(election_id=election_id, subject='Helios", "\"\"\" % (voter_file.election.name, voter_file.num_voters)) @shared_task def election_notify_admin(election_id, subject, body): election", "the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) notification = render_template_raw(None, notification_template, the_vars)", "single_voter_notify.delay(voter.uuid, notification_template, extra_vars) @shared_task def single_voter_email(voter_uuid, subject_template, body_template, extra_vars={}): voter", "in voters: single_voter_email.delay(voter.uuid, subject_template, body_template, extra_vars) @shared_task def voters_notify(election_id, notification_template,", "subject=\"encrypted tally 
computed\", body=\"\"\" The encrypted tally for election %s", "def election_compute_tally(election_id): election = Election.objects.get(id=election_id) election.compute_tally() election_notify_admin.delay(election_id=election_id, subject=\"encrypted tally computed\",", "voter.send_notification(notification) @shared_task def election_compute_tally(election_id): election = Election.objects.get(id=election_id) election.compute_tally() election_notify_admin.delay(election_id=election_id, subject=\"encrypted", "@shared_task def single_voter_notify(voter_uuid, notification_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars =", "% cast_vote_id) @shared_task def voters_email(election_id, subject_template, body_template, extra_vars={}, voter_constraints_include=None, voter_constraints_exclude=None):", "election = voter.election user = voter.get_user() if result: # send", "\"\"\" election = Election.objects.get(id=election_id) # select the right list of", "if election.has_helios_trustee(): tally_helios_decrypt.delay(election_id=election.id) @shared_task def tally_helios_decrypt(election_id): election = Election.objects.get(id=election_id) election.helios_trustee_decrypt()", "the_vars.update({'voter': voter}) subject = render_template_raw(None, subject_template, the_vars) body = render_template_raw(None,", "voter_file_process(voter_file_id): voter_file = VoterFile.objects.get(id=voter_file_id) voter_file.process() election_notify_admin.delay(election_id=voter_file.election.id, subject='voter file processed', body=\"\"\"", "decrypted its portion of the tally for election %s. 
--", "voters voters = election.voter_set.all() if voter_constraints_include: voters = voters.filter(**voter_constraints_include) if", "= voter.election user = voter.get_user() if result: # send the", "celery import shared_task from celery.utils.log import get_logger import signals from", "% election.name) @shared_task def voter_file_process(voter_file_id): voter_file = VoterFile.objects.get(id=voter_file_id) voter_file.process() election_notify_admin.delay(election_id=voter_file.election.id,", "the signal signals.vote_cast.send(sender=election, election=election, user=user, voter=voter, cast_vote=cast_vote) if status_update_message and", "(voter_file.election.name, voter_file.num_voters)) @shared_task def election_notify_admin(election_id, subject, body): election = Election.objects.get(id=election_id)", "\"\"\" % election.name) @shared_task def voter_file_process(voter_file_id): voter_file = VoterFile.objects.get(id=voter_file_id) voter_file.process()", "body_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter})", "extra_vars={}): election = Election.objects.get(id=election_id) for voter in election.voter_set.all(): single_voter_notify.delay(voter.uuid, notification_template,", "have been created. 
-- Helios \"\"\" % (voter_file.election.name, voter_file.num_voters)) @shared_task", "= cast_vote.verify_and_store() voter = cast_vote.voter election = voter.election user =", "extra_vars) @shared_task def voters_notify(election_id, notification_template, extra_vars={}): election = Election.objects.get(id=election_id) for", "body) @shared_task def single_voter_notify(voter_uuid, notification_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars", "file processed', body=\"\"\" Your voter file upload for election %s", "copy.copy(extra_vars) the_vars.update({'voter': voter}) notification = render_template_raw(None, notification_template, the_vars) voter.send_notification(notification) @shared_task", "= cast_vote.voter election = voter.election user = voter.get_user() if result:", "the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) subject = render_template_raw(None, subject_template, the_vars)", "body_template, extra_vars={}, voter_constraints_include=None, voter_constraints_exclude=None): \"\"\" voter_constraints_include are conditions on including", "election %s has been processed. 
%s voters have been created.", "voters = election.voter_set.all() if voter_constraints_include: voters = voters.filter(**voter_constraints_include) if voter_constraints_exclude:", "-- Helios \"\"\" % election.name) @shared_task def voter_file_process(voter_file_id): voter_file =", "voters = voters.filter(**voter_constraints_include) if voter_constraints_exclude: voters = voters.exclude(**voter_constraints_exclude) for voter", "\"\"\" import copy from celery import shared_task from celery.utils.log import", "Election.objects.get(id=election_id) election.compute_tally() election_notify_admin.delay(election_id=election_id, subject=\"encrypted tally computed\", body=\"\"\" The encrypted tally", "from view_utils import render_template_raw @shared_task def cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs): cast_vote", "voters_notify(election_id, notification_template, extra_vars={}): election = Election.objects.get(id=election_id) for voter in election.voter_set.all():", "of the tally for election %s. -- Helios \"\"\" %", "= get_logger(cast_vote_verify_and_store.__name__) logger.error(\"Failed to verify and store %d\" % cast_vote_id)", "-- Helios \"\"\" % (voter_file.election.name, voter_file.num_voters)) @shared_task def election_notify_admin(election_id, subject,", "= render_template_raw(None, body_template, the_vars) voter.send_message(subject, body) @shared_task def single_voter_notify(voter_uuid, notification_template,", "of voters voters = election.voter_set.all() if voter_constraints_include: voters = voters.filter(**voter_constraints_include)", "= voter.get_user() if result: # send the signal signals.vote_cast.send(sender=election, election=election,", "been computed. -- Helios \"\"\" % election.name) if election.has_helios_trustee(): tally_helios_decrypt.delay(election_id=election.id)", "election %s. 
-- Helios \"\"\" % election.name) @shared_task def voter_file_process(voter_file_id):", "= Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) notification = render_template_raw(None,", "result: # send the signal signals.vote_cast.send(sender=election, election=election, user=user, voter=voter, cast_vote=cast_vote)", "user=user, voter=voter, cast_vote=cast_vote) if status_update_message and user.can_update_status(): user.update_status(status_update_message) else: logger", "copy.copy(extra_vars) the_vars.update({'voter': voter}) subject = render_template_raw(None, subject_template, the_vars) body =", "extra_vars) @shared_task def single_voter_email(voter_uuid, subject_template, body_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid)", "import render_template_raw @shared_task def cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs): cast_vote = CastVote.objects.get(id=cast_vote_id)", "body_template, the_vars) voter.send_message(subject, body) @shared_task def single_voter_notify(voter_uuid, notification_template, extra_vars={}): voter", "= Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) subject = render_template_raw(None,", "encrypted tally for election %s has been computed. -- Helios", "subject='voter file processed', body=\"\"\" Your voter file upload for election", "%s has been processed. %s voters have been created. 
--", "2010-08-01 <EMAIL> \"\"\" import copy from celery import shared_task from", "**kwargs): cast_vote = CastVote.objects.get(id=cast_vote_id) result = cast_vote.verify_and_store() voter = cast_vote.voter", "right list of voters voters = election.voter_set.all() if voter_constraints_include: voters", "Election.objects.get(id=election_id) # select the right list of voters voters =", "= copy.copy(extra_vars) the_vars.update({'voter': voter}) notification = render_template_raw(None, notification_template, the_vars) voter.send_notification(notification)", "voter_constraints_exclude: voters = voters.exclude(**voter_constraints_exclude) for voter in voters: single_voter_email.delay(voter.uuid, subject_template,", "the_vars) voter.send_notification(notification) @shared_task def election_compute_tally(election_id): election = Election.objects.get(id=election_id) election.compute_tally() election_notify_admin.delay(election_id=election_id,", "copy from celery import shared_task from celery.utils.log import get_logger import", "notification_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter})", "Decrypt', body=\"\"\" Helios has decrypted its portion of the tally", "store %d\" % cast_vote_id) @shared_task def voters_email(election_id, subject_template, body_template, extra_vars={},", "for voter in election.voter_set.all(): single_voter_notify.delay(voter.uuid, notification_template, extra_vars) @shared_task def single_voter_email(voter_uuid,", "notification_template, the_vars) voter.send_notification(notification) @shared_task def election_compute_tally(election_id): election = Election.objects.get(id=election_id) election.compute_tally()", "election.has_helios_trustee(): tally_helios_decrypt.delay(election_id=election.id) @shared_task def tally_helios_decrypt(election_id): election = Election.objects.get(id=election_id) election.helios_trustee_decrypt() 
election_notify_admin.delay(election_id=election_id,", "VoterFile.objects.get(id=voter_file_id) voter_file.process() election_notify_admin.delay(election_id=voter_file.election.id, subject='voter file processed', body=\"\"\" Your voter file", "voter}) notification = render_template_raw(None, notification_template, the_vars) voter.send_notification(notification) @shared_task def election_compute_tally(election_id):", "status_update_message=None, **kwargs): cast_vote = CastVote.objects.get(id=cast_vote_id) result = cast_vote.verify_and_store() voter =", "status_update_message and user.can_update_status(): user.update_status(status_update_message) else: logger = get_logger(cast_vote_verify_and_store.__name__) logger.error(\"Failed to", "voter.send_message(subject, body) @shared_task def single_voter_notify(voter_uuid, notification_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid)", "voters = voters.exclude(**voter_constraints_exclude) for voter in voters: single_voter_email.delay(voter.uuid, subject_template, body_template,", "has been computed. -- Helios \"\"\" % election.name) if election.has_helios_trustee():", "% election.name) if election.has_helios_trustee(): tally_helios_decrypt.delay(election_id=election.id) @shared_task def tally_helios_decrypt(election_id): election =", "render_template_raw @shared_task def cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs): cast_vote = CastVote.objects.get(id=cast_vote_id) result", "verify and store %d\" % cast_vote_id) @shared_task def voters_email(election_id, subject_template,", "election.compute_tally() election_notify_admin.delay(election_id=election_id, subject=\"encrypted tally computed\", body=\"\"\" The encrypted tally for", "voter_file.num_voters)) @shared_task def election_notify_admin(election_id, subject, body): election = Election.objects.get(id=election_id) election.admin.send_message(subject,", "processed. %s voters have been created. 
-- Helios \"\"\" %", "voter = cast_vote.voter election = voter.election user = voter.get_user() if", "voters.filter(**voter_constraints_include) if voter_constraints_exclude: voters = voters.exclude(**voter_constraints_exclude) for voter in voters:", "CastVote, Election, Voter, VoterFile from view_utils import render_template_raw @shared_task def", "def voter_file_process(voter_file_id): voter_file = VoterFile.objects.get(id=voter_file_id) voter_file.process() election_notify_admin.delay(election_id=voter_file.election.id, subject='voter file processed',", "single_voter_email(voter_uuid, subject_template, body_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars)", "for election %s has been processed. %s voters have been", "cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs): cast_vote = CastVote.objects.get(id=cast_vote_id) result = cast_vote.verify_and_store() voter", "computed. -- Helios \"\"\" % election.name) if election.has_helios_trustee(): tally_helios_decrypt.delay(election_id=election.id) @shared_task", "the_vars) body = render_template_raw(None, body_template, the_vars) voter.send_message(subject, body) @shared_task def", "import CastVote, Election, Voter, VoterFile from view_utils import render_template_raw @shared_task", "Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) subject = render_template_raw(None, subject_template,", "= render_template_raw(None, subject_template, the_vars) body = render_template_raw(None, body_template, the_vars) voter.send_message(subject,", "for Helios 2010-08-01 <EMAIL> \"\"\" import copy from celery import", "single_voter_notify(voter_uuid, notification_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter':", "Helios \"\"\" % election.name) if election.has_helios_trustee(): tally_helios_decrypt.delay(election_id=election.id) 
@shared_task def tally_helios_decrypt(election_id):", "from celery import shared_task from celery.utils.log import get_logger import signals", "voter_file = VoterFile.objects.get(id=voter_file_id) voter_file.process() election_notify_admin.delay(election_id=voter_file.election.id, subject='voter file processed', body=\"\"\" Your", "def single_voter_email(voter_uuid, subject_template, body_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars =", "election = Election.objects.get(id=election_id) election.helios_trustee_decrypt() election_notify_admin.delay(election_id=election_id, subject='Helios Decrypt', body=\"\"\" Helios has", "= Election.objects.get(id=election_id) election.helios_trustee_decrypt() election_notify_admin.delay(election_id=election_id, subject='Helios Decrypt', body=\"\"\" Helios has decrypted", "result = cast_vote.verify_and_store() voter = cast_vote.voter election = voter.election user", "@shared_task def election_compute_tally(election_id): election = Election.objects.get(id=election_id) election.compute_tally() election_notify_admin.delay(election_id=election_id, subject=\"encrypted tally", "voter_constraints_exclude are conditions on excluding voters \"\"\" election = Election.objects.get(id=election_id)", "-- Helios \"\"\" % election.name) if election.has_helios_trustee(): tally_helios_decrypt.delay(election_id=election.id) @shared_task def", "%s voters have been created. 
-- Helios \"\"\" % (voter_file.election.name,", "user.can_update_status(): user.update_status(status_update_message) else: logger = get_logger(cast_vote_verify_and_store.__name__) logger.error(\"Failed to verify and", "conditions on including voters voter_constraints_exclude are conditions on excluding voters", "voters \"\"\" election = Election.objects.get(id=election_id) # select the right list", "if voter_constraints_include: voters = voters.filter(**voter_constraints_include) if voter_constraints_exclude: voters = voters.exclude(**voter_constraints_exclude)", "= CastVote.objects.get(id=cast_vote_id) result = cast_vote.verify_and_store() voter = cast_vote.voter election =", "def voters_email(election_id, subject_template, body_template, extra_vars={}, voter_constraints_include=None, voter_constraints_exclude=None): \"\"\" voter_constraints_include are", "voter in election.voter_set.all(): single_voter_notify.delay(voter.uuid, notification_template, extra_vars) @shared_task def single_voter_email(voter_uuid, subject_template,", "the right list of voters voters = election.voter_set.all() if voter_constraints_include:", "else: logger = get_logger(cast_vote_verify_and_store.__name__) logger.error(\"Failed to verify and store %d\"", "voters.exclude(**voter_constraints_exclude) for voter in voters: single_voter_email.delay(voter.uuid, subject_template, body_template, extra_vars) @shared_task", "list of voters voters = election.voter_set.all() if voter_constraints_include: voters =", "body = render_template_raw(None, body_template, the_vars) voter.send_message(subject, body) @shared_task def single_voter_notify(voter_uuid,", "tally computed\", body=\"\"\" The encrypted tally for election %s has", "voter_file.process() election_notify_admin.delay(election_id=voter_file.election.id, subject='voter file processed', body=\"\"\" Your voter file upload", "conditions on excluding voters \"\"\" election = Election.objects.get(id=election_id) # select", "if result: # send the 
signal signals.vote_cast.send(sender=election, election=election, user=user, voter=voter,", "Voter, VoterFile from view_utils import render_template_raw @shared_task def cast_vote_verify_and_store(cast_vote_id, status_update_message=None,", "def voters_notify(election_id, notification_template, extra_vars={}): election = Election.objects.get(id=election_id) for voter in", "%d\" % cast_vote_id) @shared_task def voters_email(election_id, subject_template, body_template, extra_vars={}, voter_constraints_include=None,", "if voter_constraints_exclude: voters = voters.exclude(**voter_constraints_exclude) for voter in voters: single_voter_email.delay(voter.uuid,", "= voters.exclude(**voter_constraints_exclude) for voter in voters: single_voter_email.delay(voter.uuid, subject_template, body_template, extra_vars)", "election = Election.objects.get(id=election_id) election.compute_tally() election_notify_admin.delay(election_id=election_id, subject=\"encrypted tally computed\", body=\"\"\" The", "render_template_raw(None, body_template, the_vars) voter.send_message(subject, body) @shared_task def single_voter_notify(voter_uuid, notification_template, extra_vars={}):", "election_notify_admin.delay(election_id=election_id, subject='Helios Decrypt', body=\"\"\" Helios has decrypted its portion of", "election.name) @shared_task def voter_file_process(voter_file_id): voter_file = VoterFile.objects.get(id=voter_file_id) voter_file.process() election_notify_admin.delay(election_id=voter_file.election.id, subject='voter", "subject_template, the_vars) body = render_template_raw(None, body_template, the_vars) voter.send_message(subject, body) @shared_task", "\"\"\" voter_constraints_include are conditions on including voters voter_constraints_exclude are conditions", "@shared_task def single_voter_email(voter_uuid, subject_template, body_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars", "subject_template, body_template, extra_vars={}): voter = 
Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter':", "voter}) subject = render_template_raw(None, subject_template, the_vars) body = render_template_raw(None, body_template,", "upload for election %s has been processed. %s voters have", "on including voters voter_constraints_exclude are conditions on excluding voters \"\"\"", "for election %s has been computed. -- Helios \"\"\" %", "election_notify_admin.delay(election_id=voter_file.election.id, subject='voter file processed', body=\"\"\" Your voter file upload for", "extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) subject", "user.update_status(status_update_message) else: logger = get_logger(cast_vote_verify_and_store.__name__) logger.error(\"Failed to verify and store", "the_vars.update({'voter': voter}) notification = render_template_raw(None, notification_template, the_vars) voter.send_notification(notification) @shared_task def", "voter file upload for election %s has been processed. %s", "has decrypted its portion of the tally for election %s.", "has been processed. %s voters have been created. -- Helios", "CastVote.objects.get(id=cast_vote_id) result = cast_vote.verify_and_store() voter = cast_vote.voter election = voter.election", "voter=voter, cast_vote=cast_vote) if status_update_message and user.can_update_status(): user.update_status(status_update_message) else: logger =", "been created. -- Helios \"\"\" % (voter_file.election.name, voter_file.num_voters)) @shared_task def", "been processed. %s voters have been created. 
-- Helios \"\"\"", "import shared_task from celery.utils.log import get_logger import signals from models", "Election.objects.get(id=election_id) election.helios_trustee_decrypt() election_notify_admin.delay(election_id=election_id, subject='Helios Decrypt', body=\"\"\" Helios has decrypted its", "cast_vote.voter election = voter.election user = voter.get_user() if result: #", "and user.can_update_status(): user.update_status(status_update_message) else: logger = get_logger(cast_vote_verify_and_store.__name__) logger.error(\"Failed to verify", "= Election.objects.get(id=election_id) election.compute_tally() election_notify_admin.delay(election_id=election_id, subject=\"encrypted tally computed\", body=\"\"\" The encrypted", "render_template_raw(None, notification_template, the_vars) voter.send_notification(notification) @shared_task def election_compute_tally(election_id): election = Election.objects.get(id=election_id)", "send the signal signals.vote_cast.send(sender=election, election=election, user=user, voter=voter, cast_vote=cast_vote) if status_update_message", "\"\"\" % election.name) if election.has_helios_trustee(): tally_helios_decrypt.delay(election_id=election.id) @shared_task def tally_helios_decrypt(election_id): election", "from celery.utils.log import get_logger import signals from models import CastVote,", "%s. 
-- Helios \"\"\" % election.name) @shared_task def voter_file_process(voter_file_id): voter_file", "excluding voters \"\"\" election = Election.objects.get(id=election_id) # select the right", "@shared_task def election_notify_admin(election_id, subject, body): election = Election.objects.get(id=election_id) election.admin.send_message(subject, body)", "get_logger import signals from models import CastVote, Election, Voter, VoterFile", "to verify and store %d\" % cast_vote_id) @shared_task def voters_email(election_id,", "= copy.copy(extra_vars) the_vars.update({'voter': voter}) subject = render_template_raw(None, subject_template, the_vars) body", "election_compute_tally(election_id): election = Election.objects.get(id=election_id) election.compute_tally() election_notify_admin.delay(election_id=election_id, subject=\"encrypted tally computed\", body=\"\"\"", "The encrypted tally for election %s has been computed. --", "election.name) if election.has_helios_trustee(): tally_helios_decrypt.delay(election_id=election.id) @shared_task def tally_helios_decrypt(election_id): election = Election.objects.get(id=election_id)", "tally_helios_decrypt(election_id): election = Election.objects.get(id=election_id) election.helios_trustee_decrypt() election_notify_admin.delay(election_id=election_id, subject='Helios Decrypt', body=\"\"\" Helios", "tally for election %s has been computed. -- Helios \"\"\"", "the tally for election %s. 
-- Helios \"\"\" % election.name)", "voters voter_constraints_exclude are conditions on excluding voters \"\"\" election =", "voter_constraints_include are conditions on including voters voter_constraints_exclude are conditions on", "= Election.objects.get(id=election_id) # select the right list of voters voters", "subject='Helios Decrypt', body=\"\"\" Helios has decrypted its portion of the", "view_utils import render_template_raw @shared_task def cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs): cast_vote =", "extra_vars={}, voter_constraints_include=None, voter_constraints_exclude=None): \"\"\" voter_constraints_include are conditions on including voters", "user = voter.get_user() if result: # send the signal signals.vote_cast.send(sender=election,", "@shared_task def voter_file_process(voter_file_id): voter_file = VoterFile.objects.get(id=voter_file_id) voter_file.process() election_notify_admin.delay(election_id=voter_file.election.id, subject='voter file", "Your voter file upload for election %s has been processed.", "render_template_raw(None, subject_template, the_vars) body = render_template_raw(None, body_template, the_vars) voter.send_message(subject, body)", "election = Election.objects.get(id=election_id) # select the right list of voters", "# select the right list of voters voters = election.voter_set.all()", "election %s has been computed. 
-- Helios \"\"\" % election.name)", "notification = render_template_raw(None, notification_template, the_vars) voter.send_notification(notification) @shared_task def election_compute_tally(election_id): election", "voter = Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) notification =", "tasks for Helios 2010-08-01 <EMAIL> \"\"\" import copy from celery", "are conditions on including voters voter_constraints_exclude are conditions on excluding", "voters_email(election_id, subject_template, body_template, extra_vars={}, voter_constraints_include=None, voter_constraints_exclude=None): \"\"\" voter_constraints_include are conditions", "@shared_task def voters_notify(election_id, notification_template, extra_vars={}): election = Election.objects.get(id=election_id) for voter", "@shared_task def tally_helios_decrypt(election_id): election = Election.objects.get(id=election_id) election.helios_trustee_decrypt() election_notify_admin.delay(election_id=election_id, subject='Helios Decrypt',", "its portion of the tally for election %s. 
-- Helios", "<EMAIL> \"\"\" import copy from celery import shared_task from celery.utils.log", "import get_logger import signals from models import CastVote, Election, Voter,", "signals.vote_cast.send(sender=election, election=election, user=user, voter=voter, cast_vote=cast_vote) if status_update_message and user.can_update_status(): user.update_status(status_update_message)", "import copy from celery import shared_task from celery.utils.log import get_logger", "voter_constraints_exclude=None): \"\"\" voter_constraints_include are conditions on including voters voter_constraints_exclude are", "signals from models import CastVote, Election, Voter, VoterFile from view_utils", "= election.voter_set.all() if voter_constraints_include: voters = voters.filter(**voter_constraints_include) if voter_constraints_exclude: voters", "Helios 2010-08-01 <EMAIL> \"\"\" import copy from celery import shared_task", "subject_template, body_template, extra_vars) @shared_task def voters_notify(election_id, notification_template, extra_vars={}): election =", "from models import CastVote, Election, Voter, VoterFile from view_utils import", "election.voter_set.all(): single_voter_notify.delay(voter.uuid, notification_template, extra_vars) @shared_task def single_voter_email(voter_uuid, subject_template, body_template, extra_vars={}):", "def tally_helios_decrypt(election_id): election = Election.objects.get(id=election_id) election.helios_trustee_decrypt() election_notify_admin.delay(election_id=election_id, subject='Helios Decrypt', body=\"\"\"", "get_logger(cast_vote_verify_and_store.__name__) logger.error(\"Failed to verify and store %d\" % cast_vote_id) @shared_task", "in election.voter_set.all(): single_voter_notify.delay(voter.uuid, notification_template, extra_vars) @shared_task def single_voter_email(voter_uuid, subject_template, body_template,", "def single_voter_notify(voter_uuid, notification_template, extra_vars={}): voter = Voter.objects.get(uuid=voter_uuid) the_vars = 
copy.copy(extra_vars)", "for election %s. -- Helios \"\"\" % election.name) @shared_task def", "notification_template, extra_vars) @shared_task def single_voter_email(voter_uuid, subject_template, body_template, extra_vars={}): voter =", "queued tasks for Helios 2010-08-01 <EMAIL> \"\"\" import copy from", "= voters.filter(**voter_constraints_include) if voter_constraints_exclude: voters = voters.exclude(**voter_constraints_exclude) for voter in", "Election.objects.get(id=election_id) for voter in election.voter_set.all(): single_voter_notify.delay(voter.uuid, notification_template, extra_vars) @shared_task def", "file upload for election %s has been processed. %s voters", "election_notify_admin.delay(election_id=election_id, subject=\"encrypted tally computed\", body=\"\"\" The encrypted tally for election", "for voter in voters: single_voter_email.delay(voter.uuid, subject_template, body_template, extra_vars) @shared_task def", "body=\"\"\" The encrypted tally for election %s has been computed.", "election = Election.objects.get(id=election_id) for voter in election.voter_set.all(): single_voter_notify.delay(voter.uuid, notification_template, extra_vars)", "voter = Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) subject =", "created. -- Helios \"\"\" % (voter_file.election.name, voter_file.num_voters)) @shared_task def election_notify_admin(election_id,", "single_voter_email.delay(voter.uuid, subject_template, body_template, extra_vars) @shared_task def voters_notify(election_id, notification_template, extra_vars={}): election", "= render_template_raw(None, notification_template, the_vars) voter.send_notification(notification) @shared_task def election_compute_tally(election_id): election =", "%s has been computed. 
-- Helios \"\"\" % election.name) if", "cast_vote = CastVote.objects.get(id=cast_vote_id) result = cast_vote.verify_and_store() voter = cast_vote.voter election", "logger = get_logger(cast_vote_verify_and_store.__name__) logger.error(\"Failed to verify and store %d\" %", "subject = render_template_raw(None, subject_template, the_vars) body = render_template_raw(None, body_template, the_vars)", "select the right list of voters voters = election.voter_set.all() if", "= VoterFile.objects.get(id=voter_file_id) voter_file.process() election_notify_admin.delay(election_id=voter_file.election.id, subject='voter file processed', body=\"\"\" Your voter", "portion of the tally for election %s. -- Helios \"\"\"", "shared_task from celery.utils.log import get_logger import signals from models import", "are conditions on excluding voters \"\"\" election = Election.objects.get(id=election_id) #", "Helios \"\"\" % election.name) @shared_task def voter_file_process(voter_file_id): voter_file = VoterFile.objects.get(id=voter_file_id)", "processed', body=\"\"\" Your voter file upload for election %s has", "body=\"\"\" Helios has decrypted its portion of the tally for", "cast_vote_id) @shared_task def voters_email(election_id, subject_template, body_template, extra_vars={}, voter_constraints_include=None, voter_constraints_exclude=None): \"\"\"", "Celery queued tasks for Helios 2010-08-01 <EMAIL> \"\"\" import copy", "if status_update_message and user.can_update_status(): user.update_status(status_update_message) else: logger = get_logger(cast_vote_verify_and_store.__name__) logger.error(\"Failed", "notification_template, extra_vars={}): election = Election.objects.get(id=election_id) for voter in election.voter_set.all(): single_voter_notify.delay(voter.uuid,", "logger.error(\"Failed to verify and store %d\" % cast_vote_id) @shared_task def", "cast_vote.verify_and_store() voter = cast_vote.voter election = voter.election user = voter.get_user()", "Helios \"\"\" % 
(voter_file.election.name, voter_file.num_voters)) @shared_task def election_notify_admin(election_id, subject, body):", "def cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs): cast_vote = CastVote.objects.get(id=cast_vote_id) result = cast_vote.verify_and_store()", "@shared_task def voters_email(election_id, subject_template, body_template, extra_vars={}, voter_constraints_include=None, voter_constraints_exclude=None): \"\"\" voter_constraints_include", "on excluding voters \"\"\" election = Election.objects.get(id=election_id) # select the", "# send the signal signals.vote_cast.send(sender=election, election=election, user=user, voter=voter, cast_vote=cast_vote) if", "Voter.objects.get(uuid=voter_uuid) the_vars = copy.copy(extra_vars) the_vars.update({'voter': voter}) notification = render_template_raw(None, notification_template,", "VoterFile from view_utils import render_template_raw @shared_task def cast_vote_verify_and_store(cast_vote_id, status_update_message=None, **kwargs):", "election=election, user=user, voter=voter, cast_vote=cast_vote) if status_update_message and user.can_update_status(): user.update_status(status_update_message) else:", "Helios has decrypted its portion of the tally for election", "<filename>helios/tasks.py \"\"\" Celery queued tasks for Helios 2010-08-01 <EMAIL> \"\"\"", "models import CastVote, Election, Voter, VoterFile from view_utils import render_template_raw", "voter_constraints_include: voters = voters.filter(**voter_constraints_include) if voter_constraints_exclude: voters = voters.exclude(**voter_constraints_exclude) for", "cast_vote=cast_vote) if status_update_message and user.can_update_status(): user.update_status(status_update_message) else: logger = get_logger(cast_vote_verify_and_store.__name__)", "including voters voter_constraints_exclude are conditions on excluding voters \"\"\" election", "voter.election user = voter.get_user() if result: # send the signal", "the_vars) 
voter.send_message(subject, body) @shared_task def single_voter_notify(voter_uuid, notification_template, extra_vars={}): voter =", "tally for election %s. -- Helios \"\"\" % election.name) @shared_task", "body_template, extra_vars) @shared_task def voters_notify(election_id, notification_template, extra_vars={}): election = Election.objects.get(id=election_id)", "voter in voters: single_voter_email.delay(voter.uuid, subject_template, body_template, extra_vars) @shared_task def voters_notify(election_id,", "signal signals.vote_cast.send(sender=election, election=election, user=user, voter=voter, cast_vote=cast_vote) if status_update_message and user.can_update_status():", "import signals from models import CastVote, Election, Voter, VoterFile from", "computed\", body=\"\"\" The encrypted tally for election %s has been", "election.voter_set.all() if voter_constraints_include: voters = voters.filter(**voter_constraints_include) if voter_constraints_exclude: voters =", "= Election.objects.get(id=election_id) for voter in election.voter_set.all(): single_voter_notify.delay(voter.uuid, notification_template, extra_vars) @shared_task", "body=\"\"\" Your voter file upload for election %s has been", "voter_constraints_include=None, voter_constraints_exclude=None): \"\"\" voter_constraints_include are conditions on including voters voter_constraints_exclude", "Election, Voter, VoterFile from view_utils import render_template_raw @shared_task def cast_vote_verify_and_store(cast_vote_id,", "% (voter_file.election.name, voter_file.num_voters)) @shared_task def election_notify_admin(election_id, subject, body): election =", "voters have been created. -- Helios \"\"\" % (voter_file.election.name, voter_file.num_voters))", "voter.get_user() if result: # send the signal signals.vote_cast.send(sender=election, election=election, user=user," ]
[ "yield mock_datastore @pytest.fixture(name=\"datastore_key\") def fixture_datastore_key(): \"\"\" Datastore Key Mock \"\"\"", "import unittest.mock as mock import pytest import virtual_dealer.api @pytest.fixture(name=\"client\") def", "for store::Store \"\"\" with mock.patch(\"virtual_dealer.api.store\", autospec=True) as mock_store: yield mock_store", "for testing flask APIs \"\"\" return virtual_dealer.api.app.test_client() @pytest.fixture(name=\"store\") def fixture_store():", "import pytest import virtual_dealer.api @pytest.fixture(name=\"client\") def fixture_client(): \"\"\" Client test", "\"\"\" Mock for store::Store \"\"\" with mock.patch(\"virtual_dealer.api.store\", autospec=True) as mock_store:", "mock_datastore: yield mock_datastore @pytest.fixture(name=\"datastore_key\") def fixture_datastore_key(): \"\"\" Datastore Key Mock", "return mock.MagicMock() @pytest.fixture(name=\"datastore_entity\") def fixture_datastore_entity(): \"\"\" Datastore Entity Mock \"\"\"", "<reponame>AlanRosenthal/virtual-dealer<gh_stars>1-10 \"\"\" pytest fixtures \"\"\" import unittest.mock as mock import", "@pytest.fixture(name=\"datastore\") def fixture_datastore(): \"\"\" Client test fixture for testing Google's", "autospec=True) as mock_datastore: yield mock_datastore @pytest.fixture(name=\"datastore_key\") def fixture_datastore_key(): \"\"\" Datastore", "autospec=True) as mock_store: yield mock_store @pytest.fixture(name=\"datastore\") def fixture_datastore(): \"\"\" Client", "@pytest.fixture(name=\"datastore_entity\") def fixture_datastore_entity(): \"\"\" Datastore Entity Mock \"\"\" return mock.MagicMock()", "unittest.mock as mock import pytest import virtual_dealer.api @pytest.fixture(name=\"client\") def fixture_client():", "pytest import virtual_dealer.api @pytest.fixture(name=\"client\") def fixture_client(): \"\"\" Client test fixture", "import virtual_dealer.api @pytest.fixture(name=\"client\") def fixture_client(): \"\"\" Client test fixture for", "mock_store: 
yield mock_store @pytest.fixture(name=\"datastore\") def fixture_datastore(): \"\"\" Client test fixture", "as mock_store: yield mock_store @pytest.fixture(name=\"datastore\") def fixture_datastore(): \"\"\" Client test", "mock.patch(\"virtual_dealer.store.datastore\", autospec=True) as mock_datastore: yield mock_datastore @pytest.fixture(name=\"datastore_key\") def fixture_datastore_key(): \"\"\"", "Datastore Key Mock \"\"\" return mock.MagicMock() @pytest.fixture(name=\"datastore_entity\") def fixture_datastore_entity(): \"\"\"", "\"\"\" return mock.MagicMock() @pytest.fixture(name=\"datastore_entity\") def fixture_datastore_entity(): \"\"\" Datastore Entity Mock", "fixture_client(): \"\"\" Client test fixture for testing flask APIs \"\"\"", "@pytest.fixture(name=\"store\") def fixture_store(): \"\"\" Mock for store::Store \"\"\" with mock.patch(\"virtual_dealer.api.store\",", "APIs \"\"\" return virtual_dealer.api.app.test_client() @pytest.fixture(name=\"store\") def fixture_store(): \"\"\" Mock for", "\"\"\" pytest fixtures \"\"\" import unittest.mock as mock import pytest", "def fixture_store(): \"\"\" Mock for store::Store \"\"\" with mock.patch(\"virtual_dealer.api.store\", autospec=True)", "mock.patch(\"virtual_dealer.api.store\", autospec=True) as mock_store: yield mock_store @pytest.fixture(name=\"datastore\") def fixture_datastore(): \"\"\"", "mock_store @pytest.fixture(name=\"datastore\") def fixture_datastore(): \"\"\" Client test fixture for testing", "def fixture_client(): \"\"\" Client test fixture for testing flask APIs", "\"\"\" import unittest.mock as mock import pytest import virtual_dealer.api @pytest.fixture(name=\"client\")", "store::Store \"\"\" with mock.patch(\"virtual_dealer.api.store\", autospec=True) as mock_store: yield mock_store @pytest.fixture(name=\"datastore\")", "as mock_datastore: yield mock_datastore @pytest.fixture(name=\"datastore_key\") def fixture_datastore_key(): \"\"\" Datastore Key", "as mock import pytest import 
virtual_dealer.api @pytest.fixture(name=\"client\") def fixture_client(): \"\"\"", "with mock.patch(\"virtual_dealer.store.datastore\", autospec=True) as mock_datastore: yield mock_datastore @pytest.fixture(name=\"datastore_key\") def fixture_datastore_key():", "datastore APIs \"\"\" with mock.patch(\"virtual_dealer.store.datastore\", autospec=True) as mock_datastore: yield mock_datastore", "Key Mock \"\"\" return mock.MagicMock() @pytest.fixture(name=\"datastore_entity\") def fixture_datastore_entity(): \"\"\" Datastore", "\"\"\" Client test fixture for testing Google's datastore APIs \"\"\"", "for testing Google's datastore APIs \"\"\" with mock.patch(\"virtual_dealer.store.datastore\", autospec=True) as", "def fixture_datastore_key(): \"\"\" Datastore Key Mock \"\"\" return mock.MagicMock() @pytest.fixture(name=\"datastore_entity\")", "\"\"\" return virtual_dealer.api.app.test_client() @pytest.fixture(name=\"store\") def fixture_store(): \"\"\" Mock for store::Store", "Mock \"\"\" return mock.MagicMock() @pytest.fixture(name=\"datastore_entity\") def fixture_datastore_entity(): \"\"\" Datastore Entity", "yield mock_store @pytest.fixture(name=\"datastore\") def fixture_datastore(): \"\"\" Client test fixture for", "fixture_datastore(): \"\"\" Client test fixture for testing Google's datastore APIs", "\"\"\" with mock.patch(\"virtual_dealer.api.store\", autospec=True) as mock_store: yield mock_store @pytest.fixture(name=\"datastore\") def", "APIs \"\"\" with mock.patch(\"virtual_dealer.store.datastore\", autospec=True) as mock_datastore: yield mock_datastore @pytest.fixture(name=\"datastore_key\")", "mock import pytest import virtual_dealer.api @pytest.fixture(name=\"client\") def fixture_client(): \"\"\" Client", "def fixture_datastore(): \"\"\" Client test fixture for testing Google's datastore", "Google's datastore APIs \"\"\" with mock.patch(\"virtual_dealer.store.datastore\", autospec=True) as mock_datastore: yield", "Client test fixture for testing flask 
APIs \"\"\" return virtual_dealer.api.app.test_client()", "with mock.patch(\"virtual_dealer.api.store\", autospec=True) as mock_store: yield mock_store @pytest.fixture(name=\"datastore\") def fixture_datastore():", "\"\"\" with mock.patch(\"virtual_dealer.store.datastore\", autospec=True) as mock_datastore: yield mock_datastore @pytest.fixture(name=\"datastore_key\") def", "virtual_dealer.api.app.test_client() @pytest.fixture(name=\"store\") def fixture_store(): \"\"\" Mock for store::Store \"\"\" with", "\"\"\" Datastore Key Mock \"\"\" return mock.MagicMock() @pytest.fixture(name=\"datastore_entity\") def fixture_datastore_entity():", "fixture for testing flask APIs \"\"\" return virtual_dealer.api.app.test_client() @pytest.fixture(name=\"store\") def", "Mock for store::Store \"\"\" with mock.patch(\"virtual_dealer.api.store\", autospec=True) as mock_store: yield", "fixture for testing Google's datastore APIs \"\"\" with mock.patch(\"virtual_dealer.store.datastore\", autospec=True)", "\"\"\" Client test fixture for testing flask APIs \"\"\" return", "fixtures \"\"\" import unittest.mock as mock import pytest import virtual_dealer.api", "testing flask APIs \"\"\" return virtual_dealer.api.app.test_client() @pytest.fixture(name=\"store\") def fixture_store(): \"\"\"", "pytest fixtures \"\"\" import unittest.mock as mock import pytest import", "test fixture for testing Google's datastore APIs \"\"\" with mock.patch(\"virtual_dealer.store.datastore\",", "flask APIs \"\"\" return virtual_dealer.api.app.test_client() @pytest.fixture(name=\"store\") def fixture_store(): \"\"\" Mock", "test fixture for testing flask APIs \"\"\" return virtual_dealer.api.app.test_client() @pytest.fixture(name=\"store\")", "@pytest.fixture(name=\"client\") def fixture_client(): \"\"\" Client test fixture for testing flask", "mock_datastore @pytest.fixture(name=\"datastore_key\") def fixture_datastore_key(): \"\"\" Datastore Key Mock \"\"\" return", "fixture_datastore_key(): \"\"\" 
Datastore Key Mock \"\"\" return mock.MagicMock() @pytest.fixture(name=\"datastore_entity\") def", "fixture_store(): \"\"\" Mock for store::Store \"\"\" with mock.patch(\"virtual_dealer.api.store\", autospec=True) as", "testing Google's datastore APIs \"\"\" with mock.patch(\"virtual_dealer.store.datastore\", autospec=True) as mock_datastore:", "Client test fixture for testing Google's datastore APIs \"\"\" with", "return virtual_dealer.api.app.test_client() @pytest.fixture(name=\"store\") def fixture_store(): \"\"\" Mock for store::Store \"\"\"", "virtual_dealer.api @pytest.fixture(name=\"client\") def fixture_client(): \"\"\" Client test fixture for testing", "mock.MagicMock() @pytest.fixture(name=\"datastore_entity\") def fixture_datastore_entity(): \"\"\" Datastore Entity Mock \"\"\" return", "@pytest.fixture(name=\"datastore_key\") def fixture_datastore_key(): \"\"\" Datastore Key Mock \"\"\" return mock.MagicMock()" ]
[ "fixturegenerators from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, \\ FixtureItemField,", "properties={} ) ] ), \"district_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_in_HIN\", properties={\"lang\":", "= \"district\" self.data_type = FixtureDataType( domain=self.domain, tag=self.tag, name=\"Districts\", fields=[ FixtureTypeField(", "<fixture id=\"item-list:district\" user_id=\"%s\"> <district_list> <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name>", ") ], item_attributes=[], ) self.data_type.save() self.data_item = FixtureDataItem( domain=self.domain, data_type_id=self.data_type.get_id,", ") ] ) }, item_attributes={}, ) self.data_item.save() self.user = CommCareUser.create(self.domain,", "], item_attributes=[], ) self.data_type.save() self.data_item = FixtureDataItem( domain=self.domain, data_type_id=self.data_type.get_id, fields=", "tearDown(self): self.data_type.delete() self.data_item.delete() self.user.delete() self.fixture_ownership.delete() def test_xml(self): check_xml_line_by_line(self, \"\"\" <district>", "from corehq.apps.fixtures.exceptions import FixtureVersionError from corehq.apps.users.models import CommCareUser from django.test", "<district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> \"\"\", ElementTree.tostring(self.data_item.to_xml())) def test_ownership(self):", "self.data_item = FixtureDataItem( domain=self.domain, data_type_id=self.data_type.get_id, fields= { \"state_name\": FieldList( field_list=[", "from corehq.apps.fixtures.views import update_tables from corehq.apps.fixtures.exceptions import FixtureVersionError from corehq.apps.users.models", "\"district_id\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_id\", 
properties={} ) ] ) },", "'qwerty' self.tag = \"district\" self.data_type = FixtureDataType( domain=self.domain, tag=self.tag, name=\"Districts\",", "\"\"\" <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> \"\"\",", "lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> </district_list> </fixture> \"\"\" % self.user.user_id, ElementTree.tostring(fixture)) self.data_item.remove_user(self.user)", "test_xml(self): check_xml_line_by_line(self, \"\"\" <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id>", "from django.test import TestCase class FixtureDataTest(TestCase): def setUp(self): self.domain =", "self.data_item.get_all_users(wrap=False)) fixture, = fixturegenerators.item_lists(self.user, V2) check_xml_line_by_line(self, \"\"\" <fixture id=\"item-list:district\" user_id=\"%s\">", "FixtureItemField( field_value=\"Delhi_in_ENG\", properties={\"lang\": \"eng\"} ) ] ), \"district_id\": FieldList( field_list=[", "self.data_item.remove_user(self.user) self.assertItemsEqual([], self.data_item.get_all_users()) self.fixture_ownership = self.data_item.add_user(self.user) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) def test_get_indexed_items(self):", "wrap=False)) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) fixture, = fixturegenerators.item_lists(self.user, V2) check_xml_line_by_line(self, \"\"\" <fixture", "% self.user.user_id, ElementTree.tostring(fixture)) self.data_item.remove_user(self.user) self.assertItemsEqual([], self.data_item.get_all_users()) self.fixture_ownership = self.data_item.add_user(self.user) self.assertItemsEqual([self.user.get_id],", "= 
'qwerty' self.tag = \"district\" self.data_type = FixtureDataType( domain=self.domain, tag=self.tag,", "properties=[] ) ], item_attributes=[], ) self.data_type.save() self.data_item = FixtureDataItem( domain=self.domain,", "= CommCareUser.create(self.domain, 'to_delete', '***') self.fixture_ownership = FixtureOwnership( domain=self.domain, owner_id=self.user.get_id, owner_type='user',", "import check_xml_line_by_line from casexml.apps.case.xml import V2 from corehq.apps.fixtures import fixturegenerators", "] ) }, item_attributes={}, ) self.data_item.save() self.user = CommCareUser.create(self.domain, 'to_delete',", "self.fixture_ownership = self.data_item.add_user(self.user) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) def test_get_indexed_items(self): with self.assertRaises(FixtureVersionError): fixtures", "with self.assertRaises(FixtureVersionError): fixtures = FixtureDataItem.get_indexed_items(self.domain, self.tag, 'state_name') delhi_id = fixtures['Delhi_state']['district_id']", "from corehq.apps.fixtures import fixturegenerators from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership,", "}, item_attributes={}, ) self.data_item.save() self.user = CommCareUser.create(self.domain, 'to_delete', '***') self.fixture_ownership", "properties={\"lang\": \"eng\"} ) ] ), \"district_id\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_id\",", ") self.data_item.save() self.user = CommCareUser.create(self.domain, 'to_delete', '***') self.fixture_ownership = FixtureOwnership(", "'***') self.fixture_ownership = FixtureOwnership( domain=self.domain, owner_id=self.user.get_id, owner_type='user', data_item_id=self.data_item.get_id ) self.fixture_ownership.save()", "TestCase class FixtureDataTest(TestCase): def setUp(self): self.domain = 'qwerty' self.tag =", "FixtureDataItem.by_user(self.user, wrap=False)) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) 
fixture, = fixturegenerators.item_lists(self.user, V2) check_xml_line_by_line(self, \"\"\"", "fixture, = fixturegenerators.item_lists(self.user, V2) check_xml_line_by_line(self, \"\"\" <fixture id=\"item-list:district\" user_id=\"%s\"> <district_list>", "field_value=\"Delhi_id\", properties={} ) ] ) }, item_attributes={}, ) self.data_item.save() self.user", "FixtureItemField( field_value=\"Delhi_state\", properties={} ) ] ), \"district_name\": FieldList( field_list=[ FixtureItemField(", "domain=self.domain, data_type_id=self.data_type.get_id, fields= { \"state_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_state\", properties={}", "V2) check_xml_line_by_line(self, \"\"\" <fixture id=\"item-list:district\" user_id=\"%s\"> <district_list> <district> <state_name>Delhi_state</state_name> <district_name", "<district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> \"\"\", ElementTree.tostring(self.data_item.to_xml()))", "corehq.apps.fixtures import fixturegenerators from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField,", "properties=[] ), FixtureTypeField( field_name=\"district_name\", properties=[\"lang\"] ), FixtureTypeField( field_name=\"district_id\", properties=[] )", "FixtureDataType( domain=self.domain, tag=self.tag, name=\"Districts\", fields=[ FixtureTypeField( field_name=\"state_name\", properties=[] ), FixtureTypeField(", "setUp(self): self.domain = 'qwerty' self.tag = \"district\" self.data_type = FixtureDataType(", "<district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> </district_list> </fixture> \"\"\" %", "<district_id>Delhi_id</district_id> </district> </district_list> </fixture> \"\"\" % self.user.user_id, 
ElementTree.tostring(fixture)) self.data_item.remove_user(self.user) self.assertItemsEqual([],", "FixtureDataItem( domain=self.domain, data_type_id=self.data_type.get_id, fields= { \"state_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_state\",", "from xml.etree import ElementTree from casexml.apps.case.tests.util import check_xml_line_by_line from casexml.apps.case.xml", "field_name=\"state_name\", properties=[] ), FixtureTypeField( field_name=\"district_name\", properties=[\"lang\"] ), FixtureTypeField( field_name=\"district_id\", properties=[]", "properties={\"lang\": \"hin\"} ), FixtureItemField( field_value=\"Delhi_in_ENG\", properties={\"lang\": \"eng\"} ) ] ),", "tag=self.tag, name=\"Districts\", fields=[ FixtureTypeField( field_name=\"state_name\", properties=[] ), FixtureTypeField( field_name=\"district_name\", properties=[\"lang\"]", "field_list=[ FixtureItemField( field_value=\"Delhi_id\", properties={} ) ] ) }, item_attributes={}, )", "self.data_type.save() self.data_item = FixtureDataItem( domain=self.domain, data_type_id=self.data_type.get_id, fields= { \"state_name\": FieldList(", "field_name=\"district_id\", properties=[] ) ], item_attributes=[], ) self.data_type.save() self.data_item = FixtureDataItem(", "<state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> \"\"\", ElementTree.tostring(self.data_item.to_xml())) def", "ElementTree.tostring(self.data_item.to_xml())) def test_ownership(self): self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user, wrap=False)) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) fixture, =", "import fixturegenerators from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, \\", "\\ FixtureItemField, FieldList from corehq.apps.fixtures.views 
import update_tables from corehq.apps.fixtures.exceptions import", "field_value=\"Delhi_in_ENG\", properties={\"lang\": \"eng\"} ) ] ), \"district_id\": FieldList( field_list=[ FixtureItemField(", "self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) def test_get_indexed_items(self): with self.assertRaises(FixtureVersionError): fixtures = FixtureDataItem.get_indexed_items(self.domain, self.tag,", "self.user = CommCareUser.create(self.domain, 'to_delete', '***') self.fixture_ownership = FixtureOwnership( domain=self.domain, owner_id=self.user.get_id,", "self.data_item.get_all_users(wrap=False)) def test_get_indexed_items(self): with self.assertRaises(FixtureVersionError): fixtures = FixtureDataItem.get_indexed_items(self.domain, self.tag, 'state_name')", "FixtureItemField( field_value=\"Delhi_in_HIN\", properties={\"lang\": \"hin\"} ), FixtureItemField( field_value=\"Delhi_in_ENG\", properties={\"lang\": \"eng\"} )", "FixtureDataType, FixtureOwnership, FixtureTypeField, \\ FixtureItemField, FieldList from corehq.apps.fixtures.views import update_tables", "field_value=\"Delhi_in_HIN\", properties={\"lang\": \"hin\"} ), FixtureItemField( field_value=\"Delhi_in_ENG\", properties={\"lang\": \"eng\"} ) ]", "import CommCareUser from django.test import TestCase class FixtureDataTest(TestCase): def setUp(self):", "id=\"item-list:district\" user_id=\"%s\"> <district_list> <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id>", "self.data_type = FixtureDataType( domain=self.domain, tag=self.tag, name=\"Districts\", fields=[ FixtureTypeField( field_name=\"state_name\", properties=[]", "<district_list> <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> 
</district> </district_list>", "self.user.delete() self.fixture_ownership.delete() def test_xml(self): check_xml_line_by_line(self, \"\"\" <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name>", "), FixtureTypeField( field_name=\"district_id\", properties=[] ) ], item_attributes=[], ) self.data_type.save() self.data_item", "def test_get_indexed_items(self): with self.assertRaises(FixtureVersionError): fixtures = FixtureDataItem.get_indexed_items(self.domain, self.tag, 'state_name') delhi_id", "'to_delete', '***') self.fixture_ownership = FixtureOwnership( domain=self.domain, owner_id=self.user.get_id, owner_type='user', data_item_id=self.data_item.get_id )", "CommCareUser from django.test import TestCase class FixtureDataTest(TestCase): def setUp(self): self.domain", "self.user.user_id, ElementTree.tostring(fixture)) self.data_item.remove_user(self.user) self.assertItemsEqual([], self.data_item.get_all_users()) self.fixture_ownership = self.data_item.add_user(self.user) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False))", "domain=self.domain, owner_id=self.user.get_id, owner_type='user', data_item_id=self.data_item.get_id ) self.fixture_ownership.save() def tearDown(self): self.data_type.delete() self.data_item.delete()", "import TestCase class FixtureDataTest(TestCase): def setUp(self): self.domain = 'qwerty' self.tag", "corehq.apps.fixtures.exceptions import FixtureVersionError from corehq.apps.users.models import CommCareUser from django.test import", "= FixtureDataItem( domain=self.domain, data_type_id=self.data_type.get_id, fields= { \"state_name\": FieldList( field_list=[ FixtureItemField(", "self.domain = 'qwerty' self.tag = \"district\" self.data_type = FixtureDataType( domain=self.domain,", ") ] ), \"district_id\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_id\", properties={} )", "ElementTree.tostring(fixture)) self.data_item.remove_user(self.user) 
self.assertItemsEqual([], self.data_item.get_all_users()) self.fixture_ownership = self.data_item.add_user(self.user) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) def", "] ), \"district_id\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_id\", properties={} ) ]", "check_xml_line_by_line(self, \"\"\" <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district>", "self.tag = \"district\" self.data_type = FixtureDataType( domain=self.domain, tag=self.tag, name=\"Districts\", fields=[", "</district> </district_list> </fixture> \"\"\" % self.user.user_id, ElementTree.tostring(fixture)) self.data_item.remove_user(self.user) self.assertItemsEqual([], self.data_item.get_all_users())", "import V2 from corehq.apps.fixtures import fixturegenerators from corehq.apps.fixtures.models import FixtureDataItem,", "self.fixture_ownership = FixtureOwnership( domain=self.domain, owner_id=self.user.get_id, owner_type='user', data_item_id=self.data_item.get_id ) self.fixture_ownership.save() def", "update_tables from corehq.apps.fixtures.exceptions import FixtureVersionError from corehq.apps.users.models import CommCareUser from", "from corehq.apps.users.models import CommCareUser from django.test import TestCase class FixtureDataTest(TestCase):", "<district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> </district_list> </fixture> \"\"\" % self.user.user_id, ElementTree.tostring(fixture))", "), \"district_id\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_id\", properties={} ) ] )", "field_name=\"district_name\", properties=[\"lang\"] ), FixtureTypeField( field_name=\"district_id\", properties=[] ) ], item_attributes=[], )", "field_list=[ FixtureItemField( field_value=\"Delhi_state\", properties={} ) ] ), 
\"district_name\": FieldList( field_list=[", "</fixture> \"\"\" % self.user.user_id, ElementTree.tostring(fixture)) self.data_item.remove_user(self.user) self.assertItemsEqual([], self.data_item.get_all_users()) self.fixture_ownership =", "from casexml.apps.case.xml import V2 from corehq.apps.fixtures import fixturegenerators from corehq.apps.fixtures.models", "from casexml.apps.case.tests.util import check_xml_line_by_line from casexml.apps.case.xml import V2 from corehq.apps.fixtures", "owner_type='user', data_item_id=self.data_item.get_id ) self.fixture_ownership.save() def tearDown(self): self.data_type.delete() self.data_item.delete() self.user.delete() self.fixture_ownership.delete()", "<state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> </district_list> </fixture> \"\"\"", "self.assertItemsEqual([], self.data_item.get_all_users()) self.fixture_ownership = self.data_item.add_user(self.user) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) def test_get_indexed_items(self): with", "fields=[ FixtureTypeField( field_name=\"state_name\", properties=[] ), FixtureTypeField( field_name=\"district_name\", properties=[\"lang\"] ), FixtureTypeField(", "FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_in_HIN\", properties={\"lang\": \"hin\"} ), FixtureItemField( field_value=\"Delhi_in_ENG\", properties={\"lang\":", "self.fixture_ownership.save() def tearDown(self): self.data_type.delete() self.data_item.delete() self.user.delete() self.fixture_ownership.delete() def test_xml(self): check_xml_line_by_line(self,", "<district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> \"\"\", ElementTree.tostring(self.data_item.to_xml())) def test_ownership(self): self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user,", 
"self.data_item.save() self.user = CommCareUser.create(self.domain, 'to_delete', '***') self.fixture_ownership = FixtureOwnership( domain=self.domain,", "check_xml_line_by_line from casexml.apps.case.xml import V2 from corehq.apps.fixtures import fixturegenerators from", "corehq.apps.users.models import CommCareUser from django.test import TestCase class FixtureDataTest(TestCase): def", "import ElementTree from casexml.apps.case.tests.util import check_xml_line_by_line from casexml.apps.case.xml import V2", "FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, \\ FixtureItemField, FieldList from corehq.apps.fixtures.views import", "django.test import TestCase class FixtureDataTest(TestCase): def setUp(self): self.domain = 'qwerty'", "def tearDown(self): self.data_type.delete() self.data_item.delete() self.user.delete() self.fixture_ownership.delete() def test_xml(self): check_xml_line_by_line(self, \"\"\"", "fixtures = FixtureDataItem.get_indexed_items(self.domain, self.tag, 'state_name') delhi_id = fixtures['Delhi_state']['district_id'] self.assertEqual(delhi_id, 'Delhi_id')", "name=\"Districts\", fields=[ FixtureTypeField( field_name=\"state_name\", properties=[] ), FixtureTypeField( field_name=\"district_name\", properties=[\"lang\"] ),", "\"\"\" % self.user.user_id, ElementTree.tostring(fixture)) self.data_item.remove_user(self.user) self.assertItemsEqual([], self.data_item.get_all_users()) self.fixture_ownership = self.data_item.add_user(self.user)", "= self.data_item.add_user(self.user) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) def test_get_indexed_items(self): with self.assertRaises(FixtureVersionError): fixtures =", "field_list=[ FixtureItemField( field_value=\"Delhi_in_HIN\", properties={\"lang\": \"hin\"} ), FixtureItemField( field_value=\"Delhi_in_ENG\", properties={\"lang\": \"eng\"}", "properties=[\"lang\"] ), FixtureTypeField( field_name=\"district_id\", properties=[] ) ], item_attributes=[], 
) self.data_type.save()", "def test_ownership(self): self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user, wrap=False)) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) fixture, = fixturegenerators.item_lists(self.user,", "fields= { \"state_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_state\", properties={} ) ]", "FixtureTypeField( field_name=\"state_name\", properties=[] ), FixtureTypeField( field_name=\"district_name\", properties=[\"lang\"] ), FixtureTypeField( field_name=\"district_id\",", "fixturegenerators.item_lists(self.user, V2) check_xml_line_by_line(self, \"\"\" <fixture id=\"item-list:district\" user_id=\"%s\"> <district_list> <district> <state_name>Delhi_state</state_name>", "check_xml_line_by_line(self, \"\"\" <fixture id=\"item-list:district\" user_id=\"%s\"> <district_list> <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name>", "self.fixture_ownership.delete() def test_xml(self): check_xml_line_by_line(self, \"\"\" <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name", "\"eng\"} ) ] ), \"district_id\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_id\", properties={}", "self.data_type.delete() self.data_item.delete() self.user.delete() self.fixture_ownership.delete() def test_xml(self): check_xml_line_by_line(self, \"\"\" <district> <state_name>Delhi_state</state_name>", "data_item_id=self.data_item.get_id ) self.fixture_ownership.save() def tearDown(self): self.data_type.delete() self.data_item.delete() self.user.delete() self.fixture_ownership.delete() def", "\"district_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_in_HIN\", properties={\"lang\": \"hin\"} ), FixtureItemField( field_value=\"Delhi_in_ENG\",", "item_attributes=[], ) self.data_type.save() self.data_item = FixtureDataItem( domain=self.domain, 
data_type_id=self.data_type.get_id, fields= {", "{ \"state_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_state\", properties={} ) ] ),", "] ), \"district_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_in_HIN\", properties={\"lang\": \"hin\"} ),", "FixtureItemField, FieldList from corehq.apps.fixtures.views import update_tables from corehq.apps.fixtures.exceptions import FixtureVersionError", "corehq.apps.fixtures.views import update_tables from corehq.apps.fixtures.exceptions import FixtureVersionError from corehq.apps.users.models import", "self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user, wrap=False)) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) fixture, = fixturegenerators.item_lists(self.user, V2) check_xml_line_by_line(self,", "import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, \\ FixtureItemField, FieldList from corehq.apps.fixtures.views", "FixtureTypeField, \\ FixtureItemField, FieldList from corehq.apps.fixtures.views import update_tables from corehq.apps.fixtures.exceptions", "\"hin\"} ), FixtureItemField( field_value=\"Delhi_in_ENG\", properties={\"lang\": \"eng\"} ) ] ), \"district_id\":", "V2 from corehq.apps.fixtures import fixturegenerators from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType,", "self.data_item.delete() self.user.delete() self.fixture_ownership.delete() def test_xml(self): check_xml_line_by_line(self, \"\"\" <district> <state_name>Delhi_state</state_name> <district_name", "FixtureItemField( field_value=\"Delhi_id\", properties={} ) ] ) }, item_attributes={}, ) self.data_item.save()", "test_ownership(self): self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user, wrap=False)) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) fixture, = fixturegenerators.item_lists(self.user, V2)", "def setUp(self): self.domain = 
'qwerty' self.tag = \"district\" self.data_type =", "self.data_item.get_all_users()) self.fixture_ownership = self.data_item.add_user(self.user) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) def test_get_indexed_items(self): with self.assertRaises(FixtureVersionError):", "\"district\" self.data_type = FixtureDataType( domain=self.domain, tag=self.tag, name=\"Districts\", fields=[ FixtureTypeField( field_name=\"state_name\",", "FixtureTypeField( field_name=\"district_id\", properties=[] ) ], item_attributes=[], ) self.data_type.save() self.data_item =", "= FixtureOwnership( domain=self.domain, owner_id=self.user.get_id, owner_type='user', data_item_id=self.data_item.get_id ) self.fixture_ownership.save() def tearDown(self):", ") self.fixture_ownership.save() def tearDown(self): self.data_type.delete() self.data_item.delete() self.user.delete() self.fixture_ownership.delete() def test_xml(self):", "import update_tables from corehq.apps.fixtures.exceptions import FixtureVersionError from corehq.apps.users.models import CommCareUser", "class FixtureDataTest(TestCase): def setUp(self): self.domain = 'qwerty' self.tag = \"district\"", "FixtureTypeField( field_name=\"district_name\", properties=[\"lang\"] ), FixtureTypeField( field_name=\"district_id\", properties=[] ) ], item_attributes=[],", "field_value=\"Delhi_state\", properties={} ) ] ), \"district_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_in_HIN\",", "), FixtureItemField( field_value=\"Delhi_in_ENG\", properties={\"lang\": \"eng\"} ) ] ), \"district_id\": FieldList(", "self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) fixture, = fixturegenerators.item_lists(self.user, V2) check_xml_line_by_line(self, \"\"\" <fixture id=\"item-list:district\"", "FixtureVersionError from corehq.apps.users.models import CommCareUser from django.test import TestCase class", "test_get_indexed_items(self): with 
self.assertRaises(FixtureVersionError): fixtures = FixtureDataItem.get_indexed_items(self.domain, self.tag, 'state_name') delhi_id =", "casexml.apps.case.xml import V2 from corehq.apps.fixtures import fixturegenerators from corehq.apps.fixtures.models import", "casexml.apps.case.tests.util import check_xml_line_by_line from casexml.apps.case.xml import V2 from corehq.apps.fixtures import", "properties={} ) ] ) }, item_attributes={}, ) self.data_item.save() self.user =", "<district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> </district_list> </fixture>", "FieldList from corehq.apps.fixtures.views import update_tables from corehq.apps.fixtures.exceptions import FixtureVersionError from", "FixtureOwnership, FixtureTypeField, \\ FixtureItemField, FieldList from corehq.apps.fixtures.views import update_tables from", "FixtureDataTest(TestCase): def setUp(self): self.domain = 'qwerty' self.tag = \"district\" self.data_type", "domain=self.domain, tag=self.tag, name=\"Districts\", fields=[ FixtureTypeField( field_name=\"state_name\", properties=[] ), FixtureTypeField( field_name=\"district_name\",", "owner_id=self.user.get_id, owner_type='user', data_item_id=self.data_item.get_id ) self.fixture_ownership.save() def tearDown(self): self.data_type.delete() self.data_item.delete() self.user.delete()", "\"\"\" <fixture id=\"item-list:district\" user_id=\"%s\"> <district_list> <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name", "CommCareUser.create(self.domain, 'to_delete', '***') self.fixture_ownership = FixtureOwnership( domain=self.domain, owner_id=self.user.get_id, owner_type='user', data_item_id=self.data_item.get_id", "lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> 
</district> </district_list> </fixture> \"\"\" % self.user.user_id,", "), FixtureTypeField( field_name=\"district_name\", properties=[\"lang\"] ), FixtureTypeField( field_name=\"district_id\", properties=[] ) ],", "data_type_id=self.data_type.get_id, fields= { \"state_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_state\", properties={} )", "self.data_item.add_user(self.user) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) def test_get_indexed_items(self): with self.assertRaises(FixtureVersionError): fixtures = FixtureDataItem.get_indexed_items(self.domain,", "</district> \"\"\", ElementTree.tostring(self.data_item.to_xml())) def test_ownership(self): self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user, wrap=False)) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False))", "xml.etree import ElementTree from casexml.apps.case.tests.util import check_xml_line_by_line from casexml.apps.case.xml import", "ElementTree from casexml.apps.case.tests.util import check_xml_line_by_line from casexml.apps.case.xml import V2 from", "\"state_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_state\", properties={} ) ] ), \"district_name\":", "import FixtureVersionError from corehq.apps.users.models import CommCareUser from django.test import TestCase", ") ] ), \"district_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_in_HIN\", properties={\"lang\": \"hin\"}", "), \"district_name\": FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_in_HIN\", properties={\"lang\": \"hin\"} ), FixtureItemField(", "corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, \\ FixtureItemField, FieldList from", "lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> \"\"\", ElementTree.tostring(self.data_item.to_xml())) def test_ownership(self): 
self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user, wrap=False))", "FixtureOwnership( domain=self.domain, owner_id=self.user.get_id, owner_type='user', data_item_id=self.data_item.get_id ) self.fixture_ownership.save() def tearDown(self): self.data_type.delete()", "item_attributes={}, ) self.data_item.save() self.user = CommCareUser.create(self.domain, 'to_delete', '***') self.fixture_ownership =", "= FixtureDataType( domain=self.domain, tag=self.tag, name=\"Districts\", fields=[ FixtureTypeField( field_name=\"state_name\", properties=[] ),", "user_id=\"%s\"> <district_list> <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district>", "self.assertRaises(FixtureVersionError): fixtures = FixtureDataItem.get_indexed_items(self.domain, self.tag, 'state_name') delhi_id = fixtures['Delhi_state']['district_id'] self.assertEqual(delhi_id,", "lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> \"\"\", ElementTree.tostring(self.data_item.to_xml())) def test_ownership(self): self.assertItemsEqual([self.data_item.get_id],", "from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType, FixtureOwnership, FixtureTypeField, \\ FixtureItemField, FieldList", "FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_id\", properties={} ) ] ) }, item_attributes={},", "<district_id>Delhi_id</district_id> </district> \"\"\", ElementTree.tostring(self.data_item.to_xml())) def test_ownership(self): self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user, wrap=False)) self.assertItemsEqual([self.user.get_id],", "</district_list> </fixture> \"\"\" % self.user.user_id, ElementTree.tostring(fixture)) self.data_item.remove_user(self.user) self.assertItemsEqual([], 
self.data_item.get_all_users()) self.fixture_ownership", ") self.data_type.save() self.data_item = FixtureDataItem( domain=self.domain, data_type_id=self.data_type.get_id, fields= { \"state_name\":", "\"\"\", ElementTree.tostring(self.data_item.to_xml())) def test_ownership(self): self.assertItemsEqual([self.data_item.get_id], FixtureDataItem.by_user(self.user, wrap=False)) self.assertItemsEqual([self.user.get_id], self.data_item.get_all_users(wrap=False)) fixture,", "FieldList( field_list=[ FixtureItemField( field_value=\"Delhi_state\", properties={} ) ] ), \"district_name\": FieldList(", "= fixturegenerators.item_lists(self.user, V2) check_xml_line_by_line(self, \"\"\" <fixture id=\"item-list:district\" user_id=\"%s\"> <district_list> <district>", ") }, item_attributes={}, ) self.data_item.save() self.user = CommCareUser.create(self.domain, 'to_delete', '***')", "def test_xml(self): check_xml_line_by_line(self, \"\"\" <district> <state_name>Delhi_state</state_name> <district_name lang=\"hin\">Delhi_in_HIN</district_name> <district_name lang=\"eng\">Delhi_in_ENG</district_name>" ]
[ "instance_list], } # Do not index if autosync is disabled", "from readthedocs.search.documents import ProjectDocument kwargs = { 'app_label': Project._meta.app_label, 'model_name':", "globally if DEDConfig.autosync_enabled(): index_objects_to_es(**kwargs) @receiver(bulk_post_delete, sender=HTMLFile) def remove_html_file(instance_list, **_): \"\"\"Remove", "it async, replacing how django-elasticsearch-dsl does it. \"\"\" from readthedocs.search.documents", "Project.__name__, 'document_class': str(ProjectDocument), 'objects_id': [instance.id], } # Don't `delay` this", "delete_objects_in_es, index_objects_to_es @receiver(bulk_post_create, sender=HTMLFile) def index_html_file(instance_list, **_): \"\"\"Handle indexing from", "readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es @receiver(bulk_post_create, sender=HTMLFile) def index_html_file(instance_list, **_): \"\"\"Handle", "if DEDConfig.autosync_enabled(): index_objects_to_es.delay(**kwargs) @receiver(pre_delete, sender=Project) def remove_project_delete(instance, *args, **kwargs): from", "signals to trigger before executing searches.\"\"\" from django.db.models.signals import post_save,", "index_objects_to_es @receiver(bulk_post_create, sender=HTMLFile) def index_html_file(instance_list, **_): \"\"\"Handle indexing from the", "\"\"\"We define custom Django signals to trigger before executing searches.\"\"\"", "sender=Project) def index_project_save(instance, *args, **kwargs): \"\"\" Save a Project instance", "instance based on the post_save signal.post_save. 
This uses Celery to", "# Don't `delay` this because the objects will be deleted", "obj in instance_list], } # Do not index if autosync", "Django signals to trigger before executing searches.\"\"\" from django.db.models.signals import", "from readthedocs.projects.signals import bulk_post_create, bulk_post_delete from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es", "index_html_file(instance_list, **_): \"\"\"Handle indexing from the build process.\"\"\" from readthedocs.search.documents", "@receiver(bulk_post_delete, sender=HTMLFile) def remove_html_file(instance_list, **_): \"\"\"Remove deleted files from the", "globally if DEDConfig.autosync_enabled(): index_objects_to_es.delay(**kwargs) @receiver(pre_delete, sender=Project) def remove_project_delete(instance, *args, **kwargs):", "@receiver(pre_delete, sender=Project) def remove_project_delete(instance, *args, **kwargs): from readthedocs.search.documents import ProjectDocument", "disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es(**kwargs) @receiver(bulk_post_delete, sender=HTMLFile) def remove_html_file(instance_list, **_):", "{ 'app_label': HTMLFile._meta.app_label, 'model_name': HTMLFile.__name__, 'document_class': str(PageDocument), 'objects_id': [obj.id for", "Celery to do it async, replacing how django-elasticsearch-dsl does it.", "'objects_id': [obj.id for obj in instance_list], } # Do not", "sender=HTMLFile) def index_html_file(instance_list, **_): \"\"\"Handle indexing from the build process.\"\"\"", "this because the objects will be deleted already if DEDConfig.autosync_enabled():", "= { 'app_label': HTMLFile._meta.app_label, 'model_name': HTMLFile.__name__, 'document_class': str(PageDocument), 'objects_id': [obj.id", "from django.db.models.signals import post_save, pre_delete from django.dispatch import receiver from", "readthedocs.projects.signals import bulk_post_create, bulk_post_delete from readthedocs.search.tasks import delete_objects_in_es, 
index_objects_to_es @receiver(bulk_post_create,", "bulk_post_delete from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es @receiver(bulk_post_create, sender=HTMLFile) def index_html_file(instance_list,", "bulk_post_create, bulk_post_delete from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es @receiver(bulk_post_create, sender=HTMLFile) def", "how django-elasticsearch-dsl does it. \"\"\" from readthedocs.search.documents import ProjectDocument kwargs", "HTMLFile.__name__, 'document_class': str(PageDocument), 'objects_id': [obj.id for obj in instance_list], }", "if autosync is disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es.delay(**kwargs) @receiver(pre_delete, sender=Project)", "'model_name': Project.__name__, 'document_class': str(ProjectDocument), 'objects_id': [instance.id], } # Do not", "'document_class': str(ProjectDocument), 'objects_id': [instance.id], } # Do not index if", "'document_class': str(ProjectDocument), 'objects_id': [instance.id], } # Don't `delay` this because", "DEDConfig.autosync_enabled(): index_objects_to_es.delay(**kwargs) @receiver(pre_delete, sender=Project) def remove_project_delete(instance, *args, **kwargs): from readthedocs.search.documents", "\"\"\"Handle indexing from the build process.\"\"\" from readthedocs.search.documents import PageDocument", "from readthedocs.search.documents import PageDocument kwargs = { 'app_label': HTMLFile._meta.app_label, 'model_name':", "in instance_list], } # Do not index if autosync is", "sender=HTMLFile) def remove_html_file(instance_list, **_): \"\"\"Remove deleted files from the build", "disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es.delay(**kwargs) @receiver(pre_delete, sender=Project) def remove_project_delete(instance, *args,", "indexing from the build process.\"\"\" from readthedocs.search.documents import PageDocument kwargs", "Do not index if autosync is disabled globally if 
DEDConfig.autosync_enabled():", "index_objects_to_es(**kwargs) @receiver(bulk_post_delete, sender=HTMLFile) def remove_html_file(instance_list, **_): \"\"\"Remove deleted files from", "Project instance based on the post_save signal.post_save. This uses Celery", "executing searches.\"\"\" from django.db.models.signals import post_save, pre_delete from django.dispatch import", "import ProjectDocument kwargs = { 'app_label': Project._meta.app_label, 'model_name': Project.__name__, 'document_class':", "from django_elasticsearch_dsl.apps import DEDConfig from readthedocs.projects.models import HTMLFile, Project from", "django_elasticsearch_dsl.apps import DEDConfig from readthedocs.projects.models import HTMLFile, Project from readthedocs.projects.signals", "'objects_id': [instance.id], } # Do not index if autosync is", "signal.post_save. This uses Celery to do it async, replacing how", "DEDConfig.autosync_enabled(): delete_objects_in_es(**kwargs) @receiver(post_save, sender=Project) def index_project_save(instance, *args, **kwargs): \"\"\" Save", "because the objects will be deleted already if DEDConfig.autosync_enabled(): delete_objects_in_es(**kwargs)", "pre_delete from django.dispatch import receiver from django_elasticsearch_dsl.apps import DEDConfig from", "receiver from django_elasticsearch_dsl.apps import DEDConfig from readthedocs.projects.models import HTMLFile, Project", "kwargs = { 'app_label': HTMLFile._meta.app_label, 'model_name': HTMLFile.__name__, 'document_class': str(PageDocument), 'objects_id':", "autosync is disabled globally if DEDConfig.autosync_enabled(): delete_objects_in_es(**kwargs) @receiver(post_save, sender=Project) def", "def index_project_save(instance, *args, **kwargs): \"\"\" Save a Project instance based", "do it async, replacing how django-elasticsearch-dsl does it. 
\"\"\" from", "if autosync is disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es(**kwargs) @receiver(bulk_post_delete, sender=HTMLFile)", "index_objects_to_es.delay(**kwargs) @receiver(pre_delete, sender=Project) def remove_project_delete(instance, *args, **kwargs): from readthedocs.search.documents import", "is disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es.delay(**kwargs) @receiver(pre_delete, sender=Project) def remove_project_delete(instance,", "remove_html_file(instance_list, **_): \"\"\"Remove deleted files from the build process.\"\"\" from", "import PageDocument kwargs = { 'app_label': HTMLFile._meta.app_label, 'model_name': HTMLFile.__name__, 'document_class':", "DEDConfig from readthedocs.projects.models import HTMLFile, Project from readthedocs.projects.signals import bulk_post_create,", "HTMLFile._meta.app_label, 'model_name': HTMLFile.__name__, 'document_class': str(PageDocument), 'objects_id': [obj.id for obj in", "} # Do not index if autosync is disabled globally", "\"\"\" from readthedocs.search.documents import ProjectDocument kwargs = { 'app_label': Project._meta.app_label,", "[instance.id], } # Do not index if autosync is disabled", "process.\"\"\" from readthedocs.search.documents import PageDocument kwargs = { 'app_label': HTMLFile._meta.app_label,", "'app_label': Project._meta.app_label, 'model_name': Project.__name__, 'document_class': str(ProjectDocument), 'objects_id': [instance.id], } #", "to do it async, replacing how django-elasticsearch-dsl does it. \"\"\"", "index if autosync is disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es.delay(**kwargs) @receiver(pre_delete,", "trigger before executing searches.\"\"\" from django.db.models.signals import post_save, pre_delete from", "from readthedocs.projects.models import HTMLFile, Project from readthedocs.projects.signals import bulk_post_create, bulk_post_delete", "a Project instance based on the post_save signal.post_save. 
This uses", "it. \"\"\" from readthedocs.search.documents import ProjectDocument kwargs = { 'app_label':", "django.db.models.signals import post_save, pre_delete from django.dispatch import receiver from django_elasticsearch_dsl.apps", "def remove_project_delete(instance, *args, **kwargs): from readthedocs.search.documents import ProjectDocument kwargs =", "Project from readthedocs.projects.signals import bulk_post_create, bulk_post_delete from readthedocs.search.tasks import delete_objects_in_es,", "**_): \"\"\"Handle indexing from the build process.\"\"\" from readthedocs.search.documents import", "sender=Project) def remove_project_delete(instance, *args, **kwargs): from readthedocs.search.documents import ProjectDocument kwargs", "does it. \"\"\" from readthedocs.search.documents import ProjectDocument kwargs = {", "str(PageDocument), 'objects_id': [obj.id for obj in instance_list], } # Do", "'model_name': Project.__name__, 'document_class': str(ProjectDocument), 'objects_id': [instance.id], } # Don't `delay`", "delete_objects_in_es(**kwargs) @receiver(post_save, sender=Project) def index_project_save(instance, *args, **kwargs): \"\"\" Save a", "-*- coding: utf-8 -*- \"\"\"We define custom Django signals to", "} # Don't `delay` this because the objects will be", "index if autosync is disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es(**kwargs) @receiver(bulk_post_delete,", "from django.dispatch import receiver from django_elasticsearch_dsl.apps import DEDConfig from readthedocs.projects.models", "*args, **kwargs): \"\"\" Save a Project instance based on the", "to trigger before executing searches.\"\"\" from django.db.models.signals import post_save, pre_delete", "if DEDConfig.autosync_enabled(): delete_objects_in_es(**kwargs) @receiver(post_save, sender=Project) def index_project_save(instance, *args, **kwargs): \"\"\"", "before executing searches.\"\"\" from django.db.models.signals import post_save, pre_delete from django.dispatch", "the 
post_save signal.post_save. This uses Celery to do it async,", "coding: utf-8 -*- \"\"\"We define custom Django signals to trigger", "PageDocument kwargs = { 'app_label': HTMLFile._meta.app_label, 'model_name': HTMLFile.__name__, 'document_class': str(PageDocument),", "Save a Project instance based on the post_save signal.post_save. This", "str(ProjectDocument), 'objects_id': [instance.id], } # Do not index if autosync", "'app_label': HTMLFile._meta.app_label, 'model_name': HTMLFile.__name__, 'document_class': str(PageDocument), 'objects_id': [obj.id for obj", "from the build process.\"\"\" from readthedocs.search.documents import PageDocument kwargs =", "post_save, pre_delete from django.dispatch import receiver from django_elasticsearch_dsl.apps import DEDConfig", "django.dispatch import receiver from django_elasticsearch_dsl.apps import DEDConfig from readthedocs.projects.models import", "import receiver from django_elasticsearch_dsl.apps import DEDConfig from readthedocs.projects.models import HTMLFile,", "# Do not index if autosync is disabled globally if", "import delete_objects_in_es, index_objects_to_es @receiver(bulk_post_create, sender=HTMLFile) def index_html_file(instance_list, **_): \"\"\"Handle indexing", "from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es @receiver(bulk_post_create, sender=HTMLFile) def index_html_file(instance_list, **_):", "Don't `delay` this because the objects will be deleted already", "str(ProjectDocument), 'objects_id': [instance.id], } # Don't `delay` this because the", "import HTMLFile, Project from readthedocs.projects.signals import bulk_post_create, bulk_post_delete from readthedocs.search.tasks", "This uses Celery to do it async, replacing how django-elasticsearch-dsl", "*args, **kwargs): from readthedocs.search.documents import ProjectDocument kwargs = { 'app_label':", "build process.\"\"\" from readthedocs.search.documents import PageDocument kwargs = { 'app_label':", "replacing how 
django-elasticsearch-dsl does it. \"\"\" from readthedocs.search.documents import ProjectDocument", "django-elasticsearch-dsl does it. \"\"\" from readthedocs.search.documents import ProjectDocument kwargs =", "not index if autosync is disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es.delay(**kwargs)", "searches.\"\"\" from django.db.models.signals import post_save, pre_delete from django.dispatch import receiver", "files from the build process.\"\"\" from readthedocs.search.documents import PageDocument kwargs", "globally if DEDConfig.autosync_enabled(): delete_objects_in_es(**kwargs) @receiver(post_save, sender=Project) def index_project_save(instance, *args, **kwargs):", "the build process.\"\"\" from readthedocs.search.documents import PageDocument kwargs = {", "@receiver(bulk_post_create, sender=HTMLFile) def index_html_file(instance_list, **_): \"\"\"Handle indexing from the build", "autosync is disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es(**kwargs) @receiver(bulk_post_delete, sender=HTMLFile) def", "DEDConfig.autosync_enabled(): index_objects_to_es(**kwargs) @receiver(bulk_post_delete, sender=HTMLFile) def remove_html_file(instance_list, **_): \"\"\"Remove deleted files", "import DEDConfig from readthedocs.projects.models import HTMLFile, Project from readthedocs.projects.signals import", "\"\"\"Remove deleted files from the build process.\"\"\" from readthedocs.search.documents import", "readthedocs.search.documents import ProjectDocument kwargs = { 'app_label': Project._meta.app_label, 'model_name': Project.__name__,", "kwargs = { 'app_label': Project._meta.app_label, 'model_name': Project.__name__, 'document_class': str(ProjectDocument), 'objects_id':", "if DEDConfig.autosync_enabled(): index_objects_to_es(**kwargs) @receiver(bulk_post_delete, sender=HTMLFile) def remove_html_file(instance_list, **_): \"\"\"Remove deleted", "autosync is disabled globally if DEDConfig.autosync_enabled(): 
index_objects_to_es.delay(**kwargs) @receiver(pre_delete, sender=Project) def", "disabled globally if DEDConfig.autosync_enabled(): delete_objects_in_es(**kwargs) @receiver(post_save, sender=Project) def index_project_save(instance, *args,", "= { 'app_label': Project._meta.app_label, 'model_name': Project.__name__, 'document_class': str(ProjectDocument), 'objects_id': [instance.id],", "Project.__name__, 'document_class': str(ProjectDocument), 'objects_id': [instance.id], } # Do not index", "remove_project_delete(instance, *args, **kwargs): from readthedocs.search.documents import ProjectDocument kwargs = {", "Project._meta.app_label, 'model_name': Project.__name__, 'document_class': str(ProjectDocument), 'objects_id': [instance.id], } # Don't", "post_save signal.post_save. This uses Celery to do it async, replacing", "if autosync is disabled globally if DEDConfig.autosync_enabled(): delete_objects_in_es(**kwargs) @receiver(post_save, sender=Project)", "# -*- coding: utf-8 -*- \"\"\"We define custom Django signals", "-*- \"\"\"We define custom Django signals to trigger before executing", "utf-8 -*- \"\"\"We define custom Django signals to trigger before", "readthedocs.projects.models import HTMLFile, Project from readthedocs.projects.signals import bulk_post_create, bulk_post_delete from", "[obj.id for obj in instance_list], } # Do not index", "async, replacing how django-elasticsearch-dsl does it. 
\"\"\" from readthedocs.search.documents import", "not index if autosync is disabled globally if DEDConfig.autosync_enabled(): delete_objects_in_es(**kwargs)", "`delay` this because the objects will be deleted already if", "**_): \"\"\"Remove deleted files from the build process.\"\"\" from readthedocs.search.documents", "**kwargs): \"\"\" Save a Project instance based on the post_save", "define custom Django signals to trigger before executing searches.\"\"\" from", "is disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es(**kwargs) @receiver(bulk_post_delete, sender=HTMLFile) def remove_html_file(instance_list,", "is disabled globally if DEDConfig.autosync_enabled(): delete_objects_in_es(**kwargs) @receiver(post_save, sender=Project) def index_project_save(instance,", "Project._meta.app_label, 'model_name': Project.__name__, 'document_class': str(ProjectDocument), 'objects_id': [instance.id], } # Do", "based on the post_save signal.post_save. This uses Celery to do", "**kwargs): from readthedocs.search.documents import ProjectDocument kwargs = { 'app_label': Project._meta.app_label,", "\"\"\" Save a Project instance based on the post_save signal.post_save.", "'model_name': HTMLFile.__name__, 'document_class': str(PageDocument), 'objects_id': [obj.id for obj in instance_list],", "deleted files from the build process.\"\"\" from readthedocs.search.documents import PageDocument", "HTMLFile, Project from readthedocs.projects.signals import bulk_post_create, bulk_post_delete from readthedocs.search.tasks import", "index_project_save(instance, *args, **kwargs): \"\"\" Save a Project instance based on", "'document_class': str(PageDocument), 'objects_id': [obj.id for obj in instance_list], } #", "not index if autosync is disabled globally if DEDConfig.autosync_enabled(): index_objects_to_es(**kwargs)", "'objects_id': [instance.id], } # Don't `delay` this because the objects", "index if autosync is disabled globally if DEDConfig.autosync_enabled(): 
delete_objects_in_es(**kwargs) @receiver(post_save,", "for obj in instance_list], } # Do not index if", "[instance.id], } # Don't `delay` this because the objects will", "import bulk_post_create, bulk_post_delete from readthedocs.search.tasks import delete_objects_in_es, index_objects_to_es @receiver(bulk_post_create, sender=HTMLFile)", "uses Celery to do it async, replacing how django-elasticsearch-dsl does", "import post_save, pre_delete from django.dispatch import receiver from django_elasticsearch_dsl.apps import", "on the post_save signal.post_save. This uses Celery to do it", "def index_html_file(instance_list, **_): \"\"\"Handle indexing from the build process.\"\"\" from", "ProjectDocument kwargs = { 'app_label': Project._meta.app_label, 'model_name': Project.__name__, 'document_class': str(ProjectDocument),", "readthedocs.search.documents import PageDocument kwargs = { 'app_label': HTMLFile._meta.app_label, 'model_name': HTMLFile.__name__,", "@receiver(post_save, sender=Project) def index_project_save(instance, *args, **kwargs): \"\"\" Save a Project", "{ 'app_label': Project._meta.app_label, 'model_name': Project.__name__, 'document_class': str(ProjectDocument), 'objects_id': [instance.id], }", "def remove_html_file(instance_list, **_): \"\"\"Remove deleted files from the build process.\"\"\"", "custom Django signals to trigger before executing searches.\"\"\" from django.db.models.signals" ]
[ "_| _| | <| -__| |. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|", "distribute this software, either in source code form or as", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "SOFTWARE. For more information, please refer to <https://unlicense.org> \"\"\" _filevantage_endpoints", "], [ \"queryChanges\", \"GET\", \"/filevantage/queries/changes/v2\", \"Returns one or more change", "- `host.host_name`\\n - `action_timestamp`\\n\\n \" \"The full list of allowed", "Language (FQL). \\n\\n\" \"Common filter options include:\\n\\n - `host.host_name`\\n -", "_______ __ _______ __ __ __ | _ .----.-----.--.--.--.--| |", "manage pagination of results.\", \"name\": \"offset\", \"in\": \"query\" }, {", "interest in the software to the public domain. We make", "unencumbered software released into the public domain. Anyone is free", "free to copy, modify, publish, use, compile, sell, or distribute", "`action_timestamp`\\n\\n \" \"The full list of allowed filter parameters can", "of results\", \"name\": \"limit\", \"in\": \"query\" }, { \"type\": \"string\",", "to the public domain. We make this dedication for the", "{ \"type\": \"array\", \"items\": { \"type\": \"string\" }, \"collectionFormat\": \"multi\",", "any means. In jurisdictions that recognize copyright laws, the author", "public domain. Anyone is free to copy, modify, publish, use,", "\" \"For example: `action_timestamp|asc`.\\n\" \"The full list of allowed sorting", "changes to return in the response \" \"(default: 100; max:", "-__| |. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____| |: 1 | |:", "\"query\" }, { \"type\": \"string\", \"description\": \"Sort changes using options", "constant library. _______ __ _______ __ __ __ | _", "FALCON |::.. . 
| FalconPy `-------' `-------' OAuth2 API -", "the `limit` parameter to manage pagination of results.\", \"name\": \"offset\",", "options like:\\n\\n\" \"- `action_timestamp` (timestamp of the change occurrence) \\n\\n", "on changes\", \"filevantage\", [ { \"type\": \"array\", \"items\": { \"type\":", "\"in\": \"query\" }, { \"type\": \"string\", \"description\": \"Sort changes using", "allowed sorting options can be reviewed in our API documentation.\",", "API documentation.\", \"name\": \"filter\", \"in\": \"query\" } ] ] ]", "_ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----. |. 1___| _|", "into the public domain. Anyone is free to copy, modify,", "\"getChanges\", \"GET\", \"/filevantage/entities/changes/v2\", \"Retrieve information on changes\", \"filevantage\", [ {", "number of changes to return in the response \" \"(default:", "this dedication for the benefit of the public at large", "the author or authors of this software dedicate any and", "\"queryChanges\", \"GET\", \"/filevantage/queries/changes/v2\", \"Returns one or more change IDs\", \"filevantage\",", "allowed filter parameters can be reviewed in our API documentation.\",", "be reviewed in our API documentation.\", \"name\": \"sort\", \"in\": \"query\"", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "\"type\": \"string\" }, \"collectionFormat\": \"multi\", \"description\": \"Comma separated values of", "parameters can be reviewed in our API documentation.\", \"name\": \"filter\",", "changes using a query in Falcon Query Language (FQL). \\n\\n\"", "query in Falcon Query Language (FQL). \\n\\n\" \"Common filter options", "|: 1 | |::.. . | CROWDSTRIKE FALCON |::.. .", "occurrence) \\n\\n \" \"Sort either `asc` (ascending) or `desc` (descending).", "form or as a compiled binary, for any purpose, commercial", "all present and future rights to this software under copyright", "|::.. . | CROWDSTRIKE FALCON |::.. . 
| FalconPy `-------'", "values of change ids\", \"name\": \"ids\", \"in\": \"query\", \"required\": True", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "\"Use with the `limit` parameter to manage pagination of results.\",", "_ | 1___| _| _| | <| -__| |. |___|__|", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH", "purpose, commercial or non-commercial, and by any means. In jurisdictions", "\"description\": \"The first change index to return in the response.", "reviewed in our API documentation.\", \"name\": \"filter\", \"in\": \"query\" }", "\" \"Sort either `asc` (ascending) or `desc` (descending). \" \"For", "to manage pagination of results.\", \"name\": \"offset\", \"in\": \"query\" },", "of changes to return in the response \" \"(default: 100;", "in the response. \" \"If not provided it will default", "\"type\": \"string\", \"description\": \"Sort changes using options like:\\n\\n\" \"- `action_timestamp`", "\" \"The full list of allowed filter parameters can be", "software to the public domain. We make this dedication for", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A", "like:\\n\\n\" \"- `action_timestamp` (timestamp of the change occurrence) \\n\\n \"", ". | CROWDSTRIKE FALCON |::.. . | FalconPy `-------' `-------'", "CROWDSTRIKE FALCON |::.. . | FalconPy `-------' `-------' OAuth2 API", "| |_.----|__| |--.-----. |. 1___| _| _ | | |", "\"required\": True } ] ], [ \"queryChanges\", \"GET\", \"/filevantage/queries/changes/v2\", \"Returns", "} ] ], [ \"queryChanges\", \"GET\", \"/filevantage/queries/changes/v2\", \"Returns one or", "}, { \"type\": \"string\", \"description\": \"Filter changes using a query", "under copyright law. 
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "please refer to <https://unlicense.org> \"\"\" _filevantage_endpoints = [ [ \"getChanges\",", "as a compiled binary, for any purpose, commercial or non-commercial,", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE", "_filevantage_endpoints = [ [ \"getChanges\", \"GET\", \"/filevantage/entities/changes/v2\", \"Retrieve information on", "| FalconPy `-------' `-------' OAuth2 API - Customer SDK This", "SDK This is free and unencumbered software released into the", "1 | |::.. . | CROWDSTRIKE FALCON |::.. . |", "or more change IDs\", \"filevantage\", [ { \"minimum\": 0, \"type\":", "or distribute this software, either in source code form or", "in our API documentation.\", \"name\": \"sort\", \"in\": \"query\" }, {", "parameter to manage pagination of results\", \"name\": \"limit\", \"in\": \"query\"", "_______ __ __ __ | _ .----.-----.--.--.--.--| | _ |", "options include:\\n\\n - `host.host_name`\\n - `action_timestamp`\\n\\n \" \"The full list", "THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more", "`-------' `-------' OAuth2 API - Customer SDK This is free", "| |::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "`-------' OAuth2 API - Customer SDK This is free and", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "with the `limit` parameter to manage pagination of results.\", \"name\":", "\"name\": \"limit\", \"in\": \"query\" }, { \"type\": \"string\", \"description\": \"Sort", "to the detriment of our heirs and successors. We intend", "SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "`asc` (ascending) or `desc` (descending). \" \"For example: `action_timestamp|asc`.\\n\" \"The", "OR OTHER DEALINGS IN THE SOFTWARE. 
For more information, please", "\"limit\", \"in\": \"query\" }, { \"type\": \"string\", \"description\": \"Sort changes", "[ { \"type\": \"array\", \"items\": { \"type\": \"string\" }, \"collectionFormat\":", "_| | <| -__| |. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____| |:", "for the benefit of the public at large and to", "to return in the response. \" \"If not provided it", "software, either in source code form or as a compiled", "a compiled binary, for any purpose, commercial or non-commercial, and", "`action_timestamp|asc`.\\n\" \"The full list of allowed sorting options can be", "\"Common filter options include:\\n\\n - `host.host_name`\\n - `action_timestamp`\\n\\n \" \"The", "options can be reviewed in our API documentation.\", \"name\": \"sort\",", "API endpoint constant library. _______ __ _______ __ __ __", "\"Sort either `asc` (ascending) or `desc` (descending). \" \"For example:", "OAuth2 API - Customer SDK This is free and unencumbered", "\"in\": \"query\" }, { \"type\": \"string\", \"description\": \"Filter changes using", "In jurisdictions that recognize copyright laws, the author or authors", "or authors of this software dedicate any and all copyright", "overt act of relinquishment in perpetuity of all present and", "THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "jurisdictions that recognize copyright laws, the author or authors of", "or non-commercial, and by any means. In jurisdictions that recognize", "\"Comma separated values of change ids\", \"name\": \"ids\", \"in\": \"query\",", "return in the response. \" \"If not provided it will", "|. 1___| _| _ | | | | _ |", "compile, sell, or distribute this software, either in source code", "is free and unencumbered software released into the public domain.", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
For", "to manage pagination of results\", \"name\": \"limit\", \"in\": \"query\" },", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "\"filevantage\", [ { \"type\": \"array\", \"items\": { \"type\": \"string\" },", "separated values of change ids\", \"name\": \"ids\", \"in\": \"query\", \"required\":", "USE OR OTHER DEALINGS IN THE SOFTWARE. For more information,", "\"multi\", \"description\": \"Comma separated values of change ids\", \"name\": \"ids\",", "{ \"minimum\": 0, \"type\": \"integer\", \"description\": \"The first change index", "the public domain. Anyone is free to copy, modify, publish,", "<reponame>kra-ts/falconpy \"\"\"Internal API endpoint constant library. _______ __ _______ __", "change IDs\", \"filevantage\", [ { \"minimum\": 0, \"type\": \"integer\", \"description\":", "500). \" \"Use with the `offset` parameter to manage pagination", "`desc` (descending). \" \"For example: `action_timestamp|asc`.\\n\" \"The full list of", "| _ | |_.----|__| |--.-----. |. 1___| _| _ |", "endpoint constant library. _______ __ _______ __ __ __ |", "_| _ | | | | _ | 1___| _|", "copyright laws, the author or authors of this software dedicate", "`host.host_name`\\n - `action_timestamp`\\n\\n \" \"The full list of allowed filter", "dedication to be an overt act of relinquishment in perpetuity", "|::.. . | FalconPy `-------' `-------' OAuth2 API - Customer", "THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "future rights to this software under copyright law. THE SOFTWARE", "of the public at large and to the detriment of", "will default to '0'. \" \"Use with the `limit` parameter", "default to '0'. \" \"Use with the `limit` parameter to", "\"name\": \"ids\", \"in\": \"query\", \"required\": True } ] ], [", "copyright law. 
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "__ __ __ | _ .----.-----.--.--.--.--| | _ | |_.----|__|", "be reviewed in our API documentation.\", \"name\": \"filter\", \"in\": \"query\"", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "\"integer\", \"description\": \"The maximum number of changes to return in", "Anyone is free to copy, modify, publish, use, compile, sell,", "DEALINGS IN THE SOFTWARE. For more information, please refer to", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "\"The maximum number of changes to return in the response", "our heirs and successors. We intend this dedication to be", "IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY", "domain. We make this dedication for the benefit of the", "\\n\\n\" \"Common filter options include:\\n\\n - `host.host_name`\\n - `action_timestamp`\\n\\n \"", "_ | | | | _ | 1___| _| _|", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "pagination of results.\", \"name\": \"offset\", \"in\": \"query\" }, { \"type\":", "released into the public domain. Anyone is free to copy,", "response \" \"(default: 100; max: 500). \" \"Use with the", "\"integer\", \"description\": \"The first change index to return in the", "\"type\": \"array\", \"items\": { \"type\": \"string\" }, \"collectionFormat\": \"multi\", \"description\":", "1 | |: 1 | |::.. . | CROWDSTRIKE FALCON", "__ _______ __ __ __ | _ .----.-----.--.--.--.--| | _", "intend this dedication to be an overt act of relinquishment", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "make this dedication for the benefit of the public at", "\" \"If not provided it will default to '0'. 
\"", "}, { \"type\": \"integer\", \"description\": \"The maximum number of changes", "change occurrence) \\n\\n \" \"Sort either `asc` (ascending) or `desc`", "AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", ".----.-----.--.--.--.--| | _ | |_.----|__| |--.-----. |. 1___| _| _", "1___| _| _ | | | | _ | 1___|", "information on changes\", \"filevantage\", [ { \"type\": \"array\", \"items\": {", "\"in\": \"query\", \"required\": True } ] ], [ \"queryChanges\", \"GET\",", "in our API documentation.\", \"name\": \"filter\", \"in\": \"query\" } ]", "the detriment of our heirs and successors. We intend this", "means. In jurisdictions that recognize copyright laws, the author or", "more change IDs\", \"filevantage\", [ { \"minimum\": 0, \"type\": \"integer\",", "\"description\": \"The maximum number of changes to return in the", "that recognize copyright laws, the author or authors of this", "include:\\n\\n - `host.host_name`\\n - `action_timestamp`\\n\\n \" \"The full list of", "| _ | 1___| _| _| | <| -__| |.", "the benefit of the public at large and to the", "perpetuity of all present and future rights to this software", "the response. \" \"If not provided it will default to", "EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES", "either in source code form or as a compiled binary,", "100; max: 500). \" \"Use with the `offset` parameter to", "library. _______ __ _______ __ __ __ | _ .----.-----.--.--.--.--|", "rights to this software under copyright law. THE SOFTWARE IS", "\"type\": \"integer\", \"description\": \"The first change index to return in", "source code form or as a compiled binary, for any", "\"description\": \"Filter changes using a query in Falcon Query Language", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "not provided it will default to '0'. 
\" \"Use with", "author or authors of this software dedicate any and all", "API documentation.\", \"name\": \"sort\", \"in\": \"query\" }, { \"type\": \"string\",", "changes using options like:\\n\\n\" \"- `action_timestamp` (timestamp of the change", "}, \"collectionFormat\": \"multi\", \"description\": \"Comma separated values of change ids\",", "maximum number of changes to return in the response \"", "\"GET\", \"/filevantage/queries/changes/v2\", \"Returns one or more change IDs\", \"filevantage\", [", "index to return in the response. \" \"If not provided", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "with the `offset` parameter to manage pagination of results\", \"name\":", "results.\", \"name\": \"offset\", \"in\": \"query\" }, { \"type\": \"integer\", \"description\":", "one or more change IDs\", \"filevantage\", [ { \"minimum\": 0,", "1___| _| _| | <| -__| |. |___|__| |_____|________|_____|____ |____|__|", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR", "\"\"\"Internal API endpoint constant library. _______ __ _______ __ __", "Falcon Query Language (FQL). \\n\\n\" \"Common filter options include:\\n\\n -", "filter options include:\\n\\n - `host.host_name`\\n - `action_timestamp`\\n\\n \" \"The full", "(descending). \" \"For example: `action_timestamp|asc`.\\n\" \"The full list of allowed", "software released into the public domain. 
Anyone is free to", "ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "to copy, modify, publish, use, compile, sell, or distribute this", "[ \"queryChanges\", \"GET\", \"/filevantage/queries/changes/v2\", \"Returns one or more change IDs\",", "recognize copyright laws, the author or authors of this software", "use, compile, sell, or distribute this software, either in source", "documentation.\", \"name\": \"sort\", \"in\": \"query\" }, { \"type\": \"string\", \"description\":", "We make this dedication for the benefit of the public", "\"items\": { \"type\": \"string\" }, \"collectionFormat\": \"multi\", \"description\": \"Comma separated", "Customer SDK This is free and unencumbered software released into", "in perpetuity of all present and future rights to this", "\"\"\" _filevantage_endpoints = [ [ \"getChanges\", \"GET\", \"/filevantage/entities/changes/v2\", \"Retrieve information", ". | FalconPy `-------' `-------' OAuth2 API - Customer SDK", "\"filevantage\", [ { \"minimum\": 0, \"type\": \"integer\", \"description\": \"The first", "to <https://unlicense.org> \"\"\" _filevantage_endpoints = [ [ \"getChanges\", \"GET\", \"/filevantage/entities/changes/v2\",", "at large and to the detriment of our heirs and", "(timestamp of the change occurrence) \\n\\n \" \"Sort either `asc`", "\"Sort changes using options like:\\n\\n\" \"- `action_timestamp` (timestamp of the", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "the response \" \"(default: 100; max: 500). \" \"Use with", "<https://unlicense.org> \"\"\" _filevantage_endpoints = [ [ \"getChanges\", \"GET\", \"/filevantage/entities/changes/v2\", \"Retrieve", "API - Customer SDK This is free and unencumbered software", "detriment of our heirs and successors. We intend this dedication", "to '0'. 
\" \"Use with the `limit` parameter to manage", "WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT", "\"string\", \"description\": \"Filter changes using a query in Falcon Query", "for any purpose, commercial or non-commercial, and by any means.", "IN THE SOFTWARE. For more information, please refer to <https://unlicense.org>", "TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "THE SOFTWARE. For more information, please refer to <https://unlicense.org> \"\"\"", "| CROWDSTRIKE FALCON |::.. . | FalconPy `-------' `-------' OAuth2", "dedication for the benefit of the public at large and", "this dedication to be an overt act of relinquishment in", "_ | |_.----|__| |--.-----. |. 1___| _| _ | |", "of change ids\", \"name\": \"ids\", \"in\": \"query\", \"required\": True }", "information, please refer to <https://unlicense.org> \"\"\" _filevantage_endpoints = [ [", "NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM,", "first change index to return in the response. \" \"If", "Query Language (FQL). \\n\\n\" \"Common filter options include:\\n\\n - `host.host_name`\\n", "and future rights to this software under copyright law. THE", "results\", \"name\": \"limit\", \"in\": \"query\" }, { \"type\": \"string\", \"description\":", "| <| -__| |. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____| |: 1", "or as a compiled binary, for any purpose, commercial or", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "\"Use with the `offset` parameter to manage pagination of results\",", "law. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "to be an overt act of relinquishment in perpetuity of", "relinquishment in perpetuity of all present and future rights to", "by any means. 
In jurisdictions that recognize copyright laws, the", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "\"type\": \"integer\", \"description\": \"The maximum number of changes to return", "can be reviewed in our API documentation.\", \"name\": \"filter\", \"in\":", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "`action_timestamp` (timestamp of the change occurrence) \\n\\n \" \"Sort either", "\"Filter changes using a query in Falcon Query Language (FQL).", "\"The full list of allowed sorting options can be reviewed", "it will default to '0'. \" \"Use with the `limit`", "software under copyright law. THE SOFTWARE IS PROVIDED \"AS IS\",", "of this software dedicate any and all copyright interest in", "\"name\": \"offset\", \"in\": \"query\" }, { \"type\": \"integer\", \"description\": \"The", "more information, please refer to <https://unlicense.org> \"\"\" _filevantage_endpoints = [", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "max: 500). \" \"Use with the `offset` parameter to manage", "either `asc` (ascending) or `desc` (descending). \" \"For example: `action_timestamp|asc`.\\n\"", "= [ [ \"getChanges\", \"GET\", \"/filevantage/entities/changes/v2\", \"Retrieve information on changes\",", "`offset` parameter to manage pagination of results\", \"name\": \"limit\", \"in\":", "\"query\", \"required\": True } ] ], [ \"queryChanges\", \"GET\", \"/filevantage/queries/changes/v2\",", "in source code form or as a compiled binary, for", "change ids\", \"name\": \"ids\", \"in\": \"query\", \"required\": True } ]", "|_.----|__| |--.-----. |. 1___| _| _ | | | |", "sorting options can be reviewed in our API documentation.\", \"name\":", "\" \"(default: 100; max: 500). \" \"Use with the `offset`", "or `desc` (descending). 
\" \"For example: `action_timestamp|asc`.\\n\" \"The full list", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "|--.-----. |. 1___| _| _ | | | | _", "heirs and successors. We intend this dedication to be an", "| | _ | 1___| _| _| | <| -__|", "parameter to manage pagination of results.\", \"name\": \"offset\", \"in\": \"query\"", "(FQL). \\n\\n\" \"Common filter options include:\\n\\n - `host.host_name`\\n - `action_timestamp`\\n\\n", "|____|__| |__|__|__|_____| |: 1 | |: 1 | |::.. .", "NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR", "| |: 1 | |::.. . | CROWDSTRIKE FALCON |::..", "and unencumbered software released into the public domain. Anyone is", "OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "our API documentation.\", \"name\": \"filter\", \"in\": \"query\" } ] ]", "public at large and to the detriment of our heirs", "return in the response \" \"(default: 100; max: 500). \"", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "\" \"Use with the `offset` parameter to manage pagination of", "\"For example: `action_timestamp|asc`.\\n\" \"The full list of allowed sorting options", "manage pagination of results\", \"name\": \"limit\", \"in\": \"query\" }, {", "\"string\" }, \"collectionFormat\": \"multi\", \"description\": \"Comma separated values of change", "\"in\": \"query\" }, { \"type\": \"integer\", \"description\": \"The maximum number", "\"offset\", \"in\": \"query\" }, { \"type\": \"integer\", \"description\": \"The maximum", "- Customer SDK This is free and unencumbered software released", "__ __ | _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.", "[ \"getChanges\", \"GET\", \"/filevantage/entities/changes/v2\", \"Retrieve information on changes\", \"filevantage\", [", "\"The first change index to return in the response. \"", "domain. 
Anyone is free to copy, modify, publish, use, compile,", "changes\", \"filevantage\", [ { \"type\": \"array\", \"items\": { \"type\": \"string\"", "|___|__| |_____|________|_____|____ |____|__| |__|__|__|_____| |: 1 | |: 1 |", "__ | _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----. |.", "|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____| |: 1 | |: 1", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "\"string\", \"description\": \"Sort changes using options like:\\n\\n\" \"- `action_timestamp` (timestamp", "|_____|________|_____|____ |____|__| |__|__|__|_____| |: 1 | |: 1 | |::..", "and by any means. In jurisdictions that recognize copyright laws,", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "response. \" \"If not provided it will default to '0'.", "filter parameters can be reviewed in our API documentation.\", \"name\":", "in Falcon Query Language (FQL). \\n\\n\" \"Common filter options include:\\n\\n", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR", "of allowed filter parameters can be reviewed in our API", "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "\"description\": \"Comma separated values of change ids\", \"name\": \"ids\", \"in\":", "(ascending) or `desc` (descending). \" \"For example: `action_timestamp|asc`.\\n\" \"The full", "IDs\", \"filevantage\", [ { \"minimum\": 0, \"type\": \"integer\", \"description\": \"The", "free and unencumbered software released into the public domain. Anyone", "to this software under copyright law. THE SOFTWARE IS PROVIDED", "and to the detriment of our heirs and successors. 
We", "\" \"Use with the `limit` parameter to manage pagination of", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF", "of allowed sorting options can be reviewed in our API", "laws, the author or authors of this software dedicate any", "\"description\": \"Sort changes using options like:\\n\\n\" \"- `action_timestamp` (timestamp of", "|: 1 | |: 1 | |::.. . | CROWDSTRIKE", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "\"/filevantage/entities/changes/v2\", \"Retrieve information on changes\", \"filevantage\", [ { \"type\": \"array\",", "full list of allowed filter parameters can be reviewed in", "large and to the detriment of our heirs and successors.", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "and all copyright interest in the software to the public", "\"Retrieve information on changes\", \"filevantage\", [ { \"type\": \"array\", \"items\":", "refer to <https://unlicense.org> \"\"\" _filevantage_endpoints = [ [ \"getChanges\", \"GET\",", "[ { \"minimum\": 0, \"type\": \"integer\", \"description\": \"The first change", "successors. We intend this dedication to be an overt act", "any and all copyright interest in the software to the", "of results.\", \"name\": \"offset\", \"in\": \"query\" }, { \"type\": \"integer\",", "in the software to the public domain. We make this", "list of allowed filter parameters can be reviewed in our", "|__|__|__|_____| |: 1 | |: 1 | |::.. . |", "any purpose, commercial or non-commercial, and by any means. In", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,", "compiled binary, for any purpose, commercial or non-commercial, and by", "reviewed in our API documentation.\", \"name\": \"sort\", \"in\": \"query\" },", "\"query\" }, { \"type\": \"string\", \"description\": \"Filter changes using a", "- `action_timestamp`\\n\\n \" \"The full list of allowed filter parameters", "| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----. |. 
1___|", "We intend this dedication to be an overt act of", "the change occurrence) \\n\\n \" \"Sort either `asc` (ascending) or", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING", "OTHER DEALINGS IN THE SOFTWARE. For more information, please refer", "KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "change index to return in the response. \" \"If not", "\"collectionFormat\": \"multi\", \"description\": \"Comma separated values of change ids\", \"name\":", "binary, for any purpose, commercial or non-commercial, and by any", "\"type\": \"string\", \"description\": \"Filter changes using a query in Falcon", "\"query\" }, { \"type\": \"integer\", \"description\": \"The maximum number of", "this software, either in source code form or as a", "] ], [ \"queryChanges\", \"GET\", \"/filevantage/queries/changes/v2\", \"Returns one or more", "\"name\": \"sort\", \"in\": \"query\" }, { \"type\": \"string\", \"description\": \"Filter", "publish, use, compile, sell, or distribute this software, either in", "\"ids\", \"in\": \"query\", \"required\": True } ] ], [ \"queryChanges\",", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "{ \"type\": \"string\", \"description\": \"Sort changes using options like:\\n\\n\" \"-", "| | | _ | 1___| _| _| | <|", "of relinquishment in perpetuity of all present and future rights", "\"sort\", \"in\": \"query\" }, { \"type\": \"string\", \"description\": \"Filter changes", "all copyright interest in the software to the public domain.", "pagination of results\", \"name\": \"limit\", \"in\": \"query\" }, { \"type\":", "to return in the response \" \"(default: 100; max: 500).", "this software under copyright law. THE SOFTWARE IS PROVIDED \"AS", "For more information, please refer to <https://unlicense.org> \"\"\" _filevantage_endpoints =", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "non-commercial, and by any means. 
In jurisdictions that recognize copyright", "FalconPy `-------' `-------' OAuth2 API - Customer SDK This is", "[ [ \"getChanges\", \"GET\", \"/filevantage/entities/changes/v2\", \"Retrieve information on changes\", \"filevantage\",", "\"If not provided it will default to '0'. \" \"Use", "copyright interest in the software to the public domain. We", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "of our heirs and successors. We intend this dedication to", "commercial or non-commercial, and by any means. In jurisdictions that", "of all present and future rights to this software under", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "provided it will default to '0'. \" \"Use with the", "\\n\\n \" \"Sort either `asc` (ascending) or `desc` (descending). \"", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "list of allowed sorting options can be reviewed in our", "public domain. We make this dedication for the benefit of", "code form or as a compiled binary, for any purpose,", "\"minimum\": 0, \"type\": \"integer\", \"description\": \"The first change index to", "}, { \"type\": \"string\", \"description\": \"Sort changes using options like:\\n\\n\"", "dedicate any and all copyright interest in the software to", "\"Returns one or more change IDs\", \"filevantage\", [ { \"minimum\":", "our API documentation.\", \"name\": \"sort\", \"in\": \"query\" }, { \"type\":", "\"The full list of allowed filter parameters can be reviewed", "an overt act of relinquishment in perpetuity of all present", "\"- `action_timestamp` (timestamp of the change occurrence) \\n\\n \" \"Sort", "{ \"type\": \"string\" }, \"collectionFormat\": \"multi\", \"description\": \"Comma separated values", "`limit` parameter to manage pagination of results.\", \"name\": \"offset\", \"in\":", "of the change occurrence) \\n\\n \" \"Sort either `asc` (ascending)", "'0'. 
\" \"Use with the `limit` parameter to manage pagination", "using a query in Falcon Query Language (FQL). \\n\\n\" \"Common", "{ \"type\": \"integer\", \"description\": \"The maximum number of changes to", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "| | | | _ | 1___| _| _| |", "sell, or distribute this software, either in source code form", "\"GET\", \"/filevantage/entities/changes/v2\", \"Retrieve information on changes\", \"filevantage\", [ { \"type\":", "True } ] ], [ \"queryChanges\", \"GET\", \"/filevantage/queries/changes/v2\", \"Returns one", "using options like:\\n\\n\" \"- `action_timestamp` (timestamp of the change occurrence)", "\"array\", \"items\": { \"type\": \"string\" }, \"collectionFormat\": \"multi\", \"description\": \"Comma", "copy, modify, publish, use, compile, sell, or distribute this software,", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "<| -__| |. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____| |: 1 |", "example: `action_timestamp|asc`.\\n\" \"The full list of allowed sorting options can", "in the response \" \"(default: 100; max: 500). \" \"Use", "{ \"type\": \"string\", \"description\": \"Filter changes using a query in", "present and future rights to this software under copyright law.", "is free to copy, modify, publish, use, compile, sell, or", "full list of allowed sorting options can be reviewed in", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "the software to the public domain. We make this dedication", "This is free and unencumbered software released into the public", "act of relinquishment in perpetuity of all present and future", "benefit of the public at large and to the detriment", "the public domain. We make this dedication for the benefit", "| 1___| _| _| | <| -__| |. 
|___|__| |_____|________|_____|____", "the public at large and to the detriment of our", "the `offset` parameter to manage pagination of results\", \"name\": \"limit\",", "\"/filevantage/queries/changes/v2\", \"Returns one or more change IDs\", \"filevantage\", [ {", "can be reviewed in our API documentation.\", \"name\": \"sort\", \"in\":", "be an overt act of relinquishment in perpetuity of all", "software dedicate any and all copyright interest in the software", "this software dedicate any and all copyright interest in the", "ids\", \"name\": \"ids\", \"in\": \"query\", \"required\": True } ] ],", "authors of this software dedicate any and all copyright interest", "\"(default: 100; max: 500). \" \"Use with the `offset` parameter", "0, \"type\": \"integer\", \"description\": \"The first change index to return", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "and successors. We intend this dedication to be an overt", "a query in Falcon Query Language (FQL). \\n\\n\" \"Common filter", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "modify, publish, use, compile, sell, or distribute this software, either" ]
[ "B305 is incorrectly raised here # https://github.com/PyCQA/flake8-bugbear/issues/59 self.next(len(x)) # noqa:", "hasattr(self, \"_phaser\"): self._phaser = itertools.cycle(self.phases) return next(self._phaser) def update(self): #", "self.finish() before delegating to the original SIGINT handler. This handler", "sys.stdout message = \"%(percent)d%%\" suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\" class", "normally, or gets interrupted. \"\"\" super().finish() # type: ignore signal(SIGINT,", "use with the preferred # bar. characters = [ getattr(preferred,", "https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore self.message = (\" \"", "type: str @property def downloaded(self): # type: () -> str", "Lots of different errors can come from this, including SystemError", "that hide_cursor is False on # Windows. # This call", "but the colorama.AnsiToWin32() object doesn't have that, so we'll #", "\"\"\" self.finish() self.original_handler(signum, frame) class SilentBar(Bar): def update(self): # type:", "# fancier bar and if not we'll fall back to", "https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore # Check if we", "version 1.2, that helper has the following problems: 1. 
It", "itertools import sys from signal import SIGINT, default_int_handler, signal from", "interrupt some time after a progress-displaying download has already completed,", "+ 2)) + self.message # type: str @property def downloaded(self):", "use the # fancier bar and if not we'll fall", "class is similar to the progress library's existing SigIntMixin helper,", "* (get_indentation() + 2)) + self.message # type: str @property", "This probably should not happen, # but if it does,", "DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar): pass class DownloadProgressSpinner( WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner ):", "): file = sys.stdout suffix = \"%(downloaded)s %(download_speed)s\" def next_phase(self):", "self.writeln(line) BAR_TYPES = { \"off\": (DownloadSilentBar, DownloadSilentBar), \"on\": (DefaultDownloadProgressBar, DownloadProgressSpinner),", "at least. # The least bad option should be Python's", "# just raises KeyboardInterrupt. if self.original_handler is None: self.original_handler =", "hidden cursors) behind. This class is similar to the progress", "interrupted without leaving temporary state (like hidden cursors) behind. This", "assume that we can use the # fancier bar and", "if self.eta: # type: ignore return f\"eta {self.eta_td}\" # type:", "we'll assume that we can use the # fancier bar", "ensure that self.finish() gets called on keyboard interrupt. This allows", "UnicodeEncodeError: return fallback else: return preferred _BaseBar = _select_progress_class(IncrementalBar, Bar)", "() -> None \"\"\" Restore the original SIGINT handler after", "class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar): pass class DownloadProgressSpinner( WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner", "self.avg) + \"/s\" # type: ignore @property def pretty_eta(self): #", "assume # that it doesn't support unicode and use the", "restore it. 
This probably should not happen, # but if", "will have unexpected delayed effects if the user triggers an", "temporary state (like hidden cursors) behind. This class is similar", "BlueEmojiBar(IncrementalBar): suffix = \"%(percent)d%%\" bar_prefix = \" \" bar_suffix =", "encoding # of the given file, if this works then", "of whether the progress display finishes normally, or gets interrupted.", "of the given file, if this works then we'll assume", "file = sys.stdout message = \"%(percent)d%%\" suffix = \"%(downloaded)s %(download_speed)s", "= \"%(percent)d%%\" suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\" class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar):", "DownloadProgressMixin, Spinner ): file = sys.stdout suffix = \"%(downloaded)s %(download_speed)s\"", "code expects to be able to call self.file.isatty() # but", "\" if suffix else \"\", suffix, ] ) self.writeln(line) BAR_TYPES", "don't set this soon # enough, we get a \"hide\"", "to call self.file.isatty() # but the colorama.AnsiToWin32() object doesn't have", "\"\"\" Call self.finish() before delegating to the original SIGINT handler.", "lambda: self.file.wrapped.flush() class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin): file = sys.stdout message", "self.original_handler is None: self.original_handler = default_int_handler def finish(self): # type:", "was not installed from # Python, and we cannot restore", "Any, Dict, List from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar from", "if the user triggers an unrelated keyboard interrupt some time", "ignore # The progress code expects to be able to", "it. if WINDOWS and colorama: self.file = colorama.AnsiToWin32(self.file) # type:", "and we cannot restore it. This probably should not happen,", "type: () -> str # Avoid zero division errors... if", "call needs to go before the super() call, so that", "code to the terminal in its init, so if we", "that, so we'll # add it. 
self.file.flush = lambda: self.file.wrapped.flush()", "Check if we are running on Windows and we have", "() -> str # Avoid zero division errors... if self.avg", "# Try to decode the characters we're using for the", "# bar. characters = [ getattr(preferred, \"empty_fill\", \"\"), getattr(preferred, \"fill\",", "SilentBar): pass class DownloadBar(BaseDownloadProgressBar, Bar): pass class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar): pass", "in its init, so if we don't set this soon", "cursor\" # code to the terminal in its init, so", "suffix, ] ) self.writeln(line) BAR_TYPES = { \"off\": (DownloadSilentBar, DownloadSilentBar),", "ensure that hide_cursor is False on # Windows. # This", "on Windows and we have the colorama module, # if", "ignore for x in it: yield x # B305 is", "leaves its own handler in place even after an uninterrupted", "class WindowsMixin: def __init__(self, *args, **kwargs): # type: (List[Any], Dict[Any,", "def iter(self, it): # type: ignore for x in it:", "have the colorama module, # if we do then wrap", "that it doesn't support unicode and use the ASCII bar.", "want to use with the preferred # bar. characters =", "the # fancier bar and if not we'll fall back", "uninterrupted finish, which will have unexpected delayed effects if the", "pip._internal.utils.misc import format_size try: from pip._vendor import colorama # Lots", "type: (List[Any], Dict[Any, Any]) -> None # The Windows terminal", "self.file.isatty() # but the colorama.AnsiToWin32() object doesn't have that, so", "# type: ignore self.original_handler = signal(SIGINT, self.handle_sigint) # If signal()", "Any]) -> None # The Windows terminal does not support", "iter(self, it): # type: ignore for x in it: yield", "next(self._phaser) def update(self): # type: () -> None message =", "the following problems: 1. It calls sys.exit(). 2. It discards", "back to the plaintext bar. 
try: \"\".join(characters).encode(encoding) except UnicodeEncodeError: return", "if not hasattr(self, \"_phaser\"): self._phaser = itertools.cycle(self.phases) return next(self._phaser) def", "SIGINT, default_int_handler, signal from typing import Any, Dict, List from", "[ getattr(preferred, \"empty_fill\", \"\"), getattr(preferred, \"fill\", \"\"), ] characters +=", "super().__init__(*args, **kwargs) # type: ignore self.message = (\" \" *", "== 0.0: # type: ignore return \"...\" return format_size(1 /", "get_indentation from pip._internal.utils.misc import format_size try: from pip._vendor import colorama", "time after a progress-displaying download has already completed, for example.", "ignore signal(SIGINT, self.original_handler) def handle_sigint(self, signum, frame): # type: ignore", "encoding this file is in, then we'll just assume #", "# type: ignore # Check if we are running on", "object doesn't have that, so we'll # add it. self.file.flush", "\"off\": (DownloadSilentBar, DownloadSilentBar), \"on\": (DefaultDownloadProgressBar, DownloadProgressSpinner), \"ascii\": (DownloadBar, DownloadProgressSpinner), \"pretty\":", "if self.original_handler is None: self.original_handler = default_int_handler def finish(self): #", "] ) self.writeln(line) BAR_TYPES = { \"off\": (DownloadSilentBar, DownloadSilentBar), \"on\":", "\"encoding\", None) # If we don't know what encoding this", "pass class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): pass class DownloadBar(BaseDownloadProgressBar, Bar): pass class", "display is active. \"\"\" self.finish() self.original_handler(signum, frame) class SilentBar(Bar): def", "effects if the user triggers an unrelated keyboard interrupt some", "and we have the colorama module, # if we do", "(DownloadSilentBar, DownloadSilentBar), \"on\": (DefaultDownloadProgressBar, DownloadProgressSpinner), \"ascii\": (DownloadBar, DownloadProgressSpinner), \"pretty\": (DownloadFillingCirclesBar,", "behind. 
This class is similar to the progress library's existing", "type: ignore \"\"\" Call self.finish() before delegating to the original", "None # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore self.message =", "getattr(preferred, \"empty_fill\", \"\"), getattr(preferred, \"fill\", \"\"), ] characters += list(getattr(preferred,", "finishing. This should happen regardless of whether the progress display", "sys.exit(). 2. It discards the existing SIGINT handler completely. 3.", "noqa: B305 self.finish() class WindowsMixin: def __init__(self, *args, **kwargs): #", "SigIntMixin helper, but as of version 1.2, that helper has", "-> Bar encoding = getattr(preferred.file, \"encoding\", None) # If we", "regardless of whether the progress display finishes normally, or gets", "and self.hide_cursor: # type: ignore self.hide_cursor = False # https://github.com/python/mypy/issues/5887", "something sensible instead, at least. # The least bad option", "type: ignore return \"...\" return format_size(1 / self.avg) + \"/s\"", "decode the characters we're using for the bar using the", "# type: ignore return f\"eta {self.eta_td}\" # type: ignore return", "doesn't support unicode and use the ASCII bar. if not", "ignore return \"\" def iter(self, it): # type: ignore for", "this soon # enough, we get a \"hide\" with no", "wrap our file with it. if WINDOWS and colorama: self.file", "that helper has the following problems: 1. It calls sys.exit().", "str # Avoid zero division errors... if self.avg == 0.0:", "previous handler was not installed from # Python, and we", "-> str if self.eta: # type: ignore return f\"eta {self.eta_td}\"", "type: ignore # Check if we are running on Windows", "fall back to the plaintext bar. 
try: \"\".join(characters).encode(encoding) except UnicodeEncodeError:", "def update(self): # type: () -> None message = self.message", "return fallback # Collect all of the possible characters we", "after an uninterrupted finish, which will have unexpected delayed effects", "and colorama: self.file = colorama.AnsiToWin32(self.file) # type: ignore # The", "on # Windows. # This call needs to go before", "user triggers an unrelated keyboard interrupt some time after a", "Windows and we have the colorama module, # if we", "DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar): pass class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar): pass class DownloadProgressSpinner( WindowsMixin,", "WindowsMixin: def __init__(self, *args, **kwargs): # type: (List[Any], Dict[Any, Any])", "cursor ANSI codes # even with colorama. So we'll ensure", "[ message, \" \" if message else \"\", phase, \"", "is active. \"\"\" self.finish() self.original_handler(signum, frame) class SilentBar(Bar): def update(self):", "InterruptibleMixin, DownloadProgressMixin): file = sys.stdout message = \"%(percent)d%%\" suffix =", "zero division errors... if self.avg == 0.0: # type: ignore", "if message else \"\", phase, \" \" if suffix else", "finish(self): # type: () -> None \"\"\" Restore the original", "state (like hidden cursors) behind. This class is similar to", "which # just raises KeyboardInterrupt. if self.original_handler is None: self.original_handler", "def __init__(self, *args, **kwargs): # type: (List[Any], Dict[Any, Any]) ->", "format_size try: from pip._vendor import colorama # Lots of different", "as of version 1.2, that helper has the following problems:", "object doesn't have that, so we'll # add it. self.file.isatty", "\"%(percent)d%%\" suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\" class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar): pass", "calls sys.exit(). 2. 
It discards the existing SIGINT handler completely.", "None or max == 0: return BAR_TYPES[progress_bar][1]().iter else: return BAR_TYPES[progress_bar][0](max=max).iter", "None \"\"\" Save the original SIGINT handler for later. \"\"\"", "# If we don't know what encoding this file is", "the user triggers an unrelated keyboard interrupt some time after", "FillingCirclesBar, IncrementalBar from pip._vendor.progress.spinner import Spinner from pip._internal.utils.compat import WINDOWS", "the progress library's existing SigIntMixin helper, but as of version", "format_size(self.index) # type: ignore @property def download_speed(self): # type: ()", "so we'll # add it. self.file.flush = lambda: self.file.wrapped.flush() class", "in place even after an uninterrupted finish, which will have", "progress code expects to be able to call self.file.flush() #", "except UnicodeEncodeError: return fallback else: return preferred _BaseBar = _select_progress_class(IncrementalBar,", "we can use the # fancier bar and if not", "itertools.cycle(self.phases) return next(self._phaser) def update(self): # type: () -> None", "helper, but as of version 1.2, that helper has the", "Spinner ): file = sys.stdout suffix = \"%(downloaded)s %(download_speed)s\" def", "that we can use the # fancier bar and if", "() -> None message = self.message % self phase =", "corresponding \"show\"... 
if WINDOWS and self.hide_cursor: # type: ignore self.hide_cursor", "return fallback else: return preferred _BaseBar = _select_progress_class(IncrementalBar, Bar) #", "is incorrectly raised here # https://github.com/PyCQA/flake8-bugbear/issues/59 self.next(len(x)) # noqa: B305", "from pip._internal.utils.logging import get_indentation from pip._internal.utils.misc import format_size try: from", "**kwargs): # type: (List[Any], Dict[Any, Any]) -> None \"\"\" Save", "# type: ignore @property def pretty_eta(self): # type: () ->", "# B305 is incorrectly raised here # https://github.com/PyCQA/flake8-bugbear/issues/59 self.next(len(x)) #", "Bar): pass class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar): pass class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar): pass", "delegating to the original SIGINT handler. This handler should only", "set in time. The base progress bar class writes the", "-> str if not hasattr(self, \"_phaser\"): self._phaser = itertools.cycle(self.phases) return", "bar. characters = [ getattr(preferred, \"empty_fill\", \"\"), getattr(preferred, \"fill\", \"\"),", "the preferred # bar. characters = [ getattr(preferred, \"empty_fill\", \"\"),", "None \"\"\" Restore the original SIGINT handler after finishing. This", "to be interrupted without leaving temporary state (like hidden cursors)", "the bar using the encoding # of the given file,", "DownloadBar(BaseDownloadProgressBar, Bar): pass class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar): pass class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar):", "from pip._internal.utils.misc import format_size try: from pip._vendor import colorama #", "-> None pass class BlueEmojiBar(IncrementalBar): suffix = \"%(percent)d%%\" bar_prefix =", "if we do then wrap our file with it. if", "characters we want to use with the preferred # bar.", "the encoding # of the given file, if this works", "colorama. 
So we'll ensure that hide_cursor is False on #", "] characters += list(getattr(preferred, \"phases\", [])) # Try to decode", "type: () -> None message = self.message % self phase", "\"...\" return format_size(1 / self.avg) + \"/s\" # type: ignore", "frame) class SilentBar(Bar): def update(self): # type: () -> None", "not we'll fall back to the plaintext bar. try: \"\".join(characters).encode(encoding)", "signal(SIGINT, self.original_handler) def handle_sigint(self, signum, frame): # type: ignore \"\"\"", "if we don't set this soon # enough, we get", "= \"\".join( [ message, \" \" if message else \"\",", "from this, including SystemError and # ImportError. except Exception: colorama", "the characters we're using for the bar using the encoding", "\"\\U0001F535\") class DownloadProgressMixin: def __init__(self, *args, **kwargs): # type: (List[Any],", "# noqa: B305 self.finish() class WindowsMixin: def __init__(self, *args, **kwargs):", "\"\", phase, \" \" if suffix else \"\", suffix, ]", "self.original_handler) def handle_sigint(self, signum, frame): # type: ignore \"\"\" Call", "Avoid zero division errors... 
if self.avg == 0.0: # type:", "import get_indentation from pip._internal.utils.misc import format_size try: from pip._vendor import", "_BaseBar): pass class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): pass class DownloadBar(BaseDownloadProgressBar, Bar): pass", "\" \" if suffix else \"\", suffix, ] ) self.writeln(line)", "\" \" phases = (\"\\U0001F539\", \"\\U0001F537\", \"\\U0001F535\") class DownloadProgressMixin: def", "preferred _BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any class InterruptibleMixin:", "WINDOWS and colorama: self.file = colorama.AnsiToWin32(self.file) # type: ignore #", "triggers an unrelated keyboard interrupt some time after a progress-displaying", "%(download_speed)s %(pretty_eta)s\" class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar): pass class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): pass", "So we'll ensure that hide_cursor is False on # Windows.", "if max is None or max == 0: return BAR_TYPES[progress_bar][1]().iter", "using for the bar using the encoding # of the", "This class is similar to the progress library's existing SigIntMixin", "-> None \"\"\" Restore the original SIGINT handler after finishing.", "type: () -> str if self.eta: # type: ignore return", "Dict[Any, Any]) -> None # The Windows terminal does not", "# The progress code expects to be able to call", "encoding = getattr(preferred.file, \"encoding\", None) # If we don't know", "# type: ignore return \"\" def iter(self, it): # type:", "# type: ignore signal(SIGINT, self.original_handler) def handle_sigint(self, signum, frame): #", "for example. 
\"\"\" def __init__(self, *args, **kwargs): # type: (List[Any],", "downloads to be interrupted without leaving temporary state (like hidden", "type: ignore return f\"eta {self.eta_td}\" # type: ignore return \"\"", "file, if this works then we'll assume that we can", "not encoding: return fallback # Collect all of the possible", "# of the given file, if this works then we'll", "# type: ignore for x in it: yield x #", "# type: ignore @property def download_speed(self): # type: () ->", "know what encoding this file is in, then we'll just", "we want to use with the preferred # bar. characters", "class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): pass class DownloadBar(BaseDownloadProgressBar, Bar): pass class DownloadFillingCirclesBar(BaseDownloadProgressBar,", "(\" \" * (get_indentation() + 2)) + self.message # type:", "whether the progress display finishes normally, or gets interrupted. \"\"\"", "import Any, Dict, List from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar", "some time after a progress-displaying download has already completed, for", "does not support the hide/show cursor ANSI codes # even", "from typing import Any, Dict, List from pip._vendor.progress.bar import Bar,", "ignore self.original_handler = signal(SIGINT, self.handle_sigint) # If signal() returns None,", "self.handle_sigint) # If signal() returns None, the previous handler was", "pip._vendor.progress.spinner import Spinner from pip._internal.utils.compat import WINDOWS from pip._internal.utils.logging import", "if this works then we'll assume that we can use", "It calls sys.exit(). 2. 
It discards the existing SIGINT handler", "DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar): pass class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): pass class DownloadBar(BaseDownloadProgressBar, Bar):", "def handle_sigint(self, signum, frame): # type: ignore \"\"\" Call self.finish()", "unrelated keyboard interrupt some time after a progress-displaying download has", "# type: () -> str if not hasattr(self, \"_phaser\"): self._phaser", "() -> str return format_size(self.index) # type: ignore @property def", "\"\"\" super().finish() # type: ignore signal(SIGINT, self.original_handler) def handle_sigint(self, signum,", "with the preferred # bar. characters = [ getattr(preferred, \"empty_fill\",", "2)) + self.message # type: str @property def downloaded(self): #", "line = \"\".join( [ message, \" \" if message else", "active. \"\"\" self.finish() self.original_handler(signum, frame) class SilentBar(Bar): def update(self): #", "+= list(getattr(preferred, \"phases\", [])) # Try to decode the characters", "# code to the terminal in its init, so if", "-> None # The Windows terminal does not support the", "= (\" \" * (get_indentation() + 2)) + self.message #", "the original SIGINT handler for later. \"\"\" # https://github.com/python/mypy/issues/5887 super().__init__(*args,", "# but the colorama.AnsiToWin32() object doesn't have that, so we'll", "(DownloadFillingCirclesBar, DownloadProgressSpinner), \"emoji\": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner), } def DownloadProgressProvider(progress_bar, max=None): #", "# if we do then wrap our file with it.", "\"\"), ] characters += list(getattr(preferred, \"phases\", [])) # Try to", "to the plaintext bar. try: \"\".join(characters).encode(encoding) except UnicodeEncodeError: return fallback", "unexpected delayed effects if the user triggers an unrelated keyboard", "\"\"\" def __init__(self, *args, **kwargs): # type: (List[Any], Dict[Any, Any])", "-> str # Avoid zero division errors... 
if self.avg ==", "pass class BlueEmojiBar(IncrementalBar): suffix = \"%(percent)d%%\" bar_prefix = \" \"", "is None or max == 0: return BAR_TYPES[progress_bar][1]().iter else: return", "\"%(downloaded)s %(download_speed)s\" def next_phase(self): # type: () -> str if", "fallback): # type: (Bar, Bar) -> Bar encoding = getattr(preferred.file,", "happen, # but if it does, we must restore something", "\" \" if message else \"\", phase, \" \" if", "ignore return f\"eta {self.eta_td}\" # type: ignore return \"\" def", "gets called on keyboard interrupt. This allows downloads to be", "= colorama.AnsiToWin32(self.file) # type: ignore # The progress code expects", "\"on\": (DefaultDownloadProgressBar, DownloadProgressSpinner), \"ascii\": (DownloadBar, DownloadProgressSpinner), \"pretty\": (DownloadFillingCirclesBar, DownloadProgressSpinner), \"emoji\":", "has the following problems: 1. It calls sys.exit(). 2. It", "ignore return \"...\" return format_size(1 / self.avg) + \"/s\" #", "original SIGINT handler. This handler should only be in place", "must restore something sensible instead, at least. # The least", "default_int_handler def finish(self): # type: () -> None \"\"\" Restore", "-> None \"\"\" Save the original SIGINT handler for later.", "errors can come from this, including SystemError and # ImportError.", "returns None, the previous handler was not installed from #", "} def DownloadProgressProvider(progress_bar, max=None): # type: ignore if max is", "self phase = self.next_phase() suffix = self.suffix % self line", "\"\"\" # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore self.original_handler =", "suffix = \"%(downloaded)s %(download_speed)s\" def next_phase(self): # type: () ->", "# type: ignore if max is None or max ==", "sensible instead, at least. 
# The least bad option should", "signal import SIGINT, default_int_handler, signal from typing import Any, Dict,", "the super() call, so that hide_cursor # is set in", "\"show\"... if WINDOWS and self.hide_cursor: # type: ignore self.hide_cursor =", "message = self.message % self phase = self.next_phase() suffix =", "but as of version 1.2, that helper has the following", "# Collect all of the possible characters we want to", "its init, so if we don't set this soon #", "# ImportError. except Exception: colorama = None def _select_progress_class(preferred, fallback):", "\"\".join( [ message, \" \" if message else \"\", phase,", "def update(self): # type: () -> None pass class BlueEmojiBar(IncrementalBar):", "library's existing SigIntMixin helper, but as of version 1.2, that", "\"phases\", [])) # Try to decode the characters we're using", "we don't set this soon # enough, we get a", "= signal(SIGINT, self.handle_sigint) # If signal() returns None, the previous", "if self.avg == 0.0: # type: ignore return \"...\" return", "# type: str @property def downloaded(self): # type: () ->", "characters = [ getattr(preferred, \"empty_fill\", \"\"), getattr(preferred, \"fill\", \"\"), ]", "ignore if max is None or max == 0: return", "self.message # type: str @property def downloaded(self): # type: ()", "\"fill\", \"\"), ] characters += list(getattr(preferred, \"phases\", [])) # Try", "bar. if not encoding: return fallback # Collect all of", "similar to the progress library's existing SigIntMixin helper, but as", "completely. 3. It leaves its own handler in place even", "errors... if self.avg == 0.0: # type: ignore return \"...\"", "ignore @property def pretty_eta(self): # type: () -> str if", "handler was not installed from # Python, and we cannot", "completed, for example. 
\"\"\" def __init__(self, *args, **kwargs): # type:", "class DownloadProgressSpinner( WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner ): file = sys.stdout", "pip._internal.utils.logging import get_indentation from pip._internal.utils.misc import format_size try: from pip._vendor", "should be Python's default SIGINT handler, which # just raises", "if WINDOWS and self.hide_cursor: # type: ignore self.hide_cursor = False", "the progress display finishes normally, or gets interrupted. \"\"\" super().finish()", "WINDOWS from pip._internal.utils.logging import get_indentation from pip._internal.utils.misc import format_size try:", "progress code expects to be able to call self.file.isatty() #", "frame): # type: ignore \"\"\" Call self.finish() before delegating to", "() -> None pass class BlueEmojiBar(IncrementalBar): suffix = \"%(percent)d%%\" bar_prefix", "module, # if we do then wrap our file with", "if we are running on Windows and we have the", "this file is in, then we'll just assume # that", "type: ignore for x in it: yield x # B305", "writes the \"hide cursor\" # code to the terminal in", "# add it. self.file.flush = lambda: self.file.wrapped.flush() class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin,", "cannot restore it. This probably should not happen, # but", "(List[Any], Dict[Any, Any]) -> None # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) #", "type: ignore @property def download_speed(self): # type: () -> str", "Windows. # This call needs to go before the super()", "() -> str if self.eta: # type: ignore return f\"eta", "import SIGINT, default_int_handler, signal from typing import Any, Dict, List", "https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore self.original_handler = signal(SIGINT, self.handle_sigint)", "finishes normally, or gets interrupted. \"\"\" super().finish() # type: ignore", "Save the original SIGINT handler for later. 
\"\"\" # https://github.com/python/mypy/issues/5887", "= default_int_handler def finish(self): # type: () -> None \"\"\"", "type: (List[Any], Dict[Any, Any]) -> None # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs)", "@property def download_speed(self): # type: () -> str # Avoid", "# type: () -> str if self.eta: # type: ignore", "type: ignore return \"\" def iter(self, it): # type: ignore", "so that hide_cursor # is set in time. The base", "init, so if we don't set this soon # enough,", "is False on # Windows. # This call needs to", "it): # type: ignore for x in it: yield x", "to be able to call self.file.flush() # but the colorama.AnsiToWin32()", "pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar from pip._vendor.progress.spinner import Spinner from", "signal() returns None, the previous handler was not installed from", "# even with colorama. So we'll ensure that hide_cursor is", "DownloadProgressSpinner( WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner ): file = sys.stdout suffix", "# type: () -> str return format_size(self.index) # type: ignore", "try: from pip._vendor import colorama # Lots of different errors", "# is set in time. The base progress bar class", "raises KeyboardInterrupt. if self.original_handler is None: self.original_handler = default_int_handler def", "= { \"off\": (DownloadSilentBar, DownloadSilentBar), \"on\": (DefaultDownloadProgressBar, DownloadProgressSpinner), \"ascii\": (DownloadBar,", "= sys.stdout message = \"%(percent)d%%\" suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\"", "Dict, List from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar from pip._vendor.progress.spinner", "in place while the progress display is active. \"\"\" self.finish()", "Python, and we cannot restore it. 
This probably should not", "# type: Any class InterruptibleMixin: \"\"\" Helper to ensure that", "and if not we'll fall back to the plaintext bar.", "if not encoding: return fallback # Collect all of the", "f\"eta {self.eta_td}\" # type: ignore return \"\" def iter(self, it):", "so we'll # add it. self.file.isatty = lambda: self.file.wrapped.isatty() #", "KeyboardInterrupt. if self.original_handler is None: self.original_handler = default_int_handler def finish(self):", "SIGINT handler for later. \"\"\" # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) #", "characters += list(getattr(preferred, \"phases\", [])) # Try to decode the", "time. The base progress bar class writes the \"hide cursor\"", "that, so we'll # add it. self.file.isatty = lambda: self.file.wrapped.isatty()", "def _select_progress_class(preferred, fallback): # type: (Bar, Bar) -> Bar encoding", "before the super() call, so that hide_cursor # is set", "the progress display is active. \"\"\" self.finish() self.original_handler(signum, frame) class", "including SystemError and # ImportError. except Exception: colorama = None", "signum, frame): # type: ignore \"\"\" Call self.finish() before delegating", "it. self.file.isatty = lambda: self.file.wrapped.isatty() # The progress code expects", "is set in time. The base progress bar class writes", "self.next_phase() suffix = self.suffix % self line = \"\".join( [", "example. \"\"\" def __init__(self, *args, **kwargs): # type: (List[Any], Dict[Any,", "\" if message else \"\", phase, \" \" if suffix", "else \"\", phase, \" \" if suffix else \"\", suffix,", "original SIGINT handler for later. \"\"\" # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs)", "progress display finishes normally, or gets interrupted. 
\"\"\" super().finish() #", "= \"%(downloaded)s %(download_speed)s\" def next_phase(self): # type: () -> str", "keyboard interrupt some time after a progress-displaying download has already", "use the ASCII bar. if not encoding: return fallback #", "Any]) -> None \"\"\" Save the original SIGINT handler for", "except Exception: colorama = None def _select_progress_class(preferred, fallback): # type:", "the terminal in its init, so if we don't set", "(like hidden cursors) behind. This class is similar to the", "= None def _select_progress_class(preferred, fallback): # type: (Bar, Bar) ->", "base progress bar class writes the \"hide cursor\" # code", "# Lots of different errors can come from this, including", "**kwargs): # type: (List[Any], Dict[Any, Any]) -> None # The", "x in it: yield x # B305 is incorrectly raised", "suffix = \"%(percent)d%%\" bar_prefix = \" \" bar_suffix = \"", "to the progress library's existing SigIntMixin helper, but as of", "BlueEmojiBar): pass class DownloadProgressSpinner( WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner ): file", "type: () -> str return format_size(self.index) # type: ignore @property", "handler for later. \"\"\" # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type:", "\"\\U0001F537\", \"\\U0001F535\") class DownloadProgressMixin: def __init__(self, *args, **kwargs): # type:", "we'll # add it. self.file.flush = lambda: self.file.wrapped.flush() class BaseDownloadProgressBar(WindowsMixin,", "-> None message = self.message % self phase = self.next_phase()", "phase, \" \" if suffix else \"\", suffix, ] )", "ASCII bar. if not encoding: return fallback # Collect all", "place even after an uninterrupted finish, which will have unexpected", "SIGINT handler, which # just raises KeyboardInterrupt. 
if self.original_handler is", "pip._vendor import colorama # Lots of different errors can come", "file is in, then we'll just assume # that it", "different errors can come from this, including SystemError and #", "allows downloads to be interrupted without leaving temporary state (like", "to ensure that self.finish() gets called on keyboard interrupt. This", "\"%(downloaded)s %(download_speed)s %(pretty_eta)s\" class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar): pass class DownloadSilentBar(BaseDownloadProgressBar, SilentBar):", "Restore the original SIGINT handler after finishing. This should happen", "fallback else: return preferred _BaseBar = _select_progress_class(IncrementalBar, Bar) # type:", "colorama module, # if we do then wrap our file", "str return format_size(self.index) # type: ignore @property def download_speed(self): #", "be able to call self.file.flush() # but the colorama.AnsiToWin32() object", "# https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore self.message = (\"", "type: (List[Any], Dict[Any, Any]) -> None \"\"\" Save the original", "call, so that hide_cursor # is set in time. The", "self.next(len(x)) # noqa: B305 self.finish() class WindowsMixin: def __init__(self, *args,", "bar class writes the \"hide cursor\" # code to the", "str if not hasattr(self, \"_phaser\"): self._phaser = itertools.cycle(self.phases) return next(self._phaser)", "with colorama. So we'll ensure that hide_cursor is False on", "import WINDOWS from pip._internal.utils.logging import get_indentation from pip._internal.utils.misc import format_size", "-> str return format_size(self.index) # type: ignore @property def download_speed(self):", "This call needs to go before the super() call, so", "else \"\", suffix, ] ) self.writeln(line) BAR_TYPES = { \"off\":", "SystemError and # ImportError. 
except Exception: colorama = None def", "(get_indentation() + 2)) + self.message # type: str @property def", "pass class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar): pass class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar): pass class", "Windows terminal does not support the hide/show cursor ANSI codes", "The base progress bar class writes the \"hide cursor\" #", "we're using for the bar using the encoding # of", "can use the # fancier bar and if not we'll", "3. It leaves its own handler in place even after", "existing SIGINT handler completely. 3. It leaves its own handler", "%(pretty_eta)s\" class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar): pass class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): pass class", "ignore @property def download_speed(self): # type: () -> str #", "here # https://github.com/PyCQA/flake8-bugbear/issues/59 self.next(len(x)) # noqa: B305 self.finish() class WindowsMixin:", "message = \"%(percent)d%%\" suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\" class DefaultDownloadProgressBar(BaseDownloadProgressBar,", "DownloadProgressProvider(progress_bar, max=None): # type: ignore if max is None or", "_select_progress_class(IncrementalBar, Bar) # type: Any class InterruptibleMixin: \"\"\" Helper to", "helper has the following problems: 1. It calls sys.exit(). 2.", "pass class DownloadProgressSpinner( WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner ): file =", "# type: ignore # The progress code expects to be", "progress bar class writes the \"hide cursor\" # code to", "max is None or max == 0: return BAR_TYPES[progress_bar][1]().iter else:", "%(download_speed)s\" def next_phase(self): # type: () -> str if not", "handler, which # just raises KeyboardInterrupt. if self.original_handler is None:", "it doesn't support unicode and use the ASCII bar. if", "file with it. 
if WINDOWS and colorama: self.file = colorama.AnsiToWin32(self.file)", "code expects to be able to call self.file.flush() # but", "\" phases = (\"\\U0001F539\", \"\\U0001F537\", \"\\U0001F535\") class DownloadProgressMixin: def __init__(self,", "super().__init__(*args, **kwargs) # type: ignore self.original_handler = signal(SIGINT, self.handle_sigint) #", "ignore # Check if we are running on Windows and", "# type: (List[Any], Dict[Any, Any]) -> None # https://github.com/python/mypy/issues/5887 super().__init__(*args,", "bar_suffix = \" \" phases = (\"\\U0001F539\", \"\\U0001F537\", \"\\U0001F535\") class", "# The Windows terminal does not support the hide/show cursor", "from pip._vendor.progress.spinner import Spinner from pip._internal.utils.compat import WINDOWS from pip._internal.utils.logging", "# type: () -> None pass class BlueEmojiBar(IncrementalBar): suffix =", "type: ignore self.message = (\" \" * (get_indentation() + 2))", "= \" \" phases = (\"\\U0001F539\", \"\\U0001F537\", \"\\U0001F535\") class DownloadProgressMixin:", "+ self.message # type: str @property def downloaded(self): # type:", "= lambda: self.file.wrapped.isatty() # The progress code expects to be", "expects to be able to call self.file.isatty() # but the", "are running on Windows and we have the colorama module,", "BAR_TYPES = { \"off\": (DownloadSilentBar, DownloadSilentBar), \"on\": (DefaultDownloadProgressBar, DownloadProgressSpinner), \"ascii\":", "import format_size try: from pip._vendor import colorama # Lots of", "on keyboard interrupt. This allows downloads to be interrupted without", "SIGINT handler completely. 3. 
It leaves its own handler in", "delayed effects if the user triggers an unrelated keyboard interrupt", "bad option should be Python's default SIGINT handler, which #", "raised here # https://github.com/PyCQA/flake8-bugbear/issues/59 self.next(len(x)) # noqa: B305 self.finish() class", "option should be Python's default SIGINT handler, which # just", "Bar, FillingCirclesBar, IncrementalBar from pip._vendor.progress.spinner import Spinner from pip._internal.utils.compat import", "ImportError. except Exception: colorama = None def _select_progress_class(preferred, fallback): #", "self._phaser = itertools.cycle(self.phases) return next(self._phaser) def update(self): # type: ()", "pass class DownloadBar(BaseDownloadProgressBar, Bar): pass class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar): pass class", "with no corresponding \"show\"... if WINDOWS and self.hide_cursor: # type:", "= sys.stdout suffix = \"%(downloaded)s %(download_speed)s\" def next_phase(self): # type:", "import sys from signal import SIGINT, default_int_handler, signal from typing", "IncrementalBar from pip._vendor.progress.spinner import Spinner from pip._internal.utils.compat import WINDOWS from", "\"hide cursor\" # code to the terminal in its init,", "the colorama.AnsiToWin32() object doesn't have that, so we'll # add", "colorama.AnsiToWin32(self.file) # type: ignore # The progress code expects to", "= [ getattr(preferred, \"empty_fill\", \"\"), getattr(preferred, \"fill\", \"\"), ] characters", "what encoding this file is in, then we'll just assume", "plaintext bar. try: \"\".join(characters).encode(encoding) except UnicodeEncodeError: return fallback else: return", "class InterruptibleMixin: \"\"\" Helper to ensure that self.finish() gets called", "do then wrap our file with it. 
if WINDOWS and", "yield x # B305 is incorrectly raised here # https://github.com/PyCQA/flake8-bugbear/issues/59", "it: yield x # B305 is incorrectly raised here #", "existing SigIntMixin helper, but as of version 1.2, that helper", "= itertools.cycle(self.phases) return next(self._phaser) def update(self): # type: () ->", "Spinner from pip._internal.utils.compat import WINDOWS from pip._internal.utils.logging import get_indentation from", "colorama # Lots of different errors can come from this,", "Collect all of the possible characters we want to use", "using the encoding # of the given file, if this", "List from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar from pip._vendor.progress.spinner import", "update(self): # type: () -> None pass class BlueEmojiBar(IncrementalBar): suffix", "None) # If we don't know what encoding this file", "= (\"\\U0001F539\", \"\\U0001F537\", \"\\U0001F535\") class DownloadProgressMixin: def __init__(self, *args, **kwargs):", "super() call, so that hide_cursor # is set in time.", "# type: ignore self.message = (\" \" * (get_indentation() +", "progress-displaying download has already completed, for example. \"\"\" def __init__(self,", "we get a \"hide\" with no corresponding \"show\"... if WINDOWS", "from signal import SIGINT, default_int_handler, signal from typing import Any,", "class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin): file = sys.stdout message = \"%(percent)d%%\"", "DownloadProgressSpinner), } def DownloadProgressProvider(progress_bar, max=None): # type: ignore if max", "before delegating to the original SIGINT handler. 
This handler should", "This should happen regardless of whether the progress display finishes", "# https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore # Check if", "\" bar_suffix = \" \" phases = (\"\\U0001F539\", \"\\U0001F537\", \"\\U0001F535\")", "*args, **kwargs): # type: (List[Any], Dict[Any, Any]) -> None #", "in, then we'll just assume # that it doesn't support", "SIGINT handler after finishing. This should happen regardless of whether", "a progress-displaying download has already completed, for example. \"\"\" def", "# If signal() returns None, the previous handler was not", "default SIGINT handler, which # just raises KeyboardInterrupt. if self.original_handler", "= self.message % self phase = self.next_phase() suffix = self.suffix", "It leaves its own handler in place even after an", "we do then wrap our file with it. if WINDOWS", "handler should only be in place while the progress display", "the hide/show cursor ANSI codes # even with colorama. So", "message, \" \" if message else \"\", phase, \" \"", "don't know what encoding this file is in, then we'll", "type: ignore self.original_handler = signal(SIGINT, self.handle_sigint) # If signal() returns", "self.message % self phase = self.next_phase() suffix = self.suffix %", "this, including SystemError and # ImportError. except Exception: colorama =", "should not happen, # but if it does, we must", "and use the ASCII bar. if not encoding: return fallback", "then wrap our file with it. if WINDOWS and colorama:", "progress display is active. \"\"\" self.finish() self.original_handler(signum, frame) class SilentBar(Bar):", "without leaving temporary state (like hidden cursors) behind. 
This class", "next_phase(self): # type: () -> str if not hasattr(self, \"_phaser\"):", "this works then we'll assume that we can use the", "the possible characters we want to use with the preferred", "Bar) # type: Any class InterruptibleMixin: \"\"\" Helper to ensure", "be in place while the progress display is active. \"\"\"", "if it does, we must restore something sensible instead, at", "# Windows. # This call needs to go before the", "if suffix else \"\", suffix, ] ) self.writeln(line) BAR_TYPES =", "(DownloadBlueEmojiProgressBar, DownloadProgressSpinner), } def DownloadProgressProvider(progress_bar, max=None): # type: ignore if", "ANSI codes # even with colorama. So we'll ensure that", "return \"...\" return format_size(1 / self.avg) + \"/s\" # type:", "getattr(preferred.file, \"encoding\", None) # If we don't know what encoding", "(DownloadBar, DownloadProgressSpinner), \"pretty\": (DownloadFillingCirclesBar, DownloadProgressSpinner), \"emoji\": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner), } def", "InterruptibleMixin: \"\"\" Helper to ensure that self.finish() gets called on", "bar. try: \"\".join(characters).encode(encoding) except UnicodeEncodeError: return fallback else: return preferred", "already completed, for example. \"\"\" def __init__(self, *args, **kwargs): #", "Call self.finish() before delegating to the original SIGINT handler. This", "# type: () -> None \"\"\" Restore the original SIGINT", "# The least bad option should be Python's default SIGINT", "\"/s\" # type: ignore @property def pretty_eta(self): # type: ()", "have that, so we'll # add it. self.file.isatty = lambda:", "does, we must restore something sensible instead, at least. 
#", "soon # enough, we get a \"hide\" with no corresponding", "= \"%(downloaded)s %(download_speed)s %(pretty_eta)s\" class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar): pass class DownloadSilentBar(BaseDownloadProgressBar,", "sys from signal import SIGINT, default_int_handler, signal from typing import", "ignore self.message = (\" \" * (get_indentation() + 2)) +", "*args, **kwargs): # type: (List[Any], Dict[Any, Any]) -> None \"\"\"", "that hide_cursor # is set in time. The base progress", "running on Windows and we have the colorama module, #", "DownloadSilentBar(BaseDownloadProgressBar, SilentBar): pass class DownloadBar(BaseDownloadProgressBar, Bar): pass class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar):", "class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar): pass class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar): pass class DownloadProgressSpinner(", "works then we'll assume that we can use the #", "# Check if we are running on Windows and we", "\"%(percent)d%%\" bar_prefix = \" \" bar_suffix = \" \" phases", "= \"%(percent)d%%\" bar_prefix = \" \" bar_suffix = \" \"", "handle_sigint(self, signum, frame): # type: ignore \"\"\" Call self.finish() before", "of version 1.2, that helper has the following problems: 1.", "hide_cursor is False on # Windows. # This call needs", "return next(self._phaser) def update(self): # type: () -> None message", "# type: (Bar, Bar) -> Bar encoding = getattr(preferred.file, \"encoding\",", "least bad option should be Python's default SIGINT handler, which", "place while the progress display is active. \"\"\" self.finish() self.original_handler(signum,", "to use with the preferred # bar. 
characters = [", "self.file.wrapped.flush() class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin): file = sys.stdout message =", "= \" \" bar_suffix = \" \" phases = (\"\\U0001F539\",", "{ \"off\": (DownloadSilentBar, DownloadSilentBar), \"on\": (DefaultDownloadProgressBar, DownloadProgressSpinner), \"ascii\": (DownloadBar, DownloadProgressSpinner),", "None message = self.message % self phase = self.next_phase() suffix", "super().__init__(*args, **kwargs) # type: ignore # Check if we are", "type: ignore if max is None or max == 0:", "Any class InterruptibleMixin: \"\"\" Helper to ensure that self.finish() gets", "import itertools import sys from signal import SIGINT, default_int_handler, signal", "2. It discards the existing SIGINT handler completely. 3. It", "(DefaultDownloadProgressBar, DownloadProgressSpinner), \"ascii\": (DownloadBar, DownloadProgressSpinner), \"pretty\": (DownloadFillingCirclesBar, DownloadProgressSpinner), \"emoji\": (DownloadBlueEmojiProgressBar,", "downloaded(self): # type: () -> str return format_size(self.index) # type:", "str if self.eta: # type: ignore return f\"eta {self.eta_td}\" #", "\"pretty\": (DownloadFillingCirclesBar, DownloadProgressSpinner), \"emoji\": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner), } def DownloadProgressProvider(progress_bar, max=None):", "DownloadProgressSpinner), \"ascii\": (DownloadBar, DownloadProgressSpinner), \"pretty\": (DownloadFillingCirclesBar, DownloadProgressSpinner), \"emoji\": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner),", "fallback # Collect all of the possible characters we want", "not support the hide/show cursor ANSI codes # even with", "def DownloadProgressProvider(progress_bar, max=None): # type: ignore if max is None", "try: \"\".join(characters).encode(encoding) except UnicodeEncodeError: return fallback else: return preferred _BaseBar", "we must restore something sensible instead, at least. 
# The", "InterruptibleMixin, DownloadProgressMixin, Spinner ): file = sys.stdout suffix = \"%(downloaded)s", "in time. The base progress bar class writes the \"hide", "= self.next_phase() suffix = self.suffix % self line = \"\".join(", "self.hide_cursor = False # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore", "we don't know what encoding this file is in, then", "Helper to ensure that self.finish() gets called on keyboard interrupt.", "SilentBar(Bar): def update(self): # type: () -> None pass class", "ignore \"\"\" Call self.finish() before delegating to the original SIGINT", "add it. self.file.flush = lambda: self.file.wrapped.flush() class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin):", "of the possible characters we want to use with the", "Python's default SIGINT handler, which # just raises KeyboardInterrupt. if", "the original SIGINT handler. This handler should only be in", "pretty_eta(self): # type: () -> str if self.eta: # type:", "to the terminal in its init, so if we don't", "set this soon # enough, we get a \"hide\" with", "sys.stdout suffix = \"%(downloaded)s %(download_speed)s\" def next_phase(self): # type: ()", "restore something sensible instead, at least. # The least bad", "to decode the characters we're using for the bar using", "discards the existing SIGINT handler completely. 3. It leaves its", "handler in place even after an uninterrupted finish, which will", "None def _select_progress_class(preferred, fallback): # type: (Bar, Bar) -> Bar", "download has already completed, for example. 
\"\"\" def __init__(self, *args,", "an uninterrupted finish, which will have unexpected delayed effects if", "self.eta: # type: ignore return f\"eta {self.eta_td}\" # type: ignore", "type: () -> None \"\"\" Restore the original SIGINT handler", "# type: () -> None message = self.message % self", "return f\"eta {self.eta_td}\" # type: ignore return \"\" def iter(self,", "bar using the encoding # of the given file, if", "\"_phaser\"): self._phaser = itertools.cycle(self.phases) return next(self._phaser) def update(self): # type:", "# https://github.com/PyCQA/flake8-bugbear/issues/59 self.next(len(x)) # noqa: B305 self.finish() class WindowsMixin: def", "even after an uninterrupted finish, which will have unexpected delayed", "preferred # bar. characters = [ getattr(preferred, \"empty_fill\", \"\"), getattr(preferred,", "or gets interrupted. \"\"\" super().finish() # type: ignore signal(SIGINT, self.original_handler)", "type: ignore # The progress code expects to be able", "getattr(preferred, \"fill\", \"\"), ] characters += list(getattr(preferred, \"phases\", [])) #", "self.file = colorama.AnsiToWin32(self.file) # type: ignore # The progress code", "self.original_handler(signum, frame) class SilentBar(Bar): def update(self): # type: () ->", "type: ignore self.hide_cursor = False # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) #", "if WINDOWS and colorama: self.file = colorama.AnsiToWin32(self.file) # type: ignore", "[])) # Try to decode the characters we're using for", "def downloaded(self): # type: () -> str return format_size(self.index) #", "that self.finish() gets called on keyboard interrupt. This allows downloads", "happen regardless of whether the progress display finishes normally, or", "SIGINT handler. 
This handler should only be in place while", "an unrelated keyboard interrupt some time after a progress-displaying download", "0.0: # type: ignore return \"...\" return format_size(1 / self.avg)", "to the original SIGINT handler. This handler should only be", "# type: ignore return \"...\" return format_size(1 / self.avg) +", "given file, if this works then we'll assume that we", "doesn't have that, so we'll # add it. self.file.flush =", "file = sys.stdout suffix = \"%(downloaded)s %(download_speed)s\" def next_phase(self): #", "the given file, if this works then we'll assume that", "not happen, # but if it does, we must restore", "return format_size(1 / self.avg) + \"/s\" # type: ignore @property", "come from this, including SystemError and # ImportError. except Exception:", "get a \"hide\" with no corresponding \"show\"... if WINDOWS and", "\"hide\" with no corresponding \"show\"... if WINDOWS and self.hide_cursor: #", "\"empty_fill\", \"\"), getattr(preferred, \"fill\", \"\"), ] characters += list(getattr(preferred, \"phases\",", "in it: yield x # B305 is incorrectly raised here", "suffix = \"%(downloaded)s %(download_speed)s %(pretty_eta)s\" class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar): pass class", "\"emoji\": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner), } def DownloadProgressProvider(progress_bar, max=None): # type: ignore", "the plaintext bar. 
try: \"\".join(characters).encode(encoding) except UnicodeEncodeError: return fallback else:", "colorama.AnsiToWin32() object doesn't have that, so we'll # add it.", "have unexpected delayed effects if the user triggers an unrelated", "ignore self.hide_cursor = False # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type:", "The Windows terminal does not support the hide/show cursor ANSI", "If signal() returns None, the previous handler was not installed", "DownloadProgressMixin): file = sys.stdout message = \"%(percent)d%%\" suffix = \"%(downloaded)s", "# but if it does, we must restore something sensible", "str @property def downloaded(self): # type: () -> str return", "terminal does not support the hide/show cursor ANSI codes #", "class DownloadProgressMixin: def __init__(self, *args, **kwargs): # type: (List[Any], Dict[Any,", "+ \"/s\" # type: ignore @property def pretty_eta(self): # type:", "terminal in its init, so if we don't set this", "with it. if WINDOWS and colorama: self.file = colorama.AnsiToWin32(self.file) #", "possible characters we want to use with the preferred #", "WindowsMixin, InterruptibleMixin, DownloadProgressMixin, Spinner ): file = sys.stdout suffix =", "even with colorama. So we'll ensure that hide_cursor is False", "problems: 1. It calls sys.exit(). 2. It discards the existing", "self.avg == 0.0: # type: ignore return \"...\" return format_size(1", "else: return preferred _BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any", "gets interrupted. \"\"\" super().finish() # type: ignore signal(SIGINT, self.original_handler) def", "None, the previous handler was not installed from # Python,", "characters we're using for the bar using the encoding #", "if not we'll fall back to the plaintext bar. 
try:", "own handler in place even after an uninterrupted finish, which", "signal(SIGINT, self.handle_sigint) # If signal() returns None, the previous handler", "= False # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore #", "None: self.original_handler = default_int_handler def finish(self): # type: () ->", "self.original_handler = signal(SIGINT, self.handle_sigint) # If signal() returns None, the", "add it. self.file.isatty = lambda: self.file.wrapped.isatty() # The progress code", "class DefaultDownloadProgressBar(BaseDownloadProgressBar, _BaseBar): pass class DownloadSilentBar(BaseDownloadProgressBar, SilentBar): pass class DownloadBar(BaseDownloadProgressBar,", "https://github.com/PyCQA/flake8-bugbear/issues/59 self.next(len(x)) # noqa: B305 self.finish() class WindowsMixin: def __init__(self,", "we'll fall back to the plaintext bar. try: \"\".join(characters).encode(encoding) except", "_select_progress_class(preferred, fallback): # type: (Bar, Bar) -> Bar encoding =", "to go before the super() call, so that hide_cursor #", "class SilentBar(Bar): def update(self): # type: () -> None pass", "\"\".join(characters).encode(encoding) except UnicodeEncodeError: return fallback else: return preferred _BaseBar =", "enough, we get a \"hide\" with no corresponding \"show\"... if", "class writes the \"hide cursor\" # code to the terminal", "codes # even with colorama. So we'll ensure that hide_cursor", "{self.eta_td}\" # type: ignore return \"\" def iter(self, it): #", "after a progress-displaying download has already completed, for example. 
\"\"\"", "(Bar, Bar) -> Bar encoding = getattr(preferred.file, \"encoding\", None) #", "import Bar, FillingCirclesBar, IncrementalBar from pip._vendor.progress.spinner import Spinner from pip._internal.utils.compat", "# type: ignore \"\"\" Call self.finish() before delegating to the", "# type: ignore self.hide_cursor = False # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs)", "division errors... if self.avg == 0.0: # type: ignore return", "This handler should only be in place while the progress", "interrupted. \"\"\" super().finish() # type: ignore signal(SIGINT, self.original_handler) def handle_sigint(self,", "hide/show cursor ANSI codes # even with colorama. So we'll", "DownloadProgressSpinner), \"emoji\": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner), } def DownloadProgressProvider(progress_bar, max=None): # type:", "self.finish() gets called on keyboard interrupt. This allows downloads to", "update(self): # type: () -> None message = self.message %", "\" * (get_indentation() + 2)) + self.message # type: str", "\"\", suffix, ] ) self.writeln(line) BAR_TYPES = { \"off\": (DownloadSilentBar,", "default_int_handler, signal from typing import Any, Dict, List from pip._vendor.progress.bar", ") self.writeln(line) BAR_TYPES = { \"off\": (DownloadSilentBar, DownloadSilentBar), \"on\": (DefaultDownloadProgressBar,", "we cannot restore it. This probably should not happen, #", "def next_phase(self): # type: () -> str if not hasattr(self,", "# type: () -> str # Avoid zero division errors...", "handler. This handler should only be in place while the", "from # Python, and we cannot restore it. This probably", "be Python's default SIGINT handler, which # just raises KeyboardInterrupt.", "return format_size(self.index) # type: ignore @property def download_speed(self): # type:", "% self phase = self.next_phase() suffix = self.suffix % self", "= self.suffix % self line = \"\".join( [ message, \"", "hide_cursor # is set in time. 
The base progress bar", "after finishing. This should happen regardless of whether the progress", "we have the colorama module, # if we do then", "for x in it: yield x # B305 is incorrectly", "all of the possible characters we want to use with", "self.finish() class WindowsMixin: def __init__(self, *args, **kwargs): # type: (List[Any],", "False on # Windows. # This call needs to go", "bar and if not we'll fall back to the plaintext", "super().finish() # type: ignore signal(SIGINT, self.original_handler) def handle_sigint(self, signum, frame):", "call self.file.isatty() # but the colorama.AnsiToWin32() object doesn't have that,", "-> None # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore self.message", "The progress code expects to be able to call self.file.isatty()", "finish, which will have unexpected delayed effects if the user", "The least bad option should be Python's default SIGINT handler,", "format_size(1 / self.avg) + \"/s\" # type: ignore @property def", "# enough, we get a \"hide\" with no corresponding \"show\"...", "**kwargs) # type: ignore self.message = (\" \" * (get_indentation()", "a \"hide\" with no corresponding \"show\"... if WINDOWS and self.hide_cursor:", "type: () -> str if not hasattr(self, \"_phaser\"): self._phaser =", "\"\"\" Restore the original SIGINT handler after finishing. This should", "we'll # add it. self.file.isatty = lambda: self.file.wrapped.isatty() # The", "B305 self.finish() class WindowsMixin: def __init__(self, *args, **kwargs): # type:", "just raises KeyboardInterrupt. if self.original_handler is None: self.original_handler = default_int_handler", "expects to be able to call self.file.flush() # but the", "of different errors can come from this, including SystemError and", "handler after finishing. 
This should happen regardless of whether the", "\"\"\" Helper to ensure that self.finish() gets called on keyboard", "False # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore # Check", "type: ignore signal(SIGINT, self.original_handler) def handle_sigint(self, signum, frame): # type:", "probably should not happen, # but if it does, we", "the original SIGINT handler after finishing. This should happen regardless", "but if it does, we must restore something sensible instead,", "least. # The least bad option should be Python's default", "self.file.isatty = lambda: self.file.wrapped.isatty() # The progress code expects to", "# type: (List[Any], Dict[Any, Any]) -> None \"\"\" Save the", "self.suffix % self line = \"\".join( [ message, \" \"", "**kwargs) # type: ignore self.original_handler = signal(SIGINT, self.handle_sigint) # If", "no corresponding \"show\"... if WINDOWS and self.hide_cursor: # type: ignore", "not installed from # Python, and we cannot restore it.", "# that it doesn't support unicode and use the ASCII", "the colorama module, # if we do then wrap our", "doesn't have that, so we'll # add it. self.file.isatty =", "pip._internal.utils.compat import WINDOWS from pip._internal.utils.logging import get_indentation from pip._internal.utils.misc import", "needs to go before the super() call, so that hide_cursor", "be interrupted without leaving temporary state (like hidden cursors) behind.", "self.file.flush() # but the colorama.AnsiToWin32() object doesn't have that, so", "bar_prefix = \" \" bar_suffix = \" \" phases =", "colorama = None def _select_progress_class(preferred, fallback): # type: (Bar, Bar)", "handler completely. 3. It leaves its own handler in place", "x # B305 is incorrectly raised here # https://github.com/PyCQA/flake8-bugbear/issues/59 self.next(len(x))", "it. 
self.file.flush = lambda: self.file.wrapped.flush() class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin): file", "for the bar using the encoding # of the given", "Dict[Any, Any]) -> None # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type:", "def pretty_eta(self): # type: () -> str if self.eta: #", "unicode and use the ASCII bar. if not encoding: return", "cursors) behind. This class is similar to the progress library's", "it does, we must restore something sensible instead, at least.", "Any]) -> None # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore", "suffix else \"\", suffix, ] ) self.writeln(line) BAR_TYPES = {", "(List[Any], Dict[Any, Any]) -> None \"\"\" Save the original SIGINT", "_BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any class InterruptibleMixin: \"\"\"", "from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar from pip._vendor.progress.spinner import Spinner", "list(getattr(preferred, \"phases\", [])) # Try to decode the characters we're", "then we'll assume that we can use the # fancier", "is None: self.original_handler = default_int_handler def finish(self): # type: ()", "we'll ensure that hide_cursor is False on # Windows. #", "class DownloadBar(BaseDownloadProgressBar, Bar): pass class DownloadFillingCirclesBar(BaseDownloadProgressBar, FillingCirclesBar): pass class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar,", "the ASCII bar. 
if not encoding: return fallback # Collect", "type: () -> None pass class BlueEmojiBar(IncrementalBar): suffix = \"%(percent)d%%\"", "class BlueEmojiBar(IncrementalBar): suffix = \"%(percent)d%%\" bar_prefix = \" \" bar_suffix", "() -> str if not hasattr(self, \"_phaser\"): self._phaser = itertools.cycle(self.phases)", "FillingCirclesBar): pass class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar): pass class DownloadProgressSpinner( WindowsMixin, InterruptibleMixin,", "% self line = \"\".join( [ message, \" \" if", "**kwargs): # type: (List[Any], Dict[Any, Any]) -> None # https://github.com/python/mypy/issues/5887", "The progress code expects to be able to call self.file.flush()", "/ self.avg) + \"/s\" # type: ignore @property def pretty_eta(self):", "type: Any class InterruptibleMixin: \"\"\" Helper to ensure that self.finish()", "self.original_handler = default_int_handler def finish(self): # type: () -> None", "call self.file.flush() # but the colorama.AnsiToWin32() object doesn't have that,", "None # The Windows terminal does not support the hide/show", "phase = self.next_phase() suffix = self.suffix % self line =", "= lambda: self.file.wrapped.flush() class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin): file = sys.stdout", "original SIGINT handler after finishing. This should happen regardless of", "suffix = self.suffix % self line = \"\".join( [ message,", "should only be in place while the progress display is", "keyboard interrupt. 
This allows downloads to be interrupted without leaving", "@property def downloaded(self): # type: () -> str return format_size(self.index)", "WINDOWS and self.hide_cursor: # type: ignore self.hide_cursor = False #", "then we'll just assume # that it doesn't support unicode", "phases = (\"\\U0001F539\", \"\\U0001F537\", \"\\U0001F535\") class DownloadProgressMixin: def __init__(self, *args,", "self line = \"\".join( [ message, \" \" if message", "not hasattr(self, \"_phaser\"): self._phaser = itertools.cycle(self.phases) return next(self._phaser) def update(self):", "later. \"\"\" # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore self.original_handler", "we are running on Windows and we have the colorama", "is similar to the progress library's existing SigIntMixin helper, but", "It discards the existing SIGINT handler completely. 3. It leaves", "return \"\" def iter(self, it): # type: ignore for x", "instead, at least. # The least bad option should be", "and # ImportError. except Exception: colorama = None def _select_progress_class(preferred,", "# Python, and we cannot restore it. This probably should", "from pip._internal.utils.compat import WINDOWS from pip._internal.utils.logging import get_indentation from pip._internal.utils.misc", "to be able to call self.file.isatty() # but the colorama.AnsiToWin32()", "max=None): # type: ignore if max is None or max", "following problems: 1. It calls sys.exit(). 2. It discards the", "# https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore self.original_handler = signal(SIGINT,", "Exception: colorama = None def _select_progress_class(preferred, fallback): # type: (Bar,", "while the progress display is active. \"\"\" self.finish() self.original_handler(signum, frame)", "message else \"\", phase, \" \" if suffix else \"\",", "leaving temporary state (like hidden cursors) behind. This class is", "it. 
This probably should not happen, # but if it", "= getattr(preferred.file, \"encoding\", None) # If we don't know what", "def finish(self): # type: () -> None \"\"\" Restore the", "(\"\\U0001F539\", \"\\U0001F537\", \"\\U0001F535\") class DownloadProgressMixin: def __init__(self, *args, **kwargs): #", "# add it. self.file.isatty = lambda: self.file.wrapped.isatty() # The progress", "lambda: self.file.wrapped.isatty() # The progress code expects to be able", "type: (Bar, Bar) -> Bar encoding = getattr(preferred.file, \"encoding\", None)", "BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin): file = sys.stdout message = \"%(percent)d%%\" suffix", "\"\"\" Save the original SIGINT handler for later. \"\"\" #", "# Avoid zero division errors... if self.avg == 0.0: #", "its own handler in place even after an uninterrupted finish,", "# This call needs to go before the super() call,", "typing import Any, Dict, List from pip._vendor.progress.bar import Bar, FillingCirclesBar,", "be able to call self.file.isatty() # but the colorama.AnsiToWin32() object", "we'll just assume # that it doesn't support unicode and", "incorrectly raised here # https://github.com/PyCQA/flake8-bugbear/issues/59 self.next(len(x)) # noqa: B305 self.finish()", "is in, then we'll just assume # that it doesn't", "display finishes normally, or gets interrupted. \"\"\" super().finish() # type:", "from pip._vendor import colorama # Lots of different errors can", "(List[Any], Dict[Any, Any]) -> None # The Windows terminal does", "interrupt. 
This allows downloads to be interrupted without leaving temporary", "\"\"), getattr(preferred, \"fill\", \"\"), ] characters += list(getattr(preferred, \"phases\", []))", "pass class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar, BlueEmojiBar): pass class DownloadProgressSpinner( WindowsMixin, InterruptibleMixin, DownloadProgressMixin,", "fancier bar and if not we'll fall back to the", "\" \" bar_suffix = \" \" phases = (\"\\U0001F539\", \"\\U0001F537\",", "called on keyboard interrupt. This allows downloads to be interrupted", "signal from typing import Any, Dict, List from pip._vendor.progress.bar import", "installed from # Python, and we cannot restore it. This", "have that, so we'll # add it. self.file.flush = lambda:", "return preferred _BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any class", "can come from this, including SystemError and # ImportError. except", "import colorama # Lots of different errors can come from", "@property def pretty_eta(self): # type: () -> str if self.eta:", "self.hide_cursor: # type: ignore self.hide_cursor = False # https://github.com/python/mypy/issues/5887 super().__init__(*args,", "our file with it. if WINDOWS and colorama: self.file =", "1. It calls sys.exit(). 2. It discards the existing SIGINT", "1.2, that helper has the following problems: 1. 
It calls", "the \"hide cursor\" # code to the terminal in its", "to call self.file.flush() # but the colorama.AnsiToWin32() object doesn't have", "DownloadProgressSpinner), \"pretty\": (DownloadFillingCirclesBar, DownloadProgressSpinner), \"emoji\": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner), } def DownloadProgressProvider(progress_bar,", "support the hide/show cursor ANSI codes # even with colorama.", "encoding: return fallback # Collect all of the possible characters", "download_speed(self): # type: () -> str # Avoid zero division", "__init__(self, *args, **kwargs): # type: (List[Any], Dict[Any, Any]) -> None", "only be in place while the progress display is active.", "type: ignore @property def pretty_eta(self): # type: () -> str", "import Spinner from pip._internal.utils.compat import WINDOWS from pip._internal.utils.logging import get_indentation", "If we don't know what encoding this file is in,", "should happen regardless of whether the progress display finishes normally,", "This allows downloads to be interrupted without leaving temporary state", "Try to decode the characters we're using for the bar", "so if we don't set this soon # enough, we", "DownloadProgressMixin: def __init__(self, *args, **kwargs): # type: (List[Any], Dict[Any, Any])", "able to call self.file.flush() # but the colorama.AnsiToWin32() object doesn't", "able to call self.file.isatty() # but the colorama.AnsiToWin32() object doesn't", "self.file.wrapped.isatty() # The progress code expects to be able to", "\"ascii\": (DownloadBar, DownloadProgressSpinner), \"pretty\": (DownloadFillingCirclesBar, DownloadProgressSpinner), \"emoji\": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner), }", "the existing SIGINT handler completely. 3. 
It leaves its own", "self.message = (\" \" * (get_indentation() + 2)) + self.message", "Bar encoding = getattr(preferred.file, \"encoding\", None) # If we don't", "Dict[Any, Any]) -> None \"\"\" Save the original SIGINT handler", "support unicode and use the ASCII bar. if not encoding:", "progress library's existing SigIntMixin helper, but as of version 1.2,", "None pass class BlueEmojiBar(IncrementalBar): suffix = \"%(percent)d%%\" bar_prefix = \"", "def download_speed(self): # type: () -> str # Avoid zero", "self.finish() self.original_handler(signum, frame) class SilentBar(Bar): def update(self): # type: ()", "colorama: self.file = colorama.AnsiToWin32(self.file) # type: ignore # The progress", "= _select_progress_class(IncrementalBar, Bar) # type: Any class InterruptibleMixin: \"\"\" Helper", "has already completed, for example. \"\"\" def __init__(self, *args, **kwargs):", "the previous handler was not installed from # Python, and", "**kwargs) # type: ignore # Check if we are running", "# type: (List[Any], Dict[Any, Any]) -> None # The Windows", "\"\" def iter(self, it): # type: ignore for x in", "which will have unexpected delayed effects if the user triggers", "Bar) -> Bar encoding = getattr(preferred.file, \"encoding\", None) # If", "go before the super() call, so that hide_cursor # is", "self.file.flush = lambda: self.file.wrapped.flush() class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin, DownloadProgressMixin): file =", "DownloadSilentBar), \"on\": (DefaultDownloadProgressBar, DownloadProgressSpinner), \"ascii\": (DownloadBar, DownloadProgressSpinner), \"pretty\": (DownloadFillingCirclesBar, DownloadProgressSpinner),", "for later. \"\"\" # https://github.com/python/mypy/issues/5887 super().__init__(*args, **kwargs) # type: ignore", "just assume # that it doesn't support unicode and use" ]
[ "Number of runs. Each run takes a sample in `params.hyper_space`", "or `minimize` the metric. (default: 'maximize') :param num_runs: Number of", "= tuner.tune() >>> sorted(results['best'].keys()) ['#', 'params', 'sample', 'score'] \"\"\" tuner", "pass to `fit`. (default: `dict(epochs=10, verbose=0)`) :param metric: Metric to", "`Tuner` builds a model, for each hyper parameter in `model.params`,", "be used. See `tutorials/model_tuning.ipynb` for a detailed walkthrough on usage.", "use. Should be a `DataLoader`. :param validloader: Testing data to", "Tuner( params=params, optimizer=optimizer, trainloader=trainloader, validloader=validloader, embedding=embedding, fit_kwargs=fit_kwargs, metric=metric, mode=mode, num_runs=num_runs,", "a sample will be taken in the space. However, if", "typing.Union[str, BaseMetric] = None, mode: str = 'maximize', num_runs: int", "results = tuner.tune() >>> sorted(results['best'].keys()) ['#', 'params', 'sample', 'score'] \"\"\"", "str = 'adam', trainloader: mz.dataloader.DataLoader = None, validloader: mz.dataloader.DataLoader =", "space, which is the cross-product of individual hyper parameter's hyper", ">>> model.params['task'] = mz.tasks.Ranking() >>> optimizer = 'adam' >>> embedding", "dict = None, metric: typing.Union[str, BaseMetric] = None, mode: str", "import BaseMetric from .tuner import Tuner def tune( params: 'mz.ParamTable',", "import Tuner def tune( params: 'mz.ParamTable', optimizer: str = 'adam',", "`model.params` of the desired model to tune. `params.completed()` should be", "to tune. `params.completed()` should be `True`. :param optimizer: Str or", "... validloader=validloader, ... embedding=embedding, ... num_runs=1, ... verbose=0 ... )", "from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric from .tuner import Tuner def tune(", "in `model.params['task'].metrics`. (default: the first metric in `params.['task'].metrics`. 
:param mode:", "model.params['task'] = mz.tasks.Ranking() >>> optimizer = 'adam' >>> embedding =", "= mz.datasets.toy.load_data('dev') >>> prpr = mz.models.DenseBaseline.get_default_preprocessor() >>> train = prpr.fit_transform(train,", "model, for each hyper parameter in `model.params`, if the hyper-parameter", "'adam', trainloader: mz.dataloader.DataLoader = None, validloader: mz.dataloader.DataLoader = None, embedding:", "of the hyper-parameter will be used. See `tutorials/model_tuning.ipynb` for a", "params=model.params, ... optimizer=optimizer, ... trainloader=trainloader, ... validloader=validloader, ... embedding=embedding, ...", "tune. `params.completed()` should be `True`. :param optimizer: Str or `Optimizer`", "runs. Each run takes a sample in `params.hyper_space` and build", "a detailed walkthrough on usage. :param params: A completed parameter", "be a `DataLoader`. :param embedding: Embedding used by model. :param", "mz.dataloader.DataLoader = None, embedding: np.ndarray = None, fit_kwargs: dict =", "'maximize') :param num_runs: Number of runs. Each run takes a", "mz.models.DenseBaseline.get_default_preprocessor() >>> train = prpr.fit_transform(train, verbose=0) >>> valid = prpr.transform(valid,", "optimizer=optimizer, ... trainloader=trainloader, ... validloader=validloader, ... embedding=embedding, ... num_runs=1, ...", "will be taken in the space. However, if the hyper-parameter", ":param callbacks: A list of callbacks to handle. Handled sequentially", "on usage. :param params: A completed parameter table to tune.", "verbose=0 ... ) >>> results = tuner.tune() >>> sorted(results['best'].keys()) ['#',", "space. However, if the hyper-parameter does not have a hyper-space,", "'score'] \"\"\" tuner = Tuner( params=params, optimizer=optimizer, trainloader=trainloader, validloader=validloader, embedding=embedding,", "taken in the space. However, if the hyper-parameter does not", "Handled sequentially at every callback point. :param verbose: Verbosity. 
(default:", "use. Should be a `DataLoader`. :param embedding: Embedding used by", "`DataLoader`. :param validloader: Testing data to use. Should be a", "embedding=embedding, ... num_runs=1, ... verbose=0 ... ) >>> results =", "a hyper-space, then a sample will be taken in the", "for optimizing model. :param trainloader: Training data to use. Should", "import numpy as np import scripts.study_case.ID_5.matchzoo as mz from scripts.study_case.ID_5.matchzoo.engine.base_metric", "the metric. (default: 'maximize') :param num_runs: Number of runs. Each", "in `model.params`, if the hyper-parameter has a hyper-space, then a", "hyper-parameter will be used. See `tutorials/model_tuning.ipynb` for a detailed walkthrough", "be a `DataLoader`. :param validloader: Testing data to use. Should", "train = prpr.fit_transform(train, verbose=0) >>> valid = prpr.transform(valid, verbose=0) >>>", "to handle. Handled sequentially at every callback point. :param verbose:", "`Optimizer` class. Optimizer for optimizing model. :param trainloader: Training data", "Testing data to use. Should be a `DataLoader`. :param embedding:", "to use. Should be a `DataLoader`. :param embedding: Embedding used", "callback point. :param verbose: Verbosity. (default: 1) Example: >>> import", "`DataLoader`. :param embedding: Embedding used by model. :param fit_kwargs: Extra", "valid = mz.datasets.toy.load_data('dev') >>> prpr = mz.models.DenseBaseline.get_default_preprocessor() >>> train =", "Tune model hyper-parameters. A simple shorthand for using :class:`matchzoo.auto.Tuner`. `model.params.hyper_space`", "to `fit`. (default: `dict(epochs=10, verbose=0)`) :param metric: Metric to tune", "then the default value of the hyper-parameter will be used.", "have a hyper-space, then the default value of the hyper-parameter", "on the sample. (default: 10) :param callbacks: A list of", "... (prpr.context['vocab_size'], 100)) >>> tuner = mz.auto.Tuner( ... params=model.params, ...", "by model. 
:param fit_kwargs: Extra keyword arguments to pass to", "... ) >>> results = tuner.tune() >>> sorted(results['best'].keys()) ['#', 'params',", "individual hyper parameter's hyper space. When a `Tuner` builds a", "import typing import numpy as np import scripts.study_case.ID_5.matchzoo as mz", "a `DataLoader`. :param embedding: Embedding used by model. :param fit_kwargs:", "= mz.models.DenseBaseline() >>> model.params['task'] = mz.tasks.Ranking() >>> optimizer = 'adam'", "): \"\"\" Tune model hyper-parameters. A simple shorthand for using", "is the cross-product of individual hyper parameter's hyper space. When", ":param trainloader: Training data to use. Should be a `DataLoader`.", "metrics in `model.params['task'].metrics`. (default: the first metric in `params.['task'].metrics`. :param", "every callback point. :param verbose: Verbosity. (default: 1) Example: >>>", "Verbosity. (default: 1) Example: >>> import scripts.study_case.ID_5.matchzoo as mz >>>", "model to tune. `params.completed()` should be `True`. :param optimizer: Str", ":param metric: Metric to tune upon. Must be one of", "When a `Tuner` builds a model, for each hyper parameter", "the sample. (default: 10) :param callbacks: A list of callbacks", "... verbose=0 ... ) >>> results = tuner.tune() >>> sorted(results['best'].keys())", "tune( params: 'mz.ParamTable', optimizer: str = 'adam', trainloader: mz.dataloader.DataLoader =", "which is the cross-product of individual hyper parameter's hyper space.", "optimizer = 'adam' >>> embedding = np.random.uniform(-0.2, 0.2, ... (prpr.context['vocab_size'],", "hyper parameter in `model.params`, if the hyper-parameter has a hyper-space,", "hyper-space, then a sample will be taken in the space.", "should be `True`. :param optimizer: Str or `Optimizer` class. 
Optimizer", ">>> validloader = mz.dataloader.DataLoader(validset, callback=padding) >>> model = mz.models.DenseBaseline() >>>", "num_runs: int = 10, verbose=1 ): \"\"\" Tune model hyper-parameters.", "a sample in `params.hyper_space` and build a model based on", "None, fit_kwargs: dict = None, metric: typing.Union[str, BaseMetric] = None,", "embedding = np.random.uniform(-0.2, 0.2, ... (prpr.context['vocab_size'], 100)) >>> tuner =", "of the metrics in `model.params['task'].metrics`. (default: the first metric in", "= prpr.fit_transform(train, verbose=0) >>> valid = prpr.transform(valid, verbose=0) >>> trainset", "space. When a `Tuner` builds a model, for each hyper", "= mz.dataloader.Dataset(train) >>> validset = mz.dataloader.Dataset(valid) >>> padding = mz.models.DenseBaseline.get_default_padding_callback()", ":class:`matchzoo.auto.Tuner`. `model.params.hyper_space` reprensents the model's hyper-parameters search space, which is", ":param verbose: Verbosity. (default: 1) Example: >>> import scripts.study_case.ID_5.matchzoo as", "validloader=validloader, ... embedding=embedding, ... num_runs=1, ... verbose=0 ... ) >>>", "based on the sample. (default: 10) :param callbacks: A list", "(prpr.context['vocab_size'], 100)) >>> tuner = mz.auto.Tuner( ... params=model.params, ... optimizer=optimizer,", ">>> valid = mz.datasets.toy.load_data('dev') >>> prpr = mz.models.DenseBaseline.get_default_preprocessor() >>> train", "mz.datasets.toy.load_data('train') >>> valid = mz.datasets.toy.load_data('dev') >>> prpr = mz.models.DenseBaseline.get_default_preprocessor() >>>", "= 'maximize', num_runs: int = 10, verbose=1 ): \"\"\" Tune", "verbose=0)`) :param metric: Metric to tune upon. Must be one", "Either `maximize` the metric or `minimize` the metric. (default: 'maximize')", "mz >>> import numpy as np >>> train = mz.datasets.toy.load_data('train')", "100)) >>> tuner = mz.auto.Tuner( ... params=model.params, ... 
optimizer=optimizer, ...", "= None, metric: typing.Union[str, BaseMetric] = None, mode: str =", "'maximize', num_runs: int = 10, verbose=1 ): \"\"\" Tune model", "Extra keyword arguments to pass to `fit`. (default: `dict(epochs=10, verbose=0)`)", "`tutorials/model_tuning.ipynb` for a detailed walkthrough on usage. :param params: A", "trainloader=trainloader, validloader=validloader, embedding=embedding, fit_kwargs=fit_kwargs, metric=metric, mode=mode, num_runs=num_runs, verbose=verbose ) return", "import scripts.study_case.ID_5.matchzoo as mz >>> import numpy as np >>>", "not have a hyper-space, then the default value of the", "verbose: Verbosity. (default: 1) Example: >>> import scripts.study_case.ID_5.matchzoo as mz", "mz.datasets.toy.load_data('dev') >>> prpr = mz.models.DenseBaseline.get_default_preprocessor() >>> train = prpr.fit_transform(train, verbose=0)", "reprensents the model's hyper-parameters search space, which is the cross-product", "verbose=1 ): \"\"\" Tune model hyper-parameters. A simple shorthand for", ":param mode: Either `maximize` the metric or `minimize` the metric.", "in `params.['task'].metrics`. :param mode: Either `maximize` the metric or `minimize`", ">>> trainset = mz.dataloader.Dataset(train) >>> validset = mz.dataloader.Dataset(valid) >>> padding", "the space. However, if the hyper-parameter does not have a", ":param params: A completed parameter table to tune. Usually `model.params`", "tuner = mz.auto.Tuner( ... params=model.params, ... optimizer=optimizer, ... 
trainloader=trainloader, ...", "for each hyper parameter in `model.params`, if the hyper-parameter has", "tuner = Tuner( params=params, optimizer=optimizer, trainloader=trainloader, validloader=validloader, embedding=embedding, fit_kwargs=fit_kwargs, metric=metric,", "the hyper-parameter has a hyper-space, then a sample will be", "mz.dataloader.Dataset(valid) >>> padding = mz.models.DenseBaseline.get_default_padding_callback() >>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding)", "1) Example: >>> import scripts.study_case.ID_5.matchzoo as mz >>> import numpy", "num_runs=1, ... verbose=0 ... ) >>> results = tuner.tune() >>>", "Tuner def tune( params: 'mz.ParamTable', optimizer: str = 'adam', trainloader:", "prpr.transform(valid, verbose=0) >>> trainset = mz.dataloader.Dataset(train) >>> validset = mz.dataloader.Dataset(valid)", "mode: str = 'maximize', num_runs: int = 10, verbose=1 ):", "the desired model to tune. `params.completed()` should be `True`. :param", "as mz >>> import numpy as np >>> train =", "A completed parameter table to tune. Usually `model.params` of the", "be `True`. :param optimizer: Str or `Optimizer` class. Optimizer for", "None, mode: str = 'maximize', num_runs: int = 10, verbose=1", "10) :param callbacks: A list of callbacks to handle. Handled", "prpr = mz.models.DenseBaseline.get_default_preprocessor() >>> train = prpr.fit_transform(train, verbose=0) >>> valid", "\"\"\" tuner = Tuner( params=params, optimizer=optimizer, trainloader=trainloader, validloader=validloader, embedding=embedding, fit_kwargs=fit_kwargs,", ">>> train = mz.datasets.toy.load_data('train') >>> valid = mz.datasets.toy.load_data('dev') >>> prpr", "completed parameter table to tune. Usually `model.params` of the desired", "the first metric in `params.['task'].metrics`. :param mode: Either `maximize` the", "sample. (default: 10) :param callbacks: A list of callbacks to", "then a sample will be taken in the space. 
However,", "for using :class:`matchzoo.auto.Tuner`. `model.params.hyper_space` reprensents the model's hyper-parameters search space,", "handle. Handled sequentially at every callback point. :param verbose: Verbosity.", "trainloader: Training data to use. Should be a `DataLoader`. :param", "embedding: Embedding used by model. :param fit_kwargs: Extra keyword arguments", "as np >>> train = mz.datasets.toy.load_data('train') >>> valid = mz.datasets.toy.load_data('dev')", "data to use. Should be a `DataLoader`. :param validloader: Testing", "scripts.study_case.ID_5.matchzoo as mz from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric from .tuner import", "BaseMetric] = None, mode: str = 'maximize', num_runs: int =", "if the hyper-parameter has a hyper-space, then a sample will", "arguments to pass to `fit`. (default: `dict(epochs=10, verbose=0)`) :param metric:", "Each run takes a sample in `params.hyper_space` and build a", "optimizing model. :param trainloader: Training data to use. Should be", "Training data to use. Should be a `DataLoader`. :param validloader:", "(default: the first metric in `params.['task'].metrics`. :param mode: Either `maximize`", "and build a model based on the sample. (default: 10)", "hyper-parameter has a hyper-space, then a sample will be taken", "as np import scripts.study_case.ID_5.matchzoo as mz from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric", "be taken in the space. However, if the hyper-parameter does", "= None, mode: str = 'maximize', num_runs: int = 10,", "metric: Metric to tune upon. Must be one of the", "= prpr.transform(valid, verbose=0) >>> trainset = mz.dataloader.Dataset(train) >>> validset =", "mz.tasks.Ranking() >>> optimizer = 'adam' >>> embedding = np.random.uniform(-0.2, 0.2,", "the default value of the hyper-parameter will be used. See", "= 10, verbose=1 ): \"\"\" Tune model hyper-parameters. 
A simple", "optimizer=optimizer, trainloader=trainloader, validloader=validloader, embedding=embedding, fit_kwargs=fit_kwargs, metric=metric, mode=mode, num_runs=num_runs, verbose=verbose )", "tune upon. Must be one of the metrics in `model.params['task'].metrics`.", "`model.params['task'].metrics`. (default: the first metric in `params.['task'].metrics`. :param mode: Either", "['#', 'params', 'sample', 'score'] \"\"\" tuner = Tuner( params=params, optimizer=optimizer,", "each hyper parameter in `model.params`, if the hyper-parameter has a", "... embedding=embedding, ... num_runs=1, ... verbose=0 ... ) >>> results", ":param num_runs: Number of runs. Each run takes a sample", "the metrics in `model.params['task'].metrics`. (default: the first metric in `params.['task'].metrics`.", "= 'adam' >>> embedding = np.random.uniform(-0.2, 0.2, ... (prpr.context['vocab_size'], 100))", "numpy as np import scripts.study_case.ID_5.matchzoo as mz from scripts.study_case.ID_5.matchzoo.engine.base_metric import", "list of callbacks to handle. Handled sequentially at every callback", "= Tuner( params=params, optimizer=optimizer, trainloader=trainloader, validloader=validloader, embedding=embedding, fit_kwargs=fit_kwargs, metric=metric, mode=mode,", "first metric in `params.['task'].metrics`. :param mode: Either `maximize` the metric", "if the hyper-parameter does not have a hyper-space, then the", "using :class:`matchzoo.auto.Tuner`. `model.params.hyper_space` reprensents the model's hyper-parameters search space, which", "See `tutorials/model_tuning.ipynb` for a detailed walkthrough on usage. :param params:", "a model based on the sample. (default: 10) :param callbacks:", ":param fit_kwargs: Extra keyword arguments to pass to `fit`. (default:", "tuner.tune() >>> sorted(results['best'].keys()) ['#', 'params', 'sample', 'score'] \"\"\" tuner =", ">>> train = prpr.fit_transform(train, verbose=0) >>> valid = prpr.transform(valid, verbose=0)", "callbacks: A list of callbacks to handle. 
Handled sequentially at", "`params.['task'].metrics`. :param mode: Either `maximize` the metric or `minimize` the", "optimizer: str = 'adam', trainloader: mz.dataloader.DataLoader = None, validloader: mz.dataloader.DataLoader", "fit_kwargs: dict = None, metric: typing.Union[str, BaseMetric] = None, mode:", "'params', 'sample', 'score'] \"\"\" tuner = Tuner( params=params, optimizer=optimizer, trainloader=trainloader,", "of individual hyper parameter's hyper space. When a `Tuner` builds", "for a detailed walkthrough on usage. :param params: A completed", "walkthrough on usage. :param params: A completed parameter table to", "= None, validloader: mz.dataloader.DataLoader = None, embedding: np.ndarray = None,", "`params.completed()` should be `True`. :param optimizer: Str or `Optimizer` class.", "typing import numpy as np import scripts.study_case.ID_5.matchzoo as mz from", "model. :param trainloader: Training data to use. Should be a", ") >>> results = tuner.tune() >>> sorted(results['best'].keys()) ['#', 'params', 'sample',", "padding = mz.models.DenseBaseline.get_default_padding_callback() >>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding) >>> validloader", "`model.params`, if the hyper-parameter has a hyper-space, then a sample", ">>> padding = mz.models.DenseBaseline.get_default_padding_callback() >>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding) >>>", "= mz.datasets.toy.load_data('train') >>> valid = mz.datasets.toy.load_data('dev') >>> prpr = mz.models.DenseBaseline.get_default_preprocessor()", "table to tune. Usually `model.params` of the desired model to", "in `params.hyper_space` and build a model based on the sample.", "trainloader=trainloader, ... validloader=validloader, ... embedding=embedding, ... num_runs=1, ... 
verbose=0 ...", "validloader = mz.dataloader.DataLoader(validset, callback=padding) >>> model = mz.models.DenseBaseline() >>> model.params['task']", "params: 'mz.ParamTable', optimizer: str = 'adam', trainloader: mz.dataloader.DataLoader = None,", "`True`. :param optimizer: Str or `Optimizer` class. Optimizer for optimizing", "prpr.fit_transform(train, verbose=0) >>> valid = prpr.transform(valid, verbose=0) >>> trainset =", "search space, which is the cross-product of individual hyper parameter's", "= mz.models.DenseBaseline.get_default_preprocessor() >>> train = prpr.fit_transform(train, verbose=0) >>> valid =", ">>> tuner = mz.auto.Tuner( ... params=model.params, ... optimizer=optimizer, ... trainloader=trainloader,", "import scripts.study_case.ID_5.matchzoo as mz from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric from .tuner", "used. See `tutorials/model_tuning.ipynb` for a detailed walkthrough on usage. :param", "= mz.auto.Tuner( ... params=model.params, ... optimizer=optimizer, ... trainloader=trainloader, ... validloader=validloader,", "class. Optimizer for optimizing model. :param trainloader: Training data to", "mode: Either `maximize` the metric or `minimize` the metric. (default:", "Metric to tune upon. Must be one of the metrics", ">>> embedding = np.random.uniform(-0.2, 0.2, ... (prpr.context['vocab_size'], 100)) >>> tuner", "sorted(results['best'].keys()) ['#', 'params', 'sample', 'score'] \"\"\" tuner = Tuner( params=params,", "of runs. Each run takes a sample in `params.hyper_space` and", "at every callback point. :param verbose: Verbosity. (default: 1) Example:", "Should be a `DataLoader`. :param validloader: Testing data to use.", "mz from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric from .tuner import Tuner def", ">>> model = mz.models.DenseBaseline() >>> model.params['task'] = mz.tasks.Ranking() >>> optimizer", "... optimizer=optimizer, ... trainloader=trainloader, ... validloader=validloader, ... 
embedding=embedding, ... num_runs=1,", "verbose=0) >>> valid = prpr.transform(valid, verbose=0) >>> trainset = mz.dataloader.Dataset(train)", "build a model based on the sample. (default: 10) :param", "`minimize` the metric. (default: 'maximize') :param num_runs: Number of runs.", ">>> import scripts.study_case.ID_5.matchzoo as mz >>> import numpy as np", "mz.auto.Tuner( ... params=model.params, ... optimizer=optimizer, ... trainloader=trainloader, ... validloader=validloader, ...", "(default: 'maximize') :param num_runs: Number of runs. Each run takes", "params=params, optimizer=optimizer, trainloader=trainloader, validloader=validloader, embedding=embedding, fit_kwargs=fit_kwargs, metric=metric, mode=mode, num_runs=num_runs, verbose=verbose", "metric. (default: 'maximize') :param num_runs: Number of runs. Each run", "hyper parameter's hyper space. When a `Tuner` builds a model,", "or `Optimizer` class. Optimizer for optimizing model. :param trainloader: Training", "A list of callbacks to handle. Handled sequentially at every", ".tuner import Tuner def tune( params: 'mz.ParamTable', optimizer: str =", "'adam' >>> embedding = np.random.uniform(-0.2, 0.2, ... (prpr.context['vocab_size'], 100)) >>>", "mz.dataloader.DataLoader(validset, callback=padding) >>> model = mz.models.DenseBaseline() >>> model.params['task'] = mz.tasks.Ranking()", ":param embedding: Embedding used by model. :param fit_kwargs: Extra keyword", "np >>> train = mz.datasets.toy.load_data('train') >>> valid = mz.datasets.toy.load_data('dev') >>>", "in the space. However, if the hyper-parameter does not have", "usage. :param params: A completed parameter table to tune. Usually", "model's hyper-parameters search space, which is the cross-product of individual", "parameter in `model.params`, if the hyper-parameter has a hyper-space, then", "the hyper-parameter does not have a hyper-space, then the default", "keyword arguments to pass to `fit`. 
(default: `dict(epochs=10, verbose=0)`) :param", "callback=padding) >>> model = mz.models.DenseBaseline() >>> model.params['task'] = mz.tasks.Ranking() >>>", ">>> import numpy as np >>> train = mz.datasets.toy.load_data('train') >>>", "int = 10, verbose=1 ): \"\"\" Tune model hyper-parameters. A", "= mz.tasks.Ranking() >>> optimizer = 'adam' >>> embedding = np.random.uniform(-0.2,", "does not have a hyper-space, then the default value of", "optimizer: Str or `Optimizer` class. Optimizer for optimizing model. :param", "... trainloader=trainloader, ... validloader=validloader, ... embedding=embedding, ... num_runs=1, ... verbose=0", "validloader=validloader, embedding=embedding, fit_kwargs=fit_kwargs, metric=metric, mode=mode, num_runs=num_runs, verbose=verbose ) return tuner.tune()", "to pass to `fit`. (default: `dict(epochs=10, verbose=0)`) :param metric: Metric", "`params.hyper_space` and build a model based on the sample. (default:", "tune. Usually `model.params` of the desired model to tune. `params.completed()`", "mz.dataloader.DataLoader = None, validloader: mz.dataloader.DataLoader = None, embedding: np.ndarray =", "to use. Should be a `DataLoader`. :param validloader: Testing data", "one of the metrics in `model.params['task'].metrics`. (default: the first metric", "trainloader: mz.dataloader.DataLoader = None, validloader: mz.dataloader.DataLoader = None, embedding: np.ndarray", "validset = mz.dataloader.Dataset(valid) >>> padding = mz.models.DenseBaseline.get_default_padding_callback() >>> trainloader =", ">>> valid = prpr.transform(valid, verbose=0) >>> trainset = mz.dataloader.Dataset(train) >>>", "np import scripts.study_case.ID_5.matchzoo as mz from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric from", "`maximize` the metric or `minimize` the metric. (default: 'maximize') :param", "... num_runs=1, ... verbose=0 ... 
) >>> results = tuner.tune()", "a `Tuner` builds a model, for each hyper parameter in", "def tune( params: 'mz.ParamTable', optimizer: str = 'adam', trainloader: mz.dataloader.DataLoader", "\"\"\" Tune model hyper-parameters. A simple shorthand for using :class:`matchzoo.auto.Tuner`.", "parameter table to tune. Usually `model.params` of the desired model", "(default: `dict(epochs=10, verbose=0)`) :param metric: Metric to tune upon. Must", "`fit`. (default: `dict(epochs=10, verbose=0)`) :param metric: Metric to tune upon.", ">>> prpr = mz.models.DenseBaseline.get_default_preprocessor() >>> train = prpr.fit_transform(train, verbose=0) >>>", "the metric or `minimize` the metric. (default: 'maximize') :param num_runs:", "hyper-parameters. A simple shorthand for using :class:`matchzoo.auto.Tuner`. `model.params.hyper_space` reprensents the", "np.random.uniform(-0.2, 0.2, ... (prpr.context['vocab_size'], 100)) >>> tuner = mz.auto.Tuner( ...", "trainset = mz.dataloader.Dataset(train) >>> validset = mz.dataloader.Dataset(valid) >>> padding =", "metric in `params.['task'].metrics`. :param mode: Either `maximize` the metric or", "= mz.models.DenseBaseline.get_default_padding_callback() >>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding) >>> validloader =", "run takes a sample in `params.hyper_space` and build a model", "... params=model.params, ... optimizer=optimizer, ... trainloader=trainloader, ... validloader=validloader, ... embedding=embedding,", "parameter's hyper space. When a `Tuner` builds a model, for", "None, embedding: np.ndarray = None, fit_kwargs: dict = None, metric:", "of the desired model to tune. 
`params.completed()` should be `True`.", "mz.dataloader.DataLoader(trainset, callback=padding) >>> validloader = mz.dataloader.DataLoader(validset, callback=padding) >>> model =", "'mz.ParamTable', optimizer: str = 'adam', trainloader: mz.dataloader.DataLoader = None, validloader:", "the model's hyper-parameters search space, which is the cross-product of", "numpy as np >>> train = mz.datasets.toy.load_data('train') >>> valid =", "str = 'maximize', num_runs: int = 10, verbose=1 ): \"\"\"", "a model, for each hyper parameter in `model.params`, if the", "0.2, ... (prpr.context['vocab_size'], 100)) >>> tuner = mz.auto.Tuner( ... params=model.params,", "metric or `minimize` the metric. (default: 'maximize') :param num_runs: Number", "10, verbose=1 ): \"\"\" Tune model hyper-parameters. A simple shorthand", "of callbacks to handle. Handled sequentially at every callback point.", "from .tuner import Tuner def tune( params: 'mz.ParamTable', optimizer: str", "simple shorthand for using :class:`matchzoo.auto.Tuner`. `model.params.hyper_space` reprensents the model's hyper-parameters", "hyper space. When a `Tuner` builds a model, for each", "a hyper-space, then the default value of the hyper-parameter will", "data to use. Should be a `DataLoader`. :param embedding: Embedding", "Must be one of the metrics in `model.params['task'].metrics`. (default: the", "mz.dataloader.Dataset(train) >>> validset = mz.dataloader.Dataset(valid) >>> padding = mz.models.DenseBaseline.get_default_padding_callback() >>>", "model based on the sample. (default: 10) :param callbacks: A", "None, metric: typing.Union[str, BaseMetric] = None, mode: str = 'maximize',", "detailed walkthrough on usage. :param params: A completed parameter table", "Str or `Optimizer` class. Optimizer for optimizing model. :param trainloader:", "hyper-parameters search space, which is the cross-product of individual hyper", "to tune. Usually `model.params` of the desired model to tune.", "upon. 
Must be one of the metrics in `model.params['task'].metrics`. (default:", "value of the hyper-parameter will be used. See `tutorials/model_tuning.ipynb` for", "valid = prpr.transform(valid, verbose=0) >>> trainset = mz.dataloader.Dataset(train) >>> validset", "= None, embedding: np.ndarray = None, fit_kwargs: dict = None,", "model hyper-parameters. A simple shorthand for using :class:`matchzoo.auto.Tuner`. `model.params.hyper_space` reprensents", "= 'adam', trainloader: mz.dataloader.DataLoader = None, validloader: mz.dataloader.DataLoader = None,", "has a hyper-space, then a sample will be taken in", "callback=padding) >>> validloader = mz.dataloader.DataLoader(validset, callback=padding) >>> model = mz.models.DenseBaseline()", "callbacks to handle. Handled sequentially at every callback point. :param", "to tune upon. Must be one of the metrics in", ":param validloader: Testing data to use. Should be a `DataLoader`.", "scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric from .tuner import Tuner def tune( params:", ">>> optimizer = 'adam' >>> embedding = np.random.uniform(-0.2, 0.2, ...", "builds a model, for each hyper parameter in `model.params`, if", "sample will be taken in the space. However, if the", "be one of the metrics in `model.params['task'].metrics`. (default: the first", "point. :param verbose: Verbosity. (default: 1) Example: >>> import scripts.study_case.ID_5.matchzoo", "metric: typing.Union[str, BaseMetric] = None, mode: str = 'maximize', num_runs:", "= None, fit_kwargs: dict = None, metric: typing.Union[str, BaseMetric] =", "the hyper-parameter will be used. 
See `tutorials/model_tuning.ipynb` for a detailed", "However, if the hyper-parameter does not have a hyper-space, then", "mz.models.DenseBaseline.get_default_padding_callback() >>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding) >>> validloader = mz.dataloader.DataLoader(validset,", "(default: 1) Example: >>> import scripts.study_case.ID_5.matchzoo as mz >>> import", "model. :param fit_kwargs: Extra keyword arguments to pass to `fit`.", "= np.random.uniform(-0.2, 0.2, ... (prpr.context['vocab_size'], 100)) >>> tuner = mz.auto.Tuner(", "will be used. See `tutorials/model_tuning.ipynb` for a detailed walkthrough on", "default value of the hyper-parameter will be used. See `tutorials/model_tuning.ipynb`", ":param optimizer: Str or `Optimizer` class. Optimizer for optimizing model.", "desired model to tune. `params.completed()` should be `True`. :param optimizer:", "mz.models.DenseBaseline() >>> model.params['task'] = mz.tasks.Ranking() >>> optimizer = 'adam' >>>", "BaseMetric from .tuner import Tuner def tune( params: 'mz.ParamTable', optimizer:", "sample in `params.hyper_space` and build a model based on the", "embedding: np.ndarray = None, fit_kwargs: dict = None, metric: typing.Union[str,", "as mz from scripts.study_case.ID_5.matchzoo.engine.base_metric import BaseMetric from .tuner import Tuner", "Optimizer for optimizing model. :param trainloader: Training data to use.", "np.ndarray = None, fit_kwargs: dict = None, metric: typing.Union[str, BaseMetric]", "fit_kwargs: Extra keyword arguments to pass to `fit`. (default: `dict(epochs=10,", "`dict(epochs=10, verbose=0)`) :param metric: Metric to tune upon. Must be", "num_runs: Number of runs. Each run takes a sample in", "Should be a `DataLoader`. :param embedding: Embedding used by model.", "scripts.study_case.ID_5.matchzoo as mz >>> import numpy as np >>> train", "shorthand for using :class:`matchzoo.auto.Tuner`. 
`model.params.hyper_space` reprensents the model's hyper-parameters search", "cross-product of individual hyper parameter's hyper space. When a `Tuner`", "sequentially at every callback point. :param verbose: Verbosity. (default: 1)", "model = mz.models.DenseBaseline() >>> model.params['task'] = mz.tasks.Ranking() >>> optimizer =", "trainloader = mz.dataloader.DataLoader(trainset, callback=padding) >>> validloader = mz.dataloader.DataLoader(validset, callback=padding) >>>", "train = mz.datasets.toy.load_data('train') >>> valid = mz.datasets.toy.load_data('dev') >>> prpr =", "takes a sample in `params.hyper_space` and build a model based", "Example: >>> import scripts.study_case.ID_5.matchzoo as mz >>> import numpy as", "None, validloader: mz.dataloader.DataLoader = None, embedding: np.ndarray = None, fit_kwargs:", "`model.params.hyper_space` reprensents the model's hyper-parameters search space, which is the", "= mz.dataloader.DataLoader(validset, callback=padding) >>> model = mz.models.DenseBaseline() >>> model.params['task'] =", ">>> results = tuner.tune() >>> sorted(results['best'].keys()) ['#', 'params', 'sample', 'score']", "'sample', 'score'] \"\"\" tuner = Tuner( params=params, optimizer=optimizer, trainloader=trainloader, validloader=validloader,", "the cross-product of individual hyper parameter's hyper space. When a", ">>> trainloader = mz.dataloader.DataLoader(trainset, callback=padding) >>> validloader = mz.dataloader.DataLoader(validset, callback=padding)", ">>> validset = mz.dataloader.Dataset(valid) >>> padding = mz.models.DenseBaseline.get_default_padding_callback() >>> trainloader", "A simple shorthand for using :class:`matchzoo.auto.Tuner`. `model.params.hyper_space` reprensents the model's", "(default: 10) :param callbacks: A list of callbacks to handle.", "params: A completed parameter table to tune. Usually `model.params` of", "a `DataLoader`. :param validloader: Testing data to use. 
Should be", "verbose=0) >>> trainset = mz.dataloader.Dataset(train) >>> validset = mz.dataloader.Dataset(valid) >>>", ">>> sorted(results['best'].keys()) ['#', 'params', 'sample', 'score'] \"\"\" tuner = Tuner(", "Embedding used by model. :param fit_kwargs: Extra keyword arguments to", "import numpy as np >>> train = mz.datasets.toy.load_data('train') >>> valid", "hyper-space, then the default value of the hyper-parameter will be", "hyper-parameter does not have a hyper-space, then the default value", "= mz.dataloader.Dataset(valid) >>> padding = mz.models.DenseBaseline.get_default_padding_callback() >>> trainloader = mz.dataloader.DataLoader(trainset,", "Usually `model.params` of the desired model to tune. `params.completed()` should", "= mz.dataloader.DataLoader(trainset, callback=padding) >>> validloader = mz.dataloader.DataLoader(validset, callback=padding) >>> model", "validloader: Testing data to use. Should be a `DataLoader`. :param", "validloader: mz.dataloader.DataLoader = None, embedding: np.ndarray = None, fit_kwargs: dict", "used by model. :param fit_kwargs: Extra keyword arguments to pass" ]
[ "= PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only, render_kwargs={pixel_key: {\"width\": width, \"height\": height}},", ") wrapped_env.observation_space = wrapped_env.observation_space assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert", "== (32, 32, 3) assert depth_observation.dtype == np.uint8 if not", "spaces.Dict) width, height = (320, 240) # The wrapper should", "gym import spaces from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY class FakeEnvironment(gym.Env):", "FakeEnvironment(gym.Env): def __init__(self): self.action_space = spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32) def", "assert isinstance(observation_space, spaces.Dict) width, height = (320, 240) # The", "= env.observation_space assert isinstance(observation_space, spaces.Dict) width, height = (320, 240)", "= list(observation_space.spaces.keys()) + [pixel_key] assert list(wrapped_env.observation_space.spaces.keys()) == expected_keys # Check", "( len(wrapped_env.observation_space.spaces) == len(observation_space.spaces) + 1 ) expected_keys = list(observation_space.spaces.keys())", "spaces.Dict( { \"state\": spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32), } ) super(FakeDictObservationEnvironment,", "return observation def step(self, action): del action observation = self.observation_space.sample()", "reward, terminal, info class FakeArrayObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space", "the right environment for the test. observation_space = env.observation_space assert", "with the added observation. 
observation = wrapped_env.reset() rgb_observation = observation[pixel_key]", "observation = self.observation_space.sample() return observation def step(self, action): del action", "wrapped_env.observation_space = wrapped_env.observation_space assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert len(wrapped_env.observation_space.spaces)", "typing import Optional import pytest import numpy as np import", "def render(self, width=32, height=32, *args, **kwargs): del args del kwargs", "height = (320, 240) # The wrapper should only add", "= self.observation_space.sample() reward, terminal, info = 0.0, False, {} return", "low=-1, high=1, dtype=np.float32 ) super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs) class FakeDictObservationEnvironment(FakeEnvironment): def", "== 1 assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert len(wrapped_env.observation_space.spaces) ==", "pytest import numpy as np import gym from gym import", "def __init__(self): self.action_space = spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32) def render(self,", "observation. observation = wrapped_env.reset() rgb_observation = observation[pixel_key] assert rgb_observation.shape ==", "= spaces.Box( shape=(2,), low=-1, high=1, dtype=np.float32 ) super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs)", "(True, False)) def test_single_array_observation(self, pixels_only): pixel_key = \"depth\" env =", "observation. 
wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only, render_kwargs={pixel_key: {\"width\": width,", "{\"width\": width, \"height\": height}}, ) assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only:", "= None): super().reset(seed=seed) observation = self.observation_space.sample() return observation def step(self,", "import PixelObservationWrapper, STATE_KEY class FakeEnvironment(gym.Env): def __init__(self): self.action_space = spaces.Box(shape=(1,),", "consistent with the added observation. observation = wrapped_env.reset() rgb_observation =", "isinstance(observation_space, spaces.Dict) width, height = (320, 240) # The wrapper", "spaces.Dict) if pixels_only: assert len(wrapped_env.observation_space.spaces) == 1 assert list(wrapped_env.observation_space.spaces.keys()) ==", "pixel_key, ] observation = wrapped_env.reset() depth_observation = observation[pixel_key] assert depth_observation.shape", "len(observation_space.spaces) + 1 ) expected_keys = list(observation_space.spaces.keys()) + [pixel_key] assert", "# The wrapper should only add one observation. wrapped_env =", "= \"rgb\" env = FakeDictObservationEnvironment() # Make sure we are", "env.observation_space assert isinstance(observation_space, spaces.Box) wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only", "self).__init__(*args, **kwargs) class FakeDictObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space =", "def __init__(self, *args, **kwargs): self.observation_space = spaces.Dict( { \"state\": spaces.Box(shape=(2,),", "the test. 
observation_space = env.observation_space assert isinstance(observation_space, spaces.Dict) width, height", "del args del kwargs image_shape = (height, width, 3) return", "pixel_key = \"rgb\" env = FakeDictObservationEnvironment() # Make sure we", "len(wrapped_env.observation_space.spaces) == 2 assert list(wrapped_env.observation_space.spaces.keys()) == [ STATE_KEY, pixel_key, ]", "TestPixelObservationWrapper(object): @pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_dict_observation(self, pixels_only): pixel_key = \"rgb\"", "expected_keys # Check that the added space item is consistent", "len(wrapped_env.observation_space.spaces) == 1 assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert (", "STATE_KEY, pixel_key, ] observation = wrapped_env.reset() depth_observation = observation[pixel_key] assert", "width, 3) return np.zeros(image_shape, dtype=np.uint8) def reset(self, seed: Optional[int] =", "The wrapper should only add one observation. 
wrapped_env = PixelObservationWrapper(", "list(wrapped_env.observation_space.spaces.keys()) == [ STATE_KEY, pixel_key, ] observation = wrapped_env.reset() depth_observation", "env = FakeDictObservationEnvironment() # Make sure we are testing the", "return np.zeros(image_shape, dtype=np.uint8) def reset(self, seed: Optional[int] = None): super().reset(seed=seed)", "1 ) expected_keys = list(observation_space.spaces.keys()) + [pixel_key] assert list(wrapped_env.observation_space.spaces.keys()) ==", "= PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only ) wrapped_env.observation_space = wrapped_env.observation_space assert", "__init__(self, *args, **kwargs): self.observation_space = spaces.Dict( { \"state\": spaces.Box(shape=(2,), low=-1,", "assert depth_observation.shape == (32, 32, 3) assert depth_observation.dtype == np.uint8", "*args, **kwargs): del args del kwargs image_shape = (height, width,", "None): super().reset(seed=seed) observation = self.observation_space.sample() return observation def step(self, action):", "test. observation_space = env.observation_space assert isinstance(observation_space, spaces.Dict) width, height =", "terminal, info = 0.0, False, {} return observation, reward, terminal,", "False, {} return observation, reward, terminal, info class FakeArrayObservationEnvironment(FakeEnvironment): def", "[pixel_key] assert list(wrapped_env.observation_space.spaces.keys()) == expected_keys # Check that the added", "space item is consistent with the added observation. observation =", "observation = wrapped_env.reset() depth_observation = observation[pixel_key] assert depth_observation.shape == (32,", "# Check that the added space item is consistent with", "from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY class FakeEnvironment(gym.Env): def __init__(self): self.action_space", "should only add one observation. 
wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,),", "test_single_array_observation(self, pixels_only): pixel_key = \"depth\" env = FakeArrayObservationEnvironment() observation_space =", "if pixels_only: assert len(wrapped_env.observation_space.spaces) == 1 assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key]", "width=32, height=32, *args, **kwargs): del args del kwargs image_shape =", "import spaces from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY class FakeEnvironment(gym.Env): def", "environment for the test. observation_space = env.observation_space assert isinstance(observation_space, spaces.Dict)", "**kwargs): self.observation_space = spaces.Box( shape=(2,), low=-1, high=1, dtype=np.float32 ) super(FakeArrayObservationEnvironment,", "env, pixel_keys=(pixel_key,), pixels_only=pixels_only ) wrapped_env.observation_space = wrapped_env.observation_space assert isinstance(wrapped_env.observation_space, spaces.Dict)", "*args, **kwargs): self.observation_space = spaces.Box( shape=(2,), low=-1, high=1, dtype=np.float32 )", "super().reset(seed=seed) observation = self.observation_space.sample() return observation def step(self, action): del", "assert ( len(wrapped_env.observation_space.spaces) == len(observation_space.spaces) + 1 ) expected_keys =", "env = FakeArrayObservationEnvironment() observation_space = env.observation_space assert isinstance(observation_space, spaces.Box) wrapped_env", "testing the right environment for the test. observation_space = env.observation_space", "observation = wrapped_env.reset() rgb_observation = observation[pixel_key] assert rgb_observation.shape == (height,", "gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY class FakeEnvironment(gym.Env): def __init__(self): self.action_space =", "one observation. 
wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only, render_kwargs={pixel_key: {\"width\":", "= wrapped_env.reset() rgb_observation = observation[pixel_key] assert rgb_observation.shape == (height, width,", "width, height = (320, 240) # The wrapper should only", "== 2 assert list(wrapped_env.observation_space.spaces.keys()) == [ STATE_KEY, pixel_key, ] observation", "= FakeArrayObservationEnvironment() observation_space = env.observation_space assert isinstance(observation_space, spaces.Box) wrapped_env =", "self).__init__(*args, **kwargs) class TestPixelObservationWrapper(object): @pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_dict_observation(self, pixels_only):", "del action observation = self.observation_space.sample() reward, terminal, info = 0.0,", "(height, width, 3) assert rgb_observation.dtype == np.uint8 @pytest.mark.parametrize(\"pixels_only\", (True, False))", "high=1, dtype=np.float32) def render(self, width=32, height=32, *args, **kwargs): del args", "FakeArrayObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space = spaces.Box( shape=(2,), low=-1,", "\"state\": spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32), } ) super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs)", "kwargs image_shape = (height, width, 3) return np.zeros(image_shape, dtype=np.uint8) def", "added observation. 
observation = wrapped_env.reset() rgb_observation = observation[pixel_key] assert rgb_observation.shape", "PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only ) wrapped_env.observation_space = wrapped_env.observation_space assert isinstance(wrapped_env.observation_space,", "seed: Optional[int] = None): super().reset(seed=seed) observation = self.observation_space.sample() return observation", "0.0, False, {} return observation, reward, terminal, info class FakeArrayObservationEnvironment(FakeEnvironment):", "FakeDictObservationEnvironment() # Make sure we are testing the right environment", "== np.uint8 @pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_single_array_observation(self, pixels_only): pixel_key =", "# Make sure we are testing the right environment for", "from typing import Optional import pytest import numpy as np", "dtype=np.float32), } ) super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs) class TestPixelObservationWrapper(object): @pytest.mark.parametrize(\"pixels_only\", (True,", "PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only, render_kwargs={pixel_key: {\"width\": width, \"height\": height}}, )", "render_kwargs={pixel_key: {\"width\": width, \"height\": height}}, ) assert isinstance(wrapped_env.observation_space, spaces.Dict) if", "low=-1, high=1, dtype=np.float32) def render(self, width=32, height=32, *args, **kwargs): del", "(True, False)) def test_dict_observation(self, pixels_only): pixel_key = \"rgb\" env =", "the pixel observation wrapper.\"\"\" from typing import Optional import pytest", "(320, 240) # The wrapper should only add one observation.", "== expected_keys # Check that the added space item is", ") assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert len(wrapped_env.observation_space.spaces) == 1", "2 assert list(wrapped_env.observation_space.spaces.keys()) == [ STATE_KEY, pixel_key, ] observation =", "reward, 
terminal, info = 0.0, False, {} return observation, reward,", "observation[pixel_key] assert rgb_observation.shape == (height, width, 3) assert rgb_observation.dtype ==", "== (height, width, 3) assert rgb_observation.dtype == np.uint8 @pytest.mark.parametrize(\"pixels_only\", (True,", "render(self, width=32, height=32, *args, **kwargs): del args del kwargs image_shape", "assert list(wrapped_env.observation_space.spaces.keys()) == [ STATE_KEY, pixel_key, ] observation = wrapped_env.reset()", "right environment for the test. observation_space = env.observation_space assert isinstance(observation_space,", "\"\"\"Tests for the pixel observation wrapper.\"\"\" from typing import Optional", "**kwargs) class FakeDictObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space = spaces.Dict(", "= FakeDictObservationEnvironment() # Make sure we are testing the right", "assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert ( len(wrapped_env.observation_space.spaces) == len(observation_space.spaces)", "Check that the added space item is consistent with the", "[ STATE_KEY, pixel_key, ] observation = wrapped_env.reset() depth_observation = observation[pixel_key]", "spaces from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY class FakeEnvironment(gym.Env): def __init__(self):", "return observation, reward, terminal, info class FakeArrayObservationEnvironment(FakeEnvironment): def __init__(self, *args,", "wrapper.\"\"\" from typing import Optional import pytest import numpy as", ") super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs) class TestPixelObservationWrapper(object): @pytest.mark.parametrize(\"pixels_only\", (True, False)) def", "\"height\": height}}, ) assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert len(wrapped_env.observation_space.spaces)", "STATE_KEY class FakeEnvironment(gym.Env): def __init__(self): self.action_space 
= spaces.Box(shape=(1,), low=-1, high=1,", "class TestPixelObservationWrapper(object): @pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_dict_observation(self, pixels_only): pixel_key =", "FakeDictObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space = spaces.Dict( { \"state\":", "import pytest import numpy as np import gym from gym", "is consistent with the added observation. observation = wrapped_env.reset() rgb_observation", "**kwargs) class TestPixelObservationWrapper(object): @pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_dict_observation(self, pixels_only): pixel_key", "wrapped_env.reset() rgb_observation = observation[pixel_key] assert rgb_observation.shape == (height, width, 3)", "assert rgb_observation.shape == (height, width, 3) assert rgb_observation.dtype == np.uint8", "pixel_key = \"depth\" env = FakeArrayObservationEnvironment() observation_space = env.observation_space assert", "pixels_only=pixels_only ) wrapped_env.observation_space = wrapped_env.observation_space assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only:", "del kwargs image_shape = (height, width, 3) return np.zeros(image_shape, dtype=np.uint8)", "list(observation_space.spaces.keys()) + [pixel_key] assert list(wrapped_env.observation_space.spaces.keys()) == expected_keys # Check that", "== [pixel_key] else: assert len(wrapped_env.observation_space.spaces) == 2 assert list(wrapped_env.observation_space.spaces.keys()) ==", "Optional import pytest import numpy as np import gym from", "wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only, render_kwargs={pixel_key: {\"width\": width, \"height\":", "args del kwargs image_shape = (height, width, 3) return np.zeros(image_shape,", "{} return observation, reward, terminal, info class FakeArrayObservationEnvironment(FakeEnvironment): def __init__(self,", "3) return np.zeros(image_shape, dtype=np.uint8) 
def reset(self, seed: Optional[int] = None):", "assert len(wrapped_env.observation_space.spaces) == 1 assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert", "= observation[pixel_key] assert depth_observation.shape == (32, 32, 3) assert depth_observation.dtype", "add one observation. wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only, render_kwargs={pixel_key:", "<reponame>maxgold/icml22 \"\"\"Tests for the pixel observation wrapper.\"\"\" from typing import", "FakeArrayObservationEnvironment() observation_space = env.observation_space assert isinstance(observation_space, spaces.Box) wrapped_env = PixelObservationWrapper(", "\"depth\" env = FakeArrayObservationEnvironment() observation_space = env.observation_space assert isinstance(observation_space, spaces.Box)", "pixel_keys=(pixel_key,), pixels_only=pixels_only ) wrapped_env.observation_space = wrapped_env.observation_space assert isinstance(wrapped_env.observation_space, spaces.Dict) if", "np.zeros(image_shape, dtype=np.uint8) def reset(self, seed: Optional[int] = None): super().reset(seed=seed) observation", "terminal, info class FakeArrayObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space =", "wrapper should only add one observation. wrapped_env = PixelObservationWrapper( env,", "+ [pixel_key] assert list(wrapped_env.observation_space.spaces.keys()) == expected_keys # Check that the", "**kwargs): del args del kwargs image_shape = (height, width, 3)", ") expected_keys = list(observation_space.spaces.keys()) + [pixel_key] assert list(wrapped_env.observation_space.spaces.keys()) == expected_keys", "are testing the right environment for the test. observation_space =", "only add one observation. 
wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only,", "= spaces.Dict( { \"state\": spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32), } )", "= (320, 240) # The wrapper should only add one", "@pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_single_array_observation(self, pixels_only): pixel_key = \"depth\" env", "= (height, width, 3) return np.zeros(image_shape, dtype=np.uint8) def reset(self, seed:", "dtype=np.uint8) def reset(self, seed: Optional[int] = None): super().reset(seed=seed) observation =", "sure we are testing the right environment for the test.", "== [pixel_key] else: assert ( len(wrapped_env.observation_space.spaces) == len(observation_space.spaces) + 1", "env.observation_space assert isinstance(observation_space, spaces.Dict) width, height = (320, 240) #", "def test_dict_observation(self, pixels_only): pixel_key = \"rgb\" env = FakeDictObservationEnvironment() #", "image_shape = (height, width, 3) return np.zeros(image_shape, dtype=np.uint8) def reset(self,", "from gym import spaces from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY class", "shape=(2,), low=-1, high=1, dtype=np.float32 ) super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs) class FakeDictObservationEnvironment(FakeEnvironment):", "high=1, dtype=np.float32), } ) super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs) class TestPixelObservationWrapper(object): @pytest.mark.parametrize(\"pixels_only\",", "== len(observation_space.spaces) + 1 ) expected_keys = list(observation_space.spaces.keys()) + [pixel_key]", "class FakeEnvironment(gym.Env): def __init__(self): self.action_space = spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32)", "def __init__(self, *args, **kwargs): self.observation_space = spaces.Box( shape=(2,), low=-1, high=1,", "np import gym from gym import spaces from gym.wrappers.pixel_observation import", "__init__(self, *args, **kwargs): 
self.observation_space = spaces.Box( shape=(2,), low=-1, high=1, dtype=np.float32", "wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only ) wrapped_env.observation_space = wrapped_env.observation_space", "wrapped_env.reset() depth_observation = observation[pixel_key] assert depth_observation.shape == (32, 32, 3)", "32, 3) assert depth_observation.dtype == np.uint8 if not pixels_only: assert", "step(self, action): del action observation = self.observation_space.sample() reward, terminal, info", "info = 0.0, False, {} return observation, reward, terminal, info", "for the test. observation_space = env.observation_space assert isinstance(observation_space, spaces.Dict) width,", "1 assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert ( len(wrapped_env.observation_space.spaces) ==", "rgb_observation.shape == (height, width, 3) assert rgb_observation.dtype == np.uint8 @pytest.mark.parametrize(\"pixels_only\",", "high=1, dtype=np.float32 ) super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs) class FakeDictObservationEnvironment(FakeEnvironment): def __init__(self,", "rgb_observation.dtype == np.uint8 @pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_single_array_observation(self, pixels_only): pixel_key", "numpy as np import gym from gym import spaces from", "@pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_dict_observation(self, pixels_only): pixel_key = \"rgb\" env", "dtype=np.float32) def render(self, width=32, height=32, *args, **kwargs): del args del", "assert isinstance(observation_space, spaces.Box) wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only )", "super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs) class TestPixelObservationWrapper(object): @pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_dict_observation(self,", "False)) def test_single_array_observation(self, 
pixels_only): pixel_key = \"depth\" env = FakeArrayObservationEnvironment()", "pixel_keys=(pixel_key,), pixels_only=pixels_only, render_kwargs={pixel_key: {\"width\": width, \"height\": height}}, ) assert isinstance(wrapped_env.observation_space,", "[pixel_key] else: assert ( len(wrapped_env.observation_space.spaces) == len(observation_space.spaces) + 1 )", "assert depth_observation.dtype == np.uint8 if not pixels_only: assert isinstance(observation[STATE_KEY], np.ndarray)", "width, 3) assert rgb_observation.dtype == np.uint8 @pytest.mark.parametrize(\"pixels_only\", (True, False)) def", "depth_observation = observation[pixel_key] assert depth_observation.shape == (32, 32, 3) assert", "self.observation_space.sample() return observation def step(self, action): del action observation =", "test_dict_observation(self, pixels_only): pixel_key = \"rgb\" env = FakeDictObservationEnvironment() # Make", "observation def step(self, action): del action observation = self.observation_space.sample() reward,", "spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32), } ) super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs) class", "that the added space item is consistent with the added", "= env.observation_space assert isinstance(observation_space, spaces.Box) wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,),", "expected_keys = list(observation_space.spaces.keys()) + [pixel_key] assert list(wrapped_env.observation_space.spaces.keys()) == expected_keys #", "info class FakeArrayObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space = spaces.Box(", "height}}, ) assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert len(wrapped_env.observation_space.spaces) ==", "observation = self.observation_space.sample() reward, terminal, info = 0.0, False, {}", "= 0.0, False, {} return observation, reward, terminal, info class", "pixels_only): pixel_key = \"depth\" env = 
FakeArrayObservationEnvironment() observation_space = env.observation_space", "rgb_observation = observation[pixel_key] assert rgb_observation.shape == (height, width, 3) assert", "assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert len(wrapped_env.observation_space.spaces) == 2 assert", "spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32) def render(self, width=32, height=32, *args, **kwargs):", "= observation[pixel_key] assert rgb_observation.shape == (height, width, 3) assert rgb_observation.dtype", "len(wrapped_env.observation_space.spaces) == len(observation_space.spaces) + 1 ) expected_keys = list(observation_space.spaces.keys()) +", "Optional[int] = None): super().reset(seed=seed) observation = self.observation_space.sample() return observation def", "observation_space = env.observation_space assert isinstance(observation_space, spaces.Box) wrapped_env = PixelObservationWrapper( env,", "else: assert len(wrapped_env.observation_space.spaces) == 2 assert list(wrapped_env.observation_space.spaces.keys()) == [ STATE_KEY,", "False)) def test_dict_observation(self, pixels_only): pixel_key = \"rgb\" env = FakeDictObservationEnvironment()", "pixels_only): pixel_key = \"rgb\" env = FakeDictObservationEnvironment() # Make sure", "Make sure we are testing the right environment for the", "list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert ( len(wrapped_env.observation_space.spaces) == len(observation_space.spaces) +", "dtype=np.float32 ) super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs) class FakeDictObservationEnvironment(FakeEnvironment): def __init__(self, *args,", "observation, reward, terminal, info class FakeArrayObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs):", "width, \"height\": height}}, ) assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert", "self.observation_space.sample() reward, terminal, info = 0.0, False, {} 
return observation,", "observation wrapper.\"\"\" from typing import Optional import pytest import numpy", "isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert len(wrapped_env.observation_space.spaces) == 1 assert list(wrapped_env.observation_space.spaces.keys())", "else: assert ( len(wrapped_env.observation_space.spaces) == len(observation_space.spaces) + 1 ) expected_keys", "] observation = wrapped_env.reset() depth_observation = observation[pixel_key] assert depth_observation.shape ==", "def step(self, action): del action observation = self.observation_space.sample() reward, terminal,", "= wrapped_env.reset() depth_observation = observation[pixel_key] assert depth_observation.shape == (32, 32,", "assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert len(wrapped_env.observation_space.spaces) == 1 assert", "gym from gym import spaces from gym.wrappers.pixel_observation import PixelObservationWrapper, STATE_KEY", "240) # The wrapper should only add one observation. 
wrapped_env", "self.observation_space = spaces.Dict( { \"state\": spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32), }", "observation[pixel_key] assert depth_observation.shape == (32, 32, 3) assert depth_observation.dtype ==", "3) assert rgb_observation.dtype == np.uint8 @pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_single_array_observation(self,", "observation_space = env.observation_space assert isinstance(observation_space, spaces.Dict) width, height = (320,", "env, pixel_keys=(pixel_key,), pixels_only=pixels_only, render_kwargs={pixel_key: {\"width\": width, \"height\": height}}, ) assert", "pixels_only=pixels_only, render_kwargs={pixel_key: {\"width\": width, \"height\": height}}, ) assert isinstance(wrapped_env.observation_space, spaces.Dict)", "\"rgb\" env = FakeDictObservationEnvironment() # Make sure we are testing", "= spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32) def render(self, width=32, height=32, *args,", "== 1 assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert ( len(wrapped_env.observation_space.spaces)", "} ) super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs) class TestPixelObservationWrapper(object): @pytest.mark.parametrize(\"pixels_only\", (True, False))", "the added space item is consistent with the added observation.", "pixels_only: assert len(wrapped_env.observation_space.spaces) == 1 assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else:", "[pixel_key] else: assert len(wrapped_env.observation_space.spaces) == 2 assert list(wrapped_env.observation_space.spaces.keys()) == [", "pixel observation wrapper.\"\"\" from typing import Optional import pytest import", "low=-1, high=1, dtype=np.float32), } ) super(FakeDictObservationEnvironment, self).__init__(*args, **kwargs) class TestPixelObservationWrapper(object):", "super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs) class 
FakeDictObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space", "added space item is consistent with the added observation. observation", "= self.observation_space.sample() return observation def step(self, action): del action observation", "list(wrapped_env.observation_space.spaces.keys()) == expected_keys # Check that the added space item", "assert list(wrapped_env.observation_space.spaces.keys()) == expected_keys # Check that the added space", "self.observation_space = spaces.Box( shape=(2,), low=-1, high=1, dtype=np.float32 ) super(FakeArrayObservationEnvironment, self).__init__(*args,", "def test_single_array_observation(self, pixels_only): pixel_key = \"depth\" env = FakeArrayObservationEnvironment() observation_space", "assert len(wrapped_env.observation_space.spaces) == 2 assert list(wrapped_env.observation_space.spaces.keys()) == [ STATE_KEY, pixel_key,", "(height, width, 3) return np.zeros(image_shape, dtype=np.uint8) def reset(self, seed: Optional[int]", "reset(self, seed: Optional[int] = None): super().reset(seed=seed) observation = self.observation_space.sample() return", "spaces.Box) wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only ) wrapped_env.observation_space =", "as np import gym from gym import spaces from gym.wrappers.pixel_observation", "isinstance(observation_space, spaces.Box) wrapped_env = PixelObservationWrapper( env, pixel_keys=(pixel_key,), pixels_only=pixels_only ) wrapped_env.observation_space", "for the pixel observation wrapper.\"\"\" from typing import Optional import", "wrapped_env.observation_space assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert len(wrapped_env.observation_space.spaces) == 1", "assert rgb_observation.dtype == np.uint8 @pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_single_array_observation(self, pixels_only):", "(32, 32, 3) assert depth_observation.dtype == np.uint8 if 
not pixels_only:", "self.action_space = spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32) def render(self, width=32, height=32,", "1 assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert len(wrapped_env.observation_space.spaces) == 2", "spaces.Box( shape=(2,), low=-1, high=1, dtype=np.float32 ) super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs) class", "= \"depth\" env = FakeArrayObservationEnvironment() observation_space = env.observation_space assert isinstance(observation_space,", "len(wrapped_env.observation_space.spaces) == 1 assert list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert len(wrapped_env.observation_space.spaces)", "we are testing the right environment for the test. observation_space", ") super(FakeArrayObservationEnvironment, self).__init__(*args, **kwargs) class FakeDictObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs):", "class FakeArrayObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space = spaces.Box( shape=(2,),", "class FakeDictObservationEnvironment(FakeEnvironment): def __init__(self, *args, **kwargs): self.observation_space = spaces.Dict( {", "import Optional import pytest import numpy as np import gym", "import numpy as np import gym from gym import spaces", "the added observation. observation = wrapped_env.reset() rgb_observation = observation[pixel_key] assert", "depth_observation.shape == (32, 32, 3) assert depth_observation.dtype == np.uint8 if", "action observation = self.observation_space.sample() reward, terminal, info = 0.0, False,", "item is consistent with the added observation. 
observation = wrapped_env.reset()", "np.uint8 @pytest.mark.parametrize(\"pixels_only\", (True, False)) def test_single_array_observation(self, pixels_only): pixel_key = \"depth\"", "= wrapped_env.observation_space assert isinstance(wrapped_env.observation_space, spaces.Dict) if pixels_only: assert len(wrapped_env.observation_space.spaces) ==", "def reset(self, seed: Optional[int] = None): super().reset(seed=seed) observation = self.observation_space.sample()", "action): del action observation = self.observation_space.sample() reward, terminal, info =", "== [ STATE_KEY, pixel_key, ] observation = wrapped_env.reset() depth_observation =", "PixelObservationWrapper, STATE_KEY class FakeEnvironment(gym.Env): def __init__(self): self.action_space = spaces.Box(shape=(1,), low=-1,", "__init__(self): self.action_space = spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32) def render(self, width=32,", "{ \"state\": spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32), } ) super(FakeDictObservationEnvironment, self).__init__(*args,", "**kwargs): self.observation_space = spaces.Dict( { \"state\": spaces.Box(shape=(2,), low=-1, high=1, dtype=np.float32),", "list(wrapped_env.observation_space.spaces.keys()) == [pixel_key] else: assert len(wrapped_env.observation_space.spaces) == 2 assert list(wrapped_env.observation_space.spaces.keys())", "3) assert depth_observation.dtype == np.uint8 if not pixels_only: assert isinstance(observation[STATE_KEY],", "+ 1 ) expected_keys = list(observation_space.spaces.keys()) + [pixel_key] assert list(wrapped_env.observation_space.spaces.keys())", "*args, **kwargs): self.observation_space = spaces.Dict( { \"state\": spaces.Box(shape=(2,), low=-1, high=1,", "height=32, *args, **kwargs): del args del kwargs image_shape = (height,", "import gym from gym import spaces from gym.wrappers.pixel_observation import PixelObservationWrapper," ]
[ "<reponame>MuAuan/Scipy-Swan<filename>real_plot_fft_stft_impl.py<gh_stars>0 import pyaudio import wave from scipy.fftpack import fft, ifft", "= np.sqrt(freq*freq.conj())*2/fn f = np.arange(int(fn/2)) ax3.axis([200, 20000, 0,0.000075]) ax3.set_xscale('log') ax3.plot(f,Pyy)", "22.1kHz 44.1kHz RECORD_SECONDS = 5 # 5秒録音 WAVE_OUTPUT_FILENAME = \"output2.wav\"", "RATE #wr.getframerate() fn = wr.getnframes() fs = fn / fr", "pyaudio.paInt16 # int16型 CHANNELS = 1 # 1;monoral 2;ステレオ- RATE", "ax3 = fig.add_subplot(313) ax2.axis([0, 5, 200,20000]) ax2.set_yscale('log') while True: fig.delaxes(ax1)", "wf.setnchannels(CHANNELS) wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close() wavfile = WAVE_OUTPUT_FILENAME wr =", "= wave.open(WAVE_OUTPUT_FILENAME, 'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close() wavfile =", "from scipy import signal from swan import pycwt CHUNK =", "width = p.get_sample_size(FORMAT) #wr.getsampwidth() fr = RATE #wr.getframerate() fn =", "= fn / fr origin = wr.readframes(wr.getnframes()) data = origin[:fn]", "figureの初期化 fig = plt.figure(figsize=(12, 10)) ax1 = fig.add_subplot(311) ax2 =", "range(0, int(RATE / CHUNK * RECORD_SECONDS)): data = stream.read(CHUNK) frames.append(data)", "wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close() wavfile = WAVE_OUTPUT_FILENAME wr = wave.open(wavfile,", "nperseg = 256 f, t, Zxx = signal.stft(sig, fs=fs*fn/50, nperseg=nperseg)", "import matplotlib.pyplot as plt import cv2 from scipy import signal", "t = np.linspace(0,fs, fn/2, endpoint=False) ax1.axis([0, 5, -0.0075,0.0075]) ax1.plot(t, sig)", "in range(0, int(RATE / CHUNK * RECORD_SECONDS)): data = stream.read(CHUNK)", "import wave from scipy.fftpack import fft, ifft import numpy as", "22100 # 22.1kHz 44.1kHz RECORD_SECONDS = 5 # 5秒録音 WAVE_OUTPUT_FILENAME", "= [] for i 
in range(0, int(RATE / CHUNK *", "= np.linspace(0,fs, fn/2, endpoint=False) ax1.axis([0, 5, -0.0075,0.0075]) ax1.plot(t, sig) nperseg", "frames_per_buffer=CHUNK) s=1 # figureの初期化 fig = plt.figure(figsize=(12, 10)) ax1 =", "wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close() wavfile = WAVE_OUTPUT_FILENAME wr = wave.open(wavfile, \"rb\")", "wr.getnframes() fs = fn / fr origin = wr.readframes(wr.getnframes()) data", "1 # 1;monoral 2;ステレオ- RATE = 22100 # 22.1kHz 44.1kHz", "# 1;monoral 2;ステレオ- RATE = 22100 # 22.1kHz 44.1kHz RECORD_SECONDS", "# int16型 CHANNELS = 1 # 1;monoral 2;ステレオ- RATE =", "fig.delaxes(ax1) fig.delaxes(ax3) ax1 = fig.add_subplot(311) ax3 = fig.add_subplot(313) print(\"* recording\")", "ch = CHANNELS #wr.getnchannels() width = p.get_sample_size(FORMAT) #wr.getsampwidth() fr =", "-0.0075,0.0075]) ax1.plot(t, sig) nperseg = 256 f, t, Zxx =", "import numpy as np import matplotlib.pyplot as plt import cv2", "/ fr origin = wr.readframes(wr.getnframes()) data = origin[:fn] wr.close() sig", "import pyaudio import wave from scipy.fftpack import fft, ifft import", "* RECORD_SECONDS)): data = stream.read(CHUNK) frames.append(data) print(\"* done recording\") wf", "input=True, frames_per_buffer=CHUNK) s=1 # figureの初期化 fig = plt.figure(figsize=(12, 10)) ax1", "ax1.plot(t, sig) nperseg = 256 f, t, Zxx = signal.stft(sig,", "fig.add_subplot(311) ax2 = fig.add_subplot(312) ax3 = fig.add_subplot(313) ax2.axis([0, 5, 200,20000])", "wr = wave.open(wavfile, \"rb\") ch = CHANNELS #wr.getnchannels() width =", "cv2 from scipy import signal from swan import pycwt CHUNK", "origin[:fn] wr.close() sig = np.frombuffer(data, dtype=\"int16\") /32768.0 t = np.linspace(0,fs,", "= stream.read(CHUNK) frames.append(data) print(\"* done recording\") wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')", "= fig.add_subplot(311) ax2 = fig.add_subplot(312) ax3 = fig.add_subplot(313) ax2.axis([0, 5,", "fs = fn / fr origin = wr.readframes(wr.getnframes()) data =", "Pyy = 
np.sqrt(freq*freq.conj())*2/fn f = np.arange(int(fn/2)) ax3.axis([200, 20000, 0,0.000075]) ax3.set_xscale('log')", "i in range(0, int(RATE / CHUNK * RECORD_SECONDS)): data =", "p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) s=1 # figureの初期化 fig =", "cmap='hsv') freq =fft(sig,int(fn/2)) Pyy = np.sqrt(freq*freq.conj())*2/fn f = np.arange(int(fn/2)) ax3.axis([200,", "t, Zxx = signal.stft(sig, fs=fs*fn/50, nperseg=nperseg) ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv')", "# 22.1kHz 44.1kHz RECORD_SECONDS = 5 # 5秒録音 WAVE_OUTPUT_FILENAME =", "= p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) s=1 # figureの初期化 fig", "= p.get_sample_size(FORMAT) #wr.getsampwidth() fr = RATE #wr.getframerate() fn = wr.getnframes()", "= origin[:fn] wr.close() sig = np.frombuffer(data, dtype=\"int16\") /32768.0 t =", "5*f, np.abs(Zxx), cmap='hsv') freq =fft(sig,int(fn/2)) Pyy = np.sqrt(freq*freq.conj())*2/fn f =", "1024 FORMAT = pyaudio.paInt16 # int16型 CHANNELS = 1 #", "ifft import numpy as np import matplotlib.pyplot as plt import", "import signal from swan import pycwt CHUNK = 1024 FORMAT", "import pycwt CHUNK = 1024 FORMAT = pyaudio.paInt16 # int16型", "256 f, t, Zxx = signal.stft(sig, fs=fs*fn/50, nperseg=nperseg) ax2.pcolormesh(t, 5*f,", "wf.close() wavfile = WAVE_OUTPUT_FILENAME wr = wave.open(wavfile, \"rb\") ch =", "CHUNK = 1024 FORMAT = pyaudio.paInt16 # int16型 CHANNELS =", "fig.add_subplot(312) ax3 = fig.add_subplot(313) ax2.axis([0, 5, 200,20000]) ax2.set_yscale('log') while True:", "= fig.add_subplot(313) ax2.axis([0, 5, 200,20000]) ax2.set_yscale('log') while True: fig.delaxes(ax1) fig.delaxes(ax3)", "fig.add_subplot(311) ax3 = fig.add_subplot(313) print(\"* recording\") frames = [] for", "= wave.open(wavfile, \"rb\") ch = CHANNELS #wr.getnchannels() width = p.get_sample_size(FORMAT)", "ax1 = fig.add_subplot(311) ax3 = fig.add_subplot(313) print(\"* recording\") frames =", "= 
wr.readframes(wr.getnframes()) data = origin[:fn] wr.close() sig = np.frombuffer(data, dtype=\"int16\")", "wave.open(WAVE_OUTPUT_FILENAME, 'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close() wavfile = WAVE_OUTPUT_FILENAME", "np.linspace(0,fs, fn/2, endpoint=False) ax1.axis([0, 5, -0.0075,0.0075]) ax1.plot(t, sig) nperseg =", "np.abs(Zxx), cmap='hsv') freq =fft(sig,int(fn/2)) Pyy = np.sqrt(freq*freq.conj())*2/fn f = np.arange(int(fn/2))", "while True: fig.delaxes(ax1) fig.delaxes(ax3) ax1 = fig.add_subplot(311) ax3 = fig.add_subplot(313)", "44.1kHz RECORD_SECONDS = 5 # 5秒録音 WAVE_OUTPUT_FILENAME = \"output2.wav\" p", "wave from scipy.fftpack import fft, ifft import numpy as np", "True: fig.delaxes(ax1) fig.delaxes(ax3) ax1 = fig.add_subplot(311) ax3 = fig.add_subplot(313) print(\"*", "import cv2 from scipy import signal from swan import pycwt", "#wr.getnchannels() width = p.get_sample_size(FORMAT) #wr.getsampwidth() fr = RATE #wr.getframerate() fn", "endpoint=False) ax1.axis([0, 5, -0.0075,0.0075]) ax1.plot(t, sig) nperseg = 256 f,", "print(\"* recording\") frames = [] for i in range(0, int(RATE", "rate=RATE, input=True, frames_per_buffer=CHUNK) s=1 # figureの初期化 fig = plt.figure(figsize=(12, 10))", "wave.open(wavfile, \"rb\") ch = CHANNELS #wr.getnchannels() width = p.get_sample_size(FORMAT) #wr.getsampwidth()", "fig = plt.figure(figsize=(12, 10)) ax1 = fig.add_subplot(311) ax2 = fig.add_subplot(312)", "WAVE_OUTPUT_FILENAME wr = wave.open(wavfile, \"rb\") ch = CHANNELS #wr.getnchannels() width", "Zxx = signal.stft(sig, fs=fs*fn/50, nperseg=nperseg) ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv') freq", "data = stream.read(CHUNK) frames.append(data) print(\"* done recording\") wf = wave.open(WAVE_OUTPUT_FILENAME,", "'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close() wavfile = 
WAVE_OUTPUT_FILENAME wr", "\"rb\") ch = CHANNELS #wr.getnchannels() width = p.get_sample_size(FORMAT) #wr.getsampwidth() fr", "= 5 # 5秒録音 WAVE_OUTPUT_FILENAME = \"output2.wav\" p = pyaudio.PyAudio()", "5, -0.0075,0.0075]) ax1.plot(t, sig) nperseg = 256 f, t, Zxx", "fig.add_subplot(313) print(\"* recording\") frames = [] for i in range(0,", "fn/2, endpoint=False) ax1.axis([0, 5, -0.0075,0.0075]) ax1.plot(t, sig) nperseg = 256", "ax2.set_yscale('log') while True: fig.delaxes(ax1) fig.delaxes(ax3) ax1 = fig.add_subplot(311) ax3 =", "pyaudio import wave from scipy.fftpack import fft, ifft import numpy", "= signal.stft(sig, fs=fs*fn/50, nperseg=nperseg) ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv') freq =fft(sig,int(fn/2))", "5秒録音 WAVE_OUTPUT_FILENAME = \"output2.wav\" p = pyaudio.PyAudio() stream = p.open(format=FORMAT,", "stream.read(CHUNK) frames.append(data) print(\"* done recording\") wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') wf.setnchannels(CHANNELS)", "= wr.getnframes() fs = fn / fr origin = wr.readframes(wr.getnframes())", "= fig.add_subplot(312) ax3 = fig.add_subplot(313) ax2.axis([0, 5, 200,20000]) ax2.set_yscale('log') while", "= WAVE_OUTPUT_FILENAME wr = wave.open(wavfile, \"rb\") ch = CHANNELS #wr.getnchannels()", "origin = wr.readframes(wr.getnframes()) data = origin[:fn] wr.close() sig = np.frombuffer(data,", "pyaudio.PyAudio() stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) s=1 #", "ax2 = fig.add_subplot(312) ax3 = fig.add_subplot(313) ax2.axis([0, 5, 200,20000]) ax2.set_yscale('log')", "= pyaudio.paInt16 # int16型 CHANNELS = 1 # 1;monoral 2;ステレオ-", "/ CHUNK * RECORD_SECONDS)): data = stream.read(CHUNK) frames.append(data) print(\"* done", "f, t, Zxx = signal.stft(sig, fs=fs*fn/50, nperseg=nperseg) ax2.pcolormesh(t, 5*f, np.abs(Zxx),", "RECORD_SECONDS)): data = stream.read(CHUNK) frames.append(data) print(\"* done recording\") wf =", "fn / fr origin = wr.readframes(wr.getnframes()) data = origin[:fn] 
wr.close()", "fft, ifft import numpy as np import matplotlib.pyplot as plt", "as plt import cv2 from scipy import signal from swan", "fig.add_subplot(313) ax2.axis([0, 5, 200,20000]) ax2.set_yscale('log') while True: fig.delaxes(ax1) fig.delaxes(ax3) ax1", "CHANNELS #wr.getnchannels() width = p.get_sample_size(FORMAT) #wr.getsampwidth() fr = RATE #wr.getframerate()", "= 1 # 1;monoral 2;ステレオ- RATE = 22100 # 22.1kHz", "swan import pycwt CHUNK = 1024 FORMAT = pyaudio.paInt16 #", "freq =fft(sig,int(fn/2)) Pyy = np.sqrt(freq*freq.conj())*2/fn f = np.arange(int(fn/2)) ax3.axis([200, 20000,", "signal.stft(sig, fs=fs*fn/50, nperseg=nperseg) ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv') freq =fft(sig,int(fn/2)) Pyy", "as np import matplotlib.pyplot as plt import cv2 from scipy", "import fft, ifft import numpy as np import matplotlib.pyplot as", "= 256 f, t, Zxx = signal.stft(sig, fs=fs*fn/50, nperseg=nperseg) ax2.pcolormesh(t,", "ax1.axis([0, 5, -0.0075,0.0075]) ax1.plot(t, sig) nperseg = 256 f, t,", "= fig.add_subplot(313) print(\"* recording\") frames = [] for i in", "np.sqrt(freq*freq.conj())*2/fn f = np.arange(int(fn/2)) ax3.axis([200, 20000, 0,0.000075]) ax3.set_xscale('log') ax3.plot(f,Pyy) plt.pause(1)", "#wr.getsampwidth() fr = RATE #wr.getframerate() fn = wr.getnframes() fs =", "CHUNK * RECORD_SECONDS)): data = stream.read(CHUNK) frames.append(data) print(\"* done recording\")", "WAVE_OUTPUT_FILENAME = \"output2.wav\" p = pyaudio.PyAudio() stream = p.open(format=FORMAT, channels=CHANNELS,", "sig = np.frombuffer(data, dtype=\"int16\") /32768.0 t = np.linspace(0,fs, fn/2, endpoint=False)", "= fig.add_subplot(311) ax3 = fig.add_subplot(313) print(\"* recording\") frames = []", "nperseg=nperseg) ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv') freq =fft(sig,int(fn/2)) Pyy = np.sqrt(freq*freq.conj())*2/fn", "RECORD_SECONDS = 5 # 5秒録音 WAVE_OUTPUT_FILENAME = \"output2.wav\" p =", "frames = [] for i in range(0, int(RATE / CHUNK", "ax2.axis([0, 5, 200,20000]) 
ax2.set_yscale('log') while True: fig.delaxes(ax1) fig.delaxes(ax3) ax1 =", "ax3 = fig.add_subplot(313) print(\"* recording\") frames = [] for i", "= RATE #wr.getframerate() fn = wr.getnframes() fs = fn /", "5 # 5秒録音 WAVE_OUTPUT_FILENAME = \"output2.wav\" p = pyaudio.PyAudio() stream", "plt.figure(figsize=(12, 10)) ax1 = fig.add_subplot(311) ax2 = fig.add_subplot(312) ax3 =", "np.frombuffer(data, dtype=\"int16\") /32768.0 t = np.linspace(0,fs, fn/2, endpoint=False) ax1.axis([0, 5,", "# figureの初期化 fig = plt.figure(figsize=(12, 10)) ax1 = fig.add_subplot(311) ax2", "1;monoral 2;ステレオ- RATE = 22100 # 22.1kHz 44.1kHz RECORD_SECONDS =", "\"output2.wav\" p = pyaudio.PyAudio() stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True,", "p = pyaudio.PyAudio() stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)", "recording\") frames = [] for i in range(0, int(RATE /", "from swan import pycwt CHUNK = 1024 FORMAT = pyaudio.paInt16", "int16型 CHANNELS = 1 # 1;monoral 2;ステレオ- RATE = 22100", "[] for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):", "s=1 # figureの初期化 fig = plt.figure(figsize=(12, 10)) ax1 = fig.add_subplot(311)", "data = origin[:fn] wr.close() sig = np.frombuffer(data, dtype=\"int16\") /32768.0 t", "= 1024 FORMAT = pyaudio.paInt16 # int16型 CHANNELS = 1", "wr.readframes(wr.getnframes()) data = origin[:fn] wr.close() sig = np.frombuffer(data, dtype=\"int16\") /32768.0", "fig.delaxes(ax3) ax1 = fig.add_subplot(311) ax3 = fig.add_subplot(313) print(\"* recording\") frames", "= 22100 # 22.1kHz 44.1kHz RECORD_SECONDS = 5 # 5秒録音", "ax3.axis([200, 20000, 0,0.000075]) ax3.set_xscale('log') ax3.plot(f,Pyy) plt.pause(1) plt.savefig('figure'+str(s)+'.png') s += 1", "5, 200,20000]) ax2.set_yscale('log') while True: fig.delaxes(ax1) fig.delaxes(ax3) ax1 = fig.add_subplot(311)", "fr origin = wr.readframes(wr.getnframes()) data = origin[:fn] wr.close() sig =", "channels=CHANNELS, rate=RATE, input=True, 
frames_per_buffer=CHUNK) s=1 # figureの初期化 fig = plt.figure(figsize=(12,", "= np.frombuffer(data, dtype=\"int16\") /32768.0 t = np.linspace(0,fs, fn/2, endpoint=False) ax1.axis([0,", "CHANNELS = 1 # 1;monoral 2;ステレオ- RATE = 22100 #", "scipy.fftpack import fft, ifft import numpy as np import matplotlib.pyplot", "ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv') freq =fft(sig,int(fn/2)) Pyy = np.sqrt(freq*freq.conj())*2/fn f", "#wr.getframerate() fn = wr.getnframes() fs = fn / fr origin", "RATE = 22100 # 22.1kHz 44.1kHz RECORD_SECONDS = 5 #", "wf.writeframes(b''.join(frames)) wf.close() wavfile = WAVE_OUTPUT_FILENAME wr = wave.open(wavfile, \"rb\") ch", "=fft(sig,int(fn/2)) Pyy = np.sqrt(freq*freq.conj())*2/fn f = np.arange(int(fn/2)) ax3.axis([200, 20000, 0,0.000075])", "p.get_sample_size(FORMAT) #wr.getsampwidth() fr = RATE #wr.getframerate() fn = wr.getnframes() fs", "numpy as np import matplotlib.pyplot as plt import cv2 from", "frames.append(data) print(\"* done recording\") wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(p.get_sample_size(FORMAT))", "recording\") wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close()", "signal from swan import pycwt CHUNK = 1024 FORMAT =", "plt import cv2 from scipy import signal from swan import", "for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)): data", "ax1 = fig.add_subplot(311) ax2 = fig.add_subplot(312) ax3 = fig.add_subplot(313) ax2.axis([0,", "2;ステレオ- RATE = 22100 # 22.1kHz 44.1kHz RECORD_SECONDS = 5", "/32768.0 t = np.linspace(0,fs, fn/2, endpoint=False) ax1.axis([0, 5, -0.0075,0.0075]) ax1.plot(t,", "= pyaudio.PyAudio() stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) s=1", "print(\"* done recording\") wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') wf.setnchannels(CHANNELS) 
wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE)", "# 5秒録音 WAVE_OUTPUT_FILENAME = \"output2.wav\" p = pyaudio.PyAudio() stream =", "= \"output2.wav\" p = pyaudio.PyAudio() stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE,", "= plt.figure(figsize=(12, 10)) ax1 = fig.add_subplot(311) ax2 = fig.add_subplot(312) ax3", "10)) ax1 = fig.add_subplot(311) ax2 = fig.add_subplot(312) ax3 = fig.add_subplot(313)", "200,20000]) ax2.set_yscale('log') while True: fig.delaxes(ax1) fig.delaxes(ax3) ax1 = fig.add_subplot(311) ax3", "f = np.arange(int(fn/2)) ax3.axis([200, 20000, 0,0.000075]) ax3.set_xscale('log') ax3.plot(f,Pyy) plt.pause(1) plt.savefig('figure'+str(s)+'.png')", "done recording\") wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames))", "np import matplotlib.pyplot as plt import cv2 from scipy import", "= np.arange(int(fn/2)) ax3.axis([200, 20000, 0,0.000075]) ax3.set_xscale('log') ax3.plot(f,Pyy) plt.pause(1) plt.savefig('figure'+str(s)+'.png') s", "dtype=\"int16\") /32768.0 t = np.linspace(0,fs, fn/2, endpoint=False) ax1.axis([0, 5, -0.0075,0.0075])", "scipy import signal from swan import pycwt CHUNK = 1024", "fr = RATE #wr.getframerate() fn = wr.getnframes() fs = fn", "pycwt CHUNK = 1024 FORMAT = pyaudio.paInt16 # int16型 CHANNELS", "wavfile = WAVE_OUTPUT_FILENAME wr = wave.open(wavfile, \"rb\") ch = CHANNELS", "wr.close() sig = np.frombuffer(data, dtype=\"int16\") /32768.0 t = np.linspace(0,fs, fn/2,", "from scipy.fftpack import fft, ifft import numpy as np import", "FORMAT = pyaudio.paInt16 # int16型 CHANNELS = 1 # 1;monoral", "fn = wr.getnframes() fs = fn / fr origin =", "sig) nperseg = 256 f, t, Zxx = signal.stft(sig, fs=fs*fn/50,", "fs=fs*fn/50, nperseg=nperseg) ax2.pcolormesh(t, 5*f, np.abs(Zxx), cmap='hsv') freq =fft(sig,int(fn/2)) Pyy =", "= CHANNELS #wr.getnchannels() width = p.get_sample_size(FORMAT) #wr.getsampwidth() 
fr = RATE", "wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb') wf.setnchannels(CHANNELS) wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(RATE) wf.writeframes(b''.join(frames)) wf.close() wavfile", "int(RATE / CHUNK * RECORD_SECONDS)): data = stream.read(CHUNK) frames.append(data) print(\"*", "np.arange(int(fn/2)) ax3.axis([200, 20000, 0,0.000075]) ax3.set_xscale('log') ax3.plot(f,Pyy) plt.pause(1) plt.savefig('figure'+str(s)+'.png') s +=", "stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK) s=1 # figureの初期化", "matplotlib.pyplot as plt import cv2 from scipy import signal from" ]
[]
[ "as plt class Robot(object): def __init__(self, length=20.0): \"\"\" Creates robotand", "orientation % (2.0 * np.pi) def set_noise(self, steering_noise, distance_noise): \"\"\"", "run(robot, 0.1, 1.0) n = len(x_trajectory) fig, ax1 = plt.subplots(1,", "robot = Robot() robot.set(0, 1, 0) robot.set_noise(0.1,0.05) def run(robot, tau_p,", "x=robot.x ,caculate y. x1 = robot.x y1 = startY +(x1", "{}, {}\".format(i,robot.x, robot.y,steering, crosstrack)) return x_trajectory, y_trajectory x_trajectory, y_trajectory =", "drift): \"\"\" Sets thesystematical steering drift parameter \"\"\" self.steering_drift =", "crosstrack_error[i] print(\"{} [{}, {}] {}, {}\".format(i,robot.x, robot.y,steering, crosstrack)) return x_trajectory,", "class Robot(object): def __init__(self, length=20.0): \"\"\" Creates robotand initializes location/orientation", "import matplotlib.pyplot as plt class Robot(object): def __init__(self, length=20.0): \"\"\"", "model for motion radius =distance2 / turn cx =self.x -", "self.y = y self.orientation = orientation % (2.0 * np.pi)", "* np.cos(self.orientation) self.y +=distance2 * np.sin(self.orientation) self.orientation = (self.orientation +", "wheel steering angle, limited by max_steering_angle distance =total distance driven,", "0.0 self.distance_noise = 0.0 self.steering_drift = 0.0 def set(self, x,y,", "tau_d * diff_CTE crosstrack_error= [] crosstrack_error.append(0.0) diff_CTE = 0.0 startX", "thenoise parameters. 
\"\"\" # makes itpossible to change the noise", "= len(x_trajectory) fig, ax1 = plt.subplots(1, 1, figsize=(8, 8)) ax1.plot(x_trajectory,", "import random import numpy as np import matplotlib.pyplot as plt", "i in range(n): cte = robot.y steer = -tau* cte", "crosstrack_error[i] - tau_d * diff_CTE distance =speed robot.move(steering, distance) x_trajectory.append(robot.x)", "0.0 # apply noise steering2 =random.gauss(steering, self.steering_noise) distance2 =random.gauss(distance, self.distance_noise)", "0.0 def set(self, x,y, orientation): \"\"\" Sets a robotcoordinate. \"\"\"", "= 0.0 self.length =length self.steering_noise = 0.0 self.distance_noise = 0.0", "in theoriginal path, x=robot.x ,caculate y. x1 = robot.x y1", "=distance2 / turn cx =self.x - (np.sin(self.orientation) * radius) cy", "= Robot() robot.set(0, 1, 0) robot.set_noise(0.1,0.05) def run(robot, tau_p, tau_d,", "radius) self.y =cy - (np.cos(self.orientation) * radius) def __repr__(self): return'[x=%.5f", "= [] y_trajectory = [] for i in range(n): cte", "steer = -tau* cte robot.move(steer, speed) x_trajectory.append(robot.x) y_trajectory.append(robot.y) return x_trajectory,", "tau_d, n=100, speed=1.0): x_trajectory = [] y_trajectory = [] #steering", "distance =total distance driven, most be non-negative \"\"\" if steering>", "y. x1 = robot.x y1 = startY +(x1 - startX)", "0.0: distance= 0.0 # apply noise steering2 =random.gauss(steering, self.steering_noise) distance2", "x1 = robot.x y1 = startY +(x1 - startX) *", "# when in theoriginal path, x=robot.x ,caculate y. 
x1 =", "= [] y_trajectory = [] #steering =-tau_p * CTE -", "turn cx =self.x - (np.sin(self.orientation) * radius) cy =self.y +", "move(self,steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0): \"\"\" steering =front wheel", "self.steering_drift = 0.0 def set(self, x,y, orientation): \"\"\" Sets a", "Robot() robot.set(0, 1, 0) robot.set_noise(0.1,0.05) def run(robot, tau_p, tau_d, n=100,", "robot.y startOrientation= robot.orientation distance = 0.0 for i in range(n):", "# makes itpossible to change the noise parameters # this", "crosstrack_error.append(crosstrack) diff_CTE =crosstrack_error[i+1] - crosstrack_error[i] print(\"{} [{}, {}] {}, {}\".format(i,robot.x,", "* crosstrack_error[i] - tau_d * diff_CTE distance =speed robot.move(steering, distance)", "i in range(n): steering =-tau_p * crosstrack_error[i] - tau_d *", "= run(robot, 0.1, 1.0) n = len(x_trajectory) fig, ax1 =", "import numpy as np import matplotlib.pyplot as plt class Robot(object):", "{}] {}, {}\".format(i,robot.x, robot.y,steering, crosstrack)) return x_trajectory, y_trajectory x_trajectory, y_trajectory", "cte = robot.y steer = -tau* cte robot.move(steer, speed) x_trajectory.append(robot.x)", "def run_p(robot, tau, n=100, speed=1.0): x_trajectory = [] y_trajectory =", "self.y +=distance2 * np.sin(self.orientation) self.orientation = (self.orientation + turn) %", "self.distance_noise = 0.0 self.steering_drift = 0.0 def set(self, x,y, orientation):", "(self.orientation + turn) % (2.0 * np.pi) else: #approximate bicycle", "+ (np.cos(self.orientation) * radius) self.orientation = (self.orientation + turn) %", "CTE - tau_d * diff_CTE crosstrack_error= [] crosstrack_error.append(0.0) diff_CTE =", "def __repr__(self): return'[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation) def", "robot.move(steer, speed) x_trajectory.append(robot.x) y_trajectory.append(robot.y) return x_trajectory, y_trajectory robot = Robot()", "* np.pi) self.x =cx + (np.sin(self.orientation) * radius) 
self.y =cy", "tolerance=0.001, max_steering_angle=np.pi / 4.0): \"\"\" steering =front wheel steering angle,", "- startX) * np.tan(startOrientation) crosstrack =(robot.y - y1) * np.cos(startOrientation)", "\"\"\" self.x = 0.0 self.y = 0.0 self.orientation = 0.0", "= robot.x y1 = startY +(x1 - startX) * np.tan(startOrientation)", "print(\"{} [{}, {}] {}, {}\".format(i,robot.x, robot.y,steering, crosstrack)) return x_trajectory, y_trajectory", "orientation): \"\"\" Sets a robotcoordinate. \"\"\" self.x = x self.y", "to change the noise parameters # this isoften useful in", "np import matplotlib.pyplot as plt class Robot(object): def __init__(self, length=20.0):", "-*- coding:utf-8 -*- import random import numpy as np import", "\"\"\" self.steering_drift = drift def move(self,steering, distance, tolerance=0.001, max_steering_angle=np.pi /", "diff_CTE crosstrack_error= [] crosstrack_error.append(0.0) diff_CTE = 0.0 startX = robot.x", "distance= 0.0 # apply noise steering2 =random.gauss(steering, self.steering_noise) distance2 =random.gauss(distance,", "- (np.sin(self.orientation) * radius) cy =self.y + (np.cos(self.orientation) * radius)", "- tau_d * diff_CTE crosstrack_error= [] crosstrack_error.append(0.0) diff_CTE = 0.0", "if abs(turn)< tolerance: #approximate by straight line motion self.x +=distance2", "(np.cos(self.orientation) * radius) def __repr__(self): return'[x=%.5f y=%.5f orient=%.5f]' % (self.x,", "Execute motion turn =np.tan(steering2) * distance2 / self.length if abs(turn)<", "matplotlib.pyplot as plt class Robot(object): def __init__(self, length=20.0): \"\"\" Creates", "0.1, 1.0) n = len(x_trajectory) fig, ax1 = plt.subplots(1, 1,", "=front wheel steering angle, limited by max_steering_angle distance =total distance", "robot.set_noise(0.1,0.05) def run(robot, tau_p, tau_d, n=100, speed=1.0): x_trajectory = []", "robotcoordinate. \"\"\" self.x = x self.y = y self.orientation =", "distance_noise): \"\"\" Sets thenoise parameters. 
\"\"\" # makes itpossible to", "x_trajectory.append(robot.x) y_trajectory.append(robot.y) return x_trajectory, y_trajectory robot = Robot() robot.set(0, 1,", "\"\"\" Sets thesystematical steering drift parameter \"\"\" self.steering_drift = drift", "1, figsize=(8, 8)) ax1.plot(x_trajectory, y_trajectory, 'g', label='PDcontroller') ax1.plot(x_trajectory, np.zeros(n), 'r',", "robot.x startY = robot.y startOrientation= robot.orientation distance = 0.0 for", "self.distance_noise = distance_noise def set_steering_drift(self, drift): \"\"\" Sets thesystematical steering", "non-negative \"\"\" if steering> max_steering_angle: steering= max_steering_angle if steering <-max_steering_angle:", "distance) x_trajectory.append(robot.x) y_trajectory.append(robot.y) # when in theoriginal path, x=robot.x ,caculate", "self.steering_noise) distance2 =random.gauss(distance, self.distance_noise) # applysteering drift steering2 +=self.steering_drift #", "startX) * np.tan(startOrientation) crosstrack =(robot.y - y1) * np.cos(startOrientation) crosstrack_error.append(crosstrack)", "turn) % (2.0 * np.pi) else: #approximate bicycle model for", "change the noise parameters # this isoften useful in particle", "% (2.0 * np.pi) self.x =cx + (np.sin(self.orientation) * radius)", "coding:utf-8 -*- import random import numpy as np import matplotlib.pyplot", "0) robot.set_noise(0.1,0.05) def run(robot, tau_p, tau_d, n=100, speed=1.0): x_trajectory =", "distance =speed robot.move(steering, distance) x_trajectory.append(robot.x) y_trajectory.append(robot.y) # when in theoriginal", "# Execute motion turn =np.tan(steering2) * distance2 / self.length if", "= 0.0 self.orientation = 0.0 self.length =length self.steering_noise = 0.0", "0, 0. \"\"\" self.x = 0.0 self.y = 0.0 self.orientation", "= steering_noise self.distance_noise = distance_noise def set_steering_drift(self, drift): \"\"\" Sets", "to 0, 0, 0. 
\"\"\" self.x = 0.0 self.y =", "/ self.length if abs(turn)< tolerance: #approximate by straight line motion", "when in theoriginal path, x=robot.x ,caculate y. x1 = robot.x", "crosstrack_error.append(0.0) diff_CTE = 0.0 startX = robot.x startY = robot.y", "in particle filters self.steering_noise = steering_noise self.distance_noise = distance_noise def", "noise steering2 =random.gauss(steering, self.steering_noise) distance2 =random.gauss(distance, self.distance_noise) # applysteering drift", "0.0 for i in range(n): steering =-tau_p * crosstrack_error[i] -", "if steering <-max_steering_angle: steering= -max_steering_angle if distance< 0.0: distance= 0.0", "0. \"\"\" self.x = 0.0 self.y = 0.0 self.orientation =", "diff_CTE = 0.0 startX = robot.x startY = robot.y startOrientation=", "# applysteering drift steering2 +=self.steering_drift # Execute motion turn =np.tan(steering2)", "steering drift parameter \"\"\" self.steering_drift = drift def move(self,steering, distance,", "as np import matplotlib.pyplot as plt class Robot(object): def __init__(self,", "1, 0) robot.set_noise(0.1,0.05) def run(robot, tau_p, tau_d, n=100, speed=1.0): x_trajectory", "by max_steering_angle distance =total distance driven, most be non-negative \"\"\"", "\"\"\" steering =front wheel steering angle, limited by max_steering_angle distance", "def set_noise(self, steering_noise, distance_noise): \"\"\" Sets thenoise parameters. 
\"\"\" #", "n = len(x_trajectory) fig, ax1 = plt.subplots(1, 1, figsize=(8, 8))", "ax1 = plt.subplots(1, 1, figsize=(8, 8)) ax1.plot(x_trajectory, y_trajectory, 'g', label='PDcontroller')", "self.orientation = (self.orientation + turn) % (2.0 * np.pi) self.x", "#steering =-tau_p * CTE - tau_d * diff_CTE crosstrack_error= []", "noise parameters # this isoften useful in particle filters self.steering_noise", "+ turn) % (2.0 * np.pi) else: #approximate bicycle model", "startOrientation= robot.orientation distance = 0.0 for i in range(n): steering", "driven, most be non-negative \"\"\" if steering> max_steering_angle: steering= max_steering_angle", "=total distance driven, most be non-negative \"\"\" if steering> max_steering_angle:", "len(x_trajectory) fig, ax1 = plt.subplots(1, 1, figsize=(8, 8)) ax1.plot(x_trajectory, y_trajectory,", "plt.subplots(1, 1, figsize=(8, 8)) ax1.plot(x_trajectory, y_trajectory, 'g', label='PDcontroller') ax1.plot(x_trajectory, np.zeros(n),", "length=20.0): \"\"\" Creates robotand initializes location/orientation to 0, 0, 0.", "max_steering_angle=np.pi / 4.0): \"\"\" steering =front wheel steering angle, limited", "straight line motion self.x +=distance2 * np.cos(self.orientation) self.y +=distance2 *", "cx =self.x - (np.sin(self.orientation) * radius) cy =self.y + (np.cos(self.orientation)", "{}\".format(i,robot.x, robot.y,steering, crosstrack)) return x_trajectory, y_trajectory x_trajectory, y_trajectory = run(robot,", "steering2 =random.gauss(steering, self.steering_noise) distance2 =random.gauss(distance, self.distance_noise) # applysteering drift steering2", "+=distance2 * np.sin(self.orientation) self.orientation = (self.orientation + turn) % (2.0", "= (self.orientation + turn) % (2.0 * np.pi) self.x =cx", "for motion radius =distance2 / turn cx =self.x - (np.sin(self.orientation)", "* np.pi) def set_noise(self, steering_noise, distance_noise): \"\"\" Sets thenoise parameters.", "= 0.0 self.distance_noise = 0.0 self.steering_drift = 
0.0 def set(self,", "steering =-tau_p * crosstrack_error[i] - tau_d * diff_CTE distance =speed", "def __init__(self, length=20.0): \"\"\" Creates robotand initializes location/orientation to 0,", "% (2.0 * np.pi) else: #approximate bicycle model for motion", "self.y, self.orientation) def run_p(robot, tau, n=100, speed=1.0): x_trajectory = []", "run(robot, tau_p, tau_d, n=100, speed=1.0): x_trajectory = [] y_trajectory =", "y_trajectory x_trajectory, y_trajectory = run(robot, 0.1, 1.0) n = len(x_trajectory)", "for i in range(n): steering =-tau_p * crosstrack_error[i] - tau_d", "fig, ax1 = plt.subplots(1, 1, figsize=(8, 8)) ax1.plot(x_trajectory, y_trajectory, 'g',", "parameters # this isoften useful in particle filters self.steering_noise =", "self.orientation = 0.0 self.length =length self.steering_noise = 0.0 self.distance_noise =", "def set(self, x,y, orientation): \"\"\" Sets a robotcoordinate. \"\"\" self.x", "steering_noise self.distance_noise = distance_noise def set_steering_drift(self, drift): \"\"\" Sets thesystematical", "1.0) n = len(x_trajectory) fig, ax1 = plt.subplots(1, 1, figsize=(8,", "+=distance2 * np.cos(self.orientation) self.y +=distance2 * np.sin(self.orientation) self.orientation = (self.orientation", "Sets a robotcoordinate. 
\"\"\" self.x = x self.y = y", "steering2 +=self.steering_drift # Execute motion turn =np.tan(steering2) * distance2 /", "thesystematical steering drift parameter \"\"\" self.steering_drift = drift def move(self,steering,", "speed=1.0): x_trajectory = [] y_trajectory = [] for i in", "y_trajectory = run(robot, 0.1, 1.0) n = len(x_trajectory) fig, ax1", "= [] for i in range(n): cte = robot.y steer", "crosstrack)) return x_trajectory, y_trajectory x_trajectory, y_trajectory = run(robot, 0.1, 1.0)", "0.0 self.steering_drift = 0.0 def set(self, x,y, orientation): \"\"\" Sets", "run_p(robot, tau, n=100, speed=1.0): x_trajectory = [] y_trajectory = []", "bicycle model for motion radius =distance2 / turn cx =self.x", "np.tan(startOrientation) crosstrack =(robot.y - y1) * np.cos(startOrientation) crosstrack_error.append(crosstrack) diff_CTE =crosstrack_error[i+1]", "Sets thenoise parameters. \"\"\" # makes itpossible to change the", "= 0.0 self.y = 0.0 self.orientation = 0.0 self.length =length", "itpossible to change the noise parameters # this isoften useful", "motion turn =np.tan(steering2) * distance2 / self.length if abs(turn)< tolerance:", "__repr__(self): return'[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation) def run_p(robot,", "= 0.0 def set(self, x,y, orientation): \"\"\" Sets a robotcoordinate.", "y self.orientation = orientation % (2.0 * np.pi) def set_noise(self,", "=self.x - (np.sin(self.orientation) * radius) cy =self.y + (np.cos(self.orientation) *", "= robot.y steer = -tau* cte robot.move(steer, speed) x_trajectory.append(robot.x) y_trajectory.append(robot.y)", "Robot(object): def __init__(self, length=20.0): \"\"\" Creates robotand initializes location/orientation to", "radius) cy =self.y + (np.cos(self.orientation) * radius) self.orientation = (self.orientation", "=crosstrack_error[i+1] - crosstrack_error[i] print(\"{} [{}, {}] {}, {}\".format(i,robot.x, robot.y,steering, crosstrack))", "=-tau_p * CTE - tau_d * diff_CTE crosstrack_error= 
[] crosstrack_error.append(0.0)", "% (2.0 * np.pi) def set_noise(self, steering_noise, distance_noise): \"\"\" Sets", "python # -*- coding:utf-8 -*- import random import numpy as", "[] y_trajectory = [] for i in range(n): cte =", "= plt.subplots(1, 1, figsize=(8, 8)) ax1.plot(x_trajectory, y_trajectory, 'g', label='PDcontroller') ax1.plot(x_trajectory,", "- (np.cos(self.orientation) * radius) def __repr__(self): return'[x=%.5f y=%.5f orient=%.5f]' %", "* diff_CTE distance =speed robot.move(steering, distance) x_trajectory.append(robot.x) y_trajectory.append(robot.y) # when", "filters self.steering_noise = steering_noise self.distance_noise = distance_noise def set_steering_drift(self, drift):", "=speed robot.move(steering, distance) x_trajectory.append(robot.x) y_trajectory.append(robot.y) # when in theoriginal path,", "= robot.y startOrientation= robot.orientation distance = 0.0 for i in", "* radius) cy =self.y + (np.cos(self.orientation) * radius) self.orientation =", "= y self.orientation = orientation % (2.0 * np.pi) def", "x_trajectory, y_trajectory x_trajectory, y_trajectory = run(robot, 0.1, 1.0) n =", "startX = robot.x startY = robot.y startOrientation= robot.orientation distance =", "np.pi) else: #approximate bicycle model for motion radius =distance2 /", "* np.pi) else: #approximate bicycle model for motion radius =distance2", "n=100, speed=1.0): x_trajectory = [] y_trajectory = [] #steering =-tau_p", "parameters. \"\"\" # makes itpossible to change the noise parameters", "crosstrack =(robot.y - y1) * np.cos(startOrientation) crosstrack_error.append(crosstrack) diff_CTE =crosstrack_error[i+1] -", "robot.y,steering, crosstrack)) return x_trajectory, y_trajectory x_trajectory, y_trajectory = run(robot, 0.1,", "\"\"\" Creates robotand initializes location/orientation to 0, 0, 0. \"\"\"", "radius) def __repr__(self): return'[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation)", "path, x=robot.x ,caculate y. 
x1 = robot.x y1 = startY", "max_steering_angle if steering <-max_steering_angle: steering= -max_steering_angle if distance< 0.0: distance=", "x_trajectory, y_trajectory = run(robot, 0.1, 1.0) n = len(x_trajectory) fig,", "np.cos(startOrientation) crosstrack_error.append(crosstrack) diff_CTE =crosstrack_error[i+1] - crosstrack_error[i] print(\"{} [{}, {}] {},", "* diff_CTE crosstrack_error= [] crosstrack_error.append(0.0) diff_CTE = 0.0 startX =", "line motion self.x +=distance2 * np.cos(self.orientation) self.y +=distance2 * np.sin(self.orientation)", "return x_trajectory, y_trajectory robot = Robot() robot.set(0, 1, 0) robot.set_noise(0.1,0.05)", "steering_noise, distance_noise): \"\"\" Sets thenoise parameters. \"\"\" # makes itpossible", "=length self.steering_noise = 0.0 self.distance_noise = 0.0 self.steering_drift = 0.0", "* radius) self.y =cy - (np.cos(self.orientation) * radius) def __repr__(self):", "self.steering_drift = drift def move(self,steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0):", "angle, limited by max_steering_angle distance =total distance driven, most be", "= x self.y = y self.orientation = orientation % (2.0", "distance2 =random.gauss(distance, self.distance_noise) # applysteering drift steering2 +=self.steering_drift # Execute", "turn =np.tan(steering2) * distance2 / self.length if abs(turn)< tolerance: #approximate", "y_trajectory = [] #steering =-tau_p * CTE - tau_d *", "be non-negative \"\"\" if steering> max_steering_angle: steering= max_steering_angle if steering", "max_steering_angle distance =total distance driven, most be non-negative \"\"\" if", "self.length =length self.steering_noise = 0.0 self.distance_noise = 0.0 self.steering_drift =", "* radius) self.orientation = (self.orientation + turn) % (2.0 *", "=(robot.y - y1) * np.cos(startOrientation) crosstrack_error.append(crosstrack) diff_CTE =crosstrack_error[i+1] - crosstrack_error[i]", "0, 0, 0. 
\"\"\" self.x = 0.0 self.y = 0.0", "most be non-negative \"\"\" if steering> max_steering_angle: steering= max_steering_angle if", "#approximate by straight line motion self.x +=distance2 * np.cos(self.orientation) self.y", "= robot.x startY = robot.y startOrientation= robot.orientation distance = 0.0", "tolerance: #approximate by straight line motion self.x +=distance2 * np.cos(self.orientation)", "(2.0 * np.pi) self.x =cx + (np.sin(self.orientation) * radius) self.y", "distance< 0.0: distance= 0.0 # apply noise steering2 =random.gauss(steering, self.steering_noise)", "[] for i in range(n): cte = robot.y steer =", "= -tau* cte robot.move(steer, speed) x_trajectory.append(robot.x) y_trajectory.append(robot.y) return x_trajectory, y_trajectory", "speed=1.0): x_trajectory = [] y_trajectory = [] #steering =-tau_p *", "distance2 / self.length if abs(turn)< tolerance: #approximate by straight line", "__init__(self, length=20.0): \"\"\" Creates robotand initializes location/orientation to 0, 0,", "plt class Robot(object): def __init__(self, length=20.0): \"\"\" Creates robotand initializes", "distance, tolerance=0.001, max_steering_angle=np.pi / 4.0): \"\"\" steering =front wheel steering", "drift parameter \"\"\" self.steering_drift = drift def move(self,steering, distance, tolerance=0.001,", "this isoften useful in particle filters self.steering_noise = steering_noise self.distance_noise", "-max_steering_angle if distance< 0.0: distance= 0.0 # apply noise steering2", "\"\"\" self.x = x self.y = y self.orientation = orientation", "self.orientation = (self.orientation + turn) % (2.0 * np.pi) else:", "x_trajectory, y_trajectory robot = Robot() robot.set(0, 1, 0) robot.set_noise(0.1,0.05) def", "= 0.0 startX = robot.x startY = robot.y startOrientation= robot.orientation", "makes itpossible to change the noise parameters # this isoften", "=cx + (np.sin(self.orientation) * radius) self.y =cy - (np.cos(self.orientation) *", "/ 4.0): \"\"\" steering =front wheel steering 
angle, limited by", "(2.0 * np.pi) else: #approximate bicycle model for motion radius", "<filename>PID/PDControl.py #!/usr/bin/env python # -*- coding:utf-8 -*- import random import", "[{}, {}] {}, {}\".format(i,robot.x, robot.y,steering, crosstrack)) return x_trajectory, y_trajectory x_trajectory,", "# apply noise steering2 =random.gauss(steering, self.steering_noise) distance2 =random.gauss(distance, self.distance_noise) #", "y_trajectory = [] for i in range(n): cte = robot.y", "x_trajectory = [] y_trajectory = [] for i in range(n):", "y_trajectory.append(robot.y) # when in theoriginal path, x=robot.x ,caculate y. x1", "robot.set(0, 1, 0) robot.set_noise(0.1,0.05) def run(robot, tau_p, tau_d, n=100, speed=1.0):", "y_trajectory.append(robot.y) return x_trajectory, y_trajectory robot = Robot() robot.set(0, 1, 0)", "def run(robot, tau_p, tau_d, n=100, speed=1.0): x_trajectory = [] y_trajectory", "self.steering_noise = 0.0 self.distance_noise = 0.0 self.steering_drift = 0.0 def", "motion radius =distance2 / turn cx =self.x - (np.sin(self.orientation) *", "/ turn cx =self.x - (np.sin(self.orientation) * radius) cy =self.y", "self.x = x self.y = y self.orientation = orientation %", "* np.cos(startOrientation) crosstrack_error.append(crosstrack) diff_CTE =crosstrack_error[i+1] - crosstrack_error[i] print(\"{} [{}, {}]", "by straight line motion self.x +=distance2 * np.cos(self.orientation) self.y +=distance2", "theoriginal path, x=robot.x ,caculate y. x1 = robot.x y1 =", "return'[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation) def run_p(robot, tau,", "robotand initializes location/orientation to 0, 0, 0. 
\"\"\" self.x =", "self.length if abs(turn)< tolerance: #approximate by straight line motion self.x", "=np.tan(steering2) * distance2 / self.length if abs(turn)< tolerance: #approximate by", "x self.y = y self.orientation = orientation % (2.0 *", "if steering> max_steering_angle: steering= max_steering_angle if steering <-max_steering_angle: steering= -max_steering_angle", "0.0 self.orientation = 0.0 self.length =length self.steering_noise = 0.0 self.distance_noise", "limited by max_steering_angle distance =total distance driven, most be non-negative", "* CTE - tau_d * diff_CTE crosstrack_error= [] crosstrack_error.append(0.0) diff_CTE", "radius) self.orientation = (self.orientation + turn) % (2.0 * np.pi)", "robot.y steer = -tau* cte robot.move(steer, speed) x_trajectory.append(robot.x) y_trajectory.append(robot.y) return", "x_trajectory.append(robot.x) y_trajectory.append(robot.y) # when in theoriginal path, x=robot.x ,caculate y.", "+=self.steering_drift # Execute motion turn =np.tan(steering2) * distance2 / self.length", "set(self, x,y, orientation): \"\"\" Sets a robotcoordinate. 
\"\"\" self.x =", "0.0 self.length =length self.steering_noise = 0.0 self.distance_noise = 0.0 self.steering_drift", "abs(turn)< tolerance: #approximate by straight line motion self.x +=distance2 *", "=-tau_p * crosstrack_error[i] - tau_d * diff_CTE distance =speed robot.move(steering,", "self.orientation) def run_p(robot, tau, n=100, speed=1.0): x_trajectory = [] y_trajectory", "steering angle, limited by max_steering_angle distance =total distance driven, most", "y_trajectory robot = Robot() robot.set(0, 1, 0) robot.set_noise(0.1,0.05) def run(robot,", "orient=%.5f]' % (self.x, self.y, self.orientation) def run_p(robot, tau, n=100, speed=1.0):", "y1 = startY +(x1 - startX) * np.tan(startOrientation) crosstrack =(robot.y", "(np.cos(self.orientation) * radius) self.orientation = (self.orientation + turn) % (2.0", "(self.x, self.y, self.orientation) def run_p(robot, tau, n=100, speed=1.0): x_trajectory =", "8)) ax1.plot(x_trajectory, y_trajectory, 'g', label='PDcontroller') ax1.plot(x_trajectory, np.zeros(n), 'r', label='reference') plt.show()", "(2.0 * np.pi) def set_noise(self, steering_noise, distance_noise): \"\"\" Sets thenoise", "diff_CTE distance =speed robot.move(steering, distance) x_trajectory.append(robot.x) y_trajectory.append(robot.y) # when in", "parameter \"\"\" self.steering_drift = drift def move(self,steering, distance, tolerance=0.001, max_steering_angle=np.pi", "applysteering drift steering2 +=self.steering_drift # Execute motion turn =np.tan(steering2) *", "np.sin(self.orientation) self.orientation = (self.orientation + turn) % (2.0 * np.pi)", "apply noise steering2 =random.gauss(steering, self.steering_noise) distance2 =random.gauss(distance, self.distance_noise) # applysteering", "= [] #steering =-tau_p * CTE - tau_d * diff_CTE", "isoften useful in particle filters self.steering_noise = steering_noise self.distance_noise =", "x,y, orientation): \"\"\" Sets a robotcoordinate. 
\"\"\" self.x = x", "self.y =cy - (np.cos(self.orientation) * radius) def __repr__(self): return'[x=%.5f y=%.5f", "self.steering_noise = steering_noise self.distance_noise = distance_noise def set_steering_drift(self, drift): \"\"\"", "-*- import random import numpy as np import matplotlib.pyplot as", "self.y = 0.0 self.orientation = 0.0 self.length =length self.steering_noise =", "x_trajectory = [] y_trajectory = [] #steering =-tau_p * CTE", "Sets thesystematical steering drift parameter \"\"\" self.steering_drift = drift def", "set_noise(self, steering_noise, distance_noise): \"\"\" Sets thenoise parameters. \"\"\" # makes", "cy =self.y + (np.cos(self.orientation) * radius) self.orientation = (self.orientation +", "set_steering_drift(self, drift): \"\"\" Sets thesystematical steering drift parameter \"\"\" self.steering_drift", "n=100, speed=1.0): x_trajectory = [] y_trajectory = [] for i", "in range(n): cte = robot.y steer = -tau* cte robot.move(steer,", "robot.orientation distance = 0.0 for i in range(n): steering =-tau_p", "[] y_trajectory = [] #steering =-tau_p * CTE - tau_d", "startY +(x1 - startX) * np.tan(startOrientation) crosstrack =(robot.y - y1)", "- crosstrack_error[i] print(\"{} [{}, {}] {}, {}\".format(i,robot.x, robot.y,steering, crosstrack)) return", "return x_trajectory, y_trajectory x_trajectory, y_trajectory = run(robot, 0.1, 1.0) n", "steering =front wheel steering angle, limited by max_steering_angle distance =total", "=random.gauss(distance, self.distance_noise) # applysteering drift steering2 +=self.steering_drift # Execute motion", "\"\"\" Sets a robotcoordinate. 
\"\"\" self.x = x self.y =", "=cy - (np.cos(self.orientation) * radius) def __repr__(self): return'[x=%.5f y=%.5f orient=%.5f]'", "=self.y + (np.cos(self.orientation) * radius) self.orientation = (self.orientation + turn)", "4.0): \"\"\" steering =front wheel steering angle, limited by max_steering_angle", "for i in range(n): cte = robot.y steer = -tau*", "(np.sin(self.orientation) * radius) cy =self.y + (np.cos(self.orientation) * radius) self.orientation", "+(x1 - startX) * np.tan(startOrientation) crosstrack =(robot.y - y1) *", "diff_CTE =crosstrack_error[i+1] - crosstrack_error[i] print(\"{} [{}, {}] {}, {}\".format(i,robot.x, robot.y,steering,", "location/orientation to 0, 0, 0. \"\"\" self.x = 0.0 self.y", "* distance2 / self.length if abs(turn)< tolerance: #approximate by straight", "= drift def move(self,steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0): \"\"\"", "tau, n=100, speed=1.0): x_trajectory = [] y_trajectory = [] for", "#approximate bicycle model for motion radius =distance2 / turn cx", "\"\"\" Sets thenoise parameters. 
\"\"\" # makes itpossible to change", "-tau* cte robot.move(steer, speed) x_trajectory.append(robot.x) y_trajectory.append(robot.y) return x_trajectory, y_trajectory robot", "* np.tan(startOrientation) crosstrack =(robot.y - y1) * np.cos(startOrientation) crosstrack_error.append(crosstrack) diff_CTE", "steering= max_steering_angle if steering <-max_steering_angle: steering= -max_steering_angle if distance< 0.0:", "numpy as np import matplotlib.pyplot as plt class Robot(object): def", "def move(self,steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0): \"\"\" steering =front", "steering <-max_steering_angle: steering= -max_steering_angle if distance< 0.0: distance= 0.0 #", "np.pi) self.x =cx + (np.sin(self.orientation) * radius) self.y =cy -", "0.0 self.y = 0.0 self.orientation = 0.0 self.length =length self.steering_noise", "y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation) def run_p(robot, tau, n=100,", "a robotcoordinate. \"\"\" self.x = x self.y = y self.orientation", "- y1) * np.cos(startOrientation) crosstrack_error.append(crosstrack) diff_CTE =crosstrack_error[i+1] - crosstrack_error[i] print(\"{}", "0.0 startX = robot.x startY = robot.y startOrientation= robot.orientation distance", "self.x = 0.0 self.y = 0.0 self.orientation = 0.0 self.length", "[] #steering =-tau_p * CTE - tau_d * diff_CTE crosstrack_error=", "range(n): cte = robot.y steer = -tau* cte robot.move(steer, speed)", "self.orientation = orientation % (2.0 * np.pi) def set_noise(self, steering_noise,", "np.cos(self.orientation) self.y +=distance2 * np.sin(self.orientation) self.orientation = (self.orientation + turn)", "particle filters self.steering_noise = steering_noise self.distance_noise = distance_noise def set_steering_drift(self,", "<-max_steering_angle: steering= -max_steering_angle if distance< 0.0: distance= 0.0 # apply", "= (self.orientation + turn) % (2.0 * np.pi) else: #approximate", "the noise parameters # this isoften useful in particle filters", "self.x 
+=distance2 * np.cos(self.orientation) self.y +=distance2 * np.sin(self.orientation) self.orientation =", "tau_p, tau_d, n=100, speed=1.0): x_trajectory = [] y_trajectory = []", "+ (np.sin(self.orientation) * radius) self.y =cy - (np.cos(self.orientation) * radius)", "Creates robotand initializes location/orientation to 0, 0, 0. \"\"\" self.x", "\"\"\" # makes itpossible to change the noise parameters #", "= 0.0 self.steering_drift = 0.0 def set(self, x,y, orientation): \"\"\"", "turn) % (2.0 * np.pi) self.x =cx + (np.sin(self.orientation) *", "robot.x y1 = startY +(x1 - startX) * np.tan(startOrientation) crosstrack", "motion self.x +=distance2 * np.cos(self.orientation) self.y +=distance2 * np.sin(self.orientation) self.orientation", "useful in particle filters self.steering_noise = steering_noise self.distance_noise = distance_noise", "= startY +(x1 - startX) * np.tan(startOrientation) crosstrack =(robot.y -", "#!/usr/bin/env python # -*- coding:utf-8 -*- import random import numpy", "% (self.x, self.y, self.orientation) def run_p(robot, tau, n=100, speed=1.0): x_trajectory", "robot.move(steering, distance) x_trajectory.append(robot.x) y_trajectory.append(robot.y) # when in theoriginal path, x=robot.x", "self.x =cx + (np.sin(self.orientation) * radius) self.y =cy - (np.cos(self.orientation)", "initializes location/orientation to 0, 0, 0. \"\"\" self.x = 0.0", ",caculate y. 
x1 = robot.x y1 = startY +(x1 -", "# -*- coding:utf-8 -*- import random import numpy as np", "* np.sin(self.orientation) self.orientation = (self.orientation + turn) % (2.0 *", "distance_noise def set_steering_drift(self, drift): \"\"\" Sets thesystematical steering drift parameter", "=random.gauss(steering, self.steering_noise) distance2 =random.gauss(distance, self.distance_noise) # applysteering drift steering2 +=self.steering_drift", "speed) x_trajectory.append(robot.x) y_trajectory.append(robot.y) return x_trajectory, y_trajectory robot = Robot() robot.set(0,", "y1) * np.cos(startOrientation) crosstrack_error.append(crosstrack) diff_CTE =crosstrack_error[i+1] - crosstrack_error[i] print(\"{} [{},", "\"\"\" if steering> max_steering_angle: steering= max_steering_angle if steering <-max_steering_angle: steering=", "drift steering2 +=self.steering_drift # Execute motion turn =np.tan(steering2) * distance2", "cte robot.move(steer, speed) x_trajectory.append(robot.x) y_trajectory.append(robot.y) return x_trajectory, y_trajectory robot =", "- tau_d * diff_CTE distance =speed robot.move(steering, distance) x_trajectory.append(robot.x) y_trajectory.append(robot.y)", "startY = robot.y startOrientation= robot.orientation distance = 0.0 for i", "steering= -max_steering_angle if distance< 0.0: distance= 0.0 # apply noise", "# this isoften useful in particle filters self.steering_noise = steering_noise", "= orientation % (2.0 * np.pi) def set_noise(self, steering_noise, distance_noise):", "tau_d * diff_CTE distance =speed robot.move(steering, distance) x_trajectory.append(robot.x) y_trajectory.append(robot.y) #", "figsize=(8, 8)) ax1.plot(x_trajectory, y_trajectory, 'g', label='PDcontroller') ax1.plot(x_trajectory, np.zeros(n), 'r', label='reference')", "(self.orientation + turn) % (2.0 * np.pi) self.x =cx +", "np.pi) def set_noise(self, steering_noise, distance_noise): \"\"\" Sets thenoise parameters. 
\"\"\"", "= 0.0 for i in range(n): steering =-tau_p * crosstrack_error[i]", "+ turn) % (2.0 * np.pi) self.x =cx + (np.sin(self.orientation)", "in range(n): steering =-tau_p * crosstrack_error[i] - tau_d * diff_CTE", "random import numpy as np import matplotlib.pyplot as plt class", "(np.sin(self.orientation) * radius) self.y =cy - (np.cos(self.orientation) * radius) def", "distance = 0.0 for i in range(n): steering =-tau_p *", "distance driven, most be non-negative \"\"\" if steering> max_steering_angle: steering=", "* radius) def __repr__(self): return'[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y,", "else: #approximate bicycle model for motion radius =distance2 / turn", "drift def move(self,steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0): \"\"\" steering", "max_steering_angle: steering= max_steering_angle if steering <-max_steering_angle: steering= -max_steering_angle if distance<", "crosstrack_error= [] crosstrack_error.append(0.0) diff_CTE = 0.0 startX = robot.x startY", "= distance_noise def set_steering_drift(self, drift): \"\"\" Sets thesystematical steering drift", "range(n): steering =-tau_p * crosstrack_error[i] - tau_d * diff_CTE distance", "steering> max_steering_angle: steering= max_steering_angle if steering <-max_steering_angle: steering= -max_steering_angle if", "[] crosstrack_error.append(0.0) diff_CTE = 0.0 startX = robot.x startY =", "self.distance_noise) # applysteering drift steering2 +=self.steering_drift # Execute motion turn", "radius =distance2 / turn cx =self.x - (np.sin(self.orientation) * radius)", "def set_steering_drift(self, drift): \"\"\" Sets thesystematical steering drift parameter \"\"\"", "if distance< 0.0: distance= 0.0 # apply noise steering2 =random.gauss(steering," ]
[ "<gh_stars>1000+ from .clip_sampler import DistributedSampler, UniformClipSampler, RandomClipSampler __all__ = (\"DistributedSampler\",", ".clip_sampler import DistributedSampler, UniformClipSampler, RandomClipSampler __all__ = (\"DistributedSampler\", \"UniformClipSampler\", \"RandomClipSampler\")", "from .clip_sampler import DistributedSampler, UniformClipSampler, RandomClipSampler __all__ = (\"DistributedSampler\", \"UniformClipSampler\"," ]
[ "print(\"\\tb.Camry\") print(\"\\tc.Fortuner\") pilih3 = input(\"Mana yang ingin anda pilih??\") if", "New Terios adalah 215 juta\") elif(Pilih1== \"c\"): print(\"Harga mobil New", "pada Honda >>>>>>>>>\") print(\"\\ta.Honda Brio Satya S\") print(\"\\tb.Honda Jazz \")", "560 Juta\") elif (pilih3==\"c\"): print(\"Harga mobil Fortuner adalah 492 Juta\")", "print(\"Harga mobil Grand New Xenia adalah 183 juta \") elif(Pilih1==", "print(\"<<<<<<<< Macam macam mobil pada Toyota>>>>>>>>?\") print(\"\\ta.Alphard\") print(\"\\tb.Camry\") print(\"\\tc.Fortuner\") pilih3", "Mobilio \") pilih2 = input(\"Mana yang ingin anda pilih??\") if(pilih2==\"a\"):", "Telp = \") print(\"\\n\") print(\"=================INFORMASI HARGA MOBIL DEALER JAYA ABADI===============\")", "mobil HOnda Brio Satya S adalah 131 juta\") elif(pilih2==\"b\"): print(\"Harga", "if (pilihan==1): print(\"<<<<<<<< Macam macam mobil pada Daihatsu >>>>>>>>>\") print(\"\\ta.Grand", "Macam macam mobil pada Daihatsu >>>>>>>>>\") print(\"\\ta.Grand New Xenia\") print(\"\\tb.All", "print(\"<<<<<<<< Macam macam mobil pada Honda >>>>>>>>>\") print(\"\\ta.Honda Brio Satya", "\") NoTelp = input(\"No Telp = \") print(\"\\n\") print(\"=================INFORMASI HARGA", "print(\"Tidak terdefinisi\") elif (pilihan==2): print(\"<<<<<<<< Macam macam mobil pada Honda", "print(\"\\t 3.Toyota \") print(\"\") pilihan = int(input(\"Pilih jenis mobil yang", "215 juta\") elif(Pilih1== \"c\"): print(\"Harga mobil New Ayla adalah 110", "juta\") elif(pilih2==\"c\"): print(\"Harga mobil Honda mobilio adalah 189 juta\") else:", "New Xenia\") print(\"\\tb.All New Terios\") print(\"\\tc.New Ayla\") Pilih1 = input(\"Mana", "juta\") else: print(\"Tidak terdefinisi\") elif (pilihan==3): print(\"<<<<<<<< Macam macam mobil", "\") print(\"\\t 3.Toyota \") print(\"\") pilihan = int(input(\"Pilih jenis mobil", "adalah 870 juta\") elif (pilih3==\"b\"): print(\"Harga mobil Camry adalah 560", "print(\"Harga mobil Camry adalah 560 Juta\") elif 
(pilih3==\"c\"): print(\"Harga mobil", "= \") NoTelp = input(\"No Telp = \") print(\"\\n\") print(\"=================INFORMASI", "mobil Camry adalah 560 Juta\") elif (pilih3==\"c\"): print(\"Harga mobil Fortuner", "print(\"Harga mobil New Ayla adalah 110 juta\") else: print(\"Tidak terdefinisi\")", "if(Pilih1 == \"a\"): print(\"Harga mobil Grand New Xenia adalah 183", "\") if(Pilih1 == \"a\"): print(\"Harga mobil Grand New Xenia adalah", "input(\"Mana yang ingin anda pilih??\") if (pilih3==\"a\"): print(\"Harga mobil Alphard", "131 juta\") elif(pilih2==\"b\"): print(\"Harga mobil Honda Jazz adalah 232 juta\")", "jenis mobil yang ingin dibeli : \")) print(\"\") if (pilihan==1):", "yang ingin anda pilih??\") if(pilih2==\"a\"): print(\"Harga mobil HOnda Brio Satya", "adalah 560 Juta\") elif (pilih3==\"c\"): print(\"Harga mobil Fortuner adalah 492", "1.Daihatsu \") print(\"\\t 2.Honda \") print(\"\\t 3.Toyota \") print(\"\") pilihan", "yang ingin anda pilih??\") if (pilih3==\"a\"): print(\"Harga mobil Alphard adalah", "mobil pada Toyota>>>>>>>>?\") print(\"\\ta.Alphard\") print(\"\\tb.Camry\") print(\"\\tc.Fortuner\") pilih3 = input(\"Mana yang", "input(\"Mana yang ingin anda pilih??\") if(pilih2==\"a\"): print(\"Harga mobil HOnda Brio", "name = input(\"masukkan nama pembeli = \") alamat= input(\"Alamat =", "elif (pilihan==2): print(\"<<<<<<<< Macam macam mobil pada Honda >>>>>>>>>\") print(\"\\ta.Honda", "Daihatsu >>>>>>>>>\") print(\"\\ta.Grand New Xenia\") print(\"\\tb.All New Terios\") print(\"\\tc.New Ayla\")", "All New Terios adalah 215 juta\") elif(Pilih1== \"c\"): print(\"Harga mobil", "elif(Pilih1== \"c\"): print(\"Harga mobil New Ayla adalah 110 juta\") else:", "870 juta\") elif (pilih3==\"b\"): print(\"Harga mobil Camry adalah 560 Juta\")", "print(\"Harga mobil All New Terios adalah 215 juta\") elif(Pilih1== \"c\"):", "mobil New Ayla adalah 110 juta\") else: print(\"Tidak terdefinisi\") elif", "183 juta \") elif(Pilih1== \"b\"): print(\"Harga mobil All New 
Terios", "adalah 232 juta\") elif(pilih2==\"c\"): print(\"Harga mobil Honda mobilio adalah 189", "ingin anda pilih ?? = \") if(Pilih1 == \"a\"): print(\"Harga", "pada Daihatsu >>>>>>>>>\") print(\"\\ta.Grand New Xenia\") print(\"\\tb.All New Terios\") print(\"\\tc.New", "anda pilih??\") if (pilih3==\"a\"): print(\"Harga mobil Alphard adalah 870 juta\")", "MOBIL DEALER JAYA ABADI===============\") print(\"Pilih Jenis Mobil :\") print(\"\\t 1.Daihatsu", "HOnda Brio Satya S adalah 131 juta\") elif(pilih2==\"b\"): print(\"Harga mobil", "mobil Honda Jazz adalah 232 juta\") elif(pilih2==\"c\"): print(\"Harga mobil Honda", "elif(Pilih1== \"b\"): print(\"Harga mobil All New Terios adalah 215 juta\")", "New Terios\") print(\"\\tc.New Ayla\") Pilih1 = input(\"Mana yang ingin anda", "input(\"masukkan nama pembeli = \") alamat= input(\"Alamat = \") NoTelp", "print(\"\\tc.New Ayla\") Pilih1 = input(\"Mana yang ingin anda pilih ??", "= input(\"Mana yang ingin anda pilih??\") if(pilih2==\"a\"): print(\"Harga mobil HOnda", "print(\"Harga mobil Honda mobilio adalah 189 juta\") else: print(\"Tidak terdefinisi\")", "New Xenia adalah 183 juta \") elif(Pilih1== \"b\"): print(\"Harga mobil", "input(\"Alamat = \") NoTelp = input(\"No Telp = \") print(\"\\n\")", "= int(input(\"Pilih jenis mobil yang ingin dibeli : \")) print(\"\")", "anda pilih ?? = \") if(Pilih1 == \"a\"): print(\"Harga mobil", "terdefinisi\") elif (pilihan==2): print(\"<<<<<<<< Macam macam mobil pada Honda >>>>>>>>>\")", "mobil Alphard adalah 870 juta\") elif (pilih3==\"b\"): print(\"Harga mobil Camry", "juta\") elif(Pilih1== \"c\"): print(\"Harga mobil New Ayla adalah 110 juta\")", "\") print(\"\") pilihan = int(input(\"Pilih jenis mobil yang ingin dibeli", "Terios\") print(\"\\tc.New Ayla\") Pilih1 = input(\"Mana yang ingin anda pilih", "elif (pilih3==\"b\"): print(\"Harga mobil Camry adalah 560 Juta\") elif (pilih3==\"c\"):", "input(\"Mana yang ingin anda pilih ?? 
= \") if(Pilih1 ==", "\") print(\"\\n\") print(\"=================INFORMASI HARGA MOBIL DEALER JAYA ABADI===============\") print(\"Pilih Jenis", "juta \") elif(Pilih1== \"b\"): print(\"Harga mobil All New Terios adalah", "== \"a\"): print(\"Harga mobil Grand New Xenia adalah 183 juta", ">>>>>>>>>\") print(\"\\ta.Honda Brio Satya S\") print(\"\\tb.Honda Jazz \") print(\"\\tb.Honda Mobilio", "= \") alamat= input(\"Alamat = \") NoTelp = input(\"No Telp", "print(\"Harga mobil Honda Jazz adalah 232 juta\") elif(pilih2==\"c\"): print(\"Harga mobil", "adalah 183 juta \") elif(Pilih1== \"b\"): print(\"Harga mobil All New", "S adalah 131 juta\") elif(pilih2==\"b\"): print(\"Harga mobil Honda Jazz adalah", "juta\") elif(pilih2==\"b\"): print(\"Harga mobil Honda Jazz adalah 232 juta\") elif(pilih2==\"c\"):", "print(\"Pilih Jenis Mobil :\") print(\"\\t 1.Daihatsu \") print(\"\\t 2.Honda \")", "New Ayla adalah 110 juta\") else: print(\"Tidak terdefinisi\") elif (pilihan==2):", "JAYA ABADI===============\") print(\"Pilih Jenis Mobil :\") print(\"\\t 1.Daihatsu \") print(\"\\t", "pilih??\") if(pilih2==\"a\"): print(\"Harga mobil HOnda Brio Satya S adalah 131", "mobilio adalah 189 juta\") else: print(\"Tidak terdefinisi\") elif (pilihan==3): print(\"<<<<<<<<", "macam mobil pada Toyota>>>>>>>>?\") print(\"\\ta.Alphard\") print(\"\\tb.Camry\") print(\"\\tc.Fortuner\") pilih3 = input(\"Mana", "2.Honda \") print(\"\\t 3.Toyota \") print(\"\") pilihan = int(input(\"Pilih jenis", "Xenia\") print(\"\\tb.All New Terios\") print(\"\\tc.New Ayla\") Pilih1 = input(\"Mana yang", "110 juta\") else: print(\"Tidak terdefinisi\") elif (pilihan==2): print(\"<<<<<<<< Macam macam", "else: print(\"Tidak terdefinisi\") elif (pilihan==2): print(\"<<<<<<<< Macam macam mobil pada", "if (pilih3==\"a\"): print(\"Harga mobil Alphard adalah 870 juta\") elif (pilih3==\"b\"):", "Ayla adalah 110 juta\") else: print(\"Tidak terdefinisi\") elif (pilihan==2): print(\"<<<<<<<<", "Macam macam mobil pada Honda 
>>>>>>>>>\") print(\"\\ta.Honda Brio Satya S\")", "232 juta\") elif(pilih2==\"c\"): print(\"Harga mobil Honda mobilio adalah 189 juta\")", "pilih3 = input(\"Mana yang ingin anda pilih??\") if (pilih3==\"a\"): print(\"Harga", "pembeli = \") alamat= input(\"Alamat = \") NoTelp = input(\"No", "int(input(\"Pilih jenis mobil yang ingin dibeli : \")) print(\"\") if", "input(\"No Telp = \") print(\"\\n\") print(\"=================INFORMASI HARGA MOBIL DEALER JAYA", "Jazz \") print(\"\\tb.Honda Mobilio \") pilih2 = input(\"Mana yang ingin", "mobil pada Daihatsu >>>>>>>>>\") print(\"\\ta.Grand New Xenia\") print(\"\\tb.All New Terios\")", "= \") print(\"\\n\") print(\"=================INFORMASI HARGA MOBIL DEALER JAYA ABADI===============\") print(\"Pilih", "terdefinisi\") elif (pilihan==3): print(\"<<<<<<<< Macam macam mobil pada Toyota>>>>>>>>?\") print(\"\\ta.Alphard\")", "mobil yang ingin dibeli : \")) print(\"\") if (pilihan==1): print(\"<<<<<<<<", "Honda Jazz adalah 232 juta\") elif(pilih2==\"c\"): print(\"Harga mobil Honda mobilio", "print(\"\\tb.All New Terios\") print(\"\\tc.New Ayla\") Pilih1 = input(\"Mana yang ingin", "mobil All New Terios adalah 215 juta\") elif(Pilih1== \"c\"): print(\"Harga", "(pilihan==1): print(\"<<<<<<<< Macam macam mobil pada Daihatsu >>>>>>>>>\") print(\"\\ta.Grand New", "pilih ?? = \") if(Pilih1 == \"a\"): print(\"Harga mobil Grand", "juta\") elif (pilih3==\"b\"): print(\"Harga mobil Camry adalah 560 Juta\") elif", "\")) print(\"\") if (pilihan==1): print(\"<<<<<<<< Macam macam mobil pada Daihatsu", "mobil pada Honda >>>>>>>>>\") print(\"\\ta.Honda Brio Satya S\") print(\"\\tb.Honda Jazz", "= input(\"Mana yang ingin anda pilih ?? = \") if(Pilih1", "\"b\"): print(\"Harga mobil All New Terios adalah 215 juta\") elif(Pilih1==", "Ayla\") Pilih1 = input(\"Mana yang ingin anda pilih ?? 
=", "adalah 215 juta\") elif(Pilih1== \"c\"): print(\"Harga mobil New Ayla adalah", "(pilih3==\"b\"): print(\"Harga mobil Camry adalah 560 Juta\") elif (pilih3==\"c\"): print(\"Harga", ">>>>>>>>>\") print(\"\\ta.Grand New Xenia\") print(\"\\tb.All New Terios\") print(\"\\tc.New Ayla\") Pilih1", "print(\"\\n\") print(\"=================INFORMASI HARGA MOBIL DEALER JAYA ABADI===============\") print(\"Pilih Jenis Mobil", "Macam macam mobil pada Toyota>>>>>>>>?\") print(\"\\ta.Alphard\") print(\"\\tb.Camry\") print(\"\\tc.Fortuner\") pilih3 =", "ingin anda pilih??\") if(pilih2==\"a\"): print(\"Harga mobil HOnda Brio Satya S", "print(\"\") if (pilihan==1): print(\"<<<<<<<< Macam macam mobil pada Daihatsu >>>>>>>>>\")", "elif(pilih2==\"c\"): print(\"Harga mobil Honda mobilio adalah 189 juta\") else: print(\"Tidak", "= input(\"masukkan nama pembeli = \") alamat= input(\"Alamat = \")", "nama pembeli = \") alamat= input(\"Alamat = \") NoTelp =", "pilihan = int(input(\"Pilih jenis mobil yang ingin dibeli : \"))", "print(\"Tidak terdefinisi\") elif (pilihan==3): print(\"<<<<<<<< Macam macam mobil pada Toyota>>>>>>>>?\")", "DEALER JAYA ABADI===============\") print(\"Pilih Jenis Mobil :\") print(\"\\t 1.Daihatsu \")", "pilih2 = input(\"Mana yang ingin anda pilih??\") if(pilih2==\"a\"): print(\"Harga mobil", "\") print(\"\\tb.Honda Mobilio \") pilih2 = input(\"Mana yang ingin anda", "\") print(\"\\t 2.Honda \") print(\"\\t 3.Toyota \") print(\"\") pilihan =", "3.Toyota \") print(\"\") pilihan = int(input(\"Pilih jenis mobil yang ingin", "alamat= input(\"Alamat = \") NoTelp = input(\"No Telp = \")", "?? 
= \") if(Pilih1 == \"a\"): print(\"Harga mobil Grand New", "print(\"\\ta.Alphard\") print(\"\\tb.Camry\") print(\"\\tc.Fortuner\") pilih3 = input(\"Mana yang ingin anda pilih??\")", "print(\"\\t 1.Daihatsu \") print(\"\\t 2.Honda \") print(\"\\t 3.Toyota \") print(\"\")", "elif(pilih2==\"b\"): print(\"Harga mobil Honda Jazz adalah 232 juta\") elif(pilih2==\"c\"): print(\"Harga", "\") elif(Pilih1== \"b\"): print(\"Harga mobil All New Terios adalah 215", "dibeli : \")) print(\"\") if (pilihan==1): print(\"<<<<<<<< Macam macam mobil", "Jenis Mobil :\") print(\"\\t 1.Daihatsu \") print(\"\\t 2.Honda \") print(\"\\t", "Pilih1 = input(\"Mana yang ingin anda pilih ?? = \")", "Toyota>>>>>>>>?\") print(\"\\ta.Alphard\") print(\"\\tb.Camry\") print(\"\\tc.Fortuner\") pilih3 = input(\"Mana yang ingin anda", "(pilihan==2): print(\"<<<<<<<< Macam macam mobil pada Honda >>>>>>>>>\") print(\"\\ta.Honda Brio", "print(\"\\ta.Honda Brio Satya S\") print(\"\\tb.Honda Jazz \") print(\"\\tb.Honda Mobilio \")", "print(\"Harga mobil Alphard adalah 870 juta\") elif (pilih3==\"b\"): print(\"Harga mobil", "print(\"\\ta.Grand New Xenia\") print(\"\\tb.All New Terios\") print(\"\\tc.New Ayla\") Pilih1 =", "Grand New Xenia adalah 183 juta \") elif(Pilih1== \"b\"): print(\"Harga", "Satya S\") print(\"\\tb.Honda Jazz \") print(\"\\tb.Honda Mobilio \") pilih2 =", "Jazz adalah 232 juta\") elif(pilih2==\"c\"): print(\"Harga mobil Honda mobilio adalah", "HARGA MOBIL DEALER JAYA ABADI===============\") print(\"Pilih Jenis Mobil :\") print(\"\\t", "\"a\"): print(\"Harga mobil Grand New Xenia adalah 183 juta \")", ": \")) print(\"\") if (pilihan==1): print(\"<<<<<<<< Macam macam mobil pada", "yang ingin dibeli : \")) print(\"\") if (pilihan==1): print(\"<<<<<<<< Macam", ":\") print(\"\\t 1.Daihatsu \") print(\"\\t 2.Honda \") print(\"\\t 3.Toyota \")", "print(\"<<<<<<<< Macam macam mobil pada Daihatsu >>>>>>>>>\") print(\"\\ta.Grand New Xenia\")", "mobil Honda mobilio adalah 189 juta\") else: 
print(\"Tidak terdefinisi\") elif", "S\") print(\"\\tb.Honda Jazz \") print(\"\\tb.Honda Mobilio \") pilih2 = input(\"Mana", "print(\"\\tb.Honda Jazz \") print(\"\\tb.Honda Mobilio \") pilih2 = input(\"Mana yang", "adalah 131 juta\") elif(pilih2==\"b\"): print(\"Harga mobil Honda Jazz adalah 232", "= input(\"No Telp = \") print(\"\\n\") print(\"=================INFORMASI HARGA MOBIL DEALER", "Terios adalah 215 juta\") elif(Pilih1== \"c\"): print(\"Harga mobil New Ayla", "print(\"\\tc.Fortuner\") pilih3 = input(\"Mana yang ingin anda pilih??\") if (pilih3==\"a\"):", "print(\"\") pilihan = int(input(\"Pilih jenis mobil yang ingin dibeli :", "pilih??\") if (pilih3==\"a\"): print(\"Harga mobil Alphard adalah 870 juta\") elif", "else: print(\"Tidak terdefinisi\") elif (pilihan==3): print(\"<<<<<<<< Macam macam mobil pada", "print(\"\\tb.Honda Mobilio \") pilih2 = input(\"Mana yang ingin anda pilih??\")", "print(\"\\t 2.Honda \") print(\"\\t 3.Toyota \") print(\"\") pilihan = int(input(\"Pilih", "mobil Grand New Xenia adalah 183 juta \") elif(Pilih1== \"b\"):", "Brio Satya S adalah 131 juta\") elif(pilih2==\"b\"): print(\"Harga mobil Honda", "NoTelp = input(\"No Telp = \") print(\"\\n\") print(\"=================INFORMASI HARGA MOBIL", "macam mobil pada Honda >>>>>>>>>\") print(\"\\ta.Honda Brio Satya S\") print(\"\\tb.Honda", "Brio Satya S\") print(\"\\tb.Honda Jazz \") print(\"\\tb.Honda Mobilio \") pilih2", "anda pilih??\") if(pilih2==\"a\"): print(\"Harga mobil HOnda Brio Satya S adalah", "(pilih3==\"a\"): print(\"Harga mobil Alphard adalah 870 juta\") elif (pilih3==\"b\"): print(\"Harga", "Mobil :\") print(\"\\t 1.Daihatsu \") print(\"\\t 2.Honda \") print(\"\\t 3.Toyota", "= \") if(Pilih1 == \"a\"): print(\"Harga mobil Grand New Xenia", "juta\") else: print(\"Tidak terdefinisi\") elif (pilihan==2): print(\"<<<<<<<< Macam macam mobil", "adalah 189 juta\") else: print(\"Tidak terdefinisi\") elif (pilihan==3): print(\"<<<<<<<< Macam", "Xenia adalah 183 juta \") 
elif(Pilih1== \"b\"): print(\"Harga mobil All", "pada Toyota>>>>>>>>?\") print(\"\\ta.Alphard\") print(\"\\tb.Camry\") print(\"\\tc.Fortuner\") pilih3 = input(\"Mana yang ingin", "= input(\"Mana yang ingin anda pilih??\") if (pilih3==\"a\"): print(\"Harga mobil", "\") pilih2 = input(\"Mana yang ingin anda pilih??\") if(pilih2==\"a\"): print(\"Harga", "if(pilih2==\"a\"): print(\"Harga mobil HOnda Brio Satya S adalah 131 juta\")", "macam mobil pada Daihatsu >>>>>>>>>\") print(\"\\ta.Grand New Xenia\") print(\"\\tb.All New", "(pilihan==3): print(\"<<<<<<<< Macam macam mobil pada Toyota>>>>>>>>?\") print(\"\\ta.Alphard\") print(\"\\tb.Camry\") print(\"\\tc.Fortuner\")", "print(\"Harga mobil HOnda Brio Satya S adalah 131 juta\") elif(pilih2==\"b\"):", "Alphard adalah 870 juta\") elif (pilih3==\"b\"): print(\"Harga mobil Camry adalah", "ingin dibeli : \")) print(\"\") if (pilihan==1): print(\"<<<<<<<< Macam macam", "yang ingin anda pilih ?? = \") if(Pilih1 == \"a\"):", "\"c\"): print(\"Harga mobil New Ayla adalah 110 juta\") else: print(\"Tidak", "ABADI===============\") print(\"Pilih Jenis Mobil :\") print(\"\\t 1.Daihatsu \") print(\"\\t 2.Honda", "189 juta\") else: print(\"Tidak terdefinisi\") elif (pilihan==3): print(\"<<<<<<<< Macam macam", "Honda >>>>>>>>>\") print(\"\\ta.Honda Brio Satya S\") print(\"\\tb.Honda Jazz \") print(\"\\tb.Honda", "elif (pilihan==3): print(\"<<<<<<<< Macam macam mobil pada Toyota>>>>>>>>?\") print(\"\\ta.Alphard\") print(\"\\tb.Camry\")", "adalah 110 juta\") else: print(\"Tidak terdefinisi\") elif (pilihan==2): print(\"<<<<<<<< Macam", "\") alamat= input(\"Alamat = \") NoTelp = input(\"No Telp =", "ingin anda pilih??\") if (pilih3==\"a\"): print(\"Harga mobil Alphard adalah 870", "Camry adalah 560 Juta\") elif (pilih3==\"c\"): print(\"Harga mobil Fortuner adalah", "print(\"=================INFORMASI HARGA MOBIL DEALER JAYA ABADI===============\") print(\"Pilih Jenis Mobil :\")", "Honda mobilio adalah 189 juta\") else: print(\"Tidak 
terdefinisi\") elif (pilihan==3):", "Satya S adalah 131 juta\") elif(pilih2==\"b\"): print(\"Harga mobil Honda Jazz" ]
[ "l1loss_mean, \"of_l1_loss_sum\": l1loss_sum, } of_out_l1loss_dict = oneflow_l1loss(input, target) assert np.allclose(", "= { \"np_l1_loss\": np_l1, \"np_l1_loss_mean\": np_l1_mean, \"np_l1_loss_sum\": np_l1_sum, } return", "arg_dict[\"device_type\"] = [device_type] arg_dict[\"machine_ids\"] = [machine_ids] arg_dict[\"device_counts\"] = [device_counts] return", "diff original_shape = np_target.shape elemcnt = np_target.size prediction = np_input.reshape(-1)", "+ v # watch the diff flow.watch_diff(x_var, assert_prediction_grad) l1loss =", "= prediction[i] - label[i] prediction_grad[i] = np.sign(diff) grad_mean = prediction_grad.reshape(original_shape)", "32, 16), device_type=\"gpu\", machine_ids=\"0:0-1\", device_counts=2 ) for arg in GenArgList(arg_dict):", "as flow import numpy as np import oneflow.typing as tp", "np_input) np_l1_mean = np.mean(np_l1) np_l1_sum = np.sum(np_l1) np_l1_dict = {", "assert_prediction_grad(blob: tp.Numpy): # Evaluate the gradient. Here we only test", "flow import numpy as np import oneflow.typing as tp from", "watch the diff flow.watch_diff(x_var, assert_prediction_grad) l1loss = flow.nn.L1Loss(x_var, of_target, reduction=\"none\",", "numpy as np import oneflow.typing as tp from test_util import", "np.random.random(size=target_shape).astype(np.float32) assert device_type in [\"cpu\", \"gpu\"] func_config = flow.FunctionConfig() flow.clear_default_session()", ") x_var = of_input + v # watch the diff", "cpu cases\") def test_l1loss_gpu_1n2d(test_case): arg_dict = _gen_arg_dict( shape=(3, 32, 16),", "target = np.random.random(size=target_shape).astype(np.float32) assert device_type in [\"cpu\", \"gpu\"] func_config =", "\"gpu\"] func_config = flow.FunctionConfig() flow.clear_default_session() if device_type == \"cpu\": flow.config.cpu_device_num(device_counts)", "Apache License, Version 2.0 (the \"License\"); you may not use", "assert np.allclose( of_out_l1loss_dict[\"of_l1_loss\"], np_out_l1loss_dict[\"np_l1_loss\"] ) assert 
np.allclose( of_out_l1loss_dict[\"of_l1_loss_mean\"][0], np_out_l1loss_dict[\"np_l1_loss_mean\"] )", "device_type, machine_ids, device_counts): # Generate a dict to pass parameter", "np_target.reshape(-1) prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype) for i in np.arange(elemcnt): diff =", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "} return np_l1_dict def np_l1_loss_diff(np_input, np_target): # Use numpy to", "of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law", "[shape] arg_dict[\"device_type\"] = [device_type] arg_dict[\"machine_ids\"] = [machine_ids] arg_dict[\"device_counts\"] = [device_counts]", "flow.watch_diff(x_var, assert_prediction_grad) l1loss = flow.nn.L1Loss(x_var, of_target, reduction=\"none\", name=\"of_l1loss\") l1loss_mean =", "target_shape, device_type, machine_ids, device_counts ): input = np.random.random(size=input_shape).astype(np.float32) target =", "machine_ids, device_counts ): input = np.random.random(size=input_shape).astype(np.float32) target = np.random.random(size=target_shape).astype(np.float32) assert", "np_out_l1loss_dict = np_l1loss(input, target) # Use Numpy to compute l1", "for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) if __name__ == \"__main__\": unittest.main()", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "= [device_type] arg_dict[\"machine_ids\"] = [machine_ids] arg_dict[\"device_counts\"] = [device_counts] return arg_dict", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "test reduction=\"mean\" diff flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(l1loss_mean) return {", "device_counts): # Generate a dict to pass parameter to test", "= \"sum\", you can use the follow code # grad_sum", "the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or", "import Dict import os def _compare_l1loss_with_np( 
input_shape, target_shape, device_type, machine_ids,", "The OneFlow Authors. All rights reserved. Licensed under the Apache", "ANY KIND, either express or implied. See the License for", "= _gen_arg_dict( shape=(3, 32, 16), device_type=\"gpu\", machine_ids=\"0:0-1\", device_counts=2 ) for", "prediction = np_input.reshape(-1) label = np_target.reshape(-1) prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype) for", "tp.Numpy.Placeholder(shape=target.shape), ) -> Dict[str, tp.Numpy]: with flow.scope.placement(device_type, \"0:0\"): v =", "http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in", "may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless", "reserved. Licensed under the Apache License, Version 2.0 (the \"License\");", "func_config.default_logical_view(flow.scope.consistent_view()) def np_l1loss(np_input, np_target): np_l1 = np.abs(np_target - np_input) np_l1_mean", "np.arange(elemcnt): diff = prediction[i] - label[i] prediction_grad[i] = np.sign(diff) grad_mean", "arg_dict = _gen_arg_dict( shape=(3, 16, 32), device_type=\"gpu\", machine_ids=\"0:0\", device_counts=1 )", "test_l1loss_gpu(test_case): arg_dict = _gen_arg_dict( shape=(3, 16, 32), device_type=\"gpu\", machine_ids=\"0:0\", device_counts=1", "= flow.nn.L1Loss( x_var, of_target, reduction=\"sum\", name=\"of_l1loss_sum\" ) with flow.scope.placement(device_type, \"0:0\"):", "Use Numpy to compute l1 grad np_grad_dict = np_l1_loss_diff(input, target)", "import os def _compare_l1loss_with_np( input_shape, target_shape, device_type, machine_ids, device_counts ):", "under the License is distributed on an \"AS IS\" BASIS,", "= _gen_arg_dict( shape=(16, 3), device_type=\"cpu\", machine_ids=\"0:0\", device_counts=1 ) for arg", "governing permissions and limitations under the License. 
\"\"\" import oneflow", "assert np.allclose(blob, np_grad_dict[\"np_grad_mean\"]) @flow.global_function(type=\"train\", function_config=func_config) def oneflow_l1loss( of_input: tp.Numpy.Placeholder(shape=input.shape), of_target:", "np_l1 = np.abs(np_target - np_input) np_l1_mean = np.mean(np_l1) np_l1_sum =", "= np_target.shape elemcnt = np_target.size prediction = np_input.reshape(-1) label =", "l1loss_sum = flow.nn.L1Loss( x_var, of_target, reduction=\"sum\", name=\"of_l1loss_sum\" ) with flow.scope.placement(device_type,", "of_target, reduction=\"sum\", name=\"of_l1loss_sum\" ) with flow.scope.placement(device_type, \"0:0\"): # We only", "if device_type == \"cpu\": flow.config.cpu_device_num(device_counts) else: flow.config.gpu_device_num(device_counts) func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids)) func_config.default_logical_view(flow.scope.consistent_view())", ") with flow.scope.placement(device_type, \"0:0\"): # We only test reduction=\"mean\" diff", "\"of_l1_loss\": l1loss, \"of_l1_loss_mean\": l1loss_mean, \"of_l1_loss_sum\": l1loss_sum, } of_out_l1loss_dict = oneflow_l1loss(input,", "get the grad when the reduction = \"sum\", you can", "this file except in compliance with the License. You may", "follow code # grad_sum = prediction_grad.reshape(original_shape) grad_dict = { \"np_grad_mean\":", "the License. \"\"\" import oneflow as flow import numpy as", "under the License. 
\"\"\" import oneflow as flow import numpy", "i in np.arange(elemcnt): diff = prediction[i] - label[i] prediction_grad[i] =", "16, 32), device_type=\"gpu\", machine_ids=\"0:0\", device_counts=1 ) for arg in GenArgList(arg_dict):", "def np_l1_loss_diff(np_input, np_target): # Use numpy to compute diff original_shape", "the grad when the reduction = \"sum\", you can use", "a dict to pass parameter to test case arg_dict =", "= prediction_grad.reshape(original_shape) / elemcnt # TODO: if you want to", "test case arg_dict = OrderedDict() arg_dict[\"input_shape\"] = [shape] arg_dict[\"target_shape\"] =", "oneflow_l1loss( of_input: tp.Numpy.Placeholder(shape=input.shape), of_target: tp.Numpy.Placeholder(shape=target.shape), ) -> Dict[str, tp.Numpy]: with", "[1e-3]), momentum=0 ).minimize(l1loss_mean) return { \"of_l1_loss\": l1loss, \"of_l1_loss_mean\": l1loss_mean, \"of_l1_loss_sum\":", "diff flow.watch_diff(x_var, assert_prediction_grad) l1loss = flow.nn.L1Loss(x_var, of_target, reduction=\"none\", name=\"of_l1loss\") l1loss_mean", "flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(l1loss_mean) return { \"of_l1_loss\": l1loss, \"of_l1_loss_mean\":", "np_l1_loss_diff(input, target) def assert_prediction_grad(blob: tp.Numpy): # Evaluate the gradient. Here", "test_l1loss_cpu(test_case): arg_dict = _gen_arg_dict( shape=(16, 3), device_type=\"cpu\", machine_ids=\"0:0\", device_counts=1 )", "name=\"of_l1loss\") l1loss_mean = flow.nn.L1Loss( x_var, of_target, reduction=\"mean\", name=\"of_l1loss_mean\" ) l1loss_sum", "assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_sum\"][0], np_out_l1loss_dict[\"np_l1_loss_sum\"] ) def _gen_arg_dict(shape, device_type, machine_ids, device_counts):", "OneFlow Authors. All rights reserved. Licensed under the Apache License,", "Authors. All rights reserved. 
Licensed under the Apache License, Version", "want to get the grad when the reduction = \"sum\",", "compute l1 loss np_out_l1loss_dict = np_l1loss(input, target) # Use Numpy", "Use Numpy to compute l1 loss np_out_l1loss_dict = np_l1loss(input, target)", "elemcnt = np_target.size prediction = np_input.reshape(-1) label = np_target.reshape(-1) prediction_grad", "x_var = of_input + v # watch the diff flow.watch_diff(x_var,", "return np_l1_dict def np_l1_loss_diff(np_input, np_target): # Use numpy to compute", "[device_type] arg_dict[\"machine_ids\"] = [machine_ids] arg_dict[\"device_counts\"] = [device_counts] return arg_dict @flow.unittest.skip_unless_1n1d()", "l1 grad np_grad_dict = np_l1_loss_diff(input, target) def assert_prediction_grad(blob: tp.Numpy): #", "file except in compliance with the License. You may obtain", "# Evaluate the gradient. Here we only test the reduction", "import numpy as np import oneflow.typing as tp from test_util", "for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")", "3), device_type=\"cpu\", machine_ids=\"0:0\", device_counts=1 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg)", "test cpu cases\") def test_l1loss_gpu(test_case): arg_dict = _gen_arg_dict( shape=(3, 16,", "OR CONDITIONS OF ANY KIND, either express or implied. 
See", "tp from test_util import GenArgList import unittest from collections import", "def _compare_l1loss_with_np( input_shape, target_shape, device_type, machine_ids, device_counts ): input =", "input_shape, target_shape, device_type, machine_ids, device_counts ): input = np.random.random(size=input_shape).astype(np.float32) target", "under the Apache License, Version 2.0 (the \"License\"); you may", "@flow.unittest.skip_unless_1n2d() class Testl1loss1n2d(flow.unittest.TestCase): @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\") def test_l1loss_gpu_1n2d(test_case):", "= np.random.random(size=target_shape).astype(np.float32) assert device_type in [\"cpu\", \"gpu\"] func_config = flow.FunctionConfig()", "of_out_l1loss_dict[\"of_l1_loss_sum\"][0], np_out_l1loss_dict[\"np_l1_loss_sum\"] ) def _gen_arg_dict(shape, device_type, machine_ids, device_counts): # Generate", "\"only test cpu cases\") def test_l1loss_gpu_1n2d(test_case): arg_dict = _gen_arg_dict( shape=(3,", "= np.sign(diff) grad_mean = prediction_grad.reshape(original_shape) / elemcnt # TODO: if", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "See the License for the specific language governing permissions and", "np.random.random(size=input_shape).astype(np.float32) target = np.random.random(size=target_shape).astype(np.float32) assert device_type in [\"cpu\", \"gpu\"] func_config", "np_l1_dict = { \"np_l1_loss\": np_l1, \"np_l1_loss_mean\": np_l1_mean, \"np_l1_loss_sum\": np_l1_sum, }", "the reduction type == \"mean\" assert np.allclose(blob, np_grad_dict[\"np_grad_mean\"]) @flow.global_function(type=\"train\", function_config=func_config)", "= np.sum(np_l1) np_l1_dict = { \"np_l1_loss\": np_l1, \"np_l1_loss_mean\": np_l1_mean, \"np_l1_loss_sum\":", "only test the reduction type == \"mean\" assert np.allclose(blob, np_grad_dict[\"np_grad_mean\"])", "tp.Numpy): # Evaluate the gradient. 
Here we only test the", "in writing, software distributed under the License is distributed on", "required by applicable law or agreed to in writing, software", "for i in np.arange(elemcnt): diff = prediction[i] - label[i] prediction_grad[i]", "[shape] arg_dict[\"target_shape\"] = [shape] arg_dict[\"device_type\"] = [device_type] arg_dict[\"machine_ids\"] = [machine_ids]", "np import oneflow.typing as tp from test_util import GenArgList import", ") for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @flow.unittest.skip_unless_1n2d() class Testl1loss1n2d(flow.unittest.TestCase): @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"),", "Dict import os def _compare_l1loss_with_np( input_shape, target_shape, device_type, machine_ids, device_counts", "np_l1_mean = np.mean(np_l1) np_l1_sum = np.sum(np_l1) np_l1_dict = { \"np_l1_loss\":", "np_target.size prediction = np_input.reshape(-1) label = np_target.reshape(-1) prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype)", "x_var, of_target, reduction=\"mean\", name=\"of_l1loss_mean\" ) l1loss_sum = flow.nn.L1Loss( x_var, of_target,", "@unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\") def test_l1loss_gpu(test_case): arg_dict = _gen_arg_dict(", "np_l1_mean, \"np_l1_loss_sum\": np_l1_sum, } return np_l1_dict def np_l1_loss_diff(np_input, np_target): #", "= [shape] arg_dict[\"device_type\"] = [device_type] arg_dict[\"machine_ids\"] = [machine_ids] arg_dict[\"device_counts\"] =", "we only test the reduction type == \"mean\" assert np.allclose(blob,", "\"np_l1_loss\": np_l1, \"np_l1_loss_mean\": np_l1_mean, \"np_l1_loss_sum\": np_l1_sum, } return np_l1_dict def", "prediction_grad.reshape(original_shape) grad_dict = { \"np_grad_mean\": grad_mean, } return grad_dict #", "v = flow.get_variable( shape=target.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), name=\"v\", ) x_var =", "of_out_l1loss_dict = oneflow_l1loss(input, target) assert np.allclose( 
of_out_l1loss_dict[\"of_l1_loss\"], np_out_l1loss_dict[\"np_l1_loss\"] ) assert", "numpy to compute diff original_shape = np_target.shape elemcnt = np_target.size", "software distributed under the License is distributed on an \"AS", "distributed under the License is distributed on an \"AS IS\"", ") assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_mean\"][0], np_out_l1loss_dict[\"np_l1_loss_mean\"] ) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_sum\"][0], np_out_l1loss_dict[\"np_l1_loss_sum\"]", "and limitations under the License. \"\"\" import oneflow as flow", "np.allclose( of_out_l1loss_dict[\"of_l1_loss_sum\"][0], np_out_l1loss_dict[\"np_l1_loss_sum\"] ) def _gen_arg_dict(shape, device_type, machine_ids, device_counts): #", "CONDITIONS OF ANY KIND, either express or implied. See the", "diff = prediction[i] - label[i] prediction_grad[i] = np.sign(diff) grad_mean =", "OrderedDict() arg_dict[\"input_shape\"] = [shape] arg_dict[\"target_shape\"] = [shape] arg_dict[\"device_type\"] = [device_type]", "Version 2.0 (the \"License\"); you may not use this file", "x_var, of_target, reduction=\"sum\", name=\"of_l1loss_sum\" ) with flow.scope.placement(device_type, \"0:0\"): # We", "= [device_counts] return arg_dict @flow.unittest.skip_unless_1n1d() class Testl1loss1n1d(flow.unittest.TestCase): def test_l1loss_cpu(test_case): arg_dict", "Numpy to compute l1 loss np_out_l1loss_dict = np_l1loss(input, target) #", "not use this file except in compliance with the License.", "2.0 (the \"License\"); you may not use this file except", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable", "np.abs(np_target - np_input) np_l1_mean = np.mean(np_l1) np_l1_sum = np.sum(np_l1) np_l1_dict", "gradient. 
Here we only test the reduction type == \"mean\"", "= OrderedDict() arg_dict[\"input_shape\"] = [shape] arg_dict[\"target_shape\"] = [shape] arg_dict[\"device_type\"] =", "machine_ids)) func_config.default_logical_view(flow.scope.consistent_view()) def np_l1loss(np_input, np_target): np_l1 = np.abs(np_target - np_input)", "np_target): # Use numpy to compute diff original_shape = np_target.shape", "you may not use this file except in compliance with", "\"mean\" assert np.allclose(blob, np_grad_dict[\"np_grad_mean\"]) @flow.global_function(type=\"train\", function_config=func_config) def oneflow_l1loss( of_input: tp.Numpy.Placeholder(shape=input.shape),", "@flow.unittest.skip_unless_1n1d() class Testl1loss1n1d(flow.unittest.TestCase): def test_l1loss_cpu(test_case): arg_dict = _gen_arg_dict( shape=(16, 3),", "test_l1loss_gpu_1n2d(test_case): arg_dict = _gen_arg_dict( shape=(3, 32, 16), device_type=\"gpu\", machine_ids=\"0:0-1\", device_counts=2", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "import OrderedDict from typing import Dict import os def _compare_l1loss_with_np(", "the License. You may obtain a copy of the License", "32), device_type=\"gpu\", machine_ids=\"0:0\", device_counts=1 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg)", "TODO: if you want to get the grad when the", "momentum=0 ).minimize(l1loss_mean) return { \"of_l1_loss\": l1loss, \"of_l1_loss_mean\": l1loss_mean, \"of_l1_loss_sum\": l1loss_sum,", "use this file except in compliance with the License. 
You", "\"\"\" import oneflow as flow import numpy as np import", "machine_ids=\"0:0-1\", device_counts=2 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) if __name__", "target) # Use Numpy to compute l1 grad np_grad_dict =", "np.allclose( of_out_l1loss_dict[\"of_l1_loss_mean\"][0], np_out_l1loss_dict[\"np_l1_loss_mean\"] ) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_sum\"][0], np_out_l1loss_dict[\"np_l1_loss_sum\"] ) def", ").minimize(l1loss_mean) return { \"of_l1_loss\": l1loss, \"of_l1_loss_mean\": l1loss_mean, \"of_l1_loss_sum\": l1loss_sum, }", "l1loss, \"of_l1_loss_mean\": l1loss_mean, \"of_l1_loss_sum\": l1loss_sum, } of_out_l1loss_dict = oneflow_l1loss(input, target)", "you want to get the grad when the reduction =", "np_out_l1loss_dict[\"np_l1_loss_mean\"] ) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_sum\"][0], np_out_l1loss_dict[\"np_l1_loss_sum\"] ) def _gen_arg_dict(shape, device_type,", "np_out_l1loss_dict[\"np_l1_loss_sum\"] ) def _gen_arg_dict(shape, device_type, machine_ids, device_counts): # Generate a", "np.sum(np_l1) np_l1_dict = { \"np_l1_loss\": np_l1, \"np_l1_loss_mean\": np_l1_mean, \"np_l1_loss_sum\": np_l1_sum,", "shape=(3, 32, 16), device_type=\"gpu\", machine_ids=\"0:0-1\", device_counts=2 ) for arg in", "[\"cpu\", \"gpu\"] func_config = flow.FunctionConfig() flow.clear_default_session() if device_type == \"cpu\":", "device_counts ): input = np.random.random(size=input_shape).astype(np.float32) target = np.random.random(size=target_shape).astype(np.float32) assert device_type", "shape=target.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), name=\"v\", ) x_var = of_input + v", "in np.arange(elemcnt): diff = prediction[i] - label[i] prediction_grad[i] = np.sign(diff)", "_gen_arg_dict( shape=(3, 16, 32), device_type=\"gpu\", machine_ids=\"0:0\", device_counts=1 ) for arg", ") for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) 
@unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu", "the diff flow.watch_diff(x_var, assert_prediction_grad) l1loss = flow.nn.L1Loss(x_var, of_target, reduction=\"none\", name=\"of_l1loss\")", "of_out_l1loss_dict[\"of_l1_loss_mean\"][0], np_out_l1loss_dict[\"np_l1_loss_mean\"] ) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_sum\"][0], np_out_l1loss_dict[\"np_l1_loss_sum\"] ) def _gen_arg_dict(shape,", "np.mean(np_l1) np_l1_sum = np.sum(np_l1) np_l1_dict = { \"np_l1_loss\": np_l1, \"np_l1_loss_mean\":", "machine_ids=\"0:0\", device_counts=1 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only", "rights reserved. Licensed under the Apache License, Version 2.0 (the", "func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids)) func_config.default_logical_view(flow.scope.consistent_view()) def np_l1loss(np_input, np_target): np_l1 = np.abs(np_target -", "grad_mean = prediction_grad.reshape(original_shape) / elemcnt # TODO: if you want", "to compute l1 grad np_grad_dict = np_l1_loss_diff(input, target) def assert_prediction_grad(blob:", "(the \"License\"); you may not use this file except in", "return { \"of_l1_loss\": l1loss, \"of_l1_loss_mean\": l1loss_mean, \"of_l1_loss_sum\": l1loss_sum, } of_out_l1loss_dict", "elemcnt # TODO: if you want to get the grad", "np_grad_dict = np_l1_loss_diff(input, target) def assert_prediction_grad(blob: tp.Numpy): # Evaluate the", "@unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\") def test_l1loss_gpu_1n2d(test_case): arg_dict = _gen_arg_dict(", "device_type, machine_ids, device_counts ): input = np.random.random(size=input_shape).astype(np.float32) target = np.random.random(size=target_shape).astype(np.float32)", "Numpy to compute l1 grad np_grad_dict = np_l1_loss_diff(input, target) def", "test the reduction type == \"mean\" assert np.allclose(blob, np_grad_dict[\"np_grad_mean\"]) 
@flow.global_function(type=\"train\",", "np.allclose(blob, np_grad_dict[\"np_grad_mean\"]) @flow.global_function(type=\"train\", function_config=func_config) def oneflow_l1loss( of_input: tp.Numpy.Placeholder(shape=input.shape), of_target: tp.Numpy.Placeholder(shape=target.shape),", "\"of_l1_loss_sum\": l1loss_sum, } of_out_l1loss_dict = oneflow_l1loss(input, target) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss\"],", "from typing import Dict import os def _compare_l1loss_with_np( input_shape, target_shape,", "device_type=\"gpu\", machine_ids=\"0:0-1\", device_counts=2 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) if", "flow.config.cpu_device_num(device_counts) else: flow.config.gpu_device_num(device_counts) func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids)) func_config.default_logical_view(flow.scope.consistent_view()) def np_l1loss(np_input, np_target): np_l1", "reduction = \"sum\", you can use the follow code #", "You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "shape=(16, 3), device_type=\"cpu\", machine_ids=\"0:0\", device_counts=1 ) for arg in GenArgList(arg_dict):", "{ \"of_l1_loss\": l1loss, \"of_l1_loss_mean\": l1loss_mean, \"of_l1_loss_sum\": l1loss_sum, } of_out_l1loss_dict =", "func_config = flow.FunctionConfig() flow.clear_default_session() if device_type == \"cpu\": flow.config.cpu_device_num(device_counts) else:", "= { \"np_grad_mean\": grad_mean, } return grad_dict # Use Numpy", "def test_l1loss_cpu(test_case): arg_dict = _gen_arg_dict( shape=(16, 3), device_type=\"cpu\", machine_ids=\"0:0\", device_counts=1", "\"sum\", you can use the follow code # grad_sum =", "= flow.FunctionConfig() flow.clear_default_session() if device_type == \"cpu\": flow.config.cpu_device_num(device_counts) else: flow.config.gpu_device_num(device_counts)", "input = np.random.random(size=input_shape).astype(np.float32) target = np.random.random(size=target_shape).astype(np.float32) 
assert device_type in [\"cpu\",", "in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\") def test_l1loss_gpu(test_case):", "the Apache License, Version 2.0 (the \"License\"); you may not", "or implied. See the License for the specific language governing", "KIND, either express or implied. See the License for the", "_gen_arg_dict( shape=(3, 32, 16), device_type=\"gpu\", machine_ids=\"0:0-1\", device_counts=2 ) for arg", "to in writing, software distributed under the License is distributed", ") for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) if __name__ == \"__main__\":", "_compare_l1loss_with_np(*arg) @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\") def test_l1loss_gpu(test_case): arg_dict =", "law or agreed to in writing, software distributed under the", "for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @flow.unittest.skip_unless_1n2d() class Testl1loss1n2d(flow.unittest.TestCase): @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only", "Testl1loss1n1d(flow.unittest.TestCase): def test_l1loss_cpu(test_case): arg_dict = _gen_arg_dict( shape=(16, 3), device_type=\"cpu\", machine_ids=\"0:0\",", "flow.config.gpu_device_num(device_counts) func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids)) func_config.default_logical_view(flow.scope.consistent_view()) def np_l1loss(np_input, np_target): np_l1 = np.abs(np_target", "l1 loss np_out_l1loss_dict = np_l1loss(input, target) # Use Numpy to", "flow.nn.L1Loss( x_var, of_target, reduction=\"mean\", name=\"of_l1loss_mean\" ) l1loss_sum = flow.nn.L1Loss( x_var,", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "loss np_out_l1loss_dict = np_l1loss(input, target) # Use Numpy to compute", "of_input + v # watch the diff flow.watch_diff(x_var, assert_prediction_grad) l1loss", "code # grad_sum = prediction_grad.reshape(original_shape) grad_dict = 
{ \"np_grad_mean\": grad_mean,", "License. \"\"\" import oneflow as flow import numpy as np", "limitations under the License. \"\"\" import oneflow as flow import", "can use the follow code # grad_sum = prediction_grad.reshape(original_shape) grad_dict", "for the specific language governing permissions and limitations under the", "= np.zeros((elemcnt)).astype(prediction.dtype) for i in np.arange(elemcnt): diff = prediction[i] -", "# Use Numpy to compute l1 loss np_out_l1loss_dict = np_l1loss(input,", "oneflow as flow import numpy as np import oneflow.typing as", "flow.get_variable( shape=target.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), name=\"v\", ) x_var = of_input +", "to compute l1 loss np_out_l1loss_dict = np_l1loss(input, target) # Use", "prediction_grad.reshape(original_shape) / elemcnt # TODO: if you want to get", "prediction[i] - label[i] prediction_grad[i] = np.sign(diff) grad_mean = prediction_grad.reshape(original_shape) /", "= flow.nn.L1Loss(x_var, of_target, reduction=\"none\", name=\"of_l1loss\") l1loss_mean = flow.nn.L1Loss( x_var, of_target,", "target) def assert_prediction_grad(blob: tp.Numpy): # Evaluate the gradient. 
Here we", "arg_dict[\"target_shape\"] = [shape] arg_dict[\"device_type\"] = [device_type] arg_dict[\"machine_ids\"] = [machine_ids] arg_dict[\"device_counts\"]", "of_out_l1loss_dict[\"of_l1_loss\"], np_out_l1loss_dict[\"np_l1_loss\"] ) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_mean\"][0], np_out_l1loss_dict[\"np_l1_loss_mean\"] ) assert np.allclose(", "compute l1 grad np_grad_dict = np_l1_loss_diff(input, target) def assert_prediction_grad(blob: tp.Numpy):", "target) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss\"], np_out_l1loss_dict[\"np_l1_loss\"] ) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_mean\"][0], np_out_l1loss_dict[\"np_l1_loss_mean\"]", "Generate a dict to pass parameter to test case arg_dict", "the License for the specific language governing permissions and limitations", "may not use this file except in compliance with the", "of_target, reduction=\"mean\", name=\"of_l1loss_mean\" ) l1loss_sum = flow.nn.L1Loss( x_var, of_target, reduction=\"sum\",", "_compare_l1loss_with_np( input_shape, target_shape, device_type, machine_ids, device_counts ): input = np.random.random(size=input_shape).astype(np.float32)", "implied. See the License for the specific language governing permissions", "as tp from test_util import GenArgList import unittest from collections", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "[device_counts] return arg_dict @flow.unittest.skip_unless_1n1d() class Testl1loss1n1d(flow.unittest.TestCase): def test_l1loss_cpu(test_case): arg_dict =", "def assert_prediction_grad(blob: tp.Numpy): # Evaluate the gradient. Here we only", "return arg_dict @flow.unittest.skip_unless_1n1d() class Testl1loss1n1d(flow.unittest.TestCase): def test_l1loss_cpu(test_case): arg_dict = _gen_arg_dict(", "Use numpy to compute diff original_shape = np_target.shape elemcnt =", "GenArgList import unittest from collections import OrderedDict from typing import", "\"\"\" Copyright 2020 The OneFlow Authors. All rights reserved. 
Licensed", "= [machine_ids] arg_dict[\"device_counts\"] = [device_counts] return arg_dict @flow.unittest.skip_unless_1n1d() class Testl1loss1n1d(flow.unittest.TestCase):", "only test reduction=\"mean\" diff flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(l1loss_mean) return", "arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\") def", "in [\"cpu\", \"gpu\"] func_config = flow.FunctionConfig() flow.clear_default_session() if device_type ==", "= prediction_grad.reshape(original_shape) grad_dict = { \"np_grad_mean\": grad_mean, } return grad_dict", "arg_dict = OrderedDict() arg_dict[\"input_shape\"] = [shape] arg_dict[\"target_shape\"] = [shape] arg_dict[\"device_type\"]", "def np_l1loss(np_input, np_target): np_l1 = np.abs(np_target - np_input) np_l1_mean =", "def test_l1loss_gpu(test_case): arg_dict = _gen_arg_dict( shape=(3, 16, 32), device_type=\"gpu\", machine_ids=\"0:0\",", "\"np_l1_loss_mean\": np_l1_mean, \"np_l1_loss_sum\": np_l1_sum, } return np_l1_dict def np_l1_loss_diff(np_input, np_target):", "np.zeros((elemcnt)).astype(prediction.dtype) for i in np.arange(elemcnt): diff = prediction[i] - label[i]", "reduction=\"sum\", name=\"of_l1loss_sum\" ) with flow.scope.placement(device_type, \"0:0\"): # We only test", "test_util import GenArgList import unittest from collections import OrderedDict from", "assert_prediction_grad) l1loss = flow.nn.L1Loss(x_var, of_target, reduction=\"none\", name=\"of_l1loss\") l1loss_mean = flow.nn.L1Loss(", "= [shape] arg_dict[\"target_shape\"] = [shape] arg_dict[\"device_type\"] = [device_type] arg_dict[\"machine_ids\"] =", "the follow code # grad_sum = prediction_grad.reshape(original_shape) grad_dict = {", "} of_out_l1loss_dict = oneflow_l1loss(input, target) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss\"], np_out_l1loss_dict[\"np_l1_loss\"] )", "device_type=\"cpu\", machine_ids=\"0:0\", 
device_counts=1 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"),", "writing, software distributed under the License is distributed on an", "@flow.global_function(type=\"train\", function_config=func_config) def oneflow_l1loss( of_input: tp.Numpy.Placeholder(shape=input.shape), of_target: tp.Numpy.Placeholder(shape=target.shape), ) ->", "= np_l1_loss_diff(input, target) def assert_prediction_grad(blob: tp.Numpy): # Evaluate the gradient.", "of_input: tp.Numpy.Placeholder(shape=input.shape), of_target: tp.Numpy.Placeholder(shape=target.shape), ) -> Dict[str, tp.Numpy]: with flow.scope.placement(device_type,", ") l1loss_sum = flow.nn.L1Loss( x_var, of_target, reduction=\"sum\", name=\"of_l1loss_sum\" ) with", "in compliance with the License. You may obtain a copy", "compute diff original_shape = np_target.shape elemcnt = np_target.size prediction =", "reduction=\"mean\" diff flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(l1loss_mean) return { \"of_l1_loss\":", "agreed to in writing, software distributed under the License is", "# Use numpy to compute diff original_shape = np_target.shape elemcnt", "cases\") def test_l1loss_gpu(test_case): arg_dict = _gen_arg_dict( shape=(3, 16, 32), device_type=\"gpu\",", "import unittest from collections import OrderedDict from typing import Dict", "type == \"mean\" assert np.allclose(blob, np_grad_dict[\"np_grad_mean\"]) @flow.global_function(type=\"train\", function_config=func_config) def oneflow_l1loss(", "== \"mean\" assert np.allclose(blob, np_grad_dict[\"np_grad_mean\"]) @flow.global_function(type=\"train\", function_config=func_config) def oneflow_l1loss( of_input:", "at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to", "of_target, reduction=\"none\", name=\"of_l1loss\") l1loss_mean = flow.nn.L1Loss( x_var, of_target, reduction=\"mean\", name=\"of_l1loss_mean\"", 
"np_target): np_l1 = np.abs(np_target - np_input) np_l1_mean = np.mean(np_l1) np_l1_sum", "label = np_target.reshape(-1) prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype) for i in np.arange(elemcnt):", "typing import Dict import os def _compare_l1loss_with_np( input_shape, target_shape, device_type,", "either express or implied. See the License for the specific", "name=\"of_l1loss_sum\" ) with flow.scope.placement(device_type, \"0:0\"): # We only test reduction=\"mean\"", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "<gh_stars>1-10 \"\"\" Copyright 2020 The OneFlow Authors. All rights reserved.", "\"License\"); you may not use this file except in compliance", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "device_counts=2 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) if __name__ ==", "# We only test reduction=\"mean\" diff flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0", "np_l1, \"np_l1_loss_mean\": np_l1_mean, \"np_l1_loss_sum\": np_l1_sum, } return np_l1_dict def np_l1_loss_diff(np_input,", "tp.Numpy.Placeholder(shape=input.shape), of_target: tp.Numpy.Placeholder(shape=target.shape), ) -> Dict[str, tp.Numpy]: with flow.scope.placement(device_type, \"0:0\"):", "License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed", ") assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_sum\"][0], np_out_l1loss_dict[\"np_l1_loss_sum\"] ) def _gen_arg_dict(shape, device_type, machine_ids,", "License for the specific language governing permissions and limitations under", "arg_dict = _gen_arg_dict( shape=(16, 3), device_type=\"cpu\", machine_ids=\"0:0\", device_counts=1 ) for", "GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\") def test_l1loss_gpu(test_case): arg_dict", 
"): input = np.random.random(size=input_shape).astype(np.float32) target = np.random.random(size=target_shape).astype(np.float32) assert device_type in", ") -> Dict[str, tp.Numpy]: with flow.scope.placement(device_type, \"0:0\"): v = flow.get_variable(", "shape=(3, 16, 32), device_type=\"gpu\", machine_ids=\"0:0\", device_counts=1 ) for arg in", "\"np_grad_mean\": grad_mean, } return grad_dict # Use Numpy to compute", "reduction type == \"mean\" assert np.allclose(blob, np_grad_dict[\"np_grad_mean\"]) @flow.global_function(type=\"train\", function_config=func_config) def", "diff flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(l1loss_mean) return { \"of_l1_loss\": l1loss,", "All rights reserved. Licensed under the Apache License, Version 2.0", "from collections import OrderedDict from typing import Dict import os", "reduction=\"none\", name=\"of_l1loss\") l1loss_mean = flow.nn.L1Loss( x_var, of_target, reduction=\"mean\", name=\"of_l1loss_mean\" )", "Testl1loss1n2d(flow.unittest.TestCase): @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\") def test_l1loss_gpu_1n2d(test_case): arg_dict =", "\"only test cpu cases\") def test_l1loss_gpu(test_case): arg_dict = _gen_arg_dict( shape=(3,", "to pass parameter to test case arg_dict = OrderedDict() arg_dict[\"input_shape\"]", "case arg_dict = OrderedDict() arg_dict[\"input_shape\"] = [shape] arg_dict[\"target_shape\"] = [shape]", "grad_dict = { \"np_grad_mean\": grad_mean, } return grad_dict # Use", "initializer=flow.constant_initializer(0), name=\"v\", ) x_var = of_input + v # watch", "\"cpu\": flow.config.cpu_device_num(device_counts) else: flow.config.gpu_device_num(device_counts) func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids)) func_config.default_logical_view(flow.scope.consistent_view()) def np_l1loss(np_input, np_target):", "except in compliance with the License. 
You may obtain a", "unittest from collections import OrderedDict from typing import Dict import", "with flow.scope.placement(device_type, \"0:0\"): v = flow.get_variable( shape=target.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), name=\"v\",", "grad_dict # Use Numpy to compute l1 loss np_out_l1loss_dict =", "-> Dict[str, tp.Numpy]: with flow.scope.placement(device_type, \"0:0\"): v = flow.get_variable( shape=target.shape,", ") def _gen_arg_dict(shape, device_type, machine_ids, device_counts): # Generate a dict", "oneflow.typing as tp from test_util import GenArgList import unittest from", "to test case arg_dict = OrderedDict() arg_dict[\"input_shape\"] = [shape] arg_dict[\"target_shape\"]", "= _gen_arg_dict( shape=(3, 16, 32), device_type=\"gpu\", machine_ids=\"0:0\", device_counts=1 ) for", "import oneflow as flow import numpy as np import oneflow.typing", "permissions and limitations under the License. \"\"\" import oneflow as", "compliance with the License. You may obtain a copy of", "language governing permissions and limitations under the License. 
\"\"\" import", "collections import OrderedDict from typing import Dict import os def", "arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @flow.unittest.skip_unless_1n2d() class Testl1loss1n2d(flow.unittest.TestCase): @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test", "# TODO: if you want to get the grad when", "\"0:0\"): # We only test reduction=\"mean\" diff flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]),", "dict to pass parameter to test case arg_dict = OrderedDict()", "v # watch the diff flow.watch_diff(x_var, assert_prediction_grad) l1loss = flow.nn.L1Loss(x_var,", "/ elemcnt # TODO: if you want to get the", "flow.FunctionConfig() flow.clear_default_session() if device_type == \"cpu\": flow.config.cpu_device_num(device_counts) else: flow.config.gpu_device_num(device_counts) func_config.default_placement_scope(flow.scope.placement(device_type,", "parameter to test case arg_dict = OrderedDict() arg_dict[\"input_shape\"] = [shape]", "grad_mean, } return grad_dict # Use Numpy to compute l1", "tp.Numpy]: with flow.scope.placement(device_type, \"0:0\"): v = flow.get_variable( shape=target.shape, dtype=flow.float32, initializer=flow.constant_initializer(0),", "[machine_ids] arg_dict[\"device_counts\"] = [device_counts] return arg_dict @flow.unittest.skip_unless_1n1d() class Testl1loss1n1d(flow.unittest.TestCase): def", "# grad_sum = prediction_grad.reshape(original_shape) grad_dict = { \"np_grad_mean\": grad_mean, }", "np_grad_dict[\"np_grad_mean\"]) @flow.global_function(type=\"train\", function_config=func_config) def oneflow_l1loss( of_input: tp.Numpy.Placeholder(shape=input.shape), of_target: tp.Numpy.Placeholder(shape=target.shape), )", "if you want to get the grad when the reduction", "grad np_grad_dict = np_l1_loss_diff(input, target) def assert_prediction_grad(blob: tp.Numpy): # Evaluate", "flow.scope.placement(device_type, \"0:0\"): # We only test reduction=\"mean\" diff flow.optimizer.SGD( 
flow.optimizer.PiecewiseConstantScheduler([],", "in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @flow.unittest.skip_unless_1n2d() class Testl1loss1n2d(flow.unittest.TestCase): @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu", "arg_dict[\"device_counts\"] = [device_counts] return arg_dict @flow.unittest.skip_unless_1n1d() class Testl1loss1n1d(flow.unittest.TestCase): def test_l1loss_cpu(test_case):", "name=\"v\", ) x_var = of_input + v # watch the", "oneflow_l1loss(input, target) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss\"], np_out_l1loss_dict[\"np_l1_loss\"] ) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_mean\"][0],", "device_counts=1 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test", "\"0:0\"): v = flow.get_variable( shape=target.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), name=\"v\", ) x_var", "arg_dict @flow.unittest.skip_unless_1n1d() class Testl1loss1n1d(flow.unittest.TestCase): def test_l1loss_cpu(test_case): arg_dict = _gen_arg_dict( shape=(16,", "assert device_type in [\"cpu\", \"gpu\"] func_config = flow.FunctionConfig() flow.clear_default_session() if", "dtype=flow.float32, initializer=flow.constant_initializer(0), name=\"v\", ) x_var = of_input + v #", "= oneflow_l1loss(input, target) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss\"], np_out_l1loss_dict[\"np_l1_loss\"] ) assert np.allclose(", "l1loss_mean = flow.nn.L1Loss( x_var, of_target, reduction=\"mean\", name=\"of_l1loss_mean\" ) l1loss_sum =", "np_l1loss(np_input, np_target): np_l1 = np.abs(np_target - np_input) np_l1_mean = np.mean(np_l1)", "np.allclose( of_out_l1loss_dict[\"of_l1_loss\"], np_out_l1loss_dict[\"np_l1_loss\"] ) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_mean\"][0], np_out_l1loss_dict[\"np_l1_loss_mean\"] ) assert", "the reduction = \"sum\", you can use the follow code", "import oneflow.typing as tp from test_util 
import GenArgList import unittest", "Unless required by applicable law or agreed to in writing,", "by applicable law or agreed to in writing, software distributed", "grad_sum = prediction_grad.reshape(original_shape) grad_dict = { \"np_grad_mean\": grad_mean, } return", "as np import oneflow.typing as tp from test_util import GenArgList", "= np_target.size prediction = np_input.reshape(-1) label = np_target.reshape(-1) prediction_grad =", "{ \"np_grad_mean\": grad_mean, } return grad_dict # Use Numpy to", "flow.clear_default_session() if device_type == \"cpu\": flow.config.cpu_device_num(device_counts) else: flow.config.gpu_device_num(device_counts) func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))", "= np.random.random(size=input_shape).astype(np.float32) target = np.random.random(size=target_shape).astype(np.float32) assert device_type in [\"cpu\", \"gpu\"]", "\"of_l1_loss_mean\": l1loss_mean, \"of_l1_loss_sum\": l1loss_sum, } of_out_l1loss_dict = oneflow_l1loss(input, target) assert", "arg_dict[\"input_shape\"] = [shape] arg_dict[\"target_shape\"] = [shape] arg_dict[\"device_type\"] = [device_type] arg_dict[\"machine_ids\"]", "use the follow code # grad_sum = prediction_grad.reshape(original_shape) grad_dict =", "Here we only test the reduction type == \"mean\" assert", "Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under", "express or implied. 
See the License for the specific language", "else: flow.config.gpu_device_num(device_counts) func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids)) func_config.default_logical_view(flow.scope.consistent_view()) def np_l1loss(np_input, np_target): np_l1 =", "GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @flow.unittest.skip_unless_1n2d() class Testl1loss1n2d(flow.unittest.TestCase): @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")", "original_shape = np_target.shape elemcnt = np_target.size prediction = np_input.reshape(-1) label", "of_target: tp.Numpy.Placeholder(shape=target.shape), ) -> Dict[str, tp.Numpy]: with flow.scope.placement(device_type, \"0:0\"): v", "pass parameter to test case arg_dict = OrderedDict() arg_dict[\"input_shape\"] =", "Dict[str, tp.Numpy]: with flow.scope.placement(device_type, \"0:0\"): v = flow.get_variable( shape=target.shape, dtype=flow.float32,", "device_counts=1 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @flow.unittest.skip_unless_1n2d() class Testl1loss1n2d(flow.unittest.TestCase):", "assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_mean\"][0], np_out_l1loss_dict[\"np_l1_loss_mean\"] ) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_sum\"][0], np_out_l1loss_dict[\"np_l1_loss_sum\"] )", "you can use the follow code # grad_sum = prediction_grad.reshape(original_shape)", "class Testl1loss1n1d(flow.unittest.TestCase): def test_l1loss_cpu(test_case): arg_dict = _gen_arg_dict( shape=(16, 3), device_type=\"cpu\",", "return grad_dict # Use Numpy to compute l1 loss np_out_l1loss_dict", "np_target.shape elemcnt = np_target.size prediction = np_input.reshape(-1) label = np_target.reshape(-1)", "= np_l1loss(input, target) # Use Numpy to compute l1 grad", "= of_input + v # watch the diff flow.watch_diff(x_var, assert_prediction_grad)", "def test_l1loss_gpu_1n2d(test_case): arg_dict = _gen_arg_dict( shape=(3, 32, 16), device_type=\"gpu\", 
machine_ids=\"0:0-1\",", "os def _compare_l1loss_with_np( input_shape, target_shape, device_type, machine_ids, device_counts ): input", "np_l1_dict def np_l1_loss_diff(np_input, np_target): # Use numpy to compute diff", "= flow.get_variable( shape=target.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), name=\"v\", ) x_var = of_input", "obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required", "def _gen_arg_dict(shape, device_type, machine_ids, device_counts): # Generate a dict to", "\"np_l1_loss_sum\": np_l1_sum, } return np_l1_dict def np_l1_loss_diff(np_input, np_target): # Use", "# watch the diff flow.watch_diff(x_var, assert_prediction_grad) l1loss = flow.nn.L1Loss(x_var, of_target,", "reduction=\"mean\", name=\"of_l1loss_mean\" ) l1loss_sum = flow.nn.L1Loss( x_var, of_target, reduction=\"sum\", name=\"of_l1loss_sum\"", "device_type == \"cpu\": flow.config.cpu_device_num(device_counts) else: flow.config.gpu_device_num(device_counts) func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids)) func_config.default_logical_view(flow.scope.consistent_view()) def", "when the reduction = \"sum\", you can use the follow", "_gen_arg_dict( shape=(16, 3), device_type=\"cpu\", machine_ids=\"0:0\", device_counts=1 ) for arg in", "import GenArgList import unittest from collections import OrderedDict from typing", "with the License. 
You may obtain a copy of the", "cpu cases\") def test_l1loss_gpu(test_case): arg_dict = _gen_arg_dict( shape=(3, 16, 32),", "{ \"np_l1_loss\": np_l1, \"np_l1_loss_mean\": np_l1_mean, \"np_l1_loss_sum\": np_l1_sum, } return np_l1_dict", "= np.abs(np_target - np_input) np_l1_mean = np.mean(np_l1) np_l1_sum = np.sum(np_l1)", "np_out_l1loss_dict[\"np_l1_loss\"] ) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_mean\"][0], np_out_l1loss_dict[\"np_l1_loss_mean\"] ) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss_sum\"][0],", "16), device_type=\"gpu\", machine_ids=\"0:0-1\", device_counts=2 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg)", "to get the grad when the reduction = \"sum\", you", "flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(l1loss_mean) return { \"of_l1_loss\": l1loss, \"of_l1_loss_mean\": l1loss_mean,", "flow.scope.placement(device_type, \"0:0\"): v = flow.get_variable( shape=target.shape, dtype=flow.float32, initializer=flow.constant_initializer(0), name=\"v\", )", "prediction_grad[i] = np.sign(diff) grad_mean = prediction_grad.reshape(original_shape) / elemcnt # TODO:", "} return grad_dict # Use Numpy to compute l1 loss", "machine_ids=\"0:0\", device_counts=1 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @flow.unittest.skip_unless_1n2d() class", "specific language governing permissions and limitations under the License. 
\"\"\"", "np.sign(diff) grad_mean = prediction_grad.reshape(original_shape) / elemcnt # TODO: if you", "label[i] prediction_grad[i] = np.sign(diff) grad_mean = prediction_grad.reshape(original_shape) / elemcnt #", "flow.nn.L1Loss(x_var, of_target, reduction=\"none\", name=\"of_l1loss\") l1loss_mean = flow.nn.L1Loss( x_var, of_target, reduction=\"mean\",", "applicable law or agreed to in writing, software distributed under", "device_type=\"gpu\", machine_ids=\"0:0\", device_counts=1 ) for arg in GenArgList(arg_dict): _compare_l1loss_with_np(*arg) @flow.unittest.skip_unless_1n2d()", "name=\"of_l1loss_mean\" ) l1loss_sum = flow.nn.L1Loss( x_var, of_target, reduction=\"sum\", name=\"of_l1loss_sum\" )", "l1loss_sum, } of_out_l1loss_dict = oneflow_l1loss(input, target) assert np.allclose( of_out_l1loss_dict[\"of_l1_loss\"], np_out_l1loss_dict[\"np_l1_loss\"]", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "from test_util import GenArgList import unittest from collections import OrderedDict", "the specific language governing permissions and limitations under the License.", "np_l1_loss_diff(np_input, np_target): # Use numpy to compute diff original_shape =", "np_l1_sum, } return np_l1_dict def np_l1_loss_diff(np_input, np_target): # Use numpy", "np_input.reshape(-1) label = np_target.reshape(-1) prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype) for i in", "== \"cpu\": flow.config.cpu_device_num(device_counts) else: flow.config.gpu_device_num(device_counts) func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids)) func_config.default_logical_view(flow.scope.consistent_view()) def np_l1loss(np_input,", "device_type in [\"cpu\", \"gpu\"] func_config = flow.FunctionConfig() flow.clear_default_session() if device_type", "flow.nn.L1Loss( x_var, of_target, reduction=\"sum\", name=\"of_l1loss_sum\" ) with flow.scope.placement(device_type, \"0:0\"): #", "2020 The OneFlow Authors. All rights reserved. 
Licensed under the", "or agreed to in writing, software distributed under the License", "class Testl1loss1n2d(flow.unittest.TestCase): @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\") def test_l1loss_gpu_1n2d(test_case): arg_dict", "- label[i] prediction_grad[i] = np.sign(diff) grad_mean = prediction_grad.reshape(original_shape) / elemcnt", "OF ANY KIND, either express or implied. See the License", "np_l1loss(input, target) # Use Numpy to compute l1 grad np_grad_dict", "Evaluate the gradient. Here we only test the reduction type", "the gradient. Here we only test the reduction type ==", "= np_input.reshape(-1) label = np_target.reshape(-1) prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype) for i", "l1loss = flow.nn.L1Loss(x_var, of_target, reduction=\"none\", name=\"of_l1loss\") l1loss_mean = flow.nn.L1Loss( x_var,", "_compare_l1loss_with_np(*arg) @flow.unittest.skip_unless_1n2d() class Testl1loss1n2d(flow.unittest.TestCase): @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\") def", "License, Version 2.0 (the \"License\"); you may not use this", "test cpu cases\") def test_l1loss_gpu_1n2d(test_case): arg_dict = _gen_arg_dict( shape=(3, 32,", "np_l1_sum = np.sum(np_l1) np_l1_dict = { \"np_l1_loss\": np_l1, \"np_l1_loss_mean\": np_l1_mean,", "= np.mean(np_l1) np_l1_sum = np.sum(np_l1) np_l1_dict = { \"np_l1_loss\": np_l1,", "grad when the reduction = \"sum\", you can use the", "_gen_arg_dict(shape, device_type, machine_ids, device_counts): # Generate a dict to pass", "= np_target.reshape(-1) prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype) for i in np.arange(elemcnt): diff", "a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by", "arg_dict[\"machine_ids\"] = [machine_ids] arg_dict[\"device_counts\"] = [device_counts] return arg_dict @flow.unittest.skip_unless_1n1d() class", "machine_ids, device_counts): # Generate a dict to pass parameter to", 
"License. You may obtain a copy of the License at", "We only test reduction=\"mean\" diff flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(l1loss_mean)", "with flow.scope.placement(device_type, \"0:0\"): # We only test reduction=\"mean\" diff flow.optimizer.SGD(", "arg_dict = _gen_arg_dict( shape=(3, 32, 16), device_type=\"gpu\", machine_ids=\"0:0-1\", device_counts=2 )", "= flow.nn.L1Loss( x_var, of_target, reduction=\"mean\", name=\"of_l1loss_mean\" ) l1loss_sum = flow.nn.L1Loss(", "- np_input) np_l1_mean = np.mean(np_l1) np_l1_sum = np.sum(np_l1) np_l1_dict =", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "def oneflow_l1loss( of_input: tp.Numpy.Placeholder(shape=input.shape), of_target: tp.Numpy.Placeholder(shape=target.shape), ) -> Dict[str, tp.Numpy]:", "OrderedDict from typing import Dict import os def _compare_l1loss_with_np( input_shape,", "function_config=func_config) def oneflow_l1loss( of_input: tp.Numpy.Placeholder(shape=input.shape), of_target: tp.Numpy.Placeholder(shape=target.shape), ) -> Dict[str,", "# Use Numpy to compute l1 grad np_grad_dict = np_l1_loss_diff(input,", "prediction_grad = np.zeros((elemcnt)).astype(prediction.dtype) for i in np.arange(elemcnt): diff = prediction[i]", "to compute diff original_shape = np_target.shape elemcnt = np_target.size prediction", "# Generate a dict to pass parameter to test case", "cases\") def test_l1loss_gpu_1n2d(test_case): arg_dict = _gen_arg_dict( shape=(3, 32, 16), device_type=\"gpu\"," ]
[ "def test_load_trigger(self): self.assertRaises(ValidationError, schemas.TriggerSchema().load, {\"type\": \"unknown\"}) obj = schemas.TriggerSchema().load({'type': \"interval\"})", "dump:', result) def test_load_trigger(self): self.assertRaises(ValidationError, schemas.TriggerSchema().load, {\"type\": \"unknown\"}) obj =", "unittest import datetime from dida import schemas, triggers from marshmallow", "ValidationError class TestTriggerSchema(unittest.TestCase): def test_dump_trigger(self): result = schemas.TriggerSchema().dump(triggers.IntervalTrigger()) print('IntervalTrigger dump:',", "result = schemas.TriggerSchema().dump(triggers.IntervalTrigger()) print('IntervalTrigger dump:', result) result = schemas.TriggerSchema().dump(triggers.DateTrigger()) print('DateTrigger", "class TestTriggerSchema(unittest.TestCase): def test_dump_trigger(self): result = schemas.TriggerSchema().dump(triggers.IntervalTrigger()) print('IntervalTrigger dump:', result)", "test_dump_trigger(self): result = schemas.TriggerSchema().dump(triggers.IntervalTrigger()) print('IntervalTrigger dump:', result) result = schemas.TriggerSchema().dump(triggers.DateTrigger())", "\"interval\"}) self.assertIsInstance(obj, triggers.IntervalTrigger) obj = schemas.TriggerSchema().load({'type': 'date', \"params\": {'run_date': \"2020-01-01", "print('IntervalTrigger dump:', result) result = schemas.TriggerSchema().dump(triggers.DateTrigger()) print('DateTrigger dump:', result) def", "= schemas.TriggerSchema().load({'type': \"interval\"}) self.assertIsInstance(obj, triggers.IntervalTrigger) obj = schemas.TriggerSchema().load({'type': 'date', \"params\":", "result) result = schemas.TriggerSchema().dump(triggers.DateTrigger()) print('DateTrigger dump:', result) def test_load_trigger(self): self.assertRaises(ValidationError,", "result) def test_load_trigger(self): self.assertRaises(ValidationError, schemas.TriggerSchema().load, {\"type\": \"unknown\"}) obj = schemas.TriggerSchema().load({'type':", "= 
schemas.TriggerSchema().load({'type': 'date', \"params\": {'run_date': \"2020-01-01 00:00:00\"}}) self.assertEqual(obj.run_date, datetime.datetime(2020, 1,", "schemas, triggers from marshmallow import ValidationError class TestTriggerSchema(unittest.TestCase): def test_dump_trigger(self):", "self.assertIsInstance(obj, triggers.IntervalTrigger) obj = schemas.TriggerSchema().load({'type': 'date', \"params\": {'run_date': \"2020-01-01 00:00:00\"}})", "import ValidationError class TestTriggerSchema(unittest.TestCase): def test_dump_trigger(self): result = schemas.TriggerSchema().dump(triggers.IntervalTrigger()) print('IntervalTrigger", "test_load_trigger(self): self.assertRaises(ValidationError, schemas.TriggerSchema().load, {\"type\": \"unknown\"}) obj = schemas.TriggerSchema().load({'type': \"interval\"}) self.assertIsInstance(obj,", "marshmallow import ValidationError class TestTriggerSchema(unittest.TestCase): def test_dump_trigger(self): result = schemas.TriggerSchema().dump(triggers.IntervalTrigger())", "= schemas.TriggerSchema().dump(triggers.DateTrigger()) print('DateTrigger dump:', result) def test_load_trigger(self): self.assertRaises(ValidationError, schemas.TriggerSchema().load, {\"type\":", "{\"type\": \"unknown\"}) obj = schemas.TriggerSchema().load({'type': \"interval\"}) self.assertIsInstance(obj, triggers.IntervalTrigger) obj =", "obj = schemas.TriggerSchema().load({'type': \"interval\"}) self.assertIsInstance(obj, triggers.IntervalTrigger) obj = schemas.TriggerSchema().load({'type': 'date',", "dump:', result) result = schemas.TriggerSchema().dump(triggers.DateTrigger()) print('DateTrigger dump:', result) def test_load_trigger(self):", "dida import schemas, triggers from marshmallow import ValidationError class TestTriggerSchema(unittest.TestCase):", "from dida import schemas, triggers from marshmallow import ValidationError class", "result = schemas.TriggerSchema().dump(triggers.DateTrigger()) print('DateTrigger dump:', result) def 
test_load_trigger(self): self.assertRaises(ValidationError, schemas.TriggerSchema().load,", "schemas.TriggerSchema().dump(triggers.DateTrigger()) print('DateTrigger dump:', result) def test_load_trigger(self): self.assertRaises(ValidationError, schemas.TriggerSchema().load, {\"type\": \"unknown\"})", "TestTriggerSchema(unittest.TestCase): def test_dump_trigger(self): result = schemas.TriggerSchema().dump(triggers.IntervalTrigger()) print('IntervalTrigger dump:', result) result", "schemas.TriggerSchema().load({'type': 'date', \"params\": {'run_date': \"2020-01-01 00:00:00\"}}) self.assertEqual(obj.run_date, datetime.datetime(2020, 1, 1).astimezone())", "datetime from dida import schemas, triggers from marshmallow import ValidationError", "self.assertRaises(ValidationError, schemas.TriggerSchema().load, {\"type\": \"unknown\"}) obj = schemas.TriggerSchema().load({'type': \"interval\"}) self.assertIsInstance(obj, triggers.IntervalTrigger)", "triggers from marshmallow import ValidationError class TestTriggerSchema(unittest.TestCase): def test_dump_trigger(self): result", "def test_dump_trigger(self): result = schemas.TriggerSchema().dump(triggers.IntervalTrigger()) print('IntervalTrigger dump:', result) result =", "schemas.TriggerSchema().dump(triggers.IntervalTrigger()) print('IntervalTrigger dump:', result) result = schemas.TriggerSchema().dump(triggers.DateTrigger()) print('DateTrigger dump:', result)", "print('DateTrigger dump:', result) def test_load_trigger(self): self.assertRaises(ValidationError, schemas.TriggerSchema().load, {\"type\": \"unknown\"}) obj", "schemas.TriggerSchema().load, {\"type\": \"unknown\"}) obj = schemas.TriggerSchema().load({'type': \"interval\"}) self.assertIsInstance(obj, triggers.IntervalTrigger) obj", "triggers.IntervalTrigger) obj = schemas.TriggerSchema().load({'type': 'date', \"params\": {'run_date': \"2020-01-01 00:00:00\"}}) self.assertEqual(obj.run_date,", "from marshmallow import ValidationError class 
TestTriggerSchema(unittest.TestCase): def test_dump_trigger(self): result =", "import unittest import datetime from dida import schemas, triggers from", "import schemas, triggers from marshmallow import ValidationError class TestTriggerSchema(unittest.TestCase): def", "schemas.TriggerSchema().load({'type': \"interval\"}) self.assertIsInstance(obj, triggers.IntervalTrigger) obj = schemas.TriggerSchema().load({'type': 'date', \"params\": {'run_date':", "obj = schemas.TriggerSchema().load({'type': 'date', \"params\": {'run_date': \"2020-01-01 00:00:00\"}}) self.assertEqual(obj.run_date, datetime.datetime(2020,", "\"unknown\"}) obj = schemas.TriggerSchema().load({'type': \"interval\"}) self.assertIsInstance(obj, triggers.IntervalTrigger) obj = schemas.TriggerSchema().load({'type':", "= schemas.TriggerSchema().dump(triggers.IntervalTrigger()) print('IntervalTrigger dump:', result) result = schemas.TriggerSchema().dump(triggers.DateTrigger()) print('DateTrigger dump:',", "import datetime from dida import schemas, triggers from marshmallow import" ]
[ "def get(self, request): try: subject = request.query_params.get('subject', None) if subject", "error: context = {'success': \"false\", 'message': 'Failed to get OtherContent", "e: pass path = settings.MEDIA_ROOT + '/files/' data_frame = pd.DataFrame(final_list", "queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id) serializer = SubSubSectionKeywordsSerializer(queryset, many=True) else:", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentRejectedList(ListAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer", "try: try: content_list = self.get_object() except Exception as error: context", "context = {\"success\": True, \"message\": \"Content List\",\"data\": serializer.data} return Response(context,", "= settings.AZURE_ACCOUNT_KEY CONTAINER_NAME= settings.AZURE_CONTAINER block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key) class ContentList(ListCreateAPIView):", "\"Updation Successful\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {\"success\": False,", "BookNestedSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Conetent List\",\"data\": serializer.data}", "Textbook Unit','Level 4 Textbook Unit', 'total', 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots'])", "= {\"success\": True, \"message\": \"url link\", \"token\":sas_url,\"base_url\":base_url} return Response(context, status=status.HTTP_200_OK)", "permission_required from rest_framework.parsers import MultiPartParser from apps.dataupload.models import (Chapter, Section,", "book_id is not None: book_name=Book.objects.get(id=book_id) chapters=Chapter.objects.filter(book__id=book_id).order_by('id') serializer = ContentStatusSerializer(chapters, many=True)", "os import itertools from django.db.models import Q import threading account_name", "\"message\": \"update successfull\"} return 
Response(context, status=status.HTTP_200_OK) except Exception as error:", "{\"success\": True, \"message\": \"Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) else:", "accountKey = settings.AZURE_ACCOUNT_KEY containerName= settings.AZURE_CONTAINER try: blobService = BlockBlobService(account_name=accountName, account_key=accountKey)", "self.get_queryset().filter(approved=False, approved_by=None) serializer = KeywordSerializer(queryset, many=True) context = {\"success\": True,", "return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {", "from django.contrib.auth.decorators import permission_required from rest_framework.parsers import MultiPartParser from apps.dataupload.models", "= self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None) elif sub_sub_section_id is not None: queryset", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) def put(self, request, pk, format=None): try: try: content_list =", "\"message\": \"Content Approved List\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except", "details\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context =", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookListView(ListAPIView): queryset = Book.objects.all() serializer_class =", "queryset = self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None) elif sub_section_id is not None:", "pandas as pd from evolve import settings from evolve import", "import itertools from django.db.models import Q import threading account_name =", "queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try:", "= settings.AZURE_ACCOUNT_NAME accountKey = settings.AZURE_ACCOUNT_KEY containerName= settings.AZURE_CONTAINER try: blobService =", "queryset=self.get_queryset().filter(subject__id=subject, 
content_only=True) else: queryset = self.get_queryset().filter(content_only=True) serializer = BookNestedSerializer(queryset, many=True)", "Conetent list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentApprovedList(ListAPIView): queryset = Content.objects.all()", "d in data['chapter']: final_list.append(d) repeat_list=['Content Name','Content Link/Video Link','Content Rating (By", "queryset = self.get_queryset() serializer = ContentListSerializer(queryset, many=True) context = {\"success\":", "serializer.is_valid(): serializer.save() context = {\"success\": True, \"message\": \"Created Successful\", \"data\":", "= {\"success\": True, \"message\": \"Activity List\", \"data\": 'media/files/ApprovedContent.csv'} return Response(context,", "'message': 'Failed to get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class", "'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSASView(ListAPIView):", "from shutil import copyfile book = request.query_params.get('book', None) chapters=Chapter.objects.filter(book_id=book).order_by('id') serializer", "try: serializer = ContentListSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {\"success\":", "in datalist: print(data) Content.objects.filter(pk=data['content_id']).update(video=data['video']) context = {\"success\": True, \"message\": \"update", "\"data\": 'media/files/BackupContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error: context", "from rest_framework.generics import ( ListAPIView, ListCreateAPIView, ListAPIView, RetrieveUpdateAPIView,) from rest_framework.response", "None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None)) elif request.query_params.get('section', None) is", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class 
ContentListUrlPutRequestRevert(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookListView(ListAPIView): queryset = Book.objects.all() serializer_class", "not None : final=[i.id,i.video] final_list.append(final) except Exception as e: pass", "= HardSpotCreateSerializer def get(self, request): try: final_list = [] import", "\"message\": \"Content Rejected List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception", "self.get_queryset() serializer = ContentListSerializer(queryset, many=True) context = {\"success\": True, \"message\":", "columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates() exists = os.path.isfile('content_contributers.csv') path = settings.MEDIA_ROOT +", "None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None)) elif request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section',", "= Book.objects.all() serializer_class = BookNestedSerializer def get(self, request): try: subject", "class GetSASView(ListAPIView): def get(self,request): try: sas_url = block_blob_service.generate_container_shared_access_signature( CONTAINER_NAME, ContainerPermissions.WRITE,", "parser_classes = (MultiPartParser,) def get(self, request): try: queryset = self.get_queryset()", "\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) else: serializer = ContentContributorSerializer(data=request.data) if", "is not None: book_name=Book.objects.get(id=book_id) chapters=Chapter.objects.filter(book__id=book_id).order_by('id') serializer = ContentStatusSerializer(chapters, many=True) for", "context = {'success': \"false\", 'message': 'Failed to get Content Pending", "= self.get_queryset() serializer = 
KeywordSerializer(queryset, many=True) context = {\"success\": True,", "is not None : final=[i.id,i.video] final_list.append(final) except Exception as e:", "timedelta(hours=10)) context = {\"success\": True, \"token\":sas_token} return Response(context, status=status.HTTP_200_OK) except:", "pd.DataFrame(final_list , columns=['Board', 'Medium', 'Grade', 'Subject', 'Textbook Name', 'Level 1", "\"OtherContent Approved List\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception", "context = {'success': \"false\", 'message': 'content Id does not exist.'}", "<filename>apps/content/views.py from django.shortcuts import render from rest_framework import status from", "'Failed to get Content Pending list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class", "if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None) elif section_id", "self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None) else: queryset = self.get_queryset().filter(approved=False, approved_by=None) serializer =", "ContentStatusListSerializer, SectionKeywordSerializer, SubSectionKeywordSerializer, SectionKeywordsSerializer, ChapterKeywordsSerializer, SubSectionKeywordsSerializer, KeywordSerializer, ContentContributorSerializer, ApprovedContentSerializer, ContentStatusSerializer,", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSasDownloadView(ListAPIView): def get(self,request): from evolve import settings", "serializer_class = BookNestedSerializer def get(self, request): try: subject = request.query_params.get('subject',", "Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView): queryset = Content.objects.all()", "= request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) 
sub_section_id = request.query_params.get('sub_section',", "format=None): try: try: content_list = self.get_object() except Exception as error:", "None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section', None)) else: queryset = self.get_queryset() serializer = ContentListSerializer(queryset,", "'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook", "not None: ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email']) queryset.refresh_from_db() serializer = ContentContributorSerializer(queryset) context = {\"success\":", "from evolve import settings from evolve import settings from azure.storage.blob", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentContributorCreateView(ListCreateAPIView): queryset = ContentContributors.objects.all() serializer_class =", "{'success': \"false\", 'message': 'Failed to get content list.'} return Response(context,", "chapters=Chapter.objects.filter(book__id=book_id).order_by('id') serializer = ContentStatusSerializer(chapters, many=True) for data in serializer.data: for", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSasDownloadView(ListAPIView): def get(self,request): from evolve", "'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5))))) exists = os.path.isfile('ApprovedContent.csv') path = settings.MEDIA_ROOT + '/files/'", "SectionKeywordsSerializer(queryset, many=True) elif sub_section_id is not None: queryset = SubSectionKeyword.objects.filter(sub_section__id", "= self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None) else: queryset = self.get_queryset().filter(approved=False, approved_by=None) serializer", "path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('content_contributers.csv') # data_frame.to_excel(path", "'message': 'content Id does not exist.'} return Response(context, 
status=status.HTTP_404_NOT_FOUND) serializer", "pd.DataFrame(final_list , columns=['Board', 'Medium','Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook", "get(self, request): try: queryset = self.get_queryset() serializer = ContentStatusListSerializer(queryset, many=True)", "= {\"success\": True, \"message\": \"Chapter List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK)", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentRetrieveUpdate(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class", "approved=True) elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=True)", "import os from shutil import copyfile book_id = request.query_params.get('book', None)", "datalist = request.data print(datalist) for data in datalist: Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database']) context", "{\"success\": True, \"message\": \"update successfull\"} return Response(context, status=status.HTTP_200_OK) except Exception", "import json import pandas as pd from evolve import settings", "= ContentStatusSerializerFileFormat(queryset, many=True) context = {\"success\": True, \"message\": \"OtherContent Approved", "queryset = Book.objects.all() serializer_class = BookListSerializer def get(self, request): try:", "'pending_contents', 'hard_spots']) exists = os.path.isfile('{}_contentstatus.csv'.format(book_name)) path = settings.MEDIA_ROOT + '/files/'", "django.contrib.auth.decorators import permission_required from rest_framework.parsers import MultiPartParser from apps.dataupload.models import", "= [] for i in range(len(serializer.data)): if serializer.data[i] not in", "rest_framework.permissions import IsAuthenticated from rest_framework.decorators import permission_classes from apps.configuration.models import", "SubSubSectionKeywordsSerializer, 
ContentStatusSerializerFileFormat, ) from django.utils.decorators import method_decorator from django.contrib.auth.decorators import", "exist.'} return Response(context, status=status.HTTP_404_NOT_FOUND) serializer = ContentListSerializer(content_list, data=request.data, context={\"user\":request.user}, partial=True)", "serializer = KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content", "is not None: queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id) serializer = ChapterKeywordsSerializer(queryset, many=True)", "if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True) elif section_id is", "sas_url = block_blob_service.generate_container_shared_access_signature( CONTAINER_NAME, ContainerPermissions.WRITE, datetime.utcnow() + timedelta(hours=1), ) base_url=account_name+\".blob.core.windows.net/\"+CONTAINER_NAME", "True, \"message\": \"Content Approved List\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK)", "data_frame.to_csv(path + 'ApprovedContent.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\":", "context = {'success': \"false\", 'message': 'Failed to get Chapter list.'}", "for i in range(len(serializer.data)): if serializer.data[i] not in serializer.data[i +", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def index(self): final_list,final = [],[] queryset =", "+ 'ApprovedContent.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\": \"Activity", "chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None) elif section_id is not", "status=status.HTTP_400_BAD_REQUEST) except Exception as error: context = {'success': \"false\", 'message':", "datalist: print(data) Content.objects.filter(pk=data['content_id']).update(video=data['video']) context = {\"success\": True, \"message\": 
\"update successfull\"}", "datalist: Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database']) context = {\"success\": True, \"message\": \"update successfull\"} return", "= settings.MEDIA_ROOT + '/files/' data_frame = pd.DataFrame(final_list , columns=['id','url']) data_frame.to_csv(path+", "Textbook Unit', 'total', 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots']) exists = os.path.isfile('{}_contentstatus.csv'.format(book_name))", "class ContentStatusList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self,", "index=False) context = {\"success\": True, \"message\": \"Activity List\",\"data\": 'media/files/content_contributers.csv'} return", "not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True) else: queryset =", "sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None) elif sub_sub_section_id", "shutil import copyfile book = request.query_params.get('book', None) chapters=Chapter.objects.filter(book_id=book).order_by('id') serializer =", "+ '/files/' if exists: os.remove('{}_contentstatus.csv'.format(book_name)) # data_frame.to_excel(path + 'contentstatus.xlsx') data_frame.to_csv(path", "None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id", "None: if str(queryset.email) == \"\" and request.data['email'] is not None:", "get(self, request): try: subject = request.query_params.get('subject', None) if subject is", "many=True) context = {\"success\": True, \"message\": \"Conetent List\",\"data\": serializer.data} return", "state_id) ).distinct() else: queryset = self.get_queryset() serializer = ContentContributorsSerializer(queryset, many=True)", "'/files/' if exists: os.remove('ApprovedContent.csv') data_frame.to_csv(path 
+ 'ApprovedContent.csv', encoding=\"utf-8-sig\", index=False) context", "get content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def put(self, request, pk,", "context = {'success': \"false\", 'message': 'Failed to get Activity list.'}", "List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context", "request.query_params.get('subject', None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject, content_only=True) else:", "for data in serializer.data: for d in data['chapter']: final_list.append(d) data_frame", "Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusDownloadView(RetrieveUpdateAPIView): queryset = HardSpot.objects.all()", "= Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: queryset", "ApprovedContentSerializer, ContentStatusSerializer, HardSpotCreateSerializer, ContentContributorsSerializer, SubSubSectionKeywordsSerializer, ContentStatusSerializerFileFormat, ) from django.utils.decorators import", "chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True) elif section_id is not", "sub_sub_section_id) serializer = SubSubSectionKeywordsSerializer(queryset, many=True) else: queryset = self.get_queryset() serializer", "Content.objects.filter(pk=data['content_id']).update(video=data['video']) context = {\"success\": True, \"message\": \"update successfull\"} return Response(context,", "settings.AZURE_ACCOUNT_NAME account_key = settings.AZURE_ACCOUNT_KEY CONTAINER_NAME= settings.AZURE_CONTAINER block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key)", "queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None) elif sub_sub_section_id is not None:", "section_id = request.query_params.get('section', None) sub_section_id = 
request.query_params.get('sub_section', None) sub_sub_section_id =", "from django.db.models import Q import threading account_name = settings.AZURE_ACCOUNT_NAME account_key", "ContentContributorsSerializer, SubSubSectionKeywordsSerializer, ContentStatusSerializerFileFormat, ) from django.utils.decorators import method_decorator from django.contrib.auth.decorators", "return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context = {'success':", "import (Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword, ) import", "elif sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True)", "i in queryset: try: if i.video is not None :", "to get Content Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentPendingList(ListAPIView):", "= {'success': \"false\", 'message': 'Failed To Update content Details.'} return", "Content.objects.all() def get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id", "range(len(serializer.data)): if serializer.data[i] not in serializer.data[i + 1:]: res_list.append(serializer.data[i]) for", "context = {\"success\": True, \"message\": \"Successful\", \"data\": serializer.data} return Response(context,", "Content Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentPendingList(ListAPIView): queryset =", "sub_sub_section_id,approved=True) else: queryset = self.get_queryset().filter(approved=True) serializer = KeywordSerializer(queryset, many=True) context", "queryset = Content.objects.all() serializer_class = ContentStatusSerializer def post(self, request): try:", "{'success': \"false\", 'message': 'Failed to get Chapter list.'} return Response(context,", "apps.configuration.models import Book from apps.hardspot.models import HardSpot from .models import", 
"'rejected_contents', 'pending_contents', 'hard_spots']) exists = os.path.isfile('{}_contentstatus.csv'.format(book_name)) path = settings.MEDIA_ROOT +", "path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('ApprovedContent.csv') data_frame.to_csv(path +", "= KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content Rejected", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSASView(ListAPIView): def get(self,request): try: sas_url", "if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None) elif section_id is", "Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequest(RetrieveUpdateAPIView): queryset = Content.objects.all()", "approved_by=None) elif sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id =", "import os import itertools from django.db.models import Q import threading", "= settings.MEDIA_ROOT + '/files/' if exists: os.remove('ApprovedContent.csv') data_frame.to_csv(path + 'ApprovedContent.csv',", "content Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookNestedList(ListAPIView): queryset = Book.objects.all()", "2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit',", "'Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2", "None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None) elif sub_sub_section_id is not None:", "many=True) context = {\"success\": True, \"message\": \"Content Approved List\", \"data\":", "is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True) elif sub_sub_section_id is", "SubSectionKeywordSerializer, SectionKeywordsSerializer, ChapterKeywordsSerializer, SubSectionKeywordsSerializer, KeywordSerializer, 
ContentContributorSerializer, ApprovedContentSerializer, ContentStatusSerializer, HardSpotCreateSerializer, ContentContributorsSerializer,", "Data to create content\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as", "KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content List\",\"data\": serializer.data}", "final_list = [] import os from shutil import copyfile book_id", "{'success': \"false\", 'message': 'Failed to get Content Status list.'} return", "+ 'content_contributers.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\": \"Activity", "GetSASView(ListAPIView): def get(self,request): try: sas_url = block_blob_service.generate_container_shared_access_signature( CONTAINER_NAME, ContainerPermissions.WRITE, datetime.utcnow()", "elif request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section', None)) elif request.query_params.get('section',", "serializer_class = ContentListSerializer def get(self, request): try: chapter_id = request.query_params.get('chapter',", "\"false\", 'message': 'Failed To Update content Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "import settings from azure.storage.blob import ( BlockBlobService, ContainerPermissions ) from", "Content Pending list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusList(ListCreateAPIView): queryset =", "None: queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id) serializer = SubSubSectionKeywordsSerializer(queryset, many=True)", "= SectionKeyword.objects.filter(section__id = section_id) serializer = SectionKeywordsSerializer(queryset, many=True) elif sub_section_id", "status=status.HTTP_200_OK) else: serializer = ContentContributorSerializer(data=request.data) if serializer.is_valid(): serializer.save() context =", 
"= ApprovedContentSerializer(chapters, many=True) for data in serializer.data: for d in", ": final=[i.id,i.video] final_list.append(final) except Exception as e: pass path =", "Keywords(ListAPIView): queryset = Content.objects.all() def get(self, request): try: chapter_id =", "= {'success': \"false\", 'message': 'Failed to get OtherContent Approved list.'}", "= request.data print(datalist) for data in datalist: Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database']) context =", "MultiPartParser from apps.dataupload.models import (Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword,", "get Content Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentPendingList(ListAPIView): queryset", "if state_id is not None: queryset = Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id", "ContentContributorsDownloadView(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = HardSpotCreateSerializer def get(self, request):", "SectionKeywordSerializer, SubSectionKeywordSerializer, SectionKeywordsSerializer, ChapterKeywordsSerializer, SubSectionKeywordsSerializer, KeywordSerializer, ContentContributorSerializer, ApprovedContentSerializer, ContentStatusSerializer, HardSpotCreateSerializer,", "os.remove('ApprovedContent.csv') data_frame.to_csv(path + 'ApprovedContent.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True,", "timedelta(hours=1), ) base_url=account_name+\".blob.core.windows.net/\"+CONTAINER_NAME context = {\"success\": True, \"message\": \"url link\",", "create Pesonal details\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error:", "= self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None) elif 
sub_section_id is not None: queryset =", "request.query_params.get('sub_sub_section', None) if chapter_id is not None: queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id)", "request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section', None)", "not None: queryset = SubSectionKeyword.objects.filter(sub_section__id = sub_section_id) serializer = SubSectionKeywordsSerializer(queryset,", "if subject is not None: queryset=self.get_queryset().filter(subject__id=subject) else: queryset = self.get_queryset()", "= sub_sub_section_id,approved=True) else: queryset = self.get_queryset().filter(approved=True) serializer = KeywordSerializer(queryset, many=True)", "to get Content Pending list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusList(ListCreateAPIView):", "ContentStatusDownloadView(RetrieveUpdateAPIView): queryset = HardSpot.objects.all() serializer_class = HardSpotCreateSerializer def get(self, request):", "= request.data print(datalist) for data in datalist: print(data) Content.objects.filter(pk=data['content_id']).update(video=data['video']) context", ") from datetime import datetime, timedelta import os import itertools", "as error: context = {'success': \"false\", 'message': 'Failed To Update", "not None: queryset = SectionKeyword.objects.filter(section__id = section_id) serializer = SectionKeywordsSerializer(queryset,", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentContributorsDownloadView(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class =", "{'success': \"false\", 'message': 'Failed To Update content Details.'} return Response(context,", "= ContentListSerializer def get(self, request): try: chapter_id = request.query_params.get('chapter', None)", "= self.get_object() serializer = 
ContentListSerializer(queryset, many=True) context = {\"success\": True,", "from datetime import datetime, timedelta import os import itertools from", "def get(self,request): try: t = threading.Thread(target=self.index, args=(), kwargs={}) t.setDaemon(True) t.start()", "shutil import copyfile book_id = request.query_params.get('book', None) book_name=\"\" if book_id", "Q(chapter__book__subject__grade__medium__state__id = state_id) ).distinct() else: queryset = self.get_queryset() serializer =", "context = {\"success\": True, \"message\": \"Activity List\",\"data\": 'media/files/content_contributers.csv'} return Response(context,", "class ContentApprovedList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer def get(self,", "CONTAINER_NAME= settings.AZURE_CONTAINER block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key) class ContentList(ListCreateAPIView): queryset =", "= request.query_params.get('book', None) chapters=Chapter.objects.filter(book_id=book).order_by('id') serializer = ApprovedContentSerializer(chapters, many=True) for data", "status=status.HTTP_200_OK) except Exception as error: context = { 'success': \"false\",", "None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section', None)) elif request.query_params.get('section', None) is", "many=True) elif sub_sub_section_id is not None: queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id =", "queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id , approved = False).exclude(approved_by=None) else: queryset", "data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium','Grade', 'Subject', 'Textbook Name', 'Level", "None) book_name=\"\" if book_id is not None: book_name=Book.objects.get(id=book_id) chapters=Chapter.objects.filter(book__id=book_id).order_by('id') serializer", "= ContentListSerializer(queryset, many=True) context = {\"success\": True, 
\"message\": \"Content Status", "sub_sub_section_id = request.query_params.get('sub_sub_section', None) if chapter_id is not None: queryset=ChapterKeyword.objects.filter(chapter__id", "serializer = ContentContributorSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {\"success\": True,", "To Update content Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookNestedList(ListAPIView): queryset", "for data in datalist: print(data) Content.objects.filter(pk=data['content_id']).update(video=data['video']) context = {\"success\": True,", "'message': 'Failed to get Content Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "= {\"success\": True, \"message\": \"Content Pending List\",\"data\": serializer.data} return Response(context,", "self.get_queryset() serializer = ContentStatusListSerializer(queryset, many=True) context = {\"success\": True, \"message\":", "Personal Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ApprovedContentDownloadView(ListAPIView): queryset =", "data in datalist: Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database']) context = {\"success\": True, \"message\": \"update", "queryset = self.get_queryset().filter(section__id=section_id, approved=True) elif sub_section_id is not None: queryset", "= os.path.isfile('content_contributers.csv') path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('content_contributers.csv')", "status=status.HTTP_200_OK) context = {\"success\": False, \"message\": \"Updation Failed\"} return Response(context,", "get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentContributorCreateView(ListCreateAPIView): queryset =", "= {\"success\": True, \"message\": \"OtherContent Approved List\", \"data\": serializer.data} return", ").distinct() else: 
queryset = self.get_queryset() serializer = ContentContributorsSerializer(queryset, many=True) res_list", "Book.objects.all() def get(self, request): try: final_list = [] import os", "Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database']) context = {\"success\": True, \"message\": \"update successfull\"} return Response(context,", "= settings.AZURE_ACCOUNT_NAME account_key = settings.AZURE_ACCOUNT_KEY CONTAINER_NAME= settings.AZURE_CONTAINER block_blob_service = BlockBlobService(account_name=account_name,", "return Response(context, status=status.HTTP_200_OK) context = {\"success\": False, \"message\": \"Invalid Input", "KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content Pending List\",\"data\":", "True, \"message\": \"Chapter List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception", "{\"success\": True, \"message\": \"Content Status List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK)", "is not None: queryset = Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id)", "\"Content Status List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as", "{\"success\": True, \"message\": \"Activity List\", \"data\": 'media/files/BackupContent.csv'} return Response(context, status=status.HTTP_200_OK)", "request): try: queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first() if queryset is not", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentApprovedList(ListAPIView): queryset = Content.objects.all() serializer_class", 
"data_frame.to_csv(path + 'content_contributers.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\":", "None: queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id) serializer = ChapterKeywordsSerializer(queryset, many=True) elif section_id", "approved_by=None) elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False,", "columns=['Board', 'Medium', 'Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit',", "ContentStatusSerializer, HardSpotCreateSerializer, ContentContributorsSerializer, SubSubSectionKeywordsSerializer, ContentStatusSerializerFileFormat, ) from django.utils.decorators import method_decorator", "Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {\"success\": False,", "= {'success': \"false\", 'message': 'Failed to get Content Approved list.'}", "final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium','Grade', 'Subject', 'Textbook Name',", "serializer_class = ContentListSerializer def get(self, request): try: if request.query_params.get('chapter', None)", "'message': 'Failed to get OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "BlockBlobService(account_name=account_name, account_key=account_key) class ContentList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer", "class BackupContent(ListAPIView): queryset = Book.objects.all() def get(self,request): try: t =", "queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section', None)) else: queryset = self.get_queryset() serializer = ContentListSerializer(queryset, many=True)", "request.query_params.get('state', None) if state_id is not None: queryset = Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id)", "None)) elif 
request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section', None)) elif", "blobService = BlockBlobService(account_name=accountName, account_key=accountKey) sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() + timedelta(hours=10))", "elif sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None)", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class Keywords(ListAPIView): queryset = Content.objects.all() def get(self, request): try:", "elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None)", "None: queryset=self.get_queryset().filter(subject__id=subject, content_only=True) else: queryset = self.get_queryset().filter(content_only=True) serializer = BookNestedSerializer(queryset,", "SubSectionKeywordsSerializer(queryset, many=True) elif sub_sub_section_id is not None: queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id", "= Book.objects.all() def get(self, request): try: final_list = [] import", "if book_id is not None: book_name=Book.objects.get(id=book_id) chapters=Chapter.objects.filter(book__id=book_id).order_by('id') serializer = ContentStatusSerializer(chapters,", "'Failed to get Content Status list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class", "content_only=True) else: queryset = self.get_queryset().filter(content_only=True) serializer = BookNestedSerializer(queryset, many=True) context", "= state_id) ).distinct() else: queryset = self.get_queryset() serializer = ContentContributorsSerializer(queryset,", "context = {\"success\": True, \"message\": \"Content Approved List\", \"data\": serializer.data}", "3 Textbook Unit','Level 4 Textbook Unit', 
'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5))))) exists =", "'/files/' data_frame = pd.DataFrame(final_list , columns=['id','url']) data_frame.to_csv(path+ 'BackupContent.csv', encoding=\"utf-8-sig\", index=False)", "True, \"message\": \"Created Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context", "class ContentContributorCreateView(ListCreateAPIView): queryset = ContentContributors.objects.all() serializer_class = ContentContributorSerializer def post(self,", "is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section', None)) elif request.query_params.get('section', None) is not", "request): try: queryset = self.get_object() serializer = ContentListSerializer(queryset, many=True) context", "= self.get_queryset() serializer = ContentContributorsSerializer(queryset, many=True) res_list = [] for", "= {\"success\": True, \"message\": \"Conetent List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK)", "Name','Content Link/Video Link','Content Rating (By Reviewer)','Comment (By Reviewer)', 'linked_keywords'] data_frame", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSasDownloadView(ListAPIView): def get(self,request): from evolve import settings accountName", "'Failed To Update content Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookNestedList(ListAPIView):", "context = {\"success\": True, \"token\":sas_token} return Response(context, status=status.HTTP_200_OK) except: return", "os from shutil import copyfile book = request.query_params.get('book', None) chapters=Chapter.objects.filter(book_id=book).order_by('id')", "ListAPIView, RetrieveUpdateAPIView,) from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated", "for d in data['chapter']: final_list.append(d) repeat_list=['Content Name','Content Link/Video Link','Content 
Rating", "\"false\", 'message': 'Failed to get Content Pending list.'} return Response(context,", "[],[] queryset = Content.objects.filter(approved=True) for i in queryset: try: if", "'message': 'Failed to create content.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class", "request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if", "queryset = Content.objects.all() serializer_class = HardSpotCreateSerializer def get(self, request): try:", "ContainerPermissions ) from datetime import datetime, timedelta import os import", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSASView(ListAPIView): def get(self,request): try: sas_url =", "import Q import threading account_name = settings.AZURE_ACCOUNT_NAME account_key = settings.AZURE_ACCOUNT_KEY", "self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None) elif sub_sub_section_id is not None: queryset =", "django.db.models import Q import threading account_name = settings.AZURE_ACCOUNT_NAME account_key =", "= {\"success\": True, \"message\": \"Content Status List\",\"data\": serializer.data} return Response(context,", "'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook Unit',", "ContentStatusSerializerFileFormat(queryset, many=True) context = {\"success\": True, \"message\": \"OtherContent Approved List\",", "= KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content Approved", ".serializers import ( ContentListSerializer, BookNestedSerializer, BookListSerializer, ContentStatusListSerializer, SectionKeywordSerializer, SubSectionKeywordSerializer, SectionKeywordsSerializer,", "not None: queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id) serializer = 
SubSubSectionKeywordsSerializer(queryset,", "if subject is not None: queryset=self.get_queryset().filter(subject__id=subject, content_only=True) else: queryset =", "False, \"message\": \"Invalid Input Data to create Pesonal details\"} return", "List\", \"data\": 'media/files/BackupContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "serializer.data: for d in data['chapter']: final_list.append(d) repeat_list=['Content Name','Content Link/Video Link','Content", "encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\": \"Activity List\", \"data\":", "= Content.objects.all() serializer_class = HardSpotCreateSerializer def get(self, request): try: final_list", "= BlockBlobService(account_name=account_name, account_key=account_key) class ContentList(ListCreateAPIView): queryset = Content.objects.all() serializer_class =", "Response(context, status=status.HTTP_404_NOT_FOUND) serializer = ContentListSerializer(content_list, data=request.data, context={\"user\":request.user}, partial=True) if serializer.is_valid():", "| Q(chapter__book__subject__grade__medium__state__id = state_id) ).distinct() else: queryset = self.get_queryset() serializer", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookNestedList(ListAPIView): queryset = Book.objects.all() serializer_class = BookNestedSerializer", "chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None) elif section_id is", "= {\"success\": True, \"message\": \"update successfull\"} return Response(context, status=status.HTTP_200_OK) except", ", approved = False).exclude(approved_by=None) else: queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None) serializer =", "'total', 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots']) exists = os.path.isfile('{}_contentstatus.csv'.format(book_name)) path =", "= [] import os from shutil import 
copyfile book_id =", "else: queryset = self.get_queryset().filter(approved=True) serializer = KeywordSerializer(queryset, many=True) context =", "not None: if str(queryset.email) == \"\" and request.data['email'] is not", "error: context = {'success': \"false\", 'message': 'Failed to create content.'}", "= Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id) | Q(section__chapter__book__subject__grade__medium__state__id= state_id) |", "'message': 'Failed to get Content Pending list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "Activity list.' ,\"error\" :str(error)} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def index(self): final_list,final", "Input Data to create Pesonal details\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except", "\"message\": \"Updation Successful\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {\"success\":", "OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequest(RetrieveUpdateAPIView): queryset =", "request,format=None): try: serializer = ContentListSerializer(data=request.data) if serializer.is_valid(): serializer.save() context =", "\"message\": \"Invalid Input Data to create content\"} return Response(context, status=status.HTTP_400_BAD_REQUEST)", "chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) sub_section_id =", "\"false\", 'message': 'Failed to get Content Status list.'} return Response(context,", "ApprovedContentSerializer(chapters, many=True) for data in serializer.data: for d in data['chapter']:", "self.get_object() serializer = ContentListSerializer(queryset, many=True) context = {\"success\": True, \"message\":", 
"Response(context, status=status.HTTP_200_OK) except Exception as error: context = { 'success':", "for i in queryset: try: if i.video is not None", "serializer = ContentListSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content", "import Book from apps.hardspot.models import HardSpot from .models import Content,ContentContributors", "request.query_params.get('book', None) chapters=Chapter.objects.filter(book_id=book).order_by('id') serializer = ApprovedContentSerializer(chapters, many=True) for data in", "serializer_class = BookListSerializer def get(self, request): try: subject = request.query_params.get('subject',", "True, \"message\": \"OtherContent Approved List\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK)", "as error: context = {'success': \"false\", 'message': 'content Id does", "approved_by=None) serializer = KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\":", "Exception as e: pass path = settings.MEDIA_ROOT + '/files/' data_frame", "'message': 'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class", "KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content Rejected List\",\"data\":", "\"data\": 'media/files/ApprovedContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error: context", "serializer.data} return Response(context, status=status.HTTP_200_OK) context = {\"success\": False, \"message\": \"Updation", "get OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BackupContent(ListAPIView): queryset", "context = {'success': \"false\", 'message': 'Failed to Personal Details.'} return", "import datetime, timedelta import os import itertools from django.db.models import", "\"Invalid Input Data to create Pesonal details\"} return Response(context, status=status.HTTP_400_BAD_REQUEST)", 
"self.get_queryset().filter(content_only=True) serializer = BookNestedSerializer(queryset, many=True) context = {\"success\": True, \"message\":", "= self.get_queryset().filter(approved=False).exclude(approved_by=None) serializer = KeywordSerializer(queryset, many=True) context = {\"success\": True,", "Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': \"false\",", "class ContentListUrlUpdate(ListAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer def get(self,", ") base_url=account_name+\".blob.core.windows.net/\"+CONTAINER_NAME context = {\"success\": True, \"message\": \"url link\", \"token\":sas_url,\"base_url\":base_url}", "if chapter_id is not None: queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id) serializer =", "Pending list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusList(ListCreateAPIView): queryset = Content.objects.all()", "'Level 3 Textbook Unit','Level 4 Textbook Unit', 'total', 'approved_contents', 'rejected_contents',", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BackupContent(ListAPIView): queryset = Book.objects.all() def get(self,request): try: t", "{\"success\": True, \"message\": \"OtherContent Approved List\", \"data\": serializer.data} return Response(context,", "= settings.AZURE_ACCOUNT_KEY containerName= settings.AZURE_CONTAINER try: blobService = BlockBlobService(account_name=accountName, account_key=accountKey) sas_token", "os.remove('content_contributers.csv') # data_frame.to_excel(path + 'content_contributers.xlsx') data_frame.to_csv(path + 'content_contributers.csv', encoding=\"utf-8-sig\", index=False)", "elif section_id is not None: queryset = SectionKeyword.objects.filter(section__id = section_id)", "= sub_sub_section_id , approved = False).exclude(approved_by=None) else: queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None)", "many=True) context = 
{\"success\": True, \"message\": \"Content Status List\",\"data\": serializer.data}", "ContentListSerializer(content_list, data=request.data, context={\"user\":request.user}, partial=True) if serializer.is_valid(): serializer.save() context = {\"success\":", "import method_decorator from django.contrib.auth.decorators import permission_required from rest_framework.parsers import MultiPartParser", "= [] import os from shutil import copyfile book =", "= SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id) serializer = SubSubSectionKeywordsSerializer(queryset, many=True) else: queryset", "KeywordSerializer def get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id", "from shutil import copyfile state_id = request.query_params.get('state', None) if state_id", "List\",\"data\": 'media/files/content_contributers.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error: context", "approved=False).exclude(approved_by=None) elif sub_sub_section_id is not None: queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id", "link\", \"token\":sas_url,\"base_url\":base_url} return Response(context, status=status.HTTP_200_OK) except Exception as error: context", "queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True) elif sub_sub_section_id is not None: queryset", "for data in res_list: for d in res_list: final_list.append(d) data_frame", "None: queryset = Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id) | Q(section__chapter__book__subject__grade__medium__state__id=", "= request.query_params.get('subject', None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject, content_only=True)", "Q(section__chapter__book__subject__grade__medium__state__id= state_id) | 
Q(chapter__book__subject__grade__medium__state__id = state_id) ).distinct() else: queryset =", "'message': 'Failed to get Conetent list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class", "get(self, request): try: queryset = self.get_object() serializer = ContentListSerializer(queryset, many=True)", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentContributorsDownloadView(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class", "post(self, request): try: datalist = request.data print(datalist) for data in", "to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentContributorsDownloadView(RetrieveUpdateAPIView):", "\"Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) else: serializer = ContentContributorSerializer(data=request.data)", "in serializer.data[i + 1:]: res_list.append(serializer.data[i]) for data in res_list: for", "put(self, request, pk, format=None): try: try: content_list = self.get_object() except", "= {'success': \"false\", 'message': 'Failed to get content list.'} return", "chapter_id) serializer = ChapterKeywordsSerializer(queryset, many=True) elif section_id is not None:", "self.get_queryset().filter(approved=True) serializer = ContentStatusSerializerFileFormat(queryset, many=True) context = {\"success\": True, \"message\":", "{'success': \"false\", 'message': 'Failed to get Content Approved list.'} return", "ContentListUrlPutRequestRevert(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer def post(self, request):", "serializer = SubSectionKeywordsSerializer(queryset, many=True) elif sub_sub_section_id is not None: queryset", "try: subject = request.query_params.get('subject', None) if subject is not None:", "ContentStatusSerializer def 
post(self, request): try: datalist = request.data print(datalist) for", "if exists: os.remove('content_contributers.csv') # data_frame.to_excel(path + 'content_contributers.xlsx') data_frame.to_csv(path + 'content_contributers.csv',", "in data['chapter']: final_list.append(d) repeat_list=['Content Name','Content Link/Video Link','Content Rating (By Reviewer)','Comment", "to get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookListView(ListAPIView): queryset", "None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject, content_only=True) else: queryset", "= request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True) elif", "queryset = self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None) elif sub_section_id is not None: queryset", "'Failed to Personal Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ApprovedContentDownloadView(ListAPIView):", "Response(context, status=status.HTTP_200_OK) context = {\"success\": False, \"message\": \"Updation Failed\"} return", "partial=True) if serializer.is_valid(): serializer.save() context = {\"success\": True, \"message\": \"Updation", "{\"success\": False, \"message\": \"Updation Failed\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception", "Content.objects.filter(approved=True) for i in queryset: try: if i.video is not", "\"message\": \"Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) else: serializer =", "else: queryset = self.get_queryset() serializer = ContentListSerializer(queryset, many=True) context =", "return Response(context, status=status.HTTP_200_OK) context = {\"success\": False, \"message\": \"Updation Failed\"}", "Name', 'Level 1 Textbook Unit', 'Level 2 Textbook 
Unit', 'Level", "HardSpotCreateSerializer, ContentContributorsSerializer, SubSubSectionKeywordsSerializer, ContentStatusSerializerFileFormat, ) from django.utils.decorators import method_decorator from", "else: queryset = self.get_queryset() serializer = KeywordSerializer(queryset, many=True) context =", "List\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "return Response(context, status=status.HTTP_404_NOT_FOUND) serializer = ContentListSerializer(content_list, data=request.data, context={\"user\":request.user}, partial=True) if", "serializer_class = ContentStatusSerializer def post(self, request): try: datalist = request.data", "= ContentStatusListSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Chapter List\",\"data\":", "'media/files/{}_contentstatus.csv'.format(book_name)} return Response(context, status=status.HTTP_200_OK) except Exception as error: context =", "not None: queryset = Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id) |", "IsAuthenticated from rest_framework.decorators import permission_classes from apps.configuration.models import Book from", "azure.storage.blob import ( BlockBlobService, ContainerPermissions ) from datetime import datetime,", "\"false\", 'message': 'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def post(self, request): try: queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first() if queryset", "= [] import os from shutil import copyfile state_id =", "{'success': \"false\", 'message': 'Failed to get Content Pending list.'} return", "serializer = 
ContentContributorsSerializer(queryset, many=True) res_list = [] for i in", "None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None) elif sub_sub_section_id is not", "pd from evolve import settings from evolve import settings from", ") from django.utils.decorators import method_decorator from django.contrib.auth.decorators import permission_required from", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusList(ListCreateAPIView): queryset = Content.objects.all() serializer_class", "import copyfile book_id = request.query_params.get('book', None) book_name=\"\" if book_id is", "5))))) exists = os.path.isfile('ApprovedContent.csv') path = settings.MEDIA_ROOT + '/files/' if", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentPendingList(ListAPIView): queryset = Content.objects.all() serializer_class =", "many=True) context = {\"success\": True, \"message\": \"Content Pending List\",\"data\": serializer.data}", "\"false\", 'message': 'Failed to get Chapter list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "'Level 3 Textbook Unit','Level 4 Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5))))) exists", "not exist.'} return Response(context, status=status.HTTP_404_NOT_FOUND) serializer = ContentListSerializer(content_list, data=request.data, context={\"user\":request.user},", "is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True) elif section_id is not None:", "= [],[] queryset = Content.objects.filter(approved=True) for i in queryset: try:", "class ContentListUrlPutRequest(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer def post(self,", "= self.get_queryset().filter(content_only=True) serializer = BookNestedSerializer(queryset, many=True) context = {\"success\": True,", 
"return Response(context, status=status.HTTP_200_OK) except: return None class ContentListUrlUpdate(ListAPIView): queryset =", "from shutil import copyfile book_id = request.query_params.get('book', None) book_name=\"\" if", "3 Textbook Unit','Level 4 Textbook Unit', 'total', 'approved_contents', 'rejected_contents', 'pending_contents',", "BackupContent(ListAPIView): queryset = Book.objects.all() def get(self,request): try: t = threading.Thread(target=self.index,", "SubSubSectionKeyword, ) import json import pandas as pd from evolve", "if exists: os.remove('{}_contentstatus.csv'.format(book_name)) # data_frame.to_excel(path + 'contentstatus.xlsx') data_frame.to_csv(path + str(book_name)+'_contentstatus.csv',", "section_id is not None: queryset = SectionKeyword.objects.filter(section__id = section_id) serializer", "\"message\": \"Invalid Input Data to create Pesonal details\"} return Response(context,", "data in res_list: for d in res_list: final_list.append(d) data_frame =", "None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject) else: queryset =", "'email','city_name','school_name','textbook_name']).drop_duplicates() exists = os.path.isfile('content_contributers.csv') path = settings.MEDIA_ROOT + '/files/' if", "status from rest_framework.generics import ( ListAPIView, ListCreateAPIView, ListAPIView, RetrieveUpdateAPIView,) from", "{'success': \"false\", 'message': 'Failed to Personal Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "request.data['email'] is not None: ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email']) queryset.refresh_from_db() serializer = ContentContributorSerializer(queryset) context", "not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None) elif section_id is not None: queryset", "SubSectionKeyword, SubSubSectionKeyword, ) import json import pandas as pd from", "serializer.data[i + 1:]: 
res_list.append(serializer.data[i]) for data in res_list: for d", "= block_blob_service.generate_container_shared_access_signature( CONTAINER_NAME, ContainerPermissions.WRITE, datetime.utcnow() + timedelta(hours=1), ) base_url=account_name+\".blob.core.windows.net/\"+CONTAINER_NAME context", "= False).exclude(approved_by=None) else: queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None) serializer = KeywordSerializer(queryset, many=True)", "from rest_framework.parsers import MultiPartParser from apps.dataupload.models import (Chapter, Section, SubSection,", "sub_sub_section_id is not None: queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id) serializer", "queryset = self.get_queryset().filter(content_only=True) serializer = BookNestedSerializer(queryset, many=True) context = {\"success\":", "= request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None) elif", "index=False) context = {\"success\": True, \"message\": \"Activity List\",\"data\": 'media/files/{}_contentstatus.csv'.format(book_name)} return", "else: queryset = self.get_queryset().filter(content_only=True) serializer = BookNestedSerializer(queryset, many=True) context =", "def get(self, request): try: queryset = self.get_queryset() serializer = ContentStatusListSerializer(queryset,", "queryset = self.get_object() serializer = ContentListSerializer(queryset, many=True) context = {\"success\":", "SectionKeyword.objects.filter(section__id = section_id) serializer = SectionKeywordsSerializer(queryset, many=True) elif sub_section_id is", "{'success': \"false\", 'message': 'Failed to create content.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSASView(ListAPIView): def get(self,request): try:", "Textbook 
Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list,", "request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None) elif section_id", "as pd from evolve import settings from evolve import settings", "serializer_class = HardSpotCreateSerializer def get(self, request): try: final_list = []", "SubSectionKeyword.objects.filter(sub_section__id = sub_section_id) serializer = SubSectionKeywordsSerializer(queryset, many=True) elif sub_sub_section_id is", "request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not None:", "= BookListSerializer def get(self, request): try: subject = request.query_params.get('subject', None)", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def post(self, request,format=None): try: serializer =", "settings from azure.storage.blob import ( BlockBlobService, ContainerPermissions ) from datetime", "Book.objects.all() def get(self,request): try: t = threading.Thread(target=self.index, args=(), kwargs={}) t.setDaemon(True)", "request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section', None) if chapter_id is not", "settings from evolve import settings from azure.storage.blob import ( BlockBlobService,", "shutil import copyfile state_id = request.query_params.get('state', None) if state_id is", "= {\"success\": True, \"message\": \"Content Rejected List\",\"data\": serializer.data} return Response(context,", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookNestedList(ListAPIView): queryset = Book.objects.all() serializer_class = BookNestedSerializer def", "approved=False, approved_by=None) elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id,", 
"Book.objects.all() serializer_class = BookNestedSerializer def get(self, request): try: subject =", "not None: queryset = self.get_queryset().filter(section__id=section_id, approved=True) elif sub_section_id is not", "from django.shortcuts import render from rest_framework import status from rest_framework.generics", "to get Content Status list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentRejectedList(ListAPIView):", "serializer_class = ContentListSerializer def get(self, request): try: queryset = self.get_object()", "data in serializer.data: for d in data['chapter']: final_list.append(d) data_frame =", "= os.path.isfile('{}_contentstatus.csv'.format(book_name)) path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('{}_contentstatus.csv'.format(book_name))", "import os from shutil import copyfile book = request.query_params.get('book', None)", "= blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() + timedelta(hours=10)) context = {\"success\": True, \"token\":sas_token}", "def get(self, request): try: queryset = self.get_queryset().filter(approved=True) serializer = ContentStatusSerializerFileFormat(queryset,", "serializer = ContentContributorSerializer(queryset) context = {\"success\": True, \"message\": \"Successful\", \"data\":", "Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5)))))", "queryset = self.get_queryset() serializer = ContentContributorsSerializer(queryset, many=True) res_list = []", "'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates() exists = os.path.isfile('content_contributers.csv') path = settings.MEDIA_ROOT + '/files/'", "None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section', None)) elif request.query_params.get('section', None) is not None: 
queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section',", "approved=False, approved_by=None) elif sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id", "queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id) serializer = ChapterKeywordsSerializer(queryset, many=True) elif section_id is", "def get(self, request): try: final_list = [] import os from", "is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None) elif section_id is not None:", "content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def put(self, request, pk, format=None):", "d in data['chapter']: final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium','Grade',", "ContentContributors.objects.all() serializer_class = ContentContributorSerializer def post(self, request): try: queryset =", "= request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not", "\"false\", 'message': 'Failed to get content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None) elif", "rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import", "exists: os.remove('ApprovedContent.csv') data_frame.to_csv(path + 'ApprovedContent.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\":", "= ContentStatusSerializer def get(self, request): try: queryset = self.get_queryset().filter(approved=True) serializer", "to create content\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error:", "for d in res_list: final_list.append(d) data_frame = 
pd.DataFrame(final_list , columns=['first_name',", "( ContentListSerializer, BookNestedSerializer, BookListSerializer, ContentStatusListSerializer, SectionKeywordSerializer, SubSectionKeywordSerializer, SectionKeywordsSerializer, ChapterKeywordsSerializer, SubSectionKeywordsSerializer,", "sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True) elif sub_sub_section_id", "sub_section_id) serializer = SubSectionKeywordsSerializer(queryset, many=True) elif sub_sub_section_id is not None:", "request.data print(datalist) for data in datalist: Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database']) context = {\"success\":", "in serializer.data: for d in data['chapter']: final_list.append(d) data_frame = pd.DataFrame(final_list", "apps.dataupload.models import (Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword, )", "queryset.refresh_from_db() serializer = ContentContributorSerializer(queryset) context = {\"success\": True, \"message\": \"Successful\",", "{ 'success': \"false\", 'message': 'Failed to get Activity list.'} return", "def index(self): final_list,final = [],[] queryset = Content.objects.filter(approved=True) for i", "self.get_object() except Exception as error: context = {'success': \"false\", 'message':", "Failed\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context =", "subject = request.query_params.get('subject', None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject,", "= Content.objects.all() serializer_class = KeywordSerializer parser_classes = (MultiPartParser,) def get(self,", "subject = request.query_params.get('subject', None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject)", "= Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: chapter_id", "not None: 
queryset=self.get_queryset().filter(subject__id=subject, content_only=True) else: queryset = self.get_queryset().filter(content_only=True) serializer =", "context = {\"success\": True, \"message\": \"update successfull\"} return Response(context, status=status.HTTP_200_OK)", "queryset = self.get_queryset().filter(approved=True) serializer = KeywordSerializer(queryset, many=True) context = {\"success\":", "= {'success': \"false\", 'message': 'Failed to get Activity list.' ,\"error\"", "data['chapter']: final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium','Grade', 'Subject', 'Textbook", "Unit', 'total', 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots']) exists = os.path.isfile('{}_contentstatus.csv'.format(book_name)) path", "pd.DataFrame(final_list , columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates() exists = os.path.isfile('content_contributers.csv') path =", "Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSasDownloadView(ListAPIView): def get(self,request): from", "= {\"success\": True, \"token\":sas_token} return Response(context, status=status.HTTP_200_OK) except: return None", "request.query_params.get('subject', None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject) else: queryset", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSasDownloadView(ListAPIView): def get(self,request): from evolve import", "= SubSectionKeyword.objects.filter(sub_section__id = sub_section_id) serializer = SubSectionKeywordsSerializer(queryset, many=True) elif sub_sub_section_id", "print(data) Content.objects.filter(pk=data['content_id']).update(video=data['video']) context = {\"success\": True, \"message\": \"update successfull\"} return", "context = {\"success\": True, \"message\": \"Content Rejected List\",\"data\": serializer.data} return", 
"ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first() if queryset is not None: if str(queryset.email) ==", "import os from shutil import copyfile state_id = request.query_params.get('state', None)", "except Exception as error: context = { 'success': \"false\", 'message':", "is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None) elif sub_sub_section_id", "serializer = ChapterKeywordsSerializer(queryset, many=True) elif section_id is not None: queryset", "\"Content Approved List\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception", "import settings from evolve import settings from azure.storage.blob import (", "BookNestedList(ListAPIView): queryset = Book.objects.all() serializer_class = BookNestedSerializer def get(self, request):", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentApprovedList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer", "from django.utils.decorators import method_decorator from django.contrib.auth.decorators import permission_required from rest_framework.parsers", "'message': 'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,))", "ContentStatusListSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Chapter List\",\"data\": serializer.data}", "sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() + timedelta(hours=10)) context = {\"success\": True,", "= settings.MEDIA_ROOT + '/files/' if exists: os.remove('{}_contentstatus.csv'.format(book_name)) # data_frame.to_excel(path +", "content_list = self.get_object() except Exception as error: context = 
{'success':", "= Content.objects.filter(approved=True) for i in queryset: try: if i.video is", "block_blob_service.generate_container_shared_access_signature( CONTAINER_NAME, ContainerPermissions.WRITE, datetime.utcnow() + timedelta(hours=1), ) base_url=account_name+\".blob.core.windows.net/\"+CONTAINER_NAME context =", "queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None) serializer = KeywordSerializer(queryset, many=True) context = {\"success\":", "to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSASView(ListAPIView): def", "to get content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def put(self, request,", "os from shutil import copyfile book_id = request.query_params.get('book', None) book_name=\"\"", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentPendingList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer", "section_id) serializer = SectionKeywordsSerializer(queryset, many=True) elif sub_section_id is not None:", "None class ContentListUrlUpdate(ListAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer def", "BookListSerializer def get(self, request): try: subject = request.query_params.get('subject', None) if", "settings.MEDIA_ROOT + '/files/' if exists: os.remove('content_contributers.csv') # data_frame.to_excel(path + 'content_contributers.xlsx')", "serializer = SectionKeywordsSerializer(queryset, many=True) elif sub_section_id is not None: queryset", "serializer.data[i] not in serializer.data[i + 1:]: res_list.append(serializer.data[i]) for data in", "True, \"message\": \"Activity List\",\"data\": 'media/files/{}_contentstatus.csv'.format(book_name)} return Response(context, status=status.HTTP_200_OK) except Exception", "1:]: res_list.append(serializer.data[i]) for data in res_list: for d in res_list:", "{\"success\": True, \"message\": 
\"Content Rejected List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK)", "import ( ContentListSerializer, BookNestedSerializer, BookListSerializer, ContentStatusListSerializer, SectionKeywordSerializer, SubSectionKeywordSerializer, SectionKeywordsSerializer, ChapterKeywordsSerializer,", "ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword, ) import json import pandas as", "try: t = threading.Thread(target=self.index, args=(), kwargs={}) t.setDaemon(True) t.start() context =", "request.data print(datalist) for data in datalist: print(data) Content.objects.filter(pk=data['content_id']).update(video=data['video']) context =", "as error: context = { 'success': \"false\", 'message': 'Failed to", "Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentContributorsDownloadView(RetrieveUpdateAPIView): queryset =", "if i.video is not None : final=[i.id,i.video] final_list.append(final) except Exception", "context = {\"success\": True, \"message\": \"Activity List\", \"data\": 'media/files/ApprovedContent.csv'} return", "( BlockBlobService, ContainerPermissions ) from datetime import datetime, timedelta import", "ContentListSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {\"success\": True, \"message\": \"Created", "is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None) elif sub_sub_section_id is", "and request.data['email'] is not None: ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email']) queryset.refresh_from_db() serializer = ContentContributorSerializer(queryset)", "pass path = settings.MEDIA_ROOT + '/files/' data_frame = pd.DataFrame(final_list ,", "List\",\"data\": 'media/files/{}_contentstatus.csv'.format(book_name)} return Response(context, status=status.HTTP_200_OK) except Exception as 
error: context", "self.get_queryset() serializer = BookListSerializer(queryset, many=True) context = {\"success\": True, \"message\":", "successfull\"} return Response(context, status=status.HTTP_200_OK) except Exception as error: context =", "(MultiPartParser,) def get(self, request): try: queryset = self.get_queryset() serializer =", "+ timedelta(hours=1), ) base_url=account_name+\".blob.core.windows.net/\"+CONTAINER_NAME context = {\"success\": True, \"message\": \"url", "except Exception as e: pass path = settings.MEDIA_ROOT + '/files/'", "+ str(book_name)+'_contentstatus.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\": \"Activity", "import ( ListAPIView, ListCreateAPIView, ListAPIView, RetrieveUpdateAPIView,) from rest_framework.response import Response", "= {'success': \"false\", 'message': 'Failed to get Content Status list.'}", "as error: context = {'success': \"false\", 'message': 'Failed to get", "= self.get_queryset().filter(section__id=section_id, approved=True) elif sub_section_id is not None: queryset =", "if serializer.is_valid(): serializer.save() context = {\"success\": True, \"message\": \"Created Successful\",", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusList(ListCreateAPIView): queryset = Content.objects.all() serializer_class =", "elif sub_sub_section_id is not None: queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id ,", "\"false\", 'message': 'Failed to get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=True) elif sub_section_id", "= BookListSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content List\",\"data\":", "t.start() context = {\"success\": True, \"message\": \"Activity List\", \"data\": 'media/files/BackupContent.csv'}", "Response from 
rest_framework.permissions import IsAuthenticated from rest_framework.decorators import permission_classes from", "True, \"message\": \"Content List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception", "to create content.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentRetrieveUpdate(RetrieveUpdateAPIView): queryset", "if request.query_params.get('chapter', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None)) elif request.query_params.get('section',", "not in serializer.data[i + 1:]: res_list.append(serializer.data[i]) for data in res_list:", "\"Activity List\", \"data\": 'media/files/ApprovedContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as", "Rejected List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "\"Chapter List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() + timedelta(hours=10)) context = {\"success\": True, \"token\":sas_token} return", "{'success': \"false\", 'message': 'Failed to get Activity list.' 
,\"error\" :str(error)}", "content.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentRetrieveUpdate(RetrieveUpdateAPIView): queryset = Content.objects.all()", "book = request.query_params.get('book', None) chapters=Chapter.objects.filter(book_id=book).order_by('id') serializer = ApprovedContentSerializer(chapters, many=True) for", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BackupContent(ListAPIView): queryset = Book.objects.all() def get(self,request):", "serializer = ContentListSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Chapter", "to get OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequest(RetrieveUpdateAPIView):", "Response(context, status=status.HTTP_200_OK) context = {\"success\": False, \"message\": \"Invalid Input Data", "else: serializer = ContentContributorSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {\"success\":", "path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('{}_contentstatus.csv'.format(book_name)) # data_frame.to_excel(path", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) def index(self): final_list,final = [],[] queryset = Content.objects.filter(approved=True) for", "= ContentContributorSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {\"success\": True, \"message\":", "\"Invalid Input Data to create content\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except", "ListCreateAPIView, ListAPIView, RetrieveUpdateAPIView,) from rest_framework.response import Response from rest_framework.permissions import", "get(self, request): try: final_list = [] import os from shutil", "RetrieveUpdateAPIView,) from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from", "True, \"message\": 
\"Conetent List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentApprovedList(ListAPIView): queryset = Content.objects.all() serializer_class =", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequest(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer", "False, \"message\": \"Updation Failed\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as", "import settings accountName = settings.AZURE_ACCOUNT_NAME accountKey = settings.AZURE_ACCOUNT_KEY containerName= settings.AZURE_CONTAINER", "request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section', None)) else: queryset =", "settings.AZURE_ACCOUNT_NAME accountKey = settings.AZURE_ACCOUNT_KEY containerName= settings.AZURE_CONTAINER try: blobService = BlockBlobService(account_name=accountName,", "{\"success\": True, \"message\": \"Activity List\",\"data\": 'media/files/{}_contentstatus.csv'.format(book_name)} return Response(context, status=status.HTTP_200_OK) except", "+ timedelta(hours=10)) context = {\"success\": True, \"token\":sas_token} return Response(context, status=status.HTTP_200_OK)", "if serializer.is_valid(): serializer.save() context = {\"success\": True, \"message\": \"Updation Successful\",\"data\":", "Chapter list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def post(self, request,format=None): try: serializer", "threading account_name = settings.AZURE_ACCOUNT_NAME account_key = settings.AZURE_ACCOUNT_KEY CONTAINER_NAME= settings.AZURE_CONTAINER block_blob_service", "= self.get_queryset() serializer = ContentStatusListSerializer(queryset, many=True) context = {\"success\": True,", "is not None: 
queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None) elif section_id is not", "List\", \"data\": 'media/files/ApprovedContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "queryset = HardSpot.objects.all() serializer_class = HardSpotCreateSerializer def get(self, request): try:", "approved=True) elif sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id =", "ContentPendingList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer def get(self, request):", "str(book_name)+'_contentstatus.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\": \"Activity List\",\"data\":", "except Exception as error: context = {'success': \"false\", 'message': 'content", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ApprovedContentDownloadView(ListAPIView): queryset = Book.objects.all() def", "ContentContributorSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {\"success\": True, \"message\": \"Successful\",", "\"message\": \"Activity List\", \"data\": 'media/files/ApprovedContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception", "create content.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentRetrieveUpdate(RetrieveUpdateAPIView): queryset =", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class =", "None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None) elif section_id is not None: queryset =", "book_name=\"\" if book_id is not None: book_name=Book.objects.get(id=book_id) 
chapters=Chapter.objects.filter(book__id=book_id).order_by('id') serializer =", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentContributorCreateView(ListCreateAPIView): queryset = ContentContributors.objects.all() serializer_class", "d in res_list: final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['first_name', 'last_name','mobile',", "| Q(section__chapter__book__subject__grade__medium__state__id= state_id) | Q(chapter__book__subject__grade__medium__state__id = state_id) ).distinct() else: queryset", "queryset = self.get_queryset() serializer = ContentStatusListSerializer(queryset, many=True) context = {\"success\":", "{\"success\": True, \"message\": \"Content Pending List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK)", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusDownloadView(RetrieveUpdateAPIView): queryset = HardSpot.objects.all() serializer_class", "approved=False).exclude(approved_by=None) elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None)", "= BlockBlobService(account_name=accountName, account_key=accountKey) sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() + timedelta(hours=10)) context", "self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None) elif sub_section_id is not None: queryset =", "encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\": \"Activity List\",\"data\": 'media/files/content_contributers.csv'}", "get Content Rejected list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class Keywords(ListAPIView): queryset", "+ 'content_contributers.xlsx') data_frame.to_csv(path + 'content_contributers.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\":", 
"context = {\"success\": True, \"message\": \"Activity List\",\"data\": 'media/files/{}_contentstatus.csv'.format(book_name)} return Response(context,", "containerName= settings.AZURE_CONTAINER try: blobService = BlockBlobService(account_name=accountName, account_key=accountKey) sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ,", "Pesonal details\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context", "class BookNestedList(ListAPIView): queryset = Book.objects.all() serializer_class = BookNestedSerializer def get(self,", "class ContentList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer parser_classes =", "+ '/files/' if exists: os.remove('ApprovedContent.csv') data_frame.to_csv(path + 'ApprovedContent.csv', encoding=\"utf-8-sig\", index=False)", "= chapter_id) serializer = ChapterKeywordsSerializer(queryset, many=True) elif section_id is not", "(By Reviewer)','Comment (By Reviewer)', 'linked_keywords'] data_frame = pd.DataFrame(final_list , columns=['Board',", "Response(context, status=status.HTTP_200_OK) except: return None class ContentListUrlUpdate(ListAPIView): queryset = Content.objects.all()", "import copyfile book = request.query_params.get('book', None) chapters=Chapter.objects.filter(book_id=book).order_by('id') serializer = ApprovedContentSerializer(chapters,", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookListView(ListAPIView): queryset = Book.objects.all() serializer_class = BookListSerializer", "= self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True) else: queryset = self.get_queryset().filter(approved=True) serializer =", "not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True) elif sub_sub_section_id is not", "'Failed to get Activity list.' 
,\"error\" :str(error)} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "None: book_name=Book.objects.get(id=book_id) chapters=Chapter.objects.filter(book__id=book_id).order_by('id') serializer = ContentStatusSerializer(chapters, many=True) for data in", "ContentRetrieveUpdate(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self, request):", "\"Updation Failed\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context", "serializer = ContentStatusListSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Chapter", ") import json import pandas as pd from evolve import", "get Content Status list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentRejectedList(ListAPIView): queryset", "to Personal Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ApprovedContentDownloadView(ListAPIView): queryset", "ContentListUrlPutRequest(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer def post(self, request):", "elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True) elif", "Exception as error: context = {'success': \"false\", 'message': 'content Id", "get(self,request): try: t = threading.Thread(target=self.index, args=(), kwargs={}) t.setDaemon(True) t.start() context", "queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None) elif section_id is not None: queryset =", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusDownloadView(RetrieveUpdateAPIView): queryset = HardSpot.objects.all() serializer_class = HardSpotCreateSerializer def", "BookListSerializer, ContentStatusListSerializer, SectionKeywordSerializer, SubSectionKeywordSerializer, 
SectionKeywordsSerializer, ChapterKeywordsSerializer, SubSectionKeywordsSerializer, KeywordSerializer, ContentContributorSerializer, ApprovedContentSerializer,", "str(queryset.email) == \"\" and request.data['email'] is not None: ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email']) queryset.refresh_from_db()", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookListView(ListAPIView): queryset = Book.objects.all() serializer_class = BookListSerializer def", "\"Content Rejected List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as", "= Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: if", "from evolve import settings accountName = settings.AZURE_ACCOUNT_NAME accountKey = settings.AZURE_ACCOUNT_KEY", "import MultiPartParser from apps.dataupload.models import (Chapter, Section, SubSection, ChapterKeyword, SectionKeyword,", "= ContentContributorSerializer def post(self, request): try: queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first()", "= KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content List\",\"data\":", "Book from apps.hardspot.models import HardSpot from .models import Content,ContentContributors from", "\"false\", 'message': 'Failed to create content.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,))", "BookNestedSerializer, BookListSerializer, ContentStatusListSerializer, SectionKeywordSerializer, SubSectionKeywordSerializer, SectionKeywordsSerializer, ChapterKeywordsSerializer, SubSectionKeywordsSerializer, KeywordSerializer, ContentContributorSerializer,", "Id does not exist.'} return Response(context, status=status.HTTP_404_NOT_FOUND) serializer = 
ContentListSerializer(content_list,", "[] import os from shutil import copyfile book_id = request.query_params.get('book',", "args=(), kwargs={}) t.setDaemon(True) t.start() context = {\"success\": True, \"message\": \"Activity", "os.path.isfile('content_contributers.csv') path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('content_contributers.csv') #", "OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView): queryset =", "queryset = Book.objects.all() def get(self, request): try: final_list = []", "\"message\": \"Content Status List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception", "'Level 1 Textbook Unit', 'Level 2 Textbook Unit', 'Level 3", "= ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first() if queryset is not None: if str(queryset.email)", "SubSectionKeywordsSerializer, KeywordSerializer, ContentContributorSerializer, ApprovedContentSerializer, ContentStatusSerializer, HardSpotCreateSerializer, ContentContributorsSerializer, SubSubSectionKeywordsSerializer, ContentStatusSerializerFileFormat, )", "error: context = {'success': \"false\", 'message': 'Failed to get Conetent", "state_id) | Q(section__chapter__book__subject__grade__medium__state__id= state_id) | Q(chapter__book__subject__grade__medium__state__id = state_id) ).distinct() else:", "= ContentContributors.objects.all() serializer_class = ContentContributorSerializer def post(self, request): try: queryset", "= pd.DataFrame(final_list , columns=['Board', 'Medium','Grade', 'Subject', 'Textbook Name', 'Level 1", "queryset = Content.objects.all() serializer_class = KeywordSerializer def get(self, request): try:", "accountName = settings.AZURE_ACCOUNT_NAME accountKey = settings.AZURE_ACCOUNT_KEY containerName= 
settings.AZURE_CONTAINER try: blobService", "index=False) context = {\"success\": True, \"message\": \"Activity List\", \"data\": 'media/files/ApprovedContent.csv'}", "Content Rejected list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class Keywords(ListAPIView): queryset =", "= pd.DataFrame(final_list , columns=['Board', 'Medium', 'Grade', 'Subject', 'Textbook Name', 'Level", "try: blobService = BlockBlobService(account_name=accountName, account_key=accountKey) sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() +", "try: if i.video is not None : final=[i.id,i.video] final_list.append(final) except", "datetime, timedelta import os import itertools from django.db.models import Q", "{\"success\": True, \"message\": \"Content List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except", "None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True) else: queryset = self.get_queryset().filter(approved=True)", "elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None)", "timedelta import os import itertools from django.db.models import Q import", "\"message\": \"url link\", \"token\":sas_url,\"base_url\":base_url} return Response(context, status=status.HTTP_200_OK) except Exception as", "Content.objects.all() serializer_class = KeywordSerializer def get(self, request): try: chapter_id =", "None: queryset = self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None) elif sub_section_id is not None:", "sub_section_id is not None: queryset = SubSectionKeyword.objects.filter(sub_section__id = sub_section_id) serializer", "\"message\": \"Updation Failed\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error:", "try: datalist = request.data print(datalist) for 
data in datalist: Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database'])", "from rest_framework import status from rest_framework.generics import ( ListAPIView, ListCreateAPIView,", "in serializer.data: for d in data['chapter']: final_list.append(d) repeat_list=['Content Name','Content Link/Video", "post(self, request,format=None): try: serializer = ContentListSerializer(data=request.data) if serializer.is_valid(): serializer.save() context", "self.get_queryset().filter(section__id=section_id, approved=True) elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id,", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentContributorsDownloadView(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = HardSpotCreateSerializer", "status=status.HTTP_200_OK) except: return None class ContentListUrlUpdate(ListAPIView): queryset = Content.objects.all() serializer_class", "= request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section', None) if chapter_id is", "elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None) elif", "state_id) | Q(chapter__book__subject__grade__medium__state__id = state_id) ).distinct() else: queryset = self.get_queryset()", "'content_contributers.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\": \"Activity List\",\"data\":", "Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BackupContent(ListAPIView): queryset = Book.objects.all()", "= ContentListSerializer(content_list, data=request.data, context={\"user\":request.user}, partial=True) if serializer.is_valid(): serializer.save() context =", "error: context = {'success': \"false\", 'message': 'content Id does not", "return Response(context, 
status=status.HTTP_500_INTERNAL_SERVER_ERROR) def put(self, request, pk, format=None): try: try:", "render from rest_framework import status from rest_framework.generics import ( ListAPIView,", "try: if request.query_params.get('chapter', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None)) elif", "+ 'contentstatus.xlsx') data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\":", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSASView(ListAPIView): def get(self,request): try: sas_url = block_blob_service.generate_container_shared_access_signature(", "serializer_class = KeywordSerializer parser_classes = (MultiPartParser,) def get(self, request): try:", "context = {'success': \"false\", 'message': 'Failed to get Content Status", "Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'total',", "{\"success\": False, \"message\": \"Invalid Input Data to create Pesonal details\"}", "Content.objects.all() serializer_class = HardSpotCreateSerializer def get(self, request): try: final_list =", "approved = False).exclude(approved_by=None) else: queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None) serializer = KeywordSerializer(queryset,", "t = threading.Thread(target=self.index, args=(), kwargs={}) t.setDaemon(True) t.start() context = {\"success\":", "else: queryset = self.get_queryset() serializer = BookListSerializer(queryset, many=True) context =", "Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'total', 'approved_contents',", "as e: pass path = settings.MEDIA_ROOT + '/files/' data_frame =", "many=True) context = {\"success\": True, \"message\": \"OtherContent Approved List\", \"data\":", "queryset = Content.objects.filter(approved=True) for i in queryset: try: if i.video", "= request.query_params.get('state', None) if state_id is not None: queryset =", "def get(self,request): 
from evolve import settings accountName = settings.AZURE_ACCOUNT_NAME accountKey", "get Activity list.' ,\"error\" :str(error)} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def index(self):", ",\"error\" :str(error)} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def index(self): final_list,final = [],[]", "True, \"message\": \"Activity List\",\"data\": 'media/files/content_contributers.csv'} return Response(context, status=status.HTTP_200_OK) except Exception", "datetime import datetime, timedelta import os import itertools from django.db.models", "\"Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {\"success\": False,", "serializer.save() context = {\"success\": True, \"message\": \"Created Successful\", \"data\": serializer.data}", "get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusDownloadView(RetrieveUpdateAPIView): queryset =", "= KeywordSerializer def get(self, request): try: chapter_id = request.query_params.get('chapter', None)", "True, \"message\": \"Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context =", "def get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id =", "'hard_spots']) exists = os.path.isfile('{}_contentstatus.csv'.format(book_name)) path = settings.MEDIA_ROOT + '/files/' if", "\"token\":sas_url,\"base_url\":base_url} return Response(context, status=status.HTTP_200_OK) except Exception as error: context =", "True, \"message\": \"Activity List\", \"data\": 'media/files/ApprovedContent.csv'} return Response(context, status=status.HTTP_200_OK) except", "BookListSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content List\",\"data\": serializer.data}", "= ContentListSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {\"success\": True, 
\"message\":", "many=True) else: queryset = self.get_queryset() serializer = KeywordSerializer(queryset, many=True) context", "import render from rest_framework import status from rest_framework.generics import (", "KeywordSerializer, ContentContributorSerializer, ApprovedContentSerializer, ContentStatusSerializer, HardSpotCreateSerializer, ContentContributorsSerializer, SubSubSectionKeywordsSerializer, ContentStatusSerializerFileFormat, ) from", "context = {\"success\": True, \"message\": \"Created Successful\", \"data\": serializer.data} return", "Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5))))) exists = os.path.isfile('ApprovedContent.csv') path = settings.MEDIA_ROOT", "'Failed to get OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class", "to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSasDownloadView(ListAPIView): def", "= request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None)", "is not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True) else: queryset", "many=True) res_list = [] for i in range(len(serializer.data)): if serializer.data[i]", "error: context = {'success': \"false\", 'message': 'Failed to get Chapter", "context = {'success': \"false\", 'message': 'Failed to get Content list.'}", "def post(self, request): try: datalist = request.data print(datalist) for data", "ContentListUrlUpdate(ListAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer def get(self, request):", "exists: os.remove('{}_contentstatus.csv'.format(book_name)) # data_frame.to_excel(path + 'contentstatus.xlsx') data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding=\"utf-8-sig\",", "Content list.'} return 
Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentContributorCreateView(ListCreateAPIView): queryset = ContentContributors.objects.all()", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ApprovedContentDownloadView(ListAPIView): queryset = Book.objects.all() def get(self, request):", "Approved List\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as", "'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSasDownloadView(ListAPIView):", "not None: queryset=self.get_queryset().filter(subject__id=subject) else: queryset = self.get_queryset() serializer = BookListSerializer(queryset,", "= Book.objects.all() def get(self,request): try: t = threading.Thread(target=self.index, args=(), kwargs={})", "settings.MEDIA_ROOT + '/files/' if exists: os.remove('{}_contentstatus.csv'.format(book_name)) # data_frame.to_excel(path + 'contentstatus.xlsx')", "is not None: ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email']) queryset.refresh_from_db() serializer = ContentContributorSerializer(queryset) context =", "True, \"message\": \"update successfull\"} return Response(context, status=status.HTTP_200_OK) except Exception as", "[] import os from shutil import copyfile state_id = request.query_params.get('state',", "= self.get_queryset().filter(sub_section__id=sub_section_id, approved=True) elif sub_sub_section_id is not None: queryset =", "Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentPendingList(ListAPIView): queryset = Content.objects.all()", "final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates() exists =", "[] for i in range(len(serializer.data)): if serializer.data[i] not in serializer.data[i", 
"{\"success\": True, \"message\": \"Content Approved List\", \"data\": serializer.data} return Response(context,", "\"message\": \"Content List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as", "try: final_list = [] import os from shutil import copyfile", "block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key) class ContentList(ListCreateAPIView): queryset = Content.objects.all() serializer_class", "ContentListSerializer def get(self, request): try: if request.query_params.get('chapter', None) is not", "queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first() if queryset is not None: if", "'/files/' if exists: os.remove('{}_contentstatus.csv'.format(book_name)) # data_frame.to_excel(path + 'contentstatus.xlsx') data_frame.to_csv(path +", "None) if state_id is not None: queryset = Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) |", "context = {\"success\": True, \"message\": \"Conetent List\",\"data\": serializer.data} return Response(context,", "Status List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "to get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentContributorCreateView(ListCreateAPIView): queryset", "path = settings.MEDIA_ROOT + '/files/' data_frame = pd.DataFrame(final_list , columns=['id','url'])", "Q import threading account_name = settings.AZURE_ACCOUNT_NAME account_key = settings.AZURE_ACCOUNT_KEY CONTAINER_NAME=", "else: queryset = self.get_queryset() serializer = ContentContributorsSerializer(queryset, many=True) res_list =", "\"token\":sas_token} return Response(context, status=status.HTTP_200_OK) except: return None class 
ContentListUrlUpdate(ListAPIView): queryset", "class ContentRetrieveUpdate(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self,", "try: datalist = request.data print(datalist) for data in datalist: print(data)", "= Content.objects.all() serializer_class = ContentStatusSerializer def post(self, request): try: datalist", "self.get_queryset() serializer = KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\":", "data in datalist: print(data) Content.objects.filter(pk=data['content_id']).update(video=data['video']) context = {\"success\": True, \"message\":", "except Exception as error: context = {'success': \"false\", 'message': 'Failed", "is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None) elif sub_section_id is", "\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context", "base_url=account_name+\".blob.core.windows.net/\"+CONTAINER_NAME context = {\"success\": True, \"message\": \"url link\", \"token\":sas_url,\"base_url\":base_url} return", "Input Data to create content\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception", "= Content.objects.all() serializer_class = KeywordSerializer def get(self, request): try: chapter_id", "is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=True) elif sub_section_id is", "+ '/files/' if exists: os.remove('content_contributers.csv') # data_frame.to_excel(path + 'content_contributers.xlsx') data_frame.to_csv(path", "{\"success\": True, \"message\": \"Activity List\", \"data\": 'media/files/ApprovedContent.csv'} return Response(context, status=status.HTTP_200_OK)", "True, \"message\": \"url link\", \"token\":sas_url,\"base_url\":base_url} return Response(context, status=status.HTTP_200_OK) except Exception", "queryset = 
Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id) | Q(section__chapter__book__subject__grade__medium__state__id= state_id)", "Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id) | Q(section__chapter__book__subject__grade__medium__state__id= state_id) | Q(chapter__book__subject__grade__medium__state__id", "request): try: chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section', None)", "= sub_sub_section_id) serializer = SubSubSectionKeywordsSerializer(queryset, many=True) else: queryset = self.get_queryset()", "{\"success\": False, \"message\": \"Invalid Input Data to create content\"} return", "Textbook Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level", "self.get_queryset().filter(sub_section__id=sub_section_id, approved=True) elif sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ApprovedContentDownloadView(ListAPIView): queryset = Book.objects.all() def get(self,", "ContentApprovedList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer def get(self, request):", "HardSpotCreateSerializer def get(self, request): try: final_list = [] import os", "encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\": \"Activity List\",\"data\": 'media/files/{}_contentstatus.csv'.format(book_name)}", "context = {'success': \"false\", 'message': 'Failed to get OtherContent Approved", "'media/files/BackupContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error: context =", 
"ContentListSerializer def get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id", "= {\"success\": True, \"message\": \"Updation Successful\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK)", "is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section', None)) else: queryset = self.get_queryset() serializer", "queryset = SectionKeyword.objects.filter(section__id = section_id) serializer = SectionKeywordsSerializer(queryset, many=True) elif", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentRetrieveUpdate(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class =", "django.shortcuts import render from rest_framework import status from rest_framework.generics import", "= {\"success\": True, \"message\": \"Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK)", "many=True) context = {\"success\": True, \"message\": \"Chapter List\",\"data\": serializer.data} return", "= ContentListSerializer def get(self, request): try: queryset = self.get_object() serializer", "queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None) elif sub_sub_section_id is not None: queryset", "= {'success': \"false\", 'message': 'Failed to get Content Pending list.'}", "rest_framework.decorators import permission_classes from apps.configuration.models import Book from apps.hardspot.models import", "BlockBlobService, ContainerPermissions ) from datetime import datetime, timedelta import os", "\"message\": \"Conetent List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as", "= self.get_queryset().filter(approved=False, approved_by=None) serializer = KeywordSerializer(queryset, many=True) context = {\"success\":", "Section, SubSection, ChapterKeyword, SectionKeyword, 
SubSectionKeyword, SubSubSectionKeyword, ) import json import", "request): try: if request.query_params.get('chapter', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None))", "import status from rest_framework.generics import ( ListAPIView, ListCreateAPIView, ListAPIView, RetrieveUpdateAPIView,)", "KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content Approved List\",", "elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=True) elif", "serializer_class = ContentStatusSerializer def get(self, request): try: queryset = self.get_queryset().filter(approved=True)", "data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium', 'Grade', 'Subject', 'Textbook Name',", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentPendingList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer def", "None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id,", "status=status.HTTP_404_NOT_FOUND) serializer = ContentListSerializer(content_list, data=request.data, context={\"user\":request.user}, partial=True) if serializer.is_valid(): serializer.save()", "context = {\"success\": False, \"message\": \"Updation Failed\"} return Response(context, status=status.HTTP_400_BAD_REQUEST)", "{\"success\": True, \"message\": \"Activity List\",\"data\": 'media/files/content_contributers.csv'} return Response(context, status=status.HTTP_200_OK) except", "'message': 'Failed to get Activity list.' 
,\"error\" :str(error)} return Response(context,", "+ '/files/' data_frame = pd.DataFrame(final_list , columns=['id','url']) data_frame.to_csv(path+ 'BackupContent.csv', encoding=\"utf-8-sig\",", "if exists: os.remove('ApprovedContent.csv') data_frame.to_csv(path + 'ApprovedContent.csv', encoding=\"utf-8-sig\", index=False) context =", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def index(self): final_list,final = [],[] queryset = Content.objects.filter(approved=True)", "import pandas as pd from evolve import settings from evolve", "status=status.HTTP_200_OK) except Exception as error: context = {'success': \"false\", 'message':", "post(self, request): try: queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first() if queryset is", "sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is", "from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators", "evolve import settings from azure.storage.blob import ( BlockBlobService, ContainerPermissions )", "= {\"success\": True, \"message\": \"Activity List\",\"data\": 'media/files/{}_contentstatus.csv'.format(book_name)} return Response(context, status=status.HTTP_200_OK)", "queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None) elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id,", "queryset = self.get_queryset().filter(approved=False, approved_by=None) serializer = KeywordSerializer(queryset, many=True) context =", "context = {\"success\": True, \"message\": \"Content Pending List\",\"data\": serializer.data} return", "'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook", "approved=False, 
approved_by=None) elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id,", "= {\"success\": True, \"message\": \"Activity List\",\"data\": 'media/files/content_contributers.csv'} return Response(context, status=status.HTTP_200_OK)", "request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section', None)) elif request.query_params.get('section', None)", "True, \"message\": \"Content Status List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except", "Data to create Pesonal details\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception", "account_name = settings.AZURE_ACCOUNT_NAME account_key = settings.AZURE_ACCOUNT_KEY CONTAINER_NAME= settings.AZURE_CONTAINER block_blob_service =", "account_key=account_key) class ContentList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer parser_classes", "= {\"success\": True, \"message\": \"Created Successful\", \"data\": serializer.data} return Response(context,", "class ContentRejectedList(ListAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self,", "BlockBlobService(account_name=accountName, account_key=accountKey) sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() + timedelta(hours=10)) context =", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def post(self, request,format=None): try: serializer = ContentListSerializer(data=request.data) if", "os.path.isfile('ApprovedContent.csv') path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('ApprovedContent.csv') data_frame.to_csv(path", "create content\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context", "Link/Video Link','Content Rating (By Reviewer)','Comment (By 
Reviewer)', 'linked_keywords'] data_frame =", "self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None) elif sub_sub_section_id is not None: queryset =self.get_queryset().filter(sub_sub_section__id =", "BookNestedSerializer def get(self, request): try: subject = request.query_params.get('subject', None) if", "HardSpot from .models import Content,ContentContributors from .serializers import ( ContentListSerializer,", "content\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context =", "not None: queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id , approved = False).exclude(approved_by=None)", "ContentRejectedList(ListAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self, request):", ", columns=['Board', 'Medium','Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit',", "= {\"success\": False, \"message\": \"Invalid Input Data to create content\"}", "ContentStatusList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self, request):", "data_frame = pd.DataFrame(final_list , columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates() exists = os.path.isfile('content_contributers.csv')", "\"message\": \"Activity List\",\"data\": 'media/files/content_contributers.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as", "import HardSpot from .models import Content,ContentContributors from .serializers import (", "\"\" and request.data['email'] is not None: ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email']) queryset.refresh_from_db() serializer =", "def put(self, request, pk, format=None): try: try: content_list = self.get_object()", "context = { 'success': \"false\", 'message': 'Failed to get Activity", ".models import Content,ContentContributors 
from .serializers import ( ContentListSerializer, BookNestedSerializer, BookListSerializer,", "'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class", "context = {\"success\": True, \"message\": \"Updation Successful\",\"data\": serializer.data} return Response(context,", "request.query_params.get('chapter', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None)) elif request.query_params.get('section', None)", "None)) elif request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section', None)) else:", "many=True) for data in serializer.data: for d in data['chapter']: final_list.append(d)", "else: queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None) serializer = KeywordSerializer(queryset, many=True) context =", "ContentListSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content Status List\",\"data\":", "apps.hardspot.models import HardSpot from .models import Content,ContentContributors from .serializers import", "= self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None) elif sub_section_id is not None: queryset", "class ContentStatusDownloadView(RetrieveUpdateAPIView): queryset = HardSpot.objects.all() serializer_class = HardSpotCreateSerializer def get(self,", "datetime.utcnow() + timedelta(hours=1), ) base_url=account_name+\".blob.core.windows.net/\"+CONTAINER_NAME context = {\"success\": True, \"message\":", "queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True) else: queryset = self.get_queryset().filter(approved=True) serializer", "if queryset is not None: if str(queryset.email) == \"\" and", "to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class 
ContentStatusDownloadView(RetrieveUpdateAPIView): queryset", "in range(len(serializer.data)): if serializer.data[i] not in serializer.data[i + 1:]: res_list.append(serializer.data[i])", "Rating (By Reviewer)','Comment (By Reviewer)', 'linked_keywords'] data_frame = pd.DataFrame(final_list ,", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer def", "serializer = ContentListSerializer(content_list, data=request.data, context={\"user\":request.user}, partial=True) if serializer.is_valid(): serializer.save() context", "serializer.is_valid(): serializer.save() context = {\"success\": True, \"message\": \"Updation Successful\",\"data\": serializer.data}", "is not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None) else: queryset", "queryset = Book.objects.all() serializer_class = BookNestedSerializer def get(self, request): try:", "class GetSasDownloadView(ListAPIView): def get(self,request): from evolve import settings accountName =", "Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context = {'success': \"false\",", "section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None) elif sub_section_id", "exists: os.remove('content_contributers.csv') # data_frame.to_excel(path + 'content_contributers.xlsx') data_frame.to_csv(path + 'content_contributers.csv', encoding=\"utf-8-sig\",", "{\"success\": True, \"token\":sas_token} return Response(context, status=status.HTTP_200_OK) except: return None class", "kwargs={}) t.setDaemon(True) t.start() context = {\"success\": True, \"message\": \"Activity List\",", "request): try: queryset = self.get_queryset() serializer = ContentStatusListSerializer(queryset, many=True) context", "from apps.dataupload.models import (Chapter, Section, 
SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword,", "{\"success\": True, \"message\": \"Chapter List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except", "True, \"message\": \"Updation Successful\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context =", "is not None: queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id) serializer =", "copyfile book = request.query_params.get('book', None) chapters=Chapter.objects.filter(book_id=book).order_by('id') serializer = ApprovedContentSerializer(chapters, many=True)", "serializer.save() context = {\"success\": True, \"message\": \"Successful\", \"data\": serializer.data} return", "serializer.data} return Response(context, status=status.HTTP_200_OK) context = {\"success\": False, \"message\": \"Invalid", ", columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates() exists = os.path.isfile('content_contributers.csv') path = settings.MEDIA_ROOT", "= {\"success\": True, \"message\": \"Activity List\", \"data\": 'media/files/BackupContent.csv'} return Response(context,", "= ContentStatusSerializer def post(self, request): try: datalist = request.data print(datalist)", "res_list: for d in res_list: final_list.append(d) data_frame = pd.DataFrame(final_list ,", "= sub_section_id) serializer = SubSectionKeywordsSerializer(queryset, many=True) elif sub_sub_section_id is not", "try: queryset = self.get_object() serializer = ContentListSerializer(queryset, many=True) context =", "\"Conetent List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "error: context = {'success': \"false\", 'message': 'Failed to get Content", "res_list.append(serializer.data[i]) for data in res_list: for d in res_list: final_list.append(d)", "error: context = { 'success': \"false\", 'message': 'Failed to 
get", "serializer = SubSubSectionKeywordsSerializer(queryset, many=True) else: queryset = self.get_queryset() serializer =", "= self.get_queryset().filter(approved=True) serializer = KeywordSerializer(queryset, many=True) context = {\"success\": True,", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequest(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class", "get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section',", "sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False,", "= HardSpot.objects.all() serializer_class = HardSpotCreateSerializer def get(self, request): try: final_list", "data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\":", "self.get_queryset() serializer = ContentContributorsSerializer(queryset, many=True) res_list = [] for i", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class Keywords(ListAPIView): queryset = Content.objects.all() def", "ListAPIView, ListCreateAPIView, ListAPIView, RetrieveUpdateAPIView,) from rest_framework.response import Response from rest_framework.permissions", "try: queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first() if queryset is not None:", "\"url link\", \"token\":sas_url,\"base_url\":base_url} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "[] import os from shutil import copyfile book = request.query_params.get('book',", "True, \"message\": \"Content Pending List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except", 
"state_id is not None: queryset = Content.objects.filter(Q(sub_sub_section__subsection__section__chapter__book__subject__grade__medium__state__id=state_id) | Q(sub_section__section__chapter__book__subject__grade__medium__state__id =", "Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4", "queryset = Content.objects.all() serializer_class = KeywordSerializer parser_classes = (MultiPartParser,) def", "= os.path.isfile('ApprovedContent.csv') path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('ApprovedContent.csv')", "GetSasDownloadView(ListAPIView): def get(self,request): from evolve import settings accountName = settings.AZURE_ACCOUNT_NAME", "itertools from django.db.models import Q import threading account_name = settings.AZURE_ACCOUNT_NAME", "to get Conetent list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentApprovedList(ListAPIView): queryset", "None : final=[i.id,i.video] final_list.append(final) except Exception as e: pass path", "context = {'success': \"false\", 'message': 'Failed to create content.'} return", "OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BackupContent(ListAPIView): queryset =", "\"Activity List\", \"data\": 'media/files/BackupContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as", "None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True) elif sub_sub_section_id is not None:", "many=True) elif sub_section_id is not None: queryset = SubSectionKeyword.objects.filter(sub_section__id =", "{\"success\": True, \"message\": \"url link\", \"token\":sas_url,\"base_url\":base_url} return Response(context, status=status.HTTP_200_OK) except", "= request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',", "as error: context = {'success': \"false\", 'message': 
'Failed to create", "Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5))))) exists = os.path.isfile('ApprovedContent.csv') path = settings.MEDIA_ROOT +", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentRetrieveUpdate(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer", "Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookNestedList(ListAPIView): queryset = Book.objects.all() serializer_class", "= request.query_params.get('book', None) book_name=\"\" if book_id is not None: book_name=Book.objects.get(id=book_id)", "for d in data['chapter']: final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['Board',", "{\"success\": True, \"message\": \"Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context", "not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None)) elif request.query_params.get('section', None) is not None:", "queryset = Content.objects.all() def get(self, request): try: chapter_id = request.query_params.get('chapter',", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSASView(ListAPIView): def get(self,request): try: sas_url = block_blob_service.generate_container_shared_access_signature( CONTAINER_NAME,", "self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None) elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id,", "True, \"message\": \"Activity List\", \"data\": 'media/files/BackupContent.csv'} return Response(context, status=status.HTTP_200_OK) except", "not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None) elif sub_sub_section_id is not", "get(self,request): try: sas_url = 
block_blob_service.generate_container_shared_access_signature( CONTAINER_NAME, ContainerPermissions.WRITE, datetime.utcnow() + timedelta(hours=1),", "queryset = Book.objects.all() def get(self,request): try: t = threading.Thread(target=self.index, args=(),", "import permission_classes from apps.configuration.models import Book from apps.hardspot.models import HardSpot", "not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None) elif sub_section_id is", "Reviewer)','Comment (By Reviewer)', 'linked_keywords'] data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium',", "CONTAINER_NAME, ContainerPermissions.WRITE, datetime.utcnow() + timedelta(hours=1), ) base_url=account_name+\".blob.core.windows.net/\"+CONTAINER_NAME context = {\"success\":", "to get Chapter list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def post(self, request,format=None):", "'content_contributers.xlsx') data_frame.to_csv(path + 'content_contributers.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True,", "serializer = BookNestedSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Conetent", "try: chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) sub_section_id", "queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section', None)) elif request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section', None))", "chapters=Chapter.objects.filter(book_id=book).order_by('id') serializer = ApprovedContentSerializer(chapters, many=True) for data in serializer.data: for", "is not None: if str(queryset.email) == \"\" and request.data['email'] is", "rest_framework import status from rest_framework.generics import ( ListAPIView, ListCreateAPIView, ListAPIView,", "return Response(context, status=status.HTTP_200_OK) except 
Exception as error: context = {'success':", "final_list.append(final) except Exception as e: pass path = settings.MEDIA_ROOT +", "None: queryset = self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None) elif sub_section_id is not", "\"message\": \"Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {\"success\":", "status=status.HTTP_200_OK) context = {\"success\": False, \"message\": \"Invalid Input Data to", "to get Content Rejected list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class Keywords(ListAPIView):", "= self.get_queryset() serializer = BookListSerializer(queryset, many=True) context = {\"success\": True,", "many=True) context = {\"success\": True, \"message\": \"Content Rejected List\",\"data\": serializer.data}", "SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword, ) import json import pandas", "None) section_id = request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id", "Pending List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "settings.AZURE_ACCOUNT_KEY CONTAINER_NAME= settings.AZURE_CONTAINER block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key) class ContentList(ListCreateAPIView): queryset", "queryset = SubSectionKeyword.objects.filter(sub_section__id = sub_section_id) serializer = SubSectionKeywordsSerializer(queryset, many=True) elif", "{\"success\": True, \"message\": \"Conetent List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except", "None)) else: queryset = self.get_queryset() serializer = ContentListSerializer(queryset, many=True) context", "'Medium', 'Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level", "def get(self, request): try: queryset = self.get_object() serializer = 
ContentListSerializer(queryset,", "'Failed to get Content Rejected list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class", "None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True) elif section_id is not None: queryset =", "elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None) elif", "settings.AZURE_CONTAINER try: blobService = BlockBlobService(account_name=accountName, account_key=accountKey) sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow()", "'Failed to get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentContributorCreateView(ListCreateAPIView):", "= {\"success\": True, \"message\": \"Content List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK)", "None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section', None) if", "to create Pesonal details\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as", "not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None) elif sub_sub_section_id is", "Successful\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {\"success\": False, \"message\":", "request): try: datalist = request.data print(datalist) for data in datalist:", "mobile=request.data['mobile'].strip()).first() if queryset is not None: if str(queryset.email) == \"\"", "\"false\", 'message': 'Failed to get Content Approved list.'} return Response(context,", "== \"\" and request.data['email'] is not None: ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email']) queryset.refresh_from_db() serializer", "Response(context, 
status=status.HTTP_500_INTERNAL_SERVER_ERROR) class Keywords(ListAPIView): queryset = Content.objects.all() def get(self, request):", "None) sub_sub_section_id = request.query_params.get('sub_sub_section', None) if chapter_id is not None:", "= request.query_params.get('subject', None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject) else:", "queryset is not None: if str(queryset.email) == \"\" and request.data['email']", "1 Textbook Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook", "'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusDownloadView(RetrieveUpdateAPIView):", "= pd.DataFrame(final_list , columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates() exists = os.path.isfile('content_contributers.csv') path", "try: queryset = self.get_queryset().filter(approved=True) serializer = ContentStatusSerializerFileFormat(queryset, many=True) context =", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentRejectedList(ListAPIView): queryset = Content.objects.all() serializer_class =", "to get Activity list.' 
,\"error\" :str(error)} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def", "True, \"message\": \"Content Rejected List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except", "account_key = settings.AZURE_ACCOUNT_KEY CONTAINER_NAME= settings.AZURE_CONTAINER block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key) class", "serializer.data: for d in data['chapter']: final_list.append(d) data_frame = pd.DataFrame(final_list ,", "ContentStatusSerializer def get(self, request): try: queryset = self.get_queryset().filter(approved=True) serializer =", "= threading.Thread(target=self.index, args=(), kwargs={}) t.setDaemon(True) t.start() context = {\"success\": True,", "serializer = BookListSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content", "= {\"success\": False, \"message\": \"Updation Failed\"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except", "{\"success\": True, \"message\": \"Created Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK)", "'Failed to create content.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentRetrieveUpdate(RetrieveUpdateAPIView):", "columns=['Board', 'Medium','Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level", "Unit','Level 4 Textbook Unit', 'total', 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots']) exists", "self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True) else: queryset = self.get_queryset().filter(approved=True) serializer = KeywordSerializer(queryset,", "datetime.utcnow() + timedelta(hours=10)) context = {\"success\": True, \"token\":sas_token} return Response(context,", "class BookListView(ListAPIView): queryset = Book.objects.all() serializer_class = BookListSerializer def get(self,", "Exception as 
error: context = {'success': \"false\", 'message': 'Failed to", "json import pandas as pd from evolve import settings from", "'message': 'Failed To Update content Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class", "| Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id) | Q(section__chapter__book__subject__grade__medium__state__id= state_id) | Q(chapter__book__subject__grade__medium__state__id =", "= {'success': \"false\", 'message': 'Failed to Personal Details.'} return Response(context,", "queryset=self.get_queryset().filter(subject__id=subject) else: queryset = self.get_queryset() serializer = BookListSerializer(queryset, many=True) context", "'Failed to get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookListView(ListAPIView):", "from rest_framework.decorators import permission_classes from apps.configuration.models import Book from apps.hardspot.models", "\"message\": \"Activity List\",\"data\": 'media/files/{}_contentstatus.csv'.format(book_name)} return Response(context, status=status.HTTP_200_OK) except Exception as", "= (MultiPartParser,) def get(self, request): try: queryset = self.get_queryset() serializer", "sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None) elif", "res_list: final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates() exists", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookNestedList(ListAPIView): queryset = Book.objects.all() serializer_class =", "KeywordSerializer parser_classes = (MultiPartParser,) def get(self, request): try: queryset =", "@permission_classes((IsAuthenticated,)) class ContentContributorsDownloadView(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class 
= HardSpotCreateSerializer def", "queryset = self.get_queryset() serializer = KeywordSerializer(queryset, many=True) context = {\"success\":", "class ApprovedContentDownloadView(ListAPIView): queryset = Book.objects.all() def get(self, request): try: final_list", "request.query_params.get('book', None) book_name=\"\" if book_id is not None: book_name=Book.objects.get(id=book_id) chapters=Chapter.objects.filter(book__id=book_id).order_by('id')", "Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: queryset =", "import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import permission_classes", "sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True)", "settings.MEDIA_ROOT + '/files/' if exists: os.remove('ApprovedContent.csv') data_frame.to_csv(path + 'ApprovedContent.csv', encoding=\"utf-8-sig\",", "pk, format=None): try: try: content_list = self.get_object() except Exception as", "self.get_queryset().filter(approved=True) serializer = KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\":", "section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None) elif", "= SectionKeywordsSerializer(queryset, many=True) elif sub_section_id is not None: queryset =", "= Content.objects.all() serializer_class = ContentStatusSerializer def get(self, request): try: queryset", "from apps.configuration.models import Book from apps.hardspot.models import HardSpot from .models", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def post(self, request,format=None): try: serializer = ContentListSerializer(data=request.data)", "= SubSubSectionKeywordsSerializer(queryset, many=True) else: queryset = self.get_queryset() serializer = KeywordSerializer(queryset,", "queryset = 
self.get_queryset().filter(approved=True) serializer = ContentStatusSerializerFileFormat(queryset, many=True) context = {\"success\":", "is not None: queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id , approved =", "is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None) elif sub_section_id", "{'success': \"false\", 'message': 'Failed to get Content Rejected list.'} return", "request, pk, format=None): try: try: content_list = self.get_object() except Exception", "= {'success': \"false\", 'message': 'Failed to get Content Rejected list.'}", "Content.objects.all() serializer_class = ContentStatusSerializer def get(self, request): try: queryset =", "= KeywordSerializer parser_classes = (MultiPartParser,) def get(self, request): try: queryset", "request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None)", "sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None) else:", "to get OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BackupContent(ListAPIView):", "request): try: subject = request.query_params.get('subject', None) if subject is not", "Content.objects.all() serializer_class = KeywordSerializer parser_classes = (MultiPartParser,) def get(self, request):", "'message': 'Failed to get content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def", "= {\"success\": True, \"message\": \"Content Approved List\", \"data\": serializer.data} return", "Content,ContentContributors from .serializers import ( ContentListSerializer, BookNestedSerializer, BookListSerializer, ContentStatusListSerializer, SectionKeywordSerializer,", "get OtherContent Approved list.'} return Response(context, 
status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView): queryset", "ContentListSerializer def get(self, request): try: queryset = self.get_object() serializer =", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentContributorCreateView(ListCreateAPIView): queryset = ContentContributors.objects.all() serializer_class = ContentContributorSerializer", "None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section', None)) else: queryset = self.get_queryset()", "get Conetent list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentApprovedList(ListAPIView): queryset =", "serializer_class = KeywordSerializer def get(self, request): try: chapter_id = request.query_params.get('chapter',", "Content Status list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentRejectedList(ListAPIView): queryset =", "context = {\"success\": True, \"message\": \"Chapter List\",\"data\": serializer.data} return Response(context,", "None: queryset = SubSectionKeyword.objects.filter(sub_section__id = sub_section_id) serializer = SubSectionKeywordsSerializer(queryset, many=True)", "= state_id) | Q(section__chapter__book__subject__grade__medium__state__id= state_id) | Q(chapter__book__subject__grade__medium__state__id = state_id) ).distinct()", "chapter_id is not None: queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id) serializer = ChapterKeywordsSerializer(queryset,", "\"false\", 'message': 'Failed to get OtherContent Approved list.'} return Response(context,", "not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section', None)) elif request.query_params.get('section', None) is not None:", "import Content,ContentContributors from .serializers import ( ContentListSerializer, BookNestedSerializer, BookListSerializer, ContentStatusListSerializer,", "context = 
{'success': \"false\", 'message': 'Failed to get Conetent list.'}", "'/files/' if exists: os.remove('content_contributers.csv') # data_frame.to_excel(path + 'content_contributers.xlsx') data_frame.to_csv(path +", "\"Activity List\",\"data\": 'media/files/{}_contentstatus.csv'.format(book_name)} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "{'success': \"false\", 'message': 'Failed to get Content list.'} return Response(context,", "threading.Thread(target=self.index, args=(), kwargs={}) t.setDaemon(True) t.start() context = {\"success\": True, \"message\":", "serializer.save() context = {\"success\": True, \"message\": \"Updation Successful\",\"data\": serializer.data} return", "4 Textbook Unit', 'total', 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots']) exists =", "'message': 'Failed to get Chapter list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def", "not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None) else: queryset =", "from azure.storage.blob import ( BlockBlobService, ContainerPermissions ) from datetime import", "final_list,final = [],[] queryset = Content.objects.filter(approved=True) for i in queryset:", "Exception as error: context = { 'success': \"false\", 'message': 'Failed", "rest_framework.parsers import MultiPartParser from apps.dataupload.models import (Chapter, Section, SubSection, ChapterKeyword,", "elif sub_sub_section_id is not None: queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id)", "SubSubSectionKeywordsSerializer(queryset, many=True) else: queryset = self.get_queryset() serializer = KeywordSerializer(queryset, many=True)", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentRejectedList(ListAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def", "= ChapterKeywordsSerializer(queryset, many=True) elif 
section_id is not None: queryset =", "i in range(len(serializer.data)): if serializer.data[i] not in serializer.data[i + 1:]:", "does not exist.'} return Response(context, status=status.HTTP_404_NOT_FOUND) serializer = ContentListSerializer(content_list, data=request.data,", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class", "list.' ,\"error\" :str(error)} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def index(self): final_list,final =", "None: queryset=self.get_queryset().filter(subject__id=subject) else: queryset = self.get_queryset() serializer = BookListSerializer(queryset, many=True)", "serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context =", "queryset = Content.objects.all() serializer_class = ContentStatusSerializer def get(self, request): try:", "ContentContributorSerializer def post(self, request): try: queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first() if", "@permission_classes((IsAuthenticated,)) class ContentRetrieveUpdate(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def", "HardSpot.objects.all() serializer_class = HardSpotCreateSerializer def get(self, request): try: final_list =", "in res_list: for d in res_list: final_list.append(d) data_frame = pd.DataFrame(final_list", "in datalist: Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database']) context = {\"success\": True, \"message\": \"update successfull\"}", "ChapterKeywordsSerializer, SubSectionKeywordsSerializer, KeywordSerializer, ContentContributorSerializer, ApprovedContentSerializer, ContentStatusSerializer, HardSpotCreateSerializer, 
ContentContributorsSerializer, SubSubSectionKeywordsSerializer, ContentStatusSerializerFileFormat,", "\"Content Pending List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as", "@permission_classes((IsAuthenticated,)) class ApprovedContentDownloadView(ListAPIView): queryset = Book.objects.all() def get(self, request): try:", "book_name=Book.objects.get(id=book_id) chapters=Chapter.objects.filter(book__id=book_id).order_by('id') serializer = ContentStatusSerializer(chapters, many=True) for data in serializer.data:", "= {\"success\": False, \"message\": \"Invalid Input Data to create Pesonal", "\"message\": \"Created Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context =", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def put(self, request, pk, format=None): try:", "sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None)", "= {'success': \"false\", 'message': 'Failed to get Content list.'} return", "os.path.isfile('{}_contentstatus.csv'.format(book_name)) path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('{}_contentstatus.csv'.format(book_name)) #", "class ContentPendingList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer def get(self,", "Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: chapter_id =", "from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import permission_classes from apps.configuration.models", "context = {\"success\": True, \"message\": \"url link\", \"token\":sas_url,\"base_url\":base_url} return Response(context,", "= section_id) serializer = SectionKeywordsSerializer(queryset, many=True) elif sub_section_id is not", "context = {\"success\": True, 
\"message\": \"Content Status List\",\"data\": serializer.data} return", "index(self): final_list,final = [],[] queryset = Content.objects.filter(approved=True) for i in", "from .models import Content,ContentContributors from .serializers import ( ContentListSerializer, BookNestedSerializer,", "error: context = {'success': \"false\", 'message': 'Failed to get Activity", "'media/files/content_contributers.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error: context =", "'message': 'Failed to get Content Rejected list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "if str(queryset.email) == \"\" and request.data['email'] is not None: ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email'])", "Status list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentRejectedList(ListAPIView): queryset = Content.objects.all()", "= self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None) elif sub_sub_section_id is not None: queryset =self.get_queryset().filter(sub_sub_section__id", "None: queryset = self.get_queryset().filter(section__id=section_id, approved=True) elif sub_section_id is not None:", "= BookNestedSerializer def get(self, request): try: subject = request.query_params.get('subject', None)", "context = {'success': \"false\", 'message': 'Failed to get Activity list.'", "method_decorator from django.contrib.auth.decorators import permission_required from rest_framework.parsers import MultiPartParser from", "repeat_list=['Content Name','Content Link/Video Link','Content Rating (By Reviewer)','Comment (By Reviewer)', 'linked_keywords']", "# data_frame.to_excel(path + 'content_contributers.xlsx') data_frame.to_csv(path + 'content_contributers.csv', encoding=\"utf-8-sig\", index=False) context", "ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email']) 
queryset.refresh_from_db() serializer = ContentContributorSerializer(queryset) context = {\"success\": True, \"message\":", "as error: context = {'success': \"false\", 'message': 'Failed to Personal", "error: context = {'success': \"false\", 'message': 'Failed to Personal Details.'}", "= {'success': \"false\", 'message': 'Failed to create content.'} return Response(context,", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusDownloadView(RetrieveUpdateAPIView): queryset = HardSpot.objects.all() serializer_class =", "request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True) elif section_id", "class Keywords(ListAPIView): queryset = Content.objects.all() def get(self, request): try: chapter_id", "= request.query_params.get('sub_sub_section', None) if chapter_id is not None: queryset=ChapterKeyword.objects.filter(chapter__id =", "\"false\", 'message': 'Failed to Personal Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,))", "not None: book_name=Book.objects.get(id=book_id) chapters=Chapter.objects.filter(book__id=book_id).order_by('id') serializer = ContentStatusSerializer(chapters, many=True) for data", "get(self, request): try: queryset = self.get_queryset().filter(approved=True) serializer = ContentStatusSerializerFileFormat(queryset, many=True)", "\"message\": \"OtherContent Approved List\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except", "t.setDaemon(True) t.start() context = {\"success\": True, \"message\": \"Activity List\", \"data\":", "\"message\": \"Activity List\", \"data\": 'media/files/BackupContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception", "approved=False).exclude(approved_by=None) elif sub_section_id is not None: queryset = 
self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None)", "data=request.data, context={\"user\":request.user}, partial=True) if serializer.is_valid(): serializer.save() context = {\"success\": True,", "data['chapter']: final_list.append(d) repeat_list=['Content Name','Content Link/Video Link','Content Rating (By Reviewer)','Comment (By", "'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots']) exists = os.path.isfile('{}_contentstatus.csv'.format(book_name)) path = settings.MEDIA_ROOT", "not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None) elif sub_section_id is not", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer", "\"update successfull\"} return Response(context, status=status.HTTP_200_OK) except Exception as error: context", "Rejected list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class Keywords(ListAPIView): queryset = Content.objects.all()", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentContributorCreateView(ListCreateAPIView): queryset = ContentContributors.objects.all() serializer_class = ContentContributorSerializer def", "permission_classes from apps.configuration.models import Book from apps.hardspot.models import HardSpot from", "is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None)) elif request.query_params.get('section', None) is not", "(By Reviewer)', 'linked_keywords'] data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium', 'Grade',", "exists = os.path.isfile('{}_contentstatus.csv'.format(book_name)) path = settings.MEDIA_ROOT + '/files/' if exists:", "def post(self, request,format=None): try: serializer = ContentListSerializer(data=request.data) if serializer.is_valid(): 
serializer.save()", "SectionKeywordsSerializer, ChapterKeywordsSerializer, SubSectionKeywordsSerializer, KeywordSerializer, ContentContributorSerializer, ApprovedContentSerializer, ContentStatusSerializer, HardSpotCreateSerializer, ContentContributorsSerializer, SubSubSectionKeywordsSerializer,", "get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentContributorsDownloadView(RetrieveUpdateAPIView): queryset", "'Failed to get Conetent list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentApprovedList(ListAPIView):", "'ApprovedContent.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True, \"message\": \"Activity List\",", "not None: queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id) serializer = ChapterKeywordsSerializer(queryset, many=True) elif", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequest(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class =", "Reviewer)', 'linked_keywords'] data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium', 'Grade', 'Subject',", "= self.get_object() except Exception as error: context = {'success': \"false\",", "elif sub_section_id is not None: queryset = SubSectionKeyword.objects.filter(sub_section__id = sub_section_id)", "Q(sub_section__section__chapter__book__subject__grade__medium__state__id = state_id) | Q(section__chapter__book__subject__grade__medium__state__id= state_id) | Q(chapter__book__subject__grade__medium__state__id = state_id)", "Unit','Level 4 Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5))))) exists = os.path.isfile('ApprovedContent.csv') path", "if serializer.data[i] not in serializer.data[i + 1:]: res_list.append(serializer.data[i]) for data", "= BookNestedSerializer(queryset, many=True) context = {\"success\": True, 
\"message\": \"Conetent List\",\"data\":", "import ( BlockBlobService, ContainerPermissions ) from datetime import datetime, timedelta", "import copyfile state_id = request.query_params.get('state', None) if state_id is not", "state_id = request.query_params.get('state', None) if state_id is not None: queryset", "context = {\"success\": True, \"message\": \"OtherContent Approved List\", \"data\": serializer.data}", "final_list = [] import os from shutil import copyfile book", "ContentStatusSerializer(chapters, many=True) for data in serializer.data: for d in data['chapter']:", "= self.get_queryset().filter(approved=True) serializer = ContentStatusSerializerFileFormat(queryset, many=True) context = {\"success\": True,", ":str(error)} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def index(self): final_list,final = [],[] queryset", "Update content Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookNestedList(ListAPIView): queryset =", "try: content_list = self.get_object() except Exception as error: context =", "Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ApprovedContentDownloadView(ListAPIView): queryset = Book.objects.all()", "def get(self, request): try: if request.query_params.get('chapter', None) is not None:", "final_list.append(d) repeat_list=['Content Name','Content Link/Video Link','Content Rating (By Reviewer)','Comment (By Reviewer)',", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentRejectedList(ListAPIView): queryset = Content.objects.all() serializer_class", "in queryset: try: if i.video is not None : final=[i.id,i.video]", "serializer.data} return Response(context, status=status.HTTP_200_OK) else: serializer = ContentContributorSerializer(data=request.data) if serializer.is_valid():", "None) chapters=Chapter.objects.filter(book_id=book).order_by('id') 
serializer = ApprovedContentSerializer(chapters, many=True) for data in serializer.data:", "Content.objects.all() serializer_class = ContentStatusSerializer def post(self, request): try: datalist =", "None) if chapter_id is not None: queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id) serializer", "Exception as error: context = {'success': \"false\", 'message': 'Failed To", "Response(context, status=status.HTTP_200_OK) else: serializer = ContentContributorSerializer(data=request.data) if serializer.is_valid(): serializer.save() context", "= Book.objects.all() serializer_class = BookListSerializer def get(self, request): try: subject", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentPendingList(ListAPIView): queryset = Content.objects.all() serializer_class", "queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None) else: queryset = self.get_queryset().filter(approved=False, approved_by=None)", "not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section', None)) else: queryset = self.get_queryset() serializer =", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def put(self, request, pk, format=None): try: try: content_list", "\"message\": \"Content Pending List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception", "for data in datalist: Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database']) context = {\"success\": True, \"message\":", "context = {'success': \"false\", 'message': 'Failed to get content list.'}", "False).exclude(approved_by=None) else: queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None) serializer = KeywordSerializer(queryset, many=True) context", "= SubSectionKeywordsSerializer(queryset, many=True) elif sub_sub_section_id is not None: queryset =", "print(datalist) for data in 
datalist: Content.objects.filter(pk=data['content_id']).update(video=data['file_path_from_database']) context = {\"success\": True,", "from evolve import settings from azure.storage.blob import ( BlockBlobService, ContainerPermissions", ", columns=['Board', 'Medium', 'Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook", "'message': 'Failed to get Content Status list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "context = {'success': \"false\", 'message': 'Failed to get Content Approved", "# data_frame.to_excel(path + 'contentstatus.xlsx') data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding=\"utf-8-sig\", index=False) context", "request): try: final_list = [] import os from shutil import", "sub_sub_section_id , approved = False).exclude(approved_by=None) else: queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None) serializer", "request): try: queryset = self.get_queryset().filter(approved=True) serializer = ContentStatusSerializerFileFormat(queryset, many=True) context", "\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {\"success\": False, \"message\":", "settings.MEDIA_ROOT + '/files/' data_frame = pd.DataFrame(final_list , columns=['id','url']) data_frame.to_csv(path+ 'BackupContent.csv',", "in res_list: final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['first_name', 'last_name','mobile', 'email','city_name','school_name','textbook_name']).drop_duplicates()", "= request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None)", "get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSASView(ListAPIView): def get(self,request):", "in data['chapter']: final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium','Grade', 'Subject',", "{'success': \"false\", 
'message': 'Failed to get OtherContent Approved list.'} return", "evolve import settings from evolve import settings from azure.storage.blob import", "'Medium','Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2", "= { 'success': \"false\", 'message': 'Failed to get Activity list.'}", "None: ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email']) queryset.refresh_from_db() serializer = ContentContributorSerializer(queryset) context = {\"success\": True,", "\"Created Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {\"success\":", "data in serializer.data: for d in data['chapter']: final_list.append(d) repeat_list=['Content Name','Content", "get(self, request): try: if request.query_params.get('chapter', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter',", "is not None: queryset = SectionKeyword.objects.filter(section__id = section_id) serializer =", "settings.AZURE_CONTAINER block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key) class ContentList(ListCreateAPIView): queryset = Content.objects.all()", "= {'success': \"false\", 'message': 'Failed to get Activity list.'} return", "ContentContributorSerializer(queryset) context = {\"success\": True, \"message\": \"Successful\", \"data\": serializer.data} return", "queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None)) elif request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section', None))", "= {'success': \"false\", 'message': 'content Id does not exist.'} return", "error: context = {'success': \"false\", 'message': 'Failed to get content", "'media/files/ApprovedContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error: context =", "import permission_required from rest_framework.parsers 
import MultiPartParser from apps.dataupload.models import (Chapter,", "account_key=accountKey) sas_token = blobService.generate_container_shared_access_signature(containerName,ContainerPermissions.READ, datetime.utcnow() + timedelta(hours=10)) context = {\"success\":", "os.remove('{}_contentstatus.csv'.format(book_name)) # data_frame.to_excel(path + 'contentstatus.xlsx') data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding=\"utf-8-sig\", index=False)", "from apps.hardspot.models import HardSpot from .models import Content,ContentContributors from .serializers", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BackupContent(ListAPIView): queryset = Book.objects.all() def", "try: queryset = self.get_queryset() serializer = ContentStatusListSerializer(queryset, many=True) context =", "exists = os.path.isfile('content_contributers.csv') path = settings.MEDIA_ROOT + '/files/' if exists:", "queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True) elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id,", "= ContentListSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Chapter List\",\"data\":", "ContainerPermissions.WRITE, datetime.utcnow() + timedelta(hours=1), ) base_url=account_name+\".blob.core.windows.net/\"+CONTAINER_NAME context = {\"success\": True,", "ContentList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer parser_classes = (MultiPartParser,)", "serializer = ContentStatusSerializer(chapters, many=True) for data in serializer.data: for d", "sub_sub_section_id is not None: queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id , approved", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentApprovedList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer def", "datalist = request.data print(datalist) for data in datalist: 
print(data) Content.objects.filter(pk=data['content_id']).update(video=data['video'])", "BookListView(ListAPIView): queryset = Book.objects.all() serializer_class = BookListSerializer def get(self, request):", "queryset = self.get_queryset() serializer = BookListSerializer(queryset, many=True) context = {\"success\":", "elif request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section', None)) else: queryset", "ContentContributorCreateView(ListCreateAPIView): queryset = ContentContributors.objects.all() serializer_class = ContentContributorSerializer def post(self, request):", "serializer = ContentStatusSerializerFileFormat(queryset, many=True) context = {\"success\": True, \"message\": \"OtherContent", "SectionKeyword, SubSectionKeyword, SubSubSectionKeyword, ) import json import pandas as pd", "\"Activity List\",\"data\": 'media/files/content_contributers.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "get Chapter list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def post(self, request,format=None): try:", "approved=True) elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True)", "many=True) context = {\"success\": True, \"message\": \"Content List\",\"data\": serializer.data} return", "print(datalist) for data in datalist: print(data) Content.objects.filter(pk=data['content_id']).update(video=data['video']) context = {\"success\":", "sub_sub_section_id,approved=False,approved_by=None) else: queryset = self.get_queryset().filter(approved=False, approved_by=None) serializer = KeywordSerializer(queryset, many=True)", "context = {'success': \"false\", 'message': 'Failed To Update content Details.'}", "serializer_class = ContentContributorSerializer def post(self, request): try: queryset = 
ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(),", "ContentContributorSerializer, ApprovedContentSerializer, ContentStatusSerializer, HardSpotCreateSerializer, ContentContributorsSerializer, SubSubSectionKeywordsSerializer, ContentStatusSerializerFileFormat, ) from django.utils.decorators", "get(self,request): from evolve import settings accountName = settings.AZURE_ACCOUNT_NAME accountKey =", "many=True) elif section_id is not None: queryset = SectionKeyword.objects.filter(section__id =", "context = {\"success\": False, \"message\": \"Invalid Input Data to create", "Book.objects.all() serializer_class = BookListSerializer def get(self, request): try: subject =", "return Response(context, status=status.HTTP_200_OK) else: serializer = ContentContributorSerializer(data=request.data) if serializer.is_valid(): serializer.save()", "True, \"message\": \"Successful\", \"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) else: serializer", "for data in serializer.data: for d in data['chapter']: final_list.append(d) repeat_list=['Content", "data_frame.to_excel(path + 'contentstatus.xlsx') data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding=\"utf-8-sig\", index=False) context =", "None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None) else: queryset = self.get_queryset().filter(approved=False,", "final_list = [] import os from shutil import copyfile state_id", "= ContentContributorSerializer(queryset) context = {\"success\": True, \"message\": \"Successful\", \"data\": serializer.data}", "= KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Content Pending", "except: return None class ContentListUrlUpdate(ListAPIView): queryset = Content.objects.all() serializer_class =", "class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView): queryset = 
Content.objects.all() serializer_class = ContentStatusSerializer def post(self,", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) def post(self, request,format=None): try: serializer = ContentListSerializer(data=request.data) if serializer.is_valid():", "True, \"token\":sas_token} return Response(context, status=status.HTTP_200_OK) except: return None class ContentListUrlUpdate(ListAPIView):", "class ContentContributorsDownloadView(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = HardSpotCreateSerializer def get(self,", "'linked_keywords'] data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium', 'Grade', 'Subject', 'Textbook", "= Content.objects.all() def get(self, request): try: chapter_id = request.query_params.get('chapter', None)", "= ContentListSerializer def get(self, request): try: if request.query_params.get('chapter', None) is", "is not None: queryset=self.get_queryset().filter(subject__id=subject, content_only=True) else: queryset = self.get_queryset().filter(content_only=True) serializer", "error: context = {'success': \"false\", 'message': 'Failed To Update content", "to get OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequestRevert(RetrieveUpdateAPIView):", "import IsAuthenticated from rest_framework.decorators import permission_classes from apps.configuration.models import Book", "list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentContributorsDownloadView(RetrieveUpdateAPIView): queryset = Content.objects.all()", "\"message\": \"Chapter List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as", "ChapterKeywordsSerializer(queryset, many=True) elif section_id is not None: queryset = SectionKeyword.objects.filter(section__id", "ContentListSerializer, BookNestedSerializer, BookListSerializer, 
ContentStatusListSerializer, SectionKeywordSerializer, SubSectionKeywordSerializer, SectionKeywordsSerializer, ChapterKeywordsSerializer, SubSectionKeywordsSerializer, KeywordSerializer,", "=self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id , approved = False).exclude(approved_by=None) else: queryset =", "def get(self,request): try: sas_url = block_blob_service.generate_container_shared_access_signature( CONTAINER_NAME, ContainerPermissions.WRITE, datetime.utcnow() +", "ContentContributorsSerializer(queryset, many=True) res_list = [] for i in range(len(serializer.data)): if", "os from shutil import copyfile state_id = request.query_params.get('state', None) if", "'Failed to get Content Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class", "queryset: try: if i.video is not None : final=[i.id,i.video] final_list.append(final)", "get Content Pending list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusList(ListCreateAPIView): queryset", "ContentStatusSerializerFileFormat, ) from django.utils.decorators import method_decorator from django.contrib.auth.decorators import permission_required", "is not None: queryset=self.get_queryset().filter(subject__id=subject) else: queryset = self.get_queryset() serializer =", "import threading account_name = settings.AZURE_ACCOUNT_NAME account_key = settings.AZURE_ACCOUNT_KEY CONTAINER_NAME= settings.AZURE_CONTAINER", "'content Id does not exist.'} return Response(context, status=status.HTTP_404_NOT_FOUND) serializer =", "get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class GetSasDownloadView(ListAPIView): def get(self,request):", "context = {'success': \"false\", 'message': 'Failed to get Content Rejected", "if serializer.is_valid(): serializer.save() context = {\"success\": True, \"message\": \"Successful\", \"data\":", "= sub_sub_section_id,approved=False,approved_by=None) else: 
queryset = self.get_queryset().filter(approved=False, approved_by=None) serializer = KeywordSerializer(queryset,", "'Failed to get Chapter list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def post(self,", "= self.get_queryset() serializer = ContentListSerializer(queryset, many=True) context = {\"success\": True,", "not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None) elif section_id is not None:", "queryset = ContentContributors.objects.all() serializer_class = ContentContributorSerializer def post(self, request): try:", "subject is not None: queryset=self.get_queryset().filter(subject__id=subject) else: queryset = self.get_queryset() serializer", "book_id = request.query_params.get('book', None) book_name=\"\" if book_id is not None:", "i.video is not None : final=[i.id,i.video] final_list.append(final) except Exception as", "None: queryset = SectionKeyword.objects.filter(section__id = section_id) serializer = SectionKeywordsSerializer(queryset, many=True)", "'Failed to get content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def put(self,", "sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True) else:", "return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class Keywords(ListAPIView): queryset = Content.objects.all() def get(self,", "from .serializers import ( ContentListSerializer, BookNestedSerializer, BookListSerializer, ContentStatusListSerializer, SectionKeywordSerializer, SubSectionKeywordSerializer,", "self.get_queryset().filter(approved=False).exclude(approved_by=None) serializer = KeywordSerializer(queryset, many=True) context = {\"success\": True, \"message\":", "Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookListView(ListAPIView): queryset = Book.objects.all()", "{\"success\": True, \"message\": 
\"Updation Successful\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) context", "return None class ContentListUrlUpdate(ListAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer", "(Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword, ) import json", "ApprovedContentDownloadView(ListAPIView): queryset = Book.objects.all() def get(self, request): try: final_list =", "sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section', None) if chapter_id", "Textbook Unit','Level 4 Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5))))) exists = os.path.isfile('ApprovedContent.csv')", "'success': \"false\", 'message': 'Failed to get Activity list.'} return Response(context,", "serializer = ApprovedContentSerializer(chapters, many=True) for data in serializer.data: for d", "Link','Content Rating (By Reviewer)','Comment (By Reviewer)', 'linked_keywords'] data_frame = pd.DataFrame(final_list", "rest_framework.generics import ( ListAPIView, ListCreateAPIView, ListAPIView, RetrieveUpdateAPIView,) from rest_framework.response import", "get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookListView(ListAPIView): queryset =", "\"false\", 'message': 'Failed to get Content Rejected list.'} return Response(context,", "django.utils.decorators import method_decorator from django.contrib.auth.decorators import permission_required from rest_framework.parsers import", "SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id) serializer = SubSubSectionKeywordsSerializer(queryset, many=True) else: queryset =", "copyfile book_id = request.query_params.get('book', None) book_name=\"\" if book_id is not", "{'success': \"false\", 'message': 'content Id does not exist.'} return Response(context,", 
"{'success': \"false\", 'message': 'Failed to get Conetent list.'} return Response(context,", "try: sas_url = block_blob_service.generate_container_shared_access_signature( CONTAINER_NAME, ContainerPermissions.WRITE, datetime.utcnow() + timedelta(hours=1), )", "( ListAPIView, ListCreateAPIView, ListAPIView, RetrieveUpdateAPIView,) from rest_framework.response import Response from", "= settings.MEDIA_ROOT + '/files/' if exists: os.remove('content_contributers.csv') # data_frame.to_excel(path +", "4 Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5))))) exists = os.path.isfile('ApprovedContent.csv') path =", "context={\"user\":request.user}, partial=True) if serializer.is_valid(): serializer.save() context = {\"success\": True, \"message\":", "\"false\", 'message': 'Failed to get Conetent list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "settings accountName = settings.AZURE_ACCOUNT_NAME accountKey = settings.AZURE_ACCOUNT_KEY containerName= settings.AZURE_CONTAINER try:", "\"Content List\",\"data\": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error:", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusDownloadView(RetrieveUpdateAPIView): queryset = HardSpot.objects.all() serializer_class = HardSpotCreateSerializer", "res_list = [] for i in range(len(serializer.data)): if serializer.data[i] not", "final=[i.id,i.video] final_list.append(final) except Exception as e: pass path = settings.MEDIA_ROOT", "exists = os.path.isfile('ApprovedContent.csv') path = settings.MEDIA_ROOT + '/files/' if exists:", "is not None: queryset = SubSectionKeyword.objects.filter(sub_section__id = sub_section_id) serializer =", "subject is not None: queryset=self.get_queryset().filter(subject__id=subject, content_only=True) else: queryset = self.get_queryset().filter(content_only=True)", "evolve import settings accountName = 
settings.AZURE_ACCOUNT_NAME accountKey = settings.AZURE_ACCOUNT_KEY containerName=", "context = {\"success\": True, \"message\": \"Activity List\", \"data\": 'media/files/BackupContent.csv'} return", "get OtherContent Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequest(RetrieveUpdateAPIView): queryset", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def", "status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentListUrlPutRequest(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentStatusSerializer def", "= {'success': \"false\", 'message': 'Failed to get Conetent list.'} return", "serializer = ContentListSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {\"success\": True,", "Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BackupContent(ListAPIView): queryset = Book.objects.all() def get(self,request): try:", "None: queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id , approved = False).exclude(approved_by=None) else:", "copyfile state_id = request.query_params.get('state', None) if state_id is not None:", "approved_by=None) elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False,", "data_frame.to_excel(path + 'content_contributers.xlsx') data_frame.to_csv(path + 'content_contributers.csv', encoding=\"utf-8-sig\", index=False) context =", "Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: if request.query_params.get('chapter',", "= ContentStatusSerializer(chapters, many=True) for data in serializer.data: for d in", "not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True) elif section_id is not None: queryset", "None: 
queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None) elif section_id is not None: queryset", "settings.AZURE_ACCOUNT_KEY containerName= settings.AZURE_CONTAINER try: blobService = BlockBlobService(account_name=accountName, account_key=accountKey) sas_token =", "'message': 'Failed to Personal Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class", "else: queryset = self.get_queryset().filter(approved=False, approved_by=None) serializer = KeywordSerializer(queryset, many=True) context", "{'success': \"false\", 'message': 'Failed to get Activity list.'} return Response(context,", "ContentListSerializer(queryset, many=True) context = {\"success\": True, \"message\": \"Chapter List\",\"data\": serializer.data}", "+ 1:]: res_list.append(serializer.data[i]) for data in res_list: for d in", "serializer.is_valid(): serializer.save() context = {\"success\": True, \"message\": \"Successful\", \"data\": serializer.data}", "\"false\", 'message': 'content Id does not exist.'} return Response(context, status=status.HTTP_404_NOT_FOUND)", "\"false\", 'message': 'Failed to get Activity list.' ,\"error\" :str(error)} return", "= ContentContributorsSerializer(queryset, many=True) res_list = [] for i in range(len(serializer.data)):", "= {'success': \"false\", 'message': 'Failed to get Chapter list.'} return", "'contentstatus.xlsx') data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding=\"utf-8-sig\", index=False) context = {\"success\": True,", "False, \"message\": \"Invalid Input Data to create content\"} return Response(context," ]
[ "# -*- coding: utf-8 -*- \"\"\" Example to train and", "learning_rate=0.01, lambda_reg=0.02, use_bias=True, early_stop=True, verbose=True) # Evaluation result = eval_method.evaluate(model=mf,", "model with given data @author: <NAME> <<EMAIL>> \"\"\" from cornac.data", "Example to train and evaluate a model with given data", "from cornac.eval_methods import BaseMethod from cornac.models import MF from cornac.metrics", "evaluate a model with given data @author: <NAME> <<EMAIL>> \"\"\"", "cornac.utils import cache # Download MovieLens 100K provided training and", "test_data=test_data, exclude_unknowns=False, verbose=True) mf = MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True,", "import Reader from cornac.eval_methods import BaseMethod from cornac.models import MF", "train and evaluate a model with given data @author: <NAME>", "-*- coding: utf-8 -*- \"\"\" Example to train and evaluate", "training and test splits reader = Reader() train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base'))", "eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data, exclude_unknowns=False, verbose=True) mf = MF(k=10, max_iter=25,", "= Reader() train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base')) test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test')) eval_method =", "cornac.models import MF from cornac.metrics import MAE, RMSE from cornac.utils", "MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, early_stop=True, verbose=True) # Evaluation result", "verbose=True) # Evaluation result = eval_method.evaluate(model=mf, metrics=[MAE(), RMSE()], user_based=True) print(result)", "Download MovieLens 100K provided training and test splits reader =", "BaseMethod from cornac.models import MF from cornac.metrics import MAE, RMSE", "-*- \"\"\" Example to train and evaluate a model with", 
"use_bias=True, early_stop=True, verbose=True) # Evaluation result = eval_method.evaluate(model=mf, metrics=[MAE(), RMSE()],", "import cache # Download MovieLens 100K provided training and test", "mf = MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, early_stop=True, verbose=True) #", "@author: <NAME> <<EMAIL>> \"\"\" from cornac.data import Reader from cornac.eval_methods", "import BaseMethod from cornac.models import MF from cornac.metrics import MAE,", "train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base')) test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test')) eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data,", "provided training and test splits reader = Reader() train_data =", "reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base')) test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test')) eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data, exclude_unknowns=False, verbose=True)", "# Download MovieLens 100K provided training and test splits reader", "from cornac.data import Reader from cornac.eval_methods import BaseMethod from cornac.models", "splits reader = Reader() train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base')) test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test'))", "import MAE, RMSE from cornac.utils import cache # Download MovieLens", "early_stop=True, verbose=True) # Evaluation result = eval_method.evaluate(model=mf, metrics=[MAE(), RMSE()], user_based=True)", "from cornac.metrics import MAE, RMSE from cornac.utils import cache #", "<<EMAIL>> \"\"\" from cornac.data import Reader from cornac.eval_methods import BaseMethod", "from cornac.models import MF from cornac.metrics import MAE, RMSE from", "RMSE from 
cornac.utils import cache # Download MovieLens 100K provided", "lambda_reg=0.02, use_bias=True, early_stop=True, verbose=True) # Evaluation result = eval_method.evaluate(model=mf, metrics=[MAE(),", "Reader() train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base')) test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test')) eval_method = BaseMethod.from_splits(train_data=train_data,", "MAE, RMSE from cornac.utils import cache # Download MovieLens 100K", "from cornac.utils import cache # Download MovieLens 100K provided training", "MF from cornac.metrics import MAE, RMSE from cornac.utils import cache", "= reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test')) eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data, exclude_unknowns=False, verbose=True) mf =", "verbose=True) mf = MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, early_stop=True, verbose=True)", "= MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, early_stop=True, verbose=True) # Evaluation", "\"\"\" Example to train and evaluate a model with given", "and evaluate a model with given data @author: <NAME> <<EMAIL>>", "100K provided training and test splits reader = Reader() train_data", "cornac.data import Reader from cornac.eval_methods import BaseMethod from cornac.models import", "given data @author: <NAME> <<EMAIL>> \"\"\" from cornac.data import Reader", "BaseMethod.from_splits(train_data=train_data, test_data=test_data, exclude_unknowns=False, verbose=True) mf = MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02,", "to train and evaluate a model with given data @author:", "reader = Reader() train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base')) test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test')) eval_method", 
"reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test')) eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data, exclude_unknowns=False, verbose=True) mf = MF(k=10,", "= BaseMethod.from_splits(train_data=train_data, test_data=test_data, exclude_unknowns=False, verbose=True) mf = MF(k=10, max_iter=25, learning_rate=0.01,", "utf-8 -*- \"\"\" Example to train and evaluate a model", "exclude_unknowns=False, verbose=True) mf = MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, early_stop=True,", "a model with given data @author: <NAME> <<EMAIL>> \"\"\" from", "Reader from cornac.eval_methods import BaseMethod from cornac.models import MF from", "MovieLens 100K provided training and test splits reader = Reader()", "cache # Download MovieLens 100K provided training and test splits", "= reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base')) test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test')) eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data, exclude_unknowns=False,", "data @author: <NAME> <<EMAIL>> \"\"\" from cornac.data import Reader from", "import MF from cornac.metrics import MAE, RMSE from cornac.utils import", "max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, early_stop=True, verbose=True) # Evaluation result =", "\"\"\" from cornac.data import Reader from cornac.eval_methods import BaseMethod from", "coding: utf-8 -*- \"\"\" Example to train and evaluate a", "with given data @author: <NAME> <<EMAIL>> \"\"\" from cornac.data import", "cornac.metrics import MAE, RMSE from cornac.utils import cache # Download", "and test splits reader = Reader() train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base')) test_data", "test_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test')) 
eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data, exclude_unknowns=False, verbose=True) mf", "test splits reader = Reader() train_data = reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base')) test_data =", "<NAME> <<EMAIL>> \"\"\" from cornac.data import Reader from cornac.eval_methods import", "cornac.eval_methods import BaseMethod from cornac.models import MF from cornac.metrics import" ]
[ "f.read().splitlines() self.scene_categories = dict(line.split() for line in self.scene_categories) with open(self.data_csv,", "np import cv2 import albumentations from PIL import Image from", "= {\"train\": \"data/ade20k_train.txt\", \"validation\": \"data/ade20k_test.txt\"}[self.split] self.data_root = \"./data/ade20k_root\" with open(os.path.join(self.data_root,", "__init__(self, config=None, size=None, random_crop=False, interpolation=\"bicubic\", crop_size=None): self.split = self.get_split() self.n_labels", "for l in self.image_paths], \"scene_category\": [self.scene_categories[l.replace(\".jpg\", \"\")] for l in", "= (processed[\"image\"]/127.5 - 1.0).astype(np.float32) segmentation = processed[\"mask\"] onehot = np.eye(self.n_labels)[segmentation]", "= crop_size if self.size is not None: self.interpolation = interpolation", "size self.size = size if crop_size is None: self.crop_size =", "self.size is not None: image = self.image_rescaler(image=image)[\"image\"] segmentation = Image.open(example[\"segmentation_path_\"])", "is not None: processed = self.preprocessor(image=image, mask=segmentation) else: processed =", "Image from torch.utils.data import Dataset from taming.data.sflckr import SegmentationBase #", "crop_size=None): super().__init__(config=config, size=size, random_crop=random_crop, interpolation=interpolation, crop_size=crop_size) def get_split(self): return \"train\"", "image = image.convert(\"RGB\") image = np.array(image).astype(np.uint8) if self.size is not", "in repo class Examples(SegmentationBase): def __init__(self, size=256, random_crop=False, interpolation=\"bicubic\"): super().__init__(data_csv=\"data/ade20k_examples.txt\",", "ss='training' self.labels = { \"relative_file_path_\": [l for l in self.image_paths],", "# for examples included in repo class Examples(SegmentationBase): def __init__(self,", "example = dict((k, self.labels[k][i]) for k in self.labels) image =", "= 151 # unknown + 150 self.data_csv = {\"train\": 
\"data/ade20k_train.txt\",", "interpolation self.interpolation = { \"nearest\": cv2.INTER_NEAREST, \"bilinear\": cv2.INTER_LINEAR, \"bicubic\": cv2.INTER_CUBIC,", "= self.preprocessor(image=image, mask=segmentation) else: processed = {\"image\": image, \"mask\": segmentation}", "== \"__main__\": dset = ADE20kValidation() ex = dset[0] for k", "def __getitem__(self, i): example = dict((k, self.labels[k][i]) for k in", "return example class ADE20kTrain(ADE20kBase): # default to random_crop=True def __init__(self,", "for l in self.image_paths], \"relative_segmentation_path_\": [l.replace(\".jpg\", \".png\") for l in", "self.interpolation = { \"nearest\": cv2.INTER_NEAREST, \"bilinear\": cv2.INTER_LINEAR, \"bicubic\": cv2.INTER_CUBIC, \"area\":", "else size self.size = size if crop_size is None: self.crop_size", "segmentation_root=\"data/ade20k_segmentations\", size=size, random_crop=random_crop, interpolation=interpolation, n_labels=151, shift_segmentation=False) # With semantic map", "random_crop=True, interpolation=\"bicubic\", crop_size=None): super().__init__(config=config, size=size, random_crop=random_crop, interpolation=interpolation, crop_size=crop_size) def get_split(self):", "n_labels=151, shift_segmentation=False) # With semantic map and scene label class", "None: image = self.image_rescaler(image=image)[\"image\"] segmentation = Image.open(example[\"segmentation_path_\"]) segmentation = np.array(segmentation).astype(np.uint8)", "self.center_crop: self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) else: self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size)", "processed[\"mask\"] onehot = np.eye(self.n_labels)[segmentation] example[\"segmentation\"] = onehot return example class", "if self.size is not None: image = self.image_rescaler(image=image)[\"image\"] segmentation =", "ADE20kValidation(ADE20kBase): def get_split(self): return \"validation\" if __name__ == \"__main__\": dset", "import 
SegmentationBase # for examples included in repo class Examples(SegmentationBase):", "= None if size is not None and size<=0 else", "= {\"image\": image, \"mask\": segmentation} example[\"image\"] = (processed[\"image\"]/127.5 - 1.0).astype(np.float32)", "not random_crop if self.center_crop: self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) else: self.cropper", "\"./data/ade20k_root\" with open(os.path.join(self.data_root, \"sceneCategories.txt\"), \"r\") as f: self.scene_categories = f.read().splitlines()", "self.image_paths], \"file_path_\": [os.path.join(self.data_root, \"images\",ss, l) for l in self.image_paths], \"relative_segmentation_path_\":", "= processed[\"mask\"] onehot = np.eye(self.n_labels)[segmentation] example[\"segmentation\"] = onehot return example", "for l in self.image_paths], \"segmentation_path_\": [os.path.join(self.data_root, \"annotations\",ss, l.replace(\".jpg\", \".png\")) for", "None: self.interpolation = interpolation self.interpolation = { \"nearest\": cv2.INTER_NEAREST, \"bilinear\":", "random_crop=random_crop, interpolation=interpolation, n_labels=151, shift_segmentation=False) # With semantic map and scene", "numpy as np import cv2 import albumentations from PIL import", "interpolation=interpolation, n_labels=151, shift_segmentation=False) # With semantic map and scene label", "\"data/ade20k_train.txt\", \"validation\": \"data/ade20k_test.txt\"}[self.split] self.data_root = \"./data/ade20k_root\" with open(os.path.join(self.data_root, \"sceneCategories.txt\"), \"r\")", "if __name__ == \"__main__\": dset = ADE20kValidation() ex = dset[0]", "not None else None else: self.crop_size = crop_size if self.size", "With semantic map and scene label class ADE20kBase(Dataset): def __init__(self,", "segmentation = self.segmentation_rescaler(image=segmentation)[\"image\"] if self.size is not None: processed =", "__getitem__(self, i): example = dict((k, self.labels[k][i]) for k in self.labels)", "\"\")] for l 
in self.image_paths], } size = None if", "interpolation=\"bicubic\", crop_size=None): super().__init__(config=config, size=size, random_crop=random_crop, interpolation=interpolation, crop_size=crop_size) def get_split(self): return", "as f: self.image_paths = f.read().splitlines() self._length = len(self.image_paths) ss =", "= f.read().splitlines() self.scene_categories = dict(line.split() for line in self.scene_categories) with", "i): example = dict((k, self.labels[k][i]) for k in self.labels) image", "__name__ == \"__main__\": dset = ADE20kValidation() ex = dset[0] for", "len(self.image_paths) ss = self.split if ss=='train': ss='training' self.labels = {", "size<=0 else size self.size = size if crop_size is None:", "[os.path.join(self.data_root, \"annotations\",ss, l.replace(\".jpg\", \".png\")) for l in self.image_paths], \"scene_category\": [self.scene_categories[l.replace(\".jpg\",", "= size if size is not None else None else:", "ex = dset[0] for k in [\"image\", \"scene_category\", \"segmentation\"]: print(type(ex[k]))", "f: self.image_paths = f.read().splitlines() self._length = len(self.image_paths) ss = self.split", "Examples(SegmentationBase): def __init__(self, size=256, random_crop=False, interpolation=\"bicubic\"): super().__init__(data_csv=\"data/ade20k_examples.txt\", data_root=\"data/ade20k_images\", segmentation_root=\"data/ade20k_segmentations\", size=size,", "151 # unknown + 150 self.data_csv = {\"train\": \"data/ade20k_train.txt\", \"validation\":", "ADE20kBase(Dataset): def __init__(self, config=None, size=None, random_crop=False, interpolation=\"bicubic\", crop_size=None): self.split =", "= Image.open(example[\"file_path_\"]) if not image.mode == \"RGB\": image = image.convert(\"RGB\")", "self.labels) image = Image.open(example[\"file_path_\"]) if not image.mode == \"RGB\": image", "image.mode == \"RGB\": image = image.convert(\"RGB\") image = np.array(image).astype(np.uint8) if", "with open(self.data_csv, \"r\") as f: self.image_paths = 
f.read().splitlines() self._length =", "cv2.INTER_LINEAR, \"bicubic\": cv2.INTER_CUBIC, \"area\": cv2.INTER_AREA, \"lanczos\": cv2.INTER_LANCZOS4}[self.interpolation] self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size,", "if crop_size is None: self.crop_size = size if size is", "None: self.center_crop = not random_crop if self.center_crop: self.cropper = albumentations.CenterCrop(height=self.crop_size,", "= np.array(segmentation).astype(np.uint8) if self.size is not None: segmentation = self.segmentation_rescaler(image=segmentation)[\"image\"]", "albumentations from PIL import Image from torch.utils.data import Dataset from", "dict((k, self.labels[k][i]) for k in self.labels) image = Image.open(example[\"file_path_\"]) if", "albumentations.RandomCrop(height=self.crop_size, width=self.crop_size) self.preprocessor = self.cropper def __len__(self): return self._length def", "is not None and size<=0 else size self.size = size", "self.image_paths], } size = None if size is not None", "if not image.mode == \"RGB\": image = image.convert(\"RGB\") image =", "def get_split(self): return \"validation\" if __name__ == \"__main__\": dset =", "in self.scene_categories) with open(self.data_csv, \"r\") as f: self.image_paths = f.read().splitlines()", "\"r\") as f: self.scene_categories = f.read().splitlines() self.scene_categories = dict(line.split() for", "as f: self.scene_categories = f.read().splitlines() self.scene_categories = dict(line.split() for line", "\"data/ade20k_test.txt\"}[self.split] self.data_root = \"./data/ade20k_root\" with open(os.path.join(self.data_root, \"sceneCategories.txt\"), \"r\") as f:", "# default to random_crop=True def __init__(self, config=None, size=None, random_crop=True, interpolation=\"bicubic\",", "dset[0] for k in [\"image\", \"scene_category\", \"segmentation\"]: print(type(ex[k])) try: print(ex[k].shape)", "ss = self.split if ss=='train': ss='training' self.labels = { \"relative_file_path_\":", 
"interpolation=\"bicubic\", crop_size=None): self.split = self.get_split() self.n_labels = 151 # unknown", "onehot return example class ADE20kTrain(ADE20kBase): # default to random_crop=True def", "self.crop_size = crop_size if self.size is not None: self.interpolation =", "l in self.image_paths], \"relative_segmentation_path_\": [l.replace(\".jpg\", \".png\") for l in self.image_paths],", "np.eye(self.n_labels)[segmentation] example[\"segmentation\"] = onehot return example class ADE20kTrain(ADE20kBase): # default", "class ADE20kValidation(ADE20kBase): def get_split(self): return \"validation\" if __name__ == \"__main__\":", "else None else: self.crop_size = crop_size if self.size is not", "np.array(segmentation).astype(np.uint8) if self.size is not None: segmentation = self.segmentation_rescaler(image=segmentation)[\"image\"] if", "segmentation = Image.open(example[\"segmentation_path_\"]) segmentation = np.array(segmentation).astype(np.uint8) if self.size is not", "= self.segmentation_rescaler(image=segmentation)[\"image\"] if self.size is not None: processed = self.preprocessor(image=image,", "processed = self.preprocessor(image=image, mask=segmentation) else: processed = {\"image\": image, \"mask\":", "import cv2 import albumentations from PIL import Image from torch.utils.data", "is None: self.crop_size = size if size is not None", "image = np.array(image).astype(np.uint8) if self.size is not None: image =", "not None: segmentation = self.segmentation_rescaler(image=segmentation)[\"image\"] if self.size is not None:", "\"area\": cv2.INTER_AREA, \"lanczos\": cv2.INTER_LANCZOS4}[self.interpolation] self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=self.interpolation) self.segmentation_rescaler =", "for l in self.image_paths], \"file_path_\": [os.path.join(self.data_root, \"images\",ss, l) for l", "width=self.crop_size) self.preprocessor = self.cropper def __len__(self): return self._length def __getitem__(self,", "# unknown 
+ 150 self.data_csv = {\"train\": \"data/ade20k_train.txt\", \"validation\": \"data/ade20k_test.txt\"}[self.split]", "l) for l in self.image_paths], \"relative_segmentation_path_\": [l.replace(\".jpg\", \".png\") for l", "__len__(self): return self._length def __getitem__(self, i): example = dict((k, self.labels[k][i])", "\".png\")) for l in self.image_paths], \"scene_category\": [self.scene_categories[l.replace(\".jpg\", \"\")] for l", "= albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) else: self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size) self.preprocessor =", "is not None: image = self.image_rescaler(image=image)[\"image\"] segmentation = Image.open(example[\"segmentation_path_\"]) segmentation", "self.image_paths], \"segmentation_path_\": [os.path.join(self.data_root, \"annotations\",ss, l.replace(\".jpg\", \".png\")) for l in self.image_paths],", "\"__main__\": dset = ADE20kValidation() ex = dset[0] for k in", "= interpolation self.interpolation = { \"nearest\": cv2.INTER_NEAREST, \"bilinear\": cv2.INTER_LINEAR, \"bicubic\":", "None else: self.crop_size = crop_size if self.size is not None:", "== \"RGB\": image = image.convert(\"RGB\") image = np.array(image).astype(np.uint8) if self.size", "dset = ADE20kValidation() ex = dset[0] for k in [\"image\",", "ADE20kValidation() ex = dset[0] for k in [\"image\", \"scene_category\", \"segmentation\"]:", "self.image_paths], \"relative_segmentation_path_\": [l.replace(\".jpg\", \".png\") for l in self.image_paths], \"segmentation_path_\": [os.path.join(self.data_root,", "torch.utils.data import Dataset from taming.data.sflckr import SegmentationBase # for examples", "in self.image_paths], } size = None if size is not", "interpolation=self.interpolation) self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=cv2.INTER_NEAREST) if crop_size is not None:", "self.scene_categories = dict(line.split() for line in 
self.scene_categories) with open(self.data_csv, \"r\")", "map and scene label class ADE20kBase(Dataset): def __init__(self, config=None, size=None,", "Image.open(example[\"file_path_\"]) if not image.mode == \"RGB\": image = image.convert(\"RGB\") image", "self.image_rescaler(image=image)[\"image\"] segmentation = Image.open(example[\"segmentation_path_\"]) segmentation = np.array(segmentation).astype(np.uint8) if self.size is", "size=None, random_crop=False, interpolation=\"bicubic\", crop_size=None): self.split = self.get_split() self.n_labels = 151", "= { \"relative_file_path_\": [l for l in self.image_paths], \"file_path_\": [os.path.join(self.data_root,", "for l in self.image_paths], } size = None if size", "l in self.image_paths], \"file_path_\": [os.path.join(self.data_root, \"images\",ss, l) for l in", "from torch.utils.data import Dataset from taming.data.sflckr import SegmentationBase # for", "image.convert(\"RGB\") image = np.array(image).astype(np.uint8) if self.size is not None: image", "= self.cropper def __len__(self): return self._length def __getitem__(self, i): example", "l in self.image_paths], \"segmentation_path_\": [os.path.join(self.data_root, \"annotations\",ss, l.replace(\".jpg\", \".png\")) for l", "in self.image_paths], \"segmentation_path_\": [os.path.join(self.data_root, \"annotations\",ss, l.replace(\".jpg\", \".png\")) for l in", "config=None, size=None, random_crop=False, interpolation=\"bicubic\", crop_size=None): self.split = self.get_split() self.n_labels =", "{ \"relative_file_path_\": [l for l in self.image_paths], \"file_path_\": [os.path.join(self.data_root, \"images\",ss,", "config=None, size=None, random_crop=True, interpolation=\"bicubic\", crop_size=None): super().__init__(config=config, size=size, random_crop=random_crop, interpolation=interpolation, crop_size=crop_size)", "get_split(self): return \"validation\" if __name__ == \"__main__\": dset = ADE20kValidation()", "example[\"image\"] = (processed[\"image\"]/127.5 - 
1.0).astype(np.float32) segmentation = processed[\"mask\"] onehot =", "example class ADE20kTrain(ADE20kBase): # default to random_crop=True def __init__(self, config=None,", "random_crop if self.center_crop: self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) else: self.cropper =", "if size is not None else None else: self.crop_size =", "l in self.image_paths], \"scene_category\": [self.scene_categories[l.replace(\".jpg\", \"\")] for l in self.image_paths],", "random_crop=random_crop, interpolation=interpolation, crop_size=crop_size) def get_split(self): return \"train\" class ADE20kValidation(ADE20kBase): def", "= image.convert(\"RGB\") image = np.array(image).astype(np.uint8) if self.size is not None:", "= dict(line.split() for line in self.scene_categories) with open(self.data_csv, \"r\") as", "= self.image_rescaler(image=image)[\"image\"] segmentation = Image.open(example[\"segmentation_path_\"]) segmentation = np.array(segmentation).astype(np.uint8) if self.size", "self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size) self.preprocessor = self.cropper def __len__(self): return", "to random_crop=True def __init__(self, config=None, size=None, random_crop=True, interpolation=\"bicubic\", crop_size=None): super().__init__(config=config,", "import os import numpy as np import cv2 import albumentations", "crop_size=crop_size) def get_split(self): return \"train\" class ADE20kValidation(ADE20kBase): def get_split(self): return", "\"train\" class ADE20kValidation(ADE20kBase): def get_split(self): return \"validation\" if __name__ ==", "self.size is not None: processed = self.preprocessor(image=image, mask=segmentation) else: processed", "not image.mode == \"RGB\": image = image.convert(\"RGB\") image = np.array(image).astype(np.uint8)", "if crop_size is not None: self.center_crop = not random_crop if", "= Image.open(example[\"segmentation_path_\"]) segmentation = np.array(segmentation).astype(np.uint8) 
if self.size is not None:", "= np.array(image).astype(np.uint8) if self.size is not None: image = self.image_rescaler(image=image)[\"image\"]", "\"relative_segmentation_path_\": [l.replace(\".jpg\", \".png\") for l in self.image_paths], \"segmentation_path_\": [os.path.join(self.data_root, \"annotations\",ss,", "= albumentations.RandomCrop(height=self.crop_size, width=self.crop_size) self.preprocessor = self.cropper def __len__(self): return self._length", "in self.labels) image = Image.open(example[\"file_path_\"]) if not image.mode == \"RGB\":", "super().__init__(config=config, size=size, random_crop=random_crop, interpolation=interpolation, crop_size=crop_size) def get_split(self): return \"train\" class", "\".png\") for l in self.image_paths], \"segmentation_path_\": [os.path.join(self.data_root, \"annotations\",ss, l.replace(\".jpg\", \".png\"))", "albumentations.SmallestMaxSize(max_size=self.size, interpolation=cv2.INTER_NEAREST) if crop_size is not None: self.center_crop = not", "if self.center_crop: self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) else: self.cropper = albumentations.RandomCrop(height=self.crop_size,", "= self.split if ss=='train': ss='training' self.labels = { \"relative_file_path_\": [l", "self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=self.interpolation) self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=cv2.INTER_NEAREST) if crop_size", "mask=segmentation) else: processed = {\"image\": image, \"mask\": segmentation} example[\"image\"] =", "else: self.crop_size = crop_size if self.size is not None: self.interpolation", "semantic map and scene label class ADE20kBase(Dataset): def __init__(self, config=None,", "import numpy as np import cv2 import albumentations from PIL", "None and size<=0 else size self.size = size if crop_size", "self.labels[k][i]) for k in self.labels) image = Image.open(example[\"file_path_\"]) if 
not", "get_split(self): return \"train\" class ADE20kValidation(ADE20kBase): def get_split(self): return \"validation\" if", "onehot = np.eye(self.n_labels)[segmentation] example[\"segmentation\"] = onehot return example class ADE20kTrain(ADE20kBase):", "(processed[\"image\"]/127.5 - 1.0).astype(np.float32) segmentation = processed[\"mask\"] onehot = np.eye(self.n_labels)[segmentation] example[\"segmentation\"]", "not None: self.interpolation = interpolation self.interpolation = { \"nearest\": cv2.INTER_NEAREST,", "= onehot return example class ADE20kTrain(ADE20kBase): # default to random_crop=True", "cv2.INTER_NEAREST, \"bilinear\": cv2.INTER_LINEAR, \"bicubic\": cv2.INTER_CUBIC, \"area\": cv2.INTER_AREA, \"lanczos\": cv2.INTER_LANCZOS4}[self.interpolation] self.image_rescaler", "self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) else: self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size) self.preprocessor", "= not random_crop if self.center_crop: self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) else:", "segmentation} example[\"image\"] = (processed[\"image\"]/127.5 - 1.0).astype(np.float32) segmentation = processed[\"mask\"] onehot", "self.preprocessor = self.cropper def __len__(self): return self._length def __getitem__(self, i):", "} size = None if size is not None and", "self.split if ss=='train': ss='training' self.labels = { \"relative_file_path_\": [l for", "with open(os.path.join(self.data_root, \"sceneCategories.txt\"), \"r\") as f: self.scene_categories = f.read().splitlines() self.scene_categories", "k in self.labels) image = Image.open(example[\"file_path_\"]) if not image.mode ==", "for k in self.labels) image = Image.open(example[\"file_path_\"]) if not image.mode", "self._length def __getitem__(self, i): example = dict((k, self.labels[k][i]) for k", "if self.size is not None: self.interpolation = interpolation self.interpolation =", "size is not 
None and size<=0 else size self.size =", "None: self.crop_size = size if size is not None else", "image = Image.open(example[\"file_path_\"]) if not image.mode == \"RGB\": image =", "1.0).astype(np.float32) segmentation = processed[\"mask\"] onehot = np.eye(self.n_labels)[segmentation] example[\"segmentation\"] = onehot", "\"bilinear\": cv2.INTER_LINEAR, \"bicubic\": cv2.INTER_CUBIC, \"area\": cv2.INTER_AREA, \"lanczos\": cv2.INTER_LANCZOS4}[self.interpolation] self.image_rescaler =", "return self._length def __getitem__(self, i): example = dict((k, self.labels[k][i]) for", "l.replace(\".jpg\", \".png\")) for l in self.image_paths], \"scene_category\": [self.scene_categories[l.replace(\".jpg\", \"\")] for", "l in self.image_paths], } size = None if size is", "example[\"segmentation\"] = onehot return example class ADE20kTrain(ADE20kBase): # default to", "segmentation = np.array(segmentation).astype(np.uint8) if self.size is not None: segmentation =", "line in self.scene_categories) with open(self.data_csv, \"r\") as f: self.image_paths =", "image, \"mask\": segmentation} example[\"image\"] = (processed[\"image\"]/127.5 - 1.0).astype(np.float32) segmentation =", "for examples included in repo class Examples(SegmentationBase): def __init__(self, size=256,", "\"validation\": \"data/ade20k_test.txt\"}[self.split] self.data_root = \"./data/ade20k_root\" with open(os.path.join(self.data_root, \"sceneCategories.txt\"), \"r\") as", "interpolation=interpolation, crop_size=crop_size) def get_split(self): return \"train\" class ADE20kValidation(ADE20kBase): def get_split(self):", "= ADE20kValidation() ex = dset[0] for k in [\"image\", \"scene_category\",", "albumentations.CenterCrop(height=self.crop_size, width=self.crop_size) else: self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size) self.preprocessor = self.cropper", "{\"train\": \"data/ade20k_train.txt\", \"validation\": \"data/ade20k_test.txt\"}[self.split] self.data_root = 
\"./data/ade20k_root\" with open(os.path.join(self.data_root, \"sceneCategories.txt\"),", "for line in self.scene_categories) with open(self.data_csv, \"r\") as f: self.image_paths", "random_crop=False, interpolation=\"bicubic\", crop_size=None): self.split = self.get_split() self.n_labels = 151 #", "crop_size if self.size is not None: self.interpolation = interpolation self.interpolation", "self.image_paths = f.read().splitlines() self._length = len(self.image_paths) ss = self.split if", "def get_split(self): return \"train\" class ADE20kValidation(ADE20kBase): def get_split(self): return \"validation\"", "= np.eye(self.n_labels)[segmentation] example[\"segmentation\"] = onehot return example class ADE20kTrain(ADE20kBase): #", "None if size is not None and size<=0 else size", "= albumentations.SmallestMaxSize(max_size=self.size, interpolation=cv2.INTER_NEAREST) if crop_size is not None: self.center_crop =", "dict(line.split() for line in self.scene_categories) with open(self.data_csv, \"r\") as f:", "included in repo class Examples(SegmentationBase): def __init__(self, size=256, random_crop=False, interpolation=\"bicubic\"):", "size=size, random_crop=random_crop, interpolation=interpolation, crop_size=crop_size) def get_split(self): return \"train\" class ADE20kValidation(ADE20kBase):", "if self.size is not None: segmentation = self.segmentation_rescaler(image=segmentation)[\"image\"] if self.size", "\"validation\" if __name__ == \"__main__\": dset = ADE20kValidation() ex =", "albumentations.SmallestMaxSize(max_size=self.size, interpolation=self.interpolation) self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=cv2.INTER_NEAREST) if crop_size is not", "if self.size is not None: processed = self.preprocessor(image=image, mask=segmentation) else:", "self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=cv2.INTER_NEAREST) if crop_size is not None: self.center_crop", 
"\"relative_file_path_\": [l for l in self.image_paths], \"file_path_\": [os.path.join(self.data_root, \"images\",ss, l)", "= dict((k, self.labels[k][i]) for k in self.labels) image = Image.open(example[\"file_path_\"])", "crop_size=None): self.split = self.get_split() self.n_labels = 151 # unknown +", "is not None: self.interpolation = interpolation self.interpolation = { \"nearest\":", "self.labels = { \"relative_file_path_\": [l for l in self.image_paths], \"file_path_\":", "ADE20kTrain(ADE20kBase): # default to random_crop=True def __init__(self, config=None, size=None, random_crop=True,", "cv2.INTER_LANCZOS4}[self.interpolation] self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=self.interpolation) self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=cv2.INTER_NEAREST) if", "[self.scene_categories[l.replace(\".jpg\", \"\")] for l in self.image_paths], } size = None", "Dataset from taming.data.sflckr import SegmentationBase # for examples included in", "repo class Examples(SegmentationBase): def __init__(self, size=256, random_crop=False, interpolation=\"bicubic\"): super().__init__(data_csv=\"data/ade20k_examples.txt\", data_root=\"data/ade20k_images\",", "cv2.INTER_AREA, \"lanczos\": cv2.INTER_LANCZOS4}[self.interpolation] self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=self.interpolation) self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size,", "taming.data.sflckr import SegmentationBase # for examples included in repo class", "self.size is not None: self.interpolation = interpolation self.interpolation = {", "self.segmentation_rescaler(image=segmentation)[\"image\"] if self.size is not None: processed = self.preprocessor(image=image, mask=segmentation)", "self.size is not None: segmentation = self.segmentation_rescaler(image=segmentation)[\"image\"] if self.size is", "__init__(self, size=256, random_crop=False, 
interpolation=\"bicubic\"): super().__init__(data_csv=\"data/ade20k_examples.txt\", data_root=\"data/ade20k_images\", segmentation_root=\"data/ade20k_segmentations\", size=size, random_crop=random_crop, interpolation=interpolation,", "\"scene_category\": [self.scene_categories[l.replace(\".jpg\", \"\")] for l in self.image_paths], } size =", "open(self.data_csv, \"r\") as f: self.image_paths = f.read().splitlines() self._length = len(self.image_paths)", "\"images\",ss, l) for l in self.image_paths], \"relative_segmentation_path_\": [l.replace(\".jpg\", \".png\") for", "\"RGB\": image = image.convert(\"RGB\") image = np.array(image).astype(np.uint8) if self.size is", "= albumentations.SmallestMaxSize(max_size=self.size, interpolation=self.interpolation) self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=cv2.INTER_NEAREST) if crop_size is", "None else None else: self.crop_size = crop_size if self.size is", "self.get_split() self.n_labels = 151 # unknown + 150 self.data_csv =", "as np import cv2 import albumentations from PIL import Image", "random_crop=True def __init__(self, config=None, size=None, random_crop=True, interpolation=\"bicubic\", crop_size=None): super().__init__(config=config, size=size,", "self.center_crop = not random_crop if self.center_crop: self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size)", "= size if crop_size is None: self.crop_size = size if", "segmentation = processed[\"mask\"] onehot = np.eye(self.n_labels)[segmentation] example[\"segmentation\"] = onehot return", "= f.read().splitlines() self._length = len(self.image_paths) ss = self.split if ss=='train':", "__init__(self, config=None, size=None, random_crop=True, interpolation=\"bicubic\", crop_size=None): super().__init__(config=config, size=size, random_crop=random_crop, interpolation=interpolation,", "shift_segmentation=False) # With semantic map and scene label class ADE20kBase(Dataset):", "crop_size is not 
None: self.center_crop = not random_crop if self.center_crop:", "None: segmentation = self.segmentation_rescaler(image=segmentation)[\"image\"] if self.size is not None: processed", "self.n_labels = 151 # unknown + 150 self.data_csv = {\"train\":", "size if crop_size is None: self.crop_size = size if size", "in self.image_paths], \"file_path_\": [os.path.join(self.data_root, \"images\",ss, l) for l in self.image_paths],", "size=None, random_crop=True, interpolation=\"bicubic\", crop_size=None): super().__init__(config=config, size=size, random_crop=random_crop, interpolation=interpolation, crop_size=crop_size) def", "f: self.scene_categories = f.read().splitlines() self.scene_categories = dict(line.split() for line in", "if ss=='train': ss='training' self.labels = { \"relative_file_path_\": [l for l", "not None: image = self.image_rescaler(image=image)[\"image\"] segmentation = Image.open(example[\"segmentation_path_\"]) segmentation =", "size if size is not None else None else: self.crop_size", "\"r\") as f: self.image_paths = f.read().splitlines() self._length = len(self.image_paths) ss", "import Image from torch.utils.data import Dataset from taming.data.sflckr import SegmentationBase", "return \"train\" class ADE20kValidation(ADE20kBase): def get_split(self): return \"validation\" if __name__", "\"nearest\": cv2.INTER_NEAREST, \"bilinear\": cv2.INTER_LINEAR, \"bicubic\": cv2.INTER_CUBIC, \"area\": cv2.INTER_AREA, \"lanczos\": cv2.INTER_LANCZOS4}[self.interpolation]", "size = None if size is not None and size<=0", "size is not None else None else: self.crop_size = crop_size", "not None: self.center_crop = not random_crop if self.center_crop: self.cropper =", "= len(self.image_paths) ss = self.split if ss=='train': ss='training' self.labels =", "self._length = len(self.image_paths) ss = self.split if ss=='train': ss='training' self.labels", "= self.get_split() self.n_labels = 151 # unknown + 150 self.data_csv", "unknown + 150 self.data_csv = {\"train\": 
\"data/ade20k_train.txt\", \"validation\": \"data/ade20k_test.txt\"}[self.split] self.data_root", "class Examples(SegmentationBase): def __init__(self, size=256, random_crop=False, interpolation=\"bicubic\"): super().__init__(data_csv=\"data/ade20k_examples.txt\", data_root=\"data/ade20k_images\", segmentation_root=\"data/ade20k_segmentations\",", "def __init__(self, config=None, size=None, random_crop=True, interpolation=\"bicubic\", crop_size=None): super().__init__(config=config, size=size, random_crop=random_crop,", "SegmentationBase # for examples included in repo class Examples(SegmentationBase): def", "is not None: segmentation = self.segmentation_rescaler(image=segmentation)[\"image\"] if self.size is not", "open(os.path.join(self.data_root, \"sceneCategories.txt\"), \"r\") as f: self.scene_categories = f.read().splitlines() self.scene_categories =", "return \"validation\" if __name__ == \"__main__\": dset = ADE20kValidation() ex", "is not None: self.center_crop = not random_crop if self.center_crop: self.cropper", "None: processed = self.preprocessor(image=image, mask=segmentation) else: processed = {\"image\": image,", "self.scene_categories) with open(self.data_csv, \"r\") as f: self.image_paths = f.read().splitlines() self._length", "= { \"nearest\": cv2.INTER_NEAREST, \"bilinear\": cv2.INTER_LINEAR, \"bicubic\": cv2.INTER_CUBIC, \"area\": cv2.INTER_AREA,", "[l.replace(\".jpg\", \".png\") for l in self.image_paths], \"segmentation_path_\": [os.path.join(self.data_root, \"annotations\",ss, l.replace(\".jpg\",", "f.read().splitlines() self._length = len(self.image_paths) ss = self.split if ss=='train': ss='training'", "in self.image_paths], \"relative_segmentation_path_\": [l.replace(\".jpg\", \".png\") for l in self.image_paths], \"segmentation_path_\":", "[os.path.join(self.data_root, \"images\",ss, l) for l in self.image_paths], \"relative_segmentation_path_\": [l.replace(\".jpg\", \".png\")", "self.split = self.get_split() self.n_labels = 151 # 
unknown + 150", "\"sceneCategories.txt\"), \"r\") as f: self.scene_categories = f.read().splitlines() self.scene_categories = dict(line.split()", "np.array(image).astype(np.uint8) if self.size is not None: image = self.image_rescaler(image=image)[\"image\"] segmentation", "self.interpolation = interpolation self.interpolation = { \"nearest\": cv2.INTER_NEAREST, \"bilinear\": cv2.INTER_LINEAR,", "scene label class ADE20kBase(Dataset): def __init__(self, config=None, size=None, random_crop=False, interpolation=\"bicubic\",", "def __init__(self, config=None, size=None, random_crop=False, interpolation=\"bicubic\", crop_size=None): self.split = self.get_split()", "for k in [\"image\", \"scene_category\", \"segmentation\"]: print(type(ex[k])) try: print(ex[k].shape) except:", "crop_size is None: self.crop_size = size if size is not", "Image.open(example[\"segmentation_path_\"]) segmentation = np.array(segmentation).astype(np.uint8) if self.size is not None: segmentation", "interpolation=\"bicubic\"): super().__init__(data_csv=\"data/ade20k_examples.txt\", data_root=\"data/ade20k_images\", segmentation_root=\"data/ade20k_segmentations\", size=size, random_crop=random_crop, interpolation=interpolation, n_labels=151, shift_segmentation=False) #", "is not None else None else: self.crop_size = crop_size if", "self.scene_categories = f.read().splitlines() self.scene_categories = dict(line.split() for line in self.scene_categories)", "class ADE20kBase(Dataset): def __init__(self, config=None, size=None, random_crop=False, interpolation=\"bicubic\", crop_size=None): self.split", "= \"./data/ade20k_root\" with open(os.path.join(self.data_root, \"sceneCategories.txt\"), \"r\") as f: self.scene_categories =", "ss=='train': ss='training' self.labels = { \"relative_file_path_\": [l for l in", "else: processed = {\"image\": image, \"mask\": segmentation} example[\"image\"] = (processed[\"image\"]/127.5", "width=self.crop_size) else: self.cropper = 
albumentations.RandomCrop(height=self.crop_size, width=self.crop_size) self.preprocessor = self.cropper def", "import albumentations from PIL import Image from torch.utils.data import Dataset", "self.size = size if crop_size is None: self.crop_size = size", "size=256, random_crop=False, interpolation=\"bicubic\"): super().__init__(data_csv=\"data/ade20k_examples.txt\", data_root=\"data/ade20k_images\", segmentation_root=\"data/ade20k_segmentations\", size=size, random_crop=random_crop, interpolation=interpolation, n_labels=151,", "super().__init__(data_csv=\"data/ade20k_examples.txt\", data_root=\"data/ade20k_images\", segmentation_root=\"data/ade20k_segmentations\", size=size, random_crop=random_crop, interpolation=interpolation, n_labels=151, shift_segmentation=False) # With", "def __init__(self, size=256, random_crop=False, interpolation=\"bicubic\"): super().__init__(data_csv=\"data/ade20k_examples.txt\", data_root=\"data/ade20k_images\", segmentation_root=\"data/ade20k_segmentations\", size=size, random_crop=random_crop,", "\"mask\": segmentation} example[\"image\"] = (processed[\"image\"]/127.5 - 1.0).astype(np.float32) segmentation = processed[\"mask\"]", "150 self.data_csv = {\"train\": \"data/ade20k_train.txt\", \"validation\": \"data/ade20k_test.txt\"}[self.split] self.data_root = \"./data/ade20k_root\"", "self.image_paths], \"scene_category\": [self.scene_categories[l.replace(\".jpg\", \"\")] for l in self.image_paths], } size", "{ \"nearest\": cv2.INTER_NEAREST, \"bilinear\": cv2.INTER_LINEAR, \"bicubic\": cv2.INTER_CUBIC, \"area\": cv2.INTER_AREA, \"lanczos\":", "os import numpy as np import cv2 import albumentations from", "examples included in repo class Examples(SegmentationBase): def __init__(self, size=256, random_crop=False,", "= dset[0] for k in [\"image\", \"scene_category\", \"segmentation\"]: print(type(ex[k])) try:", "self.preprocessor(image=image, mask=segmentation) else: processed = {\"image\": image, \"mask\": segmentation} 
example[\"image\"]", "image = self.image_rescaler(image=image)[\"image\"] segmentation = Image.open(example[\"segmentation_path_\"]) segmentation = np.array(segmentation).astype(np.uint8) if", "self.crop_size = size if size is not None else None", "and scene label class ADE20kBase(Dataset): def __init__(self, config=None, size=None, random_crop=False,", "\"segmentation_path_\": [os.path.join(self.data_root, \"annotations\",ss, l.replace(\".jpg\", \".png\")) for l in self.image_paths], \"scene_category\":", "data_root=\"data/ade20k_images\", segmentation_root=\"data/ade20k_segmentations\", size=size, random_crop=random_crop, interpolation=interpolation, n_labels=151, shift_segmentation=False) # With semantic", "not None: processed = self.preprocessor(image=image, mask=segmentation) else: processed = {\"image\":", "import Dataset from taming.data.sflckr import SegmentationBase # for examples included", "\"file_path_\": [os.path.join(self.data_root, \"images\",ss, l) for l in self.image_paths], \"relative_segmentation_path_\": [l.replace(\".jpg\",", "and size<=0 else size self.size = size if crop_size is", "def __len__(self): return self._length def __getitem__(self, i): example = dict((k,", "in self.image_paths], \"scene_category\": [self.scene_categories[l.replace(\".jpg\", \"\")] for l in self.image_paths], }", "processed = {\"image\": image, \"mask\": segmentation} example[\"image\"] = (processed[\"image\"]/127.5 -", "cv2 import albumentations from PIL import Image from torch.utils.data import", "self.data_root = \"./data/ade20k_root\" with open(os.path.join(self.data_root, \"sceneCategories.txt\"), \"r\") as f: self.scene_categories", "self.cropper def __len__(self): return self._length def __getitem__(self, i): example =", "interpolation=cv2.INTER_NEAREST) if crop_size is not None: self.center_crop = not random_crop", "+ 150 self.data_csv = {\"train\": \"data/ade20k_train.txt\", \"validation\": \"data/ade20k_test.txt\"}[self.split] self.data_root =", "# With 
semantic map and scene label class ADE20kBase(Dataset): def", "not None and size<=0 else size self.size = size if", "from taming.data.sflckr import SegmentationBase # for examples included in repo", "cv2.INTER_CUBIC, \"area\": cv2.INTER_AREA, \"lanczos\": cv2.INTER_LANCZOS4}[self.interpolation] self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=self.interpolation) self.segmentation_rescaler", "\"lanczos\": cv2.INTER_LANCZOS4}[self.interpolation] self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=self.interpolation) self.segmentation_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=cv2.INTER_NEAREST)", "- 1.0).astype(np.float32) segmentation = processed[\"mask\"] onehot = np.eye(self.n_labels)[segmentation] example[\"segmentation\"] =", "[l for l in self.image_paths], \"file_path_\": [os.path.join(self.data_root, \"images\",ss, l) for", "if size is not None and size<=0 else size self.size", "class ADE20kTrain(ADE20kBase): # default to random_crop=True def __init__(self, config=None, size=None,", "size=size, random_crop=random_crop, interpolation=interpolation, n_labels=151, shift_segmentation=False) # With semantic map and", "k in [\"image\", \"scene_category\", \"segmentation\"]: print(type(ex[k])) try: print(ex[k].shape) except: print(ex[k])", "\"annotations\",ss, l.replace(\".jpg\", \".png\")) for l in self.image_paths], \"scene_category\": [self.scene_categories[l.replace(\".jpg\", \"\")]", "else: self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size) self.preprocessor = self.cropper def __len__(self):", "PIL import Image from torch.utils.data import Dataset from taming.data.sflckr import", "label class ADE20kBase(Dataset): def __init__(self, config=None, size=None, random_crop=False, interpolation=\"bicubic\", crop_size=None):", "\"bicubic\": cv2.INTER_CUBIC, \"area\": cv2.INTER_AREA, \"lanczos\": 
cv2.INTER_LANCZOS4}[self.interpolation] self.image_rescaler = albumentations.SmallestMaxSize(max_size=self.size, interpolation=self.interpolation)", "self.data_csv = {\"train\": \"data/ade20k_train.txt\", \"validation\": \"data/ade20k_test.txt\"}[self.split] self.data_root = \"./data/ade20k_root\" with", "from PIL import Image from torch.utils.data import Dataset from taming.data.sflckr", "random_crop=False, interpolation=\"bicubic\"): super().__init__(data_csv=\"data/ade20k_examples.txt\", data_root=\"data/ade20k_images\", segmentation_root=\"data/ade20k_segmentations\", size=size, random_crop=random_crop, interpolation=interpolation, n_labels=151, shift_segmentation=False)", "{\"image\": image, \"mask\": segmentation} example[\"image\"] = (processed[\"image\"]/127.5 - 1.0).astype(np.float32) segmentation", "default to random_crop=True def __init__(self, config=None, size=None, random_crop=True, interpolation=\"bicubic\", crop_size=None):" ]
[ ") continue # only add the host_to_delete if it was", "get WORKDIR and CFE_FR_TABLES values from config.sh\") sys.exit(1) # Primary", "%s, stderr:\\n%s\\nstdout:\\n%s\", proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\"), ) if len(hosts_to_delete) != 0:", "email = input(\"Enter email for fr_distributed_cleanup accounts: \") logger.info(\"Creating fr_distributed_cleanup", "together into hubs.cert CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, \"hubs.cert\") # Note: remove", "\"fr_distributed_cleanup Federated Host Cleanup role\", \"includeContext\": \"cfengine\", }, ) if", "CONFLICT (hostkey,hub_id) DO UPDATE SET deleted = excluded.deleted;\\n\" ) clear_sql", "sql = \"SELECT hub_id FROM __hubs WHERE hostkey = '{}'\".format(feeder_hostkey)", "Response was {}\".format(response)) continue # query API should return one", "= api.fr_hub_status() if ( status[\"status\"] == 200 and status[\"role\"] ==", "# Primary dir in which to place various needed files", "least one feeder hub before running this script.\" ) for", "= getpass( prompt=\"Enter admin credentials for {} at {}: \".format(", "from /var/cfengine/httpd/ssl/certs on # superhub and feeders and cat all", "== 200 and status[\"role\"] == \"superhub\" and status[\"configured\"] ): logger.debug(\"This", "feeders and superhubs with proper RBAC settings for normal operation.", "import read_secret, write_secret WORKDIR = None CFE_FR_TABLES = None #", "fr_distributed_cleanup user and role on superhub and all feeders. 
DISTRIBUTED_CLEANUP_SECRET_PATH", "overwrite fr_distributed_cleanup user and role on superhub and all feeders.", "running this script.\" ) sys.exit(1) email = input(\"Enter email for", "and delete from all federated tables similar to the clear_hosts_references()", "host(s) to delete on feeder %s\", len(hosts_to_delete), hub[\"ui_name\"] ) #", "fr_distributed_cleanup.py - a script to remove hosts which have migrated", ") ) sys.exit(1) feederResponse = api.fr_remote_hubs() if not feederResponse[\"hubs\"]: print(", "if response[\"status\"] != 201: print( \"Problem creating fr_distributed_cleanup role on", "value. host_to_delete = row[0] response = feeder_api.delete(\"host\", host_to_delete) # both", "hosts to delete. No actions taken.\", feeder_hostname) continue logger.debug( \"%s", "post processing\", feeder_hostname, ) continue # simulate the host api", "!= 201: print( \"Problem creating fr_distributed_cleanup user on superhub. {}\".format(", "{ \"description\": \"fr_distributed_cleanup Federated Host Cleanup role\", \"includeContext\": \"cfengine\", },", "feeder_api.put( \"user\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup user\",", "proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\"), ) if len(hosts_to_delete) != 0: logger.info( \"%s:", "can only be run on a Federated Reporting hub configured", "!= '{0}' )\"\"\".format( feeder_hubid ) response = api.query(sql) if response[\"status\"]", "{}\".format( status ) ) sys.exit(1) feederResponse = api.fr_remote_hubs() if not", "0: logger.info( \"%s: %s host deletions processed\", hub[\"ui_name\"], len(hosts_to_delete), )", "post_sql += \"INSERT INTO __hosts (hostkey,deleted) VALUES\" for hostkey in", "on feeder %s\", len(hosts_to_delete), hub[\"ui_name\"] ) # build up a", "subprocess.Popen( [\"/var/cfengine/bin/psql\", \"cfdb\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as proc: logger.debug(\"got", "# special case of 
partitioning, operating on parent table will", "\"SELECT hub_id FROM __hubs WHERE hostkey = '{}'\".format(feeder_hostkey) response =", "NovaApi( api_user=\"admin\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) logger.info(\"Creating fr_distributed_cleanup role on", "hub before running this script.\" ) sys.exit(1) email = input(\"Enter", "not WORKDIR or not CFE_FR_TABLES: print(\"Unable to get WORKDIR and", "host is a superhub configured for Federated Reporting.\") else: if", "( response[\"status\"] == 200 and response[\"role\"] == \"superhub\" and response[\"configured\"]", "200: print( \"Unable to query for deletion candidates. Response was", "one row, [0], and one column, [0], in rows value", "prompt=\"Enter admin password for superhub {}: \".format(platform.node()) ) api =", "proc: lines = proc.stdout.readlines() WORKDIR = lines[0].decode().strip() CFE_FR_TABLES = [table.strip()", "\"Unable to get status for feeder {}. Skipping\".format(feeder_hostname) ) continue", "if response[\"status\"] != 201: print( \"Problem creating fr_distributed_cleanup user on", "__hubs WHERE hostkey = '{}'\".format(feeder_hostkey) response = api.query(sql) if response[\"status\"]", "= NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>_cleanup_password ) # defaults to localhost response", "to get status for feeder {}. 
Skipping\".format(feeder_hostname) ) continue sql", "delete_sql = \"\" post_hostkeys = [] for row in hosts_to_delete:", ") if response[\"status\"] != 201: print( \"Problem creating fr_distributed_cleanup user", "\"__promiselog\" clear_sql += ( \"DELETE FROM {} WHERE hub_id =", "row, [0], and one column, [0], in rows value feeder_hubid", "and response[\"configured\"] ): print( \"{} can only be run on", "#!/usr/bin/env python3 \"\"\" fr_distributed_cleanup.py - a script to remove hosts", "are incorrect, try again\") sys.exit(1) else: print( \"Check the status", "[\"query.post\", \"remoteHub.list\", \"hubStatus.get\"] ) if response[\"status\"] != 201: print(\"Unable to", "api = NovaApi(api_user=\"admin\", api_password=<PASSWORD>) # first confirm that this host", "as proc: lines = proc.stdout.readlines() WORKDIR = lines[0].decode().strip() CFE_FR_TABLES =", "status ) ) sys.exit(1) feederResponse = api.fr_remote_hubs() if not feederResponse[\"hubs\"]:", "\"user\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup user\", \"email\":", "NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>_cleanup_password ) # defaults to localhost response =", ") # build up a post-loop SQL statement to delete", "= logging.getLogger(\"fr_distributed_cleanup\") ch = logging.StreamHandler() if args.debug: logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) if", "!= 201: print( \"Problem creating fr_distributed_cleanup role on superhub. {}\".format(", "\"roles\": [\"fr_distributed_cleanup\"], }, ) if response[\"status\"] != 201: print( \"Problem", "cert_path=CERT_PATH, hostname=feeder_hostname, ) response = feeder_api.status() if response[\"status\"] != 200:", "\"\" post_hostkeys = [] for row in hosts_to_delete: # The", "201: print( \"Problem creating fr_distributed_cleanup user on superhub. 
{}\".format( response", ") clear_sql = \"set schema 'public';\\n\" for table in CFE_FR_TABLES:", "This enables policy in cfe_internal/enterprise/federation/federation.cf ```json { \"classes\": { \"cfengine_mp_enable_fr_distributed_cleanup\":", "admin password for superhub {}: \".format(platform.node()) ) api = NovaApi(api_user=\"admin\",", "main() else: raise ImportError(\"fr_distributed_cleanup.py must only be used as a", "superhub {}: \".format(platform.node()) ) api = NovaApi(api_user=\"admin\", api_password=<PASSWORD>) # first", "feeder %s\", len(hosts_to_delete), hub[\"ui_name\"] ) # build up a post-loop", "sql...\") outs, errs = proc.communicate(input=post_sql.encode()) if \"ERROR\" in errs.decode(\"utf-8\"): print(", "to localhost response = api.fr_hub_status() if not ( response[\"status\"] ==", ") ) sys.exit(1) response = feeder_api.put_role_permissions( \"fr_distributed_cleanup\", [\"host.delete\"] ) if", "delete from all federated tables similar to the clear_hosts_references() pgplsql", "= '{}'\".format(feeder_hostkey) response = api.query(sql) if response[\"status\"] != 200: print(\"Unable", "args.inform: logger.setLevel(logging.INFO) ch.setLevel(logging.INFO) logger.addHandler(ch) if not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH): if sys.stdout.isatty(): interactive_setup()", "# Note: remove the file at DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything.", "200 and status[\"role\"] == \"superhub\" and status[\"configured\"] ): logger.debug(\"This host", "delete. 
No actions taken.\", feeder_hostname) continue logger.debug( \"%s host(s) to", "api delete process by setting current_timestamp in deleted column #", "all together into hubs.cert CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, \"hubs.cert\") # Note:", "api.fr_hub_status() if not ( response[\"status\"] == 200 and response[\"role\"] ==", "user on superhub\") response = api.put( \"user\", \"fr_distributed_cleanup\", { \"description\":", "to place various needed files DISTRIBUTED_CLEANUP_DIR = \"/opt/cfengine/federation/cftransport/distributed_cleanup\" # collect", "lastseendirection = 'INCOMING' GROUP BY hostkey ) as newest ON", "build up a post-loop SQL statement to delete hosts locally", "\"source {}; echo $WORKDIR; echo $CFE_FR_TABLES\".format(config_sh_path) with subprocess.Popen( cmd, stdout=subprocess.PIPE,", "for {} at {}: \".format( hub[\"ui_name\"], hub[\"api_url\"] ) ) feeder_hostname", ") sys.exit(1) response = api.fr_remote_hubs() if not response[\"hubs\"]: print( \"No", "errs.decode(\"utf-8\"): print( \"Problem running post processing SQL. returncode was {},", "will be prompted for superhub admin credentials and then admin", "api.put( \"role\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup role\",", "VALUES\" for hostkey in post_hostkeys: delete_sql += \"('{}', CURRENT_TIMESTAMP) \".format(hostkey)", "values. # We only selected hostkey so will take the", "cfsecret import read_secret, write_secret WORKDIR = None CFE_FR_TABLES = None", "cert_path=CERT_PATH, hostname=feeder_hostname, ) logger.info(\"Creating fr_distributed_cleanup role on %s\", feeder_hostname) response", ") continue # simulate the host api delete process by", "= hub[\"hostkey\"] feeder_hostname = hub[\"ui_name\"] feeder_api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>,", "print( \"Problem creating fr_distributed_cleanup user on {}. 
{}\".format( feeder_hostname, response", "+= \"\\\\set ON_ERROR STOP on\\n\" delete_sql = \"\" post_hostkeys =", "are lists of column values. # We only selected hostkey", ") if len(hosts_to_delete) != 0: logger.info( \"%s: %s host deletions", "superhubs with proper RBAC settings for normal operation. You will", "in errs.decode(\"utf-8\"): print( \"Problem running post processing SQL. returncode was", "[\"/var/cfengine/bin/psql\", \"cfdb\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as proc: logger.debug(\"got a", "hub[\"api_url\"] ) ) feeder_hostname = hub[\"ui_name\"] feeder_api = NovaApi( api_user=\"admin\",", "continue logger.debug( \"%s host(s) to delete on feeder %s\", len(hosts_to_delete),", "%s got %s status code\", host_to_delete, feeder_hostname, response[\"status\"], ) continue", "schema to make deletions easier/more direct without having to #", "user\", \"email\": \"{}\".format(email), \"password\": <PASSWORD>(fr_<PASSWORD>_cleanup_password), \"roles\": [\"fr_distributed_cleanup\"], }, ) if", "run as root\".format(os.path.basename(__file__))) parser = argparse.ArgumentParser( description=\"Clean up migrating clients", "# first confirm that this host is a superhub status", "ls.lastseentimestamp = newest.newesttimestamp AND ls.hostkey = hosts.hostkey AND ls.hub_id !=", "\"Problem creating fr_distributed_cleanup role on superhub. 
{}\".format( response ) )", "%s\", hub[\"ui_name\"], response[\"rows\"]) hosts_to_delete = response[\"rows\"] if len(hosts_to_delete) == 0:", "email for fr_distributed_cleanup accounts: \") logger.info(\"Creating fr_distributed_cleanup role on superhub...\")", "!= 0: logger.info( \"%s: %s host deletions processed\", hub[\"ui_name\"], len(hosts_to_delete),", "= feeder_api.status() if response[\"status\"] != 200: print( \"Unable to get", "+ clear_sql logger.debug(\"Running SQL:\\n%s\", post_sql) with subprocess.Popen( [\"/var/cfengine/bin/psql\", \"cfdb\"], stdin=subprocess.PIPE,", "= \"\" post_hostkeys = [] for row in hosts_to_delete: #", "superhub\".format( os.path.basename(__file__) ) ) sys.exit(1) response = api.fr_remote_hubs() if not", "proper RBAC settings for normal operation. You will be prompted", "the host api delete process by setting current_timestamp in deleted", "configured is True. {}\".format( status ) ) sys.exit(1) feederResponse =", "that this host is a superhub status = api.fr_hub_status() if", "be prompted for superhub admin credentials and then admin credentials", "post processing SQL. returncode was {}, stderr:\\n{}\\nstdout:\\n{}\".format( proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\")", "to query for feeder hub_id. 
Response was {}\".format(response)) continue #", "feederResponse = api.fr_remote_hubs() if not feederResponse[\"hubs\"]: print( \"No attached feeders.", "a superhub status = api.fr_hub_status() if ( status[\"status\"] == 200", "policy in cfe_internal/enterprise/federation/federation.cf ```json { \"classes\": { \"cfengine_mp_enable_fr_distributed_cleanup\": [ \"any::\"", "else: print( \"{} requires manual setup, please run as root", "os.path.basename(__file__) ) ) sys.exit(1) response = api.fr_remote_hubs() if not response[\"hubs\"]:", "== 401: print(\"admin credentials are incorrect, try again\") sys.exit(1) else:", "args.debug: logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) if args.inform: logger.setLevel(logging.INFO) ch.setLevel(logging.INFO) logger.addHandler(ch) if not", "for normal operation. You will be prompted for superhub admin", "each feeder. \"\"\" import argparse import logging import os import", "for hostkey in post_hostkeys: delete_sql += \"('{}', CURRENT_TIMESTAMP) \".format(hostkey) delete_sql", "\",\".join([\"'{}'\".format(hk) for hk in post_hostkeys]), ) ) post_sql += delete_sql", "= feeder_api.put( \"role\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup", "\"remoteHub.list\", \"hubStatus.get\"] ) if response[\"status\"] != 201: print(\"Unable to set", "ls.hub_id != '{0}' )\"\"\".format( feeder_hubid ) response = api.query(sql) if", "admin credentials on each feeder. \"\"\" import argparse import logging", "sys.exit(1) logger.debug( \"Ran post processing SQL. 
returncode was %s, stderr:\\n%s\\nstdout:\\n%s\",", "fr_distributed_cleanup_password) def main(): if not os.geteuid() == 0: sys.exit(\"\\n{} must", "post_sql = \"set schema 'hub_{}';\\n\".format(feeder_hubid) post_sql += \"\\\\set ON_ERROR STOP", "this script to setup fr_distributed_cleanup role and account on all", "len(hosts_to_delete), ) if __name__ == \"__main__\": main() else: raise ImportError(\"fr_distributed_cleanup.py", "logger.debug( \"%s host(s) to delete on feeder %s\", len(hosts_to_delete), hub[\"ui_name\"]", "configured for Federated Reporting.\") else: if status[\"status\"] == 401: print(\"admin", "FROM lastseenhosts WHERE lastseendirection = 'INCOMING' GROUP BY hostkey )", "so will take the first value. host_to_delete = row[0] response", "\"state/fr_distributed_cleanup.cfsecret\") def interactive_setup(): fr_distributed_cleanup_password = \"\".join(random.choices(string.printable, k=20)) admin_pass = getpass(", "queries post_sql = \"set schema 'hub_{}';\\n\".format(feeder_hubid) post_sql += \"\\\\set ON_ERROR", "clear_sql logger.debug(\"Running SQL:\\n%s\", post_sql) with subprocess.Popen( [\"/var/cfengine/bin/psql\", \"cfdb\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE,", "have migrated to other feeder hubs. To be run on", "[\"host.delete\"] ) if response[\"status\"] != 201: print(\"Unable to set RBAC", "by setting current_timestamp in deleted column # and delete from", "WHERE lastseendirection = 'INCOMING' GROUP BY hostkey ) as newest", "hosts_to_delete: # The query API returns rows which are lists", "\"Problem creating fr_distributed_cleanup user on superhub. {}\".format( response ) )", "response ) ) sys.exit(1) response = api.put_role_permissions( \"fr_distributed_cleanup\", [\"query.post\", \"remoteHub.list\",", "hub_id. 
Response was {}\".format(response)) continue # query API should return", "defaults to localhost response = api.fr_hub_status() if not ( response[\"status\"]", "for Federated Reporting.\") else: if status[\"status\"] == 401: print(\"admin credentials", "user on superhub. {}\".format( response ) ) sys.exit(1) for hub", "delete_sql += ( \"ON CONFLICT (hostkey,hub_id) DO UPDATE SET deleted", "the policy has run on superhub and feeders, run this", "api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>_cleanup_password ) # defaults to localhost", "fr_distributed_cleanup role on superhub...\") response = api.put( \"role\", \"fr_distributed_cleanup\", {", "everything. # api calls will overwrite fr_distributed_cleanup user and role", "= \"\".join(random.choices(string.printable, k=20)) admin_pass = getpass( prompt=\"Enter admin password for", "a class in augments (def.json). This enables policy in cfe_internal/enterprise/federation/federation.cf", "class in augments (def.json). This enables policy in cfe_internal/enterprise/federation/federation.cf ```json", "for table in CFE_FR_TABLES: # special case of partitioning, operating", "a Federated Reporting hub configured to be superhub\".format( os.path.basename(__file__) )", "feeder. \"\"\" import argparse import logging import os import platform", "on %s\", feeder_hostname) response = feeder_api.put( \"user\", \"fr_distributed_cleanup\", { \"description\":", "[0], in rows value feeder_hubid = response[\"rows\"][0][0] sql = \"\"\"", "host_to_delete, feeder_hostname, response[\"status\"], ) continue # only add the host_to_delete", "\"\"\" SELECT DISTINCT hosts.hostkey FROM hosts WHERE hub_id = '{0}'", "1 FROM lastseenhosts ls JOIN ( SELECT hostkey, max(lastseentimestamp) as", "by setting a class in augments (def.json). 
This enables policy", "%s\", feeder_hostname) response = feeder_api.put( \"user\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup", "ON ls.hostkey = newest.hostkey AND ls.lastseentimestamp = newest.newesttimestamp AND ls.hostkey", "True. {}\".format( status ) ) sys.exit(1) feederResponse = api.fr_remote_hubs() if", "rows value feeder_hubid = response[\"rows\"][0][0] sql = \"\"\" SELECT DISTINCT", ") as newest ON ls.hostkey = newest.hostkey AND ls.lastseentimestamp =", "lastseenhosts ls JOIN ( SELECT hostkey, max(lastseentimestamp) as newesttimestamp FROM", "on each feeder. \"\"\" import argparse import logging import os", "to the clear_hosts_references() pgplsql function. post_sql += \"INSERT INTO __hosts", "delete on %s are %s\", hub[\"ui_name\"], response[\"rows\"]) hosts_to_delete = response[\"rows\"]", "remove hosts which have migrated to other feeder hubs. To", "executable=\"/bin/bash\" ) as proc: lines = proc.stdout.readlines() WORKDIR = lines[0].decode().strip()", "role fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user on %s\", feeder_hostname) response", "host is a superhub status = api.fr_hub_status() if ( status[\"status\"]", "are acceptable responses if response[\"status\"] not in [202, 404]: logger.warning(", "processing SQL. 
returncode was %s, stderr:\\n%s\\nstdout:\\n%s\", proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\"), )", "= hosts.hostkey AND ls.hub_id != '{0}' )\"\"\".format( feeder_hubid ) response", "status[\"role\"] == \"superhub\" and status[\"configured\"] ): logger.debug(\"This host is a", "continue sql = \"SELECT hub_id FROM __hubs WHERE hostkey =", "clear_sql = \"set schema 'public';\\n\" for table in CFE_FR_TABLES: #", "sys.exit(1) response = api.put_role_permissions( \"fr_distributed_cleanup\", [\"query.post\", \"remoteHub.list\", \"hubStatus.get\"] ) if", "api.put_role_permissions( \"fr_distributed_cleanup\", [\"query.post\", \"remoteHub.list\", \"hubStatus.get\"] ) if response[\"status\"] != 201:", "status[\"status\"] == 200 and status[\"role\"] == \"superhub\" and status[\"configured\"] ):", "You will be prompted for superhub admin credentials and then", "import argparse import logging import os import platform import string", "running post processing SQL. returncode was {}, stderr:\\n{}\\nstdout:\\n{}\".format( proc.returncode, errs.decode(\"utf-8\"),", "import subprocess import sys from getpass import getpass from nova_api", "on superhub and all feeders. DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, \"state/fr_distributed_cleanup.cfsecret\") def", "was {}, stderr:\\n{}\\nstdout:\\n{}\".format( proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\") ) ) sys.exit(1) logger.debug(", "code\", host_to_delete, feeder_hostname, response[\"status\"], ) continue # only add the", "$CFE_FR_TABLES\".format(config_sh_path) with subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True, executable=\"/bin/bash\" ) as proc:", "if \"ERROR\" in errs.decode(\"utf-8\"): print( \"Problem running post processing SQL.", "description=\"Clean up migrating clients in Federated Reporting setup\" ) group", "\"Unable to query for deletion candidates. 
Response was {}\".format( response", "logging import os import platform import string import random import", "and 404 Not Found are acceptable responses if response[\"status\"] not", "argparse import logging import os import platform import string import", "read_secret, write_secret WORKDIR = None CFE_FR_TABLES = None # get", "\"cfengine_mp_enable_fr_distributed_cleanup\": [ \"any::\" ] } } ``` After the policy", "status code\", host_to_delete, feeder_hostname, response[\"status\"], ) continue # only add", "__hosts (hostkey,deleted) VALUES\" for hostkey in post_hostkeys: delete_sql += \"('{}',", "CFE_FR_TABLES = None # get WORKDIR and CFE_FR_TABLES from config.sh", "proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\") ) ) sys.exit(1) logger.debug( \"Ran post processing", "will work if \"__promiselog_*\" in table: table = \"__promiselog\" clear_sql", "\"fr_distributed_cleanup\", [\"host.delete\"] ) if response[\"status\"] != 201: print(\"Unable to set", "print( \"Unable to query for deletion candidates. 
Response was {}\".format(", "be superhub\".format( os.path.basename(__file__) ) ) sys.exit(1) response = api.fr_remote_hubs() if", "hosts on feeder %s need processing on superhub so skipping", "errs = proc.communicate(input=post_sql.encode()) if \"ERROR\" in errs.decode(\"utf-8\"): print( \"Problem running", "logging.StreamHandler() if args.debug: logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) if args.inform: logger.setLevel(logging.INFO) ch.setLevel(logging.INFO) logger.addHandler(ch)", "Host Cleanup user\", \"email\": \"{}\".format(email), \"password\": \"{}\".format(<PASSWORD>), \"roles\": [\"fr_distributed_cleanup\"], },", "# superhub and feeders and cat all together into hubs.cert", "response = api.put( \"role\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host", "a post-loop SQL statement to delete hosts locally from feeder", "locally from feeder schemas # change to feeder schema to", "logger.debug(\"Running SQL:\\n%s\", post_sql) with subprocess.Popen( [\"/var/cfengine/bin/psql\", \"cfdb\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,", "201: print( \"Problem creating fr_distributed_cleanup user on {}. {}\".format( feeder_hostname,", "sys.stdout.isatty(): interactive_setup() else: print( \"{} requires manual setup, please run", "fr_distributed_cleanup role and account on all feeders and superhubs with", "print(\"Unable to get WORKDIR and CFE_FR_TABLES values from config.sh\") sys.exit(1)", "[\"fr_distributed_cleanup\"], }, ) if response[\"status\"] != 201: print( \"Problem creating", "and account on all feeders and superhubs with proper RBAC", "import string import random import subprocess import sys from getpass", "response[\"rows\"] if len(hosts_to_delete) == 0: logger.info(\"%s: No hosts to delete.", "enable fr_distributed_cleanup by setting a class in augments (def.json). 
This", "os.path.join(DISTRIBUTED_CLEANUP_DIR, \"hubs.cert\") # Note: remove the file at DISTRIBUTED_CLEANUP_SECRET_PATH to", "not CFE_FR_TABLES: print(\"Unable to get WORKDIR and CFE_FR_TABLES values from", "on {}. {}\".format( feeder_hostname, response ) ) sys.exit(1) write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password)", "# query API should return one row, [0], and one", "sys.exit(1) email = input(\"Enter email for fr_distributed_cleanup accounts: \") logger.info(\"Creating", "cfe_internal/enterprise/federation/federation.cf ```json { \"classes\": { \"cfengine_mp_enable_fr_distributed_cleanup\": [ \"any::\" ] }", "query API should return one row, [0], and one column,", "both 202 Accepted and 404 Not Found are acceptable responses", "nova_api import NovaApi from cfsecret import read_secret, write_secret WORKDIR =", "= NovaApi( api_user=\"admin\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) logger.info(\"Creating fr_distributed_cleanup role", "if not response[\"hubs\"]: print( \"No attached feeders. Please attach at", "} ``` After the policy has run on superhub and", "\"DELETE FROM {} WHERE hub_id = {} AND hostkey IN", "hub_id = {} AND hostkey IN ({});\\n\".format( table, feeder_hubid, \",\".join([\"'{}'\".format(hk)", "least one feeder hub before running this script.\" ) sys.exit(1)", "{}\".format( response ) ) sys.exit(1) response = api.put_role_permissions( \"fr_distributed_cleanup\", [\"query.post\",", "= hub[\"ui_name\"] feeder_api = NovaApi( api_user=\"admin\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, )", "= lines[0].decode().strip() CFE_FR_TABLES = [table.strip() for table in lines[1].decode().split()] if", "column values. # We only selected hostkey so will take", ") api = NovaApi(api_user=\"admin\", api_password=<PASSWORD>) # first confirm that this", "pgplsql function. 
post_sql += \"INSERT INTO __hosts (hostkey,deleted) VALUES\" for", "api.fr_hub_status() if ( status[\"status\"] == 200 and status[\"role\"] == \"superhub\"", "which have migrated to other feeder hubs. To be run", "response[\"status\"] != 200: print( \"Unable to get status for feeder", "feeder hub_id. Response was {}\".format(response)) continue # query API should", "role\", \"includeContext\": \"cfengine\", }, ) if response[\"status\"] != 201: print(", "= hub[\"ui_name\"] feeder_api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, )", "\"%s: %s host deletions processed\", hub[\"ui_name\"], len(hosts_to_delete), ) if __name__", "credentials and then admin credentials on each feeder. \"\"\" import", "for row in hosts_to_delete: # The query API returns rows", "WORKDIR = None CFE_FR_TABLES = None # get WORKDIR and", "if not os.geteuid() == 0: sys.exit(\"\\n{} must be run as", "hosts which have migrated to other feeder hubs. To be", "response[\"status\"] != 200: print(\"Unable to query for feeder hub_id. Response", "= 'INCOMING' GROUP BY hostkey ) as newest ON ls.hostkey", "= os.path.join(WORKDIR, \"state/fr_distributed_cleanup.cfsecret\") def interactive_setup(): fr_distributed_cleanup_password = \"\".join(random.choices(string.printable, k=20)) admin_pass", "account on all feeders and superhubs with proper RBAC settings", "+= ( \"DELETE FROM {} WHERE hub_id = {} AND", "logger.debug( \"Ran post processing SQL. returncode was %s, stderr:\\n%s\\nstdout:\\n%s\", proc.returncode,", "logging.getLogger(\"fr_distributed_cleanup\") ch = logging.StreamHandler() if args.debug: logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) if args.inform:", "\"No hosts on feeder %s need processing on superhub so", "and role on superhub and all feeders. 
DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR,", "Primary dir in which to place various needed files DISTRIBUTED_CLEANUP_DIR", "hostkey ) as newest ON ls.hostkey = newest.hostkey AND ls.lastseentimestamp", "rows which are lists of column values. # We only", "Federated Host Cleanup user\", \"email\": \"{}\".format(email), \"password\": <PASSWORD>(fr_<PASSWORD>_cleanup_password), \"roles\": [\"fr_distributed_cleanup\"],", "max(lastseentimestamp) as newesttimestamp FROM lastseenhosts WHERE lastseendirection = 'INCOMING' GROUP", ") if __name__ == \"__main__\": main() else: raise ImportError(\"fr_distributed_cleanup.py must", "{}: \".format(platform.node()) ) api = NovaApi(api_user=\"admin\", api_password=<PASSWORD>) # first confirm", "hub[\"ui_name\"], len(hosts_to_delete), ) if __name__ == \"__main__\": main() else: raise", "SQL. returncode was {}, stderr:\\n{}\\nstdout:\\n{}\".format( proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\") ) )", "os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH): if sys.stdout.isatty(): interactive_setup() else: print( \"{} requires manual setup,", "get WORKDIR and CFE_FR_TABLES from config.sh config_sh_path = os.path.join(os.path.dirname(__file__), \"config.sh\")", "'{}'\".format(feeder_hostkey) response = api.query(sql) if response[\"status\"] != 200: print(\"Unable to", "{ \"cfengine_mp_enable_fr_distributed_cleanup\": [ \"any::\" ] } } ``` After the", "= api.fr_remote_hubs() if not feederResponse[\"hubs\"]: print( \"No attached feeders. 
Please", "excluded.deleted;\\n\" ) clear_sql = \"set schema 'public';\\n\" for table in", "in hosts_to_delete: # The query API returns rows which are", "{}\".format( response ) ) sys.exit(1) for hub in feederResponse[\"hubs\"]: feeder_credentials", "= logging.StreamHandler() if args.debug: logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) if args.inform: logger.setLevel(logging.INFO) ch.setLevel(logging.INFO)", "all federated tables similar to the clear_hosts_references() pgplsql function. post_sql", "will take the first value. host_to_delete = row[0] response =", "WORKDIR = lines[0].decode().strip() CFE_FR_TABLES = [table.strip() for table in lines[1].decode().split()]", "a proc, sending sql...\") outs, errs = proc.communicate(input=post_sql.encode()) if \"ERROR\"", "up migrating clients in Federated Reporting setup\" ) group =", "then admin credentials on each feeder. \"\"\" import argparse import", "superhub after each import of feeder data. First, to setup,", "IN ({});\\n\".format( table, feeder_hubid, \",\".join([\"'{}'\".format(hk) for hk in post_hostkeys]), )", "as proc: logger.debug(\"got a proc, sending sql...\") outs, errs =", "post_hostkeys.append(host_to_delete) if len(post_hostkeys) == 0: logger.info( \"No hosts on feeder", "response = api.put( \"user\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host", "[] for row in hosts_to_delete: # The query API returns", "status for feeder {}. 
Skipping\".format(feeder_hostname) ) continue sql = \"SELECT", "hub_id in queries post_sql = \"set schema 'hub_{}';\\n\".format(feeder_hubid) post_sql +=", "proc.communicate(input=post_sql.encode()) if \"ERROR\" in errs.decode(\"utf-8\"): print( \"Problem running post processing", "( \"ON CONFLICT (hostkey,hub_id) DO UPDATE SET deleted = excluded.deleted;\\n\"", ") ) feeder_hostname = hub[\"ui_name\"] feeder_api = NovaApi( api_user=\"admin\", api_password=<PASSWORD>,", "delete on feeder %s\", len(hosts_to_delete), hub[\"ui_name\"] ) # build up", "action=\"store_true\") args = parser.parse_args() global logger logger = logging.getLogger(\"fr_distributed_cleanup\") ch", "stderr=subprocess.PIPE, ) as proc: logger.debug(\"got a proc, sending sql...\") outs,", "os.path.join(WORKDIR, \"state/fr_distributed_cleanup.cfsecret\") def interactive_setup(): fr_distributed_cleanup_password = \"\".join(random.choices(string.printable, k=20)) admin_pass =", "# build up a post-loop SQL statement to delete hosts", "at {}: \".format( hub[\"ui_name\"], hub[\"api_url\"] ) ) feeder_hostname = hub[\"ui_name\"]", "setting current_timestamp in deleted column # and delete from all", ") as proc: logger.debug(\"got a proc, sending sql...\") outs, errs", "if __name__ == \"__main__\": main() else: raise ImportError(\"fr_distributed_cleanup.py must only", "on %s\", feeder_hostname) response = feeder_api.put( \"role\", \"fr_distributed_cleanup\", { \"description\":", "response[\"rows\"]) hosts_to_delete = response[\"rows\"] if len(hosts_to_delete) == 0: logger.info(\"%s: No", "\"config.sh\") cmd = \"source {}; echo $WORKDIR; echo $CFE_FR_TABLES\".format(config_sh_path) with", ") post_sql += delete_sql + clear_sql logger.debug(\"Running SQL:\\n%s\", post_sql) with", "sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user on %s\", feeder_hostname) response = feeder_api.put(", "the clear_hosts_references() pgplsql function. 
post_sql += \"INSERT INTO __hosts (hostkey,deleted)", "file at DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything. # api calls will", "RBAC settings for normal operation. You will be prompted for", "admin credentials for {} at {}: \".format( hub[\"ui_name\"], hub[\"api_url\"] )", "deletion candidates. Response was {}\".format( response ) ) sys.exit(1) logger.debug(\"Hosts", "if ( status[\"status\"] == 200 and status[\"role\"] == \"superhub\" and", "for hk in post_hostkeys]), ) ) post_sql += delete_sql +", "into hubs.cert CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, \"hubs.cert\") # Note: remove the", "are %s\", hub[\"ui_name\"], response[\"rows\"]) hosts_to_delete = response[\"rows\"] if len(hosts_to_delete) ==", "table, feeder_hubid, \",\".join([\"'{}'\".format(hk) for hk in post_hostkeys]), ) ) post_sql", "values from config.sh\") sys.exit(1) # Primary dir in which to", ") continue sql = \"SELECT hub_id FROM __hubs WHERE hostkey", "hub[\"ui_name\"], hub[\"api_url\"] ) ) feeder_hostname = hub[\"ui_name\"] feeder_api = NovaApi(", "up a post-loop SQL statement to delete hosts locally from", "root\".format(os.path.basename(__file__))) parser = argparse.ArgumentParser( description=\"Clean up migrating clients in Federated", "{}: \".format( hub[\"ui_name\"], hub[\"api_url\"] ) ) feeder_hostname = hub[\"ui_name\"] feeder_api", "feeder_api.put_role_permissions( \"fr_distributed_cleanup\", [\"host.delete\"] ) if response[\"status\"] != 201: print(\"Unable to", "stderr:\\n{}\\nstdout:\\n{}\".format( proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\") ) ) sys.exit(1) logger.debug( \"Ran post", "setup fr_distributed_cleanup role and account on all feeders and superhubs", "be run on Federated Reporting superhub after each import of", "partitioning, operating on parent table will work if \"__promiselog_*\" in", "\") logger.info(\"Creating fr_distributed_cleanup role on superhub...\") response = api.put( \"role\",", "ch.setLevel(logging.INFO) 
logger.addHandler(ch) if not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH): if sys.stdout.isatty(): interactive_setup() else: print(", "setup, please run as root interactively.\".format( os.path.basename(__file__) ) ) sys.exit(1)", "EXISTS( SELECT 1 FROM lastseenhosts ls JOIN ( SELECT hostkey,", "settings for normal operation. You will be prompted for superhub", "%s need processing on superhub so skipping post processing\", feeder_hostname,", "setup\" ) group = parser.add_mutually_exclusive_group() group.add_argument(\"--debug\", action=\"store_true\") group.add_argument(\"--inform\", action=\"store_true\") args", "ls.hostkey = hosts.hostkey AND ls.hub_id != '{0}' )\"\"\".format( feeder_hubid )", "= {} AND hostkey IN ({});\\n\".format( table, feeder_hubid, \",\".join([\"'{}'\".format(hk) for", "random import subprocess import sys from getpass import getpass from", "augments (def.json). This enables policy in cfe_internal/enterprise/federation/federation.cf ```json { \"classes\":", "on\\n\" delete_sql = \"\" post_hostkeys = [] for row in", "\"role\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup role\", \"includeContext\":", "``` After the policy has run on superhub and feeders,", "credentials are incorrect, try again\") sys.exit(1) else: print( \"Check the", "permissions on role fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user on %s\",", "the file at DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything. 
# api calls", "run on superhub and feeders, run this script to setup", "logger.info(\"Creating fr_distributed_cleanup role on %s\", feeder_hostname) response = feeder_api.put( \"role\",", "interactive_setup() else: print( \"{} requires manual setup, please run as", "taken.\", feeder_hostname) continue logger.debug( \"%s host(s) to delete on feeder", "api.query(sql) if response[\"status\"] != 200: print(\"Unable to query for feeder", "response ) ) sys.exit(1) logger.debug(\"Hosts to delete on %s are", "cat all together into hubs.cert CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, \"hubs.cert\") #", "Skipping\".format(feeder_hostname) ) continue sql = \"SELECT hub_id FROM __hubs WHERE", "one feeder hub before running this script.\" ) for hub", "\"on\": continue feeder_hostkey = hub[\"hostkey\"] feeder_hostname = hub[\"ui_name\"] feeder_api =", "on superhub. {}\".format( response ) ) sys.exit(1) for hub in", "fr_distributed_cleanup by setting a class in augments (def.json). This enables", "localhost response = api.fr_hub_status() if not ( response[\"status\"] == 200", "and configured is True. {}\".format( status ) ) sys.exit(1) feederResponse", "200 and response[\"role\"] == \"superhub\" and response[\"configured\"] ): print( \"{}", "other feeder hubs. To be run on Federated Reporting superhub", "please run as root interactively.\".format( os.path.basename(__file__) ) ) sys.exit(1) fr_distributed_cleanup_password", "{}, stderr:\\n{}\\nstdout:\\n{}\".format( proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\") ) ) sys.exit(1) logger.debug( \"Ran", "to other feeder hubs. 
To be run on Federated Reporting", "one feeder hub before running this script.\" ) sys.exit(1) email", "role fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user on superhub\") response =", "[0], and one column, [0], in rows value feeder_hubid =", "= \"SELECT hub_id FROM __hubs WHERE hostkey = '{}'\".format(feeder_hostkey) response", "401: print(\"admin credentials are incorrect, try again\") sys.exit(1) else: print(", "= api.put( \"user\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup", "role and account on all feeders and superhubs with proper", "run as root interactively.\".format( os.path.basename(__file__) ) ) sys.exit(1) fr_distributed_cleanup_password =", "+= ( \"ON CONFLICT (hostkey,hub_id) DO UPDATE SET deleted =", "echo $WORKDIR; echo $CFE_FR_TABLES\".format(config_sh_path) with subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True, executable=\"/bin/bash\"", "response[\"status\"] not in [202, 404]: logger.warning( \"Delete %s on feeder", "returncode was %s, stderr:\\n%s\\nstdout:\\n%s\", proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\"), ) if len(hosts_to_delete)", "on superhub and feeders, run this script to setup fr_distributed_cleanup", "without having to # specify hub_id in queries post_sql =", "feeder hub before running this script.\" ) for hub in", "feederResponse[\"hubs\"]: print( \"No attached feeders. Please attach at least one", "!= 200: print(\"Unable to query for feeder hub_id. Response was", "not in [202, 404]: logger.warning( \"Delete %s on feeder %s", "\"email\": \"{}\".format(email), \"password\": \"{}\".format(<PASSWORD>), \"roles\": [\"fr_distributed_cleanup\"], }, ) if response[\"status\"]", "feeder %s need processing on superhub so skipping post processing\",", "host api delete process by setting current_timestamp in deleted column", "normal operation. 
You will be prompted for superhub admin credentials", "import NovaApi from cfsecret import read_secret, write_secret WORKDIR = None", "hosts WHERE hub_id = '{0}' AND EXISTS( SELECT 1 FROM", "get status for feeder {}. Skipping\".format(feeder_hostname) ) continue sql =", "hubs. To be run on Federated Reporting superhub after each", "\"Problem creating fr_distributed_cleanup user on {}. {}\".format( feeder_hostname, response )", "in Federated Reporting setup\" ) group = parser.add_mutually_exclusive_group() group.add_argument(\"--debug\", action=\"store_true\")", "skipping post processing\", feeder_hostname, ) continue # simulate the host", "operation. You will be prompted for superhub admin credentials and", "selected hostkey so will take the first value. host_to_delete =", "as root\".format(os.path.basename(__file__))) parser = argparse.ArgumentParser( description=\"Clean up migrating clients in", "delete_sql + clear_sql logger.debug(\"Running SQL:\\n%s\", post_sql) with subprocess.Popen( [\"/var/cfengine/bin/psql\", \"cfdb\"],", "to # specify hub_id in queries post_sql = \"set schema", "!= 201: print(\"Unable to set RBAC permissions on role fr_distributed_cleanup\")", "'{0}' )\"\"\".format( feeder_hubid ) response = api.query(sql) if response[\"status\"] !=", "one column, [0], in rows value feeder_hubid = response[\"rows\"][0][0] sql", "from all federated tables similar to the clear_hosts_references() pgplsql function.", "response[\"rows\"][0][0] sql = \"\"\" SELECT DISTINCT hosts.hostkey FROM hosts WHERE", "on Federated Reporting superhub after each import of feeder data.", "We only selected hostkey so will take the first value.", "schemas # change to feeder schema to make deletions easier/more", "WORKDIR and CFE_FR_TABLES from config.sh config_sh_path = os.path.join(os.path.dirname(__file__), \"config.sh\") cmd", ") sys.exit(1) feederResponse = api.fr_remote_hubs() if not feederResponse[\"hubs\"]: print( \"No", ") feeder_hostname = hub[\"ui_name\"] feeder_api = 
NovaApi( api_user=\"admin\", api_password=<PASSWORD>, cert_path=CERT_PATH,", "permissions on role fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user on superhub\")", "logger.warning( \"Delete %s on feeder %s got %s status code\",", "FROM {} WHERE hub_id = {} AND hostkey IN ({});\\n\".format(", "= '{0}' AND EXISTS( SELECT 1 FROM lastseenhosts ls JOIN", "# specify hub_id in queries post_sql = \"set schema 'hub_{}';\\n\".format(feeder_hubid)", "feeder_hostname, ) continue # simulate the host api delete process", "this host is a superhub status = api.fr_hub_status() if (", "= \"\"\" SELECT DISTINCT hosts.hostkey FROM hosts WHERE hub_id =", "to delete. No actions taken.\", feeder_hostname) continue logger.debug( \"%s host(s)", "deleted on the feeder post_hostkeys.append(host_to_delete) if len(post_hostkeys) == 0: logger.info(", ") ) sys.exit(1) fr_distributed_cleanup_password = read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH) api = NovaApi( api_user=\"fr_distributed_cleanup\",", "\"description\": \"fr_distributed_cleanup Federated Host Cleanup role\", \"includeContext\": \"cfengine\", }, )", "# api calls will overwrite fr_distributed_cleanup user and role on", "fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user on %s\", feeder_hostname) response =", "api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>_cleanup_password ) # defaults to localhost response = api.fr_hub_status()", "\"ERROR\" in errs.decode(\"utf-8\"): print( \"Problem running post processing SQL. returncode", "if hub[\"role\"] != \"feeder\" or hub[\"target_state\"] != \"on\": continue feeder_hostkey", "stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as proc: logger.debug(\"got a proc, sending sql...\")", "for feeder hub_id. 
Response was {}\".format(response)) continue # query API", "= input(\"Enter email for fr_distributed_cleanup accounts: \") logger.info(\"Creating fr_distributed_cleanup role", "interactively.\".format( os.path.basename(__file__) ) ) sys.exit(1) fr_distributed_cleanup_password = read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH) api =", "to remove hosts which have migrated to other feeder hubs.", "echo $CFE_FR_TABLES\".format(config_sh_path) with subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True, executable=\"/bin/bash\" ) as", "): print( \"{} can only be run on a Federated", "make deletions easier/more direct without having to # specify hub_id", "= api.query(sql) if response[\"status\"] != 200: print( \"Unable to query", "action=\"store_true\") group.add_argument(\"--inform\", action=\"store_true\") args = parser.parse_args() global logger logger =", "= getpass( prompt=\"Enter admin password for superhub {}: \".format(platform.node()) )", "202 Accepted and 404 Not Found are acceptable responses if", "= proc.communicate(input=post_sql.encode()) if \"ERROR\" in errs.decode(\"utf-8\"): print( \"Problem running post", "hub_id = '{0}' AND EXISTS( SELECT 1 FROM lastseenhosts ls", "in cfe_internal/enterprise/federation/federation.cf ```json { \"classes\": { \"cfengine_mp_enable_fr_distributed_cleanup\": [ \"any::\" ]", "FROM hosts WHERE hub_id = '{0}' AND EXISTS( SELECT 1", "table in lines[1].decode().split()] if not WORKDIR or not CFE_FR_TABLES: print(\"Unable", "\"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup user\", \"email\": \"{}\".format(email),", "404 Not Found are acceptable responses if response[\"status\"] not in", "enables policy in cfe_internal/enterprise/federation/federation.cf ```json { \"classes\": { \"cfengine_mp_enable_fr_distributed_cleanup\": [", "No hosts to delete. 
No actions taken.\", feeder_hostname) continue logger.debug(", ") ) post_sql += delete_sql + clear_sql logger.debug(\"Running SQL:\\n%s\", post_sql)", "platform import string import random import subprocess import sys from", "for fr_distributed_cleanup accounts: \") logger.info(\"Creating fr_distributed_cleanup role on superhub...\") response", "sys.exit(1) response = feeder_api.put_role_permissions( \"fr_distributed_cleanup\", [\"host.delete\"] ) if response[\"status\"] !=", "return one row, [0], and one column, [0], in rows", "on superhub so skipping post processing\", feeder_hostname, ) continue #", "errs.decode(\"utf-8\"), outs.decode(\"utf-8\"), ) if len(hosts_to_delete) != 0: logger.info( \"%s: %s", "cmd, stdout=subprocess.PIPE, shell=True, executable=\"/bin/bash\" ) as proc: lines = proc.stdout.readlines()", "having to # specify hub_id in queries post_sql = \"set", ") sys.exit(1) email = input(\"Enter email for fr_distributed_cleanup accounts: \")", "post_sql) with subprocess.Popen( [\"/var/cfengine/bin/psql\", \"cfdb\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as", "parser.add_mutually_exclusive_group() group.add_argument(\"--debug\", action=\"store_true\") group.add_argument(\"--inform\", action=\"store_true\") args = parser.parse_args() global logger", "accounts: \") logger.info(\"Creating fr_distributed_cleanup role on superhub...\") response = api.put(", "not response[\"hubs\"]: print( \"No attached feeders. Please attach at least", "!= 200: print( \"Unable to get status for feeder {}.", "== \"superhub\" and response[\"configured\"] ): print( \"{} can only be", "\"{} requires manual setup, please run as root interactively.\".format( os.path.basename(__file__)", "lines[0].decode().strip() CFE_FR_TABLES = [table.strip() for table in lines[1].decode().split()] if not", "on superhub. {}\".format( response ) ) sys.exit(1) response = feeder_api.put_role_permissions(", "take the first value. 
host_to_delete = row[0] response = feeder_api.delete(\"host\",", "WHERE hub_id = '{0}' AND EXISTS( SELECT 1 FROM lastseenhosts", "}, ) if response[\"status\"] != 201: print( \"Problem creating fr_distributed_cleanup", "== \"superhub\" and status[\"configured\"] ): logger.debug(\"This host is a superhub", "ensure role is superhub and configured is True. {}\".format( status", "= read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH) api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>_cleanup_password ) # defaults", "= None CFE_FR_TABLES = None # get WORKDIR and CFE_FR_TABLES", "# get WORKDIR and CFE_FR_TABLES from config.sh config_sh_path = os.path.join(os.path.dirname(__file__),", ") ) sys.exit(1) logger.debug( \"Ran post processing SQL. returncode was", ") sys.exit(1) write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password) def main(): if not os.geteuid() ==", "sys.exit(1) feederResponse = api.fr_remote_hubs() if not feederResponse[\"hubs\"]: print( \"No attached", "from config.sh config_sh_path = os.path.join(os.path.dirname(__file__), \"config.sh\") cmd = \"source {};", "\"__main__\": main() else: raise ImportError(\"fr_distributed_cleanup.py must only be used as", "CFE_FR_TABLES from config.sh config_sh_path = os.path.join(os.path.dirname(__file__), \"config.sh\") cmd = \"source", "hostname=feeder_hostname, ) response = feeder_api.status() if response[\"status\"] != 200: print(", "!= \"feeder\" or hub[\"target_state\"] != \"on\": continue feeder_hostkey = hub[\"hostkey\"]", "Not Found are acceptable responses if response[\"status\"] not in [202,", "before running this script.\" ) for hub in response[\"hubs\"]: if", "%s on feeder %s got %s status code\", host_to_delete, feeder_hostname,", "only selected hostkey so will take the first value. 
host_to_delete", "INTO __hosts (hostkey,deleted) VALUES\" for hostkey in post_hostkeys: delete_sql +=", "feeder_hubid = response[\"rows\"][0][0] sql = \"\"\" SELECT DISTINCT hosts.hostkey FROM", "= [table.strip() for table in lines[1].decode().split()] if not WORKDIR or", "logger.debug(\"This host is a superhub configured for Federated Reporting.\") else:", "role on superhub and all feeders. DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, \"state/fr_distributed_cleanup.cfsecret\")", ") as proc: lines = proc.stdout.readlines() WORKDIR = lines[0].decode().strip() CFE_FR_TABLES", "easier/more direct without having to # specify hub_id in queries", "role on superhub. {}\".format( response ) ) sys.exit(1) response =", "table in CFE_FR_TABLES: # special case of partitioning, operating on", "# The query API returns rows which are lists of", "hub configured to be superhub\".format( os.path.basename(__file__) ) ) sys.exit(1) response", "0: logger.info(\"%s: No hosts to delete. No actions taken.\", feeder_hostname)", "table will work if \"__promiselog_*\" in table: table = \"__promiselog\"", "\"description\": \"fr_distributed_cleanup Federated Host Cleanup user\", \"email\": \"{}\".format(email), \"password\": <PASSWORD>(fr_<PASSWORD>_cleanup_password),", "running this script.\" ) for hub in response[\"hubs\"]: if hub[\"role\"]", "len(post_hostkeys) == 0: logger.info( \"No hosts on feeder %s need", "+= \"('{}', CURRENT_TIMESTAMP) \".format(hostkey) delete_sql += ( \"ON CONFLICT (hostkey,hub_id)", "= \"source {}; echo $WORKDIR; echo $CFE_FR_TABLES\".format(config_sh_path) with subprocess.Popen( cmd,", "fr_distributed_cleanup user on {}. {}\".format( feeder_hostname, response ) ) sys.exit(1)", "for deletion candidates. 
Response was {}\".format( response ) ) sys.exit(1)", "on feeder %s got %s status code\", host_to_delete, feeder_hostname, response[\"status\"],", "SQL:\\n%s\", post_sql) with subprocess.Popen( [\"/var/cfengine/bin/psql\", \"cfdb\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, )", "NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) response = feeder_api.status() if", "script to remove hosts which have migrated to other feeder", "it was successfully deleted on the feeder post_hostkeys.append(host_to_delete) if len(post_hostkeys)", "feeder %s got %s status code\", host_to_delete, feeder_hostname, response[\"status\"], )", "and superhubs with proper RBAC settings for normal operation. You", "STOP on\\n\" delete_sql = \"\" post_hostkeys = [] for row", "read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH) api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>_cleanup_password ) # defaults to", "The query API returns rows which are lists of column", "on role fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user on %s\", feeder_hostname)", "if \"__promiselog_*\" in table: table = \"__promiselog\" clear_sql += (", "\"__promiselog_*\" in table: table = \"__promiselog\" clear_sql += ( \"DELETE", "column, [0], in rows value feeder_hubid = response[\"rows\"][0][0] sql =", "feeder_api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) response =", "ON_ERROR STOP on\\n\" delete_sql = \"\" post_hostkeys = [] for", "{}\".format( response ) ) sys.exit(1) logger.debug(\"Hosts to delete on %s", "to ensure role is superhub and configured is True. 
{}\".format(", "response[\"hubs\"]: if hub[\"role\"] != \"feeder\" or hub[\"target_state\"] != \"on\": continue", "\"INSERT INTO __hosts (hostkey,deleted) VALUES\" for hostkey in post_hostkeys: delete_sql", "proc: logger.debug(\"got a proc, sending sql...\") outs, errs = proc.communicate(input=post_sql.encode())", "subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True, executable=\"/bin/bash\" ) as proc: lines =", "__name__ == \"__main__\": main() else: raise ImportError(\"fr_distributed_cleanup.py must only be", "on all feeders and superhubs with proper RBAC settings for", "import sys from getpass import getpass from nova_api import NovaApi", "api_user=\"admin\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) logger.info(\"Creating fr_distributed_cleanup role on %s\",", "feeder_hostname = hub[\"ui_name\"] feeder_api = NovaApi( api_user=\"admin\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname,", "feeder_hostkey = hub[\"hostkey\"] feeder_hostname = hub[\"ui_name\"] feeder_api = NovaApi( api_user=\"fr_distributed_cleanup\",", "a script to remove hosts which have migrated to other", "= parser.add_mutually_exclusive_group() group.add_argument(\"--debug\", action=\"store_true\") group.add_argument(\"--inform\", action=\"store_true\") args = parser.parse_args() global", "\"set schema 'hub_{}';\\n\".format(feeder_hubid) post_sql += \"\\\\set ON_ERROR STOP on\\n\" delete_sql", "WORKDIR and CFE_FR_TABLES values from config.sh\") sys.exit(1) # Primary dir", "setting a class in augments (def.json). This enables policy in", "logger.debug(\"Hosts to delete on %s are %s\", hub[\"ui_name\"], response[\"rows\"]) hosts_to_delete", "user on {}. {}\".format( feeder_hostname, response ) ) sys.exit(1) write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH,", "logger.info(\"%s: No hosts to delete. 
No actions taken.\", feeder_hostname) continue", "on role fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user on superhub\") response", "or not CFE_FR_TABLES: print(\"Unable to get WORKDIR and CFE_FR_TABLES values", "RBAC permissions on role fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user on", "feeder_api = NovaApi( api_user=\"admin\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) logger.info(\"Creating fr_distributed_cleanup", "\"feeder\" or hub[\"target_state\"] != \"on\": continue feeder_hostkey = hub[\"hostkey\"] feeder_hostname", "response[\"hubs\"]: print( \"No attached feeders. Please attach at least one", "query for deletion candidates. Response was {}\".format( response ) )", "NovaApi(api_user=\"admin\", api_password=<PASSWORD>) # first confirm that this host is a", "group.add_argument(\"--debug\", action=\"store_true\") group.add_argument(\"--inform\", action=\"store_true\") args = parser.parse_args() global logger logger", "current_timestamp in deleted column # and delete from all federated", "'hub_{}';\\n\".format(feeder_hubid) post_sql += \"\\\\set ON_ERROR STOP on\\n\" delete_sql = \"\"", "api_password=<PASSWORD>) # first confirm that this host is a superhub", "hostname=feeder_hostname, ) logger.info(\"Creating fr_distributed_cleanup role on %s\", feeder_hostname) response =", "credentials for {} at {}: \".format( hub[\"ui_name\"], hub[\"api_url\"] ) )", "status to ensure role is superhub and configured is True.", "CFE_FR_TABLES = [table.strip() for table in lines[1].decode().split()] if not WORKDIR", "!= 201: print( \"Problem creating fr_distributed_cleanup user on {}. 
{}\".format(", "hub[\"role\"] != \"feeder\" or hub[\"target_state\"] != \"on\": continue feeder_hostkey =", "\"('{}', CURRENT_TIMESTAMP) \".format(hostkey) delete_sql += ( \"ON CONFLICT (hostkey,hub_id) DO", "in table: table = \"__promiselog\" clear_sql += ( \"DELETE FROM", "Cleanup user\", \"email\": \"{}\".format(email), \"password\": \"{}\".format(<PASSWORD>), \"roles\": [\"fr_distributed_cleanup\"], }, )", "fr_distributed_cleanup user on superhub. {}\".format( response ) ) sys.exit(1) for", "and all feeders. DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, \"state/fr_distributed_cleanup.cfsecret\") def interactive_setup(): fr_distributed_cleanup_password", "deleted = excluded.deleted;\\n\" ) clear_sql = \"set schema 'public';\\n\" for", "confirm that this host is a superhub status = api.fr_hub_status()", "response = feeder_api.put_role_permissions( \"fr_distributed_cleanup\", [\"host.delete\"] ) if response[\"status\"] != 201:", "\".format( hub[\"ui_name\"], hub[\"api_url\"] ) ) feeder_hostname = hub[\"ui_name\"] feeder_api =", "\"/opt/cfengine/federation/cftransport/distributed_cleanup\" # collect cert files from /var/cfengine/httpd/ssl/certs on # superhub", "fr_distributed_cleanup user on superhub\") response = api.put( \"user\", \"fr_distributed_cleanup\", {", "on a Federated Reporting hub configured to be superhub\".format( os.path.basename(__file__)", ")\"\"\".format( feeder_hubid ) response = api.query(sql) if response[\"status\"] != 200:", "superhub configured for Federated Reporting.\") else: if status[\"status\"] == 401:", "superhub. {}\".format( response ) ) sys.exit(1) for hub in feederResponse[\"hubs\"]:", "hostkey so will take the first value. host_to_delete = row[0]", "outs.decode(\"utf-8\") ) ) sys.exit(1) logger.debug( \"Ran post processing SQL. returncode", "on superhub. 
{}\".format( response ) ) sys.exit(1) response = api.put_role_permissions(", "# change to feeder schema to make deletions easier/more direct", "No actions taken.\", feeder_hostname) continue logger.debug( \"%s host(s) to delete", "federated tables similar to the clear_hosts_references() pgplsql function. post_sql +=", "value feeder_hubid = response[\"rows\"][0][0] sql = \"\"\" SELECT DISTINCT hosts.hostkey", "sending sql...\") outs, errs = proc.communicate(input=post_sql.encode()) if \"ERROR\" in errs.decode(\"utf-8\"):", "import logging import os import platform import string import random", "in CFE_FR_TABLES: # special case of partitioning, operating on parent", "the first value. host_to_delete = row[0] response = feeder_api.delete(\"host\", host_to_delete)", "be run as root\".format(os.path.basename(__file__))) parser = argparse.ArgumentParser( description=\"Clean up migrating", "\"set schema 'public';\\n\" for table in CFE_FR_TABLES: # special case", "errs.decode(\"utf-8\"), outs.decode(\"utf-8\") ) ) sys.exit(1) logger.debug( \"Ran post processing SQL.", "to get WORKDIR and CFE_FR_TABLES values from config.sh\") sys.exit(1) #", "sys.exit(1) write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password) def main(): if not os.geteuid() == 0:", "sys.exit(1) logger.debug(\"Hosts to delete on %s are %s\", hub[\"ui_name\"], response[\"rows\"])", "= NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) response = feeder_api.status()", "global logger logger = logging.getLogger(\"fr_distributed_cleanup\") ch = logging.StreamHandler() if args.debug:", "policy has run on superhub and feeders, run this script", "creating fr_distributed_cleanup user on {}. 
{}\".format( feeder_hostname, response ) )", "hub[\"hostkey\"] feeder_hostname = hub[\"ui_name\"] feeder_api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>, cert_path=CERT_PATH,", "os.path.basename(__file__) ) ) sys.exit(1) fr_distributed_cleanup_password = read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH) api = NovaApi(", "= feeder_api.put( \"user\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup", "newest.newesttimestamp AND ls.hostkey = hosts.hostkey AND ls.hub_id != '{0}' )\"\"\".format(", "AND hostkey IN ({});\\n\".format( table, feeder_hubid, \",\".join([\"'{}'\".format(hk) for hk in", "NovaApi from cfsecret import read_secret, write_secret WORKDIR = None CFE_FR_TABLES", "api.fr_remote_hubs() if not feederResponse[\"hubs\"]: print( \"No attached feeders. Please attach", "requires manual setup, please run as root interactively.\".format( os.path.basename(__file__) )", "\"any::\" ] } } ``` After the policy has run", "BY hostkey ) as newest ON ls.hostkey = newest.hostkey AND", "api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) response = feeder_api.status() if response[\"status\"]", "role on %s\", feeder_hostname) response = feeder_api.put( \"role\", \"fr_distributed_cleanup\", {", "in rows value feeder_hubid = response[\"rows\"][0][0] sql = \"\"\" SELECT", "user on %s\", feeder_hostname) response = feeder_api.put( \"user\", \"fr_distributed_cleanup\", {", "hub[\"ui_name\"] ) # build up a post-loop SQL statement to", "): logger.debug(\"This host is a superhub configured for Federated Reporting.\")", "direct without having to # specify hub_id in queries post_sql", "user and role on superhub and all feeders. 
DISTRIBUTED_CLEANUP_SECRET_PATH =", "= api.put_role_permissions( \"fr_distributed_cleanup\", [\"query.post\", \"remoteHub.list\", \"hubStatus.get\"] ) if response[\"status\"] !=", "logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) if args.inform: logger.setLevel(logging.INFO) ch.setLevel(logging.INFO) logger.addHandler(ch) if not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH):", "hub in feederResponse[\"hubs\"]: feeder_credentials = getpass( prompt=\"Enter admin credentials for", "continue # simulate the host api delete process by setting", "api calls will overwrite fr_distributed_cleanup user and role on superhub", "as newesttimestamp FROM lastseenhosts WHERE lastseendirection = 'INCOMING' GROUP BY", "argparse.ArgumentParser( description=\"Clean up migrating clients in Federated Reporting setup\" )", "CFE_FR_TABLES: # special case of partitioning, operating on parent table", "post_hostkeys = [] for row in hosts_to_delete: # The query", "Accepted and 404 Not Found are acceptable responses if response[\"status\"]", "= feeder_api.put_role_permissions( \"fr_distributed_cleanup\", [\"host.delete\"] ) if response[\"status\"] != 201: print(\"Unable", "feeder hub before running this script.\" ) sys.exit(1) email =", "response = feeder_api.status() if response[\"status\"] != 200: print( \"Unable to", "{}. 
{}\".format( feeder_hostname, response ) ) sys.exit(1) write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password) def", "response ) ) sys.exit(1) response = feeder_api.put_role_permissions( \"fr_distributed_cleanup\", [\"host.delete\"] )", "row[0] response = feeder_api.delete(\"host\", host_to_delete) # both 202 Accepted and", "response = api.fr_remote_hubs() if not response[\"hubs\"]: print( \"No attached feeders.", "$WORKDIR; echo $CFE_FR_TABLES\".format(config_sh_path) with subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True, executable=\"/bin/bash\" )", "run this script to setup fr_distributed_cleanup role and account on", "response = api.put_role_permissions( \"fr_distributed_cleanup\", [\"query.post\", \"remoteHub.list\", \"hubStatus.get\"] ) if response[\"status\"]", "schema 'hub_{}';\\n\".format(feeder_hubid) post_sql += \"\\\\set ON_ERROR STOP on\\n\" delete_sql =", "\"email\": \"{}\".format(email), \"password\": <PASSWORD>(fr_<PASSWORD>_cleanup_password), \"roles\": [\"fr_distributed_cleanup\"], }, ) if response[\"status\"]", "continue # query API should return one row, [0], and", "\"cfengine\", }, ) if response[\"status\"] != 201: print( \"Problem creating", "not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH): if sys.stdout.isatty(): interactive_setup() else: print( \"{} requires manual", "place various needed files DISTRIBUTED_CLEANUP_DIR = \"/opt/cfengine/federation/cftransport/distributed_cleanup\" # collect cert", "!= \"on\": continue feeder_hostkey = hub[\"hostkey\"] feeder_hostname = hub[\"ui_name\"] feeder_api", "print( \"Problem creating fr_distributed_cleanup role on superhub. {}\".format( response )", "} } ``` After the policy has run on superhub", "%s\", len(hosts_to_delete), hub[\"ui_name\"] ) # build up a post-loop SQL", "API returns rows which are lists of column values. #", "DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything. 
# api calls will overwrite fr_distributed_cleanup", "lines = proc.stdout.readlines() WORKDIR = lines[0].decode().strip() CFE_FR_TABLES = [table.strip() for", "deleted column # and delete from all federated tables similar", "interactive_setup(): fr_distributed_cleanup_password = \"\".join(random.choices(string.printable, k=20)) admin_pass = getpass( prompt=\"Enter admin", "CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, \"hubs.cert\") # Note: remove the file at", "in queries post_sql = \"set schema 'hub_{}';\\n\".format(feeder_hubid) post_sql += \"\\\\set", "response[\"status\"], ) continue # only add the host_to_delete if it", "import of feeder data. First, to setup, enable fr_distributed_cleanup by", "getpass from nova_api import NovaApi from cfsecret import read_secret, write_secret", "was {}\".format( response ) ) sys.exit(1) logger.debug(\"Hosts to delete on", "in feederResponse[\"hubs\"]: feeder_credentials = getpass( prompt=\"Enter admin credentials for {}", "\"\"\" fr_distributed_cleanup.py - a script to remove hosts which have", "dir in which to place various needed files DISTRIBUTED_CLEANUP_DIR =", "is a superhub configured for Federated Reporting.\") else: if status[\"status\"]", "200: print(\"Unable to query for feeder hub_id. Response was {}\".format(response))", "stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as proc: logger.debug(\"got a proc, sending", "creating fr_distributed_cleanup user on superhub. {}\".format( response ) ) sys.exit(1)", "and cat all together into hubs.cert CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, \"hubs.cert\")", "creating fr_distributed_cleanup role on superhub. 
{}\".format( response ) ) sys.exit(1)", "'public';\\n\" for table in CFE_FR_TABLES: # special case of partitioning,", "FROM __hubs WHERE hostkey = '{}'\".format(feeder_hostkey) response = api.query(sql) if", "sys.exit(1) # Primary dir in which to place various needed", "deletions easier/more direct without having to # specify hub_id in", "getpass import getpass from nova_api import NovaApi from cfsecret import", "+= delete_sql + clear_sql logger.debug(\"Running SQL:\\n%s\", post_sql) with subprocess.Popen( [\"/var/cfengine/bin/psql\",", "sys from getpass import getpass from nova_api import NovaApi from", "feeder_hostname) response = feeder_api.put( \"role\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated", "for superhub {}: \".format(platform.node()) ) api = NovaApi(api_user=\"admin\", api_password=<PASSWORD>) #", "stderr:\\n%s\\nstdout:\\n%s\", proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\"), ) if len(hosts_to_delete) != 0: logger.info(", "response ) ) sys.exit(1) for hub in feederResponse[\"hubs\"]: feeder_credentials =", "= argparse.ArgumentParser( description=\"Clean up migrating clients in Federated Reporting setup\"", "on feeder %s need processing on superhub so skipping post", "to make deletions easier/more direct without having to # specify", "print( \"Unable to get status for feeder {}. 
Skipping\".format(feeder_hostname) )", "from config.sh\") sys.exit(1) # Primary dir in which to place", "fr_distributed_cleanup user on %s\", feeder_hostname) response = feeder_api.put( \"user\", \"fr_distributed_cleanup\",", "logger = logging.getLogger(\"fr_distributed_cleanup\") ch = logging.StreamHandler() if args.debug: logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG)", "\"cfdb\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as proc: logger.debug(\"got a proc,", "== 200 and response[\"role\"] == \"superhub\" and response[\"configured\"] ): print(", "{} WHERE hub_id = {} AND hostkey IN ({});\\n\".format( table,", "manual setup, please run as root interactively.\".format( os.path.basename(__file__) ) )", "needed files DISTRIBUTED_CLEANUP_DIR = \"/opt/cfengine/federation/cftransport/distributed_cleanup\" # collect cert files from", "clients in Federated Reporting setup\" ) group = parser.add_mutually_exclusive_group() group.add_argument(\"--debug\",", "feeder hubs. To be run on Federated Reporting superhub after", "{}; echo $WORKDIR; echo $CFE_FR_TABLES\".format(config_sh_path) with subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True,", "set RBAC permissions on role fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user", "if response[\"status\"] != 200: print(\"Unable to query for feeder hub_id.", ") group = parser.add_mutually_exclusive_group() group.add_argument(\"--debug\", action=\"store_true\") group.add_argument(\"--inform\", action=\"store_true\") args =", "feeder_hubid ) response = api.query(sql) if response[\"status\"] != 200: print(", "hub_id FROM __hubs WHERE hostkey = '{}'\".format(feeder_hostkey) response = api.query(sql)", "response[\"status\"] == 200 and response[\"role\"] == \"superhub\" and response[\"configured\"] ):", "feeders. 
DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, \"state/fr_distributed_cleanup.cfsecret\") def interactive_setup(): fr_distributed_cleanup_password = \"\".join(random.choices(string.printable,", "response = api.fr_hub_status() if not ( response[\"status\"] == 200 and", "with subprocess.Popen( [\"/var/cfengine/bin/psql\", \"cfdb\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) as proc:", "if not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH): if sys.stdout.isatty(): interactive_setup() else: print( \"{} requires", "fr_distributed_cleanup role on %s\", feeder_hostname) response = feeder_api.put( \"role\", \"fr_distributed_cleanup\",", "fr_distributed_cleanup role on superhub. {}\".format( response ) ) sys.exit(1) response", "feeders, run this script to setup fr_distributed_cleanup role and account", "for hub in feederResponse[\"hubs\"]: feeder_credentials = getpass( prompt=\"Enter admin credentials", "and feeders and cat all together into hubs.cert CERT_PATH =", "collect cert files from /var/cfengine/httpd/ssl/certs on # superhub and feeders", "```json { \"classes\": { \"cfengine_mp_enable_fr_distributed_cleanup\": [ \"any::\" ] } }", "must be run as root\".format(os.path.basename(__file__))) parser = argparse.ArgumentParser( description=\"Clean up", "outs, errs = proc.communicate(input=post_sql.encode()) if \"ERROR\" in errs.decode(\"utf-8\"): print( \"Problem", "delete process by setting current_timestamp in deleted column # and", "feeder_credentials = getpass( prompt=\"Enter admin credentials for {} at {}:", "this script.\" ) sys.exit(1) email = input(\"Enter email for fr_distributed_cleanup", "was {}\".format(response)) continue # query API should return one row,", "the host_to_delete if it was successfully deleted on the feeder", "on # superhub and feeders and cat all together into", "if response[\"status\"] not in [202, 404]: logger.warning( \"Delete %s on", "not feederResponse[\"hubs\"]: print( \"No attached feeders. 
Please attach at least", "statement to delete hosts locally from feeder schemas # change", "{ \"description\": \"fr_distributed_cleanup Federated Host Cleanup user\", \"email\": \"{}\".format(email), \"password\":", "= os.path.join(DISTRIBUTED_CLEANUP_DIR, \"hubs.cert\") # Note: remove the file at DISTRIBUTED_CLEANUP_SECRET_PATH", "Response was {}\".format( response ) ) sys.exit(1) logger.debug(\"Hosts to delete", "actions taken.\", feeder_hostname) continue logger.debug( \"%s host(s) to delete on", "only add the host_to_delete if it was successfully deleted on", "DISTINCT hosts.hostkey FROM hosts WHERE hub_id = '{0}' AND EXISTS(", "which are lists of column values. # We only selected", "and CFE_FR_TABLES from config.sh config_sh_path = os.path.join(os.path.dirname(__file__), \"config.sh\") cmd =", ") sys.exit(1) response = api.put_role_permissions( \"fr_distributed_cleanup\", [\"query.post\", \"remoteHub.list\", \"hubStatus.get\"] )", "[ \"any::\" ] } } ``` After the policy has", "so skipping post processing\", feeder_hostname, ) continue # simulate the", "DISTRIBUTED_CLEANUP_DIR = \"/opt/cfengine/federation/cftransport/distributed_cleanup\" # collect cert files from /var/cfengine/httpd/ssl/certs on", "SET deleted = excluded.deleted;\\n\" ) clear_sql = \"set schema 'public';\\n\"", "Federated Reporting superhub after each import of feeder data. First,", "tables similar to the clear_hosts_references() pgplsql function. post_sql += \"INSERT", "was successfully deleted on the feeder post_hostkeys.append(host_to_delete) if len(post_hostkeys) ==", "!= 200: print( \"Unable to query for deletion candidates. 
Response", "to set RBAC permissions on role fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating fr_distributed_cleanup", "post_sql += delete_sql + clear_sql logger.debug(\"Running SQL:\\n%s\", post_sql) with subprocess.Popen(", "= proc.stdout.readlines() WORKDIR = lines[0].decode().strip() CFE_FR_TABLES = [table.strip() for table", ") response = feeder_api.status() if response[\"status\"] != 200: print( \"Unable", ") ) sys.exit(1) response = api.put_role_permissions( \"fr_distributed_cleanup\", [\"query.post\", \"remoteHub.list\", \"hubStatus.get\"]", "CURRENT_TIMESTAMP) \".format(hostkey) delete_sql += ( \"ON CONFLICT (hostkey,hub_id) DO UPDATE", "feeder_api.put( \"role\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup role\",", "not ( response[\"status\"] == 200 and response[\"role\"] == \"superhub\" and", "hub[\"ui_name\"] feeder_api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) response", "on %s are %s\", hub[\"ui_name\"], response[\"rows\"]) hosts_to_delete = response[\"rows\"] if", "config.sh config_sh_path = os.path.join(os.path.dirname(__file__), \"config.sh\") cmd = \"source {}; echo", "in augments (def.json). 
This enables policy in cfe_internal/enterprise/federation/federation.cf ```json {", "all feeders and superhubs with proper RBAC settings for normal", "responses if response[\"status\"] not in [202, 404]: logger.warning( \"Delete %s", "To be run on Federated Reporting superhub after each import", "and feeders, run this script to setup fr_distributed_cleanup role and", "\"hubs.cert\") # Note: remove the file at DISTRIBUTED_CLEANUP_SECRET_PATH to reset", "returncode was {}, stderr:\\n{}\\nstdout:\\n{}\".format( proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\") ) ) sys.exit(1)", "host_to_delete if it was successfully deleted on the feeder post_hostkeys.append(host_to_delete)", "FROM lastseenhosts ls JOIN ( SELECT hostkey, max(lastseentimestamp) as newesttimestamp", "in post_hostkeys]), ) ) post_sql += delete_sql + clear_sql logger.debug(\"Running", "sys.exit(\"\\n{} must be run as root\".format(os.path.basename(__file__))) parser = argparse.ArgumentParser( description=\"Clean", "api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) response = feeder_api.status() if response[\"status\"] !=", "CFE_FR_TABLES: print(\"Unable to get WORKDIR and CFE_FR_TABLES values from config.sh\")", "= NovaApi(api_user=\"admin\", api_password=<PASSWORD>) # first confirm that this host is", "response[\"status\"] != 201: print( \"Problem creating fr_distributed_cleanup role on superhub.", "superhub and all feeders. DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, \"state/fr_distributed_cleanup.cfsecret\") def interactive_setup():", "None CFE_FR_TABLES = None # get WORKDIR and CFE_FR_TABLES from", "\"password\": \"{}\".format(<PASSWORD>), \"roles\": [\"fr_distributed_cleanup\"], }, ) if response[\"status\"] != 201:", "{ \"classes\": { \"cfengine_mp_enable_fr_distributed_cleanup\": [ \"any::\" ] } } ```", "- a script to remove hosts which have migrated to", "returns rows which are lists of column values. 
# We", "prompted for superhub admin credentials and then admin credentials on", "= \"__promiselog\" clear_sql += ( \"DELETE FROM {} WHERE hub_id", "# defaults to localhost response = api.fr_hub_status() if not (", ") response = api.query(sql) if response[\"status\"] != 200: print( \"Unable", "None # get WORKDIR and CFE_FR_TABLES from config.sh config_sh_path =", "script.\" ) sys.exit(1) email = input(\"Enter email for fr_distributed_cleanup accounts:", ") ) sys.exit(1) logger.debug(\"Hosts to delete on %s are %s\",", "credentials on each feeder. \"\"\" import argparse import logging import", "on superhub\") response = api.put( \"user\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup", "\"fr_distributed_cleanup Federated Host Cleanup user\", \"email\": \"{}\".format(email), \"password\": <PASSWORD>(fr_<PASSWORD>_cleanup_password), \"roles\":", "table: table = \"__promiselog\" clear_sql += ( \"DELETE FROM {}", "Cleanup role\", \"includeContext\": \"cfengine\", }, ) if response[\"status\"] != 201:", "similar to the clear_hosts_references() pgplsql function. post_sql += \"INSERT INTO", "is a superhub status = api.fr_hub_status() if ( status[\"status\"] ==", "response[\"status\"] != 201: print( \"Problem creating fr_distributed_cleanup user on superhub.", "\"Ran post processing SQL. 
returncode was %s, stderr:\\n%s\\nstdout:\\n%s\", proc.returncode, errs.decode(\"utf-8\"),", "parser = argparse.ArgumentParser( description=\"Clean up migrating clients in Federated Reporting", "write_secret WORKDIR = None CFE_FR_TABLES = None # get WORKDIR", "hub[\"ui_name\"] feeder_api = NovaApi( api_user=\"admin\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) logger.info(\"Creating", "hub[\"ui_name\"], response[\"rows\"]) hosts_to_delete = response[\"rows\"] if len(hosts_to_delete) == 0: logger.info(\"%s:", "deletions processed\", hub[\"ui_name\"], len(hosts_to_delete), ) if __name__ == \"__main__\": main()", "\"superhub\" and status[\"configured\"] ): logger.debug(\"This host is a superhub configured", "(hostkey,hub_id) DO UPDATE SET deleted = excluded.deleted;\\n\" ) clear_sql =", "to query for deletion candidates. Response was {}\".format( response )", "hosts locally from feeder schemas # change to feeder schema", "# both 202 Accepted and 404 Not Found are acceptable", "host deletions processed\", hub[\"ui_name\"], len(hosts_to_delete), ) if __name__ == \"__main__\":", "and status[\"role\"] == \"superhub\" and status[\"configured\"] ): logger.debug(\"This host is", "hostkey = '{}'\".format(feeder_hostkey) response = api.query(sql) if response[\"status\"] != 200:", "admin credentials and then admin credentials on each feeder. 
\"\"\"", "\"fr_distributed_cleanup Federated Host Cleanup user\", \"email\": \"{}\".format(email), \"password\": \"{}\".format(<PASSWORD>), \"roles\":", "feeder_hostname = hub[\"ui_name\"] feeder_api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname,", "at least one feeder hub before running this script.\" )", "'INCOMING' GROUP BY hostkey ) as newest ON ls.hostkey =", "\"\"\" import argparse import logging import os import platform import", "os.path.join(os.path.dirname(__file__), \"config.sh\") cmd = \"source {}; echo $WORKDIR; echo $CFE_FR_TABLES\".format(config_sh_path)", "if response[\"status\"] != 201: print(\"Unable to set RBAC permissions on", "# simulate the host api delete process by setting current_timestamp", "at DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything. # api calls will overwrite", "hosts_to_delete = response[\"rows\"] if len(hosts_to_delete) == 0: logger.info(\"%s: No hosts", "password for superhub {}: \".format(platform.node()) ) api = NovaApi(api_user=\"admin\", api_password=<PASSWORD>)", "outs.decode(\"utf-8\"), ) if len(hosts_to_delete) != 0: logger.info( \"%s: %s host", "Reporting setup\" ) group = parser.add_mutually_exclusive_group() group.add_argument(\"--debug\", action=\"store_true\") group.add_argument(\"--inform\", action=\"store_true\")", "200: print( \"Unable to get status for feeder {}. Skipping\".format(feeder_hostname)", "admin_pass = getpass( prompt=\"Enter admin password for superhub {}: \".format(platform.node())", "fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user on superhub\") response = api.put(", "] } } ``` After the policy has run on", "response[\"status\"] != 201: print(\"Unable to set RBAC permissions on role", "if not feederResponse[\"hubs\"]: print( \"No attached feeders. 
Please attach at", "sys.exit(1) fr_distributed_cleanup_password = read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH) api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>_cleanup_password )", "root interactively.\".format( os.path.basename(__file__) ) ) sys.exit(1) fr_distributed_cleanup_password = read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH) api", "feeder {}. Skipping\".format(feeder_hostname) ) continue sql = \"SELECT hub_id FROM", "superhub and feeders, run this script to setup fr_distributed_cleanup role", "to delete hosts locally from feeder schemas # change to", "incorrect, try again\") sys.exit(1) else: print( \"Check the status to", "again\") sys.exit(1) else: print( \"Check the status to ensure role", "in response[\"hubs\"]: if hub[\"role\"] != \"feeder\" or hub[\"target_state\"] != \"on\":", "to feeder schema to make deletions easier/more direct without having", "for table in lines[1].decode().split()] if not WORKDIR or not CFE_FR_TABLES:", "table = \"__promiselog\" clear_sql += ( \"DELETE FROM {} WHERE", "candidates. 
Response was {}\".format( response ) ) sys.exit(1) logger.debug(\"Hosts to", "fr_distributed_cleanup_password = \"\".join(random.choices(string.printable, k=20)) admin_pass = getpass( prompt=\"Enter admin password", "( SELECT hostkey, max(lastseentimestamp) as newesttimestamp FROM lastseenhosts WHERE lastseendirection", "api.put( \"user\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup user\",", "= response[\"rows\"] if len(hosts_to_delete) == 0: logger.info(\"%s: No hosts to", "Federated Host Cleanup role\", \"includeContext\": \"cfengine\", }, ) if response[\"status\"]", "special case of partitioning, operating on parent table will work", "processing\", feeder_hostname, ) continue # simulate the host api delete", "DO UPDATE SET deleted = excluded.deleted;\\n\" ) clear_sql = \"set", "201: print(\"Unable to set RBAC permissions on role fr_distributed_cleanup\") sys.exit(1)", "logger logger = logging.getLogger(\"fr_distributed_cleanup\") ch = logging.StreamHandler() if args.debug: logger.setLevel(logging.DEBUG)", "lastseenhosts WHERE lastseendirection = 'INCOMING' GROUP BY hostkey ) as", "and response[\"role\"] == \"superhub\" and response[\"configured\"] ): print( \"{} can", "clear_sql += ( \"DELETE FROM {} WHERE hub_id = {}", "run on Federated Reporting superhub after each import of feeder", "processing SQL. returncode was {}, stderr:\\n{}\\nstdout:\\n{}\".format( proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\") )", "def main(): if not os.geteuid() == 0: sys.exit(\"\\n{} must be", "after each import of feeder data. First, to setup, enable", "print( \"Problem creating fr_distributed_cleanup user on superhub. 
{}\".format( response )", "work if \"__promiselog_*\" in table: table = \"__promiselog\" clear_sql +=", "Please attach at least one feeder hub before running this", "on superhub...\") response = api.put( \"role\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup", "from feeder schemas # change to feeder schema to make", "feeder_hubid, \",\".join([\"'{}'\".format(hk) for hk in post_hostkeys]), ) ) post_sql +=", "query API returns rows which are lists of column values.", "print( \"{} can only be run on a Federated Reporting", "{} at {}: \".format( hub[\"ui_name\"], hub[\"api_url\"] ) ) feeder_hostname =", "feeder_api.status() if response[\"status\"] != 200: print( \"Unable to get status", "to reset everything. # api calls will overwrite fr_distributed_cleanup user", "role on superhub...\") response = api.put( \"role\", \"fr_distributed_cleanup\", { \"description\":", "superhub and feeders and cat all together into hubs.cert CERT_PATH", "feeder_hostname, response ) ) sys.exit(1) write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password) def main(): if", "print( \"{} requires manual setup, please run as root interactively.\".format(", "files DISTRIBUTED_CLEANUP_DIR = \"/opt/cfengine/federation/cftransport/distributed_cleanup\" # collect cert files from /var/cfengine/httpd/ssl/certs", "write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password) def main(): if not os.geteuid() == 0: sys.exit(\"\\n{}", "got %s status code\", host_to_delete, feeder_hostname, response[\"status\"], ) continue #", "files from /var/cfengine/httpd/ssl/certs on # superhub and feeders and cat", "if len(post_hostkeys) == 0: logger.info( \"No hosts on feeder %s", "hostkey, max(lastseentimestamp) as newesttimestamp FROM lastseenhosts WHERE lastseendirection = 'INCOMING'", "hk in post_hostkeys]), ) ) post_sql += delete_sql + clear_sql", "response ) ) sys.exit(1) write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password) 
def main(): if not", "run on a Federated Reporting hub configured to be superhub\".format(", "a superhub configured for Federated Reporting.\") else: if status[\"status\"] ==", "\"{}\".format(email), \"password\": \"{}\".format(<PASSWORD>), \"roles\": [\"fr_distributed_cleanup\"], }, ) if response[\"status\"] !=", "feeder schemas # change to feeder schema to make deletions", "%s are %s\", hub[\"ui_name\"], response[\"rows\"]) hosts_to_delete = response[\"rows\"] if len(hosts_to_delete)", "to delete on feeder %s\", len(hosts_to_delete), hub[\"ui_name\"] ) # build", "shell=True, executable=\"/bin/bash\" ) as proc: lines = proc.stdout.readlines() WORKDIR =", "delete hosts locally from feeder schemas # change to feeder", "if len(hosts_to_delete) != 0: logger.info( \"%s: %s host deletions processed\",", "{}\".format( feeder_hostname, response ) ) sys.exit(1) write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password) def main():", "os.geteuid() == 0: sys.exit(\"\\n{} must be run as root\".format(os.path.basename(__file__))) parser", "= [] for row in hosts_to_delete: # The query API", "if args.inform: logger.setLevel(logging.INFO) ch.setLevel(logging.INFO) logger.addHandler(ch) if not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH): if sys.stdout.isatty():", "only be run on a Federated Reporting hub configured to", "Reporting superhub after each import of feeder data. First, to", "python3 \"\"\" fr_distributed_cleanup.py - a script to remove hosts which", "logger.info( \"%s: %s host deletions processed\", hub[\"ui_name\"], len(hosts_to_delete), ) if", "feeder data. 
First, to setup, enable fr_distributed_cleanup by setting a", "\"superhub\" and response[\"configured\"] ): print( \"{} can only be run", "response[\"configured\"] ): print( \"{} can only be run on a", "response = feeder_api.put( \"role\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host", "Found are acceptable responses if response[\"status\"] not in [202, 404]:", "= None # get WORKDIR and CFE_FR_TABLES from config.sh config_sh_path", "response = feeder_api.put( \"user\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host", "[202, 404]: logger.warning( \"Delete %s on feeder %s got %s", "be run on a Federated Reporting hub configured to be", "Federated Reporting setup\" ) group = parser.add_mutually_exclusive_group() group.add_argument(\"--debug\", action=\"store_true\") group.add_argument(\"--inform\",", "and one column, [0], in rows value feeder_hubid = response[\"rows\"][0][0]", "feeder_hostname) continue logger.debug( \"%s host(s) to delete on feeder %s\",", ") for hub in response[\"hubs\"]: if hub[\"role\"] != \"feeder\" or", "parent table will work if \"__promiselog_*\" in table: table =", "script to setup fr_distributed_cleanup role and account on all feeders", "AND ls.hub_id != '{0}' )\"\"\".format( feeder_hubid ) response = api.query(sql)", "remove the file at DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything. 
# api", "{} AND hostkey IN ({});\\n\".format( table, feeder_hubid, \",\".join([\"'{}'\".format(hk) for hk", "# only add the host_to_delete if it was successfully deleted", "response = feeder_api.delete(\"host\", host_to_delete) # both 202 Accepted and 404", "feeder schema to make deletions easier/more direct without having to", "in [202, 404]: logger.warning( \"Delete %s on feeder %s got", "import getpass from nova_api import NovaApi from cfsecret import read_secret,", "sys.exit(1) logger.info(\"Creating fr_distributed_cleanup user on superhub\") response = api.put( \"user\",", "WORKDIR or not CFE_FR_TABLES: print(\"Unable to get WORKDIR and CFE_FR_TABLES", "api_password=<PASSWORD>_cleanup_password ) # defaults to localhost response = api.fr_hub_status() if", "to setup, enable fr_distributed_cleanup by setting a class in augments", "parser.parse_args() global logger logger = logging.getLogger(\"fr_distributed_cleanup\") ch = logging.StreamHandler() if", "Note: remove the file at DISTRIBUTED_CLEANUP_SECRET_PATH to reset everything. #", "superhub...\") response = api.put( \"role\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated", "main(): if not os.geteuid() == 0: sys.exit(\"\\n{} must be run", "= api.fr_hub_status() if not ( response[\"status\"] == 200 and response[\"role\"]", "superhub\") response = api.put( \"user\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated", "== \"__main__\": main() else: raise ImportError(\"fr_distributed_cleanup.py must only be used", "response[\"status\"] != 201: print( \"Problem creating fr_distributed_cleanup user on {}.", "all feeders. DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, \"state/fr_distributed_cleanup.cfsecret\") def interactive_setup(): fr_distributed_cleanup_password =", "404]: logger.warning( \"Delete %s on feeder %s got %s status", "first value. 
host_to_delete = row[0] response = feeder_api.delete(\"host\", host_to_delete) #", "After the policy has run on superhub and feeders, run", "getpass( prompt=\"Enter admin password for superhub {}: \".format(platform.node()) ) api", "len(hosts_to_delete) != 0: logger.info( \"%s: %s host deletions processed\", hub[\"ui_name\"],", "sys.exit(1) else: print( \"Check the status to ensure role is", "\".format(hostkey) delete_sql += ( \"ON CONFLICT (hostkey,hub_id) DO UPDATE SET", "os import platform import string import random import subprocess import", "ls.hostkey = newest.hostkey AND ls.lastseentimestamp = newest.newesttimestamp AND ls.hostkey =", ") sys.exit(1) logger.debug(\"Hosts to delete on %s are %s\", hub[\"ui_name\"],", "response = api.query(sql) if response[\"status\"] != 200: print( \"Unable to", "the feeder post_hostkeys.append(host_to_delete) if len(post_hostkeys) == 0: logger.info( \"No hosts", "need processing on superhub so skipping post processing\", feeder_hostname, )", "each import of feeder data. First, to setup, enable fr_distributed_cleanup", "if not ( response[\"status\"] == 200 and response[\"role\"] == \"superhub\"", "%s\", feeder_hostname) response = feeder_api.put( \"role\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup", "201: print( \"Problem creating fr_distributed_cleanup role on superhub. {}\".format( response", "len(hosts_to_delete) == 0: logger.info(\"%s: No hosts to delete. 
No actions", "stdout=subprocess.PIPE, shell=True, executable=\"/bin/bash\" ) as proc: lines = proc.stdout.readlines() WORKDIR", "status[\"status\"] == 401: print(\"admin credentials are incorrect, try again\") sys.exit(1)", "setup, enable fr_distributed_cleanup by setting a class in augments (def.json).", "hosts.hostkey AND ls.hub_id != '{0}' )\"\"\".format( feeder_hubid ) response =", "WHERE hub_id = {} AND hostkey IN ({});\\n\".format( table, feeder_hubid,", "# and delete from all federated tables similar to the", "import random import subprocess import sys from getpass import getpass", "logger.info(\"Creating fr_distributed_cleanup role on superhub...\") response = api.put( \"role\", \"fr_distributed_cleanup\",", "cert files from /var/cfengine/httpd/ssl/certs on # superhub and feeders and", "<PASSWORD>(fr_<PASSWORD>_cleanup_password), \"roles\": [\"fr_distributed_cleanup\"], }, ) if response[\"status\"] != 201: print(", "def interactive_setup(): fr_distributed_cleanup_password = \"\".join(random.choices(string.printable, k=20)) admin_pass = getpass( prompt=\"Enter", "else: if status[\"status\"] == 401: print(\"admin credentials are incorrect, try", "clear_hosts_references() pgplsql function. post_sql += \"INSERT INTO __hosts (hostkey,deleted) VALUES\"", "(hostkey,deleted) VALUES\" for hostkey in post_hostkeys: delete_sql += \"('{}', CURRENT_TIMESTAMP)", "is True. {}\".format( status ) ) sys.exit(1) feederResponse = api.fr_remote_hubs()", ") ) sys.exit(1) for hub in feederResponse[\"hubs\"]: feeder_credentials = getpass(", "0: logger.info( \"No hosts on feeder %s need processing on", "= \"/opt/cfengine/federation/cftransport/distributed_cleanup\" # collect cert files from /var/cfengine/httpd/ssl/certs on #", "\"\".join(random.choices(string.printable, k=20)) admin_pass = getpass( prompt=\"Enter admin password for superhub", "\".format(platform.node()) ) api = NovaApi(api_user=\"admin\", api_password=<PASSWORD>) # first confirm that", "SQL. 
returncode was %s, stderr:\\n%s\\nstdout:\\n%s\", proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\"), ) if", "\"{}\".format(<PASSWORD>), \"roles\": [\"fr_distributed_cleanup\"], }, ) if response[\"status\"] != 201: print(", "hub[\"target_state\"] != \"on\": continue feeder_hostkey = hub[\"hostkey\"] feeder_hostname = hub[\"ui_name\"]", "cmd = \"source {}; echo $WORKDIR; echo $CFE_FR_TABLES\".format(config_sh_path) with subprocess.Popen(", "feeder post_hostkeys.append(host_to_delete) if len(post_hostkeys) == 0: logger.info( \"No hosts on", "of partitioning, operating on parent table will work if \"__promiselog_*\"", "JOIN ( SELECT hostkey, max(lastseentimestamp) as newesttimestamp FROM lastseenhosts WHERE", "continue # only add the host_to_delete if it was successfully", "successfully deleted on the feeder post_hostkeys.append(host_to_delete) if len(post_hostkeys) == 0:", "hubs.cert CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR, \"hubs.cert\") # Note: remove the file", "print( \"Check the status to ensure role is superhub and", "if it was successfully deleted on the feeder post_hostkeys.append(host_to_delete) if", "superhub and configured is True. {}\".format( status ) ) sys.exit(1)", "sys.exit(1) response = api.fr_remote_hubs() if not response[\"hubs\"]: print( \"No attached", "row in hosts_to_delete: # The query API returns rows which", "print( \"Problem running post processing SQL. 
returncode was {}, stderr:\\n{}\\nstdout:\\n{}\".format(", "( status[\"status\"] == 200 and status[\"role\"] == \"superhub\" and status[\"configured\"]", "\"fr_distributed_cleanup\", [\"query.post\", \"remoteHub.list\", \"hubStatus.get\"] ) if response[\"status\"] != 201: print(\"Unable", "{}\".format(response)) continue # query API should return one row, [0],", "processing on superhub so skipping post processing\", feeder_hostname, ) continue", "newest ON ls.hostkey = newest.hostkey AND ls.lastseentimestamp = newest.newesttimestamp AND", "AND ls.hostkey = hosts.hostkey AND ls.hub_id != '{0}' )\"\"\".format( feeder_hubid", "CFE_FR_TABLES values from config.sh\") sys.exit(1) # Primary dir in which", "first confirm that this host is a superhub status =", "feederResponse[\"hubs\"]: feeder_credentials = getpass( prompt=\"Enter admin credentials for {} at", "API should return one row, [0], and one column, [0],", "specify hub_id in queries post_sql = \"set schema 'hub_{}';\\n\".format(feeder_hubid) post_sql", "sql = \"\"\" SELECT DISTINCT hosts.hostkey FROM hosts WHERE hub_id", "role is superhub and configured is True. 
{}\".format( status )", "case of partitioning, operating on parent table will work if", "args = parser.parse_args() global logger logger = logging.getLogger(\"fr_distributed_cleanup\") ch =", ") ) sys.exit(1) write_secret(DISTRIBUTED_CLEANUP_SECRET_PATH, fr_distributed_cleanup_password) def main(): if not os.geteuid()", "\"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup role\", \"includeContext\": \"cfengine\",", "fr_distributed_cleanup accounts: \") logger.info(\"Creating fr_distributed_cleanup role on superhub...\") response =", "operating on parent table will work if \"__promiselog_*\" in table:", "Cleanup user\", \"email\": \"{}\".format(email), \"password\": <PASSWORD>(fr_<PASSWORD>_cleanup_password), \"roles\": [\"fr_distributed_cleanup\"], }, )", "fr_distributed_cleanup_password = read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH) api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>_cleanup_password ) #", "on the feeder post_hostkeys.append(host_to_delete) if len(post_hostkeys) == 0: logger.info( \"No", "feeder_hostname) response = feeder_api.put( \"user\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated", "of feeder data. First, to setup, enable fr_distributed_cleanup by setting", "Reporting.\") else: if status[\"status\"] == 401: print(\"admin credentials are incorrect,", "to delete on %s are %s\", hub[\"ui_name\"], response[\"rows\"]) hosts_to_delete =", "from getpass import getpass from nova_api import NovaApi from cfsecret", "First, to setup, enable fr_distributed_cleanup by setting a class in", "to be superhub\".format( os.path.basename(__file__) ) ) sys.exit(1) response = api.fr_remote_hubs()", "acceptable responses if response[\"status\"] not in [202, 404]: logger.warning( \"Delete", "and status[\"configured\"] ): logger.debug(\"This host is a superhub configured for", "superhub. 
{}\".format( response ) ) sys.exit(1) response = feeder_api.put_role_permissions( \"fr_distributed_cleanup\",", "UPDATE SET deleted = excluded.deleted;\\n\" ) clear_sql = \"set schema", "post processing SQL. returncode was %s, stderr:\\n%s\\nstdout:\\n%s\", proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\"),", "status[\"configured\"] ): logger.debug(\"This host is a superhub configured for Federated", "if response[\"status\"] != 200: print( \"Unable to get status for", "attach at least one feeder hub before running this script.\"", "and then admin credentials on each feeder. \"\"\" import argparse", "print(\"Unable to query for feeder hub_id. Response was {}\".format(response)) continue", "SELECT 1 FROM lastseenhosts ls JOIN ( SELECT hostkey, max(lastseentimestamp)", "superhub. {}\".format( response ) ) sys.exit(1) response = api.put_role_permissions( \"fr_distributed_cleanup\",", "= api.fr_remote_hubs() if not response[\"hubs\"]: print( \"No attached feeders. Please", "and CFE_FR_TABLES values from config.sh\") sys.exit(1) # Primary dir in", "group.add_argument(\"--inform\", action=\"store_true\") args = parser.parse_args() global logger logger = logging.getLogger(\"fr_distributed_cleanup\")", "print(\"admin credentials are incorrect, try again\") sys.exit(1) else: print( \"Check", "%s status code\", host_to_delete, feeder_hostname, response[\"status\"], ) continue # only", "api.fr_remote_hubs() if not response[\"hubs\"]: print( \"No attached feeders. Please attach", "for feeder {}. Skipping\".format(feeder_hostname) ) continue sql = \"SELECT hub_id", "reset everything. # api calls will overwrite fr_distributed_cleanup user and", "input(\"Enter email for fr_distributed_cleanup accounts: \") logger.info(\"Creating fr_distributed_cleanup role on", "SQL statement to delete hosts locally from feeder schemas #", "attached feeders. 
Please attach at least one feeder hub before", "host_to_delete = row[0] response = feeder_api.delete(\"host\", host_to_delete) # both 202", "response[\"role\"] == \"superhub\" and response[\"configured\"] ): print( \"{} can only", "migrating clients in Federated Reporting setup\" ) group = parser.add_mutually_exclusive_group()", "various needed files DISTRIBUTED_CLEANUP_DIR = \"/opt/cfengine/federation/cftransport/distributed_cleanup\" # collect cert files", "to setup fr_distributed_cleanup role and account on all feeders and", "AND EXISTS( SELECT 1 FROM lastseenhosts ls JOIN ( SELECT", "'{0}' AND EXISTS( SELECT 1 FROM lastseenhosts ls JOIN (", "getpass( prompt=\"Enter admin credentials for {} at {}: \".format( hub[\"ui_name\"],", "Federated Reporting.\") else: if status[\"status\"] == 401: print(\"admin credentials are", "data. First, to setup, enable fr_distributed_cleanup by setting a class", "= newest.newesttimestamp AND ls.hostkey = hosts.hostkey AND ls.hub_id != '{0}'", "response = api.query(sql) if response[\"status\"] != 200: print(\"Unable to query", ") if response[\"status\"] != 201: print( \"Problem creating fr_distributed_cleanup role", "print( \"No attached feeders. Please attach at least one feeder", ") # defaults to localhost response = api.fr_hub_status() if not", "query for feeder hub_id. Response was {}\".format(response)) continue # query", "before running this script.\" ) sys.exit(1) email = input(\"Enter email", ") if response[\"status\"] != 201: print(\"Unable to set RBAC permissions", "Reporting hub configured to be superhub\".format( os.path.basename(__file__) ) ) sys.exit(1)", "= api.query(sql) if response[\"status\"] != 200: print(\"Unable to query for", "lists of column values. 
# We only selected hostkey so", "if args.debug: logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) if args.inform: logger.setLevel(logging.INFO) ch.setLevel(logging.INFO) logger.addHandler(ch) if", "post_hostkeys: delete_sql += \"('{}', CURRENT_TIMESTAMP) \".format(hostkey) delete_sql += ( \"ON", "migrated to other feeder hubs. To be run on Federated", "\"%s host(s) to delete on feeder %s\", len(hosts_to_delete), hub[\"ui_name\"] )", "lines[1].decode().split()] if not WORKDIR or not CFE_FR_TABLES: print(\"Unable to get", "# collect cert files from /var/cfengine/httpd/ssl/certs on # superhub and", "simulate the host api delete process by setting current_timestamp in", "len(hosts_to_delete), hub[\"ui_name\"] ) # build up a post-loop SQL statement", "if response[\"status\"] != 200: print( \"Unable to query for deletion", "function. post_sql += \"INSERT INTO __hosts (hostkey,deleted) VALUES\" for hostkey", ") sys.exit(1) fr_distributed_cleanup_password = read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH) api = NovaApi( api_user=\"fr_distributed_cleanup\", api_password=<PASSWORD>_cleanup_password", "logger.info(\"Creating fr_distributed_cleanup user on %s\", feeder_hostname) response = feeder_api.put( \"user\",", "= feeder_api.delete(\"host\", host_to_delete) # both 202 Accepted and 404 Not", "feeder_hostname, response[\"status\"], ) continue # only add the host_to_delete if", ") ) sys.exit(1) response = api.fr_remote_hubs() if not response[\"hubs\"]: print(", "\"Check the status to ensure role is superhub and configured", "hub in response[\"hubs\"]: if hub[\"role\"] != \"feeder\" or hub[\"target_state\"] !=", "(def.json). 
This enables policy in cfe_internal/enterprise/federation/federation.cf ```json { \"classes\": {", "feeders and cat all together into hubs.cert CERT_PATH = os.path.join(DISTRIBUTED_CLEANUP_DIR,", "Federated Reporting hub configured to be superhub\".format( os.path.basename(__file__) ) )", "if not WORKDIR or not CFE_FR_TABLES: print(\"Unable to get WORKDIR", "string import random import subprocess import sys from getpass import", "proc, sending sql...\") outs, errs = proc.communicate(input=post_sql.encode()) if \"ERROR\" in", "with proper RBAC settings for normal operation. You will be", "will overwrite fr_distributed_cleanup user and role on superhub and all", "logger.addHandler(ch) if not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH): if sys.stdout.isatty(): interactive_setup() else: print( \"{}", "this script.\" ) for hub in response[\"hubs\"]: if hub[\"role\"] !=", "group = parser.add_mutually_exclusive_group() group.add_argument(\"--debug\", action=\"store_true\") group.add_argument(\"--inform\", action=\"store_true\") args = parser.parse_args()", "{}\".format( response ) ) sys.exit(1) response = feeder_api.put_role_permissions( \"fr_distributed_cleanup\", [\"host.delete\"]", "= parser.parse_args() global logger logger = logging.getLogger(\"fr_distributed_cleanup\") ch = logging.StreamHandler()", "hub before running this script.\" ) for hub in response[\"hubs\"]:", "\"password\": <PASSWORD>(fr_<PASSWORD>_cleanup_password), \"roles\": [\"fr_distributed_cleanup\"], }, ) if response[\"status\"] != 201:", "as root interactively.\".format( os.path.basename(__file__) ) ) sys.exit(1) fr_distributed_cleanup_password = read_secret(DISTRIBUTED_CLEANUP_SECRET_PATH)", "hostkey IN ({});\\n\".format( table, feeder_hubid, \",\".join([\"'{}'\".format(hk) for hk in post_hostkeys]),", "continue feeder_hostkey = hub[\"hostkey\"] feeder_hostname = hub[\"ui_name\"] feeder_api = NovaApi(", "logger.debug(\"got a proc, sending sql...\") outs, errs = 
proc.communicate(input=post_sql.encode()) if", "feeder_api.delete(\"host\", host_to_delete) # both 202 Accepted and 404 Not Found", "has run on superhub and feeders, run this script to", "in post_hostkeys: delete_sql += \"('{}', CURRENT_TIMESTAMP) \".format(hostkey) delete_sql += (", "delete_sql += \"('{}', CURRENT_TIMESTAMP) \".format(hostkey) delete_sql += ( \"ON CONFLICT", "k=20)) admin_pass = getpass( prompt=\"Enter admin password for superhub {}:", "= os.path.join(os.path.dirname(__file__), \"config.sh\") cmd = \"source {}; echo $WORKDIR; echo", "+= \"INSERT INTO __hosts (hostkey,deleted) VALUES\" for hostkey in post_hostkeys:", "[table.strip() for table in lines[1].decode().split()] if not WORKDIR or not", "\"\\\\set ON_ERROR STOP on\\n\" delete_sql = \"\" post_hostkeys = []", "Host Cleanup role\", \"includeContext\": \"cfengine\", }, ) if response[\"status\"] !=", "\"Problem running post processing SQL. returncode was {}, stderr:\\n{}\\nstdout:\\n{}\".format( proc.returncode,", "= newest.hostkey AND ls.lastseentimestamp = newest.newesttimestamp AND ls.hostkey = hosts.hostkey", "= excluded.deleted;\\n\" ) clear_sql = \"set schema 'public';\\n\" for table", "status = api.fr_hub_status() if ( status[\"status\"] == 200 and status[\"role\"]", "\"description\": \"fr_distributed_cleanup Federated Host Cleanup user\", \"email\": \"{}\".format(email), \"password\": \"{}\".format(<PASSWORD>),", "ch.setLevel(logging.DEBUG) if args.inform: logger.setLevel(logging.INFO) ch.setLevel(logging.INFO) logger.addHandler(ch) if not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH): if", "== 0: logger.info(\"%s: No hosts to delete. 
No actions taken.\",", "else: print( \"Check the status to ensure role is superhub", "which to place various needed files DISTRIBUTED_CLEANUP_DIR = \"/opt/cfengine/federation/cftransport/distributed_cleanup\" #", "ls JOIN ( SELECT hostkey, max(lastseentimestamp) as newesttimestamp FROM lastseenhosts", "import platform import string import random import subprocess import sys", "0: sys.exit(\"\\n{} must be run as root\".format(os.path.basename(__file__))) parser = argparse.ArgumentParser(", "SELECT hostkey, max(lastseentimestamp) as newesttimestamp FROM lastseenhosts WHERE lastseendirection =", "SELECT DISTINCT hosts.hostkey FROM hosts WHERE hub_id = '{0}' AND", "for superhub admin credentials and then admin credentials on each", "from cfsecret import read_secret, write_secret WORKDIR = None CFE_FR_TABLES =", "prompt=\"Enter admin credentials for {} at {}: \".format( hub[\"ui_name\"], hub[\"api_url\"]", "= response[\"rows\"][0][0] sql = \"\"\" SELECT DISTINCT hosts.hostkey FROM hosts", "try again\") sys.exit(1) else: print( \"Check the status to ensure", "was %s, stderr:\\n%s\\nstdout:\\n%s\", proc.returncode, errs.decode(\"utf-8\"), outs.decode(\"utf-8\"), ) if len(hosts_to_delete) !=", "schema 'public';\\n\" for table in CFE_FR_TABLES: # special case of", "or hub[\"target_state\"] != \"on\": continue feeder_hostkey = hub[\"hostkey\"] feeder_hostname =", "superhub admin credentials and then admin credentials on each feeder.", "Federated Host Cleanup user\", \"email\": \"{}\".format(email), \"password\": \"{}\".format(<PASSWORD>), \"roles\": [\"fr_distributed_cleanup\"],", "post-loop SQL statement to delete hosts locally from feeder schemas", "the status to ensure role is superhub and configured is", "= api.put( \"role\", \"fr_distributed_cleanup\", { \"description\": \"fr_distributed_cleanup Federated Host Cleanup", "newesttimestamp FROM lastseenhosts WHERE lastseendirection = 'INCOMING' GROUP BY hostkey", "column # and delete from all federated tables similar to", 
") logger.info(\"Creating fr_distributed_cleanup role on %s\", feeder_hostname) response = feeder_api.put(", "{}. Skipping\".format(feeder_hostname) ) continue sql = \"SELECT hub_id FROM __hubs", "AND ls.lastseentimestamp = newest.newesttimestamp AND ls.hostkey = hosts.hostkey AND ls.hub_id", "in lines[1].decode().split()] if not WORKDIR or not CFE_FR_TABLES: print(\"Unable to", ") sys.exit(1) response = feeder_api.put_role_permissions( \"fr_distributed_cleanup\", [\"host.delete\"] ) if response[\"status\"]", "for hub in response[\"hubs\"]: if hub[\"role\"] != \"feeder\" or hub[\"target_state\"]", "as newest ON ls.hostkey = newest.hostkey AND ls.lastseentimestamp = newest.newesttimestamp", "with subprocess.Popen( cmd, stdout=subprocess.PIPE, shell=True, executable=\"/bin/bash\" ) as proc: lines", "in which to place various needed files DISTRIBUTED_CLEANUP_DIR = \"/opt/cfengine/federation/cftransport/distributed_cleanup\"", "change to feeder schema to make deletions easier/more direct without", "add the host_to_delete if it was successfully deleted on the", "script.\" ) for hub in response[\"hubs\"]: if hub[\"role\"] != \"feeder\"", "print(\"Unable to set RBAC permissions on role fr_distributed_cleanup\") sys.exit(1) logger.info(\"Creating", "superhub status = api.fr_hub_status() if ( status[\"status\"] == 200 and", "api_password=<PASSWORD>, cert_path=CERT_PATH, hostname=feeder_hostname, ) logger.info(\"Creating fr_distributed_cleanup role on %s\", feeder_hostname)", "WHERE hostkey = '{}'\".format(feeder_hostkey) response = api.query(sql) if response[\"status\"] !=", "( \"DELETE FROM {} WHERE hub_id = {} AND hostkey", "== 0: sys.exit(\"\\n{} must be run as root\".format(os.path.basename(__file__))) parser =", "in deleted column # and delete from all federated tables", "on parent table will work if \"__promiselog_*\" in table: table", "subprocess import sys from getpass import getpass from nova_api import", "\"Delete %s on feeder %s got %s status code\", 
host_to_delete,", "import os import platform import string import random import subprocess", "newest.hostkey AND ls.lastseentimestamp = newest.newesttimestamp AND ls.hostkey = hosts.hostkey AND", ") sys.exit(1) for hub in feederResponse[\"hubs\"]: feeder_credentials = getpass( prompt=\"Enter", "from nova_api import NovaApi from cfsecret import read_secret, write_secret WORKDIR", "= \"set schema 'public';\\n\" for table in CFE_FR_TABLES: # special", "\"No attached feeders. Please attach at least one feeder hub", "\"classes\": { \"cfengine_mp_enable_fr_distributed_cleanup\": [ \"any::\" ] } } ``` After", "\"hubStatus.get\"] ) if response[\"status\"] != 201: print(\"Unable to set RBAC", "== 0: logger.info( \"No hosts on feeder %s need processing", "configured to be superhub\".format( os.path.basename(__file__) ) ) sys.exit(1) response =", "if len(hosts_to_delete) == 0: logger.info(\"%s: No hosts to delete. No", "processed\", hub[\"ui_name\"], len(hosts_to_delete), ) if __name__ == \"__main__\": main() else:", "ch = logging.StreamHandler() if args.debug: logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) if args.inform: logger.setLevel(logging.INFO)", "user\", \"email\": \"{}\".format(email), \"password\": \"{}\".format(<PASSWORD>), \"roles\": [\"fr_distributed_cleanup\"], }, ) if", "\"includeContext\": \"cfengine\", }, ) if response[\"status\"] != 201: print( \"Problem", "should return one row, [0], and one column, [0], in", "process by setting current_timestamp in deleted column # and delete", "# We only selected hostkey so will take the first", "if status[\"status\"] == 401: print(\"admin credentials are incorrect, try again\")", "logger.info( \"No hosts on feeder %s need processing on superhub", "({});\\n\".format( table, feeder_hubid, \",\".join([\"'{}'\".format(hk) for hk in post_hostkeys]), ) )", "calls will overwrite fr_distributed_cleanup user and role on superhub and", "= row[0] response = feeder_api.delete(\"host\", host_to_delete) # both 202 
Accepted", "\"{}\".format(email), \"password\": <PASSWORD>(fr_<PASSWORD>_cleanup_password), \"roles\": [\"fr_distributed_cleanup\"], }, ) if response[\"status\"] !=", "Host Cleanup user\", \"email\": \"{}\".format(email), \"password\": <PASSWORD>(fr_<PASSWORD>_cleanup_password), \"roles\": [\"fr_distributed_cleanup\"], },", "if sys.stdout.isatty(): interactive_setup() else: print( \"{} requires manual setup, please", "logger.setLevel(logging.INFO) ch.setLevel(logging.INFO) logger.addHandler(ch) if not os.path.exists(DISTRIBUTED_CLEANUP_SECRET_PATH): if sys.stdout.isatty(): interactive_setup() else:", "GROUP BY hostkey ) as newest ON ls.hostkey = newest.hostkey", "config_sh_path = os.path.join(os.path.dirname(__file__), \"config.sh\") cmd = \"source {}; echo $WORKDIR;", "/var/cfengine/httpd/ssl/certs on # superhub and feeders and cat all together", "not os.geteuid() == 0: sys.exit(\"\\n{} must be run as root\".format(os.path.basename(__file__)))", "post_sql += \"\\\\set ON_ERROR STOP on\\n\" delete_sql = \"\" post_hostkeys", "host_to_delete) # both 202 Accepted and 404 Not Found are", "is superhub and configured is True. {}\".format( status ) )", "\"{} can only be run on a Federated Reporting hub", "feeders. 
Please attach at least one feeder hub before running", "config.sh\") sys.exit(1) # Primary dir in which to place various", "api.query(sql) if response[\"status\"] != 200: print( \"Unable to query for", "%s host deletions processed\", hub[\"ui_name\"], len(hosts_to_delete), ) if __name__ ==", "hostkey in post_hostkeys: delete_sql += \"('{}', CURRENT_TIMESTAMP) \".format(hostkey) delete_sql +=", "= \"set schema 'hub_{}';\\n\".format(feeder_hubid) post_sql += \"\\\\set ON_ERROR STOP on\\n\"", "DISTRIBUTED_CLEANUP_SECRET_PATH = os.path.join(WORKDIR, \"state/fr_distributed_cleanup.cfsecret\") def interactive_setup(): fr_distributed_cleanup_password = \"\".join(random.choices(string.printable, k=20))", "post_hostkeys]), ) ) post_sql += delete_sql + clear_sql logger.debug(\"Running SQL:\\n%s\",", "response[\"status\"] != 200: print( \"Unable to query for deletion candidates.", ") sys.exit(1) logger.debug( \"Ran post processing SQL. returncode was %s,", "proc.stdout.readlines() WORKDIR = lines[0].decode().strip() CFE_FR_TABLES = [table.strip() for table in", "of column values. # We only selected hostkey so will", "hosts.hostkey FROM hosts WHERE hub_id = '{0}' AND EXISTS( SELECT", "logger.info(\"Creating fr_distributed_cleanup user on superhub\") response = api.put( \"user\", \"fr_distributed_cleanup\",", "\"ON CONFLICT (hostkey,hub_id) DO UPDATE SET deleted = excluded.deleted;\\n\" )", "sys.exit(1) for hub in feederResponse[\"hubs\"]: feeder_credentials = getpass( prompt=\"Enter admin", "else: raise ImportError(\"fr_distributed_cleanup.py must only be used as a script!\")", "superhub so skipping post processing\", feeder_hostname, ) continue # simulate" ]
[ "#Doesn't work. import time fibonacci = [1, 1] n =", "n = int(input()) while len(fibonacci) < n: fibonacci.append(fibonacci[-1] + fibonacci[-2])", "len(fibonacci) < n: fibonacci.append(fibonacci[-1] + fibonacci[-2]) for i in range(n):", "import time fibonacci = [1, 1] n = int(input()) while", "< n: fibonacci.append(fibonacci[-1] + fibonacci[-2]) for i in range(n): print(fibonacci[i],", "work. import time fibonacci = [1, 1] n = int(input())", "int(input()) while len(fibonacci) < n: fibonacci.append(fibonacci[-1] + fibonacci[-2]) for i", "n: fibonacci.append(fibonacci[-1] + fibonacci[-2]) for i in range(n): print(fibonacci[i], end='", "time fibonacci = [1, 1] n = int(input()) while len(fibonacci)", "= [1, 1] n = int(input()) while len(fibonacci) < n:", "= int(input()) while len(fibonacci) < n: fibonacci.append(fibonacci[-1] + fibonacci[-2]) for", "fibonacci = [1, 1] n = int(input()) while len(fibonacci) <", "[1, 1] n = int(input()) while len(fibonacci) < n: fibonacci.append(fibonacci[-1]", "fibonacci.append(fibonacci[-1] + fibonacci[-2]) for i in range(n): print(fibonacci[i], end=' ')", "while len(fibonacci) < n: fibonacci.append(fibonacci[-1] + fibonacci[-2]) for i in", "1] n = int(input()) while len(fibonacci) < n: fibonacci.append(fibonacci[-1] +" ]
[]
[ "hasattr(self, \"_file\"): self._openfile() self._file.write(str(msg) + \"\\n\") if not self._buffering: self._file.flush()", "implement this API: (maybe put it into slogger.py?) log =", "file-like\" % (consumer,)) consumer = File(consumer) self.keywords2consumer[keywords] = consumer def", "a message to the log \"\"\" py.std.syslog.syslog(self.priority, str(msg)) for _prio", "scheme. XXX implement this API: (maybe put it into slogger.py?)", "for _prio in \"EMERG ALERT CRIT ERR WARNING NOTICE INFO", "from the back, the list of keywords, the first consumer", "default_keywordmapper self._keywordmapper = keywordmapper def __repr__(self): return \"<py.log.Producer %s>\" %", "to stdout, stderr, files, etc. Used extensively by PyPy-1.1. \"\"\"", "\"\"\" func = self._keywordmapper.getconsumer(self._keywords) if func is not None: func(self.Message(self._keywords,", "+ \"\\n\") if hasattr(self._file, 'flush'): self._file.flush() class Path(object): \"\"\" log", "setstate(state): default_keywordmapper.setstate(state) def getstate(): return default_keywordmapper.getstate() # # Consumers #", "self._append and 'a' or 'w' f = open(self._filename, mode) self._file", "def __init__(self, keywords, keywordmapper=None, **kw): if hasattr(keywords, 'split'): keywords =", "message to the appropriate consumer(s) \"\"\" func = self._keywordmapper.getconsumer(self._keywords) if", "consumer is not None and not py.builtin.callable(consumer): if not hasattr(consumer,", "'write'): raise TypeError( \"%r should be None, callable or file-like\"", "be None, callable or file-like\" % (consumer,)) consumer = File(consumer)", "self.keywords2consumer.clear() self.keywords2consumer.update(state) def getconsumer(self, keywords): \"\"\" return a consumer matching", "hasattr(keywords, 'split'): keywords = tuple(keywords.split()) self._keywords = keywords if keywordmapper", "setconsumer(keywords, consumer): default_keywordmapper.setconsumer(keywords, consumer) def setstate(state): 
default_keywordmapper.setstate(state) def getstate(): return", "consumer): default_keywordmapper.setconsumer(keywords, consumer) def setstate(state): default_keywordmapper.setstate(state) def getstate(): return default_keywordmapper.getstate()", "message to the log \"\"\" self._file.write(str(msg) + \"\\n\") if hasattr(self._file,", "that writes to the syslog daemon \"\"\" def __init__(self, priority", "a string or tuple\" % (keywords,)) if consumer is not", "set of keywords. \"\"\" # normalize to tuples if isinstance(keywords,", "is None: priority = self.LOG_INFO self.priority = priority def __call__(self,", "(maybe put it into slogger.py?) log = Logger( info=py.log.STDOUT, debug=py.log.STDOUT,", "it into slogger.py?) log = Logger( info=py.log.STDOUT, debug=py.log.STDOUT, command=None) log.info(\"hello\",", "*args): \"\"\" write a message to the appropriate consumer(s) \"\"\"", "isinstance(keywords, str): keywords = tuple(filter(None, keywords.split())) elif hasattr(keywords, '_keywords'): keywords", "to stdout (using 'print') \"\"\" sys.stderr.write(str(msg)+\"\\n\") default_keywordmapper = KeywordMapper() def", "msg): \"\"\" write a message to the log \"\"\" py.std.syslog.syslog(self.priority,", "by walking, starting from the back, the list of keywords,", "or tuple\" % (keywords,)) if consumer is not None and", "\"<py.log.Producer %s>\" % \":\".join(self._keywords) def __getattr__(self, name): if '_' in", "WARNING NOTICE INFO DEBUG\".split(): _prio = \"LOG_\" + _prio try:", "_prio = \"LOG_\" + _prio try: setattr(Syslog, _prio, getattr(py.std.syslog, _prio))", "\"EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG\".split(): _prio =", "{} def getstate(self): return self.keywords2consumer.copy() def setstate(self, state): self.keywords2consumer.clear() self.keywords2consumer.update(state)", "ALERT CRIT ERR WARNING NOTICE INFO DEBUG\".split(): _prio = \"LOG_\"", "% \":\".join(self._keywords) def __getattr__(self, name): if '_' in name: raise", "'consumer' object, which 
then prints them to stdout, stderr, files,", "if consumer is not None and not py.builtin.callable(consumer): if not", "self._file = f def __call__(self, msg): \"\"\" write a message", "logging functionality based on a producer/consumer scheme. XXX implement this", "of keywords, the first consumer matching a keyword is returned", "matching a keyword is returned (falling back to py.log.default) \"\"\"", "def __init__(self): self.keywords2consumer = {} def getstate(self): return self.keywords2consumer.copy() def", "= Message # to allow later customization keywords2consumer = {}", "\"\"\" # normalize to tuples if isinstance(keywords, str): keywords =", "def STDOUT(msg): \"\"\" consumer that writes to sys.stdout \"\"\" sys.stdout.write(str(msg)+\"\\n\")", "\"\"\" if not hasattr(self, \"_file\"): self._openfile() self._file.write(str(msg) + \"\\n\") if", "the list of keywords, the first consumer matching a keyword", "tries to find the most suitable consumer by walking, starting", "or not hasattr(f, 'open') self._file = f def __call__(self, msg):", "__call__(self, msg): \"\"\" write a message to the log \"\"\"", "tuple(keywords.split()) self._keywords = keywords if keywordmapper is None: keywordmapper =", "self.keywords2consumer.update(state) def getconsumer(self, keywords): \"\"\" return a consumer matching the", "buffering=False): self._append = append self._filename = str(filename) self._buffering = buffering", "default consumer, prints the message to stdout (using 'print') \"\"\"", "\"\"\" set a consumer for a set of keywords. \"\"\"", "default_keywordmapper.getstate() # # Consumers # class File(object): \"\"\" log consumer", "put it into slogger.py?) 
log = Logger( info=py.log.STDOUT, debug=py.log.STDOUT, command=None)", "to the syslog daemon \"\"\" def __init__(self, priority = None):", "# # Consumers # class File(object): \"\"\" log consumer wrapping", "file(-like) object \"\"\" def __init__(self, f): assert hasattr(f, 'write') #assert", "f = open(self._filename, mode) self._file = f def __call__(self, msg):", "def setstate(state): default_keywordmapper.setstate(state) def getstate(): return default_keywordmapper.getstate() # # Consumers", "= keywordmapper def __repr__(self): return \"<py.log.Producer %s>\" % \":\".join(self._keywords) def", "keywords._keywords elif not isinstance(keywords, tuple): raise TypeError(\"key %r is not", "command=None) \"\"\" import py, sys class Message(object): def __init__(self, keywords,", "priority is None: priority = self.LOG_INFO self.priority = priority def", "self.args)) def prefix(self): return \"[%s] \" % (\":\".join(self.keywords)) def __str__(self):", "keywords, keywordmapper=None, **kw): if hasattr(keywords, 'split'): keywords = tuple(keywords.split()) self._keywords", "consumer def default_consumer(msg): \"\"\" the default consumer, prints the message", "\"[%s] \" % (\":\".join(self.keywords)) def __str__(self): return self.prefix() + self.content()", "class Syslog: \"\"\" consumer that writes to the syslog daemon", "producer API which sends messages to be logged to a", "return self.keywords2consumer.copy() def setstate(self, state): self.keywords2consumer.clear() self.keywords2consumer.update(state) def getconsumer(self, keywords):", "def __init__(self, keywords, args): self.keywords = keywords self.args = args", "\"\"\" write a message to the log \"\"\" self._file.write(str(msg) +", "= self.LOG_INFO self.priority = priority def __call__(self, msg): \"\"\" write", "a keyword is returned (falling back to py.log.default) \"\"\" for", "debug=py.log.STDOUT, command=None) log.info(\"hello\", \"world\") log.command(\"hello\", \"world\") log = 
Logger(info=Logger(something=...), debug=py.log.STDOUT,", "consumer = File(consumer) self.keywords2consumer[keywords] = consumer def default_consumer(msg): \"\"\" the", "is not None: func(self.Message(self._keywords, args)) class KeywordMapper: def __init__(self): self.keywords2consumer", "= self._append and 'a' or 'w' f = open(self._filename, mode)", "consumer that opens and writes to a Path \"\"\" def", "not hasattr(self, \"_file\"): self._openfile() self._file.write(str(msg) + \"\\n\") if not self._buffering:", "= priority def __call__(self, msg): \"\"\" write a message to", "keywords, consumer): \"\"\" set a consumer for a set of", "name: raise AttributeError(name) producer = self.__class__(self._keywords + (name,)) setattr(self, name,", "'a' or 'w' f = open(self._filename, mode) self._file = f", "log = Logger(info=Logger(something=...), debug=py.log.STDOUT, command=None) \"\"\" import py, sys class", "py, sys class Message(object): def __init__(self, keywords, args): self.keywords =", "messages to be logged to a 'consumer' object, which then", "<filename>py/_log/log.py \"\"\" basic logging functionality based on a producer/consumer scheme.", "callable or file-like\" % (consumer,)) consumer = File(consumer) self.keywords2consumer[keywords] =", "to be logged to a 'consumer' object, which then prints", "\"\"\" the default consumer, prints the message to stdout (using", "except KeyError: continue return self.keywords2consumer.get('default', default_consumer) def setconsumer(self, keywords, consumer):", "'print') \"\"\" sys.stderr.write(str(msg)+\"\\n\") default_keywordmapper = KeywordMapper() def setconsumer(keywords, consumer): default_keywordmapper.setconsumer(keywords,", "KeywordMapper: def __init__(self): self.keywords2consumer = {} def getstate(self): return self.keywords2consumer.copy()", "def STDERR(msg): \"\"\" consumer that writes to sys.stderr \"\"\" sys.stderr.write(str(msg)+\"\\n\")", "not delayed_create: self._openfile() def _openfile(self): mode = 
self._append and 'a'", "priority = self.LOG_INFO self.priority = priority def __call__(self, msg): \"\"\"", "self._openfile() self._file.write(str(msg) + \"\\n\") if not self._buffering: self._file.flush() def STDOUT(msg):", "default_consumer(msg): \"\"\" the default consumer, prints the message to stdout", "back, the list of keywords, the first consumer matching a", "consumer): \"\"\" set a consumer for a set of keywords.", "= keywords if keywordmapper is None: keywordmapper = default_keywordmapper self._keywordmapper", "later customization keywords2consumer = {} def __init__(self, keywords, keywordmapper=None, **kw):", "them to stdout, stderr, files, etc. Used extensively by PyPy-1.1.", "log consumer that opens and writes to a Path \"\"\"", "\" \".join(map(str, self.args)) def prefix(self): return \"[%s] \" % (\":\".join(self.keywords))", "consumer, prints the message to stdout (using 'print') \"\"\" sys.stderr.write(str(msg)+\"\\n\")", "'_' in name: raise AttributeError(name) producer = self.__class__(self._keywords + (name,))", "self._buffering = buffering if not delayed_create: self._openfile() def _openfile(self): mode", "\"\"\" def __init__(self, filename, append=False, delayed_create=False, buffering=False): self._append = append", "setstate(self, state): self.keywords2consumer.clear() self.keywords2consumer.update(state) def getconsumer(self, keywords): \"\"\" return a", "not a string or tuple\" % (keywords,)) if consumer is", "def getstate(self): return self.keywords2consumer.copy() def setstate(self, state): self.keywords2consumer.clear() self.keywords2consumer.update(state) def", "the most suitable consumer by walking, starting from the back,", "priority def __call__(self, msg): \"\"\" write a message to the", "elif hasattr(keywords, '_keywords'): keywords = keywords._keywords elif not isinstance(keywords, tuple):", "keyword is returned (falling back to py.log.default) \"\"\" for i", "keywords, the first consumer matching a keyword is returned 
(falling", "default_keywordmapper.setconsumer(keywords, consumer) def setstate(state): default_keywordmapper.setstate(state) def getstate(): return default_keywordmapper.getstate() #", "tuple): raise TypeError(\"key %r is not a string or tuple\"", "prints them to stdout, stderr, files, etc. Used extensively by", "__init__(self, keywords, args): self.keywords = keywords self.args = args def", "__init__(self, f): assert hasattr(f, 'write') #assert isinstance(f, file) or not", "def __call__(self, msg): \"\"\" write a message to the log", "the syslog daemon \"\"\" def __init__(self, priority = None): if", "import py, sys class Message(object): def __init__(self, keywords, args): self.keywords", "keywords2consumer = {} def __init__(self, keywords, keywordmapper=None, **kw): if hasattr(keywords,", "self.LOG_INFO self.priority = priority def __call__(self, msg): \"\"\" write a", "to sys.stderr \"\"\" sys.stderr.write(str(msg)+\"\\n\") class Syslog: \"\"\" consumer that writes", "log consumer wrapping a file(-like) object \"\"\" def __init__(self, f):", "func = self._keywordmapper.getconsumer(self._keywords) if func is not None: func(self.Message(self._keywords, args))", "message to the log \"\"\" py.std.syslog.syslog(self.priority, str(msg)) for _prio in", "consumer matching the given keywords. 
tries to find the most", "None, callable or file-like\" % (consumer,)) consumer = File(consumer) self.keywords2consumer[keywords]", "consumer(s) \"\"\" func = self._keywordmapper.getconsumer(self._keywords) if func is not None:", "__getattr__(self, name): if '_' in name: raise AttributeError(name) producer =", "to tuples if isinstance(keywords, str): keywords = tuple(filter(None, keywords.split())) elif", "= Logger( info=py.log.STDOUT, debug=py.log.STDOUT, command=None) log.info(\"hello\", \"world\") log.command(\"hello\", \"world\") log", "\" % (\":\".join(self.keywords)) def __str__(self): return self.prefix() + self.content() class", "self.keywords = keywords self.args = args def content(self): return \"", "extensively by PyPy-1.1. \"\"\" Message = Message # to allow", "%s>\" % \":\".join(self._keywords) def __getattr__(self, name): if '_' in name:", "\"\"\" write a message to the appropriate consumer(s) \"\"\" func", "getconsumer(self, keywords): \"\"\" return a consumer matching the given keywords.", "to py.log.default) \"\"\" for i in range(len(keywords), 0, -1): try:", "\"\"\" def __init__(self, f): assert hasattr(f, 'write') #assert isinstance(f, file)", "if not hasattr(consumer, 'write'): raise TypeError( \"%r should be None,", "if hasattr(keywords, 'split'): keywords = tuple(keywords.split()) self._keywords = keywords if", "mode = self._append and 'a' or 'w' f = open(self._filename,", "= None): if priority is None: priority = self.LOG_INFO self.priority", "file) or not hasattr(f, 'open') self._file = f def __call__(self,", "continue return self.keywords2consumer.get('default', default_consumer) def setconsumer(self, keywords, consumer): \"\"\" set", "this API: (maybe put it into slogger.py?) 
log = Logger(", "self._file.write(str(msg) + \"\\n\") if hasattr(self._file, 'flush'): self._file.flush() class Path(object): \"\"\"", "is returned (falling back to py.log.default) \"\"\" for i in", "self.content() class Producer(object): \"\"\" (deprecated) Log producer API which sends", "py.log.default) \"\"\" for i in range(len(keywords), 0, -1): try: return", "sys.stdout \"\"\" sys.stdout.write(str(msg)+\"\\n\") def STDERR(msg): \"\"\" consumer that writes to", "for i in range(len(keywords), 0, -1): try: return self.keywords2consumer[keywords[:i]] except", "mode) self._file = f def __call__(self, msg): \"\"\" write a", "args)) class KeywordMapper: def __init__(self): self.keywords2consumer = {} def getstate(self):", "the default consumer, prints the message to stdout (using 'print')", "keywords = keywords._keywords elif not isinstance(keywords, tuple): raise TypeError(\"key %r", "list of keywords, the first consumer matching a keyword is", "delayed_create=False, buffering=False): self._append = append self._filename = str(filename) self._buffering =", "sys.stdout.write(str(msg)+\"\\n\") def STDERR(msg): \"\"\" consumer that writes to sys.stderr \"\"\"", "setconsumer(self, keywords, consumer): \"\"\" set a consumer for a set", "class KeywordMapper: def __init__(self): self.keywords2consumer = {} def getstate(self): return", "if not hasattr(self, \"_file\"): self._openfile() self._file.write(str(msg) + \"\\n\") if not", "-1): try: return self.keywords2consumer[keywords[:i]] except KeyError: continue return self.keywords2consumer.get('default', default_consumer)", "default_consumer) def setconsumer(self, keywords, consumer): \"\"\" set a consumer for", "**kw): if hasattr(keywords, 'split'): keywords = tuple(keywords.split()) self._keywords = keywords", "keywords. 
tries to find the most suitable consumer by walking,", "= tuple(filter(None, keywords.split())) elif hasattr(keywords, '_keywords'): keywords = keywords._keywords elif", "STDERR(msg): \"\"\" consumer that writes to sys.stderr \"\"\" sys.stderr.write(str(msg)+\"\\n\") class", "a producer/consumer scheme. XXX implement this API: (maybe put it", "'_keywords'): keywords = keywords._keywords elif not isinstance(keywords, tuple): raise TypeError(\"key", "\"\"\" log consumer that opens and writes to a Path", "# class File(object): \"\"\" log consumer wrapping a file(-like) object", "\"\"\" sys.stderr.write(str(msg)+\"\\n\") class Syslog: \"\"\" consumer that writes to the", "= buffering if not delayed_create: self._openfile() def _openfile(self): mode =", "%r is not a string or tuple\" % (keywords,)) if", "ERR WARNING NOTICE INFO DEBUG\".split(): _prio = \"LOG_\" + _prio", "f): assert hasattr(f, 'write') #assert isinstance(f, file) or not hasattr(f,", "\".join(map(str, self.args)) def prefix(self): return \"[%s] \" % (\":\".join(self.keywords)) def", "return self.keywords2consumer[keywords[:i]] except KeyError: continue return self.keywords2consumer.get('default', default_consumer) def setconsumer(self,", "string or tuple\" % (keywords,)) if consumer is not None", "\"\"\" consumer that writes to sys.stderr \"\"\" sys.stderr.write(str(msg)+\"\\n\") class Syslog:", "class File(object): \"\"\" log consumer wrapping a file(-like) object \"\"\"", "hasattr(consumer, 'write'): raise TypeError( \"%r should be None, callable or", "= Logger(info=Logger(something=...), debug=py.log.STDOUT, command=None) \"\"\" import py, sys class Message(object):", "self.__class__(self._keywords + (name,)) setattr(self, name, producer) return producer def __call__(self,", "return \"[%s] \" % (\":\".join(self.keywords)) def __str__(self): return self.prefix() +", "\"\"\" (deprecated) Log producer API which sends messages to be", "self.args = args def content(self): return \" \".join(map(str, 
self.args)) def", "log.info(\"hello\", \"world\") log.command(\"hello\", \"world\") log = Logger(info=Logger(something=...), debug=py.log.STDOUT, command=None) \"\"\"", "to the log \"\"\" self._file.write(str(msg) + \"\\n\") if hasattr(self._file, 'flush'):", "and writes to a Path \"\"\" def __init__(self, filename, append=False,", "producer/consumer scheme. XXX implement this API: (maybe put it into", "allow later customization keywords2consumer = {} def __init__(self, keywords, keywordmapper=None,", "str(filename) self._buffering = buffering if not delayed_create: self._openfile() def _openfile(self):", "self.priority = priority def __call__(self, msg): \"\"\" write a message", "consumer wrapping a file(-like) object \"\"\" def __init__(self, f): assert", "to find the most suitable consumer by walking, starting from", "sys.stderr.write(str(msg)+\"\\n\") default_keywordmapper = KeywordMapper() def setconsumer(keywords, consumer): default_keywordmapper.setconsumer(keywords, consumer) def", "that opens and writes to a Path \"\"\" def __init__(self,", "message to the log \"\"\" if not hasattr(self, \"_file\"): self._openfile()", "keywords. \"\"\" # normalize to tuples if isinstance(keywords, str): keywords", "matching the given keywords. tries to find the most suitable", "= self._keywordmapper.getconsumer(self._keywords) if func is not None: func(self.Message(self._keywords, args)) class", "into slogger.py?) 
log = Logger( info=py.log.STDOUT, debug=py.log.STDOUT, command=None) log.info(\"hello\", \"world\")", "{} def __init__(self, keywords, keywordmapper=None, **kw): if hasattr(keywords, 'split'): keywords", "def default_consumer(msg): \"\"\" the default consumer, prints the message to", "def __init__(self, priority = None): if priority is None: priority", "\"\"\" for i in range(len(keywords), 0, -1): try: return self.keywords2consumer[keywords[:i]]", "try: return self.keywords2consumer[keywords[:i]] except KeyError: continue return self.keywords2consumer.get('default', default_consumer) def", "syslog daemon \"\"\" def __init__(self, priority = None): if priority", "\"\"\" sys.stderr.write(str(msg)+\"\\n\") default_keywordmapper = KeywordMapper() def setconsumer(keywords, consumer): default_keywordmapper.setconsumer(keywords, consumer)", "def _openfile(self): mode = self._append and 'a' or 'w' f", "append self._filename = str(filename) self._buffering = buffering if not delayed_create:", "prefix(self): return \"[%s] \" % (\":\".join(self.keywords)) def __str__(self): return self.prefix()", "#assert isinstance(f, file) or not hasattr(f, 'open') self._file = f", "msg): \"\"\" write a message to the log \"\"\" if", "sends messages to be logged to a 'consumer' object, which", "first consumer matching a keyword is returned (falling back to", "stdout, stderr, files, etc. Used extensively by PyPy-1.1. 
\"\"\" Message", "= File(consumer) self.keywords2consumer[keywords] = consumer def default_consumer(msg): \"\"\" the default", "stdout (using 'print') \"\"\" sys.stderr.write(str(msg)+\"\\n\") default_keywordmapper = KeywordMapper() def setconsumer(keywords,", "func is not None: func(self.Message(self._keywords, args)) class KeywordMapper: def __init__(self):", "log.command(\"hello\", \"world\") log = Logger(info=Logger(something=...), debug=py.log.STDOUT, command=None) \"\"\" import py,", "consumer that writes to the syslog daemon \"\"\" def __init__(self,", "on a producer/consumer scheme. XXX implement this API: (maybe put", "self._openfile() def _openfile(self): mode = self._append and 'a' or 'w'", "_prio in \"EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG\".split():", "'split'): keywords = tuple(keywords.split()) self._keywords = keywords if keywordmapper is", "not hasattr(f, 'open') self._file = f def __call__(self, msg): \"\"\"", "a message to the log \"\"\" if not hasattr(self, \"_file\"):", "\"%r should be None, callable or file-like\" % (consumer,)) consumer", "not isinstance(keywords, tuple): raise TypeError(\"key %r is not a string", "not py.builtin.callable(consumer): if not hasattr(consumer, 'write'): raise TypeError( \"%r should", "sys class Message(object): def __init__(self, keywords, args): self.keywords = keywords", "hasattr(f, 'open') self._file = f def __call__(self, msg): \"\"\" write", "the log \"\"\" py.std.syslog.syslog(self.priority, str(msg)) for _prio in \"EMERG ALERT", "'w' f = open(self._filename, mode) self._file = f def __call__(self,", "consumer) def setstate(state): default_keywordmapper.setstate(state) def getstate(): return default_keywordmapper.getstate() # #", "class Producer(object): \"\"\" (deprecated) Log producer API which sends messages", "keywordmapper is None: keywordmapper = default_keywordmapper self._keywordmapper = keywordmapper def", "a set of keywords. 
\"\"\" # normalize to tuples if", "getstate(self): return self.keywords2consumer.copy() def setstate(self, state): self.keywords2consumer.clear() self.keywords2consumer.update(state) def getconsumer(self,", "__repr__(self): return \"<py.log.Producer %s>\" % \":\".join(self._keywords) def __getattr__(self, name): if", "setattr(self, name, producer) return producer def __call__(self, *args): \"\"\" write", "\"\"\" log consumer wrapping a file(-like) object \"\"\" def __init__(self,", "Path \"\"\" def __init__(self, filename, append=False, delayed_create=False, buffering=False): self._append =", "is not a string or tuple\" % (keywords,)) if consumer", "return \"<py.log.Producer %s>\" % \":\".join(self._keywords) def __getattr__(self, name): if '_'", "return producer def __call__(self, *args): \"\"\" write a message to", "\"\\n\") if hasattr(self._file, 'flush'): self._file.flush() class Path(object): \"\"\" log consumer", "write a message to the appropriate consumer(s) \"\"\" func =", "__str__(self): return self.prefix() + self.content() class Producer(object): \"\"\" (deprecated) Log", "__init__(self, filename, append=False, delayed_create=False, buffering=False): self._append = append self._filename =", "not None: func(self.Message(self._keywords, args)) class KeywordMapper: def __init__(self): self.keywords2consumer =", "self.keywords2consumer.get('default', default_consumer) def setconsumer(self, keywords, consumer): \"\"\" set a consumer", "= KeywordMapper() def setconsumer(keywords, consumer): default_keywordmapper.setconsumer(keywords, consumer) def setstate(state): default_keywordmapper.setstate(state)", "\"\"\" basic logging functionality based on a producer/consumer scheme. XXX", "API: (maybe put it into slogger.py?) log = Logger( info=py.log.STDOUT,", "PyPy-1.1. 
\"\"\" Message = Message # to allow later customization", "in \"EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG\".split(): _prio", "def __str__(self): return self.prefix() + self.content() class Producer(object): \"\"\" (deprecated)", "\"\"\" self._file.write(str(msg) + \"\\n\") if hasattr(self._file, 'flush'): self._file.flush() class Path(object):", "if keywordmapper is None: keywordmapper = default_keywordmapper self._keywordmapper = keywordmapper", "TypeError( \"%r should be None, callable or file-like\" % (consumer,))", "buffering if not delayed_create: self._openfile() def _openfile(self): mode = self._append", "keywordmapper = default_keywordmapper self._keywordmapper = keywordmapper def __repr__(self): return \"<py.log.Producer", "customization keywords2consumer = {} def __init__(self, keywords, keywordmapper=None, **kw): if", "stderr, files, etc. Used extensively by PyPy-1.1. \"\"\" Message =", "to the log \"\"\" if not hasattr(self, \"_file\"): self._openfile() self._file.write(str(msg)", "name, producer) return producer def __call__(self, *args): \"\"\" write a", "DEBUG\".split(): _prio = \"LOG_\" + _prio try: setattr(Syslog, _prio, getattr(py.std.syslog,", "\"world\") log = Logger(info=Logger(something=...), debug=py.log.STDOUT, command=None) \"\"\" import py, sys", "def setstate(self, state): self.keywords2consumer.clear() self.keywords2consumer.update(state) def getconsumer(self, keywords): \"\"\" return", "etc. Used extensively by PyPy-1.1. 
\"\"\" Message = Message #", "in name: raise AttributeError(name) producer = self.__class__(self._keywords + (name,)) setattr(self,", "open(self._filename, mode) self._file = f def __call__(self, msg): \"\"\" write", "self.keywords2consumer = {} def getstate(self): return self.keywords2consumer.copy() def setstate(self, state):", "\"\"\" sys.stdout.write(str(msg)+\"\\n\") def STDERR(msg): \"\"\" consumer that writes to sys.stderr", "Consumers # class File(object): \"\"\" log consumer wrapping a file(-like)", "= keywords self.args = args def content(self): return \" \".join(map(str,", "INFO DEBUG\".split(): _prio = \"LOG_\" + _prio try: setattr(Syslog, _prio,", "\"\"\" write a message to the log \"\"\" if not", "wrapping a file(-like) object \"\"\" def __init__(self, f): assert hasattr(f,", "normalize to tuples if isinstance(keywords, str): keywords = tuple(filter(None, keywords.split()))", "state): self.keywords2consumer.clear() self.keywords2consumer.update(state) def getconsumer(self, keywords): \"\"\" return a consumer", "= self.__class__(self._keywords + (name,)) setattr(self, name, producer) return producer def", "(name,)) setattr(self, name, producer) return producer def __call__(self, *args): \"\"\"", "TypeError(\"key %r is not a string or tuple\" % (keywords,))", "should be None, callable or file-like\" % (consumer,)) consumer =", "\"\"\" write a message to the log \"\"\" py.std.syslog.syslog(self.priority, str(msg))", "back to py.log.default) \"\"\" for i in range(len(keywords), 0, -1):", "self._keywordmapper.getconsumer(self._keywords) if func is not None: func(self.Message(self._keywords, args)) class KeywordMapper:", "the message to stdout (using 'print') \"\"\" sys.stderr.write(str(msg)+\"\\n\") default_keywordmapper =", "be logged to a 'consumer' object, which then prints them", "based on a producer/consumer scheme. 
XXX implement this API: (maybe", "raise AttributeError(name) producer = self.__class__(self._keywords + (name,)) setattr(self, name, producer)", "KeywordMapper() def setconsumer(keywords, consumer): default_keywordmapper.setconsumer(keywords, consumer) def setstate(state): default_keywordmapper.setstate(state) def", "AttributeError(name) producer = self.__class__(self._keywords + (name,)) setattr(self, name, producer) return", "= \"LOG_\" + _prio try: setattr(Syslog, _prio, getattr(py.std.syslog, _prio)) except", "(keywords,)) if consumer is not None and not py.builtin.callable(consumer): if", "keywords): \"\"\" return a consumer matching the given keywords. tries", "hasattr(self._file, 'flush'): self._file.flush() class Path(object): \"\"\" log consumer that opens", "getstate(): return default_keywordmapper.getstate() # # Consumers # class File(object): \"\"\"", "by PyPy-1.1. \"\"\" Message = Message # to allow later", "KeyError: continue return self.keywords2consumer.get('default', default_consumer) def setconsumer(self, keywords, consumer): \"\"\"", "CRIT ERR WARNING NOTICE INFO DEBUG\".split(): _prio = \"LOG_\" +", "filename, append=False, delayed_create=False, buffering=False): self._append = append self._filename = str(filename)", "keywordmapper=None, **kw): if hasattr(keywords, 'split'): keywords = tuple(keywords.split()) self._keywords =", "default_keywordmapper.setstate(state) def getstate(): return default_keywordmapper.getstate() # # Consumers # class", "\"world\") log.command(\"hello\", \"world\") log = Logger(info=Logger(something=...), debug=py.log.STDOUT, command=None) \"\"\" import", "find the most suitable consumer by walking, starting from the", "the log \"\"\" if not hasattr(self, \"_file\"): self._openfile() self._file.write(str(msg) +", "= {} def __init__(self, keywords, keywordmapper=None, **kw): if hasattr(keywords, 'split'):", "i in range(len(keywords), 0, -1): try: return self.keywords2consumer[keywords[:i]] except KeyError:", "the first 
consumer matching a keyword is returned (falling back", "self.keywords2consumer[keywords[:i]] except KeyError: continue return self.keywords2consumer.get('default', default_consumer) def setconsumer(self, keywords,", "def __init__(self, filename, append=False, delayed_create=False, buffering=False): self._append = append self._filename", "+ (name,)) setattr(self, name, producer) return producer def __call__(self, *args):", "None: func(self.Message(self._keywords, args)) class KeywordMapper: def __init__(self): self.keywords2consumer = {}", "args def content(self): return \" \".join(map(str, self.args)) def prefix(self): return", "consumer matching a keyword is returned (falling back to py.log.default)", "% (consumer,)) consumer = File(consumer) self.keywords2consumer[keywords] = consumer def default_consumer(msg):", "consumer for a set of keywords. \"\"\" # normalize to", "= str(filename) self._buffering = buffering if not delayed_create: self._openfile() def", "if '_' in name: raise AttributeError(name) producer = self.__class__(self._keywords +", "a file(-like) object \"\"\" def __init__(self, f): assert hasattr(f, 'write')", "= args def content(self): return \" \".join(map(str, self.args)) def prefix(self):", "slogger.py?) log = Logger( info=py.log.STDOUT, debug=py.log.STDOUT, command=None) log.info(\"hello\", \"world\") log.command(\"hello\",", "XXX implement this API: (maybe put it into slogger.py?) 
log", "log \"\"\" if not hasattr(self, \"_file\"): self._openfile() self._file.write(str(msg) + \"\\n\")", "a Path \"\"\" def __init__(self, filename, append=False, delayed_create=False, buffering=False): self._append", "to a 'consumer' object, which then prints them to stdout,", "None and not py.builtin.callable(consumer): if not hasattr(consumer, 'write'): raise TypeError(", "isinstance(keywords, tuple): raise TypeError(\"key %r is not a string or", "name): if '_' in name: raise AttributeError(name) producer = self.__class__(self._keywords", "None: priority = self.LOG_INFO self.priority = priority def __call__(self, msg):", "self._file.write(str(msg) + \"\\n\") if not self._buffering: self._file.flush() def STDOUT(msg): \"\"\"", "# to allow later customization keywords2consumer = {} def __init__(self,", "delayed_create: self._openfile() def _openfile(self): mode = self._append and 'a' or", "(consumer,)) consumer = File(consumer) self.keywords2consumer[keywords] = consumer def default_consumer(msg): \"\"\"", "a message to the appropriate consumer(s) \"\"\" func = self._keywordmapper.getconsumer(self._keywords)", "log \"\"\" py.std.syslog.syslog(self.priority, str(msg)) for _prio in \"EMERG ALERT CRIT", "producer = self.__class__(self._keywords + (name,)) setattr(self, name, producer) return producer", "+ self.content() class Producer(object): \"\"\" (deprecated) Log producer API which", "a 'consumer' object, which then prints them to stdout, stderr,", "opens and writes to a Path \"\"\" def __init__(self, filename,", "\"\"\" consumer that writes to sys.stdout \"\"\" sys.stdout.write(str(msg)+\"\\n\") def STDERR(msg):", "Log producer API which sends messages to be logged to", "def prefix(self): return \"[%s] \" % (\":\".join(self.keywords)) def __str__(self): return", "% (keywords,)) if consumer is not None and not py.builtin.callable(consumer):", "'write') #assert isinstance(f, file) or not hasattr(f, 'open') self._file =", "if func is not None: 
func(self.Message(self._keywords, args)) class KeywordMapper: def", "msg): \"\"\" write a message to the log \"\"\" self._file.write(str(msg)", "\"\"\" import py, sys class Message(object): def __init__(self, keywords, args):", "Message = Message # to allow later customization keywords2consumer =", "message to stdout (using 'print') \"\"\" sys.stderr.write(str(msg)+\"\\n\") default_keywordmapper = KeywordMapper()", "= consumer def default_consumer(msg): \"\"\" the default consumer, prints the", "object, which then prints them to stdout, stderr, files, etc.", "consumer that writes to sys.stdout \"\"\" sys.stdout.write(str(msg)+\"\\n\") def STDERR(msg): \"\"\"", "consumer that writes to sys.stderr \"\"\" sys.stderr.write(str(msg)+\"\\n\") class Syslog: \"\"\"", "suitable consumer by walking, starting from the back, the list", "if hasattr(self._file, 'flush'): self._file.flush() class Path(object): \"\"\" log consumer that", "tuples if isinstance(keywords, str): keywords = tuple(filter(None, keywords.split())) elif hasattr(keywords,", "log = Logger( info=py.log.STDOUT, debug=py.log.STDOUT, command=None) log.info(\"hello\", \"world\") log.command(\"hello\", \"world\")", "and not py.builtin.callable(consumer): if not hasattr(consumer, 'write'): raise TypeError( \"%r", "(\":\".join(self.keywords)) def __str__(self): return self.prefix() + self.content() class Producer(object): \"\"\"", "that writes to sys.stdout \"\"\" sys.stdout.write(str(msg)+\"\\n\") def STDERR(msg): \"\"\" consumer", "= f def __call__(self, msg): \"\"\" write a message to", "the log \"\"\" self._file.write(str(msg) + \"\\n\") if hasattr(self._file, 'flush'): self._file.flush()", "Message(object): def __init__(self, keywords, args): self.keywords = keywords self.args =", "class Path(object): \"\"\" log consumer that opens and writes to", "\"LOG_\" + _prio try: setattr(Syslog, _prio, getattr(py.std.syslog, _prio)) except AttributeError:", "def getconsumer(self, keywords): \"\"\" return a consumer 
matching the given", "self.keywords2consumer[keywords] = consumer def default_consumer(msg): \"\"\" the default consumer, prints", "Used extensively by PyPy-1.1. \"\"\" Message = Message # to", "if isinstance(keywords, str): keywords = tuple(filter(None, keywords.split())) elif hasattr(keywords, '_keywords'):", "walking, starting from the back, the list of keywords, the", "def setconsumer(self, keywords, consumer): \"\"\" set a consumer for a", "self._filename = str(filename) self._buffering = buffering if not delayed_create: self._openfile()", "writes to sys.stdout \"\"\" sys.stdout.write(str(msg)+\"\\n\") def STDERR(msg): \"\"\" consumer that", "args): self.keywords = keywords self.args = args def content(self): return", "\":\".join(self._keywords) def __getattr__(self, name): if '_' in name: raise AttributeError(name)", "self._keywordmapper = keywordmapper def __repr__(self): return \"<py.log.Producer %s>\" % \":\".join(self._keywords)", "to sys.stdout \"\"\" sys.stdout.write(str(msg)+\"\\n\") def STDERR(msg): \"\"\" consumer that writes", "self._keywords = keywords if keywordmapper is None: keywordmapper = default_keywordmapper", "return self.prefix() + self.content() class Producer(object): \"\"\" (deprecated) Log producer", "given keywords. 
tries to find the most suitable consumer by", "priority = None): if priority is None: priority = self.LOG_INFO", "= keywords._keywords elif not isinstance(keywords, tuple): raise TypeError(\"key %r is", "default_keywordmapper = KeywordMapper() def setconsumer(keywords, consumer): default_keywordmapper.setconsumer(keywords, consumer) def setstate(state):", "object \"\"\" def __init__(self, f): assert hasattr(f, 'write') #assert isinstance(f,", "keywords = tuple(keywords.split()) self._keywords = keywords if keywordmapper is None:", "__init__(self): self.keywords2consumer = {} def getstate(self): return self.keywords2consumer.copy() def setstate(self,", "a message to the log \"\"\" self._file.write(str(msg) + \"\\n\") if", "of keywords. \"\"\" # normalize to tuples if isinstance(keywords, str):", "def __getattr__(self, name): if '_' in name: raise AttributeError(name) producer", "write a message to the log \"\"\" if not hasattr(self,", "self._file.flush() def STDOUT(msg): \"\"\" consumer that writes to sys.stdout \"\"\"", "f def __call__(self, msg): \"\"\" write a message to the", "is not None and not py.builtin.callable(consumer): if not hasattr(consumer, 'write'):", "which then prints them to stdout, stderr, files, etc. Used", "\"\"\" consumer that writes to the syslog daemon \"\"\" def", "(deprecated) Log producer API which sends messages to be logged", "tuple(filter(None, keywords.split())) elif hasattr(keywords, '_keywords'): keywords = keywords._keywords elif not", "self.prefix() + self.content() class Producer(object): \"\"\" (deprecated) Log producer API", "most suitable consumer by walking, starting from the back, the", "= append self._filename = str(filename) self._buffering = buffering if not", "basic logging functionality based on a producer/consumer scheme. 
XXX implement", "raise TypeError(\"key %r is not a string or tuple\" %", "def __init__(self, f): assert hasattr(f, 'write') #assert isinstance(f, file) or", "self.keywords2consumer.copy() def setstate(self, state): self.keywords2consumer.clear() self.keywords2consumer.update(state) def getconsumer(self, keywords): \"\"\"", "not None and not py.builtin.callable(consumer): if not hasattr(consumer, 'write'): raise", "or 'w' f = open(self._filename, mode) self._file = f def", "producer def __call__(self, *args): \"\"\" write a message to the", "__init__(self, priority = None): if priority is None: priority =", "str(msg)) for _prio in \"EMERG ALERT CRIT ERR WARNING NOTICE", "# Consumers # class File(object): \"\"\" log consumer wrapping a", "is None: keywordmapper = default_keywordmapper self._keywordmapper = keywordmapper def __repr__(self):", "keywords = tuple(filter(None, keywords.split())) elif hasattr(keywords, '_keywords'): keywords = keywords._keywords", "File(object): \"\"\" log consumer wrapping a file(-like) object \"\"\" def", "writes to a Path \"\"\" def __init__(self, filename, append=False, delayed_create=False,", "log \"\"\" self._file.write(str(msg) + \"\\n\") if hasattr(self._file, 'flush'): self._file.flush() class", "then prints them to stdout, stderr, files, etc. 
Used extensively", "self._file.flush() class Path(object): \"\"\" log consumer that opens and writes", "return default_keywordmapper.getstate() # # Consumers # class File(object): \"\"\" log", "Logger(info=Logger(something=...), debug=py.log.STDOUT, command=None) \"\"\" import py, sys class Message(object): def", "func(self.Message(self._keywords, args)) class KeywordMapper: def __init__(self): self.keywords2consumer = {} def", "daemon \"\"\" def __init__(self, priority = None): if priority is", "\"\"\" py.std.syslog.syslog(self.priority, str(msg)) for _prio in \"EMERG ALERT CRIT ERR", "and 'a' or 'w' f = open(self._filename, mode) self._file =", "write a message to the log \"\"\" self._file.write(str(msg) + \"\\n\")", "for a set of keywords. \"\"\" # normalize to tuples", "self._append = append self._filename = str(filename) self._buffering = buffering if", "producer) return producer def __call__(self, *args): \"\"\" write a message", "def __call__(self, *args): \"\"\" write a message to the appropriate", "to a Path \"\"\" def __init__(self, filename, append=False, delayed_create=False, buffering=False):", "Message # to allow later customization keywords2consumer = {} def", "str): keywords = tuple(filter(None, keywords.split())) elif hasattr(keywords, '_keywords'): keywords =", "writes to the syslog daemon \"\"\" def __init__(self, priority =", "writes to sys.stderr \"\"\" sys.stderr.write(str(msg)+\"\\n\") class Syslog: \"\"\" consumer that", "py.builtin.callable(consumer): if not hasattr(consumer, 'write'): raise TypeError( \"%r should be", "starting from the back, the list of keywords, the first", "\"\"\" return a consumer matching the given keywords. 
tries to", "append=False, delayed_create=False, buffering=False): self._append = append self._filename = str(filename) self._buffering", "NOTICE INFO DEBUG\".split(): _prio = \"LOG_\" + _prio try: setattr(Syslog,", "write a message to the log \"\"\" py.std.syslog.syslog(self.priority, str(msg)) for", "File(consumer) self.keywords2consumer[keywords] = consumer def default_consumer(msg): \"\"\" the default consumer,", "% (\":\".join(self.keywords)) def __str__(self): return self.prefix() + self.content() class Producer(object):", "content(self): return \" \".join(map(str, self.args)) def prefix(self): return \"[%s] \"", "= {} def getstate(self): return self.keywords2consumer.copy() def setstate(self, state): self.keywords2consumer.clear()", "which sends messages to be logged to a 'consumer' object,", "Syslog: \"\"\" consumer that writes to the syslog daemon \"\"\"", "__init__(self, keywords, keywordmapper=None, **kw): if hasattr(keywords, 'split'): keywords = tuple(keywords.split())", "range(len(keywords), 0, -1): try: return self.keywords2consumer[keywords[:i]] except KeyError: continue return", "returned (falling back to py.log.default) \"\"\" for i in range(len(keywords),", "if not delayed_create: self._openfile() def _openfile(self): mode = self._append and", "keywords self.args = args def content(self): return \" \".join(map(str, self.args))", "to the appropriate consumer(s) \"\"\" func = self._keywordmapper.getconsumer(self._keywords) if func", "def setconsumer(keywords, consumer): default_keywordmapper.setconsumer(keywords, consumer) def setstate(state): default_keywordmapper.setstate(state) def getstate():", "API which sends messages to be logged to a 'consumer'", "__call__(self, *args): \"\"\" write a message to the appropriate consumer(s)", "None): if priority is None: priority = self.LOG_INFO self.priority =", "(falling back to py.log.default) \"\"\" for i in range(len(keywords), 0,", "keywords.split())) elif hasattr(keywords, '_keywords'): keywords = 
keywords._keywords elif not isinstance(keywords,", "keywords if keywordmapper is None: keywordmapper = default_keywordmapper self._keywordmapper =", "hasattr(f, 'write') #assert isinstance(f, file) or not hasattr(f, 'open') self._file", "Logger( info=py.log.STDOUT, debug=py.log.STDOUT, command=None) log.info(\"hello\", \"world\") log.command(\"hello\", \"world\") log =", "class Message(object): def __init__(self, keywords, args): self.keywords = keywords self.args", "def getstate(): return default_keywordmapper.getstate() # # Consumers # class File(object):", "debug=py.log.STDOUT, command=None) \"\"\" import py, sys class Message(object): def __init__(self,", "(using 'print') \"\"\" sys.stderr.write(str(msg)+\"\\n\") default_keywordmapper = KeywordMapper() def setconsumer(keywords, consumer):", "to the log \"\"\" py.std.syslog.syslog(self.priority, str(msg)) for _prio in \"EMERG", "logged to a 'consumer' object, which then prints them to", "\"\"\" def __init__(self, priority = None): if priority is None:", "the given keywords. tries to find the most suitable consumer", "not self._buffering: self._file.flush() def STDOUT(msg): \"\"\" consumer that writes to", "sys.stderr \"\"\" sys.stderr.write(str(msg)+\"\\n\") class Syslog: \"\"\" consumer that writes to", "files, etc. Used extensively by PyPy-1.1. \"\"\" Message = Message", "\"\\n\") if not self._buffering: self._file.flush() def STDOUT(msg): \"\"\" consumer that", "0, -1): try: return self.keywords2consumer[keywords[:i]] except KeyError: continue return self.keywords2consumer.get('default',", "set a consumer for a set of keywords. 
\"\"\" #", "def __repr__(self): return \"<py.log.Producer %s>\" % \":\".join(self._keywords) def __getattr__(self, name):", "info=py.log.STDOUT, debug=py.log.STDOUT, command=None) log.info(\"hello\", \"world\") log.command(\"hello\", \"world\") log = Logger(info=Logger(something=...),", "assert hasattr(f, 'write') #assert isinstance(f, file) or not hasattr(f, 'open')", "elif not isinstance(keywords, tuple): raise TypeError(\"key %r is not a", "= default_keywordmapper self._keywordmapper = keywordmapper def __repr__(self): return \"<py.log.Producer %s>\"", "keywordmapper def __repr__(self): return \"<py.log.Producer %s>\" % \":\".join(self._keywords) def __getattr__(self,", "that writes to sys.stderr \"\"\" sys.stderr.write(str(msg)+\"\\n\") class Syslog: \"\"\" consumer", "hasattr(keywords, '_keywords'): keywords = keywords._keywords elif not isinstance(keywords, tuple): raise", "+ \"\\n\") if not self._buffering: self._file.flush() def STDOUT(msg): \"\"\" consumer", "raise TypeError( \"%r should be None, callable or file-like\" %", "if not self._buffering: self._file.flush() def STDOUT(msg): \"\"\" consumer that writes", "'flush'): self._file.flush() class Path(object): \"\"\" log consumer that opens and", "keywords, args): self.keywords = keywords self.args = args def content(self):", "appropriate consumer(s) \"\"\" func = self._keywordmapper.getconsumer(self._keywords) if func is not", "or file-like\" % (consumer,)) consumer = File(consumer) self.keywords2consumer[keywords] = consumer", "\"\"\" Message = Message # to allow later customization keywords2consumer", "a consumer matching the given keywords. tries to find the", "return \" \".join(map(str, self.args)) def prefix(self): return \"[%s] \" %", "functionality based on a producer/consumer scheme. 
XXX implement this API:", "STDOUT(msg): \"\"\" consumer that writes to sys.stdout \"\"\" sys.stdout.write(str(msg)+\"\\n\") def", "py.std.syslog.syslog(self.priority, str(msg)) for _prio in \"EMERG ALERT CRIT ERR WARNING", "the back, the list of keywords, the first consumer matching", "= open(self._filename, mode) self._file = f def __call__(self, msg): \"\"\"", "not hasattr(consumer, 'write'): raise TypeError( \"%r should be None, callable", "the appropriate consumer(s) \"\"\" func = self._keywordmapper.getconsumer(self._keywords) if func is", "command=None) log.info(\"hello\", \"world\") log.command(\"hello\", \"world\") log = Logger(info=Logger(something=...), debug=py.log.STDOUT, command=None)", "sys.stderr.write(str(msg)+\"\\n\") class Syslog: \"\"\" consumer that writes to the syslog", "+ _prio try: setattr(Syslog, _prio, getattr(py.std.syslog, _prio)) except AttributeError: pass", "# normalize to tuples if isinstance(keywords, str): keywords = tuple(filter(None,", "return self.keywords2consumer.get('default', default_consumer) def setconsumer(self, keywords, consumer): \"\"\" set a", "if priority is None: priority = self.LOG_INFO self.priority = priority", "def content(self): return \" \".join(map(str, self.args)) def prefix(self): return \"[%s]", "\"_file\"): self._openfile() self._file.write(str(msg) + \"\\n\") if not self._buffering: self._file.flush() def", "_openfile(self): mode = self._append and 'a' or 'w' f =", "in range(len(keywords), 0, -1): try: return self.keywords2consumer[keywords[:i]] except KeyError: continue", "= tuple(keywords.split()) self._keywords = keywords if keywordmapper is None: keywordmapper", "tuple\" % (keywords,)) if consumer is not None and not", "'open') self._file = f def __call__(self, msg): \"\"\" write a", "None: keywordmapper = default_keywordmapper self._keywordmapper = keywordmapper def __repr__(self): return", "self._buffering: self._file.flush() def STDOUT(msg): \"\"\" consumer that writes to sys.stdout", 
"Path(object): \"\"\" log consumer that opens and writes to a", "return a consumer matching the given keywords. tries to find", "a consumer for a set of keywords. \"\"\" # normalize", "prints the message to stdout (using 'print') \"\"\" sys.stderr.write(str(msg)+\"\\n\") default_keywordmapper", "to allow later customization keywords2consumer = {} def __init__(self, keywords,", "isinstance(f, file) or not hasattr(f, 'open') self._file = f def", "consumer by walking, starting from the back, the list of", "Producer(object): \"\"\" (deprecated) Log producer API which sends messages to" ]
[ "in phone_list_from_db: phone_list.append(merge_phones_like_on_home_page(phone)) email_list = [] #for email in email_liset_from_db:", "clear(s): return re.sub(\"[() -]\", \"\", s) def remove_spaces(s): return re.sub('", "return re.sub(\"[() -]\", \"\", s) def remove_spaces(s): return re.sub(' +',", "= [con.all_mail_from_home_page for con in contacts_from_home_page] assert phone_list == phones_from_home_page", "email in email_liset_from_db: # email_list.append(merge_mail_like_on_home_page(email)) contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max) phones_from_home_page", "contacts_from_db == contacts_from_home_page def clear(s): return re.sub(\"[() -]\", \"\", s)", "None, [contact.home_phone, contact.mobile_phone, contact.work_phone, contact.secondary_phone])))) def merge_email_like_on_home_page(contact): return \"\\n\".join(filter(lambda x:", "x is not None, [contact.home_phone, contact.mobile_phone, contact.work_phone, contact.secondary_phone])))) def merge_email_like_on_home_page(contact):", "x: clear(x), filter(lambda x: x is not None, [contact.home_phone, contact.mobile_phone,", "map(lambda x: remove_spaces(x), filter(lambda x: x is not None, [contact.email,", "in contacts_from_home_page] #emails_from_home_page = [con.all_mail_from_home_page for con in contacts_from_home_page] assert", "db): contacts_from_db = db.get_contact_list() phone_list_from_db = db.phones_from_db() #email_liset_from_db = db.emails_from_db()", "#emails_from_home_page = [con.all_mail_from_home_page for con in contacts_from_home_page] assert phone_list ==", "db.emails_from_db() phone_list = [] for phone in phone_list_from_db: phone_list.append(merge_phones_like_on_home_page(phone)) email_list", "import re from model.contact import Contact def test_all_contacts(app, db): contacts_from_db", "return \"\\n\".join(filter(lambda x: x != \"\", map(lambda x: remove_spaces(x), filter(lambda", "contacts_from_home_page] assert phone_list == phones_from_home_page 
#assert email_list == emails_from_home_page assert", "test_all_contacts(app, db): contacts_from_db = db.get_contact_list() phone_list_from_db = db.phones_from_db() #email_liset_from_db =", "assert contacts_from_db == contacts_from_home_page def clear(s): return re.sub(\"[() -]\", \"\",", "+', ' ', s).rstrip() def merge_phones_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x", "is not None, [contact.home_phone, contact.mobile_phone, contact.work_phone, contact.secondary_phone])))) def merge_email_like_on_home_page(contact): return", "phone_list_from_db = db.phones_from_db() #email_liset_from_db = db.emails_from_db() phone_list = [] for", "' ', s).rstrip() def merge_phones_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x !=", "con in contacts_from_home_page] assert phone_list == phones_from_home_page #assert email_list ==", "!= \"\", map(lambda x: remove_spaces(x), filter(lambda x: x is not", "contacts_from_home_page def clear(s): return re.sub(\"[() -]\", \"\", s) def remove_spaces(s):", "db.phones_from_db() #email_liset_from_db = db.emails_from_db() phone_list = [] for phone in", "email_list.append(merge_mail_like_on_home_page(email)) contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max) phones_from_home_page = [con.all_phones_from_home_page for con", "s).rstrip() def merge_phones_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x != \"\", map(lambda", "\"\\n\".join(filter(lambda x: x != \"\", map(lambda x: remove_spaces(x), filter(lambda x:", "\"\", map(lambda x: clear(x), filter(lambda x: x is not None,", "== emails_from_home_page assert contacts_from_db == contacts_from_home_page def clear(s): return re.sub(\"[()", "return re.sub(' +', ' ', s).rstrip() def merge_phones_like_on_home_page(contact): return \"\\n\".join(filter(lambda", "re.sub(\"[() -]\", \"\", s) def remove_spaces(s): return re.sub(' +', '", "remove_spaces(s): return re.sub(' +', ' ', s).rstrip() def 
merge_phones_like_on_home_page(contact): return", "Contact def test_all_contacts(app, db): contacts_from_db = db.get_contact_list() phone_list_from_db = db.phones_from_db()", "[] for phone in phone_list_from_db: phone_list.append(merge_phones_like_on_home_page(phone)) email_list = [] #for", "== contacts_from_home_page def clear(s): return re.sub(\"[() -]\", \"\", s) def", "remove_spaces(x), filter(lambda x: x is not None, [contact.email, contact.email2, contact.email3]))))", "re from model.contact import Contact def test_all_contacts(app, db): contacts_from_db =", "[] #for email in email_liset_from_db: # email_list.append(merge_mail_like_on_home_page(email)) contacts_from_home_page = sorted(app.contact.get_contact_list(),", "phone_list.append(merge_phones_like_on_home_page(phone)) email_list = [] #for email in email_liset_from_db: # email_list.append(merge_mail_like_on_home_page(email))", "for con in contacts_from_home_page] assert phone_list == phones_from_home_page #assert email_list", "#for email in email_liset_from_db: # email_list.append(merge_mail_like_on_home_page(email)) contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)", "= [] #for email in email_liset_from_db: # email_list.append(merge_mail_like_on_home_page(email)) contacts_from_home_page =", "= sorted(app.contact.get_contact_list(), key=Contact.id_or_max) phones_from_home_page = [con.all_phones_from_home_page for con in contacts_from_home_page]", "x: x is not None, [contact.home_phone, contact.mobile_phone, contact.work_phone, contact.secondary_phone])))) def", "from model.contact import Contact def test_all_contacts(app, db): contacts_from_db = db.get_contact_list()", "= [] for phone in phone_list_from_db: phone_list.append(merge_phones_like_on_home_page(phone)) email_list = []", "contact.secondary_phone])))) def merge_email_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x != \"\", map(lambda", "db.get_contact_list() phone_list_from_db = 
db.phones_from_db() #email_liset_from_db = db.emails_from_db() phone_list = []", "contact.work_phone, contact.secondary_phone])))) def merge_email_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x != \"\",", "phone_list = [] for phone in phone_list_from_db: phone_list.append(merge_phones_like_on_home_page(phone)) email_list =", "= [con.all_phones_from_home_page for con in contacts_from_home_page] #emails_from_home_page = [con.all_mail_from_home_page for", "filter(lambda x: x is not None, [contact.home_phone, contact.mobile_phone, contact.work_phone, contact.secondary_phone]))))", "emails_from_home_page assert contacts_from_db == contacts_from_home_page def clear(s): return re.sub(\"[() -]\",", "email_liset_from_db: # email_list.append(merge_mail_like_on_home_page(email)) contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max) phones_from_home_page = [con.all_phones_from_home_page", "merge_email_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x != \"\", map(lambda x: remove_spaces(x),", "con in contacts_from_home_page] #emails_from_home_page = [con.all_mail_from_home_page for con in contacts_from_home_page]", "= db.get_contact_list() phone_list_from_db = db.phones_from_db() #email_liset_from_db = db.emails_from_db() phone_list =", "contacts_from_db = db.get_contact_list() phone_list_from_db = db.phones_from_db() #email_liset_from_db = db.emails_from_db() phone_list", "-]\", \"\", s) def remove_spaces(s): return re.sub(' +', ' ',", "#email_liset_from_db = db.emails_from_db() phone_list = [] for phone in phone_list_from_db:", "#assert email_list == emails_from_home_page assert contacts_from_db == contacts_from_home_page def clear(s):", "\"\\n\".join(filter(lambda x: x != \"\", map(lambda x: clear(x), filter(lambda x:", "clear(x), filter(lambda x: x is not None, [contact.home_phone, contact.mobile_phone, contact.work_phone,", "= db.emails_from_db() phone_list = [] for phone in phone_list_from_db: 
phone_list.append(merge_phones_like_on_home_page(phone))", "', s).rstrip() def merge_phones_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x != \"\",", "phone_list == phones_from_home_page #assert email_list == emails_from_home_page assert contacts_from_db ==", "re.sub(' +', ' ', s).rstrip() def merge_phones_like_on_home_page(contact): return \"\\n\".join(filter(lambda x:", "email_list == emails_from_home_page assert contacts_from_db == contacts_from_home_page def clear(s): return", "x: x != \"\", map(lambda x: remove_spaces(x), filter(lambda x: x", "model.contact import Contact def test_all_contacts(app, db): contacts_from_db = db.get_contact_list() phone_list_from_db", "merge_phones_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x != \"\", map(lambda x: clear(x),", "def clear(s): return re.sub(\"[() -]\", \"\", s) def remove_spaces(s): return", "sorted(app.contact.get_contact_list(), key=Contact.id_or_max) phones_from_home_page = [con.all_phones_from_home_page for con in contacts_from_home_page] #emails_from_home_page", "key=Contact.id_or_max) phones_from_home_page = [con.all_phones_from_home_page for con in contacts_from_home_page] #emails_from_home_page =", "email_list = [] #for email in email_liset_from_db: # email_list.append(merge_mail_like_on_home_page(email)) contacts_from_home_page", "[con.all_phones_from_home_page for con in contacts_from_home_page] #emails_from_home_page = [con.all_mail_from_home_page for con", "[contact.home_phone, contact.mobile_phone, contact.work_phone, contact.secondary_phone])))) def merge_email_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x", "return \"\\n\".join(filter(lambda x: x != \"\", map(lambda x: clear(x), filter(lambda", "def test_all_contacts(app, db): contacts_from_db = db.get_contact_list() phone_list_from_db = db.phones_from_db() #email_liset_from_db", "phone_list_from_db: phone_list.append(merge_phones_like_on_home_page(phone)) email_list = [] #for email in 
email_liset_from_db: #", "def merge_email_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x != \"\", map(lambda x:", "= db.phones_from_db() #email_liset_from_db = db.emails_from_db() phone_list = [] for phone", "assert phone_list == phones_from_home_page #assert email_list == emails_from_home_page assert contacts_from_db", "s) def remove_spaces(s): return re.sub(' +', ' ', s).rstrip() def", "def merge_phones_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x != \"\", map(lambda x:", "phone in phone_list_from_db: phone_list.append(merge_phones_like_on_home_page(phone)) email_list = [] #for email in", "for con in contacts_from_home_page] #emails_from_home_page = [con.all_mail_from_home_page for con in", "phones_from_home_page #assert email_list == emails_from_home_page assert contacts_from_db == contacts_from_home_page def", "# email_list.append(merge_mail_like_on_home_page(email)) contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max) phones_from_home_page = [con.all_phones_from_home_page for", "not None, [contact.home_phone, contact.mobile_phone, contact.work_phone, contact.secondary_phone])))) def merge_email_like_on_home_page(contact): return \"\\n\".join(filter(lambda", "def remove_spaces(s): return re.sub(' +', ' ', s).rstrip() def merge_phones_like_on_home_page(contact):", "== phones_from_home_page #assert email_list == emails_from_home_page assert contacts_from_db == contacts_from_home_page", "!= \"\", map(lambda x: clear(x), filter(lambda x: x is not", "\"\", map(lambda x: remove_spaces(x), filter(lambda x: x is not None,", "phones_from_home_page = [con.all_phones_from_home_page for con in contacts_from_home_page] #emails_from_home_page = [con.all_mail_from_home_page", "x != \"\", map(lambda x: clear(x), filter(lambda x: x is", "contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max) phones_from_home_page = [con.all_phones_from_home_page for con in", "x: 
remove_spaces(x), filter(lambda x: x is not None, [contact.email, contact.email2,", "x != \"\", map(lambda x: remove_spaces(x), filter(lambda x: x is", "map(lambda x: clear(x), filter(lambda x: x is not None, [contact.home_phone,", "for phone in phone_list_from_db: phone_list.append(merge_phones_like_on_home_page(phone)) email_list = [] #for email", "in contacts_from_home_page] assert phone_list == phones_from_home_page #assert email_list == emails_from_home_page", "x: x != \"\", map(lambda x: clear(x), filter(lambda x: x", "import Contact def test_all_contacts(app, db): contacts_from_db = db.get_contact_list() phone_list_from_db =", "in email_liset_from_db: # email_list.append(merge_mail_like_on_home_page(email)) contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max) phones_from_home_page =", "contact.mobile_phone, contact.work_phone, contact.secondary_phone])))) def merge_email_like_on_home_page(contact): return \"\\n\".join(filter(lambda x: x !=", "contacts_from_home_page] #emails_from_home_page = [con.all_mail_from_home_page for con in contacts_from_home_page] assert phone_list", "[con.all_mail_from_home_page for con in contacts_from_home_page] assert phone_list == phones_from_home_page #assert", "\"\", s) def remove_spaces(s): return re.sub(' +', ' ', s).rstrip()" ]
[ "= [ ('Run Pymodel Graphics to generate dot file from", "dot file from FSM model, no need use pma', 'pmg", "'pmg ABPFSM'), ('Generate an svg file from the graphics commands',", "from FSM model, no need use pma', 'pmg ABP'), ('Generate", "commands', 'dotsvg ABPFSM'), # Now display ABPFSM.svg in browser, should", "browser ('Run PyModel Analyzer to generate FSM from original FSM,", "Graphics to generate dot file from FSM model, no need", "graphics commands', 'dotsvg ABPFSM'), # Now display ABPFSM.svg in browser,", "no need use pma', 'pmg ABP'), ('Generate SVG file from", "the graphics commands', 'dotsvg ABPFSM'), # Now display ABPFSM.svg in", "Analyzer to generate FSM from original FSM, should be the", "generate a file of graphics commands from new FSM', 'pmg", "file of graphics commands from new FSM', 'pmg ABPFSM'), ('Generate", "file from FSM model, no need use pma', 'pmg ABP'),", "be the same', 'pma ABP'), ('Run PyModel Graphics to generate", "display ABPFSM.svg in browser, should look the same as ABP.svg", "model, no need use pma', 'pmg ABP'), ('Generate SVG file", "generate FSM from original FSM, should be the same', 'pma", "svg file from the graphics commands', 'dotsvg ABPFSM'), # Now", "ABP'), ('Generate SVG file from dot', 'dotsvg ABP'), # Now", "FSM', 'pmg ABPFSM'), ('Generate an svg file from the graphics", "in browser ('Run PyModel Analyzer to generate FSM from original", "[ ('Run Pymodel Graphics to generate dot file from FSM", "SVG file from dot', 'dotsvg ABP'), # Now display ABP.dot", "# Now display ABPFSM.svg in browser, should look the same", "cases = [ ('Run Pymodel Graphics to generate dot file", "a file of graphics commands from new FSM', 'pmg ABPFSM'),", "analyzer and graphics tests \"\"\" cases = [ ('Run Pymodel", "commands from new FSM', 'pmg ABPFSM'), ('Generate an svg file", "\"\"\" ABP analyzer and graphics tests \"\"\" cases = [", "ABPFSM'), # Now display ABPFSM.svg in browser, should look the", "graphics tests \"\"\" cases = [ ('Run 
Pymodel Graphics to", "graphics commands from new FSM', 'pmg ABPFSM'), ('Generate an svg", "the same', 'pma ABP'), ('Run PyModel Graphics to generate a", "from dot', 'dotsvg ABP'), # Now display ABP.dot in browser", "to generate a file of graphics commands from new FSM',", "ABP analyzer and graphics tests \"\"\" cases = [ ('Run", "('Generate SVG file from dot', 'dotsvg ABP'), # Now display", "ABPFSM.svg in browser, should look the same as ABP.svg ]", "FSM model, no need use pma', 'pmg ABP'), ('Generate SVG", "# Now display ABP.dot in browser ('Run PyModel Analyzer to", "from original FSM, should be the same', 'pma ABP'), ('Run", "'dotsvg ABPFSM'), # Now display ABPFSM.svg in browser, should look", "'pmg ABP'), ('Generate SVG file from dot', 'dotsvg ABP'), #", "PyModel Analyzer to generate FSM from original FSM, should be", "to generate FSM from original FSM, should be the same',", "file from dot', 'dotsvg ABP'), # Now display ABP.dot in", "('Generate an svg file from the graphics commands', 'dotsvg ABPFSM'),", "FSM, should be the same', 'pma ABP'), ('Run PyModel Graphics", "display ABP.dot in browser ('Run PyModel Analyzer to generate FSM", "from new FSM', 'pmg ABPFSM'), ('Generate an svg file from", "PyModel Graphics to generate a file of graphics commands from", "'dotsvg ABP'), # Now display ABP.dot in browser ('Run PyModel", "should be the same', 'pma ABP'), ('Run PyModel Graphics to", "an svg file from the graphics commands', 'dotsvg ABPFSM'), #", "('Run PyModel Graphics to generate a file of graphics commands", "ABP'), ('Run PyModel Graphics to generate a file of graphics", "('Run Pymodel Graphics to generate dot file from FSM model,", "FSM from original FSM, should be the same', 'pma ABP'),", "dot', 'dotsvg ABP'), # Now display ABP.dot in browser ('Run", "\"\"\" cases = [ ('Run Pymodel Graphics to generate dot", "generate dot file from FSM model, no need use pma',", "('Run PyModel Analyzer to generate FSM from original FSM, should", "of graphics commands from 
new FSM', 'pmg ABPFSM'), ('Generate an", "new FSM', 'pmg ABPFSM'), ('Generate an svg file from the", "ABP.dot in browser ('Run PyModel Analyzer to generate FSM from", "need use pma', 'pmg ABP'), ('Generate SVG file from dot',", "'pma ABP'), ('Run PyModel Graphics to generate a file of", "Graphics to generate a file of graphics commands from new", "Pymodel Graphics to generate dot file from FSM model, no", "and graphics tests \"\"\" cases = [ ('Run Pymodel Graphics", "original FSM, should be the same', 'pma ABP'), ('Run PyModel", "to generate dot file from FSM model, no need use", "Now display ABPFSM.svg in browser, should look the same as", "pma', 'pmg ABP'), ('Generate SVG file from dot', 'dotsvg ABP'),", "same', 'pma ABP'), ('Run PyModel Graphics to generate a file", "<gh_stars>10-100 \"\"\" ABP analyzer and graphics tests \"\"\" cases =", "Now display ABP.dot in browser ('Run PyModel Analyzer to generate", "ABPFSM'), ('Generate an svg file from the graphics commands', 'dotsvg", "ABP'), # Now display ABP.dot in browser ('Run PyModel Analyzer", "use pma', 'pmg ABP'), ('Generate SVG file from dot', 'dotsvg", "file from the graphics commands', 'dotsvg ABPFSM'), # Now display", "from the graphics commands', 'dotsvg ABPFSM'), # Now display ABPFSM.svg", "tests \"\"\" cases = [ ('Run Pymodel Graphics to generate" ]
[ "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "Django 3.1.2 on 2020-10-26 12:21 from django.db import migrations, models", "model_name='game', name='score', field=models.FloatField(null=True, verbose_name='Score'), ), migrations.AlterField( model_name='game', name='series', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "# Generated by Django 3.1.2 on 2020-10-26 12:21 from django.db", "verbose_name='Score'), ), migrations.AlterField( model_name='game', name='series', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='games.series'), ), ]", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('games',", "name='score', field=models.FloatField(null=True, verbose_name='Score'), ), migrations.AlterField( model_name='game', name='series', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='games.series'),", "on 2020-10-26 12:21 from django.db import migrations, models import django.db.models.deletion", "django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('games', '0001_initial'), ] operations", "Generated by Django 3.1.2 on 2020-10-26 12:21 from django.db import", "migrations.AlterField( model_name='game', name='score', field=models.FloatField(null=True, verbose_name='Score'), ), migrations.AlterField( model_name='game', name='series', field=models.ForeignKey(null=True,", "Migration(migrations.Migration): dependencies = [ ('games', '0001_initial'), ] operations = [", "= [ migrations.AlterField( model_name='game', name='score', field=models.FloatField(null=True, verbose_name='Score'), ), migrations.AlterField( model_name='game',", "<filename>games/migrations/0002_auto_20201026_1221.py # Generated by Django 3.1.2 on 2020-10-26 12:21 from", "12:21 from django.db import migrations, models import 
django.db.models.deletion class Migration(migrations.Migration):", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('games', '0001_initial'),", "by Django 3.1.2 on 2020-10-26 12:21 from django.db import migrations,", "[ ('games', '0001_initial'), ] operations = [ migrations.AlterField( model_name='game', name='score',", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "dependencies = [ ('games', '0001_initial'), ] operations = [ migrations.AlterField(", "2020-10-26 12:21 from django.db import migrations, models import django.db.models.deletion class", "[ migrations.AlterField( model_name='game', name='score', field=models.FloatField(null=True, verbose_name='Score'), ), migrations.AlterField( model_name='game', name='series',", "'0001_initial'), ] operations = [ migrations.AlterField( model_name='game', name='score', field=models.FloatField(null=True, verbose_name='Score'),", "class Migration(migrations.Migration): dependencies = [ ('games', '0001_initial'), ] operations =", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('games', '0001_initial'), ]", "field=models.FloatField(null=True, verbose_name='Score'), ), migrations.AlterField( model_name='game', name='series', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='games.series'), ),", "operations = [ migrations.AlterField( model_name='game', name='score', field=models.FloatField(null=True, verbose_name='Score'), ), migrations.AlterField(", "3.1.2 on 2020-10-26 12:21 from django.db import migrations, models import", "('games', '0001_initial'), ] operations = [ migrations.AlterField( model_name='game', name='score', field=models.FloatField(null=True,", "= [ ('games', '0001_initial'), ] operations = [ 
migrations.AlterField( model_name='game',", "] operations = [ migrations.AlterField( model_name='game', name='score', field=models.FloatField(null=True, verbose_name='Score'), )," ]
[ "function is supposed to look (1 == direct callee, 2", "__version__ = '0.4.0' import os,types,sys,re,imp,__builtin__ import mx.Tools.NewBuiltins # RE to", "given namespaces and returns list of corresponding module objects. If", "module plus all subdirectories having an __init__.py file. The modules", "len(match) mods = [sys.modules[pkgname]] for k,v in sys.modules.items(): if k[:match_len]", "pkgbasename='', pkgdict=None, isdir=os.path.isdir,exists=os.path.exists, isfile=os.path.isfile,join=os.path.join,listdir=os.listdir, module_name=module_name,initmodule_name=initmodule_name): \"\"\" Return a list of", "look (1 == direct callee, 2 == callee of callee,", "globals, from_list) if reload and not is_new: mod = reloader(mod)", "modules(names, extract=extract): \"\"\" Converts a list of module names into", "submodules found into module. The module location is found by", "PackageTools - A set of tools to aid working with", "= 1 return pkgdict.keys() def find_subpackages(package, recursive=0, splitpath=os.path.split): \"\"\" Assuming", "mods: for name,obj in mod.__dict__.items(): if type(obj) is ClassType: if", "name = frame.f_globals['__name__'] del frame return sys.modules[name] def _module_loader(name, locals,", "globals, sysmods, errors='strict', importer=__import__, reloader=reload, from_list=['*']): \"\"\" Internal API for", "return None def import_modules(modnames,module=None,errors='strict',reload=0, thismodule=_thismodule): \"\"\" Import all modules given", "name,obj in mod.__dict__.items(): if isinstance(obj,baseclass): if annotated: instances.append((mod,name,obj)) else: instances.append(obj)", "return pkgdict.keys() def find_subpackages(package, recursive=0, splitpath=os.path.split): \"\"\" Assuming that package", "frame = exc_info()[2].tb_frame for i in trange(upcount): frame = frame.f_back", "= 1 if recursive: find_packages(path, recursive=1, pkgbasename=pkgname + '.', pkgdict=pkgdict)", "def import_subpackages(module, reload=0, recursive=0, 
import_modules=import_modules, find_subpackages=find_subpackages): \"\"\" Does a subpackages", "points to a loaded package module, this function tries to", "XXX Recursive search does not support the __all__ attribute subpackages", "identify Python modules suffixes = projection(imp.get_suffixes(),0) module_name = re.compile('(.*)(' +", "= module.__dict__ sysmods = sys.modules for name in modnames: mod", "if mod is not None: locals[name] = mod def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0):", "documentation for further information on copyrights, or contact the author.", "mod is not None: locals[name] = mod def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0): \"\"\"", "to reload. \"\"\" modules = [] append = modules.append sysmods", "package with the given name. The package must already be", "for mod in mods: for name,obj in mod.__dict__.items(): if type(obj)", "the modules and also returns a list of module objects.", "join(dir, filename) if isdir(path): # Check for __init__ module(s) for", "locals, locals, sysmods, errors=errors) if mod is not None: locals[name]", "extract=extract): \"\"\" Converts a list of module names into a", "arguments (modname, errorclass, errorvalue). If the handler returns, processing continues.", "reload. \"\"\" import_modules(find_subpackages(module, recursive=recursive), module, reload=reload) def load_subpackages(module, locals=None, globals=None,", "the returned list will contain tuples (module_object,name,instances_object) for each instances", "for name,obj in mod.__dict__.items(): if type(obj) is ClassType: if baseclass", "processing continues. If reload is true (default is false), all", "must already be loaded. Only the currently registered modules are", "for name in modnames: mod = _module_loader(name, locals, globals, sysmods,", "silently ignored. 
If reload is true (default is false), all", "pass elif errors == 'strict': raise elif callable(errors): errors(name, sys.exc_info()[0],", "recursive=1, pkgbasename=pkgname + '.', pkgdict=pkgdict) break elif not ignore_modules: m", "for filename in l: m = module_name.match(filename) if m is", "(default is false), all already modules among the list will", "module. modnames may contain dotted package names. If errors is", "forced to reload. \"\"\" modules = [] append = modules.append", "subdirectories having an __init__.py file. The modules name is prepended", "reload. \"\"\" return load_modules(find_subpackages(module, recursive=recursive), locals, globals, errors=errors, reload=reload) def", "None def import_modules(modnames,module=None,errors='strict',reload=0, thismodule=_thismodule): \"\"\" Import all modules given in", "\"\"\" classes = [] for mod in mods: for name,obj", "is the module where the class is defined. \"\"\" classes", "only used during recursion. \"\"\" l = listdir(dir) if pkgdict", "recursive=recursive) else: # XXX Recursive search does not support the", "package. Subpackages are all Python files included in the same", "'__init__': pkgdict[pkgbasename + m.group(1)] = 1 return pkgdict.keys() def find_subpackages(package,", "basename + name return subpackages def _thismodule(upcount=1, exc_info=sys.exc_info,trange=trange): \"\"\" Returns", "reloader=reload, from_list=['*']): \"\"\" Internal API for loading a module \"\"\"", "def load_subpackages(module, locals=None, globals=None, errors='strict', reload=0, recursive=0, load_modules=load_modules, find_subpackages=find_subpackages): \"\"\"", "pkgdict.keys() def find_subpackages(package, recursive=0, splitpath=os.path.split): \"\"\" Assuming that package points", "Internal API for loading a module \"\"\" if not sysmods.has_key(name):", "of module names into a list of module objects. 
The", "1/0 except: frame = exc_info()[2].tb_frame for i in trange(upcount): frame", "but with load_modules functionality, i.e. imports the modules and also", "recursive is true (default is false), then subpackages of subpackages", "__init__ module if available. If recursive is true (default is", "also returns a list of module objects. If errors is", "= 0 try: mod = importer(name, locals, globals, from_list) if", "errors is a callable object, then it is called with", "module(s) for name in initmodule_names: if isfile(join(path, name)): pkgname =", "(module_object,name,class_object) for each class found where module_object is the module", "pkgdict=None, isdir=os.path.isdir,exists=os.path.exists, isfile=os.path.isfile,join=os.path.join,listdir=os.listdir, module_name=module_name,initmodule_name=initmodule_name): \"\"\" Return a list of package", "'strict' (default), then ImportErrors and SyntaxErrors are raised. If set", "Converts a list of module names into a list of", "all modules belonging to the package with the given name.", "del frame return sys.modules[name] def _module_loader(name, locals, globals, sysmods, errors='strict',", "ignore_modules=0, pkgbasename='', pkgdict=None, isdir=os.path.isdir,exists=os.path.exists, isfile=os.path.isfile,join=os.path.join,listdir=os.listdir, module_name=module_name,initmodule_name=initmodule_name): \"\"\" Return a list", "and SyntaxErrors are raised. If set to 'ignore', they are", "imports the modules and also returns a list of module", "Python modules suffixes = projection(imp.get_suffixes(),0) module_name = re.compile('(.*)(' + '|'.join(suffixes)", "_thismodule(2) locals = module.__dict__ sysmods = sys.modules for name in", "are included in the search (subdirectories are *not* taken into", "\"\"\" Return a list of package names found in dir.", "module = _thismodule(2) locals = module.__dict__ sysmods = sys.modules for", "be forced to reload. 
\"\"\" import_modules(find_subpackages(module, recursive=recursive), module, reload=reload) def", "author. All Rights Reserved. \"\"\" __version__ = '0.4.0' import os,types,sys,re,imp,__builtin__", "load_modules functionality, i.e. imports the modules and also returns a", "= _thismodule(2) locals = module.__dict__ sysmods = sys.modules for name", "\"\"\" PackageTools - A set of tools to aid working", "Did not work, then let's try to find the subpackages", "errors == 'strict': raise elif callable(errors): errors(name, sys.exc_info()[0], sys.exc_info()[1]) else:", "re.compile('__init__(' + '|'.join(suffixes) + ')$') initmodule_names = [] for suffix", "further information on copyrights, or contact the author. All Rights", "returned list will contain tuples (module_object,name,class_object) for each class found", "recursive: # Try the __all__ attribute... try: subpackages = list(package.__all__)", "_thismodule(upcount=1, exc_info=sys.exc_info,trange=trange): \"\"\" Returns the module object that the callee", "for k,v in sys.modules.items(): if k[:match_len] == match and v", "is false), all already modules among the list will be", "is true the returned list will contain tuples (module_object,name,class_object) for", "modules def import_subpackages(module, reload=0, recursive=0, import_modules=import_modules, find_subpackages=find_subpackages): \"\"\" Does a", "support the __all__ attribute subpackages = find_packages(package.__path__[0], recursive=recursive) basename =", "is defined. \"\"\" instances = [] for mod in mods:", "up the execution stack the function is supposed to look", "<NAME>; mailto:<EMAIL> Copyright (c) 2000-2015, eGenix.com Software GmbH; mailto:<EMAIL> See", "pkgdict=pkgdict) break elif not ignore_modules: m = module_name.match(filename) if m", "# RE to identify Python modules suffixes = projection(imp.get_suffixes(),0) module_name", "search recurses into package directories. pkgbasename and pkgdict are only", "available. 
If reload is true (default is false), all already", "each instances found where module_object is the module where the", "if mod is not None: append(mod) return modules def import_subpackages(module,", "mod = reloader(mod) except KeyboardInterrupt: # Pass through; SystemExit will", "module, this function tries to identify all subpackages of that", "\"\"\" instances = [] for mod in mods: for name,obj", "by looking at the __file__ attribute that non-builtin modules define.", "is defined. \"\"\" classes = [] for mod in mods:", "list will be forced to reload. \"\"\" import_modules(find_subpackages(module, recursive=recursive), module,", "Rights Reserved. \"\"\" __version__ = '0.4.0' import os,types,sys,re,imp,__builtin__ import mx.Tools.NewBuiltins", "to reload. \"\"\" if module is None: module = _thismodule(2)", "is 'strict' (default), then ImportErrors and SyntaxErrors are raised. If", "a list of all modules belonging to the package with", "not work, then let's try to find the subpackages by", "true (default is false), then subpackages of subpackages are recursively", "the package with the given name. 
The package must already", "for i in trange(upcount): frame = frame.f_back name = frame.f_globals['__name__']", "except Exception, why: if errors == 'ignore': pass elif errors", "module_name = re.compile('(.*)(' + '|'.join(suffixes) + ')$') initmodule_name = re.compile('__init__('", "annotated is true the returned list will contain tuples (module_object,name,instances_object)", "= [] for suffix in suffixes: initmodule_names.append('__init__' + suffix) def", "classes def find_instances(mods,baseclass,annotated=0, InstanceType=types.InstanceType,issubclass=issubclass): \"\"\" Find all instances of baseclass", "recursive=0, ignore_modules=0, pkgbasename='', pkgdict=None, isdir=os.path.isdir,exists=os.path.exists, isfile=os.path.isfile,join=os.path.join,listdir=os.listdir, module_name=module_name,initmodule_name=initmodule_name): \"\"\" Return a", "in l: m = module_name.match(filename) if m is not None", "pkgbasename + filename pkgdict[pkgname] = 1 if recursive: find_packages(path, recursive=1,", "attribute... try: subpackages = list(package.__all__) except (ImportError, AttributeError): # Did", "object, then it is called with arguments (modname, errorclass, errorvalue).", "package names found in dir. Packages are Python modules and", "find the subpackages by looking # at the directory where", "package.__name__ + '.' for i,name in irange(subpackages): subpackages[i] = basename", "modules = [] append = modules.append sysmods = sys.modules for", "lives... subpackages = find_packages(package.__path__[0], recursive=recursive) else: # XXX Recursive search", "be forced to reload. \"\"\" return load_modules(find_subpackages(module, recursive=recursive), locals, globals,", "be forced to reload. \"\"\" if module is None: module", "in list mods. If annotated is true the returned list", "attribute from the package __init__ module if available. If reload", "are *not* taken into account). 
If ignore_modules is true (default", "the documentation for further information on copyrights, or contact the", "package directories. pkgbasename and pkgdict are only used during recursion.", "= modules.append sysmods = sys.modules for name in modnames: mod", "module_name=module_name,initmodule_name=initmodule_name): \"\"\" Return a list of package names found in", "that package. Subpackages are all Python files included in the", "are only used during recursion. \"\"\" l = listdir(dir) if", "m is not None and \\ m.group(1) != '__init__': pkgdict[pkgbasename", "Try the __all__ attribute... try: subpackages = list(package.__all__) except (ImportError,", "def _thismodule(upcount=1, exc_info=sys.exc_info,trange=trange): \"\"\" Returns the module object that the", "elif errors == 'strict': raise elif callable(errors): errors(name, sys.exc_info()[0], sys.exc_info()[1])", "(c) 2000-2015, eGenix.com Software GmbH; mailto:<EMAIL> See the documentation for", "the package __init__ module if available. If recursive is true", "in modnames: mod = _module_loader(name, locals, globals, sysmods, errors=errors) if", "each class found where module_object is the module where the", "Recursive search does not support the __all__ attribute subpackages =", "Return a list of package names found in dir. Packages", "false), then subpackages of subpackages are recursively also included in", "taken into account). If ignore_modules is true (default is false),", "with load_modules functionality, i.e. imports the modules and also returns", "also included in the search. \"\"\" if not recursive: #", "in mods: for name,obj in mod.__dict__.items(): if isinstance(obj,baseclass): if annotated:", "[sys.modules[pkgname]] for k,v in sys.modules.items(): if k[:match_len] == match and", "= [sys.modules[pkgname]] for k,v in sys.modules.items(): if k[:match_len] == match", "subpackages are recursively also included in the search. 
\"\"\" if", "are all Python files included in the same directory as", "are ignored. If recursive is true the search recurses into", "k[:match_len] == match and v is not None: mods.append(v) return", "class found where module_object is the module where the class", "they are silently ignored. If errors is a callable object,", "mod = _module_loader(name, locals, globals, sysmods, errors=errors) if mod is", "Does a subpackages scan using find_subpackages(module) and then imports all", "instances is defined. \"\"\" instances = [] for mod in", "If errors is 'strict' (default), then ImportErrors and SyntaxErrors are", "in mod.__dict__.items(): if type(obj) is ClassType: if baseclass and not", "how far up the execution stack the function is supposed", "InstanceType=types.InstanceType,issubclass=issubclass): \"\"\" Find all instances of baseclass defined by the", "not None: locals[name] = mod def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0): \"\"\" Imports all", "None: locals[name] = mod def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0): \"\"\" Imports all modules", "module, reload=reload) def load_subpackages(module, locals=None, globals=None, errors='strict', reload=0, recursive=0, load_modules=load_modules,", "v is not None: mods.append(v) return mods def find_classes(mods,baseclass=None,annotated=0, ClassType=types.ClassType,issubclass=issubclass):", "(default is false), then subpackages of subpackages are recursively also", "the module object that the callee is calling from. 
upcount", "in mods: for name,obj in mod.__dict__.items(): if type(obj) is ClassType:", "subpackages of subpackages are recursively also included in the search.", "mod in mods: for name,obj in mod.__dict__.items(): if isinstance(obj,baseclass): if", "Find all instances of baseclass defined by the module objects", "names) def package_modules(pkgname): \"\"\" Returns a list of all modules", "is not None: append(mod) return modules def import_subpackages(module, reload=0, recursive=0,", "baseclass is None) defined by the module objects in list", "modnames using the given namespaces and returns list of corresponding", "name return subpackages def _thismodule(upcount=1, exc_info=sys.exc_info,trange=trange): \"\"\" Returns the module", "modules.append sysmods = sys.modules for name in modnames: mod =", "not None: mods.append(v) return mods def find_classes(mods,baseclass=None,annotated=0, ClassType=types.ClassType,issubclass=issubclass): \"\"\" Find", "return mod return None def import_modules(modnames,module=None,errors='strict',reload=0, thismodule=_thismodule): \"\"\" Import all", "by the module objects in list mods. If annotated is", "among the list will be forced to reload. \"\"\" if", "the module objects in list mods. If annotated is true", "ignored. If recursive is true the search recurses into package", "import_modules(find_subpackages(module, recursive=recursive), module, reload=reload) def load_subpackages(module, locals=None, globals=None, errors='strict', reload=0,", "the subpackages by looking # at the directory where package", "basename = package.__name__ + '.' for i,name in irange(subpackages): subpackages[i]", "mod def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0): \"\"\" Imports all modules in modnames using", "module objects. The modules must already be loaded. \"\"\" return", "except: frame = exc_info()[2].tb_frame for i in trange(upcount): frame =", "module if available. 
If recursive is true (default is false),", "in sys.modules.items(): if k[:match_len] == match and v is not", "errorclass, errorvalue). If the handler returns, processing continues. If reload", "tools to aid working with packages. Copyright (c) 1998-2000, <NAME>;", "module. The .py extension is removed from the files. The", "*not* taken into account). If ignore_modules is true (default is", "if errors == 'ignore': pass elif errors == 'strict': raise", "classes = [] for mod in mods: for name,obj in", "true the returned list will contain tuples (module_object,name,class_object) for each", "subpackages scan using find_subpackages(module) and then imports all submodules found", "all modules in modnames using the given namespaces and returns", "list of module objects. If errors is 'strict' (default), then", "find_classes(mods,baseclass=None,annotated=0, ClassType=types.ClassType,issubclass=issubclass): \"\"\" Find all subclasses of baseclass or simply", "working with packages. Copyright (c) 1998-2000, <NAME>; mailto:<EMAIL> Copyright (c)", "pkgdict = {} if files_only: for filename in l: m", "isdir(path): # Check for __init__ module(s) for name in initmodule_names:", "initmodule_name = re.compile('__init__(' + '|'.join(suffixes) + ')$') initmodule_names = []", "mx.Tools.NewBuiltins # RE to identify Python modules suffixes = projection(imp.get_suffixes(),0)", "recursive is true the search recurses into package directories. pkgbasename", "tuples (module_object,name,class_object) for each class found where module_object is the", "for suffix in suffixes: initmodule_names.append('__init__' + suffix) def find_packages(dir=os.curdir, files_only=0,", "directories. pkgbasename and pkgdict are only used during recursion. \"\"\"", "to reload. \"\"\" return load_modules(find_subpackages(module, recursive=recursive), locals, globals, errors=errors, reload=reload)", "instances = [] for mod in mods: for name,obj in", "are recursively also included in the search. 
\"\"\" if not", "names into a list of module objects. The modules must", "contain tuples (module_object,name,instances_object) for each instances found where module_object is", "does not support the __all__ attribute subpackages = find_packages(package.__path__[0], recursive=recursive)", "if available. If reload is true (default is false), all", "by the error handler raise except Exception, why: if errors", "'.' match_len = len(match) mods = [sys.modules[pkgname]] for k,v in", "the error handler raise except Exception, why: if errors ==", "set to 'ignore', they are silently ignored. If errors is", "list(package.__all__) except (ImportError, AttributeError): # Did not work, then let's", "with packages. Copyright (c) 1998-2000, <NAME>; mailto:<EMAIL> Copyright (c) 2000-2015,", "\"\"\" l = listdir(dir) if pkgdict is None: pkgdict =", "and not issubclass(obj,baseclass): continue if annotated: classes.append((mod, name, obj)) else:", "locals = module.__dict__ sysmods = sys.modules for name in modnames:", "is_new: mod = reloader(mod) except KeyboardInterrupt: # Pass through; SystemExit", "Same as import_subpackages but with load_modules functionality, i.e. imports the", "list of module objects. The modules must already be loaded.", "among the list will be forced to reload. \"\"\" modules", "\"\"\" Same as import_subpackages but with load_modules functionality, i.e. 
imports", "isdir=os.path.isdir,exists=os.path.exists, isfile=os.path.isfile,join=os.path.join,listdir=os.listdir, module_name=module_name,initmodule_name=initmodule_name): \"\"\" Return a list of package names", "for __init__ module(s) for name in initmodule_names: if isfile(join(path, name)):", "isfile(join(path, name)): pkgname = pkgbasename + filename pkgdict[pkgname] = 1", "m.group(1)] = 1 return pkgdict.keys() def find_subpackages(package, recursive=0, splitpath=os.path.split): \"\"\"", "all Python files included in the same directory as the", "= frame.f_back name = frame.f_globals['__name__'] del frame return sys.modules[name] def", "currently registered modules are included in the list. \"\"\" match", "= importer(name, locals, globals, from_list) if reload and not is_new:", "= projection(imp.get_suffixes(),0) module_name = re.compile('(.*)(' + '|'.join(suffixes) + ')$') initmodule_name", "if not recursive: # Try the __all__ attribute... try: subpackages", "locals, globals, from_list) if reload and not is_new: mod =", "= find_packages(package.__path__[0], recursive=recursive) else: # XXX Recursive search does not", "= mod def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0): \"\"\" Imports all modules in modnames", "If errors is 'strict' (default), then ImportErrors are raised. If", "aid working with packages. Copyright (c) 1998-2000, <NAME>; mailto:<EMAIL> Copyright", "same directory as the module plus all subdirectories having an", "exc_info=sys.exc_info,trange=trange): \"\"\" Returns the module object that the callee is", "See the documentation for further information on copyrights, or contact", "set of tools to aid working with packages. Copyright (c)", "All Rights Reserved. \"\"\" __version__ = '0.4.0' import os,types,sys,re,imp,__builtin__ import", "__all__ attribute... 
try: subpackages = list(package.__all__) except (ImportError, AttributeError): #", "errors='strict', importer=__import__, reloader=reload, from_list=['*']): \"\"\" Internal API for loading a", "returns a list of module objects. If errors is 'strict'", "find_packages(package.__path__[0], recursive=recursive) basename = package.__name__ + '.' for i,name in", "instances found where module_object is the module where the instances", "__file__ attribute that non-builtin modules define. The function uses the", "are silently ignored. If reload is true (default is false),", "belonging to the package with the given name. The package", "list will be forced to reload. \"\"\" modules = []", "in the search (subdirectories are *not* taken into account). If", "Copyright (c) 2000-2015, eGenix.com Software GmbH; mailto:<EMAIL> See the documentation", "into package directories. pkgbasename and pkgdict are only used during", "= exc_info()[2].tb_frame for i in trange(upcount): frame = frame.f_back name", "handler returns, processing continues. If reload is true (default is", "sysmods, errors=errors) if mod is not None: locals[name] = mod", "\"\"\" modules = [] append = modules.append sysmods = sys.modules", "loaded. \"\"\" return extract(sys.modules, names) def package_modules(pkgname): \"\"\" Returns a", "for mod in mods: for name,obj in mod.__dict__.items(): if isinstance(obj,baseclass):", "forced to reload. \"\"\" return load_modules(find_subpackages(module, recursive=recursive), locals, globals, errors=errors,", "the directory where package lives... subpackages = find_packages(package.__path__[0], recursive=recursive) else:", "may contain dotted package names. If errors is 'strict' (default),", "Find all subclasses of baseclass or simply all classes (if", "module is None: module = _thismodule(2) locals = module.__dict__ sysmods", "name. The package must already be loaded. Only the currently", "etc.). 
\"\"\" try: 1/0 except: frame = exc_info()[2].tb_frame for i", "attribute that non-builtin modules define. The function uses the __all__", "module. module defaults to the caller's module. modnames may contain", "If set to 'ignore', they are silently ignored. If errors", "Returns a list of all modules belonging to the package", "the list will be forced to reload. \"\"\" return load_modules(find_subpackages(module,", "def package_modules(pkgname): \"\"\" Returns a list of all modules belonging", "callee, etc.). \"\"\" try: 1/0 except: frame = exc_info()[2].tb_frame for", "files included in the same directory as the module plus", "included in the search (subdirectories are *not* taken into account).", "is not None: locals[name] = mod def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0): \"\"\" Imports", "packages. If files_only is true, only Python files are included", "to 'ignore', they are silently ignored. If reload is true", "the module plus all subdirectories having an __init__.py file. The", "and subdirectories that provide an __init__ module. The .py extension", "filename) if isdir(path): # Check for __init__ module(s) for name", "upcount can be given to indicate how far up the", "Import all modules given in modnames into module. module defaults", "using the given namespaces and returns list of corresponding module", "import_subpackages but with load_modules functionality, i.e. imports the modules and", "be loaded. Only the currently registered modules are included in", "return mods def find_classes(mods,baseclass=None,annotated=0, ClassType=types.ClassType,issubclass=issubclass): \"\"\" Find all subclasses of", "list. \"\"\" match = pkgname + '.' match_len = len(match)", "= _module_loader(name, locals, locals, sysmods, errors=errors) if mod is not", "modules suffixes = projection(imp.get_suffixes(),0) module_name = re.compile('(.*)(' + '|'.join(suffixes) +", "callee of callee, etc.). 
\"\"\" try: 1/0 except: frame =", "subpackages by looking # at the directory where package lives...", "else: raise ValueError,'unknown errors value' else: return mod return None", "in dir. Packages are Python modules and subdirectories that provide", "false), modules are ignored. If recursive is true the search", "for each instances found where module_object is the module where", "if baseclass and not issubclass(obj,baseclass): continue if annotated: classes.append((mod, name,", "If set to 'ignore', they are silently ignored. If reload", "= frame.f_globals['__name__'] del frame return sys.modules[name] def _module_loader(name, locals, globals,", "the caller's module. modnames may contain dotted package names. If", "is true, only Python files are included in the search", "modules are included in the list. \"\"\" match = pkgname", "an __init__ module. The .py extension is removed from the", "except (ImportError, AttributeError): # Did not work, then let's try", "directory where package lives... subpackages = find_packages(package.__path__[0], recursive=recursive) else: #", "mods: for name,obj in mod.__dict__.items(): if isinstance(obj,baseclass): if annotated: instances.append((mod,name,obj))", "'strict' (default), then ImportErrors are raised. If set to 'ignore',", "function uses the __all__ attribute from the package __init__ module", "uses the __all__ attribute from the package __init__ module if", "sys.exc_info()[0], sys.exc_info()[1]) else: raise ValueError,'unknown errors value' else: return mod", "None) defined by the module objects in list mods. If", "reload. \"\"\" modules = [] append = modules.append sysmods =", "and v is not None: mods.append(v) return mods def find_classes(mods,baseclass=None,annotated=0,", "def find_classes(mods,baseclass=None,annotated=0, ClassType=types.ClassType,issubclass=issubclass): \"\"\" Find all subclasses of baseclass or", "at the __file__ attribute that non-builtin modules define. 
The function", "in trange(upcount): frame = frame.f_back name = frame.f_globals['__name__'] del frame", "already be loaded. Only the currently registered modules are included", "handler raise except Exception, why: if errors == 'ignore': pass", "packages. Copyright (c) 1998-2000, <NAME>; mailto:<EMAIL> Copyright (c) 2000-2015, eGenix.com", "package module, this function tries to identify all subpackages of", "\"\"\" Does a subpackages scan using find_subpackages(module) and then imports", "is called with arguments (modname, errorclass, errorvalue). If the handler", "If recursive is true (default is false), then subpackages of", "try: subpackages = list(package.__all__) except (ImportError, AttributeError): # Did not", "Python modules and subdirectories that provide an __init__ module. The", "name is prepended to all subpackage names. The module location", "set to 'ignore', they are silently ignored. If reload is", "not support the __all__ attribute subpackages = find_packages(package.__path__[0], recursive=recursive) basename", "in the same directory as the module plus all subdirectories", "recurses into package directories. pkgbasename and pkgdict are only used", "reload=reload) def modules(names, extract=extract): \"\"\" Converts a list of module", "+ '|'.join(suffixes) + ')$') initmodule_names = [] for suffix in", "issubclass(obj,baseclass): continue if annotated: classes.append((mod, name, obj)) else: classes.append(obj) return", "modules must already be loaded. \"\"\" return extract(sys.modules, names) def", "= [] append = modules.append sysmods = sys.modules for name", "be loaded. \"\"\" return extract(sys.modules, names) def package_modules(pkgname): \"\"\" Returns", "of tools to aid working with packages. Copyright (c) 1998-2000,", "annotated is true the returned list will contain tuples (module_object,name,class_object)", "+ ')$') initmodule_names = [] for suffix in suffixes: initmodule_names.append('__init__'", "mods. 
If annotated is true the returned list will contain", "that non-builtin modules define. The function uses the __all__ attribute", "contain tuples (module_object,name,class_object) for each class found where module_object is", "isfile=os.path.isfile,join=os.path.join,listdir=os.listdir, module_name=module_name,initmodule_name=initmodule_name): \"\"\" Return a list of package names found", "else: return mod return None def import_modules(modnames,module=None,errors='strict',reload=0, thismodule=_thismodule): \"\"\" Import", "__init__ module. The .py extension is removed from the files.", "The __init__ modules are not considered being seperate packages. If", "else: is_new = 0 try: mod = importer(name, locals, globals,", "is true (default is false), all already modules among the", "# Check for __init__ module(s) for name in initmodule_names: if", "subpackages of that package. Subpackages are all Python files included", "of callee, etc.). \"\"\" try: 1/0 except: frame = exc_info()[2].tb_frame", "of corresponding module objects. If errors is 'strict' (default), then", "modules and also returns a list of module objects. If", "objects. The modules must already be loaded. \"\"\" return extract(sys.modules,", "The package must already be loaded. Only the currently registered", "sys.modules.items(): if k[:match_len] == match and v is not None:", "\"\"\" Find all instances of baseclass defined by the module", "find_packages(path, recursive=1, pkgbasename=pkgname + '.', pkgdict=pkgdict) break elif not ignore_modules:", "# at the directory where package lives... subpackages = find_packages(package.__path__[0],", "already be loaded. \"\"\" return extract(sys.modules, names) def package_modules(pkgname): \"\"\"", "the __all__ attribute... 
try: subpackages = list(package.__all__) except (ImportError, AttributeError):", "(module_object,name,instances_object) for each instances found where module_object is the module", "\"\"\" Assuming that package points to a loaded package module,", "the class is defined. \"\"\" classes = [] for mod", "prepended to all subpackage names. The module location is found", "by looking # at the directory where package lives... subpackages", "recursion. \"\"\" l = listdir(dir) if pkgdict is None: pkgdict", "not recursive: # Try the __all__ attribute... try: subpackages =", "\"\"\" Find all subclasses of baseclass or simply all classes", "indicate how far up the execution stack the function is", "baseclass or simply all classes (if baseclass is None) defined", "pkgdict[pkgbasename + m.group(1)] = 1 else: for filename in l:", "name in modnames: mod = _module_loader(name, locals, globals, sysmods, errors=errors)", "attribute subpackages = find_packages(package.__path__[0], recursive=recursive) basename = package.__name__ + '.'", "pkgdict is None: pkgdict = {} if files_only: for filename", "frame = frame.f_back name = frame.f_globals['__name__'] del frame return sys.modules[name]", "+ suffix) def find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0, pkgbasename='', pkgdict=None, isdir=os.path.isdir,exists=os.path.exists,", "except KeyboardInterrupt: # Pass through; SystemExit will be handled by", "ClassType=types.ClassType,issubclass=issubclass): \"\"\" Find all subclasses of baseclass or simply all", "where module_object is the module where the instances is defined.", "The function uses the __all__ attribute from the package __init__", "= pkgbasename + filename pkgdict[pkgname] = 1 if recursive: find_packages(path,", "search does not support the __all__ attribute subpackages = find_packages(package.__path__[0],", "simply all classes (if baseclass is None) defined by the", "errorvalue). If the handler returns, processing continues. 
If reload is", "for name in modnames: mod = _module_loader(name, locals, locals, sysmods,", "and then imports all submodules found into module. The module", "reload=0, recursive=0, import_modules=import_modules, find_subpackages=find_subpackages): \"\"\" Does a subpackages scan using", "import_subpackages(module, reload=0, recursive=0, import_modules=import_modules, find_subpackages=find_subpackages): \"\"\" Does a subpackages scan", "plus all subdirectories having an __init__.py file. The modules name", "+ ')$') initmodule_name = re.compile('__init__(' + '|'.join(suffixes) + ')$') initmodule_names", "seperate packages. If files_only is true, only Python files are", "is None: module = _thismodule(2) locals = module.__dict__ sysmods =", "the list will be forced to reload. \"\"\" import_modules(find_subpackages(module, recursive=recursive),", "if annotated: classes.append((mod, name, obj)) else: classes.append(obj) return classes def", "subpackages = list(package.__all__) except (ImportError, AttributeError): # Did not work,", "(1 == direct callee, 2 == callee of callee, etc.).", "load_modules(modnames,locals=None,globals=None,errors='strict',reload=0): \"\"\" Imports all modules in modnames using the given", "A set of tools to aid working with packages. Copyright", "for further information on copyrights, or contact the author. All", "return sys.modules[name] def _module_loader(name, locals, globals, sysmods, errors='strict', importer=__import__, reloader=reload,", "not issubclass(obj,baseclass): continue if annotated: classes.append((mod, name, obj)) else: classes.append(obj)", "is removed from the files. The __init__ modules are not", "is true the search recurses into package directories. pkgbasename and", "errors=errors) if mod is not None: locals[name] = mod def", "modnames may contain dotted package names. If errors is 'strict'", "object that the callee is calling from. upcount can be", "ImportErrors are raised. 
If set to 'ignore', they are silently", "pkgbasename and pkgdict are only used during recursion. \"\"\" l", "classes.append((mod, name, obj)) else: classes.append(obj) return classes def find_instances(mods,baseclass,annotated=0, InstanceType=types.InstanceType,issubclass=issubclass):", "is true the returned list will contain tuples (module_object,name,instances_object) for", "\"\"\" Internal API for loading a module \"\"\" if not", "through; SystemExit will be handled by the error handler raise", "supposed to look (1 == direct callee, 2 == callee", "append(mod) return modules def import_subpackages(module, reload=0, recursive=0, import_modules=import_modules, find_subpackages=find_subpackages): \"\"\"", "The modules must already be loaded. \"\"\" return extract(sys.modules, names)", "contain dotted package names. If errors is 'strict' (default), then", "return modules def import_subpackages(module, reload=0, recursive=0, import_modules=import_modules, find_subpackages=find_subpackages): \"\"\" Does", "is prepended to all subpackage names. The module location is", "the list will be forced to reload. \"\"\" if module", "re.compile('(.*)(' + '|'.join(suffixes) + ')$') initmodule_name = re.compile('__init__(' + '|'.join(suffixes)", "Returns the module object that the callee is calling from.", "mod is not None: append(mod) return modules def import_subpackages(module, reload=0,", "'.', pkgdict=pkgdict) break elif not ignore_modules: m = module_name.match(filename) if", "l: path = join(dir, filename) if isdir(path): # Check for", "in modnames using the given namespaces and returns list of", "the search (subdirectories are *not* taken into account). If ignore_modules", "to indicate how far up the execution stack the function", "recursive=0, load_modules=load_modules, find_subpackages=find_subpackages): \"\"\" Same as import_subpackages but with load_modules", "modnames into module. module defaults to the caller's module. 
modnames", "a list of module names into a list of module", "+ '.' match_len = len(match) mods = [sys.modules[pkgname]] for k,v", "as import_subpackages but with load_modules functionality, i.e. imports the modules", "callee, 2 == callee of callee, etc.). \"\"\" try: 1/0", "not is_new: mod = reloader(mod) except KeyboardInterrupt: # Pass through;", "The .py extension is removed from the files. The __init__", "then it is called with arguments (modname, errorclass, errorvalue). If", "'strict': raise elif callable(errors): errors(name, sys.exc_info()[0], sys.exc_info()[1]) else: raise ValueError,'unknown", "'__init__': pkgdict[pkgbasename + m.group(1)] = 1 else: for filename in", "all subclasses of baseclass or simply all classes (if baseclass", "Copyright (c) 1998-2000, <NAME>; mailto:<EMAIL> Copyright (c) 2000-2015, eGenix.com Software", "of all modules belonging to the package with the given", "ignored. If errors is a callable object, then it is", "the list will be forced to reload. \"\"\" modules =", "defaults to the caller's module. modnames may contain dotted package", "true (default is false), all already modules among the list", "find_subpackages=find_subpackages): \"\"\" Does a subpackages scan using find_subpackages(module) and then", "then imports all submodules found into module. The module location", "among the list will be forced to reload. \"\"\" return", "Software GmbH; mailto:<EMAIL> See the documentation for further information on", "recursive=recursive) basename = package.__name__ + '.' for i,name in irange(subpackages):", "is a callable object, then it is called with arguments", "If recursive is true the search recurses into package directories.", "find_subpackages(package, recursive=0, splitpath=os.path.split): \"\"\" Assuming that package points to a", "defined by the module objects in list mods. If annotated", "the module where the class is defined. \"\"\" classes =", "raised. If set to 'ignore', they are silently ignored. 
If", "to find the subpackages by looking # at the directory", "import_modules(modnames,module=None,errors='strict',reload=0, thismodule=_thismodule): \"\"\" Import all modules given in modnames into", "in irange(subpackages): subpackages[i] = basename + name return subpackages def", "for filename in l: path = join(dir, filename) if isdir(path):", "1 else: is_new = 0 try: mod = importer(name, locals,", "execution stack the function is supposed to look (1 ==", "on copyrights, or contact the author. All Rights Reserved. \"\"\"", "= basename + name return subpackages def _thismodule(upcount=1, exc_info=sys.exc_info,trange=trange): \"\"\"", "recursive=recursive), module, reload=reload) def load_subpackages(module, locals=None, globals=None, errors='strict', reload=0, recursive=0,", "def modules(names, extract=extract): \"\"\" Converts a list of module names", "package must already be loaded. Only the currently registered modules", "= re.compile('__init__(' + '|'.join(suffixes) + ')$') initmodule_names = [] for", "will be forced to reload. \"\"\" return load_modules(find_subpackages(module, recursive=recursive), locals,", "= module_name.match(filename) if m is not None and \\ m.group(1)", "silently ignored. If errors is a callable object, then it", "the __file__ attribute that non-builtin modules define. The function uses", "mailto:<EMAIL> Copyright (c) 2000-2015, eGenix.com Software GmbH; mailto:<EMAIL> See the", "file. The modules name is prepended to all subpackage names.", "subclasses of baseclass or simply all classes (if baseclass is", "if pkgdict is None: pkgdict = {} if files_only: for", "\"\"\" match = pkgname + '.' match_len = len(match) mods", "elif callable(errors): errors(name, sys.exc_info()[0], sys.exc_info()[1]) else: raise ValueError,'unknown errors value'", "module objects. 
If errors is 'strict' (default), then ImportErrors and", "found where module_object is the module where the instances is", "locals, globals, sysmods, errors=errors) if mod is not None: append(mod)", "the callee is calling from. upcount can be given to", "+ '.', pkgdict=pkgdict) break elif not ignore_modules: m = module_name.match(filename)", "names. If errors is 'strict' (default), then ImportErrors and SyntaxErrors", "this function tries to identify all subpackages of that package.", "true the search recurses into package directories. pkgbasename and pkgdict", "l: m = module_name.match(filename) if m is not None and", "else: classes.append(obj) return classes def find_instances(mods,baseclass,annotated=0, InstanceType=types.InstanceType,issubclass=issubclass): \"\"\" Find all", "where module_object is the module where the class is defined.", "\"\"\" return extract(sys.modules, names) def package_modules(pkgname): \"\"\" Returns a list", "dotted package names. If errors is 'strict' (default), then ImportErrors", "locals=None, globals=None, errors='strict', reload=0, recursive=0, load_modules=load_modules, find_subpackages=find_subpackages): \"\"\" Same as", "during recursion. \"\"\" l = listdir(dir) if pkgdict is None:", "all modules given in modnames into module. module defaults to", "to the caller's module. modnames may contain dotted package names.", "being seperate packages. If files_only is true, only Python files", "true (default is false), modules are ignored. If recursive is", "errors is 'strict' (default), then ImportErrors are raised. If set", "If annotated is true the returned list will contain tuples", "calling from. upcount can be given to indicate how far", "objects. If errors is 'strict' (default), then ImportErrors are raised.", "forced to reload. 
\"\"\" if module is None: module =", "mods.append(v) return mods def find_classes(mods,baseclass=None,annotated=0, ClassType=types.ClassType,issubclass=issubclass): \"\"\" Find all subclasses", "if isfile(join(path, name)): pkgname = pkgbasename + filename pkgdict[pkgname] =", "to look (1 == direct callee, 2 == callee of", "module objects in list mods. If annotated is true the", "that package points to a loaded package module, this function", "defined. \"\"\" classes = [] for mod in mods: for", "load_subpackages(module, locals=None, globals=None, errors='strict', reload=0, recursive=0, load_modules=load_modules, find_subpackages=find_subpackages): \"\"\" Same", "will contain tuples (module_object,name,instances_object) for each instances found where module_object", "if k[:match_len] == match and v is not None: mods.append(v)", "is not None: mods.append(v) return mods def find_classes(mods,baseclass=None,annotated=0, ClassType=types.ClassType,issubclass=issubclass): \"\"\"", "SyntaxErrors are raised. If set to 'ignore', they are silently", "If the handler returns, processing continues. If reload is true", "reload and not is_new: mod = reloader(mod) except KeyboardInterrupt: #", "value' else: return mod return None def import_modules(modnames,module=None,errors='strict',reload=0, thismodule=_thismodule): \"\"\"", "= _module_loader(name, locals, globals, sysmods, errors=errors) if mod is not", "let's try to find the subpackages by looking # at", "== 'ignore': pass elif errors == 'strict': raise elif callable(errors):", "i.e. imports the modules and also returns a list of", ".py extension is removed from the files. The __init__ modules", "define. The function uses the __all__ attribute from the package", "into module. module defaults to the caller's module. modnames may", "modules belonging to the package with the given name. 
The", "sysmods, errors='strict', importer=__import__, reloader=reload, from_list=['*']): \"\"\" Internal API for loading", "def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0): \"\"\" Imports all modules in modnames using the", "raise except Exception, why: if errors == 'ignore': pass elif", "for i,name in irange(subpackages): subpackages[i] = basename + name return", "m.group(1) != '__init__': pkgdict[pkgbasename + m.group(1)] = 1 return pkgdict.keys()", "recursive=0, import_modules=import_modules, find_subpackages=find_subpackages): \"\"\" Does a subpackages scan using find_subpackages(module)", "extract(sys.modules, names) def package_modules(pkgname): \"\"\" Returns a list of all", "attribute from the package __init__ module if available. If recursive", "sys.modules for name in modnames: mod = _module_loader(name, locals, globals,", "mods def find_classes(mods,baseclass=None,annotated=0, ClassType=types.ClassType,issubclass=issubclass): \"\"\" Find all subclasses of baseclass", "the author. All Rights Reserved. \"\"\" __version__ = '0.4.0' import", "try: 1/0 except: frame = exc_info()[2].tb_frame for i in trange(upcount):", "package points to a loaded package module, this function tries", "def find_subpackages(package, recursive=0, splitpath=os.path.split): \"\"\" Assuming that package points to", "== direct callee, 2 == callee of callee, etc.). \"\"\"", "all subdirectories having an __init__.py file. The modules name is", "ignored. If reload is true (default is false), all already", "list will be forced to reload. \"\"\" if module is", "they are silently ignored. If reload is true (default is", "loading a module \"\"\" if not sysmods.has_key(name): is_new = 1", "listdir(dir) if pkgdict is None: pkgdict = {} if files_only:", "of that package. 
Subpackages are all Python files included in", "can be given to indicate how far up the execution", "Check for __init__ module(s) for name in initmodule_names: if isfile(join(path,", "match_len = len(match) mods = [sys.modules[pkgname]] for k,v in sys.modules.items():", "returned list will contain tuples (module_object,name,instances_object) for each instances found", "if available. If recursive is true (default is false), then", "true, only Python files are included in the search (subdirectories", "[] for suffix in suffixes: initmodule_names.append('__init__' + suffix) def find_packages(dir=os.curdir,", "into module. The module location is found by looking at", "(if baseclass is None) defined by the module objects in", "\\ m.group(1) != '__init__': pkgdict[pkgbasename + m.group(1)] = 1 return", "from the package __init__ module if available. If reload is", "forced to reload. \"\"\" import_modules(find_subpackages(module, recursive=recursive), module, reload=reload) def load_subpackages(module,", "suffixes = projection(imp.get_suffixes(),0) module_name = re.compile('(.*)(' + '|'.join(suffixes) + ')$')", "files_only: for filename in l: m = module_name.match(filename) if m", "looking at the __file__ attribute that non-builtin modules define. The", "called with arguments (modname, errorclass, errorvalue). If the handler returns,", "of baseclass or simply all classes (if baseclass is None)", "filename in l: m = module_name.match(filename) if m is not", "Subpackages are all Python files included in the same directory", "list will contain tuples (module_object,name,class_object) for each class found where", "'0.4.0' import os,types,sys,re,imp,__builtin__ import mx.Tools.NewBuiltins # RE to identify Python", "= listdir(dir) if pkgdict is None: pkgdict = {} if", "= pkgname + '.' match_len = len(match) mods = [sys.modules[pkgname]]", "\"\"\" if module is None: module = _thismodule(2) locals =", "package lives... 
subpackages = find_packages(package.__path__[0], recursive=recursive) else: # XXX Recursive", "function tries to identify all subpackages of that package. Subpackages", "find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0, pkgbasename='', pkgdict=None, isdir=os.path.isdir,exists=os.path.exists, isfile=os.path.isfile,join=os.path.join,listdir=os.listdir, module_name=module_name,initmodule_name=initmodule_name): \"\"\"", "are not considered being seperate packages. If files_only is true,", "list will be forced to reload. \"\"\" return load_modules(find_subpackages(module, recursive=recursive),", "modnames: mod = _module_loader(name, locals, locals, sysmods, errors=errors) if mod", "scan using find_subpackages(module) and then imports all submodules found into", "locals[name] = mod def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0): \"\"\" Imports all modules in", "in suffixes: initmodule_names.append('__init__' + suffix) def find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0,", "a list of package names found in dir. Packages are", "given to indicate how far up the execution stack the", "or simply all classes (if baseclass is None) defined by", "package_modules(pkgname): \"\"\" Returns a list of all modules belonging to", "imports all submodules found into module. The module location is", "module location is found by looking at the __file__ attribute", "m.group(1)] = 1 else: for filename in l: path =", "continues. If reload is true (default is false), all already", "callee is calling from. upcount can be given to indicate", "from_list=['*']): \"\"\" Internal API for loading a module \"\"\" if", "to 'ignore', they are silently ignored. If errors is a", "is the module where the instances is defined. \"\"\" instances", "among the list will be forced to reload. \"\"\" import_modules(find_subpackages(module,", "registered modules are included in the list. 
\"\"\" match =", "m = module_name.match(filename) if m is not None and \\", "__init__.py file. The modules name is prepended to all subpackage", "files_only=0, recursive=0, ignore_modules=0, pkgbasename='', pkgdict=None, isdir=os.path.isdir,exists=os.path.exists, isfile=os.path.isfile,join=os.path.join,listdir=os.listdir, module_name=module_name,initmodule_name=initmodule_name): \"\"\" Return", "removed from the files. The __init__ modules are not considered", "== 'strict': raise elif callable(errors): errors(name, sys.exc_info()[0], sys.exc_info()[1]) else: raise", "pkgbasename=pkgname + '.', pkgdict=pkgdict) break elif not ignore_modules: m =", "_module_loader(name, locals, locals, sysmods, errors=errors) if mod is not None:", "reload=reload) def load_subpackages(module, locals=None, globals=None, errors='strict', reload=0, recursive=0, load_modules=load_modules, find_subpackages=find_subpackages):", "and also returns a list of module objects. If errors", "globals, sysmods, errors=errors) if mod is not None: append(mod) return", "objects in list mods. If annotated is true the returned", "find_instances(mods,baseclass,annotated=0, InstanceType=types.InstanceType,issubclass=issubclass): \"\"\" Find all instances of baseclass defined by", "list of package names found in dir. Packages are Python", "\"\"\" Import all modules given in modnames into module. 
module", "frame return sys.modules[name] def _module_loader(name, locals, globals, sysmods, errors='strict', importer=__import__,", "callable object, then it is called with arguments (modname, errorclass,", "sysmods, errors=errors) if mod is not None: append(mod) return modules", "is ClassType: if baseclass and not issubclass(obj,baseclass): continue if annotated:", "errors value' else: return mod return None def import_modules(modnames,module=None,errors='strict',reload=0, thismodule=_thismodule):", "sys.modules[name] def _module_loader(name, locals, globals, sysmods, errors='strict', importer=__import__, reloader=reload, from_list=['*']):", "classes.append(obj) return classes def find_instances(mods,baseclass,annotated=0, InstanceType=types.InstanceType,issubclass=issubclass): \"\"\" Find all instances", "= package.__name__ + '.' for i,name in irange(subpackages): subpackages[i] =", "names. The module location is found by looking at the", "'.' for i,name in irange(subpackages): subpackages[i] = basename + name", "if type(obj) is ClassType: if baseclass and not issubclass(obj,baseclass): continue", "baseclass defined by the module objects in list mods. If", "def find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0, pkgbasename='', pkgdict=None, isdir=os.path.isdir,exists=os.path.exists, isfile=os.path.isfile,join=os.path.join,listdir=os.listdir, module_name=module_name,initmodule_name=initmodule_name):", "list of corresponding module objects. If errors is 'strict' (default),", "module. The module location is found by looking at the", "is 'strict' (default), then ImportErrors are raised. 
If set to", "Python files included in the same directory as the module", "None: pkgdict = {} if files_only: for filename in l:", "then let's try to find the subpackages by looking #", "If files_only is true, only Python files are included in", "the __all__ attribute from the package __init__ module if available.", "location is found by looking at the __file__ attribute that", "package __init__ module if available. If recursive is true (default", "\"\"\" Returns a list of all modules belonging to the", "import_modules=import_modules, find_subpackages=find_subpackages): \"\"\" Does a subpackages scan using find_subpackages(module) and", "or contact the author. All Rights Reserved. \"\"\" __version__ =", "sys.exc_info()[1]) else: raise ValueError,'unknown errors value' else: return mod return", "[] for mod in mods: for name,obj in mod.__dict__.items(): if", "= {} if files_only: for filename in l: m =", "the returned list will contain tuples (module_object,name,class_object) for each class", "found where module_object is the module where the class is", "defined. \"\"\" instances = [] for mod in mods: for", "trange(upcount): frame = frame.f_back name = frame.f_globals['__name__'] del frame return", "information on copyrights, or contact the author. All Rights Reserved.", "AttributeError): # Did not work, then let's try to find", "= list(package.__all__) except (ImportError, AttributeError): # Did not work, then", "= '0.4.0' import os,types,sys,re,imp,__builtin__ import mx.Tools.NewBuiltins # RE to identify", "i,name in irange(subpackages): subpackages[i] = basename + name return subpackages", "name,obj in mod.__dict__.items(): if type(obj) is ClassType: if baseclass and", "is supposed to look (1 == direct callee, 2 ==", "then ImportErrors are raised. If set to 'ignore', they are", "break elif not ignore_modules: m = module_name.match(filename) if m is", "ignore_modules is true (default is false), modules are ignored. If", "module where the instances is defined. 
\"\"\" instances = []", "if recursive: find_packages(path, recursive=1, pkgbasename=pkgname + '.', pkgdict=pkgdict) break elif", "of baseclass defined by the module objects in list mods.", "reload is true (default is false), all already modules among", "contact the author. All Rights Reserved. \"\"\" __version__ = '0.4.0'", "extension is removed from the files. The __init__ modules are", "'|'.join(suffixes) + ')$') initmodule_name = re.compile('__init__(' + '|'.join(suffixes) + ')$')", "true the returned list will contain tuples (module_object,name,instances_object) for each", "(modname, errorclass, errorvalue). If the handler returns, processing continues. If", "API for loading a module \"\"\" if not sysmods.has_key(name): is_new", "mods = [sys.modules[pkgname]] for k,v in sys.modules.items(): if k[:match_len] ==", "from. upcount can be given to indicate how far up", "will contain tuples (module_object,name,class_object) for each class found where module_object", "module names into a list of module objects. The modules", "if m is not None and \\ m.group(1) != '__init__':", "')$') initmodule_names = [] for suffix in suffixes: initmodule_names.append('__init__' +", "is_new = 1 else: is_new = 0 try: mod =", "not ignore_modules: m = module_name.match(filename) if m is not None", "GmbH; mailto:<EMAIL> See the documentation for further information on copyrights,", "caller's module. modnames may contain dotted package names. If errors", "pkgdict[pkgname] = 1 if recursive: find_packages(path, recursive=1, pkgbasename=pkgname + '.',", "ImportErrors and SyntaxErrors are raised. If set to 'ignore', they", "(ImportError, AttributeError): # Did not work, then let's try to", "0 try: mod = importer(name, locals, globals, from_list) if reload", "modules define. The function uses the __all__ attribute from the", "in modnames into module. module defaults to the caller's module.", "+ '.' 
for i,name in irange(subpackages): subpackages[i] = basename +", "recursive=recursive), locals, globals, errors=errors, reload=reload) def modules(names, extract=extract): \"\"\" Converts", "only Python files are included in the search (subdirectories are", "a list of module objects. The modules must already be", "The modules name is prepended to all subpackage names. The", "into account). If ignore_modules is true (default is false), modules", "= [] for mod in mods: for name,obj in mod.__dict__.items():", "a callable object, then it is called with arguments (modname,", "not None and \\ m.group(1) != '__init__': pkgdict[pkgbasename + m.group(1)]", "to reload. \"\"\" import_modules(find_subpackages(module, recursive=recursive), module, reload=reload) def load_subpackages(module, locals=None,", "module where the class is defined. \"\"\" classes = []", "!= '__init__': pkgdict[pkgbasename + m.group(1)] = 1 else: for filename", "mod = _module_loader(name, locals, locals, sysmods, errors=errors) if mod is", "{} if files_only: for filename in l: m = module_name.match(filename)", "else: for filename in l: path = join(dir, filename) if", "thismodule=_thismodule): \"\"\" Import all modules given in modnames into module.", "with arguments (modname, errorclass, errorvalue). If the handler returns, processing", "to aid working with packages. Copyright (c) 1998-2000, <NAME>; mailto:<EMAIL>", "'ignore', they are silently ignored. If reload is true (default", "pkgdict are only used during recursion. \"\"\" l = listdir(dir)", "sys.modules for name in modnames: mod = _module_loader(name, locals, locals,", "the instances is defined. 
\"\"\" instances = [] for mod", "1998-2000, <NAME>; mailto:<EMAIL> Copyright (c) 2000-2015, eGenix.com Software GmbH; mailto:<EMAIL>", "import mx.Tools.NewBuiltins # RE to identify Python modules suffixes =", "if files_only: for filename in l: m = module_name.match(filename) if", "Only the currently registered modules are included in the list.", "the module where the instances is defined. \"\"\" instances =", "\\ m.group(1) != '__init__': pkgdict[pkgbasename + m.group(1)] = 1 else:", "available. If recursive is true (default is false), then subpackages", "a module \"\"\" if not sysmods.has_key(name): is_new = 1 else:", "the given namespaces and returns list of corresponding module objects.", "returns, processing continues. If reload is true (default is false),", "tries to identify all subpackages of that package. Subpackages are", "raise ValueError,'unknown errors value' else: return mod return None def", "are included in the list. \"\"\" match = pkgname +", "the __all__ attribute subpackages = find_packages(package.__path__[0], recursive=recursive) basename = package.__name__", "direct callee, 2 == callee of callee, etc.). \"\"\" try:", "considered being seperate packages. If files_only is true, only Python", "errors(name, sys.exc_info()[0], sys.exc_info()[1]) else: raise ValueError,'unknown errors value' else: return", "baseclass and not issubclass(obj,baseclass): continue if annotated: classes.append((mod, name, obj))", "_module_loader(name, locals, globals, sysmods, errors='strict', importer=__import__, reloader=reload, from_list=['*']): \"\"\" Internal", "\"\"\" __version__ = '0.4.0' import os,types,sys,re,imp,__builtin__ import mx.Tools.NewBuiltins # RE", "having an __init__.py file. The modules name is prepended to", "loaded. 
Only the currently registered modules are included in the", "obj)) else: classes.append(obj) return classes def find_instances(mods,baseclass,annotated=0, InstanceType=types.InstanceType,issubclass=issubclass): \"\"\" Find", "recursive=0, splitpath=os.path.split): \"\"\" Assuming that package points to a loaded", "Pass through; SystemExit will be handled by the error handler", "be given to indicate how far up the execution stack", "exc_info()[2].tb_frame for i in trange(upcount): frame = frame.f_back name =", "provide an __init__ module. The .py extension is removed from", "__init__ module if available. If reload is true (default is", "module_object is the module where the class is defined. \"\"\"", "list will contain tuples (module_object,name,instances_object) for each instances found where", "from the files. The __init__ modules are not considered being", "are silently ignored. If errors is a callable object, then", "in l: path = join(dir, filename) if isdir(path): # Check", "are raised. If set to 'ignore', they are silently ignored.", "included in the list. \"\"\" match = pkgname + '.'", "Assuming that package points to a loaded package module, this", "= 1 else: is_new = 0 try: mod = importer(name,", "modules name is prepended to all subpackage names. The module", "1 if recursive: find_packages(path, recursive=1, pkgbasename=pkgname + '.', pkgdict=pkgdict) break", "= sys.modules for name in modnames: mod = _module_loader(name, locals,", "copyrights, or contact the author. All Rights Reserved. \"\"\" __version__", "= 1 else: for filename in l: path = join(dir,", "== callee of callee, etc.). \"\"\" try: 1/0 except: frame", "= re.compile('(.*)(' + '|'.join(suffixes) + ')$') initmodule_name = re.compile('__init__(' +", "handled by the error handler raise except Exception, why: if", "2000-2015, eGenix.com Software GmbH; mailto:<EMAIL> See the documentation for further", "in the search. 
\"\"\" if not recursive: # Try the", "why: if errors == 'ignore': pass elif errors == 'strict':", "\"\"\" import_modules(find_subpackages(module, recursive=recursive), module, reload=reload) def load_subpackages(module, locals=None, globals=None, errors='strict',", "mod in mods: for name,obj in mod.__dict__.items(): if type(obj) is", "non-builtin modules define. The function uses the __all__ attribute from", "is false), then subpackages of subpackages are recursively also included", "\"\"\" try: 1/0 except: frame = exc_info()[2].tb_frame for i in", "importer(name, locals, globals, from_list) if reload and not is_new: mod", "match and v is not None: mods.append(v) return mods def", "reloader(mod) except KeyboardInterrupt: # Pass through; SystemExit will be handled", "continue if annotated: classes.append((mod, name, obj)) else: classes.append(obj) return classes", "\"\"\" Returns the module object that the callee is calling", "the currently registered modules are included in the list. \"\"\"", "it is called with arguments (modname, errorclass, errorvalue). If the", "module objects. If errors is 'strict' (default), then ImportErrors are", "is found by looking at the __file__ attribute that non-builtin", "that provide an __init__ module. The .py extension is removed", "the search recurses into package directories. pkgbasename and pkgdict are", "# Try the __all__ attribute... try: subpackages = list(package.__all__) except", "module object that the callee is calling from. upcount can", "class is defined. \"\"\" classes = [] for mod in", "l = listdir(dir) if pkgdict is None: pkgdict = {}", "the list. \"\"\" match = pkgname + '.' match_len =", "names found in dir. Packages are Python modules and subdirectories", "from the package __init__ module if available. If recursive is", "+ name return subpackages def _thismodule(upcount=1, exc_info=sys.exc_info,trange=trange): \"\"\" Returns the", "'ignore', they are silently ignored. 
If errors is a callable", "filename pkgdict[pkgname] = 1 if recursive: find_packages(path, recursive=1, pkgbasename=pkgname +", "try to find the subpackages by looking # at the", "of module objects. If errors is 'strict' (default), then ImportErrors", "__all__ attribute subpackages = find_packages(package.__path__[0], recursive=recursive) basename = package.__name__ +", "package __init__ module if available. If reload is true (default", "suffix) def find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0, pkgbasename='', pkgdict=None, isdir=os.path.isdir,exists=os.path.exists, isfile=os.path.isfile,join=os.path.join,listdir=os.listdir,", "is false), modules are ignored. If recursive is true the", "None: module = _thismodule(2) locals = module.__dict__ sysmods = sys.modules", "# Did not work, then let's try to find the", "all classes (if baseclass is None) defined by the module", "ClassType: if baseclass and not issubclass(obj,baseclass): continue if annotated: classes.append((mod,", "None and \\ m.group(1) != '__init__': pkgdict[pkgbasename + m.group(1)] =", "module if available. If reload is true (default is false),", "mod.__dict__.items(): if type(obj) is ClassType: if baseclass and not issubclass(obj,baseclass):", "the package __init__ module if available. If reload is true", "classes (if baseclass is None) defined by the module objects", "+ m.group(1)] = 1 else: for filename in l: path", "= find_packages(package.__path__[0], recursive=recursive) basename = package.__name__ + '.' for i,name", "(subdirectories are *not* taken into account). If ignore_modules is true", "mailto:<EMAIL> See the documentation for further information on copyrights, or", "m.group(1) != '__init__': pkgdict[pkgbasename + m.group(1)] = 1 else: for", "objects. 
If errors is 'strict' (default), then ImportErrors and SyntaxErrors", "1 return pkgdict.keys() def find_subpackages(package, recursive=0, splitpath=os.path.split): \"\"\" Assuming that", "all subpackages of that package. Subpackages are all Python files", "importer=__import__, reloader=reload, from_list=['*']): \"\"\" Internal API for loading a module", "\"\"\" Converts a list of module names into a list", "false), all already modules among the list will be forced", "not considered being seperate packages. If files_only is true, only", "included in the same directory as the module plus all", "frame.f_globals['__name__'] del frame return sys.modules[name] def _module_loader(name, locals, globals, sysmods,", "\"\"\" Imports all modules in modnames using the given namespaces", "Reserved. \"\"\" __version__ = '0.4.0' import os,types,sys,re,imp,__builtin__ import mx.Tools.NewBuiltins #", "module defaults to the caller's module. modnames may contain dotted", "reload=0, recursive=0, load_modules=load_modules, find_subpackages=find_subpackages): \"\"\" Same as import_subpackages but with", "given name. The package must already be loaded. Only the", "module_name.match(filename) if m is not None and \\ m.group(1) !=", "def import_modules(modnames,module=None,errors='strict',reload=0, thismodule=_thismodule): \"\"\" Import all modules given in modnames", "for each class found where module_object is the module where", "already modules among the list will be forced to reload.", "suffix in suffixes: initmodule_names.append('__init__' + suffix) def find_packages(dir=os.curdir, files_only=0, recursive=0,", "')$') initmodule_name = re.compile('__init__(' + '|'.join(suffixes) + ')$') initmodule_names =", "ignore_modules: m = module_name.match(filename) if m is not None and", "frame.f_back name = frame.f_globals['__name__'] del frame return sys.modules[name] def _module_loader(name,", "must already be loaded. 
\"\"\" return extract(sys.modules, names) def package_modules(pkgname):", "given in modnames into module. module defaults to the caller's", "annotated: classes.append((mod, name, obj)) else: classes.append(obj) return classes def find_instances(mods,baseclass,annotated=0,", "= reloader(mod) except KeyboardInterrupt: # Pass through; SystemExit will be", "list of all modules belonging to the package with the", "match = pkgname + '.' match_len = len(match) mods =", "subpackages = find_packages(package.__path__[0], recursive=recursive) else: # XXX Recursive search does", "modules are not considered being seperate packages. If files_only is", "name in initmodule_names: if isfile(join(path, name)): pkgname = pkgbasename +", "subpackages[i] = basename + name return subpackages def _thismodule(upcount=1, exc_info=sys.exc_info,trange=trange):", "sysmods.has_key(name): is_new = 1 else: is_new = 0 try: mod", "(default), then ImportErrors are raised. If set to 'ignore', they", "if module is None: module = _thismodule(2) locals = module.__dict__", "mod.__dict__.items(): if isinstance(obj,baseclass): if annotated: instances.append((mod,name,obj)) else: instances.append(obj) return instances", "find_packages(package.__path__[0], recursive=recursive) else: # XXX Recursive search does not support", "search (subdirectories are *not* taken into account). If ignore_modules is", "return load_modules(find_subpackages(module, recursive=recursive), locals, globals, errors=errors, reload=reload) def modules(names, extract=extract):", "irange(subpackages): subpackages[i] = basename + name return subpackages def _thismodule(upcount=1,", "where the class is defined. \"\"\" classes = [] for", "in initmodule_names: if isfile(join(path, name)): pkgname = pkgbasename + filename", "If ignore_modules is true (default is false), modules are ignored.", "name)): pkgname = pkgbasename + filename pkgdict[pkgname] = 1 if", "recursively also included in the search. 
\"\"\" if not recursive:", "None: append(mod) return modules def import_subpackages(module, reload=0, recursive=0, import_modules=import_modules, find_subpackages=find_subpackages):", "is None) defined by the module objects in list mods.", "to identify all subpackages of that package. Subpackages are all", "\"\"\" if not recursive: # Try the __all__ attribute... try:", "= len(match) mods = [sys.modules[pkgname]] for k,v in sys.modules.items(): if", "corresponding module objects. If errors is 'strict' (default), then ImportErrors", "error handler raise except Exception, why: if errors == 'ignore':", "all already modules among the list will be forced to", "a subpackages scan using find_subpackages(module) and then imports all submodules", "namespaces and returns list of corresponding module objects. If errors", "None: mods.append(v) return mods def find_classes(mods,baseclass=None,annotated=0, ClassType=types.ClassType,issubclass=issubclass): \"\"\" Find all", "projection(imp.get_suffixes(),0) module_name = re.compile('(.*)(' + '|'.join(suffixes) + ')$') initmodule_name =", "in modnames: mod = _module_loader(name, locals, locals, sysmods, errors=errors) if", "mod = importer(name, locals, globals, from_list) if reload and not", "locals, globals, sysmods, errors='strict', importer=__import__, reloader=reload, from_list=['*']): \"\"\" Internal API", "2 == callee of callee, etc.). \"\"\" try: 1/0 except:", "files. The __init__ modules are not considered being seperate packages.", "name, obj)) else: classes.append(obj) return classes def find_instances(mods,baseclass,annotated=0, InstanceType=types.InstanceType,issubclass=issubclass): \"\"\"", "of subpackages are recursively also included in the search. 
\"\"\"", "path = join(dir, filename) if isdir(path): # Check for __init__", "tuples (module_object,name,instances_object) for each instances found where module_object is the", "instances of baseclass defined by the module objects in list", "and pkgdict are only used during recursion. \"\"\" l =", "list mods. If annotated is true the returned list will", "modules are ignored. If recursive is true the search recurses", "name in modnames: mod = _module_loader(name, locals, locals, sysmods, errors=errors)", "to all subpackage names. The module location is found by", "if not sysmods.has_key(name): is_new = 1 else: is_new = 0", "is true (default is false), then subpackages of subpackages are", "into a list of module objects. The modules must already", "the execution stack the function is supposed to look (1", "else: # XXX Recursive search does not support the __all__", "and not is_new: mod = reloader(mod) except KeyboardInterrupt: # Pass", "with the given name. The package must already be loaded.", "The module location is found by looking at the __file__", "search. \"\"\" if not recursive: # Try the __all__ attribute...", "from_list) if reload and not is_new: mod = reloader(mod) except", "If errors is a callable object, then it is called", "where the instances is defined. \"\"\" instances = [] for", "If reload is true (default is false), all already modules", "os,types,sys,re,imp,__builtin__ import mx.Tools.NewBuiltins # RE to identify Python modules suffixes", "at the directory where package lives... subpackages = find_packages(package.__path__[0], recursive=recursive)", "type(obj) is ClassType: if baseclass and not issubclass(obj,baseclass): continue if", "k,v in sys.modules.items(): if k[:match_len] == match and v is", "the handler returns, processing continues. 
If reload is true (default", "def find_instances(mods,baseclass,annotated=0, InstanceType=types.InstanceType,issubclass=issubclass): \"\"\" Find all instances of baseclass defined", "+ '|'.join(suffixes) + ')$') initmodule_name = re.compile('__init__(' + '|'.join(suffixes) +", "be forced to reload. \"\"\" modules = [] append =", "import os,types,sys,re,imp,__builtin__ import mx.Tools.NewBuiltins # RE to identify Python modules", "the files. The __init__ modules are not considered being seperate", "is_new = 0 try: mod = importer(name, locals, globals, from_list)", "locals, sysmods, errors=errors) if mod is not None: locals[name] =", "is None: pkgdict = {} if files_only: for filename in", "are Python modules and subdirectories that provide an __init__ module.", "def _module_loader(name, locals, globals, sysmods, errors='strict', importer=__import__, reloader=reload, from_list=['*']): \"\"\"", "recursive: find_packages(path, recursive=1, pkgbasename=pkgname + '.', pkgdict=pkgdict) break elif not", "found into module. The module location is found by looking", "!= '__init__': pkgdict[pkgbasename + m.group(1)] = 1 return pkgdict.keys() def", "for name in initmodule_names: if isfile(join(path, name)): pkgname = pkgbasename", "subpackages def _thismodule(upcount=1, exc_info=sys.exc_info,trange=trange): \"\"\" Returns the module object that", "account). If ignore_modules is true (default is false), modules are", "an __init__.py file. The modules name is prepended to all", "module.__dict__ sysmods = sys.modules for name in modnames: mod =", "i in trange(upcount): frame = frame.f_back name = frame.f_globals['__name__'] del", "modules and subdirectories that provide an __init__ module. The .py", "\"\"\" return load_modules(find_subpackages(module, recursive=recursive), locals, globals, errors=errors, reload=reload) def modules(names,", "modules given in modnames into module. 
module defaults to the", "loaded package module, this function tries to identify all subpackages", "== match and v is not None: mods.append(v) return mods", "modules in modnames using the given namespaces and returns list", "in mod.__dict__.items(): if isinstance(obj,baseclass): if annotated: instances.append((mod,name,obj)) else: instances.append(obj) return", "will be forced to reload. \"\"\" if module is None:", "modnames: mod = _module_loader(name, locals, globals, sysmods, errors=errors) if mod", "__init__ modules are not considered being seperate packages. If files_only", "(c) 1998-2000, <NAME>; mailto:<EMAIL> Copyright (c) 2000-2015, eGenix.com Software GmbH;", "directory as the module plus all subdirectories having an __init__.py", "found in dir. Packages are Python modules and subdirectories that", "to a loaded package module, this function tries to identify", "pkgname = pkgbasename + filename pkgdict[pkgname] = 1 if recursive:", "errors='strict', reload=0, recursive=0, load_modules=load_modules, find_subpackages=find_subpackages): \"\"\" Same as import_subpackages but", "__all__ attribute from the package __init__ module if available. If", "'ignore': pass elif errors == 'strict': raise elif callable(errors): errors(name,", "Packages are Python modules and subdirectories that provide an __init__", "the function is supposed to look (1 == direct callee,", "# XXX Recursive search does not support the __all__ attribute", "files_only is true, only Python files are included in the", "of package names found in dir. Packages are Python modules", "+ filename pkgdict[pkgname] = 1 if recursive: find_packages(path, recursive=1, pkgbasename=pkgname", "subpackage names. The module location is found by looking at", "pkgname + '.' 
match_len = len(match) mods = [sys.modules[pkgname]] for", "for name,obj in mod.__dict__.items(): if isinstance(obj,baseclass): if annotated: instances.append((mod,name,obj)) else:", "not None: append(mod) return modules def import_subpackages(module, reload=0, recursive=0, import_modules=import_modules,", "'|'.join(suffixes) + ')$') initmodule_names = [] for suffix in suffixes:", "identify all subpackages of that package. Subpackages are all Python", "initmodule_names.append('__init__' + suffix) def find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0, pkgbasename='', pkgdict=None,", "then subpackages of subpackages are recursively also included in the", "errors == 'ignore': pass elif errors == 'strict': raise elif", "package names. If errors is 'strict' (default), then ImportErrors and", "not sysmods.has_key(name): is_new = 1 else: is_new = 0 try:", "the same directory as the module plus all subdirectories having", "subpackages = find_packages(package.__path__[0], recursive=recursive) basename = package.__name__ + '.' for", "far up the execution stack the function is supposed to", "is not None and \\ m.group(1) != '__init__': pkgdict[pkgbasename +", "reload. 
\"\"\" if module is None: module = _thismodule(2) locals", "files are included in the search (subdirectories are *not* taken", "return classes def find_instances(mods,baseclass,annotated=0, InstanceType=types.InstanceType,issubclass=issubclass): \"\"\" Find all instances of", "elif not ignore_modules: m = module_name.match(filename) if m is not", "_module_loader(name, locals, globals, sysmods, errors=errors) if mod is not None:", "globals, errors=errors, reload=reload) def modules(names, extract=extract): \"\"\" Converts a list", "found by looking at the __file__ attribute that non-builtin modules", "raise elif callable(errors): errors(name, sys.exc_info()[0], sys.exc_info()[1]) else: raise ValueError,'unknown errors", "SystemExit will be handled by the error handler raise except", "errors is 'strict' (default), then ImportErrors and SyntaxErrors are raised.", "find_subpackages(module) and then imports all submodules found into module. The", "# Pass through; SystemExit will be handled by the error", "functionality, i.e. imports the modules and also returns a list", "module \"\"\" if not sysmods.has_key(name): is_new = 1 else: is_new", "where package lives... subpackages = find_packages(package.__path__[0], recursive=recursive) else: # XXX", "will be handled by the error handler raise except Exception,", "for loading a module \"\"\" if not sysmods.has_key(name): is_new =", "a list of module objects. If errors is 'strict' (default),", "(default is false), modules are ignored. If recursive is true", "a loaded package module, this function tries to identify all", "suffixes: initmodule_names.append('__init__' + suffix) def find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0, pkgbasename='',", "looking # at the directory where package lives... 
subpackages =", "using find_subpackages(module) and then imports all submodules found into module.", "be handled by the error handler raise except Exception, why:", "load_modules(find_subpackages(module, recursive=recursive), locals, globals, errors=errors, reload=reload) def modules(names, extract=extract): \"\"\"", "splitpath=os.path.split): \"\"\" Assuming that package points to a loaded package", "1 else: for filename in l: path = join(dir, filename)", "callable(errors): errors(name, sys.exc_info()[0], sys.exc_info()[1]) else: raise ValueError,'unknown errors value' else:", "__init__ module(s) for name in initmodule_names: if isfile(join(path, name)): pkgname", "and returns list of corresponding module objects. If errors is", "initmodule_names = [] for suffix in suffixes: initmodule_names.append('__init__' + suffix)", "ValueError,'unknown errors value' else: return mod return None def import_modules(modnames,module=None,errors='strict',reload=0,", "will be forced to reload. \"\"\" modules = [] append", "Python files are included in the search (subdirectories are *not*", "filename in l: path = join(dir, filename) if isdir(path): #", "RE to identify Python modules suffixes = projection(imp.get_suffixes(),0) module_name =", "find_subpackages=find_subpackages): \"\"\" Same as import_subpackages but with load_modules functionality, i.e.", "\"\"\" if not sysmods.has_key(name): is_new = 1 else: is_new =", "try: mod = importer(name, locals, globals, from_list) if reload and", "will be forced to reload. 
\"\"\" import_modules(find_subpackages(module, recursive=recursive), module, reload=reload)", "errors=errors, reload=reload) def modules(names, extract=extract): \"\"\" Converts a list of", "return subpackages def _thismodule(upcount=1, exc_info=sys.exc_info,trange=trange): \"\"\" Returns the module object", "initmodule_names: if isfile(join(path, name)): pkgname = pkgbasename + filename pkgdict[pkgname]", "to identify Python modules suffixes = projection(imp.get_suffixes(),0) module_name = re.compile('(.*)('", "if isdir(path): # Check for __init__ module(s) for name in", "= join(dir, filename) if isdir(path): # Check for __init__ module(s)", "then ImportErrors and SyntaxErrors are raised. If set to 'ignore',", "Imports all modules in modnames using the given namespaces and", "locals, globals, errors=errors, reload=reload) def modules(names, extract=extract): \"\"\" Converts a", "sysmods = sys.modules for name in modnames: mod = _module_loader(name,", "that the callee is calling from. upcount can be given", "eGenix.com Software GmbH; mailto:<EMAIL> See the documentation for further information", "of module objects. The modules must already be loaded. \"\"\"", "all submodules found into module. The module location is found", "all subpackage names. The module location is found by looking", "list of module names into a list of module objects.", "and \\ m.group(1) != '__init__': pkgdict[pkgbasename + m.group(1)] = 1", "subdirectories that provide an __init__ module. The .py extension is", "modules among the list will be forced to reload. \"\"\"", "the search. \"\"\" if not recursive: # Try the __all__", "module_object is the module where the instances is defined. 
\"\"\"", "+ m.group(1)] = 1 return pkgdict.keys() def find_subpackages(package, recursive=0, splitpath=os.path.split):", "return extract(sys.modules, names) def package_modules(pkgname): \"\"\" Returns a list of", "[] append = modules.append sysmods = sys.modules for name in", "globals=None, errors='strict', reload=0, recursive=0, load_modules=load_modules, find_subpackages=find_subpackages): \"\"\" Same as import_subpackages", "is calling from. upcount can be given to indicate how", "the given name. The package must already be loaded. Only", "stack the function is supposed to look (1 == direct", "Exception, why: if errors == 'ignore': pass elif errors ==", "load_modules=load_modules, find_subpackages=find_subpackages): \"\"\" Same as import_subpackages but with load_modules functionality,", "- A set of tools to aid working with packages.", "if reload and not is_new: mod = reloader(mod) except KeyboardInterrupt:", "(default), then ImportErrors and SyntaxErrors are raised. If set to", "errors=errors) if mod is not None: append(mod) return modules def", "as the module plus all subdirectories having an __init__.py file.", "work, then let's try to find the subpackages by looking", "KeyboardInterrupt: # Pass through; SystemExit will be handled by the", "in the list. \"\"\" match = pkgname + '.' match_len", "used during recursion. \"\"\" l = listdir(dir) if pkgdict is", "append = modules.append sysmods = sys.modules for name in modnames:", "dir. Packages are Python modules and subdirectories that provide an", "mod return None def import_modules(modnames,module=None,errors='strict',reload=0, thismodule=_thismodule): \"\"\" Import all modules", "returns list of corresponding module objects. If errors is 'strict'", "included in the search. \"\"\" if not recursive: # Try", "pkgdict[pkgbasename + m.group(1)] = 1 return pkgdict.keys() def find_subpackages(package, recursive=0,", "is true (default is false), modules are ignored. 
If recursive", "all instances of baseclass defined by the module objects in", "to the package with the given name. The package must" ]
[ "result = urllib.parse.urlencode(given, doseq=True, safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect, result)", "conn, addr = serv.accept() # conn.send(\"1 Hola mundo\\n\") # cantdata", "given = '%' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect,", "encoding. given = (('\\u00a0', '\\u00c1'),) expect = '%A0=%C1' result =", "serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)", "number of characters: %s != %s\" % (test_type, len(result), (5", "that contains ignorable spaces, # such as \"\\n\", \" \",", "close files that may still be open. It # is", "result)) def test_quote_plus_with_unicode(self): # Encoding (latin-1) test for quote_plus given", "\"\"\"Test urlopen() opening a temporary file. Try to test as", "expect = os.path.join(\"make+sure\", \"using_unquote\") result = urllib.request.url2pathname(given) self.assertEqual(expect, result, \"url2pathname()", "\"using quote_plus(): %r != +\" % result) given = \"a", "Make sure simple tests pass expected_path = os.path.join(\"parts\", \"of\", \"a\",", "True)) data = collections.OrderedDict([(\"a\", 1), (\"b\", 1)]) self.assertEqual(\"a=a&a=b\", urllib.parse.urlencode({\"a\": data},", "fakeftp(self): class FakeFtpWrapper(object): def __init__(self, user, passwd, host, port, dirs,", "finally: try: newFile.close() except: pass return newFilePath def registerFileForCleanUp(self, fileName):", "start with empty fake environment os.environ = collections.OrderedDict() def tearDown(self):", "_urlopener = None def urlopen(url, data=None, proxies=None): \"\"\"urlopen(url [, data])", "bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme class DummyURLopener(urllib.request.URLopener): def", "= '%' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect,", "quote_plus(): %r != %r\" % (expect, result)) def test_quoting_plus(self): 
self.assertEqual(urllib.parse.quote_plus('alpha+beta", "the urllib.request.thishost utility function returns a tuple\"\"\" self.assertIsInstance(urllib.request.thishost(), tuple) class", "proxy related env vars self.env.__exit__() del self.env def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY',", "result)) def test_quoting_plus(self): self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'), 'alpha+beta+gamma')", "urllib.parse.urlencode({\"a\": data}, True)) def test_urlencode_encoding(self): # ASCII encoding. Expect %3F", "try: text = FILE.read() FILE.close() finally: try: FILE.close() except: pass", "self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) os.environ['http_proxy'] = '' os.environ['HTTP_PROXY'] = 'http://somewhere:3128' proxies = urllib.request.getproxies_environment()", "in other. I have a linux, and # the tests", "# Make sure unquoting works when have non-quoted characters #", "'alpha+beta+gamma') # Test with bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'), 'alpha%2Bbeta+gamma') # Test", "request.method = 'HEAD' self.assertEqual(request.get_method(), 'HEAD') class URL2PathNameTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(url2pathname(\"///C|\"),", "self.assertFalse(bypass('anotherdomain.com:8888\\n')) self.assertFalse(bypass('newdomain.com:1234\\n')) class ProxyTests_withOrderedEnv(unittest.TestCase): def setUp(self): # We need to", "= self.sock if mock_close: # bpo-36918: HTTPConnection destructor calls close()", "amp_location = result.index('&') on_amp_left = result[amp_location - 1] on_amp_right =", "Encoding. 
Expect %3F with errors=\"replace' given = (('\\u00a0', '\\u00c1'),) expect", "be escaped Unwise : \"{}|\\^[]`\" Must be escaped \"\"\" def", "self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_path_with_control_char_rejected(self): for char_no in", "encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote_plus(): %r != %r\" % (expect,", "self.assertEqual(expect, result) # Safe parameter in sequence given = ((b'\\xa0\\x24',", "UTF-8, invalid sequence, replace errors result = urllib.parse.unquote(given, errors=\"replace\") self.assertEqual(expect,", "those characters to be UTF-8 # encoded). result = urllib.parse.unquote_to_bytes(\"\\u6f22%C3%BC\")", "only helps to makes sure temporary files get deleted, but", "urllib.parse.urlencode(given) self.assertEqual(expect, result) given = {\"key name\":\"A bunch of pluses\"}", "= 'C:\\\\' for url in given: result = urllib.request.url2pathname(url) self.assertEqual(expect,", "using a bytes rather than str result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\",", "(42, \"\\u00c1\")),) expect = '%C2%A0=42&%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect,", "%r\" % (expect, result)) given = '%' expect = bytes(given,", "result) def test_urlencode_encoding_safe_parameter(self): # Send '$' (\\x24) as safe character", "the # comparison. # Use the iterator in the usual", "result, \"url2pathname() failed; %s != %s\" % (expect, result)) @unittest.skipUnless(sys.platform", "occur. self.tempFiles = [] # Create a temporary file. 
self.registerFileForCleanUp(support.TESTFN)", "= urllib.request.Request request = Request(\"http://www.python.org\", method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD')", "%r\" % (expected, result)) def test_quoting_space(self): # Make sure quote()", "urlopen(tmp_fileurl) as fobj: self.assertTrue(fobj) finally: os.close(fd) os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) with self.assertRaises(urllib.error.URLError):", "# Safe expressed as bytes rather than str result =", "if mock_close: # bpo-36918: HTTPConnection destructor calls close() which calls", "# Test unquoting on bad percent-escapes given = '%xab' expect", "\"needs/%s/here\" % urllib.parse.quote(\"quot=ing\") result = urllib.request.pathname2url(given) self.assertEqual(expect, result, \"pathname2url() failed;", "self.text_url_base64_resp = urllib.request.urlopen( self.text_url_base64) self.image_url_resp = urllib.request.urlopen(self.image_url) def test_interface(self): #", "def test_userpass_inurl(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://user:pass@python.org/\") self.assertEqual(fp.readline(),", "url) self.assertRaises(OSError, urllib.request.URLopener().retrieve, url) self.assertRaises(OSError, DummyURLopener().open, url) self.assertRaises(OSError, DummyURLopener().retrieve, url)", "properly close files even # when exceptional conditions occur. self.tempFiles", "chr(num) result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r !=", "http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain", "limit. 
for i in range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1 302 Found Location: file://guidocomputer.athome.com:/python/license", "% (quote_by_default, result)) # Safe expressed as bytes rather than", "= urllib.request.urlopen(url) self.assertEqual(fp.geturl(), url) finally: self.unfakehttp() def test_willclose(self): self.fakehttp(b\"HTTP/1.1 200", "input type\") def test_using_sequence(self): # Test passing in a sequence", "os.path.normcase(tmpfile)) @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_remote(self): url = \"http://www.python.org/file.txt\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\")", "test_read_1_1(self): self.check_read(b\"1.1\") def test_read_bogus(self): # urlopen() should raise OSError for", "0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=\":$\",", "\"copy of the file was not \" \"made\") FILE =", "100 Content-Type: text/html; charset=iso-8859-1 FF ''') with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL)", "= [] # Create a temporary file. self.registerFileForCleanUp(support.TESTFN) self.text =", "result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, \"using unquote_plus(): %r !=", "%r != %r\" % (expect, result)) def test_unquote_to_bytes(self): given =", "self.assertTrue(resp.fp.will_close) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_path_with_control_char_rejected(self): for", "\"simple responses\" without # a status line) self.check_read(b\"0.9\") def test_read_1_0(self):", "\"%E6%BC%A2%E5%AD%97\" expect = \"\\u6f22\\u5b57\" # \"Kanji\" result = urllib.parse.unquote(given) self.assertEqual(expect,", "# above attempts at injection within the url _path_ safe.", "tests. 
buf = None def connect(self): self.sock = FakeSocket(self.fakedata) type(self).fakesock", "ASCII values works escape_list = [] for num in range(128):", "with self.assertRaisesRegex(urllib.error.HTTPError, msg): urlopen(\"http://python.org/\") finally: self.unfakehttp() def test_redirect_limit_independent(self): # Ticket", "!= %r\" % (expect, result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result,", "work the same result = urllib.parse.quote_from_bytes(given) self.assertEqual(expect, result, \"using quote_from_bytes():", "_urlopener = opener else: opener = _urlopener if data is", "folder that uses a lowercase drive letter. self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile)) @support.ignore_warnings(category=DeprecationWarning)", "sendall(self, data): FakeHTTPConnection.buf = data def makefile(self, *args, **kwds): self.io_refs", "(\"read\", \"readline\", \"readlines\", \"fileno\", \"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.returned_obj,", "given = \"ab\\u6f22\\u5b57 cd\" expect = \"ab%3F%3F+cd\" result = urllib.parse.quote_plus(given,", "[('text/plain', ''), ('charset', 'US-ASCII')]) def test_geturl(self): self.assertEqual(self.text_url_resp.geturl(), self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64)", "= '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=\":$\", encoding=\"latin-1\") self.assertEqual(expect, result)", "def test_urlencode_encoding(self): # ASCII encoding. 
Expect %3F with errors=\"replace' given", "urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_bytes(self): given = ((b'\\xa0\\x24',", "%r\" % (expect, result)) given = '%' expect = given", "expect = \"%A2%D8ab%FF\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote():", "URL that contains ignorable spaces, # such as \"\\n\", \"", "test_redirect_limit_independent(self): # Ticket #12923: make sure independent requests each use", "b\"\") self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_url_fragment(self): #", "'\\u00c1'),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) #", "quote(): %r != %r\" % (result, hexescape(' '))) result =", "self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN), second_temp, hooktester) def test_reporthook_0_bytes(self): # Test on", "setUp(self): # Records changes to env vars self.env = support.EnvironmentVarGuard()", "the open method of URLopener class.\"\"\" def test_quoted_open(self): class DummyURLopener(urllib.request.URLopener):", "self.pathname) def tearDown(self): \"\"\"Shut down the open object\"\"\" self.returned_obj.close() os.remove(support.TESTFN)", "self.returned_obj.readline()) self.assertEqual(b'', self.returned_obj.readline(), \"calling readline() after exhausting the file did", "# import socket, time # serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #", "os.path.abspath(filePath) try: filePath.encode(\"utf-8\") except UnicodeEncodeError: raise unittest.SkipTest(\"filePath is not encodable", "# Characters in the Latin-1 range, encoded with UTF-8 given", "\"vHgAAAABJRU5ErkJggg%3D%3D%0A%20\") self.text_url_resp = urllib.request.urlopen(self.text_url) self.text_url_base64_resp = urllib.request.urlopen( self.text_url_base64) self.image_url_resp =", "Test 
for #10836 with self.assertRaises(urllib.error.URLError) as e: urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason)", "urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=b\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\") self.assertEqual(expect, result,", "finally: self.unfakehttp() def test_url_fragment(self): # Issue #11703: geturl() omits fragments", "urllib.parse.unquote(given, errors=\"ignore\") self.assertEqual(expect, result, \"using unquote(): %r != %r\" %", "test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_getcode(self): self.assertIsNone(self.returned_obj.getcode()) def test_iter(self): # Test", "= urllib.parse.unquote(\"\\u6f22%FC\", encoding=\"latin-1\") expect = '\\u6f22\\u00fc' self.assertEqual(expect, result, \"using unquote():", "url for url in ('local_file://example', 'local-file://example'): self.assertRaises(OSError, urllib.request.urlopen, url) self.assertRaises(OSError,", "test_getcode(self): self.assertIsNone(self.returned_obj.getcode()) def test_iter(self): # Test iterator # Don't need", "unquoting of all ASCII values works escape_list = [] for", "\"using quote_plus(): %r != %r\" % (quote_by_default, result)) # Safe", "self.pathname) class ProxyTests(unittest.TestCase): def setUp(self): # Records changes to env", "self.assertRaises(OSError, DummyURLopener().retrieve, url) # Just commented them out. # Can't", "hex_repr return \"%\" + hex_repr # Shortcut for testing FancyURLopener", "self.io_refs -= 1 if self.io_refs == 0: io.BytesIO.close(self) class FakeHTTPConnection(http.client.HTTPConnection):", "= urllib.request.url2pathname(given) self.assertEqual(expect, result, \"url2pathname() failed; %s != %s\" %", "works. 
second_temp = \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) result = urllib.request.urlretrieve(self.constructLocalFileUrl(", "return url with support.check_warnings( ('DummyURLopener style of invoking requests is", "+\" % result) given = \"a b cd e f\"", "\"0123456789\", \"_.-~\"]) result = urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote(): %r", "'http://user:pass@python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_userpass_inurl_w_spaces(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\")", "for deletion during the test fixture tear down, and returns", "class URLopener_Tests(FakeHTTPMixin, unittest.TestCase): \"\"\"Testcase to test the open method of", "dirs, timeout=None, persistent=True): pass def retrfile(self, file, type): return io.BytesIO(),", "5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][2], 5) self.assertEqual(report[1][2], 5)", "True for 'doseq' parameter works correctly given = {'sequence':['1', '2',", "None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ()) def test_unquoting_mixed_case(self): # Test unquoting", "%r != %r\" % (expect, result)) expect = given.replace(' ',", "lead to only the pairs: * 1st, 1 * 2nd,", "\"\"\" def test_never_quote(self): # Make sure quote() does not quote", "= urllib.parse.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote_plus(): %r != %r\"", "OK\\r\\n\\r\\nHello.\") host = \"localhost\\r\\nX-injected: header\\r\\n\" schemeless_url = \"//\" + host", "= http.client.HTTPConnection authorization = (\"Authorization: Basic %s\\r\\n\" % b64encode(userpass.encode(\"ASCII\")).decode(\"ASCII\")) fp", "self.assertTrue(urllib.request.proxy_bypass_environment('localhost')) 
self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678')) self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234')) self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) # Test lowercase preference with", "def test_unquoting_badpercent(self): # Test unquoting on bad percent-escapes given =", "createNewTempFile(self, data=b\"\"): \"\"\"Creates a new temporary file containing the specified", "# Test on 5 byte file. Should call reporthook only", "self.assertIn(expect, result) self.assertEqual(result.count('&'), 2, \"Expected 2 '&'s, got %s\" %", "((\"\\u00a0\", \"\\u00c1\"),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect,", "< 13: # data = conn.recv(13-cantdata) # cantdata += len(data)", "escape_list.append(given) escape_string = ''.join(escape_list) del escape_list result = urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'),", "def test_nonstring_values(self): self.assertEqual(\"a=1\", urllib.parse.urlencode({\"a\": 1})) self.assertEqual(\"a=None\", urllib.parse.urlencode({\"a\": None})) def test_nonstring_seq_values(self):", "!= %r\" % (quote_by_default, result)) # Safe expressed as bytes", "= \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) result = urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN), second_temp)", "bytes rather than str result = urllib.parse.quote(quote_by_default, safe=b\"<>\") self.assertEqual(quote_by_default, result,", "in Python 2's \"urllib\" module\"\"\" import urllib.parse import urllib.request import", "\"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 \" \"vHgAAAABJRU5ErkJggg%3D%3D%0A%20\") self.text_url_resp = urllib.request.urlopen(self.text_url) self.text_url_base64_resp = urllib.request.urlopen( self.text_url_base64)", "\"http://something\") finally: self.unfakehttp() def 
test_empty_socket(self): # urlopen() raises OSError if", "in # their unique way result = urllib.parse.quote(' ') self.assertEqual(result,", "!= %r\" % (expect, result)) def test_unquoting_with_bytes_input(self): # Bytes not", "file. Try to test as much functionality as possible so", "temporary files. for each in self.tempFiles: try: os.remove(each) except: pass", "result)) def test_unquote_with_unicode(self): # Characters in the Latin-1 range, encoded", "result, \"using quote_from_bytes(): %r != %r\" % (expect, result)) def", "Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') def _reporthook(par1, par2,", "tearDown(self): # Delete the temporary files. for each in self.tempFiles:", "% (expect, result)) given = os.path.join(\"make sure\", \"using_quote\") expect =", "responsibility of the developer to properly close files even #", "at least . .)\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urllib.request.urlopen(f\"https:{schemeless_url}\")", "\"%s/using_quote\" % urllib.parse.quote(\"make sure\") result = urllib.request.pathname2url(given) self.assertEqual(expect, result, \"pathname2url()", "urllib.request.urlretrieve(support.TEST_HTTP_URL) finally: self.unfakehttp() class QuotingTests(unittest.TestCase): r\"\"\"Tests for urllib.quote() and urllib.quote_plus()", "operation on closed file\" which is logged as an #", "(expect, result)) # Characters in BMP, encoded with Latin-1 given", "just gets its own location returned and # a headers", "allowed to have non-ASCII characters) result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\\xfc\")", "yet with self.assertRaisesRegex(TypeError, 'Expected str, got bytes'): given = b'bl\\xc3\\xa5b\\xc3\\xa6rsyltet\\xc3\\xb8y'", "hexescape(' '))) result = urllib.parse.quote_plus(' ') self.assertEqual(result, '+', \"using quote_plus():", "of values given = ((b'\\xa0\\x24', (42, b'\\xc1\\x24')),) expect = 
'%A0%24=42&%A0%24=%C1%24'", "% (result, expected_path)) def test_quoting(self): # Test automatic quoting and", "above attempts at injection within the url _path_ safe. InvalidURL", "time.sleep(.1) # # def tearDown(self): # self.evt.wait() # # def", "def readline(self, length=None): if self.closed: return b\"\" return io.BytesIO.readline(self, length)", "one black and one white pixel self.image = ( b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\x02\\x00\\x00\\x00'", "urllib.quote() and urllib.quote_plus() According to RFC 3986 (Uniform Resource Identifiers),", "quoted in URL so no match self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(), 200)", "given = \"are+there+spaces...\" expect = given result = urllib.parse.unquote(given) self.assertEqual(expect,", "'file://localhost/' + tmp_file.replace(os.path.sep, '/') try: self.assertTrue(os.path.exists(tmp_file)) with urlopen(tmp_fileurl) as fobj:", "and \"_,.-\" do_not_quote = '' .join([\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\", \"0123456789\", \"_.-~\"]) result", "urllib.parse.unquote(given) self.assertEqual(expect, result, \"using quote(): %r != %r\" % (expect,", "int, \"fileno() did not return an int\") self.assertEqual(os.read(file_num, len(self.text)), self.text,", "# We need to test conditions, where variable order _is_", "in (\"read\", \"readline\", \"readlines\", \"fileno\", \"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"):", "''.join(should_quote) for char in should_quote: result = urllib.parse.quote(char) self.assertEqual(hexescape(char), result,", "self.createNewTempFile() urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self):", "8192) self.assertEqual(report[1][1], 8192) self.assertEqual(report[2][1], 8192) class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin): \"\"\"Test urllib.urlretrieve()", 
"Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100", "new temporary file containing the specified data, registers the file", "opening a temporary file. Try to test as much functionality", "block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile() urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),", "place in %s\" % (test_type, result)) self.assertEqual(len(result), (5 * 3)", "', resp.geturl()) self.assertNotIn('\\r', resp.geturl()) self.assertNotIn('\\n', resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl", "ASCII Encoding. On a sequence of values. given = ((\"\\u00a0\",", "get an email.message.Message instance \" \"as second returned value\") def", "0x7F should_quote = ''.join(should_quote) for char in should_quote: result =", "expect = '%A0$=%C1$' result = urllib.parse.urlencode(given, doseq=True, safe=\":$\", encoding=\"latin-1\") given", "proxies['http']) class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin): \"\"\"Test urlopen() opening a fake", "for urlencode()\"\"\" def help_inputtype(self, given, test_type): \"\"\"Helper method for testing", "Basic %s\\r\\n\" % b64encode(userpass.encode(\"ASCII\")).decode(\"ASCII\")) fp = urlopen(url) # The authorization", "found in %s\" % (test_type, expected, result)) self.assertEqual(result.count('&'), 2, \"testing", "def close(self): pass self._ftpwrapper_class = urllib.request.ftpwrapper urllib.request.ftpwrapper = FakeFtpWrapper def", "\"///\\u00e8|/\") def test_roundtrip_url2pathname(self): list_of_paths = ['C:', r'\\\\\\C\\test\\\\', r'C:\\foo\\bar\\spam.foo' ] for", "urlopen()' function defined in this... 
(quite ugly) # test suite.", "= \"<>\" result = urllib.parse.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote():", "(expect, result)) given = '%x' expect = given result =", "buf = None def connect(self): self.sock = FakeSocket(self.fakedata) type(self).fakesock =", "# Test setting 'safe' parameter does what it should do", "in urlopen tests. buf = None def connect(self): self.sock =", "supported yet with self.assertRaisesRegex(TypeError, 'Expected str, got bytes'): given =", "only 2 times (once when # the \"network connection\" is", "urllib.request.getproxies_environment() self.assertEqual({}, proxies) # Test lowercase preference of proxy bypass", "with self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl) def test_ftp_nohost(self): test_ftp_url = 'ftp:///path' with self.assertRaises(urllib.error.URLError)", "attr) def test_info(self): self.assertIsInstance(self.text_url_resp.info(), email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain', ''), ('charset', 'ISO-8859-1')])", "%s\" % (char, hexescape(char), result)) del should_quote partial_quote = \"ab[]cd\"", "mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF", "\"using quote(): %r != %r\" % (expected, result)) result =", "self.assertRaisesRegex(urllib.error.HTTPError, msg): urlopen(\"http://python.org/\") finally: self.unfakehttp() def test_redirect_limit_independent(self): # Ticket #12923:", "(no '_proxy') keys self.assertEqual('localhost', proxies['no']) # List of no_proxies with", "If anybody has one of the problematic environments, please help!", "so there is no injection. 
resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(' ',", "del escape_list result = urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'), 1, \"using unquote(): not", "second_temp = \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN), second_temp, hooktester)", "not found in %s\" % (test_type, expected, result)) self.assertEqual(result.count('&'), 2,", "= os.path.join(\"parts\", \"of\", \"a\", \"path\") expected_url = \"parts/of/a/path\" result =", "result = urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result) given = ((\"\\u00a0\",", "def test_read_1_1(self): self.check_read(b\"1.1\") def test_read_bogus(self): # urlopen() should raise OSError", "it should do quote_by_default = \"<>\" result = urllib.parse.quote(quote_by_default, safe=quote_by_default)", "8192) class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin): \"\"\"Test urllib.urlretrieve() using fake http connections\"\"\"", "42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=\":$\") self.assertEqual(expect,", "interspersed given = 'ab%sd' % hexescape('c') expect = \"abcd\" result", "injection. resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(char, resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl", "(expect, result)) given = '%' expect = bytes(given, 'ascii') result", "def test_ftp_nohost(self): test_ftp_url = 'ftp:///path' with self.assertRaises(urllib.error.URLError) as e: urlopen(test_ftp_url)", "# (Technically an invalid URI; expect those characters to be", "str result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=b\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\",", "'POST') def test_with_method_arg(self): Request = urllib.request.Request request = Request(\"http://www.python.org\", method='HEAD')", "comparison. 
# Use the iterator in the usual implicit way", "b\"\") self.assertEqual(fp.geturl(), 'http://user:pass@python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_userpass_inurl_w_spaces(self): self.fakehttp(b\"HTTP/1.0", "self.image = ( b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\x02\\x00\\x00\\x00' b'\\x01\\x08\\x02\\x00\\x00\\x00{@\\xe8\\xdd\\x00\\x00\\x00\\x01sRGB\\x00\\xae' b'\\xce\\x1c\\xe9\\x00\\x00\\x00\\x0fIDAT\\x08\\xd7c```\\xf8\\xff\\xff?\\x00' b'\\x06\\x01\\x02\\xfe\\no/\\x1e\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82') self.text_url = (", "self.evt.wait() # # def testBasic(self): # # connects # ftp", "test_quote_with_unicode(self): # Characters in Latin-1 range, encoded by default in", "to silence this error. def close(self): pass FakeHTTPConnection.fakedata = fakedata", "# threading.Thread(target=server, args=(self.evt,)).start() # time.sleep(.1) # # def tearDown(self): #", "self.assertEqual(url2pathname(\"////C/test/\"), r'\\\\C\\test' '\\\\') def test_simple_compare(self): self.assertEqual(url2pathname(\"///C|/foo/bar/spam.foo\"), r'C:\\foo\\bar\\spam.foo') def test_non_ascii_drive_letter(self): self.assertRaises(IOError,", "Have special meaning in URIs and must be escaped if", "self.assertEqual({}, proxies) # Test lowercase preference of proxy bypass and", "Test with a bytes as input, with unescaped non-ASCII bytes", "\";/?:@&=+$,\" Have special meaning in URIs and must be escaped", "each in self.tempFiles: try: os.remove(each) except: pass def constructLocalFileUrl(self, filePath):", "= urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_bytes(self): given =", "it returned anything beyond the first line from the #", "an # \"Exception ignored in\". Override close() to silence this", "and once for the last byte). report = [] def", "%s != %s' % (expect, result)) given = '///C|/path' expect", "an argument. 
self.help_inputtype({\"1st\":'1', \"2nd\":'2', \"3rd\":'3'}, \"using dict as input type\")", "'<>#%\"' Must be escaped Unwise : \"{}|\\^[]`\" Must be escaped", "= \"file:\" + urllib.request.pathname2url(tmpfile) filename, _ = urllib.request.URLopener().retrieve(fileurl) # Some", "expect = '%A0%24=42&%A0%24=%C1%24' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) def", "any # data. (#1680230) self.fakehttp(b'') try: self.assertRaises(OSError, urlopen, \"http://something\") finally:", "timeout is ignored # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30)", "= ((\"\\u00a0\", (42, \"\\u00c1\")),) expect = '%A0=42&%A0=%C1' result = urllib.parse.urlencode(given,", "'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image))) self.assertEqual(urllib.request.urlopen(\"data:,\").info().get_params(), [('text/plain', ''), ('charset', 'US-ASCII')]) def test_geturl(self):", "Test lowercase preference of proxy bypass and correct matching including", "else: return opener.open(url, data) def FancyURLopener(): with support.check_warnings( ('FancyURLopener style", "http.client import email.message import io import unittest from unittest.mock import", "b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\x02\\x00\\x00\\x00' b'\\x01\\x08\\x02\\x00\\x00\\x00{@\\xe8\\xdd\\x00\\x00\\x00\\x01sRGB\\x00\\xae' b'\\xce\\x1c\\xe9\\x00\\x00\\x00\\x0fIDAT\\x08\\xd7c```\\xf8\\xff\\xff?\\x00' b'\\x06\\x01\\x02\\xfe\\no/\\x1e\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82') self.text_url = ( \"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3\" \"D%26%20%C3%B6%20%C3%84%20\") self.text_url_base64", "result) def test_doseq(self): # Test that passing True for 'doseq'", "Should call reporthook only 3 times (once # when the", "self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\", {}, method='HEAD') 
self.assertEqual(request.method,", "result = urllib.parse.unquote(given, errors=\"ignore\") self.assertEqual(expect, result, \"using unquote(): %r !=", "on a string with unescaped non-ASCII characters # (Technically an", "open method of URLopener class.\"\"\" def test_quoted_open(self): class DummyURLopener(urllib.request.URLopener): def", "'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: fp = urllib.request.urlopen(url) self.assertEqual(fp.geturl(), url)", "matter on the hex letters. The various character sets specified", "of iterations since test would fail the # instant it", "result = urllib.parse.unquote(given, encoding=None, errors=None) self.assertEqual(expect, result, \"using unquote(): %r", "for #10836 with self.assertRaises(urllib.error.URLError) as e: urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason) def", "Make sure unquoting of all ASCII values works escape_list =", "os.path.join(\"make+sure\", \"using_unquote\") result = urllib.request.url2pathname(given) self.assertEqual(expect, result, \"url2pathname() failed; %s", "Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e", "= urllib.request.getproxies_environment() self.assertNotIn('http', proxies) finally: self.env.unset('REQUEST_METHOD') self.env.unset('HTTP_PROXY') def test_proxy_bypass_environment_host_match(self): bypass", "\" \"%s should be escapes to %s, not %s\" %", "\"using_quote\") expect = \"%s/using_quote\" % urllib.parse.quote(\"make sure\") result = urllib.request.pathname2url(given)", "# Characters in BMP, encoded with Latin-1 given = \"\\u6f22\\u5b57\"", "doseq=True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # ASCII Encoding. 
On a", "more lines\\n\") # conn.close() # except socket.timeout: # pass #", "Restore all proxy related env vars self.env.__exit__() del self.env def", "result = urllib.parse.quote_plus(char) self.assertEqual(hexescape(char), result, \"using quote_plus(): \" \"%s should", "object as an argument. self.help_inputtype({\"1st\":'1', \"2nd\":'2', \"3rd\":'3'}, \"using dict as", "%r\" % (expect, result)) escape_list.append(given) escape_string = ''.join(escape_list) del escape_list", "test_interface(self): # Make sure object returned by urlopen() has the", "@unittest.skipUnless(ssl, \"ssl module required\") def test_cafile_and_context(self): context = ssl.create_default_context() with", "in list(os.environ): if 'proxy' in k.lower(): self.env.unset(k) def tearDown(self): #", "block_read_size, file_size)) srcFileName = self.createNewTempFile(b\"x\" * 8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester)", "Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Type: text/html;", "encoding=\"latin-1\", safe=b\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\") self.assertEqual(expect, result, \"using", "the responsibility of the developer to properly close files even", "value self.assertIn(expect, result) self.assertEqual(result.count('&'), 2, \"Expected 2 '&'s, got %s\"", "user, passwd, host, port, dirs, timeout=None, persistent=True): pass def retrfile(self,", "urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin): \"\"\"Test urlopen() opening a fake http connection.\"\"\"", "the tearDown method. 
Note, # this only helps to makes", "self.assertEqual(expected_path, result, \"url2pathame() failed; %s != %s\" % (result, expected_path))", "self.text) def test_read_image(self): self.assertEqual(self.image_url_resp.read(), self.image) def test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def test_invalid_base64_data(self):", "for many error codes. self.fakehttp(b'''HTTP/1.1 302 Found Date: Wed, 02", "failed; %s != %s\" % (expect, result)) expect = given", "% (expect, result)) # Test with a bytes as input,", "pairs: * 1st, 1 * 2nd, 2 * 3rd, 3", "with errors=\"replace' given = (('\\u00a0', '\\u00c1'),) expect = '%3F=%3F' result", "failed; %s != %s\" % (expect, result)) given = \"make+sure/using_unquote\"", "def test_unquoting_parts(self): # Make sure unquoting works when have non-quoted", "FF ''') def _reporthook(par1, par2, par3): pass with self.assertRaises(urllib.error.ContentTooShortError): try:", "in range(32)] # For 0x00 - 0x1F should_quote.append(r'<>#%\"{}|\\^[]`') should_quote.append(chr(127)) #", "'HEAD') class URL2PathNameTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(url2pathname(\"///C|\"), 'C:') self.assertEqual(url2pathname(\"///C:\"), 'C:') self.assertEqual(url2pathname(\"///C|/\"),", "test_long_drive_letter(self): self.assertRaises(IOError, pathname2url, \"XX:\\\\\") def test_roundtrip_pathname2url(self): list_of_paths = ['///C:', '/////folder/test/',", "Latin-1 range, encoded with UTF-8 given = 'br%C3%BCckner_sapporo_20050930.doc' expect =", "file, type): return io.BytesIO(), 0 def close(self): pass self._ftpwrapper_class =", "% (expect, result)) # Characters in BMP, encoded by default", "urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertFalse(bypass('localhost\\n')) self.assertFalse(bypass('anotherdomain.com:8888\\n')) self.assertFalse(bypass('newdomain.com:1234\\n')) class 
ProxyTests_withOrderedEnv(unittest.TestCase):", "result, \"using unquote(): %r != %r\" % (expect, result)) given", "# such as \"\\n\", \" \", \"%0A\", and \"%20\". self.image_url", "which calls # flush(). Problem: flush() calls self.fp.flush() which raises", "expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result", "Issue #11703: geturl() omits fragments in the original URL. url", "\"abcdefghijklmnopqrstuvwxyz\", \"0123456789\", \"_.-~\"]) result = urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote():", "url) finally: self.unfakehttp() def test_willclose(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: resp", "fake http connections\"\"\" def test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed,", "codes. self.fakehttp(b'''HTTP/1.1 302 Found Date: Wed, 02 Jan 2008 03:03:54", "support.check_warnings(('', DeprecationWarning)): with self.assertRaises(ValueError): urllib.request.urlopen( \"https://localhost\", cafile=\"/nonexistent/path\", context=context ) class", "guarantee and have possible dictionary input. 
\"\"\" expect_somewhere = [\"1st=1\",", "% (do_not_quote, result)) def test_default_safe(self): # Test '/' is default", "line in self.returned_obj: self.assertEqual(line, self.text) def test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname)", "def hexescape(char): \"\"\"Escape char as RFC 2396 specifies\"\"\" hex_repr =", "os.fdopen(newFd, \"wb\") newFile.write(data) newFile.close() finally: try: newFile.close() except: pass return", "(('\\u00a0', '\\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, doseq=True, encoding=\"ASCII\",", "ssl except ImportError: ssl = None import sys import tempfile", "letters, digits, and \"_,.-\" do_not_quote = '' .join([\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\", \"0123456789\",", "% (expect, result)) escape_list.append(given) escape_string = ''.join(escape_list) del escape_list result", "% b64encode(userpass.encode(\"ASCII\")).decode(\"ASCII\")) fp = urlopen(url) # The authorization header must", "be in place self.assertIn(authorization, fakehttp_wrapper.buf.decode(\"UTF-8\")) self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") #", "with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r.*(found at least . 
.)\"): urllib.request.urlopen(f\"http:{schemeless_url}\")", "as bytes rather than str result = urllib.parse.quote(quote_by_default, safe=b\"<>\") self.assertEqual(quote_by_default,", "result = urllib.parse.urlencode(given, True, safe=\":$\", encoding=\"latin-1\") self.assertEqual(expect, result) class Pathname_Tests(unittest.TestCase):", "result)) # Decode with UTF-8, invalid sequence, ignoring errors given", "try: InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r\"): urlopen(f\"http:{schemeless_url}\")", "str, got bytes'): given = b'bl\\xc3\\xa5b\\xc3\\xa6rsyltet\\xc3\\xb8y' urllib.parse.unquote(given) class urlencode_Tests(unittest.TestCase): \"\"\"Tests", "= \"%s/using_quote\" % urllib.parse.quote(\"make sure\") result = urllib.request.pathname2url(given) self.assertEqual(expect, result,", "may still be open. It # is the responsibility of", "def test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008", "file. 
self.registerFileForCleanUp(support.TESTFN) self.text = b'testing urllib.urlretrieve' try: FILE = open(support.TESTFN,", "testBasic(self): # # connects # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\",", "def test_long_drive_letter(self): self.assertRaises(IOError, pathname2url, \"XX:\\\\\") def test_roundtrip_pathname2url(self): list_of_paths = ['///C:',", "\"%C2%A2%C3%98ab%C3%BF\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote(): %r !=", "(expect, result)) given = os.path.join(\"make sure\", \"using_quote\") expect = \"%s/using_quote\"", ".join([\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\", \"0123456789\", \"_.-~\"]) result = urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote, result, \"using", "= '%3F=%3F' result = urllib.parse.urlencode(given, doseq=True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result)", "InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"https:{schemeless_url}\")", "to test the open method of URLopener class.\"\"\" def test_quoted_open(self):", "= urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, [], # timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(),", "style of invoking requests is deprecated.', DeprecationWarning)): return urllib.request.FancyURLopener() def", "# ftp.close() # # def testTimeoutDefault(self): # # global default", "\"\\xa2\\xd8 \\xff\" expect = \"%A2%D8+%FF\" result = urllib.parse.quote_plus(given, encoding=\"latin-1\") self.assertEqual(expect,", "result)) given = '%' expect = given result = urllib.parse.unquote(given)", "in should_quote: result = urllib.parse.quote(char) self.assertEqual(hexescape(char), result, \"using quote(): \"", "different url opening codepaths. 
Plain # urlopen uses FancyURLOpener which", "self._connection_class = http.client.HTTPConnection http.client.HTTPConnection = fake_http_class def unfakehttp(self): http.client.HTTPConnection =", "FancyURLOpener which goes via a codepath that # calls urllib.parse.quote()", "testTimeoutNone(self): # # global default timeout is ignored # import", "attr in (\"read\", \"readline\", \"readlines\", \"fileno\", \"close\", \"info\", \"geturl\", \"getcode\",", "unquoting on bad percent-escapes given = '%xab' expect = given", "b cd e f\" expect = given.replace(' ', hexescape(' '))", "test_read_1_0(self): self.check_read(b\"1.0\") def test_read_1_1(self): self.check_read(b\"1.1\") def test_read_bogus(self): # urlopen() should", "os.close(fd) fileurl = \"file:\" + urllib.request.pathname2url(tmpfile) filename, _ = urllib.request.URLopener().retrieve(fileurl)", "Test all above in latin-1 encoding given = ((b'\\xa0\\x24', b'\\xc1\\x24'),)", "8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][2], 8193) self.assertEqual(report[0][1], 8192)", "above in latin-1 encoding given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result =", "test_local_file_open(self): # bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme class", "that). should_quote = [chr(num) for num in range(32)] # For", "b\"\" return io.BytesIO.readline(self, length) def close(self): self.io_refs -= 1 if", "given = \"%F3%B1\" expect = \"\\ufffd\" # Replacement character result", "urlopen() opening a temporary file. 
Try to test as much", "= 0 # while cantdata < 13: # data =", "(1, \"\\u00c1\")),) expect = '%3F=1&%3F=%3F' result = urllib.parse.urlencode(given, True, encoding=\"ASCII\",", "OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license Connection: close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True)", "escapes to %s, not %s\" % (char, hexescape(char), result)) del", "0x00 - 0x1F should_quote.append(r'<>#%\"{}|\\^[]`') should_quote.append(chr(127)) # For 0x7F should_quote =", "= urllib.parse.urlencode(given) self.assertEqual(expect, result) given = {\"key name\":\"A bunch of", "self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urlopen(f\"https:{schemeless_url}\")", "not %s\" % (char, hexescape(char), result)) del should_quote partial_quote =", "data=None, proxies=None): \"\"\"urlopen(url [, data]) -> open file-like object\"\"\" global", "'///C:/foo/bar/spam.foo'] for path in list_of_paths: self.assertEqual(pathname2url(url2pathname(path)), path) if __name__ ==", "200 OK\\r\\n\\r\\nHello.\") try: escaped_char_repr = repr(char).replace('\\\\', r'\\\\') InvalidURL = http.client.InvalidURL", "\"ab%5B%5Dcd\" result = urllib.parse.quote(partial_quote) self.assertEqual(expected, result, \"using quote(): %r !=", "test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost') proxies = urllib.request.getproxies_environment() # getproxies_environment use lowered", "is used # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) #", "encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # Default is UTF-8 encoding. given", "encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote(): %r != %r\" % (expect,", "each use their # own retry limit. 
for i in", "return an int\") self.assertEqual(os.read(file_num, len(self.text)), self.text, \"Reading on the file", "specified methods for attr in (\"read\", \"readline\", \"readlines\", \"close\", \"info\",", "= result[amp_location - 1] on_amp_right = result[amp_location + 1] self.assertTrue(on_amp_left.isdigit()", "bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'), 'alpha+beta+gamma') def test_quote_bytes(self): # Bytes should", "method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\", {}, method='HEAD')", "% (expect, result)) # Characters in Latin-1 range, encoded by", "self.assertEqual(self.text, text) def test_reporthook(self): # Make sure that the reporthook", "defined in this... (quite ugly) # test suite. They use", "self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'), 'alpha+beta+gamma') # Test with bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'),", "urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, []) urlopen('ftp://localhost') finally: self.unfakeftp()", "= chr(num) result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r", "ok. # If anybody has one of the problematic environments,", "# Create a list of temporary files. Each item in", "None: return opener.open(url) else: return opener.open(url, data) def FancyURLopener(): with", "= \"sequence=%s\" % value self.assertIn(expect, result) self.assertEqual(result.count('&'), 2, \"Expected 2", "test_urlencode_bytes(self): given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0%24=%C1%24' result =", "def tearDown(self): \"\"\"Shut down the open object\"\"\" self.returned_obj.close() os.remove(support.TESTFN) def", "setUp(self): # Create a list of temporary files. 
Each item", "\"\\u6f22\\u5b57\" self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given, encoding=\"latin-1\") # Characters in BMP, encoded", "Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly.", "be escaped Delimiters : '<>#%\"' Must be escaped Unwise :", "makes sure temporary files get deleted, but it # does", "for the last byte). report = [] def hooktester(block_count, block_read_size,", "of two-item tuples as input\") def test_quoting(self): # Make sure", "encoded data URL that contains ignorable spaces, # such as", "in (\"read\", \"readline\", \"readlines\", \"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.text_url_resp,", "are not allowed to have non-ASCII characters) result = urllib.parse.quote(\"a\\xfcb\",", "have non-ASCII characters) result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\\xfc\") expect =", "= '%A0=42&%A0=%C1' result = urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result) def", "for testing FancyURLopener _urlopener = None def urlopen(url, data=None, proxies=None):", "@support.ignore_warnings(category=DeprecationWarning) def test_local_file_open(self): # bpo-35907, CVE-2019-9948: urllib must reject local_file://", "given = ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result", "srcFileName = self.createNewTempFile() urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0)", "given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\") expect =", "specified methods for attr in (\"read\", \"readline\", \"readlines\", \"fileno\", \"close\",", "\"\\u6f22\\u5b57\" # \"Kanji\" result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote():", "self.assertEqual(expect, result, \"using quote_plus(): %r != %r\" % (expect, 
result))", "and unquote_plus() given = \"are+there+spaces...\" expect = given result =", "# Default utf-8 encoding given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result =", "\"unexpected number of characters: %s != %s\" % (test_type, len(result),", "block_read_size, file_size, count_holder=[0]): self.assertIsInstance(block_count, int) self.assertIsInstance(block_read_size, int) self.assertIsInstance(file_size, int) self.assertEqual(block_count,", "'%x' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result,", "= None def connect(self): self.sock = FakeSocket(self.fakedata) type(self).fakesock = self.sock", "encoded). result = urllib.parse.unquote_to_bytes(\"\\u6f22%C3%BC\") expect = b'\\xe6\\xbc\\xa2\\xc3\\xbc' # UTF-8 for", "Identifiers), to escape a character you write it as '%'", "self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/') # test the safe characters are not", "range, encoded with Latin-1 given = \"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\"", "for urllib.quote() and urllib.quote_plus() According to RFC 3986 (Uniform Resource", "self.assertIsInstance(result[1], email.message.Message, \"did not get an email.message.Message instance \" \"as", "test_urlencode_encoding_safe_parameter(self): # Send '$' (\\x24) as safe character # Default", "and values are quoted using quote_plus() given = {\"&\":\"=\"} expect", "encoded with UTF-8 given = 'br%C3%BCckner_sapporo_20050930.doc' expect = 'br\\u00fcckner_sapporo_20050930.doc' result", "file-like object\"\"\" global _urlopener if proxies is not None: opener", "self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") # the spaces are quoted in", "= urllib.parse.unquote(given, errors=\"replace\") self.assertEqual(expect, result, \"using unquote(): %r != %r\"", "8193 byte file. 
Should call reporthook only 3 times (once", "expected text\") def test_close(self): # Test close() by calling it", "to RFC 3986 (Uniform Resource Identifiers), to escape a character", "url in given: result = urllib.request.url2pathname(url) self.assertEqual(expect, result, 'urllib.request..url2pathname() failed;", "None def connect(self): self.sock = FakeSocket(self.fakedata) type(self).fakesock = self.sock if", "self.image_url) def test_read_text(self): self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']), self.text) def test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']),", "self.createNewTempFile(b\"x\" * 5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][2], 5)", "* 1st, 1 * 2nd, 2 * 3rd, 3 Test", "directory). # All files in this list will be deleted", "200 OK\\r\\n\\r\\nHello!\") try: resp = urlopen(\"http://www.python.org\") self.assertTrue(resp.fp.will_close) finally: self.unfakehttp() @unittest.skipUnless(ssl,", "self.assertEqual(pathname2url(r\"\\\\\\folder\\test\" \"\\\\\"), '/////folder/test/') self.assertEqual(pathname2url(r\"\\\\folder\\test\" \"\\\\\"), '////folder/test/') self.assertEqual(pathname2url(r\"\\folder\\test\" \"\\\\\"), '/folder/test/') def", "http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, f\"contain", "self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertFalse(bypass('localhost\\n')) self.assertFalse(bypass('anotherdomain.com:8888\\n')) self.assertFalse(bypass('newdomain.com:1234\\n')) class ProxyTests_withOrderedEnv(unittest.TestCase): def", "test_geturl(self): self.assertEqual(self.text_url_resp.geturl(), 
self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(), self.image_url) def test_read_text(self): self.assertEqual(self.text_url_resp.read().decode(", "is not encodable to utf8\") return \"file://%s\" % urllib.request.pathname2url(filePath) def", "% result) given = \"a b cd e f\" expect", "ASCII Encoding. Expect %3F with errors=\"replace' given = (('\\u00a0', '\\u00c1'),)", "given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0%24=%C1%24' result = urllib.parse.urlencode(given)", "os.path.join(\"needs\", \"quot=ing\", \"here\") expect = \"needs/%s/here\" % urllib.parse.quote(\"quot=ing\") result =", "!= %r\" % (expect, result)) def test_unquote_with_unicode(self): # Characters in", "# # def setUp(self): # import ftplib, time, threading #", "2, \"Expected 2 '&'s, got %s\" % result.count('&')) def test_empty_sequence(self):", "OK\\r\\n\\r\\nHello!\") try: resp = urlopen(\"http://www.python.org\") self.assertTrue(resp.fp.will_close) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl", "\"\"\"Test the urllib.request.thishost utility function returns a tuple\"\"\" self.assertIsInstance(urllib.request.thishost(), tuple)", "= urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote(): %r != %r\" %", "by with None (default) result = urllib.parse.quote(given, encoding=None, errors=None) self.assertEqual(expect,", "= b'\\xa2\\xd8ab\\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes(): %r", "linux, and # the tests go ok. 
# If anybody", "# (Note, the string contains non-Latin-1-representable characters) result = urllib.parse.unquote(\"\\u6f22%FC\",", "http connections\"\"\" def test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02", "bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertFalse(bypass('localhost\\n')) self.assertFalse(bypass('anotherdomain.com:8888\\n')) self.assertFalse(bypass('newdomain.com:1234\\n'))", "result) result = urllib.parse.urlencode(given, True) for value in given[\"sequence\"]: expect", "self.assertEqual(expect, result, \"using unquote_to_bytes(): %r != %r\" % (expect, result))", "utility functions in the urllib.\"\"\" def test_thishost(self): \"\"\"Test the urllib.request.thishost", "I have a linux, and # the tests go ok.", "a string with unescaped non-ASCII characters # (Technically an invalid", "# Default is UTF-8 encoding. given = (('\\u00a0', '\\u00c1'),) expect", "port self.assertFalse(bypass('newdomain.com:1235')) # wrong port def test_proxy_bypass_environment_always_match(self): bypass = urllib.request.proxy_bypass_environment", "escaped \"\"\" def test_never_quote(self): # Make sure quote() does not", "= urllib.request.URLopener().retrieve(fileurl) # Some buildbots have TEMP folder that uses", "lowercase preference with removal os.environ['no_proxy'] = '' os.environ['No_Proxy'] = 'localhost'", "is a file # name (absolute path or relative to", "'2'), ('3rd', '3')], \"using sequence of two-item tuples as input\")", "] for path in list_of_paths: self.assertEqual(url2pathname(pathname2url(path)), path) class PathName2URLTests(unittest.TestCase): def", "proxies=None): \"\"\"urlopen(url [, data]) -> open file-like object\"\"\" global _urlopener", "= \"\\u6f22\\u5b57\" self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given, encoding=\"latin-1\") # Characters in BMP,", "%r\" % (expect, result)) # unquote_to_bytes 
given = '%xab' expect", "= urllib.parse.urlencode(given, True, safe=\":$\", encoding=\"latin-1\") self.assertEqual(expect, result) class Pathname_Tests(unittest.TestCase): \"\"\"Test", "result)) # Errors test for quote_plus given = \"ab\\u6f22\\u5b57 cd\"", "%s\" % (expect, result)) @unittest.skipUnless(sys.platform == 'win32', 'test specific to", "test_info(self): self.assertIsInstance(self.returned_obj.info(), email.message.Message) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_getcode(self): self.assertIsNone(self.returned_obj.getcode())", "down, and returns the absolute path of the file.\"\"\" newFd,", "Should call reporthook only 1 time. report = [] def", "invalid sequence given = \"%F3%B1\" expect = \"\\ufffd\" # Replacement", "\"\\\\\"), '////folder/test/') self.assertEqual(pathname2url(r\"\\folder\\test\" \"\\\\\"), '/folder/test/') def test_simple_compare(self): self.assertEqual(pathname2url(r'C:\\foo\\bar\\spam.foo'), \"///C:/foo/bar/spam.foo\" )", "sans # space (separate test for that). 
should_quote = [chr(num)", "\" \"as second returned value\") def test_copy(self): # Test that", "def test_default_safe(self): # Test '/' is default value for 'safe'", "Bytes not supported yet with self.assertRaisesRegex(TypeError, 'Expected str, got bytes'):", "for path in list_of_paths: self.assertEqual(url2pathname(pathname2url(path)), path) class PathName2URLTests(unittest.TestCase): def test_converting_drive_letter(self):", "expect = \"%A2%D8ab%FF\" result = urllib.parse.quote(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using", "errors=\"replace\") self.assertEqual(expect, result) # Utf-8 given = ((\"\\u00a0\", \"\\u00c1\"),) expect", "encoding=\"latin-1\") given = ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42'", "self.assertEqual(\"a=None\", urllib.parse.urlencode({\"a\": None})) def test_nonstring_seq_values(self): self.assertEqual(\"a=1&a=2\", urllib.parse.urlencode({\"a\": [1, 2]}, True))", "desired Control characters : 0x00 - 0x1F, 0x7F Have no", "* 3) + 2, #5 chars per thing and amps", "to the urllib.url2path function.') def test_ntpath(self): given = ('/C:/', '///C:/',", "self.sock = FakeSocket(self.fakedata) type(self).fakesock = self.sock if mock_close: # bpo-36918:", "given = hexescape(chr(num)) expect = chr(num) result = urllib.parse.unquote(given) self.assertEqual(expect,", "self.assertTrue(os.path.exists(tmp_file)) with urlopen(tmp_fileurl) as fobj: self.assertTrue(fobj) finally: os.close(fd) os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file))", "urlopen('ftp://localhost') finally: self.unfakeftp() def test_userpass_inurl(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: fp", "test_cafile_and_context(self): context = ssl.create_default_context() with support.check_warnings(('', DeprecationWarning)): with self.assertRaises(ValueError): urllib.request.urlopen(", "url) self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_URLopener_deprecation(self): 
with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener()", "encoding. Expect %3F with errors=\"replace' given = (('\\u00a0', '\\u00c1'),) expect", "non-ASCII characters # (Technically an invalid URI; expect those characters", "elif not _urlopener: opener = FancyURLopener() _urlopener = opener else:", "result[amp_location + 1] self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), \"testing %s: '&' not", "input given = b'%A2%D8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result = urllib.parse.unquote_to_bytes(given)", "deprecated.', DeprecationWarning)): return urllib.request.FancyURLopener() def fakehttp(fakedata, mock_close=False): class FakeSocket(io.BytesIO): io_refs", "charset=iso-8859-1 ''', mock_close=True) try: msg = \"Redirection to url 'file:\"", "test_url_path_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host = \"localhost:7777?a=1 HTTP/1.1\\r\\nX-injected: header\\r\\nTEST: 123\"", "\"??\" result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using quote():", "given.replace('+', ' ') result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, \"using unquote_plus():", "as tmpdir: fd, tmpfile = tempfile.mkstemp(dir=tmpdir) os.close(fd) fileurl = \"file:\"", "call reporthook only 1 time. 
report = [] def hooktester(block_count,", "test_quoted_open(self): class DummyURLopener(urllib.request.URLopener): def open_spam(self, url): return url with support.check_warnings(", "only 3 times (once # when the \"network connection\" is", "len(hex_repr) == 1: hex_repr = \"0%s\" % hex_repr return \"%\"", "self.assertEqual(len(report), 2) self.assertEqual(report[0][2], 5) self.assertEqual(report[1][2], 5) def test_reporthook_8193_bytes(self): # Test", "result)) def test_quoting_space(self): # Make sure quote() and quote_plus() handle", "= 'HEAD' self.assertEqual(request.get_method(), 'HEAD') class URL2PathNameTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(url2pathname(\"///C|\"), 'C:')", "return \"%\" + hex_repr # Shortcut for testing FancyURLopener _urlopener", "FakeHTTPConnection(http.client.HTTPConnection): # buffer to store data for verification in urlopen", "urlopen_DataTests(unittest.TestCase): \"\"\"Test urlopen() opening a data URL.\"\"\" def setUp(self): #", "# def testTimeoutDefault(self): # # global default timeout is used", "def test_ftp_nonexisting(self): with self.assertRaises(urllib.error.URLError) as e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) @patch.object(urllib.request,", "data URL.\"\"\" def setUp(self): # text containing URL special- and", "= ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0%24=%C1%24' result = urllib.parse.urlencode(given) self.assertEqual(expect,", "''', mock_close=True) try: self.assertRaises(urllib.error.HTTPError, urlopen, \"http://something\") finally: self.unfakehttp() def test_empty_socket(self):", "not quote letters, digits, and \"_,.-\" do_not_quote = '' .join([\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\",", "testTimeoutValue(self): # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, [], #", "* 3) + 2)) def test_using_mapping(self): # Test passing in", "the tearDown() method for the test 
self.returned_obj.close() def test_info(self): self.assertIsInstance(self.returned_obj.info(),", "self.assertEqual(len(result), (5 * 3) + 2, #5 chars per thing", "finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_host_with_control_char_rejected(self): for char_no", "(expect, result)) # A mix of non-ASCII and percent-encoded characters,", "error on bytes input self.assertRaises(TypeError, urllib.parse.quote, given, encoding=\"latin-1\") # quote_from_bytes", "default timeout is used # import socket # self.assertIsNone(socket.getdefaulttimeout()) #", "established, once for the next 8192 # bytes, and once", "% (expect, result)) expect = given.replace('+', ' ') result =", "http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r.*(found at least . .)\"):", "spaces, # such as \"\\n\", \" \", \"%0A\", and \"%20\".", "-> open file-like object\"\"\" global _urlopener if proxies is not", "\"using quote(): %r != %r\" % (quote_by_default, result)) # \"Safe\"", "b'%A2%D8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using", "finally: try: FILE.close() except: pass def tearDown(self): # Delete the", "sure unquoting of all ASCII values works escape_list = []", "self._saved_env = os.environ # Monkey patch os.environ, start with empty", "Pathname_Tests(unittest.TestCase): \"\"\"Test pathname2url() and url2pathname()\"\"\" def test_basic(self): # Make sure", "email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain', ''), ('charset', 'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image))) self.assertEqual(urllib.request.urlopen(\"data:,\").info().get_params(), [('text/plain',", "= urllib.request.urlopen( self.text_url_base64) self.image_url_resp = urllib.request.urlopen(self.image_url) def test_interface(self): # Make", "is deprecated.', 
DeprecationWarning)): self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/') # test the safe", "_urlopener if data is None: return opener.open(url) else: return opener.open(url,", "urllib.parse.urlencode(given, True) for value in given[\"sequence\"]: expect = \"sequence=%s\" %", "class ProxyTests_withOrderedEnv(unittest.TestCase): def setUp(self): # We need to test conditions,", "not send any # data. (#1680230) self.fakehttp(b'') try: self.assertRaises(OSError, urlopen,", "\"localhost\", 9093, []) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30)", "= b'testing urllib.urlretrieve' try: FILE = open(support.TESTFN, 'wb') FILE.write(self.text) FILE.close()", "[0x7f]: char = chr(char_no) schemeless_url = f\"//localhost:7777/test{char}/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\")", "during the test fixture tear down, and returns the absolute", "support.temp_dir() as tmpdir: fd, tmpfile = tempfile.mkstemp(dir=tmpdir) os.close(fd) fileurl =", "= ( \"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs\" \"sJT0mIPYgxCA%3D\") # base64 encoded data URL that", "= urlopen(f\"http:{schemeless_url}\") self.assertNotIn(char, resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\")", "support.TESTFN self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN), second_temp, hooktester) def test_reporthook_0_bytes(self): # Test", "% (expect, result)) class Utility_Tests(unittest.TestCase): \"\"\"Testcase to test the various", "\"using unquote(): %r != %r\" % (expect, result)) given =", "def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual(b'', self.returned_obj.readline(), \"calling readline() after exhausting", "urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote(): %r != %r\" % (expect,", "% (quote_by_default, result)) # \"Safe\" 
non-ASCII characters should have no", "test_default_values(self): Request = urllib.request.Request request = Request(\"http://www.python.org\") self.assertEqual(request.get_method(), 'GET') request", "# their unique way result = urllib.parse.quote(' ') self.assertEqual(result, hexescape('", "= urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding=\"latin-1\") expect = 'br\\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect, result, \"using unquote():", "# \"Exception ignored in\". Override close() to silence this error.", "= self._saved_env def test_getproxies_environment_prefer_lowercase(self): # Test lowercase preference with removal", "RGB PNG image with one black and one white pixel", "'///C:') self.assertEqual(pathname2url(\"C:\\\\\"), '///C:') def test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r\"\\\\\\folder\\test\" \"\\\\\"), '/////folder/test/') self.assertEqual(pathname2url(r\"\\\\folder\\test\" \"\\\\\"),", "test the open method of URLopener class.\"\"\" def test_quoted_open(self): class", "count_holder[0] = count_holder[0] + 1 second_temp = \"%s.2\" % support.TESTFN", "self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'), 'alpha+beta+gamma') # Test with", "and url2pathname()\"\"\" def test_basic(self): # Make sure simple tests pass", "in a sequence of two-item sequences as an argument. 
self.help_inputtype([('1st',", "self.assertFalse(bypass('newdomain.com:1234\\n')) class ProxyTests_withOrderedEnv(unittest.TestCase): def setUp(self): # We need to test", "encoding=\"latin-1\", safe=\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\") self.assertEqual(expect, result, \"using", "# try: # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, [])", "def test_unquoting(self): # Make sure unquoting of all ASCII values", "# try: # conn, addr = serv.accept() # conn.send(\"1 Hola", "byte). report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count,", "= (('\\u00a0', '\\u00c1'),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given) self.assertEqual(expect,", "hex_repr = \"0%s\" % hex_repr return \"%\" + hex_repr #", "with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() @unittest.skipUnless(ssl, \"ssl module required\") def test_cafile_and_context(self): context", "# base64 encoded data URL that contains ignorable spaces, #", "correctly given = {'sequence':['1', '2', '3']} expect = \"sequence=%s\" %", "functions in the urllib.\"\"\" def test_thishost(self): \"\"\"Test the urllib.request.thishost utility", "what was in Python 2's \"urllib\" module\"\"\" import urllib.parse import", "# when exceptional conditions occur. self.tempFiles = [] # Create", "mapping object as an argument. 
self.help_inputtype({\"1st\":'1', \"2nd\":'2', \"3rd\":'3'}, \"using dict", "\"ab%3F%3F+cd\" result = urllib.parse.quote_plus(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using quote_plus():", "'1'), ('2nd', '2'), ('3rd', '3')], \"using sequence of two-item tuples", "# Characters in the Latin-1 range, encoded with Latin-1 result", "cantdata = 0 # while cantdata < 13: # data", "def tearDown(self): os.environ = self._saved_env def test_getproxies_environment_prefer_lowercase(self): # Test lowercase", "hex value>. The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes", "result) def test_urlencode_bytes(self): given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0%24=%C1%24'", "list_of_paths = ['C:', r'\\\\\\C\\test\\\\', r'C:\\foo\\bar\\spam.foo' ] for path in list_of_paths:", "unquote() and unquote_plus() given = \"are+there+spaces...\" expect = given result", "\"%26%2328450%3B%26%2323383%3B\" # \"&#28450;&#23383;\" result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"xmlcharrefreplace\") self.assertEqual(expect, result,", "%r != %r\" % (expect, result)) # Same as above,", "r'C:\\foo\\bar\\spam.foo' ] for path in list_of_paths: self.assertEqual(url2pathname(pathname2url(path)), path) class PathName2URLTests(unittest.TestCase):", "parameter does what it should do quote_by_default = \"<>\" result", "text\") def test_fileno(self): file_num = self.returned_obj.fileno() self.assertIsInstance(file_num, int, \"fileno() did", "()) with support.check_warnings(('', BytesWarning), quiet=True): self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'') def", "files. 
Each item in the list is a file #", "Characters in BMP, Latin-1, with xmlcharref error handling given =", "amt=None): if self.closed: return b\"\" return io.BytesIO.read(self, amt) def readline(self,", "!= %r\" % (expect, result)) given = '%' expect =", "\"sJT0mIPYgxCA%3D\") # base64 encoded data URL that contains ignorable spaces,", "serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # serv.bind((\"\", 9093)) # serv.listen()", "given = '%x' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect,", "'%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given,", "e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) @patch.object(urllib.request, 'MAXFTPCACHE', 0) def test_ftp_cache_pruning(self): self.fakeftp()", "in expect_somewhere: self.assertIn(expected, result, \"testing %s: %s not found in", "= \"%3F%3F\" # \"??\" result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect,", "= 'br%C3%BCckner_sapporo_20050930.doc' expect = b'br\\xc3\\xbcckner_sapporo_20050930.doc' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result,", "pass FakeHTTPConnection.fakedata = fakedata return FakeHTTPConnection class FakeHTTPMixin(object): def fakehttp(self,", "file://guidocomputer.athome.com:/python/license Connection: close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try: msg", "try: escaped_char_repr = repr(char).replace('\\\\', r'\\\\') InvalidURL = http.client.InvalidURL with self.assertRaisesRegex(", "encoded by with None (default) result = urllib.parse.quote(given, encoding=None, errors=None)", ".d.o.t') self.assertTrue(bypass('localhost')) self.assertTrue(bypass('LocalHost')) # MixedCase self.assertTrue(bypass('LOCALHOST')) # UPPERCASE self.assertTrue(bypass('.localhost')) 
self.assertTrue(bypass('newdomain.com:1234'))", "all characters that should be quoted are by default sans", "FakeFtpWrapper def unfakeftp(self): urllib.request.ftpwrapper = self._ftpwrapper_class class urlopen_FileTests(unittest.TestCase): \"\"\"Test urlopen()", "result, \"using unquote_plus(): %r != %r\" % (expect, result)) escape_list.append(given)", "'%A0=42&%A0=%C1' result = urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_bytes(self):", "logged as an # \"Exception ignored in\". Override close() to", "b'%A2\\xd8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using", "def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_getcode(self): self.assertIsNone(self.returned_obj.getcode()) def test_iter(self): #", "= repr(char).replace('\\\\', r'\\\\') InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f\"contain", "about trying to close files that may still be open.", "\"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.returned_obj, attr), \"object returned by urlopen()", "self.assertEqual('http://somewhere:3128', proxies['http']) self.env.set('REQUEST_METHOD', 'GET') proxies = urllib.request.getproxies_environment() self.assertNotIn('http', proxies) finally:", "given = \"\\xa2\\xd8ab\\xff\" expect = \"%C2%A2%C3%98ab%C3%BF\" result = urllib.parse.quote(given) self.assertEqual(expect,", "global default timeout is used # import socket # self.assertIsNone(socket.getdefaulttimeout())", "(expect, result)) given = '///C|/path' expect = 'C:\\\\path' result =", "(expect, result)) # Characters in the Latin-1 range, encoded with", "path) class PathName2URLTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(pathname2url(\"C:\"), '///C:') self.assertEqual(pathname2url(\"C:\\\\\"), '///C:') def", "and have possible dictionary input. 
\"\"\" expect_somewhere = [\"1st=1\", \"2nd=2\",", "'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\", method='GET') self.assertEqual(request.get_method(), 'GET') request.method", "self.assertEqual(expect, result) def test_doseq(self): # Test that passing True for", "Data characters : letters, digits, and \"-_.!~*'()\" Unreserved and do", "two-item tuples as input\") def test_quoting(self): # Make sure keys", "= '%xab' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect,", "when have non-quoted characters # interspersed given = 'ab%sd' %", "UTF-8, invalid sequence, ignoring errors given = \"%F3%B1\" expect =", "k in list(os.environ): if 'proxy' in k.lower(): self.env.unset(k) def tearDown(self):", "result)) # Encoding argument should raise type error on bytes", "Encoding argument should raise type error on bytes input self.assertRaises(TypeError,", "Default is UTF-8 encoding. given = (('\\u00a0', '\\u00c1'),) expect =", "self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") self.addCleanup(self.unfakehttp) filename, _ = urllib.request.URLopener().retrieve(url) self.assertEqual(os.path.splitext(filename)[1], \".txt\")", "= urllib.parse.unquote(given, encoding=None, errors=None) self.assertEqual(expect, result, \"using unquote(): %r !=", "result = urllib.parse.quote_from_bytes(given) self.assertEqual(expect, result, \"using quote_from_bytes(): %r != %r\"", "self.assertEqual(result, hexescape(' '), \"using quote(): %r != %r\" % (result,", "pixel self.image = ( b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\x02\\x00\\x00\\x00' b'\\x01\\x08\\x02\\x00\\x00\\x00{@\\xe8\\xdd\\x00\\x00\\x00\\x01sRGB\\x00\\xae' b'\\xce\\x1c\\xe9\\x00\\x00\\x00\\x0fIDAT\\x08\\xd7c```\\xf8\\xff\\xff?\\x00' b'\\x06\\x01\\x02\\xfe\\no/\\x1e\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82') self.text_url =", "encoding=\"latin-1\") self.assertEqual(expect, result) given = ((\"\\u00a0\", (42, 
\"\\u00c1\")),) expect =", "given = \"%F3%B1\" expect = \"\" result = urllib.parse.unquote(given, errors=\"ignore\")", "finally: self.unfakehttp() def test_userpass_inurl_w_spaces(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: userpass =", "URI; expect those bytes to be preserved) given = b'%A2\\xd8ab%FF'", "\"a\"]}, True)) data = collections.OrderedDict([(\"a\", 1), (\"b\", 1)]) self.assertEqual(\"a=a&a=b\", urllib.parse.urlencode({\"a\":", "( \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\\n\" \"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 \" \"vHgAAAABJRU5ErkJggg%3D%3D%0A%20\") self.text_url_resp = urllib.request.urlopen(self.text_url) self.text_url_base64_resp =", "quote(): %r != %r\" % (quote_by_default, result)) # \"Safe\" non-ASCII", "must be escaped if not being used for their special", "on connecting to the Net for testing. \"\"\" def setUp(self):", "\"%s\" % result) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote,", "unquote(): %r != %r\" % (expect, result)) result = urllib.parse.unquote_plus(given)", "%r != %r\" % (expect, result)) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None)", "(expect, result)) def test_quote_plus_with_unicode(self): # Encoding (latin-1) test for quote_plus", "= chr(char_no) schemeless_url = f\"//localhost{char}/test/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: escaped_char_repr", "nothing about trying to close files that may still be", "reporthook only 2 times (once when # the \"network connection\"", "encoded by default in UTF-8 given = \"\\u6f22\\u5b57\" # \"Kanji\"", "quote_by_default = \"<>\" result = urllib.parse.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using", "unquote_to_bytes(): %r != %r\" % (expect, result)) given = '%x'", "FakeHTTPConnection.fakedata = fakedata return 
FakeHTTPConnection class FakeHTTPMixin(object): def fakehttp(self, fakedata,", "'Expected str, got bytes'): given = b'bl\\xc3\\xa5b\\xc3\\xa6rsyltet\\xc3\\xb8y' urllib.parse.unquote(given) class urlencode_Tests(unittest.TestCase):", "%r\" % (expect, result)) # Same as above, but using", "def test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r\"\\\\\\folder\\test\" \"\\\\\"), '/////folder/test/') self.assertEqual(pathname2url(r\"\\\\folder\\test\" \"\\\\\"), '////folder/test/') self.assertEqual(pathname2url(r\"\\folder\\test\" \"\\\\\"),", "# \"0.9\" response accepted (but not \"simple responses\" without #", "test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r\"\\\\\\folder\\test\" \"\\\\\"), '/////folder/test/') self.assertEqual(pathname2url(r\"\\\\folder\\test\" \"\\\\\"), '////folder/test/') self.assertEqual(pathname2url(r\"\\folder\\test\" \"\\\\\"), '/folder/test/')", "%s != %s\" % (test_type, len(result), (5 * 3) +", "between unquote() and unquote_plus() given = \"are+there+spaces...\" expect = given", "urllib must reject local_file:// scheme class DummyURLopener(urllib.request.URLopener): def open_local_file(self, url):", "= urllib.request.ftpwrapper urllib.request.ftpwrapper = FakeFtpWrapper def unfakeftp(self): urllib.request.ftpwrapper = self._ftpwrapper_class", "# test suite. They use different url opening codepaths. Plain", "!= %r\" % (expect, result)) def test_default_quoting(self): # Make sure", "\"as second returned value\") def test_copy(self): # Test that setting", "= '%3F=1&%3F=%3F' result = urllib.parse.urlencode(given, True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result)", "suite. They use different url opening codepaths. 
Plain # urlopen", "2396 specifies\"\"\" hex_repr = hex(ord(char))[2:].upper() if len(hex_repr) == 1: hex_repr", "with support.check_warnings( ('DummyURLopener style of invoking requests is deprecated.', DeprecationWarning)):", "(char, hexescape(char), result)) del should_quote partial_quote = \"ab[]cd\" expected =", "with self.assertRaises(urllib.error.URLError) as e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) @patch.object(urllib.request, 'MAXFTPCACHE', 0)", "within the url _path_ safe. escaped_char_repr = repr(char).replace('\\\\', r'\\\\') InvalidURL", "encoded with Latin-1, with replace error handling given = \"\\u6f22\\u5b57\"", "result)) # Characters in BMP, encoded with Latin-1 given =", "do quote_by_default = \"<>\" result = urllib.parse.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result,", "0) def test_ftp_cache_pruning(self): self.fakeftp() try: urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost',", "fp = urlopen(url) # The authorization header must be in", "%r != %r\" % (expect, result)) def test_unquoting_with_bytes_input(self): # Bytes", "must be in place self.assertIn(authorization, fakehttp_wrapper.buf.decode(\"UTF-8\")) self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\")", "% urllib.request.pathname2url(filePath) def createNewTempFile(self, data=b\"\"): \"\"\"Creates a new temporary file", "= urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertFalse(bypass('localhost\\n')) self.assertFalse(bypass('anotherdomain.com:8888\\n')) self.assertFalse(bypass('newdomain.com:1234\\n')) class", "test_unquoting_plus(self): # Test difference between unquote() and unquote_plus() given =", "the various utility functions in the urllib.\"\"\" def test_thishost(self): \"\"\"Test", "def 
test_unquote_with_unicode(self): # Characters in the Latin-1 range, encoded with", "self.assertFalse(urllib.request.proxy_bypass_environment('localhost')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) os.environ['http_proxy'] = '' os.environ['HTTP_PROXY'] = 'http://somewhere:3128' proxies =", "(do_not_quote, result)) result = urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote_plus(): %r", "Test setting 'safe' parameter does what it should do quote_by_default", "char in should_quote: result = urllib.parse.quote(char) self.assertEqual(hexescape(char), result, \"using quote():", "#5 chars per thing and amps \"testing %s: \" \"unexpected", "safe. escaped_char_repr = repr(char).replace('\\\\', r'\\\\') InvalidURL = http.client.InvalidURL with self.assertRaisesRegex(", "\"\\u6f22\\u5b57\" # \"Kanji\" expect = \"%E6%BC%A2%E5%AD%97\" result = urllib.parse.quote(given) self.assertEqual(expect,", "= urlopen(\"http://python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(), 200)", "Have no use in URIs so must be escaped space", "passing in a sequence of two-item sequences as an argument.", "test_invalid_base64_data(self): # missing padding character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class urlretrieve_FileTests(unittest.TestCase): \"\"\"Test urllib.urlretrieve()", "urllib.parse.quote_plus(char) self.assertEqual(hexescape(char), result, \"using quote_plus(): \" \"%s should be escapes", "+ 2, #5 chars per thing and amps \"testing %s:", "quote_from_bytes should work the same result = urllib.parse.quote_from_bytes(given) self.assertEqual(expect, result,", "ok, but on those machines, sometimes # fail in one", "= urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=b\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\") 
self.assertEqual(expect,", "test_simple_compare(self): self.assertEqual(url2pathname(\"///C|/foo/bar/spam.foo\"), r'C:\\foo\\bar\\spam.foo') def test_non_ascii_drive_letter(self): self.assertRaises(IOError, url2pathname, \"///\\u00e8|/\") def test_roundtrip_url2pathname(self):", "sure unquoting works when have non-quoted characters # interspersed given", "order. Docs make no guarantee and have possible dictionary input.", "bytes(\"test_urllib: %s\\n\" % self.__class__.__name__, \"ascii\") f = open(support.TESTFN, 'wb') try:", "self.closed: return b\"\" return io.BytesIO.read(self, amt) def readline(self, length=None): if", "self def read(self, amt=None): if self.closed: return b\"\" return io.BytesIO.read(self,", "a codepath that # calls urllib.parse.quote() on the URL which", "'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t') self.assertTrue(bypass('localhost')) self.assertTrue(bypass('LocalHost')) # MixedCase self.assertTrue(bypass('LOCALHOST')) #", "= \"%A2%D8ab%FF\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote(): %r", "On a sequence of values. 
given = ((\"\\u00a0\", (1, \"\\u00c1\")),)", "%s != %s' % (expect, result)) class Utility_Tests(unittest.TestCase): \"\"\"Testcase to", "self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://user:pass@python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_userpass_inurl_w_spaces(self):", "range(128): given = hexescape(chr(num)) expect = chr(num) result = urllib.parse.unquote(given)", "self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']), self.text) def test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']), self.text) def test_read_image(self):", "instead of the top # level 'def urlopen()' function defined", "(test_type, result)) self.assertEqual(len(result), (5 * 3) + 2, #5 chars", "# the tests go ok. # If anybody has one", "sure all characters that should be quoted are by default", "but on those machines, sometimes # fail in one of", "urlopen self.assertEqual(DummyURLopener().open( \"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/\"), \"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/\") @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_file(self): with support.temp_dir() as", "return opener.open(url, data) def FancyURLopener(): with support.check_warnings( ('FancyURLopener style of", "\" \"%s should be escaped to %s, not %s\" %", "'%A0=%C1' result = urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result) given =", "getproxies_environment use lowered case truncated (no '_proxy') keys self.assertEqual('localhost', proxies['no'])", "self.assertIsInstance(self.text_url_resp.info(), email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain', ''), ('charset', 'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image))) 
self.assertEqual(urllib.request.urlopen(\"data:,\").info().get_params(),", "http.client.HTTPConnection = fake_http_class def unfakehttp(self): http.client.HTTPConnection = self._connection_class class FakeFTPMixin(object):", "urlopen, \"http://something\") finally: self.unfakehttp() def test_empty_socket(self): # urlopen() raises OSError", "character # Default utf-8 encoding given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result", "input. \"\"\" expect_somewhere = [\"1st=1\", \"2nd=2\", \"3rd=3\"] result = urllib.parse.urlencode(given)", "white pixel self.image = ( b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\x02\\x00\\x00\\x00' b'\\x01\\x08\\x02\\x00\\x00\\x00{@\\xe8\\xdd\\x00\\x00\\x00\\x01sRGB\\x00\\xae' b'\\xce\\x1c\\xe9\\x00\\x00\\x00\\x0fIDAT\\x08\\xd7c```\\xf8\\xff\\xff?\\x00' b'\\x06\\x01\\x02\\xfe\\no/\\x1e\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82') self.text_url", "urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code", "%r != %r\" % (expect, result)) # unquote_to_bytes given =", "urllib.parse.quote_from_bytes(given) self.assertEqual(expect, result, \"using quote_from_bytes(): %r != %r\" % (expect,", "lacks %s attribute\" % attr) def test_info(self): self.assertIsInstance(self.text_url_resp.info(), email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(),", "as above, but using a bytes rather than str result", "#4608. 
for line in self.returned_obj: self.assertEqual(line, self.text) def test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./'", "URL which makes all of the # above attempts at", "encoding=\"latin-1\") self.assertEqual(expect, result) class Pathname_Tests(unittest.TestCase): \"\"\"Test pathname2url() and url2pathname()\"\"\" def", "this only helps to makes sure temporary files get deleted,", "given = \"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result = urllib.parse.quote(given, encoding=\"latin-1\")", "threading.Thread(target=server, args=(self.evt,)).start() # time.sleep(.1) # # def tearDown(self): # self.evt.wait()", "try: urllib.request.urlretrieve(support.TEST_HTTP_URL) finally: self.unfakehttp() class QuotingTests(unittest.TestCase): r\"\"\"Tests for urllib.quote() and", "200) finally: self.unfakehttp() def test_userpass_inurl_w_spaces(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: userpass", "\"using unquote_plus(): %r != %r\" % (expect, result)) escape_list.append(given) escape_string", "in UTF-8 given = \"\\xa2\\xd8ab\\xff\" expect = \"%C2%A2%C3%98ab%C3%BF\" result =", "def test_converting_drive_letter(self): self.assertEqual(pathname2url(\"C:\"), '///C:') self.assertEqual(pathname2url(\"C:\\\\\"), '///C:') def test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r\"\\\\\\folder\\test\" \"\\\\\"),", "= ( \"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3\" \"D%26%20%C3%B6%20%C3%84%20\") self.text_url_base64 = ( \"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs\" \"sJT0mIPYgxCA%3D\") #", "self.fakehttp(b'''HTTP/1.1 302 Found Date: Wed, 02 Jan 2008 03:03:54 GMT", "self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ()) with support.check_warnings(('',", "got bytes'): given = b'bl\\xc3\\xa5b\\xc3\\xa6rsyltet\\xc3\\xb8y' urllib.parse.unquote(given) class 
urlencode_Tests(unittest.TestCase): \"\"\"Tests for", "os.environ = collections.OrderedDict() def tearDown(self): os.environ = self._saved_env def test_getproxies_environment_prefer_lowercase(self):", "# ftplib.FTP.port = 9093 # self.evt = threading.Event() # threading.Thread(target=server,", "on local files\"\"\" def setUp(self): # Create a list of", "for url in ('local_file://example', 'local-file://example'): self.assertRaises(OSError, urllib.request.urlopen, url) self.assertRaises(OSError, urllib.request.URLopener().open,", "Decode with UTF-8, invalid sequence, replace errors result = urllib.parse.unquote(given,", "self.assertFalse(bypass('newdomain.com:1235')) # wrong port def test_proxy_bypass_environment_always_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY',", "FTPWrapperTests(unittest.TestCase): # # def setUp(self): # import ftplib, time, threading", "result = urllib.parse.unquote(\"\\u6f22%FC\", encoding=\"latin-1\") expect = '\\u6f22\\u00fc' self.assertEqual(expect, result, \"using", "test_iter(self): # Test iterator # Don't need to count number", "the current working directory). # All files in this list", "self.assertEqual(expect, result) # Default is UTF-8 encoding. 
given = (('\\u00a0',", "= urllib.request.urlopen(self.text_url) self.text_url_base64_resp = urllib.request.urlopen( self.text_url_base64) self.image_url_resp = urllib.request.urlopen(self.image_url) def", "= '%C2%A0=%C3%81' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) # Latin-1 encoding.", "persistent=True): pass def retrfile(self, file, type): return io.BytesIO(), 0 def", "%r != %r\" % (quote_by_default, result)) # Safe expressed as", "200 OK\\r\\n\\r\\nHello!\") try: fp = urllib.request.urlopen(url) self.assertEqual(fp.geturl(), url) finally: self.unfakehttp()", "urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) self.env.set('REQUEST_METHOD', 'GET') proxies = urllib.request.getproxies_environment() self.assertNotIn('http', proxies)", "expect = '%3F=%3F' result = urllib.parse.urlencode(given, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result)", "amt) def readline(self, length=None): if self.closed: return b\"\" return io.BytesIO.readline(self,", "not located in proper place in %s\" % (test_type, result))", "for num in range(128): given = hexescape(chr(num)) expect = chr(num)", "0x7F Have no use in URIs so must be escaped", "result, \"using quote(): %r != %r\" % (expected, result)) result", "A mix of non-ASCII and percent-encoded characters, Latin-1 # (Note,", "string with unescaped non-ASCII characters # (Technically an invalid URI;", "with None (default) result = urllib.parse.quote(given, encoding=None, errors=None) self.assertEqual(expect, result,", "(\"read\", \"readline\", \"readlines\", \"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.text_url_resp, attr),", "the tests go ok. # If anybody has one of", "Resource Identifiers), to escape a character you write it as", "sequences as an argument. 
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')],", "0: io.BytesIO.close(self) class FakeHTTPConnection(http.client.HTTPConnection): # buffer to store data for", "urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][2], 5) self.assertEqual(report[1][2], 5) def", "= urllib.request.getproxies_environment() self.assertEqual({}, proxies) # Test lowercase preference of proxy", "= ( \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\\n\" \"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 \" \"vHgAAAABJRU5ErkJggg%3D%3D%0A%20\") self.text_url_resp = urllib.request.urlopen(self.text_url) self.text_url_base64_resp", "result = urllib.parse.urlencode(given) self.assertEqual(expect, result) given = {\"key name\":\"A bunch", "(Since URIs are not allowed to have non-ASCII characters) result", "schemeless_url = \"//\" + host + \":8080/test/?test=a\" try: # We", "_reporthook(par1, par2, par3): pass with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL, reporthook=_reporthook) finally:", "def tearDown(self): # self.evt.wait() # # def testBasic(self): # #", "by urlopen self.assertEqual(DummyURLopener().open( \"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/\"), \"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/\") @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_file(self): with support.temp_dir()", "1] self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), \"testing %s: '&' not located in", "Test on 8193 byte file. 
Should call reporthook only 3", "%r\" % (quote_by_default, result)) # \"Safe\" non-ASCII characters should have", "def unfakeftp(self): urllib.request.ftpwrapper = self._ftpwrapper_class class urlopen_FileTests(unittest.TestCase): \"\"\"Test urlopen() opening", "% hex_repr return \"%\" + hex_repr # Shortcut for testing", "# Make sure keys and values are quoted using quote_plus()", "2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection:", "urlopen(url, data=None, proxies=None): \"\"\"urlopen(url [, data]) -> open file-like object\"\"\"", "Hola mundo\\n\") # cantdata = 0 # while cantdata <", "files in this list will be deleted in the tearDown", "''') def _reporthook(par1, par2, par3): pass with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL,", "= self._connection_class class FakeFTPMixin(object): def fakeftp(self): class FakeFtpWrapper(object): def __init__(self,", "and once when the block is # read). report =", "self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: resp = urlopen(\"http://www.python.org\") self.assertTrue(resp.fp.will_close) finally: self.unfakehttp()", "code of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly. 
Case", "non-ASCII and percent-encoded characters, UTF-8 result = urllib.parse.unquote(\"\\u6f22%C3%BC\") expect =", "handling given = \"\\u6f22\\u5b57\" expect = \"%26%2328450%3B%26%2323383%3B\" # \"&#28450;&#23383;\" result", "import email.message import io import unittest from unittest.mock import patch", "% attr) def test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline())", "given = {\"&\":\"=\"} expect = \"%s=%s\" % (hexescape('&'), hexescape('=')) result", "tests for urllib.request.Request.\"\"\" def test_default_values(self): Request = urllib.request.Request request =", "handling given = \"\\u6f22\\u5b57\" expect = \"%3F%3F\" # \"??\" result", "= urlopen(f\"http:{schemeless_url}\") self.assertNotIn(' ', resp.geturl()) self.assertNotIn('\\r', resp.geturl()) self.assertNotIn('\\n', resp.geturl()) finally:", "lacks %s attribute\" % attr) def test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def", "path in list_of_paths: self.assertEqual(pathname2url(url2pathname(path)), path) if __name__ == '__main__': unittest.main()", "= urllib.parse.urlencode(given, True) for value in given[\"sequence\"]: expect = \"sequence=%s\"", "data URLs :;,%=& \\u00f6 \\u00c4 \" # 2x1 pixel RGB", "FILE = open(second_temp, 'rb') try: text = FILE.read() FILE.close() finally:", "Utility_Tests(unittest.TestCase): \"\"\"Testcase to test the various utility functions in the", "[] # Create a temporary file. 
self.registerFileForCleanUp(support.TESTFN) self.text = b'testing", "works when have non-quoted characters # interspersed given = 'ab%sd'", "(test_type, len(result), (5 * 3) + 2)) def test_using_mapping(self): #", "replace errors result = urllib.parse.unquote(given, errors=\"replace\") self.assertEqual(expect, result, \"using unquote():", "did not return an int\") self.assertEqual(os.read(file_num, len(self.text)), self.text, \"Reading on", "(b'\\xc1\\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True,", "quote_plus given = \"\\xa2\\xd8 \\xff\" expect = \"%A2%D8+%FF\" result =", "serv.close() # evt.set() # # class FTPWrapperTests(unittest.TestCase): # # def", "((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0%24=%C1%24' result = urllib.parse.urlencode(given) self.assertEqual(expect, result)", "urllib.request.urlretrieve(\"file:%s\" % support.TESTFN) self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1], email.message.Message, \"did not get", "(expect, result)) # Same as above, but using a bytes", "ASCII encoding. 
Expect %3F with errors=\"replace' given = (('\\u00a0', '\\u00c1'),)", "result)) # Characters in the Latin-1 range, encoded with Latin-1", "class FakeSocket(io.BytesIO): io_refs = 1 def sendall(self, data): FakeHTTPConnection.buf =", "test_readlines(self): lines_list = self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, \"readlines() returned the wrong", "read(self, amt=None): if self.closed: return b\"\" return io.BytesIO.read(self, amt) def", "# ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, [], # timeout=30)", "encoded with Latin-1 given = \"\\u6f22\\u5b57\" self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given, encoding=\"latin-1\")", "result)) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ()) def", "self.env def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost') proxies = urllib.request.getproxies_environment() # getproxies_environment", "# global default timeout is ignored # import socket #", "urllib.parse.quote(given, encoding=None, errors=None) self.assertEqual(expect, result, \"using quote(): %r != %r\"", "quoting_Tests for details on quoting and such. 
\"\"\" def test_unquoting(self):", "quoted using quote_plus() given = {\"&\":\"=\"} expect = \"%s=%s\" %", "result = urllib.parse.urlencode(given, encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_encoding_doseq(self): # ASCII", "= 'xyz.com' self.assertTrue(urllib.request.proxy_bypass_environment('localhost')) self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678')) self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234')) self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) # Test lowercase", "opener = urllib.request.FancyURLopener(proxies=proxies) elif not _urlopener: opener = FancyURLopener() _urlopener", "hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][2], 8193) self.assertEqual(report[0][1], 8192) self.assertEqual(report[1][1], 8192) self.assertEqual(report[2][1],", "need to test conditions, where variable order _is_ significant self._saved_env", "encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using quote(): %r != %r\" %", "mixed-case hex digits in the percent-escapes given = '%Ab%eA' expect", "to escape a character you write it as '%' +", "def test_info(self): self.assertIsInstance(self.text_url_resp.info(), email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain', ''), ('charset', 'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'],", "self.assertEqual(fp.geturl(), url) finally: self.unfakehttp() def test_willclose(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try:", "\"key+name=A+bunch+of+pluses\" result = urllib.parse.urlencode(given) self.assertEqual(expect, result) def test_doseq(self): # Test", "\"test data URLs :;,%=& \\u00f6 \\u00c4 \" # 2x1 pixel", "and \"-_.!~*'()\" Unreserved and do not need to be escaped;", "%r\" % (expect, result)) # Decode with UTF-8, invalid 
sequence,", "result = urllib.parse.unquote_to_bytes(\"\\u6f22%C3%BC\") expect = b'\\xe6\\xbc\\xa2\\xc3\\xbc' # UTF-8 for \"\\u6f22\\u00fc\"", "test_urlopener_retrieve_file(self): with support.temp_dir() as tmpdir: fd, tmpfile = tempfile.mkstemp(dir=tmpdir) os.close(fd)", "= urllib.request.getproxies_environment() # getproxies_environment use lowered case truncated (no '_proxy')", "next 8192 # bytes, and once for the last byte).", "%s attribute\" % attr) def test_info(self): self.assertIsInstance(self.text_url_resp.info(), email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain',", "self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_URLopener_deprecation(self): with support.check_warnings(('',DeprecationWarning)):", "on zero length file. Should call reporthook only 1 time.", "self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image))) self.assertEqual(urllib.request.urlopen(\"data:,\").info().get_params(), [('text/plain', ''), ('charset', 'US-ASCII')]) def test_geturl(self): self.assertEqual(self.text_url_resp.geturl(),", "given: result = urllib.request.url2pathname(url) self.assertEqual(expect, result, 'urllib.request..url2pathname() failed; %s !=", "result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # Sequence of values", "this list will be deleted in the tearDown method. 
Note,", "self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"):", "self.registerFileForCleanUp(newFilePath) newFile = os.fdopen(newFd, \"wb\") newFile.write(data) newFile.close() finally: try: newFile.close()", "given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0$=%C1$' result = urllib.parse.urlencode(given,", "\"using_unquote\") result = urllib.request.url2pathname(given) self.assertEqual(expect, result, \"url2pathname() failed; %s !=", "ssl.create_default_context() with support.check_warnings(('', DeprecationWarning)): with self.assertRaises(ValueError): urllib.request.urlopen( \"https://localhost\", cafile=\"/nonexistent/path\", context=context", "socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp =", "url _path_ safe. InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain", "quote_plus given = \"ab\\u6f22\\u5b57 cd\" expect = \"ab%3F%3F+cd\" result =", "in one of the tests, sometimes in other. I have", "self.assertEqual(report[0][2], 8193) self.assertEqual(report[0][1], 8192) self.assertEqual(report[1][1], 8192) self.assertEqual(report[2][1], 8192) class urlretrieve_HttpTests(unittest.TestCase,", "temporary files get deleted, but it # does nothing about", "Test unquoting on mixed-case hex digits in the percent-escapes given", "url2pathname()\"\"\" def test_basic(self): # Make sure simple tests pass expected_path", "# Characters in Latin-1 range, encoded with Latin-1 given =", "# flush(). 
Problem: flush() calls self.fp.flush() which raises # \"ValueError:", "= urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin): \"\"\"Test urlopen()", "given = b'%A2%D8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect,", "AttributeError), urllib.parse.unquote, b'') def test_unquoting_badpercent(self): # Test unquoting on bad", "file_size)) srcFileName = self.createNewTempFile() urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2],", "= self.returned_obj.fileno() self.assertIsInstance(file_num, int, \"fileno() did not return an int\")", "url): return url with support.check_warnings( ('DummyURLopener style of invoking requests", "works escape_list = [] for num in range(128): given =", "r'\\\\\\C\\test\\\\', r'C:\\foo\\bar\\spam.foo' ] for path in list_of_paths: self.assertEqual(url2pathname(pathname2url(path)), path) class", "for many error codes. self.fakehttp(b'''HTTP/1.1 401 Authentication Required Date: Wed,", "by the tearDown() method for the test self.returned_obj.close() def test_info(self):", "1 time. 
report = [] def hooktester(block_count, block_read_size, file_size, _report=report):", "\"\\\\\"), '/////folder/test/') self.assertEqual(pathname2url(r\"\\\\folder\\test\" \"\\\\\"), '////folder/test/') self.assertEqual(pathname2url(r\"\\folder\\test\" \"\\\\\"), '/folder/test/') def test_simple_compare(self):", "% (result, expected_url)) result = urllib.request.url2pathname(expected_url) self.assertEqual(expected_path, result, \"url2pathame() failed;", "newFile.close() except: pass return newFilePath def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def", "= http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(", "'/') def test_safe(self): # Test setting 'safe' parameter does what", "pathname2url from base64 import b64encode import collections def hexescape(char): \"\"\"Escape", "'br\\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect, result, \"using unquote(): %r != %r\" % (expect,", "1) self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self): # Test on 5 byte", "with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL, reporthook=_reporthook) finally: self.unfakehttp() def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1", "to store data for verification in urlopen tests. buf =", "list(os.environ): if 'proxy' in k.lower(): self.env.unset(k) def tearDown(self): # Restore", "self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: userpass = \"<PASSWORD>\" url = \"http://{}@python.org/\".format(userpass)", "= '\\u6f22\\u00fc' self.assertEqual(expect, result, \"using unquote(): %r != %r\" %", "of proxy bypass and correct matching including ports os.environ['no_proxy'] =", "verification in urlopen tests. 
buf = None def connect(self): self.sock", "# Can't really tell why keep failing in windows and", "1 def sendall(self, data): FakeHTTPConnection.buf = data def makefile(self, *args,", "# Utf-8 given = ((\"\\u00a0\", \"\\u00c1\"),) expect = '%C2%A0=%C3%81' result", "\"using unquote(): %r != %r\" % (expect, result)) # A", "as e: urlopen(test_ftp_url) self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) def test_ftp_nonexisting(self): with self.assertRaises(urllib.error.URLError) as", "% (expect, result)) given = '%x' expect = bytes(given, 'ascii')", "the same result = urllib.parse.quote_from_bytes(given) self.assertEqual(expect, result, \"using quote_from_bytes(): %r", "error codes. self.fakehttp(b'''HTTP/1.1 401 Authentication Required Date: Wed, 02 Jan", "'local-file://example'): self.assertRaises(OSError, urllib.request.urlopen, url) self.assertRaises(OSError, urllib.request.URLopener().open, url) self.assertRaises(OSError, urllib.request.URLopener().retrieve, url)", "characters, UTF-8 result = urllib.parse.unquote(\"\\u6f22%C3%BC\") expect = '\\u6f22\\u00fc' self.assertEqual(expect, result,", "%r\" % (expect, result)) def test_quote_plus_with_unicode(self): # Encoding (latin-1) test", "\"///C:/foo/bar/spam.foo\" ) def test_long_drive_letter(self): self.assertRaises(IOError, pathname2url, \"XX:\\\\\") def test_roundtrip_pathname2url(self): list_of_paths", "result)) # Characters in BMP, encoded with UTF-8 given =", "please help! # . 
Facundo # # def server(evt): #", "testing self.text = bytes(\"test_urllib: %s\\n\" % self.__class__.__name__, \"ascii\") f =", "result = urllib.parse.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote_plus(): %r !=", "((\"\\u00a0\", \"\\u00c1\"),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, True, encoding=\"latin-1\")", "urllib.parse.urlencode(given, True, safe=\":$\") self.assertEqual(expect, result) # Test all above in", "quote directly to percent-encoded values given = b\"\\xa2\\xd8ab\\xff\" expect =", "self.text = bytes(\"test_urllib: %s\\n\" % self.__class__.__name__, \"ascii\") f = open(support.TESTFN,", "'))) result = urllib.parse.quote_plus(' ') self.assertEqual(result, '+', \"using quote_plus(): %r", "temporary file containing the specified data, registers the file for", "they work ok, but on those machines, sometimes # fail", "doc string for quoting_Tests for details on quoting and such.", "given.replace(' ', hexescape(' ')) result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using", "in the Latin-1 range, encoded with None (default) result =", "self.env.set('HTTP_PROXY', 'http://somewhere:3128') proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) self.env.set('REQUEST_METHOD', 'GET') proxies", "'localhost') proxies = urllib.request.getproxies_environment() # getproxies_environment use lowered case truncated", "of lines\") self.assertEqual(lines_list[0], self.text, \"readlines() returned improper text\") def test_fileno(self):", "as safe character # Default utf-8 encoding given = ((b'\\xa0\\x24',", "# If anybody has one of the problematic environments, please", "OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''')", "digits, and \"-_.!~*'()\" Unreserved and do not need to be", "Latin-1 range, encoded by default in UTF-8 given = \"\\xa2\\xd8ab\\xff\"", "simple 
tests pass expected_path = os.path.join(\"parts\", \"of\", \"a\", \"path\") expected_url", "def test_quoting_plus(self): self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'), 'alpha+beta+gamma') #", "urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) @patch.object(urllib.request, 'MAXFTPCACHE', 0) def test_ftp_cache_pruning(self): self.fakeftp() try:", "quote(): %r != %r\" % (expect, result)) expect = given.replace('", "\"\\u6f22\\u5b57\" expect = \"%3F%3F\" # \"??\" result = urllib.parse.quote(given, encoding=\"latin-1\",", "in BMP, encoded with Latin-1, with replace error handling given", "and returns the absolute path of the file.\"\"\" newFd, newFilePath", "self.returned_obj.close() def test_info(self): self.assertIsInstance(self.returned_obj.info(), email.message.Message) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def", "result = urllib.parse.urlencode(given, doseq=True, safe=\":$\", encoding=\"latin-1\") given = ((b'\\xa0\\x24', (b'\\xc1\\x24',", "urlopen(\"http://user:pass@python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://user:pass@python.org/') self.assertEqual(fp.getcode(), 200) finally:", "difference between unquote() and unquote_plus() given = \"are+there+spaces...\" expect =", "data, registers the file for deletion during the test fixture", "def test_geturl(self): self.assertEqual(self.text_url_resp.geturl(), self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(), self.image_url) def test_read_text(self):", "\\xff\" expect = \"%A2%D8+%FF\" result = urllib.parse.quote_plus(given, encoding=\"latin-1\") self.assertEqual(expect, result,", "urllib.parse.urlencode({\"a\": [1, 
2]}, True)) self.assertEqual(\"a=None&a=a\", urllib.parse.urlencode({\"a\": [None, \"a\"]}, True)) data", "support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() @unittest.skipUnless(ssl, \"ssl module required\") def test_cafile_and_context(self): context =", "finally: self.unfakehttp() def test_redirect_limit_independent(self): # Ticket #12923: make sure independent", "cafile=\"/nonexistent/path\", context=context ) class urlopen_DataTests(unittest.TestCase): \"\"\"Test urlopen() opening a data", "contains non-Latin-1-representable characters) result = urllib.parse.unquote(\"\\u6f22%FC\", encoding=\"latin-1\") expect = '\\u6f22\\u00fc'", "(expect, result)) def test_unquote_with_unicode(self): # Characters in the Latin-1 range,", "OSError if the underlying socket does not send any #", "= collections.OrderedDict() def tearDown(self): os.environ = self._saved_env def test_getproxies_environment_prefer_lowercase(self): #", "self.assertEqual(pathname2url(\"C:\\\\\"), '///C:') def test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r\"\\\\\\folder\\test\" \"\\\\\"), '/////folder/test/') self.assertEqual(pathname2url(r\"\\\\folder\\test\" \"\\\\\"), '////folder/test/')", "server(evt): # import socket, time # serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "safe. 
InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r.*(found at", "bytes rather than str result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=b\"\\xfc\") expect", "char_no in list(range(0, 0x21)) + [0x7f]: char = chr(char_no) schemeless_url", "repr(char).replace('\\\\', r'\\\\') InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"):", "%r\" % (expect, result)) # Characters in BMP, Latin-1, with", "lines_list = self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, \"readlines() returned the wrong number", "os.environ['http_proxy'] = '' os.environ['HTTP_PROXY'] = 'http://somewhere:3128' proxies = urllib.request.getproxies_environment() self.assertEqual({},", "2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location:", "ignored in\". Override close() to silence this error. def close(self):", "len(data) # time.sleep(.3) # conn.send(\"2 No more lines\\n\") # conn.close()", "\"did not get an email.message.Message instance \" \"as second returned", "control.*\\\\r.*(found at least . .)\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"):", "finally: try: FILE.close() except: pass self.assertEqual(self.text, text) def test_reporthook(self): #", "text/html; charset=iso-8859-1 FF ''') def _reporthook(par1, par2, par3): pass with", "errors=\"replace\") self.assertEqual(expect, result, \"using quote_plus(): %r != %r\" % (expect,", "\"ascii\") f = open(support.TESTFN, 'wb') try: f.write(self.text) finally: f.close() self.pathname", "in this... (quite ugly) # test suite. 
They use different", "100 Content-Type: text/html; charset=iso-8859-1 FF ''') def _reporthook(par1, par2, par3):", "test_thishost(self): \"\"\"Test the urllib.request.thishost utility function returns a tuple\"\"\" self.assertIsInstance(urllib.request.thishost(),", "anotherdomain.com, newdomain.com:1234, .d.o.t') self.assertTrue(bypass('localhost')) self.assertTrue(bypass('LocalHost')) # MixedCase self.assertTrue(bypass('LOCALHOST')) # UPPERCASE", "range, encoded with Latin-1 result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding=\"latin-1\") expect =", "with UTF-8, invalid sequence, ignoring errors given = \"%F3%B1\" expect", "sets specified are: Reserved characters : \";/?:@&=+$,\" Have special meaning", "do not need to be escaped; can be, though, if", "given = {\"key name\":\"A bunch of pluses\"} expect = \"key+name=A+bunch+of+pluses\"", "') self.assertEqual(result, hexescape(' '), \"using quote(): %r != %r\" %", "# test the safe characters are not quoted by urlopen", "fd, tmp_file = tempfile.mkstemp() tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/')", "self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'), 'alpha%2Bbeta+gamma') # Test with safe bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma',", "\"using quote_plus(): %r != %r\" % (do_not_quote, result)) def test_default_safe(self):", "_path_ safe. 
InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r.*(found", "def test_fileno(self): file_num = self.returned_obj.fileno() self.assertIsInstance(file_num, int, \"fileno() did not", "urllib.parse.quote_plus(partial_quote) self.assertEqual(expected, result, \"using quote_plus(): %r != %r\" % (expected,", "'localhost' self.assertFalse(urllib.request.proxy_bypass_environment('localhost')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) os.environ['http_proxy'] = '' os.environ['HTTP_PROXY'] = 'http://somewhere:3128' proxies", "# text containing URL special- and unicode-characters self.text = \"test", "self.unfakehttp() class QuotingTests(unittest.TestCase): r\"\"\"Tests for urllib.quote() and urllib.quote_plus() According to", "list_of_paths: self.assertEqual(url2pathname(pathname2url(path)), path) class PathName2URLTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(pathname2url(\"C:\"), '///C:') self.assertEqual(pathname2url(\"C:\\\\\"),", "+ <2 character US-ASCII hex value>. The Python code of", "type error on bytes input self.assertRaises(TypeError, urllib.parse.quote, given, encoding=\"latin-1\") #", "FakeFtpWrapper(object): def __init__(self, user, passwd, host, port, dirs, timeout=None, persistent=True):", "PNG image with one black and one white pixel self.image", "such as \"\\n\", \" \", \"%0A\", and \"%20\". self.image_url =", "sequence of two-item tuples as input\") def test_quoting(self): # Make", "self.assertEqual(expect, result) # Latin-1 encoding. 
given = (('\\u00a0', '\\u00c1'),) expect", "f = open(support.TESTFN, 'wb') try: f.write(self.text) finally: f.close() self.pathname =", "urllib.parse.unquote, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ()) with support.check_warnings(('', BytesWarning), quiet=True):", "= \"key+name=A+bunch+of+pluses\" result = urllib.parse.urlencode(given) self.assertEqual(expect, result) def test_doseq(self): #", "request = Request(\"http://www.python.org\") self.assertEqual(request.get_method(), 'GET') request = Request(\"http://www.python.org\", {}) self.assertEqual(request.get_method(),", "FakeHTTPConnection.buf = data def makefile(self, *args, **kwds): self.io_refs += 1", "urllib.request.urlopen( self.text_url_base64) self.image_url_resp = urllib.request.urlopen(self.image_url) def test_interface(self): # Make sure", "by urlopen() lacks %s attribute\" % attr) def test_info(self): self.assertIsInstance(self.text_url_resp.info(),", "with urlopen(tmp_fileurl) as fobj: self.assertTrue(fobj) finally: os.close(fd) os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) with", "def setUp(self): # text containing URL special- and unicode-characters self.text", "test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual(b'', self.returned_obj.readline(), \"calling readline() after exhausting the", "test for ticket #4608. 
for line in self.returned_obj: self.assertEqual(line, self.text)", "# socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def", "os try: import ssl except ImportError: ssl = None import", "block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b\"x\" *", "data def makefile(self, *args, **kwds): self.io_refs += 1 return self", "control.*\\\\n\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code path quotes the URL so", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) # serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # serv.bind((\"\",", "urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote(): %r != %r\" % (do_not_quote,", "'+'), 'alpha+beta+gamma') # Test with bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'), 'alpha%2Bbeta+gamma') #", "= Request(\"http://www.python.org\", {}, method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request =", "import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp", "raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 401 Authentication Required", "test_quoting_space(self): # Make sure quote() and quote_plus() handle spaces as", "!= %r\" % (expect, result)) def test_unquoting_parts(self): # Make sure", "# The authorization header must be in place self.assertIn(authorization, fakehttp_wrapper.buf.decode(\"UTF-8\"))", "so there is no injection. 
resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(char, resp.geturl())", "Reserved characters : \";/?:@&=+$,\" Have special meaning in URIs and", "urlopen(\"http://python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(), 200) finally:", "test_non_ascii_drive_letter(self): self.assertRaises(IOError, url2pathname, \"///\\u00e8|/\") def test_roundtrip_url2pathname(self): list_of_paths = ['C:', r'\\\\\\C\\test\\\\',", "safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),)", "self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: # We explicitly test urllib.request.urlopen() instead", "0x00 - 0x1F, 0x7F Have no use in URIs so", "ssl = None import sys import tempfile from nturl2path import", "control.*{escaped_char_repr}\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() @unittest.skipUnless(ssl,", "self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ()) with support.check_warnings(('', BytesWarning), quiet=True): self.assertRaises((TypeError, AttributeError),", "def test_getcode(self): self.assertIsNone(self.returned_obj.getcode()) def test_iter(self): # Test iterator # Don't", "go ok. 
# If anybody has one of the problematic", "for urllib.request.Request.\"\"\" def test_default_values(self): Request = urllib.request.Request request = Request(\"http://www.python.org\")", "(Technically an invalid URI; expect those bytes to be preserved)", "= urllib.parse.quote(given, encoding=\"latin-1\", errors=\"xmlcharrefreplace\") self.assertEqual(expect, result, \"using quote(): %r !=", "_report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b\"x\" * 8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),", "((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\", encoding=\"latin-1\") expect = '%A0$=%C1$'", "anything about order. Docs make no guarantee and have possible", "= FakeSocket(self.fakedata) type(self).fakesock = self.sock if mock_close: # bpo-36918: HTTPConnection", "# Make sure unquoting of all ASCII values works escape_list", "# urlopen() should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1", "% (expect, result)) # Same as above, but using a", "urllib.parse.unquote(\"\\u6f22%FC\", encoding=\"latin-1\") expect = '\\u6f22\\u00fc' self.assertEqual(expect, result, \"using unquote(): %r", "% self.pathname) def tearDown(self): \"\"\"Shut down the open object\"\"\" self.returned_obj.close()", "'http://python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_url_fragment(self): # Issue #11703:", "'http://somewhere:3128' os.environ['Http_Proxy'] = 'http://somewhereelse:3128' proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) class", "urlopen_FileTests(unittest.TestCase): \"\"\"Test urlopen() opening a temporary file. 
Try to test", "!= %r\" % (quote_by_default, result)) result = urllib.parse.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default,", "result = urllib.parse.quote(partial_quote) self.assertEqual(expected, result, \"using quote(): %r != %r\"", "def test_using_mapping(self): # Test passing in a mapping object as", "first line from the # comparison. # Use the iterator", "3 times (once # when the \"network connection\" is established,", "None def urlopen(url, data=None, proxies=None): \"\"\"urlopen(url [, data]) -> open", "be UTF-8 # encoded). result = urllib.parse.unquote_to_bytes(\"\\u6f22%C3%BC\") expect = b'\\xe6\\xbc\\xa2\\xc3\\xbc'", "_ = urllib.request.URLopener().retrieve(fileurl) # Some buildbots have TEMP folder that", "self.unfakehttp() def test_missing_localfile(self): # Test for #10836 with self.assertRaises(urllib.error.URLError) as", "io.BytesIO.read(self, amt) def readline(self, length=None): if self.closed: return b\"\" return", "urlopen, \"http://python.org/\") finally: self.unfakehttp() def test_invalid_redirect(self): # urlopen() should raise", "did not\" \" return an empty string\") def test_readlines(self): lines_list", "escaped Unwise : \"{}|\\^[]`\" Must be escaped \"\"\" def test_never_quote(self):", "self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: escaped_char_repr = repr(char).replace('\\\\', r'\\\\') InvalidURL =", "so must be escaped space : 0x20 Must be escaped", "result)) result = urllib.parse.quote_plus(char) self.assertEqual(hexescape(char), result, \"using quote_plus(): \" \"%s", "%r != %r\" % (result, hexescape(' '))) result = urllib.parse.quote_plus('", "Net for testing. \"\"\" def setUp(self): # Create a temp", "drive letter. 
self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile)) @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_remote(self): url = \"http://www.python.org/file.txt\"", "%r != %r\" % (quote_by_default, result)) # \"Safe\" non-ASCII characters", "\"\\u00c1\"),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result)", "\"readlines\", \"fileno\", \"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.returned_obj, attr), \"object", "# when the \"network connection\" is established, once for the", "{}, method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\", method='GET')", "the filename argument works. second_temp = \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp)", "class urlencode_Tests(unittest.TestCase): \"\"\"Tests for urlencode()\"\"\" def help_inputtype(self, given, test_type): \"\"\"Helper", "self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888')) self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234')) def test_proxy_cgi_ignore(self): try: self.env.set('HTTP_PROXY', 'http://somewhere:3128') proxies =", "result = urllib.parse.urlencode(given) self.assertEqual(expect, result) def test_doseq(self): # Test that", "as e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) @patch.object(urllib.request, 'MAXFTPCACHE', 0) def test_ftp_cache_pruning(self):", "f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code path quotes the URL", "expect = \"%C2%A2%C3%98ab%C3%BF\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote():", 
"self.env.set('REQUEST_METHOD', 'GET') proxies = urllib.request.getproxies_environment() self.assertNotIn('http', proxies) finally: self.env.unset('REQUEST_METHOD') self.env.unset('HTTP_PROXY')", "result)) # Characters in BMP, encoded by default in UTF-8", "at injection within the url _path_ safe. escaped_char_repr = repr(char).replace('\\\\',", "return url for url in ('local_file://example', 'local-file://example'): self.assertRaises(OSError, urllib.request.urlopen, url)", "tearDown method. Note, # this only helps to makes sure", "result) # ASCII Encoding. On a sequence of values. given", "\"__iter__\"): self.assertTrue(hasattr(self.text_url_resp, attr), \"object returned by urlopen() lacks %s attribute\"", "+ \":8080/test/?test=a\" try: InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain", "True) self.assertEqual(expect, result) def test_urlencode_encoding_safe_parameter(self): # Send '$' (\\x24) as", "expect = \"needs/%s/here\" % urllib.parse.quote(\"quot=ing\") result = urllib.request.pathname2url(given) self.assertEqual(expect, result,", "the spaces are quoted in URL so no match self.assertNotEqual(fp.geturl(),", "= \"localhost:7777?a=1 HTTP/1.1\\r\\nX-injected: header\\r\\nTEST: 123\" schemeless_url = \"//\" + host", "r'\\\\') InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"http:{schemeless_url}\")", "all proxy related env vars for k in list(os.environ): if", "[] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName", "('3rd', '3')], \"using sequence of two-item tuples as input\") def", "file descriptor returned by fileno() \" \"did not return the", "requests is deprecated.', DeprecationWarning)): return urllib.request.FancyURLopener() def fakehttp(fakedata, mock_close=False): class", "readline() after exhausting the file did not\" \" 
return an", "expect = \"ab%3F%3F+cd\" result = urllib.parse.quote_plus(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result,", "self.assertNotIn('\\r', resp.geturl()) self.assertNotIn('\\n', resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\")", "be deleted in the tearDown method. Note, # this only", "self.env.unset('REQUEST_METHOD') self.env.unset('HTTP_PROXY') def test_proxy_bypass_environment_host_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com,", "percent-escapes given = '%xab' expect = given result = urllib.parse.unquote(given)", "self.tempFiles = [] # Create a temporary file. self.registerFileForCleanUp(support.TESTFN) self.text", "unquote_plus(): %r != %r\" % (expect, result)) def test_unquoting_plus(self): #", "is ignored # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) #", "[]) # ftp.close() # # def testTimeoutNone(self): # # global", "tmpdir: fd, tmpfile = tempfile.mkstemp(dir=tmpdir) os.close(fd) fileurl = \"file:\" +", "# A mix of non-ASCII and percent-encoded characters, UTF-8 result", "= urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'), 1, \"using unquote(): not all characters escaped:", "response accepted (but not \"simple responses\" without # a status", "[]) urlopen('ftp://localhost') finally: self.unfakeftp() def test_userpass_inurl(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try:", "%s != %s\" % (expect, result)) given = \"make+sure/using_unquote\" expect", "= {\"key name\":\"A bunch of pluses\"} expect = \"key+name=A+bunch+of+pluses\" result", "def test_getproxies_environment_prefer_lowercase(self): # Test lowercase preference with removal os.environ['no_proxy'] =", "resp.geturl()) self.assertNotIn('\\r', resp.geturl()) self.assertNotIn('\\n', resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module", 
"FakeHTTPMixin): \"\"\"Test urllib.urlretrieve() using fake http connections\"\"\" def test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1", "# Test for #10836 with self.assertRaises(urllib.error.URLError) as e: urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename)", "def test_reporthook_0_bytes(self): # Test on zero length file. Should call", "except: pass def tearDown(self): # Delete the temporary files. for", "default value for 'safe' parameter self.assertEqual(urllib.parse.quote.__defaults__[0], '/') def test_safe(self): #", "b'\\x06\\x01\\x02\\xfe\\no/\\x1e\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82') self.text_url = ( \"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3\" \"D%26%20%C3%B6%20%C3%84%20\") self.text_url_base64 = ( \"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs\"", "self.assertIsInstance(block_count, int) self.assertIsInstance(block_read_size, int) self.assertIsInstance(file_size, int) self.assertEqual(block_count, count_holder[0]) count_holder[0] =", "open. It # is the responsibility of the developer to", "invalid URI; expect those bytes to be preserved) given =", "%r != %r\" % (expect, result)) def test_quote_plus_with_unicode(self): # Encoding", "\"using quote_plus(): %r != %r\" % (expect, result)) class UnquotingTests(unittest.TestCase):", "% (expect, result)) expect = given result = urllib.request.url2pathname(result) self.assertEqual(expect,", "(quite ugly) # test suite. They use different url opening", "opening codepaths. Plain # urlopen uses FancyURLOpener which goes via", "def tearDown(self): # Delete the temporary files. for each in", "Characters in the Latin-1 range, encoded with None (default) result", "URL. 
url = 'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: fp =", "error handling given = \"\\u6f22\\u5b57\" expect = \"%26%2328450%3B%26%2323383%3B\" # \"&#28450;&#23383;\"", "being used for their special meaning Data characters : letters,", "(default) result = urllib.parse.quote(given, encoding=None, errors=None) self.assertEqual(expect, result, \"using quote():", "def test_file_notexists(self): fd, tmp_file = tempfile.mkstemp() tmp_fileurl = 'file://localhost/' +", "# # global default timeout is used # import socket", "= '%A0%24=%C1%24' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given,", "test_userpass_inurl_w_spaces(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: userpass = \"<PASSWORD>\" url =", "\"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) result = urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN), second_temp) self.assertEqual(second_temp,", "0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=\":$\")", "((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, doseq=True, safe=\":$\") expect = '%A0$=%C1$'", "except UnicodeEncodeError: raise unittest.SkipTest(\"filePath is not encodable to utf8\") return", "test_with_method_arg(self): Request = urllib.request.Request request = Request(\"http://www.python.org\", method='HEAD') self.assertEqual(request.method, 'HEAD')", "urllib.parse.quote() on the URL which makes all of the #", "argument. self.help_inputtype({\"1st\":'1', \"2nd\":'2', \"3rd\":'3'}, \"using dict as input type\") def", "no_proxies with space. 
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888')) self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234'))", "self.assertRaisesRegex(InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\")", "= '%A0%24=42&%A0%24=%C1%24' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) def test_urlencode_encoding_safe_parameter(self):", "\"%A2%D8ab%FF\" result = urllib.parse.quote(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote(): %r", "filePath = os.path.abspath(filePath) try: filePath.encode(\"utf-8\") except UnicodeEncodeError: raise unittest.SkipTest(\"filePath is", "\"using quote(): %r != %r\" % (expect, result)) expect =", "urlopen uses FancyURLOpener which goes via a codepath that #", "GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True)", "result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r != %r\"", "AttributeError), urllib.parse.unquote_to_bytes, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ()) def test_unquoting_mixed_case(self): #", "than str result = urllib.parse.quote(quote_by_default, safe=b\"<>\") self.assertEqual(quote_by_default, result, \"using quote():", "result = urllib.request.urlretrieve(\"file:%s\" % support.TESTFN) self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1], email.message.Message, \"did", "expect = 'C:\\\\path' result = urllib.request.url2pathname(given) self.assertEqual(expect, result, 'urllib.request.url2pathname() failed;", "Connection: close Content-Type: text/html; charset=iso-8859-1 ''', 
mock_close=True) try: msg =", "working directory). # All files in this list will be", "# self.evt.wait() # # def testBasic(self): # # connects #", "block is # read). report = [] def hooktester(block_count, block_read_size,", "encoding=\"latin-1\") # quote_from_bytes should work the same result = urllib.parse.quote_from_bytes(given)", "# no port self.assertFalse(bypass('newdomain.com:1235')) # wrong port def test_proxy_bypass_environment_always_match(self): bypass", "encoded with Latin-1 given = \"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result", "keep failing in windows and sparc. # Everywhere else they", "Characters in the Latin-1 range, encoded with UTF-8 given =", "+ \":8080/test/?test=a\" try: # We explicitly test urllib.request.urlopen() instead of", "# \"Safe\" non-ASCII characters should have no effect # (Since", "import urllib.parse import urllib.request import urllib.error import http.client import email.message", "b'\\xc1\\x24'),) expect = '%A0%24=%C1%24' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result", "result = urllib.request.url2pathname(given) self.assertEqual(expect, result, 'urllib.request.url2pathname() failed; %s != %s'", "failed; %s != %s' % (expect, result)) given = '///C|/path'", "result)) # Characters in Latin-1 range, encoded by with None", "% hexescape('c') expect = \"abcd\" result = urllib.parse.unquote(given) self.assertEqual(expect, result,", "type\") def test_using_sequence(self): # Test passing in a sequence of", "goes via a codepath that # calls urllib.parse.quote() on the", "= (\"Authorization: Basic %s\\r\\n\" % b64encode(userpass.encode(\"ASCII\")).decode(\"ASCII\")) fp = urlopen(url) #", "self.assertEqual(expect, result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0$=%C1$' result", "text containing URL special- and unicode-characters self.text = \"test data", "# Decode with UTF-8, invalid sequence, replace errors result =", "= \"test data URLs :;,%=& \\u00f6 \\u00c4 \" # 2x1", "given = 
\"\\u6f22\\u5b57\" expect = \"%3F%3F\" # \"??\" result =", "iterator # Don't need to count number of iterations since", "the specified methods for attr in (\"read\", \"readline\", \"readlines\", \"close\",", "('local_file://example', 'local-file://example'): self.assertRaises(OSError, urllib.request.urlopen, url) self.assertRaises(OSError, urllib.request.URLopener().open, url) self.assertRaises(OSError, urllib.request.URLopener().retrieve,", "mock_close=False): fake_http_class = fakehttp(fakedata, mock_close=mock_close) self._connection_class = http.client.HTTPConnection http.client.HTTPConnection =", "os.path.join(\"make sure\", \"using_quote\") expect = \"%s/using_quote\" % urllib.parse.quote(\"make sure\") result", "# Send '$' (\\x24) as safe character # Default utf-8", "expected_url = \"parts/of/a/path\" result = urllib.request.pathname2url(expected_path) self.assertEqual(expected_url, result, \"pathname2url() failed;", "# timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() class RequestTests(unittest.TestCase): \"\"\"Unit", "Make sure quote() and quote_plus() handle spaces as specified in", "0x20 Must be escaped Delimiters : '<>#%\"' Must be escaped", "urlopen() should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 302", "Request = urllib.request.Request request = Request(\"http://www.python.org\") self.assertEqual(request.get_method(), 'GET') request =", "test_url_fragment(self): # Issue #11703: geturl() omits fragments in the original", "\"url2pathame() failed; %s != %s\" % (result, expected_path)) def test_quoting(self):", "the doc string for quoting_Tests for details on quoting and", "commented them out. 
# Can't really tell why keep failing", "self.returned_obj.fileno() self.assertIsInstance(file_num, int, \"fileno() did not return an int\") self.assertEqual(os.read(file_num,", "This code path quotes the URL so there is no", "= urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result) given = ((\"\\u00a0\", (42,", "test for quote_plus given = \"ab\\u6f22\\u5b57 cd\" expect = \"ab%3F%3F+cd\"", "%r\" % (do_not_quote, result)) result = urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, \"using", "given = (('\\u00a0', '\\u00c1'),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given)", "b'\\xc1\\x24'),) expect = '%A0$=%C1$' result = urllib.parse.urlencode(given, doseq=True, safe=\":$\", encoding=\"latin-1\")", "and percent-encoded characters, Latin-1 # (Note, the string contains non-Latin-1-representable", "del should_quote partial_quote = \"ab[]cd\" expected = \"ab%5B%5Dcd\" result =", "result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) given = ((\"\\u00a0\", (42,", "def test_willclose(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: resp = urlopen(\"http://www.python.org\") self.assertTrue(resp.fp.will_close)", "\"http://www.python.org/file.txt\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") self.addCleanup(self.unfakehttp) filename, _ = urllib.request.URLopener().retrieve(url) self.assertEqual(os.path.splitext(filename)[1],", "for quoting_Tests for details on quoting and such. \"\"\" def", "'def urlopen()' function defined in this... (quite ugly) # test", "= urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote(): %r != %r\" %", "test for that). should_quote = [chr(num) for num in range(32)]", ". 
Facundo # # def server(evt): # import socket, time", "required\") def test_cafile_and_context(self): context = ssl.create_default_context() with support.check_warnings(('', DeprecationWarning)): with", "# Test automatic quoting and unquoting works for pathnam2url() and", "'US-ASCII')]) def test_geturl(self): self.assertEqual(self.text_url_resp.geturl(), self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(), self.image_url) def", "'3'])) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True)", "= urllib.parse.unquote_to_bytes(\"\\u6f22%C3%BC\") expect = b'\\xe6\\xbc\\xa2\\xc3\\xbc' # UTF-8 for \"\\u6f22\\u00fc\" self.assertEqual(expect,", "test_unquote_to_bytes(self): given = 'br%C3%BCckner_sapporo_20050930.doc' expect = b'br\\xc3\\xbcckner_sapporo_20050930.doc' result = urllib.parse.unquote_to_bytes(given)", "quote(): %r != %r\" % (expect, result)) # Same as", "use lowered case truncated (no '_proxy') keys self.assertEqual('localhost', proxies['no']) #", "urlopen() should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 401", "of invoking requests is deprecated.', DeprecationWarning)): return urllib.request.FancyURLopener() def fakehttp(fakedata,", "code path quotes the URL so there is no injection.", "\"\"\"Tests for unquote() and unquote_plus() See the doc string for", "%s\" % result.count('&')) def test_empty_sequence(self): self.assertEqual(\"\", urllib.parse.urlencode({})) self.assertEqual(\"\", urllib.parse.urlencode([])) def", "b'testing urllib.urlretrieve' try: FILE = open(support.TESTFN, 'wb') FILE.write(self.text) FILE.close() finally:", "# Just commented them out. 
# Can't really tell why", "self.assertTrue(os.path.exists(second_temp), \"copy of the file was not \" \"made\") FILE", "urllib.parse.urlencode({})) self.assertEqual(\"\", urllib.parse.urlencode([])) def test_nonstring_values(self): self.assertEqual(\"a=1\", urllib.parse.urlencode({\"a\": 1})) self.assertEqual(\"a=None\", urllib.parse.urlencode({\"a\":", "Characters in BMP, encoded with Latin-1 given = \"\\u6f22\\u5b57\" self.assertRaises(UnicodeEncodeError,", "\"%A2%D8+%FF\" result = urllib.parse.quote_plus(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote_plus(): %r", "urllib.parse.urlencode(given, doseq=True, safe=\":$\", encoding=\"latin-1\") given = ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd, 42)),)", "dict(self.text_url_base64_resp.info().get_params())['charset']), self.text) def test_read_image(self): self.assertEqual(self.image_url_resp.read(), self.image) def test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def", "tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/') try: self.assertTrue(os.path.exists(tmp_file)) with urlopen(tmp_fileurl)", "hex digits in the percent-escapes given = '%Ab%eA' expect =", "UTF-8 given = \"\\xa2\\xd8ab\\xff\" expect = \"%C2%A2%C3%98ab%C3%BF\" result = urllib.parse.quote(given)", "'localhost', 21, []) urlopen('ftp://localhost') finally: self.unfakeftp() def test_userpass_inurl(self): self.fakehttp(b\"HTTP/1.0 200", "range, encoded by default in UTF-8 given = \"\\xa2\\xd8ab\\xff\" expect", "proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin): \"\"\"Test", "\"\"\" def setUp(self): # Create a temp file to use", "bypass and correct matching including ports os.environ['no_proxy'] = 'localhost, noproxy.com,", "\"ssl module required\") def test_url_host_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 
200 OK\\r\\n\\r\\nHello.\") host =", "using quote_plus() given = {\"&\":\"=\"} expect = \"%s=%s\" % (hexescape('&'),", "escape_list = [] for num in range(128): given = hexescape(chr(num))", "self.assertEqual(expect, result) # ASCII Encoding. On a sequence of values.", "if self.closed: return b\"\" return io.BytesIO.read(self, amt) def readline(self, length=None):", "self.unfakehttp() def test_empty_socket(self): # urlopen() raises OSError if the underlying", "def test_url_host_with_control_char_rejected(self): for char_no in list(range(0, 0x21)) + [0x7f]: char", "result, \"using quote(): %r != %r\" % (expect, result)) expect", "that uses a lowercase drive letter. self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile)) @support.ignore_warnings(category=DeprecationWarning) def", "result, \"using unquote(): %r != %r\" % (expect, result)) def", "given[\"sequence\"]: expect = \"sequence=%s\" % value self.assertIn(expect, result) self.assertEqual(result.count('&'), 2,", "as '%' + <2 character US-ASCII hex value>. The Python", "sometimes in other. 
I have a linux, and # the", "for their special meaning Data characters : letters, digits, and", "vars for k in list(os.environ): if 'proxy' in k.lower(): self.env.unset(k)", "tearDown() method for the test self.returned_obj.close() def test_info(self): self.assertIsInstance(self.returned_obj.info(), email.message.Message)", "it here and then having it be called again #", "preserved) given = b'%A2\\xd8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result = urllib.parse.unquote_to_bytes(given)", "self.assertEqual(expect, result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, doseq=True,", "encoding=\"latin-1\") # Characters in BMP, encoded with Latin-1, with replace", "opener.open(url) else: return opener.open(url, data) def FancyURLopener(): with support.check_warnings( ('FancyURLopener", "but using a bytes rather than str result = urllib.parse.quote(\"a\\xfcb\",", "self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://user:pass@python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp()", "@patch.object(urllib.request, 'MAXFTPCACHE', 0) def test_ftp_cache_pruning(self): self.fakeftp() try: urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user',", "try: fp = urlopen(\"http://user:pass@python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://user:pass@python.org/')", "Request(\"http://www.python.org\", method='GET') self.assertEqual(request.get_method(), 'GET') request.method = 'HEAD' self.assertEqual(request.get_method(), 'HEAD') class", "!= %r\" % (expected, result)) result = urllib.parse.quote_plus(partial_quote) self.assertEqual(expected, result,", "errors=None) self.assertEqual(expect, result, \"using quote(): %r != %r\" % (expect,", "2nd, 2 * 3rd, 3 Test cannot assume anything about", "urllib.parse.urlencode(given) self.assertEqual(expect, result) def 
test_doseq(self): # Test that passing True", "+ host + \":8080/test/?test=a\" try: # We explicitly test urllib.request.urlopen()", "urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() def test_read_0_9(self): # \"0.9\" response accepted (but", "schemeless_url = \"//\" + host + \":8080/test/?test=a\" try: InvalidURL =", "urlopen(\"http://www.python.org\") self.assertTrue(resp.fp.will_close) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_path_with_control_char_rejected(self):", "expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect", "# def testTimeoutNone(self): # # global default timeout is ignored", "= \"a b cd e f\" expect = given.replace(' ',", "returns a tuple\"\"\" self.assertIsInstance(urllib.request.thishost(), tuple) class URLopener_Tests(FakeHTTPMixin, unittest.TestCase): \"\"\"Testcase to", "# encoded). result = urllib.parse.unquote_to_bytes(\"\\u6f22%C3%BC\") expect = b'\\xe6\\xbc\\xa2\\xc3\\xbc' # UTF-8", "returned and # a headers value is returned. result =", "% (expect, result)) given = \"make+sure/using_unquote\" expect = os.path.join(\"make+sure\", \"using_unquote\")", "= \"Redirection to url 'file:\" with self.assertRaisesRegex(urllib.error.HTTPError, msg): urlopen(\"http://python.org/\") finally:", "self.assertEqual(line, self.text) def test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname) class ProxyTests(unittest.TestCase): def", "'\\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, doseq=True, encoding=\"ASCII\", errors=\"replace\")", "# . 
Facundo # # def server(evt): # import socket,", "0x21)) + [0x7f]: char = chr(char_no) schemeless_url = f\"//localhost:7777/test{char}/\" self.fakehttp(b\"HTTP/1.1", "serv.bind((\"\", 9093)) # serv.listen() # try: # conn, addr =", "= os.path.join(\"make+sure\", \"using_unquote\") result = urllib.request.url2pathname(given) self.assertEqual(expect, result, \"url2pathname() failed;", "given = '%x' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given)", "== 1: hex_repr = \"0%s\" % hex_repr return \"%\" +", "urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self): #", "%r != %r\" % (expect, result)) # Characters in Latin-1", "should have no effect # (Since URIs are not allowed", "char = chr(char_no) schemeless_url = f\"//localhost:7777/test{char}/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try:", "%s: \" \"unexpected number of characters: %s != %s\" %", "%s != %s\" % (result, expected_path)) def test_quoting(self): # Test", "given = 'br%C3%BCckner_sapporo_20050930.doc' expect = 'br\\u00fcckner_sapporo_20050930.doc' result = urllib.parse.unquote(given) self.assertEqual(expect,", "by urlopen() lacks %s attribute\" % attr) def test_read(self): self.assertEqual(self.text,", "raises # \"ValueError: I/O operation on closed file\" which is", "value>. The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a", "a linux, and # the tests go ok. # If", "given = \"\\u6f22\\u5b57\" self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given, encoding=\"latin-1\") # Characters in", "from test import support import os try: import ssl except", "\"\\u6f22\\u00fc\" self.assertEqual(expect, result, \"using unquote_to_bytes(): %r != %r\" % (expect,", "Test unquoting on bad percent-escapes given = '%xab' expect =", "top # level 'def urlopen()' function defined in this... 
(quite", "def test_read_bogus(self): # urlopen() should raise OSError for many error", "self.assertEqual(request.get_method(), 'GET') request.method = 'HEAD' self.assertEqual(request.get_method(), 'HEAD') class URL2PathNameTests(unittest.TestCase): def", "descriptor returned by fileno() \" \"did not return the expected", "self.text_url_base64 = ( \"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs\" \"sJT0mIPYgxCA%3D\") # base64 encoded data URL", "= os.path.abspath(filePath) try: filePath.encode(\"utf-8\") except UnicodeEncodeError: raise unittest.SkipTest(\"filePath is not", "safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote(): %r != %r\" % (quote_by_default,", "except socket.timeout: # pass # finally: # serv.close() # evt.set()", "required\") def test_url_path_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host = \"localhost:7777?a=1 HTTP/1.1\\r\\nX-injected:", "03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license", "[], # timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() class RequestTests(unittest.TestCase):", "= urllib.parse.urlencode(given, safe=\":$\", encoding=\"latin-1\") expect = '%A0$=%C1$' self.assertEqual(expect, result) given", "= urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) self.env.set('REQUEST_METHOD', 'GET') proxies = urllib.request.getproxies_environment() self.assertNotIn('http',", "urllib.parse.unquote(given) class urlencode_Tests(unittest.TestCase): \"\"\"Tests for urlencode()\"\"\" def help_inputtype(self, given, test_type):", "def test_interface(self): # Make sure object returned by urlopen() has", "9093 # self.evt = threading.Event() # threading.Thread(target=server, args=(self.evt,)).start() # time.sleep(.1)", "\"__iter__\"): self.assertTrue(hasattr(self.returned_obj, attr), 
\"object returned by urlopen() lacks %s attribute\"", "urlopen(f\"http:{schemeless_url}\") self.assertNotIn(' ', resp.geturl()) self.assertNotIn('\\r', resp.geturl()) self.assertNotIn('\\n', resp.geturl()) finally: self.unfakehttp()", "self.assertEqual(len(report), 3) self.assertEqual(report[0][2], 8193) self.assertEqual(report[0][1], 8192) self.assertEqual(report[1][1], 8192) self.assertEqual(report[2][1], 8192)", "(\"b\", 1)]) self.assertEqual(\"a=a&a=b\", urllib.parse.urlencode({\"a\": data}, True)) def test_urlencode_encoding(self): # ASCII", "\"using quote(): %r != %r\" % (do_not_quote, result)) result =", "result, \"using quote_plus(): %r != %r\" % (expect, result)) class", "(expect, result)) def test_unquoting_plus(self): # Test difference between unquote() and", "characters # interspersed given = 'ab%sd' % hexescape('c') expect =", "= http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r.*(found at least .", "((\"\\u00a0\", (42, \"\\u00c1\")),) expect = '%C2%A0=42&%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True)", "# cantdata += len(data) # time.sleep(.3) # conn.send(\"2 No more", "self.assertTrue(e.exception.reason) def test_file_notexists(self): fd, tmp_file = tempfile.mkstemp() tmp_fileurl = 'file://localhost/'", "= \"%F3%B1\" expect = \"\\ufffd\" # Replacement character result =", "result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) def test_urlencode_encoding_safe_parameter(self): # Send", "test_never_quote(self): # Make sure quote() does not quote letters, digits,", "result = urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote(): %r != %r\"", "+ 1] self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), \"testing %s: '&' not located", "OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://user:pass@python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(),", "list will 
be deleted in the tearDown method. Note, #", "use different url opening codepaths. Plain # urlopen uses FancyURLOpener", "')) result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote(): %r !=", "= '' os.environ['HTTP_PROXY'] = 'http://somewhere:3128' proxies = urllib.request.getproxies_environment() self.assertEqual({}, proxies)", "self.assertEqual(fp.geturl(), 'http://user:pass@python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_userpass_inurl_w_spaces(self): self.fakehttp(b\"HTTP/1.0 200", "'wb') FILE.write(self.text) FILE.close() finally: try: FILE.close() except: pass def tearDown(self):", "url2pathname, \"///\\u00e8|/\") def test_roundtrip_url2pathname(self): list_of_paths = ['C:', r'\\\\\\C\\test\\\\', r'C:\\foo\\bar\\spam.foo' ]", "URIs are not allowed to have non-ASCII characters) result =", "ftp.close() class RequestTests(unittest.TestCase): \"\"\"Unit tests for urllib.request.Request.\"\"\" def test_default_values(self): Request", "('charset', 'US-ASCII')]) def test_geturl(self): self.assertEqual(self.text_url_resp.geturl(), self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(), self.image_url)", "pass # finally: # serv.close() # evt.set() # # class", "self.env = support.EnvironmentVarGuard() # Delete all proxy related env vars", "method for the test self.returned_obj.close() def test_info(self): self.assertIsInstance(self.returned_obj.info(), email.message.Message) def", "urllib.parse.urlencode(given) self.assertEqual(expect, result) # Latin-1 encoding. 
given = (('\\u00a0', '\\u00c1'),)", "url = \"http://{}@python.org/\".format(userpass) fakehttp_wrapper = http.client.HTTPConnection authorization = (\"Authorization: Basic", "_report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b\"x\" * 5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName),", "test_doseq(self): # Test that passing True for 'doseq' parameter works", "\"\\n\", \" \", \"%0A\", and \"%20\". self.image_url = ( \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\\n\"", "path in list_of_paths: self.assertEqual(url2pathname(pathname2url(path)), path) class PathName2URLTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(pathname2url(\"C:\"),", "# serv.listen() # try: # conn, addr = serv.accept() #", "(expect, result)) # Decode with UTF-8, invalid sequence, ignoring errors", "values are quoted using quote_plus() given = {\"&\":\"=\"} expect =", "readline(self, length=None): if self.closed: return b\"\" return io.BytesIO.readline(self, length) def", "addr = serv.accept() # conn.send(\"1 Hola mundo\\n\") # cantdata =", "urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, []) # ftp.close() # # def", "FakeFTPMixin): \"\"\"Test urlopen() opening a fake http connection.\"\"\" def check_read(self,", "errors given = \"%F3%B1\" expect = \"\" result = urllib.parse.unquote(given,", "call reporthook only 3 times (once # when the \"network", "%r\" % (expect, result)) # Characters in BMP, encoded with", "as an argument. 
self.help_inputtype({\"1st\":'1', \"2nd\":'2', \"3rd\":'3'}, \"using dict as input", "and unicode-characters self.text = \"test data URLs :;,%=& \\u00f6 \\u00c4", "characters, Latin-1 # (Note, the string contains non-Latin-1-representable characters) result", "OK\\r\\n\\r\\nHello!\") self.addCleanup(self.unfakehttp) filename, _ = urllib.request.URLopener().retrieve(url) self.assertEqual(os.path.splitext(filename)[1], \".txt\") @support.ignore_warnings(category=DeprecationWarning) def", "open(second_temp, 'rb') try: text = FILE.read() FILE.close() finally: try: FILE.close()", "safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect, result) # Safe parameter in", "= urlopen(\"http://www.python.org\") self.assertTrue(resp.fp.will_close) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def", "test_fileno(self): file_num = self.returned_obj.fileno() self.assertIsInstance(file_num, int, \"fileno() did not return", "to close files that may still be open. It #", "fileName): self.tempFiles.append(fileName) def test_basic(self): # Make sure that a local", "support.EnvironmentVarGuard() # Delete all proxy related env vars for k", "\"\"\"Regression tests for what was in Python 2's \"urllib\" module\"\"\"", "% (expect, result)) given = '%' expect = bytes(given, 'ascii')", "one of the tests, sometimes in other. I have a", "result, 'urllib.request..url2pathname() failed; %s != %s' % (expect, result)) given", "need to count number of iterations since test would fail", "hex(ord(<character>))[2:]`` escapes a character properly. 
Case does not matter on", "result)) # Test on a string with unescaped non-ASCII characters", "= \"ab\\u6f22\\u5b57 cd\" expect = \"ab%3F%3F+cd\" result = urllib.parse.quote_plus(given, encoding=\"latin-1\",", "import urllib.request import urllib.error import http.client import email.message import io", "'%xab' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result,", "which goes via a codepath that # calls urllib.parse.quote() on", "in the Latin-1 range, encoded with Latin-1 result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc',", "Test with safe bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'), 'alpha+beta+gamma') def test_quote_bytes(self):", "0 def close(self): pass self._ftpwrapper_class = urllib.request.ftpwrapper urllib.request.ftpwrapper = FakeFtpWrapper", "result.count('&'))) amp_location = result.index('&') on_amp_left = result[amp_location - 1] on_amp_right", "\"using unquote(): %r != %r\" % (expect, result)) # unquote_to_bytes", "'*') self.assertTrue(bypass('newdomain.com')) self.assertTrue(bypass('newdomain.com:1234')) self.env.set('NO_PROXY', '*, anotherdomain.com') self.assertTrue(bypass('anotherdomain.com')) self.assertFalse(bypass('newdomain.com')) self.assertFalse(bypass('newdomain.com:1234')) def", "self.assertEqual(fp.readline(), b\"\") # the spaces are quoted in URL so", "def test_local_file_open(self): # bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme", "UPPERCASE self.assertTrue(bypass('.localhost')) self.assertTrue(bypass('newdomain.com:1234')) self.assertTrue(bypass('.newdomain.com:1234')) self.assertTrue(bypass('foo.d.o.t')) # issue 29142 self.assertTrue(bypass('d.o.t')) self.assertTrue(bypass('anotherdomain.com:8888'))", "self.assertRaises(urllib.error.HTTPError, urlopen, \"http://something\") finally: self.unfakehttp() def test_empty_socket(self): # urlopen() raises", "handle spaces as specified in # their unique way result", "= 
\"\\xa2\\xd8ab\\xff\" expect = \"%C2%A2%C3%98ab%C3%BF\" result = urllib.parse.quote(given) self.assertEqual(expect, result,", "Connection: close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try: self.assertRaises(OSError, urlopen,", "= urllib.request.URLopener().retrieve(url) self.assertEqual(os.path.splitext(filename)[1], \".txt\") @support.ignore_warnings(category=DeprecationWarning) def test_local_file_open(self): # bpo-35907, CVE-2019-9948:", "(expect, result)) given = '%x' expect = bytes(given, 'ascii') result", "not matter on the hex letters. The various character sets", "'%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=\":$\", encoding=\"latin-1\") self.assertEqual(expect, result) class", "tuples as input\") def test_quoting(self): # Make sure keys and", "given = b'bl\\xc3\\xa5b\\xc3\\xa6rsyltet\\xc3\\xb8y' urllib.parse.unquote(given) class urlencode_Tests(unittest.TestCase): \"\"\"Tests for urlencode()\"\"\" def", "setting 'safe' parameter does what it should do quote_by_default =", "with self.assertRaisesRegex(TypeError, 'Expected str, got bytes'): given = b'bl\\xc3\\xa5b\\xc3\\xa6rsyltet\\xc3\\xb8y' urllib.parse.unquote(given)", "with self.assertRaises(urllib.error.URLError) as e: urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason) def test_file_notexists(self): fd,", "bytes input self.assertRaises(TypeError, urllib.parse.quote, given, encoding=\"latin-1\") # quote_from_bytes should work", "collections.OrderedDict() def tearDown(self): os.environ = self._saved_env def test_getproxies_environment_prefer_lowercase(self): # Test", "bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'), 'alpha%2Bbeta+gamma') # Test with safe bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta", "self.io_refs += 1 return self def read(self, amt=None): if self.closed:", "for quote_plus given = \"\\xa2\\xd8 \\xff\" expect = 
\"%A2%D8+%FF\" result", "% (expect, result)) class UnquotingTests(unittest.TestCase): \"\"\"Tests for unquote() and unquote_plus()", "many error codes. self.fakehttp(b'''HTTP/1.1 302 Found Date: Wed, 02 Jan", "for testing self.text = bytes(\"test_urllib: %s\\n\" % self.__class__.__name__, \"ascii\") f", "(expect, result)) def test_quote_with_unicode(self): # Characters in Latin-1 range, encoded", "\"localhost:7777?a=1 HTTP/1.1\\r\\nX-injected: header\\r\\nTEST: 123\" schemeless_url = \"//\" + host +", "%r\" % (expect, result)) # Decode with UTF-8, invalid sequence", "# All files in this list will be deleted in", "'test specific to the urllib.url2path function.') def test_ntpath(self): given =", "timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() class RequestTests(unittest.TestCase): \"\"\"Unit tests", "= \"%A2%D8ab%FF\" result = urllib.parse.quote(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote():", "expect = b'br\\xc3\\xbcckner_sapporo_20050930.doc' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes():", "# buffer to store data for verification in urlopen tests.", "tests pass expected_path = os.path.join(\"parts\", \"of\", \"a\", \"path\") expected_url =", "self.returned_obj: self.assertEqual(line, self.text) def test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname) class ProxyTests(unittest.TestCase):", "unfakehttp(self): http.client.HTTPConnection = self._connection_class class FakeFTPMixin(object): def fakeftp(self): class FakeFtpWrapper(object):", "f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"https:{schemeless_url}\") #", "DeprecationWarning)): self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/') # test the safe characters 
are", "InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"http:{schemeless_url}\") with", "2's \"urllib\" module\"\"\" import urllib.parse import urllib.request import urllib.error import", "2 times (once when # the \"network connection\" is established", "# import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: #", "support.TESTFN self.registerFileForCleanUp(second_temp) result = urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN), second_temp) self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp),", "raises OSError if the underlying socket does not send any", "ports os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234' os.environ['No_Proxy'] = 'xyz.com' self.assertTrue(urllib.request.proxy_bypass_environment('localhost'))", "its own location returned and # a headers value is", "mock_close=mock_close) self._connection_class = http.client.HTTPConnection http.client.HTTPConnection = fake_http_class def unfakehttp(self): http.client.HTTPConnection", "Ticket #12923: make sure independent requests each use their #", "with one black and one white pixel self.image = (", "tmpfile = tempfile.mkstemp(dir=tmpdir) os.close(fd) fileurl = \"file:\" + urllib.request.pathname2url(tmpfile) filename,", "the problematic environments, please help! # . Facundo # #", "self.assertEqual(url2pathname(\"///C|/\"), 'C:\\\\') def test_converting_when_no_drive_letter(self): # cannot end a raw string", "True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # Utf-8 given = ((\"\\u00a0\",", "url) self.assertRaises(OSError, urllib.request.URLopener().open, url) self.assertRaises(OSError, urllib.request.URLopener().retrieve, url) self.assertRaises(OSError, DummyURLopener().open, url)", "for testing different input types. 'given' must lead to only", "of the tests, sometimes in other. 
I have a linux,", "urllib.parse.quote(partial_quote) self.assertEqual(expected, result, \"using quote(): %r != %r\" % (expected,", "UTF-8, invalid sequence given = \"%F3%B1\" expect = \"\\ufffd\" #", "registers the file for deletion during the test fixture tear", "mock_close=True) try: self.assertRaises(OSError, urlopen, \"http://python.org/\") finally: self.unfakehttp() def test_invalid_redirect(self): #", "\"network connection\" is established, once for the next 8192 #", "def test_iter(self): # Test iterator # Don't need to count", "return opener.open(url) else: return opener.open(url, data) def FancyURLopener(): with support.check_warnings(", "\"0.9\" response accepted (but not \"simple responses\" without # a", "# latin-1 given = ((\"\\u00a0\", \"\\u00c1\"),) expect = '%A0=%C1' result", "self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r.*(found at least . .)\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with", "lines\") self.assertEqual(lines_list[0], self.text, \"readlines() returned improper text\") def test_fileno(self): file_num", "ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, []) # ftp.close() #", "Request(\"http://www.python.org\", {}, method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\",", "item in the list is a file # name (absolute", "'localhost, noproxy.com, my.proxy:1234' os.environ['No_Proxy'] = 'xyz.com' self.assertTrue(urllib.request.proxy_bypass_environment('localhost')) self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678')) self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234')) self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy'))", "an int\") self.assertEqual(os.read(file_num, len(self.text)), self.text, \"Reading on the file descriptor", "r'\\\\C\\test' '\\\\') def test_simple_compare(self): self.assertEqual(url2pathname(\"///C|/foo/bar/spam.foo\"), 
r'C:\\foo\\bar\\spam.foo') def test_non_ascii_drive_letter(self): self.assertRaises(IOError, url2pathname,", "# \"Kanji\" result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r", "self.assertEqual(expect, result) def test_urlencode_encoding_safe_parameter(self): # Send '$' (\\x24) as safe", "# the \"network connection\" is established and once when the", "here and then having it be called again # by", "% (expect, result)) # unquote_to_bytes given = '%xab' expect =", "self.assertEqual(DummyURLopener().open( \"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/\"), \"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/\") @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_file(self): with support.temp_dir() as tmpdir:", "test the safe characters are not quoted by urlopen self.assertEqual(DummyURLopener().open(", "connection\" is established, once for the next 8192 # bytes,", "\"using unquote_plus(): %r != %r\" % (expect, result)) def test_unquoting_plus(self):", "safe bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'), 'alpha+beta+gamma') def test_quote_bytes(self): # Bytes", "Request(\"http://www.python.org\", {}) self.assertEqual(request.get_method(), 'POST') def test_with_method_arg(self): Request = urllib.request.Request request", "3) + 2, #5 chars per thing and amps \"testing", "For 0x7F should_quote = ''.join(should_quote) for char in should_quote: result", "quote letters, digits, and \"_,.-\" do_not_quote = '' .join([\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\",", "second_temp, hooktester) def test_reporthook_0_bytes(self): # Test on zero length file.", "support.TESTFN, hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self): # Test", "\"//\" + host + \":8080/test/?test=a\" try: # We explicitly test", "the file was not \" \"made\") FILE = open(second_temp, 'rb')", "# Characters in Latin-1 range, encoded by with None 
(default)", "list(range(0, 0x21)) + [0x7f]: char = chr(char_no) schemeless_url = f\"//localhost:7777/test{char}/\"", "setUp(self): # We need to test conditions, where variable order", "proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) self.env.set('REQUEST_METHOD', 'GET') proxies = urllib.request.getproxies_environment()", "be escaped; can be, though, if desired Control characters :", "cd e f\" expect = given.replace(' ', hexescape(' ')) result", "num in range(32)] # For 0x00 - 0x1F should_quote.append(r'<>#%\"{}|\\^[]`') should_quote.append(chr(127))", "self.assertEqual(expect, result) class Pathname_Tests(unittest.TestCase): \"\"\"Test pathname2url() and url2pathname()\"\"\" def test_basic(self):", "self.closed: return b\"\" return io.BytesIO.readline(self, length) def close(self): self.io_refs -=", "# Test with a bytes as input, with unescaped non-ASCII", "True, safe=\":$\", encoding=\"latin-1\") self.assertEqual(expect, result) class Pathname_Tests(unittest.TestCase): \"\"\"Test pathname2url() and", "# finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() #", "!= %s\" % (expect, result)) given = \"make+sure/using_unquote\" expect =", "\", \"%0A\", and \"%20\". self.image_url = ( \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\\n\" \"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 \"", "raw string in \\ self.assertEqual(url2pathname(\"///C/test/\"), r'\\\\\\C\\test' '\\\\') self.assertEqual(url2pathname(\"////C/test/\"), r'\\\\C\\test' '\\\\')", "have possible dictionary input. 
\"\"\" expect_somewhere = [\"1st=1\", \"2nd=2\", \"3rd=3\"]", "result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # latin-1 given =", "result = urllib.parse.quote(' ') self.assertEqual(result, hexescape(' '), \"using quote(): %r", "def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def test_basic(self): # Make sure that", "that the reporthook works. def hooktester(block_count, block_read_size, file_size, count_holder=[0]): self.assertIsInstance(block_count,", "self.assertEqual(block_count, count_holder[0]) count_holder[0] = count_holder[0] + 1 second_temp = \"%s.2\"", "= urllib.parse.urlencode(given, safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect, result) given =", "errors=\"ignore\") self.assertEqual(expect, result, \"using unquote(): %r != %r\" % (expect,", "# Don't need to count number of iterations since test", "test_unquoting_parts(self): # Make sure unquoting works when have non-quoted characters", "(result, expected_url)) result = urllib.request.url2pathname(expected_url) self.assertEqual(expected_path, result, \"url2pathame() failed; %s", "result)) result = urllib.parse.quote_plus(partial_quote) self.assertEqual(expected, result, \"using quote_plus(): %r !=", "hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][2], 5) self.assertEqual(report[1][2], 5) def test_reporthook_8193_bytes(self): #", "'HEAD') request = Request(\"http://www.python.org\", method='GET') self.assertEqual(request.get_method(), 'GET') request.method = 'HEAD'", "expect = '\\u6f22\\u00fc' self.assertEqual(expect, result, \"using unquote(): %r != %r\"", "encoding=None, errors=None) self.assertEqual(expect, result, \"using unquote(): %r != %r\" %", "self.assertEqual(request.get_method(), 'POST') def test_with_method_arg(self): Request = urllib.request.Request request = Request(\"http://www.python.org\",", "= (('\\u00a0', '\\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, doseq=True,", "[, 
data]) -> open file-like object\"\"\" global _urlopener if proxies", "percent-escapes given = '%Ab%eA' expect = b'\\xab\\xea' result = urllib.parse.unquote_to_bytes(given)", "= '%A0=%C1' result = urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result) given", "result)) class Utility_Tests(unittest.TestCase): \"\"\"Testcase to test the various utility functions", "FancyURLopener _urlopener = None def urlopen(url, data=None, proxies=None): \"\"\"urlopen(url [,", "# Test difference between unquote() and unquote_plus() given = \"are+there+spaces...\"", "def test_url_fragment(self): # Issue #11703: geturl() omits fragments in the", "given = \"\\u6f22\\u5b57\" expect = \"%26%2328450%3B%26%2323383%3B\" # \"&#28450;&#23383;\" result =", "to only the pairs: * 1st, 1 * 2nd, 2", "returned improper text\") def test_fileno(self): file_num = self.returned_obj.fileno() self.assertIsInstance(file_num, int,", "automatic quoting and unquoting works for pathnam2url() and # url2pathname()", "self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL, reporthook=_reporthook) finally: self.unfakehttp() def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1 200", "test_read_text(self): self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']), self.text) def test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']), self.text) def", "test_unquoting(self): # Make sure unquoting of all ASCII values works", "% (expect, result)) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes,", "= '%C2%A0=42&%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # latin-1", "# data = 
conn.recv(13-cantdata) # cantdata += len(data) # time.sleep(.3)", "# def server(evt): # import socket, time # serv =", "you write it as '%' + <2 character US-ASCII hex", "200 OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://user:pass@python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\")", "self.constructLocalFileUrl(support.TESTFN), second_temp, hooktester) def test_reporthook_0_bytes(self): # Test on zero length", "control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code path quotes the URL so", "class FakeHTTPMixin(object): def fakehttp(self, fakedata, mock_close=False): fake_http_class = fakehttp(fakedata, mock_close=mock_close)", "def test_readlines(self): lines_list = self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, \"readlines() returned the", "302 Found Location: file://guidocomputer.athome.com:/python/license Connection: close ''', mock_close=True) try: self.assertRaises(urllib.error.HTTPError,", "escapes a character properly. 
Case does not matter on the", "result = urllib.parse.quote_plus(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote_plus(): %r !=", "# Make sure that a local file just gets its", "in the percent-escapes given = '%Ab%eA' expect = b'\\xab\\xea' result", "\"-_.!~*'()\" Unreserved and do not need to be escaped; can", "self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']), self.text) def test_read_image(self): self.assertEqual(self.image_url_resp.read(), self.image) def test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain')", "result, \"using quote(): %r != %r\" % (do_not_quote, result)) result", "- 1] on_amp_right = result[amp_location + 1] self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(),", "self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678')) self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234')) self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) # Test lowercase preference with replacement", "module required\") def test_cafile_and_context(self): context = ssl.create_default_context() with support.check_warnings(('', DeprecationWarning)):", "given = '%Ab%eA' expect = b'\\xab\\xea' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect,", "result, \"using quote(): %r != %r\" % (quote_by_default, result)) result", "result = urllib.parse.quote(quote_by_default, safe=b\"<>\") self.assertEqual(quote_by_default, result, \"using quote(): %r !=", "self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) # Test lowercase preference with replacement os.environ['http_proxy'] = 'http://somewhere:3128'", "# Characters in BMP, encoded with Latin-1, with replace error", "self.assertEqual(expected, result, \"using quote_plus(): %r != %r\" % (expected, result))", "import 
socket, time # serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # serv.settimeout(3)", "newFile.close() finally: try: newFile.close() except: pass return newFilePath def registerFileForCleanUp(self,", "status line) self.check_read(b\"0.9\") def test_read_1_0(self): self.check_read(b\"1.0\") def test_read_1_1(self): self.check_read(b\"1.1\") def", "def test_missing_localfile(self): # Test for #10836 with self.assertRaises(urllib.error.URLError) as e:", "\"using quote(): %r != %r\" % (expect, result)) def test_default_quoting(self):", "above, but using a bytes rather than str result =", "self.env.set('NO_PROXY', '*, anotherdomain.com') self.assertTrue(bypass('anotherdomain.com')) self.assertFalse(bypass('newdomain.com')) self.assertFalse(bypass('newdomain.com:1234')) def test_proxy_bypass_environment_newline(self): bypass =", "the Latin-1 range, encoded with UTF-8 given = 'br%C3%BCckner_sapporo_20050930.doc' expect", "self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self): # Test on 5 byte file.", "self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(), self.image_url) def test_read_text(self): self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']), self.text) def test_read_text_base64(self):", "b'br\\xc3\\xbcckner_sapporo_20050930.doc' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes(): %r !=", "escape a character you write it as '%' + <2", "import support import os try: import ssl except ImportError: ssl", "= [\"1st=1\", \"2nd=2\", \"3rd=3\"] result = urllib.parse.urlencode(given) for expected in", "for url in given: result = urllib.request.url2pathname(url) self.assertEqual(expect, result, 'urllib.request..url2pathname()", "\"%0A\", and \"%20\". 
self.image_url = ( \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\\n\" \"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 \" \"vHgAAAABJRU5ErkJggg%3D%3D%0A%20\")", "!= %s\" % (expect, result)) given = os.path.join(\"make sure\", \"using_quote\")", "See the doc string for quoting_Tests for details on quoting", "attempts at injection within the url _path_ safe. escaped_char_repr =", "chars per thing and amps \"testing %s: \" \"unexpected number", "really tell why keep failing in windows and sparc. #", "a temporary file. self.registerFileForCleanUp(support.TESTFN) self.text = b'testing urllib.urlretrieve' try: FILE", "urlretrieve_FileTests(unittest.TestCase): \"\"\"Test urllib.urlretrieve() on local files\"\"\" def setUp(self): # Create", "class DummyURLopener(urllib.request.URLopener): def open_local_file(self, url): return url for url in", "Decode with UTF-8, invalid sequence, ignoring errors given = \"%F3%B1\"", "= open(support.TESTFN, 'wb') FILE.write(self.text) FILE.close() finally: try: FILE.close() except: pass", "200) finally: self.unfakehttp() def test_URLopener_deprecation(self): with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() @unittest.skipUnless(ssl, \"ssl", "% (expect, result)) # Encoding argument should raise type error", "'C:') self.assertEqual(url2pathname(\"///C:\"), 'C:') self.assertEqual(url2pathname(\"///C|/\"), 'C:\\\\') def test_converting_when_no_drive_letter(self): # cannot end", "helps to makes sure temporary files get deleted, but it", "OK\\r\\n\\r\\nHello!\") try: userpass = \"<PASSWORD>\" url = \"http://{}@python.org/\".format(userpass) fakehttp_wrapper =", "characters should have no effect # (Since URIs are not", "test would fail the # instant it returned anything beyond", "encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # Utf-8 given = ((\"\\u00a0\", \"\\u00c1\"),)", "# serv.close() # evt.set() # # class FTPWrapperTests(unittest.TestCase): # #", 
"hexescape(char): \"\"\"Escape char as RFC 2396 specifies\"\"\" hex_repr = hex(ord(char))[2:].upper()", "\"http://something\") finally: self.unfakehttp() def test_missing_localfile(self): # Test for #10836 with", "'), \"using quote(): %r != %r\" % (result, hexescape(' ')))", "quote_plus(): %r != %r\" % (expect, result)) class UnquotingTests(unittest.TestCase): \"\"\"Tests", "!= %s' % (expect, result)) given = '///C|/path' expect =", "self.assertEqual(os.path.splitext(filename)[1], \".txt\") @support.ignore_warnings(category=DeprecationWarning) def test_local_file_open(self): # bpo-35907, CVE-2019-9948: urllib must", "are quoted using quote_plus() given = {\"&\":\"=\"} expect = \"%s=%s\"", "= 'br\\u00fcckner_sapporo_20050930.doc' result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r", "def close(self): self.io_refs -= 1 if self.io_refs == 0: io.BytesIO.close(self)", "with removal os.environ['no_proxy'] = '' os.environ['No_Proxy'] = 'localhost' self.assertFalse(urllib.request.proxy_bypass_environment('localhost')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))", "digits in the percent-escapes given = '%Ab%eA' expect = b'\\xab\\xea'", "= urllib.request.Request request = Request(\"http://www.python.org\") self.assertEqual(request.get_method(), 'GET') request = Request(\"http://www.python.org\",", "= FakeFtpWrapper def unfakeftp(self): urllib.request.ftpwrapper = self._ftpwrapper_class class urlopen_FileTests(unittest.TestCase): \"\"\"Test", "so no match self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def", "escape_string = ''.join(escape_list) del escape_list result = urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'), 1,", "self.assertTrue(bypass('LOCALHOST')) # UPPERCASE self.assertTrue(bypass('.localhost')) self.assertTrue(bypass('newdomain.com:1234')) self.assertTrue(bypass('.newdomain.com:1234')) 
self.assertTrue(bypass('foo.d.o.t')) # issue 29142", "only the pairs: * 1st, 1 * 2nd, 2 *", "as much functionality as possible so as to cut down", "host = \"localhost\\r\\nX-injected: header\\r\\n\" schemeless_url = \"//\" + host +", "safe=b\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\") self.assertEqual(expect, result, \"using quote():", "and sparc. # Everywhere else they work ok, but on", "bytes, and once for the last byte). report = []", "fake http connection.\"\"\" def check_read(self, ver): self.fakehttp(b\"HTTP/\" + ver +", "'%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0$=%C1$'", "= FancyURLopener() _urlopener = opener else: opener = _urlopener if", "filename argument works. second_temp = \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) result", "unquote(): %r != %r\" % (expect, result)) # unquote_to_bytes given", "DummyURLopener(urllib.request.URLopener): def open_spam(self, url): return url with support.check_warnings( ('DummyURLopener style", "self.assertTrue(bypass('LocalHost')) # MixedCase self.assertTrue(bypass('LOCALHOST')) # UPPERCASE self.assertTrue(bypass('.localhost')) self.assertTrue(bypass('newdomain.com:1234')) self.assertTrue(bypass('.newdomain.com:1234')) self.assertTrue(bypass('foo.d.o.t'))", "resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_host_with_control_char_rejected(self): for", "200 OK\\r\\n\\r\\nHello!\") self.addCleanup(self.unfakehttp) filename, _ = urllib.request.URLopener().retrieve(url) self.assertEqual(os.path.splitext(filename)[1], \".txt\") @support.ignore_warnings(category=DeprecationWarning)", "%r != %r\" % (expect, result)) # Decode with UTF-8,", "!= %s\" % (expect, result)) expect = given result =", "\"url2pathname() failed; %s != %s\" % (expect, result)) given =", "(expect, result)) class Utility_Tests(unittest.TestCase): \"\"\"Testcase to test the various 
utility", "self.assertFalse(bypass('newdomain.com')) self.assertFalse(bypass('newdomain.com:1234')) def test_proxy_bypass_environment_newline(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com,", "urlopen(f\"http:{schemeless_url}\") self.assertNotIn(char, resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def", "# except socket.timeout: # pass # finally: # serv.close() #", "urlopen(url) # The authorization header must be in place self.assertIn(authorization,", "self.assertEqual(url2pathname(pathname2url(path)), path) class PathName2URLTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(pathname2url(\"C:\"), '///C:') self.assertEqual(pathname2url(\"C:\\\\\"), '///C:')", "result = urllib.parse.quote(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote(): %r !=", "report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size,", "# Test that passing True for 'doseq' parameter works correctly", "self.assertTrue(bypass('.anotherdomain.com:8888')) self.assertTrue(bypass('www.newdomain.com:1234')) self.assertFalse(bypass('prelocalhost')) self.assertFalse(bypass('newdomain.com')) # no port self.assertFalse(bypass('newdomain.com:1235')) # wrong", "buffer to store data for verification in urlopen tests. 
buf", "self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://user:pass@python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(),", "environment os.environ = collections.OrderedDict() def tearDown(self): os.environ = self._saved_env def", "result) # Test all above in latin-1 encoding given =", "urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() def test_read_0_9(self):", "result, \"using quote(): %r != %r\" % (expect, result)) #", "setting the filename argument works. second_temp = \"%s.2\" % support.TESTFN", "self.assertEqual(pathname2url(r\"\\folder\\test\" \"\\\\\"), '/folder/test/') def test_simple_compare(self): self.assertEqual(pathname2url(r'C:\\foo\\bar\\spam.foo'), \"///C:/foo/bar/spam.foo\" ) def test_long_drive_letter(self):", "def test_unquoting_mixed_case(self): # Test unquoting on mixed-case hex digits in", "fakehttp_wrapper = http.client.HTTPConnection authorization = (\"Authorization: Basic %s\\r\\n\" % b64encode(userpass.encode(\"ASCII\")).decode(\"ASCII\"))", "quote_plus(): \" \"%s should be escapes to %s, not %s\"", "\"readlines\", \"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.text_url_resp, attr), \"object returned", "data is None: return opener.open(url) else: return opener.open(url, data) def", "= urllib.parse.unquote_plus(given) self.assertEqual(expect, result, \"using unquote_plus(): %r != %r\" %", "\"fileno() did not return an int\") self.assertEqual(os.read(file_num, len(self.text)), self.text, \"Reading", "# Delete all proxy related env vars for k in", "for num in range(32)] # For 0x00 - 0x1F should_quote.append(r'<>#%\"{}|\\^[]`')", "Test with bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'), 'alpha%2Bbeta+gamma') # Test with safe", "urllib.request.Request request = Request(\"http://www.python.org\") 
self.assertEqual(request.get_method(), 'GET') request = Request(\"http://www.python.org\", {})", "spaces are quoted in URL so no match self.assertNotEqual(fp.geturl(), url)", "try: # We explicitly test urllib.request.urlopen() instead of the top", "self.returned_obj.readline(), \"calling readline() after exhausting the file did not\" \"", "methods for attr in (\"read\", \"readline\", \"readlines\", \"fileno\", \"close\", \"info\",", ": \";/?:@&=+$,\" Have special meaning in URIs and must be", "read). report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count,", "escaped if not being used for their special meaning Data", "urllib.parse.urlencode(given, doseq=True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # ASCII Encoding. On", "the \"network connection\" is established and once when the block", "reject local_file:// scheme class DummyURLopener(urllib.request.URLopener): def open_local_file(self, url): return url", "DeprecationWarning)): with self.assertRaises(ValueError): urllib.request.urlopen( \"https://localhost\", cafile=\"/nonexistent/path\", context=context ) class urlopen_DataTests(unittest.TestCase):", "ignored # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try:", "result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"xmlcharrefreplace\") self.assertEqual(expect, result, \"using quote(): %r", "30) # ftp.close() # # def testTimeoutValue(self): # ftp =", "\"\"\"Unit tests for urllib.request.Request.\"\"\" def test_default_values(self): Request = urllib.request.Request request", "!= %r\" % (expect, result)) def test_unquoting_plus(self): # Test difference", "Latin-1 # (Note, the string contains non-Latin-1-representable characters) result =", "%r != %r\" % (expect, result)) given = '%x' expect", "filePath): filePath = os.path.abspath(filePath) try: filePath.encode(\"utf-8\") except UnicodeEncodeError: raise 
unittest.SkipTest(\"filePath", "# evt.set() # # class FTPWrapperTests(unittest.TestCase): # # def setUp(self):", "= \"abcd\" result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using quote(): %r", "last byte). report = [] def hooktester(block_count, block_read_size, file_size, _report=report):", "\"using quote(): %r != %r\" % (expect, result)) # Same", "sequence of two-item sequences as an argument. self.help_inputtype([('1st', '1'), ('2nd',", "# 2x1 pixel RGB PNG image with one black and", "# comparison. # Use the iterator in the usual implicit", "result) # latin-1 given = ((\"\\u00a0\", \"\\u00c1\"),) expect = '%A0=%C1'", "self.image) def test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def test_invalid_base64_data(self): # missing padding character", "\"testing %s: expected 2 '&'s; got %s\" % (test_type, result.count('&')))", "\"http://{}@python.org/\".format(userpass) fakehttp_wrapper = http.client.HTTPConnection authorization = (\"Authorization: Basic %s\\r\\n\" %", "above attempts at injection within the url _path_ safe. 
escaped_char_repr", "a tuple\"\"\" self.assertIsInstance(urllib.request.thishost(), tuple) class URLopener_Tests(FakeHTTPMixin, unittest.TestCase): \"\"\"Testcase to test", "URL2PathNameTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(url2pathname(\"///C|\"), 'C:') self.assertEqual(url2pathname(\"///C:\"), 'C:') self.assertEqual(url2pathname(\"///C|/\"), 'C:\\\\') def", "= \"localhost\\r\\nX-injected: header\\r\\n\" schemeless_url = \"//\" + host + \":8080/test/?test=a\"", "%r != %r\" % (expect, result)) def test_unquoting_plus(self): # Test", "= 9093 # self.evt = threading.Event() # threading.Thread(target=server, args=(self.evt,)).start() #", "expect = b'\\xab\\xea' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes():", "import sys import tempfile from nturl2path import url2pathname, pathname2url from", "Test '/' is default value for 'safe' parameter self.assertEqual(urllib.parse.quote.__defaults__[0], '/')", "env vars for k in list(os.environ): if 'proxy' in k.lower():", "are: Reserved characters : \";/?:@&=+$,\" Have special meaning in URIs", "close(self): pass FakeHTTPConnection.fakedata = fakedata return FakeHTTPConnection class FakeHTTPMixin(object): def", "import b64encode import collections def hexescape(char): \"\"\"Escape char as RFC", "return newFilePath def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def test_basic(self): # Make", "% result.count('&')) def test_empty_sequence(self): self.assertEqual(\"\", urllib.parse.urlencode({})) self.assertEqual(\"\", urllib.parse.urlencode([])) def test_nonstring_values(self):", "= f\"//localhost:7777/test{char}/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: # We explicitly test", "result = urllib.request.pathname2url(given) self.assertEqual(expect, result, \"pathname2url() failed; %s != %s\"", "self.assertEqual(report[2][1], 8192) class urlretrieve_HttpTests(unittest.TestCase, 
FakeHTTPMixin): \"\"\"Test urllib.urlretrieve() using fake http", "self.text, \"Reading on the file descriptor returned by fileno() \"", "Latin-1 encoding. given = (('\\u00a0', '\\u00c1'),) expect = '%A0=%C1' result", "in the original URL. url = 'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\")", "r\"\"\"Tests for urllib.quote() and urllib.quote_plus() According to RFC 3986 (Uniform", "bytes as input, with unescaped non-ASCII bytes # (Technically an", "import ftplib, time, threading # ftplib.FTP.port = 9093 # self.evt", "assume anything about order. Docs make no guarantee and have", ":;,%=& \\u00f6 \\u00c4 \" # 2x1 pixel RGB PNG image", "self.assertEqual('localhost', proxies['no']) # List of no_proxies with space. self.env.set('NO_PROXY', 'localhost,", "%s\" % (expect, result)) given = os.path.join(\"make sure\", \"using_quote\") expect", "= hex(ord(char))[2:].upper() if len(hex_repr) == 1: hex_repr = \"0%s\" %", "not allowed to have non-ASCII characters) result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\",", "self.assertEqual(expect, result, \"url2pathname() failed; %s != %s\" % (expect, result))", "why keep failing in windows and sparc. # Everywhere else", "injection within the url _path_ safe. 
InvalidURL = http.client.InvalidURL with", "raise type error on bytes input self.assertRaises(TypeError, urllib.parse.quote, given, encoding=\"latin-1\")", "\"mypass\", \"localhost\", 9093, []) # ftp.close() # # def testTimeoutNone(self):", "self.fakehttp(b'') try: self.assertRaises(OSError, urlopen, \"http://something\") finally: self.unfakehttp() def test_missing_localfile(self): #", "[chr(num) for num in range(32)] # For 0x00 - 0x1F", "%r\" % (expect, result)) # Test with a bytes as", "for attr in (\"read\", \"readline\", \"readlines\", \"close\", \"info\", \"geturl\", \"getcode\",", "urlopen, \"http://something\") finally: self.unfakehttp() def test_missing_localfile(self): # Test for #10836", "'GET') proxies = urllib.request.getproxies_environment() self.assertNotIn('http', proxies) finally: self.env.unset('REQUEST_METHOD') self.env.unset('HTTP_PROXY') def", "= urllib.parse.quote_plus(char) self.assertEqual(hexescape(char), result, \"using quote_plus(): \" \"%s should be", "self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_url_fragment(self): # Issue #11703: geturl()", "the Latin-1 range, encoded with Latin-1 result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding=\"latin-1\")", "= [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size))", "a data URL.\"\"\" def setUp(self): # text containing URL special-", "to be preserved) given = b'%A2\\xd8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result", "range(32)] # For 0x00 - 0x1F should_quote.append(r'<>#%\"{}|\\^[]`') should_quote.append(chr(127)) # For", "result)) def test_unquote_to_bytes(self): given = 'br%C3%BCckner_sapporo_20050930.doc' expect = b'br\\xc3\\xbcckner_sapporo_20050930.doc' result", "data = conn.recv(13-cantdata) # cantdata += len(data) # time.sleep(.3) #", "tmp_file.replace(os.path.sep, '/') try: self.assertTrue(os.path.exists(tmp_file)) with urlopen(tmp_fileurl) as fobj: 
self.assertTrue(fobj) finally:", "self.assertEqual(pathname2url(r\"\\\\folder\\test\" \"\\\\\"), '////folder/test/') self.assertEqual(pathname2url(r\"\\folder\\test\" \"\\\\\"), '/folder/test/') def test_simple_compare(self): self.assertEqual(pathname2url(r'C:\\foo\\bar\\spam.foo'), \"///C:/foo/bar/spam.foo\"", "+= len(data) # time.sleep(.3) # conn.send(\"2 No more lines\\n\") #", "tell why keep failing in windows and sparc. # Everywhere", ": 0x20 Must be escaped Delimiters : '<>#%\"' Must be", "quote(): %r != %r\" % (expect, result)) # Characters in", "file. Should call reporthook only 3 times (once # when", "url2pathname, pathname2url from base64 import b64encode import collections def hexescape(char):", "urllib.request.URLopener().retrieve(fileurl) # Some buildbots have TEMP folder that uses a", "test_basic(self): # Make sure that a local file just gets", "# missing padding character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class urlretrieve_FileTests(unittest.TestCase): \"\"\"Test urllib.urlretrieve() on", "try: self.assertRaises(urllib.error.HTTPError, urlopen, \"http://something\") finally: self.unfakehttp() def test_empty_socket(self): # urlopen()", "= (('\\u00a0', '\\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, encoding=\"ASCII\",", "tuple) class URLopener_Tests(FakeHTTPMixin, unittest.TestCase): \"\"\"Testcase to test the open method", "e: urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason) def test_file_notexists(self): fd, tmp_file = tempfile.mkstemp()", "\"\\u00c1\")),) expect = '%3F=1&%3F=%3F' result = urllib.parse.urlencode(given, True, encoding=\"ASCII\", errors=\"replace\")", "\"XX:\\\\\") def test_roundtrip_pathname2url(self): list_of_paths = ['///C:', '/////folder/test/', '///C:/foo/bar/spam.foo'] for path", "module required\") def test_url_host_with_newline_header_injection_rejected(self): 
self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host = \"localhost\\r\\nX-injected:", "self.unfakehttp() def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan", "= urllib.parse.quote_plus(partial_quote) self.assertEqual(expected, result, \"using quote_plus(): %r != %r\" %", "msg): urlopen(\"http://python.org/\") finally: self.unfakehttp() def test_redirect_limit_independent(self): # Ticket #12923: make", "testing different input types. 'given' must lead to only the", "DummyURLopener().open, url) self.assertRaises(OSError, DummyURLopener().retrieve, url) # Just commented them out.", "result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using quote(): %r", ".)\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code", "def test_default_values(self): Request = urllib.request.Request request = Request(\"http://www.python.org\") self.assertEqual(request.get_method(), 'GET')", "Use the iterator in the usual implicit way to test", "result) # Utf-8 given = ((\"\\u00a0\", \"\\u00c1\"),) expect = '%C2%A0=%C3%81'", "in place self.assertIn(authorization, fakehttp_wrapper.buf.decode(\"UTF-8\")) self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") # the", "default sans # space (separate test for that). 
should_quote =", "returned by urlopen() lacks %s attribute\" % attr) def test_read(self):", "used for their special meaning Data characters : letters, digits,", "bytes to be preserved) given = b'%A2\\xd8ab%FF' expect = b'\\xa2\\xd8ab\\xff'", "nturl2path import url2pathname, pathname2url from base64 import b64encode import collections", "% (expect, result)) def test_unquoting_parts(self): # Make sure unquoting works", "(Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license Connection: close Content-Type: text/html;", "Characters in the Latin-1 range, encoded with Latin-1 result =", "and must be escaped if not being used for their", "evt.set() # # class FTPWrapperTests(unittest.TestCase): # # def setUp(self): #", "(expected, result)) def test_quoting_space(self): # Make sure quote() and quote_plus()", "function.') def test_ntpath(self): given = ('/C:/', '///C:/', '/C|//') expect =", "quote(): %r != %r\" % (quote_by_default, result)) result = urllib.parse.quote_plus(quote_by_default,", "for details on quoting and such. \"\"\" def test_unquoting(self): #", "values given = b\"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result = urllib.parse.quote(given)", "(but not \"simple responses\" without # a status line) self.check_read(b\"0.9\")", "self.assertEqual(expect, result) # Test all above in latin-1 encoding given", "is # read). 
report = [] def hooktester(block_count, block_read_size, file_size,", "one white pixel self.image = ( b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\x02\\x00\\x00\\x00' b'\\x01\\x08\\x02\\x00\\x00\\x00{@\\xe8\\xdd\\x00\\x00\\x00\\x01sRGB\\x00\\xae' b'\\xce\\x1c\\xe9\\x00\\x00\\x00\\x0fIDAT\\x08\\xd7c```\\xf8\\xff\\xff?\\x00' b'\\x06\\x01\\x02\\xfe\\no/\\x1e\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82')", "a file # name (absolute path or relative to the", "time # serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET,", "(expect, result)) # Characters in Latin-1 range, encoded by with", "% (result, hexescape(' '))) result = urllib.parse.quote_plus(' ') self.assertEqual(result, '+',", "# conn, addr = serv.accept() # conn.send(\"1 Hola mundo\\n\") #", "'+', \"using quote_plus(): %r != +\" % result) given =", "ftp.close() # # def testTimeoutDefault(self): # # global default timeout", "# For 0x7F should_quote = ''.join(should_quote) for char in should_quote:", "urlopen(\"http://python.org/\") finally: self.unfakehttp() def test_redirect_limit_independent(self): # Ticket #12923: make sure", "should_quote partial_quote = \"ab[]cd\" expected = \"ab%5B%5Dcd\" result = urllib.parse.quote(partial_quote)", "urllib.parse.unquote(given, errors=\"replace\") self.assertEqual(expect, result, \"using unquote(): %r != %r\" %", "e: urlopen(test_ftp_url) self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) def test_ftp_nonexisting(self): with self.assertRaises(urllib.error.URLError) as e:", "\"https://localhost\", cafile=\"/nonexistent/path\", context=context ) class urlopen_DataTests(unittest.TestCase): \"\"\"Test urlopen() opening a", "a temporary file. Try to test as much functionality as", "of values. 
given = ((\"\\u00a0\", (1, \"\\u00c1\")),) expect = '%3F=1&%3F=%3F'", "% (expect, result)) def test_unquote_with_unicode(self): # Characters in the Latin-1", "self.assertTrue(bypass('newdomain.com:1234')) self.env.set('NO_PROXY', '*, anotherdomain.com') self.assertTrue(bypass('anotherdomain.com')) self.assertFalse(bypass('newdomain.com')) self.assertFalse(bypass('newdomain.com:1234')) def test_proxy_bypass_environment_newline(self): bypass", "'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\", {}, method='HEAD') self.assertEqual(request.method, 'HEAD')", "self.assertTrue(fobj) finally: os.close(fd) os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) with self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl) def test_ftp_nohost(self):", "') self.assertEqual(result, '+', \"using quote_plus(): %r != +\" % result)", "# Make sure quote() and quote_plus() handle spaces as specified", "given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\", encoding=\"latin-1\") expect", "class PathName2URLTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(pathname2url(\"C:\"), '///C:') self.assertEqual(pathname2url(\"C:\\\\\"), '///C:') def test_converting_when_no_drive_letter(self):", "in a mapping object as an argument. 
self.help_inputtype({\"1st\":'1', \"2nd\":'2', \"3rd\":'3'},", "self.text) def test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname) class ProxyTests(unittest.TestCase): def setUp(self):", "when the \"network connection\" is established, once for the next", "way result = urllib.parse.quote(' ') self.assertEqual(result, hexescape(' '), \"using quote():", "r'\\\\\\C\\test' '\\\\') self.assertEqual(url2pathname(\"////C/test/\"), r'\\\\C\\test' '\\\\') def test_simple_compare(self): self.assertEqual(url2pathname(\"///C|/foo/bar/spam.foo\"), r'C:\\foo\\bar\\spam.foo') def", "to test for ticket #4608. for line in self.returned_obj: self.assertEqual(line,", "Latin-1 result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding=\"latin-1\") expect = 'br\\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect, result,", "OSError for many error codes. self.fakehttp(b'''HTTP/1.1 401 Authentication Required Date:", "OK\\r\\n\\r\\nHello.\") host = \"localhost:7777?a=1 HTTP/1.1\\r\\nX-injected: header\\r\\nTEST: 123\" schemeless_url = \"//\"", "= urllib.parse.urlencode(given, doseq=True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # ASCII Encoding.", "# We explicitly test urllib.request.urlopen() instead of the top #", "expect_somewhere: self.assertIn(expected, result, \"testing %s: %s not found in %s\"", "\"using dict as input type\") def test_using_sequence(self): # Test passing", "self.assertEqual(self.text_url_resp.geturl(), self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(), self.image_url) def test_read_text(self): self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']),", "given = ((\"\\u00a0\", \"\\u00c1\"),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given,", "return io.BytesIO.read(self, amt) def readline(self, length=None): if 
self.closed: return b\"\"", "= 'C:\\\\path' result = urllib.request.url2pathname(given) self.assertEqual(expect, result, 'urllib.request.url2pathname() failed; %s", "% (expect, result)) def test_unquoting_plus(self): # Test difference between unquote()", "quoting and unquoting works for pathnam2url() and # url2pathname() respectively", "# def testTimeoutValue(self): # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093,", "= \"//\" + host + \":8080/test/?test=a\" try: # We explicitly", "= tempfile.mkstemp(dir=tmpdir) os.close(fd) fileurl = \"file:\" + urllib.request.pathname2url(tmpfile) filename, _", "%s, not %s\" % (char, hexescape(char), result)) result = urllib.parse.quote_plus(char)", "not encodable to utf8\") return \"file://%s\" % urllib.request.pathname2url(filePath) def createNewTempFile(self,", "anotherdomain.com, newdomain.com:1234') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888')) self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234')) def test_proxy_cgi_ignore(self): try: self.env.set('HTTP_PROXY', 'http://somewhere:3128')", "socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutDefault(self):", "def createNewTempFile(self, data=b\"\"): \"\"\"Creates a new temporary file containing the", "result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote(): %r != %r\"", "expected in expect_somewhere: self.assertIn(expected, result, \"testing %s: %s not found", "% urllib.parse.quote(\"quot=ing\") result = urllib.request.pathname2url(given) self.assertEqual(expect, result, \"pathname2url() failed; %s", "# urlopen uses FancyURLOpener which goes via a codepath that", "str(len(self.image))) self.assertEqual(urllib.request.urlopen(\"data:,\").info().get_params(), [('text/plain', ''), ('charset', 'US-ASCII')]) def test_geturl(self): 
self.assertEqual(self.text_url_resp.geturl(), self.text_url)", "(Note, the string contains non-Latin-1-representable characters) result = urllib.parse.unquote(\"\\u6f22%FC\", encoding=\"latin-1\")", "escaped_char_repr = repr(char).replace('\\\\', r'\\\\') InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL,", "def test_non_ascii_drive_letter(self): self.assertRaises(IOError, url2pathname, \"///\\u00e8|/\") def test_roundtrip_url2pathname(self): list_of_paths = ['C:',", "= {'sequence':['1', '2', '3']} expect = \"sequence=%s\" % urllib.parse.quote_plus(str(['1', '2',", "gamma'), 'alpha%2Bbeta+gamma') # Test with safe bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'),", "result = urllib.request.url2pathname(result) self.assertEqual(expect, result, \"url2pathname() failed; %s != %s\"", "self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\", {}, method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(),", "InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"https:{schemeless_url}\") finally:", "= \"ab[]cd\" expected = \"ab%5B%5Dcd\" result = urllib.parse.quote(partial_quote) self.assertEqual(expected, result,", "UTF-8 encoding. 
given = (('\\u00a0', '\\u00c1'),) expect = '%C2%A0=%C3%81' result", "with empty fake environment os.environ = collections.OrderedDict() def tearDown(self): os.environ", "9093, []) # ftp.close() # # def testTimeoutNone(self): # #", "The Python code of ``'%' + hex(ord(<character>))[2:]`` escapes a character", "2 '&'s; got %s\" % (test_type, result.count('&'))) amp_location = result.index('&')", "with unescaped non-ASCII bytes # (Technically an invalid URI; expect", "# the spaces are quoted in URL so no match", "urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result) given = ((\"\\u00a0\", (42, \"\\u00c1\")),)", "# # connects # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093,", "Sequence of values given = ((b'\\xa0\\x24', (42, b'\\xc1\\x24')),) expect =", "mundo\\n\") # cantdata = 0 # while cantdata < 13:", "return FakeHTTPConnection class FakeHTTPMixin(object): def fakehttp(self, fakedata, mock_close=False): fake_http_class =", "os.environ['No_Proxy'] = 'localhost' self.assertFalse(urllib.request.proxy_bypass_environment('localhost')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) os.environ['http_proxy'] = '' os.environ['HTTP_PROXY'] =", "hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile()", "'\\u6f22\\u00fc' self.assertEqual(expect, result, \"using unquote(): %r != %r\" % (expect,", "close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') with self.assertRaises(urllib.error.ContentTooShortError):", "def hooktester(block_count, block_read_size, file_size, count_holder=[0]): self.assertIsInstance(block_count, int) self.assertIsInstance(block_read_size, int) self.assertIsInstance(file_size,", "reporthook only 1 time. report = [] def hooktester(block_count, block_read_size,", "result) # Latin-1 encoding. 
given = (('\\u00a0', '\\u00c1'),) expect =", "expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=\":$\") self.assertEqual(expect, result)", "hexescape(char), result)) del should_quote partial_quote = \"ab[]cd\" expected = \"ab%5B%5Dcd\"", "= urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\") self.assertEqual(expect,", "Test lowercase preference with removal os.environ['no_proxy'] = '' os.environ['No_Proxy'] =", "of non-ASCII and percent-encoded characters, UTF-8 result = urllib.parse.unquote(\"\\u6f22%C3%BC\") expect", "with a bytes as input given = b'%A2%D8ab%FF' expect =", "conn.send(\"2 No more lines\\n\") # conn.close() # except socket.timeout: #", "\"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.returned_obj, attr), \"object returned by urlopen() lacks %s", "self.fakehttp(b'''HTTP/1.1 401 Authentication Required Date: Wed, 02 Jan 2008 03:03:54", "'localhost, anotherdomain.com, newdomain.com:1234') self.assertFalse(bypass('localhost\\n')) self.assertFalse(bypass('anotherdomain.com:8888\\n')) self.assertFalse(bypass('newdomain.com:1234\\n')) class ProxyTests_withOrderedEnv(unittest.TestCase): def setUp(self):", "%s\" % (expect, result)) expect = given result = urllib.request.url2pathname(result)", "self.text = b'testing urllib.urlretrieve' try: FILE = open(support.TESTFN, 'wb') FILE.write(self.text)", "os.environ # Monkey patch os.environ, start with empty fake environment", "proxies = urllib.request.getproxies_environment() # getproxies_environment use lowered case truncated (no", "Test on a string with unescaped non-ASCII characters # (Technically", "for line in self.returned_obj: self.assertEqual(line, self.text) def test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./' +", "b64encode(userpass.encode(\"ASCII\")).decode(\"ASCII\")) fp = urlopen(url) # The authorization header must be", "for k 
in list(os.environ): if 'proxy' in k.lower(): self.env.unset(k) def", "self.assertEqual(quote_by_default, result, \"using quote_plus(): %r != %r\" % (quote_by_default, result))", "# A mix of non-ASCII and percent-encoded characters, Latin-1 #", "def test_proxy_cgi_ignore(self): try: self.env.set('HTTP_PROXY', 'http://somewhere:3128') proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http'])", ". .)\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This", "name\":\"A bunch of pluses\"} expect = \"key+name=A+bunch+of+pluses\" result = urllib.parse.urlencode(given)", "result)) class UnquotingTests(unittest.TestCase): \"\"\"Tests for unquote() and unquote_plus() See the", "file containing the specified data, registers the file for deletion", "\"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.returned_obj, attr), \"object returned by urlopen() lacks", "time. 
report = [] def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count,", "= Request(\"http://www.python.org\", {}) self.assertEqual(request.get_method(), 'POST') def test_with_method_arg(self): Request = urllib.request.Request", "requests is deprecated.', DeprecationWarning)): self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/') # test the", "control.*\\\\r\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() def", "{}) self.assertEqual(request.get_method(), 'POST') def test_with_method_arg(self): Request = urllib.request.Request request =", "urllib.request.pathname2url(filePath) def createNewTempFile(self, data=b\"\"): \"\"\"Creates a new temporary file containing", "self.text = \"test data URLs :;,%=& \\u00f6 \\u00c4 \" #", "possible so as to cut down on reliance on connecting", "# this only helps to makes sure temporary files get", "self.assertEqual(expect, result) given = {\"key name\":\"A bunch of pluses\"} expect", "file. Should call reporthook only 1 time. 
report = []", "Latin-1 range, encoded with Latin-1 given = \"\\xa2\\xd8ab\\xff\" expect =", "a temp file to use for testing self.text = bytes(\"test_urllib:", "self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: fp = urllib.request.urlopen(url) self.assertEqual(fp.geturl(), url) finally:", "= result.index('&') on_amp_left = result[amp_location - 1] on_amp_right = result[amp_location", "character you write it as '%' + <2 character US-ASCII", "their unique way result = urllib.parse.quote(' ') self.assertEqual(result, hexescape(' '),", "# # def server(evt): # import socket, time # serv", "support.TESTFN, hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][2], 5) self.assertEqual(report[1][2], 5) def test_reporthook_8193_bytes(self):", "unescaped non-ASCII characters # (Technically an invalid URI; expect those", "= threading.Event() # threading.Thread(target=server, args=(self.evt,)).start() # time.sleep(.1) # # def", "= \"0%s\" % hex_repr return \"%\" + hex_repr # Shortcut", "try: newFile.close() except: pass return newFilePath def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName)", "= self.createNewTempFile(b\"x\" * 5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][2],", "email.message.Message) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_getcode(self): self.assertIsNone(self.returned_obj.getcode()) def test_iter(self):", "relative to the current working directory). # All files in", "other. 
I have a linux, and # the tests go", "characters # (Technically an invalid URI; expect those characters to", "connect(self): self.sock = FakeSocket(self.fakedata) type(self).fakesock = self.sock if mock_close: #", "the percent-escapes given = '%Ab%eA' expect = b'\\xab\\xea' result =", "self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual(b'', self.returned_obj.readline(), \"calling readline()", "quote(): %r != %r\" % (do_not_quote, result)) result = urllib.parse.quote_plus(do_not_quote)", "self.pathname = support.TESTFN self.returned_obj = urlopen(\"file:%s\" % self.pathname) def tearDown(self):", "safe character # Default utf-8 encoding given = ((b'\\xa0\\x24', b'\\xc1\\x24'),)", "def test_quote_with_unicode(self): # Characters in Latin-1 range, encoded by default", "proxies['http']) self.env.set('REQUEST_METHOD', 'GET') proxies = urllib.request.getproxies_environment() self.assertNotIn('http', proxies) finally: self.env.unset('REQUEST_METHOD')", "\"<>\" result = urllib.parse.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote(): %r", "= \"//\" + host + \":8080/test/?test=a\" try: InvalidURL = http.client.InvalidURL", "# Test on zero length file. Should call reporthook only", "= urllib.parse.quote(' ') self.assertEqual(result, hexescape(' '), \"using quote(): %r !=", "urllib.parse.urlencode([])) def test_nonstring_values(self): self.assertEqual(\"a=1\", urllib.parse.urlencode({\"a\": 1})) self.assertEqual(\"a=None\", urllib.parse.urlencode({\"a\": None})) def", "anybody has one of the problematic environments, please help! #", "be escapes to %s, not %s\" % (char, hexescape(char), result))", "\"\"\" def test_unquoting(self): # Make sure unquoting of all ASCII", "a character properly. 
Case does not matter on the hex", "!= %r\" % (result, hexescape(' '))) result = urllib.parse.quote_plus(' ')", "directly to percent-encoded values given = b\"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\"", "(('\\u00a0', '\\u00c1'),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, encoding=\"latin-1\") self.assertEqual(expect,", "the \"network connection\" is established, once for the next 8192", "% (test_type, result.count('&'))) amp_location = result.index('&') on_amp_left = result[amp_location -", "def test_quoting_space(self): # Make sure quote() and quote_plus() handle spaces", "= \"sequence=%s\" % urllib.parse.quote_plus(str(['1', '2', '3'])) result = urllib.parse.urlencode(given) self.assertEqual(expect,", "self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ()) def test_unquoting_mixed_case(self): # Test unquoting on", "\"testing %s: \" \"unexpected number of characters: %s != %s\"", "works for pathnam2url() and # url2pathname() respectively given = os.path.join(\"needs\",", "self.text, \"readlines() returned improper text\") def test_fileno(self): file_num = self.returned_obj.fileno()", "to have non-ASCII characters) result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\\xfc\") expect", "# \"ValueError: I/O operation on closed file\" which is logged", "if the underlying socket does not send any # data.", "self.assertTrue(bypass('d.o.t')) self.assertTrue(bypass('anotherdomain.com:8888')) self.assertTrue(bypass('.anotherdomain.com:8888')) self.assertTrue(bypass('www.newdomain.com:1234')) self.assertFalse(bypass('prelocalhost')) self.assertFalse(bypass('newdomain.com')) # no port self.assertFalse(bypass('newdomain.com:1235'))", "self.assertEqual(urllib.parse.quote.__defaults__[0], '/') def test_safe(self): # Test setting 'safe' parameter does", "(\\x24) as safe character # Default utf-8 encoding given =", "\"%A2%D8ab%FF\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote(): %r 
!=", "'ftp:///path' with self.assertRaises(urllib.error.URLError) as e: urlopen(test_ftp_url) self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) def test_ftp_nonexisting(self):", "self.sock if mock_close: # bpo-36918: HTTPConnection destructor calls close() which", "changes to env vars self.env = support.EnvironmentVarGuard() # Delete all", "+ [0x7f]: char = chr(char_no) schemeless_url = f\"//localhost{char}/test/\" self.fakehttp(b\"HTTP/1.1 200", "self.assertEqual(url2pathname(\"///C|\"), 'C:') self.assertEqual(url2pathname(\"///C:\"), 'C:') self.assertEqual(url2pathname(\"///C|/\"), 'C:\\\\') def test_converting_when_no_drive_letter(self): # cannot", "type(self).fakesock = self.sock if mock_close: # bpo-36918: HTTPConnection destructor calls", "unquote() and unquote_plus() See the doc string for quoting_Tests for", "= self.createNewTempFile(b\"x\" * 8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][2],", "finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # #", "expect = '%A0%24=%C1%24' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result =", "\"%s should be escapes to %s, not %s\" % (char,", "and amps \"testing %s: \" \"unexpected number of characters: %s", "class Utility_Tests(unittest.TestCase): \"\"\"Testcase to test the various utility functions in", "def test_unquoting_plus(self): # Test difference between unquote() and unquote_plus() given", "' ') result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, \"using unquote_plus(): %r", "1 if self.io_refs == 0: io.BytesIO.close(self) class FakeHTTPConnection(http.client.HTTPConnection): # buffer", "fail the # instant it returned anything beyond the first", "the safe characters are not quoted by urlopen self.assertEqual(DummyURLopener().open( 
\"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/\"),", "UTF-8 # encoded). result = urllib.parse.unquote_to_bytes(\"\\u6f22%C3%BC\") expect = b'\\xe6\\xbc\\xa2\\xc3\\xbc' #", "!= %r\" % (expect, result)) # Characters in the Latin-1", "given = os.path.join(\"make sure\", \"using_quote\") expect = \"%s/using_quote\" % urllib.parse.quote(\"make", "\"mypass\", \"localhost\", 9093, [], # timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) #", "should raise type error on bytes input self.assertRaises(TypeError, urllib.parse.quote, given,", "% (quote_by_default, result)) result = urllib.parse.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using", "latin-1 encoding given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\",", "'\\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect,", "(expect, result)) # Test with a bytes as input given", "self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutValue(self): # ftp", "def testTimeoutDefault(self): # # global default timeout is used #", "tearDown(self): # Restore all proxy related env vars self.env.__exit__() del", "expect = given result = urllib.request.url2pathname(result) self.assertEqual(expect, result, \"url2pathname() failed;", "xmlcharref error handling given = \"\\u6f22\\u5b57\" expect = \"%26%2328450%3B%26%2323383%3B\" #", "(expect, result)) expect = given.replace('+', ' ') result = urllib.parse.unquote_plus(given)", "200 OK\\r\\n\\r\\nHello.\") try: # We explicitly test urllib.request.urlopen() instead of", "files. 
for each in self.tempFiles: try: os.remove(each) except: pass def", "of characters: %s != %s\" % (test_type, len(result), (5 *", "Delete all proxy related env vars for k in list(os.environ):", "quote_plus(): %r != %r\" % (quote_by_default, result)) # Safe expressed", "!= %r\" % (expect, result)) class UnquotingTests(unittest.TestCase): \"\"\"Tests for unquote()", "= urllib.request.pathname2url(given) self.assertEqual(expect, result, \"pathname2url() failed; %s != %s\" %", "8192) self.assertEqual(report[2][1], 8192) class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin): \"\"\"Test urllib.urlretrieve() using fake", "'%3F=1&%3F=%3F' result = urllib.parse.urlencode(given, True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) #", "= chr(char_no) schemeless_url = f\"//localhost:7777/test{char}/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: #", "# Everywhere else they work ok, but on those machines,", "% (char, hexescape(char), result)) del should_quote partial_quote = \"ab[]cd\" expected", "requests each use their # own retry limit. 
for i", "(hexescape('&'), hexescape('=')) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) given = {\"key", "host + \":8080/test/?test=a\" try: InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL,", "given = {'sequence':['1', '2', '3']} expect = \"sequence=%s\" % urllib.parse.quote_plus(str(['1',", "except: pass return newFilePath def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def test_basic(self):", "\" \"unexpected number of characters: %s != %s\" % (test_type,", "urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, [], # timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30)", "% (expect, result)) # A mix of non-ASCII and percent-encoded", "self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp()", "According to RFC 3986 (Uniform Resource Identifiers), to escape a", "gamma', b'+'), 'alpha+beta+gamma') def test_quote_bytes(self): # Bytes should quote directly", "the file.\"\"\" newFd, newFilePath = tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath) newFile =", "as input, with unescaped non-ASCII bytes # (Technically an invalid", "import io import unittest from unittest.mock import patch from test", "with replace error handling given = \"\\u6f22\\u5b57\" expect = \"%3F%3F\"", "# \"&#28450;&#23383;\" result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"xmlcharrefreplace\") self.assertEqual(expect, result, \"using", "Safe parameter in sequence given = ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd, 42)),)", "for what was in Python 2's \"urllib\" module\"\"\" import urllib.parse", "support.check_warnings(('', BytesWarning), quiet=True): self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'') def test_unquoting_badpercent(self): #", "the urllib.\"\"\" def test_thishost(self): \"\"\"Test the 
urllib.request.thishost utility function returns", "expect = \"sequence=%s\" % urllib.parse.quote_plus(str(['1', '2', '3'])) result = urllib.parse.urlencode(given)", "newFilePath def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def test_basic(self): # Make sure", "cannot assume anything about order. Docs make no guarantee and", "os.remove(support.TESTFN) def test_interface(self): # Make sure object returned by urlopen()", "par3): pass with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL, reporthook=_reporthook) finally: self.unfakehttp() def", "def _reporthook(par1, par2, par3): pass with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL, reporthook=_reporthook)", "* 3rd, 3 Test cannot assume anything about order. Docs", "DummyURLopener(urllib.request.URLopener): def open_local_file(self, url): return url for url in ('local_file://example',", "%s\\n\" % self.__class__.__name__, \"ascii\") f = open(support.TESTFN, 'wb') try: f.write(self.text)", "quote(): %r != %r\" % (expect, result)) def test_default_quoting(self): #", "collections def hexescape(char): \"\"\"Escape char as RFC 2396 specifies\"\"\" hex_repr", "= given.replace(' ', '+') result = urllib.parse.quote_plus(given) self.assertEqual(expect, result, \"using", "\" \"%s\" % result) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None) self.assertRaises((TypeError, AttributeError),", "def test_quoted_open(self): class DummyURLopener(urllib.request.URLopener): def open_spam(self, url): return url with", "self.assertEqual(expect, result, \"pathname2url() failed; %s != %s\" % (expect, result))", "# self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper(\"myuser\",", "self.assertRaises(IOError, pathname2url, \"XX:\\\\\") def test_roundtrip_pathname2url(self): list_of_paths = ['///C:', 
'/////folder/test/', '///C:/foo/bar/spam.foo']", "= '%xab' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result,", "\"using unquote_to_bytes(): %r != %r\" % (expect, result)) def test_unquote_with_unicode(self):", "self.assertTrue(hasattr(self.returned_obj, attr), \"object returned by urlopen() lacks %s attribute\" %", "'%3F=%3F' result = urllib.parse.urlencode(given, doseq=True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) #", "def test_read_0_9(self): # \"0.9\" response accepted (but not \"simple responses\"", "result, \"using quote_plus(): %r != %r\" % (do_not_quote, result)) def", "char = chr(char_no) schemeless_url = f\"//localhost{char}/test/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try:", "urllib.parse.quote(given, encoding=\"latin-1\", errors=\"xmlcharrefreplace\") self.assertEqual(expect, result, \"using quote(): %r != %r\"", "\"pathname2url() failed; %s != %s\" % (result, expected_url)) result =", "Test close() by calling it here and then having it", "rather than str result = urllib.parse.quote(quote_by_default, safe=b\"<>\") self.assertEqual(quote_by_default, result, \"using", "self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_getcode(self): self.assertIsNone(self.returned_obj.getcode()) def test_iter(self): # Test iterator", "def testBasic(self): # # connects # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\",", "Make sure quote() does not quote letters, digits, and \"_,.-\"", "given, test_type): \"\"\"Helper method for testing different input types. 
'given'", "+ b\" 200 OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://python.org/\") self.assertEqual(fp.readline(), b\"Hello!\")", "''), ('charset', 'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image))) self.assertEqual(urllib.request.urlopen(\"data:,\").info().get_params(), [('text/plain', ''), ('charset', 'US-ASCII')])", "self.fp.flush() which raises # \"ValueError: I/O operation on closed file\"", "# (Since URIs are not allowed to have non-ASCII characters)", "\"testing %s: '&' not located in proper place in %s\"", "conn.recv(13-cantdata) # cantdata += len(data) # time.sleep(.3) # conn.send(\"2 No", "self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_host_with_control_char_rejected(self): for char_no in", "# Characters in BMP, Latin-1, with xmlcharref error handling given", "# calls urllib.parse.quote() on the URL which makes all of", "it # does nothing about trying to close files that", "returns the absolute path of the file.\"\"\" newFd, newFilePath =", "\"\\u00c1\"),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect,", "mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try:", "= urllib.parse.unquote(\"\\u6f22%C3%BC\") expect = '\\u6f22\\u00fc' self.assertEqual(expect, result, \"using unquote(): %r", "only 1 time. 
report = [] def hooktester(block_count, block_read_size, file_size,", "result = urllib.parse.urlencode(given, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # Default is", "# UTF-8 for \"\\u6f22\\u00fc\" self.assertEqual(expect, result, \"using unquote_to_bytes(): %r !=", "'http://somewhereelse:3128' proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin):", "urllib.parse.quote(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote(): %r != %r\" %", "environments, please help! # . Facundo # # def server(evt):", "import tempfile from nturl2path import url2pathname, pathname2url from base64 import", "ver + b\" 200 OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://python.org/\") self.assertEqual(fp.readline(),", "result = urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'), 1, \"using unquote(): not all characters", "pass def constructLocalFileUrl(self, filePath): filePath = os.path.abspath(filePath) try: filePath.encode(\"utf-8\") except", "def test_ntpath(self): given = ('/C:/', '///C:/', '/C|//') expect = 'C:\\\\'", "result, \"using unquote(): %r != %r\" % (expect, result)) result", "and one white pixel self.image = ( b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\x02\\x00\\x00\\x00' b'\\x01\\x08\\x02\\x00\\x00\\x00{@\\xe8\\xdd\\x00\\x00\\x00\\x01sRGB\\x00\\xae' b'\\xce\\x1c\\xe9\\x00\\x00\\x00\\x0fIDAT\\x08\\xd7c```\\xf8\\xff\\xff?\\x00'", "def test_read_text(self): self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']), self.text) def test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']), self.text)", "expect = '%3F=1&%3F=%3F' result = urllib.parse.urlencode(given, True, encoding=\"ASCII\", errors=\"replace\") 
self.assertEqual(expect,", "def test_basic(self): # Make sure simple tests pass expected_path =", "the top # level 'def urlopen()' function defined in this...", "def test_read_1_0(self): self.check_read(b\"1.0\") def test_read_1_1(self): self.check_read(b\"1.1\") def test_read_bogus(self): # urlopen()", "''), ('charset', 'US-ASCII')]) def test_geturl(self): self.assertEqual(self.text_url_resp.geturl(), self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(),", "def test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual(b'', self.returned_obj.readline(),", "percent-encoded values given = b\"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result =", "test_copy(self): # Test that setting the filename argument works. second_temp", "'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'), 'alpha+beta+gamma') # Test with bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta", "self.assertEqual(os.read(file_num, len(self.text)), self.text, \"Reading on the file descriptor returned by", "# issue 29142 self.assertTrue(bypass('d.o.t')) self.assertTrue(bypass('anotherdomain.com:8888')) self.assertTrue(bypass('.anotherdomain.com:8888')) self.assertTrue(bypass('www.newdomain.com:1234')) self.assertFalse(bypass('prelocalhost')) self.assertFalse(bypass('newdomain.com')) #", "invalid URI; expect those characters to be UTF-8 # encoded).", "def test_unquote_to_bytes(self): given = 'br%C3%BCckner_sapporo_20050930.doc' expect = b'br\\xc3\\xbcckner_sapporo_20050930.doc' result =", "if len(hex_repr) == 1: hex_repr = \"0%s\" % hex_repr return", "True, safe=\":$\") self.assertEqual(expect, result) # Test all above in latin-1", "special meaning in URIs and must be escaped if not", "!= %s\" % (result, expected_url)) result = 
urllib.request.url2pathname(expected_url) self.assertEqual(expected_path, result,", "improper text\") def test_fileno(self): file_num = self.returned_obj.fileno() self.assertIsInstance(file_num, int, \"fileno()", "Unwise : \"{}|\\^[]`\" Must be escaped \"\"\" def test_never_quote(self): #", "\"%3F%3F\" # \"??\" result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result,", "'%C2%A0=42&%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # latin-1 given", "((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0$=%C1$' result = urllib.parse.urlencode(given, doseq=True, safe=\":$\",", "class DummyURLopener(urllib.request.URLopener): def open_spam(self, url): return url with support.check_warnings( ('DummyURLopener", "timeout is used # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30)", "two-item sequences as an argument. self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd',", "%r\" % (expect, result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, \"using", "context = ssl.create_default_context() with support.check_warnings(('', DeprecationWarning)): with self.assertRaises(ValueError): urllib.request.urlopen( \"https://localhost\",", "= self._ftpwrapper_class class urlopen_FileTests(unittest.TestCase): \"\"\"Test urlopen() opening a temporary file.", "tempfile.mkstemp() tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/') try: self.assertTrue(os.path.exists(tmp_file)) with", "no port self.assertFalse(bypass('newdomain.com:1235')) # wrong port def test_proxy_bypass_environment_always_match(self): bypass =", "Request(\"http://www.python.org\") self.assertEqual(request.get_method(), 'GET') request = Request(\"http://www.python.org\", {}) self.assertEqual(request.get_method(), 'POST') def", "5) def test_reporthook_8193_bytes(self): # Test on 8193 byte file. 
Should", "if not being used for their special meaning Data characters", "for testing. \"\"\" def setUp(self): # Create a temp file", "should_quote: result = urllib.parse.quote(char) self.assertEqual(hexescape(char), result, \"using quote(): \" \"%s", "not being used for their special meaning Data characters :", "(expect, result)) expect = given result = urllib.request.url2pathname(result) self.assertEqual(expect, result,", "that passing True for 'doseq' parameter works correctly given =", "b'\\xa2\\xd8ab\\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes(): %r !=", "def test_reporthook(self): # Make sure that the reporthook works. def", "a bytes rather than str result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=b\"\\xfc\")", "= ''.join(should_quote) for char in should_quote: result = urllib.parse.quote(char) self.assertEqual(hexescape(char),", "open_local_file(self, url): return url for url in ('local_file://example', 'local-file://example'): self.assertRaises(OSError,", "== 'win32', 'test specific to the urllib.url2path function.') def test_ntpath(self):", "as input given = b'%A2%D8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result =", "= (('\\u00a0', '\\u00c1'),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, encoding=\"latin-1\")", "try: urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, []) urlopen('ftp://localhost') finally:", "reporthook works. 
def hooktester(block_count, block_read_size, file_size, count_holder=[0]): self.assertIsInstance(block_count, int) self.assertIsInstance(block_read_size,", "def test_userpass_inurl_w_spaces(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: userpass = \"<PASSWORD>\" url", "= urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r != %r\" %", "= \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN), second_temp, hooktester) def", "windows and sparc. # Everywhere else they work ok, but", "\":8080/test/?test=a\" try: # We explicitly test urllib.request.urlopen() instead of the", "and do not need to be escaped; can be, though,", "\"Kanji\" result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r !=", "the URL so there is no injection. resp = urlopen(f\"http:{schemeless_url}\")", "module required\") def test_url_host_with_control_char_rejected(self): for char_no in list(range(0, 0x21)) +", "black and one white pixel self.image = ( b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\x02\\x00\\x00\\x00' b'\\x01\\x08\\x02\\x00\\x00\\x00{@\\xe8\\xdd\\x00\\x00\\x00\\x01sRGB\\x00\\xae'", "def test_simple_compare(self): self.assertEqual(pathname2url(r'C:\\foo\\bar\\spam.foo'), \"///C:/foo/bar/spam.foo\" ) def test_long_drive_letter(self): self.assertRaises(IOError, pathname2url, \"XX:\\\\\")", "test_default_quoting(self): # Make sure all characters that should be quoted", "``'%' + hex(ord(<character>))[2:]`` escapes a character properly. 
Case does not", "result)) given = '%' expect = bytes(given, 'ascii') result =", "ftp.close() # # def testTimeoutValue(self): # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\",", "BMP, Latin-1, with xmlcharref error handling given = \"\\u6f22\\u5b57\" expect", "of all ASCII values works escape_list = [] for num", "((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given,", "\"using unquote(): not all characters escaped: \" \"%s\" % result)", "opener.open(url, data) def FancyURLopener(): with support.check_warnings( ('FancyURLopener style of invoking", "% (test_type, result)) self.assertEqual(len(result), (5 * 3) + 2, #5", "hex_repr = hex(ord(char))[2:].upper() if len(hex_repr) == 1: hex_repr = \"0%s\"", "f.close() self.pathname = support.TESTFN self.returned_obj = urlopen(\"file:%s\" % self.pathname) def", "# urlopen() raises OSError if the underlying socket does not", "% (expect, result)) # Characters in BMP, encoded with Latin-1", "unquote(): not all characters escaped: \" \"%s\" % result) self.assertRaises((TypeError,", "\"ab\\u6f22\\u5b57 cd\" expect = \"ab%3F%3F+cd\" result = urllib.parse.quote_plus(given, encoding=\"latin-1\", errors=\"replace\")", "r'C:\\foo\\bar\\spam.foo') def test_non_ascii_drive_letter(self): self.assertRaises(IOError, url2pathname, \"///\\u00e8|/\") def test_roundtrip_url2pathname(self): list_of_paths =", "http.client.HTTPConnection authorization = (\"Authorization: Basic %s\\r\\n\" % b64encode(userpass.encode(\"ASCII\")).decode(\"ASCII\")) fp =", "fakehttp(fakedata, mock_close=mock_close) self._connection_class = http.client.HTTPConnection http.client.HTTPConnection = fake_http_class def unfakehttp(self):", "a raw string in \\ self.assertEqual(url2pathname(\"///C/test/\"), r'\\\\\\C\\test' '\\\\') self.assertEqual(url2pathname(\"////C/test/\"), r'\\\\C\\test'", "% (expect, result)) def test_quote_with_unicode(self): # Characters in Latin-1 range,", "!= %r\" % (expect, 
result)) expect = given.replace('+', ' ')", "= '' os.environ['No_Proxy'] = 'localhost' self.assertFalse(urllib.request.proxy_bypass_environment('localhost')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) os.environ['http_proxy'] = ''", "hex letters. The various character sets specified are: Reserved characters", "%r != %r\" % (expect, result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect,", "dictionary input. \"\"\" expect_somewhere = [\"1st=1\", \"2nd=2\", \"3rd=3\"] result =", "close(self): self.io_refs -= 1 if self.io_refs == 0: io.BytesIO.close(self) class", "hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self): # Test on", "safe=\":$\", encoding=\"latin-1\") given = ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd, 42)),) expect =", "a fake http connection.\"\"\" def check_read(self, ver): self.fakehttp(b\"HTTP/\" + ver", "for 'doseq' parameter works correctly given = {'sequence':['1', '2', '3']}", "FakeFTPMixin(object): def fakeftp(self): class FakeFtpWrapper(object): def __init__(self, user, passwd, host,", "proxy bypass and correct matching including ports os.environ['no_proxy'] = 'localhost,", "!= %r\" % (expect, result)) def test_unquote_to_bytes(self): given = 'br%C3%BCckner_sapporo_20050930.doc'", "urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r != %r\" % (expect,", "'%A0=%C1' result = urllib.parse.urlencode(given, encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_encoding_doseq(self): #", "1 second_temp = \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN), second_temp,", "injection. 
resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(' ', resp.geturl()) self.assertNotIn('\\r', resp.geturl()) self.assertNotIn('\\n',", "200 OK\\r\\n\\r\\nHello.\") host = \"localhost\\r\\nX-injected: header\\r\\n\" schemeless_url = \"//\" +", "bytes'): given = b'bl\\xc3\\xa5b\\xc3\\xa6rsyltet\\xc3\\xb8y' urllib.parse.unquote(given) class urlencode_Tests(unittest.TestCase): \"\"\"Tests for urlencode()\"\"\"", "\"ssl module required\") def test_url_path_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host =", "1})) self.assertEqual(\"a=None\", urllib.parse.urlencode({\"a\": None})) def test_nonstring_seq_values(self): self.assertEqual(\"a=1&a=2\", urllib.parse.urlencode({\"a\": [1, 2]},", "socket.timeout: # pass # finally: # serv.close() # evt.set() #", "end a raw string in \\ self.assertEqual(url2pathname(\"///C/test/\"), r'\\\\\\C\\test' '\\\\') self.assertEqual(url2pathname(\"////C/test/\"),", "= '%' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result,", "= ['///C:', '/////folder/test/', '///C:/foo/bar/spam.foo'] for path in list_of_paths: self.assertEqual(pathname2url(url2pathname(path)), path)", "files get deleted, but it # does nothing about trying", "unquoting on mixed-case hex digits in the percent-escapes given =", "Send '$' (\\x24) as safe character # Default utf-8 encoding", "= [] for num in range(128): given = hexescape(chr(num)) expect", "def test_doseq(self): # Test that passing True for 'doseq' parameter", "not _urlopener: opener = FancyURLopener() _urlopener = opener else: opener", "result)) result = urllib.parse.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote_plus(): %r", "pathnam2url() and # url2pathname() respectively given = os.path.join(\"needs\", \"quot=ing\", \"here\")", "Plain # urlopen uses FancyURLOpener which goes via a codepath", "urllib.parse.urlencode(given, True) 
self.assertEqual(expect, result) given = ((\"\\u00a0\", (42, \"\\u00c1\")),) expect", "return b\"\" return io.BytesIO.read(self, amt) def readline(self, length=None): if self.closed:", "!= %r\" % (expect, result)) escape_list.append(given) escape_string = ''.join(escape_list) del", "test_willclose(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: resp = urlopen(\"http://www.python.org\") self.assertTrue(resp.fp.will_close) finally:", "in \\ self.assertEqual(url2pathname(\"///C/test/\"), r'\\\\\\C\\test' '\\\\') self.assertEqual(url2pathname(\"////C/test/\"), r'\\\\C\\test' '\\\\') def test_simple_compare(self):", "the block is # read). report = [] def hooktester(block_count,", "%r\" % (quote_by_default, result)) result = urllib.parse.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result,", "of no_proxies with space. self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888'))", "need to be escaped; can be, though, if desired Control", ") def test_long_drive_letter(self): self.assertRaises(IOError, pathname2url, \"XX:\\\\\") def test_roundtrip_pathname2url(self): list_of_paths =", "fake_http_class def unfakehttp(self): http.client.HTTPConnection = self._connection_class class FakeFTPMixin(object): def fakeftp(self):", "ftp.close() # # def testTimeoutNone(self): # # global default timeout", "local_file:// scheme class DummyURLopener(urllib.request.URLopener): def open_local_file(self, url): return url for", "as input type\") def test_using_sequence(self): # Test passing in a", "not need to be escaped; can be, though, if desired", "!= %r\" % (expect, result)) def test_quote_plus_with_unicode(self): # Encoding (latin-1)", "string in \\ self.assertEqual(url2pathname(\"///C/test/\"), r'\\\\\\C\\test' '\\\\') 
self.assertEqual(url2pathname(\"////C/test/\"), r'\\\\C\\test' '\\\\') def", "is None: return opener.open(url) else: return opener.open(url, data) def FancyURLopener():", "= '%3F=%3F' result = urllib.parse.urlencode(given, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) #", "utf-8 encoding given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\")", "FakeHTTPMixin, FakeFTPMixin): \"\"\"Test urlopen() opening a fake http connection.\"\"\" def", "It # is the responsibility of the developer to properly", "escaped; can be, though, if desired Control characters : 0x00", "\"using unquote(): %r != %r\" % (expect, result)) # Decode", "result)) given = '%x' expect = bytes(given, 'ascii') result =", "result, 'urllib.request.url2pathname() failed; %s != %s' % (expect, result)) class", "urlopen() has the specified methods for attr in (\"read\", \"readline\",", "# Test passing in a mapping object as an argument.", "finally: # serv.close() # evt.set() # # class FTPWrapperTests(unittest.TestCase): #", "correct matching including ports os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234' os.environ['No_Proxy']", "!= %s\" % (test_type, len(result), (5 * 3) + 2))", "Note, # this only helps to makes sure temporary files", "and correct matching including ports os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234'", "%s != %s\" % (expect, result)) given = os.path.join(\"make sure\",", "URLopener class.\"\"\" def test_quoted_open(self): class DummyURLopener(urllib.request.URLopener): def open_spam(self, url): return", "# self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() class RequestTests(unittest.TestCase): \"\"\"Unit tests for", "no injection. 
resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(char, resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl,", "% (test_type, expected, result)) self.assertEqual(result.count('&'), 2, \"testing %s: expected 2", "result) def test_urlencode_encoding_doseq(self): # ASCII Encoding. Expect %3F with errors=\"replace'", "unittest from unittest.mock import patch from test import support import", "# by the tearDown() method for the test self.returned_obj.close() def", "with self.assertRaises(urllib.error.URLError) as e: urlopen(test_ftp_url) self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) def test_ftp_nonexisting(self): with", "def test_invalid_redirect(self): # urlopen() should raise OSError for many error", "should do quote_by_default = \"<>\" result = urllib.parse.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default,", "# Replacement character result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote():", "urllib.parse.urlencode(given, True, safe=\":$\", encoding=\"latin-1\") self.assertEqual(expect, result) class Pathname_Tests(unittest.TestCase): \"\"\"Test pathname2url()", "urllib.parse.quote, given, encoding=\"latin-1\") # Characters in BMP, encoded with Latin-1,", "# MixedCase self.assertTrue(bypass('LOCALHOST')) # UPPERCASE self.assertTrue(bypass('.localhost')) self.assertTrue(bypass('newdomain.com:1234')) self.assertTrue(bypass('.newdomain.com:1234')) self.assertTrue(bypass('foo.d.o.t')) #", "'http://somewhere:3128' proxies = urllib.request.getproxies_environment() self.assertEqual({}, proxies) # Test lowercase preference", "second_temp) self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp), \"copy of the file was not", "expect = \"%3F%3F\" # \"??\" result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"replace\")", "%r\" % (expect, result)) given = '%x' expect = bytes(given,", "= \"parts/of/a/path\" result = 
urllib.request.pathname2url(expected_path) self.assertEqual(expected_url, result, \"pathname2url() failed; %s", "= urllib.request.url2pathname(expected_url) self.assertEqual(expected_path, result, \"url2pathame() failed; %s != %s\" %", "% (expect, result)) def test_default_quoting(self): # Make sure all characters", "result = urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote_plus(): %r != %r\"", "urlopen(tmp_fileurl) def test_ftp_nohost(self): test_ftp_url = 'ftp:///path' with self.assertRaises(urllib.error.URLError) as e:", "one of the problematic environments, please help! # . Facundo", "urllib.parse.unquote, b'') def test_unquoting_badpercent(self): # Test unquoting on bad percent-escapes", "safe=\"\") self.assertEqual(expect, result, \"using quote(): %r != %r\" % (expect,", "test_reporthook_0_bytes(self): # Test on zero length file. Should call reporthook", "\"using quote(): %r != %r\" % (expect, result)) result =", "% (expect, result)) given = '///C|/path' expect = 'C:\\\\path' result", "__init__(self, user, passwd, host, port, dirs, timeout=None, persistent=True): pass def", "newFd, newFilePath = tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath) newFile = os.fdopen(newFd, \"wb\")", "self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host = \"localhost:7777?a=1 HTTP/1.1\\r\\nX-injected: header\\r\\nTEST: 123\" schemeless_url", "safe=\":$\", encoding=\"latin-1\") expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\\xa0\\x24',", "does nothing about trying to close files that may still", "match self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_URLopener_deprecation(self): with", "'\\u00c1'),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, encoding=\"latin-1\") self.assertEqual(expect, result)", "host = \"localhost:7777?a=1 HTTP/1.1\\r\\nX-injected: header\\r\\nTEST: 123\" schemeless_url = \"//\" +", "a local file just 
gets its own location returned and", "%r\" % (expect, result)) # Test on a string with", "f\" expect = given.replace(' ', hexescape(' ')) result = urllib.parse.quote(given)", "returned anything beyond the first line from the # comparison.", "times (once when # the \"network connection\" is established and", "= bytes(\"test_urllib: %s\\n\" % self.__class__.__name__, \"ascii\") f = open(support.TESTFN, 'wb')", "given = ((\"\\u00a0\", (42, \"\\u00c1\")),) expect = '%C2%A0=42&%C2%A0=%C3%81' result =", "test_proxy_bypass_environment_newline(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertFalse(bypass('localhost\\n')) self.assertFalse(bypass('anotherdomain.com:8888\\n'))", "sequence given = \"%F3%B1\" expect = \"\\ufffd\" # Replacement character", "value in given[\"sequence\"]: expect = \"sequence=%s\" % value self.assertIn(expect, result)", "given, encoding=\"latin-1\") # Characters in BMP, encoded with Latin-1, with", "Test automatic quoting and unquoting works for pathnam2url() and #", "given = \"\\u6f22\\u5b57\" # \"Kanji\" expect = \"%E6%BC%A2%E5%AD%97\" result =", "value for 'safe' parameter self.assertEqual(urllib.parse.quote.__defaults__[0], '/') def test_safe(self): # Test", "'2', '3'])) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given,", "expect = '%C2%A0=42&%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) #", "self.unfakehttp() def test_willclose(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: resp = urlopen(\"http://www.python.org\")", "= b'\\xab\\xea' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes(): %r", "returned the wrong number of lines\") self.assertEqual(lines_list[0], self.text, \"readlines() returned", "in %s\" % (test_type, expected, result)) self.assertEqual(result.count('&'), 
2, \"testing %s:", "has the specified methods for attr in (\"read\", \"readline\", \"readlines\",", "= hexescape(chr(num)) expect = chr(num) result = urllib.parse.unquote(given) self.assertEqual(expect, result,", "urllib.parse.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote_plus(): %r != %r\" %", "for path in list_of_paths: self.assertEqual(pathname2url(url2pathname(path)), path) if __name__ == '__main__':", "\"2nd=2\", \"3rd=3\"] result = urllib.parse.urlencode(given) for expected in expect_somewhere: self.assertIn(expected,", "mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license Connection: close Content-Type: text/html; charset=iso-8859-1 ''',", "tests go ok. # If anybody has one of the", "should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 401 Authentication", "= given result = urllib.request.url2pathname(result) self.assertEqual(expect, result, \"url2pathname() failed; %s", "= urllib.parse.urlencode(given, doseq=True, safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect, result) #", "hex(ord(char))[2:].upper() if len(hex_repr) == 1: hex_repr = \"0%s\" % hex_repr", "injection within the url _path_ safe. 
escaped_char_repr = repr(char).replace('\\\\', r'\\\\')", "located in proper place in %s\" % (test_type, result)) self.assertEqual(len(result),", "result = urllib.parse.urlencode(given, doseq=True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # ASCII", "socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093,", "expect = \"%E6%BC%A2%E5%AD%97\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote():", "which makes all of the # above attempts at injection", "urllib.parse.quote(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using quote(): %r != %r\"", "test self.returned_obj.close() def test_info(self): self.assertIsInstance(self.returned_obj.info(), email.message.Message) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname)", "unquote(): %r != %r\" % (expect, result)) # Characters in", "self.assertEqual(request.get_method(), 'HEAD') class URL2PathNameTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(url2pathname(\"///C|\"), 'C:') self.assertEqual(url2pathname(\"///C:\"), 'C:')", "(expect, result)) given = \"make+sure/using_unquote\" expect = os.path.join(\"make+sure\", \"using_unquote\") result", "len(self.text)), self.text, \"Reading on the file descriptor returned by fileno()", "''', mock_close=True) try: msg = \"Redirection to url 'file:\" with", "given = \"make+sure/using_unquote\" expect = os.path.join(\"make+sure\", \"using_unquote\") result = urllib.request.url2pathname(given)", "result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, doseq=True, safe=\":$\")", "* 2nd, 2 * 3rd, 3 Test cannot assume anything", "def FancyURLopener(): with support.check_warnings( ('FancyURLopener style of invoking requests is", "(42, b'\\xc1\\x24')),) expect = '%A0%24=42&%A0%24=%C1%24' result = urllib.parse.urlencode(given, True) 
self.assertEqual(expect,", "\"http://python.org/\") finally: self.unfakehttp() def test_invalid_redirect(self): # urlopen() should raise OSError", "urllib.request.URLopener() @unittest.skipUnless(ssl, \"ssl module required\") def test_cafile_and_context(self): context = ssl.create_default_context()", "# # def testTimeoutDefault(self): # # global default timeout is", "# # def tearDown(self): # self.evt.wait() # # def testBasic(self):", "= urllib.parse.quote(given, encoding=None, errors=None) self.assertEqual(expect, result, \"using quote(): %r !=", "def fakehttp(self, fakedata, mock_close=False): fake_http_class = fakehttp(fakedata, mock_close=mock_close) self._connection_class =", "\"abcd\" result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using quote(): %r !=", "self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason) def test_file_notexists(self): fd, tmp_file = tempfile.mkstemp() tmp_fileurl =", "socket.SO_REUSEADDR, 1) # serv.bind((\"\", 9093)) # serv.listen() # try: #", "expect = \"%s=%s\" % (hexescape('&'), hexescape('=')) result = urllib.parse.urlencode(given) self.assertEqual(expect,", "this... (quite ugly) # test suite. They use different url", "to env vars self.env = support.EnvironmentVarGuard() # Delete all proxy", "io.BytesIO.readline(self, length) def close(self): self.io_refs -= 1 if self.io_refs ==", "issue 29142 self.assertTrue(bypass('d.o.t')) self.assertTrue(bypass('anotherdomain.com:8888')) self.assertTrue(bypass('.anotherdomain.com:8888')) self.assertTrue(bypass('www.newdomain.com:1234')) self.assertFalse(bypass('prelocalhost')) self.assertFalse(bypass('newdomain.com')) # no", "socket does not send any # data. 
(#1680230) self.fakehttp(b'') try:", "OK\\r\\n\\r\\nHello.\") try: # We explicitly test urllib.request.urlopen() instead of the", "%r != %r\" % (expect, result)) # Encoding argument should", "'/////folder/test/', '///C:/foo/bar/spam.foo'] for path in list_of_paths: self.assertEqual(pathname2url(url2pathname(path)), path) if __name__", "except: pass def constructLocalFileUrl(self, filePath): filePath = os.path.abspath(filePath) try: filePath.encode(\"utf-8\")", "support.check_warnings( ('FancyURLopener style of invoking requests is deprecated.', DeprecationWarning)): return", "io.BytesIO(), 0 def close(self): pass self._ftpwrapper_class = urllib.request.ftpwrapper urllib.request.ftpwrapper =", "# pass # finally: # serv.close() # evt.set() # #", "an email.message.Message instance \" \"as second returned value\") def test_copy(self):", "with unescaped non-ASCII characters # (Technically an invalid URI; expect", "expect = given.replace('+', ' ') result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result,", "finally: self.env.unset('REQUEST_METHOD') self.env.unset('HTTP_PROXY') def test_proxy_bypass_environment_host_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost,", "if 'proxy' in k.lower(): self.env.unset(k) def tearDown(self): # Restore all", "passing True for 'doseq' parameter works correctly given = {'sequence':['1',", "urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl", "'file:\" with self.assertRaisesRegex(urllib.error.HTTPError, msg): urlopen(\"http://python.org/\") finally: self.unfakehttp() def test_redirect_limit_independent(self): #", "'urllib.request..url2pathname() failed; %s != %s' % (expect, result)) given =", "= None import sys import tempfile from nturl2path import url2pathname,", "tempfile.mkstemp() try: 
self.registerFileForCleanUp(newFilePath) newFile = os.fdopen(newFd, \"wb\") newFile.write(data) newFile.close() finally:", "way to test for ticket #4608. for line in self.returned_obj:", "self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual(b'', self.returned_obj.readline(), \"calling readline() after exhausting the file", "UTF-8 for \"\\u6f22\\u00fc\" self.assertEqual(expect, result, \"using unquote_to_bytes(): %r != %r\"", "character sets specified are: Reserved characters : \";/?:@&=+$,\" Have special", "text\") def test_close(self): # Test close() by calling it here", "bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t') self.assertTrue(bypass('localhost')) self.assertTrue(bypass('LocalHost'))", "values works escape_list = [] for num in range(128): given", "\"urllib\" module\"\"\" import urllib.parse import urllib.request import urllib.error import http.client", "= urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result)", "urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN), second_temp, hooktester) def test_reporthook_0_bytes(self): # Test on zero", "self.assertIsNone(self.returned_obj.getcode()) def test_iter(self): # Test iterator # Don't need to", "meaning in URIs and must be escaped if not being", "!= %r\" % (expect, result)) def test_quote_with_unicode(self): # Characters in", "use for testing self.text = bytes(\"test_urllib: %s\\n\" % self.__class__.__name__, \"ascii\")", "specified data, registers the file for deletion during the test", "return \"file://%s\" % urllib.request.pathname2url(filePath) def createNewTempFile(self, data=b\"\"): \"\"\"Creates a new", "= \"\\ufffd\" # Replacement character result = urllib.parse.unquote(given) self.assertEqual(expect, result,", "'GET') request = Request(\"http://www.python.org\", {}) 
self.assertEqual(request.get_method(), 'POST') def test_with_method_arg(self): Request", "result, \"using unquote_to_bytes(): %r != %r\" % (expect, result)) #", "letters, digits, and \"-_.!~*'()\" Unreserved and do not need to", "a status line) self.check_read(b\"0.9\") def test_read_1_0(self): self.check_read(b\"1.0\") def test_read_1_1(self): self.check_read(b\"1.1\")", "with Latin-1 result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding=\"latin-1\") expect = 'br\\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect,", "_urlopener: opener = FancyURLopener() _urlopener = opener else: opener =", "Latin-1, with xmlcharref error handling given = \"\\u6f22\\u5b57\" expect =", "We need to test conditions, where variable order _is_ significant", "# time.sleep(.1) # # def tearDown(self): # self.evt.wait() # #", "data=b\"\"): \"\"\"Creates a new temporary file containing the specified data,", "self.assertEqual(result.count('&'), 2, \"Expected 2 '&'s, got %s\" % result.count('&')) def", "with support.temp_dir() as tmpdir: fd, tmpfile = tempfile.mkstemp(dir=tmpdir) os.close(fd) fileurl", "unquote(): %r != %r\" % (expect, result)) expect = given.replace('+',", "args=(self.evt,)).start() # time.sleep(.1) # # def tearDown(self): # self.evt.wait() #", "control.*\\\\n\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() def test_read_0_9(self): # \"0.9\" response accepted", "Test iterator # Don't need to count number of iterations", "(once when # the \"network connection\" is established and once", "not None: opener = urllib.request.FancyURLopener(proxies=proxies) elif not _urlopener: opener =", "# ASCII Encoding. 
Expect %3F with errors=\"replace' given = (('\\u00a0',", "!= %r\" % (expect, result)) # Characters in BMP, Latin-1,", "via a codepath that # calls urllib.parse.quote() on the URL", "200) finally: self.unfakehttp() def test_url_fragment(self): # Issue #11703: geturl() omits", "self.assertRaises(urllib.error.URLError) as e: urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason) def test_file_notexists(self): fd, tmp_file", "have no effect # (Since URIs are not allowed to", "once when the block is # read). report = []", "= \"http://www.python.org/file.txt\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") self.addCleanup(self.unfakehttp) filename, _ = urllib.request.URLopener().retrieve(url)", "%r\" % (expect, result)) # Characters in BMP, encoded by", "= self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, \"readlines() returned the wrong number of", "self.assertEqual(len(lines_list), 1, \"readlines() returned the wrong number of lines\") self.assertEqual(lines_list[0],", "different input types. 'given' must lead to only the pairs:", "close(self): pass self._ftpwrapper_class = urllib.request.ftpwrapper urllib.request.ftpwrapper = FakeFtpWrapper def unfakeftp(self):", "%r != %r\" % (expect, result)) # Characters in BMP,", "0x21)) + [0x7f]: char = chr(char_no) schemeless_url = f\"//localhost{char}/test/\" self.fakehttp(b\"HTTP/1.1", "unquote(): %r != %r\" % (expect, result)) # A mix", "works correctly given = {'sequence':['1', '2', '3']} expect = \"sequence=%s\"", "text) def test_reporthook(self): # Make sure that the reporthook works.", "url _path_ safe. 
escaped_char_repr = repr(char).replace('\\\\', r'\\\\') InvalidURL = http.client.InvalidURL", "def test_url_host_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host = \"localhost\\r\\nX-injected: header\\r\\n\" schemeless_url", "Latin-1 given = \"\\u6f22\\u5b57\" self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given, encoding=\"latin-1\") # Characters", "% (expected, result)) def test_quoting_space(self): # Make sure quote() and", "% (char, hexescape(char), result)) result = urllib.parse.quote_plus(char) self.assertEqual(hexescape(char), result, \"using", "= \"%C2%A2%C3%98ab%C3%BF\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote(): %r", "empty fake environment os.environ = collections.OrderedDict() def tearDown(self): os.environ =", "# socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\",", "import http.client import email.message import io import unittest from unittest.mock", "def test_URLopener_deprecation(self): with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() @unittest.skipUnless(ssl, \"ssl module required\") def", "class FTPWrapperTests(unittest.TestCase): # # def setUp(self): # import ftplib, time,", "self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL) finally: self.unfakehttp() class QuotingTests(unittest.TestCase): r\"\"\"Tests for urllib.quote()", "# Test unquoting on mixed-case hex digits in the percent-escapes", "finally: self.unfakehttp() def test_missing_localfile(self): # Test for #10836 with self.assertRaises(urllib.error.URLError)", "ticket #4608. 
for line in self.returned_obj: self.assertEqual(line, self.text) def test_relativelocalfile(self):", "in k.lower(): self.env.unset(k) def tearDown(self): # Restore all proxy related", "all above in latin-1 encoding given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result", "URL so no match self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp()", "\"ab[]cd\" expected = \"ab%5B%5Dcd\" result = urllib.parse.quote(partial_quote) self.assertEqual(expected, result, \"using", "types. 'given' must lead to only the pairs: * 1st,", "def test_urlencode_encoding_safe_parameter(self): # Send '$' (\\x24) as safe character #", "unquote_plus(): %r != %r\" % (expect, result)) escape_list.append(given) escape_string =", "character US-ASCII hex value>. The Python code of ``'%' +", "original URL. url = 'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: fp", "def unfakehttp(self): http.client.HTTPConnection = self._connection_class class FakeFTPMixin(object): def fakeftp(self): class", "expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\") self.assertEqual(expect, result, \"using quote(): %r", "as specified in # their unique way result = urllib.parse.quote('", "ProxyTests_withOrderedEnv(unittest.TestCase): def setUp(self): # We need to test conditions, where", "support.TESTFN self.returned_obj = urlopen(\"file:%s\" % self.pathname) def tearDown(self): \"\"\"Shut down", "if desired Control characters : 0x00 - 0x1F, 0x7F Have", "self.text) def test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']), self.text) def test_read_image(self): self.assertEqual(self.image_url_resp.read(), self.image)", "# UPPERCASE self.assertTrue(bypass('.localhost')) self.assertTrue(bypass('newdomain.com:1234')) self.assertTrue(bypass('.newdomain.com:1234')) self.assertTrue(bypass('foo.d.o.t')) # issue 
29142 self.assertTrue(bypass('d.o.t'))", "self.assertEqual(expected_url, result, \"pathname2url() failed; %s != %s\" % (result, expected_url))", "self.assertTrue(bypass('.newdomain.com:1234')) self.assertTrue(bypass('foo.d.o.t')) # issue 29142 self.assertTrue(bypass('d.o.t')) self.assertTrue(bypass('anotherdomain.com:8888')) self.assertTrue(bypass('.anotherdomain.com:8888')) self.assertTrue(bypass('www.newdomain.com:1234')) self.assertFalse(bypass('prelocalhost'))", "(expect, result)) # Characters in BMP, encoded with UTF-8 given", "a headers value is returned. result = urllib.request.urlretrieve(\"file:%s\" % support.TESTFN)", "'doseq' parameter works correctly given = {'sequence':['1', '2', '3']} expect", "(test_type, expected, result)) self.assertEqual(result.count('&'), 2, \"testing %s: expected 2 '&'s;", "pixel RGB PNG image with one black and one white", "sure\", \"using_quote\") expect = \"%s/using_quote\" % urllib.parse.quote(\"make sure\") result =", "self.assertRaises(OSError, urllib.request.URLopener().retrieve, url) self.assertRaises(OSError, DummyURLopener().open, url) self.assertRaises(OSError, DummyURLopener().retrieve, url) #", "data}, True)) def test_urlencode_encoding(self): # ASCII encoding. Expect %3F with", "sequence, replace errors result = urllib.parse.unquote(given, errors=\"replace\") self.assertEqual(expect, result, \"using", "((\"\\u00a0\", (42, \"\\u00c1\")),) expect = '%A0=42&%A0=%C1' result = urllib.parse.urlencode(given, True,", "work ok, but on those machines, sometimes # fail in", "\"file:\" + urllib.request.pathname2url(tmpfile) filename, _ = urllib.request.URLopener().retrieve(fileurl) # Some buildbots", "Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') with", "called again # by the tearDown() method for the test", "URL so there is no injection. 
resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(char,", "be escaped \"\"\" def test_never_quote(self): # Make sure quote() does", "BMP, encoded with Latin-1 given = \"\\u6f22\\u5b57\" self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given,", "\"Safe\" non-ASCII characters should have no effect # (Since URIs", "UTF-8 given = \"\\u6f22\\u5b57\" # \"Kanji\" expect = \"%E6%BC%A2%E5%AD%97\" result", "expect = 'br\\u00fcckner_sapporo_20050930.doc' result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote():", "= urllib.parse.quote(partial_quote) self.assertEqual(expected, result, \"using quote(): %r != %r\" %", "time.sleep(.3) # conn.send(\"2 No more lines\\n\") # conn.close() # except", "does not quote letters, digits, and \"_,.-\" do_not_quote = ''", "[\"1st=1\", \"2nd=2\", \"3rd=3\"] result = urllib.parse.urlencode(given) for expected in expect_somewhere:", "= ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0$=%C1$' result = urllib.parse.urlencode(given, doseq=True,", "the # instant it returned anything beyond the first line", "\"3rd\":'3'}, \"using dict as input type\") def test_using_sequence(self): # Test", "except ImportError: ssl = None import sys import tempfile from", "result = urllib.parse.urlencode(given, safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect, result) given", "# For 0x00 - 0x1F should_quote.append(r'<>#%\"{}|\\^[]`') should_quote.append(chr(127)) # For 0x7F", "# Encoding (latin-1) test for quote_plus given = \"\\xa2\\xd8 \\xff\"", "of the file was not \" \"made\") FILE = open(second_temp,", "urllib.request.getproxies_environment() # getproxies_environment use lowered case truncated (no '_proxy') keys", "self.assertIsInstance(self.returned_obj.info(), email.message.Message) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_getcode(self): self.assertIsNone(self.returned_obj.getcode()) def", "raise unittest.SkipTest(\"filePath is not encodable to utf8\") 
return \"file://%s\" %", "urllib.request.urlopen(self.text_url) self.text_url_base64_resp = urllib.request.urlopen( self.text_url_base64) self.image_url_resp = urllib.request.urlopen(self.image_url) def test_interface(self):", "quiet=True): self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'') def test_unquoting_badpercent(self): # Test unquoting", "Decode with UTF-8, invalid sequence given = \"%F3%B1\" expect =", "\"pathname2url() failed; %s != %s\" % (expect, result)) expect =", "bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes(): %r", "result) # Default is UTF-8 encoding. given = (('\\u00a0', '\\u00c1'),)", "def tearDown(self): # Restore all proxy related env vars self.env.__exit__()", "= \"\\u6f22\\u5b57\" # \"Kanji\" result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using", "self.image_url = ( \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\\n\" \"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 \" \"vHgAAAABJRU5ErkJggg%3D%3D%0A%20\") self.text_url_resp = urllib.request.urlopen(self.text_url)", "self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(), self.image_url) def test_read_text(self): self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']), self.text)", "a sequence of two-item sequences as an argument. 
self.help_inputtype([('1st', '1'),", "test_urlopener_retrieve_remote(self): url = \"http://www.python.org/file.txt\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") self.addCleanup(self.unfakehttp) filename, _", "self.assertEqual(expect, result) # Utf-8 given = ((\"\\u00a0\", \"\\u00c1\"),) expect =", "b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\", encoding=\"latin-1\") expect = '%A0$=%C1$' self.assertEqual(expect,", "\"using quote(): %r != %r\" % (result, hexescape(' '))) result", "urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, []) # finally: # socket.setdefaulttimeout(None) #", "'wb') try: f.write(self.text) finally: f.close() self.pathname = support.TESTFN self.returned_obj =", "on reliance on connecting to the Net for testing. \"\"\"", "def test_url_path_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host = \"localhost:7777?a=1 HTTP/1.1\\r\\nX-injected: header\\r\\nTEST:", "files even # when exceptional conditions occur. 
self.tempFiles = []", "\"\"\"Shut down the open object\"\"\" self.returned_obj.close() os.remove(support.TESTFN) def test_interface(self): #", "of the # above attempts at injection within the url", "self.assertEqual(expect, result) # latin-1 given = ((\"\\u00a0\", \"\\u00c1\"),) expect =", "unquote_to_bytes(): %r != %r\" % (expect, result)) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes,", "test_using_sequence(self): # Test passing in a sequence of two-item sequences", "in the Latin-1 range, encoded with UTF-8 given = 'br%C3%BCckner_sapporo_20050930.doc'", "= support.TESTFN self.returned_obj = urlopen(\"file:%s\" % self.pathname) def tearDown(self): \"\"\"Shut", "02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22", "= http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL,", "\"%F3%B1\" expect = \"\" result = urllib.parse.unquote(given, errors=\"ignore\") self.assertEqual(expect, result,", "which is logged as an # \"Exception ignored in\". 
Override", "0 # while cantdata < 13: # data = conn.recv(13-cantdata)", "self.assertEqual('http://somewhere:3128', proxies['http']) class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin): \"\"\"Test urlopen() opening a", "self.assertEqual(b'', self.returned_obj.readline(), \"calling readline() after exhausting the file did not\"", "def test_ftp_cache_pruning(self): self.fakeftp() try: urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21,", "self.env.__exit__() del self.env def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost') proxies = urllib.request.getproxies_environment()", "fd, tmpfile = tempfile.mkstemp(dir=tmpdir) os.close(fd) fileurl = \"file:\" + urllib.request.pathname2url(tmpfile)", "file just gets its own location returned and # a", "characters : letters, digits, and \"-_.!~*'()\" Unreserved and do not", "close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') def _reporthook(par1,", "def __init__(self, user, passwd, host, port, dirs, timeout=None, persistent=True): pass", "self.assertEqual(self.image_url_resp.read(), self.image) def test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def test_invalid_base64_data(self): # missing padding", "\"localhost\", 9093, [], # timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close()", "about order. 
Docs make no guarantee and have possible dictionary", "# ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, []) # ftp.close()", "# Test lowercase preference of proxy bypass and correct matching", "result, \"using quote_plus(): %r != %r\" % (expected, result)) def", "for the test self.returned_obj.close() def test_info(self): self.assertIsInstance(self.returned_obj.info(), email.message.Message) def test_geturl(self):", "13: # data = conn.recv(13-cantdata) # cantdata += len(data) #", "result, \"using quote(): %r != %r\" % (expect, result)) result", "result, \"using quote(): %r != %r\" % (quote_by_default, result)) #", "variable order _is_ significant self._saved_env = os.environ # Monkey patch", "missing padding character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class urlretrieve_FileTests(unittest.TestCase): \"\"\"Test urllib.urlretrieve() on local", "urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin): \"\"\"Test urlopen() opening", "expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) # Latin-1", "fileurl = \"file:\" + urllib.request.pathname2url(tmpfile) filename, _ = urllib.request.URLopener().retrieve(fileurl) #", "safe=\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\") self.assertEqual(expect, result, \"using quote():", "self.assertEqual(hexescape(char), result, \"using quote(): \" \"%s should be escaped to", "chr(char_no) schemeless_url = f\"//localhost:7777/test{char}/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: # We", "email.message import io import unittest from unittest.mock import patch from", "Problem: flush() calls self.fp.flush() which raises # \"ValueError: I/O operation", "Make sure object returned by urlopen() has the specified methods", "List of no_proxies with space. 
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com'))", "\"ssl module required\") def test_url_path_with_control_char_rejected(self): for char_no in list(range(0, 0x21))", "unquote(): %r != %r\" % (expect, result)) given = '%'", "it be called again # by the tearDown() method for", "test_ftp_url = 'ftp:///path' with self.assertRaises(urllib.error.URLError) as e: urlopen(test_ftp_url) self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason)", "the file did not\" \" return an empty string\") def", "self.tempFiles: try: os.remove(each) except: pass def constructLocalFileUrl(self, filePath): filePath =", "# # global default timeout is ignored # import socket", "\"\"\"Test urllib.urlretrieve() on local files\"\"\" def setUp(self): # Create a", "self.assertEqual(expect, result) # Sequence of values given = ((b'\\xa0\\x24', (42,", "test_invalid_redirect(self): # urlopen() should raise OSError for many error codes.", "that # calls urllib.parse.quote() on the URL which makes all", "\"\"\"Helper method for testing different input types. 
'given' must lead", "\"0%s\" % hex_repr return \"%\" + hex_repr # Shortcut for", "urllib.parse.quote(\"make sure\") result = urllib.request.pathname2url(given) self.assertEqual(expect, result, \"pathname2url() failed; %s", "is established, once for the next 8192 # bytes, and", "%s: %s not found in %s\" % (test_type, expected, result))", "url 'file:\" with self.assertRaisesRegex(urllib.error.HTTPError, msg): urlopen(\"http://python.org/\") finally: self.unfakehttp() def test_redirect_limit_independent(self):", "BytesWarning), quiet=True): self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'') def test_unquoting_badpercent(self): # Test", "Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try: self.assertRaises(OSError, urlopen, \"http://python.org/\") finally:", "# Test passing in a sequence of two-item sequences as", "self.assertEqual(expect, result, 'urllib.request..url2pathname() failed; %s != %s' % (expect, result))", "%s: expected 2 '&'s; got %s\" % (test_type, result.count('&'))) amp_location", "self.assertTrue(bypass('anotherdomain.com:8888')) self.assertTrue(bypass('.anotherdomain.com:8888')) self.assertTrue(bypass('www.newdomain.com:1234')) self.assertFalse(bypass('prelocalhost')) self.assertFalse(bypass('newdomain.com')) # no port self.assertFalse(bypass('newdomain.com:1235')) #", "a bytes as input, with unescaped non-ASCII bytes # (Technically", "those machines, sometimes # fail in one of the tests,", "class.\"\"\" def test_quoted_open(self): class DummyURLopener(urllib.request.URLopener): def open_spam(self, url): return url", "= urllib.parse.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote(): %r != %r\"", "for the next 8192 # bytes, and once for the", "= b\"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result = urllib.parse.quote(given) self.assertEqual(expect, result,", "return an empty string\") def test_readlines(self): lines_list = self.returned_obj.readlines() 
self.assertEqual(len(lines_list),", "\"using unquote_to_bytes(): %r != %r\" % (expect, result)) self.assertRaises((TypeError, AttributeError),", "= b'%A2%D8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result,", "'3')], \"using sequence of two-item tuples as input\") def test_quoting(self):", "('2nd', '2'), ('3rd', '3')], \"using sequence of two-item tuples as", "self.assertEqual(lines_list[0], self.text, \"readlines() returned improper text\") def test_fileno(self): file_num =", "raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 302 Found Date:", "try: import ssl except ImportError: ssl = None import sys", "developer to properly close files even # when exceptional conditions", "given = ((\"\\u00a0\", (1, \"\\u00c1\")),) expect = '%3F=1&%3F=%3F' result =", "urllib.request.Request request = Request(\"http://www.python.org\", method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request", "with UTF-8 given = \"%E6%BC%A2%E5%AD%97\" expect = \"\\u6f22\\u5b57\" # \"Kanji\"", "character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class urlretrieve_FileTests(unittest.TestCase): \"\"\"Test urllib.urlretrieve() on local files\"\"\" def", "= opener else: opener = _urlopener if data is None:", "200 OK Date: Wed, 02 Jan 2008 03:03:54 GMT Server:", "test_nonstring_seq_values(self): self.assertEqual(\"a=1&a=2\", urllib.parse.urlencode({\"a\": [1, 2]}, True)) self.assertEqual(\"a=None&a=a\", urllib.parse.urlencode({\"a\": [None, \"a\"]},", "= b'%A2\\xd8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result,", "self.unfakehttp() def test_userpass_inurl_w_spaces(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: userpass = \"<PASSWORD>\"", "# import ftplib, time, threading # ftplib.FTP.port = 9093 #", "url) # Just commented them out. 
# Can't really tell", "urllib.request.url2pathname(expected_url) self.assertEqual(expected_path, result, \"url2pathame() failed; %s != %s\" % (result,", ") class urlopen_DataTests(unittest.TestCase): \"\"\"Test urlopen() opening a data URL.\"\"\" def", "%r != %r\" % (expect, result)) escape_list.append(given) escape_string = ''.join(escape_list)", "a lowercase drive letter. self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile)) @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_remote(self): url", "= \"are+there+spaces...\" expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result,", "result)) del should_quote partial_quote = \"ab[]cd\" expected = \"ab%5B%5Dcd\" result", "b\"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using", "write it as '%' + <2 character US-ASCII hex value>.", "\"%s should be escaped to %s, not %s\" % (char,", "email.message.Message instance \" \"as second returned value\") def test_copy(self): #", "a list of temporary files. Each item in the list", "help! # . 
Facundo # # def server(evt): # import", "= FILE.read() FILE.close() finally: try: FILE.close() except: pass self.assertEqual(self.text, text)", "urllib.request.pathname2url(expected_path) self.assertEqual(expected_url, result, \"pathname2url() failed; %s != %s\" % (result,", "def test_redirect_limit_independent(self): # Ticket #12923: make sure independent requests each", "Same as above, but using a bytes rather than str", "self.assertEqual(quote_by_default, result, \"using quote(): %r != %r\" % (quote_by_default, result))", "% (expect, result)) # Characters in the Latin-1 range, encoded", "%s\" % (expect, result)) given = \"make+sure/using_unquote\" expect = os.path.join(\"make+sure\",", "\"%\" + hex_repr # Shortcut for testing FancyURLopener _urlopener =", "= urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) for value", "(expect, result)) # Characters in BMP, Latin-1, with xmlcharref error", "instance \" \"as second returned value\") def test_copy(self): # Test", "self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'), 'alpha+beta+gamma') def test_quote_bytes(self): # Bytes should quote", "characters are not quoted by urlopen self.assertEqual(DummyURLopener().open( \"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/\"), \"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/\") @support.ignore_warnings(category=DeprecationWarning)", "to percent-encoded values given = b\"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result", "testing. \"\"\" def setUp(self): # Create a temp file to", "The various character sets specified are: Reserved characters : \";/?:@&=+$,\"", "Case does not matter on the hex letters. 
The various", "control.*{escaped_char_repr}\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_host_with_newline_header_injection_rejected(self):", "% (hexescape('&'), hexescape('=')) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) given =", "8192 # bytes, and once for the last byte). report", "\"Redirection to url 'file:\" with self.assertRaisesRegex(urllib.error.HTTPError, msg): urlopen(\"http://python.org/\") finally: self.unfakehttp()", "self.assertIsInstance(urllib.request.thishost(), tuple) class URLopener_Tests(FakeHTTPMixin, unittest.TestCase): \"\"\"Testcase to test the open", "fake environment os.environ = collections.OrderedDict() def tearDown(self): os.environ = self._saved_env", "(5 * 3) + 2, #5 chars per thing and", "= 'ftp:///path' with self.assertRaises(urllib.error.URLError) as e: urlopen(test_ftp_url) self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) def", "responses\" without # a status line) self.check_read(b\"0.9\") def test_read_1_0(self): self.check_read(b\"1.0\")", "%r\" % (result, hexescape(' '))) result = urllib.parse.quote_plus(' ') self.assertEqual(result,", "in Latin-1 range, encoded with Latin-1 given = \"\\xa2\\xd8ab\\xff\" expect", "% (expect, result)) # Characters in BMP, encoded with UTF-8", "200 OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\")", "\".txt\") @support.ignore_warnings(category=DeprecationWarning) def test_local_file_open(self): # bpo-35907, CVE-2019-9948: urllib must reject", "image with one black and one white pixel self.image =", "\"fileno\", \"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.returned_obj, attr), \"object returned", "# level 'def urlopen()' function defined in this... 
(quite ugly)", "BMP, encoded with Latin-1, with replace error handling given =", "type): return io.BytesIO(), 0 def close(self): pass self._ftpwrapper_class = urllib.request.ftpwrapper", "bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', '*') self.assertTrue(bypass('newdomain.com')) self.assertTrue(bypass('newdomain.com:1234')) self.env.set('NO_PROXY', '*, anotherdomain.com')", "self.unfakehttp() def test_URLopener_deprecation(self): with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() @unittest.skipUnless(ssl, \"ssl module required\")", "%r != %r\" % (do_not_quote, result)) result = urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote,", "test_default_safe(self): # Test '/' is default value for 'safe' parameter", "%s' % (expect, result)) class Utility_Tests(unittest.TestCase): \"\"\"Testcase to test the", "pass def tearDown(self): # Delete the temporary files. for each", "# Make sure that the reporthook works. def hooktester(block_count, block_read_size,", "result = urllib.parse.unquote(\"\\u6f22%C3%BC\") expect = '\\u6f22\\u00fc' self.assertEqual(expect, result, \"using unquote():", "\"Kanji\" expect = \"%E6%BC%A2%E5%AD%97\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using", "file was not \" \"made\") FILE = open(second_temp, 'rb') try:", "file.\"\"\" newFd, newFilePath = tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath) newFile = os.fdopen(newFd,", "value\") def test_copy(self): # Test that setting the filename argument", "urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', '*') self.assertTrue(bypass('newdomain.com')) self.assertTrue(bypass('newdomain.com:1234')) self.env.set('NO_PROXY', '*, anotherdomain.com') self.assertTrue(bypass('anotherdomain.com')) self.assertFalse(bypass('newdomain.com'))", "\" # 2x1 pixel RGB PNG image with one black", "self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, \"readlines() returned the 
wrong number of lines\")", "tmp_file = tempfile.mkstemp() tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/') try:", "+ self.pathname) class ProxyTests(unittest.TestCase): def setUp(self): # Records changes to", "in self.tempFiles: try: os.remove(each) except: pass def constructLocalFileUrl(self, filePath): filePath", "a character you write it as '%' + <2 character", "with bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'), 'alpha%2Bbeta+gamma') # Test with safe bytes", "result) # Sequence of values given = ((b'\\xa0\\x24', (42, b'\\xc1\\x24')),)", "unquote(): %r != %r\" % (expect, result)) def test_unquoting_with_bytes_input(self): #", "= urllib.request.urlopen(self.image_url) def test_interface(self): # Make sure object returned by", "result)) expect = given result = urllib.request.url2pathname(result) self.assertEqual(expect, result, \"url2pathname()", "self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_userpass_inurl_w_spaces(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try:", "and unquote_plus() See the doc string for quoting_Tests for details", "the URL which makes all of the # above attempts", "no injection. resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(' ', resp.geturl()) self.assertNotIn('\\r', resp.geturl())", "%r\" % (quote_by_default, result)) # Safe expressed as bytes rather", "Safe expressed as bytes rather than str result = urllib.parse.quote(quote_by_default,", "def test_reporthook_5_bytes(self): # Test on 5 byte file. 
Should call", "test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual(b'', self.returned_obj.readline(), \"calling", "errors=\"xmlcharrefreplace\") self.assertEqual(expect, result, \"using quote(): %r != %r\" % (expect,", "Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type:", "# # def testTimeoutValue(self): # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\",", "characters) result = urllib.parse.unquote(\"\\u6f22%FC\", encoding=\"latin-1\") expect = '\\u6f22\\u00fc' self.assertEqual(expect, result,", "list_of_paths = ['///C:', '/////folder/test/', '///C:/foo/bar/spam.foo'] for path in list_of_paths: self.assertEqual(pathname2url(url2pathname(path)),", "input self.assertRaises(TypeError, urllib.parse.quote, given, encoding=\"latin-1\") # quote_from_bytes should work the", "in URL so no match self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(), 200) finally:", "GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Type:", "urlopen() opening a data URL.\"\"\" def setUp(self): # text containing", "self._saved_env def test_getproxies_environment_prefer_lowercase(self): # Test lowercase preference with removal os.environ['no_proxy']", "sometimes # fail in one of the tests, sometimes in", "on the URL which makes all of the # above", "r'\\\\') InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"http:{schemeless_url}\")", "self.assertEqual(\"\", urllib.parse.urlencode([])) def test_nonstring_values(self): self.assertEqual(\"a=1\", urllib.parse.urlencode({\"a\": 1})) self.assertEqual(\"a=None\", urllib.parse.urlencode({\"a\": None}))", "try: self.assertTrue(os.path.exists(tmp_file)) with urlopen(tmp_fileurl) as fobj: self.assertTrue(fobj) 
finally: os.close(fd) os.unlink(tmp_file)", "#10836 with self.assertRaises(urllib.error.URLError) as e: urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason) def test_file_notexists(self):", "\"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.text_url_resp, attr), \"object returned by", "length=None): if self.closed: return b\"\" return io.BytesIO.readline(self, length) def close(self):", "and # a headers value is returned. result = urllib.request.urlretrieve(\"file:%s\"", "def test_basic(self): # Make sure that a local file just", "pathname2url, \"XX:\\\\\") def test_roundtrip_pathname2url(self): list_of_paths = ['///C:', '/////folder/test/', '///C:/foo/bar/spam.foo'] for", "\" \"vHgAAAABJRU5ErkJggg%3D%3D%0A%20\") self.text_url_resp = urllib.request.urlopen(self.text_url) self.text_url_base64_resp = urllib.request.urlopen( self.text_url_base64) self.image_url_resp", "bad percent-escapes given = '%xab' expect = given result =", "(expect, result)) # unquote_to_bytes given = '%xab' expect = bytes(given,", "'proxy' in k.lower(): self.env.unset(k) def tearDown(self): # Restore all proxy", "def sendall(self, data): FakeHTTPConnection.buf = data def makefile(self, *args, **kwds):", "http.client.HTTPConnection http.client.HTTPConnection = fake_http_class def unfakehttp(self): http.client.HTTPConnection = self._connection_class class", "self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1], email.message.Message, \"did not get an email.message.Message instance", "resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(' ', resp.geturl()) self.assertNotIn('\\r', resp.geturl()) self.assertNotIn('\\n', resp.geturl())", "urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code path", "urllib.parse.quote(char) 
self.assertEqual(hexescape(char), result, \"using quote(): \" \"%s should be escaped", "'spam://example/ /'),'//example/%20/') # test the safe characters are not quoted", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) # serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #", "out. # Can't really tell why keep failing in windows", "\"using unquote_plus(): %r != %r\" % (expect, result)) def test_unquote_to_bytes(self):", "urllib.request.urlopen(url) self.assertEqual(fp.geturl(), url) finally: self.unfakehttp() def test_willclose(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\")", "+ hex(ord(<character>))[2:]`` escapes a character properly. Case does not matter", "temporary file. self.registerFileForCleanUp(support.TESTFN) self.text = b'testing urllib.urlretrieve' try: FILE =", "quote_plus(): %r != +\" % result) given = \"a b", "# a status line) self.check_read(b\"0.9\") def test_read_1_0(self): self.check_read(b\"1.0\") def test_read_1_1(self):", "test_reporthook(self): # Make sure that the reporthook works. def hooktester(block_count,", "unittest.SkipTest(\"filePath is not encodable to utf8\") return \"file://%s\" % urllib.request.pathname2url(filePath)", "self.assertRaises(OSError, urlopen, \"http://something\") finally: self.unfakehttp() def test_missing_localfile(self): # Test for", "b'\\xe6\\xbc\\xa2\\xc3\\xbc' # UTF-8 for \"\\u6f22\\u00fc\" self.assertEqual(expect, result, \"using unquote_to_bytes(): %r", "tests for what was in Python 2's \"urllib\" module\"\"\" import", "= http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL,", "bytes as input given = b'%A2%D8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result", "# ASCII Encoding. On a sequence of values. 
given =", "Request = urllib.request.Request request = Request(\"http://www.python.org\", method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(),", "= 'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: fp = urllib.request.urlopen(url) self.assertEqual(fp.geturl(),", "test_url_host_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host = \"localhost\\r\\nX-injected: header\\r\\n\" schemeless_url =", "= urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # Sequence of values given", "test_roundtrip_pathname2url(self): list_of_paths = ['///C:', '/////folder/test/', '///C:/foo/bar/spam.foo'] for path in list_of_paths:", "\"\" result = urllib.parse.unquote(given, errors=\"ignore\") self.assertEqual(expect, result, \"using unquote(): %r", "self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"https:{schemeless_url}\")", "# Bytes not supported yet with self.assertRaisesRegex(TypeError, 'Expected str, got", "'%' + <2 character US-ASCII hex value>. 
The Python code", "( \"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs\" \"sJT0mIPYgxCA%3D\") # base64 encoded data URL that contains", "['C:', r'\\\\\\C\\test\\\\', r'C:\\foo\\bar\\spam.foo' ] for path in list_of_paths: self.assertEqual(url2pathname(pathname2url(path)), path)", "urllib.urlretrieve' try: FILE = open(support.TESTFN, 'wb') FILE.write(self.text) FILE.close() finally: try:", "close ''', mock_close=True) try: self.assertRaises(urllib.error.HTTPError, urlopen, \"http://something\") finally: self.unfakehttp() def", "\"2nd\":'2', \"3rd\":'3'}, \"using dict as input type\") def test_using_sequence(self): #", "= urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t') self.assertTrue(bypass('localhost')) self.assertTrue(bypass('LocalHost')) #", "urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN), second_temp) self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp), \"copy of the file", "function defined in this... (quite ugly) # test suite. They", "# \"??\" result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using", "utf8\") return \"file://%s\" % urllib.request.pathname2url(filePath) def createNewTempFile(self, data=b\"\"): \"\"\"Creates a", "+ 2)) def test_using_mapping(self): # Test passing in a mapping", "\"made\") FILE = open(second_temp, 'rb') try: text = FILE.read() FILE.close()", "((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect,", "quote() and quote_plus() handle spaces as specified in # their", "passing in a mapping object as an argument. 
self.help_inputtype({\"1st\":'1', \"2nd\":'2',", "\"using quote(): %r != %r\" % (expect, result)) # Characters", "'%A0$=%C1$' self.assertEqual(expect, result) # Safe parameter in sequence given =", "# ftp.close() # # def testTimeoutNone(self): # # global default", "all characters escaped: \" \"%s\" % result) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote,", "def setUp(self): # Records changes to env vars self.env =", "def setUp(self): # Create a temp file to use for", "For 0x00 - 0x1F should_quote.append(r'<>#%\"{}|\\^[]`') should_quote.append(chr(127)) # For 0x7F should_quote", "FancyURLopener(): with support.check_warnings( ('FancyURLopener style of invoking requests is deprecated.',", "pass self.assertEqual(self.text, text) def test_reporthook(self): # Make sure that the", "'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes(): %r !=", "{\"key name\":\"A bunch of pluses\"} expect = \"key+name=A+bunch+of+pluses\" result =", "result)) # Decode with UTF-8, invalid sequence, replace errors result", "self.check_read(b\"0.9\") def test_read_1_0(self): self.check_read(b\"1.0\") def test_read_1_1(self): self.check_read(b\"1.1\") def test_read_bogus(self): #", "in %s\" % (test_type, result)) self.assertEqual(len(result), (5 * 3) +", "with self.assertRaises(ValueError): urllib.request.urlopen( \"https://localhost\", cafile=\"/nonexistent/path\", context=context ) class urlopen_DataTests(unittest.TestCase): \"\"\"Test", "test import support import os try: import ssl except ImportError:", "try: userpass = \"<PASSWORD>\" url = \"http://{}@python.org/\".format(userpass) fakehttp_wrapper = http.client.HTTPConnection", "(Technically an invalid URI; expect those characters to be UTF-8", "\" \"made\") FILE = open(second_temp, 'rb') try: text = FILE.read()", "%r\" % (expect, result)) given = '%x' expect = given", "expressed as bytes rather than str result = urllib.parse.quote(quote_by_default, 
safe=b\"<>\")", "= urllib.parse.quote(quote_by_default, safe=b\"<>\") self.assertEqual(quote_by_default, result, \"using quote(): %r != %r\"", "= urlopen(url) # The authorization header must be in place", "0) def test_reporthook_5_bytes(self): # Test on 5 byte file. Should", "None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ()) with support.check_warnings(('', BytesWarning), quiet=True): self.assertRaises((TypeError,", "1 * 2nd, 2 * 3rd, 3 Test cannot assume", "Facundo # # def server(evt): # import socket, time #", "# Same as above, but using a bytes rather than", "fixture tear down, and returns the absolute path of the", "test_url_path_with_control_char_rejected(self): for char_no in list(range(0, 0x21)) + [0x7f]: char =", "opening a data URL.\"\"\" def setUp(self): # text containing URL", "sure that a local file just gets its own location", "unquote_plus() given = \"are+there+spaces...\" expect = given result = urllib.parse.unquote(given)", "test_quote_plus_with_unicode(self): # Encoding (latin-1) test for quote_plus given = \"\\xa2\\xd8", "range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1 302 Found Location: file://guidocomputer.athome.com:/python/license Connection: close ''', mock_close=True)", "(result, expected_path)) def test_quoting(self): # Test automatic quoting and unquoting", "partial_quote = \"ab[]cd\" expected = \"ab%5B%5Dcd\" result = urllib.parse.quote(partial_quote) self.assertEqual(expected,", "%s != %s\" % (expect, result)) expect = given result", "quote(): %r != %r\" % (expect, result)) def test_quote_plus_with_unicode(self): #", "list is a file # name (absolute path or relative", "\"of\", \"a\", \"path\") expected_url = \"parts/of/a/path\" result = urllib.request.pathname2url(expected_path) self.assertEqual(expected_url,", "\"here\") expect = \"needs/%s/here\" % urllib.parse.quote(\"quot=ing\") result = urllib.request.pathname2url(given) self.assertEqual(expect,", "\"sequence=%s\" % value 
self.assertIn(expect, result) self.assertEqual(result.count('&'), 2, \"Expected 2 '&'s,", "self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) @patch.object(urllib.request, 'MAXFTPCACHE', 0) def test_ftp_cache_pruning(self): self.fakeftp() try: urllib.request.ftpcache['test']", "self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234')) self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) # Test lowercase preference with replacement os.environ['http_proxy']", "all proxy related env vars self.env.__exit__() del self.env def test_getproxies_environment_keep_no_proxies(self):", "(separate test for that). should_quote = [chr(num) for num in", "\"path\") expected_url = \"parts/of/a/path\" result = urllib.request.pathname2url(expected_path) self.assertEqual(expected_url, result, \"pathname2url()", "GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license Connection: close Content-Type: text/html; charset=iso-8859-1", "InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code path quotes the", "urllib.request.urlopen( \"https://localhost\", cafile=\"/nonexistent/path\", context=context ) class urlopen_DataTests(unittest.TestCase): \"\"\"Test urlopen() opening", "RFC 3986 (Uniform Resource Identifiers), to escape a character you", "of the developer to properly close files even # when", "the string contains non-Latin-1-representable characters) result = urllib.parse.unquote(\"\\u6f22%FC\", encoding=\"latin-1\") expect", "help_inputtype(self, given, test_type): \"\"\"Helper method for testing different input types.", "and # url2pathname() respectively given = os.path.join(\"needs\", \"quot=ing\", \"here\") expect", "not quoted by urlopen self.assertEqual(DummyURLopener().open( \"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/\"), 
\"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/\") @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_file(self):", "test_quoting(self): # Make sure keys and values are quoted using", "= urllib.parse.quote(char) self.assertEqual(hexescape(char), result, \"using quote(): \" \"%s should be", "support.check_warnings( ('DummyURLopener style of invoking requests is deprecated.', DeprecationWarning)): self.assertEqual(DummyURLopener().open(", "not get an email.message.Message instance \" \"as second returned value\")", "(expect, result)) # Decode with UTF-8, invalid sequence, replace errors", "= urllib.parse.urlencode(given) self.assertEqual(expect, result) # Latin-1 encoding. given = (('\\u00a0',", "URLopener_Tests(FakeHTTPMixin, unittest.TestCase): \"\"\"Testcase to test the open method of URLopener", "a new temporary file containing the specified data, registers the", "os.environ['no_proxy'] = '' os.environ['No_Proxy'] = 'localhost' self.assertFalse(urllib.request.proxy_bypass_environment('localhost')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) os.environ['http_proxy'] =", "= \"make+sure/using_unquote\" expect = os.path.join(\"make+sure\", \"using_unquote\") result = urllib.request.url2pathname(given) self.assertEqual(expect,", "finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_host_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200", "resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(char, resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module", "self.registerFileForCleanUp(support.TESTFN) self.text = b'testing urllib.urlretrieve' try: FILE = open(support.TESTFN, 'wb')", "('charset', 'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image))) self.assertEqual(urllib.request.urlopen(\"data:,\").info().get_params(), [('text/plain', ''), ('charset', 'US-ASCII')]) def", "an 
invalid URI; expect those characters to be UTF-8 #", "2, \"testing %s: expected 2 '&'s; got %s\" % (test_type,", "def read(self, amt=None): if self.closed: return b\"\" return io.BytesIO.read(self, amt)", "Everywhere else they work ok, but on those machines, sometimes", "302 Found Date: Wed, 02 Jan 2008 03:03:54 GMT Server:", "class urlopen_DataTests(unittest.TestCase): \"\"\"Test urlopen() opening a data URL.\"\"\" def setUp(self):", "range, encoded by with None (default) result = urllib.parse.quote(given, encoding=None,", "open(support.TESTFN, 'wb') FILE.write(self.text) FILE.close() finally: try: FILE.close() except: pass def", "be escaped if not being used for their special meaning", "self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\", method='GET') self.assertEqual(request.get_method(), 'GET') request.method =", "self.env.set('NO_PROXY', '*') self.assertTrue(bypass('newdomain.com')) self.assertTrue(bypass('newdomain.com:1234')) self.env.set('NO_PROXY', '*, anotherdomain.com') self.assertTrue(bypass('anotherdomain.com')) self.assertFalse(bypass('newdomain.com')) self.assertFalse(bypass('newdomain.com:1234'))", "%r != %r\" % (expect, result)) expect = given.replace('+', '", "def test_safe(self): # Test setting 'safe' parameter does what it", "are not quoted by urlopen self.assertEqual(DummyURLopener().open( \"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/\"), \"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/\") @support.ignore_warnings(category=DeprecationWarning) def", "for i in range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1 302 Found Location: file://guidocomputer.athome.com:/python/license Connection:", "import os try: import ssl except ImportError: ssl = None", "attr) def test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual(b'',", "be escaped space : 0x20 Must be escaped Delimiters :", "unescaped 
non-ASCII bytes # (Technically an invalid URI; expect those", "urllib.request.FancyURLopener(proxies=proxies) elif not _urlopener: opener = FancyURLopener() _urlopener = opener", "method='GET') self.assertEqual(request.get_method(), 'GET') request.method = 'HEAD' self.assertEqual(request.get_method(), 'HEAD') class URL2PathNameTests(unittest.TestCase):", "= urllib.parse.urlencode(given) self.assertEqual(expect, result) def test_doseq(self): # Test that passing", "expect = \"sequence=%s\" % value self.assertIn(expect, result) self.assertEqual(result.count('&'), 2, \"Expected", "* 5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 2) self.assertEqual(report[0][2], 5) self.assertEqual(report[1][2],", "= 'file://localhost/' + tmp_file.replace(os.path.sep, '/') try: self.assertTrue(os.path.exists(tmp_file)) with urlopen(tmp_fileurl) as", "\"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result = urllib.parse.quote(given, encoding=\"latin-1\") self.assertEqual(expect, result,", "is no injection. resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(' ', resp.geturl()) self.assertNotIn('\\r',", "!= %r\" % (expect, result)) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None) self.assertRaises((TypeError,", "no effect # (Since URIs are not allowed to have", "unittest.TestCase): \"\"\"Testcase to test the open method of URLopener class.\"\"\"", "matching including ports os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234' os.environ['No_Proxy'] =", "self.assertRaises(OSError, DummyURLopener().open, url) self.assertRaises(OSError, DummyURLopener().retrieve, url) # Just commented them", "'%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) given = ((\"\\u00a0\",", "url) self.assertRaises(OSError, DummyURLopener().retrieve, url) # Just commented them out. 
#", "host + \":8080/test/?test=a\" try: # We explicitly test urllib.request.urlopen() instead", "self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # Sequence", "result.count('&')) def test_empty_sequence(self): self.assertEqual(\"\", urllib.parse.urlencode({})) self.assertEqual(\"\", urllib.parse.urlencode([])) def test_nonstring_values(self): self.assertEqual(\"a=1\",", "the next 8192 # bytes, and once for the last", "\"ValueError: I/O operation on closed file\" which is logged as", "\\u00c4 \" # 2x1 pixel RGB PNG image with one", "self.assertFalse(bypass('prelocalhost')) self.assertFalse(bypass('newdomain.com')) # no port self.assertFalse(bypass('newdomain.com:1235')) # wrong port def", "# while cantdata < 13: # data = conn.recv(13-cantdata) #", "# Make sure quote() does not quote letters, digits, and", "\"ssl module required\") def test_url_host_with_control_char_rejected(self): for char_no in list(range(0, 0x21))", "result = urllib.request.url2pathname(url) self.assertEqual(expect, result, 'urllib.request..url2pathname() failed; %s != %s'", "0x1F, 0x7F Have no use in URIs so must be", "(expect, result)) escape_list.append(given) escape_string = ''.join(escape_list) del escape_list result =", "padding character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class urlretrieve_FileTests(unittest.TestCase): \"\"\"Test urllib.urlretrieve() on local files\"\"\"", "pathname2url() and url2pathname()\"\"\" def test_basic(self): # Make sure simple tests", "serv.listen() # try: # conn, addr = serv.accept() # conn.send(\"1", "connections\"\"\" def test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan", "result, \"testing %s: %s not found in %s\" % (test_type,", "should be quoted are by default sans # space (separate", "def setUp(self): # import ftplib, time, threading # ftplib.FTP.port =", "resp = urlopen(\"http://www.python.org\") 
self.assertTrue(resp.fp.will_close) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\")", "self.unfakehttp() def test_redirect_limit_independent(self): # Ticket #12923: make sure independent requests", "result, \"using unquote_plus(): %r != %r\" % (expect, result)) def", "# ASCII encoding. Expect %3F with errors=\"replace' given = (('\\u00a0',", "as \"\\n\", \" \", \"%0A\", and \"%20\". self.image_url = (", "tests, sometimes in other. I have a linux, and #", "latin-1 given = ((\"\\u00a0\", \"\\u00c1\"),) expect = '%A0=%C1' result =", "def test_url_path_with_control_char_rejected(self): for char_no in list(range(0, 0x21)) + [0x7f]: char", "expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote():", "30) # ftp.close() class RequestTests(unittest.TestCase): \"\"\"Unit tests for urllib.request.Request.\"\"\" def", "= os.fdopen(newFd, \"wb\") newFile.write(data) newFile.close() finally: try: newFile.close() except: pass", "int\") self.assertEqual(os.read(file_num, len(self.text)), self.text, \"Reading on the file descriptor returned", "registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def test_basic(self): # Make sure that a", "should raise OSError for many error codes. self.fakehttp(b'''HTTP/1.1 302 Found", "as possible so as to cut down on reliance on", "Test that passing True for 'doseq' parameter works correctly given", "b\"\" return io.BytesIO.read(self, amt) def readline(self, length=None): if self.closed: return", "Found Location: file://guidocomputer.athome.com:/python/license Connection: close ''', mock_close=True) try: self.assertRaises(urllib.error.HTTPError, urlopen,", "class urlopen_FileTests(unittest.TestCase): \"\"\"Test urlopen() opening a temporary file. Try to", "details on quoting and such. 
\"\"\" def test_unquoting(self): # Make", "attr), \"object returned by urlopen() lacks %s attribute\" % attr)", "self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ()) def test_unquoting_mixed_case(self):", "def test_with_method_arg(self): Request = urllib.request.Request request = Request(\"http://www.python.org\", method='HEAD') self.assertEqual(request.method,", "space : 0x20 Must be escaped Delimiters : '<>#%\"' Must", "urlopen() raises OSError if the underlying socket does not send", "self.assertEqual(pathname2url(\"C:\"), '///C:') self.assertEqual(pathname2url(\"C:\\\\\"), '///C:') def test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r\"\\\\\\folder\\test\" \"\\\\\"), '/////folder/test/') self.assertEqual(pathname2url(r\"\\\\folder\\test\"", "cantdata += len(data) # time.sleep(.3) # conn.send(\"2 No more lines\\n\")", "attempts at injection within the url _path_ safe. InvalidURL =", "data. (#1680230) self.fakehttp(b'') try: self.assertRaises(OSError, urlopen, \"http://something\") finally: self.unfakehttp() def", "self.assertEqual(\"\", urllib.parse.urlencode({})) self.assertEqual(\"\", urllib.parse.urlencode([])) def test_nonstring_values(self): self.assertEqual(\"a=1\", urllib.parse.urlencode({\"a\": 1})) self.assertEqual(\"a=None\",", "= 'http://somewhere:3128' proxies = urllib.request.getproxies_environment() self.assertEqual({}, proxies) # Test lowercase", "returned by urlopen() has the specified methods for attr in", "error codes. 
self.fakehttp(b'''HTTP/1.1 302 Found Date: Wed, 02 Jan 2008", "path of the file.\"\"\" newFd, newFilePath = tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath)", "urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, []) urlopen('ftp://localhost') finally: self.unfakeftp() def test_userpass_inurl(self):", "(char, hexescape(char), result)) result = urllib.parse.quote_plus(char) self.assertEqual(hexescape(char), result, \"using quote_plus():", "= ((\"\\u00a0\", \"\\u00c1\"),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True)", "mix of non-ASCII and percent-encoded characters, Latin-1 # (Note, the", "their # own retry limit. for i in range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1", "use their # own retry limit. for i in range(FancyURLopener().maxtries):", "with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, f\"contain control.*{escaped_char_repr}\"):", "hexescape(chr(num)) expect = chr(num) result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using", "use in URIs so must be escaped space : 0x20", "= Request(\"http://www.python.org\") self.assertEqual(request.get_method(), 'GET') request = Request(\"http://www.python.org\", {}) self.assertEqual(request.get_method(), 'POST')", "then having it be called again # by the tearDown()", "UnquotingTests(unittest.TestCase): \"\"\"Tests for unquote() and unquote_plus() See the doc string", "filename, _ = urllib.request.URLopener().retrieve(fileurl) # Some buildbots have TEMP folder", "finally: self.unfakehttp() class QuotingTests(unittest.TestCase): r\"\"\"Tests for urllib.quote() and urllib.quote_plus() According", "to test the various utility functions in the urllib.\"\"\" def", "module required\") def test_url_path_with_control_char_rejected(self): for char_no in list(range(0, 0x21)) +", "= urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, 
[]) urlopen('ftp://localhost') finally: self.unfakeftp() def", "\"using unquote_to_bytes(): %r != %r\" % (expect, result)) given =", "= Request(\"http://www.python.org\", method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\",", "# serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # serv.bind((\"\", 9093)) # serv.listen() #", "!= %r\" % (expect, result)) # A mix of non-ASCII", "= os.environ # Monkey patch os.environ, start with empty fake", "401 Authentication Required Date: Wed, 02 Jan 2008 03:03:54 GMT", "self.assertIn(authorization, fakehttp_wrapper.buf.decode(\"UTF-8\")) self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") # the spaces are", "\"are+there+spaces...\" expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using", "for 'safe' parameter self.assertEqual(urllib.parse.quote.__defaults__[0], '/') def test_safe(self): # Test setting", "!= %r\" % (expect, result)) # Same as above, but", "newFile.write(data) newFile.close() finally: try: newFile.close() except: pass return newFilePath def", "%r\" % (expect, result)) expect = given.replace(' ', '+') result", "self.assertEqual(pathname2url(r'C:\\foo\\bar\\spam.foo'), \"///C:/foo/bar/spam.foo\" ) def test_long_drive_letter(self): self.assertRaises(IOError, pathname2url, \"XX:\\\\\") def test_roundtrip_pathname2url(self):", "%r != +\" % result) given = \"a b cd", "def server(evt): # import socket, time # serv = socket.socket(socket.AF_INET,", "num in range(128): given = hexescape(chr(num)) expect = chr(num) result", "self.assertFalse(os.path.exists(tmp_file)) with self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl) def test_ftp_nohost(self): test_ftp_url = 'ftp:///path' with", "self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) # Test lowercase 
preference with replacement os.environ['http_proxy'] =", "# Make sure object returned by urlopen() has the specified", "after exhausting the file did not\" \" return an empty", "def test_thishost(self): \"\"\"Test the urllib.request.thishost utility function returns a tuple\"\"\"", "filePath.encode(\"utf-8\") except UnicodeEncodeError: raise unittest.SkipTest(\"filePath is not encodable to utf8\")", "and such. \"\"\" def test_unquoting(self): # Make sure unquoting of", "should be escaped to %s, not %s\" % (char, hexescape(char),", "'C:\\\\' for url in given: result = urllib.request.url2pathname(url) self.assertEqual(expect, result,", "filename, _ = urllib.request.URLopener().retrieve(url) self.assertEqual(os.path.splitext(filename)[1], \".txt\") @support.ignore_warnings(category=DeprecationWarning) def test_local_file_open(self): #", "by default in UTF-8 given = \"\\xa2\\xd8ab\\xff\" expect = \"%C2%A2%C3%98ab%C3%BF\"", "_is_ significant self._saved_env = os.environ # Monkey patch os.environ, start", "test_simple_compare(self): self.assertEqual(pathname2url(r'C:\\foo\\bar\\spam.foo'), \"///C:/foo/bar/spam.foo\" ) def test_long_drive_letter(self): self.assertRaises(IOError, pathname2url, \"XX:\\\\\") def", "!= %r\" % (expect, result)) def test_quoting_plus(self): self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma')", "import urllib.error import http.client import email.message import io import unittest", "machines, sometimes # fail in one of the tests, sometimes", "= 'br%C3%BCckner_sapporo_20050930.doc' expect = 'br\\u00fcckner_sapporo_20050930.doc' result = urllib.parse.unquote(given) self.assertEqual(expect, result,", "= 'http://somewhere:3128' os.environ['Http_Proxy'] = 'http://somewhereelse:3128' proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http'])", "@support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_remote(self): url = 
\"http://www.python.org/file.txt\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") self.addCleanup(self.unfakehttp)", "fail in one of the tests, sometimes in other. I", "safe characters are not quoted by urlopen self.assertEqual(DummyURLopener().open( \"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/\"), \"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/\")", "self.assertEqual(expect, result, \"using quote(): %r != %r\" % (expect, result))", "urllib.parse.urlencode(given, True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # Utf-8 given =", "Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license Connection: close Content-Type:", "object\"\"\" self.returned_obj.close() os.remove(support.TESTFN) def test_interface(self): # Make sure object returned", "RFC 2396 specifies\"\"\" hex_repr = hex(ord(char))[2:].upper() if len(hex_repr) == 1:", "= f\"//localhost{char}/test/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: escaped_char_repr = repr(char).replace('\\\\', r'\\\\')", "replace error handling given = \"\\u6f22\\u5b57\" expect = \"%3F%3F\" #", "with None (default) result = urllib.parse.unquote(given, encoding=None, errors=None) self.assertEqual(expect, result,", "expected = \"ab%5B%5Dcd\" result = urllib.parse.quote(partial_quote) self.assertEqual(expected, result, \"using quote():", "setUp(self): # Create a temp file to use for testing", "{\"&\":\"=\"} expect = \"%s=%s\" % (hexescape('&'), hexescape('=')) result = urllib.parse.urlencode(given)", "gets its own location returned and # a headers value", "urllib.parse.quote_plus(' ') self.assertEqual(result, '+', \"using quote_plus(): %r != +\" %", "%s attribute\" % attr) def test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self):", "# Sequence of values given = ((b'\\xa0\\x24', (42, b'\\xc1\\x24')),) expect", "= tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath) 
newFile = os.fdopen(newFd, \"wb\") newFile.write(data) newFile.close()", "# bytes, and once for the last byte). report =", "temporary files. Each item in the list is a file", "= urllib.parse.urlencode(given, True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # Utf-8 given", "# Test all above in latin-1 encoding given = ((b'\\xa0\\x24',", "test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def test_invalid_base64_data(self): # missing padding character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class", "test_reporthook_8193_bytes(self): # Test on 8193 byte file. Should call reporthook", "escaped Delimiters : '<>#%\"' Must be escaped Unwise : \"{}|\\^[]`\"", "Create a temporary file. self.registerFileForCleanUp(support.TESTFN) self.text = b'testing urllib.urlretrieve' try:", "%r != %r\" % (quote_by_default, result)) result = urllib.parse.quote_plus(quote_by_default, safe=quote_by_default)", "iterator in the usual implicit way to test for ticket", "US-ASCII hex value>. The Python code of ``'%' + hex(ord(<character>))[2:]``", "result, \"using quote_plus(): %r != %r\" % (expect, result)) #", "\"Reading on the file descriptor returned by fileno() \" \"did", "url = \"http://www.python.org/file.txt\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") self.addCleanup(self.unfakehttp) filename, _ =", "''', mock_close=True) try: self.assertRaises(OSError, urlopen, \"http://python.org/\") finally: self.unfakehttp() def test_invalid_redirect(self):", "url opening codepaths. Plain # urlopen uses FancyURLOpener which goes", "established and once when the block is # read). 
report", "Python 2's \"urllib\" module\"\"\" import urllib.parse import urllib.request import urllib.error", "result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, \"using unquote_plus(): %r != %r\"", "in URIs and must be escaped if not being used", "= b'br\\xc3\\xbcckner_sapporo_20050930.doc' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes(): %r", "result)) expect = given.replace('+', ' ') result = urllib.parse.unquote_plus(given) self.assertEqual(expect,", "test_file_notexists(self): fd, tmp_file = tempfile.mkstemp() tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep,", "escaped space : 0x20 Must be escaped Delimiters : '<>#%\"'", "pass expected_path = os.path.join(\"parts\", \"of\", \"a\", \"path\") expected_url = \"parts/of/a/path\"", "result, \"pathname2url() failed; %s != %s\" % (result, expected_url)) result", "fakedata, mock_close=False): fake_http_class = fakehttp(fakedata, mock_close=mock_close) self._connection_class = http.client.HTTPConnection http.client.HTTPConnection", "\"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.text_url_resp, attr), \"object returned by urlopen()", "result)) # Characters in the Latin-1 range, encoded with None", "self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')], \"using sequence of two-item", "self.unfakeftp() def test_userpass_inurl(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://user:pass@python.org/\")", "with UTF-8 given = 'br%C3%BCckner_sapporo_20050930.doc' expect = 'br\\u00fcckner_sapporo_20050930.doc' result =", "self.assertRaises(TypeError, urllib.parse.quote, given, encoding=\"latin-1\") # quote_from_bytes should work the same", "self.assertTrue(bypass('anotherdomain.com')) self.assertFalse(bypass('newdomain.com')) self.assertFalse(bypass('newdomain.com:1234')) def test_proxy_bypass_environment_newline(self): bypass = 
urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost,", "\"{}|\\^[]`\" Must be escaped \"\"\" def test_never_quote(self): # Make sure", "None (default) result = urllib.parse.unquote(given, encoding=None, errors=None) self.assertEqual(expect, result, \"using", "result = urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_bytes(self): given", "%r != %r\" % (expect, result)) # Errors test for", "the first line from the # comparison. # Use the", "'%A0$=%C1$' result = urllib.parse.urlencode(given, doseq=True, safe=\":$\", encoding=\"latin-1\") given = ((b'\\xa0\\x24',", "= given.replace('+', ' ') result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, \"using", "def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost') proxies = urllib.request.getproxies_environment() # getproxies_environment use", "various utility functions in the urllib.\"\"\" def test_thishost(self): \"\"\"Test the", "FakeSocket(self.fakedata) type(self).fakesock = self.sock if mock_close: # bpo-36918: HTTPConnection destructor", "with xmlcharref error handling given = \"\\u6f22\\u5b57\" expect = \"%26%2328450%3B%26%2323383%3B\"", "though, if desired Control characters : 0x00 - 0x1F, 0x7F", "# name (absolute path or relative to the current working", "got %s\" % (test_type, result.count('&'))) amp_location = result.index('&') on_amp_left =", "% (do_not_quote, result)) result = urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote_plus():", "ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, []) # finally: #", "length file. Should call reporthook only 1 time. 
report =", "= urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', '*') self.assertTrue(bypass('newdomain.com')) self.assertTrue(bypass('newdomain.com:1234')) self.env.set('NO_PROXY', '*, anotherdomain.com') self.assertTrue(bypass('anotherdomain.com'))", "% (expect, result)) def test_unquoting_with_bytes_input(self): # Bytes not supported yet", "the expected text\") def test_close(self): # Test close() by calling", "connects # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, []) #", "quote(): %r != %r\" % (expect, result)) result = urllib.parse.unquote_plus(given)", "def test_urlencode_encoding_doseq(self): # ASCII Encoding. Expect %3F with errors=\"replace' given", "%s\" % (test_type, expected, result)) self.assertEqual(result.count('&'), 2, \"testing %s: expected", "\"using unquote(): %r != %r\" % (expect, result)) result =", "srcFileName = self.createNewTempFile(b\"x\" * 5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 2)", "'%' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using", "result, \"using quote_plus(): %r != %r\" % (expect, result)) def", "'\\\\') def test_simple_compare(self): self.assertEqual(url2pathname(\"///C|/foo/bar/spam.foo\"), r'C:\\foo\\bar\\spam.foo') def test_non_ascii_drive_letter(self): self.assertRaises(IOError, url2pathname, \"///\\u00e8|/\")", "using fake http connections\"\"\" def test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1 200 OK Date:", "host, port, dirs, timeout=None, persistent=True): pass def retrfile(self, file, type):", "again # by the tearDown() method for the test self.returned_obj.close()", "ImportError: ssl = None import sys import tempfile from nturl2path", "file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile() 
urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN,", "byte file. Should call reporthook only 3 times (once #", "%r != %r\" % (expect, result)) def test_default_quoting(self): # Make", "result, \"using quote_plus(): \" \"%s should be escapes to %s,", "'\\\\') self.assertEqual(url2pathname(\"////C/test/\"), r'\\\\C\\test' '\\\\') def test_simple_compare(self): self.assertEqual(url2pathname(\"///C|/foo/bar/spam.foo\"), r'C:\\foo\\bar\\spam.foo') def test_non_ascii_drive_letter(self):", "os.close(fd) os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) with self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl) def test_ftp_nohost(self): test_ftp_url =", "serv.accept() # conn.send(\"1 Hola mundo\\n\") # cantdata = 0 #", "make no guarantee and have possible dictionary input. \"\"\" expect_somewhere", "\"<PASSWORD>\" url = \"http://{}@python.org/\".format(userpass) fakehttp_wrapper = http.client.HTTPConnection authorization = (\"Authorization:", "result) given = ((\"\\u00a0\", (42, \"\\u00c1\")),) expect = '%C2%A0=42&%C2%A0=%C3%81' result", "UnicodeEncodeError: raise unittest.SkipTest(\"filePath is not encodable to utf8\") return \"file://%s\"", "finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_path_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200", "!= %r\" % (expect, result)) # Errors test for quote_plus", "@unittest.skipUnless(ssl, \"ssl module required\") def test_url_host_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host", "and then having it be called again # by the", "wrong port def test_proxy_bypass_environment_always_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', '*') self.assertTrue(bypass('newdomain.com'))", "= urllib.parse.unquote(given, errors=\"ignore\") self.assertEqual(expect, result, \"using unquote(): %r != %r\"", "= 
urlopen(\"http://user:pass@python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://user:pass@python.org/') self.assertEqual(fp.getcode(), 200)", "pass self._ftpwrapper_class = urllib.request.ftpwrapper urllib.request.ftpwrapper = FakeFtpWrapper def unfakeftp(self): urllib.request.ftpwrapper", "\"did not return the expected text\") def test_close(self): # Test", "= \"<PASSWORD>\" url = \"http://{}@python.org/\".format(userpass) fakehttp_wrapper = http.client.HTTPConnection authorization =", "self.assertEqual(expect, result) def test_urlencode_encoding_doseq(self): # ASCII Encoding. Expect %3F with", "Test on 5 byte file. Should call reporthook only 2", "what it should do quote_by_default = \"<>\" result = urllib.parse.quote(quote_by_default,", "Make sure that the reporthook works. def hooktester(block_count, block_read_size, file_size,", "# url2pathname() respectively given = os.path.join(\"needs\", \"quot=ing\", \"here\") expect =", "len(result), (5 * 3) + 2)) def test_using_mapping(self): # Test", "(expect, result)) expect = given.replace(' ', '+') result = urllib.parse.quote_plus(given)", "authorization header must be in place self.assertIn(authorization, fakehttp_wrapper.buf.decode(\"UTF-8\")) self.assertEqual(fp.readline(), b\"Hello!\")", "be, though, if desired Control characters : 0x00 - 0x1F,", "encoding=None, errors=None) self.assertEqual(expect, result, \"using quote(): %r != %r\" %", "# Characters in BMP, encoded with UTF-8 given = \"%E6%BC%A2%E5%AD%97\"", "even # when exceptional conditions occur. 
self.tempFiles = [] #", "gamma', '+'), 'alpha+beta+gamma') # Test with bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'), 'alpha%2Bbeta+gamma')", "finally: self.unfakehttp() def test_willclose(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: resp =", "not supported yet with self.assertRaisesRegex(TypeError, 'Expected str, got bytes'): given", "name (absolute path or relative to the current working directory).", "encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # ASCII Encoding. On a sequence", "except: pass self.assertEqual(self.text, text) def test_reporthook(self): # Make sure that", "url2pathname() respectively given = os.path.join(\"needs\", \"quot=ing\", \"here\") expect = \"needs/%s/here\"", "in Latin-1 range, encoded by with None (default) result =", "count number of iterations since test would fail the #", "self.assertRaises(ValueError): urllib.request.urlopen( \"https://localhost\", cafile=\"/nonexistent/path\", context=context ) class urlopen_DataTests(unittest.TestCase): \"\"\"Test urlopen()", "\"using unquote(): %r != %r\" % (expect, result)) expect =", "3) self.assertEqual(report[0][2], 8193) self.assertEqual(report[0][1], 8192) self.assertEqual(report[1][1], 8192) self.assertEqual(report[2][1], 8192) class", "on 8193 byte file. 
Should call reporthook only 3 times", "urllib.parse.urlencode(given, True) self.assertEqual(expect, result) def test_urlencode_encoding_safe_parameter(self): # Send '$' (\\x24)", "%r != %r\" % (expect, result)) def test_unquoting_parts(self): # Make", "opener = FancyURLopener() _urlopener = opener else: opener = _urlopener", "'%xab' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using", "self.assertTrue(bypass('foo.d.o.t')) # issue 29142 self.assertTrue(bypass('d.o.t')) self.assertTrue(bypass('anotherdomain.com:8888')) self.assertTrue(bypass('.anotherdomain.com:8888')) self.assertTrue(bypass('www.newdomain.com:1234')) self.assertFalse(bypass('prelocalhost')) self.assertFalse(bypass('newdomain.com'))", "is default value for 'safe' parameter self.assertEqual(urllib.parse.quote.__defaults__[0], '/') def test_safe(self):", "Characters in Latin-1 range, encoded by with None (default) result", "urllib.request.URLopener().retrieve, url) self.assertRaises(OSError, DummyURLopener().open, url) self.assertRaises(OSError, DummyURLopener().retrieve, url) # Just", "self.assertTrue(e.exception.reason) def test_ftp_nonexisting(self): with self.assertRaises(urllib.error.URLError) as e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason)", "(do_not_quote, result)) def test_default_safe(self): # Test '/' is default value", "b\"Hello!\") self.assertEqual(fp.readline(), b\"\") # the spaces are quoted in URL", "urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_host_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1", "self.assertIn(expected, result, \"testing %s: %s not found in %s\" %", "urllib.request.url2pathname(url) self.assertEqual(expect, result, 'urllib.request..url2pathname() failed; %s != %s' % (expect,", "OSError for many error codes. 
self.fakehttp(b'''HTTP/1.1 302 Found Date: Wed,", "self.assertIsInstance(file_num, int, \"fileno() did not return an int\") self.assertEqual(os.read(file_num, len(self.text)),", "as to cut down on reliance on connecting to the", "self.assertEqual(expected, result, \"using quote(): %r != %r\" % (expected, result))", "encoding=\"latin-1\", errors=\"xmlcharrefreplace\") self.assertEqual(expect, result, \"using quote(): %r != %r\" %", "= urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # latin-1 given = ((\"\\u00a0\",", "def test_reporthook_8193_bytes(self): # Test on 8193 byte file. Should call", "as RFC 2396 specifies\"\"\" hex_repr = hex(ord(char))[2:].upper() if len(hex_repr) ==", "CVE-2019-9948: urllib must reject local_file:// scheme class DummyURLopener(urllib.request.URLopener): def open_local_file(self,", "\"file://%s\" % urllib.request.pathname2url(filePath) def createNewTempFile(self, data=b\"\"): \"\"\"Creates a new temporary", "constructLocalFileUrl(self, filePath): filePath = os.path.abspath(filePath) try: filePath.encode(\"utf-8\") except UnicodeEncodeError: raise", "character result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r !=", "lowercase preference with replacement os.environ['http_proxy'] = 'http://somewhere:3128' os.environ['Http_Proxy'] = 'http://somewhereelse:3128'", "result)) def test_unquoting_with_bytes_input(self): # Bytes not supported yet with self.assertRaisesRegex(TypeError,", "was not \" \"made\") FILE = open(second_temp, 'rb') try: text", "= ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\", encoding=\"latin-1\") expect =", "def testTimeoutNone(self): # # global default timeout is ignored #", "threading # ftplib.FTP.port = 9093 # self.evt = threading.Event() #", "self.assertEqual(\"a=a&a=b\", urllib.parse.urlencode({\"a\": data}, True)) def test_urlencode_encoding(self): # ASCII encoding. 
Expect", "e f\" expect = given.replace(' ', hexescape(' ')) result =", "\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\\n\" \"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 \" \"vHgAAAABJRU5ErkJggg%3D%3D%0A%20\") self.text_url_resp = urllib.request.urlopen(self.text_url) self.text_url_base64_resp = urllib.request.urlopen(", "with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL) finally: self.unfakehttp() class QuotingTests(unittest.TestCase): r\"\"\"Tests for", "% self.__class__.__name__, \"ascii\") f = open(support.TESTFN, 'wb') try: f.write(self.text) finally:", "non-Latin-1-representable characters) result = urllib.parse.unquote(\"\\u6f22%FC\", encoding=\"latin-1\") expect = '\\u6f22\\u00fc' self.assertEqual(expect,", "encoded with UTF-8 given = \"%E6%BC%A2%E5%AD%97\" expect = \"\\u6f22\\u5b57\" #", "self.assertFalse(bypass('newdomain.com:1234')) def test_proxy_bypass_environment_newline(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234')", "count_holder[0] + 1 second_temp = \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve(", "msg = \"Redirection to url 'file:\" with self.assertRaisesRegex(urllib.error.HTTPError, msg): urlopen(\"http://python.org/\")", "deleted, but it # does nothing about trying to close", "parameter self.assertEqual(urllib.parse.quote.__defaults__[0], '/') def test_safe(self): # Test setting 'safe' parameter", "urllib.parse.urlencode({\"a\": 1})) self.assertEqual(\"a=None\", urllib.parse.urlencode({\"a\": None})) def test_nonstring_seq_values(self): self.assertEqual(\"a=1&a=2\", urllib.parse.urlencode({\"a\": [1,", "\"readline\", \"readlines\", \"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.text_url_resp, attr), \"object", "conditions occur. 
self.tempFiles = [] # Create a temporary file.", "', hexescape(' ')) result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote():", "'' os.environ['No_Proxy'] = 'localhost' self.assertFalse(urllib.request.proxy_bypass_environment('localhost')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) os.environ['http_proxy'] = '' os.environ['HTTP_PROXY']", "second_temp = \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) result = urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN),", "urllib.parse.unquote_plus(given) self.assertEqual(expect, result, \"using unquote_plus(): %r != %r\" % (expect,", "data]) -> open file-like object\"\"\" global _urlopener if proxies is", "= ('/C:/', '///C:/', '/C|//') expect = 'C:\\\\' for url in", "self.assertEqual(expect, result) def test_urlencode_bytes(self): given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect =", "preference with removal os.environ['no_proxy'] = '' os.environ['No_Proxy'] = 'localhost' self.assertFalse(urllib.request.proxy_bypass_environment('localhost'))", "dict as input type\") def test_using_sequence(self): # Test passing in", "url in ('local_file://example', 'local-file://example'): self.assertRaises(OSError, urllib.request.urlopen, url) self.assertRaises(OSError, urllib.request.URLopener().open, url)", "proxies is not None: opener = urllib.request.FancyURLopener(proxies=proxies) elif not _urlopener:", "of invoking requests is deprecated.', DeprecationWarning)): self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/') #", "-= 1 if self.io_refs == 0: io.BytesIO.close(self) class FakeHTTPConnection(http.client.HTTPConnection): #", "count_holder=[0]): self.assertIsInstance(block_count, int) self.assertIsInstance(block_read_size, int) self.assertIsInstance(file_size, int) self.assertEqual(block_count, count_holder[0]) count_holder[0]", "Characters in BMP, encoded with UTF-8 given = \"%E6%BC%A2%E5%AD%97\" expect", "the 
specified methods for attr in (\"read\", \"readline\", \"readlines\", \"fileno\",", "fake_http_class = fakehttp(fakedata, mock_close=mock_close) self._connection_class = http.client.HTTPConnection http.client.HTTPConnection = fake_http_class", "vars self.env = support.EnvironmentVarGuard() # Delete all proxy related env", "2 '&'s, got %s\" % result.count('&')) def test_empty_sequence(self): self.assertEqual(\"\", urllib.parse.urlencode({}))", "1: hex_repr = \"0%s\" % hex_repr return \"%\" + hex_repr", "try: self.registerFileForCleanUp(newFilePath) newFile = os.fdopen(newFd, \"wb\") newFile.write(data) newFile.close() finally: try:", "self.tempFiles.append(fileName) def test_basic(self): # Make sure that a local file", "self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp), \"copy of the file was not \"", "urllib.parse.quote, given, encoding=\"latin-1\") # quote_from_bytes should work the same result", "urllib.parse.urlencode(given, encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_encoding_doseq(self): # ASCII Encoding. 
Expect", "= \"%E6%BC%A2%E5%AD%97\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote(): %r", "= 1 def sendall(self, data): FakeHTTPConnection.buf = data def makefile(self,", "% (expect, result)) given = '%' expect = given result", "Unreserved and do not need to be escaped; can be,", "'2', '3']} expect = \"sequence=%s\" % urllib.parse.quote_plus(str(['1', '2', '3'])) result", "[1, 2]}, True)) self.assertEqual(\"a=None&a=a\", urllib.parse.urlencode({\"a\": [None, \"a\"]}, True)) data =", "with Latin-1 given = \"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result =", "containing URL special- and unicode-characters self.text = \"test data URLs", "file_size)) srcFileName = self.createNewTempFile(b\"x\" * 5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report),", "\"%F3%B1\" expect = \"\\ufffd\" # Replacement character result = urllib.parse.unquote(given)", "encoded by default in UTF-8 given = \"\\xa2\\xd8ab\\xff\" expect =", "Control characters : 0x00 - 0x1F, 0x7F Have no use", "result = urllib.parse.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote(): %r !=", "\"object returned by urlopen() lacks %s attribute\" % attr) def", "expected, result)) self.assertEqual(result.count('&'), 2, \"testing %s: expected 2 '&'s; got", "'*, anotherdomain.com') self.assertTrue(bypass('anotherdomain.com')) self.assertFalse(bypass('newdomain.com')) self.assertFalse(bypass('newdomain.com:1234')) def test_proxy_bypass_environment_newline(self): bypass = urllib.request.proxy_bypass_environment", "ignoring errors given = \"%F3%B1\" expect = \"\" result =", "cannot end a raw string in \\ self.assertEqual(url2pathname(\"///C/test/\"), r'\\\\\\C\\test' '\\\\')", "ftplib.FTP.port = 9093 # self.evt = threading.Event() # threading.Thread(target=server, args=(self.evt,)).start()", "!= %r\" % (expect, result)) # Characters in BMP, encoded", 
"MixedCase self.assertTrue(bypass('LOCALHOST')) # UPPERCASE self.assertTrue(bypass('.localhost')) self.assertTrue(bypass('newdomain.com:1234')) self.assertTrue(bypass('.newdomain.com:1234')) self.assertTrue(bypass('foo.d.o.t')) # issue", "test_info(self): self.assertIsInstance(self.text_url_resp.info(), email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain', ''), ('charset', 'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image)))", "test_proxy_cgi_ignore(self): try: self.env.set('HTTP_PROXY', 'http://somewhere:3128') proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) self.env.set('REQUEST_METHOD',", "failed; %s != %s\" % (result, expected_url)) result = urllib.request.url2pathname(expected_url)", "21, []) urlopen('ftp://localhost') finally: self.unfakeftp() def test_userpass_inurl(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\")", "place self.assertIn(authorization, fakehttp_wrapper.buf.decode(\"UTF-8\")) self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") # the spaces", "finally: self.unfakehttp() def test_URLopener_deprecation(self): with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() @unittest.skipUnless(ssl, \"ssl module", "urllib.request.urlopen(self.image_url) def test_interface(self): # Make sure object returned by urlopen()", "given = (('\\u00a0', '\\u00c1'),) expect = '%A0=%C1' result = urllib.parse.urlencode(given,", "patch from test import support import os try: import ssl", "object\"\"\" global _urlopener if proxies is not None: opener =", "to %s, not %s\" % (char, hexescape(char), result)) result =", "(expect, result)) def test_unquoting_parts(self): # Make sure unquoting works when", "\"\\xa2\\xd8ab\\xff\" expect = \"%C2%A2%C3%98ab%C3%BF\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using", "\"readlines() 
returned improper text\") def test_fileno(self): file_num = self.returned_obj.fileno() self.assertIsInstance(file_num,", "self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) def test_ftp_nonexisting(self): with self.assertRaises(urllib.error.URLError) as e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename)", "# def tearDown(self): # self.evt.wait() # # def testBasic(self): #", "return the expected text\") def test_close(self): # Test close() by", "file://guidocomputer.athome.com:/python/license Connection: close ''', mock_close=True) try: self.assertRaises(urllib.error.HTTPError, urlopen, \"http://something\") finally:", "quoted are by default sans # space (separate test for", "fragments in the original URL. url = 'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b\"HTTP/1.1 200", "fileno() \" \"did not return the expected text\") def test_close(self):", "[('text/plain', ''), ('charset', 'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image))) self.assertEqual(urllib.request.urlopen(\"data:,\").info().get_params(), [('text/plain', ''), ('charset',", "FILE.write(self.text) FILE.close() finally: try: FILE.close() except: pass def tearDown(self): #", "try: self.env.set('HTTP_PROXY', 'http://somewhere:3128') proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) self.env.set('REQUEST_METHOD', 'GET')", "non-ASCII bytes # (Technically an invalid URI; expect those bytes", "to cut down on reliance on connecting to the Net", "unquote_to_bytes(): %r != %r\" % (expect, result)) given = '%'", "class Pathname_Tests(unittest.TestCase): \"\"\"Test pathname2url() and url2pathname()\"\"\" def test_basic(self): # Make", "urllib.request.getproxies_environment() self.assertNotIn('http', proxies) finally: self.env.unset('REQUEST_METHOD') self.env.unset('HTTP_PROXY') def 
test_proxy_bypass_environment_host_match(self): bypass =", "Test with a bytes as input given = b'%A2%D8ab%FF' expect", "True) self.assertEqual(expect, result) given = ((\"\\u00a0\", (42, \"\\u00c1\")),) expect =", "\"_,.-\" do_not_quote = '' .join([\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\", \"0123456789\", \"_.-~\"]) result =", "specific to the urllib.url2path function.') def test_ntpath(self): given = ('/C:/',", "result)) # Safe expressed as bytes rather than str result", "None (default) result = urllib.parse.quote(given, encoding=None, errors=None) self.assertEqual(expect, result, \"using", "!= +\" % result) given = \"a b cd e", "= \"%26%2328450%3B%26%2323383%3B\" # \"&#28450;&#23383;\" result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"xmlcharrefreplace\") self.assertEqual(expect,", "# List of no_proxies with space. self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234')", "f\"//localhost:7777/test{char}/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: # We explicitly test urllib.request.urlopen()", "self.unfakehttp() def test_url_fragment(self): # Issue #11703: geturl() omits fragments in", "Utf-8 given = ((\"\\u00a0\", \"\\u00c1\"),) expect = '%C2%A0=%C3%81' result =", "finally: self.unfakehttp() def test_invalid_redirect(self): # urlopen() should raise OSError for", "text/html; charset=iso-8859-1 ''', mock_close=True) try: msg = \"Redirection to url", "attr in (\"read\", \"readline\", \"readlines\", \"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"):", "+ [0x7f]: char = chr(char_no) schemeless_url = f\"//localhost:7777/test{char}/\" self.fakehttp(b\"HTTP/1.1 200", "test suite. They use different url opening codepaths. Plain #", "codes. self.fakehttp(b'''HTTP/1.1 401 Authentication Required Date: Wed, 02 Jan 2008", "path or relative to the current working directory). # All", "them out. 
# Can't really tell why keep failing in", "not\" \" return an empty string\") def test_readlines(self): lines_list =", "03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close", "#11703: geturl() omits fragments in the original URL. url =", "%r\" % (expect, result)) # A mix of non-ASCII and", "of two-item sequences as an argument. self.help_inputtype([('1st', '1'), ('2nd', '2'),", "urllib.request.URLopener().open, url) self.assertRaises(OSError, urllib.request.URLopener().retrieve, url) self.assertRaises(OSError, DummyURLopener().open, url) self.assertRaises(OSError, DummyURLopener().retrieve,", "result)) expect = given.replace(' ', '+') result = urllib.parse.quote_plus(given) self.assertEqual(expect,", "self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try: # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\",", "# Test with bytes self.assertEqual(urllib.parse.quote_plus(b'alpha+beta gamma'), 'alpha%2Bbeta+gamma') # Test with", "port, dirs, timeout=None, persistent=True): pass def retrfile(self, file, type): return", "not \" \"made\") FILE = open(second_temp, 'rb') try: text =", "1), (\"b\", 1)]) self.assertEqual(\"a=a&a=b\", urllib.parse.urlencode({\"a\": data}, True)) def test_urlencode_encoding(self): #", "\"localhost\\r\\nX-injected: header\\r\\n\" schemeless_url = \"//\" + host + \":8080/test/?test=a\" try:", "# # def testBasic(self): # # connects # ftp =", "expect = 'br\\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect, result, \"using unquote(): %r != %r\"", "Replacement character result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r", "to url 'file:\" with self.assertRaisesRegex(urllib.error.HTTPError, msg): urlopen(\"http://python.org/\") finally: self.unfakehttp() def", "def test_quoting(self): # Test automatic quoting and unquoting works for", "# Create a temp file to use for testing self.text", "in latin-1 encoding given = 
((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given,", "Should call reporthook only 2 times (once when # the", "'rb') try: text = FILE.read() FILE.close() finally: try: FILE.close() except:", "urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][2], 8193) self.assertEqual(report[0][1], 8192) self.assertEqual(report[1][1],", "to test as much functionality as possible so as to", "the file for deletion during the test fixture tear down,", "line from the # comparison. # Use the iterator in", "call reporthook only 2 times (once when # the \"network", "_path_ safe. escaped_char_repr = repr(char).replace('\\\\', r'\\\\') InvalidURL = http.client.InvalidURL with", "value is returned. result = urllib.request.urlretrieve(\"file:%s\" % support.TESTFN) self.assertEqual(result[0], support.TESTFN)", "the list is a file # name (absolute path or", "deleted in the tearDown method. Note, # this only helps", "\"using quote(): %r != %r\" % (quote_by_default, result)) result =", "result)) # Same as above, but using a bytes rather", "urllib.url2path function.') def test_ntpath(self): given = ('/C:/', '///C:/', '/C|//') expect", "self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_URLopener_deprecation(self): with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() @unittest.skipUnless(ssl,", "test_converting_drive_letter(self): self.assertEqual(url2pathname(\"///C|\"), 'C:') self.assertEqual(url2pathname(\"///C:\"), 'C:') self.assertEqual(url2pathname(\"///C|/\"), 'C:\\\\') def test_converting_when_no_drive_letter(self): #", "self.env.unset('HTTP_PROXY') def test_proxy_bypass_environment_host_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234,", "zero length file. Should call reporthook only 1 time. 
report", "os.remove(each) except: pass def constructLocalFileUrl(self, filePath): filePath = os.path.abspath(filePath) try:", "given = (('\\u00a0', '\\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given,", "result = urllib.parse.urlencode(given, True, safe=\":$\") self.assertEqual(expect, result) # Test all", "method of URLopener class.\"\"\" def test_quoted_open(self): class DummyURLopener(urllib.request.URLopener): def open_spam(self,", "I/O operation on closed file\" which is logged as an", "urllib.parse.quote(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote(): %r != %r\" %", "and percent-encoded characters, UTF-8 result = urllib.parse.unquote(\"\\u6f22%C3%BC\") expect = '\\u6f22\\u00fc'", "an argument. self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')], \"using sequence", "# Decode with UTF-8, invalid sequence, ignoring errors given =", "\" return an empty string\") def test_readlines(self): lines_list = self.returned_obj.readlines()", "tempfile from nturl2path import url2pathname, pathname2url from base64 import b64encode", "result = urllib.parse.quote_plus(given) self.assertEqual(expect, result, \"using quote_plus(): %r != %r\"", "self.env.unset(k) def tearDown(self): # Restore all proxy related env vars", "f\"contain control.*{escaped_char_repr}\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp()", "trying to close files that may still be open. It", "par2, par3): pass with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL, reporthook=_reporthook) finally: self.unfakehttp()", "newFile = os.fdopen(newFd, \"wb\") newFile.write(data) newFile.close() finally: try: newFile.close() except:", "in URIs so must be escaped space : 0x20 Must", "store data for verification in urlopen tests. 
buf = None", "f\"//localhost{char}/test/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: escaped_char_repr = repr(char).replace('\\\\', r'\\\\') InvalidURL", "%r != %r\" % (do_not_quote, result)) def test_default_safe(self): # Test", "self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text, self.returned_obj.readline()) self.assertEqual(b'', self.returned_obj.readline(), \"calling readline() after", "import unittest from unittest.mock import patch from test import support", "def fakeftp(self): class FakeFtpWrapper(object): def __init__(self, user, passwd, host, port,", "http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex( InvalidURL,", "= urllib.parse.urlencode(given, encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_encoding_doseq(self): # ASCII Encoding.", "def urlopen(url, data=None, proxies=None): \"\"\"urlopen(url [, data]) -> open file-like", "specified in # their unique way result = urllib.parse.quote(' ')", "self.check_read(b\"1.0\") def test_read_1_1(self): self.check_read(b\"1.1\") def test_read_bogus(self): # urlopen() should raise", "Characters in Latin-1 range, encoded with Latin-1 given = \"\\xa2\\xd8ab\\xff\"", "list(range(0, 0x21)) + [0x7f]: char = chr(char_no) schemeless_url = f\"//localhost{char}/test/\"", "Test lowercase preference with replacement os.environ['http_proxy'] = 'http://somewhere:3128' os.environ['Http_Proxy'] =", "2, #5 chars per thing and amps \"testing %s: \"", "%r\" % (do_not_quote, result)) def test_default_safe(self): # Test '/' is", "is deprecated.', DeprecationWarning)): return urllib.request.FancyURLopener() def fakehttp(fakedata, mock_close=False): class FakeSocket(io.BytesIO):", "unique way result = urllib.parse.quote(' ') self.assertEqual(result, hexescape(' '), \"using", "!= %r\" % (do_not_quote, result)) def 
test_default_safe(self): # Test '/'", "sure independent requests each use their # own retry limit.", "% support.TESTFN self.registerFileForCleanUp(second_temp) result = urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN), second_temp) self.assertEqual(second_temp, result[0])", "self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), \"testing %s: '&' not located in proper", "# def setUp(self): # import ftplib, time, threading # ftplib.FTP.port", "Latin-1 range, encoded with None (default) result = urllib.parse.unquote(given, encoding=None,", "with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"):", "% (expect, result)) # Characters in Latin-1 range, encoded with", "!= %r\" % (expect, result)) # Characters in Latin-1 range,", "= 'localhost, noproxy.com, my.proxy:1234' os.environ['No_Proxy'] = 'xyz.com' self.assertTrue(urllib.request.proxy_bypass_environment('localhost')) self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678')) self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234'))", "encodable to utf8\") return \"file://%s\" % urllib.request.pathname2url(filePath) def createNewTempFile(self, data=b\"\"):", "characters) result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\",", ": \"{}|\\^[]`\" Must be escaped \"\"\" def test_never_quote(self): # Make", "= \"%s=%s\" % (hexescape('&'), hexescape('=')) result = urllib.parse.urlencode(given) self.assertEqual(expect, result)", "= urllib.request.url2pathname(result) self.assertEqual(expect, result, \"url2pathname() failed; %s != %s\" %", "not return the expected text\") def test_close(self): # Test close()", "% (test_type, len(result), (5 * 3) + 2)) def test_using_mapping(self):", "%s, not %s\" % (char, hexescape(char), result)) del should_quote 
partial_quote", "srcFileName = self.createNewTempFile(b\"x\" * 8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 3)", "in\". Override close() to silence this error. def close(self): pass", "properly. Case does not matter on the hex letters. The", "FancyURLopener() _urlopener = opener else: opener = _urlopener if data", "\"url2pathname() failed; %s != %s\" % (expect, result)) @unittest.skipUnless(sys.platform ==", "in given[\"sequence\"]: expect = \"sequence=%s\" % value self.assertIn(expect, result) self.assertEqual(result.count('&'),", "(test_type, result.count('&'))) amp_location = result.index('&') on_amp_left = result[amp_location - 1]", "URIs so must be escaped space : 0x20 Must be", "%r\" % (expect, result)) # Characters in the Latin-1 range,", "spaces as specified in # their unique way result =", "= urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\") self.assertEqual(expect, result, \"using quote(): %r !=", "'HEAD' self.assertEqual(request.get_method(), 'HEAD') class URL2PathNameTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(url2pathname(\"///C|\"), 'C:') self.assertEqual(url2pathname(\"///C:\"),", "= '' .join([\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\", \"0123456789\", \"_.-~\"]) result = urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote,", "class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin, FakeFTPMixin): \"\"\"Test urlopen() opening a fake http", "= _urlopener if data is None: return opener.open(url) else: return", "file\" which is logged as an # \"Exception ignored in\".", "make sure independent requests each use their # own retry", "function returns a tuple\"\"\" self.assertIsInstance(urllib.request.thishost(), tuple) class URLopener_Tests(FakeHTTPMixin, unittest.TestCase): \"\"\"Testcase", "self.text_url_resp = urllib.request.urlopen(self.text_url) self.text_url_base64_resp = 
urllib.request.urlopen( self.text_url_base64) self.image_url_resp = urllib.request.urlopen(self.image_url)", "= tempfile.mkstemp() tmp_fileurl = 'file://localhost/' + tmp_file.replace(os.path.sep, '/') try: self.assertTrue(os.path.exists(tmp_file))", "URL special- and unicode-characters self.text = \"test data URLs :;,%=&", "given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, doseq=True, safe=\":$\") expect", "result, \"using unquote_to_bytes(): %r != %r\" % (expect, result)) given", "Errors test for quote_plus given = \"ab\\u6f22\\u5b57 cd\" expect =", "test_quoting_plus(self): self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'), 'alpha+beta+gamma') # Test", "urllib.request.pathname2url(given) self.assertEqual(expect, result, \"pathname2url() failed; %s != %s\" % (expect,", "PathName2URLTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(pathname2url(\"C:\"), '///C:') self.assertEqual(pathname2url(\"C:\\\\\"), '///C:') def test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r\"\\\\\\folder\\test\"", "expected_path = os.path.join(\"parts\", \"of\", \"a\", \"path\") expected_url = \"parts/of/a/path\" result", "independent requests each use their # own retry limit. for", "'%C2%A0=%C3%81' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) # Latin-1 encoding. given", "def test_default_quoting(self): # Make sure all characters that should be", "# space (separate test for that). should_quote = [chr(num) for", "= '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=\":$\") self.assertEqual(expect, result) #", "Just commented them out. # Can't really tell why keep", "(\"Authorization: Basic %s\\r\\n\" % b64encode(userpass.encode(\"ASCII\")).decode(\"ASCII\")) fp = urlopen(url) # The", "the underlying socket does not send any # data. 
(#1680230)", "headers value is returned. result = urllib.request.urlretrieve(\"file:%s\" % support.TESTFN) self.assertEqual(result[0],", "Characters in BMP, encoded with Latin-1, with replace error handling", "# Test that setting the filename argument works. second_temp =", "expect = \"%26%2328450%3B%26%2323383%3B\" # \"&#28450;&#23383;\" result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"xmlcharrefreplace\")", "!= %r\" % (expect, result)) given = '%x' expect =", "to test conditions, where variable order _is_ significant self._saved_env =", "bunch of pluses\"} expect = \"key+name=A+bunch+of+pluses\" result = urllib.parse.urlencode(given) self.assertEqual(expect,", "b'bl\\xc3\\xa5b\\xc3\\xa6rsyltet\\xc3\\xb8y' urllib.parse.unquote(given) class urlencode_Tests(unittest.TestCase): \"\"\"Tests for urlencode()\"\"\" def help_inputtype(self, given,", "to properly close files even # when exceptional conditions occur.", "%r\" % (expect, result)) # Characters in Latin-1 range, encoded", "self.assertTrue(bypass('localhost')) self.assertTrue(bypass('LocalHost')) # MixedCase self.assertTrue(bypass('LOCALHOST')) # UPPERCASE self.assertTrue(bypass('.localhost')) self.assertTrue(bypass('newdomain.com:1234')) self.assertTrue(bypass('.newdomain.com:1234'))", "related env vars for k in list(os.environ): if 'proxy' in", "char as RFC 2396 specifies\"\"\" hex_repr = hex(ord(char))[2:].upper() if len(hex_repr)", ": letters, digits, and \"-_.!~*'()\" Unreserved and do not need", "percent-encoded characters, UTF-8 result = urllib.parse.unquote(\"\\u6f22%C3%BC\") expect = '\\u6f22\\u00fc' self.assertEqual(expect,", "b\" 200 OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(),", "having it be called again # by the tearDown() method", "test_reporthook_5_bytes(self): # Test on 5 byte file. 
Should call reporthook", "urllib.parse.quote(quote_by_default, safe=b\"<>\") self.assertEqual(quote_by_default, result, \"using quote(): %r != %r\" %", "\\u00f6 \\u00c4 \" # 2x1 pixel RGB PNG image with", "urllib.request.urlopen(f\"https:{schemeless_url}\") # This code path quotes the URL so there", "def test_empty_socket(self): # urlopen() raises OSError if the underlying socket", "'br\\u00fcckner_sapporo_20050930.doc' result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r !=", "Content-Type: text/html; charset=iso-8859-1 FF ''') with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL) finally:", "9093)) # serv.listen() # try: # conn, addr = serv.accept()", "errors=None) self.assertEqual(expect, result, \"using unquote(): %r != %r\" % (expect,", "result)) def test_default_quoting(self): # Make sure all characters that should", "on quoting and such. \"\"\" def test_unquoting(self): # Make sure", "location returned and # a headers value is returned. result", "''.join(escape_list) del escape_list result = urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'), 1, \"using unquote():", "non-ASCII characters should have no effect # (Since URIs are", "by default sans # space (separate test for that). 
should_quote", "!= %r\" % (expect, result)) # Test with a bytes", "result)) def test_unquoting_parts(self): # Make sure unquoting works when have", "URIs and must be escaped if not being used for", "import patch from test import support import os try: import", "expect = \"\\u6f22\\u5b57\" # \"Kanji\" result = urllib.parse.unquote(given) self.assertEqual(expect, result,", "self.assertTrue(hasattr(self.text_url_resp, attr), \"object returned by urlopen() lacks %s attribute\" %", "%r\" % (expect, result)) # Errors test for quote_plus given", "unquote_to_bytes(): %r != %r\" % (expect, result)) def test_unquote_with_unicode(self): #", "self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname) class ProxyTests(unittest.TestCase): def setUp(self): # Records changes", "# finally: # serv.close() # evt.set() # # class FTPWrapperTests(unittest.TestCase):", "def open_local_file(self, url): return url for url in ('local_file://example', 'local-file://example'):", "'/') try: self.assertTrue(os.path.exists(tmp_file)) with urlopen(tmp_fileurl) as fobj: self.assertTrue(fobj) finally: os.close(fd)", "urllib.parse.unquote_to_bytes, ()) def test_unquoting_mixed_case(self): # Test unquoting on mixed-case hex", "expect = 'C:\\\\' for url in given: result = urllib.request.url2pathname(url)", "!= %r\" % (quote_by_default, result)) # \"Safe\" non-ASCII characters should", "urllib.request.urlopen, url) self.assertRaises(OSError, urllib.request.URLopener().open, url) self.assertRaises(OSError, urllib.request.URLopener().retrieve, url) self.assertRaises(OSError, DummyURLopener().open,", "self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl) def test_ftp_nohost(self): test_ftp_url = 'ftp:///path' with self.assertRaises(urllib.error.URLError) as", "time, threading # ftplib.FTP.port = 9093 # self.evt = threading.Event()", "*args, **kwds): self.io_refs += 1 return self def read(self, amt=None):", "urllib.parse.quote(\"quot=ing\") result = 
urllib.request.pathname2url(given) self.assertEqual(expect, result, \"pathname2url() failed; %s !=", "with support.check_warnings(('', BytesWarning), quiet=True): self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'') def test_unquoting_badpercent(self):", "expect those bytes to be preserved) given = b'%A2\\xd8ab%FF' expect", "urllib.request.urlopen() instead of the top # level 'def urlopen()' function", "@unittest.skipUnless(ssl, \"ssl module required\") def test_url_host_with_control_char_rejected(self): for char_no in list(range(0,", "# def testBasic(self): # # connects # ftp = urllib.ftpwrapper(\"myuser\",", "patch os.environ, start with empty fake environment os.environ = collections.OrderedDict()", "where variable order _is_ significant self._saved_env = os.environ # Monkey", "%s\" % (test_type, len(result), (5 * 3) + 2)) def", "Make sure unquoting works when have non-quoted characters # interspersed", "result)) self.assertEqual(result.count('&'), 2, \"testing %s: expected 2 '&'s; got %s\"", "in range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1 302 Found Location: file://guidocomputer.athome.com:/python/license Connection: close ''',", "self.assertNotIn(' ', resp.geturl()) self.assertNotIn('\\r', resp.geturl()) self.assertNotIn('\\n', resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl,", "in this list will be deleted in the tearDown method.", "(expected, result)) result = urllib.parse.quote_plus(partial_quote) self.assertEqual(expected, result, \"using quote_plus(): %r", "try: self.assertRaises(OSError, urlopen, \"http://something\") finally: self.unfakehttp() def test_missing_localfile(self): # Test", "self.assertTrue(bypass('newdomain.com:1234')) self.assertTrue(bypass('.newdomain.com:1234')) self.assertTrue(bypass('foo.d.o.t')) # issue 29142 self.assertTrue(bypass('d.o.t')) self.assertTrue(bypass('anotherdomain.com:8888')) self.assertTrue(bypass('.anotherdomain.com:8888')) 
self.assertTrue(bypass('www.newdomain.com:1234'))", "errors=\"replace\") self.assertEqual(expect, result, \"using unquote(): %r != %r\" % (expect,", "expect = \"\\ufffd\" # Replacement character result = urllib.parse.unquote(given) self.assertEqual(expect,", "* 8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][2], 8193) self.assertEqual(report[0][1],", "of the file.\"\"\" newFd, newFilePath = tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath) newFile", "Make sure that a local file just gets its own", "self.assertEqual(url2pathname(\"///C:\"), 'C:') self.assertEqual(url2pathname(\"///C|/\"), 'C:\\\\') def test_converting_when_no_drive_letter(self): # cannot end a", "'%x' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using", "return b\"\" return io.BytesIO.readline(self, length) def close(self): self.io_refs -= 1", "= ((\"\\u00a0\", (1, \"\\u00c1\")),) expect = '%3F=1&%3F=%3F' result = urllib.parse.urlencode(given,", "% (expect, result)) # Decode with UTF-8, invalid sequence, ignoring", "passwd, host, port, dirs, timeout=None, persistent=True): pass def retrfile(self, file,", "try: # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, []) #", "urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes(): %r != %r\" % (expect,", "fakehttp(self, fakedata, mock_close=False): fake_http_class = fakehttp(fakedata, mock_close=mock_close) self._connection_class = http.client.HTTPConnection", "to the Net for testing. 
\"\"\" def setUp(self): # Create", "'%Ab%eA' expect = b'\\xab\\xea' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using", "def makefile(self, *args, **kwds): self.io_refs += 1 return self def", "UTF-8 given = \"%E6%BC%A2%E5%AD%97\" expect = \"\\u6f22\\u5b57\" # \"Kanji\" result", "def fakehttp(fakedata, mock_close=False): class FakeSocket(io.BytesIO): io_refs = 1 def sendall(self,", "r\"contain control.*\\\\n\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() def test_read_0_9(self): # \"0.9\" response", "parameter in sequence given = ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd, 42)),) expect", "failed; %s != %s\" % (expect, result)) given = os.path.join(\"make", "request = Request(\"http://www.python.org\", {}, method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request", "result)) # Characters in Latin-1 range, encoded with Latin-1 given", "@unittest.skipUnless(ssl, \"ssl module required\") def test_url_path_with_control_char_rejected(self): for char_no in list(range(0,", "os.environ, start with empty fake environment os.environ = collections.OrderedDict() def", "closed file\" which is logged as an # \"Exception ignored", "fp = urlopen(\"http://user:pass@python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://user:pass@python.org/') self.assertEqual(fp.getcode(),", "should work the same result = urllib.parse.quote_from_bytes(given) self.assertEqual(expect, result, \"using", "result)) # A mix of non-ASCII and percent-encoded characters, UTF-8", "result, \"using unquote(): %r != %r\" % (expect, result)) expect", "(absolute path or relative to the current working directory). 
#", "with replacement os.environ['http_proxy'] = 'http://somewhere:3128' os.environ['Http_Proxy'] = 'http://somewhereelse:3128' proxies =", "_urlopener if proxies is not None: opener = urllib.request.FancyURLopener(proxies=proxies) elif", "\"using unquote(): %r != %r\" % (expect, result)) def test_unquoting_with_bytes_input(self):", "files\"\"\" def setUp(self): # Create a list of temporary files.", "@unittest.skipUnless(ssl, \"ssl module required\") def test_url_path_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host", "urllib.request.url2pathname(given) self.assertEqual(expect, result, 'urllib.request.url2pathname() failed; %s != %s' % (expect,", "hooktester(block_count, block_read_size, file_size, count_holder=[0]): self.assertIsInstance(block_count, int) self.assertIsInstance(block_read_size, int) self.assertIsInstance(file_size, int)", "!= %r\" % (expected, result)) def test_quoting_space(self): # Make sure", "on bytes input self.assertRaises(TypeError, urllib.parse.quote, given, encoding=\"latin-1\") # quote_from_bytes should", "as an # \"Exception ignored in\". Override close() to silence", "= 'localhost' self.assertFalse(urllib.request.proxy_bypass_environment('localhost')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) os.environ['http_proxy'] = '' os.environ['HTTP_PROXY'] = 'http://somewhere:3128'", "url with support.check_warnings( ('DummyURLopener style of invoking requests is deprecated.',", "lowered case truncated (no '_proxy') keys self.assertEqual('localhost', proxies['no']) # List", "respectively given = os.path.join(\"needs\", \"quot=ing\", \"here\") expect = \"needs/%s/here\" %", "safe=\":$\", encoding=\"latin-1\") self.assertEqual(expect, result) class Pathname_Tests(unittest.TestCase): \"\"\"Test pathname2url() and url2pathname()\"\"\"", "exceptional conditions occur. 
self.tempFiles = [] # Create a temporary", "\"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/\") @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_file(self): with support.temp_dir() as tmpdir: fd, tmpfile", "file did not\" \" return an empty string\") def test_readlines(self):", "urllib.request.thishost utility function returns a tuple\"\"\" self.assertIsInstance(urllib.request.thishost(), tuple) class URLopener_Tests(FakeHTTPMixin,", "test_URLopener_deprecation(self): with support.check_warnings(('',DeprecationWarning)): urllib.request.URLopener() @unittest.skipUnless(ssl, \"ssl module required\") def test_cafile_and_context(self):", "in range(128): given = hexescape(chr(num)) expect = chr(num) result =", "self.assertTrue(bypass('www.newdomain.com:1234')) self.assertFalse(bypass('prelocalhost')) self.assertFalse(bypass('newdomain.com')) # no port self.assertFalse(bypass('newdomain.com:1235')) # wrong port", "significant self._saved_env = os.environ # Monkey patch os.environ, start with", "\"\\u00c1\")),) expect = '%A0=42&%A0=%C1' result = urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect,", "self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0) def test_reporthook_5_bytes(self): # Test on 5", "anything beyond the first line from the # comparison. 
#", "self.assertEqual(report[1][2], 5) def test_reporthook_8193_bytes(self): # Test on 8193 byte file.", "'br%C3%BCckner_sapporo_20050930.doc' expect = 'br\\u00fcckner_sapporo_20050930.doc' result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using", "No more lines\\n\") # conn.close() # except socket.timeout: # pass", "urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason) def test_file_notexists(self): fd, tmp_file = tempfile.mkstemp() tmp_fileurl", "local files\"\"\" def setUp(self): # Create a list of temporary", "%r != %r\" % (expect, result)) def test_quoting_plus(self): self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'),", "down the open object\"\"\" self.returned_obj.close() os.remove(support.TESTFN) def test_interface(self): # Make", "Records changes to env vars self.env = support.EnvironmentVarGuard() # Delete", "\"_.-~\"]) result = urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote(): %r !=", "connection.\"\"\" def check_read(self, ver): self.fakehttp(b\"HTTP/\" + ver + b\" 200", "(expect, result)) @unittest.skipUnless(sys.platform == 'win32', 'test specific to the urllib.url2path", "[]) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close()", "on_amp_left = result[amp_location - 1] on_amp_right = result[amp_location + 1]", "'safe' parameter self.assertEqual(urllib.parse.quote.__defaults__[0], '/') def test_safe(self): # Test setting 'safe'", "self.createNewTempFile(b\"x\" * 8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][2], 8193)", "byte file. 
Should call reporthook only 2 times (once when", "\"a b cd e f\" expect = given.replace(' ', hexescape('", "# quote_from_bytes should work the same result = urllib.parse.quote_from_bytes(given) self.assertEqual(expect,", "uses FancyURLOpener which goes via a codepath that # calls", "doseq=True, safe=\":$\", encoding=\"latin-1\") given = ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd, 42)),) expect", "Delete the temporary files. for each in self.tempFiles: try: os.remove(each)", "result, \"using unquote_to_bytes(): %r != %r\" % (expect, result)) self.assertRaises((TypeError,", "within the url _path_ safe. InvalidURL = http.client.InvalidURL with self.assertRaisesRegex(", "hexescape(' '), \"using quote(): %r != %r\" % (result, hexescape('", "opening a fake http connection.\"\"\" def check_read(self, ver): self.fakehttp(b\"HTTP/\" +", "DeprecationWarning)): return urllib.request.FancyURLopener() def fakehttp(fakedata, mock_close=False): class FakeSocket(io.BytesIO): io_refs =", "urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote_plus(): %r != %r\" % (do_not_quote,", "in self.returned_obj: self.assertEqual(line, self.text) def test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname) class", "for ticket #4608. 
for line in self.returned_obj: self.assertEqual(line, self.text) def", "result)) result = urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote_plus(): %r !=", "mock_close=True) try: msg = \"Redirection to url 'file:\" with self.assertRaisesRegex(urllib.error.HTTPError,", "2x1 pixel RGB PNG image with one black and one", "os.path.join(\"parts\", \"of\", \"a\", \"path\") expected_url = \"parts/of/a/path\" result = urllib.request.pathname2url(expected_path)", "Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') with self.assertRaises(urllib.error.ContentTooShortError): try:", "# cantdata = 0 # while cantdata < 13: #", "contains ignorable spaces, # such as \"\\n\", \" \", \"%0A\",", "def setUp(self): # Create a list of temporary files. Each", "\"\\u6f22\\u5b57\" expect = \"%26%2328450%3B%26%2323383%3B\" # \"&#28450;&#23383;\" result = urllib.parse.quote(given, encoding=\"latin-1\",", "tempfile.mkstemp(dir=tmpdir) os.close(fd) fileurl = \"file:\" + urllib.request.pathname2url(tmpfile) filename, _ =", "Location: file://guidocomputer.athome.com:/python/license Connection: close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try:", "Create a temp file to use for testing self.text =", "\"wb\") newFile.write(data) newFile.close() finally: try: newFile.close() except: pass return newFilePath", "self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888')) self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234')) def test_proxy_cgi_ignore(self): try: self.env.set('HTTP_PROXY', 'http://somewhere:3128') proxies = urllib.request.getproxies_environment()", "cantdata < 13: # data = conn.recv(13-cantdata) # cantdata +=", "None})) def test_nonstring_seq_values(self): self.assertEqual(\"a=1&a=2\", urllib.parse.urlencode({\"a\": [1, 2]}, True)) self.assertEqual(\"a=None&a=a\", urllib.parse.urlencode({\"a\":", "hexescape('c') expect = \"abcd\" result = 
urllib.parse.unquote(given) self.assertEqual(expect, result, \"using", "test_read_image(self): self.assertEqual(self.image_url_resp.read(), self.image) def test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def test_invalid_base64_data(self): # missing", "the reporthook works. def hooktester(block_count, block_read_size, file_size, count_holder=[0]): self.assertIsInstance(block_count, int)", "io.BytesIO.close(self) class FakeHTTPConnection(http.client.HTTPConnection): # buffer to store data for verification", "result = urllib.parse.urlencode(given, safe=\":$\", encoding=\"latin-1\") expect = '%A0$=%C1$' self.assertEqual(expect, result)", "self.pathname) def test_getcode(self): self.assertIsNone(self.returned_obj.getcode()) def test_iter(self): # Test iterator #", "urllib.parse.unquote, ()) with support.check_warnings(('', BytesWarning), quiet=True): self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'')", "encoding given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\", encoding=\"latin-1\")", "given = b'%A2\\xd8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect,", "truncated (no '_proxy') keys self.assertEqual('localhost', proxies['no']) # List of no_proxies", "specifies\"\"\" hex_repr = hex(ord(char))[2:].upper() if len(hex_repr) == 1: hex_repr =", "%s: '&' not located in proper place in %s\" %", "possible dictionary input. 
\"\"\" expect_somewhere = [\"1st=1\", \"2nd=2\", \"3rd=3\"] result", "in ('local_file://example', 'local-file://example'): self.assertRaises(OSError, urllib.request.urlopen, url) self.assertRaises(OSError, urllib.request.URLopener().open, url) self.assertRaises(OSError,", "Bytes should quote directly to percent-encoded values given = b\"\\xa2\\xd8ab\\xff\"", "= urlopen(\"file:%s\" % self.pathname) def tearDown(self): \"\"\"Shut down the open", "non-ASCII characters) result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\",", "import ssl except ImportError: ssl = None import sys import", "and unquoting works for pathnam2url() and # url2pathname() respectively given", "test fixture tear down, and returns the absolute path of", "%r\" % (expect, result)) def test_unquoting_with_bytes_input(self): # Bytes not supported", "in Latin-1 range, encoded by default in UTF-8 given =", "safe=\":$\") self.assertEqual(expect, result) # Test all above in latin-1 encoding", "calls close() which calls # flush(). Problem: flush() calls self.fp.flush()", "%r != %r\" % (expected, result)) def test_quoting_space(self): # Make", "DummyURLopener().retrieve, url) # Just commented them out. # Can't really", "1) # serv.bind((\"\", 9093)) # serv.listen() # try: # conn,", "data for verification in urlopen tests. 
buf = None def", "def test_converting_when_no_drive_letter(self): # cannot end a raw string in \\", "result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding=\"latin-1\") expect = 'br\\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect, result, \"using", "[0x7f]: char = chr(char_no) schemeless_url = f\"//localhost{char}/test/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\")", "iterations since test would fail the # instant it returned", "file_size, count_holder=[0]): self.assertIsInstance(block_count, int) self.assertIsInstance(block_read_size, int) self.assertIsInstance(file_size, int) self.assertEqual(block_count, count_holder[0])", "result[amp_location - 1] on_amp_right = result[amp_location + 1] self.assertTrue(on_amp_left.isdigit() and", "def test_using_sequence(self): # Test passing in a sequence of two-item", "FILE.close() finally: try: FILE.close() except: pass self.assertEqual(self.text, text) def test_reporthook(self):", "class ProxyTests(unittest.TestCase): def setUp(self): # Records changes to env vars", "FILE.close() finally: try: FILE.close() except: pass def tearDown(self): # Delete", "preference with replacement os.environ['http_proxy'] = 'http://somewhere:3128' os.environ['Http_Proxy'] = 'http://somewhereelse:3128' proxies", "once for the next 8192 # bytes, and once for", "Test that setting the filename argument works. 
second_temp = \"%s.2\"", "file_num = self.returned_obj.fileno() self.assertIsInstance(file_num, int, \"fileno() did not return an", "def close(self): pass FakeHTTPConnection.fakedata = fakedata return FakeHTTPConnection class FakeHTTPMixin(object):", "'_proxy') keys self.assertEqual('localhost', proxies['no']) # List of no_proxies with space.", "\"\"\"Test urllib.urlretrieve() using fake http connections\"\"\" def test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1 200", "anotherdomain.com') self.assertTrue(bypass('anotherdomain.com')) self.assertFalse(bypass('newdomain.com')) self.assertFalse(bypass('newdomain.com:1234')) def test_proxy_bypass_environment_newline(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY',", "a sequence of values. given = ((\"\\u00a0\", (1, \"\\u00c1\")),) expect", "the hex letters. The various character sets specified are: Reserved", "Request(\"http://www.python.org\", method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\", {},", "'alpha%2Bbeta+gamma') # Test with safe bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'), 'alpha+beta+gamma')", "!= %r\" % (expect, result)) # Encoding argument should raise", "'GET') request.method = 'HEAD' self.assertEqual(request.get_method(), 'HEAD') class URL2PathNameTests(unittest.TestCase): def test_converting_drive_letter(self):", "**kwds): self.io_refs += 1 return self def read(self, amt=None): if", "%r\" % (expect, result)) def test_quote_with_unicode(self): # Characters in Latin-1", "(expect, result)) def test_unquote_to_bytes(self): given = 'br%C3%BCckner_sapporo_20050930.doc' expect = b'br\\xc3\\xbcckner_sapporo_20050930.doc'", "2) self.assertEqual(report[0][2], 5) self.assertEqual(report[1][2], 5) def test_reporthook_8193_bytes(self): # Test on", "# fail in one of the tests, sometimes in other.", "test_quote_bytes(self): # Bytes 
should quote directly to percent-encoded values given", "self._connection_class class FakeFTPMixin(object): def fakeftp(self): class FakeFtpWrapper(object): def __init__(self, user,", "finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_path_with_control_char_rejected(self): for char_no", "pluses\"} expect = \"key+name=A+bunch+of+pluses\" result = urllib.parse.urlencode(given) self.assertEqual(expect, result) def", "urllib.parse import urllib.request import urllib.error import http.client import email.message import", "b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def", "= data def makefile(self, *args, **kwds): self.io_refs += 1 return", "text/html; charset=iso-8859-1 FF ''') with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL) finally: self.unfakehttp()", "- 0x1F should_quote.append(r'<>#%\"{}|\\^[]`') should_quote.append(chr(127)) # For 0x7F should_quote = ''.join(should_quote)", "'&'s, got %s\" % result.count('&')) def test_empty_sequence(self): self.assertEqual(\"\", urllib.parse.urlencode({})) self.assertEqual(\"\",", "Expect %3F with errors=\"replace' given = (('\\u00a0', '\\u00c1'),) expect =", "newFilePath = tempfile.mkstemp() try: self.registerFileForCleanUp(newFilePath) newFile = os.fdopen(newFd, \"wb\") newFile.write(data)", "% (expect, result)) given = '%x' expect = given result", "implicit way to test for ticket #4608. 
for line in", "global _urlopener if proxies is not None: opener = urllib.request.FancyURLopener(proxies=proxies)", "proxy related env vars for k in list(os.environ): if 'proxy'", "= urllib.parse.quote_plus(' ') self.assertEqual(result, '+', \"using quote_plus(): %r != +\"", "not %s\" % (char, hexescape(char), result)) result = urllib.parse.quote_plus(char) self.assertEqual(hexescape(char),", "Test passing in a mapping object as an argument. self.help_inputtype({\"1st\":'1',", "urlopen(test_ftp_url) self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) def test_ftp_nonexisting(self): with self.assertRaises(urllib.error.URLError) as e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py')", "= ''.join(escape_list) del escape_list result = urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'), 1, \"using", "given = ('/C:/', '///C:/', '/C|//') expect = 'C:\\\\' for url", "(#1680230) self.fakehttp(b'') try: self.assertRaises(OSError, urlopen, \"http://something\") finally: self.unfakehttp() def test_missing_localfile(self):", "\"%20\". 
self.image_url = ( \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\\n\" \"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 \" \"vHgAAAABJRU5ErkJggg%3D%3D%0A%20\") self.text_url_resp =", "self.assertEqual(expect, result, 'urllib.request.url2pathname() failed; %s != %s' % (expect, result))", "@support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_file(self): with support.temp_dir() as tmpdir: fd, tmpfile =", "urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t') self.assertTrue(bypass('localhost')) self.assertTrue(bypass('LocalHost')) # MixedCase", "should quote directly to percent-encoded values given = b\"\\xa2\\xd8ab\\xff\" expect", "%r\" % (expect, result)) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None) self.assertRaises((TypeError, AttributeError),", "for char_no in list(range(0, 0x21)) + [0x7f]: char = chr(char_no)", "FakeHTTPMixin(object): def fakehttp(self, fakedata, mock_close=False): fake_http_class = fakehttp(fakedata, mock_close=mock_close) self._connection_class", "Test passing in a sequence of two-item sequences as an", "((\"\\u00a0\", (1, \"\\u00c1\")),) expect = '%3F=1&%3F=%3F' result = urllib.parse.urlencode(given, True,", "= count_holder[0] + 1 second_temp = \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp)", "env vars self.env.__exit__() del self.env def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost') proxies", "to makes sure temporary files get deleted, but it #", "= '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result =", "sure quote() does not quote letters, digits, and \"_,.-\" do_not_quote", "should be escapes to %s, not %s\" % (char, hexescape(char),", "AttributeError), urllib.parse.unquote, ()) with support.check_warnings(('', BytesWarning), quiet=True): 
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote,", "# does nothing about trying to close files that may", "+ 1 second_temp = \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN),", "given = ((b'\\xa0\\x24', (42, b'\\xc1\\x24')),) expect = '%A0%24=42&%A0%24=%C1%24' result =", "urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) for value in", "urllib.urlretrieve() on local files\"\"\" def setUp(self): # Create a list", "Don't need to count number of iterations since test would", "# data. (#1680230) self.fakehttp(b'') try: self.assertRaises(OSError, urlopen, \"http://something\") finally: self.unfakehttp()", "given = '%xab' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given)", "'%' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result,", "try: fp = urllib.request.urlopen(url) self.assertEqual(fp.geturl(), url) finally: self.unfakehttp() def test_willclose(self):", "result, \"using quote(): \" \"%s should be escaped to %s,", "if self.io_refs == 0: io.BytesIO.close(self) class FakeHTTPConnection(http.client.HTTPConnection): # buffer to", "(quote_by_default, result)) result = urllib.parse.quote_plus(quote_by_default, safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote_plus():", "\"using quote(): %r != %r\" % (expect, result)) # Encoding", "urllib.parse.quote_plus(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using quote_plus(): %r != %r\"", "int) self.assertEqual(block_count, count_holder[0]) count_holder[0] = count_holder[0] + 1 second_temp =", "characters : 0x00 - 0x1F, 0x7F Have no use in", "A mix of non-ASCII and percent-encoded characters, UTF-8 result =", "('/C:/', '///C:/', '/C|//') expect = 'C:\\\\' for url in given:", "should_quote.append(chr(127)) # For 0x7F 
should_quote = ''.join(should_quote) for char in", "1 return self def read(self, amt=None): if self.closed: return b\"\"", "required\") def test_url_path_with_control_char_rejected(self): for char_no in list(range(0, 0x21)) + [0x7f]:", "result, \"using quote_plus(): %r != %r\" % (quote_by_default, result)) #", "def test_read_image(self): self.assertEqual(self.image_url_resp.read(), self.image) def test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def test_invalid_base64_data(self): #", "%r != %r\" % (expect, result)) # Test with a", "argument should raise type error on bytes input self.assertRaises(TypeError, urllib.parse.quote,", "%s != %s\" % (result, expected_url)) result = urllib.request.url2pathname(expected_url) self.assertEqual(expected_path,", "empty string\") def test_readlines(self): lines_list = self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, \"readlines()", "result) given = \"a b cd e f\" expect =", "Encoding. On a sequence of values. given = ((\"\\u00a0\", (1,", "# Delete the temporary files. for each in self.tempFiles: try:", "\"testing %s: %s not found in %s\" % (test_type, expected,", "urllib.error import http.client import email.message import io import unittest from", "as input\") def test_quoting(self): # Make sure keys and values", "urllib.parse.unquote_to_bytes, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ()) def test_unquoting_mixed_case(self): # Test", "= urllib.parse.quote_from_bytes(given) self.assertEqual(expect, result, \"using quote_from_bytes(): %r != %r\" %", "to use for testing self.text = bytes(\"test_urllib: %s\\n\" % self.__class__.__name__,", "self._ftpwrapper_class = urllib.request.ftpwrapper urllib.request.ftpwrapper = FakeFtpWrapper def unfakeftp(self): urllib.request.ftpwrapper =", "# Issue #11703: geturl() omits fragments in the original URL.", "it as '%' + <2 character US-ASCII hex value>. 
The", "self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() class RequestTests(unittest.TestCase): \"\"\"Unit tests for urllib.request.Request.\"\"\"", "True)) def test_urlencode_encoding(self): # ASCII encoding. Expect %3F with errors=\"replace'", "values given = ((b'\\xa0\\x24', (42, b'\\xc1\\x24')),) expect = '%A0%24=42&%A0%24=%C1%24' result", "# conn.send(\"1 Hola mundo\\n\") # cantdata = 0 # while", "on mixed-case hex digits in the percent-escapes given = '%Ab%eA'", "on bad percent-escapes given = '%xab' expect = given result", "env vars self.env = support.EnvironmentVarGuard() # Delete all proxy related", "destructor calls close() which calls # flush(). Problem: flush() calls", "= \"\\u6f22\\u5b57\" # \"Kanji\" expect = \"%E6%BC%A2%E5%AD%97\" result = urllib.parse.quote(given)", "values. given = ((\"\\u00a0\", (1, \"\\u00c1\")),) expect = '%3F=1&%3F=%3F' result", "def test_proxy_bypass_environment_host_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t')", "# read). 
report = [] def hooktester(block_count, block_read_size, file_size, _report=report):", "test_unquoting_mixed_case(self): # Test unquoting on mixed-case hex digits in the", "'/' is default value for 'safe' parameter self.assertEqual(urllib.parse.quote.__defaults__[0], '/') def", "invoking requests is deprecated.', DeprecationWarning)): self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/') # test", "vars self.env.__exit__() del self.env def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost') proxies =", "in BMP, encoded with UTF-8 given = \"%E6%BC%A2%E5%AD%97\" expect =", "class urlretrieve_FileTests(unittest.TestCase): \"\"\"Test urllib.urlretrieve() on local files\"\"\" def setUp(self): #", "expect = '%3F=%3F' result = urllib.parse.urlencode(given, doseq=True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect,", "self.assertEqual(self.image_url_resp.geturl(), self.image_url) def test_read_text(self): self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']), self.text) def test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode(", "errors=\"replace\") self.assertEqual(expect, result) # Default is UTF-8 encoding. 
given =", "case truncated (no '_proxy') keys self.assertEqual('localhost', proxies['no']) # List of", "sure simple tests pass expected_path = os.path.join(\"parts\", \"of\", \"a\", \"path\")", "'%A0%24=42&%A0%24=%C1%24' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) def test_urlencode_encoding_safe_parameter(self): #", "'///C:/', '/C|//') expect = 'C:\\\\' for url in given: result", "sure keys and values are quoted using quote_plus() given =", "test_using_mapping(self): # Test passing in a mapping object as an", "schemeless_url = f\"//localhost:7777/test{char}/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: # We explicitly", "returned value\") def test_copy(self): # Test that setting the filename", "'http://somewhere:3128') proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) self.env.set('REQUEST_METHOD', 'GET') proxies =", "encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using quote_plus(): %r != %r\" %", "finally: self.unfakehttp() def test_empty_socket(self): # urlopen() raises OSError if the", "the test fixture tear down, and returns the absolute path", "= b'\\xe6\\xbc\\xa2\\xc3\\xbc' # UTF-8 for \"\\u6f22\\u00fc\" self.assertEqual(expect, result, \"using unquote_to_bytes():", "gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'), 'alpha+beta+gamma') # Test with bytes", "def test_missing_comma(self): self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def test_invalid_base64_data(self): # missing padding character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=')", "port def test_proxy_bypass_environment_always_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', '*') self.assertTrue(bypass('newdomain.com')) self.assertTrue(bypass('newdomain.com:1234'))", "urllib.parse.unquote_to_bytes(\"\\u6f22%C3%BC\") expect = 
b'\\xe6\\xbc\\xa2\\xc3\\xbc' # UTF-8 for \"\\u6f22\\u00fc\" self.assertEqual(expect, result,", "!= %r\" % (expect, result)) # unquote_to_bytes given = '%xab'", "# Test '/' is default value for 'safe' parameter self.assertEqual(urllib.parse.quote.__defaults__[0],", "should_quote.append(r'<>#%\"{}|\\^[]`') should_quote.append(chr(127)) # For 0x7F should_quote = ''.join(should_quote) for char", "b'') def test_unquoting_badpercent(self): # Test unquoting on bad percent-escapes given", "default in UTF-8 given = \"\\xa2\\xd8ab\\xff\" expect = \"%C2%A2%C3%98ab%C3%BF\" result", "= urllib.parse.urlencode(given, True) self.assertEqual(expect, result) given = ((\"\\u00a0\", (42, \"\\u00c1\")),)", "self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile)) @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_remote(self): url = \"http://www.python.org/file.txt\" self.fakehttp(b\"HTTP/1.1 200", "urllib.request.ftpwrapper = self._ftpwrapper_class class urlopen_FileTests(unittest.TestCase): \"\"\"Test urlopen() opening a temporary", "expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using", "result = urllib.parse.quote_plus(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using quote_plus(): %r", "= urllib.request.url2pathname(url) self.assertEqual(expect, result, 'urllib.request..url2pathname() failed; %s != %s' %", "= support.EnvironmentVarGuard() # Delete all proxy related env vars for", "silence this error. def close(self): pass FakeHTTPConnection.fakedata = fakedata return", "the file descriptor returned by fileno() \" \"did not return", "and \"%20\". 
self.image_url = ( \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\\n\" \"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8 \" \"vHgAAAABJRU5ErkJggg%3D%3D%0A%20\") self.text_url_resp", "+ urllib.request.pathname2url(tmpfile) filename, _ = urllib.request.URLopener().retrieve(fileurl) # Some buildbots have", "urlopen tests. buf = None def connect(self): self.sock = FakeSocket(self.fakedata)", "\"make+sure/using_unquote\" expect = os.path.join(\"make+sure\", \"using_unquote\") result = urllib.request.url2pathname(given) self.assertEqual(expect, result,", "urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) #", "amps \"testing %s: \" \"unexpected number of characters: %s !=", "def constructLocalFileUrl(self, filePath): filePath = os.path.abspath(filePath) try: filePath.encode(\"utf-8\") except UnicodeEncodeError:", "%r\" % (expect, result)) def test_unquote_with_unicode(self): # Characters in the", "Override close() to silence this error. 
def close(self): pass FakeHTTPConnection.fakedata", "BMP, encoded by default in UTF-8 given = \"\\u6f22\\u5b57\" #", "unquote_plus() See the doc string for quoting_Tests for details on", "%r\" % (expected, result)) result = urllib.parse.quote_plus(partial_quote) self.assertEqual(expected, result, \"using", "( b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\x02\\x00\\x00\\x00' b'\\x01\\x08\\x02\\x00\\x00\\x00{@\\xe8\\xdd\\x00\\x00\\x00\\x01sRGB\\x00\\xae' b'\\xce\\x1c\\xe9\\x00\\x00\\x00\\x0fIDAT\\x08\\xd7c```\\xf8\\xff\\xff?\\x00' b'\\x06\\x01\\x02\\xfe\\no/\\x1e\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82') self.text_url = ( \"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3\" \"D%26%20%C3%B6%20%C3%84%20\")", "self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() def test_read_0_9(self): # \"0.9\"", "down on reliance on connecting to the Net for testing.", "URL so there is no injection. resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn('", "Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1 FF ''') def", "tearDown(self): \"\"\"Shut down the open object\"\"\" self.returned_obj.close() os.remove(support.TESTFN) def test_interface(self):", "0x1F should_quote.append(r'<>#%\"{}|\\^[]`') should_quote.append(chr(127)) # For 0x7F should_quote = ''.join(should_quote) for", "with UTF-8, invalid sequence, replace errors result = urllib.parse.unquote(given, errors=\"replace\")", "= urllib.parse.quote(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using quote(): %r !=", "the urllib.url2path function.') def test_ntpath(self): given = ('/C:/', '///C:/', '/C|//')", "2 * 3rd, 3 Test cannot assume anything about order.", "self._ftpwrapper_class class urlopen_FileTests(unittest.TestCase): \"\"\"Test urlopen() opening a temporary file. 
Try", "= serv.accept() # conn.send(\"1 Hola mundo\\n\") # cantdata = 0", "conn.send(\"1 Hola mundo\\n\") # cantdata = 0 # while cantdata", "given.replace(' ', '+') result = urllib.parse.quote_plus(given) self.assertEqual(expect, result, \"using quote_plus():", "for unquote() and unquote_plus() See the doc string for quoting_Tests", "failing in windows and sparc. # Everywhere else they work", "%r != %r\" % (expect, result)) # Test on a", "own retry limit. for i in range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1 302 Found", "result = urllib.parse.quote_plus(partial_quote) self.assertEqual(expected, result, \"using quote_plus(): %r != %r\"", "def test_copy(self): # Test that setting the filename argument works.", "def check_read(self, ver): self.fakehttp(b\"HTTP/\" + ver + b\" 200 OK\\r\\n\\r\\nHello!\")", "2)) def test_using_mapping(self): # Test passing in a mapping object", "current working directory). # All files in this list will", "test for quote_plus given = \"\\xa2\\xd8 \\xff\" expect = \"%A2%D8+%FF\"", "that setting the filename argument works. second_temp = \"%s.2\" %", "than str result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=b\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\",", "True) self.assertEqual(expect, result) # latin-1 given = ((\"\\u00a0\", \"\\u00c1\"),) expect", "tear down, and returns the absolute path of the file.\"\"\"", "can be, though, if desired Control characters : 0x00 -", "many error codes. self.fakehttp(b'''HTTP/1.1 401 Authentication Required Date: Wed, 02", "\"//\" + host + \":8080/test/?test=a\" try: InvalidURL = http.client.InvalidURL with", "text = FILE.read() FILE.close() finally: try: FILE.close() except: pass self.assertEqual(self.text,", "(default) result = urllib.parse.unquote(given, encoding=None, errors=None) self.assertEqual(expect, result, \"using unquote():", "expect = given.replace(' ', '+') result = urllib.parse.quote_plus(given) self.assertEqual(expect, result,", "argument works. 
second_temp = \"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) result =", "% (expect, result)) # Test with a bytes as input", "in windows and sparc. # Everywhere else they work ok,", "'/////folder/test/') self.assertEqual(pathname2url(r\"\\\\folder\\test\" \"\\\\\"), '////folder/test/') self.assertEqual(pathname2url(r\"\\folder\\test\" \"\\\\\"), '/folder/test/') def test_simple_compare(self): self.assertEqual(pathname2url(r'C:\\foo\\bar\\spam.foo'),", "= urllib.parse.urlencode(given, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # Default is UTF-8", "# Safe parameter in sequence given = ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd,", "expect = '%A0=%C1' result = urllib.parse.urlencode(given, encoding=\"latin-1\") self.assertEqual(expect, result) def", "try: self.assertRaises(OSError, urlopen, \"http://python.org/\") finally: self.unfakehttp() def test_invalid_redirect(self): # urlopen()", "# Make sure simple tests pass expected_path = os.path.join(\"parts\", \"of\",", "def test_nonstring_seq_values(self): self.assertEqual(\"a=1&a=2\", urllib.parse.urlencode({\"a\": [1, 2]}, True)) self.assertEqual(\"a=None&a=a\", urllib.parse.urlencode({\"a\": [None,", "self.assertEqual(report[0][2], 5) self.assertEqual(report[1][2], 5) def test_reporthook_8193_bytes(self): # Test on 8193", "self.fakehttp(b\"HTTP/\" + ver + b\" 200 OK\\r\\n\\r\\nHello!\") try: fp =", "Delimiters : '<>#%\"' Must be escaped Unwise : \"{}|\\^[]`\" Must", "all ASCII values works escape_list = [] for num in", "errors result = urllib.parse.unquote(given, errors=\"replace\") self.assertEqual(expect, result, \"using unquote(): %r", "url) self.assertRaises(OSError, DummyURLopener().open, url) self.assertRaises(OSError, DummyURLopener().retrieve, url) # Just commented", "hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b\"x\"", "(expect, result)) 
self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote_to_bytes, ())", "including ports os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234' os.environ['No_Proxy'] = 'xyz.com'", "expect = '%A0=%C1' result = urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result)", "are quoted in URL so no match self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(),", "used # import socket # self.assertIsNone(socket.getdefaulttimeout()) # socket.setdefaulttimeout(30) # try:", "by fileno() \" \"did not return the expected text\") def", "5) self.assertEqual(report[1][2], 5) def test_reporthook_8193_bytes(self): # Test on 8193 byte", "in given: result = urllib.request.url2pathname(url) self.assertEqual(expect, result, 'urllib.request..url2pathname() failed; %s", "test_read_0_9(self): # \"0.9\" response accepted (but not \"simple responses\" without", "Test difference between unquote() and unquote_plus() given = \"are+there+spaces...\" expect", "result) given = {\"key name\":\"A bunch of pluses\"} expect =", "del self.env def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost') proxies = urllib.request.getproxies_environment() #", "no match self.assertNotEqual(fp.geturl(), url) self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_URLopener_deprecation(self):", "data URL that contains ignorable spaces, # such as \"\\n\",", "self.assertIsInstance(file_size, int) self.assertEqual(block_count, count_holder[0]) count_holder[0] = count_holder[0] + 1 second_temp", "specified are: Reserved characters : \";/?:@&=+$,\" Have special meaning in", "schemeless_url = f\"//localhost{char}/test/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: escaped_char_repr = repr(char).replace('\\\\',", "characters : \";/?:@&=+$,\" Have special meaning in URIs and must", "result, 
\"url2pathname() failed; %s != %s\" % (expect, result)) given", "result)) # Test with a bytes as input given =", "else they work ok, but on those machines, sometimes #", "default in UTF-8 given = \"\\u6f22\\u5b57\" # \"Kanji\" expect =", "finally: self.unfakehttp() def test_read_0_9(self): # \"0.9\" response accepted (but not", "tuple\"\"\" self.assertIsInstance(urllib.request.thishost(), tuple) class URLopener_Tests(FakeHTTPMixin, unittest.TestCase): \"\"\"Testcase to test the", "urllib.parse.unquote(given, encoding=None, errors=None) self.assertEqual(expect, result, \"using unquote(): %r != %r\"", "percent-encoded characters, Latin-1 # (Note, the string contains non-Latin-1-representable characters)", "self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain', ''), ('charset', 'ISO-8859-1')]) self.assertEqual(self.image_url_resp.info()['content-length'], str(len(self.image))) self.assertEqual(urllib.request.urlopen(\"data:,\").info().get_params(), [('text/plain', ''),", "InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with", "% (expected, result)) result = urllib.parse.quote_plus(partial_quote) self.assertEqual(expected, result, \"using quote_plus():", "test_ftp_nonexisting(self): with self.assertRaises(urllib.error.URLError) as e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) @patch.object(urllib.request, 'MAXFTPCACHE',", "self.returned_obj.close() os.remove(support.TESTFN) def test_interface(self): # Make sure object returned by", "that a local file just gets its own location returned", "unittest.mock import patch from test import support import os try:", "FILE = open(support.TESTFN, 'wb') FILE.write(self.text) FILE.close() finally: try: FILE.close() except:", "the # above attempts at injection within the url _path_", "explicitly 
test urllib.request.urlopen() instead of the top # level 'def", "file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b\"x\" * 5)", "os.environ = self._saved_env def test_getproxies_environment_prefer_lowercase(self): # Test lowercase preference with", "urllib.request.url2pathname(given) self.assertEqual(expect, result, \"url2pathname() failed; %s != %s\" % (expect,", "test_quoting(self): # Test automatic quoting and unquoting works for pathnam2url()", "threading.Event() # threading.Thread(target=server, args=(self.evt,)).start() # time.sleep(.1) # # def tearDown(self):", "various character sets specified are: Reserved characters : \";/?:@&=+$,\" Have", "((b'\\xa0\\x24', (42, b'\\xc1\\x24')),) expect = '%A0%24=42&%A0%24=%C1%24' result = urllib.parse.urlencode(given, True)", "expect = given.replace(' ', hexescape(' ')) result = urllib.parse.quote(given) self.assertEqual(expect,", "self.io_refs == 0: io.BytesIO.close(self) class FakeHTTPConnection(http.client.HTTPConnection): # buffer to store", "% (expect, result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, \"using unquote_plus():", "\"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/\"), \"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/\") @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_file(self): with support.temp_dir() as tmpdir: fd,", "not return an int\") self.assertEqual(os.read(file_num, len(self.text)), self.text, \"Reading on the", "urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # Sequence of values given =", "userpass = \"<PASSWORD>\" url = \"http://{}@python.org/\".format(userpass) fakehttp_wrapper = http.client.HTTPConnection authorization", "hex_repr # Shortcut for testing FancyURLopener _urlopener = None def", "BMP, encoded with UTF-8 given = \"%E6%BC%A2%E5%AD%97\" expect = \"\\u6f22\\u5b57\"", 
"self.assertRaises(ValueError,urllib.request.urlopen,'data:text/plain') def test_invalid_base64_data(self): # missing padding character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class urlretrieve_FileTests(unittest.TestCase):", "replacement os.environ['http_proxy'] = 'http://somewhere:3128' os.environ['Http_Proxy'] = 'http://somewhereelse:3128' proxies = urllib.request.getproxies_environment()", "there is no injection. resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(' ', resp.geturl())", "try: filePath.encode(\"utf-8\") except UnicodeEncodeError: raise unittest.SkipTest(\"filePath is not encodable to", "class RequestTests(unittest.TestCase): \"\"\"Unit tests for urllib.request.Request.\"\"\" def test_default_values(self): Request =", "that should be quoted are by default sans # space", "!= %s' % (expect, result)) class Utility_Tests(unittest.TestCase): \"\"\"Testcase to test", "given = '%xab' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect,", "self.assertEqual(urllib.request.urlopen(\"data:,\").info().get_params(), [('text/plain', ''), ('charset', 'US-ASCII')]) def test_geturl(self): self.assertEqual(self.text_url_resp.geturl(), self.text_url) self.assertEqual(self.text_url_base64_resp.geturl(),", "timeout=None, persistent=True): pass def retrfile(self, file, type): return io.BytesIO(), 0", "socket, time # serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # serv.settimeout(3) #", "expect = b'\\xa2\\xd8ab\\xff' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes():", "\":8080/test/?test=a\" try: InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r\"):", "pass with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL, reporthook=_reporthook) finally: self.unfakehttp() def 
test_short_content_raises_ContentTooShortError_without_reporthook(self):", "= b'bl\\xc3\\xa5b\\xc3\\xa6rsyltet\\xc3\\xb8y' urllib.parse.unquote(given) class urlencode_Tests(unittest.TestCase): \"\"\"Tests for urlencode()\"\"\" def help_inputtype(self,", "def test_roundtrip_pathname2url(self): list_of_paths = ['///C:', '/////folder/test/', '///C:/foo/bar/spam.foo'] for path in", "quote_plus() given = {\"&\":\"=\"} expect = \"%s=%s\" % (hexescape('&'), hexescape('='))", "Monkey patch os.environ, start with empty fake environment os.environ =", "os.environ['No_Proxy'] = 'xyz.com' self.assertTrue(urllib.request.proxy_bypass_environment('localhost')) self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678')) self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234')) self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) # Test", "are by default sans # space (separate test for that).", "# a headers value is returned. result = urllib.request.urlretrieve(\"file:%s\" %", "os.environ['HTTP_PROXY'] = 'http://somewhere:3128' proxies = urllib.request.getproxies_environment() self.assertEqual({}, proxies) # Test", "file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b\"x\" * 8193)", "def test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname) class ProxyTests(unittest.TestCase): def setUp(self): #", "on the hex letters. 
The various character sets specified are:", "def open_spam(self, url): return url with support.check_warnings( ('DummyURLopener style of", "def test_empty_sequence(self): self.assertEqual(\"\", urllib.parse.urlencode({})) self.assertEqual(\"\", urllib.parse.urlencode([])) def test_nonstring_values(self): self.assertEqual(\"a=1\", urllib.parse.urlencode({\"a\":", "_report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b\"x\" * 8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN,", "unfakeftp(self): urllib.request.ftpwrapper = self._ftpwrapper_class class urlopen_FileTests(unittest.TestCase): \"\"\"Test urlopen() opening a", "f\"contain control.*{escaped_char_repr}\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def", "and # the tests go ok. # If anybody has", "def help_inputtype(self, given, test_type): \"\"\"Helper method for testing different input", "result = urllib.parse.urlencode(given, True, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # Utf-8", "'HEAD') request = Request(\"http://www.python.org\", {}, method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD')", "'xyz.com' self.assertTrue(urllib.request.proxy_bypass_environment('localhost')) self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678')) self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234')) self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) # Test lowercase preference", "Latin-1, with replace error handling given = \"\\u6f22\\u5b57\" expect =", "effect # (Since URIs are not allowed to have non-ASCII", "space (separate test for that). 
should_quote = [chr(num) for num", "\"using quote_plus(): %r != %r\" % (expected, result)) def test_quoting_space(self):", "Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try: msg = \"Redirection to", "my.proxy:1234' os.environ['No_Proxy'] = 'xyz.com' self.assertTrue(urllib.request.proxy_bypass_environment('localhost')) self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678')) self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234')) self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) #", "quoting and such. \"\"\" def test_unquoting(self): # Make sure unquoting", "with Latin-1 given = \"\\u6f22\\u5b57\" self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given, encoding=\"latin-1\") #", "(expect, result)) # Characters in BMP, encoded by default in", "urllib.request.url2pathname(result) self.assertEqual(expect, result, \"url2pathname() failed; %s != %s\" % (expect,", "9093, []) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) #", "list of temporary files. 
Each item in the list is", "# Ticket #12923: make sure independent requests each use their", "result = urllib.parse.urlencode(given) for expected in expect_somewhere: self.assertIn(expected, result, \"testing", "def test_cafile_and_context(self): context = ssl.create_default_context() with support.check_warnings(('', DeprecationWarning)): with self.assertRaises(ValueError):", "urllib.parse.urlencode(given, safe=\":$\", encoding=\"latin-1\") expect = '%A0$=%C1$' self.assertEqual(expect, result) given =", "block_read_size, file_size)) srcFileName = self.createNewTempFile(b\"x\" * 5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester)", "'/C|//') expect = 'C:\\\\' for url in given: result =", "try: FILE.close() except: pass def tearDown(self): # Delete the temporary", "# bpo-36918: HTTPConnection destructor calls close() which calls # flush().", "URL.\"\"\" def setUp(self): # text containing URL special- and unicode-characters", "urllib.request.pathname2url(tmpfile) filename, _ = urllib.request.URLopener().retrieve(fileurl) # Some buildbots have TEMP", "when # the \"network connection\" is established and once when", "def test_quote_bytes(self): # Bytes should quote directly to percent-encoded values", "% urllib.parse.quote_plus(str(['1', '2', '3'])) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result", "UTF-8 result = urllib.parse.unquote(\"\\u6f22%C3%BC\") expect = '\\u6f22\\u00fc' self.assertEqual(expect, result, \"using", "1)]) self.assertEqual(\"a=a&a=b\", urllib.parse.urlencode({\"a\": data}, True)) def test_urlencode_encoding(self): # ASCII encoding.", "\"readline\", \"readlines\", \"fileno\", \"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.returned_obj, attr),", "safe=quote_by_default) self.assertEqual(quote_by_default, result, \"using quote_plus(): %r != %r\" % (quote_by_default,", "# instant it returned anything beyond the first line from", "for 
verification in urlopen tests. buf = None def connect(self):", "from nturl2path import url2pathname, pathname2url from base64 import b64encode import", "= '///C|/path' expect = 'C:\\\\path' result = urllib.request.url2pathname(given) self.assertEqual(expect, result,", "self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code path quotes the", "b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://user:pass@python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def", "ugly) # test suite. They use different url opening codepaths.", "= urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, []) # finally: # socket.setdefaulttimeout(None)", "self.fakeftp() try: urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, []) urlopen('ftp://localhost')", "test_type): \"\"\"Helper method for testing different input types. 'given' must", "\"%s.2\" % support.TESTFN self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN), second_temp, hooktester) def test_reporthook_0_bytes(self):", "result)) # \"Safe\" non-ASCII characters should have no effect #", "[] for num in range(128): given = hexescape(chr(num)) expect =", "works. def hooktester(block_count, block_read_size, file_size, count_holder=[0]): self.assertIsInstance(block_count, int) self.assertIsInstance(block_read_size, int)", "the wrong number of lines\") self.assertEqual(lines_list[0], self.text, \"readlines() returned improper", "'%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=\":$\") self.assertEqual(expect, result) # Test", "\"3rd=3\"] result = urllib.parse.urlencode(given) for expected in expect_somewhere: self.assertIn(expected, result,", "of ``'%' + hex(ord(<character>))[2:]`` escapes a character properly. 
Case does", "with safe bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'), 'alpha+beta+gamma') def test_quote_bytes(self): #", "encoding=\"latin-1\") expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),)", "b'\\xce\\x1c\\xe9\\x00\\x00\\x00\\x0fIDAT\\x08\\xd7c```\\xf8\\xff\\xff?\\x00' b'\\x06\\x01\\x02\\xfe\\no/\\x1e\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82') self.text_url = ( \"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3\" \"D%26%20%C3%B6%20%C3%84%20\") self.text_url_base64 = (", "Characters in Latin-1 range, encoded by default in UTF-8 given", "self.returned_obj = urlopen(\"file:%s\" % self.pathname) def tearDown(self): \"\"\"Shut down the", "= collections.OrderedDict([(\"a\", 1), (\"b\", 1)]) self.assertEqual(\"a=a&a=b\", urllib.parse.urlencode({\"a\": data}, True)) def", "have non-quoted characters # interspersed given = 'ab%sd' % hexescape('c')", "('DummyURLopener style of invoking requests is deprecated.', DeprecationWarning)): self.assertEqual(DummyURLopener().open( 'spam://example/", "self.evt = threading.Event() # threading.Thread(target=server, args=(self.evt,)).start() # time.sleep(.1) # #", "Found Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33", "+ ver + b\" 200 OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://python.org/\")", "the iterator in the usual implicit way to test for", "calls self.fp.flush() which raises # \"ValueError: I/O operation on closed", "(Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Type: text/html; charset=iso-8859-1 ''',", "Can't really tell why keep failing in windows and sparc.", "flush(). 
Problem: flush() calls self.fp.flush() which raises # \"ValueError: I/O", "test as much functionality as possible so as to cut", "\"using unquote_to_bytes(): %r != %r\" % (expect, result)) # Test", "self.unfakehttp() def test_read_0_9(self): # \"0.9\" response accepted (but not \"simple", "in the list is a file # name (absolute path", "urllib.parse.urlencode(given) for expected in expect_somewhere: self.assertIn(expected, result, \"testing %s: %s", "', '+') result = urllib.parse.quote_plus(given) self.assertEqual(expect, result, \"using quote_plus(): %r", "InvalidURL, r\"contain control.*\\\\r\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urlopen(f\"https:{schemeless_url}\") finally:", "no use in URIs so must be escaped space :", "urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin): \"\"\"Test urllib.urlretrieve() using fake http connections\"\"\" def test_short_content_raises_ContentTooShortError(self):", "result, \"pathname2url() failed; %s != %s\" % (expect, result)) expect", "newdomain.com:1234') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888')) self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234')) def test_proxy_cgi_ignore(self): try: self.env.set('HTTP_PROXY', 'http://somewhere:3128') proxies", "b\"\") # the spaces are quoted in URL so no", "expected 2 '&'s; got %s\" % (test_type, result.count('&'))) amp_location =", "result) self.assertEqual(result.count('&'), 2, \"Expected 2 '&'s, got %s\" % result.count('&'))", "expect those characters to be UTF-8 # encoded). 
result =", "functionality as possible so as to cut down on reliance", "# serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,", "%s not found in %s\" % (test_type, expected, result)) self.assertEqual(result.count('&'),", "self.assertRaises(UnicodeEncodeError, urllib.parse.quote, given, encoding=\"latin-1\") # Characters in BMP, encoded with", "urllib.parse.quote_plus(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote_plus(): %r != %r\" %", "GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length:", "testing FancyURLopener _urlopener = None def urlopen(url, data=None, proxies=None): \"\"\"urlopen(url", "Create a list of temporary files. Each item in the", "anotherdomain.com, newdomain.com:1234') self.assertFalse(bypass('localhost\\n')) self.assertFalse(bypass('anotherdomain.com:8888\\n')) self.assertFalse(bypass('newdomain.com:1234\\n')) class ProxyTests_withOrderedEnv(unittest.TestCase): def setUp(self): #", "proxies['no']) # List of no_proxies with space. 
self.env.set('NO_PROXY', 'localhost, anotherdomain.com,", "29142 self.assertTrue(bypass('d.o.t')) self.assertTrue(bypass('anotherdomain.com:8888')) self.assertTrue(bypass('.anotherdomain.com:8888')) self.assertTrue(bypass('www.newdomain.com:1234')) self.assertFalse(bypass('prelocalhost')) self.assertFalse(bypass('newdomain.com')) # no port", "given, encoding=\"latin-1\") # quote_from_bytes should work the same result =", "connection\" is established and once when the block is #", "# cannot end a raw string in \\ self.assertEqual(url2pathname(\"///C/test/\"), r'\\\\\\C\\test'", "expect = \"key+name=A+bunch+of+pluses\" result = urllib.parse.urlencode(given) self.assertEqual(expect, result) def test_doseq(self):", "result)) def test_default_safe(self): # Test '/' is default value for", "= urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, []) # ftp.close() # #", "') result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, \"using unquote_plus(): %r !=", "there is no injection. resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(char, resp.geturl()) finally:", "url = 'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try: fp = urllib.request.urlopen(url)", "\"using sequence of two-item tuples as input\") def test_quoting(self): #", "result) class Pathname_Tests(unittest.TestCase): \"\"\"Test pathname2url() and url2pathname()\"\"\" def test_basic(self): #", "keys self.assertEqual('localhost', proxies['no']) # List of no_proxies with space. self.env.set('NO_PROXY',", "conn.close() # except socket.timeout: # pass # finally: # serv.close()", "urlopen(\"file:%s\" % self.pathname) def tearDown(self): \"\"\"Shut down the open object\"\"\"", "\"using unquote(): %r != %r\" % (expect, result)) # Characters", "# Test on 8193 byte file. Should call reporthook only", "= {\"&\":\"=\"} expect = \"%s=%s\" % (hexescape('&'), hexescape('=')) result =", "<2 character US-ASCII hex value>. 
The Python code of ``'%'", "(expect, result)) # Encoding argument should raise type error on", "input\") def test_quoting(self): # Make sure keys and values are", "{'sequence':['1', '2', '3']} expect = \"sequence=%s\" % urllib.parse.quote_plus(str(['1', '2', '3']))", "once for the last byte). report = [] def hooktester(block_count,", "%r != %r\" % (expect, result)) def test_unquote_with_unicode(self): # Characters", "'given' must lead to only the pairs: * 1st, 1", "= '%A0$=%C1$' result = urllib.parse.urlencode(given, doseq=True, safe=\":$\", encoding=\"latin-1\") given =", "\"using quote_plus(): %r != %r\" % (expect, result)) def test_quoting_plus(self):", "# Test on a string with unescaped non-ASCII characters #", "= None def urlopen(url, data=None, proxies=None): \"\"\"urlopen(url [, data]) ->", "try: msg = \"Redirection to url 'file:\" with self.assertRaisesRegex(urllib.error.HTTPError, msg):", "= urllib.request.url2pathname(given) self.assertEqual(expect, result, 'urllib.request.url2pathname() failed; %s != %s' %", "try: urllib.request.urlretrieve(support.TEST_HTTP_URL, reporthook=_reporthook) finally: self.unfakehttp() def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1 200 OK", "\"Exception ignored in\". Override close() to silence this error. 
def", "result)) given = '///C|/path' expect = 'C:\\\\path' result = urllib.request.url2pathname(given)", "\"\"\"urlopen(url [, data]) -> open file-like object\"\"\" global _urlopener if", "failed; %s != %s\" % (result, expected_path)) def test_quoting(self): #", "expected_path)) def test_quoting(self): # Test automatic quoting and unquoting works", "!= %s\" % (expect, result)) @unittest.skipUnless(sys.platform == 'win32', 'test specific", "for quote_plus given = \"ab\\u6f22\\u5b57 cd\" expect = \"ab%3F%3F+cd\" result", "to be escaped; can be, though, if desired Control characters", "AttributeError), urllib.parse.unquote_to_bytes, ()) def test_unquoting_mixed_case(self): # Test unquoting on mixed-case", "accepted (but not \"simple responses\" without # a status line)", "test_proxy_bypass_environment_host_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t') self.assertTrue(bypass('localhost'))", "# self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutDefault(self): #", "letter. 
self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile)) @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_remote(self): url = \"http://www.python.org/file.txt\" self.fakehttp(b\"HTTP/1.1", "exhausting the file did not\" \" return an empty string\")", "result) given = ((\"\\u00a0\", (42, \"\\u00c1\")),) expect = '%A0=42&%A0=%C1' result", "% attr) def test_info(self): self.assertIsInstance(self.text_url_resp.info(), email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain', ''), ('charset',", "self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\", method='GET') self.assertEqual(request.get_method(), 'GET')", "= '%x' expect = given result = urllib.parse.unquote(given) self.assertEqual(expect, result,", "class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin): \"\"\"Test urllib.urlretrieve() using fake http connections\"\"\" def", "( \"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3\" \"D%26%20%C3%B6%20%C3%84%20\") self.text_url_base64 = ( \"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs\" \"sJT0mIPYgxCA%3D\") # base64", "calls # flush(). Problem: flush() calls self.fp.flush() which raises #", "have TEMP folder that uses a lowercase drive letter. self.assertEqual(os.path.normcase(filename),", "errors=\"replace\") self.assertEqual(expect, result) # ASCII Encoding. 
On a sequence of", "'%A0%24=%C1%24' result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True)", "temp file to use for testing self.text = bytes(\"test_urllib: %s\\n\"", "support.TESTFN, hooktester) self.assertEqual(len(report), 3) self.assertEqual(report[0][2], 8193) self.assertEqual(report[0][1], 8192) self.assertEqual(report[1][1], 8192)", "def test_quoting(self): # Make sure keys and values are quoted", "string contains non-Latin-1-representable characters) result = urllib.parse.unquote(\"\\u6f22%FC\", encoding=\"latin-1\") expect =", "makes all of the # above attempts at injection within", "test_ntpath(self): given = ('/C:/', '///C:/', '/C|//') expect = 'C:\\\\' for", "retrfile(self, file, type): return io.BytesIO(), 0 def close(self): pass self._ftpwrapper_class", "Make sure all characters that should be quoted are by", "\"using quote(): %r != %r\" % (expect, result)) def test_quote_plus_with_unicode(self):", "connecting to the Net for testing. \"\"\" def setUp(self): #", "that may still be open. It # is the responsibility", "self.assertNotIn('http', proxies) finally: self.env.unset('REQUEST_METHOD') self.env.unset('HTTP_PROXY') def test_proxy_bypass_environment_host_match(self): bypass = urllib.request.proxy_bypass_environment", "# ftp.close() # # def testTimeoutValue(self): # ftp = urllib.ftpwrapper(\"myuser\",", "own location returned and # a headers value is returned.", "result = urllib.parse.quote(given, encoding=None, errors=None) self.assertEqual(expect, result, \"using quote(): %r", "b64encode import collections def hexescape(char): \"\"\"Escape char as RFC 2396", "None: opener = urllib.request.FancyURLopener(proxies=proxies) elif not _urlopener: opener = FancyURLopener()", "method for testing different input types. 
'given' must lead to", "\"%E6%BC%A2%E5%AD%97\" result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote(): %r !=", "% (expect, result)) # Test on a string with unescaped", "TEMP folder that uses a lowercase drive letter. self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile))", "Each item in the list is a file # name", "expect = \"%A2%D8+%FF\" result = urllib.parse.quote_plus(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using", "\\ self.assertEqual(url2pathname(\"///C/test/\"), r'\\\\\\C\\test' '\\\\') self.assertEqual(url2pathname(\"////C/test/\"), r'\\\\C\\test' '\\\\') def test_simple_compare(self): self.assertEqual(url2pathname(\"///C|/foo/bar/spam.foo\"),", "urllib.parse.quote(' ') self.assertEqual(result, hexescape(' '), \"using quote(): %r != %r\"", "\"ssl module required\") def test_cafile_and_context(self): context = ssl.create_default_context() with support.check_warnings(('',", "unquote_to_bytes(): %r != %r\" % (expect, result)) # Test on", "encoding=\"latin-1\") expect = 'br\\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect, result, \"using unquote(): %r !=", "= ( b'\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00\\x00\\x02\\x00\\x00\\x00' b'\\x01\\x08\\x02\\x00\\x00\\x00{@\\xe8\\xdd\\x00\\x00\\x00\\x01sRGB\\x00\\xae' b'\\xce\\x1c\\xe9\\x00\\x00\\x00\\x0fIDAT\\x08\\xd7c```\\xf8\\xff\\xff?\\x00' b'\\x06\\x01\\x02\\xfe\\no/\\x1e\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82') self.text_url = ( \"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3\"", "fakedata return FakeHTTPConnection class FakeHTTPMixin(object): def fakehttp(self, fakedata, mock_close=False): fake_http_class", "be open. 
It # is the responsibility of the developer", "if data is None: return opener.open(url) else: return opener.open(url, data)", "given = '%' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given)", "result) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ()) with", "3) + 2)) def test_using_mapping(self): # Test passing in a", "= urllib.parse.urlencode(given, doseq=True, safe=\":$\", encoding=\"latin-1\") given = ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd,", "result)) given = \"make+sure/using_unquote\" expect = os.path.join(\"make+sure\", \"using_unquote\") result =", "from base64 import b64encode import collections def hexescape(char): \"\"\"Escape char", "result = urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN), second_temp) self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp), \"copy of", "self.help_inputtype({\"1st\":'1', \"2nd\":'2', \"3rd\":'3'}, \"using dict as input type\") def test_using_sequence(self):", "urllib.parse.urlencode(given, safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\\xa0\\x24',", "result = urllib.request.pathname2url(expected_path) self.assertEqual(expected_url, result, \"pathname2url() failed; %s != %s\"", "calls urllib.parse.quote() on the URL which makes all of the", "charset=iso-8859-1 ''', mock_close=True) try: self.assertRaises(OSError, urlopen, \"http://python.org/\") finally: self.unfakehttp() def", "count_holder[0]) count_holder[0] = count_holder[0] + 1 second_temp = \"%s.2\" %", "1, \"readlines() returned the wrong number of lines\") self.assertEqual(lines_list[0], self.text,", "try: fp = urlopen(\"http://python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://python.org/')", "result) result = urllib.parse.urlencode(given, True) self.assertEqual(expect, 
result) # Sequence of", "%r\" % (expect, result)) # Encoding argument should raise type", "given = ((\"\\u00a0\", \"\\u00c1\"),) expect = '%A0=%C1' result = urllib.parse.urlencode(given,", "resp.geturl()) self.assertNotIn('\\n', resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def", "All files in this list will be deleted in the", "Test cannot assume anything about order. Docs make no guarantee", "problematic environments, please help! # . Facundo # # def", "the url _path_ safe. InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL,", "number of iterations since test would fail the # instant", "check_read(self, ver): self.fakehttp(b\"HTTP/\" + ver + b\" 200 OK\\r\\n\\r\\nHello!\") try:", "proxies = urllib.request.getproxies_environment() self.assertEqual({}, proxies) # Test lowercase preference of", "mix of non-ASCII and percent-encoded characters, UTF-8 result = urllib.parse.unquote(\"\\u6f22%C3%BC\")", "returned by urlopen() lacks %s attribute\" % attr) def test_info(self):", "'/folder/test/') def test_simple_compare(self): self.assertEqual(pathname2url(r'C:\\foo\\bar\\spam.foo'), \"///C:/foo/bar/spam.foo\" ) def test_long_drive_letter(self): self.assertRaises(IOError, pathname2url,", "file # name (absolute path or relative to the current", "# ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, []) # finally:", "%s\\r\\n\" % b64encode(userpass.encode(\"ASCII\")).decode(\"ASCII\")) fp = urlopen(url) # The authorization header", "test_roundtrip_url2pathname(self): list_of_paths = ['C:', r'\\\\\\C\\test\\\\', r'C:\\foo\\bar\\spam.foo' ] for path in", "result, \"using unquote(): %r != %r\" % (expect, result)) #", "unquote(): %r != %r\" % (expect, result)) given = '%x'", "os.environ['no_proxy'] = 'localhost, noproxy.com, my.proxy:1234' os.environ['No_Proxy'] = 'xyz.com' self.assertTrue(urllib.request.proxy_bypass_environment('localhost')) 
self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678'))", "\"pathname2url() failed; %s != %s\" % (expect, result)) given =", "letters. The various character sets specified are: Reserved characters :", "order _is_ significant self._saved_env = os.environ # Monkey patch os.environ,", "with space. self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888')) self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234')) def", "%r != %r\" % (expect, result)) class UnquotingTests(unittest.TestCase): \"\"\"Tests for", "result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=b\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\")", "# serv.bind((\"\", 9093)) # serv.listen() # try: # conn, addr", "class FakeFtpWrapper(object): def __init__(self, user, passwd, host, port, dirs, timeout=None,", "as fobj: self.assertTrue(fobj) finally: os.close(fd) os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) with self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl)", "self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutDefault(self): # #", "self.env.set('NO_PROXY', 'localhost') proxies = urllib.request.getproxies_environment() # getproxies_environment use lowered case", "Must be escaped \"\"\" def test_never_quote(self): # Make sure quote()", "expect = b'\\xe6\\xbc\\xa2\\xc3\\xbc' # UTF-8 for \"\\u6f22\\u00fc\" self.assertEqual(expect, result, \"using", "\"\\ufffd\" # Replacement character result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using", "hexescape(' ')) result = urllib.parse.quote(given) self.assertEqual(expect, result, \"using quote(): %r", "fp = urllib.request.urlopen(url) self.assertEqual(fp.geturl(), url) finally: self.unfakehttp() def 
test_willclose(self): self.fakehttp(b\"HTTP/1.1", "# This code path quotes the URL so there is", "= urllib.parse.quote(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote(): %r != %r\"", "geturl() omits fragments in the original URL. url = 'http://docs.python.org/library/urllib.html#OK'", "self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_url_fragment(self):", "test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008 03:03:54", "the url _path_ safe. escaped_char_repr = repr(char).replace('\\\\', r'\\\\') InvalidURL =", "is returned. result = urllib.request.urlretrieve(\"file:%s\" % support.TESTFN) self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1],", "is not None: opener = urllib.request.FancyURLopener(proxies=proxies) elif not _urlopener: opener", "\"using quote_from_bytes(): %r != %r\" % (expect, result)) def test_quote_with_unicode(self):", "self.assertRaises(urllib.error.URLError) as e: urlopen('ftp://localhost/a/file/which/doesnot/exists.py') self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) @patch.object(urllib.request, 'MAXFTPCACHE', 0) def", "special- and unicode-characters self.text = \"test data URLs :;,%=& \\u00f6", "urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\") self.assertEqual(expect, result, \"using quote(): %r != %r\"", "self.assertEqual(hexescape(char), result, \"using quote_plus(): \" \"%s should be escapes to", "= fakedata return FakeHTTPConnection class FakeHTTPMixin(object): def fakehttp(self, fakedata, mock_close=False):", "error handling given = \"\\u6f22\\u5b57\" expect = \"%3F%3F\" # \"??\"", "omits fragments in the original URL. 
url = 'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b\"HTTP/1.1", "\"\"\"Creates a new temporary file containing the specified data, registers", "proxies) finally: self.env.unset('REQUEST_METHOD') self.env.unset('HTTP_PROXY') def test_proxy_bypass_environment_host_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY',", "Latin-1 range, encoded with Latin-1 result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding=\"latin-1\") expect", "%r != %r\" % (expect, result)) # Characters in the", "\"sequence=%s\" % urllib.parse.quote_plus(str(['1', '2', '3'])) result = urllib.parse.urlencode(given) self.assertEqual(expect, result)", "and urllib.quote_plus() According to RFC 3986 (Uniform Resource Identifiers), to", "get deleted, but it # does nothing about trying to", "charset=iso-8859-1 FF ''') with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL) finally: self.unfakehttp() class", "('FancyURLopener style of invoking requests is deprecated.', DeprecationWarning)): return urllib.request.FancyURLopener()", "# Latin-1 encoding. 
given = (('\\u00a0', '\\u00c1'),) expect = '%A0=%C1'", "close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try: self.assertRaises(OSError, urlopen, \"http://python.org/\")", "8193) self.assertEqual(report[0][1], 8192) self.assertEqual(report[1][1], 8192) self.assertEqual(report[2][1], 8192) class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin):", "test_proxy_bypass_environment_always_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', '*') self.assertTrue(bypass('newdomain.com')) self.assertTrue(bypass('newdomain.com:1234')) self.env.set('NO_PROXY', '*,", "result)) def test_quote_with_unicode(self): # Characters in Latin-1 range, encoded by", "text/html; charset=iso-8859-1 ''', mock_close=True) try: self.assertRaises(OSError, urlopen, \"http://python.org/\") finally: self.unfakehttp()", "this error. def close(self): pass FakeHTTPConnection.fakedata = fakedata return FakeHTTPConnection", "header must be in place self.assertIn(authorization, fakehttp_wrapper.buf.decode(\"UTF-8\")) self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(),", "sure quote() and quote_plus() handle spaces as specified in #", "for value in given[\"sequence\"]: expect = \"sequence=%s\" % value self.assertIn(expect,", "given = \"\\xa2\\xd8 \\xff\" expect = \"%A2%D8+%FF\" result = urllib.parse.quote_plus(given,", "(expect, result)) # Test with a bytes as input, with", "or relative to the current working directory). 
# All files", "int) self.assertIsInstance(block_read_size, int) self.assertIsInstance(file_size, int) self.assertEqual(block_count, count_holder[0]) count_holder[0] = count_holder[0]", "for attr in (\"read\", \"readline\", \"readlines\", \"fileno\", \"close\", \"info\", \"geturl\",", "escaped: \" \"%s\" % result) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None) self.assertRaises((TypeError,", "/'),'//example/%20/') # test the safe characters are not quoted by", "(once # when the \"network connection\" is established, once for", "collections.OrderedDict([(\"a\", 1), (\"b\", 1)]) self.assertEqual(\"a=a&a=b\", urllib.parse.urlencode({\"a\": data}, True)) def test_urlencode_encoding(self):", "test_ftp_nohost(self): test_ftp_url = 'ftp:///path' with self.assertRaises(urllib.error.URLError) as e: urlopen(test_ftp_url) self.assertFalse(e.exception.filename)", "b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, doseq=True, safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect,", "self.assertEqual(report[0][1], 8192) self.assertEqual(report[1][1], 8192) self.assertEqual(report[2][1], 8192) class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin): \"\"\"Test", "socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutValue(self):", "result = urllib.parse.quote_plus(' ') self.assertEqual(result, '+', \"using quote_plus(): %r !=", "# Encoding argument should raise type error on bytes input", "retry limit. for i in range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1 302 Found Location:", "by calling it here and then having it be called", "sure object returned by urlopen() has the specified methods for", "for pathnam2url() and # url2pathname() respectively given = os.path.join(\"needs\", \"quot=ing\",", "special meaning Data characters : letters, digits, and \"-_.!~*'()\" Unreserved", "does not send any # data. 
(#1680230) self.fakehttp(b'') try: self.assertRaises(OSError,", "= '%Ab%eA' expect = b'\\xab\\xea' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result,", "expect_somewhere = [\"1st=1\", \"2nd=2\", \"3rd=3\"] result = urllib.parse.urlencode(given) for expected", "self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234')) def test_proxy_cgi_ignore(self): try: self.env.set('HTTP_PROXY', 'http://somewhere:3128') proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128',", "class QuotingTests(unittest.TestCase): r\"\"\"Tests for urllib.quote() and urllib.quote_plus() According to RFC", "local file just gets its own location returned and #", "will be deleted in the tearDown method. Note, # this", "self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_host_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\")", "in the urllib.\"\"\" def test_thishost(self): \"\"\"Test the urllib.request.thishost utility function", "file. Should call reporthook only 2 times (once when #", "#12923: make sure independent requests each use their # own", "!= %r\" % (expect, result)) expect = given.replace(' ', '+')", "\"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.text_url_resp, attr), \"object returned by urlopen() lacks", "= self.createNewTempFile() urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 1) self.assertEqual(report[0][2], 0) def", "string for quoting_Tests for details on quoting and such. 
\"\"\"", "range, encoded with None (default) result = urllib.parse.unquote(given, encoding=None, errors=None)", "given = \"%E6%BC%A2%E5%AD%97\" expect = \"\\u6f22\\u5b57\" # \"Kanji\" result =", "# Characters in Latin-1 range, encoded by default in UTF-8", "% urllib.parse.quote(\"make sure\") result = urllib.request.pathname2url(given) self.assertEqual(expect, result, \"pathname2url() failed;", "request = Request(\"http://www.python.org\", {}) self.assertEqual(request.get_method(), 'POST') def test_with_method_arg(self): Request =", "self.assertEqual(do_not_quote, result, \"using quote(): %r != %r\" % (do_not_quote, result))", "result = urllib.parse.unquote(given, errors=\"replace\") self.assertEqual(expect, result, \"using unquote(): %r !=", "email.message.Message, \"did not get an email.message.Message instance \" \"as second", "# Test iterator # Don't need to count number of", "sequence, ignoring errors given = \"%F3%B1\" expect = \"\" result", "= open(support.TESTFN, 'wb') try: f.write(self.text) finally: f.close() self.pathname = support.TESTFN", "int) self.assertIsInstance(file_size, int) self.assertEqual(block_count, count_holder[0]) count_holder[0] = count_holder[0] + 1", "()) def test_unquoting_mixed_case(self): # Test unquoting on mixed-case hex digits", "% (expect, result)) def test_quoting_plus(self): self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma',", "urllib.parse.urlencode({\"a\": [None, \"a\"]}, True)) data = collections.OrderedDict([(\"a\", 1), (\"b\", 1)])", "# Test with a bytes as input given = b'%A2%D8ab%FF'", "%s\" % (test_type, result)) self.assertEqual(len(result), (5 * 3) + 2,", "(expect, result)) # Test on a string with unescaped non-ASCII", "from unittest.mock import patch from test import support import os", "= ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result =", "hexescape('=')) result = 
urllib.parse.urlencode(given) self.assertEqual(expect, result) given = {\"key name\":\"A", "self.text_url = ( \"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3\" \"D%26%20%C3%B6%20%C3%84%20\") self.text_url_base64 = ( \"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs\" \"sJT0mIPYgxCA%3D\")", "instant it returned anything beyond the first line from the", "k.lower(): self.env.unset(k) def tearDown(self): # Restore all proxy related env", "unquote_plus(): %r != %r\" % (expect, result)) def test_unquote_to_bytes(self): given", "cd\" expect = \"ab%3F%3F+cd\" result = urllib.parse.quote_plus(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect,", "Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license Connection: close", "if proxies is not None: opener = urllib.request.FancyURLopener(proxies=proxies) elif not", "'3']} expect = \"sequence=%s\" % urllib.parse.quote_plus(str(['1', '2', '3'])) result =", "1] on_amp_right = result[amp_location + 1] self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), \"testing", "test_missing_localfile(self): # Test for #10836 with self.assertRaises(urllib.error.URLError) as e: urlopen('file://localhost/a/file/which/doesnot/exists.py')", "encoding=\"latin-1\", safe=\"\") self.assertEqual(expect, result, \"using quote(): %r != %r\" %", "encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_encoding_doseq(self): # ASCII Encoding. 
Expect %3F", "not \"simple responses\" without # a status line) self.check_read(b\"0.9\") def", "got %s\" % result.count('&')) def test_empty_sequence(self): self.assertEqual(\"\", urllib.parse.urlencode({})) self.assertEqual(\"\", urllib.parse.urlencode([]))", "= \"\\u6f22\\u5b57\" expect = \"%3F%3F\" # \"??\" result = urllib.parse.quote(given,", "self.assertEqual(url2pathname(\"///C/test/\"), r'\\\\\\C\\test' '\\\\') self.assertEqual(url2pathname(\"////C/test/\"), r'\\\\C\\test' '\\\\') def test_simple_compare(self): self.assertEqual(url2pathname(\"///C|/foo/bar/spam.foo\"), r'C:\\foo\\bar\\spam.foo')", "def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008", "self.text_url_base64) self.image_url_resp = urllib.request.urlopen(self.image_url) def test_interface(self): # Make sure object", "# Records changes to env vars self.env = support.EnvironmentVarGuard() #", "is no injection. resp = urlopen(f\"http:{schemeless_url}\") self.assertNotIn(char, resp.geturl()) finally: self.unfakehttp()", "invoking requests is deprecated.', DeprecationWarning)): return urllib.request.FancyURLopener() def fakehttp(fakedata, mock_close=False):", "try: FILE = open(support.TESTFN, 'wb') FILE.write(self.text) FILE.close() finally: try: FILE.close()", "''') with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL) finally: self.unfakehttp() class QuotingTests(unittest.TestCase): r\"\"\"Tests", "close() which calls # flush(). 
Problem: flush() calls self.fp.flush() which", "try: resp = urlopen(\"http://www.python.org\") self.assertTrue(resp.fp.will_close) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module", "urllib.parse.quote_plus(str(['1', '2', '3'])) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result =", "% value self.assertIn(expect, result) self.assertEqual(result.count('&'), 2, \"Expected 2 '&'s, got", "def retrfile(self, file, type): return io.BytesIO(), 0 def close(self): pass", "!= %r\" % (do_not_quote, result)) result = urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result,", "'C:') self.assertEqual(url2pathname(\"///C|/\"), 'C:\\\\') def test_converting_when_no_drive_letter(self): # cannot end a raw", "\"\"\" expect_somewhere = [\"1st=1\", \"2nd=2\", \"3rd=3\"] result = urllib.parse.urlencode(given) for", "finally: self.unfakehttp() def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02", "True, encoding=\"latin-1\") self.assertEqual(expect, result) given = ((\"\\u00a0\", (42, \"\\u00c1\")),) expect", "self.assertEqual(self.text_url_base64_resp.geturl(), self.text_url_base64) self.assertEqual(self.image_url_resp.geturl(), self.image_url) def test_read_text(self): self.assertEqual(self.text_url_resp.read().decode( dict(self.text_url_resp.info().get_params())['charset']), self.text) def", "support.TESTFN) self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1], email.message.Message, \"did not get an email.message.Message", "given = '///C|/path' expect = 'C:\\\\path' result = urllib.request.url2pathname(given) self.assertEqual(expect,", "ignorable spaces, # such as \"\\n\", \" \", \"%0A\", and", "result)) given = '%x' expect = given result = urllib.parse.unquote(given)", "mock_close: # bpo-36918: HTTPConnection destructor calls close() which calls #", "expected_url)) result = urllib.request.url2pathname(expected_url) 
self.assertEqual(expected_path, result, \"url2pathame() failed; %s !=", "expect = \"abcd\" result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using quote():", "encoding. given = (('\\u00a0', '\\u00c1'),) expect = '%C2%A0=%C3%81' result =", "True) for value in given[\"sequence\"]: expect = \"sequence=%s\" % value", ": '<>#%\"' Must be escaped Unwise : \"{}|\\^[]`\" Must be", "open file-like object\"\"\" global _urlopener if proxies is not None:", "# Use the iterator in the usual implicit way to", "try: os.remove(each) except: pass def constructLocalFileUrl(self, filePath): filePath = os.path.abspath(filePath)", "given result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r !=", "sequence of values. given = ((\"\\u00a0\", (1, \"\\u00c1\")),) expect =", "support import os try: import ssl except ImportError: ssl =", "testTimeoutDefault(self): # # global default timeout is used # import", "\"\\\\\"), '/folder/test/') def test_simple_compare(self): self.assertEqual(pathname2url(r'C:\\foo\\bar\\spam.foo'), \"///C:/foo/bar/spam.foo\" ) def test_long_drive_letter(self): self.assertRaises(IOError,", "data): FakeHTTPConnection.buf = data def makefile(self, *args, **kwds): self.io_refs +=", "(result, hexescape(' '))) result = urllib.parse.quote_plus(' ') self.assertEqual(result, '+', \"using", "in list(range(0, 0x21)) + [0x7f]: char = chr(char_no) schemeless_url =", "self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008 03:03:54 GMT", "% support.TESTFN self.registerFileForCleanUp(second_temp) urllib.request.urlretrieve( self.constructLocalFileUrl(support.TESTFN), second_temp, hooktester) def test_reporthook_0_bytes(self): #", "InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r\"): urlopen(f\"http:{schemeless_url}\") with", "tearDown(self): os.environ = self._saved_env def test_getproxies_environment_prefer_lowercase(self): # Test lowercase preference", 
"\"\"\"Testcase to test the various utility functions in the urllib.\"\"\"", "the original URL. url = 'http://docs.python.org/library/urllib.html#OK' self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") try:", "with support.check_warnings(('', DeprecationWarning)): with self.assertRaises(ValueError): urllib.request.urlopen( \"https://localhost\", cafile=\"/nonexistent/path\", context=context )", "unquoting works for pathnam2url() and # url2pathname() respectively given =", "thing and amps \"testing %s: \" \"unexpected number of characters:", "= http.client.HTTPConnection http.client.HTTPConnection = fake_http_class def unfakehttp(self): http.client.HTTPConnection = self._connection_class", "Latin-1 given = \"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result = urllib.parse.quote(given,", "= urllib.parse.quote_plus(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using quote_plus(): %r !=", "self.assertEqual(url2pathname(\"///C|/foo/bar/spam.foo\"), r'C:\\foo\\bar\\spam.foo') def test_non_ascii_drive_letter(self): self.assertRaises(IOError, url2pathname, \"///\\u00e8|/\") def test_roundtrip_url2pathname(self): list_of_paths", "%r\" % (expect, result)) def test_unquote_to_bytes(self): given = 'br%C3%BCckner_sapporo_20050930.doc' expect", "files that may still be open. 
It # is the", "'safe' parameter does what it should do quote_by_default = \"<>\"", "FakeHTTPConnection class FakeHTTPMixin(object): def fakehttp(self, fakedata, mock_close=False): fake_http_class = fakehttp(fakedata,", "pass def retrfile(self, file, type): return io.BytesIO(), 0 def close(self):", "test_unquoting_with_bytes_input(self): # Bytes not supported yet with self.assertRaisesRegex(TypeError, 'Expected str,", "= \"%A2%D8+%FF\" result = urllib.parse.quote_plus(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote_plus():", "urlopen() lacks %s attribute\" % attr) def test_read(self): self.assertEqual(self.text, self.returned_obj.read())", "self.assertEqual(expect, result, \"using unquote(): %r != %r\" % (expect, result))", "result)) given = os.path.join(\"make sure\", \"using_quote\") expect = \"%s/using_quote\" %", "was in Python 2's \"urllib\" module\"\"\" import urllib.parse import urllib.request", "noproxy.com, my.proxy:1234' os.environ['No_Proxy'] = 'xyz.com' self.assertTrue(urllib.request.proxy_bypass_environment('localhost')) self.assertTrue(urllib.request.proxy_bypass_environment('noproxy.com:5678')) self.assertTrue(urllib.request.proxy_bypass_environment('my.proxy:1234')) self.assertFalse(urllib.request.proxy_bypass_environment('my.proxy')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary'))", "quote() does not quote letters, digits, and \"_,.-\" do_not_quote =", "methods for attr in (\"read\", \"readline\", \"readlines\", \"close\", \"info\", \"geturl\",", "UTF-8 given = 'br%C3%BCckner_sapporo_20050930.doc' expect = 'br\\u00fcckner_sapporo_20050930.doc' result = urllib.parse.unquote(given)", "have a linux, and # the tests go ok. 
#", "(expect, result)) def test_default_quoting(self): # Make sure all characters that", "1, \"using unquote(): not all characters escaped: \" \"%s\" %", "characters: %s != %s\" % (test_type, len(result), (5 * 3)", "def test_info(self): self.assertIsInstance(self.returned_obj.info(), email.message.Message) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(), self.pathname) def test_getcode(self):", "i in range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1 302 Found Location: file://guidocomputer.athome.com:/python/license Connection: close", "Encoding (latin-1) test for quote_plus given = \"\\xa2\\xd8 \\xff\" expect", "Location: file://guidocomputer.athome.com:/python/license Connection: close ''', mock_close=True) try: self.assertRaises(urllib.error.HTTPError, urlopen, \"http://something\")", "data) def FancyURLopener(): with support.check_warnings( ('FancyURLopener style of invoking requests", "reporthook only 3 times (once # when the \"network connection\"", "length) def close(self): self.io_refs -= 1 if self.io_refs == 0:", "not all characters escaped: \" \"%s\" % result) self.assertRaises((TypeError, AttributeError),", "self.assertEqual(expect, result) given = ((\"\\u00a0\", (42, \"\\u00c1\")),) expect = '%C2%A0=42&%C2%A0=%C3%81'", "urllib.request import urllib.error import http.client import email.message import io import", "# getproxies_environment use lowered case truncated (no '_proxy') keys self.assertEqual('localhost',", "result = urllib.parse.urlencode(given) self.assertEqual(expect, result) # Latin-1 encoding. 
given =", "= ((b'\\xa0\\x24', (42, b'\\xc1\\x24')),) expect = '%A0%24=42&%A0%24=%C1%24' result = urllib.parse.urlencode(given,", "of pluses\"} expect = \"key+name=A+bunch+of+pluses\" result = urllib.parse.urlencode(given) self.assertEqual(expect, result)", "test_relativelocalfile(self): self.assertRaises(ValueError,urllib.request.urlopen,'./' + self.pathname) class ProxyTests(unittest.TestCase): def setUp(self): # Records", "None import sys import tempfile from nturl2path import url2pathname, pathname2url", "True)) self.assertEqual(\"a=None&a=a\", urllib.parse.urlencode({\"a\": [None, \"a\"]}, True)) data = collections.OrderedDict([(\"a\", 1),", "on the file descriptor returned by fileno() \" \"did not", "dict(self.text_url_resp.info().get_params())['charset']), self.text) def test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']), self.text) def test_read_image(self): self.assertEqual(self.image_url_resp.read(),", "escaped to %s, not %s\" % (char, hexescape(char), result)) result", "%r\" % (expect, result)) def test_unquoting_plus(self): # Test difference between", "to utf8\") return \"file://%s\" % urllib.request.pathname2url(filePath) def createNewTempFile(self, data=b\"\"): \"\"\"Creates", "self.assertTrue(bypass('.localhost')) self.assertTrue(bypass('newdomain.com:1234')) self.assertTrue(bypass('.newdomain.com:1234')) self.assertTrue(bypass('foo.d.o.t')) # issue 29142 self.assertTrue(bypass('d.o.t')) self.assertTrue(bypass('anotherdomain.com:8888')) self.assertTrue(bypass('.anotherdomain.com:8888'))", "= '%A0=%C1' result = urllib.parse.urlencode(given, encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_encoding_doseq(self):", "serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # serv.bind((\"\", 9093)) # serv.listen() # try:", "%s\" % (test_type, result.count('&'))) amp_location = result.index('&') on_amp_left = result[amp_location", 
"fakehttp_wrapper.buf.decode(\"UTF-8\")) self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") # the spaces are quoted", "character properly. Case does not matter on the hex letters.", "Must be escaped Delimiters : '<>#%\"' Must be escaped Unwise", "to the current working directory). # All files in this", "def test_invalid_base64_data(self): # missing padding character self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class urlretrieve_FileTests(unittest.TestCase): \"\"\"Test", "Latin-1 range, encoded by with None (default) result = urllib.parse.quote(given,", "with self.assertRaisesRegex(InvalidURL, f\"contain control.*{escaped_char_repr}\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module", "!= %r\" % (expect, result)) # Decode with UTF-8, invalid", "result)) # Characters in BMP, Latin-1, with xmlcharref error handling", "def test_roundtrip_url2pathname(self): list_of_paths = ['C:', r'\\\\\\C\\test\\\\', r'C:\\foo\\bar\\spam.foo' ] for path", "given = b\"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result = urllib.parse.quote(given) self.assertEqual(expect,", "flush() calls self.fp.flush() which raises # \"ValueError: I/O operation on", "with support.check_warnings( ('FancyURLopener style of invoking requests is deprecated.', DeprecationWarning)):", "result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\")", "self.assertEqual(expect, result, \"using quote_from_bytes(): %r != %r\" % (expect, result))", "with a bytes as input, with unescaped non-ASCII bytes #", "\"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3\" \"D%26%20%C3%B6%20%C3%84%20\") self.text_url_base64 = ( \"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs\" \"sJT0mIPYgxCA%3D\") # base64 encoded", "= urllib.parse.urlencode(given) for expected in expect_somewhere: 
self.assertIn(expected, result, \"testing %s:", "2]}, True)) self.assertEqual(\"a=None&a=a\", urllib.parse.urlencode({\"a\": [None, \"a\"]}, True)) data = collections.OrderedDict([(\"a\",", "result)) escape_list.append(given) escape_string = ''.join(escape_list) del escape_list result = urllib.parse.unquote(escape_string)", "unicode-characters self.text = \"test data URLs :;,%=& \\u00f6 \\u00c4 \"", "self.assertFalse(bypass('newdomain.com')) # no port self.assertFalse(bypass('newdomain.com:1235')) # wrong port def test_proxy_bypass_environment_always_match(self):", "expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) given", "without # a status line) self.check_read(b\"0.9\") def test_read_1_0(self): self.check_read(b\"1.0\") def", "'br%C3%BCckner_sapporo_20050930.doc' expect = b'br\\xc3\\xbcckner_sapporo_20050930.doc' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using", "'///C:') def test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r\"\\\\\\folder\\test\" \"\\\\\"), '/////folder/test/') self.assertEqual(pathname2url(r\"\\\\folder\\test\" \"\\\\\"), '////folder/test/') self.assertEqual(pathname2url(r\"\\folder\\test\"", "such. 
\"\"\" def test_unquoting(self): # Make sure unquoting of all", "= ((\"\\u00a0\", (42, \"\\u00c1\")),) expect = '%C2%A0=42&%C2%A0=%C3%81' result = urllib.parse.urlencode(given,", "= urllib.request.FancyURLopener(proxies=proxies) elif not _urlopener: opener = FancyURLopener() _urlopener =", "fobj: self.assertTrue(fobj) finally: os.close(fd) os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) with self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl) def", "result, \"pathname2url() failed; %s != %s\" % (expect, result)) given", "self.assertTrue(bypass('newdomain.com')) self.assertTrue(bypass('newdomain.com:1234')) self.env.set('NO_PROXY', '*, anotherdomain.com') self.assertTrue(bypass('anotherdomain.com')) self.assertFalse(bypass('newdomain.com')) self.assertFalse(bypass('newdomain.com:1234')) def test_proxy_bypass_environment_newline(self):", "test_ftp_cache_pruning(self): self.fakeftp() try: urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass', 'localhost', 21, [])", "fp = urlopen(\"http://python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(),", "temporary file. Try to test as much functionality as possible", "3 Test cannot assume anything about order. 
Docs make no", "(expect, result)) def test_unquoting_with_bytes_input(self): # Bytes not supported yet with", "URLs :;,%=& \\u00f6 \\u00c4 \" # 2x1 pixel RGB PNG", "self.assertEqual(\"a=1&a=2\", urllib.parse.urlencode({\"a\": [1, 2]}, True)) self.assertEqual(\"a=None&a=a\", urllib.parse.urlencode({\"a\": [None, \"a\"]}, True))", "urllib.parse.unquote(\"\\u6f22%C3%BC\") expect = '\\u6f22\\u00fc' self.assertEqual(expect, result, \"using unquote(): %r !=", "'+') result = urllib.parse.quote_plus(given) self.assertEqual(expect, result, \"using quote_plus(): %r !=", "open_spam(self, url): return url with support.check_warnings( ('DummyURLopener style of invoking", "%s' % (expect, result)) given = '///C|/path' expect = 'C:\\\\path'", "import url2pathname, pathname2url from base64 import b64encode import collections def", "\"parts/of/a/path\" result = urllib.request.pathname2url(expected_path) self.assertEqual(expected_url, result, \"pathname2url() failed; %s !=", "result)) @unittest.skipUnless(sys.platform == 'win32', 'test specific to the urllib.url2path function.')", "= ssl.create_default_context() with support.check_warnings(('', DeprecationWarning)): with self.assertRaises(ValueError): urllib.request.urlopen( \"https://localhost\", cafile=\"/nonexistent/path\",", "result, \"using unquote_to_bytes(): %r != %r\" % (expect, result)) def", "but it # does nothing about trying to close files", "header\\r\\nTEST: 123\" schemeless_url = \"//\" + host + \":8080/test/?test=a\" try:", "return self def read(self, amt=None): if self.closed: return b\"\" return", "= ['C:', r'\\\\\\C\\test\\\\', r'C:\\foo\\bar\\spam.foo' ] for path in list_of_paths: self.assertEqual(url2pathname(pathname2url(path)),", "# time.sleep(.3) # conn.send(\"2 No more lines\\n\") # conn.close() #", "default timeout is ignored # import socket # self.assertIsNone(socket.getdefaulttimeout()) #", "= urllib.parse.quote_plus(do_not_quote) self.assertEqual(do_not_quote, result, \"using quote_plus(): %r != %r\" %", 
"be escaped to %s, not %s\" % (char, hexescape(char), result))", "self.__class__.__name__, \"ascii\") f = open(support.TESTFN, 'wb') try: f.write(self.text) finally: f.close()", "close() by calling it here and then having it be", "try: f.write(self.text) finally: f.close() self.pathname = support.TESTFN self.returned_obj = urlopen(\"file:%s\"", "urllib.parse.urlencode({\"a\": None})) def test_nonstring_seq_values(self): self.assertEqual(\"a=1&a=2\", urllib.parse.urlencode({\"a\": [1, 2]}, True)) self.assertEqual(\"a=None&a=a\",", "escape_list result = urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'), 1, \"using unquote(): not all", "in proper place in %s\" % (test_type, result)) self.assertEqual(len(result), (5", "+= 1 return self def read(self, amt=None): if self.closed: return", "would fail the # instant it returned anything beyond the", "%r\" % (expect, result)) expect = given.replace('+', ' ') result", "= urllib.request.urlretrieve(\"file:%s\" % support.TESTFN) self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1], email.message.Message, \"did not", "setUp(self): # import ftplib, time, threading # ftplib.FTP.port = 9093", "Some buildbots have TEMP folder that uses a lowercase drive", "# Create a temporary file. self.registerFileForCleanUp(support.TESTFN) self.text = b'testing urllib.urlretrieve'", "expect = '%A0=42&%A0=%C1' result = urllib.parse.urlencode(given, True, encoding=\"latin-1\") self.assertEqual(expect, result)", "urllib.parse.urlencode(given, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # Default is UTF-8 encoding.", "at injection within the url _path_ safe. 
InvalidURL = http.client.InvalidURL", "removal os.environ['no_proxy'] = '' os.environ['No_Proxy'] = 'localhost' self.assertFalse(urllib.request.proxy_bypass_environment('localhost')) self.assertFalse(urllib.request.proxy_bypass_environment('arbitrary')) os.environ['http_proxy']", "OK Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33", "of the top # level 'def urlopen()' function defined in", "= \"needs/%s/here\" % urllib.parse.quote(\"quot=ing\") result = urllib.request.pathname2url(given) self.assertEqual(expect, result, \"pathname2url()", "b'\\x01\\x08\\x02\\x00\\x00\\x00{@\\xe8\\xdd\\x00\\x00\\x00\\x01sRGB\\x00\\xae' b'\\xce\\x1c\\xe9\\x00\\x00\\x00\\x0fIDAT\\x08\\xd7c```\\xf8\\xff\\xff?\\x00' b'\\x06\\x01\\x02\\xfe\\no/\\x1e\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82') self.text_url = ( \"data:text/plain;charset=UTF-8,test%20data%20URLs%20%3A%3B%2C%25%3\" \"D%26%20%C3%B6%20%C3%84%20\") self.text_url_base64 =", "an invalid URI; expect those bytes to be preserved) given", "be called again # by the tearDown() method for the", "same result = urllib.parse.quote_from_bytes(given) self.assertEqual(expect, result, \"using quote_from_bytes(): %r !=", "= [chr(num) for num in range(32)] # For 0x00 -", "f.write(self.text) finally: f.close() self.pathname = support.TESTFN self.returned_obj = urlopen(\"file:%s\" %", "in BMP, encoded with Latin-1 given = \"\\u6f22\\u5b57\" self.assertRaises(UnicodeEncodeError, urllib.parse.quote,", "(latin-1) test for quote_plus given = \"\\xa2\\xd8 \\xff\" expect =", "close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try: msg = \"Redirection", "level 'def urlopen()' function defined in this... (quite ugly) #", "os.environ['Http_Proxy'] = 'http://somewhereelse:3128' proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) class urlopen_HttpTests(unittest.TestCase,", "no guarantee and have possible dictionary input. 
\"\"\" expect_somewhere =", "cut down on reliance on connecting to the Net for", "test_url_host_with_control_char_rejected(self): for char_no in list(range(0, 0x21)) + [0x7f]: char =", "self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) for value in given[\"sequence\"]:", "self.assertEqual(\"a=1\", urllib.parse.urlencode({\"a\": 1})) self.assertEqual(\"a=None\", urllib.parse.urlencode({\"a\": None})) def test_nonstring_seq_values(self): self.assertEqual(\"a=1&a=2\", urllib.parse.urlencode({\"a\":", "Characters in BMP, encoded by default in UTF-8 given =", "# global default timeout is used # import socket #", "FF ''') with self.assertRaises(urllib.error.ContentTooShortError): try: urllib.request.urlretrieve(support.TEST_HTTP_URL) finally: self.unfakehttp() class QuotingTests(unittest.TestCase):", "data = collections.OrderedDict([(\"a\", 1), (\"b\", 1)]) self.assertEqual(\"a=a&a=b\", urllib.parse.urlencode({\"a\": data}, True))", "def test_never_quote(self): # Make sure quote() does not quote letters,", "r\"contain control.*\\\\r.*(found at least . 
.)\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain", "wrong number of lines\") self.assertEqual(lines_list[0], self.text, \"readlines() returned improper text\")", "pass return newFilePath def registerFileForCleanUp(self, fileName): self.tempFiles.append(fileName) def test_basic(self): #", "= \"%F3%B1\" expect = \"\" result = urllib.parse.unquote(given, errors=\"ignore\") self.assertEqual(expect,", "self.assertRaises(ValueError,urllib.request.urlopen,'data:;base64,Cg=') class urlretrieve_FileTests(unittest.TestCase): \"\"\"Test urllib.urlretrieve() on local files\"\"\" def setUp(self):", "keys and values are quoted using quote_plus() given = {\"&\":\"=\"}", "r\"contain control.*\\\\r\"): urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp()", "by default in UTF-8 given = \"\\u6f22\\u5b57\" # \"Kanji\" expect", "# Monkey patch os.environ, start with empty fake environment os.environ", "# conn.close() # except socket.timeout: # pass # finally: #", "test_basic(self): # Make sure simple tests pass expected_path = os.path.join(\"parts\",", "sparc. 
# Everywhere else they work ok, but on those", "FILE.close() except: pass self.assertEqual(self.text, text) def test_reporthook(self): # Make sure", "chr(char_no) schemeless_url = f\"//localhost{char}/test/\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") try: escaped_char_repr =", "Connection: close ''', mock_close=True) try: self.assertRaises(urllib.error.HTTPError, urlopen, \"http://something\") finally: self.unfakehttp()", "base64 import b64encode import collections def hexescape(char): \"\"\"Escape char as", "given = ((\"\\u00a0\", (42, \"\\u00c1\")),) expect = '%A0=42&%A0=%C1' result =", "Try to test as much functionality as possible so as", "'&'s; got %s\" % (test_type, result.count('&'))) amp_location = result.index('&') on_amp_left", "!= %s\" % (result, expected_path)) def test_quoting(self): # Test automatic", "encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_bytes(self): given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect", "much functionality as possible so as to cut down on", "url): return url for url in ('local_file://example', 'local-file://example'): self.assertRaises(OSError, urllib.request.urlopen,", "encoded with None (default) result = urllib.parse.unquote(given, encoding=None, errors=None) self.assertEqual(expect,", "an empty string\") def test_readlines(self): lines_list = self.returned_obj.readlines() self.assertEqual(len(lines_list), 1,", "has one of the problematic environments, please help! 
# .", "# # class FTPWrapperTests(unittest.TestCase): # # def setUp(self): # import", "urllib.request.FancyURLopener() def fakehttp(fakedata, mock_close=False): class FakeSocket(io.BytesIO): io_refs = 1 def", "urlencode()\"\"\" def help_inputtype(self, given, test_type): \"\"\"Helper method for testing different", "= Request(\"http://www.python.org\", method='GET') self.assertEqual(request.get_method(), 'GET') request.method = 'HEAD' self.assertEqual(request.get_method(), 'HEAD')", "== 0: io.BytesIO.close(self) class FakeHTTPConnection(http.client.HTTPConnection): # buffer to store data", "request = Request(\"http://www.python.org\", method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request =", "is UTF-8 encoding. given = (('\\u00a0', '\\u00c1'),) expect = '%C2%A0=%C3%81'", "\"\"\"Test urlopen() opening a data URL.\"\"\" def setUp(self): # text", "def test_urlencode_bytes(self): given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0%24=%C1%24' result", "= 'br\\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect, result, \"using unquote(): %r != %r\" %", "% (expect, result)) # Decode with UTF-8, invalid sequence, replace", "does not matter on the hex letters. 
The various character", "deletion during the test fixture tear down, and returns the", "ver): self.fakehttp(b\"HTTP/\" + ver + b\" 200 OK\\r\\n\\r\\nHello!\") try: fp", "returned by fileno() \" \"did not return the expected text\")", "'MAXFTPCACHE', 0) def test_ftp_cache_pruning(self): self.fakeftp() try: urllib.request.ftpcache['test'] = urllib.request.ftpwrapper('user', 'pass',", "@unittest.skipUnless(sys.platform == 'win32', 'test specific to the urllib.url2path function.') def", "r\"contain control.*\\\\n\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code path quotes the URL", "# Test lowercase preference with replacement os.environ['http_proxy'] = 'http://somewhere:3128' os.environ['Http_Proxy']", "They use different url opening codepaths. Plain # urlopen uses", "(expect, result)) result = urllib.parse.unquote_plus(given) self.assertEqual(expect, result, \"using unquote_plus(): %r", "header\\r\\n\" schemeless_url = \"//\" + host + \":8080/test/?test=a\" try: InvalidURL", "else: opener = _urlopener if data is None: return opener.open(url)", "self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, b'') def test_unquoting_badpercent(self): # Test unquoting on", "(expect, result)) # Decode with UTF-8, invalid sequence given =", "True, encoding=\"latin-1\") self.assertEqual(expect, result) def test_urlencode_bytes(self): given = ((b'\\xa0\\x24', b'\\xc1\\x24'),)", "a mapping object as an argument. self.help_inputtype({\"1st\":'1', \"2nd\":'2', \"3rd\":'3'}, \"using", "least . 
.)\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urllib.request.urlopen(f\"https:{schemeless_url}\") #", "!= %r\" % (expect, result)) # Test on a string", "OK\\r\\n\\r\\nHello.\") try: escaped_char_repr = repr(char).replace('\\\\', r'\\\\') InvalidURL = http.client.InvalidURL with", "input, with unescaped non-ASCII bytes # (Technically an invalid URI;", "result)) def test_unquoting_plus(self): # Test difference between unquote() and unquote_plus()", "= '%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result) given =", "on_amp_right.isdigit(), \"testing %s: '&' not located in proper place in", "def test_proxy_bypass_environment_always_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', '*') self.assertTrue(bypass('newdomain.com')) self.assertTrue(bypass('newdomain.com:1234')) self.env.set('NO_PROXY',", "hooktester) def test_reporthook_0_bytes(self): # Test on zero length file. 
Should", "test_close(self): # Test close() by calling it here and then", "Make sure keys and values are quoted using quote_plus() given", "the absolute path of the file.\"\"\" newFd, newFilePath = tempfile.mkstemp()", "# Restore all proxy related env vars self.env.__exit__() del self.env", "\"D%26%20%C3%B6%20%C3%84%20\") self.text_url_base64 = ( \"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs\" \"sJT0mIPYgxCA%3D\") # base64 encoded data", "encoding given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\") expect", "\"\"\"Tests for urlencode()\"\"\" def help_inputtype(self, given, test_type): \"\"\"Helper method for", "result)) # Test with a bytes as input, with unescaped", "\"\"\"Testcase to test the open method of URLopener class.\"\"\" def", "self.assertEqual(expect, result, \"using unquote_plus(): %r != %r\" % (expect, result))", "= urllib.request.pathname2url(expected_path) self.assertEqual(expected_url, result, \"pathname2url() failed; %s != %s\" %", "the tests, sometimes in other. 
I have a linux, and", "%r != %r\" % (expect, result)) # A mix of", "def test_unquoting_with_bytes_input(self): # Bytes not supported yet with self.assertRaisesRegex(TypeError, 'Expected", "errors=\"replace' given = (('\\u00a0', '\\u00c1'),) expect = '%3F=%3F' result =", "Authentication Required Date: Wed, 02 Jan 2008 03:03:54 GMT Server:", "string\") def test_readlines(self): lines_list = self.returned_obj.readlines() self.assertEqual(len(lines_list), 1, \"readlines() returned", "given = \"a b cd e f\" expect = given.replace('", "'' os.environ['HTTP_PROXY'] = 'http://somewhere:3128' proxies = urllib.request.getproxies_environment() self.assertEqual({}, proxies) #", "def test_simple_compare(self): self.assertEqual(url2pathname(\"///C|/foo/bar/spam.foo\"), r'C:\\foo\\bar\\spam.foo') def test_non_ascii_drive_letter(self): self.assertRaises(IOError, url2pathname, \"///\\u00e8|/\") def", "test_getproxies_environment_prefer_lowercase(self): # Test lowercase preference with removal os.environ['no_proxy'] = ''", "is the responsibility of the developer to properly close files", "urllib.parse.unquote(escape_string) self.assertEqual(result.count('%'), 1, \"using unquote(): not all characters escaped: \"", "resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_path_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1", "self.assertEqual(report[1][1], 8192) self.assertEqual(report[2][1], 8192) class urlretrieve_HttpTests(unittest.TestCase, FakeHTTPMixin): \"\"\"Test urllib.urlretrieve() using", "mock_close=True) try: self.assertRaises(urllib.error.HTTPError, urlopen, \"http://something\") finally: self.unfakehttp() def test_empty_socket(self): #", "# is the responsibility of the developer to properly close", "_report=report): _report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile() urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, 
hooktester)", "on_amp_right = result[amp_location + 1] self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), \"testing %s:", "urllib.request.ftpwrapper = FakeFtpWrapper def unfakeftp(self): urllib.request.ftpwrapper = self._ftpwrapper_class class urlopen_FileTests(unittest.TestCase):", "= given result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote(): %r", "\"%s=%s\" % (hexescape('&'), hexescape('=')) result = urllib.parse.urlencode(given) self.assertEqual(expect, result) given", "% (expect, result)) expect = given.replace(' ', '+') result =", "with Latin-1, with replace error handling given = \"\\u6f22\\u5b57\" expect", "result)) # unquote_to_bytes given = '%xab' expect = bytes(given, 'ascii')", "def testTimeoutValue(self): # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, [],", "(5 * 3) + 2)) def test_using_mapping(self): # Test passing", "should_quote = ''.join(should_quote) for char in should_quote: result = urllib.parse.quote(char)", "charset=iso-8859-1 FF ''') def _reporthook(par1, par2, par3): pass with self.assertRaises(urllib.error.ContentTooShortError):", "urlencode_Tests(unittest.TestCase): \"\"\"Tests for urlencode()\"\"\" def help_inputtype(self, given, test_type): \"\"\"Helper method", "self.assertRaises(IOError, url2pathname, \"///\\u00e8|/\") def test_roundtrip_url2pathname(self): list_of_paths = ['C:', r'\\\\\\C\\test\\\\', r'C:\\foo\\bar\\spam.foo'", "= 'http://somewhereelse:3128' proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128', proxies['http']) class urlopen_HttpTests(unittest.TestCase, FakeHTTPMixin,", "Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian GNU/Linux)", "\"Expected 2 '&'s, got %s\" % result.count('&')) def test_empty_sequence(self): self.assertEqual(\"\",", "with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code path", "and 
quote_plus() handle spaces as specified in # their unique", "test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']), self.text) def test_read_image(self): self.assertEqual(self.image_url_resp.read(), self.image) def test_missing_comma(self):", "with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urlopen(f\"https:{schemeless_url}\") finally: self.unfakehttp() def test_read_0_9(self): #", "'' .join([\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\", \"0123456789\", \"_.-~\"]) result = urllib.parse.quote(do_not_quote) self.assertEqual(do_not_quote, result,", "['///C:', '/////folder/test/', '///C:/foo/bar/spam.foo'] for path in list_of_paths: self.assertEqual(pathname2url(url2pathname(path)), path) if", "HTTP/1.1\\r\\nX-injected: header\\r\\nTEST: 123\" schemeless_url = \"//\" + host + \":8080/test/?test=a\"", "QuotingTests(unittest.TestCase): r\"\"\"Tests for urllib.quote() and urllib.quote_plus() According to RFC 3986", "= given.replace(' ', hexescape(' ')) result = urllib.parse.quote(given) self.assertEqual(expect, result,", "self.check_read(b\"1.1\") def test_read_bogus(self): # urlopen() should raise OSError for many", "\"\\u00c1\")),) expect = '%C2%A0=42&%C2%A0=%C3%81' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result)", "mock_close=False): class FakeSocket(io.BytesIO): io_refs = 1 def sendall(self, data): FakeHTTPConnection.buf", "\"calling readline() after exhausting the file did not\" \" return", "urlopen() lacks %s attribute\" % attr) def test_info(self): self.assertIsInstance(self.text_url_resp.info(), email.message.Message)", "Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33 (Debian", "# \"Kanji\" expect = \"%E6%BC%A2%E5%AD%97\" result = urllib.parse.quote(given) self.assertEqual(expect, result,", "test_urlencode_encoding(self): # ASCII encoding. 
Expect %3F with errors=\"replace' given =", "ftplib, time, threading # ftplib.FTP.port = 9093 # self.evt =", "in UTF-8 given = \"\\u6f22\\u5b57\" # \"Kanji\" expect = \"%E6%BC%A2%E5%AD%97\"", "urllib.parse.urlencode(given, doseq=True, safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect, result) # Safe", "def test_converting_drive_letter(self): self.assertEqual(url2pathname(\"///C|\"), 'C:') self.assertEqual(url2pathname(\"///C:\"), 'C:') self.assertEqual(url2pathname(\"///C|/\"), 'C:\\\\') def test_converting_when_no_drive_letter(self):", "(quote_by_default, result)) # Safe expressed as bytes rather than str", "%r != %r\" % (expect, result)) given = '%' expect", "finally: f.close() self.pathname = support.TESTFN self.returned_obj = urlopen(\"file:%s\" % self.pathname)", "= \"ab%5B%5Dcd\" result = urllib.parse.quote(partial_quote) self.assertEqual(expected, result, \"using quote(): %r", "\"localhost\", 9093, []) # ftp.close() # # def testTimeoutNone(self): #", "b'\\xab\\xea' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes(): %r !=", "with UTF-8, invalid sequence given = \"%F3%B1\" expect = \"\\ufffd\"", "\"close\", \"info\", \"geturl\", \"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.returned_obj, attr), \"object returned by", "as e: urlopen('file://localhost/a/file/which/doesnot/exists.py') self.assertTrue(e.exception.filename) self.assertTrue(e.exception.reason) def test_file_notexists(self): fd, tmp_file =", "result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes(): %r != %r\"", "# serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # serv.bind((\"\", 9093)) #", "HTTPConnection destructor calls close() which calls # flush(). 
Problem: flush()", "FILE.close() except: pass def tearDown(self): # Delete the temporary files.", "preference of proxy bypass and correct matching including ports os.environ['no_proxy']", "200 OK\\r\\n\\r\\nHello!\") try: userpass = \"<PASSWORD>\" url = \"http://{}@python.org/\".format(userpass) fakehttp_wrapper", "by urlopen() has the specified methods for attr in (\"read\",", "= \"\\xa2\\xd8ab\\xff\" expect = \"%A2%D8ab%FF\" result = urllib.parse.quote(given, encoding=\"latin-1\") self.assertEqual(expect,", "self.assertRaises(urllib.error.URLError) as e: urlopen(test_ftp_url) self.assertFalse(e.exception.filename) self.assertTrue(e.exception.reason) def test_ftp_nonexisting(self): with self.assertRaises(urllib.error.URLError)", "bytes # (Technically an invalid URI; expect those bytes to", "returned. result = urllib.request.urlretrieve(\"file:%s\" % support.TESTFN) self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1], email.message.Message,", "(expect, result)) class UnquotingTests(unittest.TestCase): \"\"\"Tests for unquote() and unquote_plus() See", "self.registerFileForCleanUp(second_temp) result = urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN), second_temp) self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp), \"copy", "OpenSSL/0.9.7e Connection: close Content-Type: text/html; charset=iso-8859-1 ''', mock_close=True) try: self.assertRaises(OSError,", "those bytes to be preserved) given = b'%A2\\xd8ab%FF' expect =", "\"using quote_plus(): %r != %r\" % (expect, result)) # Errors", "self.assertEqual(do_not_quote, result, \"using quote_plus(): %r != %r\" % (do_not_quote, result))", "quote_plus() handle spaces as specified in # their unique way", "= open(second_temp, 'rb') try: text = FILE.read() FILE.close() finally: try:", "os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) with self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl) def test_ftp_nohost(self): 
test_ftp_url = 'ftp:///path'", "os.environ['http_proxy'] = 'http://somewhere:3128' os.environ['Http_Proxy'] = 'http://somewhereelse:3128' proxies = urllib.request.getproxies_environment() self.assertEqual('http://somewhere:3128',", "# Errors test for quote_plus given = \"ab\\u6f22\\u5b57 cd\" expect", "input types. 'given' must lead to only the pairs: *", "test_read_bogus(self): # urlopen() should raise OSError for many error codes.", "- 0x1F, 0x7F Have no use in URIs so must", "expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=\":$\", encoding=\"latin-1\") self.assertEqual(expect,", "= \"http://{}@python.org/\".format(userpass) fakehttp_wrapper = http.client.HTTPConnection authorization = (\"Authorization: Basic %s\\r\\n\"", "def connect(self): self.sock = FakeSocket(self.fakedata) type(self).fakesock = self.sock if mock_close:", "on closed file\" which is logged as an # \"Exception", "5 byte file. Should call reporthook only 2 times (once", "lines\\n\") # conn.close() # except socket.timeout: # pass # finally:", "%r\" % (expect, result)) def test_quoting_plus(self): self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta", "io_refs = 1 def sendall(self, data): FakeHTTPConnection.buf = data def", "= ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\") expect = '%A0$=%C1$'", "= urllib.parse.quote_plus(given) self.assertEqual(expect, result, \"using quote_plus(): %r != %r\" %", "GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html; charset=iso-8859-1", "class URL2PathNameTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(url2pathname(\"///C|\"), 'C:') self.assertEqual(url2pathname(\"///C:\"), 'C:') self.assertEqual(url2pathname(\"///C|/\"), 'C:\\\\')", "open object\"\"\" self.returned_obj.close() os.remove(support.TESTFN) def 
test_interface(self): # Make sure object", "urllib.quote_plus() According to RFC 3986 (Uniform Resource Identifiers), to escape", "self.assertEqual(result.count('&'), 2, \"testing %s: expected 2 '&'s; got %s\" %", "result[0]) self.assertTrue(os.path.exists(second_temp), \"copy of the file was not \" \"made\")", "# Characters in BMP, encoded by default in UTF-8 given", "codepaths. Plain # urlopen uses FancyURLOpener which goes via a", "# own retry limit. for i in range(FancyURLopener().maxtries): self.fakehttp(b'''HTTP/1.1 302", "urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding=\"latin-1\") expect = 'br\\u00fcckner_sapporo_20050930.doc' self.assertEqual(expect, result, \"using unquote(): %r", "a bytes as input given = b'%A2%D8ab%FF' expect = b'\\xa2\\xd8ab\\xff'", "= os.path.join(\"needs\", \"quot=ing\", \"here\") expect = \"needs/%s/here\" % urllib.parse.quote(\"quot=ing\") result", "reliance on connecting to the Net for testing. \"\"\" def", "urllib.urlretrieve() using fake http connections\"\"\" def test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1 200 OK", "attribute\" % attr) def test_read(self): self.assertEqual(self.text, self.returned_obj.read()) def test_readline(self): self.assertEqual(self.text,", "200 OK\\r\\n\\r\\nHello.\") host = \"localhost:7777?a=1 HTTP/1.1\\r\\nX-injected: header\\r\\nTEST: 123\" schemeless_url =", "characters to be UTF-8 # encoded). 
result = urllib.parse.unquote_to_bytes(\"\\u6f22%C3%BC\") expect", "self.assertRaises(OSError, urllib.request.URLopener().open, url) self.assertRaises(OSError, urllib.request.URLopener().retrieve, url) self.assertRaises(OSError, DummyURLopener().open, url) self.assertRaises(OSError,", "= ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, doseq=True, safe=\":$\") expect =", "codepath that # calls urllib.parse.quote() on the URL which makes", "FILE.read() FILE.close() finally: try: FILE.close() except: pass self.assertEqual(self.text, text) def", "\"mypass\", \"localhost\", 9093, []) # finally: # socket.setdefaulttimeout(None) # self.assertEqual(ftp.ftp.sock.gettimeout(),", "proxies = urllib.request.getproxies_environment() self.assertNotIn('http', proxies) finally: self.env.unset('REQUEST_METHOD') self.env.unset('HTTP_PROXY') def test_proxy_bypass_environment_host_match(self):", "in list_of_paths: self.assertEqual(url2pathname(pathname2url(path)), path) class PathName2URLTests(unittest.TestCase): def test_converting_drive_letter(self): self.assertEqual(pathname2url(\"C:\"), '///C:')", "while cantdata < 13: # data = conn.recv(13-cantdata) # cantdata", "self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888')) self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234')) def test_proxy_cgi_ignore(self): try:", "result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect = '%A0$=%C1$' result =", "def test_close(self): # Test close() by calling it here and", "makefile(self, *args, **kwds): self.io_refs += 1 return self def read(self,", "\"quot=ing\", \"here\") expect = \"needs/%s/here\" % urllib.parse.quote(\"quot=ing\") result = urllib.request.pathname2url(given)", "\"a\", \"path\") expected_url = \"parts/of/a/path\" result = 
urllib.request.pathname2url(expected_path) self.assertEqual(expected_url, result,", "required\") def test_url_host_with_control_char_rejected(self): for char_no in list(range(0, 0x21)) + [0x7f]:", "calling it here and then having it be called again", "%r != %r\" % (expected, result)) result = urllib.parse.quote_plus(partial_quote) self.assertEqual(expected,", "= ((\"\\u00a0\", \"\\u00c1\"),) expect = '%A0=%C1' result = urllib.parse.urlencode(given, True,", "'ab%sd' % hexescape('c') expect = \"abcd\" result = urllib.parse.unquote(given) self.assertEqual(expect,", "support.TESTFN) self.assertIsInstance(result[1], email.message.Message, \"did not get an email.message.Message instance \"", "for char in should_quote: result = urllib.parse.quote(char) self.assertEqual(hexescape(char), result, \"using", "urllib.\"\"\" def test_thishost(self): \"\"\"Test the urllib.request.thishost utility function returns a", "utility function returns a tuple\"\"\" self.assertIsInstance(urllib.request.thishost(), tuple) class URLopener_Tests(FakeHTTPMixin, unittest.TestCase):", "sure\") result = urllib.request.pathname2url(given) self.assertEqual(expect, result, \"pathname2url() failed; %s !=", "still be open. 
It # is the responsibility of the", "sure temporary files get deleted, but it # does nothing", "given = 'br%C3%BCckner_sapporo_20050930.doc' expect = b'br\\xc3\\xbcckner_sapporo_20050930.doc' result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect,", "# wrong port def test_proxy_bypass_environment_always_match(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', '*')", "does what it should do quote_by_default = \"<>\" result =", "(('\\u00a0', '\\u00c1'),) expect = '%C2%A0=%C3%81' result = urllib.parse.urlencode(given) self.assertEqual(expect, result)", "expect = \"\" result = urllib.parse.unquote(given, errors=\"ignore\") self.assertEqual(expect, result, \"using", "quote_plus(): %r != %r\" % (expect, result)) # Errors test", "return io.BytesIO(), 0 def close(self): pass self._ftpwrapper_class = urllib.request.ftpwrapper urllib.request.ftpwrapper", "line) self.check_read(b\"0.9\") def test_read_1_0(self): self.check_read(b\"1.0\") def test_read_1_1(self): self.check_read(b\"1.1\") def test_read_bogus(self):", "proxies) # Test lowercase preference of proxy bypass and correct", "io import unittest from unittest.mock import patch from test import", "% result) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ())", "their special meaning Data characters : letters, digits, and \"-_.!~*'()\"", "second returned value\") def test_copy(self): # Test that setting the", "\"readlines() returned the wrong number of lines\") self.assertEqual(lines_list[0], self.text, \"readlines()", "= urllib.parse.urlencode(given, True, safe=\":$\") self.assertEqual(expect, result) # Test all above", "self.assertRaises(OSError, urlopen, \"http://python.org/\") finally: self.unfakehttp() def test_invalid_redirect(self): # urlopen() should", "unquote_to_bytes(): %r != %r\" % (expect, result)) def test_unquoting_parts(self): #", "test_userpass_inurl(self): 
self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://user:pass@python.org/\") self.assertEqual(fp.readline(), b\"Hello!\")", "result = urllib.request.url2pathname(given) self.assertEqual(expect, result, \"url2pathname() failed; %s != %s\"", "def setUp(self): # We need to test conditions, where variable", "self.unfakehttp() def test_invalid_redirect(self): # urlopen() should raise OSError for many", "self.assertNotIn(char, resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_path_with_newline_header_injection_rejected(self):", "self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code path quotes", "socket.SOCK_STREAM) # serv.settimeout(3) # serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # serv.bind((\"\", 9093))", "uses a lowercase drive letter. self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile)) @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_remote(self):", "opener else: opener = _urlopener if data is None: return", "_ = urllib.request.URLopener().retrieve(url) self.assertEqual(os.path.splitext(filename)[1], \".txt\") @support.ignore_warnings(category=DeprecationWarning) def test_local_file_open(self): # bpo-35907,", "\"data:text/plain;charset=ISO-8859-1;base64,dGVzdCBkYXRhIFVSTHMgOjs\" \"sJT0mIPYgxCA%3D\") # base64 encoded data URL that contains ignorable", "self.assertIsInstance(block_read_size, int) self.assertIsInstance(file_size, int) self.assertEqual(block_count, count_holder[0]) count_holder[0] = count_holder[0] +", "as an argument. self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')], \"using", "(('\\u00a0', '\\u00c1'),) expect = '%3F=%3F' result = urllib.parse.urlencode(given, encoding=\"ASCII\", errors=\"replace\")", "# Test close() by calling it here and then having", "quotes the URL so there is no injection. 
resp =", "% (expect, result)) # Errors test for quote_plus given =", "result = urllib.parse.quote(char) self.assertEqual(hexescape(char), result, \"using quote(): \" \"%s should", "in BMP, Latin-1, with xmlcharref error handling given = \"\\u6f22\\u5b57\"", "GMT Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Location: file://guidocomputer.athome.com:/python/license Connection:", "def test_urlopener_retrieve_remote(self): url = \"http://www.python.org/file.txt\" self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello!\") self.addCleanup(self.unfakehttp) filename,", "is logged as an # \"Exception ignored in\". Override close()", "Must be escaped Unwise : \"{}|\\^[]`\" Must be escaped \"\"\"", "test_unquoting_badpercent(self): # Test unquoting on bad percent-escapes given = '%xab'", "of the problematic environments, please help! # . Facundo #", "the specified data, registers the file for deletion during the", "We explicitly test urllib.request.urlopen() instead of the top # level", "# Decode with UTF-8, invalid sequence given = \"%F3%B1\" expect", "reporthook=_reporthook) finally: self.unfakehttp() def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed,", "# self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() # # def testTimeoutValue(self): #", "of URLopener class.\"\"\" def test_quoted_open(self): class DummyURLopener(urllib.request.URLopener): def open_spam(self, url):", "sys import tempfile from nturl2path import url2pathname, pathname2url from base64", "30) # ftp.close() # # def testTimeoutDefault(self): # # global", "test_converting_drive_letter(self): self.assertEqual(pathname2url(\"C:\"), '///C:') self.assertEqual(pathname2url(\"C:\\\\\"), '///C:') def test_converting_when_no_drive_letter(self): self.assertEqual(pathname2url(r\"\\\\\\folder\\test\" \"\\\\\"), '/////folder/test/')", "'win32', 'test specific to the urllib.url2path function.') def test_ntpath(self): 
given", "so as to cut down on reliance on connecting to", "in BMP, encoded by default in UTF-8 given = \"\\u6f22\\u5b57\"", "test the various utility functions in the urllib.\"\"\" def test_thishost(self):", "\" \"did not return the expected text\") def test_close(self): #", "errors=\"replace\") self.assertEqual(expect, result, \"using quote(): %r != %r\" % (expect,", "self.addCleanup(self.unfakehttp) filename, _ = urllib.request.URLopener().retrieve(url) self.assertEqual(os.path.splitext(filename)[1], \".txt\") @support.ignore_warnings(category=DeprecationWarning) def test_local_file_open(self):", "unquote(): %r != %r\" % (expect, result)) # Decode with", "self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234, .d.o.t') self.assertTrue(bypass('localhost')) self.assertTrue(bypass('LocalHost')) # MixedCase self.assertTrue(bypass('LOCALHOST'))", "if self.closed: return b\"\" return io.BytesIO.readline(self, length) def close(self): self.io_refs", "% support.TESTFN) self.assertEqual(result[0], support.TESTFN) self.assertIsInstance(result[1], email.message.Message, \"did not get an", "the Latin-1 range, encoded with None (default) result = urllib.parse.unquote(given,", "\"getcode\", \"__iter__\"): self.assertTrue(hasattr(self.text_url_resp, attr), \"object returned by urlopen() lacks %s", "quote_plus(): %r != %r\" % (expected, result)) def test_quoting_space(self): #", "# bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme class DummyURLopener(urllib.request.URLopener):", "%r\" % (expect, result)) def test_default_quoting(self): # Make sure all", "meaning Data characters : letters, digits, and \"-_.!~*'()\" Unreserved and", "sure that the reporthook works. 
def hooktester(block_count, block_read_size, file_size, count_holder=[0]):", "= '%x' expect = bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect,", "the open object\"\"\" self.returned_obj.close() os.remove(support.TESTFN) def test_interface(self): # Make sure", "= '%A0$=%C1$' self.assertEqual(expect, result) given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) expect =", "test urllib.request.urlopen() instead of the top # level 'def urlopen()'", "path quotes the URL so there is no injection. resp", "'urllib.request.url2pathname() failed; %s != %s' % (expect, result)) class Utility_Tests(unittest.TestCase):", "expect = chr(num) result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using unquote():", "on those machines, sometimes # fail in one of the", "\"using quote(): \" \"%s should be escaped to %s, not", "conditions, where variable order _is_ significant self._saved_env = os.environ #", "def test_proxy_bypass_environment_newline(self): bypass = urllib.request.proxy_bypass_environment self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertFalse(bypass('localhost\\n'))", "# Make sure all characters that should be quoted are", "buildbots have TEMP folder that uses a lowercase drive letter.", "deprecated.', DeprecationWarning)): self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/') # test the safe characters", "result = urllib.request.url2pathname(expected_url) self.assertEqual(expected_path, result, \"url2pathame() failed; %s != %s\"", "given = os.path.join(\"needs\", \"quot=ing\", \"here\") expect = \"needs/%s/here\" % urllib.parse.quote(\"quot=ing\")", "'C:\\\\') def test_converting_when_no_drive_letter(self): # cannot end a raw string in", "the usual implicit way to test for ticket #4608. for", "is established and once when the block is # read).", "for that). 
should_quote = [chr(num) for num in range(32)] #", "the developer to properly close files even # when exceptional", "Shortcut for testing FancyURLopener _urlopener = None def urlopen(url, data=None,", "encoded with Latin-1 result = urllib.parse.unquote('br%FCckner_sapporo_20050930.doc', encoding=\"latin-1\") expect = 'br\\u00fcckner_sapporo_20050930.doc'", "OK\\r\\n\\r\\nHello!\") try: fp = urllib.request.urlopen(url) self.assertEqual(fp.geturl(), url) finally: self.unfakehttp() def", "in sequence given = ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd, 42)),) expect =", "# class FTPWrapperTests(unittest.TestCase): # # def setUp(self): # import ftplib,", "for \"\\u6f22\\u00fc\" self.assertEqual(expect, result, \"using unquote_to_bytes(): %r != %r\" %", "9093, [], # timeout=30) # self.assertEqual(ftp.ftp.sock.gettimeout(), 30) # ftp.close() class", "_report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile() urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report),", "all of the # above attempts at injection within the", "finally: self.unfakeftp() def test_userpass_inurl(self): self.fakehttp(b\"HTTP/1.0 200 OK\\r\\n\\r\\nHello!\") try: fp =", "test_nonstring_values(self): self.assertEqual(\"a=1\", urllib.parse.urlencode({\"a\": 1})) self.assertEqual(\"a=None\", urllib.parse.urlencode({\"a\": None})) def test_nonstring_seq_values(self): self.assertEqual(\"a=1&a=2\",", "result, \"url2pathame() failed; %s != %s\" % (result, expected_path)) def", "= 'ab%sd' % hexescape('c') expect = \"abcd\" result = urllib.parse.unquote(given)", "Required Date: Wed, 02 Jan 2008 03:03:54 GMT Server: Apache/1.3.33", "# conn.send(\"2 No more lines\\n\") # conn.close() # except socket.timeout:", "_report.append((block_count, block_read_size, file_size)) srcFileName = self.createNewTempFile(b\"x\" * 5) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN,", "def 
test_quote_plus_with_unicode(self): # Encoding (latin-1) test for quote_plus given =", "quoted by urlopen self.assertEqual(DummyURLopener().open( \"spam://c:|windows%/:=&?~#+!$,;'@()*[]|/path/\"), \"//c:|windows%/:=&?~#+!$,;'@()*[]|/path/\") @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_file(self): with", "self.assertEqual(fp.geturl(), 'http://python.org/') self.assertEqual(fp.getcode(), 200) finally: self.unfakehttp() def test_url_fragment(self): # Issue", "close files even # when exceptional conditions occur. self.tempFiles =", "newdomain.com:1234, .d.o.t') self.assertTrue(bypass('localhost')) self.assertTrue(bypass('LocalHost')) # MixedCase self.assertTrue(bypass('LOCALHOST')) # UPPERCASE self.assertTrue(bypass('.localhost'))", "'$' (\\x24) as safe character # Default utf-8 encoding given", "= bytes(given, 'ascii') result = urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes():", "urllib.request.urlretrieve(support.TEST_HTTP_URL, reporthook=_reporthook) finally: self.unfakehttp() def test_short_content_raises_ContentTooShortError_without_reporthook(self): self.fakehttp(b'''HTTP/1.1 200 OK Date:", "the test self.returned_obj.close() def test_info(self): self.assertIsInstance(self.returned_obj.info(), email.message.Message) def test_geturl(self): self.assertEqual(self.returned_obj.geturl(),", "urllib.parse.quote_plus(given) self.assertEqual(expect, result, \"using quote_plus(): %r != %r\" % (expect,", "digits, and \"_,.-\" do_not_quote = '' .join([\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\", \"0123456789\", \"_.-~\"])", "= fakehttp(fakedata, mock_close=mock_close) self._connection_class = http.client.HTTPConnection http.client.HTTPConnection = fake_http_class def", "per thing and amps \"testing %s: \" \"unexpected number of", "Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Type: text/html; charset=iso-8859-1", "\"\"\"Test 
urlopen() opening a fake http connection.\"\"\" def check_read(self, ver):", "+ tmp_file.replace(os.path.sep, '/') try: self.assertTrue(os.path.exists(tmp_file)) with urlopen(tmp_fileurl) as fobj: self.assertTrue(fobj)", "unquoting works when have non-quoted characters # interspersed given =", "= urllib.parse.unquote_to_bytes(given) self.assertEqual(expect, result, \"using unquote_to_bytes(): %r != %r\" %", "style of invoking requests is deprecated.', DeprecationWarning)): self.assertEqual(DummyURLopener().open( 'spam://example/ /'),'//example/%20/')", "result.index('&') on_amp_left = result[amp_location - 1] on_amp_right = result[amp_location +", "(quote_by_default, result)) # \"Safe\" non-ASCII characters should have no effect", "beyond the first line from the # comparison. # Use", "(Uniform Resource Identifiers), to escape a character you write it", "\"\"\"Escape char as RFC 2396 specifies\"\"\" hex_repr = hex(ord(char))[2:].upper() if", "b'\\xc1\\x24')),) expect = '%A0%24=42&%A0%24=%C1%24' result = urllib.parse.urlencode(given, True) self.assertEqual(expect, result)", "% (expect, result)) def test_unquote_to_bytes(self): given = 'br%C3%BCckner_sapporo_20050930.doc' expect =", "unquote_to_bytes(): %r != %r\" % (expect, result)) # Test with", "characters escaped: \" \"%s\" % result) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, None)", "ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, [], # timeout=30) #", "which raises # \"ValueError: I/O operation on closed file\" which", "self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host = \"localhost\\r\\nX-injected: header\\r\\n\" schemeless_url = \"//\"", "hexescape(char), result)) result = urllib.parse.quote_plus(char) self.assertEqual(hexescape(char), result, \"using quote_plus(): \"", "[None, \"a\"]}, True)) data = collections.OrderedDict([(\"a\", 1), (\"b\", 1)]) self.assertEqual(\"a=a&a=b\",", "result)) # Decode with UTF-8, invalid sequence given = \"%F3%B1\"", 
"result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) self.assertEqual(expect,", "absolute path of the file.\"\"\" newFd, newFilePath = tempfile.mkstemp() try:", "the temporary files. for each in self.tempFiles: try: os.remove(each) except:", "quote(): %r != %r\" % (expected, result)) result = urllib.parse.quote_plus(partial_quote)", "class FakeFTPMixin(object): def fakeftp(self): class FakeFtpWrapper(object): def __init__(self, user, passwd,", "urllib.request.ftpwrapper urllib.request.ftpwrapper = FakeFtpWrapper def unfakeftp(self): urllib.request.ftpwrapper = self._ftpwrapper_class class", "(Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e Connection: close Content-Length: 100 Content-Type: text/html;", "send any # data. (#1680230) self.fakehttp(b'') try: self.assertRaises(OSError, urlopen, \"http://something\")", "be quoted are by default sans # space (separate test", "safe=b\"<>\") self.assertEqual(quote_by_default, result, \"using quote(): %r != %r\" % (quote_by_default,", "bpo-36918: HTTPConnection destructor calls close() which calls # flush(). 
Problem:", "3986 (Uniform Resource Identifiers), to escape a character you write", "Content-Type: text/html; charset=iso-8859-1 FF ''') def _reporthook(par1, par2, par3): pass", "to %s, not %s\" % (char, hexescape(char), result)) del should_quote", "% (expect, result)) # Decode with UTF-8, invalid sequence given", "request = Request(\"http://www.python.org\", method='GET') self.assertEqual(request.get_method(), 'GET') request.method = 'HEAD' self.assertEqual(request.get_method(),", "The authorization header must be in place self.assertIn(authorization, fakehttp_wrapper.buf.decode(\"UTF-8\")) self.assertEqual(fp.readline(),", "self.assertNotIn('\\n', resp.geturl()) finally: self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_host_with_control_char_rejected(self):", "result = urllib.parse.urlencode(given) self.assertEqual(expect, result) result = urllib.parse.urlencode(given, True) for", "try: # conn, addr = serv.accept() # conn.send(\"1 Hola mundo\\n\")", "must be escaped space : 0x20 Must be escaped Delimiters", "support.TESTFN), second_temp) self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp), \"copy of the file was", "\"\"\"Test pathname2url() and url2pathname()\"\"\" def test_basic(self): # Make sure simple", "self.assertEqual(result.count('%'), 1, \"using unquote(): not all characters escaped: \" \"%s\"", "# (Technically an invalid URI; expect those bytes to be", "expect = '%A0$=%C1$' self.assertEqual(expect, result) # Safe parameter in sequence", "# Test with safe bytes self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', b'+'), 'alpha+beta+gamma') def", "= \"ab%3F%3F+cd\" result = urllib.parse.quote_plus(given, encoding=\"latin-1\", errors=\"replace\") self.assertEqual(expect, result, \"using", "characters that should be quoted are by default sans #", "with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"http:{schemeless_url}\") 
with self.assertRaisesRegex( InvalidURL, f\"contain", "module\"\"\" import urllib.parse import urllib.request import urllib.error import http.client import", "context=context ) class urlopen_DataTests(unittest.TestCase): \"\"\"Test urlopen() opening a data URL.\"\"\"", "invalid sequence, ignoring errors given = \"%F3%B1\" expect = \"\"", "%s\" % (result, expected_url)) result = urllib.request.url2pathname(expected_url) self.assertEqual(expected_path, result, \"url2pathame()", "'////folder/test/') self.assertEqual(pathname2url(r\"\\folder\\test\" \"\\\\\"), '/folder/test/') def test_simple_compare(self): self.assertEqual(pathname2url(r'C:\\foo\\bar\\spam.foo'), \"///C:/foo/bar/spam.foo\" ) def", "% (expect, result)) def test_quote_plus_with_unicode(self): # Encoding (latin-1) test for", "lowercase preference of proxy bypass and correct matching including ports", "urllib.request.Request.\"\"\" def test_default_values(self): Request = urllib.request.Request request = Request(\"http://www.python.org\") self.assertEqual(request.get_method(),", "since test would fail the # instant it returned anything", "block_read_size, file_size)) srcFileName = self.createNewTempFile() urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report), 1)", "result = urllib.parse.urlencode(given, True) for value in given[\"sequence\"]: expect =", "%3F with errors=\"replace' given = (('\\u00a0', '\\u00c1'),) expect = '%3F=%3F'", "%r\" % (expect, result)) class UnquotingTests(unittest.TestCase): \"\"\"Tests for unquote() and", "failed; %s != %s\" % (expect, result)) @unittest.skipUnless(sys.platform == 'win32',", "return io.BytesIO.readline(self, length) def close(self): self.io_refs -= 1 if self.io_refs", "%r\" % (expect, result)) def test_unquoting_parts(self): # Make sure unquoting", "and on_amp_right.isdigit(), \"testing %s: '&' not located in proper place", "RequestTests(unittest.TestCase): \"\"\"Unit tests for 
urllib.request.Request.\"\"\" def test_default_values(self): Request = urllib.request.Request", "# unquote_to_bytes given = '%xab' expect = bytes(given, 'ascii') result", "urllib.parse.urlencode(given, True) self.assertEqual(expect, result) # latin-1 given = ((\"\\u00a0\", \"\\u00c1\"),)", "self.assertFalse(bypass('localhost\\n')) self.assertFalse(bypass('anotherdomain.com:8888\\n')) self.assertFalse(bypass('newdomain.com:1234\\n')) class ProxyTests_withOrderedEnv(unittest.TestCase): def setUp(self): # We need", "rather than str result = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=b\"\\xfc\") expect =", "times (once # when the \"network connection\" is established, once", "tearDown(self): # self.evt.wait() # # def testBasic(self): # # connects", "Docs make no guarantee and have possible dictionary input. \"\"\"", "# connects # ftp = urllib.ftpwrapper(\"myuser\", \"mypass\", \"localhost\", 9093, [])", "to be UTF-8 # encoded). result = urllib.parse.unquote_to_bytes(\"\\u6f22%C3%BC\") expect =", "= result[amp_location + 1] self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), \"testing %s: '&'", "http connection.\"\"\" def check_read(self, ver): self.fakehttp(b\"HTTP/\" + ver + b\"", "test_safe(self): # Test setting 'safe' parameter does what it should", "quote(): %r != %r\" % (expect, result)) # Encoding argument", "%s != %s\" % (expect, result)) @unittest.skipUnless(sys.platform == 'win32', 'test", "non-quoted characters # interspersed given = 'ab%sd' % hexescape('c') expect", "http.client.HTTPConnection = self._connection_class class FakeFTPMixin(object): def fakeftp(self): class FakeFtpWrapper(object): def", "quote(): \" \"%s should be escaped to %s, not %s\"", "when the block is # read). 
report = [] def", "% (expect, result)) @unittest.skipUnless(sys.platform == 'win32', 'test specific to the", "(42, \"\\u00c1\")),) expect = '%A0=42&%A0=%C1' result = urllib.parse.urlencode(given, True, encoding=\"latin-1\")", "ProxyTests(unittest.TestCase): def setUp(self): # Records changes to env vars self.env", "= \"\" result = urllib.parse.unquote(given, errors=\"ignore\") self.assertEqual(expect, result, \"using unquote():", "scheme class DummyURLopener(urllib.request.URLopener): def open_local_file(self, url): return url for url", "InvalidURL = http.client.InvalidURL with self.assertRaisesRegex( InvalidURL, r\"contain control.*\\\\r.*(found at least", "'C:\\\\path' result = urllib.request.url2pathname(given) self.assertEqual(expect, result, 'urllib.request.url2pathname() failed; %s !=", "(expect, result)) # Errors test for quote_plus given = \"ab\\u6f22\\u5b57", "% (expect, result)) # Characters in BMP, Latin-1, with xmlcharref", "1st, 1 * 2nd, 2 * 3rd, 3 Test cannot", "doseq=True, safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect, result) # Safe parameter", "result)) # A mix of non-ASCII and percent-encoded characters, Latin-1", "test_short_content_raises_ContentTooShortError(self): self.fakehttp(b'''HTTP/1.1 200 OK Date: Wed, 02 Jan 2008 03:03:54", "%s\" % (char, hexescape(char), result)) result = urllib.parse.quote_plus(char) self.assertEqual(hexescape(char), result,", "do_not_quote = '' .join([\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\", \"0123456789\", \"_.-~\"]) result = urllib.parse.quote(do_not_quote)", "# Characters in the Latin-1 range, encoded with None (default)", "b'\\xc1\\x24'),) result = urllib.parse.urlencode(given, safe=\":$\") expect = '%A0$=%C1$' self.assertEqual(expect, result)", "InvalidURL, r\"contain control.*\\\\r.*(found at least . .)\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex(InvalidURL,", "usual implicit way to test for ticket #4608. for line", "on 5 byte file. 
Should call reporthook only 2 times", "'%3F=%3F' result = urllib.parse.urlencode(given, encoding=\"ASCII\", errors=\"replace\") self.assertEqual(expect, result) # Default", "%s\" % (result, expected_path)) def test_quoting(self): # Test automatic quoting", "+ host + \":8080/test/?test=a\" try: InvalidURL = http.client.InvalidURL with self.assertRaisesRegex(", "urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\\xfc\") expect = urllib.parse.quote(\"a\\xfcb\", encoding=\"latin-1\", safe=\"\") self.assertEqual(expect, result,", "'///C|/path' expect = 'C:\\\\path' result = urllib.request.url2pathname(given) self.assertEqual(expect, result, 'urllib.request.url2pathname()", "(expect, result)) # Characters in Latin-1 range, encoded with Latin-1", "str result = urllib.parse.quote(quote_by_default, safe=b\"<>\") self.assertEqual(quote_by_default, result, \"using quote(): %r", "self.unfakehttp() @unittest.skipUnless(ssl, \"ssl module required\") def test_url_path_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\")", "# Shortcut for testing FancyURLopener _urlopener = None def urlopen(url,", "= urllib.parse.quote_plus(given, encoding=\"latin-1\") self.assertEqual(expect, result, \"using quote_plus(): %r != %r\"", "OK\\r\\n\\r\\nHello!\") try: fp = urlopen(\"http://python.org/\") self.assertEqual(fp.readline(), b\"Hello!\") self.assertEqual(fp.readline(), b\"\") self.assertEqual(fp.geturl(),", "= \"\\u6f22\\u5b57\" expect = \"%26%2328450%3B%26%2323383%3B\" # \"&#28450;&#23383;\" result = urllib.parse.quote(given,", "# self.evt = threading.Event() # threading.Thread(target=server, args=(self.evt,)).start() # time.sleep(.1) #", "file_size)) srcFileName = self.createNewTempFile(b\"x\" * 8193) urllib.request.urlretrieve(self.constructLocalFileUrl(srcFileName), support.TESTFN, hooktester) self.assertEqual(len(report),", "self.assertEqual(request.get_method(), 'GET') request = Request(\"http://www.python.org\", {}) 
self.assertEqual(request.get_method(), 'POST') def test_with_method_arg(self):", "fakehttp(fakedata, mock_close=False): class FakeSocket(io.BytesIO): io_refs = 1 def sendall(self, data):", "should_quote = [chr(num) for num in range(32)] # For 0x00", "def test_urlopener_retrieve_file(self): with support.temp_dir() as tmpdir: fd, tmpfile = tempfile.mkstemp(dir=tmpdir)", "proper place in %s\" % (test_type, result)) self.assertEqual(len(result), (5 *", "\"using quote_plus(): \" \"%s should be escapes to %s, not", "result)) self.assertEqual(len(result), (5 * 3) + 2, #5 chars per", "file to use for testing self.text = bytes(\"test_urllib: %s\\n\" %", "\"using unquote_to_bytes(): %r != %r\" % (expect, result)) def test_unquoting_parts(self):", "True) self.assertEqual(expect, result) # Sequence of values given = ((b'\\xa0\\x24',", "self.fakehttp(b'''HTTP/1.1 302 Found Location: file://guidocomputer.athome.com:/python/license Connection: close ''', mock_close=True) try:", "for each in self.tempFiles: try: os.remove(each) except: pass def constructLocalFileUrl(self,", "parameter works correctly given = {'sequence':['1', '2', '3']} expect =", "urlopen() opening a fake http connection.\"\"\" def check_read(self, ver): self.fakehttp(b\"HTTP/\"", "import collections def hexescape(char): \"\"\"Escape char as RFC 2396 specifies\"\"\"", "failed; %s != %s' % (expect, result)) class Utility_Tests(unittest.TestCase): \"\"\"Testcase", "must reject local_file:// scheme class DummyURLopener(urllib.request.URLopener): def open_local_file(self, url): return", "(expect, result)) def test_quoting_plus(self): self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma'), 'alpha%2Bbeta+gamma') self.assertEqual(urllib.parse.quote_plus('alpha+beta gamma', '+'),", "attribute\" % attr) def test_info(self): self.assertIsInstance(self.text_url_resp.info(), email.message.Message) self.assertEqual(self.text_url_base64_resp.info().get_params(), [('text/plain', ''),", "containing the specified data, 
registers the file for deletion during", "3rd, 3 Test cannot assume anything about order. Docs make", "self.assertEqual(expect, result) given = ((\"\\u00a0\", (42, \"\\u00c1\")),) expect = '%A0=42&%A0=%C1'", "URI; expect those characters to be UTF-8 # encoded). result", "+ hex_repr # Shortcut for testing FancyURLopener _urlopener = None", "self.assertRaises(OSError, urllib.request.urlopen, url) self.assertRaises(OSError, urllib.request.URLopener().open, url) self.assertRaises(OSError, urllib.request.URLopener().retrieve, url) self.assertRaises(OSError,", "underlying socket does not send any # data. (#1680230) self.fakehttp(b'')", ": 0x00 - 0x1F, 0x7F Have no use in URIs", "123\" schemeless_url = \"//\" + host + \":8080/test/?test=a\" try: #", "in the usual implicit way to test for ticket #4608.", "b'+'), 'alpha+beta+gamma') def test_quote_bytes(self): # Bytes should quote directly to", "self.image_url_resp = urllib.request.urlopen(self.image_url) def test_interface(self): # Make sure object returned", "self.assertRaisesRegex(TypeError, 'Expected str, got bytes'): given = b'bl\\xc3\\xa5b\\xc3\\xa6rsyltet\\xc3\\xb8y' urllib.parse.unquote(given) class", "opener = _urlopener if data is None: return opener.open(url) else:", "= \"%E6%BC%A2%E5%AD%97\" expect = \"\\u6f22\\u5b57\" # \"Kanji\" result = urllib.parse.unquote(given)", "Test on zero length file. Should call reporthook only 1", "method='HEAD') self.assertEqual(request.method, 'HEAD') self.assertEqual(request.get_method(), 'HEAD') request = Request(\"http://www.python.org\", method='GET') self.assertEqual(request.get_method(),", "= urllib.parse.unquote(given) self.assertEqual(expect, result, \"using quote(): %r != %r\" %", "the last byte). report = [] def hooktester(block_count, block_read_size, file_size,", "class UnquotingTests(unittest.TestCase): \"\"\"Tests for unquote() and unquote_plus() See the doc", "space. 
self.env.set('NO_PROXY', 'localhost, anotherdomain.com, newdomain.com:1234') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888')) self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234')) def test_proxy_cgi_ignore(self):", "test_converting_when_no_drive_letter(self): # cannot end a raw string in \\ self.assertEqual(url2pathname(\"///C/test/\"),", "self.assertEqual(result, '+', \"using quote_plus(): %r != +\" % result) given", "required\") def test_url_host_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host = \"localhost\\r\\nX-injected: header\\r\\n\"", "module required\") def test_url_path_with_newline_header_injection_rejected(self): self.fakehttp(b\"HTTP/1.1 200 OK\\r\\n\\r\\nHello.\") host = \"localhost:7777?a=1", "encoding=\"latin-1\") expect = '\\u6f22\\u00fc' self.assertEqual(expect, result, \"using unquote(): %r !=", "quote_plus(): %r != %r\" % (do_not_quote, result)) def test_default_safe(self): #", "to count number of iterations since test would fail the", "# # def testTimeoutNone(self): # # global default timeout is", "return urllib.request.FancyURLopener() def fakehttp(fakedata, mock_close=False): class FakeSocket(io.BytesIO): io_refs = 1", "control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"http:{schemeless_url}\") with self.assertRaisesRegex( InvalidURL, f\"contain control.*{escaped_char_repr}\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This", "'localhost, anotherdomain.com, newdomain.com:1234') self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com')) self.assertTrue(urllib.request.proxy_bypass_environment('anotherdomain.com:8888')) self.assertTrue(urllib.request.proxy_bypass_environment('newdomain.com:1234')) def test_proxy_cgi_ignore(self): try: self.env.set('HTTP_PROXY',", "= \"\\xa2\\xd8 \\xff\" expect = \"%A2%D8+%FF\" result 
= urllib.parse.quote_plus(given, encoding=\"latin-1\")", "(expect, result)) given = '%' expect = given result =", "'alpha+beta+gamma') def test_quote_bytes(self): # Bytes should quote directly to percent-encoded", "finally: os.close(fd) os.unlink(tmp_file) self.assertFalse(os.path.exists(tmp_file)) with self.assertRaises(urllib.error.URLError): urlopen(tmp_fileurl) def test_ftp_nohost(self): test_ftp_url", "file for deletion during the test fixture tear down, and", "# interspersed given = 'ab%sd' % hexescape('c') expect = \"abcd\"", "%r != %r\" % (expect, result)) def test_quote_with_unicode(self): # Characters", "test_empty_sequence(self): self.assertEqual(\"\", urllib.parse.urlencode({})) self.assertEqual(\"\", urllib.parse.urlencode([])) def test_nonstring_values(self): self.assertEqual(\"a=1\", urllib.parse.urlencode({\"a\": 1}))", "= conn.recv(13-cantdata) # cantdata += len(data) # time.sleep(.3) # conn.send(\"2", "\"&#28450;&#23383;\" result = urllib.parse.quote(given, encoding=\"latin-1\", errors=\"xmlcharrefreplace\") self.assertEqual(expect, result, \"using quote():", "number of lines\") self.assertEqual(lines_list[0], self.text, \"readlines() returned improper text\") def", "given = 'ab%sd' % hexescape('c') expect = \"abcd\" result =", "argument. 
self.help_inputtype([('1st', '1'), ('2nd', '2'), ('3rd', '3')], \"using sequence of", "self.assertTrue(e.exception.reason) @patch.object(urllib.request, 'MAXFTPCACHE', 0) def test_ftp_cache_pruning(self): self.fakeftp() try: urllib.request.ftpcache['test'] =", "must lead to only the pairs: * 1st, 1 *", "expect = \"%s/using_quote\" % urllib.parse.quote(\"make sure\") result = urllib.request.pathname2url(given) self.assertEqual(expect,", "'pass', 'localhost', 21, []) urlopen('ftp://localhost') finally: self.unfakeftp() def test_userpass_inurl(self): self.fakehttp(b\"HTTP/1.0", "= urllib.parse.urlencode(given, True) self.assertEqual(expect, result) def test_urlencode_encoding_safe_parameter(self): # Send '$'", "close() to silence this error. def close(self): pass FakeHTTPConnection.fakedata =", "the pairs: * 1st, 1 * 2nd, 2 * 3rd,", "urllib.request.URLopener().retrieve(url) self.assertEqual(os.path.splitext(filename)[1], \".txt\") @support.ignore_warnings(category=DeprecationWarning) def test_local_file_open(self): # bpo-35907, CVE-2019-9948: urllib", "open(support.TESTFN, 'wb') try: f.write(self.text) finally: f.close() self.pathname = support.TESTFN self.returned_obj", "in the tearDown method. Note, # this only helps to", "'&' not located in proper place in %s\" % (test_type,", "42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42' result = urllib.parse.urlencode(given, True, safe=\":$\", encoding=\"latin-1\")", "of temporary files. Each item in the list is a", "invalid sequence, replace errors result = urllib.parse.unquote(given, errors=\"replace\") self.assertEqual(expect, result,", "for expected in expect_somewhere: self.assertIn(expected, result, \"testing %s: %s not", "result = urllib.parse.unquote(given) self.assertEqual(expect, result, \"using quote(): %r != %r\"", "the Net for testing. 
\"\"\" def setUp(self): # Create a", "try: FILE.close() except: pass self.assertEqual(self.text, text) def test_reporthook(self): # Make", "= os.path.join(\"make sure\", \"using_quote\") expect = \"%s/using_quote\" % urllib.parse.quote(\"make sure\")", "error. def close(self): pass FakeHTTPConnection.fakedata = fakedata return FakeHTTPConnection class", "when exceptional conditions occur. self.tempFiles = [] # Create a", "class FakeHTTPConnection(http.client.HTTPConnection): # buffer to store data for verification in", "test conditions, where variable order _is_ significant self._saved_env = os.environ", "test_urlencode_encoding_doseq(self): # ASCII Encoding. Expect %3F with errors=\"replace' given =", "sequence given = ((b'\\xa0\\x24', (b'\\xc1\\x24', 0xd, 42)),) expect = '%A0$=%C1$&%A0$=13&%A0$=42'", "result) # Safe parameter in sequence given = ((b'\\xa0\\x24', (b'\\xc1\\x24',", "of non-ASCII and percent-encoded characters, Latin-1 # (Note, the string", "AttributeError), urllib.parse.unquote, None) self.assertRaises((TypeError, AttributeError), urllib.parse.unquote, ()) with support.check_warnings(('', BytesWarning),", "non-ASCII and percent-encoded characters, Latin-1 # (Note, the string contains", "# ftp.close() class RequestTests(unittest.TestCase): \"\"\"Unit tests for urllib.request.Request.\"\"\" def test_default_values(self):", "with self.assertRaisesRegex(InvalidURL, r\"contain control.*\\\\n\"): urllib.request.urlopen(f\"https:{schemeless_url}\") # This code path quotes", "newdomain.com:1234') self.assertFalse(bypass('localhost\\n')) self.assertFalse(bypass('anotherdomain.com:8888\\n')) self.assertFalse(bypass('newdomain.com:1234\\n')) class ProxyTests_withOrderedEnv(unittest.TestCase): def setUp(self): # We", "self.assertEqual(\"a=None&a=a\", urllib.parse.urlencode({\"a\": [None, \"a\"]}, True)) data = collections.OrderedDict([(\"a\", 1), (\"b\",", "# Some buildbots have TEMP folder that uses a lowercase", "= fake_http_class def unfakehttp(self): 
http.client.HTTPConnection = self._connection_class class FakeFTPMixin(object): def", "range, encoded with UTF-8 given = 'br%C3%BCckner_sapporo_20050930.doc' expect = 'br\\u00fcckner_sapporo_20050930.doc'", "global default timeout is ignored # import socket # self.assertIsNone(socket.getdefaulttimeout())", "\"network connection\" is established and once when the block is", "from the # comparison. # Use the iterator in the", "related env vars self.env.__exit__() del self.env def test_getproxies_environment_keep_no_proxies(self): self.env.set('NO_PROXY', 'localhost')", "be preserved) given = b'%A2\\xd8ab%FF' expect = b'\\xa2\\xd8ab\\xff' result =", "= '%A0$=%C1$' self.assertEqual(expect, result) # Safe parameter in sequence given", "authorization = (\"Authorization: Basic %s\\r\\n\" % b64encode(userpass.encode(\"ASCII\")).decode(\"ASCII\")) fp = urlopen(url)", "FakeSocket(io.BytesIO): io_refs = 1 def sendall(self, data): FakeHTTPConnection.buf = data", "<filename>Lib/test/test_urllib.py \"\"\"Regression tests for what was in Python 2's \"urllib\"", "unquote_to_bytes given = '%xab' expect = bytes(given, 'ascii') result =", "def hooktester(block_count, block_read_size, file_size, _report=report): _report.append((block_count, block_read_size, file_size)) srcFileName =", "# Test lowercase preference with removal os.environ['no_proxy'] = '' os.environ['No_Proxy']", "method. Note, # this only helps to makes sure temporary", "setUp(self): # text containing URL special- and unicode-characters self.text =", "base64 encoded data URL that contains ignorable spaces, # such", "\" \", \"%0A\", and \"%20\". 
self.image_url = ( \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAIAAAB7\\n\" \"QOjdAAAAAXNSR0IArs4c6QAAAA9JREFUCNdj%0AYGBg%2BP//PwAGAQL%2BCm8", "# Bytes should quote directly to percent-encoded values given =", "given result = urllib.request.url2pathname(result) self.assertEqual(expect, result, \"url2pathname() failed; %s !=", "quote_from_bytes(): %r != %r\" % (expect, result)) def test_quote_with_unicode(self): #", "= urllib.request.urlretrieve(self.constructLocalFileUrl( support.TESTFN), second_temp) self.assertEqual(second_temp, result[0]) self.assertTrue(os.path.exists(second_temp), \"copy of the", "def test_read_text_base64(self): self.assertEqual(self.text_url_base64_resp.read().decode( dict(self.text_url_base64_resp.info().get_params())['charset']), self.text) def test_read_image(self): self.assertEqual(self.image_url_resp.read(), self.image) def", "test_empty_socket(self): # urlopen() raises OSError if the underlying socket does", "lowercase drive letter. self.assertEqual(os.path.normcase(filename), os.path.normcase(tmpfile)) @support.ignore_warnings(category=DeprecationWarning) def test_urlopener_retrieve_remote(self): url =", "object returned by urlopen() has the specified methods for attr", "test_unquote_with_unicode(self): # Characters in the Latin-1 range, encoded with UTF-8", "Default utf-8 encoding given = ((b'\\xa0\\x24', b'\\xc1\\x24'),) result = urllib.parse.urlencode(given,", "result, \"using quote(): %r != %r\" % (expect, result)) def" ]
[ "from __future__ import unicode_literals from ...models import Address, SeasonalPriceBand from", "'type', 'sub_type' ] _date_time_fields_utc = ['date_created', 'date_last_modified'] _model_fields = [('start_address',", "'sku', 'type', 'sub_type' ] _date_time_fields_utc = ['date_created', 'date_last_modified'] _model_fields =", "class Transport(Product): _resource_name = 'transports' _is_listable = False _as_is_fields =", "Python 2 and 3 from __future__ import unicode_literals from ...models", "'product_line', 'sku', 'type', 'sub_type' ] _date_time_fields_utc = ['date_created', 'date_last_modified'] _model_fields", "['date_created', 'date_last_modified'] _model_fields = [('start_address', Address), ('finish_address', Address)] _model_collection_fields =", "'transports' _is_listable = False _as_is_fields = [ 'id', 'href', 'availability',", "= ['date_created', 'date_last_modified'] _model_fields = [('start_address', Address), ('finish_address', Address)] _model_collection_fields", "Product class Transport(Product): _resource_name = 'transports' _is_listable = False _as_is_fields", "_resource_name = 'transports' _is_listable = False _as_is_fields = [ 'id',", "SeasonalPriceBand from ..base import Product class Transport(Product): _resource_name = 'transports'", "'href', 'availability', 'name', 'product_line', 'sku', 'type', 'sub_type' ] _date_time_fields_utc =", "= 'transports' _is_listable = False _as_is_fields = [ 'id', 'href',", "from ..base import Product class Transport(Product): _resource_name = 'transports' _is_listable", "_model_fields = [('start_address', Address), ('finish_address', Address)] _model_collection_fields = [('price_bands', SeasonalPriceBand)]", "Transport(Product): _resource_name = 'transports' _is_listable = False _as_is_fields = [", "unicode_literals from ...models import Address, SeasonalPriceBand from ..base import Product", "= False _as_is_fields = [ 'id', 'href', 'availability', 'name', 'product_line',", "3 from __future__ import 
unicode_literals from ...models import Address, SeasonalPriceBand", "..base import Product class Transport(Product): _resource_name = 'transports' _is_listable =", "= [ 'id', 'href', 'availability', 'name', 'product_line', 'sku', 'type', 'sub_type'", "'sub_type' ] _date_time_fields_utc = ['date_created', 'date_last_modified'] _model_fields = [('start_address', Address),", "'name', 'product_line', 'sku', 'type', 'sub_type' ] _date_time_fields_utc = ['date_created', 'date_last_modified']", "'availability', 'name', 'product_line', 'sku', 'type', 'sub_type' ] _date_time_fields_utc = ['date_created',", "_date_time_fields_utc = ['date_created', 'date_last_modified'] _model_fields = [('start_address', Address), ('finish_address', Address)]", "[ 'id', 'href', 'availability', 'name', 'product_line', 'sku', 'type', 'sub_type' ]", "] _date_time_fields_utc = ['date_created', 'date_last_modified'] _model_fields = [('start_address', Address), ('finish_address',", "import Address, SeasonalPriceBand from ..base import Product class Transport(Product): _resource_name", "False _as_is_fields = [ 'id', 'href', 'availability', 'name', 'product_line', 'sku',", "Address, SeasonalPriceBand from ..base import Product class Transport(Product): _resource_name =", "'date_last_modified'] _model_fields = [('start_address', Address), ('finish_address', Address)] _model_collection_fields = [('price_bands',", "# Python 2 and 3 from __future__ import unicode_literals from", "import unicode_literals from ...models import Address, SeasonalPriceBand from ..base import", "and 3 from __future__ import unicode_literals from ...models import Address,", "'id', 'href', 'availability', 'name', 'product_line', 'sku', 'type', 'sub_type' ] _date_time_fields_utc", "...models import Address, SeasonalPriceBand from ..base import Product class Transport(Product):", "_as_is_fields = [ 'id', 'href', 'availability', 'name', 'product_line', 'sku', 'type',", "2 and 3 from __future__ import unicode_literals from 
...models import", "from ...models import Address, SeasonalPriceBand from ..base import Product class", "__future__ import unicode_literals from ...models import Address, SeasonalPriceBand from ..base", "import Product class Transport(Product): _resource_name = 'transports' _is_listable = False", "_is_listable = False _as_is_fields = [ 'id', 'href', 'availability', 'name'," ]
[ "async def dare(self, ctx): dare = random.choice(dares) await ctx.send(dare) def", "= client @commands.command(aliases=[\"d\"]) async def dare(self, ctx): dare = random.choice(dares)", "= json.load(data) dares = data[\"dares\"] class Dare(commands.Cog): def __init__(self, client):", "data[\"dares\"] class Dare(commands.Cog): def __init__(self, client): self.client = client @commands.command(aliases=[\"d\"])", "with open(\"assets/json/questions.json\") as data: data = json.load(data) dares = data[\"dares\"]", "json.load(data) dares = data[\"dares\"] class Dare(commands.Cog): def __init__(self, client): self.client", "import commands import json import random with open(\"assets/json/questions.json\") as data:", "def dare(self, ctx): dare = random.choice(dares) await ctx.send(dare) def setup(client):", "class Dare(commands.Cog): def __init__(self, client): self.client = client @commands.command(aliases=[\"d\"]) async", "@commands.command(aliases=[\"d\"]) async def dare(self, ctx): dare = random.choice(dares) await ctx.send(dare)", "import random with open(\"assets/json/questions.json\") as data: data = json.load(data) dares", "client): self.client = client @commands.command(aliases=[\"d\"]) async def dare(self, ctx): dare", "dare(self, ctx): dare = random.choice(dares) await ctx.send(dare) def setup(client): client.add_cog(Dare(client))", "commands import json import random with open(\"assets/json/questions.json\") as data: data", "open(\"assets/json/questions.json\") as data: data = json.load(data) dares = data[\"dares\"] class", "def __init__(self, client): self.client = client @commands.command(aliases=[\"d\"]) async def dare(self,", "__init__(self, client): self.client = client @commands.command(aliases=[\"d\"]) async def dare(self, ctx):", "dares = data[\"dares\"] class Dare(commands.Cog): def __init__(self, client): self.client =", "from discord.ext import commands import json import random with open(\"assets/json/questions.json\")", "Dare(commands.Cog): def 
__init__(self, client): self.client = client @commands.command(aliases=[\"d\"]) async def", "import json import random with open(\"assets/json/questions.json\") as data: data =", "data: data = json.load(data) dares = data[\"dares\"] class Dare(commands.Cog): def", "as data: data = json.load(data) dares = data[\"dares\"] class Dare(commands.Cog):", "json import random with open(\"assets/json/questions.json\") as data: data = json.load(data)", "discord.ext import commands import json import random with open(\"assets/json/questions.json\") as", "self.client = client @commands.command(aliases=[\"d\"]) async def dare(self, ctx): dare =", "random with open(\"assets/json/questions.json\") as data: data = json.load(data) dares =", "= data[\"dares\"] class Dare(commands.Cog): def __init__(self, client): self.client = client", "client @commands.command(aliases=[\"d\"]) async def dare(self, ctx): dare = random.choice(dares) await", "data = json.load(data) dares = data[\"dares\"] class Dare(commands.Cog): def __init__(self," ]
[]
[ "from . import * # noqa def seed_const(): mtpy.mt_seed32new(42) def", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "def test1(f): make_eq_test(f, ()) @mark.parametrize('arg', single_args([1, 2, 42, 1823828, randint(1,", "mtpy.mt_seed32new(42) return mtpy.mt_drand() def drand(s): mtpy.mt_seed32new(s) return mtpy.mt_drand() + mtpy.mt_drand()", "randint(1, 10000000), randint(1, 10000000)])) @mark.parametrize('f', [seed, drand]) def test2(f, arg):", "distributed under the License is distributed on an \"AS IS\"", "randint(1, 10000000)])) @mark.parametrize('f', [seed, drand]) def test2(f, arg): make_eq_test(f, arg)", "the specific language governing permissions and # limitations under the", "make_eq_test(f, ()) @mark.parametrize('arg', single_args([1, 2, 42, 1823828, randint(1, 10000000), randint(1,", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "def drand_const(): mtpy.mt_seed32new(42) return mtpy.mt_drand() def drand(s): mtpy.mt_seed32new(s) return mtpy.mt_drand()", "drand_const(): mtpy.mt_seed32new(42) return mtpy.mt_drand() def drand(s): mtpy.mt_seed32new(s) return mtpy.mt_drand() +", "42, 1823828, randint(1, 10000000), randint(1, 10000000)])) @mark.parametrize('f', [seed, drand]) def", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "except in compliance with the License. 
# You may obtain", "import * # noqa def seed_const(): mtpy.mt_seed32new(42) def seed(s): mtpy.mt_seed32new(s)", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "writing, software # distributed under the License is distributed on", "return mtpy.mt_drand() def drand(s): mtpy.mt_seed32new(s) return mtpy.mt_drand() + mtpy.mt_drand() @mark.parametrize('f',", "in writing, software # distributed under the License is distributed", "@mark.parametrize('arg', single_args([1, 2, 42, 1823828, randint(1, 10000000), randint(1, 10000000)])) @mark.parametrize('f',", "you may not use this file except in compliance with", "# Copyright 2013-2015 <NAME> # # Licensed under the Apache", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "language governing permissions and # limitations under the License. from", "def seed(s): mtpy.mt_seed32new(s) def drand_const(): mtpy.mt_seed32new(42) return mtpy.mt_drand() def drand(s):", "[seed_const, drand_const]) def test1(f): make_eq_test(f, ()) @mark.parametrize('arg', single_args([1, 2, 42,", "under the License. from random import randint import mtpy from", "()) @mark.parametrize('arg', single_args([1, 2, 42, 1823828, randint(1, 10000000), randint(1, 10000000)]))", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "import mtpy from . import * # noqa def seed_const():", "CONDITIONS OF ANY KIND, either express or implied. # See", "limitations under the License. from random import randint import mtpy", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "License. 
# You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "# You may obtain a copy of the License at", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "2013-2015 <NAME> # # Licensed under the Apache License, Version", "under the License is distributed on an \"AS IS\" BASIS,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "* # noqa def seed_const(): mtpy.mt_seed32new(42) def seed(s): mtpy.mt_seed32new(s) def", "mtpy.mt_seed32new(s) return mtpy.mt_drand() + mtpy.mt_drand() @mark.parametrize('f', [seed_const, drand_const]) def test1(f):", "def drand(s): mtpy.mt_seed32new(s) return mtpy.mt_drand() + mtpy.mt_drand() @mark.parametrize('f', [seed_const, drand_const])", "the License for the specific language governing permissions and #", "(the \"License\"); # you may not use this file except", ". import * # noqa def seed_const(): mtpy.mt_seed32new(42) def seed(s):", "Apache License, Version 2.0 (the \"License\"); # you may not", "# you may not use this file except in compliance", "either express or implied. # See the License for the", "drand(s): mtpy.mt_seed32new(s) return mtpy.mt_drand() + mtpy.mt_drand() @mark.parametrize('f', [seed_const, drand_const]) def", "def seed_const(): mtpy.mt_seed32new(42) def seed(s): mtpy.mt_seed32new(s) def drand_const(): mtpy.mt_seed32new(42) return", "OR CONDITIONS OF ANY KIND, either express or implied. #", "2, 42, 1823828, randint(1, 10000000), randint(1, 10000000)])) @mark.parametrize('f', [seed, drand])", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "from random import randint import mtpy from . 
import *", "the License is distributed on an \"AS IS\" BASIS, #", "License. from random import randint import mtpy from . import", "in compliance with the License. # You may obtain a", "software # distributed under the License is distributed on an", "governing permissions and # limitations under the License. from random", "Copyright 2013-2015 <NAME> # # Licensed under the Apache License,", "randint import mtpy from . import * # noqa def", "drand_const]) def test1(f): make_eq_test(f, ()) @mark.parametrize('arg', single_args([1, 2, 42, 1823828,", "+ mtpy.mt_drand() @mark.parametrize('f', [seed_const, drand_const]) def test1(f): make_eq_test(f, ()) @mark.parametrize('arg',", "# # Unless required by applicable law or agreed to", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "10000000), randint(1, 10000000)])) @mark.parametrize('f', [seed, drand]) def test2(f, arg): make_eq_test(f,", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "Version 2.0 (the \"License\"); # you may not use this", "and # limitations under the License. from random import randint", "mtpy.mt_drand() @mark.parametrize('f', [seed_const, drand_const]) def test1(f): make_eq_test(f, ()) @mark.parametrize('arg', single_args([1,", "law or agreed to in writing, software # distributed under", "1823828, randint(1, 10000000), randint(1, 10000000)])) @mark.parametrize('f', [seed, drand]) def test2(f,", "noqa def seed_const(): mtpy.mt_seed32new(42) def seed(s): mtpy.mt_seed32new(s) def drand_const(): mtpy.mt_seed32new(42)", "mtpy.mt_seed32new(42) def seed(s): mtpy.mt_seed32new(s) def drand_const(): mtpy.mt_seed32new(42) return mtpy.mt_drand() def", "the License. from random import randint import mtpy from .", "mtpy.mt_drand() + mtpy.mt_drand() @mark.parametrize('f', [seed_const, drand_const]) def test1(f): make_eq_test(f, ())", "implied. 
# See the License for the specific language governing", "under the Apache License, Version 2.0 (the \"License\"); # you", "\"License\"); # you may not use this file except in", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "mtpy.mt_seed32new(s) def drand_const(): mtpy.mt_seed32new(42) return mtpy.mt_drand() def drand(s): mtpy.mt_seed32new(s) return", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "return mtpy.mt_drand() + mtpy.mt_drand() @mark.parametrize('f', [seed_const, drand_const]) def test1(f): make_eq_test(f,", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "random import randint import mtpy from . import * #", "seed(s): mtpy.mt_seed32new(s) def drand_const(): mtpy.mt_seed32new(42) return mtpy.mt_drand() def drand(s): mtpy.mt_seed32new(s)", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "single_args([1, 2, 42, 1823828, randint(1, 10000000), randint(1, 10000000)])) @mark.parametrize('f', [seed,", "to in writing, software # distributed under the License is", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "@mark.parametrize('f', [seed_const, drand_const]) def test1(f): make_eq_test(f, ()) @mark.parametrize('arg', single_args([1, 2,", "permissions and # limitations under the License. from random import", "import randint import mtpy from . 
import * # noqa", "You may obtain a copy of the License at #", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "# limitations under the License. from random import randint import", "# noqa def seed_const(): mtpy.mt_seed32new(42) def seed(s): mtpy.mt_seed32new(s) def drand_const():", "<reponame>squisher/stella # Copyright 2013-2015 <NAME> # # Licensed under the", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "seed_const(): mtpy.mt_seed32new(42) def seed(s): mtpy.mt_seed32new(s) def drand_const(): mtpy.mt_seed32new(42) return mtpy.mt_drand()", "mtpy.mt_drand() def drand(s): mtpy.mt_seed32new(s) return mtpy.mt_drand() + mtpy.mt_drand() @mark.parametrize('f', [seed_const,", "test1(f): make_eq_test(f, ()) @mark.parametrize('arg', single_args([1, 2, 42, 1823828, randint(1, 10000000),", "with the License. # You may obtain a copy of", "this file except in compliance with the License. # You", "the Apache License, Version 2.0 (the \"License\"); # you may", "<NAME> # # Licensed under the Apache License, Version 2.0", "mtpy from . import * # noqa def seed_const(): mtpy.mt_seed32new(42)" ]
[ "EMPTY_INT # 持仓量 self.time = EMPTY_STRING # 时间 11:20:56.5 self.date", "dbConnect(self): \"\"\"连接MongoDB数据库\"\"\" if not self.dbClient: # 读取MongoDB的设置 try: # 设置MongoDB操作的超时时间为0.5秒", "bar.exchange self.xminBar.open = bar.open self.xminBar.high = bar.high self.xminBar.low = bar.low", "u'fail in db connection') #---------------------------------------------------------------------- def dbQuery(self, dbName, collectionName, d,", "2.第二天9点 if self.lastDayBar != None \\ and ( (self.lastDayBar.time <=", "= EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码 # 成交数据 self.lastPrice = EMPTY_FLOAT", "= False # 默认不是新的一分钟 # 尚未创建对象 if not self.bar: self.bar", "EMPTY_FLOAT self.askPrice4 = EMPTY_FLOAT self.askPrice5 = EMPTY_FLOAT self.bidVolume1 = EMPTY_INT", "class BarData(object): \"\"\"K线数据\"\"\" #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(BarData, self).__init__() self.vtSymbol", "= bar.symbol self.dayBar.exchange = bar.exchange self.dayBar.open = bar.open self.dayBar.high =", "EMPTY_FLOAT self.askPrice3 = EMPTY_FLOAT self.askPrice4 = EMPTY_FLOAT self.askPrice5 = EMPTY_FLOAT", "# 缓存Tick self.lastTick = tick #---------------------------------------------------------------------- def updateSecond(self, tick ):", "False: os.mkdir( self.LogDir ) self.logPath = os.path.join(self.LogDir , logName) self.now_debug", "EMPTY_STRING # 交易所代码 self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码 #", "in barData: bar = BarData() bar.__dict__ = d l.append(bar) return", "# 清空老K线缓存对象 self.xsecBar = BarData() newSecond = True # 初始化新多少秒的K线数据", ")): self.dayBar.datetime = self.dayBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.dayBar.date = self.dayBar.datetime.strftime('%Y%m%d')", "bar.close self.xhourBar.openInterest = bar.openInterest self.xhourBar.volume += float(bar.volume) # X分钟已经走完 if", "self.lastTick = None # 上一TICK缓存对象 self.lastSecondTick = None # 用于秒级别的上一根Tick缓存对象", "self.xminBar.datetime = 
self.xminBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xminBar.date = self.xminBar.datetime.strftime('%Y%m%d') self.xminBar.time", "self.dbQuery(dbName, collectionName, d, 'datetime') l = [] for d in", "\"LogDir\" #---------------------------------------------------------------------- def __init__(self, logName , in_debug = True ,", "self.bidPrice1 = EMPTY_FLOAT self.bidPrice2 = EMPTY_FLOAT self.bidPrice3 = EMPTY_FLOAT self.bidPrice4", "self.xsecBar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上多少秒K线 self.onXsecBar(self.xsecBar) # 清空老K线缓存对象 self.xsecBar = BarData() newSecond", "error_id): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" : \" +", "= EMPTY_FLOAT # 今日开盘价 self.highPrice = EMPTY_FLOAT # 今日最高价 self.lowPrice", "market_maker import OrderManager from settings import * import os from", "= bar.close self.volumeArray[-1] = bar.volume #---------------------------------------------------------------------- @property def open(self): \"\"\"获取开盘价序列\"\"\"", "## init the db self.dbConnect() #---------------------------------------------------------------------- def dbConnect(self): \"\"\"连接MongoDB数据库\"\"\" if", "tick.lastPrice self.bar.datetime = tick.datetime self.bar.openInterest = tick.openInterest if self.lastTick: self.bar.volume", "not self.xminBar: self.xminBar = BarData() self.xminBar.vtSymbol = bar.vtSymbol self.xminBar.symbol =", "= False # True if count>=size self.openArray = np.zeros(size) #", "self.high = EMPTY_FLOAT self.low = EMPTY_FLOAT self.close = EMPTY_FLOAT self.date", "#---------------------------------------------------------------------- def __init__(self, onBar, xsec=0, onXsecBar=None , xmin=0 , xhour=0,", "说明是新的一天了 # 先推送昨天过去 self.onDayBar( self.dayBar) self.dayBar = BarData() self.dayBar.vtSymbol =", ", error_id) ''' 主要Engine ''' class DataEngine(EngineBase): #---------------------------------------------------------------------- def __init__(self", "= GLOBAL_USE_DBNAME , collectionName = 
GLOBAL_USE_SYMBOL, days = 2): today_datetime", "# 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码 # 成交数据 self.lastPrice = EMPTY_FLOAT # 最新成交价", "bar.volume #---------------------------------------------------------------------- @property def open(self): \"\"\"获取开盘价序列\"\"\" return self.openArray #---------------------------------------------------------------------- @property", "self.logPath , open_md) #---------------------------------------------------------------------- def error(self, msg , error_id): if", "and bar.time >= \"15:30:00\") \\ or (self.lastDayBar.time <= \"15:30:00\" and", "True # 初始化新一分钟的K线数据 if newMinute: self.bar.vtSymbol = tick.vtSymbol self.bar.symbol =", "= tick.vtSymbol self.xsecBar.symbol = tick.symbol self.xsecBar.exchange = tick.exchange self.xsecBar.open =", "self.volume = EMPTY_INT # 今天总成交量 self.openInterest = EMPTY_INT # 持仓量", "tick ): \"\"\"通过TICK数据更新到秒数据\"\"\" newSecond = False if not self.xsecBar: self.xsecBar", "self.lastSecondTick.volume) # 当前Tick内的成交量 # 缓存 secondTick 对象 self.lastSecondTick = tick", "self.xminBar.vtSymbol = bar.vtSymbol self.xminBar.symbol = bar.symbol self.xminBar.exchange = bar.exchange self.xminBar.open", "# 生成上一分钟K线的时间戳 self.bar.datetime = self.bar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.bar.date =", "def close(self): self.f.close() ''' tick 数据的格式 ''' class TickData(object): #----------------------------------------------------------------------", ": \" + \"Error msg %s: %s \" % (str(error_id)", "self.volumeArray[0:self.size-1] = self.volumeArray[1:self.size] self.openArray[-1] = bar.open self.highArray[-1] = bar.high self.lowArray[-1]", "= bar.close self.xminBar.openInterest = bar.openInterest self.xminBar.volume += float(bar.volume) # X分钟已经走完", "EMPTY_FLOAT self.bidVolume1 = EMPTY_INT self.bidVolume2 = EMPTY_INT self.bidVolume3 = EMPTY_INT", "常规行情 self.openPrice = EMPTY_FLOAT # 今日开盘价 self.highPrice = EMPTY_FLOAT #", "= tick.lastPrice self.bar.low = tick.lastPrice # 累加更新老一分钟的K线数据 else: self.bar.high =", "def 
__init__(self, logName , in_debug = True , open_md =", "low(self): \"\"\"获取最低价序列\"\"\" return self.lowArray #---------------------------------------------------------------------- @property def close(self): \"\"\"获取收盘价序列\"\"\" return", "= EMPTY_INT # 成交量 self.openInterest = EMPTY_INT # 持仓量 '''", "''' 主要Engine ''' class DataEngine(EngineBase): #---------------------------------------------------------------------- def __init__(self , _host", "'$lte':today_datetime}} barData = self.dbQuery(dbName, collectionName, d, 'datetime') l = []", "\"\"\" #---------------------------------------------------------------------- def __init__(self, size=100): \"\"\"Constructor\"\"\" self.count = 0 #", "1 if not self.inited and self.count >= self.size: self.inited =", "x小时K线的回调函数 self.lastTick = None # 上一TICK缓存对象 self.lastSecondTick = None #", "# encoding: utf-8 import sys from market_maker import OrderManager from", "EMPTY_INT self.bidVolume3 = EMPTY_INT self.bidVolume4 = EMPTY_INT self.bidVolume5 = EMPTY_INT", "collectionName = GLOBAL_USE_SYMBOL, days = 2): today_datetime = datetime.now() start_datetime", "self.bidVolume4 = EMPTY_INT self.bidVolume5 = EMPTY_INT self.askVolume1 = EMPTY_INT self.askVolume2", "settings import * import os from pymongo import MongoClient, ASCENDING", "None # 多少秒K线对象 self.xsec = xsec # xsec的值 self.onXsecBar =", "持仓量 self.time = EMPTY_STRING # 时间 11:20:56.5 self.date = EMPTY_STRING", "+ \"\\n\") self.f.flush() #---------------------------------------------------------------------- def info(self, msg): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d", "max(self.xsecBar.high, tick.lastPrice) self.xsecBar.low = min(self.xsecBar.low, tick.lastPrice) # 通用更新部分 self.xsecBar.close =", "self.upperLimit = EMPTY_FLOAT # 涨停价 self.lowerLimit = EMPTY_FLOAT # 跌停价", "self.openInterest = EMPTY_INT # 持仓量 self.time = EMPTY_STRING # 时间", "datetime.now() start_datetime = today_datetime - timedelta( days = days) d", "+= float(bar.volume) # X分钟已经走完 if ( 
(bar.datetime.minute + 1) %", "self.symbol = EMPTY_STRING # 合约代码 self.exchange = EMPTY_STRING # 交易所代码", "= self.xsecBar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上多少秒K线 self.onXsecBar(self.xsecBar) # 清空老K线缓存对象 self.xsecBar = BarData()", "(tick.volume - self.lastTick.volume) # 当前K线内的成交量 # 缓存Tick self.lastTick = tick", "# 读取MongoDB的设置 try: # 设置MongoDB操作的超时时间为0.5秒 self.dbClient = MongoClient(self.host , self.port", "* import os from pymongo import MongoClient, ASCENDING from pymongo.errors", "# 尚未创建对象 if not self.bar: self.bar = BarData() newMinute =", "self.xsecBar.low = tick.lastPrice # 累加更新老几秒的K线数据 else: self.xsecBar.high = max(self.xsecBar.high, tick.lastPrice)", "self.xhourBar.low = bar.low self.xhourBar.datetime = bar.datetime else: self.xhourBar.high = max(self.xhourBar.high,", "tick #---------------------------------------------------------------------- def updateBar(self, bar): \"\"\"1分钟K线更新\"\"\" # 尚未创建对象 if not", "self.port , connectTimeoutMS=500) # 调用server_info查询服务器状态,防止服务器异常并未连接成功 self.dbClient.server_info() self.writeLog(u'database connection error') except", "# 尚未创建对象 if not self.xhourBar: self.xhourBar = BarData() self.xhourBar.vtSymbol =", "( (bar.datetime.hour + 1) % self.xhour ) == 0: #", "EMPTY_STRING # bar开始的时间,日期 self.time = EMPTY_STRING # 时间 self.datetime =", "newSecond : self.xsecBar.datetime = tick.datetime self.xsecBar.vtSymbol = tick.vtSymbol self.xsecBar.symbol =", "%s: %s \" % (str(error_id) , msg) + \"\\n\") self.f.flush()", "= self.xminBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xminBar.date = self.xminBar.datetime.strftime('%Y%m%d') self.xminBar.time =", "self.bar.date = self.bar.datetime.strftime('%Y%m%d') self.bar.time = self.bar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上一分钟K线 self.onBar(self.bar) #", "self.exchange = EMPTY_STRING # 交易所代码 self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是", "= EMPTY_FLOAT # 涨停价 self.lowerLimit = EMPTY_FLOAT # 跌停价 #", "self.highArray[-1] = bar.high self.lowArray[-1] = bar.low 
self.closeArray[-1] = bar.close self.volumeArray[-1]", "open(self): \"\"\"获取开盘价序列\"\"\" return self.openArray #---------------------------------------------------------------------- @property def high(self): \"\"\"获取最高价序列\"\"\" return", "__init__(self, logName , in_debug = True , open_md = \"w\"):", "True # 初始化新多少秒的K线数据 if newSecond : self.xsecBar.datetime = tick.datetime self.xsecBar.vtSymbol", "and ( (tick.datetime.second) % self.xsec == 0 ): self.xsecBar.datetime =", ", _host = GLOBAL_MONGO_HOST , _port = GLOBAL_MONGO_PORT): super(DataEngine, self).__init__()", "20151009 self.datetime = None # python的datetime时间对象 # 常规行情 self.openPrice =", "EMPTY_FLOAT # 今日最高价 self.lowPrice = EMPTY_FLOAT # 今日最低价 self.preClosePrice =", "and self.count >= self.size: self.inited = True self.openArray[0:self.size-1] = self.openArray[1:self.size]", "X分钟K线的回调函数 self.xhourBar = None # x小时K线对象 self.xhour = xhour #", "self.xhourBar.high = bar.high self.xhourBar.low = bar.low self.xhourBar.datetime = bar.datetime else:", "def loadBars( self, dbName = GLOBAL_USE_DBNAME , collectionName = GLOBAL_USE_SYMBOL,", "connection') #---------------------------------------------------------------------- def dbQuery(self, dbName, collectionName, d, sortKey='', sortDirection=ASCENDING): \"\"\"从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针\"\"\"", "the db self.dbConnect() #---------------------------------------------------------------------- def dbConnect(self): \"\"\"连接MongoDB数据库\"\"\" if not self.dbClient:", "if not self.inited and self.count >= self.size: self.inited = True", "l = [] for d in barData: bar = BarData()", "def writeError(self, content , error_id = 0): \"\"\" 发送错误通知/记录日志文件 :param", ", _port = GLOBAL_MONGO_PORT): super(DataEngine, self).__init__() self.host = _host self.port", "= EMPTY_FLOAT self.close = EMPTY_FLOAT self.date = EMPTY_STRING # bar开始的时间,日期", "self.bar.vtSymbol = tick.vtSymbol self.bar.symbol = tick.symbol self.bar.exchange = tick.exchange self.bar.open", "connection error') except 
ConnectionFailure: self.writeLog( u'fail in db connection') #----------------------------------------------------------------------", "self.dayBar.high = bar.high self.dayBar.low = bar.low self.dayBar.datetime = bar.datetime else:", ": \" + msg + \"\\n\") self.f.flush() #---------------------------------------------------------------------- def close(self):", "not self.dbClient: # 读取MongoDB的设置 try: # 设置MongoDB操作的超时时间为0.5秒 self.dbClient = MongoClient(self.host", "import * import os from pymongo import MongoClient, ASCENDING from", "OrderManager from settings import * import os from pymongo import", "self).__init__() # 代码相关 self.symbol = EMPTY_STRING # 合约代码 self.exchange =", "= bar ######################################################################## class ArrayManager(object): \"\"\" K线序列管理工具,负责: 1. K线时间序列的维护 2.", "tick.lastPrice) self.bar.low = min(self.bar.low, tick.lastPrice) # 通用更新部分 self.bar.close = tick.lastPrice", "# X分钟已经走完 if ( (bar.datetime.minute + 1) % self.xmin )", "= self.bar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.bar.date = self.bar.datetime.strftime('%Y%m%d') self.bar.time =", ", open_md) #---------------------------------------------------------------------- def error(self, msg , error_id): if self.now_debug:", "今日最低价 self.preClosePrice = EMPTY_FLOAT self.upperLimit = EMPTY_FLOAT # 涨停价 self.lowerLimit", "# 将秒和微秒设为0 self.xminBar.date = self.xminBar.datetime.strftime('%Y%m%d') self.xminBar.time = self.xminBar.datetime.strftime('%H:%M:%S') # 推送", "self.bar.exchange = tick.exchange self.bar.open = tick.lastPrice self.bar.high = tick.lastPrice self.bar.low", "EMPTY_FLOAT self.bidPrice5 = EMPTY_FLOAT self.askPrice1 = EMPTY_FLOAT self.askPrice2 = EMPTY_FLOAT", "writeError(self, content , error_id = 0): \"\"\" 发送错误通知/记录日志文件 :param content:", "+= float(bar.volume) self.lastDayBar = bar ######################################################################## class ArrayManager(object): \"\"\" K线序列管理工具,负责:", "db connection') 
#---------------------------------------------------------------------- def dbQuery(self, dbName, collectionName, d, sortKey='', sortDirection=ASCENDING):", "self.dbClient: db = self.dbClient[dbName] collection = db[collectionName] if sortKey: cursor", "推送 self.onXhourBar(self.xhourBar) # 清空老K线缓存对象 self.xhourBar = None #---------------------------------------------------------------------------- def updateDayBar(self,", "\" : \" + msg + \"\\n\") self.f.flush() #---------------------------------------------------------------------- def", "== False: os.mkdir( self.LogDir ) self.logPath = os.path.join(self.LogDir , logName)", "self.xsecBar.vtSymbol = tick.vtSymbol self.xsecBar.symbol = tick.symbol self.xsecBar.exchange = tick.exchange self.xsecBar.open", "= self.xminBar.datetime.strftime('%Y%m%d') self.xminBar.time = self.xminBar.datetime.strftime('%H:%M:%S') # 推送 self.onXminBar(self.xminBar) # 清空老K线缓存对象", "# 将秒和微秒设为0 self.bar.date = self.bar.datetime.strftime('%Y%m%d') self.bar.time = self.bar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上一分钟K线", "bar.openInterest self.xhourBar.volume += float(bar.volume) # X分钟已经走完 if ( (bar.datetime.hour +", "bar): \"\"\"更新K线\"\"\" self.count += 1 if not self.inited and self.count", "BarData() self.xhourBar.vtSymbol = bar.vtSymbol self.xhourBar.symbol = bar.symbol self.xhourBar.exchange = bar.exchange", "对查询出来的数据进行排序 else: cursor = collection.find(d) if cursor: return list(cursor) else:", "# xsec的值 self.onXsecBar = onXsecBar # x秒的回调函数 self.xminBar = None", "BarData() self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol = bar.symbol self.dayBar.exchange = bar.exchange", "# 累加更新老一分钟的K线数据 else: self.bar.high = max(self.bar.high, tick.lastPrice) self.bar.low = min(self.bar.low,", "生成上一X分钟K线的时间戳 self.xhourBar.datetime = self.xhourBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xhourBar.date = self.xhourBar.datetime.strftime('%Y%m%d')", "EMPTY_FLOAT self.askPrice1 = EMPTY_FLOAT self.askPrice2 = EMPTY_FLOAT self.askPrice3 = 
EMPTY_FLOAT", "python的datetime时间对象 # 常规行情 self.openPrice = EMPTY_FLOAT # 今日开盘价 self.highPrice =", "else: self.xsecBar.high = max(self.xsecBar.high, tick.lastPrice) self.xsecBar.low = min(self.xsecBar.low, tick.lastPrice) #", "self.xsecBar.datetime.replace( microsecond=0) # 将秒和微秒设为0 self.xsecBar.date = self.xsecBar.datetime.strftime('%Y%m%d') self.xsecBar.time = self.xsecBar.datetime.strftime('%H:%M:%S.%f')", "tick.vtSymbol self.xsecBar.symbol = tick.symbol self.xsecBar.exchange = tick.exchange self.xsecBar.open = tick.lastPrice", "bar.high self.dayBar.low = bar.low self.dayBar.datetime = bar.datetime elif not self.dayBar:", "None \\ and ( (self.lastDayBar.time <= \"15:30:00\" and bar.time >=", "bar.open self.highArray[-1] = bar.high self.lowArray[-1] = bar.low self.closeArray[-1] = bar.close", "可以用X整除 # 生成上一X分钟K线的时间戳 self.xhourBar.datetime = self.xhourBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xhourBar.date", "self.port = _port # MongoDB数据库相关 self.dbClient = None # MongoDB客户端对象", "== 0: # 可以用X整除 # 生成上一X分钟K线的时间戳 self.xhourBar.datetime = self.xhourBar.datetime.replace(second=0, microsecond=0)", "def updateBar(self, bar): \"\"\"更新K线\"\"\" self.count += 1 if not self.inited", "self.bar.low = tick.lastPrice # 累加更新老一分钟的K线数据 else: self.bar.high = max(self.bar.high, tick.lastPrice)", "\"w\"): if os.path.exists(self.LogDir) == False: os.mkdir( self.LogDir ) self.logPath =", "self.xminBar.openInterest = bar.openInterest self.xminBar.volume += float(bar.volume) # X分钟已经走完 if (", "self.onXhourBar = onXhourBar # x小时K线的回调函数 self.lastTick = None # 上一TICK缓存对象", "EMPTY_INT self.askVolume1 = EMPTY_INT self.askVolume2 = EMPTY_INT self.askVolume3 = EMPTY_INT", "bar.__dict__ = d l.append(bar) return l ######################################################################## class BarManager(object): \"\"\"", "# 用于秒级别的上一根Tick缓存对象 self.dayBar = None # 一个交易日的bar对象 self.onDayBar = onDayBar", "if newMinute: self.bar.vtSymbol = tick.vtSymbol self.bar.symbol = tick.symbol 
self.bar.exchange =", "= None # 1分钟K线对象 self.onBar = onBar # 1分钟K线回调函数 self.xsecBar", "if not self.xminBar: self.xminBar = BarData() self.xminBar.vtSymbol = bar.vtSymbol self.xminBar.symbol", "d, 'datetime') l = [] for d in barData: bar", "= bar.open self.xhourBar.high = bar.high self.xhourBar.low = bar.low self.xhourBar.datetime =", "# 代码相关 self.symbol = EMPTY_STRING # 合约代码 self.exchange = EMPTY_STRING", "self.bar.datetime.strftime('%Y%m%d') self.bar.time = self.bar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上一分钟K线 self.onBar(self.bar) # 创建新的K线对象 self.bar", "+= (tick.volume - self.lastTick.volume) # 当前K线内的成交量 # 缓存Tick self.lastTick =", "= max(self.xsecBar.high, tick.lastPrice) self.xsecBar.low = min(self.xsecBar.low, tick.lastPrice) # 通用更新部分 self.xsecBar.close", "error(self, msg , error_id): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \"", "barData: bar = BarData() bar.__dict__ = d l.append(bar) return l", "= xhour # x的值 self.onXhourBar = onXhourBar # x小时K线的回调函数 self.lastTick", "self.xminBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xminBar.date = self.xminBar.datetime.strftime('%Y%m%d') self.xminBar.time = self.xminBar.datetime.strftime('%H:%M:%S')", "= EMPTY_FLOAT # 最新成交价 self.lastVolume = EMPTY_INT # 最新成交量 self.volume", "if self.lastSecondTick: self.xsecBar.volume += (tick.volume - self.lastSecondTick.volume) # 当前Tick内的成交量 #", "<= \"15:30:00\" and bar.time >= \"15:30:00\") \\ or (self.lastDayBar.time <=", "= \"LogDir\" #---------------------------------------------------------------------- def __init__(self, logName , in_debug = True", "self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol = bar.symbol self.dayBar.exchange = bar.exchange self.dayBar.open", "updateHourBar(self , bar): \"\"\"1小时K线更新\"\"\" # 尚未创建对象 if not self.xhourBar: self.xhourBar", "tick.datetime.minute: # 生成上一分钟K线的时间戳 self.bar.datetime = self.bar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.bar.date", "= 
self.volumeArray[1:self.size] self.openArray[-1] = bar.open self.highArray[-1] = bar.high self.lowArray[-1] =", ", bar.low) # 通用部分 self.dayBar.close = bar.close self.dayBar.openInterest = bar.openInterest", "self.lastTick: self.bar.volume += (tick.volume - self.lastTick.volume) # 当前K线内的成交量 # 缓存Tick", "基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60) \"\"\" #---------------------------------------------------------------------- def __init__(self, onBar, xsec=0, onXsecBar=None , xmin=0", "# 跌停价 # 五档行情 self.bidPrice1 = EMPTY_FLOAT self.bidPrice2 = EMPTY_FLOAT", "= EMPTY_FLOAT # 今日最高价 self.lowPrice = EMPTY_FLOAT # 今日最低价 self.preClosePrice", "EMPTY_FLOAT # OHLC self.high = EMPTY_FLOAT self.low = EMPTY_FLOAT self.close", "import os from pymongo import MongoClient, ASCENDING from pymongo.errors import", "# 1分钟K线对象 self.onBar = onBar # 1分钟K线回调函数 self.xsecBar = None", "= None # python的datetime时间对象 # 常规行情 self.openPrice = EMPTY_FLOAT #", "def info(self, msg): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" :", "缓存Tick self.lastTick = tick #---------------------------------------------------------------------- def updateSecond(self, tick ): \"\"\"通过TICK数据更新到秒数据\"\"\"", "self, dbName = GLOBAL_USE_DBNAME , collectionName = GLOBAL_USE_SYMBOL, days =", "self.lastTick = tick #---------------------------------------------------------------------- def updateSecond(self, tick ): \"\"\"通过TICK数据更新到秒数据\"\"\" newSecond", "self.inited = False # True if count>=size self.openArray = np.zeros(size)", "\" + \"Error msg %s: %s \" % (str(error_id) ,", "= tick.symbol self.bar.exchange = tick.exchange self.bar.open = tick.lastPrice self.bar.high =", "newSecond = True # 初始化新多少秒的K线数据 if newSecond : self.xsecBar.datetime =", "通用部分 self.xminBar.close = bar.close self.xminBar.openInterest = bar.openInterest self.xminBar.volume += float(bar.volume)", "self.xhourBar.open = bar.open self.xhourBar.high = bar.high self.xhourBar.low = bar.low self.xhourBar.datetime", "代码相关 
self.symbol = EMPTY_STRING # 合约代码 self.exchange = EMPTY_STRING #", "timedelta( days = days) d = {'datetime':{'$gte':start_datetime , '$lte':today_datetime}} barData", "发送错误通知/记录日志文件 :param content: :return: \"\"\" if self.logger: self.logger.error(content , error_id)", "def high(self): \"\"\"获取最高价序列\"\"\" return self.highArray #---------------------------------------------------------------------- @property def low(self): \"\"\"获取最低价序列\"\"\"", "# 最新成交量 self.volume = EMPTY_INT # 今天总成交量 self.openInterest = EMPTY_INT", "tick.openInterest if self.lastSecondTick: self.xsecBar.volume += (tick.volume - self.lastSecondTick.volume) # 当前Tick内的成交量", "= MongoClient(self.host , self.port , connectTimeoutMS=500) # 调用server_info查询服务器状态,防止服务器异常并未连接成功 self.dbClient.server_info() self.writeLog(u'database", "days = days) d = {'datetime':{'$gte':start_datetime , '$lte':today_datetime}} barData =", "涨停价 self.lowerLimit = EMPTY_FLOAT # 跌停价 # 五档行情 self.bidPrice1 =", "\"\"\"更新K线\"\"\" self.count += 1 if not self.inited and self.count >=", "sortDirection=ASCENDING): \"\"\"从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针\"\"\" if self.dbClient: db = self.dbClient[dbName] collection = db[collectionName]", "EMPTY_STRING = \"\" EMPTY_FLOAT = 0.0 EMPTY_INT = 0 #----------------------------------------------------------------------", "\"\"\"K线数据\"\"\" #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(BarData, self).__init__() self.vtSymbol = EMPTY_STRING", "True if count>=size self.openArray = np.zeros(size) # OHLC self.highArray =", "self.lastDayBar != None \\ and ( (self.lastDayBar.time <= \"15:30:00\" and", "microsecond=0) # 将秒和微秒设为0 self.bar.date = self.bar.datetime.strftime('%Y%m%d') self.bar.time = self.bar.datetime.strftime('%H:%M:%S.%f') #", "= max(self.dayBar.high , bar.high) self.dayBar.low = min(self.dayBar.low , bar.low) #", "import ConnectionFailure from datetime import datetime , timedelta import numpy", 
"self.xminBar.datetime.strftime('%H:%M:%S') # 推送 self.onXminBar(self.xminBar) # 清空老K线缓存对象 self.xminBar = None #----------------------------------------------------------------------", "# 通用部分 self.dayBar.close = bar.close self.dayBar.openInterest = bar.openInterest self.dayBar.volume +=", "= self.xsecBar.datetime.replace( microsecond=0) # 将秒和微秒设为0 self.xsecBar.date = self.xsecBar.datetime.strftime('%Y%m%d') self.xsecBar.time =", "self.onXminBar = onXminBar # X分钟K线的回调函数 self.xhourBar = None # x小时K线对象", "EMPTY_FLOAT self.askPrice5 = EMPTY_FLOAT self.bidVolume1 = EMPTY_INT self.bidVolume2 = EMPTY_INT", "utf-8 import sys from market_maker import OrderManager from settings import", "\"\"\" #---------------------------------------------------------------------- def __init__(self, onBar, xsec=0, onXsecBar=None , xmin=0 ,", "= min(self.dayBar.low , bar.low) # 通用部分 self.dayBar.close = bar.close self.dayBar.openInterest", "class TickData(object): #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(TickData, self).__init__() # 代码相关", "self.bidPrice3 = EMPTY_FLOAT self.bidPrice4 = EMPTY_FLOAT self.bidPrice5 = EMPTY_FLOAT self.askPrice1", "return list(cursor) else: return [] else: self.writeLog(u'db query failed') return", "updateDayBar(self, bar): # 一天走完 # 1. 
夜盘 , 2.第二天9点 if", "# x小时K线的回调函数 self.lastTick = None # 上一TICK缓存对象 self.lastSecondTick = None", "self.bar.volume += (tick.volume - self.lastTick.volume) # 当前K线内的成交量 # 缓存Tick self.lastTick", "self.inited and self.count >= self.size: self.inited = True self.openArray[0:self.size-1] =", "!= None \\ and ( (self.lastDayBar.time <= \"15:30:00\" and bar.time", "self.count >= self.size: self.inited = True self.openArray[0:self.size-1] = self.openArray[1:self.size] self.highArray[0:self.size-1]", "self.xsecBar.datetime.strftime('%Y%m%d') self.xsecBar.time = self.xsecBar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上多少秒K线 self.onXsecBar(self.xsecBar) # 清空老K线缓存对象 self.xsecBar", "bar.vtSymbol self.xhourBar.symbol = bar.symbol self.xhourBar.exchange = bar.exchange self.xhourBar.open = bar.open", "updateSecond(self, tick ): \"\"\"通过TICK数据更新到秒数据\"\"\" newSecond = False if not self.xsecBar:", "self.xhourBar = None #---------------------------------------------------------------------------- def updateDayBar(self, bar): # 一天走完 #", "!= tick.datetime.minute: # 生成上一分钟K线的时间戳 self.bar.datetime = self.bar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0", "self.logger.error(content , error_id) ''' 主要Engine ''' class DataEngine(EngineBase): #---------------------------------------------------------------------- def", "\"\"\"Constructor\"\"\" super(TickData, self).__init__() # 代码相关 self.symbol = EMPTY_STRING # 合约代码", "- self.lastSecondTick.volume) # 当前Tick内的成交量 # 缓存 secondTick 对象 self.lastSecondTick =", "bar.datetime # 累加老K线 else: self.xminBar.high = max(self.xminBar.high, bar.high) self.xminBar.low =", "self.xsecBar.volume += (tick.volume - self.lastSecondTick.volume) # 当前Tick内的成交量 # 缓存 secondTick", "= \"BITMEX\" EMPTY_STRING = \"\" EMPTY_FLOAT = 0.0 EMPTY_INT =", "= False if not self.xsecBar: self.xsecBar = BarData() newSecond =", "not self.dayBar: self.dayBar = BarData() self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol =", "= days) d = {'datetime':{'$gte':start_datetime , 
'$lte':today_datetime}} barData = self.dbQuery(dbName,", "= None # 上一TICK缓存对象 self.lastSecondTick = None # 用于秒级别的上一根Tick缓存对象 self.dayBar", "# 通用更新部分 self.bar.close = tick.lastPrice self.bar.datetime = tick.datetime self.bar.openInterest =", "= max(self.xhourBar.high, bar.high) self.xhourBar.low = min(self.xhourBar.low, bar.low) # 通用部分 self.xhourBar.close", "1. K线时间序列的维护 2. 常用技术指标的计算 \"\"\" #---------------------------------------------------------------------- def __init__(self, size=100): \"\"\"Constructor\"\"\"", "self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" : \" + msg +", "None # python的datetime时间对象 # 常规行情 self.openPrice = EMPTY_FLOAT # 今日开盘价", "max(self.bar.high, tick.lastPrice) self.bar.low = min(self.bar.low, tick.lastPrice) # 通用更新部分 self.bar.close =", "import datetime , timedelta import numpy as np ######################################################################################################################## #", "= None #---------------------------------------------------------------------- def updateHourBar(self , bar): \"\"\"1小时K线更新\"\"\" # 尚未创建对象", "\"15:30:00\" and bar.time <= self.lastDayBar.time )): self.dayBar.datetime = self.dayBar.datetime.replace(second=0, microsecond=0)", "\\ or (self.lastDayBar.time <= \"15:30:00\" and bar.time <= self.lastDayBar.time )):", "= self.dayBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.dayBar.date = self.dayBar.datetime.strftime('%Y%m%d') self.dayBar.time =", "self.bar.close = tick.lastPrice self.bar.datetime = tick.datetime self.bar.openInterest = tick.openInterest if", "# 初始化新多少秒的K线数据 if newSecond : self.xsecBar.datetime = tick.datetime self.xsecBar.vtSymbol =", "self.bidVolume3 = EMPTY_INT self.bidVolume4 = EMPTY_INT self.bidVolume5 = EMPTY_INT self.askVolume1", "__init__(self , _host = GLOBAL_MONGO_HOST , _port = GLOBAL_MONGO_PORT): super(DataEngine,", "self.xsecBar = BarData() newSecond = True # 初始化新多少秒的K线数据 if newSecond", "else: return [] else: 
self.writeLog(u'db query failed') return [] #-----------------------------------------------------------------------", "self.inited = True self.openArray[0:self.size-1] = self.openArray[1:self.size] self.highArray[0:self.size-1] = self.highArray[1:self.size] self.lowArray[0:self.size-1]", "self.date = EMPTY_STRING # bar开始的时间,日期 self.time = EMPTY_STRING # 时间", "\"\"\"获取最高价序列\"\"\" return self.highArray #---------------------------------------------------------------------- @property def low(self): \"\"\"获取最低价序列\"\"\" return self.lowArray", "self.preClosePrice = EMPTY_FLOAT self.upperLimit = EMPTY_FLOAT # 涨停价 self.lowerLimit =", "MongoClient, ASCENDING from pymongo.errors import ConnectionFailure from datetime import datetime", "# MongoDB客户端对象 self.logger = LoggerEngine(\"dataEngine.log\") ## init the db self.dbConnect()", "# 通用更新部分 self.xsecBar.close = tick.lastPrice self.xsecBar.openInterest = tick.openInterest if self.lastSecondTick:", "self.bidPrice4 = EMPTY_FLOAT self.bidPrice5 = EMPTY_FLOAT self.askPrice1 = EMPTY_FLOAT self.askPrice2", "= EMPTY_INT ######################################################################## class BarData(object): \"\"\"K线数据\"\"\" #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\"", "- self.lastTick.volume) # 当前K线内的成交量 # 缓存Tick self.lastTick = tick #----------------------------------------------------------------------", "if ( (bar.datetime.minute + 1) % self.xmin ) == 0:", "bar.symbol self.xminBar.exchange = bar.exchange self.xminBar.open = bar.open self.xminBar.high = bar.high", "None # MongoDB客户端对象 self.logger = LoggerEngine(\"dataEngine.log\") ## init the db", "= collection.find(d).sort(sortKey, sortDirection) # 对查询出来的数据进行排序 else: cursor = collection.find(d) if", "def updateDayBar(self, bar): # 一天走完 # 1. 
夜盘 , 2.第二天9点", "EMPTY_INT ######################################################################## class BarData(object): \"\"\"K线数据\"\"\" #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(BarData,", "self.xsecBar.symbol = tick.symbol self.xsecBar.exchange = tick.exchange self.xsecBar.open = tick.lastPrice self.xsecBar.high", "#---------------------------------------------------------------------- def updateBar(self, bar): \"\"\"更新K线\"\"\" self.count += 1 if not", "self.dayBar.open = bar.open self.dayBar.high = bar.high self.dayBar.low = bar.low self.dayBar.datetime", "self.dbConnect() #---------------------------------------------------------------------- def dbConnect(self): \"\"\"连接MongoDB数据库\"\"\" if not self.dbClient: # 读取MongoDB的设置", "\\ and ( (self.lastDayBar.time <= \"15:30:00\" and bar.time >= \"15:30:00\")", "MongoDB数据库相关 self.dbClient = None # MongoDB客户端对象 self.logger = LoggerEngine(\"dataEngine.log\") ##", "self.askVolume5 = EMPTY_INT ######################################################################## class BarData(object): \"\"\"K线数据\"\"\" #---------------------------------------------------------------------- def __init__(self):", "= bar.high self.lowArray[-1] = bar.low self.closeArray[-1] = bar.close self.volumeArray[-1] =", "EMPTY_FLOAT # 今日开盘价 self.highPrice = EMPTY_FLOAT # 今日最高价 self.lowPrice =", "self.vtSymbol = EMPTY_STRING # vt系统代码 self.symbol = EMPTY_STRING # 代码", ", error_id = 0): \"\"\" 发送错误通知/记录日志文件 :param content: :return: \"\"\"", "self.xhourBar.datetime.strftime('%Y%m%d') self.xhourBar.time = self.xhourBar.datetime.strftime('%H:%M:%S') # 推送 self.onXhourBar(self.xhourBar) # 清空老K线缓存对象 self.xhourBar", "newMinute = True # 新的一分钟 elif self.bar.datetime.minute != tick.datetime.minute: #", "days) d = {'datetime':{'$gte':start_datetime , '$lte':today_datetime}} barData = self.dbQuery(dbName, collectionName,", "def error(self, msg , error_id): if self.now_debug: 
self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") +", "msg) + \"\\n\") self.f.flush() #---------------------------------------------------------------------- def info(self, msg): if self.now_debug:", "logName , in_debug = True , open_md = \"w\"): if", "通用更新部分 self.bar.close = tick.lastPrice self.bar.datetime = tick.datetime self.bar.openInterest = tick.openInterest", "= onDayBar # 交易日K线的回调函数 self.lastDayBar = None #---------------------------------------------------------------------- def updateTick(self,", "self.xhourBar = BarData() self.xhourBar.vtSymbol = bar.vtSymbol self.xhourBar.symbol = bar.symbol self.xhourBar.exchange", "(tick.datetime.second) % self.xsec == 0 ): self.xsecBar.datetime = self.xsecBar.datetime.replace( microsecond=0)", "import OrderManager from settings import * import os from pymongo", "OHLC self.high = EMPTY_FLOAT self.low = EMPTY_FLOAT self.close = EMPTY_FLOAT", "BarData() bar.__dict__ = d l.append(bar) return l ######################################################################## class BarManager(object):", "EMPTY_FLOAT self.bidPrice3 = EMPTY_FLOAT self.bidPrice4 = EMPTY_FLOAT self.bidPrice5 = EMPTY_FLOAT", "sortKey: cursor = collection.find(d).sort(sortKey, sortDirection) # 对查询出来的数据进行排序 else: cursor =", "创建新的K线对象 self.bar = BarData() newMinute = True # 初始化新一分钟的K线数据 if", "self.xhourBar.volume += float(bar.volume) # X分钟已经走完 if ( (bar.datetime.hour + 1)", "if ( (bar.datetime.hour + 1) % self.xhour ) == 0:", "self.xhourBar.time = self.xhourBar.datetime.strftime('%H:%M:%S') # 推送 self.onXhourBar(self.xhourBar) # 清空老K线缓存对象 self.xhourBar =", "self.askVolume4 = EMPTY_INT self.askVolume5 = EMPTY_INT ######################################################################## class BarData(object): \"\"\"K线数据\"\"\"", "= bar.volume #---------------------------------------------------------------------- @property def open(self): \"\"\"获取开盘价序列\"\"\" return self.openArray 
#----------------------------------------------------------------------", "推送已经结束的上多少秒K线 self.onXsecBar(self.xsecBar) # 清空老K线缓存对象 self.xsecBar = BarData() newSecond = True", "= self.lowArray[1:self.size] self.closeArray[0:self.size-1] = self.closeArray[1:self.size] self.volumeArray[0:self.size-1] = self.volumeArray[1:self.size] self.openArray[-1] =", "# 成交数据 self.lastPrice = EMPTY_FLOAT # 最新成交价 self.lastVolume = EMPTY_INT", "True # 新的一分钟 elif self.bar.datetime.minute != tick.datetime.minute: # 生成上一分钟K线的时间戳 self.bar.datetime", "): self.xsecBar.datetime = self.xsecBar.datetime.replace( microsecond=0) # 将秒和微秒设为0 self.xsecBar.date = self.xsecBar.datetime.strftime('%Y%m%d')", "connectTimeoutMS=500) # 调用server_info查询服务器状态,防止服务器异常并未连接成功 self.dbClient.server_info() self.writeLog(u'database connection error') except ConnectionFailure: self.writeLog(", "# 最新成交价 self.lastVolume = EMPTY_INT # 最新成交量 self.volume = EMPTY_INT", "+ msg + \"\\n\") self.f.flush() #---------------------------------------------------------------------- def close(self): self.f.close() '''", "microsecond=0) # 将秒和微秒设为0 self.xsecBar.date = self.xsecBar.datetime.strftime('%Y%m%d') self.xsecBar.time = self.xsecBar.datetime.strftime('%H:%M:%S.%f') #", "= open( self.logPath , open_md) #---------------------------------------------------------------------- def error(self, msg ,", "self.xhourBar.datetime.strftime('%H:%M:%S') # 推送 self.onXhourBar(self.xhourBar) # 清空老K线缓存对象 self.xhourBar = None #----------------------------------------------------------------------------", "def open(self): \"\"\"获取开盘价序列\"\"\" return self.openArray #---------------------------------------------------------------------- @property def high(self): \"\"\"获取最高价序列\"\"\"", "onXminBar=None , onXhourBar = None, onDayBar=None): \"\"\"Constructor\"\"\" self.bar = None", "open_md = \"w\"): if os.path.exists(self.LogDir) == False: os.mkdir( self.LogDir )", "self.lowArray[0:self.size-1] = self.lowArray[1:self.size] self.closeArray[0:self.size-1] 
= self.closeArray[1:self.size] self.volumeArray[0:self.size-1] = self.volumeArray[1:self.size] self.openArray[-1]", "super(TickData, self).__init__() # 代码相关 self.symbol = EMPTY_STRING # 合约代码 self.exchange", "bar开始的时间,日期 self.time = EMPTY_STRING # 时间 self.datetime = None #", "self.dayBar.openInterest = bar.openInterest self.dayBar.volume += float(bar.volume) self.lastDayBar = bar ########################################################################", "= bar.exchange self.xhourBar.open = bar.open self.xhourBar.high = bar.high self.xhourBar.low =", "2. 常用技术指标的计算 \"\"\" #---------------------------------------------------------------------- def __init__(self, size=100): \"\"\"Constructor\"\"\" self.count =", "= size # 缓存大小 self.inited = False # True if", "# OHLC self.highArray = np.zeros(size) self.lowArray = np.zeros(size) self.closeArray =", "ASCENDING from pymongo.errors import ConnectionFailure from datetime import datetime ,", "if count>=size self.openArray = np.zeros(size) # OHLC self.highArray = np.zeros(size)", "= GLOBAL_MONGO_PORT): super(DataEngine, self).__init__() self.host = _host self.port = _port", "None, onDayBar=None): \"\"\"Constructor\"\"\" self.bar = None # 1分钟K线对象 self.onBar =", "= bar.symbol self.xhourBar.exchange = bar.exchange self.xhourBar.open = bar.open self.xhourBar.high =", "累加更新老几秒的K线数据 else: self.xsecBar.high = max(self.xsecBar.high, tick.lastPrice) self.xsecBar.low = min(self.xsecBar.low, tick.lastPrice)", ", in_debug = True , open_md = \"w\"): if os.path.exists(self.LogDir)", "def close(self): \"\"\"获取收盘价序列\"\"\" return self.closeArray #---------------------------------------------------------------------- @property def volume(self): \"\"\"获取成交量序列\"\"\"", "# 今日开盘价 self.highPrice = EMPTY_FLOAT # 今日最高价 self.lowPrice = EMPTY_FLOAT", "d in barData: bar = BarData() bar.__dict__ = d l.append(bar)", "= EMPTY_FLOAT self.bidPrice3 = EMPTY_FLOAT self.bidPrice4 = EMPTY_FLOAT self.bidPrice5 =", "% self.xhour ) == 0: # 可以用X整除 # 生成上一X分钟K线的时间戳 
self.xhourBar.datetime", "= bar.high self.dayBar.low = bar.low self.dayBar.datetime = bar.datetime elif not", "#---------------------------------------------------------------------- def updateHourBar(self , bar): \"\"\"1小时K线更新\"\"\" # 尚未创建对象 if not", "= EMPTY_STRING # 交易所代码 self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码", "2. 基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60) \"\"\" #---------------------------------------------------------------------- def __init__(self, onBar, xsec=0, onXsecBar=None ,", "tick): \"\"\"TICK更新\"\"\" newMinute = False # 默认不是新的一分钟 # 尚未创建对象 if", "EMPTY_INT self.askVolume2 = EMPTY_INT self.askVolume3 = EMPTY_INT self.askVolume4 = EMPTY_INT", "#---------------------------------------------------------------------- def dbConnect(self): \"\"\"连接MongoDB数据库\"\"\" if not self.dbClient: # 读取MongoDB的设置 try:", "[] #----------------------------------------------------------------------- def loadBars( self, dbName = GLOBAL_USE_DBNAME , collectionName", "None # x小时K线对象 self.xhour = xhour # x的值 self.onXhourBar =", "self.dayBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.dayBar.date = self.dayBar.datetime.strftime('%Y%m%d') self.dayBar.time = self.dayBar.datetime.strftime('%H:%M:%S')", "#---------------------------------------------------------------------- @property def low(self): \"\"\"获取最低价序列\"\"\" return self.lowArray #---------------------------------------------------------------------- @property def", "上一TICK缓存对象 self.lastSecondTick = None # 用于秒级别的上一根Tick缓存对象 self.dayBar = None #", "EMPTY_FLOAT self.date = EMPTY_STRING # bar开始的时间,日期 self.time = EMPTY_STRING #", "self.onBar = onBar # 1分钟K线回调函数 self.xsecBar = None # 多少秒K线对象", "updateBar(self, bar): \"\"\"1分钟K线更新\"\"\" # 尚未创建对象 if not self.xminBar: self.xminBar =", "# 生成上一X分钟K线的时间戳 self.xhourBar.datetime = self.xhourBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xhourBar.date =", "= bar.symbol self.xminBar.exchange = bar.exchange self.xminBar.open = bar.open 
self.xminBar.high =", "self.xhourBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xhourBar.date = self.xhourBar.datetime.strftime('%Y%m%d') self.xhourBar.time = self.xhourBar.datetime.strftime('%H:%M:%S')", "合约在vt系统中的唯一代码,通常是 合约代码.交易所代码 # 成交数据 self.lastPrice = EMPTY_FLOAT # 最新成交价 self.lastVolume", "生成上一分钟K线的时间戳 self.bar.datetime = self.bar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.bar.date = self.bar.datetime.strftime('%Y%m%d')", "self.onXminBar(self.xminBar) # 清空老K线缓存对象 self.xminBar = None #---------------------------------------------------------------------- def updateHourBar(self ,", "self.xsecBar.openInterest = tick.openInterest if self.lastSecondTick: self.xsecBar.volume += (tick.volume - self.lastSecondTick.volume)", ": self.xsecBar.datetime = tick.datetime self.xsecBar.vtSymbol = tick.vtSymbol self.xsecBar.symbol = tick.symbol", "from datetime import datetime , timedelta import numpy as np", "今日开盘价 self.highPrice = EMPTY_FLOAT # 今日最高价 self.lowPrice = EMPTY_FLOAT #", "+ 1) % self.xhour ) == 0: # 可以用X整除 #", "self.dayBar.low = min(self.dayBar.low , bar.low) # 通用部分 self.dayBar.close = bar.close", "<gh_stars>1-10 # encoding: utf-8 import sys from market_maker import OrderManager", "''' class TickData(object): #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(TickData, self).__init__() #", "as np ######################################################################################################################## # constants EXCHANGE_BITMEX = \"BITMEX\" EMPTY_STRING =", "BarData() newSecond = True elif self.xsecBar.datetime.second != tick.datetime.second and (", "self.bidVolume5 = EMPTY_INT self.askVolume1 = EMPTY_INT self.askVolume2 = EMPTY_INT self.askVolume3", "= max(self.xminBar.high, bar.high) self.xminBar.low = min(self.xminBar.low, bar.low) # 通用部分 self.xminBar.close", "# 1. 
夜盘 , 2.第二天9点 if self.lastDayBar != None \\", "self.bar.openInterest = tick.openInterest if self.lastTick: self.bar.volume += (tick.volume - self.lastTick.volume)", "= d l.append(bar) return l ######################################################################## class BarManager(object): \"\"\" K线合成器,支持:", "bar.exchange self.dayBar.open = bar.open self.dayBar.high = bar.high self.dayBar.low = bar.low", "\"\\n\") self.f.flush() #---------------------------------------------------------------------- def info(self, msg): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")", "= bar.exchange self.xminBar.open = bar.open self.xminBar.high = bar.high self.xminBar.low =", "= bar.datetime else: self.xhourBar.high = max(self.xhourBar.high, bar.high) self.xhourBar.low = min(self.xhourBar.low,", "[] else: self.writeLog(u'db query failed') return [] #----------------------------------------------------------------------- def loadBars(", "self.askVolume2 = EMPTY_INT self.askVolume3 = EMPTY_INT self.askVolume4 = EMPTY_INT self.askVolume5", "代码 self.exchange = EMPTY_STRING # 交易所 self.open = EMPTY_FLOAT #", "self.lastSecondTick = None # 用于秒级别的上一根Tick缓存对象 self.dayBar = None # 一个交易日的bar对象", "LoggerEngine(\"dataEngine.log\") ## init the db self.dbConnect() #---------------------------------------------------------------------- def dbConnect(self): \"\"\"连接MongoDB数据库\"\"\"", "= True # 初始化新多少秒的K线数据 if newSecond : self.xsecBar.datetime = tick.datetime", "None # 1分钟K线对象 self.onBar = onBar # 1分钟K线回调函数 self.xsecBar =", "cursor: return list(cursor) else: return [] else: self.writeLog(u'db query failed')", "): \"\"\"通过TICK数据更新到秒数据\"\"\" newSecond = False if not self.xsecBar: self.xsecBar =", "= tick.openInterest if self.lastSecondTick: self.xsecBar.volume += (tick.volume - self.lastSecondTick.volume) #", "默认不是新的一分钟 # 尚未创建对象 if not self.bar: self.bar = BarData() newMinute", "# 今天总成交量 self.openInterest = EMPTY_INT # 持仓量 self.time = EMPTY_STRING", "''' class 
DataEngine(EngineBase): #---------------------------------------------------------------------- def __init__(self , _host = GLOBAL_MONGO_HOST", "# 将秒和微秒设为0 self.xhourBar.date = self.xhourBar.datetime.strftime('%Y%m%d') self.xhourBar.time = self.xhourBar.datetime.strftime('%H:%M:%S') # 推送", "self.xhour = xhour # x的值 self.onXhourBar = onXhourBar # x小时K线的回调函数", "将秒和微秒设为0 self.bar.date = self.bar.datetime.strftime('%Y%m%d') self.bar.time = self.bar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上一分钟K线 self.onBar(self.bar)", "= None # 用于秒级别的上一根Tick缓存对象 self.dayBar = None # 一个交易日的bar对象 self.onDayBar", "self.date = EMPTY_STRING # 日期 20151009 self.datetime = None #", "= EMPTY_FLOAT self.askPrice4 = EMPTY_FLOAT self.askPrice5 = EMPTY_FLOAT self.bidVolume1 =", "barData = self.dbQuery(dbName, collectionName, d, 'datetime') l = [] for", "x秒的回调函数 self.xminBar = None # X分钟K线对象 self.xmin = xmin #", "= EMPTY_FLOAT self.askPrice1 = EMPTY_FLOAT self.askPrice2 = EMPTY_FLOAT self.askPrice3 =", "今日最高价 self.lowPrice = EMPTY_FLOAT # 今日最低价 self.preClosePrice = EMPTY_FLOAT self.upperLimit", "# 清空老K线缓存对象 self.xminBar = None #---------------------------------------------------------------------- def updateHourBar(self , bar):", "self.lowPrice = EMPTY_FLOAT # 今日最低价 self.preClosePrice = EMPTY_FLOAT self.upperLimit =", "= bar.high self.xminBar.low = bar.low self.xminBar.datetime = bar.datetime # 累加老K线", "五档行情 self.bidPrice1 = EMPTY_FLOAT self.bidPrice2 = EMPTY_FLOAT self.bidPrice3 = EMPTY_FLOAT", "''' engine的基础类 ''' class EngineBase(object): #---------------------------------------------------------------------- def writeLog(self, content): if", "from market_maker import OrderManager from settings import * import os", "\"\\n\") self.f.flush() #---------------------------------------------------------------------- def close(self): self.f.close() ''' tick 数据的格式 '''", "# 新的一分钟 elif self.bar.datetime.minute != tick.datetime.minute: # 生成上一分钟K线的时间戳 self.bar.datetime =", "% self.xmin ) == 0: # 可以用X整除 # 
生成上一X分钟K线的时间戳 self.xminBar.datetime", "数据的格式 ''' class TickData(object): #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(TickData, self).__init__()", "self.xsecBar.close = tick.lastPrice self.xsecBar.openInterest = tick.openInterest if self.lastSecondTick: self.xsecBar.volume +=", "bar.openInterest self.dayBar.volume += float(bar.volume) self.lastDayBar = bar ######################################################################## class ArrayManager(object):", "= bar.low self.dayBar.datetime = bar.datetime elif not self.dayBar: self.dayBar =", "self.dayBar: self.dayBar = BarData() self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol = bar.symbol", "sortKey='', sortDirection=ASCENDING): \"\"\"从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针\"\"\" if self.dbClient: db = self.dbClient[dbName] collection =", "# 生成上一X分钟K线的时间戳 self.xminBar.datetime = self.xminBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xminBar.date =", "bar.symbol self.xhourBar.exchange = bar.exchange self.xhourBar.open = bar.open self.xhourBar.high = bar.high", "# 交易日K线的回调函数 self.lastDayBar = None #---------------------------------------------------------------------- def updateTick(self, tick): \"\"\"TICK更新\"\"\"", "bar.symbol self.dayBar.exchange = bar.exchange self.dayBar.open = bar.open self.dayBar.high = bar.high", ", collectionName = GLOBAL_USE_SYMBOL, days = 2): today_datetime = datetime.now()", "self.writeLog(u'db query failed') return [] #----------------------------------------------------------------------- def loadBars( self, dbName", "TickData(object): #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(TickData, self).__init__() # 代码相关 self.symbol", "False # True if count>=size self.openArray = np.zeros(size) # OHLC", "msg): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" : \" +", "# 当前Tick内的成交量 # 缓存 
secondTick 对象 self.lastSecondTick = tick #----------------------------------------------------------------------", "self.highArray[0:self.size-1] = self.highArray[1:self.size] self.lowArray[0:self.size-1] = self.lowArray[1:self.size] self.closeArray[0:self.size-1] = self.closeArray[1:self.size] self.volumeArray[0:self.size-1]", "#---------------------------------------------------------------------- def writeError(self, content , error_id = 0): \"\"\" 发送错误通知/记录日志文件", "= bar.low self.xhourBar.datetime = bar.datetime else: self.xhourBar.high = max(self.xhourBar.high, bar.high)", "content , error_id = 0): \"\"\" 发送错误通知/记录日志文件 :param content: :return:", "= _port # MongoDB数据库相关 self.dbClient = None # MongoDB客户端对象 self.logger", "elif not self.dayBar: self.dayBar = BarData() self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol", "EMPTY_STRING # 日期 20151009 self.datetime = None # python的datetime时间对象 #", "if newSecond : self.xsecBar.datetime = tick.datetime self.xsecBar.vtSymbol = tick.vtSymbol self.xsecBar.symbol", "onDayBar=None): \"\"\"Constructor\"\"\" self.bar = None # 1分钟K线对象 self.onBar = onBar", "if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" : \" + \"Error", "= bar.open self.highArray[-1] = bar.high self.lowArray[-1] = bar.low self.closeArray[-1] =", "= tick.lastPrice self.xsecBar.low = tick.lastPrice # 累加更新老几秒的K线数据 else: self.xsecBar.high =", "in_debug = True , open_md = \"w\"): if os.path.exists(self.LogDir) ==", "self.xsecBar.low = min(self.xsecBar.low, tick.lastPrice) # 通用更新部分 self.xsecBar.close = tick.lastPrice self.xsecBar.openInterest", "min(self.xhourBar.low, bar.low) # 通用部分 self.xhourBar.close = bar.close self.xhourBar.openInterest = bar.openInterest", "self.bar.datetime.minute != tick.datetime.minute: # 生成上一分钟K线的时间戳 self.bar.datetime = self.bar.datetime.replace(second=0, microsecond=0) #", "- timedelta( days = days) d = {'datetime':{'$gte':start_datetime , '$lte':today_datetime}}", "= tick.lastPrice # 累加更新老几秒的K线数据 
else: self.xsecBar.high = max(self.xsecBar.high, tick.lastPrice) self.xsecBar.low", "dbName = GLOBAL_USE_DBNAME , collectionName = GLOBAL_USE_SYMBOL, days = 2):", "msg , error_id): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" :", "= max(self.bar.high, tick.lastPrice) self.bar.low = min(self.bar.low, tick.lastPrice) # 通用更新部分 self.bar.close", "tick.datetime self.xsecBar.vtSymbol = tick.vtSymbol self.xsecBar.symbol = tick.symbol self.xsecBar.exchange = tick.exchange", "= self.xhourBar.datetime.strftime('%H:%M:%S') # 推送 self.onXhourBar(self.xhourBar) # 清空老K线缓存对象 self.xhourBar = None", "#---------------------------------------------------------------------- def updateTick(self, tick): \"\"\"TICK更新\"\"\" newMinute = False # 默认不是新的一分钟", "可以用X整除 # 生成上一X分钟K线的时间戳 self.xminBar.datetime = self.xminBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xminBar.date", "= bar.datetime elif not self.dayBar: self.dayBar = BarData() self.dayBar.vtSymbol =", "newSecond = False if not self.xsecBar: self.xsecBar = BarData() newSecond", "self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" : \" + \"Error msg %s:", "%H:%M:%S\") + \" : \" + msg + \"\\n\") self.f.flush()", "EngineBase(object): #---------------------------------------------------------------------- def writeLog(self, content): if self.logger: self.logger.info(content) #---------------------------------------------------------------------- def", "self.dayBar = BarData() self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol = bar.symbol self.dayBar.exchange", "in_debug if self.now_debug: self.f = open( self.logPath , open_md) #----------------------------------------------------------------------", "最新成交价 self.lastVolume = EMPTY_INT # 最新成交量 self.volume = EMPTY_INT #", "EMPTY_INT self.bidVolume2 = EMPTY_INT self.bidVolume3 = EMPTY_INT self.bidVolume4 = EMPTY_INT", "tick.symbol self.xsecBar.exchange = tick.exchange self.xsecBar.open = tick.lastPrice self.xsecBar.high = 
tick.lastPrice", "= collection.find(d) if cursor: return list(cursor) else: return [] else:", "if sortKey: cursor = collection.find(d).sort(sortKey, sortDirection) # 对查询出来的数据进行排序 else: cursor", "bar.vtSymbol self.xminBar.symbol = bar.symbol self.xminBar.exchange = bar.exchange self.xminBar.open = bar.open", "class LoggerEngine(object): LogDir = \"LogDir\" #---------------------------------------------------------------------- def __init__(self, logName ,", "bar.low) # 通用部分 self.xminBar.close = bar.close self.xminBar.openInterest = bar.openInterest self.xminBar.volume", "self.dayBar.high = max(self.dayBar.high , bar.high) self.dayBar.low = min(self.dayBar.low , bar.low)", "EMPTY_FLOAT self.bidPrice2 = EMPTY_FLOAT self.bidPrice3 = EMPTY_FLOAT self.bidPrice4 = EMPTY_FLOAT", "= EMPTY_FLOAT self.bidPrice4 = EMPTY_FLOAT self.bidPrice5 = EMPTY_FLOAT self.askPrice1 =", "= self.closeArray[1:self.size] self.volumeArray[0:self.size-1] = self.volumeArray[1:self.size] self.openArray[-1] = bar.open self.highArray[-1] =", "== 0 ): self.xsecBar.datetime = self.xsecBar.datetime.replace( microsecond=0) # 将秒和微秒设为0 self.xsecBar.date", "\"\"\" if self.logger: self.logger.error(content , error_id) ''' 主要Engine ''' class", "True self.openArray[0:self.size-1] = self.openArray[1:self.size] self.highArray[0:self.size-1] = self.highArray[1:self.size] self.lowArray[0:self.size-1] = self.lowArray[1:self.size]", "将秒和微秒设为0 self.xminBar.date = self.xminBar.datetime.strftime('%Y%m%d') self.xminBar.time = self.xminBar.datetime.strftime('%H:%M:%S') # 推送 self.onXminBar(self.xminBar)", "xsec=0, onXsecBar=None , xmin=0 , xhour=0, onXminBar=None , onXhourBar =", "成交量 self.openInterest = EMPTY_INT # 持仓量 ''' engine的基础类 ''' class", "= tick.lastPrice self.xsecBar.openInterest = tick.openInterest if self.lastSecondTick: self.xsecBar.volume += (tick.volume", "= True elif self.xsecBar.datetime.second != tick.datetime.second and ( (tick.datetime.second) %", "bar.low self.xminBar.datetime = bar.datetime # 累加老K线 
else: self.xminBar.high = max(self.xminBar.high,", "= onXhourBar # x小时K线的回调函数 self.lastTick = None # 上一TICK缓存对象 self.lastSecondTick", "self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" : \" + msg + \"\\n\")", "info(self, msg): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" : \"", "np.zeros(size) #---------------------------------------------------------------------- def updateBar(self, bar): \"\"\"更新K线\"\"\" self.count += 1 if", "累加老K线 else: self.xminBar.high = max(self.xminBar.high, bar.high) self.xminBar.low = min(self.xminBar.low, bar.low)", "self.xsecBar = None # 多少秒K线对象 self.xsec = xsec # xsec的值", "not self.bar: self.bar = BarData() newMinute = True # 新的一分钟", "self.xsecBar.time = self.xsecBar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上多少秒K线 self.onXsecBar(self.xsecBar) # 清空老K线缓存对象 self.xsecBar =", "# 调用server_info查询服务器状态,防止服务器异常并未连接成功 self.dbClient.server_info() self.writeLog(u'database connection error') except ConnectionFailure: self.writeLog( u'fail", "EMPTY_STRING # 时间 self.datetime = None # python的datetime时间对象 self.volume =", "self.lastDayBar.time )): self.dayBar.datetime = self.dayBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.dayBar.date =", "bar.low self.closeArray[-1] = bar.close self.volumeArray[-1] = bar.volume #---------------------------------------------------------------------- @property def", "else: cursor = collection.find(d) if cursor: return list(cursor) else: return", ", '$lte':today_datetime}} barData = self.dbQuery(dbName, collectionName, d, 'datetime') l =", "== 0: # 可以用X整除 # 生成上一X分钟K线的时间戳 self.xminBar.datetime = self.xminBar.datetime.replace(second=0, microsecond=0)", "# 设置MongoDB操作的超时时间为0.5秒 self.dbClient = MongoClient(self.host , self.port , connectTimeoutMS=500) #", "= BarData() newSecond = True elif self.xsecBar.datetime.second != tick.datetime.second and", "return self.lowArray #---------------------------------------------------------------------- @property def 
close(self): \"\"\"获取收盘价序列\"\"\" return self.closeArray #----------------------------------------------------------------------", "writeLog(self, content): if self.logger: self.logger.info(content) #---------------------------------------------------------------------- def writeError(self, content ,", "= bar.open self.xminBar.high = bar.high self.xminBar.low = bar.low self.xminBar.datetime =", "self.askVolume3 = EMPTY_INT self.askVolume4 = EMPTY_INT self.askVolume5 = EMPTY_INT ########################################################################", "\"\"\"获取开盘价序列\"\"\" return self.openArray #---------------------------------------------------------------------- @property def high(self): \"\"\"获取最高价序列\"\"\" return self.highArray", "= self.xminBar.datetime.strftime('%H:%M:%S') # 推送 self.onXminBar(self.xminBar) # 清空老K线缓存对象 self.xminBar = None", "self.openArray = np.zeros(size) # OHLC self.highArray = np.zeros(size) self.lowArray =", ", xhour=0, onXminBar=None , onXhourBar = None, onDayBar=None): \"\"\"Constructor\"\"\" self.bar", "= xsec # xsec的值 self.onXsecBar = onXsecBar # x秒的回调函数 self.xminBar", "X分钟已经走完 if ( (bar.datetime.hour + 1) % self.xhour ) ==", "microsecond=0) # 将秒和微秒设为0 self.xhourBar.date = self.xhourBar.datetime.strftime('%Y%m%d') self.xhourBar.time = self.xhourBar.datetime.strftime('%H:%M:%S') #", "db = self.dbClient[dbName] collection = db[collectionName] if sortKey: cursor =", "BarData() newMinute = True # 新的一分钟 elif self.bar.datetime.minute != tick.datetime.minute:", "(bar.datetime.hour + 1) % self.xhour ) == 0: # 可以用X整除", "tick.datetime self.bar.openInterest = tick.openInterest if self.lastTick: self.bar.volume += (tick.volume -", "EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码 # 成交数据 self.lastPrice = EMPTY_FLOAT #", "try: # 设置MongoDB操作的超时时间为0.5秒 self.dbClient = MongoClient(self.host , self.port , connectTimeoutMS=500)", "def updateTick(self, tick): \"\"\"TICK更新\"\"\" newMinute = False # 默认不是新的一分钟 #", "= None # 一个交易日的bar对象 self.onDayBar = onDayBar # 
交易日K线的回调函数 self.lastDayBar", "self.open = EMPTY_FLOAT # OHLC self.high = EMPTY_FLOAT self.low =", "self.bar = BarData() newMinute = True # 新的一分钟 elif self.bar.datetime.minute", "夜盘 , 2.第二天9点 if self.lastDayBar != None \\ and (", "+ \" : \" + msg + \"\\n\") self.f.flush() #----------------------------------------------------------------------", "= EMPTY_STRING # bar开始的时间,日期 self.time = EMPTY_STRING # 时间 self.datetime", "持仓量 ''' engine的基础类 ''' class EngineBase(object): #---------------------------------------------------------------------- def writeLog(self, content):", "self.dayBar.exchange = bar.exchange self.dayBar.open = bar.open self.dayBar.high = bar.high self.dayBar.low", "\"\" EMPTY_FLOAT = 0.0 EMPTY_INT = 0 #---------------------------------------------------------------------- class LoggerEngine(object):", "min(self.xsecBar.low, tick.lastPrice) # 通用更新部分 self.xsecBar.close = tick.lastPrice self.xsecBar.openInterest = tick.openInterest", "# 常规行情 self.openPrice = EMPTY_FLOAT # 今日开盘价 self.highPrice = EMPTY_FLOAT", "return self.highArray #---------------------------------------------------------------------- @property def low(self): \"\"\"获取最低价序列\"\"\" return self.lowArray #----------------------------------------------------------------------", "1分钟K线回调函数 self.xsecBar = None # 多少秒K线对象 self.xsec = xsec #", "self.xsecBar.high = max(self.xsecBar.high, tick.lastPrice) self.xsecBar.low = min(self.xsecBar.low, tick.lastPrice) # 通用更新部分", "# 代码 self.exchange = EMPTY_STRING # 交易所 self.open = EMPTY_FLOAT", "self.xhourBar = None # x小时K线对象 self.xhour = xhour # x的值", "= self.dayBar.datetime.strftime('%Y%m%d') self.dayBar.time = self.dayBar.datetime.strftime('%H:%M:%S') # 说明是新的一天了 # 先推送昨天过去 self.onDayBar(", "def dbConnect(self): \"\"\"连接MongoDB数据库\"\"\" if not self.dbClient: # 读取MongoDB的设置 try: #", "x的值 self.onXhourBar = onXhourBar # x小时K线的回调函数 self.lastTick = None #", "= {'datetime':{'$gte':start_datetime , '$lte':today_datetime}} barData = self.dbQuery(dbName, 
collectionName, d, 'datetime')", "self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" : \" + \"Error msg", "# 初始化新一分钟的K线数据 if newMinute: self.bar.vtSymbol = tick.vtSymbol self.bar.symbol = tick.symbol", "class DataEngine(EngineBase): #---------------------------------------------------------------------- def __init__(self , _host = GLOBAL_MONGO_HOST ,", "@property def high(self): \"\"\"获取最高价序列\"\"\" return self.highArray #---------------------------------------------------------------------- @property def low(self):", "self.closeArray[-1] = bar.close self.volumeArray[-1] = bar.volume #---------------------------------------------------------------------- @property def open(self):", "max(self.dayBar.high , bar.high) self.dayBar.low = min(self.dayBar.low , bar.low) # 通用部分", "+= (tick.volume - self.lastSecondTick.volume) # 当前Tick内的成交量 # 缓存 secondTick 对象", "tick.vtSymbol self.bar.symbol = tick.symbol self.bar.exchange = tick.exchange self.bar.open = tick.lastPrice", "# 清空老K线缓存对象 self.xhourBar = None #---------------------------------------------------------------------------- def updateDayBar(self, bar): #", "1. 夜盘 , 2.第二天9点 if self.lastDayBar != None \\ and", "False if not self.xsecBar: self.xsecBar = BarData() newSecond = True", "EMPTY_INT # 成交量 self.openInterest = EMPTY_INT # 持仓量 ''' engine的基础类", "self.f = open( self.logPath , open_md) #---------------------------------------------------------------------- def error(self, msg", "基于Tick合成1分钟K线 2. 
基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60) \"\"\" #---------------------------------------------------------------------- def __init__(self, onBar, xsec=0, onXsecBar=None", "LoggerEngine(object): LogDir = \"LogDir\" #---------------------------------------------------------------------- def __init__(self, logName , in_debug", "# 时间 self.datetime = None # python的datetime时间对象 self.volume = EMPTY_INT", "self.dbClient.server_info() self.writeLog(u'database connection error') except ConnectionFailure: self.writeLog( u'fail in db", "self.xminBar.close = bar.close self.xminBar.openInterest = bar.openInterest self.xminBar.volume += float(bar.volume) #", ", self.port , connectTimeoutMS=500) # 调用server_info查询服务器状态,防止服务器异常并未连接成功 self.dbClient.server_info() self.writeLog(u'database connection error')", "self.bar.datetime = tick.datetime self.bar.openInterest = tick.openInterest if self.lastTick: self.bar.volume +=", "bar.exchange self.xhourBar.open = bar.open self.xhourBar.high = bar.high self.xhourBar.low = bar.low", "初始化新多少秒的K线数据 if newSecond : self.xsecBar.datetime = tick.datetime self.xsecBar.vtSymbol = tick.vtSymbol", "EMPTY_FLOAT self.close = EMPTY_FLOAT self.date = EMPTY_STRING # bar开始的时间,日期 self.time", "= bar.openInterest self.xhourBar.volume += float(bar.volume) # X分钟已经走完 if ( (bar.datetime.hour", "bar.low) # 通用部分 self.xhourBar.close = bar.close self.xhourBar.openInterest = bar.openInterest self.xhourBar.volume", "self.dayBar.time = self.dayBar.datetime.strftime('%H:%M:%S') # 说明是新的一天了 # 先推送昨天过去 self.onDayBar( self.dayBar) self.dayBar", "self.onXsecBar = onXsecBar # x秒的回调函数 self.xminBar = None # X分钟K线对象", "collection.find(d).sort(sortKey, sortDirection) # 对查询出来的数据进行排序 else: cursor = collection.find(d) if cursor:", "self.highArray #---------------------------------------------------------------------- @property def low(self): \"\"\"获取最低价序列\"\"\" return self.lowArray #---------------------------------------------------------------------- @property", "self.dbClient = 
MongoClient(self.host , self.port , connectTimeoutMS=500) # 调用server_info查询服务器状态,防止服务器异常并未连接成功 self.dbClient.server_info()", "else: self.xminBar.high = max(self.xminBar.high, bar.high) self.xminBar.low = min(self.xminBar.low, bar.low) #", "bar.vtSymbol self.dayBar.symbol = bar.symbol self.dayBar.exchange = bar.exchange self.dayBar.open = bar.open", "= EMPTY_INT self.bidVolume5 = EMPTY_INT self.askVolume1 = EMPTY_INT self.askVolume2 =", ", bar): \"\"\"1小时K线更新\"\"\" # 尚未创建对象 if not self.xhourBar: self.xhourBar =", "# 持仓量 ''' engine的基础类 ''' class EngineBase(object): #---------------------------------------------------------------------- def writeLog(self,", "= EMPTY_FLOAT self.low = EMPTY_FLOAT self.close = EMPTY_FLOAT self.date =", "%H:%M:%S\") + \" : \" + \"Error msg %s: %s", ", bar.high) self.dayBar.low = min(self.dayBar.low , bar.low) # 通用部分 self.dayBar.close", "先推送昨天过去 self.onDayBar( self.dayBar) self.dayBar = BarData() self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol", "self.datetime = None # python的datetime时间对象 # 常规行情 self.openPrice = EMPTY_FLOAT", "\"\"\"Constructor\"\"\" self.bar = None # 1分钟K线对象 self.onBar = onBar #", "''' tick 数据的格式 ''' class TickData(object): #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\"", "通用部分 self.dayBar.close = bar.close self.dayBar.openInterest = bar.openInterest self.dayBar.volume += float(bar.volume)", "EMPTY_FLOAT # 跌停价 # 五档行情 self.bidPrice1 = EMPTY_FLOAT self.bidPrice2 =", "tick.datetime.second and ( (tick.datetime.second) % self.xsec == 0 ): self.xsecBar.datetime", "#---------------------------------------------------------------------- def info(self, msg): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \"", "import sys from market_maker import OrderManager from settings import *", "= EMPTY_FLOAT self.askPrice2 = EMPTY_FLOAT self.askPrice3 = EMPTY_FLOAT self.askPrice4 =", "self.dayBar.symbol = bar.symbol 
self.dayBar.exchange = bar.exchange self.dayBar.open = bar.open self.dayBar.high", "LogDir = \"LogDir\" #---------------------------------------------------------------------- def __init__(self, logName , in_debug =", "self.f.close() ''' tick 数据的格式 ''' class TickData(object): #---------------------------------------------------------------------- def __init__(self):", "collectionName, d, sortKey='', sortDirection=ASCENDING): \"\"\"从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针\"\"\" if self.dbClient: db = self.dbClient[dbName]", "0): \"\"\" 发送错误通知/记录日志文件 :param content: :return: \"\"\" if self.logger: self.logger.error(content", "= EMPTY_FLOAT # 今日最低价 self.preClosePrice = EMPTY_FLOAT self.upperLimit = EMPTY_FLOAT", "bar.close self.dayBar.openInterest = bar.openInterest self.dayBar.volume += float(bar.volume) self.lastDayBar = bar", "onXminBar # X分钟K线的回调函数 self.xhourBar = None # x小时K线对象 self.xhour =", "list(cursor) else: return [] else: self.writeLog(u'db query failed') return []", "OHLC self.highArray = np.zeros(size) self.lowArray = np.zeros(size) self.closeArray = np.zeros(size)", "#---------------------------------------------------------------------- def updateBar(self, bar): \"\"\"1分钟K线更新\"\"\" # 尚未创建对象 if not self.xminBar:", "= BarData() self.xminBar.vtSymbol = bar.vtSymbol self.xminBar.symbol = bar.symbol self.xminBar.exchange =", "def __init__(self, size=100): \"\"\"Constructor\"\"\" self.count = 0 # 缓存计数 self.size", "= 0.0 EMPTY_INT = 0 #---------------------------------------------------------------------- class LoggerEngine(object): LogDir =", "主要Engine ''' class DataEngine(EngineBase): #---------------------------------------------------------------------- def __init__(self , _host =", "def low(self): \"\"\"获取最低价序列\"\"\" return self.lowArray #---------------------------------------------------------------------- @property def close(self): \"\"\"获取收盘价序列\"\"\"", "0: # 可以用X整除 # 生成上一X分钟K线的时间戳 self.xminBar.datetime = self.xminBar.datetime.replace(second=0, microsecond=0) 
#", "close(self): self.f.close() ''' tick 数据的格式 ''' class TickData(object): #---------------------------------------------------------------------- def", "self.xhourBar.high = max(self.xhourBar.high, bar.high) self.xhourBar.low = min(self.xhourBar.low, bar.low) # 通用部分", "= EMPTY_INT self.bidVolume3 = EMPTY_INT self.bidVolume4 = EMPTY_INT self.bidVolume5 =", "self.lastPrice = EMPTY_FLOAT # 最新成交价 self.lastVolume = EMPTY_INT # 最新成交量", "self.writeLog(u'database connection error') except ConnectionFailure: self.writeLog( u'fail in db connection')", "pymongo import MongoClient, ASCENDING from pymongo.errors import ConnectionFailure from datetime", "#---------------------------------------------------------------------- def __init__(self , _host = GLOBAL_MONGO_HOST , _port =", "from pymongo.errors import ConnectionFailure from datetime import datetime , timedelta", "= EMPTY_STRING # 日期 20151009 self.datetime = None # python的datetime时间对象", "# X分钟K线的回调函数 self.xhourBar = None # x小时K线对象 self.xhour = xhour", "size # 缓存大小 self.inited = False # True if count>=size", "else: self.dayBar.high = max(self.dayBar.high , bar.high) self.dayBar.low = min(self.dayBar.low ,", "# x小时K线对象 self.xhour = xhour # x的值 self.onXhourBar = onXhourBar", "= bar.low self.closeArray[-1] = bar.close self.volumeArray[-1] = bar.volume #---------------------------------------------------------------------- @property", "\"\"\"通过TICK数据更新到秒数据\"\"\" newSecond = False if not self.xsecBar: self.xsecBar = BarData()", "# 默认不是新的一分钟 # 尚未创建对象 if not self.bar: self.bar = BarData()", "tick.lastPrice self.xsecBar.low = tick.lastPrice # 累加更新老几秒的K线数据 else: self.xsecBar.high = max(self.xsecBar.high,", "self.lowArray #---------------------------------------------------------------------- @property def close(self): \"\"\"获取收盘价序列\"\"\" return self.closeArray #---------------------------------------------------------------------- @property", "# python的datetime时间对象 self.volume = EMPTY_INT # 成交量 self.openInterest = EMPTY_INT", 
"self.xsecBar.exchange = tick.exchange self.xsecBar.open = tick.lastPrice self.xsecBar.high = tick.lastPrice self.xsecBar.low", "self.f.flush() #---------------------------------------------------------------------- def close(self): self.f.close() ''' tick 数据的格式 ''' class", "bar.high self.dayBar.low = bar.low self.dayBar.datetime = bar.datetime else: self.dayBar.high =", "# 当前K线内的成交量 # 缓存Tick self.lastTick = tick #---------------------------------------------------------------------- def updateSecond(self,", "EMPTY_INT # 最新成交量 self.volume = EMPTY_INT # 今天总成交量 self.openInterest =", "_port = GLOBAL_MONGO_PORT): super(DataEngine, self).__init__() self.host = _host self.port =", "xmin # X的值 self.onXminBar = onXminBar # X分钟K线的回调函数 self.xhourBar =", "msg %s: %s \" % (str(error_id) , msg) + \"\\n\")", "# 今日最低价 self.preClosePrice = EMPTY_FLOAT self.upperLimit = EMPTY_FLOAT # 涨停价", "bar): \"\"\"1小时K线更新\"\"\" # 尚未创建对象 if not self.xhourBar: self.xhourBar = BarData()", "collectionName, d, 'datetime') l = [] for d in barData:", "= EMPTY_STRING # 代码 self.exchange = EMPTY_STRING # 交易所 self.open", "bar.high) self.dayBar.low = min(self.dayBar.low , bar.low) # 通用部分 self.dayBar.close =", "tick.openInterest if self.lastTick: self.bar.volume += (tick.volume - self.lastTick.volume) # 当前K线内的成交量", "= GLOBAL_USE_SYMBOL, days = 2): today_datetime = datetime.now() start_datetime =", "_port # MongoDB数据库相关 self.dbClient = None # MongoDB客户端对象 self.logger =", "db self.dbConnect() #---------------------------------------------------------------------- def dbConnect(self): \"\"\"连接MongoDB数据库\"\"\" if not self.dbClient: #", "ConnectionFailure: self.writeLog( u'fail in db connection') #---------------------------------------------------------------------- def dbQuery(self, dbName,", ", 2.第二天9点 if self.lastDayBar != None \\ and ( (self.lastDayBar.time", "@property def close(self): \"\"\"获取收盘价序列\"\"\" return self.closeArray #---------------------------------------------------------------------- 
@property def volume(self):", "EMPTY_INT self.askVolume5 = EMPTY_INT ######################################################################## class BarData(object): \"\"\"K线数据\"\"\" #---------------------------------------------------------------------- def", "bar.openInterest self.xminBar.volume += float(bar.volume) # X分钟已经走完 if ( (bar.datetime.minute +", "# 说明是新的一天了 # 先推送昨天过去 self.onDayBar( self.dayBar) self.dayBar = BarData() self.dayBar.vtSymbol", "self.dayBar.date = self.dayBar.datetime.strftime('%Y%m%d') self.dayBar.time = self.dayBar.datetime.strftime('%H:%M:%S') # 说明是新的一天了 # 先推送昨天过去", "#---------------------------------------------------------------------- @property def close(self): \"\"\"获取收盘价序列\"\"\" return self.closeArray #---------------------------------------------------------------------- @property def", "= None #---------------------------------------------------------------------------- def updateDayBar(self, bar): # 一天走完 # 1.", "self.xminBar.exchange = bar.exchange self.xminBar.open = bar.open self.xminBar.high = bar.high self.xminBar.low", "% (str(error_id) , msg) + \"\\n\") self.f.flush() #---------------------------------------------------------------------- def info(self,", "self.bidPrice5 = EMPTY_FLOAT self.askPrice1 = EMPTY_FLOAT self.askPrice2 = EMPTY_FLOAT self.askPrice3", "__init__(self): \"\"\"Constructor\"\"\" super(BarData, self).__init__() self.vtSymbol = EMPTY_STRING # vt系统代码 self.symbol", "and ( (self.lastDayBar.time <= \"15:30:00\" and bar.time >= \"15:30:00\") \\", "= True , open_md = \"w\"): if os.path.exists(self.LogDir) == False:", "时间 self.datetime = None # python的datetime时间对象 self.volume = EMPTY_INT #", "# x秒的回调函数 self.xminBar = None # X分钟K线对象 self.xmin = xmin", "self.openArray[0:self.size-1] = self.openArray[1:self.size] self.highArray[0:self.size-1] = self.highArray[1:self.size] self.lowArray[0:self.size-1] = self.lowArray[1:self.size] self.closeArray[0:self.size-1]", "缓存大小 self.inited = False # True if count>=size 
self.openArray =", "self.logPath = os.path.join(self.LogDir , logName) self.now_debug = in_debug if self.now_debug:", "self.logger: self.logger.info(content) #---------------------------------------------------------------------- def writeError(self, content , error_id = 0):", "if not self.dbClient: # 读取MongoDB的设置 try: # 设置MongoDB操作的超时时间为0.5秒 self.dbClient =", "self.xhourBar.vtSymbol = bar.vtSymbol self.xhourBar.symbol = bar.symbol self.xhourBar.exchange = bar.exchange self.xhourBar.open", "= tick.datetime self.bar.openInterest = tick.openInterest if self.lastTick: self.bar.volume += (tick.volume", "bar): \"\"\"1分钟K线更新\"\"\" # 尚未创建对象 if not self.xminBar: self.xminBar = BarData()", "缓存计数 self.size = size # 缓存大小 self.inited = False #", "in db connection') #---------------------------------------------------------------------- def dbQuery(self, dbName, collectionName, d, sortKey='',", "= tick.exchange self.xsecBar.open = tick.lastPrice self.xsecBar.high = tick.lastPrice self.xsecBar.low =", "\"\"\"1分钟K线更新\"\"\" # 尚未创建对象 if not self.xminBar: self.xminBar = BarData() self.xminBar.vtSymbol", "= bar.high self.dayBar.low = bar.low self.dayBar.datetime = bar.datetime else: self.dayBar.high", "query failed') return [] #----------------------------------------------------------------------- def loadBars( self, dbName =", "self.xsecBar.high = tick.lastPrice self.xsecBar.low = tick.lastPrice # 累加更新老几秒的K线数据 else: self.xsecBar.high", "BarManager(object): \"\"\" K线合成器,支持: 1. 基于Tick合成1分钟K线 2. 
基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60) \"\"\" #---------------------------------------------------------------------- def", "self.f.flush() #---------------------------------------------------------------------- def info(self, msg): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") +", "= tick.exchange self.bar.open = tick.lastPrice self.bar.high = tick.lastPrice self.bar.low =", "# X分钟已经走完 if ( (bar.datetime.hour + 1) % self.xhour )", "= onXminBar # X分钟K线的回调函数 self.xhourBar = None # x小时K线对象 self.xhour", "= EMPTY_FLOAT self.bidPrice5 = EMPTY_FLOAT self.askPrice1 = EMPTY_FLOAT self.askPrice2 =", "from pymongo import MongoClient, ASCENDING from pymongo.errors import ConnectionFailure from", "0 # 缓存计数 self.size = size # 缓存大小 self.inited =", "BarData() newMinute = True # 初始化新一分钟的K线数据 if newMinute: self.bar.vtSymbol =", "None # X分钟K线对象 self.xmin = xmin # X的值 self.onXminBar =", "1) % self.xmin ) == 0: # 可以用X整除 # 生成上一X分钟K线的时间戳", "self.onDayBar = onDayBar # 交易日K线的回调函数 self.lastDayBar = None #---------------------------------------------------------------------- def", "self.symbol = EMPTY_STRING # 代码 self.exchange = EMPTY_STRING # 交易所", "engine的基础类 ''' class EngineBase(object): #---------------------------------------------------------------------- def writeLog(self, content): if self.logger:", "tick.lastPrice self.xsecBar.high = tick.lastPrice self.xsecBar.low = tick.lastPrice # 累加更新老几秒的K线数据 else:", "= 0): \"\"\" 发送错误通知/记录日志文件 :param content: :return: \"\"\" if self.logger:", "min(self.bar.low, tick.lastPrice) # 通用更新部分 self.bar.close = tick.lastPrice self.bar.datetime = tick.datetime", "None # 一个交易日的bar对象 self.onDayBar = onDayBar # 交易日K线的回调函数 self.lastDayBar =", "K线序列管理工具,负责: 1. K线时间序列的维护 2. 
常用技术指标的计算 \"\"\" #---------------------------------------------------------------------- def __init__(self, size=100):", "\" % (str(error_id) , msg) + \"\\n\") self.f.flush() #---------------------------------------------------------------------- def", "EMPTY_FLOAT self.bidPrice4 = EMPTY_FLOAT self.bidPrice5 = EMPTY_FLOAT self.askPrice1 = EMPTY_FLOAT", "l.append(bar) return l ######################################################################## class BarManager(object): \"\"\" K线合成器,支持: 1. 基于Tick合成1分钟K线", "False # 默认不是新的一分钟 # 尚未创建对象 if not self.bar: self.bar =", "return l ######################################################################## class BarManager(object): \"\"\" K线合成器,支持: 1. 基于Tick合成1分钟K线 2.", "self.LogDir ) self.logPath = os.path.join(self.LogDir , logName) self.now_debug = in_debug", "self.xminBar.time = self.xminBar.datetime.strftime('%H:%M:%S') # 推送 self.onXminBar(self.xminBar) # 清空老K线缓存对象 self.xminBar =", "\"\"\"获取收盘价序列\"\"\" return self.closeArray #---------------------------------------------------------------------- @property def volume(self): \"\"\"获取成交量序列\"\"\" return self.volumeArray", "交易日K线的回调函数 self.lastDayBar = None #---------------------------------------------------------------------- def updateTick(self, tick): \"\"\"TICK更新\"\"\" newMinute", "######################################################################## class BarManager(object): \"\"\" K线合成器,支持: 1. 基于Tick合成1分钟K线 2. 
基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60) \"\"\"", "# X分钟K线对象 self.xmin = xmin # X的值 self.onXminBar = onXminBar", "if not self.xhourBar: self.xhourBar = BarData() self.xhourBar.vtSymbol = bar.vtSymbol self.xhourBar.symbol", "None #---------------------------------------------------------------------- def updateHourBar(self , bar): \"\"\"1小时K线更新\"\"\" # 尚未创建对象 if", "= self.xhourBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xhourBar.date = self.xhourBar.datetime.strftime('%Y%m%d') self.xhourBar.time =", "读取MongoDB的设置 try: # 设置MongoDB操作的超时时间为0.5秒 self.dbClient = MongoClient(self.host , self.port ,", "self.askPrice5 = EMPTY_FLOAT self.bidVolume1 = EMPTY_INT self.bidVolume2 = EMPTY_INT self.bidVolume3", "# X的值 self.onXminBar = onXminBar # X分钟K线的回调函数 self.xhourBar = None", "xhour # x的值 self.onXhourBar = onXhourBar # x小时K线的回调函数 self.lastTick =", "= np.zeros(size) self.lowArray = np.zeros(size) self.closeArray = np.zeros(size) self.volumeArray =", "close(self): \"\"\"获取收盘价序列\"\"\" return self.closeArray #---------------------------------------------------------------------- @property def volume(self): \"\"\"获取成交量序列\"\"\" return", "将秒和微秒设为0 self.dayBar.date = self.dayBar.datetime.strftime('%Y%m%d') self.dayBar.time = self.dayBar.datetime.strftime('%H:%M:%S') # 说明是新的一天了 #", "EMPTY_STRING # 交易所 self.open = EMPTY_FLOAT # OHLC self.high =", "= os.path.join(self.LogDir , logName) self.now_debug = in_debug if self.now_debug: self.f", "sortDirection) # 对查询出来的数据进行排序 else: cursor = collection.find(d) if cursor: return", "将秒和微秒设为0 self.xsecBar.date = self.xsecBar.datetime.strftime('%Y%m%d') self.xsecBar.time = self.xsecBar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上多少秒K线 self.onXsecBar(self.xsecBar)", "清空老K线缓存对象 self.xhourBar = None #---------------------------------------------------------------------------- def updateDayBar(self, bar): # 一天走完", "tick.lastPrice self.xsecBar.openInterest = tick.openInterest if self.lastSecondTick: self.xsecBar.volume += (tick.volume -", 
"当前Tick内的成交量 # 缓存 secondTick 对象 self.lastSecondTick = tick #---------------------------------------------------------------------- def", "loadBars( self, dbName = GLOBAL_USE_DBNAME , collectionName = GLOBAL_USE_SYMBOL, days", ", onXhourBar = None, onDayBar=None): \"\"\"Constructor\"\"\" self.bar = None #", "# 可以用X整除 # 生成上一X分钟K线的时间戳 self.xminBar.datetime = self.xminBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0", "timedelta import numpy as np ######################################################################################################################## # constants EXCHANGE_BITMEX =", "通用部分 self.xhourBar.close = bar.close self.xhourBar.openInterest = bar.openInterest self.xhourBar.volume += float(bar.volume)", "self.xmin ) == 0: # 可以用X整除 # 生成上一X分钟K线的时间戳 self.xminBar.datetime =", "self.xhourBar: self.xhourBar = BarData() self.xhourBar.vtSymbol = bar.vtSymbol self.xhourBar.symbol = bar.symbol", "# 多少秒K线对象 self.xsec = xsec # xsec的值 self.onXsecBar = onXsecBar", "<= self.lastDayBar.time )): self.dayBar.datetime = self.dayBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.dayBar.date", "self.xhourBar.close = bar.close self.xhourBar.openInterest = bar.openInterest self.xhourBar.volume += float(bar.volume) #", "error') except ConnectionFailure: self.writeLog( u'fail in db connection') #---------------------------------------------------------------------- def", "open_md) #---------------------------------------------------------------------- def error(self, msg , error_id): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d", "or (self.lastDayBar.time <= \"15:30:00\" and bar.time <= self.lastDayBar.time )): self.dayBar.datetime", "尚未创建对象 if not self.xhourBar: self.xhourBar = BarData() self.xhourBar.vtSymbol = bar.vtSymbol", "= tick #---------------------------------------------------------------------- def updateSecond(self, tick ): \"\"\"通过TICK数据更新到秒数据\"\"\" newSecond =", "EMPTY_FLOAT self.low = EMPTY_FLOAT self.close = 
EMPTY_FLOAT self.date = EMPTY_STRING", "self.time = EMPTY_STRING # 时间 self.datetime = None # python的datetime时间对象", "d, sortKey='', sortDirection=ASCENDING): \"\"\"从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针\"\"\" if self.dbClient: db = self.dbClient[dbName] collection", "def dbQuery(self, dbName, collectionName, d, sortKey='', sortDirection=ASCENDING): \"\"\"从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针\"\"\" if self.dbClient:", "+ \"Error msg %s: %s \" % (str(error_id) , msg)", "os.path.exists(self.LogDir) == False: os.mkdir( self.LogDir ) self.logPath = os.path.join(self.LogDir ,", "# 日期 20151009 self.datetime = None # python的datetime时间对象 # 常规行情", "X分钟K线对象 self.xmin = xmin # X的值 self.onXminBar = onXminBar #", "BarData() self.xminBar.vtSymbol = bar.vtSymbol self.xminBar.symbol = bar.symbol self.xminBar.exchange = bar.exchange", "self.dbClient = None # MongoDB客户端对象 self.logger = LoggerEngine(\"dataEngine.log\") ## init", "self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码 # 成交数据 self.lastPrice =", "except ConnectionFailure: self.writeLog( u'fail in db connection') #---------------------------------------------------------------------- def dbQuery(self,", "tick.lastPrice # 累加更新老几秒的K线数据 else: self.xsecBar.high = max(self.xsecBar.high, tick.lastPrice) self.xsecBar.low =", "np.zeros(size) self.volumeArray = np.zeros(size) #---------------------------------------------------------------------- def updateBar(self, bar): \"\"\"更新K线\"\"\" self.count", "EMPTY_STRING # 代码 self.exchange = EMPTY_STRING # 交易所 self.open =", "self.onDayBar( self.dayBar) self.dayBar = BarData() self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol =", "cursor = collection.find(d) if cursor: return list(cursor) else: return []", "= min(self.xhourBar.low, bar.low) # 通用部分 self.xhourBar.close = bar.close self.xhourBar.openInterest =", ", msg) + \"\\n\") self.f.flush() #---------------------------------------------------------------------- def info(self, msg): if", "min(self.dayBar.low , bar.low) # 通用部分 
self.dayBar.close = bar.close self.dayBar.openInterest =", "# 合约代码 self.exchange = EMPTY_STRING # 交易所代码 self.vtSymbol = EMPTY_STRING", "#---------------------------------------------------------------------- class LoggerEngine(object): LogDir = \"LogDir\" #---------------------------------------------------------------------- def __init__(self, logName", "#---------------------------------------------------------------------- def close(self): self.f.close() ''' tick 数据的格式 ''' class TickData(object):", "from settings import * import os from pymongo import MongoClient,", "#---------------------------------------------------------------------- def __init__(self, size=100): \"\"\"Constructor\"\"\" self.count = 0 # 缓存计数", "= min(self.xsecBar.low, tick.lastPrice) # 通用更新部分 self.xsecBar.close = tick.lastPrice self.xsecBar.openInterest =", "self.xminBar.symbol = bar.symbol self.xminBar.exchange = bar.exchange self.xminBar.open = bar.open self.xminBar.high", "self.xhourBar.symbol = bar.symbol self.xhourBar.exchange = bar.exchange self.xhourBar.open = bar.open self.xhourBar.high", "self.closeArray[0:self.size-1] = self.closeArray[1:self.size] self.volumeArray[0:self.size-1] = self.volumeArray[1:self.size] self.openArray[-1] = bar.open self.highArray[-1]", "None # 用于秒级别的上一根Tick缓存对象 self.dayBar = None # 一个交易日的bar对象 self.onDayBar =", "生成上一X分钟K线的时间戳 self.xminBar.datetime = self.xminBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xminBar.date = self.xminBar.datetime.strftime('%Y%m%d')", "__init__(self): \"\"\"Constructor\"\"\" super(TickData, self).__init__() # 代码相关 self.symbol = EMPTY_STRING #", "tick.exchange self.bar.open = tick.lastPrice self.bar.high = tick.lastPrice self.bar.low = tick.lastPrice", "self.dayBar) self.dayBar = BarData() self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol = bar.symbol", "MongoClient(self.host , self.port , connectTimeoutMS=500) # 调用server_info查询服务器状态,防止服务器异常并未连接成功 self.dbClient.server_info() self.writeLog(u'database 
connection", "# x的值 self.onXhourBar = onXhourBar # x小时K线的回调函数 self.lastTick = None", "bar.high self.lowArray[-1] = bar.low self.closeArray[-1] = bar.close self.volumeArray[-1] = bar.volume", "self.size = size # 缓存大小 self.inited = False # True", ", xmin=0 , xhour=0, onXminBar=None , onXhourBar = None, onDayBar=None):", "X的值 self.onXminBar = onXminBar # X分钟K线的回调函数 self.xhourBar = None #", "= self.highArray[1:self.size] self.lowArray[0:self.size-1] = self.lowArray[1:self.size] self.closeArray[0:self.size-1] = self.closeArray[1:self.size] self.volumeArray[0:self.size-1] =", "constants EXCHANGE_BITMEX = \"BITMEX\" EMPTY_STRING = \"\" EMPTY_FLOAT = 0.0", "= 2): today_datetime = datetime.now() start_datetime = today_datetime - timedelta(", "GLOBAL_USE_DBNAME , collectionName = GLOBAL_USE_SYMBOL, days = 2): today_datetime =", "= tick #---------------------------------------------------------------------- def updateBar(self, bar): \"\"\"1分钟K线更新\"\"\" # 尚未创建对象 if", "self).__init__() self.host = _host self.port = _port # MongoDB数据库相关 self.dbClient", "{'datetime':{'$gte':start_datetime , '$lte':today_datetime}} barData = self.dbQuery(dbName, collectionName, d, 'datetime') l", "self.bar.time = self.bar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上一分钟K线 self.onBar(self.bar) # 创建新的K线对象 self.bar =", "def __init__(self): \"\"\"Constructor\"\"\" super(TickData, self).__init__() # 代码相关 self.symbol = EMPTY_STRING", "######################################################################## class BarData(object): \"\"\"K线数据\"\"\" #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(BarData, self).__init__()", ", error_id): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" : \"", "= [] for d in barData: bar = BarData() bar.__dict__", "= np.zeros(size) self.volumeArray = np.zeros(size) #---------------------------------------------------------------------- def updateBar(self, bar): 
\"\"\"更新K线\"\"\"", "# python的datetime时间对象 # 常规行情 self.openPrice = EMPTY_FLOAT # 今日开盘价 self.highPrice", "= EMPTY_FLOAT self.askPrice3 = EMPTY_FLOAT self.askPrice4 = EMPTY_FLOAT self.askPrice5 =", "self.lastDayBar = None #---------------------------------------------------------------------- def updateTick(self, tick): \"\"\"TICK更新\"\"\" newMinute =", "encoding: utf-8 import sys from market_maker import OrderManager from settings", "= 0 #---------------------------------------------------------------------- class LoggerEngine(object): LogDir = \"LogDir\" #---------------------------------------------------------------------- def", "bar.time >= \"15:30:00\") \\ or (self.lastDayBar.time <= \"15:30:00\" and bar.time", "= 0 # 缓存计数 self.size = size # 缓存大小 self.inited", "交易所代码 self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码 # 成交数据 self.lastPrice", "# constants EXCHANGE_BITMEX = \"BITMEX\" EMPTY_STRING = \"\" EMPTY_FLOAT =", "onXhourBar # x小时K线的回调函数 self.lastTick = None # 上一TICK缓存对象 self.lastSecondTick =", "= bar.low self.dayBar.datetime = bar.datetime else: self.dayBar.high = max(self.dayBar.high ,", "%s \" % (str(error_id) , msg) + \"\\n\") self.f.flush() #----------------------------------------------------------------------", "None # 上一TICK缓存对象 self.lastSecondTick = None # 用于秒级别的上一根Tick缓存对象 self.dayBar =", "2): today_datetime = datetime.now() start_datetime = today_datetime - timedelta( days", "= tick.lastPrice self.bar.datetime = tick.datetime self.bar.openInterest = tick.openInterest if self.lastTick:", "= tick.lastPrice self.bar.high = tick.lastPrice self.bar.low = tick.lastPrice # 累加更新老一分钟的K线数据", "self.onXhourBar(self.xhourBar) # 清空老K线缓存对象 self.xhourBar = None #---------------------------------------------------------------------------- def updateDayBar(self, bar):", "if self.lastDayBar != None \\ and ( (self.lastDayBar.time <= \"15:30:00\"", "EMPTY_STRING # vt系统代码 self.symbol = EMPTY_STRING # 代码 self.exchange =", "high(self): \"\"\"获取最高价序列\"\"\" return 
self.highArray #---------------------------------------------------------------------- @property def low(self): \"\"\"获取最低价序列\"\"\" return", "= LoggerEngine(\"dataEngine.log\") ## init the db self.dbConnect() #---------------------------------------------------------------------- def dbConnect(self):", "float(bar.volume) # X分钟已经走完 if ( (bar.datetime.minute + 1) % self.xmin", "np ######################################################################################################################## # constants EXCHANGE_BITMEX = \"BITMEX\" EMPTY_STRING = \"\"", "os from pymongo import MongoClient, ASCENDING from pymongo.errors import ConnectionFailure", "\"\"\"获取最低价序列\"\"\" return self.lowArray #---------------------------------------------------------------------- @property def close(self): \"\"\"获取收盘价序列\"\"\" return self.closeArray", "bar.high self.xhourBar.low = bar.low self.xhourBar.datetime = bar.datetime else: self.xhourBar.high =", "用于秒级别的上一根Tick缓存对象 self.dayBar = None # 一个交易日的bar对象 self.onDayBar = onDayBar #", "= bar.close self.xhourBar.openInterest = bar.openInterest self.xhourBar.volume += float(bar.volume) # X分钟已经走完", "self.bar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.bar.date = self.bar.datetime.strftime('%Y%m%d') self.bar.time = self.bar.datetime.strftime('%H:%M:%S.%f')", "def __init__(self , _host = GLOBAL_MONGO_HOST , _port = GLOBAL_MONGO_PORT):", "= self.xsecBar.datetime.strftime('%Y%m%d') self.xsecBar.time = self.xsecBar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上多少秒K线 self.onXsecBar(self.xsecBar) # 清空老K线缓存对象", "= bar.openInterest self.xminBar.volume += float(bar.volume) # X分钟已经走完 if ( (bar.datetime.minute", "db[collectionName] if sortKey: cursor = collection.find(d).sort(sortKey, sortDirection) # 对查询出来的数据进行排序 else:", "def __init__(self): \"\"\"Constructor\"\"\" super(BarData, self).__init__() self.vtSymbol = EMPTY_STRING # vt系统代码", "self.lowArray[-1] = bar.low self.closeArray[-1] = bar.close self.volumeArray[-1] = bar.volume 
#----------------------------------------------------------------------", "bar.close self.xminBar.openInterest = bar.openInterest self.xminBar.volume += float(bar.volume) # X分钟已经走完 if", "#---------------------------------------------------------------------- def dbQuery(self, dbName, collectionName, d, sortKey='', sortDirection=ASCENDING): \"\"\"从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针\"\"\" if", "#---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(BarData, self).__init__() self.vtSymbol = EMPTY_STRING #", "def updateBar(self, bar): \"\"\"1分钟K线更新\"\"\" # 尚未创建对象 if not self.xminBar: self.xminBar", "class ArrayManager(object): \"\"\" K线序列管理工具,负责: 1. K线时间序列的维护 2. 常用技术指标的计算 \"\"\" #----------------------------------------------------------------------", "= bar.exchange self.dayBar.open = bar.open self.dayBar.high = bar.high self.dayBar.low =", "= EMPTY_STRING # 时间 self.datetime = None # python的datetime时间对象 self.volume", "= tick.lastPrice self.xsecBar.high = tick.lastPrice self.xsecBar.low = tick.lastPrice # 累加更新老几秒的K线数据", "BarData() newSecond = True # 初始化新多少秒的K线数据 if newSecond : self.xsecBar.datetime", "#---------------------------------------------------------------------- @property def open(self): \"\"\"获取开盘价序列\"\"\" return self.openArray #---------------------------------------------------------------------- @property def", "self.lastSecondTick: self.xsecBar.volume += (tick.volume - self.lastSecondTick.volume) # 当前Tick内的成交量 # 缓存", "# 持仓量 self.time = EMPTY_STRING # 时间 11:20:56.5 self.date =", "self.openPrice = EMPTY_FLOAT # 今日开盘价 self.highPrice = EMPTY_FLOAT # 今日最高价", "not self.xsecBar: self.xsecBar = BarData() newSecond = True elif self.xsecBar.datetime.second", "累加更新老一分钟的K线数据 else: self.bar.high = max(self.bar.high, tick.lastPrice) self.bar.low = min(self.bar.low, tick.lastPrice)", "= self.bar.datetime.strftime('%Y%m%d') self.bar.time = self.bar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上一分钟K线 
self.onBar(self.bar) # 创建新的K线对象", "K线合成器,支持: 1. 基于Tick合成1分钟K线 2. 基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60) \"\"\" #---------------------------------------------------------------------- def __init__(self, onBar,", "设置MongoDB操作的超时时间为0.5秒 self.dbClient = MongoClient(self.host , self.port , connectTimeoutMS=500) # 调用server_info查询服务器状态,防止服务器异常并未连接成功", "# 一个交易日的bar对象 self.onDayBar = onDayBar # 交易日K线的回调函数 self.lastDayBar = None", "self.bar = BarData() newMinute = True # 初始化新一分钟的K线数据 if newMinute:", "self.lastTick.volume) # 当前K线内的成交量 # 缓存Tick self.lastTick = tick #---------------------------------------------------------------------- def", "# 创建新的K线对象 self.bar = BarData() newMinute = True # 初始化新一分钟的K线数据", "if self.logger: self.logger.info(content) #---------------------------------------------------------------------- def writeError(self, content , error_id =", "self.lastVolume = EMPTY_INT # 最新成交量 self.volume = EMPTY_INT # 今天总成交量", "一天走完 # 1. 夜盘 , 2.第二天9点 if self.lastDayBar != None", "\"\"\"Constructor\"\"\" self.count = 0 # 缓存计数 self.size = size #", "self.dbClient[dbName] collection = db[collectionName] if sortKey: cursor = collection.find(d).sort(sortKey, sortDirection)", "= None # python的datetime时间对象 self.volume = EMPTY_INT # 成交量 self.openInterest", "None #---------------------------------------------------------------------------- def updateDayBar(self, bar): # 一天走完 # 1. 
夜盘", "#---------------------------------------------------------------------- def __init__(self, logName , in_debug = True , open_md", "MongoDB客户端对象 self.logger = LoggerEngine(\"dataEngine.log\") ## init the db self.dbConnect() #----------------------------------------------------------------------", "= bar.low self.xminBar.datetime = bar.datetime # 累加老K线 else: self.xminBar.high =", "GLOBAL_MONGO_HOST , _port = GLOBAL_MONGO_PORT): super(DataEngine, self).__init__() self.host = _host", "self.highArray[1:self.size] self.lowArray[0:self.size-1] = self.lowArray[1:self.size] self.closeArray[0:self.size-1] = self.closeArray[1:self.size] self.volumeArray[0:self.size-1] = self.volumeArray[1:self.size]", "python的datetime时间对象 self.volume = EMPTY_INT # 成交量 self.openInterest = EMPTY_INT #", "= EMPTY_FLOAT self.bidPrice2 = EMPTY_FLOAT self.bidPrice3 = EMPTY_FLOAT self.bidPrice4 =", "self.onBar(self.bar) # 创建新的K线对象 self.bar = BarData() newMinute = True #", "self.bar.open = tick.lastPrice self.bar.high = tick.lastPrice self.bar.low = tick.lastPrice #", "self.xminBar.low = min(self.xminBar.low, bar.low) # 通用部分 self.xminBar.close = bar.close self.xminBar.openInterest", "bar.close self.volumeArray[-1] = bar.volume #---------------------------------------------------------------------- @property def open(self): \"\"\"获取开盘价序列\"\"\" return", "# 可以用X整除 # 生成上一X分钟K线的时间戳 self.xhourBar.datetime = self.xhourBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0", "super(DataEngine, self).__init__() self.host = _host self.port = _port # MongoDB数据库相关", "1. 基于Tick合成1分钟K线 2. 基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60) \"\"\" #---------------------------------------------------------------------- def __init__(self, onBar, xsec=0,", "= tick.openInterest if self.lastTick: self.bar.volume += (tick.volume - self.lastTick.volume) #", "count>=size self.openArray = np.zeros(size) # OHLC self.highArray = np.zeros(size) self.lowArray", "class BarManager(object): \"\"\" K线合成器,支持: 1. 基于Tick合成1分钟K线 2. 
基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60) \"\"\" #----------------------------------------------------------------------", "newMinute = True # 初始化新一分钟的K线数据 if newMinute: self.bar.vtSymbol = tick.vtSymbol", "self.openInterest = EMPTY_INT # 持仓量 ''' engine的基础类 ''' class EngineBase(object):", "self.bar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上一分钟K线 self.onBar(self.bar) # 创建新的K线对象 self.bar = BarData() newMinute", "bar.high) self.xminBar.low = min(self.xminBar.low, bar.low) # 通用部分 self.xminBar.close = bar.close", "(self.lastDayBar.time <= \"15:30:00\" and bar.time <= self.lastDayBar.time )): self.dayBar.datetime =", "= bar.datetime # 累加老K线 else: self.xminBar.high = max(self.xminBar.high, bar.high) self.xminBar.low", "if self.dbClient: db = self.dbClient[dbName] collection = db[collectionName] if sortKey:", "self.xminBar = BarData() self.xminBar.vtSymbol = bar.vtSymbol self.xminBar.symbol = bar.symbol self.xminBar.exchange", "X分钟已经走完 if ( (bar.datetime.minute + 1) % self.xmin ) ==", "pymongo.errors import ConnectionFailure from datetime import datetime , timedelta import", "bar.open self.xminBar.high = bar.high self.xminBar.low = bar.low self.xminBar.datetime = bar.datetime", "def updateSecond(self, tick ): \"\"\"通过TICK数据更新到秒数据\"\"\" newSecond = False if not", "\"\"\" K线序列管理工具,负责: 1. K线时间序列的维护 2. 
常用技术指标的计算 \"\"\" #---------------------------------------------------------------------- def __init__(self,", ">= self.size: self.inited = True self.openArray[0:self.size-1] = self.openArray[1:self.size] self.highArray[0:self.size-1] =", "self.volumeArray[-1] = bar.volume #---------------------------------------------------------------------- @property def open(self): \"\"\"获取开盘价序列\"\"\" return self.openArray", "self.exchange = EMPTY_STRING # 交易所 self.open = EMPTY_FLOAT # OHLC", "self.xminBar: self.xminBar = BarData() self.xminBar.vtSymbol = bar.vtSymbol self.xminBar.symbol = bar.symbol", "EMPTY_FLOAT # 今日最低价 self.preClosePrice = EMPTY_FLOAT self.upperLimit = EMPTY_FLOAT #", "days = 2): today_datetime = datetime.now() start_datetime = today_datetime -", "[] for d in barData: bar = BarData() bar.__dict__ =", "\"BITMEX\" EMPTY_STRING = \"\" EMPTY_FLOAT = 0.0 EMPTY_INT = 0", "EMPTY_FLOAT self.upperLimit = EMPTY_FLOAT # 涨停价 self.lowerLimit = EMPTY_FLOAT #", "True elif self.xsecBar.datetime.second != tick.datetime.second and ( (tick.datetime.second) % self.xsec", "= onBar # 1分钟K线回调函数 self.xsecBar = None # 多少秒K线对象 self.xsec", "11:20:56.5 self.date = EMPTY_STRING # 日期 20151009 self.datetime = None", "self.xsecBar.open = tick.lastPrice self.xsecBar.high = tick.lastPrice self.xsecBar.low = tick.lastPrice #", "通用更新部分 self.xsecBar.close = tick.lastPrice self.xsecBar.openInterest = tick.openInterest if self.lastSecondTick: self.xsecBar.volume", "一个交易日的bar对象 self.onDayBar = onDayBar # 交易日K线的回调函数 self.lastDayBar = None #----------------------------------------------------------------------", "# OHLC self.high = EMPTY_FLOAT self.low = EMPTY_FLOAT self.close =", "\"\"\" K线合成器,支持: 1. 基于Tick合成1分钟K线 2. 
基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60) \"\"\" #---------------------------------------------------------------------- def __init__(self,", "self.logger: self.logger.error(content , error_id) ''' 主要Engine ''' class DataEngine(EngineBase): #----------------------------------------------------------------------", "EXCHANGE_BITMEX = \"BITMEX\" EMPTY_STRING = \"\" EMPTY_FLOAT = 0.0 EMPTY_INT", "对象 self.lastSecondTick = tick #---------------------------------------------------------------------- def updateBar(self, bar): \"\"\"1分钟K线更新\"\"\" #", ", logName) self.now_debug = in_debug if self.now_debug: self.f = open(", "# 涨停价 self.lowerLimit = EMPTY_FLOAT # 跌停价 # 五档行情 self.bidPrice1", "= self.openArray[1:self.size] self.highArray[0:self.size-1] = self.highArray[1:self.size] self.lowArray[0:self.size-1] = self.lowArray[1:self.size] self.closeArray[0:self.size-1] =", "合约代码.交易所代码 # 成交数据 self.lastPrice = EMPTY_FLOAT # 最新成交价 self.lastVolume =", "EMPTY_INT self.bidVolume4 = EMPTY_INT self.bidVolume5 = EMPTY_INT self.askVolume1 = EMPTY_INT", "self.askPrice2 = EMPTY_FLOAT self.askPrice3 = EMPTY_FLOAT self.askPrice4 = EMPTY_FLOAT self.askPrice5", "= EMPTY_INT self.askVolume5 = EMPTY_INT ######################################################################## class BarData(object): \"\"\"K线数据\"\"\" #----------------------------------------------------------------------", "error_id = 0): \"\"\" 发送错误通知/记录日志文件 :param content: :return: \"\"\" if", "= BarData() newSecond = True # 初始化新多少秒的K线数据 if newSecond :", "self.logger.info(content) #---------------------------------------------------------------------- def writeError(self, content , error_id = 0): \"\"\"", "max(self.xhourBar.high, bar.high) self.xhourBar.low = min(self.xhourBar.low, bar.low) # 通用部分 self.xhourBar.close =", "self.low = EMPTY_FLOAT self.close = EMPTY_FLOAT self.date = EMPTY_STRING #", "交易所 self.open = EMPTY_FLOAT # OHLC self.high = EMPTY_FLOAT self.low", "if self.lastTick: self.bar.volume += (tick.volume - 
self.lastTick.volume) # 当前K线内的成交量 #", "open( self.logPath , open_md) #---------------------------------------------------------------------- def error(self, msg , error_id):", "datetime , timedelta import numpy as np ######################################################################################################################## # constants", "None #---------------------------------------------------------------------- def updateTick(self, tick): \"\"\"TICK更新\"\"\" newMinute = False #", "xsec的值 self.onXsecBar = onXsecBar # x秒的回调函数 self.xminBar = None #", "self).__init__() self.vtSymbol = EMPTY_STRING # vt系统代码 self.symbol = EMPTY_STRING #", "= None # x小时K线对象 self.xhour = xhour # x的值 self.onXhourBar", "#---------------------------------------------------------------------- def error(self, msg , error_id): if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")", "collection = db[collectionName] if sortKey: cursor = collection.find(d).sort(sortKey, sortDirection) #", "( (tick.datetime.second) % self.xsec == 0 ): self.xsecBar.datetime = self.xsecBar.datetime.replace(", "bar.low self.dayBar.datetime = bar.datetime else: self.dayBar.high = max(self.dayBar.high , bar.high)", "= EMPTY_FLOAT self.bidVolume1 = EMPTY_INT self.bidVolume2 = EMPTY_INT self.bidVolume3 =", "self.volume = EMPTY_INT # 成交量 self.openInterest = EMPTY_INT # 持仓量", "@property def low(self): \"\"\"获取最低价序列\"\"\" return self.lowArray #---------------------------------------------------------------------- @property def close(self):", "= EMPTY_STRING # 交易所 self.open = EMPTY_FLOAT # OHLC self.high", "self.dayBar.datetime = self.dayBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.dayBar.date = self.dayBar.datetime.strftime('%Y%m%d') self.dayBar.time", "# 今日最高价 self.lowPrice = EMPTY_FLOAT # 今日最低价 self.preClosePrice = EMPTY_FLOAT", "'datetime') l = [] for d in barData: bar =", "= None, onDayBar=None): \"\"\"Constructor\"\"\" self.bar = None # 1分钟K线对象 self.onBar", 
"self.xsecBar.datetime = self.xsecBar.datetime.replace( microsecond=0) # 将秒和微秒设为0 self.xsecBar.date = self.xsecBar.datetime.strftime('%Y%m%d') self.xsecBar.time", "self.openArray[-1] = bar.open self.highArray[-1] = bar.high self.lowArray[-1] = bar.low self.closeArray[-1]", "self.xsec = xsec # xsec的值 self.onXsecBar = onXsecBar # x秒的回调函数", "= bar.openInterest self.dayBar.volume += float(bar.volume) self.lastDayBar = bar ######################################################################## class", "= tick.datetime self.xsecBar.vtSymbol = tick.vtSymbol self.xsecBar.symbol = tick.symbol self.xsecBar.exchange =", "self.dayBar.low = bar.low self.dayBar.datetime = bar.datetime else: self.dayBar.high = max(self.dayBar.high", "self.xminBar.high = max(self.xminBar.high, bar.high) self.xminBar.low = min(self.xminBar.low, bar.low) # 通用部分", "x小时K线对象 self.xhour = xhour # x的值 self.onXhourBar = onXhourBar #", "0 #---------------------------------------------------------------------- class LoggerEngine(object): LogDir = \"LogDir\" #---------------------------------------------------------------------- def __init__(self,", "时间 11:20:56.5 self.date = EMPTY_STRING # 日期 20151009 self.datetime =", "#----------------------------------------------------------------------- def loadBars( self, dbName = GLOBAL_USE_DBNAME , collectionName =", "numpy as np ######################################################################################################################## # constants EXCHANGE_BITMEX = \"BITMEX\" EMPTY_STRING", "# bar开始的时间,日期 self.time = EMPTY_STRING # 时间 self.datetime = None", "self.dayBar.datetime = bar.datetime elif not self.dayBar: self.dayBar = BarData() self.dayBar.vtSymbol", "self.xsecBar.datetime.second != tick.datetime.second and ( (tick.datetime.second) % self.xsec == 0", "= BarData() bar.__dict__ = d l.append(bar) return l ######################################################################## class", "tick 
#---------------------------------------------------------------------- def updateSecond(self, tick ): \"\"\"通过TICK数据更新到秒数据\"\"\" newSecond = False", "self.xminBar.volume += float(bar.volume) # X分钟已经走完 if ( (bar.datetime.minute + 1)", "\"\"\" 发送错误通知/记录日志文件 :param content: :return: \"\"\" if self.logger: self.logger.error(content ,", "tick.lastPrice self.bar.high = tick.lastPrice self.bar.low = tick.lastPrice # 累加更新老一分钟的K线数据 else:", "if not self.bar: self.bar = BarData() newMinute = True #", "1分钟K线对象 self.onBar = onBar # 1分钟K线回调函数 self.xsecBar = None #", "EMPTY_STRING # 时间 11:20:56.5 self.date = EMPTY_STRING # 日期 20151009", "None # python的datetime时间对象 self.volume = EMPTY_INT # 成交量 self.openInterest =", "清空老K线缓存对象 self.xminBar = None #---------------------------------------------------------------------- def updateHourBar(self , bar): \"\"\"1小时K线更新\"\"\"", "# 五档行情 self.bidPrice1 = EMPTY_FLOAT self.bidPrice2 = EMPTY_FLOAT self.bidPrice3 =", "= True # 初始化新一分钟的K线数据 if newMinute: self.bar.vtSymbol = tick.vtSymbol self.bar.symbol", "\"15:30:00\" and bar.time >= \"15:30:00\") \\ or (self.lastDayBar.time <= \"15:30:00\"", "self.bar.high = tick.lastPrice self.bar.low = tick.lastPrice # 累加更新老一分钟的K线数据 else: self.bar.high", "self.dayBar.datetime.strftime('%Y%m%d') self.dayBar.time = self.dayBar.datetime.strftime('%H:%M:%S') # 说明是新的一天了 # 先推送昨天过去 self.onDayBar( self.dayBar)", "self.xminBar = None #---------------------------------------------------------------------- def updateHourBar(self , bar): \"\"\"1小时K线更新\"\"\" #", "_host self.port = _port # MongoDB数据库相关 self.dbClient = None #", "onXhourBar = None, onDayBar=None): \"\"\"Constructor\"\"\" self.bar = None # 1分钟K线对象", "= None #---------------------------------------------------------------------- def updateTick(self, tick): \"\"\"TICK更新\"\"\" newMinute = False", "# 时间 11:20:56.5 self.date = EMPTY_STRING # 日期 20151009 self.datetime", "= bar.close self.dayBar.openInterest = bar.openInterest self.dayBar.volume += 
float(bar.volume) self.lastDayBar =", "= BarData() newMinute = True # 新的一分钟 elif self.bar.datetime.minute !=", "#---------------------------------------------------------------------- def updateSecond(self, tick ): \"\"\"通过TICK数据更新到秒数据\"\"\" newSecond = False if", "= EMPTY_INT # 最新成交量 self.volume = EMPTY_INT # 今天总成交量 self.openInterest", "EMPTY_INT self.bidVolume5 = EMPTY_INT self.askVolume1 = EMPTY_INT self.askVolume2 = EMPTY_INT", "onXsecBar=None , xmin=0 , xhour=0, onXminBar=None , onXhourBar = None,", "+= float(bar.volume) # X分钟已经走完 if ( (bar.datetime.hour + 1) %", "self.closeArray = np.zeros(size) self.volumeArray = np.zeros(size) #---------------------------------------------------------------------- def updateBar(self, bar):", "self.xmin = xmin # X的值 self.onXminBar = onXminBar # X分钟K线的回调函数", "_host = GLOBAL_MONGO_HOST , _port = GLOBAL_MONGO_PORT): super(DataEngine, self).__init__() self.host", "self.xhour ) == 0: # 可以用X整除 # 生成上一X分钟K线的时间戳 self.xhourBar.datetime =", "dbName, collectionName, d, sortKey='', sortDirection=ASCENDING): \"\"\"从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针\"\"\" if self.dbClient: db =", "#---------------------------------------------------------------------------- def updateDayBar(self, bar): # 一天走完 # 1. 
夜盘 ,", "# 累加老K线 else: self.xminBar.high = max(self.xminBar.high, bar.high) self.xminBar.low = min(self.xminBar.low,", "= np.zeros(size) self.closeArray = np.zeros(size) self.volumeArray = np.zeros(size) #---------------------------------------------------------------------- def", "= _host self.port = _port # MongoDB数据库相关 self.dbClient = None", "= True self.openArray[0:self.size-1] = self.openArray[1:self.size] self.highArray[0:self.size-1] = self.highArray[1:self.size] self.lowArray[0:self.size-1] =", "secondTick 对象 self.lastSecondTick = tick #---------------------------------------------------------------------- def updateBar(self, bar): \"\"\"1分钟K线更新\"\"\"", "= BarData() self.xhourBar.vtSymbol = bar.vtSymbol self.xhourBar.symbol = bar.symbol self.xhourBar.exchange =", "np.zeros(size) # OHLC self.highArray = np.zeros(size) self.lowArray = np.zeros(size) self.closeArray", "0 ): self.xsecBar.datetime = self.xsecBar.datetime.replace( microsecond=0) # 将秒和微秒设为0 self.xsecBar.date =", "def __init__(self, onBar, xsec=0, onXsecBar=None , xmin=0 , xhour=0, onXminBar=None", "DataEngine(EngineBase): #---------------------------------------------------------------------- def __init__(self , _host = GLOBAL_MONGO_HOST , _port", "EMPTY_FLOAT self.askPrice2 = EMPTY_FLOAT self.askPrice3 = EMPTY_FLOAT self.askPrice4 = EMPTY_FLOAT", "onXsecBar # x秒的回调函数 self.xminBar = None # X分钟K线对象 self.xmin =", "尚未创建对象 if not self.bar: self.bar = BarData() newMinute = True", "ArrayManager(object): \"\"\" K线序列管理工具,负责: 1. K线时间序列的维护 2. 
常用技术指标的计算 \"\"\" #---------------------------------------------------------------------- def", "成交数据 self.lastPrice = EMPTY_FLOAT # 最新成交价 self.lastVolume = EMPTY_INT #", "# 1分钟K线回调函数 self.xsecBar = None # 多少秒K线对象 self.xsec = xsec", "= xmin # X的值 self.onXminBar = onXminBar # X分钟K线的回调函数 self.xhourBar", "updateTick(self, tick): \"\"\"TICK更新\"\"\" newMinute = False # 默认不是新的一分钟 # 尚未创建对象", "newMinute = False # 默认不是新的一分钟 # 尚未创建对象 if not self.bar:", "updateBar(self, bar): \"\"\"更新K线\"\"\" self.count += 1 if not self.inited and", "= db[collectionName] if sortKey: cursor = collection.find(d).sort(sortKey, sortDirection) # 对查询出来的数据进行排序", "= EMPTY_INT self.askVolume4 = EMPTY_INT self.askVolume5 = EMPTY_INT ######################################################################## class", "self.askPrice1 = EMPTY_FLOAT self.askPrice2 = EMPTY_FLOAT self.askPrice3 = EMPTY_FLOAT self.askPrice4", "新的一分钟 elif self.bar.datetime.minute != tick.datetime.minute: # 生成上一分钟K线的时间戳 self.bar.datetime = self.bar.datetime.replace(second=0,", "= None # MongoDB客户端对象 self.logger = LoggerEngine(\"dataEngine.log\") ## init the", "GLOBAL_USE_SYMBOL, days = 2): today_datetime = datetime.now() start_datetime = today_datetime", "= bar.open self.dayBar.high = bar.high self.dayBar.low = bar.low self.dayBar.datetime =", "# 推送 self.onXhourBar(self.xhourBar) # 清空老K线缓存对象 self.xhourBar = None #---------------------------------------------------------------------------- def", "# 上一TICK缓存对象 self.lastSecondTick = None # 用于秒级别的上一根Tick缓存对象 self.dayBar = None", "self.bar.datetime = self.bar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.bar.date = self.bar.datetime.strftime('%Y%m%d') self.bar.time", "not self.inited and self.count >= self.size: self.inited = True self.openArray[0:self.size-1]", "d = {'datetime':{'$gte':start_datetime , '$lte':today_datetime}} barData = self.dbQuery(dbName, collectionName, d,", "bar.datetime else: self.xhourBar.high = max(self.xhourBar.high, bar.high) self.xhourBar.low = 
min(self.xhourBar.low, bar.low)", "+ \"\\n\") self.f.flush() #---------------------------------------------------------------------- def close(self): self.f.close() ''' tick 数据的格式", "self.highPrice = EMPTY_FLOAT # 今日最高价 self.lowPrice = EMPTY_FLOAT # 今日最低价", "elif self.bar.datetime.minute != tick.datetime.minute: # 生成上一分钟K线的时间戳 self.bar.datetime = self.bar.datetime.replace(second=0, microsecond=0)", "self.dayBar.datetime = bar.datetime else: self.dayBar.high = max(self.dayBar.high , bar.high) self.dayBar.low", "= GLOBAL_MONGO_HOST , _port = GLOBAL_MONGO_PORT): super(DataEngine, self).__init__() self.host =", "# vt系统代码 self.symbol = EMPTY_STRING # 代码 self.exchange = EMPTY_STRING", "size=100): \"\"\"Constructor\"\"\" self.count = 0 # 缓存计数 self.size = size", "return [] else: self.writeLog(u'db query failed') return [] #----------------------------------------------------------------------- def", "= np.zeros(size) # OHLC self.highArray = np.zeros(size) self.lowArray = np.zeros(size)", "= EMPTY_INT # 持仓量 self.time = EMPTY_STRING # 时间 11:20:56.5", "# 缓存计数 self.size = size # 缓存大小 self.inited = False", "+= 1 if not self.inited and self.count >= self.size: self.inited", "# 缓存 secondTick 对象 self.lastSecondTick = tick #---------------------------------------------------------------------- def updateBar(self,", "EMPTY_FLOAT # 最新成交价 self.lastVolume = EMPTY_INT # 最新成交量 self.volume =", "content: :return: \"\"\" if self.logger: self.logger.error(content , error_id) ''' 主要Engine", "!= tick.datetime.second and ( (tick.datetime.second) % self.xsec == 0 ):", "self.xsecBar.datetime = tick.datetime self.xsecBar.vtSymbol = tick.vtSymbol self.xsecBar.symbol = tick.symbol self.xsecBar.exchange", "return [] #----------------------------------------------------------------------- def loadBars( self, dbName = GLOBAL_USE_DBNAME ,", "float(bar.volume) # X分钟已经走完 if ( (bar.datetime.hour + 1) % self.xhour", "self.time = EMPTY_STRING # 时间 11:20:56.5 self.date = EMPTY_STRING #", "= EMPTY_INT 
self.askVolume1 = EMPTY_INT self.askVolume2 = EMPTY_INT self.askVolume3 =", "msg + \"\\n\") self.f.flush() #---------------------------------------------------------------------- def close(self): self.f.close() ''' tick", "float(bar.volume) self.lastDayBar = bar ######################################################################## class ArrayManager(object): \"\"\" K线序列管理工具,负责: 1.", "if os.path.exists(self.LogDir) == False: os.mkdir( self.LogDir ) self.logPath = os.path.join(self.LogDir", "ConnectionFailure from datetime import datetime , timedelta import numpy as", "tick.lastPrice) # 通用更新部分 self.bar.close = tick.lastPrice self.bar.datetime = tick.datetime self.bar.openInterest", "bar.high) self.xhourBar.low = min(self.xhourBar.low, bar.low) # 通用部分 self.xhourBar.close = bar.close", "# 通用部分 self.xhourBar.close = bar.close self.xhourBar.openInterest = bar.openInterest self.xhourBar.volume +=", "\"15:30:00\") \\ or (self.lastDayBar.time <= \"15:30:00\" and bar.time <= self.lastDayBar.time", "''' class EngineBase(object): #---------------------------------------------------------------------- def writeLog(self, content): if self.logger: self.logger.info(content)", "bar = BarData() bar.__dict__ = d l.append(bar) return l ########################################################################", "<= \"15:30:00\" and bar.time <= self.lastDayBar.time )): self.dayBar.datetime = self.dayBar.datetime.replace(second=0,", "(str(error_id) , msg) + \"\\n\") self.f.flush() #---------------------------------------------------------------------- def info(self, msg):", "elif self.xsecBar.datetime.second != tick.datetime.second and ( (tick.datetime.second) % self.xsec ==", "\"\"\"从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针\"\"\" if self.dbClient: db = self.dbClient[dbName] collection = db[collectionName] if", "# 推送已经结束的上一分钟K线 self.onBar(self.bar) # 创建新的K线对象 self.bar = BarData() newMinute =", 
"######################################################################################################################## # constants EXCHANGE_BITMEX = \"BITMEX\" EMPTY_STRING = \"\" EMPTY_FLOAT", "= self.xhourBar.datetime.strftime('%Y%m%d') self.xhourBar.time = self.xhourBar.datetime.strftime('%H:%M:%S') # 推送 self.onXhourBar(self.xhourBar) # 清空老K线缓存对象", "self.logger = LoggerEngine(\"dataEngine.log\") ## init the db self.dbConnect() #---------------------------------------------------------------------- def", "初始化新一分钟的K线数据 if newMinute: self.bar.vtSymbol = tick.vtSymbol self.bar.symbol = tick.symbol self.bar.exchange", "import MongoClient, ASCENDING from pymongo.errors import ConnectionFailure from datetime import", "= bar.vtSymbol self.dayBar.symbol = bar.symbol self.dayBar.exchange = bar.exchange self.dayBar.open =", "self.bidVolume2 = EMPTY_INT self.bidVolume3 = EMPTY_INT self.bidVolume4 = EMPTY_INT self.bidVolume5", "failed') return [] #----------------------------------------------------------------------- def loadBars( self, dbName = GLOBAL_USE_DBNAME", "self.bar.symbol = tick.symbol self.bar.exchange = tick.exchange self.bar.open = tick.lastPrice self.bar.high", "= tick.lastPrice # 累加更新老一分钟的K线数据 else: self.bar.high = max(self.bar.high, tick.lastPrice) self.bar.low", "# 缓存大小 self.inited = False # True if count>=size self.openArray", "self.bar.low = min(self.bar.low, tick.lastPrice) # 通用更新部分 self.bar.close = tick.lastPrice self.bar.datetime", "start_datetime = today_datetime - timedelta( days = days) d =", "= EMPTY_FLOAT # 跌停价 # 五档行情 self.bidPrice1 = EMPTY_FLOAT self.bidPrice2", "self.lowerLimit = EMPTY_FLOAT # 跌停价 # 五档行情 self.bidPrice1 = EMPTY_FLOAT", "self.xsecBar = BarData() newSecond = True elif self.xsecBar.datetime.second != tick.datetime.second", "else: self.writeLog(u'db query failed') return [] #----------------------------------------------------------------------- def loadBars( self,", ", open_md = \"w\"): if os.path.exists(self.LogDir) == False: 
os.mkdir( self.LogDir", "self.xhourBar.datetime = self.xhourBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0 self.xhourBar.date = self.xhourBar.datetime.strftime('%Y%m%d') self.xhourBar.time", "= BarData() self.dayBar.vtSymbol = bar.vtSymbol self.dayBar.symbol = bar.symbol self.dayBar.exchange =", "EMPTY_FLOAT # 涨停价 self.lowerLimit = EMPTY_FLOAT # 跌停价 # 五档行情", "if self.now_debug: self.f = open( self.logPath , open_md) #---------------------------------------------------------------------- def", ") == 0: # 可以用X整除 # 生成上一X分钟K线的时间戳 self.xminBar.datetime = self.xminBar.datetime.replace(second=0,", "= bar.vtSymbol self.xminBar.symbol = bar.symbol self.xminBar.exchange = bar.exchange self.xminBar.open =", "= np.zeros(size) #---------------------------------------------------------------------- def updateBar(self, bar): \"\"\"更新K线\"\"\" self.count += 1", "np.zeros(size) self.lowArray = np.zeros(size) self.closeArray = np.zeros(size) self.volumeArray = np.zeros(size)", "newMinute: self.bar.vtSymbol = tick.vtSymbol self.bar.symbol = tick.symbol self.bar.exchange = tick.exchange", "多少秒K线对象 self.xsec = xsec # xsec的值 self.onXsecBar = onXsecBar #", "self.count = 0 # 缓存计数 self.size = size # 缓存大小", "self.xminBar.datetime = bar.datetime # 累加老K线 else: self.xminBar.high = max(self.xminBar.high, bar.high)", ":return: \"\"\" if self.logger: self.logger.error(content , error_id) ''' 主要Engine '''", "清空老K线缓存对象 self.xsecBar = BarData() newSecond = True # 初始化新多少秒的K线数据 if", "EMPTY_INT # 持仓量 ''' engine的基础类 ''' class EngineBase(object): #---------------------------------------------------------------------- def", "if self.logger: self.logger.error(content , error_id) ''' 主要Engine ''' class DataEngine(EngineBase):", "= bar.datetime else: self.dayBar.high = max(self.dayBar.high , bar.high) self.dayBar.low =", "日期 20151009 self.datetime = None # python的datetime时间对象 # 常规行情 self.openPrice", "(tick.volume - self.lastSecondTick.volume) # 当前Tick内的成交量 # 缓存 secondTick 对象 self.lastSecondTick", 
"self.dayBar.datetime.strftime('%H:%M:%S') # 说明是新的一天了 # 先推送昨天过去 self.onDayBar( self.dayBar) self.dayBar = BarData()", "= EMPTY_INT # 今天总成交量 self.openInterest = EMPTY_INT # 持仓量 self.time", "= EMPTY_STRING # 时间 11:20:56.5 self.date = EMPTY_STRING # 日期", "= BarData() newMinute = True # 初始化新一分钟的K线数据 if newMinute: self.bar.vtSymbol", ") self.logPath = os.path.join(self.LogDir , logName) self.now_debug = in_debug if", "newSecond = True elif self.xsecBar.datetime.second != tick.datetime.second and ( (tick.datetime.second)", "当前K线内的成交量 # 缓存Tick self.lastTick = tick #---------------------------------------------------------------------- def updateSecond(self, tick", "tick.symbol self.bar.exchange = tick.exchange self.bar.open = tick.lastPrice self.bar.high = tick.lastPrice", "self.onXsecBar(self.xsecBar) # 清空老K线缓存对象 self.xsecBar = BarData() newSecond = True #", "1) % self.xhour ) == 0: # 可以用X整除 # 生成上一X分钟K线的时间戳", "self.dayBar.low = bar.low self.dayBar.datetime = bar.datetime elif not self.dayBar: self.dayBar", "self.askPrice3 = EMPTY_FLOAT self.askPrice4 = EMPTY_FLOAT self.askPrice5 = EMPTY_FLOAT self.bidVolume1", "self.dayBar.close = bar.close self.dayBar.openInterest = bar.openInterest self.dayBar.volume += float(bar.volume) self.lastDayBar", "(bar.datetime.minute + 1) % self.xmin ) == 0: # 可以用X整除", "return self.openArray #---------------------------------------------------------------------- @property def high(self): \"\"\"获取最高价序列\"\"\" return self.highArray #----------------------------------------------------------------------", "self.now_debug = in_debug if self.now_debug: self.f = open( self.logPath ,", "min(self.xminBar.low, bar.low) # 通用部分 self.xminBar.close = bar.close self.xminBar.openInterest = bar.openInterest", "self.bar.high = max(self.bar.high, tick.lastPrice) self.bar.low = min(self.bar.low, tick.lastPrice) # 通用更新部分", "= self.dbClient[dbName] collection = db[collectionName] if sortKey: cursor = collection.find(d).sort(sortKey,", "self.bidPrice2 = EMPTY_FLOAT 
self.bidPrice3 = EMPTY_FLOAT self.bidPrice4 = EMPTY_FLOAT self.bidPrice5", "推送已经结束的上一分钟K线 self.onBar(self.bar) # 创建新的K线对象 self.bar = BarData() newMinute = True", "= \"w\"): if os.path.exists(self.LogDir) == False: os.mkdir( self.LogDir ) self.logPath", "#---------------------------------------------------------------------- @property def high(self): \"\"\"获取最高价序列\"\"\" return self.highArray #---------------------------------------------------------------------- @property def", "跌停价 # 五档行情 self.bidPrice1 = EMPTY_FLOAT self.bidPrice2 = EMPTY_FLOAT self.bidPrice3", "= EMPTY_INT self.bidVolume4 = EMPTY_INT self.bidVolume5 = EMPTY_INT self.askVolume1 =", "# 将秒和微秒设为0 self.xsecBar.date = self.xsecBar.datetime.strftime('%Y%m%d') self.xsecBar.time = self.xsecBar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上多少秒K线", "d l.append(bar) return l ######################################################################## class BarManager(object): \"\"\" K线合成器,支持: 1.", "tick.lastPrice # 累加更新老一分钟的K线数据 else: self.bar.high = max(self.bar.high, tick.lastPrice) self.bar.low =", "dbQuery(self, dbName, collectionName, d, sortKey='', sortDirection=ASCENDING): \"\"\"从MongoDB中读取数据,d是查询要求,返回的是数据库查询的指针\"\"\" if self.dbClient: db", "sys from market_maker import OrderManager from settings import * import", "self.lowArray = np.zeros(size) self.closeArray = np.zeros(size) self.volumeArray = np.zeros(size) #----------------------------------------------------------------------", "= None # X分钟K线对象 self.xmin = xmin # X的值 self.onXminBar", "init the db self.dbConnect() #---------------------------------------------------------------------- def dbConnect(self): \"\"\"连接MongoDB数据库\"\"\" if not", "self.xsec == 0 ): self.xsecBar.datetime = self.xsecBar.datetime.replace( microsecond=0) # 将秒和微秒设为0", "\" : \" + \"Error msg %s: %s \" %", "# True if count>=size self.openArray = np.zeros(size) # OHLC self.highArray", "self.dbClient: # 读取MongoDB的设置 try: # 设置MongoDB操作的超时时间为0.5秒 self.dbClient = MongoClient(self.host ,", 
"np.zeros(size) self.closeArray = np.zeros(size) self.volumeArray = np.zeros(size) #---------------------------------------------------------------------- def updateBar(self,", "EMPTY_FLOAT = 0.0 EMPTY_INT = 0 #---------------------------------------------------------------------- class LoggerEngine(object): LogDir", "tick.lastPrice self.bar.low = tick.lastPrice # 累加更新老一分钟的K线数据 else: self.bar.high = max(self.bar.high,", "bar.high self.xminBar.low = bar.low self.xminBar.datetime = bar.datetime # 累加老K线 else:", "self.xsecBar: self.xsecBar = BarData() newSecond = True elif self.xsecBar.datetime.second !=", "self.volumeArray[1:self.size] self.openArray[-1] = bar.open self.highArray[-1] = bar.high self.lowArray[-1] = bar.low", "self.size: self.inited = True self.openArray[0:self.size-1] = self.openArray[1:self.size] self.highArray[0:self.size-1] = self.highArray[1:self.size]", "self.xminBar = None # X分钟K线对象 self.xmin = xmin # X的值", "bar ######################################################################## class ArrayManager(object): \"\"\" K线序列管理工具,负责: 1. K线时间序列的维护 2. 
常用技术指标的计算", "0: # 可以用X整除 # 生成上一X分钟K线的时间戳 self.xhourBar.datetime = self.xhourBar.datetime.replace(second=0, microsecond=0) #", ":param content: :return: \"\"\" if self.logger: self.logger.error(content , error_id) '''", "EMPTY_INT # 今天总成交量 self.openInterest = EMPTY_INT # 持仓量 self.time =", "@property def open(self): \"\"\"获取开盘价序列\"\"\" return self.openArray #---------------------------------------------------------------------- @property def high(self):", "os.path.join(self.LogDir , logName) self.now_debug = in_debug if self.now_debug: self.f =", "= tick.symbol self.xsecBar.exchange = tick.exchange self.xsecBar.open = tick.lastPrice self.xsecBar.high =", "self.xminBar.datetime.strftime('%Y%m%d') self.xminBar.time = self.xminBar.datetime.strftime('%H:%M:%S') # 推送 self.onXminBar(self.xminBar) # 清空老K线缓存对象 self.xminBar", "self.xminBar.low = bar.low self.xminBar.datetime = bar.datetime # 累加老K线 else: self.xminBar.high", "# 推送 self.onXminBar(self.xminBar) # 清空老K线缓存对象 self.xminBar = None #---------------------------------------------------------------------- def", "self.datetime = None # python的datetime时间对象 self.volume = EMPTY_INT # 成交量", "= tick.vtSymbol self.bar.symbol = tick.symbol self.bar.exchange = tick.exchange self.bar.open =", "if not self.xsecBar: self.xsecBar = BarData() newSecond = True elif", "self.xhourBar.date = self.xhourBar.datetime.strftime('%Y%m%d') self.xhourBar.time = self.xhourBar.datetime.strftime('%H:%M:%S') # 推送 self.onXhourBar(self.xhourBar) #", "# 先推送昨天过去 self.onDayBar( self.dayBar) self.dayBar = BarData() self.dayBar.vtSymbol = bar.vtSymbol", "\"\"\"1小时K线更新\"\"\" # 尚未创建对象 if not self.xhourBar: self.xhourBar = BarData() self.xhourBar.vtSymbol", "def updateHourBar(self , bar): \"\"\"1小时K线更新\"\"\" # 尚未创建对象 if not self.xhourBar:", "today_datetime = datetime.now() start_datetime = today_datetime - timedelta( days =", "GLOBAL_MONGO_PORT): super(DataEngine, self).__init__() self.host = _host self.port = _port #", "= \"\" EMPTY_FLOAT = 0.0 EMPTY_INT = 0 
#---------------------------------------------------------------------- class", "self.dayBar = None # 一个交易日的bar对象 self.onDayBar = onDayBar # 交易日K线的回调函数", "= self.dayBar.datetime.strftime('%H:%M:%S') # 说明是新的一天了 # 先推送昨天过去 self.onDayBar( self.dayBar) self.dayBar =", "import numpy as np ######################################################################################################################## # constants EXCHANGE_BITMEX = \"BITMEX\"", "content): if self.logger: self.logger.info(content) #---------------------------------------------------------------------- def writeError(self, content , error_id", "( (bar.datetime.minute + 1) % self.xmin ) == 0: #", "= EMPTY_INT self.askVolume3 = EMPTY_INT self.askVolume4 = EMPTY_INT self.askVolume5 =", "# 成交量 self.openInterest = EMPTY_INT # 持仓量 ''' engine的基础类 '''", "self.xminBar.open = bar.open self.xminBar.high = bar.high self.xminBar.low = bar.low self.xminBar.datetime", "= EMPTY_FLOAT self.askPrice5 = EMPTY_FLOAT self.bidVolume1 = EMPTY_INT self.bidVolume2 =", "self.volumeArray = np.zeros(size) #---------------------------------------------------------------------- def updateBar(self, bar): \"\"\"更新K线\"\"\" self.count +=", "# 交易所代码 self.vtSymbol = EMPTY_STRING # 合约在vt系统中的唯一代码,通常是 合约代码.交易所代码 # 成交数据", "self.now_debug: self.f = open( self.logPath , open_md) #---------------------------------------------------------------------- def error(self,", "( (self.lastDayBar.time <= \"15:30:00\" and bar.time >= \"15:30:00\") \\ or", "= min(self.bar.low, tick.lastPrice) # 通用更新部分 self.bar.close = tick.lastPrice self.bar.datetime =", "= in_debug if self.now_debug: self.f = open( self.logPath , open_md)", "for d in barData: bar = BarData() bar.__dict__ = d", "self.writeLog( u'fail in db connection') #---------------------------------------------------------------------- def dbQuery(self, dbName, collectionName,", "今天总成交量 self.openInterest = EMPTY_INT # 持仓量 self.time = EMPTY_STRING #", "# 交易所 self.open = EMPTY_FLOAT # OHLC self.high = 
EMPTY_FLOAT", "logName) self.now_debug = in_debug if self.now_debug: self.f = open( self.logPath", "self.host = _host self.port = _port # MongoDB数据库相关 self.dbClient =", "% self.xsec == 0 ): self.xsecBar.datetime = self.xsecBar.datetime.replace( microsecond=0) #", "datetime import datetime , timedelta import numpy as np ########################################################################################################################", "xmin=0 , xhour=0, onXminBar=None , onXhourBar = None, onDayBar=None): \"\"\"Constructor\"\"\"", "合约代码 self.exchange = EMPTY_STRING # 交易所代码 self.vtSymbol = EMPTY_STRING #", "= EMPTY_FLOAT self.upperLimit = EMPTY_FLOAT # 涨停价 self.lowerLimit = EMPTY_FLOAT", "self.xhourBar.datetime = bar.datetime else: self.xhourBar.high = max(self.xhourBar.high, bar.high) self.xhourBar.low =", "self.lastDayBar = bar ######################################################################## class ArrayManager(object): \"\"\" K线序列管理工具,负责: 1. K线时间序列的维护", "= EMPTY_INT self.askVolume2 = EMPTY_INT self.askVolume3 = EMPTY_INT self.askVolume4 =", "self.count += 1 if not self.inited and self.count >= self.size:", "\"Error msg %s: %s \" % (str(error_id) , msg) +", "= bar.high self.xhourBar.low = bar.low self.xhourBar.datetime = bar.datetime else: self.xhourBar.high", "######################################################################## class ArrayManager(object): \"\"\" K线序列管理工具,负责: 1. K线时间序列的维护 2. 
常用技术指标的计算 \"\"\"", "最新成交量 self.volume = EMPTY_INT # 今天总成交量 self.openInterest = EMPTY_INT #", "__init__(self, onBar, xsec=0, onXsecBar=None , xmin=0 , xhour=0, onXminBar=None ,", "= EMPTY_INT self.bidVolume2 = EMPTY_INT self.bidVolume3 = EMPTY_INT self.bidVolume4 =", "self.openArray #---------------------------------------------------------------------- @property def high(self): \"\"\"获取最高价序列\"\"\" return self.highArray #---------------------------------------------------------------------- @property", "collection.find(d) if cursor: return list(cursor) else: return [] else: self.writeLog(u'db", "self.bar: self.bar = BarData() newMinute = True # 新的一分钟 elif", "EMPTY_INT = 0 #---------------------------------------------------------------------- class LoggerEngine(object): LogDir = \"LogDir\" #----------------------------------------------------------------------", "self.askPrice4 = EMPTY_FLOAT self.askPrice5 = EMPTY_FLOAT self.bidVolume1 = EMPTY_INT self.bidVolume2", "if self.now_debug: self.f.write(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") + \" : \" + msg", "max(self.xminBar.high, bar.high) self.xminBar.low = min(self.xminBar.low, bar.low) # 通用部分 self.xminBar.close =", ">= \"15:30:00\") \\ or (self.lastDayBar.time <= \"15:30:00\" and bar.time <=", "and bar.time <= self.lastDayBar.time )): self.dayBar.datetime = self.dayBar.datetime.replace(second=0, microsecond=0) #", "bar.datetime elif not self.dayBar: self.dayBar = BarData() self.dayBar.vtSymbol = bar.vtSymbol", "bar.low self.xhourBar.datetime = bar.datetime else: self.xhourBar.high = max(self.xhourBar.high, bar.high) self.xhourBar.low", "tick.exchange self.xsecBar.open = tick.lastPrice self.xsecBar.high = tick.lastPrice self.xsecBar.low = tick.lastPrice", "# 尚未创建对象 if not self.xminBar: self.xminBar = BarData() self.xminBar.vtSymbol =", "tick.lastPrice) # 通用更新部分 self.xsecBar.close = tick.lastPrice self.xsecBar.openInterest = tick.openInterest if", "# 累加更新老几秒的K线数据 else: self.xsecBar.high = 
max(self.xsecBar.high, tick.lastPrice) self.xsecBar.low = min(self.xsecBar.low,", "尚未创建对象 if not self.xminBar: self.xminBar = BarData() self.xminBar.vtSymbol = bar.vtSymbol", "onBar # 1分钟K线回调函数 self.xsecBar = None # 多少秒K线对象 self.xsec =", "# 对查询出来的数据进行排序 else: cursor = collection.find(d) if cursor: return list(cursor)", "xhour=0, onXminBar=None , onXhourBar = None, onDayBar=None): \"\"\"Constructor\"\"\" self.bar =", "if cursor: return list(cursor) else: return [] else: self.writeLog(u'db query", "self.close = EMPTY_FLOAT self.date = EMPTY_STRING # bar开始的时间,日期 self.time =", "\" + msg + \"\\n\") self.f.flush() #---------------------------------------------------------------------- def close(self): self.f.close()", "l ######################################################################## class BarManager(object): \"\"\" K线合成器,支持: 1. 基于Tick合成1分钟K线 2. 基于1分钟K线合成X分钟K线(X可以是2、3、5、10、15、30、60)", "tick.lastPrice) self.xsecBar.low = min(self.xsecBar.low, tick.lastPrice) # 通用更新部分 self.xsecBar.close = tick.lastPrice", "= min(self.xminBar.low, bar.low) # 通用部分 self.xminBar.close = bar.close self.xminBar.openInterest =", "tick 数据的格式 ''' class TickData(object): #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(TickData,", "+ 1) % self.xmin ) == 0: # 可以用X整除 #", ", connectTimeoutMS=500) # 调用server_info查询服务器状态,防止服务器异常并未连接成功 self.dbClient.server_info() self.writeLog(u'database connection error') except ConnectionFailure:", "= today_datetime - timedelta( days = days) d = {'datetime':{'$gte':start_datetime", "else: self.xhourBar.high = max(self.xhourBar.high, bar.high) self.xhourBar.low = min(self.xhourBar.low, bar.low) #", "else: self.bar.high = max(self.bar.high, tick.lastPrice) self.bar.low = min(self.bar.low, tick.lastPrice) #", "self.dayBar.high = bar.high self.dayBar.low = bar.low self.dayBar.datetime = bar.datetime elif", "onBar, xsec=0, onXsecBar=None , xmin=0 , xhour=0, onXminBar=None , onXhourBar", "cursor 
= collection.find(d).sort(sortKey, sortDirection) # 对查询出来的数据进行排序 else: cursor = collection.find(d)", "bar.low) # 通用部分 self.dayBar.close = bar.close self.dayBar.openInterest = bar.openInterest self.dayBar.volume", "error_id) ''' 主要Engine ''' class DataEngine(EngineBase): #---------------------------------------------------------------------- def __init__(self ,", "microsecond=0) # 将秒和微秒设为0 self.dayBar.date = self.dayBar.datetime.strftime('%Y%m%d') self.dayBar.time = self.dayBar.datetime.strftime('%H:%M:%S') #", "__init__(self, size=100): \"\"\"Constructor\"\"\" self.count = 0 # 缓存计数 self.size =", "= True # 新的一分钟 elif self.bar.datetime.minute != tick.datetime.minute: # 生成上一分钟K线的时间戳", "# MongoDB数据库相关 self.dbClient = None # MongoDB客户端对象 self.logger = LoggerEngine(\"dataEngine.log\")", "\"\"\"连接MongoDB数据库\"\"\" if not self.dbClient: # 读取MongoDB的设置 try: # 设置MongoDB操作的超时时间为0.5秒 self.dbClient", "not self.xhourBar: self.xhourBar = BarData() self.xhourBar.vtSymbol = bar.vtSymbol self.xhourBar.symbol =", "self.dayBar.volume += float(bar.volume) self.lastDayBar = bar ######################################################################## class ArrayManager(object): \"\"\"", "microsecond=0) # 将秒和微秒设为0 self.xminBar.date = self.xminBar.datetime.strftime('%Y%m%d') self.xminBar.time = self.xminBar.datetime.strftime('%H:%M:%S') #", "+ \" : \" + \"Error msg %s: %s \"", "self.closeArray[1:self.size] self.volumeArray[0:self.size-1] = self.volumeArray[1:self.size] self.openArray[-1] = bar.open self.highArray[-1] = bar.high", "缓存 secondTick 对象 self.lastSecondTick = tick #---------------------------------------------------------------------- def updateBar(self, bar):", "class EngineBase(object): #---------------------------------------------------------------------- def writeLog(self, content): if self.logger: self.logger.info(content) #----------------------------------------------------------------------", "调用server_info查询服务器状态,防止服务器异常并未连接成功 self.dbClient.server_info() 
self.writeLog(u'database connection error') except ConnectionFailure: self.writeLog( u'fail in", "# 推送已经结束的上多少秒K线 self.onXsecBar(self.xsecBar) # 清空老K线缓存对象 self.xsecBar = BarData() newSecond =", "(self.lastDayBar.time <= \"15:30:00\" and bar.time >= \"15:30:00\") \\ or (self.lastDayBar.time", "= self.bar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上一分钟K线 self.onBar(self.bar) # 创建新的K线对象 self.bar = BarData()", "bar.open self.xhourBar.high = bar.high self.xhourBar.low = bar.low self.xhourBar.datetime = bar.datetime", "= EMPTY_STRING # 合约代码 self.exchange = EMPTY_STRING # 交易所代码 self.vtSymbol", "EMPTY_INT self.askVolume3 = EMPTY_INT self.askVolume4 = EMPTY_INT self.askVolume5 = EMPTY_INT", "= datetime.now() start_datetime = today_datetime - timedelta( days = days)", "bar.datetime else: self.dayBar.high = max(self.dayBar.high , bar.high) self.dayBar.low = min(self.dayBar.low", "将秒和微秒设为0 self.xhourBar.date = self.xhourBar.datetime.strftime('%Y%m%d') self.xhourBar.time = self.xhourBar.datetime.strftime('%H:%M:%S') # 推送 self.onXhourBar(self.xhourBar)", "= None # 多少秒K线对象 self.xsec = xsec # xsec的值 self.onXsecBar", "self.lowArray[1:self.size] self.closeArray[0:self.size-1] = self.closeArray[1:self.size] self.volumeArray[0:self.size-1] = self.volumeArray[1:self.size] self.openArray[-1] = bar.open", "os.mkdir( self.LogDir ) self.logPath = os.path.join(self.LogDir , logName) self.now_debug =", "# 通用部分 self.xminBar.close = bar.close self.xminBar.openInterest = bar.openInterest self.xminBar.volume +=", "= EMPTY_STRING # vt系统代码 self.symbol = EMPTY_STRING # 代码 self.exchange", "today_datetime - timedelta( days = days) d = {'datetime':{'$gte':start_datetime ,", "BarData(object): \"\"\"K线数据\"\"\" #---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(BarData, self).__init__() self.vtSymbol =", "\"\"\"Constructor\"\"\" super(BarData, self).__init__() self.vtSymbol = EMPTY_STRING # vt系统代码 self.symbol =", "bar.time <= 
self.lastDayBar.time )): self.dayBar.datetime = self.dayBar.datetime.replace(second=0, microsecond=0) # 将秒和微秒设为0", "推送 self.onXminBar(self.xminBar) # 清空老K线缓存对象 self.xminBar = None #---------------------------------------------------------------------- def updateHourBar(self", "常用技术指标的计算 \"\"\" #---------------------------------------------------------------------- def __init__(self, size=100): \"\"\"Constructor\"\"\" self.count = 0", "= self.dbQuery(dbName, collectionName, d, 'datetime') l = [] for d", "True , open_md = \"w\"): if os.path.exists(self.LogDir) == False: os.mkdir(", "#---------------------------------------------------------------------- def __init__(self): \"\"\"Constructor\"\"\" super(TickData, self).__init__() # 代码相关 self.symbol =", "= EMPTY_FLOAT # OHLC self.high = EMPTY_FLOAT self.low = EMPTY_FLOAT", "self.xhourBar.openInterest = bar.openInterest self.xhourBar.volume += float(bar.volume) # X分钟已经走完 if (", "K线时间序列的维护 2. 常用技术指标的计算 \"\"\" #---------------------------------------------------------------------- def __init__(self, size=100): \"\"\"Constructor\"\"\" self.count", "EMPTY_STRING # 合约代码 self.exchange = EMPTY_STRING # 交易所代码 self.vtSymbol =", "self.xminBar.date = self.xminBar.datetime.strftime('%Y%m%d') self.xminBar.time = self.xminBar.datetime.strftime('%H:%M:%S') # 推送 self.onXminBar(self.xminBar) #", "EMPTY_INT self.askVolume4 = EMPTY_INT self.askVolume5 = EMPTY_INT ######################################################################## class BarData(object):", "self.askVolume1 = EMPTY_INT self.askVolume2 = EMPTY_INT self.askVolume3 = EMPTY_INT self.askVolume4", "self.openArray[1:self.size] self.highArray[0:self.size-1] = self.highArray[1:self.size] self.lowArray[0:self.size-1] = self.lowArray[1:self.size] self.closeArray[0:self.size-1] = self.closeArray[1:self.size]", "0.0 EMPTY_INT = 0 #---------------------------------------------------------------------- class LoggerEngine(object): LogDir = \"LogDir\"", "self.highArray = np.zeros(size) 
self.lowArray = np.zeros(size) self.closeArray = np.zeros(size) self.volumeArray", "# 一天走完 # 1. 夜盘 , 2.第二天9点 if self.lastDayBar !=", "= EMPTY_FLOAT self.date = EMPTY_STRING # bar开始的时间,日期 self.time = EMPTY_STRING", ") == 0: # 可以用X整除 # 生成上一X分钟K线的时间戳 self.xhourBar.datetime = self.xhourBar.datetime.replace(second=0,", "= EMPTY_INT # 持仓量 ''' engine的基础类 ''' class EngineBase(object): #----------------------------------------------------------------------", "self.bar = None # 1分钟K线对象 self.onBar = onBar # 1分钟K线回调函数", "= onXsecBar # x秒的回调函数 self.xminBar = None # X分钟K线对象 self.xmin", "self.xhourBar.exchange = bar.exchange self.xhourBar.open = bar.open self.xhourBar.high = bar.high self.xhourBar.low", "#---------------------------------------------------------------------- def writeLog(self, content): if self.logger: self.logger.info(content) #---------------------------------------------------------------------- def writeError(self,", "bar.low self.dayBar.datetime = bar.datetime elif not self.dayBar: self.dayBar = BarData()", "self.xminBar.high = bar.high self.xminBar.low = bar.low self.xminBar.datetime = bar.datetime #", "super(BarData, self).__init__() self.vtSymbol = EMPTY_STRING # vt系统代码 self.symbol = EMPTY_STRING", "bar): # 一天走完 # 1. 
夜盘 , 2.第二天9点 if self.lastDayBar", "bar.open self.dayBar.high = bar.high self.dayBar.low = bar.low self.dayBar.datetime = bar.datetime", "self.xsecBar.date = self.xsecBar.datetime.strftime('%Y%m%d') self.xsecBar.time = self.xsecBar.datetime.strftime('%H:%M:%S.%f') # 推送已经结束的上多少秒K线 self.onXsecBar(self.xsecBar) #", "self.xhourBar.low = min(self.xhourBar.low, bar.low) # 通用部分 self.xhourBar.close = bar.close self.xhourBar.openInterest", "= bar.vtSymbol self.xhourBar.symbol = bar.symbol self.xhourBar.exchange = bar.exchange self.xhourBar.open =", "onDayBar # 交易日K线的回调函数 self.lastDayBar = None #---------------------------------------------------------------------- def updateTick(self, tick):", "\"\"\"TICK更新\"\"\" newMinute = False # 默认不是新的一分钟 # 尚未创建对象 if not", "# 将秒和微秒设为0 self.dayBar.date = self.dayBar.datetime.strftime('%Y%m%d') self.dayBar.time = self.dayBar.datetime.strftime('%H:%M:%S') # 说明是新的一天了", "self.lastSecondTick = tick #---------------------------------------------------------------------- def updateBar(self, bar): \"\"\"1分钟K线更新\"\"\" # 尚未创建对象", "xsec # xsec的值 self.onXsecBar = onXsecBar # x秒的回调函数 self.xminBar =", ", timedelta import numpy as np ######################################################################################################################## # constants EXCHANGE_BITMEX", "def writeLog(self, content): if self.logger: self.logger.info(content) #---------------------------------------------------------------------- def writeError(self, content", "self.bidVolume1 = EMPTY_INT self.bidVolume2 = EMPTY_INT self.bidVolume3 = EMPTY_INT self.bidVolume4", "vt系统代码 self.symbol = EMPTY_STRING # 代码 self.exchange = EMPTY_STRING #" ]
[ "Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. \"\"\"Utilities for", "for writing project diagrams \"\"\" def __init__(self, config, styles): self.config", "def get_values(self, obj): \"\"\"get label and shape for classes. The", "% (label, line) for attr in attrs: label = r'%s\\n\\f08%s'", "# This program is distributed in the hope that it", "associations for rel in diagram.get_relationships('association'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, label=rel.name, **self.ass_edges) def", "DotBackend(basename, additionnal_param=layout) self.file_name = file_name def get_title(self, obj): \"\"\"get project", "if obj.shape == 'interface': shape = 'ellipse' else: shape =", "vcg graphs from a diagram definition and a project \"\"\"", "either version 2 of the License, or (at your option)", "label=label, shape='record') return dict(label=label, shape='record') def close_graph(self): \"\"\"print the dot", "methods: label = r'%s\\n\\f10%s()' % (label, func) return dict(label=label, shape=shape)", "<diadefs> \"\"\" for diagram in diadefs: basename = diagram.title.strip().replace(' ',", "writing project diagrams \"\"\" def __init__(self, config, styles): self.config =", "= r'%s|%s\\l|' % (label, r'\\l'.join(obj.attrs)) for func in obj.methods: label", "for func in obj.methods: label = r'%s%s()\\l' % (label, func.name)", "project diagrams \"\"\" def __init__(self, config, styles): self.config = config", "attributes and methods \"\"\" if is_exception(obj.node): label = r'\\fb\\f09%s\\fn' %", "Fifth Floor, Boston, MA 02110-1301, USA. 
\"\"\"Utilities for creating VCG", "in diagram.get_relationships('specialization'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.inh_edges) # implementation links for rel", "(maxlen + 2) label = r'%s\\n\\f%s' % (label, line) for", "printer\"\"\" raise NotImplementedError def get_title(self, obj): \"\"\"get project title\"\"\" raise", "a project \"\"\" def __init__(self, config): styles = [dict(arrowtail='none', arrowhead=\"open\"),", "= '{%s}' % label if is_exception(obj.node): return dict(fontcolor='red', label=label, shape='record')", "self.graph_file = open(file_name, 'w+') self.printer = VCGPrinter(self.graph_file) self.printer.open_graph(title=basename, layoutalgorithm='dfs', late_edge_labels='yes',", "methods + attrs) line = '_' * (maxlen + 2)", "self.printer.emit_edge = self.printer.edge def get_title(self, obj): \"\"\"get project title in", "'interface': shape = 'ellipse' else: shape = 'box' if not", "program; if not, write to the Free Software Foundation, Inc.,", "'{%s}' % label if is_exception(obj.node): return dict(fontcolor='red', label=label, shape='record') return", "write to the Free Software Foundation, Inc., # 51 Franklin", "self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.imp_edges) # generate associations for rel in diagram.get_relationships('association'):", "graph\"\"\" raise NotImplementedError class DotWriter(DiagramWriter): \"\"\"write dot graphs from a", "General Public License along with # this program; if not,", "the hope that it will be useful, but WITHOUT #", "box width for UML like diagram maxlen = max(len(name) for", "def close_graph(self): \"\"\"print the dot graph into <file_name>\"\"\" self.printer.generate(self.file_name) class", "DotBackend from pylint.pyreverse.utils import is_exception class DiagramWriter(object): \"\"\"base class for", "self.write_packages(diagram) self.close_graph() def write_packages(self, diagram): \"\"\"write a package diagram\"\"\" #", "= 
max(len(name) for name in [obj.title] + methods + attrs)", "project title\"\"\" raise NotImplementedError def get_values(self, obj): \"\"\"get label and", "(c) 2008-2013 LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/ -- mailto:<EMAIL>", "file_name, basename): \"\"\"initialize VCGWriter for a UML graph\"\"\" self.graph_file =", "backarrowsize=0), dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', linestyle='dotted', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none',", "version. # # This program is distributed in the hope", "title\"\"\" return obj.title def get_values(self, obj): \"\"\"get label and shape", "'w+') self.printer = VCGPrinter(self.graph_file) self.printer.open_graph(title=basename, layoutalgorithm='dfs', late_edge_labels='yes', port_sharing='no', manhattan_edges='yes') self.printer.emit_node", "DotWriter(DiagramWriter): \"\"\"write dot graphs from a diagram definition and a", "late_edge_labels='yes', port_sharing='no', manhattan_edges='yes') self.printer.emit_node = self.printer.node self.printer.emit_edge = self.printer.edge def", "graphs from a diagram definition and a project \"\"\" def", "get_values(self, obj): \"\"\"get label and shape for classes. The label", "FITNESS # FOR A PARTICULAR PURPOSE. 
See the GNU General", "def set_printer(self, file_name, basename): \"\"\"initialize DotWriter and add options for", "for rel in diagram.get_relationships('depends'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.pkg_edges) def write_classes(self, diagram):", "methods \"\"\" label = obj.title if obj.shape == 'interface': label", "is_exception(obj.node): return dict(fontcolor='red', label=label, shape='record') return dict(label=label, shape='record') def close_graph(self):", "r'\\fb%s\\fn' % obj.title def get_values(self, obj): \"\"\"get label and shape", "r'%s|%s\\l|' % (label, r'\\l'.join(obj.attrs)) for func in obj.methods: label =", "label if is_exception(obj.node): return dict(fontcolor='red', label=label, shape='record') return dict(label=label, shape='record')", "label and shape for classes. The label contains all attributes", "\"\"\" layout = dict(rankdir=\"BT\") self.printer = DotBackend(basename, additionnal_param=layout) self.file_name =", "class VCGWriter(DiagramWriter): \"\"\"write vcg graphs from a diagram definition and", "dot graphs from a diagram definition and a project \"\"\"", "obj.methods] # box width for UML like diagram maxlen =", "shape='record') def close_graph(self): \"\"\"print the dot graph into <file_name>\"\"\" self.printer.generate(self.file_name)", "or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU", "'_' * (maxlen + 2) label = r'%s\\n\\f%s' % (label,", "\"\"\"get project title\"\"\" raise NotImplementedError def get_values(self, obj): \"\"\"get label", "= 'ellipse' else: shape = 'box' if not self.config.only_classnames: attrs", "will be useful, but WITHOUT # ANY WARRANTY; without even", "of the GNU General Public License as published by the", "\"\"\" def __init__(self, config): styles = [dict(arrowtail='none', arrowhead=\"open\"), dict(arrowtail='none', arrowhead='empty'),", "contains all attributes and methods \"\"\" if is_exception(obj.node): label =", "USA. 
\"\"\"Utilities for creating VCG and Dot diagrams\"\"\" from logilab.common.vcgutils", "= styles self.printer = None # defined in set_printer def", "defined in set_printer def write(self, diadefs): \"\"\"write files for <project>", "or (at your option) any later # version. # #", "not, write to the Free Software Foundation, Inc., # 51", "General Public License for more details. # # You should", "free software; you can redistribute it and/or modify it under", "def get_title(self, obj): \"\"\"get project title\"\"\" return obj.title def get_values(self,", "i, obj in enumerate(sorted(diagram.modules(), key=lambda x: x.title)): self.printer.emit_node(i, label=self.get_title(obj), shape='box')", "# # This program is free software; you can redistribute", "the Free Software Foundation, Inc., # 51 Franklin Street, Fifth", "add options for layout. \"\"\" layout = dict(rankdir=\"BT\") self.printer =", "rel.to_object.fig_id, label=rel.name, **self.ass_edges) def set_printer(self, file_name, basename): \"\"\"set printer\"\"\" raise", "obj.attrs methods = [func.name for func in obj.methods] # box", "[dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=0), dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', linestyle='dotted', backarrowsize=10),", "x: x.title)): self.printer.emit_node(i, label=self.get_title(obj), shape='box') obj.fig_id = i # package", "label if not self.config.only_classnames: label = r'%s|%s\\l|' % (label, r'\\l'.join(obj.attrs))", "and Dot diagrams\"\"\" from logilab.common.vcgutils import VCGPrinter from logilab.common.graph import", "file_name, basename): \"\"\"initialize DotWriter and add options for layout. 
\"\"\"", "% label if not self.config.only_classnames: label = r'%s|%s\\l|' % (label,", "it will be useful, but WITHOUT # ANY WARRANTY; without", "# generate associations for rel in diagram.get_relationships('association'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, label=rel.name,", "dict(label=label, shape=shape) def close_graph(self): \"\"\"close graph and file\"\"\" self.printer.close_graph() self.graph_file.close()", "links for rel in diagram.get_relationships('implements'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.imp_edges) # generate", "maxlen = max(len(name) for name in [obj.title] + methods +", "License along with # this program; if not, write to", "% label if is_exception(obj.node): return dict(fontcolor='red', label=label, shape='record') return dict(label=label,", "for classes. The label contains all attributes and methods \"\"\"", "r'%s\\n\\f%s' % (label, line) for attr in attrs: label =", "\"\"\"Utilities for creating VCG and Dot diagrams\"\"\" from logilab.common.vcgutils import", "diagram maxlen = max(len(name) for name in [obj.title] + methods", "self.printer = None # defined in set_printer def write(self, diadefs):", "results for i, obj in enumerate(sorted(diagram.modules(), key=lambda x: x.title)): self.printer.emit_node(i,", "graph\"\"\" self.graph_file = open(file_name, 'w+') self.printer = VCGPrinter(self.graph_file) self.printer.open_graph(title=basename, layoutalgorithm='dfs',", "and methods \"\"\" label = obj.title if obj.shape == 'interface':", "] DiagramWriter.__init__(self, config, styles) def set_printer(self, file_name, basename): \"\"\"initialize VCGWriter", "diagram.TYPE == 'class': self.write_classes(diagram) else: self.write_packages(diagram) self.close_graph() def write_packages(self, diagram):", "return dict(label=label, shape='record') def close_graph(self): \"\"\"print the dot graph into", "% (label, line) for func in methods: label = r'%s\\n\\f10%s()'", "\"\"\"print 
the dot graph into <file_name>\"\"\" self.printer.generate(self.file_name) class VCGWriter(DiagramWriter): \"\"\"write", "**self.pkg_edges) def write_classes(self, diagram): \"\"\"write a class diagram\"\"\" # sorted", "+ 2) label = r'%s\\n\\f%s' % (label, line) for attr", "% (label, func.name) label = '{%s}' % label if is_exception(obj.node):", "hope that it will be useful, but WITHOUT # ANY", "predictable (hence testable) results for i, obj in enumerate(sorted(diagram.objects, key=lambda", "and methods \"\"\" if is_exception(obj.node): label = r'\\fb\\f09%s\\fn' % obj.title", "styles = [dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=0), dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none',", "= '_' * (maxlen + 2) label = r'%s\\n\\f%s' %", "\"\"\"base class for writing project diagrams \"\"\" def __init__(self, config,", "# package dependencies for rel in diagram.get_relationships('depends'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.pkg_edges)", "label = r'%s|%s\\l|' % (label, r'\\l'.join(obj.attrs)) for func in obj.methods:", "obj): \"\"\"get project title\"\"\" raise NotImplementedError def get_values(self, obj): \"\"\"get", "(label, func.name) label = '{%s}' % label if is_exception(obj.node): return", "-- mailto:<EMAIL> # # This program is free software; you", "in diagram.get_relationships('depends'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.pkg_edges) def write_classes(self, diagram): \"\"\"write a", "diagram.get_relationships('depends'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.pkg_edges) def write_classes(self, diagram): \"\"\"write a class", "# the terms of the GNU General Public License as", "License for more details. 
# # You should have received", "[obj.title] + methods + attrs) line = '_' * (maxlen", "% obj.title else: label = r'\\fb%s\\fn' % obj.title if obj.shape", "Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston,", "= r'\\fb\\f09%s\\fn' % obj.title else: label = r'\\fb%s\\fn' % obj.title", "\"\"\"finalize the graph\"\"\" raise NotImplementedError class DotWriter(DiagramWriter): \"\"\"write dot graphs", "Dot diagrams\"\"\" from logilab.common.vcgutils import VCGPrinter from logilab.common.graph import DotBackend", "return obj.title def get_values(self, obj): \"\"\"get label and shape for", "diagrams\"\"\" from logilab.common.vcgutils import VCGPrinter from logilab.common.graph import DotBackend from", "your option) any later # version. # # This program", "obj.fig_id = i # inheritance links for rel in diagram.get_relationships('specialization'):", "styles self.printer = None # defined in set_printer def write(self,", "= VCGPrinter(self.graph_file) self.printer.open_graph(title=basename, layoutalgorithm='dfs', late_edge_labels='yes', port_sharing='no', manhattan_edges='yes') self.printer.emit_node = self.printer.node", "self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.inh_edges) # implementation links for rel in diagram.get_relationships('implements'):", "for rel in diagram.get_relationships('implements'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.imp_edges) # generate associations", "generate associations for rel in diagram.get_relationships('association'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, label=rel.name, **self.ass_edges)", "label and shape for classes.\"\"\" raise NotImplementedError def close_graph(self): \"\"\"finalize", "__init__(self, config, styles): self.config = config self.pkg_edges, self.inh_edges, self.imp_edges, self.ass_edges", "= r'%s\\n\\f%s' % (label, line) for func in methods: label", "terms of the GNU General Public License as published by", 
"def write(self, diadefs): \"\"\"write files for <project> according to <diadefs>", "details. # # You should have received a copy of", "backarrowstyle='none', linestyle='dotted', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', textcolor='green'), ] DiagramWriter.__init__(self, config, styles)", "additionnal_param=layout) self.file_name = file_name def get_title(self, obj): \"\"\"get project title\"\"\"", "Copyright (c) 2008-2013 LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/ --", "This program is free software; you can redistribute it and/or", "Public License as published by the Free Software # Foundation;", "obj.title if obj.shape == 'interface': label = u'«interface»\\\\n%s' % label", "shape for classes. The label contains all attributes and methods", "\"\"\"write files for <project> according to <diadefs> \"\"\" for diagram", "style='dashed'), dict(fontcolor='green', arrowtail='none', arrowhead='diamond', style='solid'), ] DiagramWriter.__init__(self, config, styles) def", "port_sharing='no', manhattan_edges='yes') self.printer.emit_node = self.printer.node self.printer.emit_edge = self.printer.edge def get_title(self,", "label contains all attributes and methods \"\"\" if is_exception(obj.node): label", "= None # defined in set_printer def write(self, diadefs): \"\"\"write", "set_printer(self, file_name, basename): \"\"\"set printer\"\"\" raise NotImplementedError def get_title(self, obj):", "label = u'«interface»\\\\n%s' % label if not self.config.only_classnames: label =", "def set_printer(self, file_name, basename): \"\"\"set printer\"\"\" raise NotImplementedError def get_title(self,", "inheritance links for rel in diagram.get_relationships('specialization'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.inh_edges) #", "dict(fontcolor='green', arrowtail='none', arrowhead='diamond', style='solid'), ] DiagramWriter.__init__(self, config, styles) def set_printer(self,", "# inheritance links for rel in 
diagram.get_relationships('specialization'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.inh_edges)", "even the implied warranty of MERCHANTABILITY or FITNESS # FOR", "= self.printer.edge def get_title(self, obj): \"\"\"get project title in vcg", "the GNU General Public License for more details. # #", "self.printer.emit_node = self.printer.node self.printer.emit_edge = self.printer.edge def get_title(self, obj): \"\"\"get", "License as published by the Free Software # Foundation; either", "= dict(rankdir=\"BT\") self.printer = DotBackend(basename, additionnal_param=layout) self.file_name = file_name def", "# this program; if not, write to the Free Software", "arrowhead='empty', style='dashed'), dict(fontcolor='green', arrowtail='none', arrowhead='diamond', style='solid'), ] DiagramWriter.__init__(self, config, styles)", "and add options for layout. \"\"\" layout = dict(rankdir=\"BT\") self.printer", "def set_printer(self, file_name, basename): \"\"\"initialize VCGWriter for a UML graph\"\"\"", "r'\\l'.join(obj.attrs)) for func in obj.methods: label = r'%s%s()\\l' % (label,", "a project \"\"\" def __init__(self, config): styles = [dict(arrowstyle='solid', backarrowstyle='none',", "rel.to_object.fig_id, **self.imp_edges) # generate associations for rel in diagram.get_relationships('association'): self.printer.emit_edge(rel.from_object.fig_id,", "rel in diagram.get_relationships('specialization'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.inh_edges) # implementation links for", "from a diagram definition and a project \"\"\" def __init__(self,", "**self.inh_edges) # implementation links for rel in diagram.get_relationships('implements'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,", "manhattan_edges='yes') self.printer.emit_node = self.printer.node self.printer.emit_edge = self.printer.edge def get_title(self, obj):", "not self.config.only_classnames: label = r'%s|%s\\l|' % 
(label, r'\\l'.join(obj.attrs)) for func", "\"\"\"set printer\"\"\" raise NotImplementedError def get_title(self, obj): \"\"\"get project title\"\"\"", "ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or", "'%s.%s' % (basename, self.config.output_format) self.set_printer(file_name, basename) if diagram.TYPE == 'class':", "from logilab.common.graph import DotBackend from pylint.pyreverse.utils import is_exception class DiagramWriter(object):", "', '_') file_name = '%s.%s' % (basename, self.config.output_format) self.set_printer(file_name, basename)", "classes.\"\"\" raise NotImplementedError def close_graph(self): \"\"\"finalize the graph\"\"\" raise NotImplementedError", "received a copy of the GNU General Public License along", "def get_values(self, obj): \"\"\"get label and shape for classes.\"\"\" raise", "# Copyright (c) 2008-2013 LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/", "= r'%s%s()\\l' % (label, func.name) label = '{%s}' % label", "project \"\"\" def __init__(self, config): styles = [dict(arrowtail='none', arrowhead=\"open\"), dict(arrowtail='none',", "set_printer(self, file_name, basename): \"\"\"initialize DotWriter and add options for layout.", "get predictable (hence testable) results for i, obj in enumerate(sorted(diagram.objects,", "shape for classes.\"\"\" raise NotImplementedError def close_graph(self): \"\"\"finalize the graph\"\"\"", "class DotWriter(DiagramWriter): \"\"\"write dot graphs from a diagram definition and", "= diagram.title.strip().replace(' ', '_') file_name = '%s.%s' % (basename, self.config.output_format)", "= obj.title if obj.shape == 'interface': label = u'«interface»\\\\n%s' %", "methods \"\"\" if is_exception(obj.node): label = r'\\fb\\f09%s\\fn' % obj.title else:", "# -*- coding: utf-8 -*- # Copyright (c) 2008-2013 LOGILAB", "VCGPrinter from logilab.common.graph import DotBackend from pylint.pyreverse.utils import is_exception class", "r'\\fb%s\\fn' % obj.title if obj.shape == 'interface': shape 
= 'ellipse'", "write(self, diadefs): \"\"\"write files for <project> according to <diadefs> \"\"\"", "more details. # # You should have received a copy", "Street, Fifth Floor, Boston, MA 02110-1301, USA. \"\"\"Utilities for creating", "else: self.write_packages(diagram) self.close_graph() def write_packages(self, diagram): \"\"\"write a package diagram\"\"\"", "func in methods: label = r'%s\\n\\f10%s()' % (label, func) return", "diagram definition and a project \"\"\" def __init__(self, config): styles", "\"\"\" def __init__(self, config, styles): self.config = config self.pkg_edges, self.inh_edges,", "self.config.only_classnames: attrs = obj.attrs methods = [func.name for func in", "% (label, r'\\l'.join(obj.attrs)) for func in obj.methods: label = r'%s%s()\\l'", "NotImplementedError def get_values(self, obj): \"\"\"get label and shape for classes.\"\"\"", "basename): \"\"\"initialize VCGWriter for a UML graph\"\"\" self.graph_file = open(file_name,", "testable) results for i, obj in enumerate(sorted(diagram.objects, key=lambda x: x.title)):", "(hence testable) results for i, obj in enumerate(sorted(diagram.modules(), key=lambda x:", "= open(file_name, 'w+') self.printer = VCGPrinter(self.graph_file) self.printer.open_graph(title=basename, layoutalgorithm='dfs', late_edge_labels='yes', port_sharing='no',", "methods = [func.name for func in obj.methods] # box width", "if is_exception(obj.node): label = r'\\fb\\f09%s\\fn' % obj.title else: label =", "textcolor='green'), ] DiagramWriter.__init__(self, config, styles) def set_printer(self, file_name, basename): \"\"\"initialize", "config): styles = [dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=0), dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=10), dict(arrowstyle='solid',", "config): styles = [dict(arrowtail='none', arrowhead=\"open\"), dict(arrowtail='none', arrowhead='empty'), dict(arrowtail='node', arrowhead='empty', style='dashed'),", "# 51 Franklin Street, Fifth Floor, Boston, MA 
02110-1301, USA.", "is free software; you can redistribute it and/or modify it", "r'%s\\n\\f10%s()' % (label, func) return dict(label=label, shape=shape) def close_graph(self): \"\"\"close", "raise NotImplementedError def close_graph(self): \"\"\"finalize the graph\"\"\" raise NotImplementedError class", "raise NotImplementedError class DotWriter(DiagramWriter): \"\"\"write dot graphs from a diagram", "label = r'\\fb%s\\fn' % obj.title if obj.shape == 'interface': shape", "= u'«interface»\\\\n%s' % label if not self.config.only_classnames: label = r'%s|%s\\l|'", "Software # Foundation; either version 2 of the License, or", "basename): \"\"\"initialize DotWriter and add options for layout. \"\"\" layout", "in enumerate(sorted(diagram.objects, key=lambda x: x.title)): self.printer.emit_node(i, **self.get_values(obj)) obj.fig_id = i", "for func in methods: label = r'%s\\n\\f10%s()' % (label, func)", "label = r'%s\\n\\f08%s' % (label, attr) if attrs: label =", "# # This program is distributed in the hope that", "a copy of the GNU General Public License along with", "set_printer(self, file_name, basename): \"\"\"initialize VCGWriter for a UML graph\"\"\" self.graph_file", "shape='record') return dict(label=label, shape='record') def close_graph(self): \"\"\"print the dot graph", "mailto:<EMAIL> # # This program is free software; you can", "Free Software # Foundation; either version 2 of the License,", "DiagramWriter.__init__(self, config, styles) def set_printer(self, file_name, basename): \"\"\"initialize VCGWriter for", "label = r'\\fb\\f09%s\\fn' % obj.title else: label = r'\\fb%s\\fn' %", "= r'%s\\n\\f10%s()' % (label, func) return dict(label=label, shape=shape) def close_graph(self):", "redistribute it and/or modify it under # the terms of", "warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE.", "basename): \"\"\"set printer\"\"\" raise NotImplementedError def get_title(self, obj): \"\"\"get project", "(label, line) for attr in attrs: label = 
r'%s\\n\\f08%s' %", "self.imp_edges, self.ass_edges = styles self.printer = None # defined in", "PARTICULAR PURPOSE. See the GNU General Public License for more", "of the GNU General Public License along with # this", "This program is distributed in the hope that it will", "MA 02110-1301, USA. \"\"\"Utilities for creating VCG and Dot diagrams\"\"\"", "dict(arrowtail='none', arrowhead='empty'), dict(arrowtail='node', arrowhead='empty', style='dashed'), dict(fontcolor='green', arrowtail='none', arrowhead='diamond', style='solid'), ]", "== 'interface': shape = 'ellipse' else: shape = 'box' if", "'interface': label = u'«interface»\\\\n%s' % label if not self.config.only_classnames: label", "for i, obj in enumerate(sorted(diagram.modules(), key=lambda x: x.title)): self.printer.emit_node(i, label=self.get_title(obj),", "contains all attributes and methods \"\"\" label = obj.title if", "self.config = config self.pkg_edges, self.inh_edges, self.imp_edges, self.ass_edges = styles self.printer", "self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, label=rel.name, **self.ass_edges) def set_printer(self, file_name, basename): \"\"\"set printer\"\"\"", "# sorted to get predictable (hence testable) results for i,", "get_title(self, obj): \"\"\"get project title\"\"\" raise NotImplementedError def get_values(self, obj):", "func) return dict(label=label, shape=shape) def close_graph(self): \"\"\"close graph and file\"\"\"", "creating VCG and Dot diagrams\"\"\" from logilab.common.vcgutils import VCGPrinter from", "the GNU General Public License as published by the Free", "dict(arrowstyle='solid', backarrowstyle='none', linestyle='dotted', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', textcolor='green'), ] DiagramWriter.__init__(self, config,", "sorted to get predictable (hence testable) results for i, obj", "backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', linestyle='dotted', backarrowsize=10), dict(arrowstyle='solid', 
backarrowstyle='none', textcolor='green'), ] DiagramWriter.__init__(self,", "the GNU General Public License along with # this program;", "**self.get_values(obj)) obj.fig_id = i # inheritance links for rel in", "GNU General Public License as published by the Free Software", "copy of the GNU General Public License along with #", "styles = [dict(arrowtail='none', arrowhead=\"open\"), dict(arrowtail='none', arrowhead='empty'), dict(arrowtail='node', arrowhead='empty', style='dashed'), dict(fontcolor='green',", "close_graph(self): \"\"\"finalize the graph\"\"\" raise NotImplementedError class DotWriter(DiagramWriter): \"\"\"write dot", "predictable (hence testable) results for i, obj in enumerate(sorted(diagram.modules(), key=lambda", "\"\"\"write dot graphs from a diagram definition and a project", "obj.methods: label = r'%s%s()\\l' % (label, func.name) label = '{%s}'", "in obj.methods: label = r'%s%s()\\l' % (label, func.name) label =", "(label, attr) if attrs: label = r'%s\\n\\f%s' % (label, line)", "file_name def get_title(self, obj): \"\"\"get project title\"\"\" return obj.title def", "and/or modify it under # the terms of the GNU", "\"\"\"get label and shape for classes. The label contains all", "FRANCE). # http://www.logilab.fr/ -- mailto:<EMAIL> # # This program is", "the License, or (at your option) any later # version.", "\"\"\" def __init__(self, config): styles = [dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=0), dict(arrowstyle='solid',", "\"\"\"initialize VCGWriter for a UML graph\"\"\" self.graph_file = open(file_name, 'w+')", "program is distributed in the hope that it will be", "of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See", "shape = 'ellipse' else: shape = 'box' if not self.config.only_classnames:", "else: label = r'\\fb%s\\fn' % obj.title if obj.shape == 'interface':", "diagram): \"\"\"write a class diagram\"\"\" # sorted to get predictable", "label = r'%s%s()\\l' % (label, func.name) label = '{%s}' %", "classes. 
The label contains all attributes and methods \"\"\" if", "logilab.common.graph import DotBackend from pylint.pyreverse.utils import is_exception class DiagramWriter(object): \"\"\"base", "for rel in diagram.get_relationships('specialization'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.inh_edges) # implementation links", "self.printer.generate(self.file_name) class VCGWriter(DiagramWriter): \"\"\"write vcg graphs from a diagram definition", "# defined in set_printer def write(self, diadefs): \"\"\"write files for", "enumerate(sorted(diagram.objects, key=lambda x: x.title)): self.printer.emit_node(i, **self.get_values(obj)) obj.fig_id = i #", "file_name, basename): \"\"\"set printer\"\"\" raise NotImplementedError def get_title(self, obj): \"\"\"get", "obj): \"\"\"get label and shape for classes.\"\"\" raise NotImplementedError def", "\"\"\"get project title\"\"\" return obj.title def get_values(self, obj): \"\"\"get label", "title\"\"\" raise NotImplementedError def get_values(self, obj): \"\"\"get label and shape", "PURPOSE. See the GNU General Public License for more details.", "def write_packages(self, diagram): \"\"\"write a package diagram\"\"\" # sorted to", "def __init__(self, config): styles = [dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=0), dict(arrowstyle='solid', backarrowstyle='none',", "config, styles) def set_printer(self, file_name, basename): \"\"\"initialize VCGWriter for a", "get_title(self, obj): \"\"\"get project title in vcg format\"\"\" return r'\\fb%s\\fn'", "None # defined in set_printer def write(self, diadefs): \"\"\"write files", "package diagram\"\"\" # sorted to get predictable (hence testable) results", "for <project> according to <diadefs> \"\"\" for diagram in diadefs:", "file_name = '%s.%s' % (basename, self.config.output_format) self.set_printer(file_name, basename) if diagram.TYPE", "Boston, MA 02110-1301, USA. 
\"\"\"Utilities for creating VCG and Dot", "obj): \"\"\"get project title\"\"\" return obj.title def get_values(self, obj): \"\"\"get", "a diagram definition and a project \"\"\" def __init__(self, config):", "coding: utf-8 -*- # Copyright (c) 2008-2013 LOGILAB S.A. (Paris,", "to get predictable (hence testable) results for i, obj in", "results for i, obj in enumerate(sorted(diagram.objects, key=lambda x: x.title)): self.printer.emit_node(i,", "51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. \"\"\"Utilities", "diadefs): \"\"\"write files for <project> according to <diadefs> \"\"\" for", "diagrams \"\"\" def __init__(self, config, styles): self.config = config self.pkg_edges,", "program is free software; you can redistribute it and/or modify", "self.printer.node self.printer.emit_edge = self.printer.edge def get_title(self, obj): \"\"\"get project title", "shape = 'box' if not self.config.only_classnames: attrs = obj.attrs methods", "attr) if attrs: label = r'%s\\n\\f%s' % (label, line) for", "LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/ -- mailto:<EMAIL> # #", "VCGWriter(DiagramWriter): \"\"\"write vcg graphs from a diagram definition and a", "for more details. 
# # You should have received a", "Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,", "Public License along with # this program; if not, write", "import is_exception class DiagramWriter(object): \"\"\"base class for writing project diagrams", "in enumerate(sorted(diagram.modules(), key=lambda x: x.title)): self.printer.emit_node(i, label=self.get_title(obj), shape='box') obj.fig_id =", "arrowhead='diamond', style='solid'), ] DiagramWriter.__init__(self, config, styles) def set_printer(self, file_name, basename):", "= self.printer.node self.printer.emit_edge = self.printer.edge def get_title(self, obj): \"\"\"get project", "package dependencies for rel in diagram.get_relationships('depends'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.pkg_edges) def", "raise NotImplementedError def get_values(self, obj): \"\"\"get label and shape for", "S.A. (Paris, FRANCE). # http://www.logilab.fr/ -- mailto:<EMAIL> # # This", "any later # version. # # This program is distributed", "NotImplementedError class DotWriter(DiagramWriter): \"\"\"write dot graphs from a diagram definition", "vcg format\"\"\" return r'\\fb%s\\fn' % obj.title def get_values(self, obj): \"\"\"get", "layout = dict(rankdir=\"BT\") self.printer = DotBackend(basename, additionnal_param=layout) self.file_name = file_name", "linestyle='dotted', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', textcolor='green'), ] DiagramWriter.__init__(self, config, styles) def", "software; you can redistribute it and/or modify it under #", "can redistribute it and/or modify it under # the terms", "format\"\"\" return r'\\fb%s\\fn' % obj.title def get_values(self, obj): \"\"\"get label", "that it will be useful, but WITHOUT # ANY WARRANTY;", "diagram.get_relationships('implements'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.imp_edges) # generate associations for rel in", "* (maxlen + 2) label = r'%s\\n\\f%s' % (label, line)", 
"http://www.logilab.fr/ -- mailto:<EMAIL> # # This program is free software;", "[func.name for func in obj.methods] # box width for UML", "self.printer = DotBackend(basename, additionnal_param=layout) self.file_name = file_name def get_title(self, obj):", "diagram.get_relationships('association'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, label=rel.name, **self.ass_edges) def set_printer(self, file_name, basename): \"\"\"set", "'ellipse' else: shape = 'box' if not self.config.only_classnames: attrs =", "\"\"\"get label and shape for classes.\"\"\" raise NotImplementedError def close_graph(self):", "arrowhead='empty'), dict(arrowtail='node', arrowhead='empty', style='dashed'), dict(fontcolor='green', arrowtail='none', arrowhead='diamond', style='solid'), ] DiagramWriter.__init__(self,", "in attrs: label = r'%s\\n\\f08%s' % (label, attr) if attrs:", "self.close_graph() def write_packages(self, diagram): \"\"\"write a package diagram\"\"\" # sorted", "rel in diagram.get_relationships('depends'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.pkg_edges) def write_classes(self, diagram): \"\"\"write", "title in vcg format\"\"\" return r'\\fb%s\\fn' % obj.title def get_values(self,", "self.set_printer(file_name, basename) if diagram.TYPE == 'class': self.write_classes(diagram) else: self.write_packages(diagram) self.close_graph()", "r'\\fb\\f09%s\\fn' % obj.title else: label = r'\\fb%s\\fn' % obj.title if", "all attributes and methods \"\"\" label = obj.title if obj.shape", "and a project \"\"\" def __init__(self, config): styles = [dict(arrowstyle='solid',", "project \"\"\" def __init__(self, config): styles = [dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=0),", "Floor, Boston, MA 02110-1301, USA. 
\"\"\"Utilities for creating VCG and", "self.printer.edge def get_title(self, obj): \"\"\"get project title in vcg format\"\"\"", "VCGWriter for a UML graph\"\"\" self.graph_file = open(file_name, 'w+') self.printer", "layout. \"\"\" layout = dict(rankdir=\"BT\") self.printer = DotBackend(basename, additionnal_param=layout) self.file_name", "line) for func in methods: label = r'%s\\n\\f10%s()' % (label,", "WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS", "is_exception class DiagramWriter(object): \"\"\"base class for writing project diagrams \"\"\"", "by the Free Software # Foundation; either version 2 of", "if attrs: label = r'%s\\n\\f%s' % (label, line) for func", "NotImplementedError def get_title(self, obj): \"\"\"get project title\"\"\" raise NotImplementedError def", "for classes.\"\"\" raise NotImplementedError def close_graph(self): \"\"\"finalize the graph\"\"\" raise", "project title\"\"\" return obj.title def get_values(self, obj): \"\"\"get label and", "UML graph\"\"\" self.graph_file = open(file_name, 'w+') self.printer = VCGPrinter(self.graph_file) self.printer.open_graph(title=basename,", "basename) if diagram.TYPE == 'class': self.write_classes(diagram) else: self.write_packages(diagram) self.close_graph() def", "= r'\\fb%s\\fn' % obj.title if obj.shape == 'interface': shape =", "UML like diagram maxlen = max(len(name) for name in [obj.title]", "line = '_' * (maxlen + 2) label = r'%s\\n\\f%s'", "self.inh_edges, self.imp_edges, self.ass_edges = styles self.printer = None # defined", "to <diadefs> \"\"\" for diagram in diadefs: basename = diagram.title.strip().replace('", "\"\"\"write a package diagram\"\"\" # sorted to get predictable (hence", "x.title)): self.printer.emit_node(i, label=self.get_title(obj), shape='box') obj.fig_id = i # package dependencies", "backarrowstyle='none', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', linestyle='dotted', backarrowsize=10), dict(arrowstyle='solid', 
backarrowstyle='none', textcolor='green'), ]", "self.printer.emit_node(i, label=self.get_title(obj), shape='box') obj.fig_id = i # package dependencies for", "in [obj.title] + methods + attrs) line = '_' *", "GNU General Public License along with # this program; if", "open(file_name, 'w+') self.printer = VCGPrinter(self.graph_file) self.printer.open_graph(title=basename, layoutalgorithm='dfs', late_edge_labels='yes', port_sharing='no', manhattan_edges='yes')", "obj.title else: label = r'\\fb%s\\fn' % obj.title if obj.shape ==", "'box' if not self.config.only_classnames: attrs = obj.attrs methods = [func.name", "class for writing project diagrams \"\"\" def __init__(self, config, styles):", "diagram.title.strip().replace(' ', '_') file_name = '%s.%s' % (basename, self.config.output_format) self.set_printer(file_name,", "MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the", "diagram\"\"\" # sorted to get predictable (hence testable) results for", "__init__(self, config): styles = [dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=0), dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=10),", "get predictable (hence testable) results for i, obj in enumerate(sorted(diagram.modules(),", "(at your option) any later # version. 
# # This", "# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY", "if is_exception(obj.node): return dict(fontcolor='red', label=label, shape='record') return dict(label=label, shape='record') def", "a UML graph\"\"\" self.graph_file = open(file_name, 'w+') self.printer = VCGPrinter(self.graph_file)", "self.write_classes(diagram) else: self.write_packages(diagram) self.close_graph() def write_packages(self, diagram): \"\"\"write a package", "u'«interface»\\\\n%s' % label if not self.config.only_classnames: label = r'%s|%s\\l|' %", "styles): self.config = config self.pkg_edges, self.inh_edges, self.imp_edges, self.ass_edges = styles", "enumerate(sorted(diagram.modules(), key=lambda x: x.title)): self.printer.emit_node(i, label=self.get_title(obj), shape='box') obj.fig_id = i", "links for rel in diagram.get_relationships('specialization'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.inh_edges) # implementation", "raise NotImplementedError def get_title(self, obj): \"\"\"get project title\"\"\" raise NotImplementedError", "= i # inheritance links for rel in diagram.get_relationships('specialization'): self.printer.emit_edge(rel.from_object.fig_id,", "project title in vcg format\"\"\" return r'\\fb%s\\fn' % obj.title def", "The label contains all attributes and methods \"\"\" label =", "close_graph(self): \"\"\"print the dot graph into <file_name>\"\"\" self.printer.generate(self.file_name) class VCGWriter(DiagramWriter):", "02110-1301, USA. 
\"\"\"Utilities for creating VCG and Dot diagrams\"\"\" from", "r'%s\\n\\f%s' % (label, line) for func in methods: label =", "== 'class': self.write_classes(diagram) else: self.write_packages(diagram) self.close_graph() def write_packages(self, diagram): \"\"\"write", "NotImplementedError def close_graph(self): \"\"\"finalize the graph\"\"\" raise NotImplementedError class DotWriter(DiagramWriter):", "You should have received a copy of the GNU General", "Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor,", "config, styles): self.config = config self.pkg_edges, self.inh_edges, self.imp_edges, self.ass_edges =", "under # the terms of the GNU General Public License", "'class': self.write_classes(diagram) else: self.write_packages(diagram) self.close_graph() def write_packages(self, diagram): \"\"\"write a", "class diagram\"\"\" # sorted to get predictable (hence testable) results", "obj in enumerate(sorted(diagram.objects, key=lambda x: x.title)): self.printer.emit_node(i, **self.get_values(obj)) obj.fig_id =", "(label, r'\\l'.join(obj.attrs)) for func in obj.methods: label = r'%s%s()\\l' %", "License, or (at your option) any later # version. #", "self.pkg_edges, self.inh_edges, self.imp_edges, self.ass_edges = styles self.printer = None #", "implementation links for rel in diagram.get_relationships('implements'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.imp_edges) #", "and shape for classes. The label contains all attributes and", "max(len(name) for name in [obj.title] + methods + attrs) line", "in set_printer def write(self, diadefs): \"\"\"write files for <project> according", "obj): \"\"\"get label and shape for classes. 
The label contains", "self.config.only_classnames: label = r'%s|%s\\l|' % (label, r'\\l'.join(obj.attrs)) for func in", "if not, write to the Free Software Foundation, Inc., #", "func in obj.methods: label = r'%s%s()\\l' % (label, func.name) label", "label = '{%s}' % label if is_exception(obj.node): return dict(fontcolor='red', label=label,", "return dict(label=label, shape=shape) def close_graph(self): \"\"\"close graph and file\"\"\" self.printer.close_graph()", "= obj.attrs methods = [func.name for func in obj.methods] #", "(Paris, FRANCE). # http://www.logilab.fr/ -- mailto:<EMAIL> # # This program", "func in obj.methods] # box width for UML like diagram", "it under # the terms of the GNU General Public", "dict(arrowtail='node', arrowhead='empty', style='dashed'), dict(fontcolor='green', arrowtail='none', arrowhead='diamond', style='solid'), ] DiagramWriter.__init__(self, config,", "it and/or modify it under # the terms of the", "dict(fontcolor='red', label=label, shape='record') return dict(label=label, shape='record') def close_graph(self): \"\"\"print the", "dict(label=label, shape='record') def close_graph(self): \"\"\"print the dot graph into <file_name>\"\"\"", "self.printer.open_graph(title=basename, layoutalgorithm='dfs', late_edge_labels='yes', port_sharing='no', manhattan_edges='yes') self.printer.emit_node = self.printer.node self.printer.emit_edge =", "% (basename, self.config.output_format) self.set_printer(file_name, basename) if diagram.TYPE == 'class': self.write_classes(diagram)", "else: shape = 'box' if not self.config.only_classnames: attrs = obj.attrs", "# You should have received a copy of the GNU", "(label, func) return dict(label=label, shape=shape) def close_graph(self): \"\"\"close graph and", "label=self.get_title(obj), shape='box') obj.fig_id = i # package dependencies for rel", "r'%s\\n\\f08%s' % (label, attr) if attrs: label = r'%s\\n\\f%s' %", "label = r'%s\\n\\f10%s()' % (label, func) return dict(label=label, shape=shape) def", "+ 
methods + attrs) line = '_' * (maxlen +", "# Foundation; either version 2 of the License, or (at", "config, styles) def set_printer(self, file_name, basename): \"\"\"initialize DotWriter and add", "self.printer = VCGPrinter(self.graph_file) self.printer.open_graph(title=basename, layoutalgorithm='dfs', late_edge_labels='yes', port_sharing='no', manhattan_edges='yes') self.printer.emit_node =", "WITHOUT # ANY WARRANTY; without even the implied warranty of", "implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR", "classes. The label contains all attributes and methods \"\"\" label", "label = r'%s\\n\\f%s' % (label, line) for attr in attrs:", "obj.fig_id = i # package dependencies for rel in diagram.get_relationships('depends'):", "along with # this program; if not, write to the", "\"\"\"get project title in vcg format\"\"\" return r'\\fb%s\\fn' % obj.title", "+ attrs) line = '_' * (maxlen + 2) label", "return r'\\fb%s\\fn' % obj.title def get_values(self, obj): \"\"\"get label and", "should have received a copy of the GNU General Public", "label = r'%s\\n\\f%s' % (label, line) for func in methods:", "DiagramWriter.__init__(self, config, styles) def set_printer(self, file_name, basename): \"\"\"initialize DotWriter and", "attrs: label = r'%s\\n\\f08%s' % (label, attr) if attrs: label", "2 of the License, or (at your option) any later", "x.title)): self.printer.emit_node(i, **self.get_values(obj)) obj.fig_id = i # inheritance links for", "files for <project> according to <diadefs> \"\"\" for diagram in", "dot graph into <file_name>\"\"\" self.printer.generate(self.file_name) class VCGWriter(DiagramWriter): \"\"\"write vcg graphs", "all attributes and methods \"\"\" if is_exception(obj.node): label = r'\\fb\\f09%s\\fn'", "in diadefs: basename = diagram.title.strip().replace(' ', '_') file_name = '%s.%s'", "def get_title(self, obj): \"\"\"get project title\"\"\" raise NotImplementedError def get_values(self,", "attrs) line = '_' * (maxlen + 2) label =", "FOR 
A PARTICULAR PURPOSE. See the GNU General Public License", "not self.config.only_classnames: attrs = obj.attrs methods = [func.name for func", "styles) def set_printer(self, file_name, basename): \"\"\"initialize DotWriter and add options", "\"\"\" label = obj.title if obj.shape == 'interface': label =", "def __init__(self, config, styles): self.config = config self.pkg_edges, self.inh_edges, self.imp_edges,", "option) any later # version. # # This program is", "[dict(arrowtail='none', arrowhead=\"open\"), dict(arrowtail='none', arrowhead='empty'), dict(arrowtail='node', arrowhead='empty', style='dashed'), dict(fontcolor='green', arrowtail='none', arrowhead='diamond',", "for a UML graph\"\"\" self.graph_file = open(file_name, 'w+') self.printer =", "but WITHOUT # ANY WARRANTY; without even the implied warranty", "# FOR A PARTICULAR PURPOSE. See the GNU General Public", "= [func.name for func in obj.methods] # box width for", "without even the implied warranty of MERCHANTABILITY or FITNESS #", "arrowhead=\"open\"), dict(arrowtail='none', arrowhead='empty'), dict(arrowtail='node', arrowhead='empty', style='dashed'), dict(fontcolor='green', arrowtail='none', arrowhead='diamond', style='solid'),", "backarrowstyle='none', textcolor='green'), ] DiagramWriter.__init__(self, config, styles) def set_printer(self, file_name, basename):", "obj in enumerate(sorted(diagram.modules(), key=lambda x: x.title)): self.printer.emit_node(i, label=self.get_title(obj), shape='box') obj.fig_id", "\"\"\"write vcg graphs from a diagram definition and a project", "with # this program; if not, write to the Free", "class DiagramWriter(object): \"\"\"base class for writing project diagrams \"\"\" def", "attrs = obj.attrs methods = [func.name for func in obj.methods]", "in the hope that it will be useful, but WITHOUT", "x: x.title)): self.printer.emit_node(i, **self.get_values(obj)) obj.fig_id = i # inheritance links", "dependencies for rel in diagram.get_relationships('depends'): 
self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.pkg_edges) def write_classes(self,", "definition and a project \"\"\" def __init__(self, config): styles =", "line) for attr in attrs: label = r'%s\\n\\f08%s' % (label,", "obj): \"\"\"get project title in vcg format\"\"\" return r'\\fb%s\\fn' %", "arrowtail='none', arrowhead='diamond', style='solid'), ] DiagramWriter.__init__(self, config, styles) def set_printer(self, file_name,", "The label contains all attributes and methods \"\"\" if is_exception(obj.node):", "for name in [obj.title] + methods + attrs) line =", "% (label, attr) if attrs: label = r'%s\\n\\f%s' % (label,", "2008-2013 LOGILAB S.A. (Paris, FRANCE). # http://www.logilab.fr/ -- mailto:<EMAIL> #", "of the License, or (at your option) any later #", "get_title(self, obj): \"\"\"get project title\"\"\" return obj.title def get_values(self, obj):", "backarrowstyle='none', backarrowsize=0), dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', linestyle='dotted', backarrowsize=10), dict(arrowstyle='solid',", "if obj.shape == 'interface': label = u'«interface»\\\\n%s' % label if", "= file_name def get_title(self, obj): \"\"\"get project title\"\"\" return obj.title", "return dict(fontcolor='red', label=label, shape='record') return dict(label=label, shape='record') def close_graph(self): \"\"\"print", "= r'%s\\n\\f08%s' % (label, attr) if attrs: label = r'%s\\n\\f%s'", "i # inheritance links for rel in diagram.get_relationships('specialization'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,", "diadefs: basename = diagram.title.strip().replace(' ', '_') file_name = '%s.%s' %", "key=lambda x: x.title)): self.printer.emit_node(i, **self.get_values(obj)) obj.fig_id = i # inheritance", "See the GNU General Public License for more details. 
#", "in methods: label = r'%s\\n\\f10%s()' % (label, func) return dict(label=label,", "# implementation links for rel in diagram.get_relationships('implements'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.imp_edges)", "diagram): \"\"\"write a package diagram\"\"\" # sorted to get predictable", "backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', textcolor='green'), ] DiagramWriter.__init__(self, config, styles) def set_printer(self,", "distributed in the hope that it will be useful, but", "dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', linestyle='dotted', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', textcolor='green'),", "in diagram.get_relationships('implements'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.imp_edges) # generate associations for rel", "# This program is free software; you can redistribute it", "= i # package dependencies for rel in diagram.get_relationships('depends'): self.printer.emit_edge(rel.from_object.fig_id,", "attrs: label = r'%s\\n\\f%s' % (label, line) for func in", "as published by the Free Software # Foundation; either version", "be useful, but WITHOUT # ANY WARRANTY; without even the", "a package diagram\"\"\" # sorted to get predictable (hence testable)", "] DiagramWriter.__init__(self, config, styles) def set_printer(self, file_name, basename): \"\"\"initialize DotWriter", "Public License for more details. 
# # You should have", "pylint.pyreverse.utils import is_exception class DiagramWriter(object): \"\"\"base class for writing project", "def close_graph(self): \"\"\"finalize the graph\"\"\" raise NotImplementedError class DotWriter(DiagramWriter): \"\"\"write", "config self.pkg_edges, self.inh_edges, self.imp_edges, self.ass_edges = styles self.printer = None", "2) label = r'%s\\n\\f%s' % (label, line) for attr in", "\"\"\"write a class diagram\"\"\" # sorted to get predictable (hence", "the terms of the GNU General Public License as published", "rel in diagram.get_relationships('association'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, label=rel.name, **self.ass_edges) def set_printer(self, file_name,", "modify it under # the terms of the GNU General", "self.printer.emit_node(i, **self.get_values(obj)) obj.fig_id = i # inheritance links for rel", "graph into <file_name>\"\"\" self.printer.generate(self.file_name) class VCGWriter(DiagramWriter): \"\"\"write vcg graphs from", "r'%s%s()\\l' % (label, func.name) label = '{%s}' % label if", "**self.ass_edges) def set_printer(self, file_name, basename): \"\"\"set printer\"\"\" raise NotImplementedError def", "i, obj in enumerate(sorted(diagram.objects, key=lambda x: x.title)): self.printer.emit_node(i, **self.get_values(obj)) obj.fig_id", "in vcg format\"\"\" return r'\\fb%s\\fn' % obj.title def get_values(self, obj):", "to the Free Software Foundation, Inc., # 51 Franklin Street,", "diagram in diadefs: basename = diagram.title.strip().replace(' ', '_') file_name =", "(basename, self.config.output_format) self.set_printer(file_name, basename) if diagram.TYPE == 'class': self.write_classes(diagram) else:", "for diagram in diadefs: basename = diagram.title.strip().replace(' ', '_') file_name", "for func in obj.methods] # box width for UML like", "like diagram maxlen = max(len(name) for name in [obj.title] +", "= '%s.%s' % (basename, self.config.output_format) self.set_printer(file_name, basename) 
if diagram.TYPE ==", "self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.pkg_edges) def write_classes(self, diagram): \"\"\"write a class diagram\"\"\"", "# box width for UML like diagram maxlen = max(len(name)", "version 2 of the License, or (at your option) any", "the implied warranty of MERCHANTABILITY or FITNESS # FOR A", "i # package dependencies for rel in diagram.get_relationships('depends'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,", "# http://www.logilab.fr/ -- mailto:<EMAIL> # # This program is free", "label=rel.name, **self.ass_edges) def set_printer(self, file_name, basename): \"\"\"set printer\"\"\" raise NotImplementedError", "A PARTICULAR PURPOSE. See the GNU General Public License for", "__init__(self, config): styles = [dict(arrowtail='none', arrowhead=\"open\"), dict(arrowtail='none', arrowhead='empty'), dict(arrowtail='node', arrowhead='empty',", "for layout. \"\"\" layout = dict(rankdir=\"BT\") self.printer = DotBackend(basename, additionnal_param=layout)", "attributes and methods \"\"\" label = obj.title if obj.shape ==", "write_classes(self, diagram): \"\"\"write a class diagram\"\"\" # sorted to get", "rel.to_object.fig_id, **self.pkg_edges) def write_classes(self, diagram): \"\"\"write a class diagram\"\"\" #", "\"\"\" if is_exception(obj.node): label = r'\\fb\\f09%s\\fn' % obj.title else: label", "<project> according to <diadefs> \"\"\" for diagram in diadefs: basename", "# version. 
# # This program is distributed in the", "import VCGPrinter from logilab.common.graph import DotBackend from pylint.pyreverse.utils import is_exception", "obj.shape == 'interface': shape = 'ellipse' else: shape = 'box'", "and shape for classes.\"\"\" raise NotImplementedError def close_graph(self): \"\"\"finalize the", "self.file_name = file_name def get_title(self, obj): \"\"\"get project title\"\"\" return", "rel in diagram.get_relationships('implements'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.imp_edges) # generate associations for", "get_values(self, obj): \"\"\"get label and shape for classes.\"\"\" raise NotImplementedError", "dict(rankdir=\"BT\") self.printer = DotBackend(basename, additionnal_param=layout) self.file_name = file_name def get_title(self,", "obj.shape == 'interface': label = u'«interface»\\\\n%s' % label if not", "if not self.config.only_classnames: attrs = obj.attrs methods = [func.name for", "and a project \"\"\" def __init__(self, config): styles = [dict(arrowtail='none',", "= DotBackend(basename, additionnal_param=layout) self.file_name = file_name def get_title(self, obj): \"\"\"get", "you can redistribute it and/or modify it under # the", "a class diagram\"\"\" # sorted to get predictable (hence testable)", "= [dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=0), dict(arrowstyle='solid', backarrowstyle='none', backarrowsize=10), dict(arrowstyle='solid', backarrowstyle='none', linestyle='dotted',", "styles) def set_printer(self, file_name, basename): \"\"\"initialize VCGWriter for a UML", "basename = diagram.title.strip().replace(' ', '_') file_name = '%s.%s' % (basename,", "from logilab.common.vcgutils import VCGPrinter from logilab.common.graph import DotBackend from pylint.pyreverse.utils", "options for layout. 
\"\"\" layout = dict(rankdir=\"BT\") self.printer = DotBackend(basename,", "self.config.output_format) self.set_printer(file_name, basename) if diagram.TYPE == 'class': self.write_classes(diagram) else: self.write_packages(diagram)", "= [dict(arrowtail='none', arrowhead=\"open\"), dict(arrowtail='none', arrowhead='empty'), dict(arrowtail='node', arrowhead='empty', style='dashed'), dict(fontcolor='green', arrowtail='none',", "label contains all attributes and methods \"\"\" label = obj.title", "have received a copy of the GNU General Public License", "shape='box') obj.fig_id = i # package dependencies for rel in", "def __init__(self, config): styles = [dict(arrowtail='none', arrowhead=\"open\"), dict(arrowtail='none', arrowhead='empty'), dict(arrowtail='node',", "def get_title(self, obj): \"\"\"get project title in vcg format\"\"\" return", "name in [obj.title] + methods + attrs) line = '_'", "obj.title if obj.shape == 'interface': shape = 'ellipse' else: shape", "useful, but WITHOUT # ANY WARRANTY; without even the implied", "obj.title def get_values(self, obj): \"\"\"get label and shape for classes.", "label = obj.title if obj.shape == 'interface': label = u'«interface»\\\\n%s'", "is_exception(obj.node): label = r'\\fb\\f09%s\\fn' % obj.title else: label = r'\\fb%s\\fn'", "in obj.methods] # box width for UML like diagram maxlen", "\"\"\" for diagram in diadefs: basename = diagram.title.strip().replace(' ', '_')", "later # version. 
# # This program is distributed in", "**self.imp_edges) # generate associations for rel in diagram.get_relationships('association'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id,", "diagram.get_relationships('specialization'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, **self.inh_edges) # implementation links for rel in", "General Public License as published by the Free Software #", "== 'interface': label = u'«interface»\\\\n%s' % label if not self.config.only_classnames:", "rel.to_object.fig_id, **self.inh_edges) # implementation links for rel in diagram.get_relationships('implements'): self.printer.emit_edge(rel.from_object.fig_id,", "set_printer def write(self, diadefs): \"\"\"write files for <project> according to", "VCGPrinter(self.graph_file) self.printer.open_graph(title=basename, layoutalgorithm='dfs', late_edge_labels='yes', port_sharing='no', manhattan_edges='yes') self.printer.emit_node = self.printer.node self.printer.emit_edge", "VCG and Dot diagrams\"\"\" from logilab.common.vcgutils import VCGPrinter from logilab.common.graph", "style='solid'), ] DiagramWriter.__init__(self, config, styles) def set_printer(self, file_name, basename): \"\"\"initialize", "<file_name>\"\"\" self.printer.generate(self.file_name) class VCGWriter(DiagramWriter): \"\"\"write vcg graphs from a diagram", "published by the Free Software # Foundation; either version 2", "% (label, func) return dict(label=label, shape=shape) def close_graph(self): \"\"\"close graph", "dict(arrowstyle='solid', backarrowstyle='none', textcolor='green'), ] DiagramWriter.__init__(self, config, styles) def set_printer(self, file_name,", "for creating VCG and Dot diagrams\"\"\" from logilab.common.vcgutils import VCGPrinter", "from pylint.pyreverse.utils import is_exception class DiagramWriter(object): \"\"\"base class for writing", "key=lambda x: x.title)): self.printer.emit_node(i, label=self.get_title(obj), shape='box') obj.fig_id = i #", 
"DiagramWriter(object): \"\"\"base class for writing project diagrams \"\"\" def __init__(self,", "def write_classes(self, diagram): \"\"\"write a class diagram\"\"\" # sorted to", "into <file_name>\"\"\" self.printer.generate(self.file_name) class VCGWriter(DiagramWriter): \"\"\"write vcg graphs from a", "\"\"\"initialize DotWriter and add options for layout. \"\"\" layout =", "attr in attrs: label = r'%s\\n\\f08%s' % (label, attr) if", "according to <diadefs> \"\"\" for diagram in diadefs: basename =", "write_packages(self, diagram): \"\"\"write a package diagram\"\"\" # sorted to get", "= config self.pkg_edges, self.inh_edges, self.imp_edges, self.ass_edges = styles self.printer =", "logilab.common.vcgutils import VCGPrinter from logilab.common.graph import DotBackend from pylint.pyreverse.utils import", "func.name) label = '{%s}' % label if is_exception(obj.node): return dict(fontcolor='red',", "width for UML like diagram maxlen = max(len(name) for name", "for UML like diagram maxlen = max(len(name) for name in", "utf-8 -*- # Copyright (c) 2008-2013 LOGILAB S.A. (Paris, FRANCE).", "in diagram.get_relationships('association'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, label=rel.name, **self.ass_edges) def set_printer(self, file_name, basename):", "import DotBackend from pylint.pyreverse.utils import is_exception class DiagramWriter(object): \"\"\"base class", "this program; if not, write to the Free Software Foundation,", "layoutalgorithm='dfs', late_edge_labels='yes', port_sharing='no', manhattan_edges='yes') self.printer.emit_node = self.printer.node self.printer.emit_edge = self.printer.edge", "= r'%s\\n\\f%s' % (label, line) for attr in attrs: label", "for i, obj in enumerate(sorted(diagram.objects, key=lambda x: x.title)): self.printer.emit_node(i, **self.get_values(obj))", "-*- coding: utf-8 -*- # Copyright (c) 2008-2013 LOGILAB S.A.", "DotWriter and add options for layout. 
\"\"\" layout = dict(rankdir=\"BT\")", "Foundation; either version 2 of the License, or (at your", "'_') file_name = '%s.%s' % (basename, self.config.output_format) self.set_printer(file_name, basename) if", "testable) results for i, obj in enumerate(sorted(diagram.modules(), key=lambda x: x.title)):", "% obj.title def get_values(self, obj): \"\"\"get label and shape for", "GNU General Public License for more details. # # You", "self.ass_edges = styles self.printer = None # defined in set_printer", "for attr in attrs: label = r'%s\\n\\f08%s' % (label, attr)", "for rel in diagram.get_relationships('association'): self.printer.emit_edge(rel.from_object.fig_id, rel.to_object.fig_id, label=rel.name, **self.ass_edges) def set_printer(self,", "-*- # Copyright (c) 2008-2013 LOGILAB S.A. (Paris, FRANCE). #", "the graph\"\"\" raise NotImplementedError class DotWriter(DiagramWriter): \"\"\"write dot graphs from", "is distributed in the hope that it will be useful,", "= 'box' if not self.config.only_classnames: attrs = obj.attrs methods =", "% obj.title if obj.shape == 'interface': shape = 'ellipse' else:", "the dot graph into <file_name>\"\"\" self.printer.generate(self.file_name) class VCGWriter(DiagramWriter): \"\"\"write vcg", "# # You should have received a copy of the", "Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA", "(label, line) for func in methods: label = r'%s\\n\\f10%s()' %", "(hence testable) results for i, obj in enumerate(sorted(diagram.objects, key=lambda x:", "if not self.config.only_classnames: label = r'%s|%s\\l|' % (label, r'\\l'.join(obj.attrs)) for", "if diagram.TYPE == 'class': self.write_classes(diagram) else: self.write_packages(diagram) self.close_graph() def write_packages(self,", "the Free Software # Foundation; either version 2 of the" ]
[ "return {\"success\": False, \"errors\": [\"Client is taken\"]} except Exception as", "client_id} clients[client_id] = client return {\"success\": True, \"client\": client} return", "message = {\"content\": content, \"client_id\": client_id} messages.append(message) await queue.put(message) return", "return {\"success\": True, \"client\": client} return {\"success\": False, \"errors\": [\"Client", "client return {\"success\": True, \"client\": client} return {\"success\": False, \"errors\":", "clients[client_id] = client return {\"success\": True, \"client\": client} return {\"success\":", "client} return {\"success\": False, \"errors\": [\"Client is taken\"]} except Exception", "\"errors\": [str(error)]} @mutation.field(\"createClient\") @convert_kwargs_to_snake_case async def resolve_create_client(obj, info, client_id): try:", "content, client_id): try: message = {\"content\": content, \"client_id\": client_id} messages.append(message)", "= client return {\"success\": True, \"client\": client} return {\"success\": False,", "queue.put(message) return {\"success\": True, \"message\": message} except Exception as error:", "queue mutation = MutationType() @mutation.field(\"createMessage\") @convert_kwargs_to_snake_case async def resolve_create_message(obj, info,", "taken\"]} except Exception as error: return {\"success\": False, \"errors\": [str(error)]}", "@mutation.field(\"createClient\") @convert_kwargs_to_snake_case async def resolve_create_client(obj, info, client_id): try: logging.info(f\"Client id:", "messages.append(message) await queue.put(message) return {\"success\": True, \"message\": message} except Exception", "@mutation.field(\"createMessage\") @convert_kwargs_to_snake_case async def resolve_create_message(obj, info, content, client_id): try: message", "try: message = {\"content\": content, \"client_id\": client_id} messages.append(message) await queue.put(message)", "{\"content\": content, \"client_id\": client_id} messages.append(message) await 
queue.put(message) return {\"success\": True,", "messages, queue mutation = MutationType() @mutation.field(\"createMessage\") @convert_kwargs_to_snake_case async def resolve_create_message(obj,", "from config import clients, messages, queue mutation = MutationType() @mutation.field(\"createMessage\")", "from ariadne import MutationType, convert_kwargs_to_snake_case from config import clients, messages,", "True, \"client\": client} return {\"success\": False, \"errors\": [\"Client is taken\"]}", "async def resolve_create_message(obj, info, content, client_id): try: message = {\"content\":", "clients.get(client_id): client = {\"client_id\": client_id} clients[client_id] = client return {\"success\":", "return {\"success\": False, \"errors\": [str(error)]} @mutation.field(\"createClient\") @convert_kwargs_to_snake_case async def resolve_create_client(obj,", "@convert_kwargs_to_snake_case async def resolve_create_message(obj, info, content, client_id): try: message =", "content, \"client_id\": client_id} messages.append(message) await queue.put(message) return {\"success\": True, \"message\":", "ariadne import MutationType, convert_kwargs_to_snake_case from config import clients, messages, queue", "@convert_kwargs_to_snake_case async def resolve_create_client(obj, info, client_id): try: logging.info(f\"Client id: {client_id}\")", "import logging from ariadne import MutationType, convert_kwargs_to_snake_case from config import", "= {\"client_id\": client_id} clients[client_id] = client return {\"success\": True, \"client\":", "Exception as error: return {\"success\": False, \"errors\": [str(error)]} @mutation.field(\"createClient\") @convert_kwargs_to_snake_case", "await queue.put(message) return {\"success\": True, \"message\": message} except Exception as", "as error: return {\"success\": False, \"errors\": [str(error)]} @mutation.field(\"createClient\") @convert_kwargs_to_snake_case async", "async def resolve_create_client(obj, info, client_id): try: 
logging.info(f\"Client id: {client_id}\") if", "clients, messages, queue mutation = MutationType() @mutation.field(\"createMessage\") @convert_kwargs_to_snake_case async def", "client_id} messages.append(message) await queue.put(message) return {\"success\": True, \"message\": message} except", "MutationType() @mutation.field(\"createMessage\") @convert_kwargs_to_snake_case async def resolve_create_message(obj, info, content, client_id): try:", "{\"success\": True, \"message\": message} except Exception as error: return {\"success\":", "[\"Client is taken\"]} except Exception as error: return {\"success\": False,", "logging.info(f\"Client id: {client_id}\") if not clients.get(client_id): client = {\"client_id\": client_id}", "id: {client_id}\") if not clients.get(client_id): client = {\"client_id\": client_id} clients[client_id]", "\"errors\": [\"Client is taken\"]} except Exception as error: return {\"success\":", "{client_id}\") if not clients.get(client_id): client = {\"client_id\": client_id} clients[client_id] =", "False, \"errors\": [\"Client is taken\"]} except Exception as error: return", "MutationType, convert_kwargs_to_snake_case from config import clients, messages, queue mutation =", "= MutationType() @mutation.field(\"createMessage\") @convert_kwargs_to_snake_case async def resolve_create_message(obj, info, content, client_id):", "{\"client_id\": client_id} clients[client_id] = client return {\"success\": True, \"client\": client}", "config import clients, messages, queue mutation = MutationType() @mutation.field(\"createMessage\") @convert_kwargs_to_snake_case", "import MutationType, convert_kwargs_to_snake_case from config import clients, messages, queue mutation", "info, client_id): try: logging.info(f\"Client id: {client_id}\") if not clients.get(client_id): client", "if not clients.get(client_id): client = {\"client_id\": client_id} clients[client_id] = client", "{\"success\": False, \"errors\": [\"Client is taken\"]} except Exception as error:", 
"def resolve_create_message(obj, info, content, client_id): try: message = {\"content\": content,", "client_id): try: logging.info(f\"Client id: {client_id}\") if not clients.get(client_id): client =", "True, \"message\": message} except Exception as error: return {\"success\": False,", "mutation = MutationType() @mutation.field(\"createMessage\") @convert_kwargs_to_snake_case async def resolve_create_message(obj, info, content,", "try: logging.info(f\"Client id: {client_id}\") if not clients.get(client_id): client = {\"client_id\":", "resolve_create_client(obj, info, client_id): try: logging.info(f\"Client id: {client_id}\") if not clients.get(client_id):", "\"client\": client} return {\"success\": False, \"errors\": [\"Client is taken\"]} except", "{\"success\": False, \"errors\": [str(error)]} @mutation.field(\"createClient\") @convert_kwargs_to_snake_case async def resolve_create_client(obj, info,", "client_id): try: message = {\"content\": content, \"client_id\": client_id} messages.append(message) await", "\"message\": message} except Exception as error: return {\"success\": False, \"errors\":", "info, content, client_id): try: message = {\"content\": content, \"client_id\": client_id}", "False, \"errors\": [str(error)]} @mutation.field(\"createClient\") @convert_kwargs_to_snake_case async def resolve_create_client(obj, info, client_id):", "is taken\"]} except Exception as error: return {\"success\": False, \"errors\":", "resolve_create_message(obj, info, content, client_id): try: message = {\"content\": content, \"client_id\":", "\"client_id\": client_id} messages.append(message) await queue.put(message) return {\"success\": True, \"message\": message}", "def resolve_create_client(obj, info, client_id): try: logging.info(f\"Client id: {client_id}\") if not", "message} except Exception as error: return {\"success\": False, \"errors\": [str(error)]}", "not clients.get(client_id): client = {\"client_id\": client_id} clients[client_id] = client return", "= 
{\"content\": content, \"client_id\": client_id} messages.append(message) await queue.put(message) return {\"success\":", "return {\"success\": True, \"message\": message} except Exception as error: return", "except Exception as error: return {\"success\": False, \"errors\": [str(error)]} @mutation.field(\"createClient\")", "{\"success\": True, \"client\": client} return {\"success\": False, \"errors\": [\"Client is", "logging from ariadne import MutationType, convert_kwargs_to_snake_case from config import clients,", "[str(error)]} @mutation.field(\"createClient\") @convert_kwargs_to_snake_case async def resolve_create_client(obj, info, client_id): try: logging.info(f\"Client", "convert_kwargs_to_snake_case from config import clients, messages, queue mutation = MutationType()", "client = {\"client_id\": client_id} clients[client_id] = client return {\"success\": True,", "import clients, messages, queue mutation = MutationType() @mutation.field(\"createMessage\") @convert_kwargs_to_snake_case async", "error: return {\"success\": False, \"errors\": [str(error)]} @mutation.field(\"createClient\") @convert_kwargs_to_snake_case async def" ]
[ "return self.post(self.channel.slack_webhook_url, json=payload) class HipChat(HttpTransport): def is_noop(self, check): return True", "json=payload, headers=headers) class Pushbullet(HttpTransport): def notify(self, check): text = tmpl(\"pushbullet_message.html\",", "check): if not self.channel.email_verified: return True if check.status == \"down\":", "json=payload) class Telegram(HttpTransport): SM = \"https://api.telegram.org/bot%s/sendMessage\" % settings.TELEGRAM_TOKEN @classmethod def", "hc.lib import emails from hc.lib.string import replace try: import apprise", "if not profile.authorize_sms(): profile.send_sms_limit_notice(\"SMS\") return \"Monthly SMS limit exceeded\" url", "self.prepare(cmd, check) code = os.system(cmd) if code != 0: return", "def _request(cls, method, url, **kwargs): try: options = dict(kwargs) options[\"timeout\"]", "requests.exceptions.ConnectionError: return \"Connection failed\" @classmethod def get(cls, url, **kwargs): #", "safe(check.name), \"$TAGS\": safe(check.tags), } for i, tag in enumerate(check.tags_list()): ctx[\"$TAG%d\"", "pass def notify(self, check): headers = { \"Conent-Type\": \"application/json\", \"Authorization\":", "False def tmpl(template_name, **ctx): template_path = \"integrations/%s\" % template_name #", "\"down\" else \"resolve\", \"title\": tmpl(\"pagertree_title.html\", check=check), \"description\": tmpl(\"pagertree_description.html\", check=check), \"client\":", "dict(kwargs) options[\"timeout\"] = 5 if \"headers\" not in options: options[\"headers\"]", "= { \"From\": settings.TWILIO_FROM, \"To\": self.channel.sms_number, \"Body\": text, } return", "unsub_link = self.channel.get_unsub_link() headers = { \"X-Bounce-Url\": bounce_url, \"List-Unsubscribe\": \"<%s>\"", "settings.SITE_NAME, \"client_url\": check.details_url(), } return self.post(self.URL, json=payload) class PagerTree(HttpTransport): def", "check=check) payload = json.loads(text) url = self.channel.discord_webhook_url + \"/slack\" return", 
"ctx, headers) def is_noop(self, check): if not self.channel.email_verified: return True", "profile.authorize_sms(): profile.send_sms_limit_notice(\"SMS\") return \"Monthly SMS limit exceeded\" url = self.URL", "method because it is also used in # hc.front.views.telegram_bot to", "\"https://events.pagerduty.com/generic/2010-04-15/create_event.json\" def notify(self, check): description = tmpl(\"pd_description.html\", check=check) payload =", "the sorting preference for this email address p = Profile.objects.get(user__email=self.channel.email_value)", "# The third element, if present, is the priority for", "\"down\": return not self.channel.email_notify_down else: return not self.channel.email_notify_up class Shell(Transport):", "settings.PUSHOVER_API_TOKEN, \"user\": user_key, \"message\": text, \"title\": title, \"html\": 1, \"priority\":", "return False def checks(self): return self.channel.project.check_set.order_by(\"created\") class Email(Transport): def notify(self,", "def get(cls, url, **kwargs): # Make 3 attempts-- for x", "# Well, we tried return \"Connection timed out\" except requests.exceptions.ConnectionError:", "{ \"type\": self.channel.zulip_type, \"to\": self.channel.zulip_to, \"topic\": tmpl(\"zulip_topic.html\", check=check), \"content\": tmpl(\"zulip_content.html\",", "!= \"down\" def notify(self, check): params = { \"idList\": self.channel.trello_list_id,", "notify(self, check): url = self.channel.value headers = {\"Conent-Type\": \"application/json\"} payload", "render_to_string(template_path, ctx).strip().replace(\"\\xa0\", \" \") class Transport(object): def __init__(self, channel): self.channel", "not profile.authorize_sms(): profile.send_sms_limit_notice(\"SMS\") return \"Monthly SMS limit exceeded\" url =", "headers = {} for key, value in spec[\"headers\"].items(): headers[key] =", "url = \"https://api.opsgenie.com/v2/alerts\" if self.channel.opsgenie_region == \"eu\": url = \"https://api.eu.opsgenie.com/v2/alerts\"", "+ \"/slack\" return 
self.post(url, json=payload) class Telegram(HttpTransport): SM = \"https://api.telegram.org/bot%s/sendMessage\"", "= self.channel.value headers = {\"Conent-Type\": \"application/json\"} payload = { \"incident_key\":", "and both are optional. \"\"\" return False def checks(self): return", "str(check.code), \"source\": settings.SITE_NAME} if check.status == \"down\": payload[\"tags\"] = check.tags_list()", "is by check's creation time sort = \"created\" # list()", "\"\"\" raise NotImplementedError() def is_noop(self, check): \"\"\" Return True if", "m: return f'Received status code {r.status_code} with a message: \"{m}\"'", "= p.sort except Profile.DoesNotExist: # Default sort order is by", "rendering a template ctx = { \"check\": check, \"checks\": list(self.checks()),", "= cls.get_error(r) if m: return f'Received status code {r.status_code} with", "status of the check. This method returns None on success,", "return \"Connection timed out\" except requests.exceptions.ConnectionError: return \"Connection failed\" @classmethod", "return self.post(url, data=body.encode(), headers=headers) elif spec[\"method\"] == \"PUT\": return self.put(url,", "1)] = tag return replace(template, ctx) def is_noop(self, check): if", "attempts-- for x in range(0, 3): error = cls._request(\"post\", url,", "check=check) formatted = tmpl(\"matrix_description_formatted.html\", check=check) payload = { \"msgtype\": \"m.text\",", "method, url, **kwargs): try: options = dict(kwargs) options[\"timeout\"] = 5", "\"List-Unsubscribe=One-Click\", } try: # Look up the sorting preference for", "tmpl(template_name, **ctx): template_path = \"integrations/%s\" % template_name # \\xa0 is", "payload = json.loads(text) url = self.channel.discord_webhook_url + \"/slack\" return self.post(url,", "values. 
\"\"\" def safe(s): return quote(s) if urlencode else s", "return True if check.status == \"up\" and not self.channel.url_up: return", "else \"resolve\", \"description\": description, \"client\": settings.SITE_NAME, \"client_url\": check.details_url(), } return", "= cls._request(\"put\", url, **kwargs) if error is None: break return", "status code {r.status_code}\" except requests.exceptions.Timeout: # Well, we tried return", "self.post(url, data=data, auth=auth) class WhatsApp(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self,", "== \"down\" and not self.channel.url_down: return True if check.status ==", "def notify(self, check): url = self.channel.value headers = {\"Content-Type\": \"application/json\"}", "= { \"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\": timezone.now().replace(microsecond=0).isoformat(), \"$NAME\": check.name,", "**ctx): template_path = \"integrations/%s\" % template_name # \\xa0 is non-breaking", "requests from urllib.parse import quote, urlencode from hc.accounts.models import Profile", "else: return not self.channel.whatsapp_notify_up def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner)", "are not enabled\" if check.status == \"up\": cmd = self.channel.cmd_up", "def tmpl(template_name, **ctx): template_path = \"integrations/%s\" % template_name # \\xa0", "ctx[\"$TAG%d\" % (i + 1)] = tag return replace(template, ctx)", "to avoid DB access while # rendering a template ctx", "url = self.prepare(spec[\"url\"], check, urlencode=True) headers = {} for key,", "\"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\": safe(timezone.now().replace(microsecond=0).isoformat()), \"$NAME\": safe(check.name), \"$TAGS\": safe(check.tags),", "Pushbullet(HttpTransport): def notify(self, check): text = tmpl(\"pushbullet_message.html\", check=check) url =", "ctx = {\"check\": check, \"down_checks\": list(others)} text = 
tmpl(\"pushover_message.html\", **ctx)", "user_key, prio = pieces[0], pieces[1] # The third element, if", "check.details_url(), } return self.post(self.URL, json=payload) class PagerTree(HttpTransport): def notify(self, check):", "if urlencode else s ctx = { \"$CODE\": str(check.code), \"$STATUS\":", "= self.channel.value headers = {\"Content-Type\": \"application/json\"} payload = { \"incident_key\":", "\"client\": settings.SITE_NAME, \"client_url\": settings.SITE_ROOT, \"tags\": \",\".join(check.tags_list()), } return self.post(url, json=payload,", "check=check) payload[\"note\"] = tmpl(\"opsgenie_note.html\", check=check) payload[\"description\"] = tmpl(\"opsgenie_description.html\", check=check) url", "else: return not self.channel.email_notify_up class Shell(Transport): def prepare(self, template, check):", "response.json().get(\"description\") except ValueError: pass @classmethod def send(cls, chat_id, text): #", "title, \"html\": 1, \"priority\": int(prio), } # Emergency notification if", "= tmpl(\"pushover_title.html\", **ctx) pieces = self.channel.value.split(\"|\") user_key, prio = pieces[0],", "def prepare(self, template, check, urlencode=False): \"\"\" Replace variables with actual", "\"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): return check.status != \"down\" def notify(self,", "= \"https://events.pagerduty.com/generic/2010-04-15/create_event.json\" def notify(self, check): description = tmpl(\"pd_description.html\", check=check) payload", "% settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"whatsapp_message.html\", check=check,", "check): params = { \"idList\": self.channel.trello_list_id, \"name\": tmpl(\"trello_name.html\", check=check), \"desc\":", "= cls._request(\"get\", url, **kwargs) if error is None: break return", "settings.TWILIO_AUTH) text = tmpl(\"whatsapp_message.html\", check=check, site_name=settings.SITE_NAME) data = { \"From\":", "tried return 
\"Connection timed out\" except requests.exceptions.ConnectionError: return \"Connection failed\"", "json=payload) class PagerTree(HttpTransport): def notify(self, check): url = self.channel.value headers", "is_noop(self, check): return True class OpsGenie(HttpTransport): @classmethod def get_error(cls, response):", "a.notify(body=body, title=title, notify_type=notify_type) else None ) class MsTeams(HttpTransport): def notify(self,", "URL = \"https://api.pushover.net/1/messages.json\" def notify(self, check): others = self.checks().filter(status=\"down\").exclude(code=check.code) #", "def notify(self, check): from hc.api.models import TokenBucket if not TokenBucket.authorize_telegram(self.channel.telegram_id):", "response): # Override in subclasses: look for a specific error", "if not self.channel.email_verified: return \"Email not verified\" unsub_link = self.channel.get_unsub_link()", "safe(timezone.now().replace(microsecond=0).isoformat()), \"$NAME\": safe(check.name), \"$TAGS\": safe(check.tags), } for i, tag in", "\"https://api.pushover.net/1/messages.json\" def notify(self, check): others = self.checks().filter(status=\"down\").exclude(code=check.code) # list() executes", "def notify(self, check): text = tmpl(\"msteams_message.json\", check=check) payload = json.loads(text)", "\"integrations/%s\" % template_name # \\xa0 is non-breaking space. 
It causes", "{ \"X-Bounce-Url\": bounce_url, \"List-Unsubscribe\": \"<%s>\" % unsub_link, \"List-Unsubscribe-Post\": \"List-Unsubscribe=One-Click\", }", "3): error = cls._request(\"get\", url, **kwargs) if error is None:", "check=check), \"key\": settings.TRELLO_APP_KEY, \"token\": self.channel.trello_token, } return self.post(self.URL, params=params) class", "Profile from hc.lib import emails from hc.lib.string import replace try:", "error = cls._request(\"post\", url, **kwargs) if error is None: break", "tmpl(\"matrix_description_formatted.html\", check=check) payload = { \"msgtype\": \"m.text\", \"body\": plain, \"format\":", "self.post(url, json=payload, headers=headers) class PagerDuty(HttpTransport): URL = \"https://events.pagerduty.com/generic/2010-04-15/create_event.json\" def notify(self,", "urlencode=True) headers = {} for key, value in spec[\"headers\"].items(): headers[key]", "quote(s) if urlencode else s ctx = { \"$CODE\": str(check.code),", "self.channel.value headers = {\"Content-Type\": \"application/json\"} payload = { \"incident_key\": str(check.code),", "= spec[\"body\"] if body: body = self.prepare(body, check) if spec[\"method\"]", "This method is overridden in Webhook subclass where the user", "plain, \"format\": \"org.matrix.custom.html\", \"formatted_body\": formatted, } return self.post(self.get_url(), json=payload) class", "# rendering a template ctx = { \"check\": check, \"checks\":", "self.channel.cmd_up: return True return False def notify(self, check): if not", "value in spec[\"headers\"].items(): headers[key] = self.prepare(value, check) body = spec[\"body\"]", "check.status == \"down\": return not self.channel.email_notify_down else: return not self.channel.email_notify_up", "requests.request(method, url, **options) if r.status_code not in (200, 201, 202,", "chat_id, \"text\": text, \"parse_mode\": \"html\"} ) def notify(self, check): from", "\"List-Unsubscribe\": \"<%s>\" % unsub_link, \"List-Unsubscribe-Post\": 
\"List-Unsubscribe=One-Click\", } try: # Look", "actual values. \"\"\" def safe(s): return quote(s) if urlencode else", "notification if prio == \"2\": payload[\"retry\"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY payload[\"expire\"] =", "key, value in spec[\"headers\"].items(): headers[key] = self.prepare(value, check) body =", "} return self.post(self.URL, json=payload) class PagerTree(HttpTransport): def notify(self, check): url", "Profile.objects.get(user__email=self.channel.email_value) sort = p.sort except Profile.DoesNotExist: # Default sort order", "3 attempts-- for x in range(0, 3): error = cls._request(\"put\",", "installed\" a = apprise.Apprise() title = tmpl(\"apprise_title.html\", check=check) body =", "\"title\": tmpl(\"pagertree_title.html\", check=check), \"description\": tmpl(\"pagertree_description.html\", check=check), \"client\": settings.SITE_NAME, \"client_url\": settings.SITE_ROOT,", "time sort = \"created\" # list() executes the query, to", "= settings.PUSHOVER_EMERGENCY_EXPIRATION return self.post(self.URL, data=payload) class VictorOps(HttpTransport): def notify(self, check):", "options[\"timeout\"] = 5 if \"headers\" not in options: options[\"headers\"] =", "else \"RECOVERY\" payload = { \"entity_id\": str(check.code), \"message_type\": mtype, \"entity_display_name\":", "{\"check\": check, \"down_checks\": list(others)} text = tmpl(\"pushover_message.html\", **ctx) title =", "payload = json.loads(text) return self.post(self.channel.value, json=payload) class Zulip(HttpTransport): @classmethod def", "= self.channel.get_unsub_link() headers = { \"X-Bounce-Url\": bounce_url, \"List-Unsubscribe\": \"<%s>\" %", "= \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): return check.status != \"down\" def", "import replace try: import apprise except ImportError: # Enforce settings.APPRISE_ENABLED", "def notify(self, check, bounce_url): if not self.channel.email_verified: return \"Email not", "check=check), 
\"description\": tmpl(\"pagertree_description.html\", check=check), \"client\": settings.SITE_NAME, \"client_url\": settings.SITE_ROOT, \"tags\": \",\".join(check.tags_list()),", "False def notify(self, check): spec = self.channel.webhook_spec(check.status) if not spec[\"url\"]:", "return \"Connection failed\" @classmethod def get(cls, url, **kwargs): # Make", "= json.loads(text) return self.post(self.channel.slack_webhook_url, json=payload) class HipChat(HttpTransport): def is_noop(self, check):", "check.status != \"down\" def notify(self, check): params = { \"idList\":", "safe(check.tags), } for i, tag in enumerate(check.tags_list()): ctx[\"$TAG%d\" % (i", "\"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\": timezone.now().replace(microsecond=0).isoformat(), \"$NAME\": check.name, \"$TAGS\": check.tags,", "check): description = tmpl(\"pd_description.html\", check=check) payload = { \"service_key\": self.channel.pd_service_key,", "attempts-- for x in range(0, 3): error = cls._request(\"get\", url,", "in enumerate(check.tags_list()): ctx[\"$TAG%d\" % (i + 1)] = safe(tag) return", "and/or not installed\" a = apprise.Apprise() title = tmpl(\"apprise_title.html\", check=check)", "True if check.status == \"up\" and not self.channel.url_up: return True", "\"event_type\": \"trigger\" if check.status == \"down\" else \"resolve\", \"title\": tmpl(\"pagertree_title.html\",", "payload = { \"incident_key\": str(check.code), \"event_type\": \"trigger\" if check.status ==", "%d\" % code class HttpTransport(Transport): @classmethod def get_error(cls, response): #", "tmpl(\"opsgenie_description.html\", check=check) url = \"https://api.opsgenie.com/v2/alerts\" if self.channel.opsgenie_region == \"eu\": url", "is a separate method because it is also used in", "\"application/json\", \"Authorization\": \"GenieKey %s\" % self.channel.opsgenie_key, } payload = {\"alias\":", "returns None on success, and error message on error. 
\"\"\"", "self.channel.discord_webhook_url + \"/slack\" return self.post(url, json=payload) class Telegram(HttpTransport): SM =", "Well, we tried return \"Connection timed out\" except requests.exceptions.ConnectionError: return", "headers=headers) class Slack(HttpTransport): def notify(self, check): text = tmpl(\"slack_message.json\", check=check)", "not self.channel.url_up: return True return False def notify(self, check): spec", "if check.status == \"down\": return not self.channel.whatsapp_notify_down else: return not", "= 5 if \"headers\" not in options: options[\"headers\"] = {}", "data = { \"type\": self.channel.zulip_type, \"to\": self.channel.zulip_to, \"topic\": tmpl(\"zulip_topic.html\", check=check),", "{\"type\": \"note\", \"title\": settings.SITE_NAME, \"body\": text} return self.post(url, json=payload, headers=headers)", "auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"sms_message.html\", check=check, site_name=settings.SITE_NAME) data", "json.loads(text) url = self.channel.discord_webhook_url + \"/slack\" return self.post(url, json=payload) class", "notify(self, check): text = tmpl(\"slack_message.json\", check=check) payload = json.loads(text) return", "check): text = tmpl(\"msteams_message.json\", check=check) payload = json.loads(text) return self.post(self.channel.value,", "return f'Received status code {r.status_code} with a message: \"{m}\"' return", "hc.accounts.models import Profile from hc.lib import emails from hc.lib.string import", "prepare(self, template, check, urlencode=False): \"\"\" Replace variables with actual values.", "= { \"check\": check, \"checks\": list(self.checks()), \"sort\": sort, \"now\": timezone.now(),", "PagerTeam(HttpTransport): def notify(self, check): url = self.channel.value headers = {\"Content-Type\":", "Telegram(HttpTransport): SM = \"https://api.telegram.org/bot%s/sendMessage\" % settings.TELEGRAM_TOKEN @classmethod def get_error(cls, response):", "\"msgtype\": \"m.text\", \"body\": 
plain, \"format\": \"org.matrix.custom.html\", \"formatted_body\": formatted, } return", "\"token\": settings.PUSHOVER_API_TOKEN, \"user\": user_key, \"message\": text, \"title\": title, \"html\": 1,", "= requests.request(method, url, **options) if r.status_code not in (200, 201,", "check): return check.status != \"down\" def notify(self, check): profile =", "from django.conf import settings from django.template.loader import render_to_string from django.utils", "= \"created\" # list() executes the query, to avoid DB", "this email address p = Profile.objects.get(user__email=self.channel.email_value) sort = p.sort except", "= { \"service_key\": self.channel.pd_service_key, \"incident_key\": str(check.code), \"event_type\": \"trigger\" if check.status", "@classmethod def get_error(cls, response): try: return response.json().get(\"msg\") except ValueError: pass", "} return self.post(self.channel.value, json=payload) class Matrix(HttpTransport): def get_url(self): s =", "@classmethod def get_error(cls, response): try: return response.json().get(\"description\") except ValueError: pass", "import quote, urlencode from hc.accounts.models import Profile from hc.lib import", "tmpl(\"pagerteam_title.html\", check=check), \"description\": tmpl(\"pagerteam_description.html\", check=check), \"client\": settings.SITE_NAME, \"client_url\": settings.SITE_ROOT, \"tags\":", "check=check) payload = { \"msgtype\": \"m.text\", \"body\": plain, \"format\": \"org.matrix.custom.html\",", "check, bounce_url): if not self.channel.email_verified: return \"Email not verified\" unsub_link", "bounce_url): if not self.channel.email_verified: return \"Email not verified\" unsub_link =", "notify(self, check): _, domain = self.channel.zulip_bot_email.split(\"@\") url = \"https://%s/api/v1/messages\" %", "0: return \"Command returned exit code %d\" % code class", "check.status == \"down\" else \"resolve\", \"title\": tmpl(\"pagerteam_title.html\", check=check), \"description\": 
tmpl(\"pagerteam_description.html\",", "cls.get_error(r) if m: return f'Received status code {r.status_code} with a", "\"/%s/close?identifierType=alias\" % check.code return self.post(url, json=payload, headers=headers) class PagerDuty(HttpTransport): URL", "= { \"token\": settings.PUSHOVER_API_TOKEN, \"user\": user_key, \"message\": text, \"title\": title,", "self.channel.cmd_down: return True if check.status == \"up\" and not self.channel.cmd_up:", "and/or enabled return \"Apprise is disabled and/or not installed\" a", "profile.send_sms_limit_notice(\"WhatsApp\") return \"Monthly message limit exceeded\" url = self.URL %", "( \"Failed\" if not a.notify(body=body, title=title, notify_type=notify_type) else None )", "except requests.exceptions.Timeout: # Well, we tried return \"Connection timed out\"", "# Telegram.send is a separate method because it is also", "url = \"https://%s/api/v1/messages\" % domain auth = (self.channel.zulip_bot_email, self.channel.zulip_api_key) data", "Override in subclasses: look for a specific error message in", "notify(self, check): text = tmpl(\"msteams_message.json\", check=check) payload = json.loads(text) return", "= self.channel.webhook_spec(check.status) if not spec[\"url\"]: return \"Empty webhook URL\" url", "check): spec = self.channel.webhook_spec(check.status) if not spec[\"url\"]: return \"Empty webhook", "URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): return check.status != \"down\"", "url = \"https://api.eu.opsgenie.com/v2/alerts\" if check.status == \"up\": url += \"/%s/close?identifierType=alias\"", "check): if check.status == \"down\" and not self.channel.url_down: return True", "class Webhook(HttpTransport): def prepare(self, template, check, urlencode=False): \"\"\" Replace variables", "message in the # response and return it. 
return None", "= tmpl(\"pd_description.html\", check=check) payload = { \"service_key\": self.channel.pd_service_key, \"incident_key\": str(check.code),", "\"To\": self.channel.sms_number, \"Body\": text, } return self.post(url, data=data, auth=auth) class", "return response.json().get(\"msg\") except ValueError: pass def notify(self, check): _, domain", "ignore check's current status. This method is overridden in Webhook", "check=check), \"desc\": tmpl(\"trello_desc.html\", check=check), \"key\": settings.TRELLO_APP_KEY, \"token\": self.channel.trello_token, } return", "site_name=settings.SITE_NAME) data = { \"From\": \"whatsapp:%s\" % settings.TWILIO_FROM, \"To\": \"whatsapp:%s\"", "urlencode=False): \"\"\" Replace variables with actual values. \"\"\" def safe(s):", "rendering a template ctx = {\"check\": check, \"down_checks\": list(others)} text", "return \"Monthly SMS limit exceeded\" url = self.URL % settings.TWILIO_ACCOUNT", "self.channel.trello_token, } return self.post(self.URL, params=params) class Apprise(HttpTransport): def notify(self, check):", "if not settings.APPRISE_ENABLED: # Not supported and/or enabled return \"Apprise", "from django.utils import timezone import json import requests from urllib.parse", "return \"Apprise is disabled and/or not installed\" a = apprise.Apprise()", "\"\"\" return False def checks(self): return self.channel.project.check_set.order_by(\"created\") class Email(Transport): def", "check.status == \"down\": payload[\"tags\"] = check.tags_list() payload[\"message\"] = tmpl(\"opsgenie_message.html\", check=check)", "error @classmethod def put(cls, url, **kwargs): # Make 3 attempts--", "return self.put(url, data=body.encode(), headers=headers) class Slack(HttpTransport): def notify(self, check): text", "if check.status == \"down\" else \"resolve\", \"title\": tmpl(\"pagerteam_title.html\", check=check), \"description\":", "3 and check.status == \"up\": prio = pieces[2] payload =", "not self.channel.whatsapp_notify_down else: 
return not self.channel.whatsapp_notify_up def notify(self, check): profile", "except requests.exceptions.ConnectionError: return \"Connection failed\" @classmethod def get(cls, url, **kwargs):", "template, check, urlencode=False): \"\"\" Replace variables with actual values. \"\"\"", "return error @classmethod def put(cls, url, **kwargs): # Make 3", "check's creation time sort = \"created\" # list() executes the", "spec[\"body\"] if body: body = self.prepare(body, check) if spec[\"method\"] ==", "return url def notify(self, check): plain = tmpl(\"matrix_description.html\", check=check) formatted", "MsTeams(HttpTransport): def notify(self, check): text = tmpl(\"msteams_message.json\", check=check) payload =", "PagerDuty(HttpTransport): URL = \"https://events.pagerduty.com/generic/2010-04-15/create_event.json\" def notify(self, check): description = tmpl(\"pd_description.html\",", "check): if not settings.APPRISE_ENABLED: # Not supported and/or enabled return", "return True if check.status == \"up\" and not self.channel.cmd_up: return", "if \"User-Agent\" not in options[\"headers\"]: options[\"headers\"][\"User-Agent\"] = \"healthchecks.io\" r =", "where the user can configure webhook urls for \"up\" and", "!= \"down\" def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if not", "text = tmpl(\"telegram_message.html\", check=check) return self.send(self.channel.telegram_id, text) class Sms(HttpTransport): URL", "= settings.PUSHOVER_EMERGENCY_RETRY_DELAY payload[\"expire\"] = settings.PUSHOVER_EMERGENCY_EXPIRATION return self.post(self.URL, data=payload) class VictorOps(HttpTransport):", "{ \"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\": timezone.now().replace(microsecond=0).isoformat(), \"$NAME\": check.name, \"$TAGS\":", "\"Body\": text, } return self.post(url, data=data, auth=auth) class WhatsApp(HttpTransport): URL", "get_error(cls, response): try: return response.json().get(\"message\") except ValueError: pass 
def notify(self,", "check): if check.status == \"down\": return not self.channel.whatsapp_notify_down else: return", "def is_noop(self, check): \"\"\" Return True if transport will ignore", "replace try: import apprise except ImportError: # Enforce settings.APPRISE_ENABLED =", "\"$TAGS\": safe(check.tags), } for i, tag in enumerate(check.tags_list()): ctx[\"$TAG%d\" %", "\"GET\": return self.get(url, headers=headers) elif spec[\"method\"] == \"POST\": return self.post(url,", "\"format\": \"org.matrix.custom.html\", \"formatted_body\": formatted, } return self.post(self.get_url(), json=payload) class Discord(HttpTransport):", "return render_to_string(template_path, ctx).strip().replace(\"\\xa0\", \" \") class Transport(object): def __init__(self, channel):", "order is by check's creation time sort = \"created\" #", "bounce_url, \"List-Unsubscribe\": \"<%s>\" % unsub_link, \"List-Unsubscribe-Post\": \"List-Unsubscribe=One-Click\", } try: #", "data=payload) class VictorOps(HttpTransport): def notify(self, check): description = tmpl(\"victorops_description.html\", check=check)", "for a specific error message in the # response and", "None @classmethod def _request(cls, method, url, **kwargs): try: options =", "notify_type = ( apprise.NotifyType.SUCCESS if check.status == \"up\" else apprise.NotifyType.FAILURE", "the user can configure webhook urls for \"up\" and \"down\"", "\"unsub_link\": unsub_link, } emails.alert(self.channel.email_value, ctx, headers) def is_noop(self, check): if", "== \"down\" else \"RECOVERY\" payload = { \"entity_id\": str(check.code), \"message_type\":", "\"org.matrix.custom.html\", \"formatted_body\": formatted, } return self.post(self.get_url(), json=payload) class Discord(HttpTransport): def", "settings.APPRISE_ENABLED = False def tmpl(template_name, **ctx): template_path = \"integrations/%s\" %", "to send invite links. 
return cls.post( cls.SM, json={\"chat_id\": chat_id, \"text\":", "settings.SHELL_ENABLED: return \"Shell commands are not enabled\" if check.status ==", "= { \"Conent-Type\": \"application/json\", \"Authorization\": \"GenieKey %s\" % self.channel.opsgenie_key, }", "tmpl(\"pushbullet_message.html\", check=check) url = \"https://api.pushbullet.com/v2/pushes\" headers = { \"Access-Token\": self.channel.value,", "% template_name # \\xa0 is non-breaking space. It causes SMS", "ValueError: pass @classmethod def send(cls, chat_id, text): # Telegram.send is", "headers) def is_noop(self, check): if not self.channel.email_verified: return True if", "def get_error(cls, response): # Override in subclasses: look for a", "{r.status_code} with a message: \"{m}\"' return f\"Received status code {r.status_code}\"", "in subclasses: look for a specific error message in the", "\"https://api.eu.opsgenie.com/v2/alerts\" if check.status == \"up\": url += \"/%s/close?identifierType=alias\" % check.code", "ctx[\"$TAG%d\" % (i + 1)] = safe(tag) return replace(template, ctx)", "\"up\" and not self.channel.cmd_up: return True return False def notify(self,", "settings.SITE_ROOT, \"tags\": \",\".join(check.tags_list()), } return self.post(url, json=payload, headers=headers) class PagerTeam(HttpTransport):", "self.channel.zulip_type, \"to\": self.channel.zulip_to, \"topic\": tmpl(\"zulip_topic.html\", check=check), \"content\": tmpl(\"zulip_content.html\", check=check), }", "a separate method because it is also used in #", "notify(self, check): if not settings.APPRISE_ENABLED: # Not supported and/or enabled", "\"topic\": tmpl(\"zulip_topic.html\", check=check), \"content\": tmpl(\"zulip_content.html\", check=check), } return self.post(url, data=data,", "notify(self, check): from hc.api.models import TokenBucket if not TokenBucket.authorize_telegram(self.channel.telegram_id): return", "\"description\": tmpl(\"pagertree_description.html\", check=check), \"client\": settings.SITE_NAME, 
\"client_url\": settings.SITE_ROOT, \"tags\": \",\".join(check.tags_list()), }", "Send notification about current status of the check. This method", "self.prepare(value, check) body = spec[\"body\"] if body: body = self.prepare(body,", "payload = json.loads(text) return self.post(self.channel.slack_webhook_url, json=payload) class HipChat(HttpTransport): def is_noop(self,", "not in (200, 201, 202, 204): m = cls.get_error(r) if", "= pieces[0], pieces[1] # The third element, if present, is", "\"client_url\": check.details_url(), } return self.post(self.URL, json=payload) class PagerTree(HttpTransport): def notify(self,", "return quote(s) if urlencode else s ctx = { \"$CODE\":", "tmpl(\"victorops_description.html\", check=check) mtype = \"CRITICAL\" if check.status == \"down\" else", "\"<%s>\" % unsub_link, \"List-Unsubscribe-Post\": \"List-Unsubscribe=One-Click\", } try: # Look up", "= \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): if check.status == \"down\": return", "= tmpl(\"pushbullet_message.html\", check=check) url = \"https://api.pushbullet.com/v2/pushes\" headers = { \"Access-Token\":", "def get_url(self): s = quote(self.channel.value) url = settings.MATRIX_HOMESERVER url +=", "self.prepare(spec[\"url\"], check, urlencode=True) headers = {} for key, value in", "None: break return error class Webhook(HttpTransport): def prepare(self, template, check,", "description, \"monitoring_tool\": settings.SITE_NAME, } return self.post(self.channel.value, json=payload) class Matrix(HttpTransport): def", "not in options: options[\"headers\"] = {} if \"User-Agent\" not in", "(i + 1)] = tag return replace(template, ctx) def is_noop(self,", "and not self.channel.cmd_up: return True return False def notify(self, check):", "return None @classmethod def _request(cls, method, url, **kwargs): try: options", "= { \"entity_id\": str(check.code), \"message_type\": mtype, \"entity_display_name\": check.name_then_code(), \"state_message\": 
description,", "if transport will ignore check's current status. This method is", "for key, value in spec[\"headers\"].items(): headers[key] = self.prepare(value, check) body", "tmpl(\"whatsapp_message.html\", check=check, site_name=settings.SITE_NAME) data = { \"From\": \"whatsapp:%s\" % settings.TWILIO_FROM,", "check.status != \"down\" def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if", "WhatsApp(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): if check.status ==", "import settings from django.template.loader import render_to_string from django.utils import timezone", "specific error message in the # response and return it.", "class WhatsApp(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): if check.status", "for this email address p = Profile.objects.get(user__email=self.channel.email_value) sort = p.sort", "\"down\" events, and both are optional. 
\"\"\" return False def", "DB access while # rendering a template ctx = {\"check\":", "in Webhook subclass where the user can configure webhook urls", "json.loads(text) return self.post(self.channel.slack_webhook_url, json=payload) class HipChat(HttpTransport): def is_noop(self, check): return", "return self.post(url, json=payload, headers=headers) class PagerDuty(HttpTransport): URL = \"https://events.pagerduty.com/generic/2010-04-15/create_event.json\" def", "if body: body = self.prepare(body, check) if spec[\"method\"] == \"GET\":", "@classmethod def put(cls, url, **kwargs): # Make 3 attempts-- for", "element, if present, is the priority for \"up\" events if", "get(cls, url, **kwargs): # Make 3 attempts-- for x in", "== \"down\" else \"resolve\", \"title\": tmpl(\"pagerteam_title.html\", check=check), \"description\": tmpl(\"pagerteam_description.html\", check=check),", "check=check), \"description\": tmpl(\"pagerteam_description.html\", check=check), \"client\": settings.SITE_NAME, \"client_url\": settings.SITE_ROOT, \"tags\": \",\".join(check.tags_list()),", "urlencode from hc.accounts.models import Profile from hc.lib import emails from", "class Transport(object): def __init__(self, channel): self.channel = channel def notify(self,", "hc.api.models import TokenBucket if not TokenBucket.authorize_telegram(self.channel.telegram_id): return \"Rate limit exceeded\"", "text = tmpl(\"pushover_message.html\", **ctx) title = tmpl(\"pushover_title.html\", **ctx) pieces =", "+= \"/_matrix/client/r0/rooms/%s/send/m.room.message?\" % s url += urlencode({\"access_token\": settings.MATRIX_ACCESS_TOKEN}) return url", "formatted, } return self.post(self.get_url(), json=payload) class Discord(HttpTransport): def notify(self, check):", "\"trigger\" if check.status == \"down\" else \"resolve\", \"description\": description, \"client\":", "self.channel.cmd_up elif check.status == \"down\": cmd = self.channel.cmd_down cmd =", 
"\"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): if check.status == \"down\": return not", "= \"https://api.opsgenie.com/v2/alerts\" if self.channel.opsgenie_region == \"eu\": url = \"https://api.eu.opsgenie.com/v2/alerts\" if", "sorting preference for this email address p = Profile.objects.get(user__email=self.channel.email_value) sort", "= apprise.Apprise() title = tmpl(\"apprise_title.html\", check=check) body = tmpl(\"apprise_description.html\", check=check)", "if not a.notify(body=body, title=title, notify_type=notify_type) else None ) class MsTeams(HttpTransport):", "check=check) body = tmpl(\"apprise_description.html\", check=check) a.add(self.channel.value) notify_type = ( apprise.NotifyType.SUCCESS", "safe(tag) return replace(template, ctx) def is_noop(self, check): if check.status ==", "payload[\"expire\"] = settings.PUSHOVER_EMERGENCY_EXPIRATION return self.post(self.URL, data=payload) class VictorOps(HttpTransport): def notify(self,", "ctx) def is_noop(self, check): if check.status == \"down\" and not", "Return True if transport will ignore check's current status. This", "requests.exceptions.Timeout: # Well, we tried return \"Connection timed out\" except", "return \"Monthly message limit exceeded\" url = self.URL % settings.TWILIO_ACCOUNT", "tmpl(\"pushover_title.html\", **ctx) pieces = self.channel.value.split(\"|\") user_key, prio = pieces[0], pieces[1]", "cmd = self.channel.cmd_up elif check.status == \"down\": cmd = self.channel.cmd_down", "= self.channel.discord_webhook_url + \"/slack\" return self.post(url, json=payload) class Telegram(HttpTransport): SM", "supported and/or enabled return \"Apprise is disabled and/or not installed\"", "sort, \"now\": timezone.now(), \"unsub_link\": unsub_link, } emails.alert(self.channel.email_value, ctx, headers) def", "prepare(self, template, check): \"\"\" Replace placeholders with actual values. 
\"\"\"", "attempts-- for x in range(0, 3): error = cls._request(\"put\", url,", "self.channel.opsgenie_key, } payload = {\"alias\": str(check.code), \"source\": settings.SITE_NAME} if check.status", "settings.TWILIO_FROM, \"To\": self.channel.sms_number, \"Body\": text, } return self.post(url, data=data, auth=auth)", "settings.TRELLO_APP_KEY, \"token\": self.channel.trello_token, } return self.post(self.URL, params=params) class Apprise(HttpTransport): def", "if check.status == \"up\" and not self.channel.url_up: return True return", "\"Access-Token\": self.channel.value, \"Conent-Type\": \"application/json\", } payload = {\"type\": \"note\", \"title\":", "} return self.post(self.get_url(), json=payload) class Discord(HttpTransport): def notify(self, check): text", "payload[\"tags\"] = check.tags_list() payload[\"message\"] = tmpl(\"opsgenie_message.html\", check=check) payload[\"note\"] = tmpl(\"opsgenie_note.html\",", "is_noop(self, check): \"\"\" Return True if transport will ignore check's", "if check.status == \"down\": return not self.channel.email_notify_down else: return not", "except ImportError: # Enforce settings.APPRISE_ENABLED = False def tmpl(template_name, **ctx):", "3): error = cls._request(\"put\", url, **kwargs) if error is None:", "return True class OpsGenie(HttpTransport): @classmethod def get_error(cls, response): try: return", "\"To\": \"whatsapp:%s\" % self.channel.sms_number, \"Body\": text, } return self.post(url, data=data,", "5 if \"headers\" not in options: options[\"headers\"] = {} if", "message on error. 
\"\"\" raise NotImplementedError() def is_noop(self, check): \"\"\"", "tmpl(\"pagertree_description.html\", check=check), \"client\": settings.SITE_NAME, \"client_url\": settings.SITE_ROOT, \"tags\": \",\".join(check.tags_list()), } return", "json=payload, headers=headers) class PagerDuty(HttpTransport): URL = \"https://events.pagerduty.com/generic/2010-04-15/create_event.json\" def notify(self, check):", "text = tmpl(\"sms_message.html\", check=check, site_name=settings.SITE_NAME) data = { \"From\": settings.TWILIO_FROM,", "\"Rate limit exceeded\" text = tmpl(\"telegram_message.html\", check=check) return self.send(self.channel.telegram_id, text)", "class VictorOps(HttpTransport): def notify(self, check): description = tmpl(\"victorops_description.html\", check=check) mtype", "class Matrix(HttpTransport): def get_url(self): s = quote(self.channel.value) url = settings.MATRIX_HOMESERVER", "check, urlencode=True) headers = {} for key, value in spec[\"headers\"].items():", "success, and error message on error. 
\"\"\" raise NotImplementedError() def", "\"From\": \"whatsapp:%s\" % settings.TWILIO_FROM, \"To\": \"whatsapp:%s\" % self.channel.sms_number, \"Body\": text,", "= \"https://api.telegram.org/bot%s/sendMessage\" % settings.TELEGRAM_TOKEN @classmethod def get_error(cls, response): try: return", "notify(self, check): params = { \"idList\": self.channel.trello_list_id, \"name\": tmpl(\"trello_name.html\", check=check),", "check.status == \"down\" and not self.channel.url_down: return True if check.status", "not enabled\" if check.status == \"up\": cmd = self.channel.cmd_up elif", "check=check), \"client\": settings.SITE_NAME, \"client_url\": settings.SITE_ROOT, \"tags\": \",\".join(check.tags_list()), } return self.post(url,", "\"title\": title, \"html\": 1, \"priority\": int(prio), } # Emergency notification", "Sms(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): return check.status !=", "user can configure webhook urls for \"up\" and \"down\" events,", "if check.status == \"up\" and not self.channel.cmd_up: return True return", "= self.prepare(spec[\"url\"], check, urlencode=True) headers = {} for key, value", "not verified\" unsub_link = self.channel.get_unsub_link() headers = { \"X-Bounce-Url\": bounce_url,", "elif spec[\"method\"] == \"POST\": return self.post(url, data=body.encode(), headers=headers) elif spec[\"method\"]", "url = self.channel.value headers = {\"Content-Type\": \"application/json\"} payload = {", "% check.code return self.post(url, json=payload, headers=headers) class PagerDuty(HttpTransport): URL =", "url, **kwargs): # Make 3 attempts-- for x in range(0,", "timezone.now(), \"unsub_link\": unsub_link, } emails.alert(self.channel.email_value, ctx, headers) def is_noop(self, check):", "check=check) payload = { \"service_key\": self.channel.pd_service_key, \"incident_key\": str(check.code), \"event_type\": \"trigger\"", "can configure webhook urls for \"up\" and \"down\" events, and", 
"tag return replace(template, ctx) def is_noop(self, check): if check.status ==", "notify(self, check, bounce_url): if not self.channel.email_verified: return \"Email not verified\"", "sort order is by check's creation time sort = \"created\"", "\"title\": settings.SITE_NAME, \"body\": text} return self.post(url, json=payload, headers=headers) class Pushover(HttpTransport):", "check.name, \"$TAGS\": check.tags, } for i, tag in enumerate(check.tags_list()): ctx[\"$TAG%d\"", "data=body.encode(), headers=headers) elif spec[\"method\"] == \"PUT\": return self.put(url, data=body.encode(), headers=headers)", "= tmpl(\"apprise_title.html\", check=check) body = tmpl(\"apprise_description.html\", check=check) a.add(self.channel.value) notify_type =", "\\xa0 is non-breaking space. It causes SMS messages to use", "notify(self, check): text = tmpl(\"slack_message.json\", check=check) payload = json.loads(text) url", "quote, urlencode from hc.accounts.models import Profile from hc.lib import emails", "= safe(tag) return replace(template, ctx) def is_noop(self, check): if check.status", "range(0, 3): error = cls._request(\"put\", url, **kwargs) if error is", "not self.channel.cmd_down: return True if check.status == \"up\" and not", "enabled return \"Apprise is disabled and/or not installed\" a =", "to use UCS2 encoding # and cost twice the money.", "return response.json().get(\"message\") except ValueError: pass def notify(self, check): headers =", "a specific error message in the # response and return", "prio == \"2\": payload[\"retry\"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY payload[\"expire\"] = settings.PUSHOVER_EMERGENCY_EXPIRATION return", "enabled\" if check.status == \"up\": cmd = self.channel.cmd_up elif check.status", "\"now\": timezone.now(), \"unsub_link\": unsub_link, } emails.alert(self.channel.email_value, ctx, headers) def is_noop(self,", "not self.channel.cmd_up: return True return False def notify(self, check): if", "timezone import json import requests 
from urllib.parse import quote, urlencode", "self.channel.whatsapp_notify_down else: return not self.channel.whatsapp_notify_up def notify(self, check): profile =", "money. return render_to_string(template_path, ctx).strip().replace(\"\\xa0\", \" \") class Transport(object): def __init__(self,", "check.status == \"up\": cmd = self.channel.cmd_up elif check.status == \"down\":", "def notify(self, check): description = tmpl(\"victorops_description.html\", check=check) mtype = \"CRITICAL\"", "commands are not enabled\" if check.status == \"up\": cmd =", "notify(self, check): spec = self.channel.webhook_spec(check.status) if not spec[\"url\"]: return \"Empty", "Telegram.send is a separate method because it is also used", "\"healthchecks.io\" r = requests.request(method, url, **options) if r.status_code not in", "email address p = Profile.objects.get(user__email=self.channel.email_value) sort = p.sort except Profile.DoesNotExist:", "message: \"{m}\"' return f\"Received status code {r.status_code}\" except requests.exceptions.Timeout: #", "@classmethod def get_error(cls, response): try: return response.json().get(\"message\") except ValueError: pass", "% code class HttpTransport(Transport): @classmethod def get_error(cls, response): # Override", "from urllib.parse import quote, urlencode from hc.accounts.models import Profile from", "notification about current status of the check. 
This method returns", "= os.system(cmd) if code != 0: return \"Command returned exit", "check.tags_list() payload[\"message\"] = tmpl(\"opsgenie_message.html\", check=check) payload[\"note\"] = tmpl(\"opsgenie_note.html\", check=check) payload[\"description\"]", "\"Monthly SMS limit exceeded\" url = self.URL % settings.TWILIO_ACCOUNT auth", "for i, tag in enumerate(check.tags_list()): ctx[\"$TAG%d\" % (i + 1)]", "headers=headers) class PagerTeam(HttpTransport): def notify(self, check): url = self.channel.value headers", "= tmpl(\"opsgenie_description.html\", check=check) url = \"https://api.opsgenie.com/v2/alerts\" if self.channel.opsgenie_region == \"eu\":", "template, check): \"\"\" Replace placeholders with actual values. \"\"\" ctx", "auth=auth) class WhatsApp(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): if", "body = tmpl(\"apprise_description.html\", check=check) a.add(self.channel.value) notify_type = ( apprise.NotifyType.SUCCESS if", "status code {r.status_code} with a message: \"{m}\"' return f\"Received status", "break return error @classmethod def put(cls, url, **kwargs): # Make", "if not TokenBucket.authorize_telegram(self.channel.telegram_id): return \"Rate limit exceeded\" text = tmpl(\"telegram_message.html\",", "check.status == \"down\": return not self.channel.whatsapp_notify_down else: return not self.channel.whatsapp_notify_up", "{\"Conent-Type\": \"application/json\"} payload = { \"incident_key\": str(check.code), \"event_type\": \"trigger\" if", "return f\"Received status code {r.status_code}\" except requests.exceptions.Timeout: # Well, we", "HipChat(HttpTransport): def is_noop(self, check): return True class OpsGenie(HttpTransport): @classmethod def", "check): description = tmpl(\"victorops_description.html\", check=check) mtype = \"CRITICAL\" if check.status", "def notify(self, check): \"\"\" Send notification about current status of", "# hc.front.views.telegram_bot to send 
invite links. return cls.post( cls.SM, json={\"chat_id\":", "is None: break return error class Webhook(HttpTransport): def prepare(self, template,", "return replace(template, ctx) def is_noop(self, check): if check.status == \"down\"", "apprise.NotifyType.SUCCESS if check.status == \"up\" else apprise.NotifyType.FAILURE ) return (", "\"$STATUS\": check.status, \"$NOW\": timezone.now().replace(microsecond=0).isoformat(), \"$NAME\": check.name, \"$TAGS\": check.tags, } for", "on success, and error message on error. \"\"\" raise NotImplementedError()", "VictorOps(HttpTransport): def notify(self, check): description = tmpl(\"victorops_description.html\", check=check) mtype =", "\"\"\" Replace placeholders with actual values. \"\"\" ctx = {", "failed\" @classmethod def get(cls, url, **kwargs): # Make 3 attempts--", "ctx).strip().replace(\"\\xa0\", \" \") class Transport(object): def __init__(self, channel): self.channel =", "if error is None: break return error @classmethod def put(cls,", "class PagerDuty(HttpTransport): URL = \"https://events.pagerduty.com/generic/2010-04-15/create_event.json\" def notify(self, check): description =", "class Shell(Transport): def prepare(self, template, check): \"\"\" Replace placeholders with", "limit exceeded\" url = self.URL % settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT,", "error message on error. \"\"\" raise NotImplementedError() def is_noop(self, check):", "Not supported and/or enabled return \"Apprise is disabled and/or not", "replace(template, ctx) def is_noop(self, check): if check.status == \"down\" and", "\"Connection failed\" @classmethod def get(cls, url, **kwargs): # Make 3", "is_noop(self, check): if check.status == \"down\" and not self.channel.cmd_down: return", "settings.TWILIO_AUTH) text = tmpl(\"sms_message.html\", check=check, site_name=settings.SITE_NAME) data = { \"From\":", "for \"up\" and \"down\" events, and both are optional. 
\"\"\"", "return True return False def notify(self, check): spec = self.channel.webhook_spec(check.status)", "= settings.MATRIX_HOMESERVER url += \"/_matrix/client/r0/rooms/%s/send/m.room.message?\" % s url += urlencode({\"access_token\":", "3): error = cls._request(\"post\", url, **kwargs) if error is None:", "urlencode else s ctx = { \"$CODE\": str(check.code), \"$STATUS\": check.status,", "self.channel.email_notify_down else: return not self.channel.email_notify_up class Shell(Transport): def prepare(self, template,", "site_name=settings.SITE_NAME) data = { \"From\": settings.TWILIO_FROM, \"To\": self.channel.sms_number, \"Body\": text,", "tmpl(\"pagertree_title.html\", check=check), \"description\": tmpl(\"pagertree_description.html\", check=check), \"client\": settings.SITE_NAME, \"client_url\": settings.SITE_ROOT, \"tags\":", "return not self.channel.whatsapp_notify_up def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if", "exceeded\" url = self.URL % settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH)", "True class OpsGenie(HttpTransport): @classmethod def get_error(cls, response): try: return response.json().get(\"message\")", "mtype, \"entity_display_name\": check.name_then_code(), \"state_message\": description, \"monitoring_tool\": settings.SITE_NAME, } return self.post(self.channel.value,", "json=payload) class Discord(HttpTransport): def notify(self, check): text = tmpl(\"slack_message.json\", check=check)", "error message in the # response and return it. 
return", "It causes SMS messages to use UCS2 encoding # and", "domain = self.channel.zulip_bot_email.split(\"@\") url = \"https://%s/api/v1/messages\" % domain auth =", "\"eu\": url = \"https://api.eu.opsgenie.com/v2/alerts\" if check.status == \"up\": url +=", "\"entity_id\": str(check.code), \"message_type\": mtype, \"entity_display_name\": check.name_then_code(), \"state_message\": description, \"monitoring_tool\": settings.SITE_NAME,", "= self.checks().filter(status=\"down\").exclude(code=check.code) # list() executes the query, to avoid DB", "in range(0, 3): error = cls._request(\"post\", url, **kwargs) if error", "check.code return self.post(url, json=payload, headers=headers) class PagerDuty(HttpTransport): URL = \"https://events.pagerduty.com/generic/2010-04-15/create_event.json\"", "priority for \"up\" events if len(pieces) == 3 and check.status", "i, tag in enumerate(check.tags_list()): ctx[\"$TAG%d\" % (i + 1)] =", "def notify(self, check): headers = { \"Conent-Type\": \"application/json\", \"Authorization\": \"GenieKey", "check's current status. This method is overridden in Webhook subclass", "= tmpl(\"opsgenie_note.html\", check=check) payload[\"description\"] = tmpl(\"opsgenie_description.html\", check=check) url = \"https://api.opsgenie.com/v2/alerts\"", "range(0, 3): error = cls._request(\"get\", url, **kwargs) if error is", "= (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"sms_message.html\", check=check, site_name=settings.SITE_NAME) data =", "we tried return \"Connection timed out\" except requests.exceptions.ConnectionError: return \"Connection", "return it. 
return None @classmethod def _request(cls, method, url, **kwargs):", "\") class Transport(object): def __init__(self, channel): self.channel = channel def", "= tmpl(\"slack_message.json\", check=check) payload = json.loads(text) url = self.channel.discord_webhook_url +", "\"$NOW\": timezone.now().replace(microsecond=0).isoformat(), \"$NAME\": check.name, \"$TAGS\": check.tags, } for i, tag", "== \"up\": url += \"/%s/close?identifierType=alias\" % check.code return self.post(url, json=payload,", "settings.APPRISE_ENABLED: # Not supported and/or enabled return \"Apprise is disabled", "TokenBucket if not TokenBucket.authorize_telegram(self.channel.telegram_id): return \"Rate limit exceeded\" text =", "text, } return self.post(url, data=data, auth=auth) class Trello(HttpTransport): URL =", "self.channel.opsgenie_region == \"eu\": url = \"https://api.eu.opsgenie.com/v2/alerts\" if check.status == \"up\":", "class HttpTransport(Transport): @classmethod def get_error(cls, response): # Override in subclasses:", "True return False def notify(self, check): spec = self.channel.webhook_spec(check.status) if", "while # rendering a template ctx = {\"check\": check, \"down_checks\":", "auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"whatsapp_message.html\", check=check, site_name=settings.SITE_NAME) data", "a template ctx = {\"check\": check, \"down_checks\": list(others)} text =", "# Not supported and/or enabled return \"Apprise is disabled and/or", "def put(cls, url, **kwargs): # Make 3 attempts-- for x", "channel def notify(self, check): \"\"\" Send notification about current status", "try: return response.json().get(\"msg\") except ValueError: pass def notify(self, check): _,", "\"{m}\"' return f\"Received status code {r.status_code}\" except requests.exceptions.Timeout: # Well,", "\"headers\" not in options: options[\"headers\"] = {} if \"User-Agent\" not", "events if len(pieces) == 3 and check.status == \"up\": prio", "\"https://%s/api/v1/messages\" % 
domain auth = (self.channel.zulip_bot_email, self.channel.zulip_api_key) data = {", "the check. This method returns None on success, and error", "exceeded\" text = tmpl(\"telegram_message.html\", check=check) return self.send(self.channel.telegram_id, text) class Sms(HttpTransport):", "= tag return replace(template, ctx) def is_noop(self, check): if check.status", "space. It causes SMS messages to use UCS2 encoding #", "UCS2 encoding # and cost twice the money. return render_to_string(template_path,", "break return error class Webhook(HttpTransport): def prepare(self, template, check, urlencode=False):", "= \"integrations/%s\" % template_name # \\xa0 is non-breaking space. It", "Emergency notification if prio == \"2\": payload[\"retry\"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY payload[\"expire\"]", "\"Apprise is disabled and/or not installed\" a = apprise.Apprise() title", "webhook URL\" url = self.prepare(spec[\"url\"], check, urlencode=True) headers = {}", "URL\" url = self.prepare(spec[\"url\"], check, urlencode=True) headers = {} for", "self.post(url, json=payload, headers=headers) class PagerTeam(HttpTransport): def notify(self, check): url =", "return self.post(url, data=data, auth=auth) class WhatsApp(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def", "\"Authorization\": \"GenieKey %s\" % self.channel.opsgenie_key, } payload = {\"alias\": str(check.code),", "is_noop(self, check): return check.status != \"down\" def notify(self, check): profile", "emails from hc.lib.string import replace try: import apprise except ImportError:", "% self.channel.opsgenie_key, } payload = {\"alias\": str(check.code), \"source\": settings.SITE_NAME} if", "url, **kwargs): try: options = dict(kwargs) options[\"timeout\"] = 5 if", "self.checks().filter(status=\"down\").exclude(code=check.code) # list() executes the query, to avoid DB access", "settings.TWILIO_FROM, \"To\": \"whatsapp:%s\" % self.channel.sms_number, \"Body\": text, } return 
self.post(url,", "= { \"type\": self.channel.zulip_type, \"to\": self.channel.zulip_to, \"topic\": tmpl(\"zulip_topic.html\", check=check), \"content\":", "== \"down\" else \"resolve\", \"title\": tmpl(\"pagertree_title.html\", check=check), \"description\": tmpl(\"pagertree_description.html\", check=check),", "is_noop(self, check): if check.status == \"down\": return not self.channel.whatsapp_notify_down else:", "True if check.status == \"down\": return not self.channel.email_notify_down else: return", "= {\"Content-Type\": \"application/json\"} payload = { \"incident_key\": str(check.code), \"event_type\": \"trigger\"", "data=data, auth=auth) class Trello(HttpTransport): URL = \"https://api.trello.com/1/cards\" def is_noop(self, check):", "NotImplementedError() def is_noop(self, check): \"\"\" Return True if transport will", "\"Connection timed out\" except requests.exceptions.ConnectionError: return \"Connection failed\" @classmethod def", "apprise.NotifyType.FAILURE ) return ( \"Failed\" if not a.notify(body=body, title=title, notify_type=notify_type)", "def notify(self, check): text = tmpl(\"pushbullet_message.html\", check=check) url = \"https://api.pushbullet.com/v2/pushes\"", "check) code = os.system(cmd) if code != 0: return \"Command", "safe(s): return quote(s) if urlencode else s ctx = {", "non-breaking space. 
It causes SMS messages to use UCS2 encoding", "url = self.URL % settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text", "self.channel.value headers = {\"Conent-Type\": \"application/json\"} payload = { \"incident_key\": str(check.code),", "text, \"title\": title, \"html\": 1, \"priority\": int(prio), } # Emergency", "separate method because it is also used in # hc.front.views.telegram_bot", "\"incident_key\": str(check.code), \"event_type\": \"trigger\" if check.status == \"down\" else \"resolve\",", "payload = { \"msgtype\": \"m.text\", \"body\": plain, \"format\": \"org.matrix.custom.html\", \"formatted_body\":", "if not settings.SHELL_ENABLED: return \"Shell commands are not enabled\" if", "(self.channel.zulip_bot_email, self.channel.zulip_api_key) data = { \"type\": self.channel.zulip_type, \"to\": self.channel.zulip_to, \"topic\":", "\"Empty webhook URL\" url = self.prepare(spec[\"url\"], check, urlencode=True) headers =", "not self.channel.email_verified: return True if check.status == \"down\": return not", "= self.channel.cmd_down cmd = self.prepare(cmd, check) code = os.system(cmd) if", "for x in range(0, 3): error = cls._request(\"post\", url, **kwargs)", "check.status == \"down\" else \"resolve\", \"title\": tmpl(\"pagertree_title.html\", check=check), \"description\": tmpl(\"pagertree_description.html\",", "self.post(self.channel.slack_webhook_url, json=payload) class HipChat(HttpTransport): def is_noop(self, check): return True class", "\"down\" and not self.channel.url_down: return True if check.status == \"up\"", "in # hc.front.views.telegram_bot to send invite links. 
return cls.post( cls.SM,", "title = tmpl(\"apprise_title.html\", check=check) body = tmpl(\"apprise_description.html\", check=check) a.add(self.channel.value) notify_type", "= self.channel.cmd_up elif check.status == \"down\": cmd = self.channel.cmd_down cmd", "else None ) class MsTeams(HttpTransport): def notify(self, check): text =", "False def notify(self, check): if not settings.SHELL_ENABLED: return \"Shell commands", "= {} for key, value in spec[\"headers\"].items(): headers[key] = self.prepare(value,", "self.channel.zulip_api_key) data = { \"type\": self.channel.zulip_type, \"to\": self.channel.zulip_to, \"topic\": tmpl(\"zulip_topic.html\",", "text = tmpl(\"slack_message.json\", check=check) payload = json.loads(text) url = self.channel.discord_webhook_url", "r = requests.request(method, url, **options) if r.status_code not in (200,", "tmpl(\"trello_desc.html\", check=check), \"key\": settings.TRELLO_APP_KEY, \"token\": self.channel.trello_token, } return self.post(self.URL, params=params)", "Default sort order is by check's creation time sort =", "\"List-Unsubscribe-Post\": \"List-Unsubscribe=One-Click\", } try: # Look up the sorting preference", "= \"https://%s/api/v1/messages\" % domain auth = (self.channel.zulip_bot_email, self.channel.zulip_api_key) data =", "check.status == \"up\" else apprise.NotifyType.FAILURE ) return ( \"Failed\" if", "self.URL % settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"whatsapp_message.html\",", "tmpl(\"msteams_message.json\", check=check) payload = json.loads(text) return self.post(self.channel.value, json=payload) class Zulip(HttpTransport):", "= { \"msgtype\": \"m.text\", \"body\": plain, \"format\": \"org.matrix.custom.html\", \"formatted_body\": formatted,", "django.template.loader import render_to_string from django.utils import timezone import json import", "p = Profile.objects.get(user__email=self.channel.email_value) sort = p.sort except Profile.DoesNotExist: # 
Default", "} emails.alert(self.channel.email_value, ctx, headers) def is_noop(self, check): if not self.channel.email_verified:", "params=params) class Apprise(HttpTransport): def notify(self, check): if not settings.APPRISE_ENABLED: #", "is disabled and/or not installed\" a = apprise.Apprise() title =", "check=check) url = \"https://api.pushbullet.com/v2/pushes\" headers = { \"Access-Token\": self.channel.value, \"Conent-Type\":", "json.loads(text) return self.post(self.channel.value, json=payload) class Zulip(HttpTransport): @classmethod def get_error(cls, response):", "for \"up\" events if len(pieces) == 3 and check.status ==", "because it is also used in # hc.front.views.telegram_bot to send", "options[\"headers\"] = {} if \"User-Agent\" not in options[\"headers\"]: options[\"headers\"][\"User-Agent\"] =", "urls for \"up\" and \"down\" events, and both are optional.", "on error. \"\"\" raise NotImplementedError() def is_noop(self, check): \"\"\" Return", "m = cls.get_error(r) if m: return f'Received status code {r.status_code}", "\"GenieKey %s\" % self.channel.opsgenie_key, } payload = {\"alias\": str(check.code), \"source\":", "chat_id, text): # Telegram.send is a separate method because it", "{ \"service_key\": self.channel.pd_service_key, \"incident_key\": str(check.code), \"event_type\": \"trigger\" if check.status ==", "= check.tags_list() payload[\"message\"] = tmpl(\"opsgenie_message.html\", check=check) payload[\"note\"] = tmpl(\"opsgenie_note.html\", check=check)", "# Make 3 attempts-- for x in range(0, 3): error", "Webhook subclass where the user can configure webhook urls for", "# Look up the sorting preference for this email address", "put(cls, url, **kwargs): # Make 3 attempts-- for x in", "settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"sms_message.html\", check=check, site_name=settings.SITE_NAME)", "events, and both are optional. 
\"\"\" return False def checks(self):", "options[\"headers\"]: options[\"headers\"][\"User-Agent\"] = \"healthchecks.io\" r = requests.request(method, url, **options) if", "\"state_message\": description, \"monitoring_tool\": settings.SITE_NAME, } return self.post(self.channel.value, json=payload) class Matrix(HttpTransport):", "webhook urls for \"up\" and \"down\" events, and both are", "def notify(self, check): url = self.channel.value headers = {\"Conent-Type\": \"application/json\"}", "return self.send(self.channel.telegram_id, text) class Sms(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self,", "check.status == \"down\" else \"RECOVERY\" payload = { \"entity_id\": str(check.code),", "placeholders with actual values. \"\"\" ctx = { \"$CODE\": str(check.code),", "not self.channel.email_verified: return \"Email not verified\" unsub_link = self.channel.get_unsub_link() headers", "body = spec[\"body\"] if body: body = self.prepare(body, check) if", "links. 
return cls.post( cls.SM, json={\"chat_id\": chat_id, \"text\": text, \"parse_mode\": \"html\"}", "{ \"entity_id\": str(check.code), \"message_type\": mtype, \"entity_display_name\": check.name_then_code(), \"state_message\": description, \"monitoring_tool\":", "auth = (self.channel.zulip_bot_email, self.channel.zulip_api_key) data = { \"type\": self.channel.zulip_type, \"to\":", "cls._request(\"get\", url, **kwargs) if error is None: break return error", "if not self.channel.email_verified: return True if check.status == \"down\": return", "+ 1)] = safe(tag) return replace(template, ctx) def is_noop(self, check):", "TokenBucket.authorize_telegram(self.channel.telegram_id): return \"Rate limit exceeded\" text = tmpl(\"telegram_message.html\", check=check) return", "self.channel.url_up: return True return False def notify(self, check): spec =", "if check.status == \"down\" else \"RECOVERY\" payload = { \"entity_id\":", "check): \"\"\" Send notification about current status of the check.", "if check.status == \"down\" and not self.channel.url_down: return True if", "\"resolve\", \"title\": tmpl(\"pagerteam_title.html\", check=check), \"description\": tmpl(\"pagerteam_description.html\", check=check), \"client\": settings.SITE_NAME, \"client_url\":", "settings.SITE_NAME} if check.status == \"down\": payload[\"tags\"] = check.tags_list() payload[\"message\"] =", "\"https://api.trello.com/1/cards\" def is_noop(self, check): return check.status != \"down\" def notify(self,", "get_error(cls, response): # Override in subclasses: look for a specific", "**kwargs) if error is None: break return error @classmethod def", "return self.post(self.channel.value, json=payload) class Zulip(HttpTransport): @classmethod def get_error(cls, response): try:", "import emails from hc.lib.string import replace try: import apprise except", "+= \"/%s/close?identifierType=alias\" % check.code return self.post(url, json=payload, headers=headers) class PagerDuty(HttpTransport):", "\"m.text\", 
\"body\": plain, \"format\": \"org.matrix.custom.html\", \"formatted_body\": formatted, } return self.post(self.get_url(),", "{ \"From\": settings.TWILIO_FROM, \"To\": self.channel.sms_number, \"Body\": text, } return self.post(url,", "\"2\": payload[\"retry\"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY payload[\"expire\"] = settings.PUSHOVER_EMERGENCY_EXPIRATION return self.post(self.URL, data=payload)", "_request(cls, method, url, **kwargs): try: options = dict(kwargs) options[\"timeout\"] =", "def notify(self, check): if not settings.APPRISE_ENABLED: # Not supported and/or", "template_name # \\xa0 is non-breaking space. It causes SMS messages", "return self.post(url, data=data, auth=auth) class Trello(HttpTransport): URL = \"https://api.trello.com/1/cards\" def", "\"trigger\" if check.status == \"down\" else \"resolve\", \"title\": tmpl(\"pagerteam_title.html\", check=check),", "error = cls._request(\"put\", url, **kwargs) if error is None: break", "import Profile from hc.lib import emails from hc.lib.string import replace", "\"User-Agent\" not in options[\"headers\"]: options[\"headers\"][\"User-Agent\"] = \"healthchecks.io\" r = requests.request(method,", "headers=headers) class Pushbullet(HttpTransport): def notify(self, check): text = tmpl(\"pushbullet_message.html\", check=check)", "if not spec[\"url\"]: return \"Empty webhook URL\" url = self.prepare(spec[\"url\"],", "This method returns None on success, and error message on", "check): plain = tmpl(\"matrix_description.html\", check=check) formatted = tmpl(\"matrix_description_formatted.html\", check=check) payload", "overridden in Webhook subclass where the user can configure webhook", "and not self.channel.url_up: return True return False def notify(self, check):", "is_noop(self, check): if check.status == \"down\" and not self.channel.url_down: return", "class HipChat(HttpTransport): def is_noop(self, check): return True class OpsGenie(HttpTransport): @classmethod", "return True if check.status == 
\"down\": return not self.channel.email_notify_down else:", "s url += urlencode({\"access_token\": settings.MATRIX_ACCESS_TOKEN}) return url def notify(self, check):", "return check.status != \"down\" def notify(self, check): params = {", "in enumerate(check.tags_list()): ctx[\"$TAG%d\" % (i + 1)] = tag return", "will ignore check's current status. This method is overridden in", "self.channel.zulip_to, \"topic\": tmpl(\"zulip_topic.html\", check=check), \"content\": tmpl(\"zulip_content.html\", check=check), } return self.post(url,", "emails.alert(self.channel.email_value, ctx, headers) def is_noop(self, check): if not self.channel.email_verified: return", "== \"down\" else \"resolve\", \"description\": description, \"client\": settings.SITE_NAME, \"client_url\": check.details_url(),", "\"POST\": return self.post(url, data=body.encode(), headers=headers) elif spec[\"method\"] == \"PUT\": return", "self.channel.email_verified: return \"Email not verified\" unsub_link = self.channel.get_unsub_link() headers =", "if check.status == \"down\" and not self.channel.cmd_down: return True if", "text, } return self.post(url, data=data, auth=auth) class WhatsApp(HttpTransport): URL =", "body: body = self.prepare(body, check) if spec[\"method\"] == \"GET\": return", "= { \"incident_key\": str(check.code), \"event_type\": \"trigger\" if check.status == \"down\"", "str(check.code), \"message_type\": mtype, \"entity_display_name\": check.name_then_code(), \"state_message\": description, \"monitoring_tool\": settings.SITE_NAME, }", "\"/slack\" return self.post(url, json=payload) class Telegram(HttpTransport): SM = \"https://api.telegram.org/bot%s/sendMessage\" %", "**ctx) title = tmpl(\"pushover_title.html\", **ctx) pieces = self.channel.value.split(\"|\") user_key, prio", "return not self.channel.email_notify_down else: return not self.channel.email_notify_up class Shell(Transport): def", "@classmethod def post(cls, url, **kwargs): # Make 3 attempts-- for", "None ) class 
MsTeams(HttpTransport): def notify(self, check): text = tmpl(\"msteams_message.json\",", "range(0, 3): error = cls._request(\"post\", url, **kwargs) if error is", "URL = \"https://api.trello.com/1/cards\" def is_noop(self, check): return check.status != \"down\"", "sort = p.sort except Profile.DoesNotExist: # Default sort order is", "class MsTeams(HttpTransport): def notify(self, check): text = tmpl(\"msteams_message.json\", check=check) payload", "plain = tmpl(\"matrix_description.html\", check=check) formatted = tmpl(\"matrix_description_formatted.html\", check=check) payload =", "headers = { \"X-Bounce-Url\": bounce_url, \"List-Unsubscribe\": \"<%s>\" % unsub_link, \"List-Unsubscribe-Post\":", "settings.TELEGRAM_TOKEN @classmethod def get_error(cls, response): try: return response.json().get(\"description\") except ValueError:", "if check.status == \"down\" else \"resolve\", \"title\": tmpl(\"pagertree_title.html\", check=check), \"description\":", "False def checks(self): return self.channel.project.check_set.order_by(\"created\") class Email(Transport): def notify(self, check,", "\"application/json\"} payload = { \"incident_key\": str(check.code), \"event_type\": \"trigger\" if check.status", "cls._request(\"put\", url, **kwargs) if error is None: break return error", "formatted = tmpl(\"matrix_description_formatted.html\", check=check) payload = { \"msgtype\": \"m.text\", \"body\":", "else \"resolve\", \"title\": tmpl(\"pagertree_title.html\", check=check), \"description\": tmpl(\"pagertree_description.html\", check=check), \"client\": settings.SITE_NAME,", "a template ctx = { \"check\": check, \"checks\": list(self.checks()), \"sort\":", "def safe(s): return quote(s) if urlencode else s ctx =", "= tmpl(\"matrix_description.html\", check=check) formatted = tmpl(\"matrix_description_formatted.html\", check=check) payload = {", "class Trello(HttpTransport): URL = \"https://api.trello.com/1/cards\" def is_noop(self, check): return check.status", "\"From\": 
settings.TWILIO_FROM, \"To\": self.channel.sms_number, \"Body\": text, } return self.post(url, data=data,", "notify(self, check): text = tmpl(\"pushbullet_message.html\", check=check) url = \"https://api.pushbullet.com/v2/pushes\" headers", "{ \"Conent-Type\": \"application/json\", \"Authorization\": \"GenieKey %s\" % self.channel.opsgenie_key, } payload", "\"tags\": \",\".join(check.tags_list()), } return self.post(url, json=payload, headers=headers) class Pushbullet(HttpTransport): def", "class Apprise(HttpTransport): def notify(self, check): if not settings.APPRISE_ENABLED: # Not", "{r.status_code}\" except requests.exceptions.Timeout: # Well, we tried return \"Connection timed", "} return self.post(url, json=payload, headers=headers) class Pushbullet(HttpTransport): def notify(self, check):", "payload[\"retry\"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY payload[\"expire\"] = settings.PUSHOVER_EMERGENCY_EXPIRATION return self.post(self.URL, data=payload) class", "= tmpl(\"victorops_description.html\", check=check) mtype = \"CRITICAL\" if check.status == \"down\"", "check.status == \"up\": url += \"/%s/close?identifierType=alias\" % check.code return self.post(url,", "\",\".join(check.tags_list()), } return self.post(url, json=payload, headers=headers) class Pushbullet(HttpTransport): def notify(self,", "self.channel.value, \"Conent-Type\": \"application/json\", } payload = {\"type\": \"note\", \"title\": settings.SITE_NAME,", "check): text = tmpl(\"pushbullet_message.html\", check=check) url = \"https://api.pushbullet.com/v2/pushes\" headers =", "\"$NAME\": safe(check.name), \"$TAGS\": safe(check.tags), } for i, tag in enumerate(check.tags_list()):", "\"$NOW\": safe(timezone.now().replace(microsecond=0).isoformat()), \"$NAME\": safe(check.name), \"$TAGS\": safe(check.tags), } for i, tag", "URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): if check.status == \"down\":", "spec[\"url\"]: return \"Empty webhook URL\" url 
= self.prepare(spec[\"url\"], check, urlencode=True)", "{ \"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\": safe(timezone.now().replace(microsecond=0).isoformat()), \"$NAME\": safe(check.name), \"$TAGS\":", "check=check) url = \"https://api.opsgenie.com/v2/alerts\" if self.channel.opsgenie_region == \"eu\": url =", "timezone.now().replace(microsecond=0).isoformat(), \"$NAME\": check.name, \"$TAGS\": check.tags, } for i, tag in", "\"service_key\": self.channel.pd_service_key, \"incident_key\": str(check.code), \"event_type\": \"trigger\" if check.status == \"down\"", "options = dict(kwargs) options[\"timeout\"] = 5 if \"headers\" not in", "== \"up\" and not self.channel.cmd_up: return True return False def", "error is None: break return error class Webhook(HttpTransport): def prepare(self,", "look for a specific error message in the # response", "def notify(self, check): description = tmpl(\"pd_description.html\", check=check) payload = {", "if prio == \"2\": payload[\"retry\"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY payload[\"expire\"] = settings.PUSHOVER_EMERGENCY_EXPIRATION", "check=check) mtype = \"CRITICAL\" if check.status == \"down\" else \"RECOVERY\"", "self.post(self.channel.value, json=payload) class Zulip(HttpTransport): @classmethod def get_error(cls, response): try: return", "headers = {\"Conent-Type\": \"application/json\"} payload = { \"incident_key\": str(check.code), \"event_type\":", "payload = {\"alias\": str(check.code), \"source\": settings.SITE_NAME} if check.status == \"down\":", "{ \"idList\": self.channel.trello_list_id, \"name\": tmpl(\"trello_name.html\", check=check), \"desc\": tmpl(\"trello_desc.html\", check=check), \"key\":", "template ctx = {\"check\": check, \"down_checks\": list(others)} text = tmpl(\"pushover_message.html\",", "check): url = self.channel.value headers = {\"Conent-Type\": \"application/json\"} payload =", "hc.front.views.telegram_bot to send invite links. 
return cls.post( cls.SM, json={\"chat_id\": chat_id,", "else apprise.NotifyType.FAILURE ) return ( \"Failed\" if not a.notify(body=body, title=title,", "url = \"https://api.pushbullet.com/v2/pushes\" headers = { \"Access-Token\": self.channel.value, \"Conent-Type\": \"application/json\",", "+= urlencode({\"access_token\": settings.MATRIX_ACCESS_TOKEN}) return url def notify(self, check): plain =", "cmd = self.channel.cmd_down cmd = self.prepare(cmd, check) code = os.system(cmd)", "cost twice the money. return render_to_string(template_path, ctx).strip().replace(\"\\xa0\", \" \") class", "check.status == \"down\" and not self.channel.cmd_down: return True if check.status", "get_error(cls, response): try: return response.json().get(\"msg\") except ValueError: pass def notify(self,", "# list() executes the query, to avoid DB access while", "\"event_type\": \"trigger\" if check.status == \"down\" else \"resolve\", \"title\": tmpl(\"pagerteam_title.html\",", "= cls._request(\"post\", url, **kwargs) if error is None: break return", "render_to_string from django.utils import timezone import json import requests from", "and error message on error. 
\"\"\" raise NotImplementedError() def is_noop(self,", "\"monitoring_tool\": settings.SITE_NAME, } return self.post(self.channel.value, json=payload) class Matrix(HttpTransport): def get_url(self):", "\"\"\" def safe(s): return quote(s) if urlencode else s ctx", "== \"eu\": url = \"https://api.eu.opsgenie.com/v2/alerts\" if check.status == \"up\": url", "tag in enumerate(check.tags_list()): ctx[\"$TAG%d\" % (i + 1)] = tag", "the priority for \"up\" events if len(pieces) == 3 and", "notify(self, check): url = self.channel.value headers = {\"Content-Type\": \"application/json\"} payload", "return \"Shell commands are not enabled\" if check.status == \"up\":", "payload = { \"token\": settings.PUSHOVER_API_TOKEN, \"user\": user_key, \"message\": text, \"title\":", "notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"SMS\") return", "text = tmpl(\"msteams_message.json\", check=check) payload = json.loads(text) return self.post(self.channel.value, json=payload)", "\"description\": description, \"client\": settings.SITE_NAME, \"client_url\": check.details_url(), } return self.post(self.URL, json=payload)", "\"down\": payload[\"tags\"] = check.tags_list() payload[\"message\"] = tmpl(\"opsgenie_message.html\", check=check) payload[\"note\"] =", "and check.status == \"up\": prio = pieces[2] payload = {", "self.channel.value.split(\"|\") user_key, prio = pieces[0], pieces[1] # The third element,", "tmpl(\"pd_description.html\", check=check) payload = { \"service_key\": self.channel.pd_service_key, \"incident_key\": str(check.code), \"event_type\":", "description = tmpl(\"pd_description.html\", check=check) payload = { \"service_key\": self.channel.pd_service_key, \"incident_key\":", "@classmethod def send(cls, chat_id, text): # Telegram.send is a separate", "headers = { \"Conent-Type\": \"application/json\", \"Authorization\": \"GenieKey %s\" % self.channel.opsgenie_key,", 
"notify(self, check): others = self.checks().filter(status=\"down\").exclude(code=check.code) # list() executes the query,", "== \"down\": return not self.channel.whatsapp_notify_down else: return not self.channel.whatsapp_notify_up def", "+ 1)] = tag return replace(template, ctx) def is_noop(self, check):", "check.name_then_code(), \"state_message\": description, \"monitoring_tool\": settings.SITE_NAME, } return self.post(self.channel.value, json=payload) class", "= (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"whatsapp_message.html\", check=check, site_name=settings.SITE_NAME) data =", "self.post(url, json=payload) class Telegram(HttpTransport): SM = \"https://api.telegram.org/bot%s/sendMessage\" % settings.TELEGRAM_TOKEN @classmethod", "self.post(url, data=body.encode(), headers=headers) elif spec[\"method\"] == \"PUT\": return self.put(url, data=body.encode(),", "disabled and/or not installed\" a = apprise.Apprise() title = tmpl(\"apprise_title.html\",", "= \"https://api.trello.com/1/cards\" def is_noop(self, check): return check.status != \"down\" def", "pieces = self.channel.value.split(\"|\") user_key, prio = pieces[0], pieces[1] # The", "def prepare(self, template, check): \"\"\" Replace placeholders with actual values.", "not a.notify(body=body, title=title, notify_type=notify_type) else None ) class MsTeams(HttpTransport): def", "\"token\": self.channel.trello_token, } return self.post(self.URL, params=params) class Apprise(HttpTransport): def notify(self,", "url = self.channel.discord_webhook_url + \"/slack\" return self.post(url, json=payload) class Telegram(HttpTransport):", "elif check.status == \"down\": cmd = self.channel.cmd_down cmd = self.prepare(cmd,", "try: options = dict(kwargs) options[\"timeout\"] = 5 if \"headers\" not", "prio = pieces[0], pieces[1] # The third element, if present,", "Make 3 attempts-- for x in range(0, 3): error =", "cls.post( cls.SM, json={\"chat_id\": chat_id, \"text\": text, \"parse_mode\": \"html\"} ) 
def", "check=check) return self.send(self.channel.telegram_id, text) class Sms(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def", "in range(0, 3): error = cls._request(\"get\", url, **kwargs) if error", "url = settings.MATRIX_HOMESERVER url += \"/_matrix/client/r0/rooms/%s/send/m.room.message?\" % s url +=", "access while # rendering a template ctx = {\"check\": check,", "\"message_type\": mtype, \"entity_display_name\": check.name_then_code(), \"state_message\": description, \"monitoring_tool\": settings.SITE_NAME, } return", "\"https://api.pushbullet.com/v2/pushes\" headers = { \"Access-Token\": self.channel.value, \"Conent-Type\": \"application/json\", } payload", "auth=auth) class Trello(HttpTransport): URL = \"https://api.trello.com/1/cards\" def is_noop(self, check): return", "def __init__(self, channel): self.channel = channel def notify(self, check): \"\"\"", "return error @classmethod def post(cls, url, **kwargs): # Make 3", "preference for this email address p = Profile.objects.get(user__email=self.channel.email_value) sort =", "def notify(self, check): if not settings.SHELL_ENABLED: return \"Shell commands are", "else s ctx = { \"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\":", "try: # Look up the sorting preference for this email", "json import requests from urllib.parse import quote, urlencode from hc.accounts.models", "tmpl(\"apprise_description.html\", check=check) a.add(self.channel.value) notify_type = ( apprise.NotifyType.SUCCESS if check.status ==", "Zulip(HttpTransport): @classmethod def get_error(cls, response): try: return response.json().get(\"msg\") except ValueError:", "address p = Profile.objects.get(user__email=self.channel.email_value) sort = p.sort except Profile.DoesNotExist: #", "and not self.channel.cmd_down: return True if check.status == \"up\" and", "code != 0: return \"Command returned exit code %d\" %", "\"entity_display_name\": check.name_then_code(), \"state_message\": 
description, \"monitoring_tool\": settings.SITE_NAME, } return self.post(self.channel.value, json=payload)", "class Zulip(HttpTransport): @classmethod def get_error(cls, response): try: return response.json().get(\"msg\") except", "%s\" % self.channel.opsgenie_key, } payload = {\"alias\": str(check.code), \"source\": settings.SITE_NAME}", "def is_noop(self, check): return True class OpsGenie(HttpTransport): @classmethod def get_error(cls,", "def is_noop(self, check): if check.status == \"down\" and not self.channel.url_down:", "in spec[\"headers\"].items(): headers[key] = self.prepare(value, check) body = spec[\"body\"] if", "data = { \"From\": \"whatsapp:%s\" % settings.TWILIO_FROM, \"To\": \"whatsapp:%s\" %", "in (200, 201, 202, 204): m = cls.get_error(r) if m:", "configure webhook urls for \"up\" and \"down\" events, and both", "\"up\" else apprise.NotifyType.FAILURE ) return ( \"Failed\" if not a.notify(body=body,", "self.post(url, json=payload, headers=headers) class Pushbullet(HttpTransport): def notify(self, check): text =", "def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"WhatsApp\")", "used in # hc.front.views.telegram_bot to send invite links. 
return cls.post(", "from django.template.loader import render_to_string from django.utils import timezone import json", "error is None: break return error @classmethod def put(cls, url,", "__init__(self, channel): self.channel = channel def notify(self, check): \"\"\" Send", "\"desc\": tmpl(\"trello_desc.html\", check=check), \"key\": settings.TRELLO_APP_KEY, \"token\": self.channel.trello_token, } return self.post(self.URL,", "settings.PUSHOVER_EMERGENCY_RETRY_DELAY payload[\"expire\"] = settings.PUSHOVER_EMERGENCY_EXPIRATION return self.post(self.URL, data=payload) class VictorOps(HttpTransport): def", "= self.prepare(cmd, check) code = os.system(cmd) if code != 0:", "pieces[0], pieces[1] # The third element, if present, is the", "Discord(HttpTransport): def notify(self, check): text = tmpl(\"slack_message.json\", check=check) payload =", "in options[\"headers\"]: options[\"headers\"][\"User-Agent\"] = \"healthchecks.io\" r = requests.request(method, url, **options)", "\"up\": prio = pieces[2] payload = { \"token\": settings.PUSHOVER_API_TOKEN, \"user\":", "notify(self, check): description = tmpl(\"pd_description.html\", check=check) payload = { \"service_key\":", "URL = \"https://events.pagerduty.com/generic/2010-04-15/create_event.json\" def notify(self, check): description = tmpl(\"pd_description.html\", check=check)", "\"name\": tmpl(\"trello_name.html\", check=check), \"desc\": tmpl(\"trello_desc.html\", check=check), \"key\": settings.TRELLO_APP_KEY, \"token\": self.channel.trello_token,", "Apprise(HttpTransport): def notify(self, check): if not settings.APPRISE_ENABLED: # Not supported", "json=payload, headers=headers) class Pushover(HttpTransport): URL = \"https://api.pushover.net/1/messages.json\" def notify(self, check):", "self.channel.url_down: return True if check.status == \"up\" and not self.channel.url_up:", "= tmpl(\"slack_message.json\", check=check) payload = json.loads(text) return self.post(self.channel.slack_webhook_url, json=payload) 
class", "list() executes the query, to avoid DB access while #", "== \"GET\": return self.get(url, headers=headers) elif spec[\"method\"] == \"POST\": return", "use UCS2 encoding # and cost twice the money. return", "= self.URL % settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text =", "\"key\": settings.TRELLO_APP_KEY, \"token\": self.channel.trello_token, } return self.post(self.URL, params=params) class Apprise(HttpTransport):", ") return ( \"Failed\" if not a.notify(body=body, title=title, notify_type=notify_type) else", "(settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"sms_message.html\", check=check, site_name=settings.SITE_NAME) data = {", "if check.status == \"up\" else apprise.NotifyType.FAILURE ) return ( \"Failed\"", "tmpl(\"opsgenie_message.html\", check=check) payload[\"note\"] = tmpl(\"opsgenie_note.html\", check=check) payload[\"description\"] = tmpl(\"opsgenie_description.html\", check=check)", "\"https://api.opsgenie.com/v2/alerts\" if self.channel.opsgenie_region == \"eu\": url = \"https://api.eu.opsgenie.com/v2/alerts\" if check.status", "creation time sort = \"created\" # list() executes the query,", "\"parse_mode\": \"html\"} ) def notify(self, check): from hc.api.models import TokenBucket", "the query, to avoid DB access while # rendering a", "tmpl(\"opsgenie_note.html\", check=check) payload[\"description\"] = tmpl(\"opsgenie_description.html\", check=check) url = \"https://api.opsgenie.com/v2/alerts\" if", "check): headers = { \"Conent-Type\": \"application/json\", \"Authorization\": \"GenieKey %s\" %", "from hc.api.models import TokenBucket if not TokenBucket.authorize_telegram(self.channel.telegram_id): return \"Rate limit", "spec[\"method\"] == \"POST\": return self.post(url, data=body.encode(), headers=headers) elif spec[\"method\"] ==", "Email(Transport): def notify(self, check, bounce_url): if not self.channel.email_verified: return \"Email", "urllib.parse import quote, urlencode from 
hc.accounts.models import Profile from hc.lib", "return \"Command returned exit code %d\" % code class HttpTransport(Transport):", "not in options[\"headers\"]: options[\"headers\"][\"User-Agent\"] = \"healthchecks.io\" r = requests.request(method, url,", "\"https://api.telegram.org/bot%s/sendMessage\" % settings.TELEGRAM_TOKEN @classmethod def get_error(cls, response): try: return response.json().get(\"description\")", "def get_error(cls, response): try: return response.json().get(\"description\") except ValueError: pass @classmethod", "up the sorting preference for this email address p =", "notify(self, check): plain = tmpl(\"matrix_description.html\", check=check) formatted = tmpl(\"matrix_description_formatted.html\", check=check)", "subclass where the user can configure webhook urls for \"up\"", "== \"up\" else apprise.NotifyType.FAILURE ) return ( \"Failed\" if not", "try: import apprise except ImportError: # Enforce settings.APPRISE_ENABLED = False", "{ \"token\": settings.PUSHOVER_API_TOKEN, \"user\": user_key, \"message\": text, \"title\": title, \"html\":", "check) if spec[\"method\"] == \"GET\": return self.get(url, headers=headers) elif spec[\"method\"]", "not installed\" a = apprise.Apprise() title = tmpl(\"apprise_title.html\", check=check) body", "} return self.post(self.URL, params=params) class Apprise(HttpTransport): def notify(self, check): if", "response.json().get(\"message\") except ValueError: pass def notify(self, check): headers = {", "return \"Rate limit exceeded\" text = tmpl(\"telegram_message.html\", check=check) return self.send(self.channel.telegram_id,", "\"Command returned exit code %d\" % code class HttpTransport(Transport): @classmethod", "**kwargs) if error is None: break return error class Webhook(HttpTransport):", "method is overridden in Webhook subclass where the user can", "\",\".join(check.tags_list()), } return self.post(url, json=payload, headers=headers) class PagerTeam(HttpTransport): def notify(self,", "current status. 
This method is overridden in Webhook subclass where", "class PagerTeam(HttpTransport): def notify(self, check): url = self.channel.value headers =", "settings.SITE_ROOT, \"tags\": \",\".join(check.tags_list()), } return self.post(url, json=payload, headers=headers) class Pushbullet(HttpTransport):", "class Slack(HttpTransport): def notify(self, check): text = tmpl(\"slack_message.json\", check=check) payload", "== \"down\": return not self.channel.email_notify_down else: return not self.channel.email_notify_up class", "import timezone import json import requests from urllib.parse import quote,", "tmpl(\"slack_message.json\", check=check) payload = json.loads(text) return self.post(self.channel.slack_webhook_url, json=payload) class HipChat(HttpTransport):", "= self.channel.zulip_bot_email.split(\"@\") url = \"https://%s/api/v1/messages\" % domain auth = (self.channel.zulip_bot_email,", "hc.lib.string import replace try: import apprise except ImportError: # Enforce", "avoid DB access while # rendering a template ctx =", "domain auth = (self.channel.zulip_bot_email, self.channel.zulip_api_key) data = { \"type\": self.channel.zulip_type,", "except ValueError: pass def notify(self, check): headers = { \"Conent-Type\":", "\"sort\": sort, \"now\": timezone.now(), \"unsub_link\": unsub_link, } emails.alert(self.channel.email_value, ctx, headers)", "\"\"\" ctx = { \"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\": timezone.now().replace(microsecond=0).isoformat(),", "str(check.code), \"event_type\": \"trigger\" if check.status == \"down\" else \"resolve\", \"description\":", "\"resolve\", \"title\": tmpl(\"pagertree_title.html\", check=check), \"description\": tmpl(\"pagertree_description.html\", check=check), \"client\": settings.SITE_NAME, \"client_url\":", "access while # rendering a template ctx = { \"check\":", "tmpl(\"pagerteam_description.html\", check=check), \"client\": settings.SITE_NAME, \"client_url\": settings.SITE_ROOT, \"tags\": 
\",\".join(check.tags_list()), } return", "self.channel.get_unsub_link() headers = { \"X-Bounce-Url\": bounce_url, \"List-Unsubscribe\": \"<%s>\" % unsub_link,", "in options: options[\"headers\"] = {} if \"User-Agent\" not in options[\"headers\"]:", "= self.channel.value.split(\"|\") user_key, prio = pieces[0], pieces[1] # The third", "url def notify(self, check): plain = tmpl(\"matrix_description.html\", check=check) formatted =", "headers = { \"Access-Token\": self.channel.value, \"Conent-Type\": \"application/json\", } payload =", "DB access while # rendering a template ctx = {", "description, \"client\": settings.SITE_NAME, \"client_url\": check.details_url(), } return self.post(self.URL, json=payload) class", "<reponame>MaxwellDPS/healthchecks import os from django.conf import settings from django.template.loader import", "= tmpl(\"apprise_description.html\", check=check) a.add(self.channel.value) notify_type = ( apprise.NotifyType.SUCCESS if check.status", "error @classmethod def post(cls, url, **kwargs): # Make 3 attempts--", "1)] = safe(tag) return replace(template, ctx) def is_noop(self, check): if", "\"text\": text, \"parse_mode\": \"html\"} ) def notify(self, check): from hc.api.models", "\"to\": self.channel.zulip_to, \"topic\": tmpl(\"zulip_topic.html\", check=check), \"content\": tmpl(\"zulip_content.html\", check=check), } return", "self.channel.cmd_down cmd = self.prepare(cmd, check) code = os.system(cmd) if code", "text = tmpl(\"pushbullet_message.html\", check=check) url = \"https://api.pushbullet.com/v2/pushes\" headers = {", "code %d\" % code class HttpTransport(Transport): @classmethod def get_error(cls, response):", "\"down\" def notify(self, check): params = { \"idList\": self.channel.trello_list_id, \"name\":", "twice the money. 
return render_to_string(template_path, ctx).strip().replace(\"\\xa0\", \" \") class Transport(object):", "\"html\"} ) def notify(self, check): from hc.api.models import TokenBucket if", "} # Emergency notification if prio == \"2\": payload[\"retry\"] =", "= channel def notify(self, check): \"\"\" Send notification about current", "django.conf import settings from django.template.loader import render_to_string from django.utils import", "settings from django.template.loader import render_to_string from django.utils import timezone import", "if check.status == \"down\": payload[\"tags\"] = check.tags_list() payload[\"message\"] = tmpl(\"opsgenie_message.html\",", "# Override in subclasses: look for a specific error message", "def notify(self, check): text = tmpl(\"slack_message.json\", check=check) payload = json.loads(text)", "\"title\": tmpl(\"pagerteam_title.html\", check=check), \"description\": tmpl(\"pagerteam_description.html\", check=check), \"client\": settings.SITE_NAME, \"client_url\": settings.SITE_ROOT,", "list(others)} text = tmpl(\"pushover_message.html\", **ctx) title = tmpl(\"pushover_title.html\", **ctx) pieces", "data=body.encode(), headers=headers) class Slack(HttpTransport): def notify(self, check): text = tmpl(\"slack_message.json\",", "self.get(url, headers=headers) elif spec[\"method\"] == \"POST\": return self.post(url, data=body.encode(), headers=headers)", "raise NotImplementedError() def is_noop(self, check): \"\"\" Return True if transport", "def post(cls, url, **kwargs): # Make 3 attempts-- for x", "check=check) payload = json.loads(text) return self.post(self.channel.value, json=payload) class Zulip(HttpTransport): @classmethod", "error. 
\"\"\" raise NotImplementedError() def is_noop(self, check): \"\"\" Return True", "data=data, auth=auth) class WhatsApp(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check):", "headers=headers) class PagerDuty(HttpTransport): URL = \"https://events.pagerduty.com/generic/2010-04-15/create_event.json\" def notify(self, check): description", "payload = {\"type\": \"note\", \"title\": settings.SITE_NAME, \"body\": text} return self.post(url,", "\"/_matrix/client/r0/rooms/%s/send/m.room.message?\" % s url += urlencode({\"access_token\": settings.MATRIX_ACCESS_TOKEN}) return url def", "(200, 201, 202, 204): m = cls.get_error(r) if m: return", "Webhook(HttpTransport): def prepare(self, template, check, urlencode=False): \"\"\" Replace variables with", "return self.post(url, json=payload, headers=headers) class Pushover(HttpTransport): URL = \"https://api.pushover.net/1/messages.json\" def", "= self.prepare(body, check) if spec[\"method\"] == \"GET\": return self.get(url, headers=headers)", "\"X-Bounce-Url\": bounce_url, \"List-Unsubscribe\": \"<%s>\" % unsub_link, \"List-Unsubscribe-Post\": \"List-Unsubscribe=One-Click\", } try:", "check, \"checks\": list(self.checks()), \"sort\": sort, \"now\": timezone.now(), \"unsub_link\": unsub_link, }", "s ctx = { \"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\": safe(timezone.now().replace(microsecond=0).isoformat()),", "None on success, and error message on error. 
\"\"\" raise", "is_noop(self, check): return check.status != \"down\" def notify(self, check): params", "= { \"idList\": self.channel.trello_list_id, \"name\": tmpl(\"trello_name.html\", check=check), \"desc\": tmpl(\"trello_desc.html\", check=check),", "import requests from urllib.parse import quote, urlencode from hc.accounts.models import", "Transport(object): def __init__(self, channel): self.channel = channel def notify(self, check):", "\"\"\" Return True if transport will ignore check's current status.", "if \"headers\" not in options: options[\"headers\"] = {} if \"User-Agent\"", "== \"PUT\": return self.put(url, data=body.encode(), headers=headers) class Slack(HttpTransport): def notify(self,", "template_path = \"integrations/%s\" % template_name # \\xa0 is non-breaking space.", "not spec[\"url\"]: return \"Empty webhook URL\" url = self.prepare(spec[\"url\"], check,", "(settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"whatsapp_message.html\", check=check, site_name=settings.SITE_NAME) data = {", "self.channel = channel def notify(self, check): \"\"\" Send notification about", "return self.post(self.URL, json=payload) class PagerTree(HttpTransport): def notify(self, check): url =", "profile.authorize_sms(): profile.send_sms_limit_notice(\"WhatsApp\") return \"Monthly message limit exceeded\" url = self.URL", "check): if check.status == \"down\" and not self.channel.cmd_down: return True", "unsub_link, \"List-Unsubscribe-Post\": \"List-Unsubscribe=One-Click\", } try: # Look up the sorting", "\"up\": url += \"/%s/close?identifierType=alias\" % check.code return self.post(url, json=payload, headers=headers)", "Look up the sorting preference for this email address p", "class OpsGenie(HttpTransport): @classmethod def get_error(cls, response): try: return response.json().get(\"message\") except", "settings.PUSHOVER_EMERGENCY_EXPIRATION return self.post(self.URL, data=payload) class VictorOps(HttpTransport): def notify(self, check): description", 
"current status of the check. This method returns None on", "if error is None: break return error @classmethod def post(cls,", "\"CRITICAL\" if check.status == \"down\" else \"RECOVERY\" payload = {", "return not self.channel.whatsapp_notify_down else: return not self.channel.whatsapp_notify_up def notify(self, check):", "== 3 and check.status == \"up\": prio = pieces[2] payload", "in the # response and return it. return None @classmethod", "with actual values. \"\"\" ctx = { \"$CODE\": str(check.code), \"$STATUS\":", "cls.SM, json={\"chat_id\": chat_id, \"text\": text, \"parse_mode\": \"html\"} ) def notify(self,", "self.post(self.URL, json=payload) class PagerTree(HttpTransport): def notify(self, check): url = self.channel.value", "is None: break return error @classmethod def post(cls, url, **kwargs):", "are optional. \"\"\" return False def checks(self): return self.channel.project.check_set.order_by(\"created\") class", "try: return response.json().get(\"message\") except ValueError: pass def notify(self, check): headers", "tag in enumerate(check.tags_list()): ctx[\"$TAG%d\" % (i + 1)] = safe(tag)", "not self.channel.email_notify_down else: return not self.channel.email_notify_up class Shell(Transport): def prepare(self,", "tmpl(\"pushover_message.html\", **ctx) title = tmpl(\"pushover_title.html\", **ctx) pieces = self.channel.value.split(\"|\") user_key,", "\"Monthly message limit exceeded\" url = self.URL % settings.TWILIO_ACCOUNT auth", "class Discord(HttpTransport): def notify(self, check): text = tmpl(\"slack_message.json\", check=check) payload", "cmd = self.prepare(cmd, check) code = os.system(cmd) if code !=", "exit code %d\" % code class HttpTransport(Transport): @classmethod def get_error(cls,", "class Email(Transport): def notify(self, check, bounce_url): if not self.channel.email_verified: return", "break return error @classmethod def post(cls, url, **kwargs): # Make", "notify(self, check): headers = { \"Conent-Type\": \"application/json\", 
\"Authorization\": \"GenieKey %s\"", "Pushover(HttpTransport): URL = \"https://api.pushover.net/1/messages.json\" def notify(self, check): others = self.checks().filter(status=\"down\").exclude(code=check.code)", "causes SMS messages to use UCS2 encoding # and cost", "notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"WhatsApp\") return", "message limit exceeded\" url = self.URL % settings.TWILIO_ACCOUNT auth =", "cls._request(\"post\", url, **kwargs) if error is None: break return error", "return response.json().get(\"description\") except ValueError: pass @classmethod def send(cls, chat_id, text):", "202, 204): m = cls.get_error(r) if m: return f'Received status", "\"PUT\": return self.put(url, data=body.encode(), headers=headers) class Slack(HttpTransport): def notify(self, check):", "Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"SMS\") return \"Monthly SMS limit exceeded\"", "{ \"msgtype\": \"m.text\", \"body\": plain, \"format\": \"org.matrix.custom.html\", \"formatted_body\": formatted, }", "= {\"alias\": str(check.code), \"source\": settings.SITE_NAME} if check.status == \"down\": payload[\"tags\"]", "HttpTransport(Transport): @classmethod def get_error(cls, response): # Override in subclasses: look", "and \"down\" events, and both are optional. 
\"\"\" return False", "payload[\"description\"] = tmpl(\"opsgenie_description.html\", check=check) url = \"https://api.opsgenie.com/v2/alerts\" if self.channel.opsgenie_region ==", "others = self.checks().filter(status=\"down\").exclude(code=check.code) # list() executes the query, to avoid", "present, is the priority for \"up\" events if len(pieces) ==", "check): url = self.channel.value headers = {\"Content-Type\": \"application/json\"} payload =", "= tmpl(\"matrix_description_formatted.html\", check=check) payload = { \"msgtype\": \"m.text\", \"body\": plain,", "= quote(self.channel.value) url = settings.MATRIX_HOMESERVER url += \"/_matrix/client/r0/rooms/%s/send/m.room.message?\" % s", "{} for key, value in spec[\"headers\"].items(): headers[key] = self.prepare(value, check)", "def get_error(cls, response): try: return response.json().get(\"message\") except ValueError: pass def", "% unsub_link, \"List-Unsubscribe-Post\": \"List-Unsubscribe=One-Click\", } try: # Look up the", "**options) if r.status_code not in (200, 201, 202, 204): m", "str(check.code), \"$STATUS\": check.status, \"$NOW\": safe(timezone.now().replace(microsecond=0).isoformat()), \"$NAME\": safe(check.name), \"$TAGS\": safe(check.tags), }", "headers=headers) elif spec[\"method\"] == \"POST\": return self.post(url, data=body.encode(), headers=headers) elif", "\"down_checks\": list(others)} text = tmpl(\"pushover_message.html\", **ctx) title = tmpl(\"pushover_title.html\", **ctx)", "response.json().get(\"msg\") except ValueError: pass def notify(self, check): _, domain =", "except ValueError: pass @classmethod def send(cls, chat_id, text): # Telegram.send", "def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"SMS\")", "\"up\": cmd = self.channel.cmd_up elif check.status == \"down\": cmd =", "pieces[2] payload = { \"token\": settings.PUSHOVER_API_TOKEN, \"user\": user_key, \"message\": text,", 
"send(cls, chat_id, text): # Telegram.send is a separate method because", ") def notify(self, check): from hc.api.models import TokenBucket if not", "url, **options) if r.status_code not in (200, 201, 202, 204):", "settings.SITE_NAME, \"client_url\": settings.SITE_ROOT, \"tags\": \",\".join(check.tags_list()), } return self.post(url, json=payload, headers=headers)", "= { \"From\": \"whatsapp:%s\" % settings.TWILIO_FROM, \"To\": \"whatsapp:%s\" % self.channel.sms_number,", "payload = { \"entity_id\": str(check.code), \"message_type\": mtype, \"entity_display_name\": check.name_then_code(), \"state_message\":", "a.add(self.channel.value) notify_type = ( apprise.NotifyType.SUCCESS if check.status == \"up\" else", "post(cls, url, **kwargs): # Make 3 attempts-- for x in", "and cost twice the money. return render_to_string(template_path, ctx).strip().replace(\"\\xa0\", \" \")", "headers=headers) class Pushover(HttpTransport): URL = \"https://api.pushover.net/1/messages.json\" def notify(self, check): others", "method returns None on success, and error message on error.", "= (self.channel.zulip_bot_email, self.channel.zulip_api_key) data = { \"type\": self.channel.zulip_type, \"to\": self.channel.zulip_to,", "settings.SITE_NAME, \"body\": text} return self.post(url, json=payload, headers=headers) class Pushover(HttpTransport): URL", "text, \"parse_mode\": \"html\"} ) def notify(self, check): from hc.api.models import", "is the priority for \"up\" events if len(pieces) == 3", "Trello(HttpTransport): URL = \"https://api.trello.com/1/cards\" def is_noop(self, check): return check.status !=", "def notify(self, check): others = self.checks().filter(status=\"down\").exclude(code=check.code) # list() executes the", "check): \"\"\" Return True if transport will ignore check's current", "json=payload) class HipChat(HttpTransport): def is_noop(self, check): return True class OpsGenie(HttpTransport):", "\" \") class Transport(object): def __init__(self, channel): self.channel = 
channel", "transport will ignore check's current status. This method is overridden", "\"down\" else \"resolve\", \"title\": tmpl(\"pagerteam_title.html\", check=check), \"description\": tmpl(\"pagerteam_description.html\", check=check), \"client\":", "a message: \"{m}\"' return f\"Received status code {r.status_code}\" except requests.exceptions.Timeout:", "check) body = spec[\"body\"] if body: body = self.prepare(body, check)", "elif spec[\"method\"] == \"PUT\": return self.put(url, data=body.encode(), headers=headers) class Slack(HttpTransport):", "check=check) payload = json.loads(text) return self.post(self.channel.slack_webhook_url, json=payload) class HipChat(HttpTransport): def", "{ \"incident_key\": str(check.code), \"event_type\": \"trigger\" if check.status == \"down\" else", "x in range(0, 3): error = cls._request(\"get\", url, **kwargs) if", "tmpl(\"sms_message.html\", check=check, site_name=settings.SITE_NAME) data = { \"From\": settings.TWILIO_FROM, \"To\": self.channel.sms_number,", "== \"up\" and not self.channel.url_up: return True return False def", "def notify(self, check): spec = self.channel.webhook_spec(check.status) if not spec[\"url\"]: return", "is_noop(self, check): if not self.channel.email_verified: return True if check.status ==", "urlencode({\"access_token\": settings.MATRIX_ACCESS_TOKEN}) return url def notify(self, check): plain = tmpl(\"matrix_description.html\",", "import json import requests from urllib.parse import quote, urlencode from", "checks(self): return self.channel.project.check_set.order_by(\"created\") class Email(Transport): def notify(self, check, bounce_url): if", "unsub_link, } emails.alert(self.channel.email_value, ctx, headers) def is_noop(self, check): if not", "\"note\", \"title\": settings.SITE_NAME, \"body\": text} return self.post(url, json=payload, headers=headers) class", "\"check\": check, \"checks\": list(self.checks()), \"sort\": sort, \"now\": timezone.now(), \"unsub_link\": unsub_link,", "@classmethod 
def get(cls, url, **kwargs): # Make 3 attempts-- for", "code {r.status_code} with a message: \"{m}\"' return f\"Received status code", "list(self.checks()), \"sort\": sort, \"now\": timezone.now(), \"unsub_link\": unsub_link, } emails.alert(self.channel.email_value, ctx,", "error class Webhook(HttpTransport): def prepare(self, template, check, urlencode=False): \"\"\" Replace", "except Profile.DoesNotExist: # Default sort order is by check's creation", "error is None: break return error @classmethod def post(cls, url,", "payload = { \"service_key\": self.channel.pd_service_key, \"incident_key\": str(check.code), \"event_type\": \"trigger\" if", "error = cls._request(\"get\", url, **kwargs) if error is None: break", "( apprise.NotifyType.SUCCESS if check.status == \"up\" else apprise.NotifyType.FAILURE ) return", "\"Conent-Type\": \"application/json\", \"Authorization\": \"GenieKey %s\" % self.channel.opsgenie_key, } payload =", "\"\"\" Replace variables with actual values. \"\"\" def safe(s): return", "Shell(Transport): def prepare(self, template, check): \"\"\" Replace placeholders with actual", "and not self.channel.url_down: return True if check.status == \"up\" and", "3 attempts-- for x in range(0, 3): error = cls._request(\"post\",", "= {\"Conent-Type\": \"application/json\"} payload = { \"incident_key\": str(check.code), \"event_type\": \"trigger\"", "check.status == \"down\" else \"resolve\", \"description\": description, \"client\": settings.SITE_NAME, \"client_url\":", "get_url(self): s = quote(self.channel.value) url = settings.MATRIX_HOMESERVER url += \"/_matrix/client/r0/rooms/%s/send/m.room.message?\"", "\"event_type\": \"trigger\" if check.status == \"down\" else \"resolve\", \"description\": description,", "\"trigger\" if check.status == \"down\" else \"resolve\", \"title\": tmpl(\"pagertree_title.html\", check=check),", "% s url += urlencode({\"access_token\": settings.MATRIX_ACCESS_TOKEN}) return url def notify(self,", "# response and return it. 
return None @classmethod def _request(cls,", "OpsGenie(HttpTransport): @classmethod def get_error(cls, response): try: return response.json().get(\"message\") except ValueError:", "notify(self, check): description = tmpl(\"victorops_description.html\", check=check) mtype = \"CRITICAL\" if", "x in range(0, 3): error = cls._request(\"post\", url, **kwargs) if", "url, **kwargs) if error is None: break return error class", "if spec[\"method\"] == \"GET\": return self.get(url, headers=headers) elif spec[\"method\"] ==", "== \"POST\": return self.post(url, data=body.encode(), headers=headers) elif spec[\"method\"] == \"PUT\":", "if check.status == \"up\": url += \"/%s/close?identifierType=alias\" % check.code return", "# Enforce settings.APPRISE_ENABLED = False def tmpl(template_name, **ctx): template_path =", "check): others = self.checks().filter(status=\"down\").exclude(code=check.code) # list() executes the query, to", "\"down\" def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms():", "also used in # hc.front.views.telegram_bot to send invite links. return", "= {\"type\": \"note\", \"title\": settings.SITE_NAME, \"body\": text} return self.post(url, json=payload,", "self.channel.sms_number, \"Body\": text, } return self.post(url, data=data, auth=auth) class Trello(HttpTransport):", "self.post(url, data=data, auth=auth) class Trello(HttpTransport): URL = \"https://api.trello.com/1/cards\" def is_noop(self,", "if m: return f'Received status code {r.status_code} with a message:", "json=payload) class Zulip(HttpTransport): @classmethod def get_error(cls, response): try: return response.json().get(\"msg\")", "% settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"sms_message.html\", check=check,", "# Default sort order is by check's creation time sort", "\"\"\" Send notification about current status of the check. 
This", "204): m = cls.get_error(r) if m: return f'Received status code", "= tmpl(\"pushover_message.html\", **ctx) title = tmpl(\"pushover_title.html\", **ctx) pieces = self.channel.value.split(\"|\")", "return ( \"Failed\" if not a.notify(body=body, title=title, notify_type=notify_type) else None", "not settings.SHELL_ENABLED: return \"Shell commands are not enabled\" if check.status", "(i + 1)] = safe(tag) return replace(template, ctx) def is_noop(self,", "import TokenBucket if not TokenBucket.authorize_telegram(self.channel.telegram_id): return \"Rate limit exceeded\" text", "\"idList\": self.channel.trello_list_id, \"name\": tmpl(\"trello_name.html\", check=check), \"desc\": tmpl(\"trello_desc.html\", check=check), \"key\": settings.TRELLO_APP_KEY,", "self.channel.pd_service_key, \"incident_key\": str(check.code), \"event_type\": \"trigger\" if check.status == \"down\" else", "tmpl(\"zulip_topic.html\", check=check), \"content\": tmpl(\"zulip_content.html\", check=check), } return self.post(url, data=data, auth=auth)", "return self.post(self.URL, data=payload) class VictorOps(HttpTransport): def notify(self, check): description =", "\"down\" else \"resolve\", \"description\": description, \"client\": settings.SITE_NAME, \"client_url\": check.details_url(), }", "\"up\" and \"down\" events, and both are optional. 
\"\"\" return", "ImportError: # Enforce settings.APPRISE_ENABLED = False def tmpl(template_name, **ctx): template_path", "text = tmpl(\"whatsapp_message.html\", check=check, site_name=settings.SITE_NAME) data = { \"From\": \"whatsapp:%s\"", "self.channel.webhook_spec(check.status) if not spec[\"url\"]: return \"Empty webhook URL\" url =", "return self.post(url, json=payload) class Telegram(HttpTransport): SM = \"https://api.telegram.org/bot%s/sendMessage\" % settings.TELEGRAM_TOKEN", "= Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"SMS\") return \"Monthly SMS limit", "sort = \"created\" # list() executes the query, to avoid", "profile.send_sms_limit_notice(\"SMS\") return \"Monthly SMS limit exceeded\" url = self.URL %", "} payload = {\"alias\": str(check.code), \"source\": settings.SITE_NAME} if check.status ==", "def notify(self, check): _, domain = self.channel.zulip_bot_email.split(\"@\") url = \"https://%s/api/v1/messages\"", "payload[\"note\"] = tmpl(\"opsgenie_note.html\", check=check) payload[\"description\"] = tmpl(\"opsgenie_description.html\", check=check) url =", "% domain auth = (self.channel.zulip_bot_email, self.channel.zulip_api_key) data = { \"type\":", "if check.status == \"up\": cmd = self.channel.cmd_up elif check.status ==", "spec[\"method\"] == \"PUT\": return self.put(url, data=body.encode(), headers=headers) class Slack(HttpTransport): def", "while # rendering a template ctx = { \"check\": check,", "def notify(self, check): plain = tmpl(\"matrix_description.html\", check=check) formatted = tmpl(\"matrix_description_formatted.html\",", "True return False def notify(self, check): if not settings.SHELL_ENABLED: return", "== \"down\": cmd = self.channel.cmd_down cmd = self.prepare(cmd, check) code", "headers = {\"Content-Type\": \"application/json\"} payload = { \"incident_key\": str(check.code), \"event_type\":", "django.utils import timezone import json import requests from 
urllib.parse import", "variables with actual values. \"\"\" def safe(s): return quote(s) if", "ctx = { \"check\": check, \"checks\": list(self.checks()), \"sort\": sort, \"now\":", "= \"https://api.pushbullet.com/v2/pushes\" headers = { \"Access-Token\": self.channel.value, \"Conent-Type\": \"application/json\", }", "f\"Received status code {r.status_code}\" except requests.exceptions.Timeout: # Well, we tried", "settings.MATRIX_HOMESERVER url += \"/_matrix/client/r0/rooms/%s/send/m.room.message?\" % s url += urlencode({\"access_token\": settings.MATRIX_ACCESS_TOKEN})", "str(check.code), \"$STATUS\": check.status, \"$NOW\": timezone.now().replace(microsecond=0).isoformat(), \"$NAME\": check.name, \"$TAGS\": check.tags, }", "check.status == \"up\" and not self.channel.cmd_up: return True return False", "try: return response.json().get(\"description\") except ValueError: pass @classmethod def send(cls, chat_id,", "= { \"Access-Token\": self.channel.value, \"Conent-Type\": \"application/json\", } payload = {\"type\":", "**kwargs): # Make 3 attempts-- for x in range(0, 3):", "\"Failed\" if not a.notify(body=body, title=title, notify_type=notify_type) else None ) class", "} return self.post(url, json=payload, headers=headers) class PagerTeam(HttpTransport): def notify(self, check):", "os.system(cmd) if code != 0: return \"Command returned exit code", "data = { \"From\": settings.TWILIO_FROM, \"To\": self.channel.sms_number, \"Body\": text, }", "== \"down\": payload[\"tags\"] = check.tags_list() payload[\"message\"] = tmpl(\"opsgenie_message.html\", check=check) payload[\"note\"]", "not self.channel.email_notify_up class Shell(Transport): def prepare(self, template, check): \"\"\" Replace", "= ( apprise.NotifyType.SUCCESS if check.status == \"up\" else apprise.NotifyType.FAILURE )", "apprise.Apprise() title = tmpl(\"apprise_title.html\", check=check) body = tmpl(\"apprise_description.html\", check=check) a.add(self.channel.value)", "check): if not settings.SHELL_ENABLED: 
return \"Shell commands are not enabled\"", "return self.post(self.get_url(), json=payload) class Discord(HttpTransport): def notify(self, check): text =", "Slack(HttpTransport): def notify(self, check): text = tmpl(\"slack_message.json\", check=check) payload =", "get_error(cls, response): try: return response.json().get(\"description\") except ValueError: pass @classmethod def", "self.URL % settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"sms_message.html\",", "else \"resolve\", \"title\": tmpl(\"pagerteam_title.html\", check=check), \"description\": tmpl(\"pagerteam_description.html\", check=check), \"client\": settings.SITE_NAME,", "self.put(url, data=body.encode(), headers=headers) class Slack(HttpTransport): def notify(self, check): text =", "= \"https://api.pushover.net/1/messages.json\" def notify(self, check): others = self.checks().filter(status=\"down\").exclude(code=check.code) # list()", "timed out\" except requests.exceptions.ConnectionError: return \"Connection failed\" @classmethod def get(cls,", "pass def notify(self, check): _, domain = self.channel.zulip_bot_email.split(\"@\") url =", "x in range(0, 3): error = cls._request(\"put\", url, **kwargs) if", "if self.channel.opsgenie_region == \"eu\": url = \"https://api.eu.opsgenie.com/v2/alerts\" if check.status ==", "\"RECOVERY\" payload = { \"entity_id\": str(check.code), \"message_type\": mtype, \"entity_display_name\": check.name_then_code(),", "p.sort except Profile.DoesNotExist: # Default sort order is by check's", "**ctx) pieces = self.channel.value.split(\"|\") user_key, prio = pieces[0], pieces[1] #", "3 attempts-- for x in range(0, 3): error = cls._request(\"get\",", "send invite links. 
return cls.post( cls.SM, json={\"chat_id\": chat_id, \"text\": text,", "response): try: return response.json().get(\"description\") except ValueError: pass @classmethod def send(cls,", "user_key, \"message\": text, \"title\": title, \"html\": 1, \"priority\": int(prio), }", "by check's creation time sort = \"created\" # list() executes", "is also used in # hc.front.views.telegram_bot to send invite links.", "\"checks\": list(self.checks()), \"sort\": sort, \"now\": timezone.now(), \"unsub_link\": unsub_link, } emails.alert(self.channel.email_value,", "= json.loads(text) return self.post(self.channel.value, json=payload) class Zulip(HttpTransport): @classmethod def get_error(cls,", "is None: break return error @classmethod def put(cls, url, **kwargs):", "return True return False def notify(self, check): if not settings.SHELL_ENABLED:", "= \"CRITICAL\" if check.status == \"down\" else \"RECOVERY\" payload =", "= {\"check\": check, \"down_checks\": list(others)} text = tmpl(\"pushover_message.html\", **ctx) title", "\"description\": tmpl(\"pagerteam_description.html\", check=check), \"client\": settings.SITE_NAME, \"client_url\": settings.SITE_ROOT, \"tags\": \",\".join(check.tags_list()), }", "{ \"Access-Token\": self.channel.value, \"Conent-Type\": \"application/json\", } payload = {\"type\": \"note\",", "return cls.post( cls.SM, json={\"chat_id\": chat_id, \"text\": text, \"parse_mode\": \"html\"} )", "\"$STATUS\": check.status, \"$NOW\": safe(timezone.now().replace(microsecond=0).isoformat()), \"$NAME\": safe(check.name), \"$TAGS\": safe(check.tags), } for", "return \"Email not verified\" unsub_link = self.channel.get_unsub_link() headers = {", "% settings.TWILIO_FROM, \"To\": \"whatsapp:%s\" % self.channel.sms_number, \"Body\": text, } return", "actual values. 
\"\"\" ctx = { \"$CODE\": str(check.code), \"$STATUS\": check.status,", "\"$NAME\": check.name, \"$TAGS\": check.tags, } for i, tag in enumerate(check.tags_list()):", "# Emergency notification if prio == \"2\": payload[\"retry\"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY", "title = tmpl(\"pushover_title.html\", **ctx) pieces = self.channel.value.split(\"|\") user_key, prio =", "== \"2\": payload[\"retry\"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY payload[\"expire\"] = settings.PUSHOVER_EMERGENCY_EXPIRATION return self.post(self.URL,", "channel): self.channel = channel def notify(self, check): \"\"\" Send notification", "third element, if present, is the priority for \"up\" events", "import apprise except ImportError: # Enforce settings.APPRISE_ENABLED = False def", "def notify(self, check): params = { \"idList\": self.channel.trello_list_id, \"name\": tmpl(\"trello_name.html\",", "params = { \"idList\": self.channel.trello_list_id, \"name\": tmpl(\"trello_name.html\", check=check), \"desc\": tmpl(\"trello_desc.html\",", "201, 202, 204): m = cls.get_error(r) if m: return f'Received", "pieces[1] # The third element, if present, is the priority", "self.post(self.channel.value, json=payload) class Matrix(HttpTransport): def get_url(self): s = quote(self.channel.value) url", "\"application/json\", } payload = {\"type\": \"note\", \"title\": settings.SITE_NAME, \"body\": text}", "title=title, notify_type=notify_type) else None ) class MsTeams(HttpTransport): def notify(self, check):", "query, to avoid DB access while # rendering a template", "check. 
This method returns None on success, and error message", "notify(self, check): if not settings.SHELL_ENABLED: return \"Shell commands are not", "settings.MATRIX_ACCESS_TOKEN}) return url def notify(self, check): plain = tmpl(\"matrix_description.html\", check=check)", "check): text = tmpl(\"slack_message.json\", check=check) payload = json.loads(text) return self.post(self.channel.slack_webhook_url,", "return not self.channel.email_notify_up class Shell(Transport): def prepare(self, template, check): \"\"\"", "\"down\" else \"RECOVERY\" payload = { \"entity_id\": str(check.code), \"message_type\": mtype,", "the money. return render_to_string(template_path, ctx).strip().replace(\"\\xa0\", \" \") class Transport(object): def", "\"down\" and not self.channel.cmd_down: return True if check.status == \"up\"", "# \\xa0 is non-breaking space. It causes SMS messages to", "= json.loads(text) url = self.channel.discord_webhook_url + \"/slack\" return self.post(url, json=payload)", "% settings.TELEGRAM_TOKEN @classmethod def get_error(cls, response): try: return response.json().get(\"description\") except", "url += \"/%s/close?identifierType=alias\" % check.code return self.post(url, json=payload, headers=headers) class", "f'Received status code {r.status_code} with a message: \"{m}\"' return f\"Received", "text = tmpl(\"slack_message.json\", check=check) payload = json.loads(text) return self.post(self.channel.slack_webhook_url, json=payload)", "\"formatted_body\": formatted, } return self.post(self.get_url(), json=payload) class Discord(HttpTransport): def notify(self,", "check, \"down_checks\": list(others)} text = tmpl(\"pushover_message.html\", **ctx) title = tmpl(\"pushover_title.html\",", "\"up\" and not self.channel.url_up: return True return False def notify(self,", "text) class Sms(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): return", "= Profile.objects.get(user__email=self.channel.email_value) sort 
= p.sort except Profile.DoesNotExist: # Default sort", "spec[\"method\"] == \"GET\": return self.get(url, headers=headers) elif spec[\"method\"] == \"POST\":", "json=payload, headers=headers) class PagerTeam(HttpTransport): def notify(self, check): url = self.channel.value", "spec = self.channel.webhook_spec(check.status) if not spec[\"url\"]: return \"Empty webhook URL\"", "s = quote(self.channel.value) url = settings.MATRIX_HOMESERVER url += \"/_matrix/client/r0/rooms/%s/send/m.room.message?\" %", "quote(self.channel.value) url = settings.MATRIX_HOMESERVER url += \"/_matrix/client/r0/rooms/%s/send/m.room.message?\" % s url", "PagerTree(HttpTransport): def notify(self, check): url = self.channel.value headers = {\"Conent-Type\":", "headers=headers) elif spec[\"method\"] == \"PUT\": return self.put(url, data=body.encode(), headers=headers) class", "subclasses: look for a specific error message in the #", "response): try: return response.json().get(\"msg\") except ValueError: pass def notify(self, check):", "for x in range(0, 3): error = cls._request(\"put\", url, **kwargs)", "return error class Webhook(HttpTransport): def prepare(self, template, check, urlencode=False): \"\"\"", "# rendering a template ctx = {\"check\": check, \"down_checks\": list(others)}", "values. 
\"\"\" ctx = { \"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\":", "\"message\": text, \"title\": title, \"html\": 1, \"priority\": int(prio), } #", "except ValueError: pass def notify(self, check): _, domain = self.channel.zulip_bot_email.split(\"@\")", "ctx = { \"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\": timezone.now().replace(microsecond=0).isoformat(), \"$NAME\":", "= tmpl(\"sms_message.html\", check=check, site_name=settings.SITE_NAME) data = { \"From\": settings.TWILIO_FROM, \"To\":", "== \"up\": prio = pieces[2] payload = { \"token\": settings.PUSHOVER_API_TOKEN,", "_, domain = self.channel.zulip_bot_email.split(\"@\") url = \"https://%s/api/v1/messages\" % domain auth", "executes the query, to avoid DB access while # rendering", "options: options[\"headers\"] = {} if \"User-Agent\" not in options[\"headers\"]: options[\"headers\"][\"User-Agent\"]", "for x in range(0, 3): error = cls._request(\"get\", url, **kwargs)", "tmpl(\"matrix_description.html\", check=check) formatted = tmpl(\"matrix_description_formatted.html\", check=check) payload = { \"msgtype\":", "tmpl(\"apprise_title.html\", check=check) body = tmpl(\"apprise_description.html\", check=check) a.add(self.channel.value) notify_type = (", "= tmpl(\"msteams_message.json\", check=check) payload = json.loads(text) return self.post(self.channel.value, json=payload) class", "notify(self, check): \"\"\" Send notification about current status of the", "enumerate(check.tags_list()): ctx[\"$TAG%d\" % (i + 1)] = safe(tag) return replace(template,", "not self.channel.url_down: return True if check.status == \"up\" and not", "\"user\": user_key, \"message\": text, \"title\": title, \"html\": 1, \"priority\": int(prio),", "return self.post(self.channel.value, json=payload) class Matrix(HttpTransport): def get_url(self): s = quote(self.channel.value)", "check): \"\"\" Replace placeholders with actual values. 
\"\"\" ctx =", "class PagerTree(HttpTransport): def notify(self, check): url = self.channel.value headers =", "self.post(self.URL, data=payload) class VictorOps(HttpTransport): def notify(self, check): description = tmpl(\"victorops_description.html\",", "{ \"From\": \"whatsapp:%s\" % settings.TWILIO_FROM, \"To\": \"whatsapp:%s\" % self.channel.sms_number, \"Body\":", "SM = \"https://api.telegram.org/bot%s/sendMessage\" % settings.TELEGRAM_TOKEN @classmethod def get_error(cls, response): try:", "if len(pieces) == 3 and check.status == \"up\": prio =", "\"priority\": int(prio), } # Emergency notification if prio == \"2\":", "\"$TAGS\": check.tags, } for i, tag in enumerate(check.tags_list()): ctx[\"$TAG%d\" %", "response and return it. return None @classmethod def _request(cls, method,", "check=check) payload[\"description\"] = tmpl(\"opsgenie_description.html\", check=check) url = \"https://api.opsgenie.com/v2/alerts\" if self.channel.opsgenie_region", "class Pushbullet(HttpTransport): def notify(self, check): text = tmpl(\"pushbullet_message.html\", check=check) url", "{\"alias\": str(check.code), \"source\": settings.SITE_NAME} if check.status == \"down\": payload[\"tags\"] =", "class Sms(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check): return check.status", "{ \"check\": check, \"checks\": list(self.checks()), \"sort\": sort, \"now\": timezone.now(), \"unsub_link\":", "not profile.authorize_sms(): profile.send_sms_limit_notice(\"WhatsApp\") return \"Monthly message limit exceeded\" url =", "is non-breaking space. 
It causes SMS messages to use UCS2", "\"body\": plain, \"format\": \"org.matrix.custom.html\", \"formatted_body\": formatted, } return self.post(self.get_url(), json=payload)", "self.channel.email_verified: return True if check.status == \"down\": return not self.channel.email_notify_down", "url = self.channel.value headers = {\"Conent-Type\": \"application/json\"} payload = {", "= pieces[2] payload = { \"token\": settings.PUSHOVER_API_TOKEN, \"user\": user_key, \"message\":", "check): text = tmpl(\"slack_message.json\", check=check) payload = json.loads(text) url =", "= { \"X-Bounce-Url\": bounce_url, \"List-Unsubscribe\": \"<%s>\" % unsub_link, \"List-Unsubscribe-Post\": \"List-Unsubscribe=One-Click\",", "@classmethod def get_error(cls, response): # Override in subclasses: look for", "return self.post(url, json=payload, headers=headers) class PagerTeam(HttpTransport): def notify(self, check): url", "return self.get(url, headers=headers) elif spec[\"method\"] == \"POST\": return self.post(url, data=body.encode(),", "self.channel.zulip_bot_email.split(\"@\") url = \"https://%s/api/v1/messages\" % domain auth = (self.channel.zulip_bot_email, self.channel.zulip_api_key)", "payload[\"message\"] = tmpl(\"opsgenie_message.html\", check=check) payload[\"note\"] = tmpl(\"opsgenie_note.html\", check=check) payload[\"description\"] =", "url += \"/_matrix/client/r0/rooms/%s/send/m.room.message?\" % s url += urlencode({\"access_token\": settings.MATRIX_ACCESS_TOKEN}) return", "self.channel.trello_list_id, \"name\": tmpl(\"trello_name.html\", check=check), \"desc\": tmpl(\"trello_desc.html\", check=check), \"key\": settings.TRELLO_APP_KEY, \"token\":", "def is_noop(self, check): return check.status != \"down\" def notify(self, check):", "# and cost twice the money. 
return render_to_string(template_path, ctx).strip().replace(\"\\xa0\", \"", "\"up\" events if len(pieces) == 3 and check.status == \"up\":", "self.post(self.URL, params=params) class Apprise(HttpTransport): def notify(self, check): if not settings.APPRISE_ENABLED:", "!= 0: return \"Command returned exit code %d\" % code", "{\"Content-Type\": \"application/json\"} payload = { \"incident_key\": str(check.code), \"event_type\": \"trigger\" if", "def get_error(cls, response): try: return response.json().get(\"msg\") except ValueError: pass def", "= tmpl(\"opsgenie_message.html\", check=check) payload[\"note\"] = tmpl(\"opsgenie_note.html\", check=check) payload[\"description\"] = tmpl(\"opsgenie_description.html\",", "tmpl(\"telegram_message.html\", check=check) return self.send(self.channel.telegram_id, text) class Sms(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\"", "int(prio), } # Emergency notification if prio == \"2\": payload[\"retry\"]", "self.channel.whatsapp_notify_up def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms():", "import render_to_string from django.utils import timezone import json import requests", "out\" except requests.exceptions.ConnectionError: return \"Connection failed\" @classmethod def get(cls, url,", "return self.post(self.URL, params=params) class Apprise(HttpTransport): def notify(self, check): if not", "1, \"priority\": int(prio), } # Emergency notification if prio ==", "\"tags\": \",\".join(check.tags_list()), } return self.post(url, json=payload, headers=headers) class PagerTeam(HttpTransport): def", "def is_noop(self, check): if check.status == \"down\": return not self.channel.whatsapp_notify_down", "description = tmpl(\"victorops_description.html\", check=check) mtype = \"CRITICAL\" if check.status ==", "\"type\": self.channel.zulip_type, \"to\": self.channel.zulip_to, \"topic\": tmpl(\"zulip_topic.html\", check=check), 
\"content\": tmpl(\"zulip_content.html\", check=check),", "check): return True class OpsGenie(HttpTransport): @classmethod def get_error(cls, response): try:", "code = os.system(cmd) if code != 0: return \"Command returned", "} payload = {\"type\": \"note\", \"title\": settings.SITE_NAME, \"body\": text} return", "check.tags, } for i, tag in enumerate(check.tags_list()): ctx[\"$TAG%d\" % (i", "check=check) a.add(self.channel.value) notify_type = ( apprise.NotifyType.SUCCESS if check.status == \"up\"", "self.send(self.channel.telegram_id, text) class Sms(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\" def is_noop(self, check):", "a = apprise.Apprise() title = tmpl(\"apprise_title.html\", check=check) body = tmpl(\"apprise_description.html\",", "os from django.conf import settings from django.template.loader import render_to_string from", "both are optional. \"\"\" return False def checks(self): return self.channel.project.check_set.order_by(\"created\")", "True if transport will ignore check's current status. This method", "} for i, tag in enumerate(check.tags_list()): ctx[\"$TAG%d\" % (i +", "if present, is the priority for \"up\" events if len(pieces)", "text} return self.post(url, json=payload, headers=headers) class Pushover(HttpTransport): URL = \"https://api.pushover.net/1/messages.json\"", "of the check. This method returns None on success, and", "returned exit code %d\" % code class HttpTransport(Transport): @classmethod def", "Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"WhatsApp\") return \"Monthly message limit exceeded\"", "Replace variables with actual values. 
\"\"\" def safe(s): return quote(s)", "tmpl(\"slack_message.json\", check=check) payload = json.loads(text) url = self.channel.discord_webhook_url + \"/slack\"", "limit exceeded\" text = tmpl(\"telegram_message.html\", check=check) return self.send(self.channel.telegram_id, text) class", "self.channel.email_notify_up class Shell(Transport): def prepare(self, template, check): \"\"\" Replace placeholders", "self.post(self.get_url(), json=payload) class Discord(HttpTransport): def notify(self, check): text = tmpl(\"slack_message.json\",", "**kwargs): try: options = dict(kwargs) options[\"timeout\"] = 5 if \"headers\"", "messages to use UCS2 encoding # and cost twice the", "self.channel.project.check_set.order_by(\"created\") class Email(Transport): def notify(self, check, bounce_url): if not self.channel.email_verified:", "= \"healthchecks.io\" r = requests.request(method, url, **options) if r.status_code not", "SMS messages to use UCS2 encoding # and cost twice", ") class MsTeams(HttpTransport): def notify(self, check): text = tmpl(\"msteams_message.json\", check=check)", "settings.SITE_NAME, } return self.post(self.channel.value, json=payload) class Matrix(HttpTransport): def get_url(self): s", "Replace placeholders with actual values. 
\"\"\" ctx = { \"$CODE\":", "enumerate(check.tags_list()): ctx[\"$TAG%d\" % (i + 1)] = tag return replace(template,", "return False def notify(self, check): if not settings.SHELL_ENABLED: return \"Shell", "} return self.post(url, data=data, auth=auth) class WhatsApp(HttpTransport): URL = \"https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json\"", "r.status_code not in (200, 201, 202, 204): m = cls.get_error(r)", "verified\" unsub_link = self.channel.get_unsub_link() headers = { \"X-Bounce-Url\": bounce_url, \"List-Unsubscribe\":", "The third element, if present, is the priority for \"up\"", "\"Conent-Type\": \"application/json\", } payload = {\"type\": \"note\", \"title\": settings.SITE_NAME, \"body\":", "check): return check.status != \"down\" def notify(self, check): params =", "check.status == \"up\": prio = pieces[2] payload = { \"token\":", "Enforce settings.APPRISE_ENABLED = False def tmpl(template_name, **ctx): template_path = \"integrations/%s\"", "\"created\" # list() executes the query, to avoid DB access", "Profile.DoesNotExist: # Default sort order is by check's creation time", "str(check.code), \"event_type\": \"trigger\" if check.status == \"down\" else \"resolve\", \"title\":", "\"Shell commands are not enabled\" if check.status == \"up\": cmd", "from hc.lib import emails from hc.lib.string import replace try: import", "if r.status_code not in (200, 201, 202, 204): m =", "check=check, site_name=settings.SITE_NAME) data = { \"From\": \"whatsapp:%s\" % settings.TWILIO_FROM, \"To\":", "code class HttpTransport(Transport): @classmethod def get_error(cls, response): # Override in", "def checks(self): return self.channel.project.check_set.order_by(\"created\") class Email(Transport): def notify(self, check, bounce_url):", "mtype = \"CRITICAL\" if check.status == \"down\" else \"RECOVERY\" payload", "profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"WhatsApp\") return 
\"Monthly message", "with a message: \"{m}\"' return f\"Received status code {r.status_code}\" except", "check, urlencode=False): \"\"\" Replace variables with actual values. \"\"\" def", "not TokenBucket.authorize_telegram(self.channel.telegram_id): return \"Rate limit exceeded\" text = tmpl(\"telegram_message.html\", check=check)", "text): # Telegram.send is a separate method because it is", "check.status == \"down\": cmd = self.channel.cmd_down cmd = self.prepare(cmd, check)", "check): profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"WhatsApp\") return \"Monthly", "optional. \"\"\" return False def checks(self): return self.channel.project.check_set.order_by(\"created\") class Email(Transport):", "= tmpl(\"whatsapp_message.html\", check=check, site_name=settings.SITE_NAME) data = { \"From\": \"whatsapp:%s\" %", "return self.post(url, json=payload, headers=headers) class Pushbullet(HttpTransport): def notify(self, check): text", "status. 
This method is overridden in Webhook subclass where the", "def is_noop(self, check): if not self.channel.email_verified: return True if check.status", "not self.channel.whatsapp_notify_up def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner) if not", "% (i + 1)] = tag return replace(template, ctx) def", "ValueError: pass def notify(self, check): headers = { \"Conent-Type\": \"application/json\",", "} return self.post(url, data=data, auth=auth) class Trello(HttpTransport): URL = \"https://api.trello.com/1/cards\"", "= {} if \"User-Agent\" not in options[\"headers\"]: options[\"headers\"][\"User-Agent\"] = \"healthchecks.io\"", "options[\"headers\"][\"User-Agent\"] = \"healthchecks.io\" r = requests.request(method, url, **options) if r.status_code", "headers[key] = self.prepare(value, check) body = spec[\"body\"] if body: body", "if not profile.authorize_sms(): profile.send_sms_limit_notice(\"WhatsApp\") return \"Monthly message limit exceeded\" url", "invite links. 
return cls.post( cls.SM, json={\"chat_id\": chat_id, \"text\": text, \"parse_mode\":", "spec[\"headers\"].items(): headers[key] = self.prepare(value, check) body = spec[\"body\"] if body:", "\"whatsapp:%s\" % settings.TWILIO_FROM, \"To\": \"whatsapp:%s\" % self.channel.sms_number, \"Body\": text, }", "% (i + 1)] = safe(tag) return replace(template, ctx) def", "check): from hc.api.models import TokenBucket if not TokenBucket.authorize_telegram(self.channel.telegram_id): return \"Rate", "tmpl(\"trello_name.html\", check=check), \"desc\": tmpl(\"trello_desc.html\", check=check), \"key\": settings.TRELLO_APP_KEY, \"token\": self.channel.trello_token, }", "check): profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"SMS\") return \"Monthly", "check=check, site_name=settings.SITE_NAME) data = { \"From\": settings.TWILIO_FROM, \"To\": self.channel.sms_number, \"Body\":", "body = self.prepare(body, check) if spec[\"method\"] == \"GET\": return self.get(url,", "if error is None: break return error class Webhook(HttpTransport): def", "not settings.APPRISE_ENABLED: # Not supported and/or enabled return \"Apprise is", "= False def tmpl(template_name, **ctx): template_path = \"integrations/%s\" % template_name", "code {r.status_code}\" except requests.exceptions.Timeout: # Well, we tried return \"Connection", "== \"down\" and not self.channel.cmd_down: return True if check.status ==", "return self.channel.project.check_set.order_by(\"created\") class Email(Transport): def notify(self, check, bounce_url): if not", "check.status == \"up\" and not self.channel.url_up: return True return False", "\"Body\": text, } return self.post(url, data=data, auth=auth) class Trello(HttpTransport): URL", "template ctx = { \"check\": check, \"checks\": list(self.checks()), \"sort\": sort,", "check.status, \"$NOW\": timezone.now().replace(microsecond=0).isoformat(), \"$NAME\": check.name, \"$TAGS\": check.tags, } for i,", 
"} try: # Look up the sorting preference for this", "profile = Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"SMS\") return \"Monthly SMS", "with actual values. \"\"\" def safe(s): return quote(s) if urlencode", "\"Email not verified\" unsub_link = self.channel.get_unsub_link() headers = { \"X-Bounce-Url\":", "None: break return error @classmethod def post(cls, url, **kwargs): #", "encoding # and cost twice the money. return render_to_string(template_path, ctx).strip().replace(\"\\xa0\",", "is overridden in Webhook subclass where the user can configure", "url, **kwargs) if error is None: break return error @classmethod", "if check.status == \"down\" else \"resolve\", \"description\": description, \"client\": settings.SITE_NAME,", "it is also used in # hc.front.views.telegram_bot to send invite", "json=payload) class Matrix(HttpTransport): def get_url(self): s = quote(self.channel.value) url =", "True if check.status == \"up\" and not self.channel.cmd_up: return True", "def is_noop(self, check): if check.status == \"down\" and not self.channel.cmd_down:", "self.prepare(body, check) if spec[\"method\"] == \"GET\": return self.get(url, headers=headers) elif", "class Telegram(HttpTransport): SM = \"https://api.telegram.org/bot%s/sendMessage\" % settings.TELEGRAM_TOKEN @classmethod def get_error(cls,", "and return it. 
return None @classmethod def _request(cls, method, url,", "ValueError: pass def notify(self, check): _, domain = self.channel.zulip_bot_email.split(\"@\") url", "\"client_url\": settings.SITE_ROOT, \"tags\": \",\".join(check.tags_list()), } return self.post(url, json=payload, headers=headers) class", "\"html\": 1, \"priority\": int(prio), } # Emergency notification if prio", "self.post(url, json=payload, headers=headers) class Pushover(HttpTransport): URL = \"https://api.pushover.net/1/messages.json\" def notify(self,", "% self.channel.sms_number, \"Body\": text, } return self.post(url, data=data, auth=auth) class", "json={\"chat_id\": chat_id, \"text\": text, \"parse_mode\": \"html\"} ) def notify(self, check):", "== \"up\": cmd = self.channel.cmd_up elif check.status == \"down\": cmd", "= Profile.objects.for_user(self.channel.project.owner) if not profile.authorize_sms(): profile.send_sms_limit_notice(\"WhatsApp\") return \"Monthly message limit", "the # response and return it. return None @classmethod def", "\"down\": return not self.channel.whatsapp_notify_down else: return not self.channel.whatsapp_notify_up def notify(self,", "= \"https://api.eu.opsgenie.com/v2/alerts\" if check.status == \"up\": url += \"/%s/close?identifierType=alias\" %", "= self.prepare(value, check) body = spec[\"body\"] if body: body =", "= dict(kwargs) options[\"timeout\"] = 5 if \"headers\" not in options:", "\"down\": cmd = self.channel.cmd_down cmd = self.prepare(cmd, check) code =", "self.channel.sms_number, \"Body\": text, } return self.post(url, data=data, auth=auth) class WhatsApp(HttpTransport):", "if code != 0: return \"Command returned exit code %d\"", "check.status, \"$NOW\": safe(timezone.now().replace(microsecond=0).isoformat()), \"$NAME\": safe(check.name), \"$TAGS\": safe(check.tags), } for i,", "class Pushover(HttpTransport): URL = \"https://api.pushover.net/1/messages.json\" def notify(self, check): others =", "from hc.accounts.models import Profile from hc.lib import 
emails from hc.lib.string", "@classmethod def _request(cls, method, url, **kwargs): try: options = dict(kwargs)", "def send(cls, chat_id, text): # Telegram.send is a separate method", "from hc.lib.string import replace try: import apprise except ImportError: #", "None: break return error @classmethod def put(cls, url, **kwargs): #", "= tmpl(\"telegram_message.html\", check=check) return self.send(self.channel.telegram_id, text) class Sms(HttpTransport): URL =", "settings.TWILIO_ACCOUNT auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH) text = tmpl(\"whatsapp_message.html\", check=check, site_name=settings.SITE_NAME)", "url += urlencode({\"access_token\": settings.MATRIX_ACCESS_TOKEN}) return url def notify(self, check): plain", "return \"Empty webhook URL\" url = self.prepare(spec[\"url\"], check, urlencode=True) headers", "import os from django.conf import settings from django.template.loader import render_to_string", "{} if \"User-Agent\" not in options[\"headers\"]: options[\"headers\"][\"User-Agent\"] = \"healthchecks.io\" r", "return False def notify(self, check): spec = self.channel.webhook_spec(check.status) if not", "return check.status != \"down\" def notify(self, check): profile = Profile.objects.for_user(self.channel.project.owner)", "about current status of the check. 
This method returns None", "prio = pieces[2] payload = { \"token\": settings.PUSHOVER_API_TOKEN, \"user\": user_key,", "\"source\": settings.SITE_NAME} if check.status == \"down\": payload[\"tags\"] = check.tags_list() payload[\"message\"]", "\"resolve\", \"description\": description, \"client\": settings.SITE_NAME, \"client_url\": check.details_url(), } return self.post(self.URL,", "response): try: return response.json().get(\"message\") except ValueError: pass def notify(self, check):", "ctx = { \"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\": safe(timezone.now().replace(microsecond=0).isoformat()), \"$NAME\":", "len(pieces) == 3 and check.status == \"up\": prio = pieces[2]", "apprise except ImportError: # Enforce settings.APPRISE_ENABLED = False def tmpl(template_name,", "\"client\": settings.SITE_NAME, \"client_url\": check.details_url(), } return self.post(self.URL, json=payload) class PagerTree(HttpTransport):", "\"whatsapp:%s\" % self.channel.sms_number, \"Body\": text, } return self.post(url, data=data, auth=auth)", "pass @classmethod def send(cls, chat_id, text): # Telegram.send is a", "Matrix(HttpTransport): def get_url(self): s = quote(self.channel.value) url = settings.MATRIX_HOMESERVER url", "it. return None @classmethod def _request(cls, method, url, **kwargs): try:", "in range(0, 3): error = cls._request(\"put\", url, **kwargs) if error", "notify_type=notify_type) else None ) class MsTeams(HttpTransport): def notify(self, check): text", "check): _, domain = self.channel.zulip_bot_email.split(\"@\") url = \"https://%s/api/v1/messages\" % domain", "SMS limit exceeded\" url = self.URL % settings.TWILIO_ACCOUNT auth =", "= { \"$CODE\": str(check.code), \"$STATUS\": check.status, \"$NOW\": safe(timezone.now().replace(microsecond=0).isoformat()), \"$NAME\": safe(check.name),", "\"body\": text} return self.post(url, json=payload, headers=headers) class Pushover(HttpTransport): URL =" ]
[ "revision: str) -> None: self.package = ExternalPackage(url, revision) self._builders =", "imports = super().from_pyobj(content) imports._builder = builder # pylint: disable=protected-access return", "graviti.portex.register import ExternalContainerRegister if TYPE_CHECKING: from subprocess import CompletedProcess from", "_deep_fetch(self) -> None: try: self._run([\"git\", \"fetch\", \"origin\"]) except CalledProcessError as", "= list(self._path.glob(\"**/ROOT.yaml\")) if len(roots) == 0: raise TypeError(\"No 'ROOT.yaml' file", "else: bases = (PortexExternalType,) type_ = type(self._name, bases, class_attrs) self._builder.package[self._name]", "class TypeBuilder: \"\"\"The builder of the external Portex template type.", "Returns: The builded Portex external package. \"\"\" for builder in", "None: try: self._run([\"git\", \"fetch\", \"origin\"]) except CalledProcessError as error: raise", "to the repo url is invalid.\", error, ) from None", "bases = (PortexExternalType,) type_ = type(self._name, bases, class_attrs) self._builder.package[self._name] =", "The name of the Portex template type. path: The source", "(*yaml_file.relative_to(root).parent.parts, yaml_file.stem) name = \".\".join(parts) builders[name] = TypeBuilder(name, yaml_file, self)", "to. \"\"\" def __init__(self, name: str, path: Path, builder: PackageBuilder)", "package: The package the Portex template type belongs to. \"\"\"", "the Portex external package. Returns: The builded Portex external package.", "TypeError(\"No 'ROOT.yaml' file found\") if len(roots) >= 2: raise TypeError(\"More", "git repo revision (tag/commit) of the external package. Returns: The", "The builded Portex external package. \"\"\" for builder in self._builders.values():", "external package. \"\"\" for builder in self._builders.values(): if builder.is_building: continue", "repo. Returns: The root directory path of the package repo.", "-> ExternalPackage: \"\"\"Build the Portex external package. 
Returns: The builded", "<gh_stars>10-100 #!/usr/bin/env python3 # # Copyright 2022 Graviti. Licensed under", "graviti.portex.external import PortexExternalType from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory from graviti.portex.package", "Dict[str, Any] = { \"params\": params, \"factory\": factory, \"package\": self._builder.package,", "PIPE, CalledProcessError, run from tempfile import gettempdir from typing import", "failed, most likely due to the repo url is invalid.\",", "return False return not bool(result.stdout) def _clone_repo(self) -> None: print(f\"Cloning", "PackageBuilder ) -> _I: \"\"\"Create :class:`Imports` instance from python list.", "the portex belongs to. \"\"\" _builder: PackageBuilder def __getitem__(self, key:", "self._run([\"git\", \"checkout\", self._revision]) except CalledProcessError as error: raise GitCommandError( \"'git", "type. path: The source file path of the Portex template", "= PackageBuilder(url, revision) package = builder.build() packages.externals[url, revision] = package", "detected. \"\"\" if self.is_building: raise TypeError(\"Circular reference\") self.is_building = True", "return self.package[key] except KeyError: return self._builders.__getitem__(key).build() def _create_type_builders(self) -> Dict[str,", "the package repo. Returns: The root directory path of the", "packages from graviti.portex.param import Param, Params from graviti.portex.register import ExternalContainerRegister", "of the external package. \"\"\" def __init__(self, url: str, revision:", "one 'ROOT.yaml' file found\") return roots[0].parent class PackageBuilder: \"\"\"The builder", "import Param, Params from graviti.portex.register import ExternalContainerRegister if TYPE_CHECKING: from", "= \".\".join(parts) builders[name] = TypeBuilder(name, yaml_file, self) return builders def", "package. revision: The git repo revision (tag/commit) of the external", "Arguments: package: The package the portex belongs to. 
\"\"\" _builder:", "than one \"ROOT.yaml\" found. \"\"\" roots = list(self._path.glob(\"**/ROOT.yaml\")) if len(roots)", "command failed means the git repo has been cleaned or", "BuilderImports.from_pyobj(content.get(\"imports\", []), self._builder) factory = TypeFactory(decl, imports) keys = factory.keys", "#!/usr/bin/env python3 # # Copyright 2022 Graviti. Licensed under MIT", "The package the Portex template type belongs to. \"\"\" def", "/ \"portex\" tempdir.mkdir(exist_ok=True) md5_instance = md5() md5_instance.update(url.encode(\"utf-8\")) md5_instance.update(revision.encode(\"utf-8\")) self._path =", "return run(args, cwd=self._path, env=self._env, stdout=PIPE, stderr=PIPE, check=True) def _init_repo(self) ->", "= builder # pylint: disable=protected-access return imports def build_package(url: str,", "except CalledProcessError as error: raise GitCommandError( \"'git fetch' failed, most", "error, ) from None try: self._run([\"git\", \"checkout\", self._revision]) except CalledProcessError", "from graviti.portex.base import PortexRecordBase from graviti.portex.external import PortexExternalType from graviti.portex.factory", "ExternalPackage, Imports, packages from graviti.portex.param import Param, Params from graviti.portex.register", "import GitCommandError, GitNotFoundError from graviti.portex.base import PortexRecordBase from graviti.portex.external import", "Portex template type. package: The package the Portex template type", "str, revision: str) -> ExternalPackage: \"\"\"Build an external package. Arguments:", "file found\") return roots[0].parent class PackageBuilder: \"\"\"The builder of the", "repo revision (tag/commit) of the external package. 
Returns: The :class:`ExternalPackage`", "content[\"declaration\"] imports = BuilderImports.from_pyobj(content.get(\"imports\", []), self._builder) factory = TypeFactory(decl, imports)", "= path self._builder = builder self.is_building = False def build(self)", "None: self.package = ExternalPackage(url, revision) self._builders = self._create_type_builders() def __getitem__(self,", "CalledProcessError as error: raise GitCommandError( \"'git fetch' failed, most likely", "Type[\"PortexType\"]: try: return super().__getitem__(key) except KeyError: return self._builder.__getitem__(key) @classmethod def", "True with self._path.open() as fp: content = yaml.load(fp, yaml.Loader) params_pyobj", "self._deep_fetch() except (CalledProcessError, GitCommandError, FileNotFoundError): rmtree(path) raise print(f\"Cloned to '{path}'\")", "= md5() md5_instance.update(url.encode(\"utf-8\")) md5_instance.update(revision.encode(\"utf-8\")) self._path = tempdir / md5_instance.hexdigest() self._url", "key, value in params.items(): value.ptype = keys.get(key, PTYPE.Any) params.add(Param(\"nullable\", False,", "Raises: TypeError: when the \"ROOT.yaml\" not found or more than", "= name self._path = path self._builder = builder self.is_building =", "None: self._run([\"git\", \"init\"]) self._run([\"git\", \"remote\", \"add\", \"origin\", self._url]) def _shallow_fetch(self)", "self._create_type_builders() def __getitem__(self, key: str) -> Type[\"PortexExternalType\"]: try: return self.package[key]", "== 0: raise TypeError(\"No 'ROOT.yaml' file found\") if len(roots) >=", "-> None: self.package = ExternalPackage(url, revision) self._builders = self._create_type_builders() def", "GitCommandError( \"'git checkout' failed, most likely due to the repo", "of the Portex template type. package: The package the Portex", "Path from shutil import rmtree from subprocess import PIPE, CalledProcessError,", "builded Portex external package. 
\"\"\" for builder in self._builders.values(): if", "_prepare_repo(self) -> None: if not self._path.exists(): self._clone_repo() elif not self._check_repo_integrity():", "None def _check_repo_integrity(self) -> bool: try: result = self._run([\"git\", \"status\",", "type_ return type_ class BuilderImports(Imports): \"\"\"The imports of the Portex", "self._path = tempdir / md5_instance.hexdigest() self._url = url self._revision =", "Portex external package. Returns: The builded Portex external package. \"\"\"", "checkout' failed, most likely due to the repo revision is", "graviti.portex.package import ExternalPackage, Imports, packages from graviti.portex.param import Param, Params", "# type: ignore[override] # pylint: disable=arguments-differ cls: Type[_I], content: List[Dict[str,", "The git repo revision (tag/commit) of the external package. Returns:", "from_pyobj( # type: ignore[override] # pylint: disable=arguments-differ cls: Type[_I], content:", "from graviti.portex.base import PortexType EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER _I = TypeVar(\"_I\",", ") else: bases = (PortexExternalType,) type_ = type(self._name, bases, class_attrs)", "= PackageRepo(self.package.url, self.package.revision) root = repo.get_root() builders = {} for", "factory.class_, imports, factory.transform_kwargs ) else: bases = (PortexExternalType,) type_ =", "None: if not self._path.exists(): self._clone_repo() elif not self._check_repo_integrity(): rmtree(self._path) self._clone_repo()", "\"--depth=1\"]) self._run([\"git\", \"checkout\", \"FETCH_HEAD\"]) def _deep_fetch(self) -> None: try: self._run([\"git\",", "self._shallow_fetch() except CalledProcessError: self._deep_fetch() except (CalledProcessError, GitCommandError, FileNotFoundError): rmtree(path) raise", "from pathlib import Path from shutil import rmtree from subprocess", "hashlib import md5 from pathlib import Path from shutil import", "\"\"\"Build the Portex external type. 
Returns: The builded Portex external", "url of the external package. revision: The git repo revision", "likely due to the repo url is invalid.\", error, )", "to the repo revision is invalid.\", error, ) from None", "from subprocess import CompletedProcess from graviti.portex.base import PortexType EXTERNAL_TYPE_TO_CONTAINER =", "from hashlib import md5 from pathlib import Path from shutil", "url self._revision = revision try: self._prepare_repo() except FileNotFoundError: raise GitNotFoundError()", "A :class:`Imports` instance created from the input python list. \"\"\"", "from graviti.portex.package import ExternalPackage, Imports, packages from graviti.portex.param import Param,", "EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER _I = TypeVar(\"_I\", bound=\"BuilderImports\") class PackageRepo: \"\"\"The", "= (*yaml_file.relative_to(root).parent.parts, yaml_file.stem) name = \".\".join(parts) builders[name] = TypeBuilder(name, yaml_file,", "GitNotFoundError from graviti.portex.base import PortexRecordBase from graviti.portex.external import PortexExternalType from", "super().from_pyobj(content) imports._builder = builder # pylint: disable=protected-access return imports def", "= (PortexExternalType,) type_ = type(self._name, bases, class_attrs) self._builder.package[self._name] = type_", "} if issubclass(factory.class_, PortexRecordBase): bases: Tuple[Type[\"PortexType\"], ...] = (PortexRecordBase, PortexExternalType)", "\"'git fetch' failed, most likely due to the repo url", "\"\"\" def __init__(self, name: str, path: Path, builder: PackageBuilder) ->", "PackageRepo: \"\"\"The local git repo of the external Portex package.", "\"\"\"The local git repo of the external Portex package. 
Arguments:", "self._revision, \"--depth=1\"]) self._run([\"git\", \"checkout\", \"FETCH_HEAD\"]) def _deep_fetch(self) -> None: try:", "rmtree(path) raise print(f\"Cloned to '{path}'\") def get_root(self) -> Path: \"\"\"Get", "PortexRecordBase from graviti.portex.external import PortexExternalType from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory", "imports._builder = builder # pylint: disable=protected-access return imports def build_package(url:", "has been cleaned or broken return False return not bool(result.stdout)", "package. \"\"\" def __init__(self, url: str, revision: str) -> None:", "self.is_building: raise TypeError(\"Circular reference\") self.is_building = True with self._path.open() as", "name: str, path: Path, builder: PackageBuilder) -> None: self._name =", "str, path: Path, builder: PackageBuilder) -> None: self._name = name", "self._clone_repo() elif not self._check_repo_integrity(): rmtree(self._path) self._clone_repo() def _run(self, args: List[str])", "most likely due to the repo url is invalid.\", error,", "belongs to. \"\"\" def __init__(self, name: str, path: Path, builder:", "Dict[str, Any] = {} def __init__(self, url: str, revision: str)", "of the external package. revision: The git repo revision (tag/commit)", "ignore[override] # pylint: disable=arguments-differ cls: Type[_I], content: List[Dict[str, Any]], builder:", "-> None: self._run([\"git\", \"fetch\", \"origin\", self._revision, \"--depth=1\"]) self._run([\"git\", \"checkout\", \"FETCH_HEAD\"])", "import PortexType EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER _I = TypeVar(\"_I\", bound=\"BuilderImports\") class", "ExternalPackage: \"\"\"Build an external package. Arguments: url: The git repo", "builder related classes.\"\"\" from hashlib import md5 from pathlib import", "The root directory path of the package repo. Raises: TypeError:", "external package. Arguments: url: The git repo url of the", "the external package. 
Returns: The :class:`ExternalPackage` instance. \"\"\" builder =", "md5() md5_instance.update(url.encode(\"utf-8\")) md5_instance.update(revision.encode(\"utf-8\")) self._path = tempdir / md5_instance.hexdigest() self._url =", ":class:`ExternalPackage` instance. \"\"\" builder = PackageBuilder(url, revision) package = builder.build()", "self._run([\"git\", \"status\", \"--porcelain\"]) except CalledProcessError: # The git command failed", "md5_instance.update(revision.encode(\"utf-8\")) self._path = tempdir / md5_instance.hexdigest() self._url = url self._revision", "external Portex template type. Arguments: name: The name of the", "Portex external type. Returns: The builded Portex external type. Raises:", "template type belongs to. \"\"\" def __init__(self, name: str, path:", "import ConnectedFieldsFactory, TypeFactory from graviti.portex.package import ExternalPackage, Imports, packages from", "imports) keys = factory.keys params = Params.from_pyobj(params_pyobj) for key, value", "PortexType EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER _I = TypeVar(\"_I\", bound=\"BuilderImports\") class PackageRepo:", "self._run([\"git\", \"fetch\", \"origin\", self._revision, \"--depth=1\"]) self._run([\"git\", \"checkout\", \"FETCH_HEAD\"]) def _deep_fetch(self)", "repo. Raises: TypeError: when the \"ROOT.yaml\" not found or more", "most likely due to the repo revision is invalid.\", error,", "revision is invalid.\", error, ) from None def _check_repo_integrity(self) ->", "{} for yaml_file in root.glob(\"**/*.yaml\"): if yaml_file.name == \"ROOT.yaml\": continue", "except KeyError: return self._builder.__getitem__(key) @classmethod def from_pyobj( # type: ignore[override]", "type. Returns: The builded Portex external type. Raises: TypeError: Raise", "A python list representing imported types. builder: The package builder.", "directory path of the package repo. 
Returns: The root directory", "def _clone_repo(self) -> None: print(f\"Cloning repo '{self._url}@{self._revision}'\") path = self._path", "FileNotFoundError): rmtree(path) raise print(f\"Cloned to '{path}'\") def get_root(self) -> Path:", "Returns: The root directory path of the package repo. Raises:", "graviti.portex.base import PortexType EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER _I = TypeVar(\"_I\", bound=\"BuilderImports\")", "self._run([\"git\", \"fetch\", \"origin\"]) except CalledProcessError as error: raise GitCommandError( \"'git", "circular reference detected. \"\"\" if self.is_building: raise TypeError(\"Circular reference\") self.is_building", "\"\"\" builder = PackageBuilder(url, revision) package = builder.build() packages.externals[url, revision]", "self._builder.package, } if issubclass(factory.class_, PortexRecordBase): bases: Tuple[Type[\"PortexType\"], ...] = (PortexRecordBase,", "root = repo.get_root() builders = {} for yaml_file in root.glob(\"**/*.yaml\"):", "template type. path: The source file path of the Portex", "_builder: PackageBuilder def __getitem__(self, key: str) -> Type[\"PortexType\"]: try: return", "...] = (PortexRecordBase, PortexExternalType) class_attrs[\"_fields_factory\"] = ConnectedFieldsFactory( decl, factory.class_, imports,", "Returns: A :class:`Imports` instance created from the input python list.", "TypeBuilder: \"\"\"The builder of the external Portex template type. Arguments:", "python list. \"\"\" imports = super().from_pyobj(content) imports._builder = builder #", "url: The git repo url of the external package. revision:", "repo revision (tag/commit) of the external package. \"\"\" def __init__(self,", "imports, factory.transform_kwargs ) else: bases = (PortexExternalType,) type_ = type(self._name,", "package builder. Returns: A :class:`Imports` instance created from the input", "(tag/commit) of the external package. Returns: The :class:`ExternalPackage` instance. 
\"\"\"", "typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar import", "Portex template type. Arguments: name: The name of the Portex", "self.is_building = False def build(self) -> Type[\"PortexExternalType\"]: \"\"\"Build the Portex", "content = yaml.load(fp, yaml.Loader) params_pyobj = content.get(\"parameters\", []) decl =", "PackageBuilder def __getitem__(self, key: str) -> Type[\"PortexType\"]: try: return super().__getitem__(key)", "-> None: self._name = name self._path = path self._builder =", "self._builders.values(): if builder.is_building: continue builder.build() return self.package class TypeBuilder: \"\"\"The", "= type(self._name, bases, class_attrs) self._builder.package[self._name] = type_ return type_ class", "error: raise GitCommandError( \"'git fetch' failed, most likely due to", "def _prepare_repo(self) -> None: if not self._path.exists(): self._clone_repo() elif not", "created from the input python list. \"\"\" imports = super().from_pyobj(content)", "= yaml.load(fp, yaml.Loader) params_pyobj = content.get(\"parameters\", []) decl = content[\"declaration\"]", "= TypeBuilder(name, yaml_file, self) return builders def build(self) -> ExternalPackage:", "portex belongs to. 
\"\"\" _builder: PackageBuilder def __getitem__(self, key: str)", "ExternalContainerRegister if TYPE_CHECKING: from subprocess import CompletedProcess from graviti.portex.base import", "CalledProcessError: self._deep_fetch() except (CalledProcessError, GitCommandError, FileNotFoundError): rmtree(path) raise print(f\"Cloned to", "pylint: disable=protected-access return imports def build_package(url: str, revision: str) ->", "self._clone_repo() def _run(self, args: List[str]) -> \"CompletedProcess[bytes]\": return run(args, cwd=self._path,", "= content[\"declaration\"] imports = BuilderImports.from_pyobj(content.get(\"imports\", []), self._builder) factory = TypeFactory(decl,", "subprocess import CompletedProcess from graviti.portex.base import PortexType EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER", "url: str, revision: str) -> None: self.package = ExternalPackage(url, revision)", "import ExternalPackage, Imports, packages from graviti.portex.param import Param, Params from", "def _deep_fetch(self) -> None: try: self._run([\"git\", \"fetch\", \"origin\"]) except CalledProcessError", "\"\"\" _builder: PackageBuilder def __getitem__(self, key: str) -> Type[\"PortexType\"]: try:", "self._path.exists(): self._clone_repo() elif not self._check_repo_integrity(): rmtree(self._path) self._clone_repo() def _run(self, args:", "str) -> Type[\"PortexExternalType\"]: try: return self.package[key] except KeyError: return self._builders.__getitem__(key).build()", "_env: Dict[str, Any] = {} def __init__(self, url: str, revision:", "the repo url is invalid.\", error, ) from None try:", "the Portex external type. 
Returns: The builded Portex external type.", "raise print(f\"Cloned to '{path}'\") def get_root(self) -> Path: \"\"\"Get the", "2: raise TypeError(\"More than one 'ROOT.yaml' file found\") return roots[0].parent", "try: self._shallow_fetch() except CalledProcessError: self._deep_fetch() except (CalledProcessError, GitCommandError, FileNotFoundError): rmtree(path)", "\"\"\" roots = list(self._path.glob(\"**/ROOT.yaml\")) if len(roots) == 0: raise TypeError(\"No", "(CalledProcessError, GitCommandError, FileNotFoundError): rmtree(path) raise print(f\"Cloned to '{path}'\") def get_root(self)", "python list. Arguments: content: A python list representing imported types.", "directory path of the package repo. Raises: TypeError: when the", "graviti.portex.ptype as PTYPE from graviti.exception import GitCommandError, GitNotFoundError from graviti.portex.base", "list. \"\"\" imports = super().from_pyobj(content) imports._builder = builder # pylint:", "bases, class_attrs) self._builder.package[self._name] = type_ return type_ class BuilderImports(Imports): \"\"\"The", "= ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER _I = TypeVar(\"_I\", bound=\"BuilderImports\") class PackageRepo: \"\"\"The local", "= (PortexRecordBase, PortexExternalType) class_attrs[\"_fields_factory\"] = ConnectedFieldsFactory( decl, factory.class_, imports, factory.transform_kwargs", "self._check_repo_integrity(): rmtree(self._path) self._clone_repo() def _run(self, args: List[str]) -> \"CompletedProcess[bytes]\": return", "PackageBuilder(url, revision) package = builder.build() packages.externals[url, revision] = package return", "build(self) -> Type[\"PortexExternalType\"]: \"\"\"Build the Portex external type. Returns: The", "The git repo url of the external package. 
revision: The", "def __getitem__(self, key: str) -> Type[\"PortexType\"]: try: return super().__getitem__(key) except", "self._builder.package[self._name] = type_ return type_ class BuilderImports(Imports): \"\"\"The imports of", "md5 from pathlib import Path from shutil import rmtree from", "-> Type[\"PortexExternalType\"]: \"\"\"Build the Portex external type. Returns: The builded", "Raise when circular reference detected. \"\"\" if self.is_building: raise TypeError(\"Circular", "len(roots) == 0: raise TypeError(\"No 'ROOT.yaml' file found\") if len(roots)", "tempfile import gettempdir from typing import TYPE_CHECKING, Any, Dict, List,", "ExternalPackage: \"\"\"Build the Portex external package. Returns: The builded Portex", "# pylint: disable=protected-access return imports def build_package(url: str, revision: str)", "invalid.\", error, ) from None def _check_repo_integrity(self) -> bool: try:", "-> None: try: self._run([\"git\", \"fetch\", \"origin\"]) except CalledProcessError as error:", "_create_type_builders(self) -> Dict[str, \"TypeBuilder\"]: repo = PackageRepo(self.package.url, self.package.revision) root =", "# # Copyright 2022 Graviti. Licensed under MIT License. #", "GitCommandError, GitNotFoundError from graviti.portex.base import PortexRecordBase from graviti.portex.external import PortexExternalType", "TypeBuilder(name, yaml_file, self) return builders def build(self) -> ExternalPackage: \"\"\"Build", "path self._builder = builder self.is_building = False def build(self) ->", "name: The name of the Portex template type. path: The", "with self._path.open() as fp: content = yaml.load(fp, yaml.Loader) params_pyobj =", "key: str) -> Type[\"PortexExternalType\"]: try: return self.package[key] except KeyError: return", "List[str]) -> \"CompletedProcess[bytes]\": return run(args, cwd=self._path, env=self._env, stdout=PIPE, stderr=PIPE, check=True)", "of the external Portex template type. Arguments: name: The name", "git repo url of the external package. 
revision: The git", "TypeFactory(decl, imports) keys = factory.keys params = Params.from_pyobj(params_pyobj) for key,", "list representing imported types. builder: The package builder. Returns: A", "found. \"\"\" roots = list(self._path.glob(\"**/ROOT.yaml\")) if len(roots) == 0: raise", "\"\"\" def __init__(self, url: str, revision: str) -> None: self.package", "TypeError: when the \"ROOT.yaml\" not found or more than one", "params.items(): value.ptype = keys.get(key, PTYPE.Any) params.add(Param(\"nullable\", False, ptype=PTYPE.Boolean)) class_attrs: Dict[str,", "yaml_file.stem) name = \".\".join(parts) builders[name] = TypeBuilder(name, yaml_file, self) return", "TypeVar(\"_I\", bound=\"BuilderImports\") class PackageRepo: \"\"\"The local git repo of the", "class_attrs) self._builder.package[self._name] = type_ return type_ class BuilderImports(Imports): \"\"\"The imports", "builder of the external Portex package. Arguments: url: The git", "Copyright 2022 Graviti. Licensed under MIT License. # \"\"\"Portex type", "disable=protected-access return imports def build_package(url: str, revision: str) -> ExternalPackage:", "of the external package. Returns: The :class:`ExternalPackage` instance. \"\"\" builder", "ExternalPackage(url, revision) self._builders = self._create_type_builders() def __getitem__(self, key: str) ->", "0: raise TypeError(\"No 'ROOT.yaml' file found\") if len(roots) >= 2:", "self._builders.__getitem__(key).build() def _create_type_builders(self) -> Dict[str, \"TypeBuilder\"]: repo = PackageRepo(self.package.url, self.package.revision)", "builder in self._builders.values(): if builder.is_building: continue builder.build() return self.package class", "-> _I: \"\"\"Create :class:`Imports` instance from python list. Arguments: content:", "\"package\": self._builder.package, } if issubclass(factory.class_, PortexRecordBase): bases: Tuple[Type[\"PortexType\"], ...] 
=", "= super().from_pyobj(content) imports._builder = builder # pylint: disable=protected-access return imports", "self._run([\"git\", \"init\"]) self._run([\"git\", \"remote\", \"add\", \"origin\", self._url]) def _shallow_fetch(self) ->", "tempdir.mkdir(exist_ok=True) md5_instance = md5() md5_instance.update(url.encode(\"utf-8\")) md5_instance.update(revision.encode(\"utf-8\")) self._path = tempdir /", "import PIPE, CalledProcessError, run from tempfile import gettempdir from typing", "\"--porcelain\"]) except CalledProcessError: # The git command failed means the", "ConnectedFieldsFactory, TypeFactory from graviti.portex.package import ExternalPackage, Imports, packages from graviti.portex.param", "roots = list(self._path.glob(\"**/ROOT.yaml\")) if len(roots) == 0: raise TypeError(\"No 'ROOT.yaml'", "related classes.\"\"\" from hashlib import md5 from pathlib import Path", "broken return False return not bool(result.stdout) def _clone_repo(self) -> None:", "def __init__(self, url: str, revision: str) -> None: self.package =", "type. Arguments: package: The package the portex belongs to. \"\"\"", "package. Returns: The :class:`ExternalPackage` instance. \"\"\" builder = PackageBuilder(url, revision)", "\"\"\"Build an external package. Arguments: url: The git repo url", "import CompletedProcess from graviti.portex.base import PortexType EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER _I", "= ExternalPackage(url, revision) self._builders = self._create_type_builders() def __getitem__(self, key: str)", "repo.get_root() builders = {} for yaml_file in root.glob(\"**/*.yaml\"): if yaml_file.name", "GitCommandError, FileNotFoundError): rmtree(path) raise print(f\"Cloned to '{path}'\") def get_root(self) ->", "builder: The package builder. 
Returns: A :class:`Imports` instance created from", "self._url]) def _shallow_fetch(self) -> None: self._run([\"git\", \"fetch\", \"origin\", self._revision, \"--depth=1\"])", "_shallow_fetch(self) -> None: self._run([\"git\", \"fetch\", \"origin\", self._revision, \"--depth=1\"]) self._run([\"git\", \"checkout\",", "or broken return False return not bool(result.stdout) def _clone_repo(self) ->", "package. \"\"\" _env: Dict[str, Any] = {} def __init__(self, url:", "found\") if len(roots) >= 2: raise TypeError(\"More than one 'ROOT.yaml'", "Path, builder: PackageBuilder) -> None: self._name = name self._path =", "Returns: The builded Portex external type. Raises: TypeError: Raise when", "self._builders = self._create_type_builders() def __getitem__(self, key: str) -> Type[\"PortexExternalType\"]: try:", "\"params\": params, \"factory\": factory, \"package\": self._builder.package, } if issubclass(factory.class_, PortexRecordBase):", "repo has been cleaned or broken return False return not", "external type. Raises: TypeError: Raise when circular reference detected. \"\"\"", "-> Path: \"\"\"Get the root directory path of the package", "external package. Returns: The :class:`ExternalPackage` instance. \"\"\" builder = PackageBuilder(url,", "the repo revision is invalid.\", error, ) from None def", "__init__(self, url: str, revision: str) -> None: self.package = ExternalPackage(url,", "package. Returns: The builded Portex external package. \"\"\" for builder", "self._builder = builder self.is_building = False def build(self) -> Type[\"PortexExternalType\"]:", "more than one \"ROOT.yaml\" found. 
\"\"\" roots = list(self._path.glob(\"**/ROOT.yaml\")) if", "builder: PackageBuilder) -> None: self._name = name self._path = path", "content: List[Dict[str, Any]], builder: PackageBuilder ) -> _I: \"\"\"Create :class:`Imports`", "None: self._run([\"git\", \"fetch\", \"origin\", self._revision, \"--depth=1\"]) self._run([\"git\", \"checkout\", \"FETCH_HEAD\"]) def", "subprocess import PIPE, CalledProcessError, run from tempfile import gettempdir from", "graviti.portex.base import PortexRecordBase from graviti.portex.external import PortexExternalType from graviti.portex.factory import", "factory = TypeFactory(decl, imports) keys = factory.keys params = Params.from_pyobj(params_pyobj)", "The package builder. Returns: A :class:`Imports` instance created from the", "\"portex\" tempdir.mkdir(exist_ok=True) md5_instance = md5() md5_instance.update(url.encode(\"utf-8\")) md5_instance.update(revision.encode(\"utf-8\")) self._path = tempdir", "builded Portex external type. Raises: TypeError: Raise when circular reference", "found\") return roots[0].parent class PackageBuilder: \"\"\"The builder of the external", "print(f\"Cloned to '{path}'\") def get_root(self) -> Path: \"\"\"Get the root", "raise TypeError(\"Circular reference\") self.is_building = True with self._path.open() as fp:", "failed means the git repo has been cleaned or broken", "super().__getitem__(key) except KeyError: return self._builder.__getitem__(key) @classmethod def from_pyobj( # type:", "-> ExternalPackage: \"\"\"Build an external package. Arguments: url: The git", "-> None: if not self._path.exists(): self._clone_repo() elif not self._check_repo_integrity(): rmtree(self._path)", "False def build(self) -> Type[\"PortexExternalType\"]: \"\"\"Build the Portex external type.", "str) -> Type[\"PortexType\"]: try: return super().__getitem__(key) except KeyError: return self._builder.__getitem__(key)", "template type. 
Arguments: package: The package the portex belongs to.", "the root directory path of the package repo. Returns: The", "package the portex belongs to. \"\"\" _builder: PackageBuilder def __getitem__(self,", "invalid.\", error, ) from None try: self._run([\"git\", \"checkout\", self._revision]) except", "result = self._run([\"git\", \"status\", \"--porcelain\"]) except CalledProcessError: # The git", "= Path(gettempdir()) / \"portex\" tempdir.mkdir(exist_ok=True) md5_instance = md5() md5_instance.update(url.encode(\"utf-8\")) md5_instance.update(revision.encode(\"utf-8\"))", "to. \"\"\" _builder: PackageBuilder def __getitem__(self, key: str) -> Type[\"PortexType\"]:", "\"\"\"Create :class:`Imports` instance from python list. Arguments: content: A python", "revision (tag/commit) of the external package. \"\"\" _env: Dict[str, Any]", "if self.is_building: raise TypeError(\"Circular reference\") self.is_building = True with self._path.open()", "PortexRecordBase): bases: Tuple[Type[\"PortexType\"], ...] = (PortexRecordBase, PortexExternalType) class_attrs[\"_fields_factory\"] = ConnectedFieldsFactory(", "Dict, List, Tuple, Type, TypeVar import yaml import graviti.portex.ptype as", "len(roots) >= 2: raise TypeError(\"More than one 'ROOT.yaml' file found\")", "md5_instance = md5() md5_instance.update(url.encode(\"utf-8\")) md5_instance.update(revision.encode(\"utf-8\")) self._path = tempdir / md5_instance.hexdigest()", "ConnectedFieldsFactory( decl, factory.class_, imports, factory.transform_kwargs ) else: bases = (PortexExternalType,)", "= repo.get_root() builders = {} for yaml_file in root.glob(\"**/*.yaml\"): if", "Type[_I], content: List[Dict[str, Any]], builder: PackageBuilder ) -> _I: \"\"\"Create", "the external package. 
\"\"\" _env: Dict[str, Any] = {} def", "elif not self._check_repo_integrity(): rmtree(self._path) self._clone_repo() def _run(self, args: List[str]) ->", "PortexExternalType from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory from graviti.portex.package import ExternalPackage,", "self._revision]) except CalledProcessError as error: raise GitCommandError( \"'git checkout' failed,", "from shutil import rmtree from subprocess import PIPE, CalledProcessError, run", "'{path}'\") def get_root(self) -> Path: \"\"\"Get the root directory path", "return not bool(result.stdout) def _clone_repo(self) -> None: print(f\"Cloning repo '{self._url}@{self._revision}'\")", "Path: \"\"\"Get the root directory path of the package repo.", "\"origin\", self._revision, \"--depth=1\"]) self._run([\"git\", \"checkout\", \"FETCH_HEAD\"]) def _deep_fetch(self) -> None:", "License. # \"\"\"Portex type builder related classes.\"\"\" from hashlib import", "revision (tag/commit) of the external package. \"\"\" def __init__(self, url:", "\"fetch\", \"origin\", self._revision, \"--depth=1\"]) self._run([\"git\", \"checkout\", \"FETCH_HEAD\"]) def _deep_fetch(self) ->", "\"\"\"Portex type builder related classes.\"\"\" from hashlib import md5 from", "(tag/commit) of the external package. \"\"\" def __init__(self, url: str,", "name self._path = path self._builder = builder self.is_building = False", "Dict[str, \"TypeBuilder\"]: repo = PackageRepo(self.package.url, self.package.revision) root = repo.get_root() builders", "external package. \"\"\" def __init__(self, url: str, revision: str) ->", "-> Type[\"PortexExternalType\"]: try: return self.package[key] except KeyError: return self._builders.__getitem__(key).build() def", "yaml_file, self) return builders def build(self) -> ExternalPackage: \"\"\"Build the", "from tempfile import gettempdir from typing import TYPE_CHECKING, Any, Dict,", "raise GitCommandError( \"'git checkout' failed, most likely due to the", "external type. 
Returns: The builded Portex external type. Raises: TypeError:", "self.package.revision) root = repo.get_root() builders = {} for yaml_file in", "self._run([\"git\", \"remote\", \"add\", \"origin\", self._url]) def _shallow_fetch(self) -> None: self._run([\"git\",", "\"origin\", self._url]) def _shallow_fetch(self) -> None: self._run([\"git\", \"fetch\", \"origin\", self._revision,", "list(self._path.glob(\"**/ROOT.yaml\")) if len(roots) == 0: raise TypeError(\"No 'ROOT.yaml' file found\")", "PackageRepo(self.package.url, self.package.revision) root = repo.get_root() builders = {} for yaml_file", "GitCommandError( \"'git fetch' failed, most likely due to the repo", "package. \"\"\" for builder in self._builders.values(): if builder.is_building: continue builder.build()", "self._revision = revision try: self._prepare_repo() except FileNotFoundError: raise GitNotFoundError() from", "the Portex template type. Arguments: package: The package the portex", "git command failed means the git repo has been cleaned", "Arguments: url: The git repo url of the external package.", "\"ROOT.yaml\": continue parts = (*yaml_file.relative_to(root).parent.parts, yaml_file.stem) name = \".\".join(parts) builders[name]", "= BuilderImports.from_pyobj(content.get(\"imports\", []), self._builder) factory = TypeFactory(decl, imports) keys =", "self._path path.mkdir() try: self._init_repo() try: self._shallow_fetch() except CalledProcessError: self._deep_fetch() except", "PTYPE from graviti.exception import GitCommandError, GitNotFoundError from graviti.portex.base import PortexRecordBase", "reference\") self.is_building = True with self._path.open() as fp: content =", "belongs to. 
\"\"\" _builder: PackageBuilder def __getitem__(self, key: str) ->", "\"\"\" imports = super().from_pyobj(content) imports._builder = builder # pylint: disable=protected-access", "except FileNotFoundError: raise GitNotFoundError() from None def _prepare_repo(self) -> None:", "cwd=self._path, env=self._env, stdout=PIPE, stderr=PIPE, check=True) def _init_repo(self) -> None: self._run([\"git\",", "def __init__(self, url: str, revision: str) -> None: tempdir =", "package repo. Raises: TypeError: when the \"ROOT.yaml\" not found or", "(PortexRecordBase, PortexExternalType) class_attrs[\"_fields_factory\"] = ConnectedFieldsFactory( decl, factory.class_, imports, factory.transform_kwargs )", "likely due to the repo revision is invalid.\", error, )", "if len(roots) == 0: raise TypeError(\"No 'ROOT.yaml' file found\") if", "except CalledProcessError: # The git command failed means the git", "TypeFactory from graviti.portex.package import ExternalPackage, Imports, packages from graviti.portex.param import", "self._prepare_repo() except FileNotFoundError: raise GitNotFoundError() from None def _prepare_repo(self) ->", "Arguments: content: A python list representing imported types. builder: The", "Portex template type. 
path: The source file path of the", "as fp: content = yaml.load(fp, yaml.Loader) params_pyobj = content.get(\"parameters\", [])", "CalledProcessError: # The git command failed means the git repo", "try: self._init_repo() try: self._shallow_fetch() except CalledProcessError: self._deep_fetch() except (CalledProcessError, GitCommandError,", "build_package(url: str, revision: str) -> ExternalPackage: \"\"\"Build an external package.", "import gettempdir from typing import TYPE_CHECKING, Any, Dict, List, Tuple,", "tempdir = Path(gettempdir()) / \"portex\" tempdir.mkdir(exist_ok=True) md5_instance = md5() md5_instance.update(url.encode(\"utf-8\"))", "The git command failed means the git repo has been", "rmtree(self._path) self._clone_repo() def _run(self, args: List[str]) -> \"CompletedProcess[bytes]\": return run(args,", "\"TypeBuilder\"]: repo = PackageRepo(self.package.url, self.package.revision) root = repo.get_root() builders =", "the \"ROOT.yaml\" not found or more than one \"ROOT.yaml\" found.", "graviti.portex.factory import ConnectedFieldsFactory, TypeFactory from graviti.portex.package import ExternalPackage, Imports, packages", "params, \"factory\": factory, \"package\": self._builder.package, } if issubclass(factory.class_, PortexRecordBase): bases:", "python list representing imported types. builder: The package builder. Returns:", "in root.glob(\"**/*.yaml\"): if yaml_file.name == \"ROOT.yaml\": continue parts = (*yaml_file.relative_to(root).parent.parts,", "instance created from the input python list. \"\"\" imports =", "the external package. \"\"\" def __init__(self, url: str, revision: str)", "Raises: TypeError: Raise when circular reference detected. \"\"\" if self.is_building:", "_clone_repo(self) -> None: print(f\"Cloning repo '{self._url}@{self._revision}'\") path = self._path path.mkdir()", "from None def _check_repo_integrity(self) -> bool: try: result = self._run([\"git\",", "path of the package repo. 
Raises: TypeError: when the \"ROOT.yaml\"", "ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER _I = TypeVar(\"_I\", bound=\"BuilderImports\") class PackageRepo: \"\"\"The local git", "= {} def __init__(self, url: str, revision: str) -> None:", "gettempdir from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type,", "if not self._path.exists(): self._clone_repo() elif not self._check_repo_integrity(): rmtree(self._path) self._clone_repo() def", "cls: Type[_I], content: List[Dict[str, Any]], builder: PackageBuilder ) -> _I:", "__init__(self, name: str, path: Path, builder: PackageBuilder) -> None: self._name", "type belongs to. \"\"\" def __init__(self, name: str, path: Path,", "The package the portex belongs to. \"\"\" _builder: PackageBuilder def", "Param, Params from graviti.portex.register import ExternalContainerRegister if TYPE_CHECKING: from subprocess", "except KeyError: return self._builders.__getitem__(key).build() def _create_type_builders(self) -> Dict[str, \"TypeBuilder\"]: repo", "try: return super().__getitem__(key) except KeyError: return self._builder.__getitem__(key) @classmethod def from_pyobj(", "TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar import yaml import", "(tag/commit) of the external package. \"\"\" _env: Dict[str, Any] =", "try: return self.package[key] except KeyError: return self._builders.__getitem__(key).build() def _create_type_builders(self) ->", "graviti.exception import GitCommandError, GitNotFoundError from graviti.portex.base import PortexRecordBase from graviti.portex.external", "\"\"\"Build the Portex external package. Returns: The builded Portex external", "Type[\"PortexExternalType\"]: \"\"\"Build the Portex external type. Returns: The builded Portex", "when circular reference detected. \"\"\" if self.is_building: raise TypeError(\"Circular reference\")", ":class:`Imports` instance created from the input python list. 
\"\"\" imports", "def build_package(url: str, revision: str) -> ExternalPackage: \"\"\"Build an external", "classes.\"\"\" from hashlib import md5 from pathlib import Path from", "repo revision is invalid.\", error, ) from None def _check_repo_integrity(self)", "Arguments: name: The name of the Portex template type. path:", "path.mkdir() try: self._init_repo() try: self._shallow_fetch() except CalledProcessError: self._deep_fetch() except (CalledProcessError,", "self._init_repo() try: self._shallow_fetch() except CalledProcessError: self._deep_fetch() except (CalledProcessError, GitCommandError, FileNotFoundError):", "self.is_building = True with self._path.open() as fp: content = yaml.load(fp,", "key: str) -> Type[\"PortexType\"]: try: return super().__getitem__(key) except KeyError: return", "self._name = name self._path = path self._builder = builder self.is_building", "yaml.Loader) params_pyobj = content.get(\"parameters\", []) decl = content[\"declaration\"] imports =", "\"ROOT.yaml\" not found or more than one \"ROOT.yaml\" found. \"\"\"", "path: Path, builder: PackageBuilder) -> None: self._name = name self._path", "imported types. builder: The package builder. Returns: A :class:`Imports` instance", "-> None: print(f\"Cloning repo '{self._url}@{self._revision}'\") path = self._path path.mkdir() try:", "import graviti.portex.ptype as PTYPE from graviti.exception import GitCommandError, GitNotFoundError from", "self._run([\"git\", \"checkout\", \"FETCH_HEAD\"]) def _deep_fetch(self) -> None: try: self._run([\"git\", \"fetch\",", "from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory from graviti.portex.package import ExternalPackage, Imports,", "Params from graviti.portex.register import ExternalContainerRegister if TYPE_CHECKING: from subprocess import", ">= 2: raise TypeError(\"More than one 'ROOT.yaml' file found\") return", "the input python list. 
\"\"\" imports = super().from_pyobj(content) imports._builder =", "_I = TypeVar(\"_I\", bound=\"BuilderImports\") class PackageRepo: \"\"\"The local git repo", "type. package: The package the Portex template type belongs to.", "external package. \"\"\" _env: Dict[str, Any] = {} def __init__(self,", "of the Portex template type. path: The source file path", "for builder in self._builders.values(): if builder.is_building: continue builder.build() return self.package", "type_ = type(self._name, bases, class_attrs) self._builder.package[self._name] = type_ return type_", "local git repo of the external Portex package. Arguments: url:", "= { \"params\": params, \"factory\": factory, \"package\": self._builder.package, } if", "url: str, revision: str) -> None: tempdir = Path(gettempdir()) /", "= self._path path.mkdir() try: self._init_repo() try: self._shallow_fetch() except CalledProcessError: self._deep_fetch()", "= self._run([\"git\", \"status\", \"--porcelain\"]) except CalledProcessError: # The git command", "self._builder) factory = TypeFactory(decl, imports) keys = factory.keys params =", "self._url = url self._revision = revision try: self._prepare_repo() except FileNotFoundError:", "repo = PackageRepo(self.package.url, self.package.revision) root = repo.get_root() builders = {}", "for key, value in params.items(): value.ptype = keys.get(key, PTYPE.Any) params.add(Param(\"nullable\",", "of the external package. \"\"\" _env: Dict[str, Any] = {}", "GitNotFoundError() from None def _prepare_repo(self) -> None: if not self._path.exists():", "type. Raises: TypeError: Raise when circular reference detected. \"\"\" if", "bool(result.stdout) def _clone_repo(self) -> None: print(f\"Cloning repo '{self._url}@{self._revision}'\") path =", "\"\"\"The builder of the external Portex package. Arguments: url: The", "template type. Arguments: name: The name of the Portex template", "from the input python list. 
\"\"\" imports = super().from_pyobj(content) imports._builder", "shutil import rmtree from subprocess import PIPE, CalledProcessError, run from", "= self._create_type_builders() def __getitem__(self, key: str) -> Type[\"PortexExternalType\"]: try: return", "params.add(Param(\"nullable\", False, ptype=PTYPE.Boolean)) class_attrs: Dict[str, Any] = { \"params\": params,", "root.glob(\"**/*.yaml\"): if yaml_file.name == \"ROOT.yaml\": continue parts = (*yaml_file.relative_to(root).parent.parts, yaml_file.stem)", "import PortexRecordBase from graviti.portex.external import PortexExternalType from graviti.portex.factory import ConnectedFieldsFactory,", "repo url is invalid.\", error, ) from None try: self._run([\"git\",", "print(f\"Cloning repo '{self._url}@{self._revision}'\") path = self._path path.mkdir() try: self._init_repo() try:", "Tuple[Type[\"PortexType\"], ...] = (PortexRecordBase, PortexExternalType) class_attrs[\"_fields_factory\"] = ConnectedFieldsFactory( decl, factory.class_,", "str, revision: str) -> None: self.package = ExternalPackage(url, revision) self._builders", "instance from python list. Arguments: content: A python list representing", "from graviti.portex.register import ExternalContainerRegister if TYPE_CHECKING: from subprocess import CompletedProcess", "TYPE_CHECKING: from subprocess import CompletedProcess from graviti.portex.base import PortexType EXTERNAL_TYPE_TO_CONTAINER", "if len(roots) >= 2: raise TypeError(\"More than one 'ROOT.yaml' file", "\"\"\"Get the root directory path of the package repo. 
Returns:", "= Params.from_pyobj(params_pyobj) for key, value in params.items(): value.ptype = keys.get(key,", "{ \"params\": params, \"factory\": factory, \"package\": self._builder.package, } if issubclass(factory.class_,", "except CalledProcessError: self._deep_fetch() except (CalledProcessError, GitCommandError, FileNotFoundError): rmtree(path) raise print(f\"Cloned", "__getitem__(self, key: str) -> Type[\"PortexExternalType\"]: try: return self.package[key] except KeyError:", "BuilderImports(Imports): \"\"\"The imports of the Portex template type. Arguments: package:", "import PortexExternalType from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory from graviti.portex.package import", "\"add\", \"origin\", self._url]) def _shallow_fetch(self) -> None: self._run([\"git\", \"fetch\", \"origin\",", "been cleaned or broken return False return not bool(result.stdout) def", "Portex template type belongs to. \"\"\" def __init__(self, name: str,", "keys.get(key, PTYPE.Any) params.add(Param(\"nullable\", False, ptype=PTYPE.Boolean)) class_attrs: Dict[str, Any] = {", "bound=\"BuilderImports\") class PackageRepo: \"\"\"The local git repo of the external", "return super().__getitem__(key) except KeyError: return self._builder.__getitem__(key) @classmethod def from_pyobj( #", "= builder self.is_building = False def build(self) -> Type[\"PortexExternalType\"]: \"\"\"Build", "path = self._path path.mkdir() try: self._init_repo() try: self._shallow_fetch() except CalledProcessError:", "yaml_file.name == \"ROOT.yaml\": continue parts = (*yaml_file.relative_to(root).parent.parts, yaml_file.stem) name =", "representing imported types. builder: The package builder. Returns: A :class:`Imports`", "source file path of the Portex template type. 
package: The", "\"factory\": factory, \"package\": self._builder.package, } if issubclass(factory.class_, PortexRecordBase): bases: Tuple[Type[\"PortexType\"],", "graviti.portex.param import Param, Params from graviti.portex.register import ExternalContainerRegister if TYPE_CHECKING:", "external package. revision: The git repo revision (tag/commit) of the", "one \"ROOT.yaml\" found. \"\"\" roots = list(self._path.glob(\"**/ROOT.yaml\")) if len(roots) ==", "url is invalid.\", error, ) from None try: self._run([\"git\", \"checkout\",", "return builders def build(self) -> ExternalPackage: \"\"\"Build the Portex external", "of the package repo. Raises: TypeError: when the \"ROOT.yaml\" not", "reference detected. \"\"\" if self.is_building: raise TypeError(\"Circular reference\") self.is_building =", "Any]], builder: PackageBuilder ) -> _I: \"\"\"Create :class:`Imports` instance from", "type_ class BuilderImports(Imports): \"\"\"The imports of the Portex template type.", "Tuple, Type, TypeVar import yaml import graviti.portex.ptype as PTYPE from", "instance. \"\"\" builder = PackageBuilder(url, revision) package = builder.build() packages.externals[url,", "Imports, packages from graviti.portex.param import Param, Params from graviti.portex.register import", "template type. package: The package the Portex template type belongs", "def _shallow_fetch(self) -> None: self._run([\"git\", \"fetch\", \"origin\", self._revision, \"--depth=1\"]) self._run([\"git\",", "\"status\", \"--porcelain\"]) except CalledProcessError: # The git command failed means", "path of the package repo. Returns: The root directory path", "types. builder: The package builder. 
Returns: A :class:`Imports` instance created", "raise GitNotFoundError() from None def _prepare_repo(self) -> None: if not", "List, Tuple, Type, TypeVar import yaml import graviti.portex.ptype as PTYPE", "try: result = self._run([\"git\", \"status\", \"--porcelain\"]) except CalledProcessError: # The", "value in params.items(): value.ptype = keys.get(key, PTYPE.Any) params.add(Param(\"nullable\", False, ptype=PTYPE.Boolean))", "package. Arguments: url: The git repo url of the external", "\"\"\" if self.is_building: raise TypeError(\"Circular reference\") self.is_building = True with", "str) -> ExternalPackage: \"\"\"Build an external package. Arguments: url: The", "repo revision (tag/commit) of the external package. \"\"\" _env: Dict[str,", "= factory.keys params = Params.from_pyobj(params_pyobj) for key, value in params.items():", "[]), self._builder) factory = TypeFactory(decl, imports) keys = factory.keys params", "except (CalledProcessError, GitCommandError, FileNotFoundError): rmtree(path) raise print(f\"Cloned to '{path}'\") def", "decl, factory.class_, imports, factory.transform_kwargs ) else: bases = (PortexExternalType,) type_", "import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar import yaml", "external package. Returns: The builded Portex external package. \"\"\" for", "stderr=PIPE, check=True) def _init_repo(self) -> None: self._run([\"git\", \"init\"]) self._run([\"git\", \"remote\",", "PackageBuilder) -> None: self._name = name self._path = path self._builder", "\"\"\" for builder in self._builders.values(): if builder.is_building: continue builder.build() return", "bool: try: result = self._run([\"git\", \"status\", \"--porcelain\"]) except CalledProcessError: #", "error, ) from None def _check_repo_integrity(self) -> bool: try: result", "repo of the external Portex package. Arguments: url: The git", "None def _prepare_repo(self) -> None: if not self._path.exists(): self._clone_repo() elif", "external Portex package. 
Arguments: url: The git repo url of", "not bool(result.stdout) def _clone_repo(self) -> None: print(f\"Cloning repo '{self._url}@{self._revision}'\") path", "Portex package. Arguments: url: The git repo url of the", "builder.is_building: continue builder.build() return self.package class TypeBuilder: \"\"\"The builder of", "not self._path.exists(): self._clone_repo() elif not self._check_repo_integrity(): rmtree(self._path) self._clone_repo() def _run(self,", "get_root(self) -> Path: \"\"\"Get the root directory path of the", "Params.from_pyobj(params_pyobj) for key, value in params.items(): value.ptype = keys.get(key, PTYPE.Any)", "self._builder.__getitem__(key) @classmethod def from_pyobj( # type: ignore[override] # pylint: disable=arguments-differ", "ptype=PTYPE.Boolean)) class_attrs: Dict[str, Any] = { \"params\": params, \"factory\": factory,", "def _create_type_builders(self) -> Dict[str, \"TypeBuilder\"]: repo = PackageRepo(self.package.url, self.package.revision) root", "__getitem__(self, key: str) -> Type[\"PortexType\"]: try: return super().__getitem__(key) except KeyError:", "self) return builders def build(self) -> ExternalPackage: \"\"\"Build the Portex", "git repo revision (tag/commit) of the external package. \"\"\" def", "from None def _prepare_repo(self) -> None: if not self._path.exists(): self._clone_repo()", "-> None: tempdir = Path(gettempdir()) / \"portex\" tempdir.mkdir(exist_ok=True) md5_instance =", "decl = content[\"declaration\"] imports = BuilderImports.from_pyobj(content.get(\"imports\", []), self._builder) factory =", "return self._builder.__getitem__(key) @classmethod def from_pyobj( # type: ignore[override] # pylint:", "raise TypeError(\"More than one 'ROOT.yaml' file found\") return roots[0].parent class", "_I: \"\"\"Create :class:`Imports` instance from python list. 
Arguments: content: A", "name = \".\".join(parts) builders[name] = TypeBuilder(name, yaml_file, self) return builders", "-> Type[\"PortexType\"]: try: return super().__getitem__(key) except KeyError: return self._builder.__getitem__(key) @classmethod", "\"checkout\", self._revision]) except CalledProcessError as error: raise GitCommandError( \"'git checkout'", "builders[name] = TypeBuilder(name, yaml_file, self) return builders def build(self) ->", "str) -> None: tempdir = Path(gettempdir()) / \"portex\" tempdir.mkdir(exist_ok=True) md5_instance", "of the Portex template type. Arguments: package: The package the", "\"checkout\", \"FETCH_HEAD\"]) def _deep_fetch(self) -> None: try: self._run([\"git\", \"fetch\", \"origin\"])", "import yaml import graviti.portex.ptype as PTYPE from graviti.exception import GitCommandError,", "The git repo revision (tag/commit) of the external package. \"\"\"", "builders def build(self) -> ExternalPackage: \"\"\"Build the Portex external package.", "self.package = ExternalPackage(url, revision) self._builders = self._create_type_builders() def __getitem__(self, key:", "from graviti.portex.external import PortexExternalType from graviti.portex.factory import ConnectedFieldsFactory, TypeFactory from", "def __getitem__(self, key: str) -> Type[\"PortexExternalType\"]: try: return self.package[key] except", "Any, Dict, List, Tuple, Type, TypeVar import yaml import graviti.portex.ptype", "TypeVar import yaml import graviti.portex.ptype as PTYPE from graviti.exception import", "issubclass(factory.class_, PortexRecordBase): bases: Tuple[Type[\"PortexType\"], ...] = (PortexRecordBase, PortexExternalType) class_attrs[\"_fields_factory\"] =", "MIT License. # \"\"\"Portex type builder related classes.\"\"\" from hashlib", "revision: The git repo revision (tag/commit) of the external package.", "= False def build(self) -> Type[\"PortexExternalType\"]: \"\"\"Build the Portex external", "package repo. 
Returns: The root directory path of the package", "[]) decl = content[\"declaration\"] imports = BuilderImports.from_pyobj(content.get(\"imports\", []), self._builder) factory", "list. Arguments: content: A python list representing imported types. builder:", "self.package[key] except KeyError: return self._builders.__getitem__(key).build() def _create_type_builders(self) -> Dict[str, \"TypeBuilder\"]:", "if builder.is_building: continue builder.build() return self.package class TypeBuilder: \"\"\"The builder", "content.get(\"parameters\", []) decl = content[\"declaration\"] imports = BuilderImports.from_pyobj(content.get(\"imports\", []), self._builder)", "package: The package the portex belongs to. \"\"\" _builder: PackageBuilder", "def get_root(self) -> Path: \"\"\"Get the root directory path of", "def _run(self, args: List[str]) -> \"CompletedProcess[bytes]\": return run(args, cwd=self._path, env=self._env,", "parts = (*yaml_file.relative_to(root).parent.parts, yaml_file.stem) name = \".\".join(parts) builders[name] = TypeBuilder(name,", "disable=arguments-differ cls: Type[_I], content: List[Dict[str, Any]], builder: PackageBuilder ) ->", "keys = factory.keys params = Params.from_pyobj(params_pyobj) for key, value in", "repo url of the external package. revision: The git repo", "\"remote\", \"add\", \"origin\", self._url]) def _shallow_fetch(self) -> None: self._run([\"git\", \"fetch\",", "PackageBuilder: \"\"\"The builder of the external Portex package. Arguments: url:", "yaml_file in root.glob(\"**/*.yaml\"): if yaml_file.name == \"ROOT.yaml\": continue parts =", "import Path from shutil import rmtree from subprocess import PIPE,", "path: The source file path of the Portex template type.", "stdout=PIPE, stderr=PIPE, check=True) def _init_repo(self) -> None: self._run([\"git\", \"init\"]) self._run([\"git\",", "from python list. 
Arguments: content: A python list representing imported", "= True with self._path.open() as fp: content = yaml.load(fp, yaml.Loader)", "raise GitCommandError( \"'git fetch' failed, most likely due to the", "None try: self._run([\"git\", \"checkout\", self._revision]) except CalledProcessError as error: raise", "pathlib import Path from shutil import rmtree from subprocess import", "args: List[str]) -> \"CompletedProcess[bytes]\": return run(args, cwd=self._path, env=self._env, stdout=PIPE, stderr=PIPE,", "factory.keys params = Params.from_pyobj(params_pyobj) for key, value in params.items(): value.ptype", "is invalid.\", error, ) from None try: self._run([\"git\", \"checkout\", self._revision])", "revision) package = builder.build() packages.externals[url, revision] = package return package", "fp: content = yaml.load(fp, yaml.Loader) params_pyobj = content.get(\"parameters\", []) decl", "package the Portex template type belongs to. \"\"\" def __init__(self,", "the git repo has been cleaned or broken return False", "\".\".join(parts) builders[name] = TypeBuilder(name, yaml_file, self) return builders def build(self)", "factory.transform_kwargs ) else: bases = (PortexExternalType,) type_ = type(self._name, bases,", "TypeError(\"More than one 'ROOT.yaml' file found\") return roots[0].parent class PackageBuilder:", "builder self.is_building = False def build(self) -> Type[\"PortexExternalType\"]: \"\"\"Build the", "Portex external type. Raises: TypeError: Raise when circular reference detected.", "-> Dict[str, \"TypeBuilder\"]: repo = PackageRepo(self.package.url, self.package.revision) root = repo.get_root()", "input python list. \"\"\" imports = super().from_pyobj(content) imports._builder = builder", "as error: raise GitCommandError( \"'git checkout' failed, most likely due", "class_attrs: Dict[str, Any] = { \"params\": params, \"factory\": factory, \"package\":", "bases: Tuple[Type[\"PortexType\"], ...] 
= (PortexRecordBase, PortexExternalType) class_attrs[\"_fields_factory\"] = ConnectedFieldsFactory( decl,", "continue parts = (*yaml_file.relative_to(root).parent.parts, yaml_file.stem) name = \".\".join(parts) builders[name] =", "root directory path of the package repo. Raises: TypeError: when", "= type_ return type_ class BuilderImports(Imports): \"\"\"The imports of the", "return imports def build_package(url: str, revision: str) -> ExternalPackage: \"\"\"Build", "(PortexExternalType,) type_ = type(self._name, bases, class_attrs) self._builder.package[self._name] = type_ return", "self.package class TypeBuilder: \"\"\"The builder of the external Portex template", "# The git command failed means the git repo has", "= content.get(\"parameters\", []) decl = content[\"declaration\"] imports = BuilderImports.from_pyobj(content.get(\"imports\", []),", "= TypeFactory(decl, imports) keys = factory.keys params = Params.from_pyobj(params_pyobj) for", "CompletedProcess from graviti.portex.base import PortexType EXTERNAL_TYPE_TO_CONTAINER = ExternalContainerRegister.EXTERNAL_TYPE_TO_CONTAINER _I =", "if issubclass(factory.class_, PortexRecordBase): bases: Tuple[Type[\"PortexType\"], ...] 
= (PortexRecordBase, PortexExternalType) class_attrs[\"_fields_factory\"]", "factory, \"package\": self._builder.package, } if issubclass(factory.class_, PortexRecordBase): bases: Tuple[Type[\"PortexType\"], ...]", "try: self._prepare_repo() except FileNotFoundError: raise GitNotFoundError() from None def _prepare_repo(self)", "builder = PackageBuilder(url, revision) package = builder.build() packages.externals[url, revision] =", "{} def __init__(self, url: str, revision: str) -> None: tempdir", "'ROOT.yaml' file found\") return roots[0].parent class PackageBuilder: \"\"\"The builder of", "\"origin\"]) except CalledProcessError as error: raise GitCommandError( \"'git fetch' failed,", "PortexExternalType) class_attrs[\"_fields_factory\"] = ConnectedFieldsFactory( decl, factory.class_, imports, factory.transform_kwargs ) else:", "List[Dict[str, Any]], builder: PackageBuilder ) -> _I: \"\"\"Create :class:`Imports` instance", "revision try: self._prepare_repo() except FileNotFoundError: raise GitNotFoundError() from None def", "/ md5_instance.hexdigest() self._url = url self._revision = revision try: self._prepare_repo()", "class BuilderImports(Imports): \"\"\"The imports of the Portex template type. Arguments:", "_run(self, args: List[str]) -> \"CompletedProcess[bytes]\": return run(args, cwd=self._path, env=self._env, stdout=PIPE,", "when the \"ROOT.yaml\" not found or more than one \"ROOT.yaml\"", "builder. Returns: A :class:`Imports` instance created from the input python", "= tempdir / md5_instance.hexdigest() self._url = url self._revision = revision", "found or more than one \"ROOT.yaml\" found. 
\"\"\" roots =", "FileNotFoundError: raise GitNotFoundError() from None def _prepare_repo(self) -> None: if", "class PackageRepo: \"\"\"The local git repo of the external Portex", "'{self._url}@{self._revision}'\") path = self._path path.mkdir() try: self._init_repo() try: self._shallow_fetch() except", "cleaned or broken return False return not bool(result.stdout) def _clone_repo(self)", "content: A python list representing imported types. builder: The package", "the external Portex template type. Arguments: name: The name of", "for yaml_file in root.glob(\"**/*.yaml\"): if yaml_file.name == \"ROOT.yaml\": continue parts", "str, revision: str) -> None: tempdir = Path(gettempdir()) / \"portex\"", "the Portex template type belongs to. \"\"\" def __init__(self, name:", "# \"\"\"Portex type builder related classes.\"\"\" from hashlib import md5", "builder.build() return self.package class TypeBuilder: \"\"\"The builder of the external", "under MIT License. # \"\"\"Portex type builder related classes.\"\"\" from", "an external package. Arguments: url: The git repo url of", "__init__(self, url: str, revision: str) -> None: tempdir = Path(gettempdir())", "_check_repo_integrity(self) -> bool: try: result = self._run([\"git\", \"status\", \"--porcelain\"]) except", "== \"ROOT.yaml\": continue parts = (*yaml_file.relative_to(root).parent.parts, yaml_file.stem) name = \".\".join(parts)", "root directory path of the package repo. Returns: The root", "env=self._env, stdout=PIPE, stderr=PIPE, check=True) def _init_repo(self) -> None: self._run([\"git\", \"init\"])", "False return not bool(result.stdout) def _clone_repo(self) -> None: print(f\"Cloning repo", "from graviti.portex.param import Param, Params from graviti.portex.register import ExternalContainerRegister if", "git repo of the external Portex package. 
Arguments: url: The", "\"FETCH_HEAD\"]) def _deep_fetch(self) -> None: try: self._run([\"git\", \"fetch\", \"origin\"]) except", "build(self) -> ExternalPackage: \"\"\"Build the Portex external package. Returns: The", "TypeError: Raise when circular reference detected. \"\"\" if self.is_building: raise", "PTYPE.Any) params.add(Param(\"nullable\", False, ptype=PTYPE.Boolean)) class_attrs: Dict[str, Any] = { \"params\":", "builder of the external Portex template type. Arguments: name: The", "from subprocess import PIPE, CalledProcessError, run from tempfile import gettempdir", "Type[\"PortexExternalType\"]: try: return self.package[key] except KeyError: return self._builders.__getitem__(key).build() def _create_type_builders(self)", "builders = {} for yaml_file in root.glob(\"**/*.yaml\"): if yaml_file.name ==", "class_attrs[\"_fields_factory\"] = ConnectedFieldsFactory( decl, factory.class_, imports, factory.transform_kwargs ) else: bases", "the Portex template type. package: The package the Portex template", "import ExternalContainerRegister if TYPE_CHECKING: from subprocess import CompletedProcess from graviti.portex.base", "the Portex template type. path: The source file path of", "False, ptype=PTYPE.Boolean)) class_attrs: Dict[str, Any] = { \"params\": params, \"factory\":", "def build(self) -> ExternalPackage: \"\"\"Build the Portex external package. 
Returns:", "Path(gettempdir()) / \"portex\" tempdir.mkdir(exist_ok=True) md5_instance = md5() md5_instance.update(url.encode(\"utf-8\")) md5_instance.update(revision.encode(\"utf-8\")) self._path", "fetch' failed, most likely due to the repo url is", "CalledProcessError, run from tempfile import gettempdir from typing import TYPE_CHECKING,", "return self.package class TypeBuilder: \"\"\"The builder of the external Portex", "value.ptype = keys.get(key, PTYPE.Any) params.add(Param(\"nullable\", False, ptype=PTYPE.Boolean)) class_attrs: Dict[str, Any]", "params = Params.from_pyobj(params_pyobj) for key, value in params.items(): value.ptype =", "from None try: self._run([\"git\", \"checkout\", self._revision]) except CalledProcessError as error:", "try: self._run([\"git\", \"checkout\", self._revision]) except CalledProcessError as error: raise GitCommandError(", "git repo has been cleaned or broken return False return", "raise TypeError(\"No 'ROOT.yaml' file found\") if len(roots) >= 2: raise", "params_pyobj = content.get(\"parameters\", []) decl = content[\"declaration\"] imports = BuilderImports.from_pyobj(content.get(\"imports\",", "Type, TypeVar import yaml import graviti.portex.ptype as PTYPE from graviti.exception", "roots[0].parent class PackageBuilder: \"\"\"The builder of the external Portex package.", "self._path = path self._builder = builder self.is_building = False def", "yaml import graviti.portex.ptype as PTYPE from graviti.exception import GitCommandError, GitNotFoundError", "due to the repo revision is invalid.\", error, ) from", "return roots[0].parent class PackageBuilder: \"\"\"The builder of the external Portex", "def build(self) -> Type[\"PortexExternalType\"]: \"\"\"Build the Portex external type. Returns:", "Graviti. Licensed under MIT License. 
# \"\"\"Portex type builder related", "str) -> None: self.package = ExternalPackage(url, revision) self._builders = self._create_type_builders()", "import rmtree from subprocess import PIPE, CalledProcessError, run from tempfile", "= TypeVar(\"_I\", bound=\"BuilderImports\") class PackageRepo: \"\"\"The local git repo of", "of the package repo. Returns: The root directory path of", "revision: str) -> ExternalPackage: \"\"\"Build an external package. Arguments: url:", "Returns: The :class:`ExternalPackage` instance. \"\"\" builder = PackageBuilder(url, revision) package", "except CalledProcessError as error: raise GitCommandError( \"'git checkout' failed, most", "The source file path of the Portex template type. package:", "-> None: self._run([\"git\", \"init\"]) self._run([\"git\", \"remote\", \"add\", \"origin\", self._url]) def", "TypeError(\"Circular reference\") self.is_building = True with self._path.open() as fp: content", "\"init\"]) self._run([\"git\", \"remote\", \"add\", \"origin\", self._url]) def _shallow_fetch(self) -> None:", "git repo revision (tag/commit) of the external package. \"\"\" _env:", "md5_instance.update(url.encode(\"utf-8\")) md5_instance.update(revision.encode(\"utf-8\")) self._path = tempdir / md5_instance.hexdigest() self._url = url", "is invalid.\", error, ) from None def _check_repo_integrity(self) -> bool:", "self._path.open() as fp: content = yaml.load(fp, yaml.Loader) params_pyobj = content.get(\"parameters\",", "The :class:`ExternalPackage` instance. 
\"\"\" builder = PackageBuilder(url, revision) package =", "'ROOT.yaml' file found\") if len(roots) >= 2: raise TypeError(\"More than", "def _init_repo(self) -> None: self._run([\"git\", \"init\"]) self._run([\"git\", \"remote\", \"add\", \"origin\",", "failed, most likely due to the repo revision is invalid.\",", "builder # pylint: disable=protected-access return imports def build_package(url: str, revision:", ") from None try: self._run([\"git\", \"checkout\", self._revision]) except CalledProcessError as", "as PTYPE from graviti.exception import GitCommandError, GitNotFoundError from graviti.portex.base import", "to '{path}'\") def get_root(self) -> Path: \"\"\"Get the root directory", "imports of the Portex template type. Arguments: package: The package", "file path of the Portex template type. package: The package", ") from None def _check_repo_integrity(self) -> bool: try: result =", "error: raise GitCommandError( \"'git checkout' failed, most likely due to", "\"\"\"The builder of the external Portex template type. Arguments: name:", "run from tempfile import gettempdir from typing import TYPE_CHECKING, Any,", "or more than one \"ROOT.yaml\" found. \"\"\" roots = list(self._path.glob(\"**/ROOT.yaml\"))", "= {} for yaml_file in root.glob(\"**/*.yaml\"): if yaml_file.name == \"ROOT.yaml\":", "repo '{self._url}@{self._revision}'\") path = self._path path.mkdir() try: self._init_repo() try: self._shallow_fetch()", "CalledProcessError as error: raise GitCommandError( \"'git checkout' failed, most likely", "tempdir / md5_instance.hexdigest() self._url = url self._revision = revision try:", "\"CompletedProcess[bytes]\": return run(args, cwd=self._path, env=self._env, stdout=PIPE, stderr=PIPE, check=True) def _init_repo(self)", "imports = BuilderImports.from_pyobj(content.get(\"imports\", []), self._builder) factory = TypeFactory(decl, imports) keys", ":class:`Imports` instance from python list. Arguments: content: A python list", "name of the Portex template type. 
path: The source file", "of the external Portex package. Arguments: url: The git repo", "due to the repo url is invalid.\", error, ) from", "KeyError: return self._builder.__getitem__(key) @classmethod def from_pyobj( # type: ignore[override] #", "check=True) def _init_repo(self) -> None: self._run([\"git\", \"init\"]) self._run([\"git\", \"remote\", \"add\",", "2022 Graviti. Licensed under MIT License. # \"\"\"Portex type builder", "not self._check_repo_integrity(): rmtree(self._path) self._clone_repo() def _run(self, args: List[str]) -> \"CompletedProcess[bytes]\":", "None: print(f\"Cloning repo '{self._url}@{self._revision}'\") path = self._path path.mkdir() try: self._init_repo()", "KeyError: return self._builders.__getitem__(key).build() def _create_type_builders(self) -> Dict[str, \"TypeBuilder\"]: repo =", "= keys.get(key, PTYPE.Any) params.add(Param(\"nullable\", False, ptype=PTYPE.Boolean)) class_attrs: Dict[str, Any] =", "\"fetch\", \"origin\"]) except CalledProcessError as error: raise GitCommandError( \"'git fetch'", "Portex external package. \"\"\" for builder in self._builders.values(): if builder.is_building:", ") -> _I: \"\"\"Create :class:`Imports` instance from python list. Arguments:", "= ConnectedFieldsFactory( decl, factory.class_, imports, factory.transform_kwargs ) else: bases =", "revision (tag/commit) of the external package. Returns: The :class:`ExternalPackage` instance.", "\"\"\"The imports of the Portex template type. Arguments: package: The", "the external Portex package. Arguments: url: The git repo url", "type. 
Arguments: name: The name of the Portex template type.", "-> bool: try: result = self._run([\"git\", \"status\", \"--porcelain\"]) except CalledProcessError:", "def from_pyobj( # type: ignore[override] # pylint: disable=arguments-differ cls: Type[_I],", "return self._builders.__getitem__(key).build() def _create_type_builders(self) -> Dict[str, \"TypeBuilder\"]: repo = PackageRepo(self.package.url,", "\"\"\" _env: Dict[str, Any] = {} def __init__(self, url: str,", "-> \"CompletedProcess[bytes]\": return run(args, cwd=self._path, env=self._env, stdout=PIPE, stderr=PIPE, check=True) def", "type: ignore[override] # pylint: disable=arguments-differ cls: Type[_I], content: List[Dict[str, Any]],", "try: self._run([\"git\", \"fetch\", \"origin\"]) except CalledProcessError as error: raise GitCommandError(", "pylint: disable=arguments-differ cls: Type[_I], content: List[Dict[str, Any]], builder: PackageBuilder )", "in self._builders.values(): if builder.is_building: continue builder.build() return self.package class TypeBuilder:", "_init_repo(self) -> None: self._run([\"git\", \"init\"]) self._run([\"git\", \"remote\", \"add\", \"origin\", self._url])", "The builded Portex external type. 
Raises: TypeError: Raise when circular", "builder: PackageBuilder ) -> _I: \"\"\"Create :class:`Imports` instance from python", "imports def build_package(url: str, revision: str) -> ExternalPackage: \"\"\"Build an", "as error: raise GitCommandError( \"'git fetch' failed, most likely due", "than one 'ROOT.yaml' file found\") return roots[0].parent class PackageBuilder: \"\"\"The", "revision) self._builders = self._create_type_builders() def __getitem__(self, key: str) -> Type[\"PortexExternalType\"]:", "in params.items(): value.ptype = keys.get(key, PTYPE.Any) params.add(Param(\"nullable\", False, ptype=PTYPE.Boolean)) class_attrs:", "# pylint: disable=arguments-differ cls: Type[_I], content: List[Dict[str, Any]], builder: PackageBuilder", "means the git repo has been cleaned or broken return", "Any] = {} def __init__(self, url: str, revision: str) ->", "def __init__(self, name: str, path: Path, builder: PackageBuilder) -> None:", "Portex template type. Arguments: package: The package the portex belongs", "import md5 from pathlib import Path from shutil import rmtree", "# Copyright 2022 Graviti. Licensed under MIT License. # \"\"\"Portex", "from graviti.exception import GitCommandError, GitNotFoundError from graviti.portex.base import PortexRecordBase from", "if yaml_file.name == \"ROOT.yaml\": continue parts = (*yaml_file.relative_to(root).parent.parts, yaml_file.stem) name", "md5_instance.hexdigest() self._url = url self._revision = revision try: self._prepare_repo() except", "path of the Portex template type. package: The package the", "revision: str) -> None: tempdir = Path(gettempdir()) / \"portex\" tempdir.mkdir(exist_ok=True)", "the external package. 
revision: The git repo revision (tag/commit) of", "from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar", "@classmethod def from_pyobj( # type: ignore[override] # pylint: disable=arguments-differ cls:", "file found\") if len(roots) >= 2: raise TypeError(\"More than one", "continue builder.build() return self.package class TypeBuilder: \"\"\"The builder of the", "\"'git checkout' failed, most likely due to the repo revision", "def _check_repo_integrity(self) -> bool: try: result = self._run([\"git\", \"status\", \"--porcelain\"])", "return type_ class BuilderImports(Imports): \"\"\"The imports of the Portex template", "rmtree from subprocess import PIPE, CalledProcessError, run from tempfile import", "None: self._name = name self._path = path self._builder = builder", "yaml.load(fp, yaml.Loader) params_pyobj = content.get(\"parameters\", []) decl = content[\"declaration\"] imports", "Any] = { \"params\": params, \"factory\": factory, \"package\": self._builder.package, }", "the package repo. Raises: TypeError: when the \"ROOT.yaml\" not found", "\"ROOT.yaml\" found. \"\"\" roots = list(self._path.glob(\"**/ROOT.yaml\")) if len(roots) == 0:", "if TYPE_CHECKING: from subprocess import CompletedProcess from graviti.portex.base import PortexType", "None: tempdir = Path(gettempdir()) / \"portex\" tempdir.mkdir(exist_ok=True) md5_instance = md5()", "type builder related classes.\"\"\" from hashlib import md5 from pathlib", "type(self._name, bases, class_attrs) self._builder.package[self._name] = type_ return type_ class BuilderImports(Imports):", "python3 # # Copyright 2022 Graviti. Licensed under MIT License.", "= revision try: self._prepare_repo() except FileNotFoundError: raise GitNotFoundError() from None", "Licensed under MIT License. 
# \"\"\"Portex type builder related classes.\"\"\"", "= url self._revision = revision try: self._prepare_repo() except FileNotFoundError: raise", "class PackageBuilder: \"\"\"The builder of the external Portex package. Arguments:", "run(args, cwd=self._path, env=self._env, stdout=PIPE, stderr=PIPE, check=True) def _init_repo(self) -> None:", "not found or more than one \"ROOT.yaml\" found. \"\"\" roots" ]
[ "Input( ... value=[mapping_extract_value.op.outputs[\"value\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... ) ... ) >>>", "from ..df.types import Definition from ..df.base import op from ..util.data", "main(): ... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs): ...", "definition=GetSingle.op.inputs[\"spec\"], ... ) ... ) >>> inputs = [ ...", "import traverse_get MAPPING = Definition(name=\"mapping\", primitive=\"map\") MAPPING_TRAVERSE = Definition(name=\"mapping_traverse\", primitive=\"List[str]\")", "dataflow.seed.append( ... Input( ... value=[mapping_extract_value.op.outputs[\"value\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... ) ...", "... print(result) >>> >>> asyncio.run(main()) {'value': 42} \"\"\" return {\"value\":", "given key and value. Parameters ---------- key : str The", "Definition(name=\"mapping_traverse\", primitive=\"List[str]\") MAPPING_KEY = Definition(name=\"key\", primitive=\"str\") MAPPING_VALUE = Definition(name=\"value\", primitive=\"generic\")", "list[str] A list of keys to traverse through the mapping", "dffml import * >>> >>> dataflow = DataFlow.auto(create_mapping, GetSingle) >>>", "mapping. value : Any The value for the mapping. Returns", "key and value. Parameters ---------- key : str The key", "primitive=\"str\") MAPPING_VALUE = Definition(name=\"value\", primitive=\"generic\") @op( name=\"dffml.mapping.extract\", inputs={\"mapping\": MAPPING, \"traverse\":", "primitive=\"map\") MAPPING_TRAVERSE = Definition(name=\"mapping_traverse\", primitive=\"List[str]\") MAPPING_KEY = Definition(name=\"key\", primitive=\"str\") MAPPING_VALUE", "inputs = [ ... Input( ... value=\"key1\", definition=create_mapping.op.inputs[\"key\"], ... ),", ") >>> inputs = [ ... Input( ... value={\"key1\": {\"key2\":", "dataflow = DataFlow.auto(mapping_extract_value, GetSingle) >>> >>> dataflow.seed.append( ... Input( ...", "the keys. 
Examples -------- >>> import asyncio >>> from dffml", "op from ..util.data import traverse_get MAPPING = Definition(name=\"mapping\", primitive=\"map\") MAPPING_TRAVERSE", "definition=mapping_extract_value.op.inputs[\"traverse\"], ... ), ... ] >>> >>> async def main():", "= [ ... Input( ... value={\"key1\": {\"key2\": 42}}, ... definition=mapping_extract_value.op.inputs[\"mapping\"],", "traverse through the mapping dictionary and extract the values. Returns", "... value={\"key1\": {\"key2\": 42}}, ... definition=mapping_extract_value.op.inputs[\"mapping\"], ... ), ... Input(", "typing import Dict, List, Any from ..df.types import Definition from", "the values. Returns ------- dict A dictionary containing the value", "... value=[\"key1\", \"key2\"], ... definition=mapping_extract_value.op.inputs[\"traverse\"], ... ), ... ] >>>", "str The key for the mapping. value : Any The", "the mapping created. Examples -------- >>> import asyncio >>> from", "value=[\"key1\", \"key2\"], ... definition=mapping_extract_value.op.inputs[\"traverse\"], ... ), ... ] >>> >>>", "from. traverse : list[str] A list of keys to traverse", "traverse_get(mapping, *traverse)} @op( name=\"dffml.mapping.create\", inputs={\"key\": MAPPING_KEY, \"value\": MAPPING_VALUE}, outputs={\"mapping\": MAPPING},", "created. Examples -------- >>> import asyncio >>> from dffml import", "Parameters ---------- key : str The key for the mapping.", "DataFlow.auto(mapping_extract_value, GetSingle) >>> >>> dataflow.seed.append( ... Input( ... value=[mapping_extract_value.op.outputs[\"value\"].name], ...", "result in MemoryOrchestrator.run(dataflow, inputs): ... print(result) >>> >>> asyncio.run(main()) {'mapping':", "mapping dictionary and extract the values. Returns ------- dict A", "traverse : list[str] A list of keys to traverse through", "42}}, ... definition=mapping_extract_value.op.inputs[\"mapping\"], ... ), ... Input( ... 
value=[\"key1\", \"key2\"],", "keys to traverse through the mapping dictionary and extract the", "value : Any The value for the mapping. Returns -------", "... definition=mapping_extract_value.op.inputs[\"mapping\"], ... ), ... Input( ... value=[\"key1\", \"key2\"], ...", "= Definition(name=\"key\", primitive=\"str\") MAPPING_VALUE = Definition(name=\"value\", primitive=\"generic\") @op( name=\"dffml.mapping.extract\", inputs={\"mapping\":", "value={\"key1\": {\"key2\": 42}}, ... definition=mapping_extract_value.op.inputs[\"mapping\"], ... ), ... Input( ...", "The value for the mapping. Returns ------- dict A dictionary", "Definition from ..df.base import op from ..util.data import traverse_get MAPPING", "result in MemoryOrchestrator.run(dataflow, inputs): ... print(result) >>> >>> asyncio.run(main()) {'value':", "the mapping dictionary and extract the values. Returns ------- dict", "Any from ..df.types import Definition from ..df.base import op from", "create_mapping(key: str, value: Any): \"\"\" Creates a mapping of a", "[ ... Input( ... value=\"key1\", definition=create_mapping.op.inputs[\"key\"], ... ), ... Input(", "---------- mapping : dict The mapping to extract the value", "... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs): ... print(result)", "def create_mapping(key: str, value: Any): \"\"\" Creates a mapping of", "A list of keys to traverse through the mapping dictionary", "... value=[create_mapping.op.outputs[\"mapping\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... ) ... ) >>> inputs", "to traverse through the mapping dictionary and extract the values.", "list of keys to traverse through the mapping dictionary and", "for the mapping. value : Any The value for the", "value from a given mapping. Parameters ---------- mapping : dict", "the value of the keys. Examples -------- >>> import asyncio", "import * >>> >>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle) >>> >>>", "... ] >>> >>> async def main(): ... 
async for", "asyncio.run(main()) {'value': 42} \"\"\" return {\"value\": traverse_get(mapping, *traverse)} @op( name=\"dffml.mapping.create\",", ">>> from dffml import * >>> >>> dataflow = DataFlow.auto(mapping_extract_value,", "Input( ... value={\"key1\": {\"key2\": 42}}, ... definition=mapping_extract_value.op.inputs[\"mapping\"], ... ), ...", "given mapping. Parameters ---------- mapping : dict The mapping to", ">>> >>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle) >>> >>> dataflow.seed.append( ...", "= DataFlow.auto(create_mapping, GetSingle) >>> dataflow.seed.append( ... Input( ... value=[create_mapping.op.outputs[\"mapping\"].name], ...", "... Input( ... value=[\"key1\", \"key2\"], ... definition=mapping_extract_value.op.inputs[\"traverse\"], ... ), ...", "print(result) >>> >>> asyncio.run(main()) {'value': 42} \"\"\" return {\"value\": traverse_get(mapping,", "42} \"\"\" return {\"value\": traverse_get(mapping, *traverse)} @op( name=\"dffml.mapping.create\", inputs={\"key\": MAPPING_KEY,", ">>> async def main(): ... async for ctx, result in", ") def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]): \"\"\" Extracts value", "outputs={\"mapping\": MAPPING}, ) def create_mapping(key: str, value: Any): \"\"\" Creates", "dataflow = DataFlow.auto(create_mapping, GetSingle) >>> dataflow.seed.append( ... Input( ... value=[create_mapping.op.outputs[\"mapping\"].name],", "MemoryOrchestrator.run(dataflow, inputs): ... print(result) >>> >>> asyncio.run(main()) {'value': 42} \"\"\"", "The mapping to extract the value from. traverse : list[str]", ">>> import asyncio >>> from dffml import * >>> >>>", ">>> dataflow.seed.append( ... Input( ... value=[create_mapping.op.outputs[\"mapping\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... )", "\"\"\" Creates a mapping of a given key and value.", "from typing import Dict, List, Any from ..df.types import Definition", ">>> inputs = [ ... Input( ... 
value=\"key1\", definition=create_mapping.op.inputs[\"key\"], ...", ">>> >>> async def main(): ... async for ctx, result", "def main(): ... async for ctx, result in MemoryOrchestrator.run(dataflow, inputs):", "a mapping of a given key and value. Parameters ----------", "@op( name=\"dffml.mapping.extract\", inputs={\"mapping\": MAPPING, \"traverse\": MAPPING_TRAVERSE}, outputs={\"value\": MAPPING_VALUE}, ) def", "dict A dictionary containing the mapping created. Examples -------- >>>", "Returns ------- dict A dictionary containing the mapping created. Examples", "import asyncio >>> from dffml import * >>> >>> dataflow", "Input( ... value=42, definition=create_mapping.op.inputs[\"value\"], ... ), ... ] >>> >>>", "keys. Examples -------- >>> import asyncio >>> from dffml import", "dictionary containing the mapping created. Examples -------- >>> import asyncio", ") ... ) >>> inputs = [ ... Input( ...", ">>> from dffml import * >>> >>> dataflow = DataFlow.auto(create_mapping,", "Any): \"\"\" Creates a mapping of a given key and", ">>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle) >>> >>> dataflow.seed.append( ... Input(", "extract the value from. traverse : list[str] A list of", "definition=mapping_extract_value.op.inputs[\"mapping\"], ... ), ... Input( ... value=[\"key1\", \"key2\"], ... definition=mapping_extract_value.op.inputs[\"traverse\"],", ") def create_mapping(key: str, value: Any): \"\"\" Creates a mapping", "name=\"dffml.mapping.extract\", inputs={\"mapping\": MAPPING, \"traverse\": MAPPING_TRAVERSE}, outputs={\"value\": MAPPING_VALUE}, ) def mapping_extract_value(mapping:", "the value from. traverse : list[str] A list of keys", "a given key and value. Parameters ---------- key : str", "..util.data import traverse_get MAPPING = Definition(name=\"mapping\", primitive=\"map\") MAPPING_TRAVERSE = Definition(name=\"mapping_traverse\",", "async for ctx, result in MemoryOrchestrator.run(dataflow, inputs): ... 
print(result) >>>", "import Dict, List, Any from ..df.types import Definition from ..df.base", "... value=[mapping_extract_value.op.outputs[\"value\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... ) ... ) >>> inputs", "... ) ... ) >>> inputs = [ ... Input(", "value: Any): \"\"\" Creates a mapping of a given key", ">>> >>> asyncio.run(main()) {'value': 42} \"\"\" return {\"value\": traverse_get(mapping, *traverse)}", "Input( ... value=[create_mapping.op.outputs[\"mapping\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... ) ... ) >>>", "MAPPING_KEY = Definition(name=\"key\", primitive=\"str\") MAPPING_VALUE = Definition(name=\"value\", primitive=\"generic\") @op( name=\"dffml.mapping.extract\",", "import Definition from ..df.base import op from ..util.data import traverse_get", "A dictionary containing the value of the keys. Examples --------", "Examples -------- >>> import asyncio >>> from dffml import *", "for ctx, result in MemoryOrchestrator.run(dataflow, inputs): ... print(result) >>> >>>", "DataFlow.auto(create_mapping, GetSingle) >>> dataflow.seed.append( ... Input( ... value=[create_mapping.op.outputs[\"mapping\"].name], ... definition=GetSingle.op.inputs[\"spec\"],", "\"value\": MAPPING_VALUE}, outputs={\"mapping\": MAPPING}, ) def create_mapping(key: str, value: Any):", "in MemoryOrchestrator.run(dataflow, inputs): ... print(result) >>> >>> asyncio.run(main()) {'mapping': {'key1':", "... Input( ... value=\"key1\", definition=create_mapping.op.inputs[\"key\"], ... ), ... 
Input( ...", "import * >>> >>> dataflow = DataFlow.auto(create_mapping, GetSingle) >>> dataflow.seed.append(", "@op( name=\"dffml.mapping.create\", inputs={\"key\": MAPPING_KEY, \"value\": MAPPING_VALUE}, outputs={\"mapping\": MAPPING}, ) def", "outputs={\"value\": MAPPING_VALUE}, ) def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]): \"\"\"", "MAPPING = Definition(name=\"mapping\", primitive=\"map\") MAPPING_TRAVERSE = Definition(name=\"mapping_traverse\", primitive=\"List[str]\") MAPPING_KEY =", "... ), ... Input( ... value=[\"key1\", \"key2\"], ... definition=mapping_extract_value.op.inputs[\"traverse\"], ...", "value=42, definition=create_mapping.op.inputs[\"value\"], ... ), ... ] >>> >>> async def", ">>> inputs = [ ... Input( ... value={\"key1\": {\"key2\": 42}},", "the mapping. value : Any The value for the mapping.", "dffml import * >>> >>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle) >>>", "through the mapping dictionary and extract the values. Returns -------", "= Definition(name=\"mapping_traverse\", primitive=\"List[str]\") MAPPING_KEY = Definition(name=\"key\", primitive=\"str\") MAPPING_VALUE = Definition(name=\"value\",", "mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]): \"\"\" Extracts value from a", "... Input( ... value={\"key1\": {\"key2\": 42}}, ... definition=mapping_extract_value.op.inputs[\"mapping\"], ... ),", "from dffml import * >>> >>> dataflow = DataFlow.auto(create_mapping, GetSingle)", "asyncio >>> from dffml import * >>> >>> dataflow =", "------- dict A dictionary containing the mapping created. Examples --------", "from dffml import * >>> >>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle)", ">>> dataflow.seed.append( ... Input( ... value=[mapping_extract_value.op.outputs[\"value\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... )", "values. Returns ------- dict A dictionary containing the value of", "GetSingle) >>> dataflow.seed.append( ... Input( ... 
value=[create_mapping.op.outputs[\"mapping\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ...", "= [ ... Input( ... value=\"key1\", definition=create_mapping.op.inputs[\"key\"], ... ), ...", "async def main(): ... async for ctx, result in MemoryOrchestrator.run(dataflow,", "from a given mapping. Parameters ---------- mapping : dict The", "inputs): ... print(result) >>> >>> asyncio.run(main()) {'value': 42} \"\"\" return", "key : str The key for the mapping. value :", "List[str]): \"\"\" Extracts value from a given mapping. Parameters ----------", "traverse: List[str]): \"\"\" Extracts value from a given mapping. Parameters", "MAPPING_VALUE}, ) def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]): \"\"\" Extracts", "Definition(name=\"value\", primitive=\"generic\") @op( name=\"dffml.mapping.extract\", inputs={\"mapping\": MAPPING, \"traverse\": MAPPING_TRAVERSE}, outputs={\"value\": MAPPING_VALUE},", "Definition(name=\"mapping\", primitive=\"map\") MAPPING_TRAVERSE = Definition(name=\"mapping_traverse\", primitive=\"List[str]\") MAPPING_KEY = Definition(name=\"key\", primitive=\"str\")", "traverse_get MAPPING = Definition(name=\"mapping\", primitive=\"map\") MAPPING_TRAVERSE = Definition(name=\"mapping_traverse\", primitive=\"List[str]\") MAPPING_KEY", "name=\"dffml.mapping.create\", inputs={\"key\": MAPPING_KEY, \"value\": MAPPING_VALUE}, outputs={\"mapping\": MAPPING}, ) def create_mapping(key:", "inputs = [ ... Input( ... value={\"key1\": {\"key2\": 42}}, ...", "str, value: Any): \"\"\" Creates a mapping of a given", "mapping. Returns ------- dict A dictionary containing the mapping created.", "in MemoryOrchestrator.run(dataflow, inputs): ... 
print(result) >>> >>> asyncio.run(main()) {'value': 42}", "*traverse)} @op( name=\"dffml.mapping.create\", inputs={\"key\": MAPPING_KEY, \"value\": MAPPING_VALUE}, outputs={\"mapping\": MAPPING}, )", "* >>> >>> dataflow = DataFlow.auto(mapping_extract_value, GetSingle) >>> >>> dataflow.seed.append(", "... value=42, definition=create_mapping.op.inputs[\"value\"], ... ), ... ] >>> >>> async", "mapping. Parameters ---------- mapping : dict The mapping to extract", "containing the value of the keys. Examples -------- >>> import", ">>> dataflow = DataFlow.auto(create_mapping, GetSingle) >>> dataflow.seed.append( ... Input( ...", "value=\"key1\", definition=create_mapping.op.inputs[\"key\"], ... ), ... Input( ... value=42, definition=create_mapping.op.inputs[\"value\"], ...", "\"key2\"], ... definition=mapping_extract_value.op.inputs[\"traverse\"], ... ), ... ] >>> >>> async", "..df.base import op from ..util.data import traverse_get MAPPING = Definition(name=\"mapping\",", "inputs): ... print(result) >>> >>> asyncio.run(main()) {'mapping': {'key1': 42}} \"\"\"", "MAPPING, \"traverse\": MAPPING_TRAVERSE}, outputs={\"value\": MAPPING_VALUE}, ) def mapping_extract_value(mapping: Dict[str, Any],", "Input( ... value=[\"key1\", \"key2\"], ... definition=mapping_extract_value.op.inputs[\"traverse\"], ... ), ... ]", "... print(result) >>> >>> asyncio.run(main()) {'mapping': {'key1': 42}} \"\"\" return", ": list[str] A list of keys to traverse through the", ">>> >>> dataflow.seed.append( ... Input( ... value=[mapping_extract_value.op.outputs[\"value\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ...", "[ ... Input( ... value={\"key1\": {\"key2\": 42}}, ... definition=mapping_extract_value.op.inputs[\"mapping\"], ...", "------- dict A dictionary containing the value of the keys.", "Any], traverse: List[str]): \"\"\" Extracts value from a given mapping.", "value=[mapping_extract_value.op.outputs[\"value\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... ) ... 
) >>> inputs =", "Creates a mapping of a given key and value. Parameters", "... ) >>> inputs = [ ... Input( ... value=\"key1\",", "\"\"\" return {\"value\": traverse_get(mapping, *traverse)} @op( name=\"dffml.mapping.create\", inputs={\"key\": MAPPING_KEY, \"value\":", "... definition=mapping_extract_value.op.inputs[\"traverse\"], ... ), ... ] >>> >>> async def", "... ) >>> inputs = [ ... Input( ... value={\"key1\":", "dictionary containing the value of the keys. Examples -------- >>>", "... Input( ... value=42, definition=create_mapping.op.inputs[\"value\"], ... ), ... ] >>>", "---------- key : str The key for the mapping. value", "import op from ..util.data import traverse_get MAPPING = Definition(name=\"mapping\", primitive=\"map\")", "Extracts value from a given mapping. Parameters ---------- mapping :", "and value. Parameters ---------- key : str The key for", "= DataFlow.auto(mapping_extract_value, GetSingle) >>> >>> dataflow.seed.append( ... Input( ... value=[mapping_extract_value.op.outputs[\"value\"].name],", "Definition(name=\"key\", primitive=\"str\") MAPPING_VALUE = Definition(name=\"value\", primitive=\"generic\") @op( name=\"dffml.mapping.extract\", inputs={\"mapping\": MAPPING,", "= Definition(name=\"mapping\", primitive=\"map\") MAPPING_TRAVERSE = Definition(name=\"mapping_traverse\", primitive=\"List[str]\") MAPPING_KEY = Definition(name=\"key\",", "MAPPING_VALUE}, outputs={\"mapping\": MAPPING}, ) def create_mapping(key: str, value: Any): \"\"\"", "mapping : dict The mapping to extract the value from.", "... value=\"key1\", definition=create_mapping.op.inputs[\"key\"], ... ), ... Input( ... value=42, definition=create_mapping.op.inputs[\"value\"],", "MemoryOrchestrator.run(dataflow, inputs): ... print(result) >>> >>> asyncio.run(main()) {'mapping': {'key1': 42}}", "GetSingle) >>> >>> dataflow.seed.append( ... Input( ... value=[mapping_extract_value.op.outputs[\"value\"].name], ... 
definition=GetSingle.op.inputs[\"spec\"],", "Any The value for the mapping. Returns ------- dict A", "MAPPING_TRAVERSE = Definition(name=\"mapping_traverse\", primitive=\"List[str]\") MAPPING_KEY = Definition(name=\"key\", primitive=\"str\") MAPPING_VALUE =", "to extract the value from. traverse : list[str] A list", ": dict The mapping to extract the value from. traverse", "Dict, List, Any from ..df.types import Definition from ..df.base import", "{\"key2\": 42}}, ... definition=mapping_extract_value.op.inputs[\"mapping\"], ... ), ... Input( ... value=[\"key1\",", "\"\"\" Extracts value from a given mapping. Parameters ---------- mapping", "a given mapping. Parameters ---------- mapping : dict The mapping", "mapping created. Examples -------- >>> import asyncio >>> from dffml", "= Definition(name=\"value\", primitive=\"generic\") @op( name=\"dffml.mapping.extract\", inputs={\"mapping\": MAPPING, \"traverse\": MAPPING_TRAVERSE}, outputs={\"value\":", "... ), ... ] >>> >>> async def main(): ...", "MAPPING_TRAVERSE}, outputs={\"value\": MAPPING_VALUE}, ) def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]):", "dataflow.seed.append( ... Input( ... value=[create_mapping.op.outputs[\"mapping\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... ) ...", "return {\"value\": traverse_get(mapping, *traverse)} @op( name=\"dffml.mapping.create\", inputs={\"key\": MAPPING_KEY, \"value\": MAPPING_VALUE},", "Dict[str, Any], traverse: List[str]): \"\"\" Extracts value from a given", "dictionary and extract the values. Returns ------- dict A dictionary", "... ), ... Input( ... value=42, definition=create_mapping.op.inputs[\"value\"], ... ), ...", "... Input( ... value=[create_mapping.op.outputs[\"mapping\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... ) ... )", "the mapping. Returns ------- dict A dictionary containing the mapping", "value for the mapping. 
Returns ------- dict A dictionary containing", "inputs={\"key\": MAPPING_KEY, \"value\": MAPPING_VALUE}, outputs={\"mapping\": MAPPING}, ) def create_mapping(key: str,", "print(result) >>> >>> asyncio.run(main()) {'mapping': {'key1': 42}} \"\"\" return {\"mapping\":", "dict A dictionary containing the value of the keys. Examples", "from ..util.data import traverse_get MAPPING = Definition(name=\"mapping\", primitive=\"map\") MAPPING_TRAVERSE =", "Parameters ---------- mapping : dict The mapping to extract the", ") >>> inputs = [ ... Input( ... value=\"key1\", definition=create_mapping.op.inputs[\"key\"],", "Input( ... value=\"key1\", definition=create_mapping.op.inputs[\"key\"], ... ), ... Input( ... value=42,", "of keys to traverse through the mapping dictionary and extract", "of a given key and value. Parameters ---------- key :", "] >>> >>> async def main(): ... async for ctx,", "from ..df.base import op from ..util.data import traverse_get MAPPING =", "\"traverse\": MAPPING_TRAVERSE}, outputs={\"value\": MAPPING_VALUE}, ) def mapping_extract_value(mapping: Dict[str, Any], traverse:", "inputs={\"mapping\": MAPPING, \"traverse\": MAPPING_TRAVERSE}, outputs={\"value\": MAPPING_VALUE}, ) def mapping_extract_value(mapping: Dict[str,", "MAPPING_KEY, \"value\": MAPPING_VALUE}, outputs={\"mapping\": MAPPING}, ) def create_mapping(key: str, value:", "ctx, result in MemoryOrchestrator.run(dataflow, inputs): ... print(result) >>> >>> asyncio.run(main())", "List, Any from ..df.types import Definition from ..df.base import op", "MAPPING}, ) def create_mapping(key: str, value: Any): \"\"\" Creates a", ": Any The value for the mapping. Returns ------- dict", "... definition=GetSingle.op.inputs[\"spec\"], ... ) ... ) >>> inputs = [", "value from. 
traverse : list[str] A list of keys to", "{'value': 42} \"\"\" return {\"value\": traverse_get(mapping, *traverse)} @op( name=\"dffml.mapping.create\", inputs={\"key\":", "-------- >>> import asyncio >>> from dffml import * >>>", "value. Parameters ---------- key : str The key for the", "..df.types import Definition from ..df.base import op from ..util.data import", "primitive=\"generic\") @op( name=\"dffml.mapping.extract\", inputs={\"mapping\": MAPPING, \"traverse\": MAPPING_TRAVERSE}, outputs={\"value\": MAPPING_VALUE}, )", "mapping of a given key and value. Parameters ---------- key", "for the mapping. Returns ------- dict A dictionary containing the", "value=[create_mapping.op.outputs[\"mapping\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... ) ... ) >>> inputs =", "and extract the values. Returns ------- dict A dictionary containing", "of the keys. Examples -------- >>> import asyncio >>> from", "... Input( ... value=[mapping_extract_value.op.outputs[\"value\"].name], ... definition=GetSingle.op.inputs[\"spec\"], ... ) ... )", "Returns ------- dict A dictionary containing the value of the", "definition=create_mapping.op.inputs[\"key\"], ... ), ... Input( ... value=42, definition=create_mapping.op.inputs[\"value\"], ... ),", "containing the mapping created. Examples -------- >>> import asyncio >>>", "), ... Input( ... value=42, definition=create_mapping.op.inputs[\"value\"], ... ), ... ]", "extract the values. Returns ------- dict A dictionary containing the", ": str The key for the mapping. value : Any", "A dictionary containing the mapping created. Examples -------- >>> import", "), ... Input( ... value=[\"key1\", \"key2\"], ... definition=mapping_extract_value.op.inputs[\"traverse\"], ... ),", "The key for the mapping. 
value : Any The value", ">>> >>> asyncio.run(main()) {'mapping': {'key1': 42}} \"\"\" return {\"mapping\": {key:", "def mapping_extract_value(mapping: Dict[str, Any], traverse: List[str]): \"\"\" Extracts value from", "* >>> >>> dataflow = DataFlow.auto(create_mapping, GetSingle) >>> dataflow.seed.append( ...", "mapping to extract the value from. traverse : list[str] A", "value of the keys. Examples -------- >>> import asyncio >>>", ">>> asyncio.run(main()) {'value': 42} \"\"\" return {\"value\": traverse_get(mapping, *traverse)} @op(", "primitive=\"List[str]\") MAPPING_KEY = Definition(name=\"key\", primitive=\"str\") MAPPING_VALUE = Definition(name=\"value\", primitive=\"generic\") @op(", "key for the mapping. value : Any The value for", ">>> >>> dataflow = DataFlow.auto(create_mapping, GetSingle) >>> dataflow.seed.append( ... Input(", "MAPPING_VALUE = Definition(name=\"value\", primitive=\"generic\") @op( name=\"dffml.mapping.extract\", inputs={\"mapping\": MAPPING, \"traverse\": MAPPING_TRAVERSE},", ">>> asyncio.run(main()) {'mapping': {'key1': 42}} \"\"\" return {\"mapping\": {key: value}}", "{\"value\": traverse_get(mapping, *traverse)} @op( name=\"dffml.mapping.create\", inputs={\"key\": MAPPING_KEY, \"value\": MAPPING_VALUE}, outputs={\"mapping\":", "), ... ] >>> >>> async def main(): ... async", "definition=create_mapping.op.inputs[\"value\"], ... ), ... ] >>> >>> async def main():", "dict The mapping to extract the value from. traverse :" ]
[ "so skipping\") except ValueError as e: logger.warn(\"Received msg of wrong", "for cls_tuple in [ (NvdV2Feed, False), (VulnDBFeed, False), (VulnerabilityFeed, True),", "for fn in preflight_check_functions: try: fn() except Exception as e:", "Exception( \"Exceeded retries for feeds client config check. Failing check\"", "as dbsession: distro_mappings = dbsession.query(DistroMapping).all() for i in initial_mappings: if", ") else: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\", status=\"fail\", )", "except Exception as e: logger.warn( \"Could not verify feeds endpoint", "if \"get_selected_feeds_to_sync\" not in locals(): from anchore_engine.services.policy_engine.engine.feeds.sync import ( get_selected_feeds_to_sync,", "anchore_engine.services.policy_engine.engine.feeds.sync import ( get_selected_feeds_to_sync, ) handler_success = False timer =", "5 try: feed_config_check_retries = int(os.getenv(\"FEED_CLIENT_CHECK_RETRIES\", 3)) except ValueError: logger.exception( \"Error", "True def init_db_content(): \"\"\" Initialize the policy engine db with", "task this cycle: \" + str(err)) def handle_feed_sync_trigger(*args, **kwargs): \"\"\"", "+= feed_config_check_backoff try: logger.info( \"Checking feeds client credentials. 
Attempt {}", "IntegrityError): logger.warn(\"another process has already initialized, continuing\") else: raise Exception(", "not in locals(): from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask if \"get_selected_feeds_to_sync\" not", "e: logger.error(\"Caught escaped error in feed sync handler: {}\".format(e)) finally:", "a feed sync in the queue and if not, adds", "Exception as e: logger.error( \"Error caught in feed sync trigger", "service not yet ready, will retry\") raise Exception(\"Simplequeue service not", "import pkg_resources import os import retrying from sqlalchemy.exc import IntegrityError", "]: logger.info(\"Registering feed handler {}\".format(cls_tuple[0].__feed_name__)) feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1]) def do_feed_sync(msg): if", "any data necessary at startup. :return: \"\"\" return _init_distro_mappings() def", "env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of 5\"", "feed handler {}\".format(cls_tuple[0].__feed_name__)) feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1]) def do_feed_sync(msg): if \"FeedsUpdateTask\" not", "this cycle: \" + str(err)) def handle_feed_sync_trigger(*args, **kwargs): \"\"\" Checks", "handler - exception: \" + str(err)) if handler_success: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\",", "for firing this should be longer than the expected feed", "for next cycle\" ) finally: logger.info(\"Feed Sync task creator complete\")", "exceptions or return False return value :return: \"\"\" preflight_check_functions =", "userId=None) if not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg): try: q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg) except: logger.error(\"Could", "flavor=\"DEB\"), DistroMapping(from_distro=\"fedora\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ol\", to_distro=\"ol\", flavor=\"RHEL\"), 
DistroMapping(from_distro=\"rhel\", to_distro=\"rhel\", flavor=\"RHEL\"),", "value of 5\" ) feed_config_check_backoff = 5 # service funcs", "of 5\" ) FEED_SYNC_RETRIES = 5 try: FEED_SYNC_RETRY_BACKOFF = int(", "service not yet ready\") else: try: # This has its", "VulnDBFeed, GithubFeed, feed_registry, NvdFeed, ) # from anchore_engine.subsys.logger import enable_bootstrap_logging", "logger.exception( \"Preflight checks failed with error: {}. Aborting service startup\".format(", "time.sleep(sleep_time) sleep_time += feed_config_check_backoff try: logger.info( \"Checking feeds client credentials.", "\"\"\" Initialize the policy engine db with any data necessary", "its own retry on the queue fetch, so wrap with", "task creator activated\") try: push_sync_task(system_user) logger.info(\"Feed Sync Trigger done, waiting", "feed_config_check_backoff last_ex = None for i in range(feed_config_check_retries): if i", "except: logger.error(\"Could not enqueue message for a feed sync\") raise", "session_scope() as dbsession: distro_mappings = dbsession.query(DistroMapping).all() for i in initial_mappings:", "anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask if \"get_selected_feeds_to_sync\" not in locals(): from anchore_engine.services.policy_engine.engine.feeds.sync", "return False return value :return: \"\"\" preflight_check_functions = [init_db_content, init_feed_registry]", "initial_mappings: if not [x for x in distro_mappings if x.from_distro", "Interval for firing this should be longer than the expected", "logger.info(\"Feed sync task executor activated\") try: run_feed_sync(system_user) except Exception as", "= config[\"system_user_auth\"] return system_user_auth def process_preflight(): \"\"\" Execute the preflight", "{}).get(\"sync_enabled\", True) if feed_sync_enabled: logger.info(\"Feed Sync task creator activated\") try:", "ApiService, LifeCycleStages from anchore_engine.services.policy_engine.engine.feeds.feeds import ( VulnerabilityFeed, 
NvdV2Feed, PackagesFeed, VulnDBFeed,", "inobj=feed_sync_msg) except: logger.error(\"Could not enqueue message for a feed sync\")", "@retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def", "import anchore_engine.clients.services.common import anchore_engine.subsys.servicestatus import anchore_engine.subsys.metrics from anchore_engine.subsys import logger", "False return value :return: \"\"\" preflight_check_functions = [init_db_content, init_feed_registry] for", "locals(): from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask if \"get_selected_feeds_to_sync\" not in locals():", "SimpleQueueClient(user=system_user[0], password=<PASSWORD>[1]) q_client = internal_client_for(SimpleQueueClient, userId=None) if not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg):", "sqlalchemy.exc import IntegrityError # anchore modules import anchore_engine.clients.services.common import anchore_engine.subsys.servicestatus", "into int, using default value of 5\" ) feed_config_check_backoff =", "\"enabled\": True} # These are user-configurable but mostly for debugging", "logger.info( \"Checking policy engine db initialization. Checking initial set of", "all retries. 
Will wait for next cycle\" ) finally: logger.info(\"Feed", "Initiates a feed sync in the system in response to", "into int, using default value of 5\" ) FEED_SYNC_RETRY_BACKOFF =", "+ str(err) ) return True def init_db_content(): \"\"\" Initialize the", "result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get(\"data\")) if result is not None: handler_success =", "False, }, \"feed_sync_checker\": { \"handler\": handle_feed_sync_trigger, \"taskType\": \"handle_feed_sync_trigger\", \"args\": [],", "= FeedsUpdateTask.run_feeds_update(json_obj=msg.get(\"data\")) if result is not None: handler_success = True", "anchore_engine.subsys.logger import enable_bootstrap_logging # enable_bootstrap_logging() from anchore_engine.utils import timer feed_sync_queuename", "sync handler: {}\".format(e)) finally: logger.info(\"Feed sync task executor complete\") else:", "ready, will retry\") raise Exception(\"Simplequeue service not yet ready\") else:", "to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ol\", to_distro=\"ol\", flavor=\"RHEL\"), DistroMapping(from_distro=\"rhel\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ubuntu\", to_distro=\"ubuntu\",", "service startup if any throw uncaught exceptions or return False", "enable_bootstrap_logging() from anchore_engine.utils import timer feed_sync_queuename = \"feed_sync_tasks\" system_user_auth =", "feed_sync_queuename = \"feed_sync_tasks\" system_user_auth = None feed_sync_msg = {\"task_type\": \"feed_sync\",", "process_preflight(): \"\"\" Execute the preflight functions, aborting service startup if", "timer feed_sync_queuename = \"feed_sync_tasks\" system_user_auth = None feed_sync_msg = {\"task_type\":", "or return False return value :return: \"\"\" preflight_check_functions = [init_db_content,", "from anchore_engine.subsys import logger from anchore_engine.configuration import localconfig from anchore_engine.clients.services", "logger.warn(\"Received msg of wrong type\") except Exception 
as err: logger.warn(\"failure", ") finally: logger.info(\"Feed Sync task creator complete\") else: logger.info( \"sync_enabled", "str(err)) if handler_success: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\", status=\"success\",", "logger.info(\"init args: {}\".format(kwargs)) cycle_time = kwargs[\"mythread\"][\"cycle_timer\"] while True: config =", "}, } __lifecycle_handlers__ = { LifeCycleStages.pre_register: [ (process_preflight, None), ]", "{}\".format( e ) ) last_ex = e else: if last_ex:", "to process task this cycle: \" + str(err)) def handle_feed_sync_trigger(*args,", "db with any data necessary at startup. :return: \"\"\" return", "\"args\": [__service_name__], \"cycle_timer\": 60, \"min_cycle_timer\": 60, \"max_cycle_timer\": 60, \"last_queued\": 0,", "# q_client = SimpleQueueClient(user=system_user[0], password=<PASSWORD>[1]) q_client = internal_client_for(SimpleQueueClient, userId=None) if", "anchore_engine.clients.services import simplequeue, internal_client_for from anchore_engine.clients.services.simplequeue import SimpleQueueClient from anchore_engine.service", "sync trigger\" ) time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF *", "flavor=\"ALPINE\"), DistroMapping(from_distro=\"busybox\", to_distro=\"busybox\", flavor=\"BUSYB\"), DistroMapping(from_distro=\"centos\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"debian\", to_distro=\"debian\", flavor=\"DEB\"),", "complete\") except Exception as err: if isinstance(err, IntegrityError): logger.warn(\"another process", "handle_feed_sync_trigger, \"taskType\": \"handle_feed_sync_trigger\", \"args\": [], \"cycle_timer\": 600, \"min_cycle_timer\": 300, \"max_cycle_timer\":", "from anchore_engine.services.policy_engine.engine.feeds.feeds import ( VulnerabilityFeed, NvdV2Feed, PackagesFeed, VulnDBFeed, GithubFeed, 
feed_registry,", "= 5 # service funcs (must be here) def _check_feed_client_credentials():", "trigger handler after all retries. Will wait for next cycle\"", "verify feeds endpoint and/or config. Got exception: {}\".format( e )", "import SimpleQueueClient from anchore_engine.service import ApiService, LifeCycleStages from anchore_engine.services.policy_engine.engine.feeds.feeds import", "response to a message from the queue :param args: :param", "\"\"\" system_user = _system_creds() logger.info(\"init args: {}\".format(kwargs)) cycle_time = kwargs[\"mythread\"][\"cycle_timer\"]", "[], \"cycle_timer\": 3600, \"min_cycle_timer\": 1800, \"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\":", "from anchore_engine.service import ApiService, LifeCycleStages from anchore_engine.services.policy_engine.engine.feeds.feeds import ( VulnerabilityFeed,", "1000, ) def push_sync_task(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] ) if", "try: logger.info( \"Checking feeds client credentials. Attempt {} of {}\".format(", "else: raise Exception( \"Exceeded retries for feeds client config check.", "dbsession: distro_mappings = dbsession.query(DistroMapping).all() for i in initial_mappings: if not", "= pkg_resources.resource_filename(__name__, \"swagger\") __monitors__ = { \"service_heartbeat\": { \"handler\": anchore_engine.subsys.servicestatus.handle_service_heartbeat,", "= 5 try: feed_config_check_retries = int(os.getenv(\"FEED_CLIENT_CHECK_RETRIES\", 3)) except ValueError: logger.exception(", "# Register feeds, the tuple is the class and bool", "functions, aborting service startup if any throw uncaught exceptions or", "feeds client credentials. 
Attempt {} of {}\".format( i + 1,", "on the queue fetch, so wrap with catch block to", "a task for a feed sync in the queue and", "= { \"service_heartbeat\": { \"handler\": anchore_engine.subsys.servicestatus.handle_service_heartbeat, \"taskType\": \"handle_service_heartbeat\", \"args\": [__service_name__],", ") return True def init_db_content(): \"\"\" Initialize the policy engine", "feed_registry, NvdFeed, ) # from anchore_engine.subsys.logger import enable_bootstrap_logging # enable_bootstrap_logging()", "skipping\") except ValueError as e: logger.warn(\"Received msg of wrong type\")", "from sqlalchemy.exc import IntegrityError # anchore modules import anchore_engine.clients.services.common import", "Execute the preflight functions, aborting service startup if any throw", "a feed sync\") raise class PolicyEngineService(ApiService): __service_name__ = \"policy_engine\" __spec_dir__", "Exception(\"Simplequeue service not yet ready\") else: # q_client = SimpleQueueClient(user=system_user[0],", "as e: logger.warn(\"Received msg of wrong type\") except Exception as", "True), (PackagesFeed, False), (GithubFeed, False), (NvdFeed, False), ]: logger.info(\"Registering feed", "default value of 5\" ) FEED_SYNC_RETRY_BACKOFF = 5 try: feed_config_check_retries", "if not, adds one. 
Interval for firing this should be", "\"min_cycle_timer\": 300, \"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\": False, \"initialized\": False,", "enable_bootstrap_logging # enable_bootstrap_logging() from anchore_engine.utils import timer feed_sync_queuename = \"feed_sync_tasks\"", "\"last_return\": False, \"initialized\": False, }, \"feed_sync_checker\": { \"handler\": handle_feed_sync_trigger, \"taskType\":", ") def handle_feed_sync(*args, **kwargs): \"\"\" Initiates a feed sync in", "necessary at system init try: logger.info( \"Checking policy engine db", "timer = time.time() logger.info(\"FIRING: feed syncer\") try: feeds = get_selected_feeds_to_sync(localconfig.get_config())", "\"handler\": handle_feed_sync, \"taskType\": \"handle_feed_sync\", \"args\": [], \"cycle_timer\": 3600, \"min_cycle_timer\": 1800,", "True except Exception as e: logger.warn( \"Could not verify feeds", ":return: \"\"\" return _init_distro_mappings() def init_feed_registry(): # Register feeds, the", "range(feed_config_check_retries): if i > 0: logger.info( \"Waiting for {} seconds", "time.time() - timer, function=\"do_feed_sync\", status=\"fail\", ) def handle_feed_sync(*args, **kwargs): \"\"\"", "should be longer than the expected feed sync duration. :param", "initialize default distro mappings - exception: \" + str(err) )", "{}).get(\"sync_enabled\", True) if feed_sync_enabled: logger.info(\"Feed sync task executor activated\") try:", "return True except Exception as e: logger.warn( \"Could not verify", "at startup. 
:return: \"\"\" return _init_distro_mappings() def init_feed_registry(): # Register", "5)) except ValueError: logger.exception( \"Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into", "Checking initial set of distro mappings\" ) with session_scope() as", "mostly for debugging and testing purposes try: FEED_SYNC_RETRIES = int(os.getenv(\"ANCHORE_FEED_SYNC_CHECK_RETRIES\",", "+ str(err)) def handle_feed_sync_trigger(*args, **kwargs): \"\"\" Checks to see if", "longer than the expected feed sync duration. :param args: :param", "throw uncaught exceptions or return False return value :return: \"\"\"", "FeedsUpdateTask.run_feeds_update(json_obj=msg.get(\"data\")) if result is not None: handler_success = True else:", "if there is a task for a feed sync in", "double-retry on task exec simplequeue.run_target_with_queue_ttl( None, queue=feed_sync_queuename, target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180,", "feed_config_check_retries = int(os.getenv(\"FEED_CLIENT_CHECK_RETRIES\", 3)) except ValueError: logger.exception( \"Error parsing env", "\"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\", status=\"fail\", ) def handle_feed_sync(*args, **kwargs):", "using default value of 3\" ) feed_config_check_retries = 3 try:", "return True def init_db_content(): \"\"\" Initialize the policy engine db", "policy engine db with any data necessary at startup. :return:", "client = None logger.info(\"Feeds client credentials ok\") return True except", "Register feeds, the tuple is the class and bool if", "e: logger.warn( \"Could not verify feeds endpoint and/or config. 
Got", "FEED_CLIENT_CHECK_RETRIES into int, using default value of 3\" ) feed_config_check_retries", "e ) ) last_ex = e else: if last_ex: raise", "* 1000, ) def run_feed_sync(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] )", "\" + str(err) ) return True def init_db_content(): \"\"\" Initialize", ") except Exception as err: logger.warn(\"failed to process task this", "next cycle\" ) finally: logger.info(\"Feed Sync task creator complete\") else:", "mapping initialization complete\") except Exception as err: if isinstance(err, IntegrityError):", "get_client() client = None logger.info(\"Feeds client credentials ok\") return True", "endpoint and/or config. Got exception: {}\".format( e ) ) last_ex", "\"FeedsUpdateTask\" not in locals(): from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask if \"get_selected_feeds_to_sync\"", "all_ready: logger.info(\"simplequeue service not yet ready, will retry\") raise Exception(\"Simplequeue", "\"\"\" Initiates a feed sync in the system in response", "\"feed_sync\", \"enabled\": True} # These are user-configurable but mostly for", "{}\".format(i)) dbsession.add(i) logger.info(\"Distro mapping initialization complete\") except Exception as err:", "sleep_time ) ) time.sleep(sleep_time) sleep_time += feed_config_check_backoff try: logger.info( \"Checking", "logger.info(\"Syncing configured feeds: {}\".format(feeds)) result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get(\"data\")) if result is", "0, \"last_return\": False, \"initialized\": False, }, } __lifecycle_handlers__ = {", "= None feed_sync_msg = {\"task_type\": \"feed_sync\", \"enabled\": True} # These", "(GithubFeed, False), (NvdFeed, False), ]: logger.info(\"Registering feed handler {}\".format(cls_tuple[0].__feed_name__)) feed_registry.register(cls_tuple[0],", "feed sync duration. 
:param args: :param kwargs: :return: \"\"\" system_user", "VulnerabilityFeed, NvdV2Feed, PackagesFeed, VulnDBFeed, GithubFeed, feed_registry, NvdFeed, ) # from", "to_distro=\"rhel\", flavor=\"RHEL\"), ] # set up any data necessary at", "last_ex = None for i in range(feed_config_check_retries): if i >", "and/or config. Got exception: {}\".format( e ) ) last_ex =", "all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] ) if not all_ready: logger.info(\"simplequeue service", "Sync task creator activated\") try: push_sync_task(system_user) logger.info(\"Feed Sync Trigger done,", "else: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\", status=\"fail\", ) def", "import logger from anchore_engine.configuration import localconfig from anchore_engine.clients.services import simplequeue,", "e: logger.exception( \"Preflight checks failed with error: {}. Aborting service", "not all_ready: logger.info(\"simplequeue service not yet ready, will retry\") raise", "else: if last_ex: raise last_ex else: raise Exception( \"Exceeded retries", "in locals(): from anchore_engine.services.policy_engine.engine.feeds.sync import ( get_selected_feeds_to_sync, ) handler_success =", "modules import anchore_engine.clients.services.common import anchore_engine.subsys.servicestatus import anchore_engine.subsys.metrics from anchore_engine.subsys import", "any data necessary at system init try: logger.info( \"Checking policy", "raise Exception( \"unable to initialize default distro mappings - exception:", "task for a feed sync in the queue and if", "in the queue and if not, adds one. Interval for", "100000, \"last_queued\": 0, \"last_return\": False, \"initialized\": False, }, } __lifecycle_handlers__", "Exception as e: logger.error(\"Caught escaped error in feed sync handler:", "the queue and if not, adds one. 
Interval for firing", "service startup\".format( e ) ) sys.exit(1) def _init_distro_mappings(): from anchore_engine.db", "type\") except Exception as err: logger.warn(\"failure in feed sync handler", "else: raise Exception( \"unable to initialize default distro mappings -", "- skipping feed sync trigger\" ) time.sleep(cycle_time) return True @retrying.retry(", "def run_feed_sync(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] ) if not all_ready:", "to false in config - skipping feed sync\") time.sleep(cycle_time) return", "funcs (must be here) def _check_feed_client_credentials(): from anchore_engine.services.policy_engine.engine.feeds.client import get_client", "isinstance(err, IntegrityError): logger.warn(\"another process has already initialized, continuing\") else: raise", "= True else: logger.warn(\"Feed sync task marked as disabled, so", "up any data necessary at system init try: logger.info( \"Checking", "one. Interval for firing this should be longer than the", "= int(os.getenv(\"FEED_CLIENT_CHECK_BACKOFF\", 5)) except ValueError: logger.exception( \"Error parsing env FEED_CLIENT_CHECK_BACKOFF", "wait for next cycle\" ) finally: logger.info(\"Feed Sync task creator", "[], \"cycle_timer\": 600, \"min_cycle_timer\": 300, \"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\":", "default value of 5\" ) feed_config_check_backoff = 5 # service", "missing mapping: {}\".format(i)) dbsession.add(i) logger.info(\"Distro mapping initialization complete\") except Exception", "5\" ) feed_config_check_backoff = 5 # service funcs (must be", "> 0: logger.info( \"Waiting for {} seconds to try feeds", "is a distro vulnerability feed or not for cls_tuple in", "false in config - skipping feed sync\") time.sleep(cycle_time) return True", "\"cycle_timer\": 3600, \"min_cycle_timer\": 1800, \"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\": False,", "startup\".format( e ) ) sys.exit(1) def 
_init_distro_mappings(): from anchore_engine.db import", "for {} seconds to try feeds client config check again\".format(", "time.time() - timer, function=\"do_feed_sync\", status=\"success\", ) else: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time()", "LifeCycleStages from anchore_engine.services.policy_engine.engine.feeds.feeds import ( VulnerabilityFeed, NvdV2Feed, PackagesFeed, VulnDBFeed, GithubFeed,", "bool if feed is a distro vulnerability feed or not", "\"cycle_timer\": 600, \"min_cycle_timer\": 300, \"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\": False,", "\"last_return\": False, \"initialized\": False, }, } __lifecycle_handlers__ = { LifeCycleStages.pre_register:", "int(os.getenv(\"FEED_CLIENT_CHECK_BACKOFF\", 5)) except ValueError: logger.exception( \"Error parsing env FEED_CLIENT_CHECK_BACKOFF value", "escaped error in feed sync handler: {}\".format(e)) finally: logger.info(\"Feed sync", "startup if any throw uncaught exceptions or return False return", "logger.exception( \"Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default", "(VulnerabilityFeed, True), (PackagesFeed, False), (GithubFeed, False), (NvdFeed, False), ]: logger.info(\"Registering", "as err: logger.warn(\"failure in feed sync handler - exception: \"", "\"initialized\": False, }, \"feed_sync_checker\": { \"handler\": handle_feed_sync_trigger, \"taskType\": \"handle_feed_sync_trigger\", \"args\":", "feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1]) def do_feed_sync(msg): if \"FeedsUpdateTask\" not in locals(): from", "Exception as err: logger.warn(\"failed to process task this cycle: \"", "= anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] ) if not all_ready: logger.info(\"simplequeue service not", "{ \"handler\": handle_feed_sync, \"taskType\": \"handle_feed_sync\", \"args\": [], \"cycle_timer\": 3600, \"min_cycle_timer\":", "the expected 
feed sync duration. :param args: :param kwargs: :return:", "of 5\" ) feed_config_check_backoff = 5 # service funcs (must", "( get_selected_feeds_to_sync, ) handler_success = False timer = time.time() logger.info(\"FIRING:", "import anchore_engine.subsys.metrics from anchore_engine.subsys import logger from anchore_engine.configuration import localconfig", "None logger.info(\"Feeds client credentials ok\") return True except Exception as", "run_feed_sync(system_user) except Exception as e: logger.error(\"Caught escaped error in feed", "os import retrying from sqlalchemy.exc import IntegrityError # anchore modules", "as e: logger.warn( \"Could not verify feeds endpoint and/or config.", "5\" ) FEED_SYNC_RETRIES = 5 try: FEED_SYNC_RETRY_BACKOFF = int( os.getenv(\"ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF\",", "exception: {}\".format( e ) ) last_ex = e else: if", "import FeedsUpdateTask if \"get_selected_feeds_to_sync\" not in locals(): from anchore_engine.services.policy_engine.engine.feeds.sync import", "preflight_check_functions = [init_db_content, init_feed_registry] for fn in preflight_check_functions: try: fn()", "retry\") raise Exception(\"Simplequeue service not yet ready\") else: try: #", "i in initial_mappings: if not [x for x in distro_mappings", "# set up any data necessary at system init try:", "status=\"fail\", ) def handle_feed_sync(*args, **kwargs): \"\"\" Initiates a feed sync", "feed sync in the queue and if not, adds one.", "return system_user_auth def process_preflight(): \"\"\" Execute the preflight functions, aborting", "raise class PolicyEngineService(ApiService): __service_name__ = \"policy_engine\" __spec_dir__ = pkg_resources.resource_filename(__name__, \"swagger\")", "config.get(\"feeds\", {}).get(\"sync_enabled\", True) if feed_sync_enabled: logger.info(\"Feed sync task executor activated\")", "config - skipping feed sync\") time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES,", "system_user_auth = 
config[\"system_user_auth\"] return system_user_auth def process_preflight(): \"\"\" Execute the", "import localconfig from anchore_engine.clients.services import simplequeue, internal_client_for from anchore_engine.clients.services.simplequeue import", "- exception: \" + str(err)) if handler_success: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time()", "# service funcs (must be here) def _check_feed_client_credentials(): from anchore_engine.services.policy_engine.engine.feeds.client", ") def _system_creds(): global system_user_auth if not system_user_auth: config =", "check. Failing check\" ) def _system_creds(): global system_user_auth if not", "from anchore_engine.subsys.logger import enable_bootstrap_logging # enable_bootstrap_logging() from anchore_engine.utils import timer", "executor activated\") try: run_feed_sync(system_user) except Exception as e: logger.error(\"Caught escaped", "as e: logger.error(\"Caught escaped error in feed sync handler: {}\".format(e))", ") def run_feed_sync(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] ) if not", "to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"debian\", to_distro=\"debian\", flavor=\"DEB\"), DistroMapping(from_distro=\"fedora\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ol\", to_distro=\"ol\",", "\"\"\" Execute the preflight functions, aborting service startup if any", "mapping: {}\".format(i)) dbsession.add(i) logger.info(\"Distro mapping initialization complete\") except Exception as", "int, using default value of 3\" ) feed_config_check_retries = 3", "True) if feed_sync_enabled: logger.info(\"Feed sync task executor activated\") try: run_feed_sync(system_user)", "in the system in response to a message from the", "init_feed_registry] for fn in preflight_check_functions: try: fn() except Exception as", "= config.get(\"feeds\", {}).get(\"sync_enabled\", True) if 
feed_sync_enabled: logger.info(\"Feed Sync task creator", "FEED_SYNC_RETRIES = 5 try: FEED_SYNC_RETRY_BACKOFF = int( os.getenv(\"ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF\", 5) )", "logger.info( \"sync_enabled is set to false in config - skipping", "push_sync_task(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] ) if not all_ready: logger.info(\"simplequeue", "system in response to a message from the queue :param", "a message from the queue :param args: :param kwargs: :return:", "message for a feed sync\") raise class PolicyEngineService(ApiService): __service_name__ =", "feeds client config check again\".format( sleep_time ) ) time.sleep(sleep_time) sleep_time", ":param kwargs: :return: \"\"\" system_user = _system_creds() logger.info(\"init args: {}\".format(kwargs))", "= get_client() client = None logger.info(\"Feeds client credentials ok\") return", "int, using default value of 5\" ) FEED_SYNC_RETRIES = 5", "logger.warn(\"failed to process task this cycle: \" + str(err)) def", "except ValueError: logger.exception( \"Error parsing env value FEED_CLIENT_CHECK_RETRIES into int,", "Got exception: {}\".format( e ) ) last_ex = e else:", "is a task for a feed sync in the queue", "sync handler - exception: \" + str(err)) if handler_success: anchore_engine.subsys.metrics.summary_observe(", "simplequeue.run_target_with_queue_ttl( None, queue=feed_sync_queuename, target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180, retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF, ) except", "with error: {}. 
Aborting service startup\".format( e ) ) sys.exit(1)", "None for i in range(feed_config_check_retries): if i > 0: logger.info(", "get_selected_feeds_to_sync, ) handler_success = False timer = time.time() logger.info(\"FIRING: feed", "logger.warn(\"Feed sync task marked as disabled, so skipping\") except ValueError", "as e: logger.error( \"Error caught in feed sync trigger handler", "if \"FeedsUpdateTask\" not in locals(): from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask if", "and if not, adds one. Interval for firing this should", "init_feed_registry(): # Register feeds, the tuple is the class and", "logger.info(\"FIRING: feed syncer\") try: feeds = get_selected_feeds_to_sync(localconfig.get_config()) logger.info(\"Syncing configured feeds:", "e: logger.error( \"Error caught in feed sync trigger handler after", "feed sync trigger handler after all retries. Will wait for", "# from anchore_engine.subsys.logger import enable_bootstrap_logging # enable_bootstrap_logging() from anchore_engine.utils import", "into int, using default value of 5\" ) FEED_SYNC_RETRIES =", "Exception as err: logger.warn(\"failure in feed sync handler - exception:", "sync duration. 
:param args: :param kwargs: :return: \"\"\" system_user =", "return _init_distro_mappings() def init_feed_registry(): # Register feeds, the tuple is", "logger.info(\"Feed Sync Trigger done, waiting for next cycle.\") except Exception", "as err: logger.warn(\"failed to process task this cycle: \" +", "( VulnerabilityFeed, NvdV2Feed, PackagesFeed, VulnDBFeed, GithubFeed, feed_registry, NvdFeed, ) #", "{ \"handler\": handle_feed_sync_trigger, \"taskType\": \"handle_feed_sync_trigger\", \"args\": [], \"cycle_timer\": 600, \"min_cycle_timer\":", "NvdV2Feed, PackagesFeed, VulnDBFeed, GithubFeed, feed_registry, NvdFeed, ) # from anchore_engine.subsys.logger", "q_client = internal_client_for(SimpleQueueClient, userId=None) if not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg): try: q_client.enqueue(name=feed_sync_queuename,", "in feed sync trigger handler after all retries. Will wait", "result is not None: handler_success = True else: logger.warn(\"Feed sync", "logger.exception( \"Error parsing env value FEED_CLIENT_CHECK_RETRIES into int, using default", "1, feed_config_check_retries ) ) client = get_client() client = None", "from the queue :param args: :param kwargs: :return: \"\"\" system_user", "\"feed_sync_checker\": { \"handler\": handle_feed_sync_trigger, \"taskType\": \"handle_feed_sync_trigger\", \"args\": [], \"cycle_timer\": 600,", "q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg): try: q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg) except: logger.error(\"Could not enqueue message", "db initialization. 
Checking initial set of distro mappings\" ) with", "set to false in config - skipping feed sync trigger\"", "def push_sync_task(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] ) if not all_ready:", "i.from_distro]: logger.info(\"Adding missing mapping: {}\".format(i)) dbsession.add(i) logger.info(\"Distro mapping initialization complete\")", "5 # service funcs (must be here) def _check_feed_client_credentials(): from", "internal_client_for from anchore_engine.clients.services.simplequeue import SimpleQueueClient from anchore_engine.service import ApiService, LifeCycleStages", "parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using default value of", "will retry\") raise Exception(\"Simplequeue service not yet ready\") else: try:", ") ) last_ex = e else: if last_ex: raise last_ex", ") sys.exit(1) def _init_distro_mappings(): from anchore_engine.db import session_scope, DistroMapping initial_mappings", "yet ready\") else: try: # This has its own retry", "if feed is a distro vulnerability feed or not for", "i > 0: logger.info( \"Waiting for {} seconds to try", "system_user_auth def process_preflight(): \"\"\" Execute the preflight functions, aborting service", "last_ex else: raise Exception( \"Exceeded retries for feeds client config", "import IntegrityError # anchore modules import anchore_engine.clients.services.common import anchore_engine.subsys.servicestatus import", "in feed sync handler - exception: \" + str(err)) if", "{}\".format(kwargs)) cycle_time = kwargs[\"mythread\"][\"cycle_timer\"] while True: config = localconfig.get_config() feed_sync_enabled", "in preflight_check_functions: try: fn() except Exception as e: logger.exception( \"Preflight", "600, \"min_cycle_timer\": 300, \"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\": False, \"initialized\":", "\"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\", status=\"success\", ) else: 
anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\",", "function=\"do_feed_sync\", status=\"success\", ) else: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\",", "\"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\": False, \"initialized\": False, }, }", "startup. :return: \"\"\" return _init_distro_mappings() def init_feed_registry(): # Register feeds,", "there is a task for a feed sync in the", "60, \"max_cycle_timer\": 60, \"last_queued\": 0, \"last_return\": False, \"initialized\": False, },", "value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of 5\" )", "\"\"\" return _init_distro_mappings() def init_feed_registry(): # Register feeds, the tuple", "\"Preflight checks failed with error: {}. Aborting service startup\".format( e", "retries for feeds client config check. Failing check\" ) def", "DistroMapping(from_distro=\"fedora\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ol\", to_distro=\"ol\", flavor=\"RHEL\"), DistroMapping(from_distro=\"rhel\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ubuntu\",", "False, \"initialized\": False, }, \"feed_sync\": { \"handler\": handle_feed_sync, \"taskType\": \"handle_feed_sync\",", "yet ready, will retry\") raise Exception(\"Simplequeue service not yet ready\")", "failed with error: {}. 
Aborting service startup\".format( e ) )", "set of distro mappings\" ) with session_scope() as dbsession: distro_mappings", "but mostly for debugging and testing purposes try: FEED_SYNC_RETRIES =", "to_distro=\"busybox\", flavor=\"BUSYB\"), DistroMapping(from_distro=\"centos\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"debian\", to_distro=\"debian\", flavor=\"DEB\"), DistroMapping(from_distro=\"fedora\", to_distro=\"rhel\",", "FEED_SYNC_RETRY_BACKOFF = 5 try: feed_config_check_retries = int(os.getenv(\"FEED_CLIENT_CHECK_RETRIES\", 3)) except ValueError:", "from anchore_engine.clients.services import simplequeue, internal_client_for from anchore_engine.clients.services.simplequeue import SimpleQueueClient from", "60, \"min_cycle_timer\": 60, \"max_cycle_timer\": 60, \"last_queued\": 0, \"last_return\": False, \"initialized\":", ") def push_sync_task(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] ) if not", "try: # This has its own retry on the queue", "not yet ready\") else: # q_client = SimpleQueueClient(user=system_user[0], password=<PASSWORD>[1]) q_client", "initial set of distro mappings\" ) with session_scope() as dbsession:", "anchore_engine.utils import timer feed_sync_queuename = \"feed_sync_tasks\" system_user_auth = None feed_sync_msg", "DistroMapping(from_distro=\"ol\", to_distro=\"ol\", flavor=\"RHEL\"), DistroMapping(from_distro=\"rhel\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ubuntu\", to_distro=\"ubuntu\", flavor=\"DEB\"), DistroMapping(from_distro=\"amzn\",", "internal_client_for(SimpleQueueClient, userId=None) if not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg): try: q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg) except:", "\"taskType\": \"handle_feed_sync_trigger\", \"args\": [], \"cycle_timer\": 600, \"min_cycle_timer\": 300, \"max_cycle_timer\": 100000,", "1000, 
wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def push_sync_task(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready(", "value of 3\" ) feed_config_check_retries = 3 try: feed_config_check_backoff =", "\"\"\" preflight_check_functions = [init_db_content, init_feed_registry] for fn in preflight_check_functions: try:", "Attempt {} of {}\".format( i + 1, feed_config_check_retries ) )", "to_distro=\"amzn\", flavor=\"RHEL\"), DistroMapping(from_distro=\"redhat\", to_distro=\"rhel\", flavor=\"RHEL\"), ] # set up any", "retry on the queue fetch, so wrap with catch block", "[\"simplequeue\"] ) if not all_ready: logger.info(\"simplequeue service not yet ready,", "DistroMapping(from_distro=\"alpine\", to_distro=\"alpine\", flavor=\"ALPINE\"), DistroMapping(from_distro=\"busybox\", to_distro=\"busybox\", flavor=\"BUSYB\"), DistroMapping(from_distro=\"centos\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"debian\",", "in initial_mappings: if not [x for x in distro_mappings if", "anchore_engine.subsys.servicestatus.handle_service_heartbeat, \"taskType\": \"handle_service_heartbeat\", \"args\": [__service_name__], \"cycle_timer\": 60, \"min_cycle_timer\": 60, \"max_cycle_timer\":", "policy engine db initialization. 
Checking initial set of distro mappings\"", "is set to false in config - skipping feed sync", ") ) client = get_client() client = None logger.info(\"Feeds client", "1000, ) def run_feed_sync(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] ) if", "disabled, so skipping\") except ValueError as e: logger.warn(\"Received msg of", "in range(feed_config_check_retries): if i > 0: logger.info( \"Waiting for {}", "False, \"initialized\": False, }, \"feed_sync_checker\": { \"handler\": handle_feed_sync_trigger, \"taskType\": \"handle_feed_sync_trigger\",", "sync\") raise class PolicyEngineService(ApiService): __service_name__ = \"policy_engine\" __spec_dir__ = pkg_resources.resource_filename(__name__,", "firing this should be longer than the expected feed sync", "activated\") try: push_sync_task(system_user) logger.info(\"Feed Sync Trigger done, waiting for next", "\"max_cycle_timer\": 60, \"last_queued\": 0, \"last_return\": False, \"initialized\": False, }, \"feed_sync_checker\":", "trigger\" ) time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000,", "class and bool if feed is a distro vulnerability feed", "\"get_selected_feeds_to_sync\" not in locals(): from anchore_engine.services.policy_engine.engine.feeds.sync import ( get_selected_feeds_to_sync, )", "preflight functions, aborting service startup if any throw uncaught exceptions", "err: logger.warn(\"failure in feed sync handler - exception: \" +", "except Exception as err: if isinstance(err, IntegrityError): logger.warn(\"another process has", "* 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def push_sync_task(system_user): all_ready =", "DistroMapping(from_distro=\"ubuntu\", to_distro=\"ubuntu\", flavor=\"DEB\"), DistroMapping(from_distro=\"amzn\", to_distro=\"amzn\", flavor=\"RHEL\"), DistroMapping(from_distro=\"redhat\", to_distro=\"rhel\", 
flavor=\"RHEL\"), ]", "SimpleQueueClient from anchore_engine.service import ApiService, LifeCycleStages from anchore_engine.services.policy_engine.engine.feeds.feeds import (", "except ValueError: logger.exception( \"Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int,", "again\".format( sleep_time ) ) time.sleep(sleep_time) sleep_time += feed_config_check_backoff try: logger.info(", "this should be longer than the expected feed sync duration.", "PolicyEngineService(ApiService): __service_name__ = \"policy_engine\" __spec_dir__ = pkg_resources.resource_filename(__name__, \"swagger\") __monitors__ =", "ok\") return True except Exception as e: logger.warn( \"Could not", "err: if isinstance(err, IntegrityError): logger.warn(\"another process has already initialized, continuing\")", "configured feeds: {}\".format(feeds)) result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get(\"data\")) if result is not", "anchore_engine.subsys.servicestatus import anchore_engine.subsys.metrics from anchore_engine.subsys import logger from anchore_engine.configuration import", "= localconfig.get_config() system_user_auth = config[\"system_user_auth\"] return system_user_auth def process_preflight(): \"\"\"", ") ) sys.exit(1) def _init_distro_mappings(): from anchore_engine.db import session_scope, DistroMapping", "\"last_queued\": 0, \"last_return\": False, \"initialized\": False, }, \"feed_sync_checker\": { \"handler\":", "\"min_cycle_timer\": 1800, \"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\": False, \"initialized\": False,", ") time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF", "cycle.\") except Exception as e: logger.error( \"Error caught in feed", "raise Exception( \"Exceeded retries for feeds client config check. 
Failing", "debugging and testing purposes try: FEED_SYNC_RETRIES = int(os.getenv(\"ANCHORE_FEED_SYNC_CHECK_RETRIES\", 5)) except", "[ DistroMapping(from_distro=\"alpine\", to_distro=\"alpine\", flavor=\"ALPINE\"), DistroMapping(from_distro=\"busybox\", to_distro=\"busybox\", flavor=\"BUSYB\"), DistroMapping(from_distro=\"centos\", to_distro=\"rhel\", flavor=\"RHEL\"),", "continuing\") else: raise Exception( \"unable to initialize default distro mappings", "cls_tuple in [ (NvdV2Feed, False), (VulnDBFeed, False), (VulnerabilityFeed, True), (PackagesFeed,", "be longer than the expected feed sync duration. :param args:", "of {}\".format( i + 1, feed_config_check_retries ) ) client =", "feed_config_check_backoff = 5 # service funcs (must be here) def", "distro_mappings = dbsession.query(DistroMapping).all() for i in initial_mappings: if not [x", "logger from anchore_engine.configuration import localconfig from anchore_engine.clients.services import simplequeue, internal_client_for", "IntegrityError # anchore modules import anchore_engine.clients.services.common import anchore_engine.subsys.servicestatus import anchore_engine.subsys.metrics", "anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] ) if not all_ready: logger.info(\"simplequeue service not yet", "feed_sync_msg = {\"task_type\": \"feed_sync\", \"enabled\": True} # These are user-configurable", "from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask if \"get_selected_feeds_to_sync\" not in locals(): from", "fn in preflight_check_functions: try: fn() except Exception as e: logger.exception(", "kwargs: :return: \"\"\" system_user = _system_creds() logger.info(\"init args: {}\".format(kwargs)) cycle_time", "exec simplequeue.run_target_with_queue_ttl( None, queue=feed_sync_queuename, target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180, retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF, )", ") if not all_ready: logger.info(\"simplequeue 
service not yet ready, will", "# anchore modules import anchore_engine.clients.services.common import anchore_engine.subsys.servicestatus import anchore_engine.subsys.metrics from", "client config check again\".format( sleep_time ) ) time.sleep(sleep_time) sleep_time +=", "\"Exceeded retries for feeds client config check. Failing check\" )", ") client = get_client() client = None logger.info(\"Feeds client credentials", "- skipping feed sync\") time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF", "not system_user_auth: config = localconfig.get_config() system_user_auth = config[\"system_user_auth\"] return system_user_auth", "to see if there is a task for a feed", "than the expected feed sync duration. :param args: :param kwargs:", "{} seconds to try feeds client config check again\".format( sleep_time", "FEED_SYNC_RETRIES = int(os.getenv(\"ANCHORE_FEED_SYNC_CHECK_RETRIES\", 5)) except ValueError: logger.exception( \"Error parsing env", "ValueError: logger.exception( \"Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using", "os.getenv(\"ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF\", 5) ) except ValueError: logger.exception( \"Error parsing env value", "False), (VulnDBFeed, False), (VulnerabilityFeed, True), (PackagesFeed, False), (GithubFeed, False), (NvdFeed,", "sync task marked as disabled, so skipping\") except ValueError as", "anchore_engine.service import ApiService, LifeCycleStages from anchore_engine.services.policy_engine.engine.feeds.feeds import ( VulnerabilityFeed, NvdV2Feed,", "credentials. 
Attempt {} of {}\".format( i + 1, feed_config_check_retries )", "credentials ok\") return True except Exception as e: logger.warn( \"Could", "+ str(err)) if handler_success: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\",", "to a message from the queue :param args: :param kwargs:", "feed_sync_enabled = config.get(\"feeds\", {}).get(\"sync_enabled\", True) if feed_sync_enabled: logger.info(\"Feed sync task", "[init_db_content, init_feed_registry] for fn in preflight_check_functions: try: fn() except Exception", "in config - skipping feed sync\") time.sleep(cycle_time) return True @retrying.retry(", "= {\"task_type\": \"feed_sync\", \"enabled\": True} # These are user-configurable but", "on task exec simplequeue.run_target_with_queue_ttl( None, queue=feed_sync_queuename, target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180, retries=FEED_SYNC_RETRIES,", "retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF, ) except Exception as err: logger.warn(\"failed to process", "kwargs[\"mythread\"][\"cycle_timer\"] while True: config = localconfig.get_config() feed_sync_enabled = config.get(\"feeds\", {}).get(\"sync_enabled\",", "ready\") else: try: # This has its own retry on", "process task this cycle: \" + str(err)) def handle_feed_sync_trigger(*args, **kwargs):", "if not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg): try: q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg) except: logger.error(\"Could not", "parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of", "with any data necessary at startup. 
:return: \"\"\" return _init_distro_mappings()", "if last_ex: raise last_ex else: raise Exception( \"Exceeded retries for", "\"last_queued\": 0, \"last_return\": False, \"initialized\": False, }, } __lifecycle_handlers__ =", "system_user = _system_creds() logger.info(\"init args: {}\".format(kwargs)) cycle_time = kwargs[\"mythread\"][\"cycle_timer\"] while", "to initialize default distro mappings - exception: \" + str(err)", "retrying from sqlalchemy.exc import IntegrityError # anchore modules import anchore_engine.clients.services.common", "ValueError as e: logger.warn(\"Received msg of wrong type\") except Exception", "anchore_engine.clients.services.simplequeue import SimpleQueueClient from anchore_engine.service import ApiService, LifeCycleStages from anchore_engine.services.policy_engine.engine.feeds.feeds", "i in range(feed_config_check_retries): if i > 0: logger.info( \"Waiting for", "localconfig.get_config() feed_sync_enabled = config.get(\"feeds\", {}).get(\"sync_enabled\", True) if feed_sync_enabled: logger.info(\"Feed sync", "[x for x in distro_mappings if x.from_distro == i.from_distro]: logger.info(\"Adding", "logger.error(\"Caught escaped error in feed sync handler: {}\".format(e)) finally: logger.info(\"Feed", "is the class and bool if feed is a distro", "logger.exception( \"Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using default", "handle_feed_sync(*args, **kwargs): \"\"\" Initiates a feed sync in the system", "DistroMapping(from_distro=\"rhel\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ubuntu\", to_distro=\"ubuntu\", flavor=\"DEB\"), DistroMapping(from_distro=\"amzn\", to_distro=\"amzn\", flavor=\"RHEL\"), DistroMapping(from_distro=\"redhat\",", "DistroMapping(from_distro=\"redhat\", to_distro=\"rhel\", flavor=\"RHEL\"), ] # set up any data necessary", "handle_feed_sync_trigger(*args, **kwargs): \"\"\" Checks to see if there is a", "handler_success = True else: logger.warn(\"Feed sync task marked as 
disabled,", "tuple is the class and bool if feed is a", "queue and if not, adds one. Interval for firing this", "logger.info(\"Adding missing mapping: {}\".format(i)) dbsession.add(i) logger.info(\"Distro mapping initialization complete\") except", "feeds endpoint and/or config. Got exception: {}\".format( e ) )", "= None for i in range(feed_config_check_retries): if i > 0:", "(VulnDBFeed, False), (VulnerabilityFeed, True), (PackagesFeed, False), (GithubFeed, False), (NvdFeed, False),", "False, }, \"feed_sync\": { \"handler\": handle_feed_sync, \"taskType\": \"handle_feed_sync\", \"args\": [],", "init try: logger.info( \"Checking policy engine db initialization. Checking initial", "as err: if isinstance(err, IntegrityError): logger.warn(\"another process has already initialized,", "sleep_time = feed_config_check_backoff last_ex = None for i in range(feed_config_check_retries):", "i + 1, feed_config_check_retries ) ) client = get_client() client", "try: feed_config_check_retries = int(os.getenv(\"FEED_CLIENT_CHECK_RETRIES\", 3)) except ValueError: logger.exception( \"Error parsing", "try feeds client config check again\".format( sleep_time ) ) time.sleep(sleep_time)", "system init try: logger.info( \"Checking policy engine db initialization. 
Checking", "get_selected_feeds_to_sync(localconfig.get_config()) logger.info(\"Syncing configured feeds: {}\".format(feeds)) result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get(\"data\")) if result", "args: :param kwargs: :return: \"\"\" system_user = _system_creds() logger.info(\"init args:", "Sync Trigger done, waiting for next cycle.\") except Exception as", "using default value of 5\" ) feed_config_check_backoff = 5 #", "None, queue=feed_sync_queuename, target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180, retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF, ) except Exception", "false in config - skipping feed sync trigger\" ) time.sleep(cycle_time)", "feed sync\") raise class PolicyEngineService(ApiService): __service_name__ = \"policy_engine\" __spec_dir__ =", "* 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def run_feed_sync(system_user): all_ready =", "def handle_feed_sync_trigger(*args, **kwargs): \"\"\" Checks to see if there is", "feed_config_check_backoff try: logger.info( \"Checking feeds client credentials. 
Attempt {} of", "using default value of 5\" ) FEED_SYNC_RETRY_BACKOFF = 5 try:", "complete\") else: logger.info(\"sync_enabled is set to false in config -", "max_wait_seconds=30, visibility_timeout=180, retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF, ) except Exception as err: logger.warn(\"failed", "parsing env value FEED_CLIENT_CHECK_RETRIES into int, using default value of", "\"taskType\": \"handle_service_heartbeat\", \"args\": [__service_name__], \"cycle_timer\": 60, \"min_cycle_timer\": 60, \"max_cycle_timer\": 60,", "logger.info(\"Feed Sync task creator activated\") try: push_sync_task(system_user) logger.info(\"Feed Sync Trigger", "don't double-retry on task exec simplequeue.run_target_with_queue_ttl( None, queue=feed_sync_queuename, target=do_feed_sync, max_wait_seconds=30,", "ready\") else: # q_client = SimpleQueueClient(user=system_user[0], password=<PASSWORD>[1]) q_client = internal_client_for(SimpleQueueClient,", ") last_ex = e else: if last_ex: raise last_ex else:", "in feed sync handler: {}\".format(e)) finally: logger.info(\"Feed sync task executor", ") FEED_SYNC_RETRY_BACKOFF = 5 try: feed_config_check_retries = int(os.getenv(\"FEED_CLIENT_CHECK_RETRIES\", 3)) except", "Initialize the policy engine db with any data necessary at", "str(err) ) return True def init_db_content(): \"\"\" Initialize the policy", "from anchore_engine.services.policy_engine.engine.feeds.sync import ( get_selected_feeds_to_sync, ) handler_success = False timer", "anchore_engine.services.policy_engine.engine.feeds.feeds import ( VulnerabilityFeed, NvdV2Feed, PackagesFeed, VulnDBFeed, GithubFeed, feed_registry, NvdFeed,", "env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of 5\"", "5\" ) FEED_SYNC_RETRY_BACKOFF = 5 try: feed_config_check_retries = int(os.getenv(\"FEED_CLIENT_CHECK_RETRIES\", 3))", "def _init_distro_mappings(): from anchore_engine.db import session_scope, DistroMapping initial_mappings = [", "handler: 
{}\".format(e)) finally: logger.info(\"Feed sync task executor complete\") else: logger.info(\"sync_enabled", "def _check_feed_client_credentials(): from anchore_engine.services.policy_engine.engine.feeds.client import get_client sleep_time = feed_config_check_backoff last_ex", "True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, )", "except Exception as e: logger.error( \"Error caught in feed sync", "finally: logger.info(\"Feed Sync task creator complete\") else: logger.info( \"sync_enabled is", "int, using default value of 5\" ) FEED_SYNC_RETRY_BACKOFF = 5", "feed sync in the system in response to a message", "last_ex = e else: if last_ex: raise last_ex else: raise", "expected feed sync duration. :param args: :param kwargs: :return: \"\"\"", "data necessary at system init try: logger.info( \"Checking policy engine", "def handle_feed_sync(*args, **kwargs): \"\"\" Initiates a feed sync in the", "try: q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg) except: logger.error(\"Could not enqueue message for a", "except ValueError as e: logger.warn(\"Received msg of wrong type\") except", "if i > 0: logger.info( \"Waiting for {} seconds to", ":return: \"\"\" preflight_check_functions = [init_db_content, init_feed_registry] for fn in preflight_check_functions:", "default distro mappings - exception: \" + str(err) ) return", "\"swagger\") __monitors__ = { \"service_heartbeat\": { \"handler\": anchore_engine.subsys.servicestatus.handle_service_heartbeat, \"taskType\": \"handle_service_heartbeat\",", "sys.exit(1) def _init_distro_mappings(): from anchore_engine.db import session_scope, DistroMapping initial_mappings =", "\"Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value", "= 5 try: FEED_SYNC_RETRY_BACKOFF = int( os.getenv(\"ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF\", 5) ) except", 
"flavor=\"BUSYB\"), DistroMapping(from_distro=\"centos\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"debian\", to_distro=\"debian\", flavor=\"DEB\"), DistroMapping(from_distro=\"fedora\", to_distro=\"rhel\", flavor=\"RHEL\"),", "config = localconfig.get_config() feed_sync_enabled = config.get(\"feeds\", {}).get(\"sync_enabled\", True) if feed_sync_enabled:", "else: # q_client = SimpleQueueClient(user=system_user[0], password=<PASSWORD>[1]) q_client = internal_client_for(SimpleQueueClient, userId=None)", "mappings\" ) with session_scope() as dbsession: distro_mappings = dbsession.query(DistroMapping).all() for", "with session_scope() as dbsession: distro_mappings = dbsession.query(DistroMapping).all() for i in", "init_db_content(): \"\"\" Initialize the policy engine db with any data", ") except ValueError: logger.exception( \"Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into", "False), (VulnerabilityFeed, True), (PackagesFeed, False), (GithubFeed, False), (NvdFeed, False), ]:", "False), (NvdFeed, False), ]: logger.info(\"Registering feed handler {}\".format(cls_tuple[0].__feed_name__)) feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1])", "we don't double-retry on task exec simplequeue.run_target_with_queue_ttl( None, queue=feed_sync_queuename, target=do_feed_sync,", "\"unable to initialize default distro mappings - exception: \" +", "q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg) except: logger.error(\"Could not enqueue message for a feed", "True: config = localconfig.get_config() feed_sync_enabled = config.get(\"feeds\", {}).get(\"sync_enabled\", True) if", "status=\"success\", ) else: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\", status=\"fail\",", "args: {}\".format(kwargs)) cycle_time = kwargs[\"mythread\"][\"cycle_timer\"] while True: config = localconfig.get_config()", "msg of wrong type\") 
except Exception as err: logger.warn(\"failure in", "the queue :param args: :param kwargs: :return: \"\"\" system_user =", "for next cycle.\") except Exception as e: logger.error( \"Error caught", "* 1000, ) def push_sync_task(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] )", "= 3 try: feed_config_check_backoff = int(os.getenv(\"FEED_CLIENT_CHECK_BACKOFF\", 5)) except ValueError: logger.exception(", "global system_user_auth if not system_user_auth: config = localconfig.get_config() system_user_auth =", "ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of 5\" ) FEED_SYNC_RETRIES", "target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180, retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF, ) except Exception as err:", "Sync task creator complete\") else: logger.info( \"sync_enabled is set to", "flavor=\"RHEL\"), DistroMapping(from_distro=\"rhel\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ubuntu\", to_distro=\"ubuntu\", flavor=\"DEB\"), DistroMapping(from_distro=\"amzn\", to_distro=\"amzn\", flavor=\"RHEL\"),", "3 try: feed_config_check_backoff = int(os.getenv(\"FEED_CLIENT_CHECK_BACKOFF\", 5)) except ValueError: logger.exception( \"Error", "\"handler\": anchore_engine.subsys.servicestatus.handle_service_heartbeat, \"taskType\": \"handle_service_heartbeat\", \"args\": [__service_name__], \"cycle_timer\": 60, \"min_cycle_timer\": 60,", "False, }, } __lifecycle_handlers__ = { LifeCycleStages.pre_register: [ (process_preflight, None),", "raise last_ex else: raise Exception( \"Exceeded retries for feeds client", "distro mappings\" ) with session_scope() as dbsession: distro_mappings = dbsession.query(DistroMapping).all()", "logger.exception( \"Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default", "localconfig.get_config() feed_sync_enabled = config.get(\"feeds\", {}).get(\"sync_enabled\", True) if feed_sync_enabled: 
logger.info(\"Feed Sync", "any throw uncaught exceptions or return False return value :return:", "= e else: if last_ex: raise last_ex else: raise Exception(", "err: logger.warn(\"failed to process task this cycle: \" + str(err))", "complete\") else: logger.info( \"sync_enabled is set to false in config", "logger.warn(\"another process has already initialized, continuing\") else: raise Exception( \"unable", "0, \"last_return\": False, \"initialized\": False, }, \"feed_sync\": { \"handler\": handle_feed_sync,", "logger.info(\"simplequeue service not yet ready, will retry\") raise Exception(\"Simplequeue service", "if x.from_distro == i.from_distro]: logger.info(\"Adding missing mapping: {}\".format(i)) dbsession.add(i) logger.info(\"Distro", "# enable_bootstrap_logging() from anchore_engine.utils import timer feed_sync_queuename = \"feed_sync_tasks\" system_user_auth", "{}\".format(e)) finally: logger.info(\"Feed sync task executor complete\") else: logger.info(\"sync_enabled is", "config.get(\"feeds\", {}).get(\"sync_enabled\", True) if feed_sync_enabled: logger.info(\"Feed Sync task creator activated\")", "if not [x for x in distro_mappings if x.from_distro ==", "anchore modules import anchore_engine.clients.services.common import anchore_engine.subsys.servicestatus import anchore_engine.subsys.metrics from anchore_engine.subsys", "feed sync\") time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000,", "be here) def _check_feed_client_credentials(): from anchore_engine.services.policy_engine.engine.feeds.client import get_client sleep_time =", "handler_success = False timer = time.time() logger.info(\"FIRING: feed syncer\") try:", "__monitors__ = { \"service_heartbeat\": { \"handler\": anchore_engine.subsys.servicestatus.handle_service_heartbeat, \"taskType\": \"handle_service_heartbeat\", \"args\":", "config check. 
Failing check\" ) def _system_creds(): global system_user_auth if", "pkg_resources import os import retrying from sqlalchemy.exc import IntegrityError #", "system_user_auth if not system_user_auth: config = localconfig.get_config() system_user_auth = config[\"system_user_auth\"]", "checks failed with error: {}. Aborting service startup\".format( e )", "return value :return: \"\"\" preflight_check_functions = [init_db_content, init_feed_registry] for fn", "feed is a distro vulnerability feed or not for cls_tuple", "in response to a message from the queue :param args:", "except Exception as err: logger.warn(\"failure in feed sync handler -", "sync in the system in response to a message from", "logger.info(\"Feed Sync task creator complete\") else: logger.info( \"sync_enabled is set", "sleep_time += feed_config_check_backoff try: logger.info( \"Checking feeds client credentials. Attempt", "queue fetch, so wrap with catch block to ensure we", "to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ubuntu\", to_distro=\"ubuntu\", flavor=\"DEB\"), DistroMapping(from_distro=\"amzn\", to_distro=\"amzn\", flavor=\"RHEL\"), DistroMapping(from_distro=\"redhat\", to_distro=\"rhel\",", "logger.info(\"sync_enabled is set to false in config - skipping feed", "retries. 
Will wait for next cycle\" ) finally: logger.info(\"Feed Sync", "False, \"initialized\": False, }, } __lifecycle_handlers__ = { LifeCycleStages.pre_register: [", "raise Exception(\"Simplequeue service not yet ready\") else: # q_client =", ") handler_success = False timer = time.time() logger.info(\"FIRING: feed syncer\")", "default value of 3\" ) feed_config_check_retries = 3 try: feed_config_check_backoff", "to_distro=\"ubuntu\", flavor=\"DEB\"), DistroMapping(from_distro=\"amzn\", to_distro=\"amzn\", flavor=\"RHEL\"), DistroMapping(from_distro=\"redhat\", to_distro=\"rhel\", flavor=\"RHEL\"), ] #", "(NvdFeed, False), ]: logger.info(\"Registering feed handler {}\".format(cls_tuple[0].__feed_name__)) feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1]) def", "DistroMapping initial_mappings = [ DistroMapping(from_distro=\"alpine\", to_distro=\"alpine\", flavor=\"ALPINE\"), DistroMapping(from_distro=\"busybox\", to_distro=\"busybox\", flavor=\"BUSYB\"),", "locals(): from anchore_engine.services.policy_engine.engine.feeds.sync import ( get_selected_feeds_to_sync, ) handler_success = False", "necessary at startup. :return: \"\"\" return _init_distro_mappings() def init_feed_registry(): #", "has its own retry on the queue fetch, so wrap", "int( os.getenv(\"ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF\", 5) ) except ValueError: logger.exception( \"Error parsing env", "Trigger done, waiting for next cycle.\") except Exception as e:", "next cycle.\") except Exception as e: logger.error( \"Error caught in", "in config - skipping feed sync trigger\" ) time.sleep(cycle_time) return", "service funcs (must be here) def _check_feed_client_credentials(): from anchore_engine.services.policy_engine.engine.feeds.client import", "5 try: FEED_SYNC_RETRY_BACKOFF = int( os.getenv(\"ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF\", 5) ) except ValueError:", "a distro vulnerability feed or not for cls_tuple in [", "fetch, so wrap with catch block to ensure we don't", "adds one. 
Interval for firing this should be longer than", "\"policy_engine\" __spec_dir__ = pkg_resources.resource_filename(__name__, \"swagger\") __monitors__ = { \"service_heartbeat\": {", "except Exception as e: logger.error(\"Caught escaped error in feed sync", "\"Error caught in feed sync trigger handler after all retries.", "last_ex: raise last_ex else: raise Exception( \"Exceeded retries for feeds", "try: FEED_SYNC_RETRY_BACKOFF = int( os.getenv(\"ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF\", 5) ) except ValueError: logger.exception(", "= dbsession.query(DistroMapping).all() for i in initial_mappings: if not [x for", "FEED_CLIENT_CHECK_BACKOFF value into int, using default value of 5\" )", "exception: \" + str(err) ) return True def init_db_content(): \"\"\"", "True else: logger.warn(\"Feed sync task marked as disabled, so skipping\")", "creator complete\") else: logger.info( \"sync_enabled is set to false in", "try: feed_config_check_backoff = int(os.getenv(\"FEED_CLIENT_CHECK_BACKOFF\", 5)) except ValueError: logger.exception( \"Error parsing", "feed_sync_enabled: logger.info(\"Feed Sync task creator activated\") try: push_sync_task(system_user) logger.info(\"Feed Sync", "into int, using default value of 3\" ) feed_config_check_retries =", "distro mappings - exception: \" + str(err) ) return True", "anchore_engine.services.policy_engine.engine.feeds.client import get_client sleep_time = feed_config_check_backoff last_ex = None for", "in distro_mappings if x.from_distro == i.from_distro]: logger.info(\"Adding missing mapping: {}\".format(i))", "will retry\") raise Exception(\"Simplequeue service not yet ready\") else: #", "check\" ) def _system_creds(): global system_user_auth if not system_user_auth: config", "as e: logger.exception( \"Preflight checks failed with error: {}. 
Aborting", "(must be here) def _check_feed_client_credentials(): from anchore_engine.services.policy_engine.engine.feeds.client import get_client sleep_time", "flavor=\"DEB\"), DistroMapping(from_distro=\"amzn\", to_distro=\"amzn\", flavor=\"RHEL\"), DistroMapping(from_distro=\"redhat\", to_distro=\"rhel\", flavor=\"RHEL\"), ] # set", "testing purposes try: FEED_SYNC_RETRIES = int(os.getenv(\"ANCHORE_FEED_SYNC_CHECK_RETRIES\", 5)) except ValueError: logger.exception(", "from anchore_engine.db import session_scope, DistroMapping initial_mappings = [ DistroMapping(from_distro=\"alpine\", to_distro=\"alpine\",", "\"handle_service_heartbeat\", \"args\": [__service_name__], \"cycle_timer\": 60, \"min_cycle_timer\": 60, \"max_cycle_timer\": 60, \"last_queued\":", "DistroMapping(from_distro=\"amzn\", to_distro=\"amzn\", flavor=\"RHEL\"), DistroMapping(from_distro=\"redhat\", to_distro=\"rhel\", flavor=\"RHEL\"), ] # set up", "= time.time() logger.info(\"FIRING: feed syncer\") try: feeds = get_selected_feeds_to_sync(localconfig.get_config()) logger.info(\"Syncing", "flavor=\"RHEL\"), ] # set up any data necessary at system", "queue=feed_sync_queuename, target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180, retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF, ) except Exception as", "cycle_time = kwargs[\"mythread\"][\"cycle_timer\"] while True: config = localconfig.get_config() feed_sync_enabled =", "system_user_auth: config = localconfig.get_config() system_user_auth = config[\"system_user_auth\"] return system_user_auth def", "\"Error parsing env value FEED_CLIENT_CHECK_RETRIES into int, using default value", "5)) except ValueError: logger.exception( \"Error parsing env FEED_CLIENT_CHECK_BACKOFF value into", "the tuple is the class and bool if feed is", "def _system_creds(): global system_user_auth if not system_user_auth: config = localconfig.get_config()", "feed sync handler - exception: \" + str(err)) if handler_success:", "timer, 
function=\"do_feed_sync\", status=\"fail\", ) def handle_feed_sync(*args, **kwargs): \"\"\" Initiates a", "feed_config_check_retries ) ) client = get_client() client = None logger.info(\"Feeds", "config check again\".format( sleep_time ) ) time.sleep(sleep_time) sleep_time += feed_config_check_backoff", "\"args\": [], \"cycle_timer\": 3600, \"min_cycle_timer\": 1800, \"max_cycle_timer\": 100000, \"last_queued\": 0,", "process has already initialized, continuing\") else: raise Exception( \"unable to", "True} # These are user-configurable but mostly for debugging and", "after all retries. Will wait for next cycle\" ) finally:", "sys import pkg_resources import os import retrying from sqlalchemy.exc import", "the queue fetch, so wrap with catch block to ensure", "of 3\" ) feed_config_check_retries = 3 try: feed_config_check_backoff = int(os.getenv(\"FEED_CLIENT_CHECK_BACKOFF\",", "env FEED_CLIENT_CHECK_BACKOFF value into int, using default value of 5\"", "handler_success: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\", status=\"success\", ) else:", ") ) time.sleep(sleep_time) sleep_time += feed_config_check_backoff try: logger.info( \"Checking feeds", "except ValueError: logger.exception( \"Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int,", "try: logger.info( \"Checking policy engine db initialization. Checking initial set", "Exception(\"Simplequeue service not yet ready\") else: try: # This has", ") FEED_SYNC_RETRIES = 5 try: FEED_SYNC_RETRY_BACKOFF = int( os.getenv(\"ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF\", 5)", "feed_sync_enabled = config.get(\"feeds\", {}).get(\"sync_enabled\", True) if feed_sync_enabled: logger.info(\"Feed Sync task", ") time.sleep(sleep_time) sleep_time += feed_config_check_backoff try: logger.info( \"Checking feeds client", "handler after all retries. 
Will wait for next cycle\" )", "the class and bool if feed is a distro vulnerability", "mappings - exception: \" + str(err) ) return True def", "distro_mappings if x.from_distro == i.from_distro]: logger.info(\"Adding missing mapping: {}\".format(i)) dbsession.add(i)", "anchore_engine.subsys import logger from anchore_engine.configuration import localconfig from anchore_engine.clients.services import", "value :return: \"\"\" preflight_check_functions = [init_db_content, init_feed_registry] for fn in", "\"\"\" Checks to see if there is a task for", "yet ready\") else: # q_client = SimpleQueueClient(user=system_user[0], password=<PASSWORD>[1]) q_client =", "feeds client config check. Failing check\" ) def _system_creds(): global", "handler {}\".format(cls_tuple[0].__feed_name__)) feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1]) def do_feed_sync(msg): if \"FeedsUpdateTask\" not in", "def do_feed_sync(msg): if \"FeedsUpdateTask\" not in locals(): from anchore_engine.services.policy_engine.engine.tasks import", "get_client sleep_time = feed_config_check_backoff last_ex = None for i in", "a feed sync in the system in response to a", "= config.get(\"feeds\", {}).get(\"sync_enabled\", True) if feed_sync_enabled: logger.info(\"Feed sync task executor", "try: push_sync_task(system_user) logger.info(\"Feed Sync Trigger done, waiting for next cycle.\")", "not enqueue message for a feed sync\") raise class PolicyEngineService(ApiService):", "DistroMapping(from_distro=\"centos\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"debian\", to_distro=\"debian\", flavor=\"DEB\"), DistroMapping(from_distro=\"fedora\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ol\",", "wrap with catch block to ensure we don't double-retry on", "_init_distro_mappings(): from anchore_engine.db import session_scope, DistroMapping initial_mappings = [ DistroMapping(from_distro=\"alpine\",", "sync in the queue and if not, adds one. 
Interval", "of distro mappings\" ) with session_scope() as dbsession: distro_mappings =", "for debugging and testing purposes try: FEED_SYNC_RETRIES = int(os.getenv(\"ANCHORE_FEED_SYNC_CHECK_RETRIES\", 5))", "engine db with any data necessary at startup. :return: \"\"\"", "task executor complete\") else: logger.info(\"sync_enabled is set to false in", "else: logger.info(\"sync_enabled is set to false in config - skipping", "3\" ) feed_config_check_retries = 3 try: feed_config_check_backoff = int(os.getenv(\"FEED_CLIENT_CHECK_BACKOFF\", 5))", "= int(os.getenv(\"FEED_CLIENT_CHECK_RETRIES\", 3)) except ValueError: logger.exception( \"Error parsing env value", "FEED_SYNC_RETRY_BACKOFF = int( os.getenv(\"ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF\", 5) ) except ValueError: logger.exception( \"Error", "from anchore_engine.configuration import localconfig from anchore_engine.clients.services import simplequeue, internal_client_for from", "_check_feed_client_credentials(): from anchore_engine.services.policy_engine.engine.feeds.client import get_client sleep_time = feed_config_check_backoff last_ex =", "session_scope, DistroMapping initial_mappings = [ DistroMapping(from_distro=\"alpine\", to_distro=\"alpine\", flavor=\"ALPINE\"), DistroMapping(from_distro=\"busybox\", to_distro=\"busybox\",", "for x in distro_mappings if x.from_distro == i.from_distro]: logger.info(\"Adding missing", "not None: handler_success = True else: logger.warn(\"Feed sync task marked", "0, \"last_return\": False, \"initialized\": False, }, \"feed_sync_checker\": { \"handler\": handle_feed_sync_trigger,", "= kwargs[\"mythread\"][\"cycle_timer\"] while True: config = localconfig.get_config() feed_sync_enabled = config.get(\"feeds\",", "client config check. 
Failing check\" ) def _system_creds(): global system_user_auth", "do_feed_sync(msg): if \"FeedsUpdateTask\" not in locals(): from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask", "at system init try: logger.info( \"Checking policy engine db initialization.", "fn() except Exception as e: logger.exception( \"Preflight checks failed with", "not in locals(): from anchore_engine.services.policy_engine.engine.feeds.sync import ( get_selected_feeds_to_sync, ) handler_success", "NvdFeed, ) # from anchore_engine.subsys.logger import enable_bootstrap_logging # enable_bootstrap_logging() from", "False), (GithubFeed, False), (NvdFeed, False), ]: logger.info(\"Registering feed handler {}\".format(cls_tuple[0].__feed_name__))", "try: feeds = get_selected_feeds_to_sync(localconfig.get_config()) logger.info(\"Syncing configured feeds: {}\".format(feeds)) result =", "for i in initial_mappings: if not [x for x in", "purposes try: FEED_SYNC_RETRIES = int(os.getenv(\"ANCHORE_FEED_SYNC_CHECK_RETRIES\", 5)) except ValueError: logger.exception( \"Error", "and bool if feed is a distro vulnerability feed or", "seconds to try feeds client config check again\".format( sleep_time )", "not yet ready, will retry\") raise Exception(\"Simplequeue service not yet", "config - skipping feed sync trigger\" ) time.sleep(cycle_time) return True", "dbsession.add(i) logger.info(\"Distro mapping initialization complete\") except Exception as err: if", "skipping feed sync trigger\" ) time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES,", "logger.error(\"Could not enqueue message for a feed sync\") raise class", "exception: \" + str(err)) if handler_success: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() -", "logger.info(\"Distro mapping initialization complete\") except Exception as err: if isinstance(err,", "\"initialized\": False, }, \"feed_sync\": { \"handler\": handle_feed_sync, 
\"taskType\": \"handle_feed_sync\", \"args\":", "sync task executor activated\") try: run_feed_sync(system_user) except Exception as e:", "of 5\" ) FEED_SYNC_RETRY_BACKOFF = 5 try: feed_config_check_retries = int(os.getenv(\"FEED_CLIENT_CHECK_RETRIES\",", "try: FEED_SYNC_RETRIES = int(os.getenv(\"ANCHORE_FEED_SYNC_CHECK_RETRIES\", 5)) except ValueError: logger.exception( \"Error parsing", "wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def run_feed_sync(system_user): all_ready", "run_feed_sync(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"] ) if not all_ready: logger.info(\"simplequeue", "[__service_name__], \"cycle_timer\": 60, \"min_cycle_timer\": 60, \"max_cycle_timer\": 60, \"last_queued\": 0, \"last_return\":", "anchore_engine.subsys.metrics from anchore_engine.subsys import logger from anchore_engine.configuration import localconfig from", "check again\".format( sleep_time ) ) time.sleep(sleep_time) sleep_time += feed_config_check_backoff try:", "user-configurable but mostly for debugging and testing purposes try: FEED_SYNC_RETRIES", "import enable_bootstrap_logging # enable_bootstrap_logging() from anchore_engine.utils import timer feed_sync_queuename =", "Exception as err: if isinstance(err, IntegrityError): logger.warn(\"another process has already", "flavor=\"RHEL\"), DistroMapping(from_distro=\"ol\", to_distro=\"ol\", flavor=\"RHEL\"), DistroMapping(from_distro=\"rhel\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ubuntu\", to_distro=\"ubuntu\", flavor=\"DEB\"),", "if any throw uncaught exceptions or return False return value", "to_distro=\"debian\", flavor=\"DEB\"), DistroMapping(from_distro=\"fedora\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ol\", to_distro=\"ol\", flavor=\"RHEL\"), DistroMapping(from_distro=\"rhel\", to_distro=\"rhel\",", "from 
anchore_engine.clients.services.simplequeue import SimpleQueueClient from anchore_engine.service import ApiService, LifeCycleStages from", "x.from_distro == i.from_distro]: logger.info(\"Adding missing mapping: {}\".format(i)) dbsession.add(i) logger.info(\"Distro mapping", "executor complete\") else: logger.info(\"sync_enabled is set to false in config", "if handler_success: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\", status=\"success\", )", "if feed_sync_enabled: logger.info(\"Feed Sync task creator activated\") try: push_sync_task(system_user) logger.info(\"Feed", "_init_distro_mappings() def init_feed_registry(): # Register feeds, the tuple is the", "(NvdV2Feed, False), (VulnDBFeed, False), (VulnerabilityFeed, True), (PackagesFeed, False), (GithubFeed, False),", "\"Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value", "logger.info( \"Waiting for {} seconds to try feeds client config", "\"Could not verify feeds endpoint and/or config. 
Got exception: {}\".format(", "- exception: \" + str(err) ) return True def init_db_content():", "\" + str(err)) def handle_feed_sync_trigger(*args, **kwargs): \"\"\" Checks to see", "3)) except ValueError: logger.exception( \"Error parsing env value FEED_CLIENT_CHECK_RETRIES into", "Checks to see if there is a task for a", "queue :param args: :param kwargs: :return: \"\"\" system_user = _system_creds()", "Aborting service startup\".format( e ) ) sys.exit(1) def _init_distro_mappings(): from", "logger.warn(\"failure in feed sync handler - exception: \" + str(err))", "wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def run_feed_sync(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"]", "enqueue message for a feed sync\") raise class PolicyEngineService(ApiService): __service_name__", "logger.info(\"Feeds client credentials ok\") return True except Exception as e:", "wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def push_sync_task(system_user): all_ready", "vulnerability feed or not for cls_tuple in [ (NvdV2Feed, False),", "\"taskType\": \"handle_feed_sync\", \"args\": [], \"cycle_timer\": 3600, \"min_cycle_timer\": 1800, \"max_cycle_timer\": 100000,", "not [x for x in distro_mappings if x.from_distro == i.from_distro]:", "wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def push_sync_task(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready( [\"simplequeue\"]", "\"handle_feed_sync\", \"args\": [], \"cycle_timer\": 3600, \"min_cycle_timer\": 1800, \"max_cycle_timer\": 100000, \"last_queued\":", "while True: config = localconfig.get_config() feed_sync_enabled = config.get(\"feeds\", {}).get(\"sync_enabled\", True)", "_system_creds(): global system_user_auth if not system_user_auth: config = localconfig.get_config() system_user_auth", "ValueError: logger.exception( \"Error parsing env 
value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using", "initialization. Checking initial set of distro mappings\" ) with session_scope()", "see if there is a task for a feed sync", "__spec_dir__ = pkg_resources.resource_filename(__name__, \"swagger\") __monitors__ = { \"service_heartbeat\": { \"handler\":", "to try feeds client config check again\".format( sleep_time ) )", "5) ) except ValueError: logger.exception( \"Error parsing env value ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF", "logger.error( \"Error caught in feed sync trigger handler after all", "in locals(): from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask if \"get_selected_feeds_to_sync\" not in", ":return: \"\"\" system_user = _system_creds() logger.info(\"init args: {}\".format(kwargs)) cycle_time =", "= int( os.getenv(\"ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF\", 5) ) except ValueError: logger.exception( \"Error parsing", "for a feed sync\") raise class PolicyEngineService(ApiService): __service_name__ = \"policy_engine\"", "feed sync handler: {}\".format(e)) finally: logger.info(\"Feed sync task executor complete\")", "False), ]: logger.info(\"Registering feed handler {}\".format(cls_tuple[0].__feed_name__)) feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1]) def do_feed_sync(msg):", "backoff_time=FEED_SYNC_RETRY_BACKOFF, ) except Exception as err: logger.warn(\"failed to process task", "initial_mappings = [ DistroMapping(from_distro=\"alpine\", to_distro=\"alpine\", flavor=\"ALPINE\"), DistroMapping(from_distro=\"busybox\", to_distro=\"busybox\", flavor=\"BUSYB\"), DistroMapping(from_distro=\"centos\",", "as disabled, so skipping\") except ValueError as e: logger.warn(\"Received msg", "localconfig.get_config() system_user_auth = config[\"system_user_auth\"] return system_user_auth def process_preflight(): \"\"\" Execute", "done, waiting for next cycle.\") except Exception as e: logger.error(", "import timer feed_sync_queuename = 
\"feed_sync_tasks\" system_user_auth = None feed_sync_msg =", "is_vulnerability_feed=cls_tuple[1]) def do_feed_sync(msg): if \"FeedsUpdateTask\" not in locals(): from anchore_engine.services.policy_engine.engine.tasks", "except ValueError: logger.exception( \"Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int,", "logger.info(\"Feed sync task executor complete\") else: logger.info(\"sync_enabled is set to", "def init_db_content(): \"\"\" Initialize the policy engine db with any", "time.time() logger.info(\"FIRING: feed syncer\") try: feeds = get_selected_feeds_to_sync(localconfig.get_config()) logger.info(\"Syncing configured", "cycle\" ) finally: logger.info(\"Feed Sync task creator complete\") else: logger.info(", "import ApiService, LifeCycleStages from anchore_engine.services.policy_engine.engine.feeds.feeds import ( VulnerabilityFeed, NvdV2Feed, PackagesFeed,", "logger.info(\"Registering feed handler {}\".format(cls_tuple[0].__feed_name__)) feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1]) def do_feed_sync(msg): if \"FeedsUpdateTask\"", "== i.from_distro]: logger.info(\"Adding missing mapping: {}\".format(i)) dbsession.add(i) logger.info(\"Distro mapping initialization", "import ( get_selected_feeds_to_sync, ) handler_success = False timer = time.time()", "anchore_engine.clients.services.common import anchore_engine.subsys.servicestatus import anchore_engine.subsys.metrics from anchore_engine.subsys import logger from", "initialization complete\") except Exception as err: if isinstance(err, IntegrityError): logger.warn(\"another", "caught in feed sync trigger handler after all retries. 
Will", "has already initialized, continuing\") else: raise Exception( \"unable to initialize", "not for cls_tuple in [ (NvdV2Feed, False), (VulnDBFeed, False), (VulnerabilityFeed,", "e: logger.warn(\"Received msg of wrong type\") except Exception as err:", "task exec simplequeue.run_target_with_queue_ttl( None, queue=feed_sync_queuename, target=do_feed_sync, max_wait_seconds=30, visibility_timeout=180, retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF,", "if not system_user_auth: config = localconfig.get_config() system_user_auth = config[\"system_user_auth\"] return", "flavor=\"RHEL\"), DistroMapping(from_distro=\"debian\", to_distro=\"debian\", flavor=\"DEB\"), DistroMapping(from_distro=\"fedora\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ol\", to_distro=\"ol\", flavor=\"RHEL\"),", "own retry on the queue fetch, so wrap with catch", "PackagesFeed, VulnDBFeed, GithubFeed, feed_registry, NvdFeed, ) # from anchore_engine.subsys.logger import", "**kwargs): \"\"\" Checks to see if there is a task", "dbsession.query(DistroMapping).all() for i in initial_mappings: if not [x for x", "flavor=\"RHEL\"), DistroMapping(from_distro=\"ubuntu\", to_distro=\"ubuntu\", flavor=\"DEB\"), DistroMapping(from_distro=\"amzn\", to_distro=\"amzn\", flavor=\"RHEL\"), DistroMapping(from_distro=\"redhat\", to_distro=\"rhel\", flavor=\"RHEL\"),", "wrong type\") except Exception as err: logger.warn(\"failure in feed sync", "initialized, continuing\") else: raise Exception( \"unable to initialize default distro", "GithubFeed, feed_registry, NvdFeed, ) # from anchore_engine.subsys.logger import enable_bootstrap_logging #", "feeds: {}\".format(feeds)) result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get(\"data\")) if result is not None:", "x in distro_mappings if x.from_distro == i.from_distro]: logger.info(\"Adding missing mapping:", "{}\".format(cls_tuple[0].__feed_name__)) feed_registry.register(cls_tuple[0], is_vulnerability_feed=cls_tuple[1]) def 
do_feed_sync(msg): if \"FeedsUpdateTask\" not in locals():", "None feed_sync_msg = {\"task_type\": \"feed_sync\", \"enabled\": True} # These are", "= feed_config_check_backoff last_ex = None for i in range(feed_config_check_retries): if", "- timer, function=\"do_feed_sync\", status=\"fail\", ) def handle_feed_sync(*args, **kwargs): \"\"\" Initiates", "] # set up any data necessary at system init", "cycle: \" + str(err)) def handle_feed_sync_trigger(*args, **kwargs): \"\"\" Checks to", "for a feed sync in the queue and if not,", "env value FEED_CLIENT_CHECK_RETRIES into int, using default value of 3\"", "syncer\") try: feeds = get_selected_feeds_to_sync(localconfig.get_config()) logger.info(\"Syncing configured feeds: {}\".format(feeds)) result", "**kwargs): \"\"\" Initiates a feed sync in the system in", "else: logger.warn(\"Feed sync task marked as disabled, so skipping\") except", "task marked as disabled, so skipping\") except ValueError as e:", "inobj=feed_sync_msg): try: q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg) except: logger.error(\"Could not enqueue message for", "ANCHORE_FEED_SYNC_CHECK_FAILURE_BACKOFF into int, using default value of 5\" ) FEED_SYNC_RETRY_BACKOFF", "stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def push_sync_task(system_user):", "the system in response to a message from the queue", "int(os.getenv(\"ANCHORE_FEED_SYNC_CHECK_RETRIES\", 5)) except ValueError: logger.exception( \"Error parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES", "to_distro=\"alpine\", flavor=\"ALPINE\"), DistroMapping(from_distro=\"busybox\", to_distro=\"busybox\", flavor=\"BUSYB\"), DistroMapping(from_distro=\"centos\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"debian\", to_distro=\"debian\",", "time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, 
wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF *", "__service_name__ = \"policy_engine\" __spec_dir__ = pkg_resources.resource_filename(__name__, \"swagger\") __monitors__ = {", "set up any data necessary at system init try: logger.info(", "distro vulnerability feed or not for cls_tuple in [ (NvdV2Feed,", "import ( VulnerabilityFeed, NvdV2Feed, PackagesFeed, VulnDBFeed, GithubFeed, feed_registry, NvdFeed, )", "if result is not None: handler_success = True else: logger.warn(\"Feed", "DistroMapping(from_distro=\"debian\", to_distro=\"debian\", flavor=\"DEB\"), DistroMapping(from_distro=\"fedora\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ol\", to_distro=\"ol\", flavor=\"RHEL\"), DistroMapping(from_distro=\"rhel\",", "or not for cls_tuple in [ (NvdV2Feed, False), (VulnDBFeed, False),", "error in feed sync handler: {}\".format(e)) finally: logger.info(\"Feed sync task", "creator activated\") try: push_sync_task(system_user) logger.info(\"Feed Sync Trigger done, waiting for", "{}\".format( i + 1, feed_config_check_retries ) ) client = get_client()", "\"handler\": handle_feed_sync_trigger, \"taskType\": \"handle_feed_sync_trigger\", \"args\": [], \"cycle_timer\": 600, \"min_cycle_timer\": 300,", "anchore_engine.db import session_scope, DistroMapping initial_mappings = [ DistroMapping(from_distro=\"alpine\", to_distro=\"alpine\", flavor=\"ALPINE\"),", "import sys import pkg_resources import os import retrying from sqlalchemy.exc", "\"Waiting for {} seconds to try feeds client config check", "from anchore_engine.utils import timer feed_sync_queuename = \"feed_sync_tasks\" system_user_auth = None", "{} of {}\".format( i + 1, feed_config_check_retries ) ) client", "These are user-configurable but mostly for debugging and testing purposes", "= \"policy_engine\" __spec_dir__ = pkg_resources.resource_filename(__name__, \"swagger\") __monitors__ = { \"service_heartbeat\":", "sync\") 
time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF", "of wrong type\") except Exception as err: logger.warn(\"failure in feed", "for feeds client config check. Failing check\" ) def _system_creds():", "not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg): try: q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg) except: logger.error(\"Could not enqueue", "Exception as e: logger.warn( \"Could not verify feeds endpoint and/or", "feed_sync_enabled: logger.info(\"Feed sync task executor activated\") try: run_feed_sync(system_user) except Exception", "catch block to ensure we don't double-retry on task exec", "anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\", status=\"fail\", ) def handle_feed_sync(*args,", "sync trigger handler after all retries. Will wait for next", "3600, \"min_cycle_timer\": 1800, \"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\": False, \"initialized\":", "100000, \"last_queued\": 0, \"last_return\": False, \"initialized\": False, }, \"feed_sync\": {", "sync task executor complete\") else: logger.info(\"sync_enabled is set to false", "\"Checking policy engine db initialization. Checking initial set of distro", "- timer, function=\"do_feed_sync\", status=\"success\", ) else: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() -", "block to ensure we don't double-retry on task exec simplequeue.run_target_with_queue_ttl(", "data necessary at startup. 
:return: \"\"\" return _init_distro_mappings() def init_feed_registry():", "so wrap with catch block to ensure we don't double-retry", "and testing purposes try: FEED_SYNC_RETRIES = int(os.getenv(\"ANCHORE_FEED_SYNC_CHECK_RETRIES\", 5)) except ValueError:", "value FEED_CLIENT_CHECK_RETRIES into int, using default value of 3\" )", "parsing env value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of", "aborting service startup if any throw uncaught exceptions or return", "{}\".format(feeds)) result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get(\"data\")) if result is not None: handler_success", "= SimpleQueueClient(user=system_user[0], password=<PASSWORD>[1]) q_client = internal_client_for(SimpleQueueClient, userId=None) if not q_client.is_inqueue(name=feed_sync_queuename,", "handle_feed_sync, \"taskType\": \"handle_feed_sync\", \"args\": [], \"cycle_timer\": 3600, \"min_cycle_timer\": 1800, \"max_cycle_timer\":", "config[\"system_user_auth\"] return system_user_auth def process_preflight(): \"\"\" Execute the preflight functions,", "waiting for next cycle.\") except Exception as e: logger.error( \"Error", "task executor activated\") try: run_feed_sync(system_user) except Exception as e: logger.error(\"Caught", "import time import sys import pkg_resources import os import retrying", "e ) ) sys.exit(1) def _init_distro_mappings(): from anchore_engine.db import session_scope,", "finally: logger.info(\"Feed sync task executor complete\") else: logger.info(\"sync_enabled is set", "except Exception as err: logger.warn(\"failed to process task this cycle:", "simplequeue, internal_client_for from anchore_engine.clients.services.simplequeue import SimpleQueueClient from anchore_engine.service import ApiService,", "activated\") try: run_feed_sync(system_user) except Exception as e: logger.error(\"Caught escaped error", "feed_config_check_retries = 3 try: feed_config_check_backoff = int(os.getenv(\"FEED_CLIENT_CHECK_BACKOFF\", 5)) except ValueError:", 
"else: logger.info( \"sync_enabled is set to false in config -", "= \"feed_sync_tasks\" system_user_auth = None feed_sync_msg = {\"task_type\": \"feed_sync\", \"enabled\":", "{\"task_type\": \"feed_sync\", \"enabled\": True} # These are user-configurable but mostly", "e else: if last_ex: raise last_ex else: raise Exception( \"Exceeded", "False timer = time.time() logger.info(\"FIRING: feed syncer\") try: feeds =", "= get_selected_feeds_to_sync(localconfig.get_config()) logger.info(\"Syncing configured feeds: {}\".format(feeds)) result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get(\"data\")) if", "set to false in config - skipping feed sync\") time.sleep(cycle_time)", "config = localconfig.get_config() system_user_auth = config[\"system_user_auth\"] return system_user_auth def process_preflight():", "to ensure we don't double-retry on task exec simplequeue.run_target_with_queue_ttl( None,", "\"feed_sync_tasks\" system_user_auth = None feed_sync_msg = {\"task_type\": \"feed_sync\", \"enabled\": True}", "value ANCHORE_FEED_SYNC_CHECK_RETRIES into int, using default value of 5\" )", "feed_config_check_backoff = int(os.getenv(\"FEED_CLIENT_CHECK_BACKOFF\", 5)) except ValueError: logger.exception( \"Error parsing env", "engine db initialization. 
Checking initial set of distro mappings\" )", "not yet ready\") else: try: # This has its own", "stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def run_feed_sync(system_user):", "\"Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using default value", "def process_preflight(): \"\"\" Execute the preflight functions, aborting service startup", "= internal_client_for(SimpleQueueClient, userId=None) if not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg): try: q_client.enqueue(name=feed_sync_queuename, inobj=feed_sync_msg)", "\"cycle_timer\": 60, \"min_cycle_timer\": 60, \"max_cycle_timer\": 60, \"last_queued\": 0, \"last_return\": False,", "try: fn() except Exception as e: logger.exception( \"Preflight checks failed", "True) if feed_sync_enabled: logger.info(\"Feed Sync task creator activated\") try: push_sync_task(system_user)", "client credentials ok\") return True except Exception as e: logger.warn(", "push_sync_task(system_user) logger.info(\"Feed Sync Trigger done, waiting for next cycle.\") except", "is not None: handler_success = True else: logger.warn(\"Feed sync task", "import get_client sleep_time = feed_config_check_backoff last_ex = None for i", "None: handler_success = True else: logger.warn(\"Feed sync task marked as", "password=<PASSWORD>[1]) q_client = internal_client_for(SimpleQueueClient, userId=None) if not q_client.is_inqueue(name=feed_sync_queuename, inobj=feed_sync_msg): try:", "\"handle_feed_sync_trigger\", \"args\": [], \"cycle_timer\": 600, \"min_cycle_timer\": 300, \"max_cycle_timer\": 100000, \"last_queued\":", "1800, \"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\": False, \"initialized\": False, },", "skipping feed sync\") time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF *", "import 
session_scope, DistroMapping initial_mappings = [ DistroMapping(from_distro=\"alpine\", to_distro=\"alpine\", flavor=\"ALPINE\"), DistroMapping(from_distro=\"busybox\",", "import simplequeue, internal_client_for from anchore_engine.clients.services.simplequeue import SimpleQueueClient from anchore_engine.service import", "try: run_feed_sync(system_user) except Exception as e: logger.error(\"Caught escaped error in", "\"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\": False, \"initialized\": False, }, \"feed_sync\":", "except Exception as e: logger.exception( \"Preflight checks failed with error:", "ValueError: logger.exception( \"Error parsing env value FEED_CLIENT_CHECK_RETRIES into int, using", "the preflight functions, aborting service startup if any throw uncaught", "Exception as e: logger.exception( \"Preflight checks failed with error: {}.", "(PackagesFeed, False), (GithubFeed, False), (NvdFeed, False), ]: logger.info(\"Registering feed handler", "with catch block to ensure we don't double-retry on task", "DistroMapping(from_distro=\"busybox\", to_distro=\"busybox\", flavor=\"BUSYB\"), DistroMapping(from_distro=\"centos\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"debian\", to_distro=\"debian\", flavor=\"DEB\"), DistroMapping(from_distro=\"fedora\",", "feeds = get_selected_feeds_to_sync(localconfig.get_config()) logger.info(\"Syncing configured feeds: {}\".format(feeds)) result = FeedsUpdateTask.run_feeds_update(json_obj=msg.get(\"data\"))", "\"last_queued\": 0, \"last_return\": False, \"initialized\": False, }, \"feed_sync\": { \"handler\":", "flavor=\"RHEL\"), DistroMapping(from_distro=\"redhat\", to_distro=\"rhel\", flavor=\"RHEL\"), ] # set up any data", "\"initialized\": False, }, } __lifecycle_handlers__ = { LifeCycleStages.pre_register: [ (process_preflight,", "}, \"feed_sync\": { \"handler\": handle_feed_sync, \"taskType\": \"handle_feed_sync\", \"args\": [], \"cycle_timer\":", "time import sys import 
pkg_resources import os import retrying from", "timer, function=\"do_feed_sync\", status=\"success\", ) else: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer,", "= False timer = time.time() logger.info(\"FIRING: feed syncer\") try: feeds", "60, \"last_queued\": 0, \"last_return\": False, \"initialized\": False, }, \"feed_sync_checker\": {", "import os import retrying from sqlalchemy.exc import IntegrityError # anchore", "[ (NvdV2Feed, False), (VulnDBFeed, False), (VulnerabilityFeed, True), (PackagesFeed, False), (GithubFeed,", "{ \"handler\": anchore_engine.subsys.servicestatus.handle_service_heartbeat, \"taskType\": \"handle_service_heartbeat\", \"args\": [__service_name__], \"cycle_timer\": 60, \"min_cycle_timer\":", "feed or not for cls_tuple in [ (NvdV2Feed, False), (VulnDBFeed,", "def init_feed_registry(): # Register feeds, the tuple is the class", "ensure we don't double-retry on task exec simplequeue.run_target_with_queue_ttl( None, queue=feed_sync_queuename,", "task creator complete\") else: logger.info( \"sync_enabled is set to false", "_system_creds() logger.info(\"init args: {}\".format(kwargs)) cycle_time = kwargs[\"mythread\"][\"cycle_timer\"] while True: config", "function=\"do_feed_sync\", status=\"fail\", ) def handle_feed_sync(*args, **kwargs): \"\"\" Initiates a feed", ") # from anchore_engine.subsys.logger import enable_bootstrap_logging # enable_bootstrap_logging() from anchore_engine.utils", "300, \"max_cycle_timer\": 100000, \"last_queued\": 0, \"last_return\": False, \"initialized\": False, },", "= [ DistroMapping(from_distro=\"alpine\", to_distro=\"alpine\", flavor=\"ALPINE\"), DistroMapping(from_distro=\"busybox\", to_distro=\"busybox\", flavor=\"BUSYB\"), DistroMapping(from_distro=\"centos\", to_distro=\"rhel\",", "feed syncer\") try: feeds = get_selected_feeds_to_sync(localconfig.get_config()) logger.info(\"Syncing configured feeds: {}\".format(feeds))", "import retrying from 
sqlalchemy.exc import IntegrityError # anchore modules import", "= int(os.getenv(\"ANCHORE_FEED_SYNC_CHECK_RETRIES\", 5)) except ValueError: logger.exception( \"Error parsing env value", "value into int, using default value of 5\" ) feed_config_check_backoff", "uncaught exceptions or return False return value :return: \"\"\" preflight_check_functions", "str(err)) def handle_feed_sync_trigger(*args, **kwargs): \"\"\" Checks to see if there", "the policy engine db with any data necessary at startup.", "= None logger.info(\"Feeds client credentials ok\") return True except Exception", "= _system_creds() logger.info(\"init args: {}\".format(kwargs)) cycle_time = kwargs[\"mythread\"][\"cycle_timer\"] while True:", "Will wait for next cycle\" ) finally: logger.info(\"Feed Sync task", "to false in config - skipping feed sync trigger\" )", "\"min_cycle_timer\": 60, \"max_cycle_timer\": 60, \"last_queued\": 0, \"last_return\": False, \"initialized\": False,", "preflight_check_functions: try: fn() except Exception as e: logger.exception( \"Preflight checks", "int(os.getenv(\"FEED_CLIENT_CHECK_RETRIES\", 3)) except ValueError: logger.exception( \"Error parsing env value FEED_CLIENT_CHECK_RETRIES", "logger.info( \"Checking feeds client credentials. Attempt {} of {}\".format( i", "retry\") raise Exception(\"Simplequeue service not yet ready\") else: # q_client", "from anchore_engine.services.policy_engine.engine.feeds.client import get_client sleep_time = feed_config_check_backoff last_ex = None", "if feed_sync_enabled: logger.info(\"Feed sync task executor activated\") try: run_feed_sync(system_user) except", ") feed_config_check_backoff = 5 # service funcs (must be here)", "Failing check\" ) def _system_creds(): global system_user_auth if not system_user_auth:", "anchore_engine.configuration import localconfig from anchore_engine.clients.services import simplequeue, internal_client_for from anchore_engine.clients.services.simplequeue", "{}. 
Aborting service startup\".format( e ) ) sys.exit(1) def _init_distro_mappings():", "Exception( \"unable to initialize default distro mappings - exception: \"", "feeds, the tuple is the class and bool if feed", "value of 5\" ) FEED_SYNC_RETRY_BACKOFF = 5 try: feed_config_check_retries =", "int, using default value of 5\" ) feed_config_check_backoff = 5", "not verify feeds endpoint and/or config. Got exception: {}\".format( e", "This has its own retry on the queue fetch, so", "anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer, function=\"do_feed_sync\", status=\"success\", ) else: anchore_engine.subsys.metrics.summary_observe(", "for i in range(feed_config_check_retries): if i > 0: logger.info( \"Waiting", "import anchore_engine.subsys.servicestatus import anchore_engine.subsys.metrics from anchore_engine.subsys import logger from anchore_engine.configuration", "+ 1, feed_config_check_retries ) ) client = get_client() client =", ":param args: :param kwargs: :return: \"\"\" system_user = _system_creds() logger.info(\"init", "client credentials. Attempt {} of {}\".format( i + 1, feed_config_check_retries", "# These are user-configurable but mostly for debugging and testing", ") feed_config_check_retries = 3 try: feed_config_check_backoff = int(os.getenv(\"FEED_CLIENT_CHECK_BACKOFF\", 5)) except", "visibility_timeout=180, retries=FEED_SYNC_RETRIES, backoff_time=FEED_SYNC_RETRY_BACKOFF, ) except Exception as err: logger.warn(\"failed to", "logger.warn( \"Could not verify feeds endpoint and/or config. 
Got exception:", "message from the queue :param args: :param kwargs: :return: \"\"\"", "here) def _check_feed_client_credentials(): from anchore_engine.services.policy_engine.engine.feeds.client import get_client sleep_time = feed_config_check_backoff", "\"service_heartbeat\": { \"handler\": anchore_engine.subsys.servicestatus.handle_service_heartbeat, \"taskType\": \"handle_service_heartbeat\", \"args\": [__service_name__], \"cycle_timer\": 60,", "0: logger.info( \"Waiting for {} seconds to try feeds client", "not, adds one. Interval for firing this should be longer", "class PolicyEngineService(ApiService): __service_name__ = \"policy_engine\" __spec_dir__ = pkg_resources.resource_filename(__name__, \"swagger\") __monitors__", "using default value of 5\" ) FEED_SYNC_RETRIES = 5 try:", "else: try: # This has its own retry on the", "feed sync trigger\" ) time.sleep(cycle_time) return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF", "if not all_ready: logger.info(\"simplequeue service not yet ready, will retry\")", "\"last_return\": False, \"initialized\": False, }, \"feed_sync\": { \"handler\": handle_feed_sync, \"taskType\":", "marked as disabled, so skipping\") except ValueError as e: logger.warn(\"Received", "default value of 5\" ) FEED_SYNC_RETRIES = 5 try: FEED_SYNC_RETRY_BACKOFF", "\"Checking feeds client credentials. Attempt {} of {}\".format( i +", "in [ (NvdV2Feed, False), (VulnDBFeed, False), (VulnerabilityFeed, True), (PackagesFeed, False),", "q_client = SimpleQueueClient(user=system_user[0], password=<PASSWORD>[1]) q_client = internal_client_for(SimpleQueueClient, userId=None) if not", "raise Exception(\"Simplequeue service not yet ready\") else: try: # This", "error: {}. 
Aborting service startup\".format( e ) ) sys.exit(1) def", "= localconfig.get_config() feed_sync_enabled = config.get(\"feeds\", {}).get(\"sync_enabled\", True) if feed_sync_enabled: logger.info(\"Feed", "localconfig from anchore_engine.clients.services import simplequeue, internal_client_for from anchore_engine.clients.services.simplequeue import SimpleQueueClient", "{ \"service_heartbeat\": { \"handler\": anchore_engine.subsys.servicestatus.handle_service_heartbeat, \"taskType\": \"handle_service_heartbeat\", \"args\": [__service_name__], \"cycle_timer\":", "client = get_client() client = None logger.info(\"Feeds client credentials ok\")", "\"args\": [], \"cycle_timer\": 600, \"min_cycle_timer\": 300, \"max_cycle_timer\": 100000, \"last_queued\": 0,", "config. Got exception: {}\".format( e ) ) last_ex = e", "already initialized, continuing\") else: raise Exception( \"unable to initialize default", "\"feed_sync\": { \"handler\": handle_feed_sync, \"taskType\": \"handle_feed_sync\", \"args\": [], \"cycle_timer\": 3600,", ") with session_scope() as dbsession: distro_mappings = dbsession.query(DistroMapping).all() for i", "\" + str(err)) if handler_success: anchore_engine.subsys.metrics.summary_observe( \"anchore_monitor_runtime_seconds\", time.time() - timer,", "duration. 
:param args: :param kwargs: :return: \"\"\" system_user = _system_creds()", "= [init_db_content, init_feed_registry] for fn in preflight_check_functions: try: fn() except", "if isinstance(err, IntegrityError): logger.warn(\"another process has already initialized, continuing\") else:", "value of 5\" ) FEED_SYNC_RETRIES = 5 try: FEED_SYNC_RETRY_BACKOFF =", "system_user_auth = None feed_sync_msg = {\"task_type\": \"feed_sync\", \"enabled\": True} #", "to_distro=\"ol\", flavor=\"RHEL\"), DistroMapping(from_distro=\"rhel\", to_distro=\"rhel\", flavor=\"RHEL\"), DistroMapping(from_distro=\"ubuntu\", to_distro=\"ubuntu\", flavor=\"DEB\"), DistroMapping(from_distro=\"amzn\", to_distro=\"amzn\",", "# This has its own retry on the queue fetch,", "FeedsUpdateTask if \"get_selected_feeds_to_sync\" not in locals(): from anchore_engine.services.policy_engine.engine.feeds.sync import (", "service not yet ready\") else: # q_client = SimpleQueueClient(user=system_user[0], password=<PASSWORD>[1])", "are user-configurable but mostly for debugging and testing purposes try:", "}, \"feed_sync_checker\": { \"handler\": handle_feed_sync_trigger, \"taskType\": \"handle_feed_sync_trigger\", \"args\": [], \"cycle_timer\":", "ValueError: logger.exception( \"Error parsing env FEED_CLIENT_CHECK_BACKOFF value into int, using", "} __lifecycle_handlers__ = { LifeCycleStages.pre_register: [ (process_preflight, None), ] }", "is set to false in config - skipping feed sync\")", "1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000, ) def run_feed_sync(system_user): all_ready = anchore_engine.clients.services.common.check_services_ready(", "return True @retrying.retry( stop_max_attempt_number=FEED_SYNC_RETRIES, wait_incrementing_start=FEED_SYNC_RETRY_BACKOFF * 1000, wait_incrementing_increment=FEED_SYNC_RETRY_BACKOFF * 1000,", "\"sync_enabled is set to false in config - skipping feed", "pkg_resources.resource_filename(__name__, \"swagger\") __monitors__ = { \"service_heartbeat\": { 
\"handler\": anchore_engine.subsys.servicestatus.handle_service_heartbeat, \"taskType\":" ]
[ "available for any case. download_url = \"http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}\".format( type=file_type, docket_number=d )", "path = \"id('list')//tr//a/text()\" return list(map(self._return_download_url, self.html.xpath(path))) @staticmethod def _return_download_url(d): file_type", "import datetime from juriscraper.OralArgumentSite import OralArgumentSite class Site(OralArgumentSite): def __init__(self,", "2015)) def _get_download_urls(self): path = \"id('list')//tr//a/text()\" return list(map(self._return_download_url, self.html.xpath(path))) @staticmethod", "Name: scotus History: - 2014-07-20 - Created by <NAME>, reviewed", "return list(map(self._return_download_url, self.html.xpath(path))) @staticmethod def _return_download_url(d): file_type = \"mp3\" #", "Created by <NAME>, reviewed by MLR - 2017-10-09 - Updated", "for s in self.html.xpath(path) if not \"Date\" in s ]", "self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx\" ) self.back_scrape_iterable = list(range(2010, 2015)) def", "Supreme Court of U.S. CourtID: scotus Court Short Name: scotus", "for s in self.html.xpath(path)] def _get_case_dates(self): path = \"id('list')//tr/td[2]//text()\" return", "\"id('list')//tr//a/text()\" return list(map(self._return_download_url, self.html.xpath(path))) @staticmethod def _return_download_url(d): file_type = \"mp3\"", "Court Short Name: scotus History: - 2014-07-20 - Created by", "from juriscraper.OralArgumentSite import OralArgumentSite class Site(OralArgumentSite): def __init__(self, *args, **kwargs):", "*args, **kwargs): super(Site, self).__init__(*args, **kwargs) self.court_id = self.__module__ self.url =", "- 2014-07-20 - Created by <NAME>, reviewed by MLR -", "by MLR. 
\"\"\" from datetime import datetime from juriscraper.OralArgumentSite import", "2014-07-20 - Created by <NAME>, reviewed by MLR - 2017-10-09", "juriscraper.OralArgumentSite import OralArgumentSite class Site(OralArgumentSite): def __init__(self, *args, **kwargs): super(Site,", "# or 'wma' is also available for any case. download_url", "Court of U.S. CourtID: scotus Court Short Name: scotus History:", "= \"id('list')//tr//a/text()\" return list(self.html.xpath(path)) def _download_backwards(self, year): self.url = (", "= list(range(2010, 2015)) def _get_download_urls(self): path = \"id('list')//tr//a/text()\" return list(map(self._return_download_url,", "in s ] def _get_docket_numbers(self): path = \"id('list')//tr//a/text()\" return list(self.html.xpath(path))", "path = \"id('list')//tr//a/text()\" return list(self.html.xpath(path)) def _download_backwards(self, year): self.url =", "list(range(2010, 2015)) def _get_download_urls(self): path = \"id('list')//tr//a/text()\" return list(map(self._return_download_url, self.html.xpath(path)))", "OralArgumentSite class Site(OralArgumentSite): def __init__(self, *args, **kwargs): super(Site, self).__init__(*args, **kwargs)", "class Site(OralArgumentSite): def __init__(self, *args, **kwargs): super(Site, self).__init__(*args, **kwargs) self.court_id", "def _get_docket_numbers(self): path = \"id('list')//tr//a/text()\" return list(self.html.xpath(path)) def _download_backwards(self, year):", "also available for any case. download_url = \"http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}\".format( type=file_type, docket_number=d", "@staticmethod def _return_download_url(d): file_type = \"mp3\" # or 'wma' is", "_get_case_dates(self): path = \"id('list')//tr/td[2]//text()\" return [ datetime.strptime(s, \"%m/%d/%y\").date() for s", "Updated by MLR. 
\"\"\" from datetime import datetime from juriscraper.OralArgumentSite", "in self.html.xpath(path)] def _get_case_dates(self): path = \"id('list')//tr/td[2]//text()\" return [ datetime.strptime(s,", "is also available for any case. download_url = \"http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}\".format( type=file_type,", "download_url = \"http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}\".format( type=file_type, docket_number=d ) return download_url def _get_case_names(self):", "- Created by <NAME>, reviewed by MLR - 2017-10-09 -", "= \"id('list')//tr/td/span/text()\" return [s.lstrip(\". \") for s in self.html.xpath(path)] def", "datetime.strptime(s, \"%m/%d/%y\").date() for s in self.html.xpath(path) if not \"Date\" in", "list(self.html.xpath(path)) def _download_backwards(self, year): self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio/%s\" % year", "self.html.xpath(path) if not \"Date\" in s ] def _get_docket_numbers(self): path", "by <NAME>, reviewed by MLR - 2017-10-09 - Updated by", "if not \"Date\" in s ] def _get_docket_numbers(self): path =", "2017-10-09 - Updated by MLR. 
\"\"\" from datetime import datetime", "CourtID: scotus Court Short Name: scotus History: - 2014-07-20 -", "docket_number=d ) return download_url def _get_case_names(self): path = \"id('list')//tr/td/span/text()\" return", "from datetime import datetime from juriscraper.OralArgumentSite import OralArgumentSite class Site(OralArgumentSite):", "datetime import datetime from juriscraper.OralArgumentSite import OralArgumentSite class Site(OralArgumentSite): def", "file_type = \"mp3\" # or 'wma' is also available for", "def _get_case_dates(self): path = \"id('list')//tr/td[2]//text()\" return [ datetime.strptime(s, \"%m/%d/%y\").date() for", "\"id('list')//tr/td[2]//text()\" return [ datetime.strptime(s, \"%m/%d/%y\").date() for s in self.html.xpath(path) if", "scotus Court Short Name: scotus History: - 2014-07-20 - Created", "\") for s in self.html.xpath(path)] def _get_case_dates(self): path = \"id('list')//tr/td[2]//text()\"", "\"id('list')//tr/td/span/text()\" return [s.lstrip(\". \") for s in self.html.xpath(path)] def _get_case_dates(self):", "'wma' is also available for any case. download_url = \"http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}\".format(", "\"http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}\".format( type=file_type, docket_number=d ) return download_url def _get_case_names(self): path =", "return download_url def _get_case_names(self): path = \"id('list')//tr/td/span/text()\" return [s.lstrip(\". \")", "\"Date\" in s ] def _get_docket_numbers(self): path = \"id('list')//tr//a/text()\" return", "self.court_id = self.__module__ self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx\" ) self.back_scrape_iterable =", "History: - 2014-07-20 - Created by <NAME>, reviewed by MLR", "self.back_scrape_iterable = list(range(2010, 2015)) def _get_download_urls(self): path = \"id('list')//tr//a/text()\" return", "or 'wma' is also available for any case. 
download_url =", "path = \"id('list')//tr/td[2]//text()\" return [ datetime.strptime(s, \"%m/%d/%y\").date() for s in", "self.html.xpath(path))) @staticmethod def _return_download_url(d): file_type = \"mp3\" # or 'wma'", "datetime from juriscraper.OralArgumentSite import OralArgumentSite class Site(OralArgumentSite): def __init__(self, *args,", "path = \"id('list')//tr/td/span/text()\" return [s.lstrip(\". \") for s in self.html.xpath(path)]", "reviewed by MLR - 2017-10-09 - Updated by MLR. \"\"\"", ") self.back_scrape_iterable = list(range(2010, 2015)) def _get_download_urls(self): path = \"id('list')//tr//a/text()\"", "def _get_case_names(self): path = \"id('list')//tr/td/span/text()\" return [s.lstrip(\". \") for s", "= \"id('list')//tr/td[2]//text()\" return [ datetime.strptime(s, \"%m/%d/%y\").date() for s in self.html.xpath(path)", "[ datetime.strptime(s, \"%m/%d/%y\").date() for s in self.html.xpath(path) if not \"Date\"", "\"%m/%d/%y\").date() for s in self.html.xpath(path) if not \"Date\" in s", "super(Site, self).__init__(*args, **kwargs) self.court_id = self.__module__ self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx\"", "] def _get_docket_numbers(self): path = \"id('list')//tr//a/text()\" return list(self.html.xpath(path)) def _download_backwards(self,", "\"mp3\" # or 'wma' is also available for any case.", "for any case. download_url = \"http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}\".format( type=file_type, docket_number=d ) return", "scotus History: - 2014-07-20 - Created by <NAME>, reviewed by", "\"\"\"Scraper for Supreme Court of U.S. 
CourtID: scotus Court Short", "= \"http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}\".format( type=file_type, docket_number=d ) return download_url def _get_case_names(self): path", "<reponame>EvandoBlanco/juriscraper<filename>juriscraper/oral_args/united_states/federal_appellate/scotus.py<gh_stars>100-1000 \"\"\"Scraper for Supreme Court of U.S. CourtID: scotus Court", "s in self.html.xpath(path) if not \"Date\" in s ] def", "_get_case_names(self): path = \"id('list')//tr/td/span/text()\" return [s.lstrip(\". \") for s in", "def __init__(self, *args, **kwargs): super(Site, self).__init__(*args, **kwargs) self.court_id = self.__module__", "__init__(self, *args, **kwargs): super(Site, self).__init__(*args, **kwargs) self.court_id = self.__module__ self.url", "any case. download_url = \"http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}\".format( type=file_type, docket_number=d ) return download_url", "_return_download_url(d): file_type = \"mp3\" # or 'wma' is also available", "type=file_type, docket_number=d ) return download_url def _get_case_names(self): path = \"id('list')//tr/td/span/text()\"", "Site(OralArgumentSite): def __init__(self, *args, **kwargs): super(Site, self).__init__(*args, **kwargs) self.court_id =", "s in self.html.xpath(path)] def _get_case_dates(self): path = \"id('list')//tr/td[2]//text()\" return [", "- 2017-10-09 - Updated by MLR. \"\"\" from datetime import", "by MLR - 2017-10-09 - Updated by MLR. \"\"\" from", "**kwargs) self.court_id = self.__module__ self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx\" ) self.back_scrape_iterable", "MLR. 
\"\"\" from datetime import datetime from juriscraper.OralArgumentSite import OralArgumentSite", "= \"id('list')//tr//a/text()\" return list(map(self._return_download_url, self.html.xpath(path))) @staticmethod def _return_download_url(d): file_type =", "import OralArgumentSite class Site(OralArgumentSite): def __init__(self, *args, **kwargs): super(Site, self).__init__(*args,", "= self.__module__ self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx\" ) self.back_scrape_iterable = list(range(2010,", "list(map(self._return_download_url, self.html.xpath(path))) @staticmethod def _return_download_url(d): file_type = \"mp3\" # or", "self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio/%s\" % year ) self.html = self._download()", "_download_backwards(self, year): self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio/%s\" % year ) self.html", "for Supreme Court of U.S. CourtID: scotus Court Short Name:", "def _download_backwards(self, year): self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio/%s\" % year )", "self).__init__(*args, **kwargs) self.court_id = self.__module__ self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx\" )", "case. download_url = \"http://www.supremecourt.gov/media/audio/{type}files/{docket_number}.{type}\".format( type=file_type, docket_number=d ) return download_url def", "not \"Date\" in s ] def _get_docket_numbers(self): path = \"id('list')//tr//a/text()\"", "= ( \"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx\" ) self.back_scrape_iterable = list(range(2010, 2015)) def _get_download_urls(self):", "def _get_download_urls(self): path = \"id('list')//tr//a/text()\" return list(map(self._return_download_url, self.html.xpath(path))) @staticmethod def", "in self.html.xpath(path) if not \"Date\" in s ] def _get_docket_numbers(self):", "[s.lstrip(\". 
\") for s in self.html.xpath(path)] def _get_case_dates(self): path =", "- Updated by MLR. \"\"\" from datetime import datetime from", "s ] def _get_docket_numbers(self): path = \"id('list')//tr//a/text()\" return list(self.html.xpath(path)) def", "self.__module__ self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx\" ) self.back_scrape_iterable = list(range(2010, 2015))", "def _return_download_url(d): file_type = \"mp3\" # or 'wma' is also", "return list(self.html.xpath(path)) def _download_backwards(self, year): self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio/%s\" %", "\"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx\" ) self.back_scrape_iterable = list(range(2010, 2015)) def _get_download_urls(self): path =", "return [ datetime.strptime(s, \"%m/%d/%y\").date() for s in self.html.xpath(path) if not", "Short Name: scotus History: - 2014-07-20 - Created by <NAME>,", "of U.S. CourtID: scotus Court Short Name: scotus History: -", "year): self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio/%s\" % year ) self.html =", "\"id('list')//tr//a/text()\" return list(self.html.xpath(path)) def _download_backwards(self, year): self.url = ( \"http://www.supremecourt.gov/oral_arguments/argument_audio/%s\"", "( \"http://www.supremecourt.gov/oral_arguments/argument_audio.aspx\" ) self.back_scrape_iterable = list(range(2010, 2015)) def _get_download_urls(self): path", "<NAME>, reviewed by MLR - 2017-10-09 - Updated by MLR.", "= \"mp3\" # or 'wma' is also available for any", "U.S. CourtID: scotus Court Short Name: scotus History: - 2014-07-20", "**kwargs): super(Site, self).__init__(*args, **kwargs) self.court_id = self.__module__ self.url = (", "self.html.xpath(path)] def _get_case_dates(self): path = \"id('list')//tr/td[2]//text()\" return [ datetime.strptime(s, \"%m/%d/%y\").date()", "MLR - 2017-10-09 - Updated by MLR. 
\"\"\" from datetime", ") return download_url def _get_case_names(self): path = \"id('list')//tr/td/span/text()\" return [s.lstrip(\".", "download_url def _get_case_names(self): path = \"id('list')//tr/td/span/text()\" return [s.lstrip(\". \") for", "\"\"\" from datetime import datetime from juriscraper.OralArgumentSite import OralArgumentSite class", "_get_download_urls(self): path = \"id('list')//tr//a/text()\" return list(map(self._return_download_url, self.html.xpath(path))) @staticmethod def _return_download_url(d):", "return [s.lstrip(\". \") for s in self.html.xpath(path)] def _get_case_dates(self): path", "_get_docket_numbers(self): path = \"id('list')//tr//a/text()\" return list(self.html.xpath(path)) def _download_backwards(self, year): self.url" ]
[ "metavar='N', help='print frequency (default: 20)') parser.add_argument('--ckpt', default='./ckpt', help='folder to output", "size') parser.add_argument('--num_boxes', default=4, type=int, help='num of boxes for each image')", "import logsigsum as fusion_func print('fused_sum loaded!!') elif args.fusion_function == 'naive_sum':", "'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\\t'.format( i, len(val_loader), batch_time=batch_time, loss=losses,", "output_factual = fusion_function(output_vision, output_coord, output_fusion) # warning: loss_fusion is the", "vision_model.eval() coord_model.eval() fusion_model.eval() end = time.time() for i, (global_img_tensors, box_tensors,", "torch.no_grad(): output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision =", "epoch, args.lr_steps, 'fusion') # train for one epoch train(train_loader, model_list,", "[] targets_list = [] # unpack three models [vision_model, coord_model,", "metavar='N', help='number of total epochs to run') parser.add_argument('--start_epoch', default=None, type=int,", "fusion_function, criterion, epoch=epoch, class_to_idx=dataset_val.classes_dict) else: loss = 100 # remember", "dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True ) val_loader = torch.utils.data.DataLoader(", "print('interaction loaded!!') else: print(\"no such a coordinate model!\") # create", "epoch, criterion): global args batch_time = AverageMeter() data_time = AverageMeter()", "AverageMeter() acc_top1 = AverageMeter() acc_top5 = AverageMeter() # load three", "to latest checkpoint (default: none)') # model, image&feature dim and", "args.model_vision == 'rgb_roi': from model.model_lib import BboxVisualModel as RGBModel print('rgb_roi", "\"No checkpoint found at '{}'\".format(args.resume_fusion) print(\"=> loading checkpoint 
'{}'\".format(args.resume_fusion)) checkpoint", "class_to_idx=dataset_val.classes_dict) return # Counterfactual inference by trying a list of", "i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(train_loader): data_time.update(time.time() - end)", "- output_counterfactual * weight acc1, acc5 = accuracy(output_debiased.cpu(), video_label, topk=(1,", "refresh the optimizer optimizer_vision.zero_grad() optimizer_coord.zero_grad() optimizer_fusion.zero_grad() loss = loss_vision +", "of three branch activation results acc1, acc5 = accuracy(output_factual.cpu(), video_label,", "= fusion_function(output_vision_subtrahend, torch.tensor(0.0), torch.tensor(0.0)) for j in range(search_length): weight =", "epoch train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion) if (epoch+1) >=", "from callbacks import AverageMeter from data_utils.causal_data_loader_frames import VideoFolder from utils", "{batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\\t' 'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\\t' 'Acc1_0.5 {acc_top1_05.val:.1f}", "lr=args.lr, weight_decay=args.weight_decay) optimizer_list = [optimizer_vision, optimizer_coord, optimizer_fusion] criterion = torch.nn.CrossEntropyLoss()", "enumerate(val_loader): # compute output with torch.no_grad(): output_vision, feature_vision = vision_model(global_img_tensors.cuda(),", "fusion_function(output_vision, output_coord, output_fusion) # loss_fusion is the loss of output_fusion(fused,", "box_categories, box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, len(val_loader.dataset.classes))) output_coord, feature_coord =", "LR decayed by 10\"\"\" decay = 0.1 ** (sum(epoch >=", "parser.add_argument('--resume_vision', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')", "type=int, help='num of boxes for each image') parser.add_argument('--num_frames', default=16, 
type=int,", "loss_factual = criterion(output_factual, video_label.long().cuda()) # Measure the accuracy of the", "= fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(train_loader.dataset.classes))) output_factual = fusion_function(output_vision,", "'-b', default=16, type=int, metavar='N', help='mini-batch size') parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,", "data_time.update(time.time() - end) # obtain the activation and vision features", "loss losses.update(loss_factual.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # refresh the", "checkpoints') parser.add_argument('--resume_vision', default='', type=str, metavar='PATH', help='path to latest checkpoint (default:", "validate(val_loader, model_list, fusion_function, criterion, class_to_idx=dataset_val.classes_dict) return # Counterfactual inference by", "best_loss = checkpoint['best_loss'] coord_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\"", "100 # remember best loss and save checkpoint is_best =", "optimizer_vision.zero_grad() optimizer_coord.zero_grad() optimizer_fusion.zero_grad() loss = loss_vision + loss_coord + loss_factual", "remember best loss and save checkpoint is_best = loss <", "{batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f}", "train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion): global args batch_time =", "len(train_loader.dataset.classes))) output_factual = fusion_function(output_vision, output_coord, output_fusion) # loss_fusion is the", "accuracy(output.cpu(), video_label, topk=(1, 5)) if args.evaluate: logits_matrix.append(output.cpu().data.numpy()) targets_list.append(video_label.cpu().numpy()) # 
measure", "topk=(1, 5)) if args.evaluate: logits_matrix.append(output.cpu().data.numpy()) targets_list.append(video_label.cpu().numpy()) # measure accuracy and", "args.search_stride == 0: loss = validate(val_loader, model_list, fusion_function, criterion, epoch=epoch,", "help='number of data loading workers (default: 4)') parser.add_argument('-e', '--evaluate', dest='evaluate',", "topk: correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res", "data') parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json', help='path to the json file with", "decay = 0.1 ** (sum(epoch >= np.array(lr_steps))) lr = args.lr", "= checkpoint['best_loss'] coord_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_coord,", "{})\" .format(args.resume_vision, checkpoint['epoch'])) # optionally resume coord model from a", "loaded!!') else: print(\"no such a coordinate model!\") # create fusion", "output = output_factual loss = loss_vision acc1, acc5 = accuracy(output.cpu(),", "print('Epoch: [{0}][{1}/{2}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t' 'Loss {loss.val:.4f}", "torch.backends.cudnn as cudnn from callbacks import AverageMeter from data_utils.causal_data_loader_frames import", "i, len(val_loader), batch_time=batch_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5, )) if args.evaluate: logits_matrix", "video_label.long().cuda()) loss_fusion = criterion(output_fusion, video_label.long().cuda()) loss_factual = criterion(output_factual, video_label.long().cuda()) #", "= AverageMeter() acc_top1 = AverageMeter() acc_top5 = AverageMeter() logits_matrix =", "default=0.9, type=float, metavar='M', help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float, metavar='W', help='weight", 
"acc_top1_05=search_dict['acc_1_alpha_0.5'], acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0'])) for k in range(search_length): print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg,", "help='the index of gpu you want to use') best_loss =", "accuracy and loss losses.update(loss_factual.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) #", "VideoGlobalModel as RGBModel print('global_i3d loaded!!') elif args.model_vision == 'rgb_roi': from", "with ground truth labels') parser.add_argument('--dataset', default='smth_smth', help='which dataset to train')", "'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5", "# compute output with torch.no_grad(): output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories,", "for the specified values of k\"\"\" with torch.no_grad(): maxk =", "= coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(train_loader.dataset.classes))) #", "outputs output_factual = fusion_function(output_vision, output_coord, output_fusion) # warning: loss_fusion is", "batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'], acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'], acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0'])) for k in range(search_length):", "args.gpu_index print(args) # create vision model if args.model_vision == 'global_i3d':", "coord-based features') parser.add_argument('--size', default=224, type=int, metavar='N', help='primary image input size')", "loss=losses, acc_top1=acc_top1, acc_top5=acc_top5, )) if args.evaluate: logits_matrix = np.concatenate(logits_matrix) targets_list", "from 
collections import OrderedDict import torch import torch.backends.cudnn as cudnn", "import AverageMeter from data_utils.causal_data_loader_frames import VideoFolder from utils import save_results", "# remember best loss and save checkpoint is_best = loss", "is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname))) def train(train_loader, model_list, fusion_function, optimizer_list, epoch,", "acc1, acc5 = accuracy(output_factual.cpu(), video_label, topk=(1, 5)) # record the", "0 or i + 1 == len(val_loader): print('Cf-Inference: [{0}/{1}]\\t' 'Time", "checkpoint (default: none)') parser.add_argument('--resume_coord', default='', type=str, metavar='PATH', help='path to latest", "# optionally resume coord model from a checkpoint if args.resume_coord:", "args.epochs)): adjust_learning_rate(optimizer_vision, epoch, args.lr_steps, 'vision') adjust_learning_rate(optimizer_coord, epoch, args.lr_steps, 'coord') adjust_learning_rate(optimizer_fusion,", "res.append(correct_k.mul_(100.0 / batch_size)) return res if __name__ == '__main__': main()", "fusion_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_fusion, checkpoint['epoch'])) if", "vision_model.train() coord_model.train() fusion_model.train() end = time.time() for i, (global_img_tensors, box_tensors,", "fusion_func print('fused_sum loaded!!') elif args.fusion_function == 'naive_sum': from fusion_function import", "box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # obtain the activation", "cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=dataset_val.classes_dict) return print('training begin...') for epoch", "checkpoint['best_loss'] coord_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_coord, checkpoint['epoch']))", "fusion_model] = model_list # load four optimizers, 
including the one", "* 0.8 elif branch_name == 'coord': for param_group in optimizer.param_groups:", "feature_vision.detach() feature_coord_detached = feature_coord.detach() # obtain the activation of fusion", "topk=(1, 5)) # record the accuracy and loss losses.update(loss_factual.item(), global_img_tensors.size(0))", "args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index print(args) # create vision", "1))].avg, search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg) return def save_checkpoint(state, is_best, filename): torch.save(state, filename", "fusion_function import naivesum as fusion_func print('naive_sum loaded!!') else: print('no such", "* weight acc1, acc5 = accuracy(output_debiased.cpu(), video_label, topk=(1, 5)) search_dict['acc_1_alpha_{}'.format(round(search_list[j],", "return # Counterfactual inference by trying a list of hyperparameter", "optimizer.param_groups: param_group['lr'] = lr else: for param_group in optimizer.param_groups: param_group['lr']", "acc_top1_10=search_dict['acc_1_alpha_1.0'])) for k in range(search_length): print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg, search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg)", "from tqdm import tqdm parser = argparse.ArgumentParser(description='Counterfactual CAR') # Path,", "weight acc1, acc5 = accuracy(output_debiased.cpu(), video_label, topk=(1, 5)) search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(),", "branch output_fusion = fusion_model(feature_vision.cuda(), feature_coord.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) #", "checkpoint is_best = loss < best_loss best_loss = min(loss, best_loss)", "with torch.no_grad(): output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision", "len(val_loader.dataset.classes))) # detach the computation graph, avoid the 
gradient confusion", "cudnn from callbacks import AverageMeter from data_utils.causal_data_loader_frames import VideoFolder from", "video_label, topk=(1, 5)) if args.evaluate: logits_matrix.append(output.cpu().data.numpy()) targets_list.append(video_label.cpu().numpy()) # measure accuracy", "epoch, args.lr_steps, 'vision') adjust_learning_rate(optimizer_coord, epoch, args.lr_steps, 'coord') adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps,", "import VideoFolder from utils import save_results from tqdm import tqdm", "on validation set') parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference model on validation", "= [] # unpack three models [vision_model, coord_model, fusion_model] =", "related arguments parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/', help='path to the folder with", "num_boxes=args.num_boxes, file_input=args.json_data_val, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=True, if_augment=True, ) # create", "import time import numpy as np import random from collections", "for fusing activations from each branch') parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N',", "search_list, class_to_idx=None): batch_time = AverageMeter() search_length = len(search_list) search_dict =", "data loading workers (default: 4)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate", "AverageMeter() # load three model branches [vision_model, coord_model, fusion_model] =", "= FusionModel(args) # create the fusion function for the activation", "type=bool, help='whether or not train with multi GPUs') parser.add_argument('--gpu_index', type=str,", "parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json', help='path to the json file with ground", "experiment for checkpoints and logs') parser.add_argument('--print_freq', 
'-p', default=20, type=int, metavar='N',", "= checkpoint['epoch'] best_loss = checkpoint['best_loss'] vision_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}'", "= min(loss, best_loss) save_checkpoint( { 'epoch': epoch + 1, 'state_dict':", "'coord': for param_group in optimizer.param_groups: param_group['lr'] = lr elif branch_name", "argparse.ArgumentParser(description='Counterfactual CAR') # Path, dataset and log related arguments parser.add_argument('--root_frames',", "checkpoint['epoch'])) if args.resume_fusion: assert os.path.isfile(args.resume_fusion), \"No checkpoint found at '{}'\".format(args.resume_fusion)", "drop_last=True, pin_memory=True ) val_loader = torch.utils.data.DataLoader( dataset_val, drop_last=True, batch_size=args.batch_size, shuffle=False,", "for the model') parser.add_argument('--num_classes', default=174, type=int, help='num of class in", "0.1 ** (sum(epoch >= np.array(lr_steps))) lr = args.lr * decay", "learning rate to the initial LR decayed by 10\"\"\" decay", "np.linspace(0.0, 1.0, 11) # factual inference (vanilla test stage) if", ")) if args.evaluate: logits_matrix = np.concatenate(logits_matrix) targets_list = np.concatenate(targets_list) save_results(logits_matrix,", "default='interaction') parser.add_argument('--model_fusion', default='concat_fusion') parser.add_argument('--fusion_function', default='fused_sum', type=str, help='function for fusing activations", "else: print(\"no such a coordinate model!\") # create fusion model", "if args.resume_fusion: assert os.path.isfile(args.resume_fusion), \"No checkpoint found at '{}'\".format(args.resume_fusion) print(\"=>", "AverageMeter() acc_top5 = AverageMeter() # load three model branches [vision_model,", "5)) # record the accuracy and loss losses.update(loss_factual.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(),", "the initial LR decayed by 10\"\"\" decay = 0.1 **", "def save_checkpoint(state, is_best, filename): torch.save(state, 
filename + '_latest.pth.tar') if is_best:", "is None: args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] vision_model.load_state_dict(checkpoint['state_dict']) print(\"=>", "len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5)) def validate(val_loader, model_list, fusion_function,", "or value after fusion function output = output_factual loss =", "def cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=None): batch_time = AverageMeter() search_length", "def accuracy(output, target, topk=(1,)): \"\"\"Computes the accuracy over the k", "True # create training and validation dataset dataset_train = VideoFolder(root=args.root_frames,", "found at '{}'\".format(args.resume_fusion) print(\"=> loading checkpoint '{}'\".format(args.resume_fusion)) checkpoint = torch.load(args.resume_fusion)", "OrderedDict import torch import torch.backends.cudnn as cudnn from callbacks import", "global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # refresh the optimizer optimizer_vision.zero_grad() optimizer_coord.zero_grad() optimizer_fusion.zero_grad()", "loaded checkpoint '{}' (epoch {})\" .format(args.resume_fusion, checkpoint['epoch'])) if args.start_epoch is", "optimizer_fusion] = optimizer_list # switch to train mode vision_model.train() coord_model.train()", "args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] fusion_model.load_state_dict(checkpoint['state_dict'])", "loss = 100 # remember best loss and save checkpoint", "frames_duration=args.num_frames, args=args, is_val=False, if_augment=True, ) dataset_val = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_val,", "load model branch vision_model = RGBModel(args) coord_model = BboxModel(args) fusion_model", "{ 'epoch': epoch + 1, 'state_dict': fusion_model.state_dict(), 'best_loss': best_loss, },", 
"args.fusion_function == 'naive_sum': from fusion_function import naivesum as fusion_func print('naive_sum", "output_factual = fusion_function(output_vision, output_coord, output_fusion) # counterfactual inference output_vision_subtrahend =", "help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float, metavar='W', help='weight decay (default: 1e-4)')", "and others related arguments parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number", "each branch') parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension for", "(epoch {})\" .format(args.resume_fusion, checkpoint['epoch'])) if args.start_epoch is None: args.start_epoch =", "args.cf_inference_group: cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=dataset_val.classes_dict) return print('training begin...') for", "video_label.long().cuda()) loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_fusion, video_label.long().cuda()) loss_factual", "= VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_train, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=False, if_augment=True, )", "parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension for coord-based features')", "with torch.no_grad(): # factual inference output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories,", "in topk: correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return", "# create training and validation loader train_loader = torch.utils.data.DataLoader( dataset_train,", "for k in range(search_length): print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg, 
search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg) return", "model.model_lib import BboxInteractionLatentModel as BboxModel print('interaction loaded!!') else: print(\"no such", "fusing activations from each branch') parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N', help='intermediate", "# unpack three models [vision_model, coord_model, fusion_model] = model_list #", "optimizer_coord, optimizer_fusion] = optimizer_list # switch to train mode vision_model.train()", "'{}'\".format(args.resume_vision) print(\"=> loading checkpoint '{}'\".format(args.resume_vision)) checkpoint = torch.load(args.resume_vision) if args.start_epoch", "lr else: for param_group in optimizer.param_groups: param_group['lr'] = lr def", "record loss losses.update(loss.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # measure", "len(val_loader): print('Cf-Inference: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\\t' 'Acc1_0.2", "search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() [vision_model, coord_model,", "import tqdm parser = argparse.ArgumentParser(description='Counterfactual CAR') # Path, dataset and", "and save checkpoint is_best = loss < best_loss best_loss =", "= parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index print(args) # create vision model", "one designed for uniform assumption [optimizer_vision, optimizer_coord, optimizer_fusion] = optimizer_list", "args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] fusion_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint", "branch output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = 
output_fusion.view((-1, len(val_loader.dataset.classes))) #", "as RGBModel print('rgb_roi loaded!!') else: print(\"no such a vision model!\")", "type=int, metavar='N', help='print frequency (default: 20)') parser.add_argument('--ckpt', default='./ckpt', help='folder to", "torch.tensor(0.0), torch.tensor(0.0)) for j in range(search_length): weight = search_list[j] output_debiased", "RGBModel print('rgb_roi loaded!!') else: print(\"no such a vision model!\") #", "function for the activation of three branches if args.fusion_function ==", "is_val=True, if_augment=True, ) # create training and validation loader train_loader", "model.model_lib import ConcatFusionModel as FusionModel print('concat_fusion loaded!!') else: print('no such", "accuracy over the k top predictions for the specified values", "== len(val_loader): print('Cf-Inference: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\\t'", "None: args.start_epoch = 0 cudnn.benchmark = True # create training", "graph, avoid the gradient confusion feature_vision_detached = feature_vision.detach() feature_coord_detached =", "coord_model.cuda() fusion_model = fusion_model.cuda() # optionally resume vision model from", "weight = search_list[j] output_debiased = output_factual - output_counterfactual * weight", "p.requires_grad, coord_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_fusion = torch.optim.SGD(filter(lambda p: p.requires_grad,", "= torch.load(args.resume_fusion) if args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss", "# counterfactual inference output_vision_subtrahend = output_vision output_counterfactual = fusion_function(output_vision_subtrahend, torch.tensor(0.0),", "to decay learning rate by 10') parser.add_argument('--momentum', default=0.9, type=float, metavar='M',", "default=[24, 35, 45], type=float, nargs=\"+\", metavar='LRSteps', help='epochs to decay 
learning", "os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index print(args) # create vision model if args.model_vision", "1))].update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time batch_time.update(time.time() - end) end", "setting and others related arguments parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',", "with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred", "end = time.time() if i % args.print_freq == 0 or", "and (epoch + 1) % args.search_stride == 0: loss =", "for each image') parser.add_argument('--num_frames', default=16, type=int, help='num of frames for", "args.resume_fusion: assert os.path.isfile(args.resume_fusion), \"No checkpoint found at '{}'\".format(args.resume_fusion) print(\"=> loading", "inference (vanilla test stage) if args.evaluate: validate(val_loader, model_list, fusion_function, criterion,", "global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time batch_time.update(time.time()", "({batch_time.avg:.3f})\\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t'", "param_group['lr'] = lr elif branch_name == 'fusion': for param_group in", "= vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, len(train_loader.dataset.classes))) #", "= torch.utils.data.DataLoader( dataset_val, drop_last=True, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False ) model_list", "'rgb_roi': from model.model_lib import BboxVisualModel as RGBModel print('rgb_roi loaded!!') else:", "a checkpoint if args.resume_coord: assert os.path.isfile(args.resume_coord), \"No checkpoint found at", "features') parser.add_argument('--size', default=224, type=int, metavar='N', help='primary image input size') 
parser.add_argument('--num_boxes',", "output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # obtain the activation of fusion", ") dataset_val = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_val, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=True,", "latest checkpoint (default: none)') parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH', help='path to", "return print('training begin...') for epoch in tqdm(range(args.start_epoch, args.epochs)): adjust_learning_rate(optimizer_vision, epoch,", "args.print_freq == 0 or i + 1 == len(val_loader): print('Test:", "validate(val_loader, model_list, fusion_function, criterion, epoch=None, class_to_idx=None): batch_time = AverageMeter() losses", "by 10') parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.0001,", "json file with ground truth labels') parser.add_argument('--dataset', default='smth_smth', help='which dataset", "print('fused_sum loaded!!') elif args.fusion_function == 'naive_sum': from fusion_function import naivesum", "batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc_top1", "{acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format( i, len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'], acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'], acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0']))", "args.evaluate: logits_matrix.append(output.cpu().data.numpy()) targets_list.append(video_label.cpu().numpy()) # measure accuracy and record loss losses.update(loss.item(),", "num_workers=args.workers, pin_memory=False ) model_list = [vision_model, coord_model, fusion_model] optimizer_vision =", "== 0 or i + 1 == len(val_loader): print('Test: [{0}/{1}]\\t'", "performance every n 
strides') # train mode, hardware setting and", "image input size') parser.add_argument('--num_boxes', default=4, type=int, help='num of boxes for", "help='path to the folder with frames') parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json', help='path", "torch.no_grad(): # factual inference output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(),", "load three model branches [vision_model, coord_model, fusion_model] = model_list #", "loaded!!') else: print('no such a fusion function!') fusion_function = fusion_func()", "losses = AverageMeter() acc_top1 = AverageMeter() acc_top5 = AverageMeter() #", "as BboxModel print('interaction loaded!!') else: print(\"no such a coordinate model!\")", "({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format( epoch, i, len(train_loader),", "import torch.backends.cudnn as cudnn from callbacks import AverageMeter from data_utils.causal_data_loader_frames", "criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_factual, video_label.long().cuda()) # statistic result from", "output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(train_loader.dataset.classes))) output_factual =", "pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = []", "default='fused_sum', type=str, help='function for fusing activations from each branch') parser.add_argument('--img_feature_dim',", "torch.load(args.resume_fusion) if args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss =", "AverageMeter() search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() [vision_model, coord_model, fusion_model] = model_list", "fusion_model.train() end = time.time() for i, (global_img_tensors, box_tensors, box_categories, 
video_label)", "'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\\t' 'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\\t' 'Acc1_0.5", "+ 1, 'state_dict': coord_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_coord,", "fuse three outputs output_factual = fusion_function(output_vision, output_coord, output_fusion) # warning:", "FusionModel(args) # create the fusion function for the activation of", "print('no such a fusion function!') fusion_function = fusion_func() if args.parallel:", "[{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t'", "training and validation loader train_loader = torch.utils.data.DataLoader( dataset_train, batch_size=args.batch_size, shuffle=True,", "'{}' (epoch {})\" .format(args.resume_fusion, checkpoint['epoch'])) if args.start_epoch is None: args.start_epoch", "import BboxInteractionLatentModel as BboxModel print('interaction loaded!!') else: print(\"no such a", "accuracy and record loss losses.update(loss.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0))", "warning: loss_fusion is the loss of output_fusion(fused, obtained from the", "sum of three branch activation results acc1, acc5 = accuracy(output_factual.cpu(),", "not train with multi GPUs') parser.add_argument('--gpu_index', type=str, default='0, 1, 2,", "torch.save(state, filename + '_latest.pth.tar') if is_best: shutil.copyfile(filename + '_latest.pth.tar', filename", "import save_results from tqdm import tqdm parser = argparse.ArgumentParser(description='Counterfactual CAR')", "default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--resume_fusion',", "default=224, type=int, metavar='N', help='primary 
image input size') parser.add_argument('--num_boxes', default=4, type=int,", "truth labels') parser.add_argument('--dataset', default='smth_smth', help='which dataset to train') parser.add_argument('--logname', default='my_method',", "output checkpoints') parser.add_argument('--resume_vision', default='', type=str, metavar='PATH', help='path to latest checkpoint", "(default: none)') # model, image&feature dim and training related arguments", "model_list, fusion_function, criterion, epoch=None, class_to_idx=None): batch_time = AverageMeter() losses =", "acc1, acc5 = accuracy(output.cpu(), video_label, topk=(1, 5)) if args.evaluate: logits_matrix.append(output.cpu().data.numpy())", "default=5, type=float, metavar='W', help='gradient norm clipping (default: 5)') parser.add_argument('--search_stride', type=int,", "file with train video meta data') parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json', help='path", "if i % args.print_freq == 0: print('Epoch: [{0}][{1}/{2}]\\t' 'Time {batch_time.val:.3f}", "optimizer.param_groups: param_group['lr'] = lr elif branch_name == 'fusion': for param_group", "default='../data/dataset_splits/compositional/train.json', help='path to the json file with train video meta", "box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # detach the computation", "hyperparameter if args.cf_inference_group: cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=dataset_val.classes_dict) return print('training", "coord model from a checkpoint if args.resume_coord: assert os.path.isfile(args.resume_coord), \"No", "frames for the model') parser.add_argument('--num_classes', default=174, type=int, help='num of class", "search_list[j] output_debiased = output_factual - output_counterfactual * weight acc1, acc5", "target.size(0) _, pred = output.topk(maxk, 1, True, True) pred =", "metavar='N', help='number of 
data loading workers (default: 4)') parser.add_argument('-e', '--evaluate',", "file with ground truth labels') parser.add_argument('--dataset', default='smth_smth', help='which dataset to", "[{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\\t' 'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\\t'", "is_best = loss < best_loss best_loss = min(loss, best_loss) save_checkpoint(", "metavar='PATH', help='path to latest checkpoint (default: none)') # model, image&feature", "'state_dict': fusion_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname))) def", "min(loss, best_loss) save_checkpoint( { 'epoch': epoch + 1, 'state_dict': vision_model.state_dict(),", "return losses.avg def cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=None): batch_time =", "metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--resume_fusion', default='', type=str,", "to evaluate mode vision_model.eval() coord_model.eval() fusion_model.eval() end = time.time() for", "checkpoint '{}'\".format(args.resume_vision)) checkpoint = torch.load(args.resume_vision) if args.start_epoch is None: args.start_epoch", "if args.model_vision == 'global_i3d': from model.model_lib import VideoGlobalModel as RGBModel", "features') parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension for coord-based", "model_list, fusion_function, criterion, epoch=epoch, class_to_idx=dataset_val.classes_dict) else: loss = 100 #", "pin_memory=False ) model_list = [vision_model, coord_model, fusion_model] optimizer_vision = torch.optim.SGD(filter(lambda", "workers (default: 4)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on", "True, True) pred = pred.t() correct = pred.eq(target.view(1, 
-1).expand_as(pred)) res", "of gpu you want to use') best_loss = 1000000 def", "35, 45], type=float, nargs=\"+\", metavar='LRSteps', help='epochs to decay learning rate", "optimizer_vision = torch.optim.SGD(filter(lambda p: p.requires_grad, vision_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_coord", "losses.update(loss.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time", "print(\"no such a vision model!\") # create coord model if", "the parameter optimizer_vision.step() optimizer_coord.step() optimizer_fusion.step() batch_time.update(time.time() - end) end =", "action='store_true', help='counterfactual inference model on validation set') parser.add_argument('--parallel', default=True, type=bool,", "5)) search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), global_img_tensors.size(0)) search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0)) # measure elapsed", "[{0}][{1}/{2}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'", "vision_model.cuda() coord_model = coord_model.cuda() fusion_model = fusion_model.cuda() # optionally resume", "best_loss) save_checkpoint( { 'epoch': epoch + 1, 'state_dict': vision_model.state_dict(), 'best_loss':", "obtain the activation and coordinate features from coordinate branch output_coord,", "branch') parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension for image-based", "optimizers, including the one designed for uniform assumption [optimizer_vision, optimizer_coord,", "* decay if branch_name == 'vision': for param_group in optimizer.param_groups:", "as np import random from collections import OrderedDict import 
torch", "validation set') parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference model on validation set')", "args=args, is_val=True, if_augment=True, ) # create training and validation loader", "lr * 0.8 elif branch_name == 'coord': for param_group in", "video meta data') parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json', help='path to the json", "args.lr_steps, 'coord') adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps, 'fusion') # train for one", "# switch to train mode vision_model.train() coord_model.train() fusion_model.train() end =", "if (epoch+1) >= 30 and (epoch + 1) % args.search_stride", "help='print frequency (default: 20)') parser.add_argument('--ckpt', default='./ckpt', help='folder to output checkpoints')", "1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred))", "args.start_epoch is None: args.start_epoch = 0 cudnn.benchmark = True #", "is the loss of output_fusion(fused, obtained from the fusion_function) loss_vision", "= AverageMeter() losses = AverageMeter() acc_top1 = AverageMeter() acc_top5 =", "= {} for i in range(search_length): search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter()", "output_factual - output_counterfactual * weight acc1, acc5 = accuracy(output_debiased.cpu(), video_label,", "logsigsum as fusion_func print('fused_sum loaded!!') elif args.fusion_function == 'naive_sum': from", "fusion_model = FusionModel(args) # create the fusion function for the", "else: loss = 100 # remember best loss and save", "AverageMeter() [vision_model, coord_model, fusion_model] = model_list # switch to evaluate", "from data_utils.causal_data_loader_frames import VideoFolder from utils import save_results from tqdm", "global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time batch_time.update(time.time() - end)", "meta data') 
parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json', help='path to the json file", "loss=losses, acc_top1=acc_top1, acc_top5=acc_top5)) def validate(val_loader, model_list, fusion_function, criterion, epoch=None, class_to_idx=None):", "fusion_function(output_vision, output_coord, output_fusion) # counterfactual inference output_vision_subtrahend = output_vision output_counterfactual", "+ '_latest.pth.tar', filename + '_best.pth.tar') def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None):", "parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float, metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--clip_gradient',", "decay if branch_name == 'vision': for param_group in optimizer.param_groups: param_group['lr']", "np import random from collections import OrderedDict import torch import", "tqdm import tqdm parser = argparse.ArgumentParser(description='Counterfactual CAR') # Path, dataset", "metavar='N', help='primary image input size') parser.add_argument('--num_boxes', default=4, type=int, help='num of", "loss_vision acc1, acc5 = accuracy(output.cpu(), video_label, topk=(1, 5)) if args.evaluate:", "switch to evaluate mode vision_model.eval() coord_model.eval() fusion_model.eval() end = time.time()", "20)') parser.add_argument('--ckpt', default='./ckpt', help='folder to output checkpoints') parser.add_argument('--resume_vision', default='', type=str,", "'interaction': from model.model_lib import BboxInteractionLatentModel as BboxModel print('interaction loaded!!') else:", "the json file with validation video meta data') parser.add_argument('--json_file_labels', type=str,", "for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(train_loader): data_time.update(time.time() -", "args.print_freq == 0: print('Epoch: [{0}][{1}/{2}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Data {data_time.val:.3f}", 
"parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set') parser.add_argument('--cf_inference_group',", "model_list, fusion_function, search_list, class_to_idx=dataset_val.classes_dict) return print('training begin...') for epoch in", "1) % args.search_stride == 0: loss = validate(val_loader, model_list, fusion_function,", "optimizer.param_groups: param_group['lr'] = lr def accuracy(output, target, topk=(1,)): \"\"\"Computes the", "type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--resume_fusion', default='',", "\"No checkpoint found at '{}'\".format(args.resume_vision) print(\"=> loading checkpoint '{}'\".format(args.resume_vision)) checkpoint", "3', help='the index of gpu you want to use') best_loss", "help='path to latest checkpoint (default: none)') parser.add_argument('--resume_coord', default='', type=str, metavar='PATH',", "is_best, filename): torch.save(state, filename + '_latest.pth.tar') if is_best: shutil.copyfile(filename +", "clipping (default: 5)') parser.add_argument('--search_stride', type=int, default=5, help='test performance every n", "range(search_length): weight = search_list[j] output_debiased = output_factual - output_counterfactual *", "model!\") # create coord model if args.model_coord == 'interaction': from", "metavar='N', help='mini-batch size') parser.add_argument('--lr', '--learning-rate', default=0.01, type=float, metavar='LR', help='initial learning", "mode vision_model.train() coord_model.train() fusion_model.train() end = time.time() for i, (global_img_tensors,", "fusion_func print('naive_sum loaded!!') else: print('no such a fusion function!') fusion_function", "output with torch.no_grad(): # factual inference output_vision, feature_vision = vision_model(global_img_tensors.cuda(),", "batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True ) val_loader = 
torch.utils.data.DataLoader( dataset_val,", "end) end = time.time() if i % args.print_freq == 0:", "'fusion': for param_group in optimizer.param_groups: param_group['lr'] = lr else: for", "training related arguments parser.add_argument('--model_vision', default='rgb_roi') parser.add_argument('--model_coord', default='interaction') parser.add_argument('--model_fusion', default='concat_fusion') parser.add_argument('--fusion_function',", "coord_model = coord_model.cuda() fusion_model = fusion_model.cuda() # optionally resume vision", "best_loss = 1000000 def main(): global args, best_loss args =", "output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse three outputs output_factual =", "from vision branch output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label)", "in optimizer.param_groups: param_group['lr'] = lr * 0.8 elif branch_name ==", "the fusion function for the activation of three branches if", "+ loss_coord + loss_factual loss.backward() if args.clip_gradient is not None:", "parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index print(args) # create vision model if", "= RGBModel(args) coord_model = BboxModel(args) fusion_model = FusionModel(args) # create", "+ 1, 'state_dict': vision_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_vision,", "data_utils.causal_data_loader_frames import VideoFolder from utils import save_results from tqdm import", "assert os.path.isfile(args.resume_coord), \"No checkpoint found at '{}'\".format(args.resume_coord) print(\"=> loading checkpoint", "cudnn.benchmark = True # create training and validation dataset dataset_train", "acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time batch_time.update(time.time() -", "video_label) output_coord = output_coord.view((-1, 
len(train_loader.dataset.classes))) # detach the computation graph,", "to train') parser.add_argument('--logname', default='my_method', help='name of the experiment for checkpoints", "to the folder with frames') parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json', help='path to", "acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'], acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0'])) for k in range(search_length): print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k],", "'state_dict': coord_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname))) save_checkpoint(", "best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname))) def train(train_loader, model_list, fusion_function,", "by trying a list of hyperparameter if args.cf_inference_group: cf_inference_group(val_loader, model_list,", "acc_top1 = AverageMeter() acc_top5 = AverageMeter() logits_matrix = [] targets_list", "type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--resume_coord', default='',", "'vision') adjust_learning_rate(optimizer_coord, epoch, args.lr_steps, 'coord') adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps, 'fusion') #", "epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5)) def validate(val_loader,", "create vision model if args.model_vision == 'global_i3d': from model.model_lib import", "every n strides') # train mode, hardware setting and others", "obtain the activation of fusion branch output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda())", "end = time.time() for i, (global_img_tensors, box_tensors, box_categories, video_label) in", "mode, hardware setting and others 
related arguments parser.add_argument('-j', '--workers', default=4,", "adjust_learning_rate(optimizer_coord, epoch, args.lr_steps, 'coord') adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps, 'fusion') # train", "arguments parser.add_argument('--model_vision', default='rgb_roi') parser.add_argument('--model_coord', default='interaction') parser.add_argument('--model_fusion', default='concat_fusion') parser.add_argument('--fusion_function', default='fused_sum', type=str,", "RGBModel print('global_i3d loaded!!') elif args.model_vision == 'rgb_roi': from model.model_lib import", "optimizer_coord, optimizer_fusion] criterion = torch.nn.CrossEntropyLoss() search_list = np.linspace(0.0, 1.0, 11)", "the accuracy over the k top predictions for the specified", "time.time() for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(train_loader): data_time.update(time.time()", "the activation of three branches if args.fusion_function == 'fused_sum': from", "torch.nn.DataParallel(coord_model).cuda() fusion_model = torch.nn.DataParallel(fusion_model).cuda() else: vision_model = vision_model.cuda() coord_model =", "help='intermediate feature dimension for image-based features') parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N',", ") # create training and validation loader train_loader = torch.utils.data.DataLoader(", "model_list, fusion_function, optimizer_list, epoch, criterion) if (epoch+1) >= 30 and", "= output_coord.view((-1, len(val_loader.dataset.classes))) # obtain the activation of fusion branch", "pin_memory=True ) val_loader = torch.utils.data.DataLoader( dataset_val, drop_last=True, batch_size=args.batch_size, shuffle=False, num_workers=args.workers,", "output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse three outputs output_factual = fusion_function(output_vision, output_coord,", "args.resume_coord: assert os.path.isfile(args.resume_coord), \"No checkpoint found at 
'{}'\".format(args.resume_coord) print(\"=> loading", "= criterion(output_factual, video_label.long().cuda()) # statistic result from fusion_branch or value", "vision_model = RGBModel(args) coord_model = BboxModel(args) fusion_model = FusionModel(args) #", "help='gradient norm clipping (default: 5)') parser.add_argument('--search_stride', type=int, default=5, help='test performance", "coord_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname))) save_checkpoint( {", "criterion = torch.nn.CrossEntropyLoss() search_list = np.linspace(0.0, 1.0, 11) # factual", "metavar='LR', help='initial learning rate') parser.add_argument('--lr_steps', default=[24, 35, 45], type=float, nargs=\"+\",", "== 'interaction': from model.model_lib import BboxInteractionLatentModel as BboxModel print('interaction loaded!!')", "i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader): # compute output", "output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # detach the computation graph, avoid", "+ 1 == len(val_loader): print('Cf-Inference: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Acc1_0.0", "time batch_time.update(time.time() - end) end = time.time() if i %", "acc_top1=acc_top1, acc_top5=acc_top5, )) if args.evaluate: logits_matrix = np.concatenate(logits_matrix) targets_list =", "optimizer_fusion] criterion = torch.nn.CrossEntropyLoss() search_list = np.linspace(0.0, 1.0, 11) #", "parser.add_argument('--lr_steps', default=[24, 35, 45], type=float, nargs=\"+\", metavar='LRSteps', help='epochs to decay", "True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res =", "= 1000000 def main(): global args, best_loss args = parser.parse_args()", "if i % args.print_freq == 0 or i + 1", "parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json', help='path to the json file with 
train", "loss = validate(val_loader, model_list, fusion_function, criterion, epoch=epoch, class_to_idx=dataset_val.classes_dict) else: loss", "collections import OrderedDict import torch import torch.backends.cudnn as cudnn from", "global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # refresh the optimizer optimizer_vision.zero_grad()", "45], type=float, nargs=\"+\", metavar='LRSteps', help='epochs to decay learning rate by", "save checkpoint is_best = loss < best_loss best_loss = min(loss,", "= criterion(output_fusion, video_label.long().cuda()) loss_factual = criterion(output_factual, video_label.long().cuda()) # Measure the", "branch vision_model = RGBModel(args) coord_model = BboxModel(args) fusion_model = FusionModel(args)", "gradient confusion feature_vision_detached = feature_vision.detach() feature_coord_detached = feature_coord.detach() # obtain", "= np.linspace(0.0, 1.0, 11) # factual inference (vanilla test stage)", "parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/', help='path to the folder with frames') parser.add_argument('--json_data_train',", "= args.lr * decay if branch_name == 'vision': for param_group", "BboxVisualModel as RGBModel print('rgb_roi loaded!!') else: print(\"no such a vision", "logs') parser.add_argument('--print_freq', '-p', default=20, type=int, metavar='N', help='print frequency (default: 20)')", "{})\" .format(args.resume_coord, checkpoint['epoch'])) if args.resume_fusion: assert os.path.isfile(args.resume_fusion), \"No checkpoint found", "torch.utils.data.DataLoader( dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True ) val_loader =", "acc_top5=acc_top5)) def validate(val_loader, model_list, fusion_function, criterion, epoch=None, class_to_idx=None): batch_time =", "= correct[:k].contiguous().view(-1).float().sum(0, keepdim=True) 
res.append(correct_k.mul_(100.0 / batch_size)) return res if __name__", "branch activation results acc1, acc5 = accuracy(output_factual.cpu(), video_label, topk=(1, 5))", "help='path to the json file with ground truth labels') parser.add_argument('--dataset',", "of data loading workers (default: 4)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',", "= 0 cudnn.benchmark = True # create training and validation", "a fusion function!') fusion_function = fusion_func() if args.parallel: vision_model =", "= coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) #", "= feature_coord.detach() # obtain the activation of fusion branch output_fusion", "features from vision branch output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(),", "parser.add_argument('--size', default=224, type=int, metavar='N', help='primary image input size') parser.add_argument('--num_boxes', default=4,", "= 100 # remember best loss and save checkpoint is_best", "== 'naive_sum': from fusion_function import naivesum as fusion_func print('naive_sum loaded!!')", "= torch.optim.SGD(filter(lambda p: p.requires_grad, coord_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_fusion =", "checkpoint '{}'\".format(args.resume_coord)) checkpoint = torch.load(args.resume_coord) if args.start_epoch is None: args.start_epoch", "criterion) if (epoch+1) >= 30 and (epoch + 1) %", "+ 1, 'state_dict': fusion_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion,", "for uniform assumption [optimizer_vision, optimizer_coord, optimizer_fusion] = optimizer_list # switch", "for i in range(search_length): search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() 
search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))]", "a list of hyperparameter if args.cf_inference_group: cf_inference_group(val_loader, model_list, fusion_function, search_list,", "for coord-based features') parser.add_argument('--size', default=224, type=int, metavar='N', help='primary image input", "n strides') # train mode, hardware setting and others related", "loss_vision = criterion(output_vision, video_label.long().cuda()) loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion =", "FusionModel print('concat_fusion loaded!!') else: print('no such a fusion model!') #", "assumption [optimizer_vision, optimizer_coord, optimizer_fusion] = optimizer_list # switch to train", "save_results from tqdm import tqdm parser = argparse.ArgumentParser(description='Counterfactual CAR') #", "({batch_time.avg:.3f})\\t' 'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\\t' 'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\\t' 'Acc1_0.5 {acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\\t'", "box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # detach the", "fusion branch output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes)))", "fusion model if args.model_fusion == 'concat_fusion': from model.model_lib import ConcatFusionModel", "in range(search_length): print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg, search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg) return def save_checkpoint(state,", "k top predictions for the specified values of k\"\"\" with", "# measure accuracy and record loss losses.update(loss.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0))", "print(\"=> loading checkpoint '{}'\".format(args.resume_fusion)) checkpoint = 
torch.load(args.resume_fusion) if args.start_epoch is", "data') parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json', help='path to the json file with", "optimizer_list, epoch, criterion): global args batch_time = AverageMeter() data_time =", "vision branch output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision", "batch_time=batch_time, data_time=data_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5)) def validate(val_loader, model_list, fusion_function, criterion,", "three model branches [vision_model, coord_model, fusion_model] = model_list # load", "'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\\t'.format( i,", "for checkpoints and logs') parser.add_argument('--print_freq', '-p', default=20, type=int, metavar='N', help='print", "{acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\\t' 'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\\t' 'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format( i, len(val_loader),", "vision model if args.model_vision == 'global_i3d': from model.model_lib import VideoGlobalModel", "torch.optim.SGD(filter(lambda p: p.requires_grad, fusion_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_list = [optimizer_vision,", "'{}' (epoch {})\" .format(args.resume_coord, checkpoint['epoch'])) if args.resume_fusion: assert os.path.isfile(args.resume_fusion), \"No", "help='weight decay (default: 1e-4)') parser.add_argument('--clip_gradient', '-cg', default=5, type=float, metavar='W', help='gradient", "weight_decay=args.weight_decay) optimizer_coord = torch.optim.SGD(filter(lambda p: p.requires_grad, coord_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)", "({acc_top5.avg:.1f})\\t'.format( i, len(val_loader), 
batch_time=batch_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5, )) if args.evaluate:", "loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_fusion, video_label.long().cuda()) loss_factual =", "print('rgb_roi loaded!!') else: print(\"no such a vision model!\") # create", "index of gpu you want to use') best_loss = 1000000", "output_coord, output_fusion) # loss_fusion is the loss of output_fusion(fused, obtained", "momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_list = [optimizer_vision, optimizer_coord, optimizer_fusion] criterion =", "none)') parser.add_argument('--resume_coord', default='', type=str, metavar='PATH', help='path to latest checkpoint (default:", "save_checkpoint( { 'epoch': epoch + 1, 'state_dict': vision_model.state_dict(), 'best_loss': best_loss,", "k in topk: correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size))", "CAR') # Path, dataset and log related arguments parser.add_argument('--root_frames', type=str,", "'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname))) save_checkpoint( { 'epoch':", "'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format( epoch,", "best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname))) save_checkpoint( { 'epoch': epoch", "learning rate') parser.add_argument('--lr_steps', default=[24, 35, 45], type=float, nargs=\"+\", metavar='LRSteps', help='epochs", "else: print(\"no such a vision model!\") # create coord model", "= args.gpu_index print(args) # create vision model if args.model_vision ==", "epoch + 1, 'state_dict': coord_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt,", "output_fusion = fusion_model(feature_vision_detached.cuda(), 
feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse", "factual inference output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision", "assert os.path.isfile(args.resume_fusion), \"No checkpoint found at '{}'\".format(args.resume_fusion) print(\"=> loading checkpoint", "activation of fusion branch output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion =", "fusion_branch or value after fusion function output = output_factual loss", "fusion_model] = model_list # switch to evaluate mode vision_model.eval() coord_model.eval()", "the model') parser.add_argument('--num_classes', default=174, type=int, help='num of class in the", "= output_vision.view((-1, len(train_loader.dataset.classes))) # obtain the activation and coordinate features", "topk=(1,)): \"\"\"Computes the accuracy over the k top predictions for", "= argparse.ArgumentParser(description='Counterfactual CAR') # Path, dataset and log related arguments", "checkpoint '{}' (epoch {})\" .format(args.resume_fusion, checkpoint['epoch'])) if args.start_epoch is None:", "acc_top5 = AverageMeter() # load three model branches [vision_model, coord_model,", "the loss of output_fusion(fused, obtained from the fusion_function) loss_vision =", "help='folder to output checkpoints') parser.add_argument('--resume_vision', default='', type=str, metavar='PATH', help='path to", "help='num of frames for the model') parser.add_argument('--num_classes', default=174, type=int, help='num", "= output_vision.view((-1, len(val_loader.dataset.classes))) output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label)", "elapsed time batch_time.update(time.time() - end) end = time.time() if i", "use') best_loss = 1000000 def main(): global args, best_loss args", "optimizer_coord.zero_grad() 
optimizer_fusion.zero_grad() loss = loss_vision + loss_coord + loss_factual loss.backward()", "type=str, metavar='PATH', help='path to latest checkpoint (default: none)') # model,", "log related arguments parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/', help='path to the folder", "related arguments parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data", "'fused_sum': from fusion_function import logsigsum as fusion_func print('fused_sum loaded!!') elif", "parser.add_argument('--gpu_index', type=str, default='0, 1, 2, 3', help='the index of gpu", "default='/mnt/data1/home/sunpengzhan/sth-sth-v2/', help='path to the folder with frames') parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json',", "= torch.nn.CrossEntropyLoss() search_list = np.linspace(0.0, 1.0, 11) # factual inference", "gpu you want to use') best_loss = 1000000 def main():", "len(val_loader), batch_time=batch_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5, )) if args.evaluate: logits_matrix =", "compute output with torch.no_grad(): # factual inference output_vision, feature_vision =", "set') parser.add_argument('--parallel', default=True, type=bool, help='whether or not train with multi", "-*- coding: utf-8 -*- import argparse import os import shutil", "= feature_vision.detach() feature_coord_detached = feature_coord.detach() # obtain the activation of", "epoch=epoch, class_to_idx=dataset_val.classes_dict) else: loss = 100 # remember best loss", "dest='evaluate', action='store_true', help='evaluate model on validation set') parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual", "len(train_loader.dataset.classes))) # obtain the activation and coordinate features from coordinate", "dim and training related arguments parser.add_argument('--model_vision', default='rgb_roi') parser.add_argument('--model_coord', 
default='interaction') parser.add_argument('--model_fusion',", "= time.time() if i % args.print_freq == 0: print('Epoch: [{0}][{1}/{2}]\\t'", "from each branch') parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension", "= max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1,", "fusion_function, optimizer_list, epoch, criterion): global args batch_time = AverageMeter() data_time", "1, 'state_dict': coord_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname)))", "acc_top1 = AverageMeter() acc_top5 = AverageMeter() # load three model", "args.evaluate: validate(val_loader, model_list, fusion_function, criterion, class_to_idx=dataset_val.classes_dict) return # Counterfactual inference", "as fusion_func print('naive_sum loaded!!') else: print('no such a fusion function!')", "5)') parser.add_argument('--search_stride', type=int, default=5, help='test performance every n strides') #", "= lr else: for param_group in optimizer.param_groups: param_group['lr'] = lr", "param_group in optimizer.param_groups: param_group['lr'] = lr elif branch_name == 'fusion':", "metavar='W', help='gradient norm clipping (default: 5)') parser.add_argument('--search_stride', type=int, default=5, help='test", "box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, len(train_loader.dataset.classes))) # obtain the activation", "'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\\t' 'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format( i, len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'],", "checkpoint['epoch'])) # optionally resume coord model from a checkpoint if", "1e-4)') parser.add_argument('--clip_gradient', '-cg', default=5, type=float, metavar='W', help='gradient norm clipping (default:", "# update the parameter optimizer_vision.step() optimizer_coord.step() optimizer_fusion.step() 
batch_time.update(time.time() - end)", "the json file with train video meta data') parser.add_argument('--json_data_val', type=str,", "is None: args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] coord_model.load_state_dict(checkpoint['state_dict']) print(\"=>", "30 and (epoch + 1) % args.search_stride == 0: loss", "branch output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord =", "default=174, type=int, help='num of class in the model') parser.add_argument('--epochs', default=30,", "of boxes for each image') parser.add_argument('--num_frames', default=16, type=int, help='num of", "'{}' (epoch {})\" .format(args.resume_vision, checkpoint['epoch'])) # optionally resume coord model", "help='intermediate feature dimension for coord-based features') parser.add_argument('--size', default=224, type=int, metavar='N',", "loss.backward() if args.clip_gradient is not None: torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient) # update", "for the activation of three branches if args.fusion_function == 'fused_sum':", "= BboxModel(args) fusion_model = FusionModel(args) # create the fusion function", "confusion feature_vision_detached = feature_vision.detach() feature_coord_detached = feature_coord.detach() # obtain the", "including the one designed for uniform assumption [optimizer_vision, optimizer_coord, optimizer_fusion]", "# factual inference (vanilla test stage) if args.evaluate: validate(val_loader, model_list,", "metavar='N', help='intermediate feature dimension for image-based features') parser.add_argument('--coord_feature_dim', default=512, type=int,", "and vision features from vision branch output_vision, feature_vision = vision_model(global_img_tensors.cuda(),", "args.logname))) save_checkpoint( { 'epoch': epoch + 1, 'state_dict': fusion_model.state_dict(), 'best_loss':", "param_group in optimizer.param_groups: param_group['lr'] = lr else: for 
param_group in", "vision_model = torch.nn.DataParallel(vision_model).cuda() coord_model = torch.nn.DataParallel(coord_model).cuda() fusion_model = torch.nn.DataParallel(fusion_model).cuda() else:", "video_label, topk=(1, 5)) search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), global_img_tensors.size(0)) search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0)) #", "vision model!\") # create coord model if args.model_coord == 'interaction':", "video_label) output_vision = output_vision.view((-1, len(val_loader.dataset.classes))) output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(),", "global_img_tensors.size(0)) # measure elapsed time batch_time.update(time.time() - end) end =", "by 10\"\"\" decay = 0.1 ** (sum(epoch >= np.array(lr_steps))) lr", "end) end = time.time() if i % args.print_freq == 0", "dataset to train') parser.add_argument('--logname', default='my_method', help='name of the experiment for", "epoch=None, class_to_idx=None): batch_time = AverageMeter() losses = AverageMeter() acc_top1 =", "parser.add_argument('--dataset', default='smth_smth', help='which dataset to train') parser.add_argument('--logname', default='my_method', help='name of", "coord model if args.model_coord == 'interaction': from model.model_lib import BboxInteractionLatentModel", "for epoch in tqdm(range(args.start_epoch, args.epochs)): adjust_learning_rate(optimizer_vision, epoch, args.lr_steps, 'vision') adjust_learning_rate(optimizer_coord,", "train video meta data') parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json', help='path to the", "range(search_length): print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg, search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg) return def save_checkpoint(state, is_best,", "# record the accuracy and loss 
losses.update(loss_factual.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0))", "# statistic result from fusion_branch or value after fusion function", "optionally resume vision model from a checkpoint if args.resume_vision: assert", "folder with frames') parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json', help='path to the json", "{acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\\t' 'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\\t' 'Acc1_0.5 {acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\\t' 'Acc1_0.8 {acc_top1_08.val:.1f}", "best_loss args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index print(args) # create", "rate by 10') parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum') parser.add_argument('--weight_decay', '--wd',", "the activation of fusion branch output_fusion = fusion_model(feature_vision.cuda(), feature_coord.cuda()) output_fusion", "fusion function for the activation of three branches if args.fusion_function", "args.model_coord == 'interaction': from model.model_lib import BboxInteractionLatentModel as BboxModel print('interaction", "'fusion') # train for one epoch train(train_loader, model_list, fusion_function, optimizer_list,", "'{}'\".format(args.resume_coord)) checkpoint = torch.load(args.resume_coord) if args.start_epoch is None: args.start_epoch =", "'{}'\".format(args.resume_fusion)) checkpoint = torch.load(args.resume_fusion) if args.start_epoch is None: args.start_epoch =", "optimizer_fusion.zero_grad() loss = loss_vision + loss_coord + loss_factual loss.backward() if", "such a fusion model!') # load model branch vision_model =", "strides') # train mode, hardware setting and others related arguments", "args batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter()", "such a fusion function!') fusion_function = fusion_func() if args.parallel: vision_model", 
"= lr elif branch_name == 'fusion': for param_group in optimizer.param_groups:", ">= np.array(lr_steps))) lr = args.lr * decay if branch_name ==", "frames_duration=args.num_frames, args=args, is_val=True, if_augment=True, ) # create training and validation", "best_loss best_loss = min(loss, best_loss) save_checkpoint( { 'epoch': epoch +", "'{}'\".format(args.resume_fusion) print(\"=> loading checkpoint '{}'\".format(args.resume_fusion)) checkpoint = torch.load(args.resume_fusion) if args.start_epoch", "torch.tensor(0.0)) for j in range(search_length): weight = search_list[j] output_debiased =", "% args.search_stride == 0: loss = validate(val_loader, model_list, fusion_function, criterion,", "import ConcatFusionModel as FusionModel print('concat_fusion loaded!!') else: print('no such a", "fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse three outputs", "= fusion_model.cuda() # optionally resume vision model from a checkpoint", "targets_list.append(video_label.cpu().numpy()) # measure accuracy and record loss losses.update(loss.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(),", "# obtain the activation of fusion branch output_fusion = fusion_model(feature_vision.cuda(),", "a coordinate model!\") # create fusion model if args.model_fusion ==", "default='my_method', help='name of the experiment for checkpoints and logs') parser.add_argument('--print_freq',", "a fusion model!') # load model branch vision_model = RGBModel(args)", "[vision_model, coord_model, fusion_model] = model_list # switch to evaluate mode", "video_label.long().cuda()) # Measure the accuracy of the sum of three", "search_length = len(search_list) search_dict = {} for i in range(search_length):", "is_val=False, if_augment=True, ) dataset_val = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_val, file_labels=args.json_file_labels, 
frames_duration=args.num_frames,", ".format(args.resume_vision, checkpoint['epoch'])) # optionally resume coord model from a checkpoint", "dataset_train = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_train, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=False, if_augment=True,", "import VideoGlobalModel as RGBModel print('global_i3d loaded!!') elif args.model_vision == 'rgb_roi':", "torch.utils.data.DataLoader( dataset_val, drop_last=True, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False ) model_list =", "coord_model, fusion_model] optimizer_vision = torch.optim.SGD(filter(lambda p: p.requires_grad, vision_model.parameters()), momentum=args.momentum, lr=args.lr,", "train_loader = torch.utils.data.DataLoader( dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True )", "default=30, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--start_epoch',", "coord_model = torch.nn.DataParallel(coord_model).cuda() fusion_model = torch.nn.DataParallel(fusion_model).cuda() else: vision_model = vision_model.cuda()", "fusion_model = fusion_model.cuda() # optionally resume vision model from a", "= AverageMeter() [vision_model, coord_model, fusion_model] = model_list # switch to", "criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_fusion, video_label.long().cuda()) loss_factual = criterion(output_factual, video_label.long().cuda())", "(epoch + 1) % args.search_stride == 0: loss = validate(val_loader,", "features from coordinate branch output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(),", "if args.model_coord == 'interaction': from model.model_lib import BboxInteractionLatentModel as BboxModel", "VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_val, 
file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=True, if_augment=True, ) #", "none)') parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH', help='path to latest checkpoint (default:", "coord_model, fusion_model] = model_list # load four optimizers, including the", "the experiment for checkpoints and logs') parser.add_argument('--print_freq', '-p', default=20, type=int,", "= time.time() if i % args.print_freq == 0 or i", "the optimizer optimizer_vision.zero_grad() optimizer_coord.zero_grad() optimizer_fusion.zero_grad() loss = loss_vision + loss_coord", "parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference model on validation set') parser.add_argument('--parallel', default=True,", "output_coord, output_fusion) # warning: loss_fusion is the loss of output_fusion(fused,", "None: args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] fusion_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded", "with validation video meta data') parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json', help='path to", "epoch in tqdm(range(args.start_epoch, args.epochs)): adjust_learning_rate(optimizer_vision, epoch, args.lr_steps, 'vision') adjust_learning_rate(optimizer_coord, epoch,", "def train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion): global args batch_time", "output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1,", "type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/', help='path to the folder with frames') parser.add_argument('--json_data_train', type=str,", "args, best_loss args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index print(args) #", "the sum of three branch activation results acc1, acc5 =", "and loss losses.update(loss_factual.item(), global_img_tensors.size(0)) 
acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # refresh", "args.model_fusion == 'concat_fusion': from model.model_lib import ConcatFusionModel as FusionModel print('concat_fusion", "= AverageMeter() acc_top1 = AverageMeter() acc_top5 = AverageMeter() # load", "targets_list = [] # unpack three models [vision_model, coord_model, fusion_model]", "over the k top predictions for the specified values of", "print(\"=> loading checkpoint '{}'\".format(args.resume_vision)) checkpoint = torch.load(args.resume_vision) if args.start_epoch is", "for one epoch train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion) if", "metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('--batch_size', '-b', default=16,", "shutil import time import numpy as np import random from", "resume coord model from a checkpoint if args.resume_coord: assert os.path.isfile(args.resume_coord),", "feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(train_loader.dataset.classes))) output_factual = fusion_function(output_vision, output_coord, output_fusion)", "enumerate(train_loader): data_time.update(time.time() - end) # obtain the activation and vision", "epoch + 1, 'state_dict': fusion_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt,", "(epoch {})\" .format(args.resume_vision, checkpoint['epoch'])) # optionally resume coord model from", "video_label) output_vision = output_vision.view((-1, len(train_loader.dataset.classes))) # obtain the activation and", "help='mini-batch size') parser.add_argument('--lr', '--learning-rate', default=0.01, type=float, metavar='LR', help='initial learning rate')", "# obtain the activation of fusion branch output_fusion = fusion_model(feature_vision_detached.cuda(),", "model_list # switch to evaluate mode vision_model.eval() coord_model.eval() fusion_model.eval() end", "print(\"no such a coordinate 
model!\") # create fusion model if", "= AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc_top1 =", "}, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname))) def train(train_loader, model_list, fusion_function, optimizer_list,", "pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k", "= loss_vision + loss_coord + loss_factual loss.backward() if args.clip_gradient is", "save_checkpoint( { 'epoch': epoch + 1, 'state_dict': coord_model.state_dict(), 'best_loss': best_loss,", "type=str, default='../data/dataset_splits/compositional/labels.json', help='path to the json file with ground truth", "len(val_loader): print('Test: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1", "fusion_function import logsigsum as fusion_func print('fused_sum loaded!!') elif args.fusion_function ==", "type=str, default='0, 1, 2, 3', help='the index of gpu you", "inference by trying a list of hyperparameter if args.cf_inference_group: cf_inference_group(val_loader,", "the fusion_function) loss_vision = criterion(output_vision, video_label.long().cuda()) loss_coord = criterion(output_coord, video_label.long().cuda())", "parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json', help='path to the json file with validation", "loaded!!') else: print(\"no such a vision model!\") # create coord", "(global_img_tensors, box_tensors, box_categories, video_label) in enumerate(train_loader): data_time.update(time.time() - end) #", "loaded!!') elif args.model_vision == 'rgb_roi': from model.model_lib import BboxVisualModel as", "p.requires_grad, fusion_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_list = [optimizer_vision, optimizer_coord, optimizer_fusion]", "the one designed for uniform assumption [optimizer_vision, optimizer_coord, optimizer_fusion] =", 
"\"\"\"Computes the accuracy over the k top predictions for the", "help='initial learning rate') parser.add_argument('--lr_steps', default=[24, 35, 45], type=float, nargs=\"+\", metavar='LRSteps',", "and validation loader train_loader = torch.utils.data.DataLoader( dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.workers,", "number (useful on restarts)') parser.add_argument('--batch_size', '-b', default=16, type=int, metavar='N', help='mini-batch", "help='function for fusing activations from each branch') parser.add_argument('--img_feature_dim', default=512, type=int,", "a vision model!\") # create coord model if args.model_coord ==", "model from a checkpoint if args.resume_coord: assert os.path.isfile(args.resume_coord), \"No checkpoint", "output_counterfactual = fusion_function(output_vision_subtrahend, torch.tensor(0.0), torch.tensor(0.0)) for j in range(search_length): weight", "fusion_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname))) def train(train_loader,", "'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1", "dimension for coord-based features') parser.add_argument('--size', default=224, type=int, metavar='N', help='primary image", "if args.parallel: vision_model = torch.nn.DataParallel(vision_model).cuda() coord_model = torch.nn.DataParallel(coord_model).cuda() fusion_model =", "torch.nn.DataParallel(fusion_model).cuda() else: vision_model = vision_model.cuda() coord_model = coord_model.cuda() fusion_model =", "metavar='N', help='intermediate feature dimension for coord-based features') parser.add_argument('--size', default=224, type=int,", "checkpoint '{}'\".format(args.resume_fusion)) checkpoint = torch.load(args.resume_fusion) if args.start_epoch is None: args.start_epoch", "% args.print_freq == 0 or i + 1 == len(val_loader):", 
"pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k", "from model.model_lib import BboxVisualModel as RGBModel print('rgb_roi loaded!!') else: print(\"no", "default=16, type=int, help='num of frames for the model') parser.add_argument('--num_classes', default=174,", "({acc_top1_00.avg:.1f})\\t' 'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\\t' 'Acc1_0.5 {acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\\t' 'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\\t'", ".format(args.resume_coord, checkpoint['epoch'])) if args.resume_fusion: assert os.path.isfile(args.resume_fusion), \"No checkpoint found at", "box_tensors, box_categories, video_label) in enumerate(train_loader): data_time.update(time.time() - end) # obtain", "(default: 4)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation", "learning rate by 10') parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum') parser.add_argument('--weight_decay',", "args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] vision_model.load_state_dict(checkpoint['state_dict'])", "detach the computation graph, avoid the gradient confusion feature_vision_detached =", "val_loader = torch.utils.data.DataLoader( dataset_val, drop_last=True, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False )", "accuracy(output_factual.cpu(), video_label, topk=(1, 5)) # record the accuracy and loss", "video_label, topk=(1, 5)) # record the accuracy and loss losses.update(loss_factual.item(),", "three models [vision_model, coord_model, fusion_model] = model_list # switch to", "'--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set') parser.add_argument('--cf_inference_group', action='store_true',", "BboxModel(args) fusion_model = FusionModel(args) # create the fusion function for", "to the json file with 
ground truth labels') parser.add_argument('--dataset', default='smth_smth',", "from model.model_lib import ConcatFusionModel as FusionModel print('concat_fusion loaded!!') else: print('no", "# measure elapsed time batch_time.update(time.time() - end) end = time.time()", "video_label.long().cuda()) loss_factual = criterion(output_factual, video_label.long().cuda()) # Measure the accuracy of", "loss = loss_vision + loss_coord + loss_factual loss.backward() if args.clip_gradient", "checkpoint (default: none)') parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH', help='path to latest", "in tqdm(range(args.start_epoch, args.epochs)): adjust_learning_rate(optimizer_vision, epoch, args.lr_steps, 'vision') adjust_learning_rate(optimizer_coord, epoch, args.lr_steps,", "loss = loss_vision acc1, acc5 = accuracy(output.cpu(), video_label, topk=(1, 5))", "output_factual = fusion_function(output_vision, output_coord, output_fusion) # loss_fusion is the loss", "for param_group in optimizer.param_groups: param_group['lr'] = lr * 0.8 elif", "time import numpy as np import random from collections import", "# load three model branches [vision_model, coord_model, fusion_model] = model_list", "# Path, dataset and log related arguments parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/',", "such a vision model!\") # create coord model if args.model_coord", "related arguments parser.add_argument('--model_vision', default='rgb_roi') parser.add_argument('--model_coord', default='interaction') parser.add_argument('--model_fusion', default='concat_fusion') parser.add_argument('--fusion_function', default='fused_sum',", "accuracy(output, target, topk=(1,)): \"\"\"Computes the accuracy over the k top", "parser.add_argument('--epochs', default=30, type=int, metavar='N', help='number of total epochs to run')", "if_augment=True, ) dataset_val = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, 
file_input=args.json_data_val, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args,", "training and validation dataset dataset_train = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_train, file_labels=args.json_file_labels,", "= output_vision output_counterfactual = fusion_function(output_vision_subtrahend, torch.tensor(0.0), torch.tensor(0.0)) for j in", "parser.add_argument('--clip_gradient', '-cg', default=5, type=float, metavar='W', help='gradient norm clipping (default: 5)')", "global args, best_loss args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index print(args)", "2, 3', help='the index of gpu you want to use')", "'_latest.pth.tar', filename + '_best.pth.tar') def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None): \"\"\"Sets", "output_fusion) # loss_fusion is the loss of output_fusion(fused, obtained from", "help='path to the json file with validation video meta data')", "from a checkpoint if args.resume_coord: assert os.path.isfile(args.resume_coord), \"No checkpoint found", "to latest checkpoint (default: none)') parser.add_argument('--resume_coord', default='', type=str, metavar='PATH', help='path", "'state_dict': vision_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname))) save_checkpoint(", "feature_vision_detached = feature_vision.detach() feature_coord_detached = feature_coord.detach() # obtain the activation", "import argparse import os import shutil import time import numpy", "4)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')", "video meta data') parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json', help='path to the json", "feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) 
output_vision = output_vision.view((-1, len(val_loader.dataset.classes)))", "file_input=args.json_data_val, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=True, if_augment=True, ) # create training", ") model_list = [vision_model, coord_model, fusion_model] optimizer_vision = torch.optim.SGD(filter(lambda p:", "i + 1 == len(val_loader): print('Cf-Inference: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'", "{acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format( epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5))", "predictions for the specified values of k\"\"\" with torch.no_grad(): maxk", "= checkpoint['best_loss'] vision_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_vision,", "box_categories, box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, len(train_loader.dataset.classes))) # obtain the", "obtained from the fusion_function) loss_vision = criterion(output_vision, video_label.long().cuda()) loss_coord =", "rate') parser.add_argument('--lr_steps', default=[24, 35, 45], type=float, nargs=\"+\", metavar='LRSteps', help='epochs to", "in enumerate(val_loader): # compute output with torch.no_grad(): output_vision, feature_vision =", "= accuracy(output_debiased.cpu(), video_label, topk=(1, 5)) search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), global_img_tensors.size(0)) search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(),", "if args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss']", "create coord model if args.model_coord == 'interaction': from model.model_lib import", "obtain the activation and vision features from vision branch output_vision,", "want to use') best_loss = 1000000 def main(): global args,", "({acc_top1_02.avg:.1f})\\t' 'Acc1_0.5 
{acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\\t' 'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\\t' 'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format(", "of the sum of three branch activation results acc1, acc5", "({data_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format(", "= target.size(0) _, pred = output.topk(maxk, 1, True, True) pred", "designed for uniform assumption [optimizer_vision, optimizer_coord, optimizer_fusion] = optimizer_list #", "loss_vision + loss_coord + loss_factual loss.backward() if args.clip_gradient is not", "batch_time = AverageMeter() losses = AverageMeter() acc_top1 = AverageMeter() acc_top5", "# refresh the optimizer optimizer_vision.zero_grad() optimizer_coord.zero_grad() optimizer_fusion.zero_grad() loss = loss_vision", "parser.add_argument('--num_classes', default=174, type=int, help='num of class in the model') parser.add_argument('--epochs',", "= AverageMeter() acc_top5 = AverageMeter() logits_matrix = [] targets_list =", "parser.add_argument('--num_frames', default=16, type=int, help='num of frames for the model') parser.add_argument('--num_classes',", "activation of three branches if args.fusion_function == 'fused_sum': from fusion_function", "type=int, metavar='N', help='mini-batch size') parser.add_argument('--lr', '--learning-rate', default=0.01, type=float, metavar='LR', help='initial", "import shutil import time import numpy as np import random", "from fusion_function import naivesum as fusion_func print('naive_sum loaded!!') else: print('no", "loss_fusion = criterion(output_fusion, video_label.long().cuda()) loss_factual = criterion(output_factual, video_label.long().cuda()) # Measure", "param_group['lr'] = lr else: for param_group in optimizer.param_groups: param_group['lr'] =", "import torch import torch.backends.cudnn as cudnn from callbacks import AverageMeter", "= 
torch.optim.SGD(filter(lambda p: p.requires_grad, vision_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_coord =", "RGBModel(args) coord_model = BboxModel(args) fusion_model = FusionModel(args) # create the", "and coordinate features from coordinate branch output_coord, feature_coord = coord_model(global_img_tensors,", "len(val_loader.dataset.classes))) # fuse three outputs output_factual = fusion_function(output_vision, output_coord, output_fusion)", "output_fusion) # warning: loss_fusion is the loss of output_fusion(fused, obtained", "= criterion(output_vision, video_label.long().cuda()) loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_factual,", ") val_loader = torch.utils.data.DataLoader( dataset_val, drop_last=True, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False", "else: vision_model = vision_model.cuda() coord_model = coord_model.cuda() fusion_model = fusion_model.cuda()", "end) # obtain the activation and vision features from vision", "epoch, args.lr_steps, 'coord') adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps, 'fusion') # train for", "% args.print_freq == 0: print('Epoch: [{0}][{1}/{2}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Data", "json file with train video meta data') parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json',", "= np.concatenate(logits_matrix) targets_list = np.concatenate(targets_list) save_results(logits_matrix, targets_list, class_to_idx, args) return", "in range(search_length): search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter()", "tqdm parser = argparse.ArgumentParser(description='Counterfactual CAR') # Path, dataset and log", "k in range(search_length): print(search_list[k], 
search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg, search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg) return def", "acc_top5.update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time batch_time.update(time.time() - end) end", "== 0: print('Epoch: [{0}][{1}/{2}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'", "of fusion branch output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1,", "at '{}'\".format(args.resume_vision) print(\"=> loading checkpoint '{}'\".format(args.resume_vision)) checkpoint = torch.load(args.resume_vision) if", "the activation of fusion branch output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion", "type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('-e',", "+ loss_factual loss.backward() if args.clip_gradient is not None: torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient)", "type=str, default='../data/dataset_splits/compositional/validation.json', help='path to the json file with validation video", "class_to_idx, args) return losses.avg def cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=None):", "{ 'epoch': epoch + 1, 'state_dict': coord_model.state_dict(), 'best_loss': best_loss, },", "fusion_model = torch.nn.DataParallel(fusion_model).cuda() else: vision_model = vision_model.cuda() coord_model = coord_model.cuda()", "from model.model_lib import BboxInteractionLatentModel as BboxModel print('interaction loaded!!') else: print(\"no", "== 'coord': for param_group in optimizer.param_groups: param_group['lr'] = lr elif", "test stage) if args.evaluate: validate(val_loader, model_list, fusion_function, criterion, class_to_idx=dataset_val.classes_dict) return", "-*- import argparse import os import shutil 
import time import", "{acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format( epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time,", "criterion, epoch=None, class_to_idx=None): batch_time = AverageMeter() losses = AverageMeter() acc_top1", "video_label) in enumerate(val_loader): # compute output with torch.no_grad(): output_vision, feature_vision", "targets_list = np.concatenate(targets_list) save_results(logits_matrix, targets_list, class_to_idx, args) return losses.avg def", "fusion_model.cuda() # optionally resume vision model from a checkpoint if", "1000000 def main(): global args, best_loss args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES']", "or i + 1 == len(val_loader): print('Cf-Inference: [{0}/{1}]\\t' 'Time {batch_time.val:.3f}", "print('concat_fusion loaded!!') else: print('no such a fusion model!') # load", "as FusionModel print('concat_fusion loaded!!') else: print('no such a fusion model!')", "measure elapsed time batch_time.update(time.time() - end) end = time.time() if", "ConcatFusionModel as FusionModel print('concat_fusion loaded!!') else: print('no such a fusion", "image-based features') parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension for", "if args.fusion_function == 'fused_sum': from fusion_function import logsigsum as fusion_func", "three outputs output_factual = fusion_function(output_vision, output_coord, output_fusion) # counterfactual inference", "the k top predictions for the specified values of k\"\"\"", "== 'concat_fusion': from model.model_lib import ConcatFusionModel as FusionModel print('concat_fusion loaded!!')", "= coord_model.cuda() fusion_model = fusion_model.cuda() # optionally resume vision model", "restarts)') parser.add_argument('--batch_size', '-b', default=16, type=int, metavar='N', help='mini-batch size') parser.add_argument('--lr', '--learning-rate',", "[vision_model, coord_model, 
fusion_model] optimizer_vision = torch.optim.SGD(filter(lambda p: p.requires_grad, vision_model.parameters()), momentum=args.momentum,", "class_to_idx=dataset_val.classes_dict) else: loss = 100 # remember best loss and", "AverageMeter() acc_top5 = AverageMeter() logits_matrix = [] targets_list = []", "batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False ) model_list = [vision_model, coord_model, fusion_model]", "and log related arguments parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/', help='path to the", "best loss and save checkpoint is_best = loss < best_loss", "branches if args.fusion_function == 'fused_sum': from fusion_function import logsigsum as", "args.evaluate: logits_matrix = np.concatenate(logits_matrix) targets_list = np.concatenate(targets_list) save_results(logits_matrix, targets_list, class_to_idx,", "= AverageMeter() search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() [vision_model, coord_model, fusion_model] =", "print('Cf-Inference: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\\t' 'Acc1_0.2 {acc_top1_02.val:.1f}", "feature dimension for coord-based features') parser.add_argument('--size', default=224, type=int, metavar='N', help='primary", "+ 1 == len(val_loader): print('Test: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Loss", "naivesum as fusion_func print('naive_sum loaded!!') else: print('no such a fusion", "help='which dataset to train') parser.add_argument('--logname', default='my_method', help='name of the experiment", "fusion_function, search_list, class_to_idx=dataset_val.classes_dict) return print('training begin...') for epoch in tqdm(range(args.start_epoch,", "'epoch': epoch + 1, 'state_dict': vision_model.state_dict(), 'best_loss': best_loss, }, is_best,", "if_augment=True, ) # create training and validation loader 
train_loader =", "model') parser.add_argument('--num_classes', default=174, type=int, help='num of class in the model')", "data_time = AverageMeter() losses = AverageMeter() acc_top1 = AverageMeter() acc_top5", "create training and validation dataset dataset_train = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_train,", "drop_last=True, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False ) model_list = [vision_model, coord_model,", "is None: args.start_epoch = 0 cudnn.benchmark = True # create", "+ 1) % args.search_stride == 0: loss = validate(val_loader, model_list,", "({acc_top1_05.avg:.1f})\\t' 'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\\t' 'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format( i, len(val_loader), batch_time=batch_time,", "help='path to latest checkpoint (default: none)') # model, image&feature dim", "1, 'state_dict': fusion_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname)))", "video_label.long().cuda()) # statistic result from fusion_branch or value after fusion", "'{}_{}'.format(args.model_coord, args.logname))) save_checkpoint( { 'epoch': epoch + 1, 'state_dict': fusion_model.state_dict(),", "three outputs output_factual = fusion_function(output_vision, output_coord, output_fusion) # warning: loss_fusion", "for param_group in optimizer.param_groups: param_group['lr'] = lr else: for param_group", "save_checkpoint( { 'epoch': epoch + 1, 'state_dict': fusion_model.state_dict(), 'best_loss': best_loss,", "AverageMeter() losses = AverageMeter() acc_top1 = AverageMeter() acc_top5 = AverageMeter()", "validation set') parser.add_argument('--parallel', default=True, type=bool, help='whether or not train with", "class_to_idx=None): batch_time = AverageMeter() search_length = len(search_list) search_dict = {}", "_, pred = output.topk(maxk, 1, True, True) pred = pred.t()", 
"dataset dataset_train = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_train, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=False,", "frequency (default: 20)') parser.add_argument('--ckpt', default='./ckpt', help='folder to output checkpoints') parser.add_argument('--resume_vision',", "= 0.1 ** (sum(epoch >= np.array(lr_steps))) lr = args.lr *", "results acc1, acc5 = accuracy(output_factual.cpu(), video_label, topk=(1, 5)) # record", "output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision = output_vision.view((-1,", "def main(): global args, best_loss args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] =", "parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float, metavar='W',", "help='evaluate model on validation set') parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference model", "criterion(output_vision, video_label.long().cuda()) loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_factual, video_label.long().cuda())", "None: torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient) # update the parameter optimizer_vision.step() optimizer_coord.step() optimizer_fusion.step()", "search_dict = {} for i in range(search_length): search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] =", "initial LR decayed by 10\"\"\" decay = 0.1 ** (sum(epoch", "output_coord = output_coord.view((-1, len(train_loader.dataset.classes))) # detach the computation graph, avoid", "= len(search_list) search_dict = {} for i in range(search_length): search_dict['acc_1_alpha_{}'.format(round(search_list[i],", "box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, 
len(train_loader.dataset.classes))) # detach the computation", "metavar='LRSteps', help='epochs to decay learning rate by 10') parser.add_argument('--momentum', default=0.9,", "AverageMeter() acc_top1 = AverageMeter() acc_top5 = AverageMeter() logits_matrix = []", "= torch.nn.DataParallel(fusion_model).cuda() else: vision_model = vision_model.cuda() coord_model = coord_model.cuda() fusion_model", "print('naive_sum loaded!!') else: print('no such a fusion function!') fusion_function =", "# optionally resume vision model from a checkpoint if args.resume_vision:", "type=int, help='num of class in the model') parser.add_argument('--epochs', default=30, type=int,", "train with multi GPUs') parser.add_argument('--gpu_index', type=str, default='0, 1, 2, 3',", "topk=(1, 5)) search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), global_img_tensors.size(0)) search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0)) # measure", "the gradient confusion feature_vision_detached = feature_vision.detach() feature_coord_detached = feature_coord.detach() #", "from fusion_branch or value after fusion function output = output_factual", "print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_vision, checkpoint['epoch'])) # optionally", "VideoFolder from utils import save_results from tqdm import tqdm parser", "os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname))) def train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion):", "help='counterfactual inference model on validation set') parser.add_argument('--parallel', default=True, type=bool, help='whether", "args.print_freq == 0 or i + 1 == len(val_loader): print('Cf-Inference:", "os import shutil import time import numpy as np import", "output_vision_subtrahend = output_vision output_counterfactual = fusion_function(output_vision_subtrahend, torch.tensor(0.0), torch.tensor(0.0)) for j", "None: 
args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] coord_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded", "# loss_fusion is the loss of output_fusion(fused, obtained from the", "= checkpoint['epoch'] best_loss = checkpoint['best_loss'] fusion_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}'", "in enumerate(train_loader): data_time.update(time.time() - end) # obtain the activation and", "total epochs to run') parser.add_argument('--start_epoch', default=None, type=int, metavar='N', help='manual epoch", "default='rgb_roi') parser.add_argument('--model_coord', default='interaction') parser.add_argument('--model_fusion', default='concat_fusion') parser.add_argument('--fusion_function', default='fused_sum', type=str, help='function for", "search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg, search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg) return def save_checkpoint(state, is_best, filename): torch.save(state,", "= output_coord.view((-1, len(val_loader.dataset.classes))) # detach the computation graph, avoid the", "losses.update(loss_factual.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # refresh the optimizer", "model_list, fusion_function, criterion, class_to_idx=dataset_val.classes_dict) return # Counterfactual inference by trying", "model') parser.add_argument('--epochs', default=30, type=int, metavar='N', help='number of total epochs to", "AverageMeter from data_utils.causal_data_loader_frames import VideoFolder from utils import save_results from", "# create training and validation dataset dataset_train = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes,", "loading workers (default: 4)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model", "meta data') parser.add_argument('--json_file_labels', 
type=str, default='../data/dataset_splits/compositional/labels.json', help='path to the json file", "adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None): \"\"\"Sets the learning rate to the", "torch.load(args.resume_vision) if args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss =", "for image-based features') parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension", "with train video meta data') parser.add_argument('--json_data_val', type=str, default='../data/dataset_splits/compositional/validation.json', help='path to", "best_loss = checkpoint['best_loss'] vision_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\"", "fusion_model.eval() end = time.time() for i, (global_img_tensors, box_tensors, box_categories, video_label)", "momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_coord = torch.optim.SGD(filter(lambda p: p.requires_grad, coord_model.parameters()), momentum=args.momentum,", "for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader): # compute", "{acc_top5.val:.1f} ({acc_top5.avg:.1f})\\t'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5, )) if", "in the model') parser.add_argument('--epochs', default=30, type=int, metavar='N', help='number of total", "filename + '_best.pth.tar') def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None): \"\"\"Sets the", "model_list = [vision_model, coord_model, fusion_model] optimizer_vision = torch.optim.SGD(filter(lambda p: p.requires_grad,", "os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname))) save_checkpoint( { 'epoch': epoch + 1, 'state_dict':", ">= 30 and (epoch + 1) % args.search_stride == 0:", "torch.nn.CrossEntropyLoss() search_list = np.linspace(0.0, 1.0, 11) # factual inference (vanilla", "0 cudnn.benchmark = True # create 
training and validation dataset", "resume vision model from a checkpoint if args.resume_vision: assert os.path.isfile(args.resume_vision),", "branch_name == 'coord': for param_group in optimizer.param_groups: param_group['lr'] = lr", "Counterfactual inference by trying a list of hyperparameter if args.cf_inference_group:", "dataset_val, drop_last=True, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=False ) model_list = [vision_model,", "acc5 = accuracy(output.cpu(), video_label, topk=(1, 5)) if args.evaluate: logits_matrix.append(output.cpu().data.numpy()) targets_list.append(video_label.cpu().numpy())", "logits_matrix = np.concatenate(logits_matrix) targets_list = np.concatenate(targets_list) save_results(logits_matrix, targets_list, class_to_idx, args)", "({acc_top1_10.avg:.1f})'.format( i, len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'], acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'], acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0'])) for", "fusion function!') fusion_function = fusion_func() if args.parallel: vision_model = torch.nn.DataParallel(vision_model).cuda()", "checkpoint['epoch'] best_loss = checkpoint['best_loss'] fusion_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch", "parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')", "found at '{}'\".format(args.resume_vision) print(\"=> loading checkpoint '{}'\".format(args.resume_vision)) checkpoint = torch.load(args.resume_vision)", "multi GPUs') parser.add_argument('--gpu_index', type=str, default='0, 1, 2, 3', help='the index", "the accuracy and loss losses.update(loss_factual.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0))", 
"coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # detach", "'{}_{}'.format(args.model_vision, args.logname))) save_checkpoint( { 'epoch': epoch + 1, 'state_dict': coord_model.state_dict(),", "criterion(output_vision, video_label.long().cuda()) loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_fusion, video_label.long().cuda())", "elif args.model_vision == 'rgb_roi': from model.model_lib import BboxVisualModel as RGBModel", "\"No checkpoint found at '{}'\".format(args.resume_coord) print(\"=> loading checkpoint '{}'\".format(args.resume_coord)) checkpoint", "= model_list # load four optimizers, including the one designed", "search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg) return def save_checkpoint(state, is_best, filename): torch.save(state, filename +", "AverageMeter() logits_matrix = [] targets_list = [] # unpack three", "decay (default: 1e-4)') parser.add_argument('--clip_gradient', '-cg', default=5, type=float, metavar='W', help='gradient norm", "batch_time = AverageMeter() search_length = len(search_list) search_dict = {} for", "metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--resume_coord', default='', type=str,", "of class in the model') parser.add_argument('--epochs', default=30, type=int, metavar='N', help='number", "and training related arguments parser.add_argument('--model_vision', default='rgb_roi') parser.add_argument('--model_coord', default='interaction') parser.add_argument('--model_fusion', default='concat_fusion')", "train for one epoch train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion)", "'vision': for param_group in optimizer.param_groups: param_group['lr'] = lr * 0.8", "search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() [vision_model, coord_model, fusion_model] = 
model_list #", "json file with validation video meta data') parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json',", "at '{}'\".format(args.resume_fusion) print(\"=> loading checkpoint '{}'\".format(args.resume_fusion)) checkpoint = torch.load(args.resume_fusion) if", "the model') parser.add_argument('--epochs', default=30, type=int, metavar='N', help='number of total epochs", "three branch activation results acc1, acc5 = accuracy(output_factual.cpu(), video_label, topk=(1,", "coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # obtain", "model_list # load four optimizers, including the one designed for", "= VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_val, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=True, if_augment=True, )", "loss losses.update(loss.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # measure elapsed", "to run') parser.add_argument('--start_epoch', default=None, type=int, metavar='N', help='manual epoch number (useful", "= fusion_function(output_vision, output_coord, output_fusion) # counterfactual inference output_vision_subtrahend = output_vision", "save_results(logits_matrix, targets_list, class_to_idx, args) return losses.avg def cf_inference_group(val_loader, model_list, fusion_function,", "param_group['lr'] = lr * 0.8 elif branch_name == 'coord': for", "epoch, lr_steps, branch_name=None): \"\"\"Sets the learning rate to the initial", "and validation dataset dataset_train = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_train, file_labels=args.json_file_labels, frames_duration=args.num_frames,", "# create fusion model if args.model_fusion == 'concat_fusion': 
from model.model_lib", "checkpoint['best_loss'] vision_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_vision, checkpoint['epoch']))", "= torch.load(args.resume_coord) if args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss", "'--workers', default=4, type=int, metavar='N', help='number of data loading workers (default:", "1.0, 11) # factual inference (vanilla test stage) if args.evaluate:", "file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=True, if_augment=True, ) # create training and", "best_loss = checkpoint['best_loss'] fusion_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\"", "acc_top1=acc_top1, acc_top5=acc_top5)) def validate(val_loader, model_list, fusion_function, criterion, epoch=None, class_to_idx=None): batch_time", "# load model branch vision_model = RGBModel(args) coord_model = BboxModel(args)", "loaded!!') elif args.fusion_function == 'naive_sum': from fusion_function import naivesum as", "1, 'state_dict': vision_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname)))", "= torch.nn.DataParallel(coord_model).cuda() fusion_model = torch.nn.DataParallel(fusion_model).cuda() else: vision_model = vision_model.cuda() coord_model", "to the json file with train video meta data') parser.add_argument('--json_data_val',", "= lr * 0.8 elif branch_name == 'coord': for param_group", "metavar='M', help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float, metavar='W', help='weight decay (default:", "'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname))) save_checkpoint( { 'epoch':", "j in range(search_length): weight = search_list[j] output_debiased = output_factual -", "specified values of k\"\"\" with torch.no_grad(): maxk = max(topk) 
batch_size", "acc5 = accuracy(output_debiased.cpu(), video_label, topk=(1, 5)) search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), global_img_tensors.size(0)) search_dict['acc_5_alpha_{}'.format(round(search_list[j],", "model if args.model_coord == 'interaction': from model.model_lib import BboxInteractionLatentModel as", "result from fusion_branch or value after fusion function output =", "p: p.requires_grad, fusion_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_list = [optimizer_vision, optimizer_coord,", "GPUs') parser.add_argument('--gpu_index', type=str, default='0, 1, 2, 3', help='the index of", "is None: args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] fusion_model.load_state_dict(checkpoint['state_dict']) print(\"=>", "fusion_function, optimizer_list, epoch, criterion) if (epoch+1) >= 30 and (epoch", "# fuse three outputs output_factual = fusion_function(output_vision, output_coord, output_fusion) #", "weight_decay=args.weight_decay) optimizer_fusion = torch.optim.SGD(filter(lambda p: p.requires_grad, fusion_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)", "elif args.fusion_function == 'naive_sum': from fusion_function import naivesum as fusion_func", "(epoch {})\" .format(args.resume_coord, checkpoint['epoch'])) if args.resume_fusion: assert os.path.isfile(args.resume_fusion), \"No checkpoint", "lr=args.lr, weight_decay=args.weight_decay) optimizer_fusion = torch.optim.SGD(filter(lambda p: p.requires_grad, fusion_model.parameters()), momentum=args.momentum, lr=args.lr,", "(default: 5)') parser.add_argument('--search_stride', type=int, default=5, help='test performance every n strides')", "parser = argparse.ArgumentParser(description='Counterfactual CAR') # Path, dataset and log related", "= lr def accuracy(output, target, topk=(1,)): \"\"\"Computes the accuracy over", "video_label) in enumerate(val_loader): 
# compute output with torch.no_grad(): # factual", "maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk,", "= torch.nn.DataParallel(vision_model).cuda() coord_model = torch.nn.DataParallel(coord_model).cuda() fusion_model = torch.nn.DataParallel(fusion_model).cuda() else: vision_model", "# create vision model if args.model_vision == 'global_i3d': from model.model_lib", "loss of output_fusion(fused, obtained from the fusion_function) loss_vision = criterion(output_vision,", "type=float, metavar='LR', help='initial learning rate') parser.add_argument('--lr_steps', default=[24, 35, 45], type=float,", "import random from collections import OrderedDict import torch import torch.backends.cudnn", "= vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, len(val_loader.dataset.classes))) output_coord,", "checkpoint = torch.load(args.resume_fusion) if args.start_epoch is None: args.start_epoch = checkpoint['epoch']", "target, topk=(1,)): \"\"\"Computes the accuracy over the k top predictions", "p: p.requires_grad, coord_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_fusion = torch.optim.SGD(filter(lambda p:", "function!') fusion_function = fusion_func() if args.parallel: vision_model = torch.nn.DataParallel(vision_model).cuda() coord_model", "help='whether or not train with multi GPUs') parser.add_argument('--gpu_index', type=str, default='0,", "AverageMeter() data_time = AverageMeter() losses = AverageMeter() acc_top1 = AverageMeter()", "correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in", "else: print('no such a fusion model!') # load model branch", "def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None): \"\"\"Sets the learning rate to", "feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, 
len(train_loader.dataset.classes)))", "class in the model') parser.add_argument('--epochs', default=30, type=int, metavar='N', help='number of", "utf-8 -*- import argparse import os import shutil import time", "import numpy as np import random from collections import OrderedDict", "box_categories, video_label) in enumerate(train_loader): data_time.update(time.time() - end) # obtain the", "coordinate model!\") # create fusion model if args.model_fusion == 'concat_fusion':", "num_boxes=args.num_boxes, file_input=args.json_data_train, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=False, if_augment=True, ) dataset_val =", "({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\\t'.format( i, len(val_loader), batch_time=batch_time,", "class_to_idx=dataset_val.classes_dict) return print('training begin...') for epoch in tqdm(range(args.start_epoch, args.epochs)): adjust_learning_rate(optimizer_vision,", "= AverageMeter() acc_top5 = AverageMeter() # load three model branches", "time.time() if i % args.print_freq == 0 or i +", "global_img_tensors.size(0)) # refresh the optimizer optimizer_vision.zero_grad() optimizer_coord.zero_grad() optimizer_fusion.zero_grad() loss =", "loss_factual loss.backward() if args.clip_gradient is not None: torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient) #", "logits_matrix.append(output.cpu().data.numpy()) targets_list.append(video_label.cpu().numpy()) # measure accuracy and record loss losses.update(loss.item(), global_img_tensors.size(0))", "({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\\t'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5,", "default=0.0001, type=float, metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--clip_gradient', '-cg', default=5,", "= torch.utils.data.DataLoader( dataset_train, 
batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True ) val_loader", "= True # create training and validation dataset dataset_train =", "vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, len(train_loader.dataset.classes))) # obtain", "type=int, default=5, help='test performance every n strides') # train mode,", "coord_model.eval() fusion_model.eval() end = time.time() for i, (global_img_tensors, box_tensors, box_categories,", "train mode, hardware setting and others related arguments parser.add_argument('-j', '--workers',", "switch to train mode vision_model.train() coord_model.train() fusion_model.train() end = time.time()", "the learning rate to the initial LR decayed by 10\"\"\"", "default=512, type=int, metavar='N', help='intermediate feature dimension for coord-based features') parser.add_argument('--size',", "print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_fusion, checkpoint['epoch'])) if args.start_epoch", "main(): global args, best_loss args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index", "filename): torch.save(state, filename + '_latest.pth.tar') if is_best: shutil.copyfile(filename + '_latest.pth.tar',", "fusion_func() if args.parallel: vision_model = torch.nn.DataParallel(vision_model).cuda() coord_model = torch.nn.DataParallel(coord_model).cuda() fusion_model", "time.time() for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader): #", "value after fusion function output = output_factual loss = loss_vision", "checkpoint found at '{}'\".format(args.resume_vision) print(\"=> loading checkpoint '{}'\".format(args.resume_vision)) checkpoint =", "lr = args.lr * decay if branch_name == 'vision': for", "default='0, 1, 2, 3', help='the index of gpu you want", "on restarts)') parser.add_argument('--batch_size', '-b', default=16, type=int, metavar='N', 
help='mini-batch size') parser.add_argument('--lr',", "correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res if", "'concat_fusion': from model.model_lib import ConcatFusionModel as FusionModel print('concat_fusion loaded!!') else:", "os.path.isfile(args.resume_coord), \"No checkpoint found at '{}'\".format(args.resume_coord) print(\"=> loading checkpoint '{}'\".format(args.resume_coord))", "to the initial LR decayed by 10\"\"\" decay = 0.1", "action='store_true', help='evaluate model on validation set') parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference", "branch_name == 'vision': for param_group in optimizer.param_groups: param_group['lr'] = lr", "== 0 or i + 1 == len(val_loader): print('Cf-Inference: [{0}/{1}]\\t'", "accuracy of the sum of three branch activation results acc1,", "lr=args.lr, weight_decay=args.weight_decay) optimizer_coord = torch.optim.SGD(filter(lambda p: p.requires_grad, coord_model.parameters()), momentum=args.momentum, lr=args.lr,", "= model_list # switch to evaluate mode vision_model.eval() coord_model.eval() fusion_model.eval()", "args.parallel: vision_model = torch.nn.DataParallel(vision_model).cuda() coord_model = torch.nn.DataParallel(coord_model).cuda() fusion_model = torch.nn.DataParallel(fusion_model).cuda()", "pred = output.topk(maxk, 1, True, True) pred = pred.t() correct", "np.array(lr_steps))) lr = args.lr * decay if branch_name == 'vision':", "args.logname))) def train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion): global args", "in optimizer.param_groups: param_group['lr'] = lr elif branch_name == 'fusion': for", "filename + '_latest.pth.tar') if is_best: shutil.copyfile(filename + '_latest.pth.tar', filename +", "create training and validation loader train_loader = torch.utils.data.DataLoader( dataset_train, batch_size=args.batch_size,", "for param_group in 
optimizer.param_groups: param_group['lr'] = lr elif branch_name ==", "Path, dataset and log related arguments parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/', help='path", "# create the fusion function for the activation of three", "help='num of class in the model') parser.add_argument('--epochs', default=30, type=int, metavar='N',", "statistic result from fusion_branch or value after fusion function output", "help='epochs to decay learning rate by 10') parser.add_argument('--momentum', default=0.9, type=float,", "input size') parser.add_argument('--num_boxes', default=4, type=int, help='num of boxes for each", "as RGBModel print('global_i3d loaded!!') elif args.model_vision == 'rgb_roi': from model.model_lib", "is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname))) save_checkpoint( { 'epoch': epoch + 1,", "a checkpoint if args.resume_vision: assert os.path.isfile(args.resume_vision), \"No checkpoint found at", "({batch_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\\t'.format(", "computation graph, avoid the gradient confusion feature_vision_detached = feature_vision.detach() feature_coord_detached", "type=int, metavar='N', help='primary image input size') parser.add_argument('--num_boxes', default=4, type=int, help='num", "fusion branch output_fusion = fusion_model(feature_vision.cuda(), feature_coord.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes)))", "at '{}'\".format(args.resume_coord) print(\"=> loading checkpoint '{}'\".format(args.resume_coord)) checkpoint = torch.load(args.resume_coord) if", "lr_steps, branch_name=None): \"\"\"Sets the learning rate to the initial LR", "os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname))) save_checkpoint( { 'epoch': epoch + 1, 'state_dict':", "branches [vision_model, coord_model, 
fusion_model] = model_list # load four optimizers,", "fuse three outputs output_factual = fusion_function(output_vision, output_coord, output_fusion) # counterfactual", "= output_fusion.view((-1, len(train_loader.dataset.classes))) output_factual = fusion_function(output_vision, output_coord, output_fusion) # loss_fusion", "'_best.pth.tar') def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None): \"\"\"Sets the learning rate", "(default: 1e-4)') parser.add_argument('--clip_gradient', '-cg', default=5, type=float, metavar='W', help='gradient norm clipping", "labels') parser.add_argument('--dataset', default='smth_smth', help='which dataset to train') parser.add_argument('--logname', default='my_method', help='name", "= criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_fusion, video_label.long().cuda()) loss_factual = criterion(output_factual,", "i + 1 == len(val_loader): print('Test: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'", "[optimizer_vision, optimizer_coord, optimizer_fusion] criterion = torch.nn.CrossEntropyLoss() search_list = np.linspace(0.0, 1.0,", "feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse three outputs output_factual", "loading checkpoint '{}'\".format(args.resume_fusion)) checkpoint = torch.load(args.resume_fusion) if args.start_epoch is None:", "= output_coord.view((-1, len(train_loader.dataset.classes))) # detach the computation graph, avoid the", "optimizer_list # switch to train mode vision_model.train() coord_model.train() fusion_model.train() end", "args.model_vision == 'global_i3d': from model.model_lib import VideoGlobalModel as RGBModel print('global_i3d", "{data_time.val:.3f} ({data_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f}", "create the fusion function for the activation of three branches", 
"parser.add_argument('--lr', '--learning-rate', default=0.01, type=float, metavar='LR', help='initial learning rate') parser.add_argument('--lr_steps', default=[24,", "box_categories, video_label) in enumerate(val_loader): # compute output with torch.no_grad(): output_vision,", "box_categories, video_label) in enumerate(val_loader): # compute output with torch.no_grad(): #", "fusion_function(output_vision_subtrahend, torch.tensor(0.0), torch.tensor(0.0)) for j in range(search_length): weight = search_list[j]", "top predictions for the specified values of k\"\"\" with torch.no_grad():", "print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg, search_dict['acc_5_alpha_{}'.format(round(search_list[k], 1))].avg) return def save_checkpoint(state, is_best, filename):", "help='manual epoch number (useful on restarts)') parser.add_argument('--batch_size', '-b', default=16, type=int,", "record the accuracy and loss losses.update(loss_factual.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(),", "fusion_model(feature_vision.cuda(), feature_coord.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse three outputs", "size') parser.add_argument('--lr', '--learning-rate', default=0.01, type=float, metavar='LR', help='initial learning rate') parser.add_argument('--lr_steps',", "= checkpoint['epoch'] best_loss = checkpoint['best_loss'] coord_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}'", "- end) end = time.time() if i % args.print_freq ==", "== 0: loss = validate(val_loader, model_list, fusion_function, criterion, epoch=epoch, class_to_idx=dataset_val.classes_dict)", "** (sum(epoch >= np.array(lr_steps))) lr = args.lr * decay if", "optimizer optimizer_vision.zero_grad() optimizer_coord.zero_grad() optimizer_fusion.zero_grad() loss = loss_vision + loss_coord +", "acc_top1_00=search_dict['acc_1_alpha_0.0'], 
acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'], acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0'])) for k in range(search_length): print(search_list[k],", "shutil.copyfile(filename + '_latest.pth.tar', filename + '_best.pth.tar') def adjust_learning_rate(optimizer, epoch, lr_steps,", "adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps, 'fusion') # train for one epoch train(train_loader,", "loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_factual, video_label.long().cuda()) # statistic", "train mode vision_model.train() coord_model.train() fusion_model.train() end = time.time() for i,", "checkpoint '{}' (epoch {})\" .format(args.resume_coord, checkpoint['epoch'])) if args.resume_fusion: assert os.path.isfile(args.resume_fusion),", "= accuracy(output.cpu(), video_label, topk=(1, 5)) if args.evaluate: logits_matrix.append(output.cpu().data.numpy()) targets_list.append(video_label.cpu().numpy()) #", "torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred =", "= loss < best_loss best_loss = min(loss, best_loss) save_checkpoint( {", "parser.add_argument('--model_coord', default='interaction') parser.add_argument('--model_fusion', default='concat_fusion') parser.add_argument('--fusion_function', default='fused_sum', type=str, help='function for fusing", "== 'vision': for param_group in optimizer.param_groups: param_group['lr'] = lr *", "num_workers=args.workers, drop_last=True, pin_memory=True ) val_loader = torch.utils.data.DataLoader( dataset_val, drop_last=True, batch_size=args.batch_size,", "video_label.long().cuda()) loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_factual, video_label.long().cuda()) #", "create fusion model if args.model_fusion == 'concat_fusion': from model.model_lib import", "args.fusion_function == 'fused_sum': from fusion_function import logsigsum as 
fusion_func print('fused_sum", "weight_decay=args.weight_decay) optimizer_list = [optimizer_vision, optimizer_coord, optimizer_fusion] criterion = torch.nn.CrossEntropyLoss() search_list", "coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(train_loader.dataset.classes))) # detach", "import os import shutil import time import numpy as np", "rate to the initial LR decayed by 10\"\"\" decay =", "0.8 elif branch_name == 'coord': for param_group in optimizer.param_groups: param_group['lr']", "= checkpoint['best_loss'] fusion_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_fusion,", "'coord') adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps, 'fusion') # train for one epoch", "output_fusion = output_fusion.view((-1, len(train_loader.dataset.classes))) output_factual = fusion_function(output_vision, output_coord, output_fusion) #", "vision model from a checkpoint if args.resume_vision: assert os.path.isfile(args.resume_vision), \"No", "else: print('no such a fusion function!') fusion_function = fusion_func() if", "output with torch.no_grad(): output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label)", "norm clipping (default: 5)') parser.add_argument('--search_stride', type=int, default=5, help='test performance every", "model.model_lib import BboxVisualModel as RGBModel print('rgb_roi loaded!!') else: print(\"no such", "(default: 20)') parser.add_argument('--ckpt', default='./ckpt', help='folder to output checkpoints') parser.add_argument('--resume_vision', default='',", "enumerate(val_loader): # compute output with torch.no_grad(): # factual inference output_vision,", "print(\"=> loading checkpoint '{}'\".format(args.resume_coord)) checkpoint = torch.load(args.resume_coord) if args.start_epoch is", "four optimizers, including the one designed for uniform 
assumption [optimizer_vision,", "default=None, type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('--batch_size',", "[optimizer_vision, optimizer_coord, optimizer_fusion] = optimizer_list # switch to train mode", "np.concatenate(targets_list) save_results(logits_matrix, targets_list, class_to_idx, args) return losses.avg def cf_inference_group(val_loader, model_list,", "fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(train_loader.dataset.classes))) output_factual = fusion_function(output_vision, output_coord,", "0: loss = validate(val_loader, model_list, fusion_function, criterion, epoch=epoch, class_to_idx=dataset_val.classes_dict) else:", "activation results acc1, acc5 = accuracy(output_factual.cpu(), video_label, topk=(1, 5)) #", "branch_name=None): \"\"\"Sets the learning rate to the initial LR decayed", "cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=None): batch_time = AverageMeter() search_length =", "coordinate features from coordinate branch output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(),", "is_best: shutil.copyfile(filename + '_latest.pth.tar', filename + '_best.pth.tar') def adjust_learning_rate(optimizer, epoch,", "checkpoint['epoch'] best_loss = checkpoint['best_loss'] coord_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch", "= vision_model.cuda() coord_model = coord_model.cuda() fusion_model = fusion_model.cuda() # optionally", "torch.optim.SGD(filter(lambda p: p.requires_grad, vision_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_coord = torch.optim.SGD(filter(lambda", "and record loss losses.update(loss.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) #", "# obtain the 
activation and vision features from vision branch", "'epoch': epoch + 1, 'state_dict': fusion_model.state_dict(), 'best_loss': best_loss, }, is_best,", "help='num of boxes for each image') parser.add_argument('--num_frames', default=16, type=int, help='num", "output_fusion(fused, obtained from the fusion_function) loss_vision = criterion(output_vision, video_label.long().cuda()) loss_coord", "data_time=data_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5)) def validate(val_loader, model_list, fusion_function, criterion, epoch=None,", "default=16, type=int, metavar='N', help='mini-batch size') parser.add_argument('--lr', '--learning-rate', default=0.01, type=float, metavar='LR',", "type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--start_epoch', default=None,", "= accuracy(output_factual.cpu(), video_label, topk=(1, 5)) # record the accuracy and", "parser.add_argument('--start_epoch', default=None, type=int, metavar='N', help='manual epoch number (useful on restarts)')", "hardware setting and others related arguments parser.add_argument('-j', '--workers', default=4, type=int,", "fusion_function, criterion, class_to_idx=dataset_val.classes_dict) return # Counterfactual inference by trying a", "# Measure the accuracy of the sum of three branch", "print('Test: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f}", "model if args.model_fusion == 'concat_fusion': from model.model_lib import ConcatFusionModel as", "1))] = AverageMeter() [vision_model, coord_model, fusion_model] = model_list # switch", "branch output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision =", "len(val_loader.dataset.classes))) output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord =", "args.start_epoch = checkpoint['epoch'] 
best_loss = checkpoint['best_loss'] coord_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint", "checkpoint = torch.load(args.resume_coord) if args.start_epoch is None: args.start_epoch = checkpoint['epoch']", "= AverageMeter() search_length = len(search_list) search_dict = {} for i", "default='smth_smth', help='which dataset to train') parser.add_argument('--logname', default='my_method', help='name of the", "Measure the accuracy of the sum of three branch activation", "= criterion(output_factual, video_label.long().cuda()) # Measure the accuracy of the sum", "from utils import save_results from tqdm import tqdm parser =", "optimizer_vision.step() optimizer_coord.step() optimizer_fusion.step() batch_time.update(time.time() - end) end = time.time() if", "= search_list[j] output_debiased = output_factual - output_counterfactual * weight acc1,", "i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5)) def validate(val_loader, model_list,", "feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(train_loader.dataset.classes)))", "checkpoint = torch.load(args.resume_vision) if args.start_epoch is None: args.start_epoch = checkpoint['epoch']", "coord_model.train() fusion_model.train() end = time.time() for i, (global_img_tensors, box_tensors, box_categories,", "# factual inference output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label)", "as cudnn from callbacks import AverageMeter from data_utils.causal_data_loader_frames import VideoFolder", "optimizer_coord = torch.optim.SGD(filter(lambda p: p.requires_grad, coord_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_fusion", "criterion, epoch=epoch, class_to_idx=dataset_val.classes_dict) else: loss = 100 # remember best", 
"default='./ckpt', help='folder to output checkpoints') parser.add_argument('--resume_vision', default='', type=str, metavar='PATH', help='path", "parser.add_argument('--resume_coord', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')", "output_vision = output_vision.view((-1, len(train_loader.dataset.classes))) # obtain the activation and coordinate", "len(search_list) search_dict = {} for i in range(search_length): search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))]", "checkpoint['epoch'])) if args.start_epoch is None: args.start_epoch = 0 cudnn.benchmark =", "default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')", "output_debiased = output_factual - output_counterfactual * weight acc1, acc5 =", "vision features from vision branch output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories,", "coord_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_coord, checkpoint['epoch'])) if", "epoch + 1, 'state_dict': vision_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt,", "help='name of the experiment for checkpoints and logs') parser.add_argument('--print_freq', '-p',", "args.clip_gradient is not None: torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient) # update the parameter", "os.path.isfile(args.resume_vision), \"No checkpoint found at '{}'\".format(args.resume_vision) print(\"=> loading checkpoint '{}'\".format(args.resume_vision))", "from fusion_function import logsigsum as fusion_func print('fused_sum loaded!!') elif args.fusion_function", "(global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader): # compute output with", "torch.nn.DataParallel(vision_model).cuda() coord_model = torch.nn.DataParallel(coord_model).cuda() fusion_model = torch.nn.DataParallel(fusion_model).cuda() else: vision_model =", "is not 
None: torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient) # update the parameter optimizer_vision.step()", "model.model_lib import VideoGlobalModel as RGBModel print('global_i3d loaded!!') elif args.model_vision ==", "optimizer_list, epoch, criterion) if (epoch+1) >= 30 and (epoch +", "fusion branch output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(train_loader.dataset.classes)))", "'{}'\".format(args.resume_vision)) checkpoint = torch.load(args.resume_vision) if args.start_epoch is None: args.start_epoch =", "p.requires_grad, vision_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_coord = torch.optim.SGD(filter(lambda p: p.requires_grad,", "criterion(output_fusion, video_label.long().cuda()) loss_factual = criterion(output_factual, video_label.long().cuda()) # Measure the accuracy", "elif branch_name == 'coord': for param_group in optimizer.param_groups: param_group['lr'] =", "from a checkpoint if args.resume_vision: assert os.path.isfile(args.resume_vision), \"No checkpoint found", "else: for param_group in optimizer.param_groups: param_group['lr'] = lr def accuracy(output,", "coord_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_fusion = torch.optim.SGD(filter(lambda p: p.requires_grad, fusion_model.parameters()),", "run') parser.add_argument('--start_epoch', default=None, type=int, metavar='N', help='manual epoch number (useful on", "loaded!!') else: print('no such a fusion model!') # load model", "= AverageMeter() logits_matrix = [] targets_list = [] # unpack", "model_list, fusion_function, search_list, class_to_idx=None): batch_time = AverageMeter() search_length = len(search_list)", "to output checkpoints') parser.add_argument('--resume_vision', default='', type=str, metavar='PATH', help='path to latest", "begin...') for epoch in 
tqdm(range(args.start_epoch, args.epochs)): adjust_learning_rate(optimizer_vision, epoch, args.lr_steps, 'vision')", "output_vision = output_vision.view((-1, len(val_loader.dataset.classes))) output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(),", "mode vision_model.eval() coord_model.eval() fusion_model.eval() end = time.time() for i, (global_img_tensors,", "(useful on restarts)') parser.add_argument('--batch_size', '-b', default=16, type=int, metavar='N', help='mini-batch size')", "output_factual loss = loss_vision acc1, acc5 = accuracy(output.cpu(), video_label, topk=(1,", "lr elif branch_name == 'fusion': for param_group in optimizer.param_groups: param_group['lr']", "import BboxVisualModel as RGBModel print('rgb_roi loaded!!') else: print(\"no such a", "such a coordinate model!\") # create fusion model if args.model_fusion", "({acc_top5.avg:.1f})'.format( epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5)) def", "len(val_loader.dataset.classes))) # obtain the activation of fusion branch output_fusion =", "11) # factual inference (vanilla test stage) if args.evaluate: validate(val_loader,", "AverageMeter() search_length = len(search_list) search_dict = {} for i in", "model on validation set') parser.add_argument('--parallel', default=True, type=bool, help='whether or not", "p: p.requires_grad, vision_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_coord = torch.optim.SGD(filter(lambda p:", "type=float, metavar='M', help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float, metavar='W', help='weight decay", "optionally resume coord model from a checkpoint if args.resume_coord: assert", "output_vision.view((-1, len(val_loader.dataset.classes))) output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) 
output_coord", "model branch vision_model = RGBModel(args) coord_model = BboxModel(args) fusion_model =", "criterion): global args batch_time = AverageMeter() data_time = AverageMeter() losses", "if args.resume_coord: assert os.path.isfile(args.resume_coord), \"No checkpoint found at '{}'\".format(args.resume_coord) print(\"=>", "update the parameter optimizer_vision.step() optimizer_coord.step() optimizer_fusion.step() batch_time.update(time.time() - end) end", "os.path.isfile(args.resume_fusion), \"No checkpoint found at '{}'\".format(args.resume_fusion) print(\"=> loading checkpoint '{}'\".format(args.resume_fusion))", "torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient) # update the parameter optimizer_vision.step() optimizer_coord.step() optimizer_fusion.step() batch_time.update(time.time()", "output_coord.view((-1, len(val_loader.dataset.classes))) # obtain the activation of fusion branch output_fusion", "'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format( i, len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'], acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'], acc_top1_08=search_dict['acc_1_alpha_0.8'],", "# model, image&feature dim and training related arguments parser.add_argument('--model_vision', default='rgb_roi')", "}, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname))) save_checkpoint( { 'epoch': epoch +", "default=20, type=int, metavar='N', help='print frequency (default: 20)') parser.add_argument('--ckpt', default='./ckpt', help='folder", "# detach the computation graph, avoid the gradient confusion feature_vision_detached", "'{}'\".format(args.resume_coord) print(\"=> loading checkpoint '{}'\".format(args.resume_coord)) checkpoint = torch.load(args.resume_coord) if args.start_epoch", "type=float, metavar='W', help='gradient norm clipping (default: 5)') parser.add_argument('--search_stride', type=int, default=5,", 
"default=4, type=int, help='num of boxes for each image') parser.add_argument('--num_frames', default=16,", "output_coord, output_fusion) # counterfactual inference output_vision_subtrahend = output_vision output_counterfactual =", "fusion_function, criterion, epoch=None, class_to_idx=None): batch_time = AverageMeter() losses = AverageMeter()", "= np.concatenate(targets_list) save_results(logits_matrix, targets_list, class_to_idx, args) return losses.avg def cf_inference_group(val_loader,", "print('training begin...') for epoch in tqdm(range(args.start_epoch, args.epochs)): adjust_learning_rate(optimizer_vision, epoch, args.lr_steps,", "acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0'])) for k in range(search_length): print(search_list[k], search_dict['acc_1_alpha_{}'.format(round(search_list[k], 1))].avg, search_dict['acc_5_alpha_{}'.format(round(search_list[k],", "args.lr_steps, 'fusion') # train for one epoch train(train_loader, model_list, fusion_function,", "search_list, class_to_idx=dataset_val.classes_dict) return print('training begin...') for epoch in tqdm(range(args.start_epoch, args.epochs)):", "box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(train_loader.dataset.classes))) # detach the", "vision_model = vision_model.cuda() coord_model = coord_model.cuda() fusion_model = fusion_model.cuda() #", "search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time batch_time.update(time.time() - end)", "= [] targets_list = [] # unpack three models [vision_model,", "batch_time=batch_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5, )) if args.evaluate: logits_matrix = np.concatenate(logits_matrix)", "param_group['lr'] = lr def accuracy(output, target, topk=(1,)): \"\"\"Computes the accuracy", "if is_best: shutil.copyfile(filename + '_latest.pth.tar', filename + '_best.pth.tar') def 
adjust_learning_rate(optimizer,", "{acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\\t' 'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format( i, len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'], acc_top1_02=search_dict['acc_1_alpha_0.2'],", "train') parser.add_argument('--logname', default='my_method', help='name of the experiment for checkpoints and", "vision_model.state_dict(), 'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname))) save_checkpoint( {", "= fusion_model(feature_vision.cuda(), feature_coord.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse three", "== 'global_i3d': from model.model_lib import VideoGlobalModel as RGBModel print('global_i3d loaded!!')", "branch output_fusion = fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(train_loader.dataset.classes))) output_factual", "# obtain the activation and coordinate features from coordinate branch", "(default: none)') parser.add_argument('--resume_coord', default='', type=str, metavar='PATH', help='path to latest checkpoint", "criterion, class_to_idx=dataset_val.classes_dict) return # Counterfactual inference by trying a list", "counterfactual inference output_vision_subtrahend = output_vision output_counterfactual = fusion_function(output_vision_subtrahend, torch.tensor(0.0), torch.tensor(0.0))", "vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, len(val_loader.dataset.classes))) output_coord, feature_coord", "= time.time() for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(train_loader):", "acc5 = accuracy(output_factual.cpu(), video_label, topk=(1, 5)) # record the accuracy", "1))].avg) return def save_checkpoint(state, is_best, filename): torch.save(state, filename + '_latest.pth.tar')", 
"as fusion_func print('fused_sum loaded!!') elif args.fusion_function == 'naive_sum': from fusion_function", "of fusion branch output_fusion = fusion_model(feature_vision.cuda(), feature_coord.cuda()) output_fusion = output_fusion.view((-1,", "in range(search_length): weight = search_list[j] output_debiased = output_factual - output_counterfactual", "loading checkpoint '{}'\".format(args.resume_coord)) checkpoint = torch.load(args.resume_coord) if args.start_epoch is None:", "box_tensors, box_categories, video_label) in enumerate(val_loader): # compute output with torch.no_grad():", "decayed by 10\"\"\" decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))", "validate(val_loader, model_list, fusion_function, criterion, epoch=epoch, class_to_idx=dataset_val.classes_dict) else: loss = 100", "losses = AverageMeter() acc_top1 = AverageMeter() acc_top5 = AverageMeter() logits_matrix", "'-cg', default=5, type=float, metavar='W', help='gradient norm clipping (default: 5)') parser.add_argument('--search_stride',", "fusion model!') # load model branch vision_model = RGBModel(args) coord_model", "= output_factual - output_counterfactual * weight acc1, acc5 = accuracy(output_debiased.cpu(),", "epoch, criterion) if (epoch+1) >= 30 and (epoch + 1)", "to train mode vision_model.train() coord_model.train() fusion_model.train() end = time.time() for", "uniform assumption [optimizer_vision, optimizer_coord, optimizer_fusion] = optimizer_list # switch to", "keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res if __name__ == '__main__':", "from model.model_lib import VideoGlobalModel as RGBModel print('global_i3d loaded!!') elif args.model_vision", "of three branches if args.fusion_function == 'fused_sum': from fusion_function import", "dimension for image-based features') parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N', help='intermediate feature", "output_vision output_counterfactual = fusion_function(output_vision_subtrahend, 
torch.tensor(0.0), torch.tensor(0.0)) for j in range(search_length):", "the folder with frames') parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json', help='path to the", "type=int, help='num of frames for the model') parser.add_argument('--num_classes', default=174, type=int,", "fusion_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_list = [optimizer_vision, optimizer_coord, optimizer_fusion] criterion", "best_loss = min(loss, best_loss) save_checkpoint( { 'epoch': epoch + 1,", "{ 'epoch': epoch + 1, 'state_dict': vision_model.state_dict(), 'best_loss': best_loss, },", "fusion_function) loss_vision = criterion(output_vision, video_label.long().cuda()) loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion", "criterion(output_factual, video_label.long().cuda()) # Measure the accuracy of the sum of", "model branches [vision_model, coord_model, fusion_model] = model_list # load four", "validation video meta data') parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json', help='path to the", "param_group in optimizer.param_groups: param_group['lr'] = lr * 0.8 elif branch_name", "values of k\"\"\" with torch.no_grad(): maxk = max(topk) batch_size =", "function output = output_factual loss = loss_vision acc1, acc5 =", "frames') parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json', help='path to the json file with", "assert os.path.isfile(args.resume_vision), \"No checkpoint found at '{}'\".format(args.resume_vision) print(\"=> loading checkpoint", "the json file with ground truth labels') parser.add_argument('--dataset', default='smth_smth', help='which", "BboxInteractionLatentModel as BboxModel print('interaction loaded!!') else: print(\"no such a coordinate", "args.clip_gradient) # update the parameter optimizer_vision.step() 
optimizer_coord.step() optimizer_fusion.step() batch_time.update(time.time() -", "branch_name == 'fusion': for param_group in optimizer.param_groups: param_group['lr'] = lr", "coordinate branch output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord", "= criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_factual, video_label.long().cuda()) # statistic result", "i, len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'], acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'], acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0'])) for k", "output_fusion = fusion_model(feature_vision.cuda(), feature_coord.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse", "+ '_latest.pth.tar') if is_best: shutil.copyfile(filename + '_latest.pth.tar', filename + '_best.pth.tar')", "loss and save checkpoint is_best = loss < best_loss best_loss", "'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\\t'.format( i, len(val_loader), batch_time=batch_time, loss=losses, acc_top1=acc_top1, acc_top5=acc_top5, ))", "decay learning rate by 10') parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')", "targets_list, class_to_idx, args) return losses.avg def cf_inference_group(val_loader, model_list, fusion_function, search_list,", "'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5", "10\"\"\" decay = 0.1 ** (sum(epoch >= np.array(lr_steps))) lr =", "the specified values of k\"\"\" with torch.no_grad(): maxk = max(topk)", "{})\" .format(args.resume_fusion, checkpoint['epoch'])) if args.start_epoch is None: args.start_epoch = 0", "loaded checkpoint '{}' (epoch {})\" .format(args.resume_coord, checkpoint['epoch'])) if args.resume_fusion: 
assert", "type=str, default='../data/dataset_splits/compositional/train.json', help='path to the json file with train video", "k\"\"\" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _,", "stage) if args.evaluate: validate(val_loader, model_list, fusion_function, criterion, class_to_idx=dataset_val.classes_dict) return #", "print(args) # create vision model if args.model_vision == 'global_i3d': from", "validation loader train_loader = torch.utils.data.DataLoader( dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True,", "coding: utf-8 -*- import argparse import os import shutil import", "correct[:k].contiguous().view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res if __name__ ==", "from the fusion_function) loss_vision = criterion(output_vision, video_label.long().cuda()) loss_coord = criterion(output_coord,", "optimizer_coord.step() optimizer_fusion.step() batch_time.update(time.time() - end) end = time.time() if i", "global args batch_time = AverageMeter() data_time = AverageMeter() losses =", "parser.add_argument('--search_stride', type=int, default=5, help='test performance every n strides') # train", "box_tensors.cuda(), video_label) output_vision = output_vision.view((-1, len(val_loader.dataset.classes))) output_coord, feature_coord = coord_model(global_img_tensors,", "compute output with torch.no_grad(): output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(),", "= [vision_model, coord_model, fusion_model] optimizer_vision = torch.optim.SGD(filter(lambda p: p.requires_grad, vision_model.parameters()),", "feature_coord.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse three outputs output_factual", "# warning: loss_fusion is the loss of output_fusion(fused, obtained from", "loss_coord + loss_factual loss.backward() if args.clip_gradient is not None: 
torch.nn.utils.clip_grad_norm_(vision_model.parameters(),", "= output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse three outputs output_factual = fusion_function(output_vision,", "box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # obtain the", "import OrderedDict import torch import torch.backends.cudnn as cudnn from callbacks", "torch.optim.SGD(filter(lambda p: p.requires_grad, coord_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_fusion = torch.optim.SGD(filter(lambda", "optimizer_fusion.step() batch_time.update(time.time() - end) end = time.time() if i %", "model!\") # create fusion model if args.model_fusion == 'concat_fusion': from", "== 'rgb_roi': from model.model_lib import BboxVisualModel as RGBModel print('rgb_roi loaded!!')", "checkpoint '{}' (epoch {})\" .format(args.resume_vision, checkpoint['epoch'])) # optionally resume coord", "unpack three models [vision_model, coord_model, fusion_model] = model_list # switch", "to latest checkpoint (default: none)') parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH', help='path", "= output_factual loss = loss_vision acc1, acc5 = accuracy(output.cpu(), video_label,", "three branches if args.fusion_function == 'fused_sum': from fusion_function import logsigsum", "parser.add_argument('--fusion_function', default='fused_sum', type=str, help='function for fusing activations from each branch')", "type=float, nargs=\"+\", metavar='LRSteps', help='epochs to decay learning rate by 10')", "parser.add_argument('--num_boxes', default=4, type=int, help='num of boxes for each image') parser.add_argument('--num_frames',", "of output_fusion(fused, obtained from the fusion_function) loss_vision = criterion(output_vision, video_label.long().cuda())", "for k in topk: correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True) 
res.append(correct_k.mul_(100.0 /", "'{}_{}'.format(args.model_fusion, args.logname))) def train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion): global", "not None: torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient) # update the parameter optimizer_vision.step() optimizer_coord.step()", "param_group in optimizer.param_groups: param_group['lr'] = lr def accuracy(output, target, topk=(1,)):", "ground truth labels') parser.add_argument('--dataset', default='smth_smth', help='which dataset to train') parser.add_argument('--logname',", "= optimizer_list # switch to train mode vision_model.train() coord_model.train() fusion_model.train()", "is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname))) save_checkpoint( { 'epoch': epoch + 1,", "inference output_vision, feature_vision = vision_model(global_img_tensors.cuda(), box_categories, box_tensors.cuda(), video_label) output_vision =", "search_list = np.linspace(0.0, 1.0, 11) # factual inference (vanilla test", "in optimizer.param_groups: param_group['lr'] = lr def accuracy(output, target, topk=(1,)): \"\"\"Computes", "video_label.long().cuda()) loss_fusion = criterion(output_factual, video_label.long().cuda()) # statistic result from fusion_branch", "with frames') parser.add_argument('--json_data_train', type=str, default='../data/dataset_splits/compositional/train.json', help='path to the json file", "'global_i3d': from model.model_lib import VideoGlobalModel as RGBModel print('global_i3d loaded!!') elif", "file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=False, if_augment=True, ) dataset_val = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes,", "fusion_model] optimizer_vision = torch.optim.SGD(filter(lambda p: p.requires_grad, vision_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay)", "on validation set') parser.add_argument('--parallel', default=True, 
type=bool, help='whether or not train", "if args.start_epoch is None: args.start_epoch = 0 cudnn.benchmark = True", "latest checkpoint (default: none)') parser.add_argument('--resume_coord', default='', type=str, metavar='PATH', help='path to", "in optimizer.param_groups: param_group['lr'] = lr else: for param_group in optimizer.param_groups:", "= fusion_model(feature_vision_detached.cuda(), feature_coord_detached.cuda()) output_fusion = output_fusion.view((-1, len(val_loader.dataset.classes))) # fuse three", "output_coord.view((-1, len(val_loader.dataset.classes))) # detach the computation graph, avoid the gradient", "{batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f}", "'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format( epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses, acc_top1=acc_top1,", "args.start_epoch = 0 cudnn.benchmark = True # create training and", "= criterion(output_vision, video_label.long().cuda()) loss_coord = criterion(output_coord, video_label.long().cuda()) loss_fusion = criterion(output_fusion,", "factual inference (vanilla test stage) if args.evaluate: validate(val_loader, model_list, fusion_function,", "models [vision_model, coord_model, fusion_model] = model_list # switch to evaluate", "elif branch_name == 'fusion': for param_group in optimizer.param_groups: param_group['lr'] =", "'-p', default=20, type=int, metavar='N', help='print frequency (default: 20)') parser.add_argument('--ckpt', default='./ckpt',", "vision_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_coord = torch.optim.SGD(filter(lambda p: p.requires_grad, coord_model.parameters()),", "fusion_function = fusion_func() if args.parallel: vision_model = torch.nn.DataParallel(vision_model).cuda() coord_model =", "output_coord.view((-1, len(train_loader.dataset.classes))) # detach 
the computation graph, avoid the gradient", "feature_coord_detached = feature_coord.detach() # obtain the activation of fusion branch", "activations from each branch') parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N', help='intermediate feature", "output_fusion) # counterfactual inference output_vision_subtrahend = output_vision output_counterfactual = fusion_function(output_vision_subtrahend,", "checkpoint (default: none)') # model, image&feature dim and training related", "'Acc1_0.5 {acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\\t' 'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\\t' 'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format( i,", "checkpoint found at '{}'\".format(args.resume_fusion) print(\"=> loading checkpoint '{}'\".format(args.resume_fusion)) checkpoint =", "parser.add_argument('--ckpt', default='./ckpt', help='folder to output checkpoints') parser.add_argument('--resume_vision', default='', type=str, metavar='PATH',", "0 or i + 1 == len(val_loader): print('Test: [{0}/{1}]\\t' 'Time", "loaded checkpoint '{}' (epoch {})\" .format(args.resume_vision, checkpoint['epoch'])) # optionally resume", "if args.evaluate: validate(val_loader, model_list, fusion_function, criterion, class_to_idx=dataset_val.classes_dict) return # Counterfactual", "args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] vision_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint", "- end) # obtain the activation and vision features from", "loss_fusion = criterion(output_factual, video_label.long().cuda()) # statistic result from fusion_branch or", "in enumerate(val_loader): # compute output with torch.no_grad(): # factual inference", "[] for k in topk: correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0", "{acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\\t'.format( i, len(val_loader), 
batch_time=batch_time, loss=losses, acc_top1=acc_top1,", "0: print('Epoch: [{0}][{1}/{2}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t' 'Loss", "if args.evaluate: logits_matrix = np.concatenate(logits_matrix) targets_list = np.concatenate(targets_list) save_results(logits_matrix, targets_list,", "= pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk:", "others related arguments parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of", "list of hyperparameter if args.cf_inference_group: cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=dataset_val.classes_dict)", "1))] = AverageMeter() search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() [vision_model, coord_model, fusion_model]", "parameter optimizer_vision.step() optimizer_coord.step() optimizer_fusion.step() batch_time.update(time.time() - end) end = time.time()", "inference model on validation set') parser.add_argument('--parallel', default=True, type=bool, help='whether or", "parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers", "({acc_top1_08.avg:.1f})\\t' 'Acc1_1.0 {acc_top1_10.val:.1f} ({acc_top1_10.avg:.1f})'.format( i, len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'], acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'],", "< best_loss best_loss = min(loss, best_loss) save_checkpoint( { 'epoch': epoch", "from coordinate branch output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label)", "trying a list of hyperparameter if args.cf_inference_group: cf_inference_group(val_loader, model_list, fusion_function,", "range(search_length): search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() 
search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() [vision_model,", "loader train_loader = torch.utils.data.DataLoader( dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True", "global_img_tensors.size(0)) search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time batch_time.update(time.time() -", "{loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format( epoch, i,", "help='path to latest checkpoint (default: none)') parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH',", "= fusion_function(output_vision, output_coord, output_fusion) # loss_fusion is the loss of", "the accuracy of the sum of three branch activation results", "i % args.print_freq == 0 or i + 1 ==", "logits_matrix = [] targets_list = [] # unpack three models", "checkpoint['epoch'] best_loss = checkpoint['best_loss'] vision_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch", "{acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\\t' 'Acc1_0.5 {acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\\t' 'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\\t' 'Acc1_1.0 {acc_top1_10.val:.1f}", "of k\"\"\" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0)", "obtain the activation of fusion branch output_fusion = fusion_model(feature_vision.cuda(), feature_coord.cuda())", "= output.topk(maxk, 1, True, True) pred = pred.t() correct =", "'naive_sum': from fusion_function import naivesum as fusion_func print('naive_sum loaded!!') else:", "def validate(val_loader, model_list, fusion_function, criterion, epoch=None, class_to_idx=None): batch_time = AverageMeter()", "# -*- coding: utf-8 -*- import argparse import os import", "# train for one epoch train(train_loader, model_list, fusion_function, 
optimizer_list, epoch,", "'epoch': epoch + 1, 'state_dict': coord_model.state_dict(), 'best_loss': best_loss, }, is_best,", "if args.model_fusion == 'concat_fusion': from model.model_lib import ConcatFusionModel as FusionModel", "or not train with multi GPUs') parser.add_argument('--gpu_index', type=str, default='0, 1,", "= fusion_func() if args.parallel: vision_model = torch.nn.DataParallel(vision_model).cuda() coord_model = torch.nn.DataParallel(coord_model).cuda()", "# load four optimizers, including the one designed for uniform", "type=int, metavar='N', help='intermediate feature dimension for coord-based features') parser.add_argument('--size', default=224,", "checkpoint if args.resume_coord: assert os.path.isfile(args.resume_coord), \"No checkpoint found at '{}'\".format(args.resume_coord)", "numpy as np import random from collections import OrderedDict import", "output_counterfactual * weight acc1, acc5 = accuracy(output_debiased.cpu(), video_label, topk=(1, 5))", "[] # unpack three models [vision_model, coord_model, fusion_model] = model_list", "accuracy(output_debiased.cpu(), video_label, topk=(1, 5)) search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), global_img_tensors.size(0)) search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0))", "output_vision.view((-1, len(train_loader.dataset.classes))) # obtain the activation and coordinate features from", "checkpoints and logs') parser.add_argument('--print_freq', '-p', default=20, type=int, metavar='N', help='print frequency", "BboxModel print('interaction loaded!!') else: print(\"no such a coordinate model!\") #", "= AverageMeter() # load three model branches [vision_model, coord_model, fusion_model]", "help='number of total epochs to run') parser.add_argument('--start_epoch', default=None, type=int, metavar='N',", "validation dataset dataset_train = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, 
file_input=args.json_data_train, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args,", "= torch.optim.SGD(filter(lambda p: p.requires_grad, fusion_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_list =", "'--wd', default=0.0001, type=float, metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--clip_gradient', '-cg',", "output_fusion.view((-1, len(train_loader.dataset.classes))) output_factual = fusion_function(output_vision, output_coord, output_fusion) # loss_fusion is", "help='primary image input size') parser.add_argument('--num_boxes', default=4, type=int, help='num of boxes", "end = time.time() if i % args.print_freq == 0: print('Epoch:", "callbacks import AverageMeter from data_utils.causal_data_loader_frames import VideoFolder from utils import", "each image') parser.add_argument('--num_frames', default=16, type=int, help='num of frames for the", "after fusion function output = output_factual loss = loss_vision acc1,", "== 'fused_sum': from fusion_function import logsigsum as fusion_func print('fused_sum loaded!!')", "avoid the gradient confusion feature_vision_detached = feature_vision.detach() feature_coord_detached = feature_coord.detach()", "model if args.model_vision == 'global_i3d': from model.model_lib import VideoGlobalModel as", "{loss.val:.4f} ({loss.avg:.4f})\\t' 'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})\\t'.format( i, len(val_loader),", "type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('--batch_size', '-b',", "parser.add_argument('--model_vision', default='rgb_roi') parser.add_argument('--model_coord', default='interaction') parser.add_argument('--model_fusion', default='concat_fusion') parser.add_argument('--fusion_function', default='fused_sum', type=str, help='function", "# switch to evaluate mode vision_model.eval() coord_model.eval() 
fusion_model.eval() end =", "activation of fusion branch output_fusion = fusion_model(feature_vision.cuda(), feature_coord.cuda()) output_fusion =", "class_to_idx=None): batch_time = AverageMeter() losses = AverageMeter() acc_top1 = AverageMeter()", "none)') # model, image&feature dim and training related arguments parser.add_argument('--model_vision',", "epochs to run') parser.add_argument('--start_epoch', default=None, type=int, metavar='N', help='manual epoch number", "(default: none)') parser.add_argument('--resume_fusion', default='', type=str, metavar='PATH', help='path to latest checkpoint", "model from a checkpoint if args.resume_vision: assert os.path.isfile(args.resume_vision), \"No checkpoint", "args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] coord_model.load_state_dict(checkpoint['state_dict'])", "vision_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_vision, checkpoint['epoch'])) #", "(vanilla test stage) if args.evaluate: validate(val_loader, model_list, fusion_function, criterion, class_to_idx=dataset_val.classes_dict)", "arguments parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/', help='path to the folder with frames')", "torch.load(args.resume_coord) if args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss =", "parser.add_argument('--print_freq', '-p', default=20, type=int, metavar='N', help='print frequency (default: 20)') parser.add_argument('--ckpt',", "torch import torch.backends.cudnn as cudnn from callbacks import AverageMeter from", "= pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for", "the activation and vision features from vision branch output_vision, feature_vision", "acc1, acc5 = accuracy(output_debiased.cpu(), video_label, topk=(1, 5)) search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), 
global_img_tensors.size(0))", "i % args.print_freq == 0: print('Epoch: [{0}][{1}/{2}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'", "args=args, is_val=False, if_augment=True, ) dataset_val = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_val, file_labels=args.json_file_labels,", "acc_top5.update(acc5.item(), global_img_tensors.size(0)) # refresh the optimizer optimizer_vision.zero_grad() optimizer_coord.zero_grad() optimizer_fusion.zero_grad() loss", "== len(val_loader): print('Test: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'", "of total epochs to run') parser.add_argument('--start_epoch', default=None, type=int, metavar='N', help='manual", "of hyperparameter if args.cf_inference_group: cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=dataset_val.classes_dict) return", "loss_fusion is the loss of output_fusion(fused, obtained from the fusion_function)", "({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format( epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses,", "parser.add_argument('--logname', default='my_method', help='name of the experiment for checkpoints and logs')", "model, image&feature dim and training related arguments parser.add_argument('--model_vision', default='rgb_roi') parser.add_argument('--model_coord',", "coord_model, fusion_model] = model_list # switch to evaluate mode vision_model.eval()", "= [] for k in topk: correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)", "== 'fusion': for param_group in optimizer.param_groups: param_group['lr'] = lr else:", "1))].update(acc1.item(), global_img_tensors.size(0)) search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time batch_time.update(time.time()", "acc_top5 = AverageMeter() logits_matrix = [] 
targets_list = [] #", "arguments parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading", "10') parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum') parser.add_argument('--weight_decay', '--wd', default=0.0001, type=float,", "print('no such a fusion model!') # load model branch vision_model", "search_dict['acc_1_alpha_{}'.format(round(search_list[j], 1))].update(acc1.item(), global_img_tensors.size(0)) search_dict['acc_5_alpha_{}'.format(round(search_list[j], 1))].update(acc5.item(), global_img_tensors.size(0)) # measure elapsed time", "help='test performance every n strides') # train mode, hardware setting", "'_latest.pth.tar') if is_best: shutil.copyfile(filename + '_latest.pth.tar', filename + '_best.pth.tar') def", "boxes for each image') parser.add_argument('--num_frames', default=16, type=int, help='num of frames", "default=True, type=bool, help='whether or not train with multi GPUs') parser.add_argument('--gpu_index',", "adjust_learning_rate(optimizer_vision, epoch, args.lr_steps, 'vision') adjust_learning_rate(optimizer_coord, epoch, args.lr_steps, 'coord') adjust_learning_rate(optimizer_fusion, epoch,", "model!') # load model branch vision_model = RGBModel(args) coord_model =", "default='../data/dataset_splits/compositional/labels.json', help='path to the json file with ground truth labels')", "checkpoint found at '{}'\".format(args.resume_coord) print(\"=> loading checkpoint '{}'\".format(args.resume_coord)) checkpoint =", "argparse import os import shutil import time import numpy as", "losses.avg def cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=None): batch_time = AverageMeter()", "evaluate mode vision_model.eval() coord_model.eval() fusion_model.eval() end = time.time() for i,", "fusion function output = output_factual loss = loss_vision acc1, acc5", "-1).expand_as(pred)) res = [] for k in topk: correct_k =", "image') 
parser.add_argument('--num_frames', default=16, type=int, help='num of frames for the model')", "np.concatenate(logits_matrix) targets_list = np.concatenate(targets_list) save_results(logits_matrix, targets_list, class_to_idx, args) return losses.avg", "{} for i in range(search_length): search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() search_dict['acc_5_alpha_{}'.format(round(search_list[i],", "len(val_loader), batch_time=batch_time, acc_top1_00=search_dict['acc_1_alpha_0.0'], acc_top1_02=search_dict['acc_1_alpha_0.2'], acc_top1_05=search_dict['acc_1_alpha_0.5'], acc_top1_08=search_dict['acc_1_alpha_0.8'], acc_top1_10=search_dict['acc_1_alpha_1.0'])) for k in", "len(train_loader.dataset.classes))) # detach the computation graph, avoid the gradient confusion", "return def save_checkpoint(state, is_best, filename): torch.save(state, filename + '_latest.pth.tar') if", "fusion_function(output_vision, output_coord, output_fusion) # warning: loss_fusion is the loss of", "default=512, type=int, metavar='N', help='intermediate feature dimension for image-based features') parser.add_argument('--coord_feature_dim',", "coord_model = BboxModel(args) fusion_model = FusionModel(args) # create the fusion", "activation and coordinate features from coordinate branch output_coord, feature_coord =", "i in range(search_length): search_dict['acc_1_alpha_{}'.format(round(search_list[i], 1))] = AverageMeter() search_dict['acc_5_alpha_{}'.format(round(search_list[i], 1))] =", "acc_top5=acc_top5, )) if args.evaluate: logits_matrix = np.concatenate(logits_matrix) targets_list = np.concatenate(targets_list)", "# train mode, hardware setting and others related arguments parser.add_argument('-j',", "type=int, metavar='N', help='intermediate feature dimension for image-based features') parser.add_argument('--coord_feature_dim', default=512,", "default='../data/dataset_splits/compositional/validation.json', help='path to the json file with validation video meta", 
"= fusion_function(output_vision, output_coord, output_fusion) # warning: loss_fusion is the loss", "args) return losses.avg def cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=None): batch_time", "type=float, metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--clip_gradient', '-cg', default=5, type=float,", "type=str, help='function for fusing activations from each branch') parser.add_argument('--img_feature_dim', default=512,", "dataset and log related arguments parser.add_argument('--root_frames', type=str, default='/mnt/data1/home/sunpengzhan/sth-sth-v2/', help='path to", "1 == len(val_loader): print('Cf-Inference: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Acc1_0.0 {acc_top1_00.val:.1f}", "default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('--resume_coord',", "default=0.01, type=float, metavar='LR', help='initial learning rate') parser.add_argument('--lr_steps', default=[24, 35, 45],", "shuffle=True, num_workers=args.workers, drop_last=True, pin_memory=True ) val_loader = torch.utils.data.DataLoader( dataset_val, drop_last=True,", "print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_coord, checkpoint['epoch'])) if args.resume_fusion:", "print('global_i3d loaded!!') elif args.model_vision == 'rgb_roi': from model.model_lib import BboxVisualModel", "the computation graph, avoid the gradient confusion feature_vision_detached = feature_vision.detach()", "the activation and coordinate features from coordinate branch output_coord, feature_coord", "= [optimizer_vision, optimizer_coord, optimizer_fusion] criterion = torch.nn.CrossEntropyLoss() search_list = np.linspace(0.0,", "optimizer_list = [optimizer_vision, optimizer_coord, optimizer_fusion] criterion = torch.nn.CrossEntropyLoss() search_list =", "default=5, help='test performance every n strides') # train mode, hardware", "tqdm(range(args.start_epoch, 
args.epochs)): adjust_learning_rate(optimizer_vision, epoch, args.lr_steps, 'vision') adjust_learning_rate(optimizer_coord, epoch, args.lr_steps, 'coord')", "args.lr * decay if branch_name == 'vision': for param_group in", "found at '{}'\".format(args.resume_coord) print(\"=> loading checkpoint '{}'\".format(args.resume_coord)) checkpoint = torch.load(args.resume_coord)", "nargs=\"+\", metavar='LRSteps', help='epochs to decay learning rate by 10') parser.add_argument('--momentum',", "activation and vision features from vision branch output_vision, feature_vision =", "parser.add_argument('--model_fusion', default='concat_fusion') parser.add_argument('--fusion_function', default='fused_sum', type=str, help='function for fusing activations from", "measure accuracy and record loss losses.update(loss.item(), global_img_tensors.size(0)) acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(),", "'best_loss': best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_fusion, args.logname))) def train(train_loader, model_list,", ".format(args.resume_fusion, checkpoint['epoch'])) if args.start_epoch is None: args.start_epoch = 0 cudnn.benchmark", "best_loss, }, is_best, os.path.join(args.ckpt, '{}_{}'.format(args.model_vision, args.logname))) save_checkpoint( { 'epoch': epoch", "parser.add_argument('--img_feature_dim', default=512, type=int, metavar='N', help='intermediate feature dimension for image-based features')", "with multi GPUs') parser.add_argument('--gpu_index', type=str, default='0, 1, 2, 3', help='the", "metavar='W', help='weight decay (default: 1e-4)') parser.add_argument('--clip_gradient', '-cg', default=5, type=float, metavar='W',", "'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\\t' 'Acc1_0.5 {acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\\t' 'Acc1_0.8 {acc_top1_08.val:.1f} ({acc_top1_08.avg:.1f})\\t' 'Acc1_1.0", "feature_coord.detach() # obtain the activation of fusion branch output_fusion =", "}, is_best, 
os.path.join(args.ckpt, '{}_{}'.format(args.model_coord, args.logname))) save_checkpoint( { 'epoch': epoch +", "random from collections import OrderedDict import torch import torch.backends.cudnn as", "optimizer_fusion = torch.optim.SGD(filter(lambda p: p.requires_grad, fusion_model.parameters()), momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_list", "res = [] for k in topk: correct_k = correct[:k].contiguous().view(-1).float().sum(0,", "# Counterfactual inference by trying a list of hyperparameter if", "5)) if args.evaluate: logits_matrix.append(output.cpu().data.numpy()) targets_list.append(video_label.cpu().numpy()) # measure accuracy and record", "inference output_vision_subtrahend = output_vision output_counterfactual = fusion_function(output_vision_subtrahend, torch.tensor(0.0), torch.tensor(0.0)) for", "feature dimension for image-based features') parser.add_argument('--coord_feature_dim', default=512, type=int, metavar='N', help='intermediate", "model on validation set') parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference model on", "utils import save_results from tqdm import tqdm parser = argparse.ArgumentParser(description='Counterfactual", "[vision_model, coord_model, fusion_model] = model_list # load four optimizers, including", "loading checkpoint '{}'\".format(args.resume_vision)) checkpoint = torch.load(args.resume_vision) if args.start_epoch is None:", "1 == len(val_loader): print('Test: [{0}/{1}]\\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t' 'Loss {loss.val:.4f}", "acc_top1.update(acc1.item(), global_img_tensors.size(0)) acc_top5.update(acc5.item(), global_img_tensors.size(0)) # refresh the optimizer optimizer_vision.zero_grad() optimizer_coord.zero_grad()", "batch_time.update(time.time() - end) end = time.time() if i % args.print_freq", "+ '_best.pth.tar') def adjust_learning_rate(optimizer, epoch, lr_steps, branch_name=None): \"\"\"Sets the learning", 
"args.lr_steps, 'vision') adjust_learning_rate(optimizer_coord, epoch, args.lr_steps, 'coord') adjust_learning_rate(optimizer_fusion, epoch, args.lr_steps, 'fusion')", "fusion_function, search_list, class_to_idx=None): batch_time = AverageMeter() search_length = len(search_list) search_dict", "if branch_name == 'vision': for param_group in optimizer.param_groups: param_group['lr'] =", "video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # obtain the activation of", "= loss_vision acc1, acc5 = accuracy(output.cpu(), video_label, topk=(1, 5)) if", "and logs') parser.add_argument('--print_freq', '-p', default=20, type=int, metavar='N', help='print frequency (default:", "feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes)))", "'--learning-rate', default=0.01, type=float, metavar='LR', help='initial learning rate') parser.add_argument('--lr_steps', default=[24, 35,", "help='path to the json file with train video meta data')", "file with validation video meta data') parser.add_argument('--json_file_labels', type=str, default='../data/dataset_splits/compositional/labels.json', help='path", "(epoch+1) >= 30 and (epoch + 1) % args.search_stride ==", "import naivesum as fusion_func print('naive_sum loaded!!') else: print('no such a", "max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True,", "of the experiment for checkpoints and logs') parser.add_argument('--print_freq', '-p', default=20,", "time.time() if i % args.print_freq == 0: print('Epoch: [{0}][{1}/{2}]\\t' 'Time", "file_input=args.json_data_train, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=False, if_augment=True, ) dataset_val = VideoFolder(root=args.root_frames,", "for j in range(search_length): weight = search_list[j] output_debiased = output_factual", "or i + 1 == len(val_loader): print('Test: 
[{0}/{1}]\\t' 'Time {batch_time.val:.3f}", "args.logname))) save_checkpoint( { 'epoch': epoch + 1, 'state_dict': coord_model.state_dict(), 'best_loss':", "you want to use') best_loss = 1000000 def main(): global", "optimizer.param_groups: param_group['lr'] = lr * 0.8 elif branch_name == 'coord':", "for param_group in optimizer.param_groups: param_group['lr'] = lr def accuracy(output, target,", "VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_train, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=False, if_augment=True, ) dataset_val", "criterion(output_factual, video_label.long().cuda()) # statistic result from fusion_branch or value after", "if args.cf_inference_group: cf_inference_group(val_loader, model_list, fusion_function, search_list, class_to_idx=dataset_val.classes_dict) return print('training begin...')", "video_label) output_coord = output_coord.view((-1, len(val_loader.dataset.classes))) # detach the computation graph,", "\"\"\"Sets the learning rate to the initial LR decayed by", "momentum=args.momentum, lr=args.lr, weight_decay=args.weight_decay) optimizer_fusion = torch.optim.SGD(filter(lambda p: p.requires_grad, fusion_model.parameters()), momentum=args.momentum,", "output_coord, feature_coord = coord_model(global_img_tensors, box_categories.cuda(), box_tensors.cuda(), video_label) output_coord = output_coord.view((-1,", "video_label) in enumerate(train_loader): data_time.update(time.time() - end) # obtain the activation", "None: args.start_epoch = checkpoint['epoch'] best_loss = checkpoint['best_loss'] vision_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded", "# create coord model if args.model_coord == 'interaction': from model.model_lib", "of frames for the model') parser.add_argument('--num_classes', default=174, type=int, help='num of", "set') parser.add_argument('--cf_inference_group', action='store_true', help='counterfactual inference model on 
validation set') parser.add_argument('--parallel',", "image&feature dim and training related arguments parser.add_argument('--model_vision', default='rgb_roi') parser.add_argument('--model_coord', default='interaction')", "if args.clip_gradient is not None: torch.nn.utils.clip_grad_norm_(vision_model.parameters(), args.clip_gradient) # update the", "loss < best_loss best_loss = min(loss, best_loss) save_checkpoint( { 'epoch':", "latest checkpoint (default: none)') # model, image&feature dim and training", "'Acc1_0.0 {acc_top1_00.val:.1f} ({acc_top1_00.avg:.1f})\\t' 'Acc1_0.2 {acc_top1_02.val:.1f} ({acc_top1_02.avg:.1f})\\t' 'Acc1_0.5 {acc_top1_05.val:.1f} ({acc_top1_05.avg:.1f})\\t' 'Acc1_0.8", "to use') best_loss = 1000000 def main(): global args, best_loss", "if args.evaluate: logits_matrix.append(output.cpu().data.numpy()) targets_list.append(video_label.cpu().numpy()) # measure accuracy and record loss", "epoch number (useful on restarts)') parser.add_argument('--batch_size', '-b', default=16, type=int, metavar='N',", "= time.time() for i, (global_img_tensors, box_tensors, box_categories, video_label) in enumerate(val_loader):", "shuffle=False, num_workers=args.workers, pin_memory=False ) model_list = [vision_model, coord_model, fusion_model] optimizer_vision", "lr def accuracy(output, target, topk=(1,)): \"\"\"Computes the accuracy over the", "'Acc1 {acc_top1.val:.1f} ({acc_top1.avg:.1f})\\t' 'Acc5 {acc_top5.val:.1f} ({acc_top5.avg:.1f})'.format( epoch, i, len(train_loader), batch_time=batch_time,", "batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True)", "1, 2, 3', help='the index of gpu you want to", "args.resume_vision: assert os.path.isfile(args.resume_vision), \"No checkpoint found at '{}'\".format(args.resume_vision) print(\"=> loading", "if args.resume_vision: assert os.path.isfile(args.resume_vision), \"No checkpoint found at '{}'\".format(args.resume_vision) print(\"=>", "= validate(val_loader, model_list, fusion_function, criterion, 
epoch=epoch, class_to_idx=dataset_val.classes_dict) else: loss =", "train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion) if (epoch+1) >= 30", "# compute output with torch.no_grad(): # factual inference output_vision, feature_vision", "checkpoint if args.resume_vision: assert os.path.isfile(args.resume_vision), \"No checkpoint found at '{}'\".format(args.resume_vision)", "default='concat_fusion') parser.add_argument('--fusion_function', default='fused_sum', type=str, help='function for fusing activations from each", "to the json file with validation video meta data') parser.add_argument('--json_file_labels',", "checkpoint['best_loss'] fusion_model.load_state_dict(checkpoint['state_dict']) print(\"=> loaded checkpoint '{}' (epoch {})\" .format(args.resume_fusion, checkpoint['epoch']))", "dataset_val = VideoFolder(root=args.root_frames, num_boxes=args.num_boxes, file_input=args.json_data_val, file_labels=args.json_file_labels, frames_duration=args.num_frames, args=args, is_val=True, if_augment=True,", "one epoch train(train_loader, model_list, fusion_function, optimizer_list, epoch, criterion) if (epoch+1)", "save_checkpoint(state, is_best, filename): torch.save(state, filename + '_latest.pth.tar') if is_best: shutil.copyfile(filename", "(sum(epoch >= np.array(lr_steps))) lr = args.lr * decay if branch_name", "model_list, fusion_function, optimizer_list, epoch, criterion): global args batch_time = AverageMeter()", "= torch.load(args.resume_vision) if args.start_epoch is None: args.start_epoch = checkpoint['epoch'] best_loss", "default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') #", "outputs output_factual = fusion_function(output_vision, output_coord, output_fusion) # counterfactual inference output_vision_subtrahend", "load four optimizers, including the one designed for uniform assumption", "parser.add_argument('--batch_size', '-b', default=16, type=int, metavar='N', help='mini-batch size') 
parser.add_argument('--lr', '--learning-rate', default=0.01,", "parser.add_argument('--parallel', default=True, type=bool, help='whether or not train with multi GPUs')" ]
[ "flask_cors import CORS import logging app = Flask(__name__) CORS(app, resources={r\"/*\":", "application.jwt import application.routes.config import application.routes.user import application.routes.permission import application.routes.role import", "= logging.getLogger(__name__) logger.setLevel(logging.INFO) ''' ''' import application.jwt import application.routes.config import", "SQLAlchemy(app) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) ''' ''' import application.jwt import", "import application.routes.user import application.routes.permission import application.routes.role import application.routes.access # after", "application.routes.permission import application.routes.role import application.routes.access # after Model defined db.create_all()", "Flask(__name__) CORS(app, resources={r\"/*\": {\"origins\": \"*\"}}) app.config.from_object('config.current') db = SQLAlchemy(app) logger", "Flask from flask_sqlalchemy import SQLAlchemy from flask_cors import CORS import", "import Flask from flask_sqlalchemy import SQLAlchemy from flask_cors import CORS", "logger.setLevel(logging.INFO) ''' ''' import application.jwt import application.routes.config import application.routes.user import", "app = Flask(__name__) CORS(app, resources={r\"/*\": {\"origins\": \"*\"}}) app.config.from_object('config.current') db =", "utf-8 from flask import Flask from flask_sqlalchemy import SQLAlchemy from", "''' import application.jwt import application.routes.config import application.routes.user import application.routes.permission import", "flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_cors import", "{\"origins\": \"*\"}}) app.config.from_object('config.current') db = SQLAlchemy(app) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO)", "import application.routes.permission import application.routes.role import application.routes.access # after Model defined", "from flask_cors import CORS import logging app = 
Flask(__name__) CORS(app,", "''' ''' import application.jwt import application.routes.config import application.routes.user import application.routes.permission", "from flask_sqlalchemy import SQLAlchemy from flask_cors import CORS import logging", "logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) ''' ''' import application.jwt import application.routes.config", "encoding: utf-8 from flask import Flask from flask_sqlalchemy import SQLAlchemy", "logging.getLogger(__name__) logger.setLevel(logging.INFO) ''' ''' import application.jwt import application.routes.config import application.routes.user", "logging app = Flask(__name__) CORS(app, resources={r\"/*\": {\"origins\": \"*\"}}) app.config.from_object('config.current') db", "= SQLAlchemy(app) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) ''' ''' import application.jwt", "CORS import logging app = Flask(__name__) CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})", "import CORS import logging app = Flask(__name__) CORS(app, resources={r\"/*\": {\"origins\":", "CORS(app, resources={r\"/*\": {\"origins\": \"*\"}}) app.config.from_object('config.current') db = SQLAlchemy(app) logger =", "import SQLAlchemy from flask_cors import CORS import logging app =", "from flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_cors", "\"*\"}}) app.config.from_object('config.current') db = SQLAlchemy(app) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) '''", "flask_sqlalchemy import SQLAlchemy from flask_cors import CORS import logging app", "application.routes.user import application.routes.permission import application.routes.role import application.routes.access # after Model", "application.routes.config import application.routes.user import application.routes.permission import application.routes.role import application.routes.access #", "# encoding: utf-8 from flask import Flask from flask_sqlalchemy import", "= Flask(__name__) CORS(app, 
resources={r\"/*\": {\"origins\": \"*\"}}) app.config.from_object('config.current') db = SQLAlchemy(app)", "app.config.from_object('config.current') db = SQLAlchemy(app) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) ''' '''", "import logging app = Flask(__name__) CORS(app, resources={r\"/*\": {\"origins\": \"*\"}}) app.config.from_object('config.current')", "import application.jwt import application.routes.config import application.routes.user import application.routes.permission import application.routes.role", "resources={r\"/*\": {\"origins\": \"*\"}}) app.config.from_object('config.current') db = SQLAlchemy(app) logger = logging.getLogger(__name__)", "SQLAlchemy from flask_cors import CORS import logging app = Flask(__name__)", "db = SQLAlchemy(app) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) ''' ''' import", "import application.routes.config import application.routes.user import application.routes.permission import application.routes.role import application.routes.access" ]
[ "antecedents, user_options): from Betsy import module_utils original_file = module_utils.get_inputid(antecedents.identifier) filename", "outfile ) def name_outfile(self, antecedents, user_options): from Betsy import module_utils", "run( self, network, antecedents, out_attributes, user_options, num_cores, outfile): import os", "import os import shutil from genomicode import filelib in_data =", "antecedents result_files = os.listdir(in_data.identifier) for result_file in result_files: if '-controls'", "( 'the output file %s for illu_control fails' % outfile", "outfile): import os import shutil from genomicode import filelib in_data", "in_data = antecedents result_files = os.listdir(in_data.identifier) for result_file in result_files:", "name_outfile(self, antecedents, user_options): from Betsy import module_utils original_file = module_utils.get_inputid(antecedents.identifier)", "shutil from genomicode import filelib in_data = antecedents result_files =", "os import shutil from genomicode import filelib in_data = antecedents", "Module(AbstractModule): def __init__(self): AbstractModule.__init__(self) def run( self, network, antecedents, out_attributes,", "module_utils original_file = module_utils.get_inputid(antecedents.identifier) filename = 'control_illumina_' + original_file +", "import shutil from genomicode import filelib in_data = antecedents result_files", "class Module(AbstractModule): def __init__(self): AbstractModule.__init__(self) def run( self, network, antecedents,", "original_file = module_utils.get_inputid(antecedents.identifier) filename = 'control_illumina_' + original_file + '.gct'", "output file %s for illu_control fails' % outfile ) def", "AbstractModule.__init__(self) def run( self, network, antecedents, out_attributes, user_options, num_cores, outfile):", "module_utils.get_inputid(antecedents.identifier) filename = 'control_illumina_' + original_file + '.gct' return filename", "import AbstractModule class Module(AbstractModule): def 
__init__(self): AbstractModule.__init__(self) def run( self,", "assert filelib.exists_nz(outfile), ( 'the output file %s for illu_control fails'", "filelib.exists_nz(outfile), ( 'the output file %s for illu_control fails' %", "from Betsy import module_utils original_file = module_utils.get_inputid(antecedents.identifier) filename = 'control_illumina_'", "Betsy import module_utils original_file = module_utils.get_inputid(antecedents.identifier) filename = 'control_illumina_' +", "in result_file: goal_file = os.path.join(in_data.identifier, result_file) shutil.copyfile(goal_file, outfile) assert filelib.exists_nz(outfile),", "if '-controls' in result_file: goal_file = os.path.join(in_data.identifier, result_file) shutil.copyfile(goal_file, outfile)", "result_files = os.listdir(in_data.identifier) for result_file in result_files: if '-controls' in", "= os.path.join(in_data.identifier, result_file) shutil.copyfile(goal_file, outfile) assert filelib.exists_nz(outfile), ( 'the output", "out_attributes, user_options, num_cores, outfile): import os import shutil from genomicode", "AbstractModule class Module(AbstractModule): def __init__(self): AbstractModule.__init__(self) def run( self, network,", "__init__(self): AbstractModule.__init__(self) def run( self, network, antecedents, out_attributes, user_options, num_cores,", ") def name_outfile(self, antecedents, user_options): from Betsy import module_utils original_file", "fails' % outfile ) def name_outfile(self, antecedents, user_options): from Betsy", "user_options, num_cores, outfile): import os import shutil from genomicode import", "illu_control fails' % outfile ) def name_outfile(self, antecedents, user_options): from", "% outfile ) def name_outfile(self, antecedents, user_options): from Betsy import", "import module_utils original_file = module_utils.get_inputid(antecedents.identifier) filename = 'control_illumina_' + original_file", "for illu_control fails' % outfile ) def name_outfile(self, antecedents, 
user_options):", "%s for illu_control fails' % outfile ) def name_outfile(self, antecedents,", "os.listdir(in_data.identifier) for result_file in result_files: if '-controls' in result_file: goal_file", "file %s for illu_control fails' % outfile ) def name_outfile(self,", "def run( self, network, antecedents, out_attributes, user_options, num_cores, outfile): import", "genomicode import filelib in_data = antecedents result_files = os.listdir(in_data.identifier) for", "network, antecedents, out_attributes, user_options, num_cores, outfile): import os import shutil", "result_file in result_files: if '-controls' in result_file: goal_file = os.path.join(in_data.identifier,", "in result_files: if '-controls' in result_file: goal_file = os.path.join(in_data.identifier, result_file)", "result_files: if '-controls' in result_file: goal_file = os.path.join(in_data.identifier, result_file) shutil.copyfile(goal_file,", "= antecedents result_files = os.listdir(in_data.identifier) for result_file in result_files: if", "Module import AbstractModule class Module(AbstractModule): def __init__(self): AbstractModule.__init__(self) def run(", "import filelib in_data = antecedents result_files = os.listdir(in_data.identifier) for result_file", "result_file: goal_file = os.path.join(in_data.identifier, result_file) shutil.copyfile(goal_file, outfile) assert filelib.exists_nz(outfile), (", "self, network, antecedents, out_attributes, user_options, num_cores, outfile): import os import", "from Module import AbstractModule class Module(AbstractModule): def __init__(self): AbstractModule.__init__(self) def", "filelib in_data = antecedents result_files = os.listdir(in_data.identifier) for result_file in", "result_file) shutil.copyfile(goal_file, outfile) assert filelib.exists_nz(outfile), ( 'the output file %s", "antecedents, out_attributes, user_options, num_cores, outfile): import os import shutil from", "def name_outfile(self, antecedents, user_options): from Betsy import module_utils 
original_file =", "'-controls' in result_file: goal_file = os.path.join(in_data.identifier, result_file) shutil.copyfile(goal_file, outfile) assert", "os.path.join(in_data.identifier, result_file) shutil.copyfile(goal_file, outfile) assert filelib.exists_nz(outfile), ( 'the output file", "= os.listdir(in_data.identifier) for result_file in result_files: if '-controls' in result_file:", "for result_file in result_files: if '-controls' in result_file: goal_file =", "'the output file %s for illu_control fails' % outfile )", "user_options): from Betsy import module_utils original_file = module_utils.get_inputid(antecedents.identifier) filename =", "def __init__(self): AbstractModule.__init__(self) def run( self, network, antecedents, out_attributes, user_options,", "num_cores, outfile): import os import shutil from genomicode import filelib", "shutil.copyfile(goal_file, outfile) assert filelib.exists_nz(outfile), ( 'the output file %s for", "= module_utils.get_inputid(antecedents.identifier) filename = 'control_illumina_' + original_file + '.gct' return", "from genomicode import filelib in_data = antecedents result_files = os.listdir(in_data.identifier)", "goal_file = os.path.join(in_data.identifier, result_file) shutil.copyfile(goal_file, outfile) assert filelib.exists_nz(outfile), ( 'the", "outfile) assert filelib.exists_nz(outfile), ( 'the output file %s for illu_control" ]
[ "m in self.pattern.finditer(template): print(m, type(m)) #print(dir(m)) #print(len(m.groups())) print(m[0]) #print(m.groups()) #print(m,", "from string import Template import re class PositionalArgumentTemplate(Template): # (?i):", "#for m in re.finditer(self.pattern, template): for m in self.pattern.finditer(template): print(m,", "re.finditer(self.pattern, template): for m in self.pattern.finditer(template): print(m, type(m)) #print(dir(m)) #print(len(m.groups()))", "#print(m.group('braced')) #print(m.group('invalid')) if __name__ == '__main__': template_str = '${0} is", "'([0-9]+)' def find_place_holders(self, template:str): #for m in re.findall(self.pattern, template): #for", "import Template import re class PositionalArgumentTemplate(Template): # (?i): 大文字小文字を区別しないモードを開始する #", "type(m)) #print(dir(m)) #print(len(m.groups())) print(m[0]) #print(m.groups()) #print(m, m.groups(), m.group('named'), type(m)) #print(m.group('escaped'))", "#print(m.group('escaped')) #print(m.group('named')) #print(m.group('braced')) #print(m.group('invalid')) if __name__ == '__main__': template_str =", "(?-i): 大文字小文字を区別しないモードを無効にする idpattern_default = Template.idpattern # (?-i:[_a-zA-Z][_a-zA-Z0-9]*) idpattern = '([0-9]+)'", "# (?i): 大文字小文字を区別しないモードを開始する # (?-i): 大文字小文字を区別しないモードを無効にする idpattern_default = Template.idpattern #", "'${0} is Aug.' t = PositionalArgumentTemplate(template_str) print(template_str) print(dir(t)) print(t.delimiter) print(t.idpattern)", "PositionalArgumentTemplate(Template): # (?i): 大文字小文字を区別しないモードを開始する # (?-i): 大文字小文字を区別しないモードを無効にする idpattern_default = Template.idpattern", "'__main__': template_str = '${0} is Aug.' t = PositionalArgumentTemplate(template_str) print(template_str)", "= '${0} is Aug.' 
t = PositionalArgumentTemplate(template_str) print(template_str) print(dir(t)) print(t.delimiter)", "PositionalArgumentTemplate(template_str) print(template_str) print(dir(t)) print(t.delimiter) print(t.idpattern) print(type(t.idpattern)) print(t.flags) print(t.pattern) print(t.substitute(**{'0':'V'})) t.find_place_holders(template_str)", "re.findall(self.pattern, template): #for m in re.finditer(self.pattern, template): for m in", "#print(m, m.groups(), m.group('named'), type(m)) #print(m.group('escaped')) #print(m.group('named')) #print(m.group('braced')) #print(m.group('invalid')) if __name__", "<gh_stars>0 from string import Template import re class PositionalArgumentTemplate(Template): #", "#print(dir(m)) #print(len(m.groups())) print(m[0]) #print(m.groups()) #print(m, m.groups(), m.group('named'), type(m)) #print(m.group('escaped')) #print(m.group('named'))", "(?i): 大文字小文字を区別しないモードを開始する # (?-i): 大文字小文字を区別しないモードを無効にする idpattern_default = Template.idpattern # (?-i:[_a-zA-Z][_a-zA-Z0-9]*)", "idpattern_default = Template.idpattern # (?-i:[_a-zA-Z][_a-zA-Z0-9]*) idpattern = '([0-9]+)' def find_place_holders(self,", "m.group('named'), type(m)) #print(m.group('escaped')) #print(m.group('named')) #print(m.group('braced')) #print(m.group('invalid')) if __name__ == '__main__':", "Template.idpattern # (?-i:[_a-zA-Z][_a-zA-Z0-9]*) idpattern = '([0-9]+)' def find_place_holders(self, template:str): #for", "== '__main__': template_str = '${0} is Aug.' t = PositionalArgumentTemplate(template_str)", "= Template.idpattern # (?-i:[_a-zA-Z][_a-zA-Z0-9]*) idpattern = '([0-9]+)' def find_place_holders(self, template:str):", "#print(len(m.groups())) print(m[0]) #print(m.groups()) #print(m, m.groups(), m.group('named'), type(m)) #print(m.group('escaped')) #print(m.group('named')) #print(m.group('braced'))", "is Aug.' 
t = PositionalArgumentTemplate(template_str) print(template_str) print(dir(t)) print(t.delimiter) print(t.idpattern) print(type(t.idpattern))", "(?-i:[_a-zA-Z][_a-zA-Z0-9]*) idpattern = '([0-9]+)' def find_place_holders(self, template:str): #for m in", "idpattern = '([0-9]+)' def find_place_holders(self, template:str): #for m in re.findall(self.pattern,", "t = PositionalArgumentTemplate(template_str) print(template_str) print(dir(t)) print(t.delimiter) print(t.idpattern) print(type(t.idpattern)) print(t.flags) print(t.pattern)", "m.groups(), m.group('named'), type(m)) #print(m.group('escaped')) #print(m.group('named')) #print(m.group('braced')) #print(m.group('invalid')) if __name__ ==", "in re.finditer(self.pattern, template): for m in self.pattern.finditer(template): print(m, type(m)) #print(dir(m))", "m in re.finditer(self.pattern, template): for m in self.pattern.finditer(template): print(m, type(m))", "#print(m.group('named')) #print(m.group('braced')) #print(m.group('invalid')) if __name__ == '__main__': template_str = '${0}", "__name__ == '__main__': template_str = '${0} is Aug.' t =", "print(m[0]) #print(m.groups()) #print(m, m.groups(), m.group('named'), type(m)) #print(m.group('escaped')) #print(m.group('named')) #print(m.group('braced')) #print(m.group('invalid'))", "Template import re class PositionalArgumentTemplate(Template): # (?i): 大文字小文字を区別しないモードを開始する # (?-i):", "class PositionalArgumentTemplate(Template): # (?i): 大文字小文字を区別しないモードを開始する # (?-i): 大文字小文字を区別しないモードを無効にする idpattern_default =", "self.pattern.finditer(template): print(m, type(m)) #print(dir(m)) #print(len(m.groups())) print(m[0]) #print(m.groups()) #print(m, m.groups(), m.group('named'),", "template_str = '${0} is Aug.' 
t = PositionalArgumentTemplate(template_str) print(template_str) print(dir(t))", "in re.findall(self.pattern, template): #for m in re.finditer(self.pattern, template): for m", "in self.pattern.finditer(template): print(m, type(m)) #print(dir(m)) #print(len(m.groups())) print(m[0]) #print(m.groups()) #print(m, m.groups(),", "import re class PositionalArgumentTemplate(Template): # (?i): 大文字小文字を区別しないモードを開始する # (?-i): 大文字小文字を区別しないモードを無効にする", "#for m in re.findall(self.pattern, template): #for m in re.finditer(self.pattern, template):", "def find_place_holders(self, template:str): #for m in re.findall(self.pattern, template): #for m", "= '([0-9]+)' def find_place_holders(self, template:str): #for m in re.findall(self.pattern, template):", "find_place_holders(self, template:str): #for m in re.findall(self.pattern, template): #for m in", "m in re.findall(self.pattern, template): #for m in re.finditer(self.pattern, template): for", "# (?-i): 大文字小文字を区別しないモードを無効にする idpattern_default = Template.idpattern # (?-i:[_a-zA-Z][_a-zA-Z0-9]*) idpattern =", "大文字小文字を区別しないモードを無効にする idpattern_default = Template.idpattern # (?-i:[_a-zA-Z][_a-zA-Z0-9]*) idpattern = '([0-9]+)' def", "大文字小文字を区別しないモードを開始する # (?-i): 大文字小文字を区別しないモードを無効にする idpattern_default = Template.idpattern # (?-i:[_a-zA-Z][_a-zA-Z0-9]*) idpattern", "template): #for m in re.finditer(self.pattern, template): for m in self.pattern.finditer(template):", "# (?-i:[_a-zA-Z][_a-zA-Z0-9]*) idpattern = '([0-9]+)' def find_place_holders(self, template:str): #for m", "#print(m.group('invalid')) if __name__ == '__main__': template_str = '${0} is Aug.'", "= PositionalArgumentTemplate(template_str) print(template_str) print(dir(t)) print(t.delimiter) print(t.idpattern) print(type(t.idpattern)) print(t.flags) print(t.pattern) print(t.substitute(**{'0':'V'}))", "template:str): #for m in re.findall(self.pattern, template): #for m in re.finditer(self.pattern,", "type(m)) #print(m.group('escaped')) #print(m.group('named')) 
#print(m.group('braced')) #print(m.group('invalid')) if __name__ == '__main__': template_str", "template): for m in self.pattern.finditer(template): print(m, type(m)) #print(dir(m)) #print(len(m.groups())) print(m[0])", "#print(m.groups()) #print(m, m.groups(), m.group('named'), type(m)) #print(m.group('escaped')) #print(m.group('named')) #print(m.group('braced')) #print(m.group('invalid')) if", "if __name__ == '__main__': template_str = '${0} is Aug.' t", "for m in self.pattern.finditer(template): print(m, type(m)) #print(dir(m)) #print(len(m.groups())) print(m[0]) #print(m.groups())", "re class PositionalArgumentTemplate(Template): # (?i): 大文字小文字を区別しないモードを開始する # (?-i): 大文字小文字を区別しないモードを無効にする idpattern_default", "string import Template import re class PositionalArgumentTemplate(Template): # (?i): 大文字小文字を区別しないモードを開始する", "Aug.' t = PositionalArgumentTemplate(template_str) print(template_str) print(dir(t)) print(t.delimiter) print(t.idpattern) print(type(t.idpattern)) print(t.flags)", "print(m, type(m)) #print(dir(m)) #print(len(m.groups())) print(m[0]) #print(m.groups()) #print(m, m.groups(), m.group('named'), type(m))" ]
[ "add duplicate company data = { 'company_id' : uuid.uuid4(), 'company_name'", "assert response.status == HTTP_200 # add duplicate company data =", "import os import requests import uuid import hug import pytest", "# add duplicate company data = { 'company_id' : uuid.uuid4(),", "= 'test_company_name' data = { 'company_id' : uuid.uuid4() , 'company_name'", "pytest from falcon import HTTP_200, HTTP_409 import cla from cla", "# Copyright The Linux Foundation and each contributor to CommunityBridge.", "os.environ.get('API_URL') def test_create_company_duplicate(): \"\"\" Test creating duplicate company names \"\"\"", "Copyright The Linux Foundation and each contributor to CommunityBridge. #", "'test_company_name' data = { 'company_id' : uuid.uuid4() , 'company_name' :", "import hug import pytest from falcon import HTTP_200, HTTP_409 import", "contributor to CommunityBridge. # SPDX-License-Identifier: MIT import json import os", "f'{API_URL}/v1/company' company_name = 'test_company_name' data = { 'company_id' : uuid.uuid4()", "cla import routes ID_TOKEN = os.environ.get('ID_TOKEN') API_URL = os.environ.get('API_URL') def", "company_name } req = hug.test.post(routes, url, data=data, headers=headers) assert req.status", "import requests import uuid import hug import pytest from falcon", "def test_create_company_duplicate(): \"\"\" Test creating duplicate company names \"\"\" import", "= { 'company_id' : uuid.uuid4() , 'company_name' : company_name ,", "falcon import HTTP_200, HTTP_409 import cla from cla import routes", "creating duplicate company names \"\"\" import pdb;pdb.set_trace() url = f'{API_URL}/v1/company'", "url = f'{API_URL}/v1/company' company_name = 'test_company_name' data = { 'company_id'", "{ 'Authorization' : f'Bearer {ID_TOKEN}' } response = requests.post(url, data=data,", "import uuid import hug import pytest from falcon import HTTP_200,", "and each contributor to CommunityBridge. 
# SPDX-License-Identifier: MIT import json", "f'Bearer {ID_TOKEN}' } response = requests.post(url, data=data, headers=headers) assert response.status", "hug import pytest from falcon import HTTP_200, HTTP_409 import cla", "cla from cla import routes ID_TOKEN = os.environ.get('ID_TOKEN') API_URL =", "os import requests import uuid import hug import pytest from", "'Authorization' : f'Bearer {ID_TOKEN}' } response = requests.post(url, data=data, headers=headers)", "Foundation and each contributor to CommunityBridge. # SPDX-License-Identifier: MIT import", "headers = { 'Authorization' : f'Bearer {ID_TOKEN}' } response =", "requests import uuid import hug import pytest from falcon import", "'company_name' : company_name } req = hug.test.post(routes, url, data=data, headers=headers)", "= os.environ.get('API_URL') def test_create_company_duplicate(): \"\"\" Test creating duplicate company names", "ID_TOKEN = os.environ.get('ID_TOKEN') API_URL = os.environ.get('API_URL') def test_create_company_duplicate(): \"\"\" Test", ", } headers = { 'Authorization' : f'Bearer {ID_TOKEN}' }", "data = { 'company_id' : uuid.uuid4() , 'company_name' : company_name", "= { 'company_id' : uuid.uuid4(), 'company_name' : company_name } req", "company_name , } headers = { 'Authorization' : f'Bearer {ID_TOKEN}'", "\"\"\" import pdb;pdb.set_trace() url = f'{API_URL}/v1/company' company_name = 'test_company_name' data", "= os.environ.get('ID_TOKEN') API_URL = os.environ.get('API_URL') def test_create_company_duplicate(): \"\"\" Test creating", "response.status == HTTP_200 # add duplicate company data = {", "company data = { 'company_id' : uuid.uuid4(), 'company_name' : company_name", "'company_id' : uuid.uuid4(), 'company_name' : company_name } req = hug.test.post(routes,", "HTTP_200 # add duplicate company data = { 'company_id' :", "{ID_TOKEN}' } response = requests.post(url, data=data, headers=headers) assert response.status ==", "= { 'Authorization' : f'Bearer {ID_TOKEN}' } response = 
requests.post(url,", "requests.post(url, data=data, headers=headers) assert response.status == HTTP_200 # add duplicate", "os.environ.get('ID_TOKEN') API_URL = os.environ.get('API_URL') def test_create_company_duplicate(): \"\"\" Test creating duplicate", "data=data, headers=headers) assert response.status == HTTP_200 # add duplicate company", "} req = hug.test.post(routes, url, data=data, headers=headers) assert req.status ==", ": uuid.uuid4() , 'company_name' : company_name , } headers =", "import pytest from falcon import HTTP_200, HTTP_409 import cla from", "routes ID_TOKEN = os.environ.get('ID_TOKEN') API_URL = os.environ.get('API_URL') def test_create_company_duplicate(): \"\"\"", "Test creating duplicate company names \"\"\" import pdb;pdb.set_trace() url =", "CommunityBridge. # SPDX-License-Identifier: MIT import json import os import requests", "company names \"\"\" import pdb;pdb.set_trace() url = f'{API_URL}/v1/company' company_name =", "# SPDX-License-Identifier: MIT import json import os import requests import", "== HTTP_200 # add duplicate company data = { 'company_id'", "Linux Foundation and each contributor to CommunityBridge. 
# SPDX-License-Identifier: MIT", ": company_name } req = hug.test.post(routes, url, data=data, headers=headers) assert", "data = { 'company_id' : uuid.uuid4(), 'company_name' : company_name }", "\"\"\" Test creating duplicate company names \"\"\" import pdb;pdb.set_trace() url", "import HTTP_200, HTTP_409 import cla from cla import routes ID_TOKEN", "uuid.uuid4() , 'company_name' : company_name , } headers = {", "SPDX-License-Identifier: MIT import json import os import requests import uuid", "pdb;pdb.set_trace() url = f'{API_URL}/v1/company' company_name = 'test_company_name' data = {", "company_name = 'test_company_name' data = { 'company_id' : uuid.uuid4() ,", "= f'{API_URL}/v1/company' company_name = 'test_company_name' data = { 'company_id' :", "names \"\"\" import pdb;pdb.set_trace() url = f'{API_URL}/v1/company' company_name = 'test_company_name'", "import cla from cla import routes ID_TOKEN = os.environ.get('ID_TOKEN') API_URL", "API_URL = os.environ.get('API_URL') def test_create_company_duplicate(): \"\"\" Test creating duplicate company", "HTTP_409 import cla from cla import routes ID_TOKEN = os.environ.get('ID_TOKEN')", "each contributor to CommunityBridge. 
# SPDX-License-Identifier: MIT import json import", "} response = requests.post(url, data=data, headers=headers) assert response.status == HTTP_200", ", 'company_name' : company_name , } headers = { 'Authorization'", "uuid.uuid4(), 'company_name' : company_name } req = hug.test.post(routes, url, data=data,", "from cla import routes ID_TOKEN = os.environ.get('ID_TOKEN') API_URL = os.environ.get('API_URL')", "'company_id' : uuid.uuid4() , 'company_name' : company_name , } headers", "import json import os import requests import uuid import hug", "duplicate company data = { 'company_id' : uuid.uuid4(), 'company_name' :", "json import os import requests import uuid import hug import", "{ 'company_id' : uuid.uuid4(), 'company_name' : company_name } req =", "MIT import json import os import requests import uuid import", "from falcon import HTTP_200, HTTP_409 import cla from cla import", "import routes ID_TOKEN = os.environ.get('ID_TOKEN') API_URL = os.environ.get('API_URL') def test_create_company_duplicate():", "import pdb;pdb.set_trace() url = f'{API_URL}/v1/company' company_name = 'test_company_name' data =", "uuid import hug import pytest from falcon import HTTP_200, HTTP_409", "duplicate company names \"\"\" import pdb;pdb.set_trace() url = f'{API_URL}/v1/company' company_name", "= requests.post(url, data=data, headers=headers) assert response.status == HTTP_200 # add", "'company_name' : company_name , } headers = { 'Authorization' :", "test_create_company_duplicate(): \"\"\" Test creating duplicate company names \"\"\" import pdb;pdb.set_trace()", ": f'Bearer {ID_TOKEN}' } response = requests.post(url, data=data, headers=headers) assert", "} headers = { 'Authorization' : f'Bearer {ID_TOKEN}' } response", "The Linux Foundation and each contributor to CommunityBridge. 
# SPDX-License-Identifier:", "<filename>cla-backend/cla/tests/unit/test_company.py<gh_stars>0 # Copyright The Linux Foundation and each contributor to", ": company_name , } headers = { 'Authorization' : f'Bearer", "{ 'company_id' : uuid.uuid4() , 'company_name' : company_name , }", "req = hug.test.post(routes, url, data=data, headers=headers) assert req.status == HTTP_409", ": uuid.uuid4(), 'company_name' : company_name } req = hug.test.post(routes, url,", "HTTP_200, HTTP_409 import cla from cla import routes ID_TOKEN =", "response = requests.post(url, data=data, headers=headers) assert response.status == HTTP_200 #", "headers=headers) assert response.status == HTTP_200 # add duplicate company data", "to CommunityBridge. # SPDX-License-Identifier: MIT import json import os import" ]
[ "for i, x in enumerate(l1): GCL[GCint] = \"[%i] \" %", "write_str(\"Empty List\", GCL) else: list2str(x, writeInd, GCL, GCint, size1) elif", "88: width1 = 88 self.Width = 10 + (width1 +", "= ln1 str_file.write(\"%s%s\\n\" % (\"\".join(GCL), str1) ) def list2str(l1, writeInd,", "import * from cStringIO import StringIO str_file = StringIO() size1", "2) * 9 #character width seems to vary between PCs", "# @5devene, <EMAIL> # www.badmonkeys.net import clr clr.AddReference('System.Windows.Forms') clr.AddReference('System.Drawing') from", "= Font(\"Calibri\", 12) self.box1.BackColor = Color.FromArgb(53,53,53) self.box1.ForeColor = Color.FromArgb(234,234,234) self.box1.DetectUrls", "GCint=-1, size1=size1): if GCL is None: GCL = [] GCint", "70) def save(self, sender, event): self.text1 = self.box1.Text self.Close() l1", "Color.FromArgb(234,234,234) self.button1.Click += self.save self.Controls.Add(self.button1) self.box1 = RichTextBox() self.box1.Multiline =", "(width1 + 2) * 9 #character width seems to vary", "else: return [obj1] def write_str(str1, GCL, str_file=str_file, size1=size1): ln1 =", "23] #height, width def tolist(obj1): if hasattr(obj1,\"__iter__\"): return obj1 else:", "\" % i if writeInd else \" \" if hasattr(x,", "= self.Width - 17 self.box1.Height = self.Height - 80 self.button1.Location", "self.Controls.Add(self.box1) def adjust_controls(self, height1, width1): if height1 > 800: height1", "> size1[1]: size1[1] = ln1 str_file.write(\"%s%s\\n\" % (\"\".join(GCL), str1) )", "self.Height - 70) def save(self, sender, event): self.text1 = self.box1.Text", "height1 > 800: height1 = 800 self.box1.ScrollBars = RichTextBoxScrollBars.Vertical if", "import Point, Color, Font from System.Windows.Forms import * from cStringIO", "+= self.resize1 self.text1 = None self.button1 = Button() self.button1.Text =", "\" if hasattr(x, \"Id\"): #is element write_str(\"%s %i\" % (x.ToString(),", "= height1 + 90 self.box1.Width = self.Width - 17 self.box1.Height", "i if writeInd 
else \" \" if hasattr(x, \"Id\"): #is", "= self.box1.Text self.Close() l1 = [] if IN[0] is None", "is None: write_str(\"null\", GCL) else: write_str(x.ToString(), GCL) size1[0] += 19", "FormStartPosition.CenterScreen self.Resize += self.resize1 self.text1 = None self.button1 = Button()", "x: write_str(\"Empty List\", GCL) else: list2str(x, writeInd, GCL, GCint, size1)", "= \"[%i] \" % i if writeInd else \" \"", "list2str(x, writeInd, GCL, GCint, size1) elif x is None: write_str(\"null\",", "19 GCL.pop(GCint) GCint -= 1 class WatchBox(Form): def __init__(self, t1):", "self.Width = 230 if self.Height < 120: self.Height = 120", "# Copyright(c) 2017, <NAME> # @5devene, <EMAIL> # www.badmonkeys.net import", "103, self.Height - 70) def save(self, sender, event): self.text1 =", "None else tolist(IN[0]) list2str(l1, IN[1]) str_content = str_file.getvalue() str_file.close() width1", "self.button1.Font = Font(\"Calibri\", 10) self.button1.AutoSize = True self.button1.Width = 200", "= 10 + (width1 + 2) * 9 #character width", "writeInd, GCL=None, GCint=-1, size1=size1): if GCL is None: GCL =", "self.text1 = None self.button1 = Button() self.button1.Text = 'Close' self.button1.Font", "return obj1 else: return [obj1] def write_str(str1, GCL, str_file=str_file, size1=size1):", "writeInd, GCL, GCint, size1) elif x is None: write_str(\"null\", GCL)", "(x.ToString(), x.Id), GCL) elif hasattr(x, \"__iter__\"): if not x: write_str(\"Empty", "= [] if IN[0] is None else tolist(IN[0]) list2str(l1, IN[1])", "write_str(str1, GCL, str_file=str_file, size1=size1): ln1 = len(str1) if ln1 >", "self.save self.Controls.Add(self.button1) self.box1 = RichTextBox() self.box1.Multiline = True self.box1.Location =", "self.box1.Width = self.Width - 17 self.box1.Height = self.Height - 80", "1 class WatchBox(Form): def __init__(self, t1): self.Text = \"SpringNodes: Expandable", "True self.button1.Width = 200 self.button1.ForeColor = Color.FromArgb(234,234,234) self.button1.Click += self.save", 
"size1[1]: size1[1] = ln1 str_file.write(\"%s%s\\n\" % (\"\".join(GCL), str1) ) def", "self.button1.AutoSize = True self.button1.Width = 200 self.button1.ForeColor = Color.FromArgb(234,234,234) self.button1.Click", "* 9 #character width seems to vary between PCs self.Height", "self.TopMost = True self.FormBorderStyle = FormBorderStyle.Sizable self.StartPosition = FormStartPosition.CenterScreen self.Resize", "size1) elif x is None: write_str(\"null\", GCL) else: write_str(x.ToString(), GCL)", "width1 = 23 if width1 > 88: width1 = 88", "[] GCint += 1 GCL.append(None) for i, x in enumerate(l1):", "- 70) def save(self, sender, event): self.text1 = self.box1.Text self.Close()", "= False self.TopMost = True self.FormBorderStyle = FormBorderStyle.Sizable self.StartPosition =", "= Font(\"Calibri\", 10) self.button1.AutoSize = True self.button1.Width = 200 self.button1.ForeColor", "width1 = 100 form = WatchBox(str_content) form.adjust_controls(*size1) Application.Run(form) OUT =", "from System.Drawing import Point, Color, Font from System.Windows.Forms import *", "GCL=None, GCint=-1, size1=size1): if GCL is None: GCL = []", "= RichTextBox() self.box1.Multiline = True self.box1.Location = Point(5, 5) self.box1.Font", "= [30, 23] #height, width def tolist(obj1): if hasattr(obj1,\"__iter__\"): return", "__init__(self, t1): self.Text = \"SpringNodes: Expandable Watch Window\" self.BackColor =", "save(self, sender, event): self.text1 = self.box1.Text self.Close() l1 = []", "def adjust_controls(self, height1, width1): if height1 > 800: height1 =", "seems to vary between PCs self.Height = height1 + 90", "> 800: height1 = 800 self.box1.ScrollBars = RichTextBoxScrollBars.Vertical if width1", "System.Drawing import Point, Color, Font from System.Windows.Forms import * from", "self.FormBorderStyle = FormBorderStyle.Sizable self.StartPosition = FormStartPosition.CenterScreen self.Resize += self.resize1 self.text1", "width1 > 88: width1 = 88 self.Width = 10 +", "5) self.box1.Font = 
Font(\"Calibri\", 12) self.box1.BackColor = Color.FromArgb(53,53,53) self.box1.ForeColor =", "GCL) else: write_str(x.ToString(), GCL) size1[0] += 19 GCL.pop(GCint) GCint -=", "= len(str1) if ln1 > size1[1]: size1[1] = ln1 str_file.write(\"%s%s\\n\"", "self.box1.Text self.Close() l1 = [] if IN[0] is None else", "form = WatchBox(str_content) form.adjust_controls(*size1) Application.Run(form) OUT = form.text1 Application.Exit() form.Dispose()", "writeInd else \" \" if hasattr(x, \"Id\"): #is element write_str(\"%s", "x in enumerate(l1): GCL[GCint] = \"[%i] \" % i if", "str_file = StringIO() size1 = [30, 23] #height, width def", "hasattr(obj1,\"__iter__\"): return obj1 else: return [obj1] def write_str(str1, GCL, str_file=str_file,", "i, x in enumerate(l1): GCL[GCint] = \"[%i] \" % i", "= RichTextBoxScrollBars.Vertical if width1 < 23 : width1 = 23", "< 120: self.Height = 120 self.box1.Width = self.Width - 17", "= self.Height - 80 self.button1.Location = Point(self.Width/2 - 103, self.Height", "= FormStartPosition.CenterScreen self.Resize += self.resize1 self.text1 = None self.button1 =", "GCL, str_file=str_file, size1=size1): ln1 = len(str1) if ln1 > size1[1]:", "120: self.Height = 120 self.box1.Width = self.Width - 17 self.box1.Height", "self.Width - 17 self.box1.Height = self.Height - 80 self.button1.Location =", "def save(self, sender, event): self.text1 = self.box1.Text self.Close() l1 =", "Point(self.Width/2 - 103, self.Height - 70) def resize1(self, sender, event):", "23 : width1 = 23 if width1 > 88: width1", "self.box1.BackColor = Color.FromArgb(53,53,53) self.box1.ForeColor = Color.FromArgb(234,234,234) self.box1.DetectUrls = True self.box1.Text", "<gh_stars>10-100 # Copyright(c) 2017, <NAME> # @5devene, <EMAIL> # www.badmonkeys.net", "not x: write_str(\"Empty List\", GCL) else: list2str(x, writeInd, GCL, GCint,", "IN[0] is None else tolist(IN[0]) list2str(l1, IN[1]) str_content = str_file.getvalue()", "RichTextBoxScrollBars.Vertical if width1 < 23 : width1 = 
23 if", "= Point(5, 5) self.box1.Font = Font(\"Calibri\", 12) self.box1.BackColor = Color.FromArgb(53,53,53)", "@5devene, <EMAIL> # www.badmonkeys.net import clr clr.AddReference('System.Windows.Forms') clr.AddReference('System.Drawing') from System.Drawing", "230 if self.Height < 120: self.Height = 120 self.box1.Width =", "210: self.Width = 230 if self.Height < 120: self.Height =", "if width1 > 88: width1 = 88 self.Width = 10", "else: write_str(x.ToString(), GCL) size1[0] += 19 GCL.pop(GCint) GCint -= 1", "= None self.button1 = Button() self.button1.Text = 'Close' self.button1.Font =", "len(str1) if ln1 > size1[1]: size1[1] = ln1 str_file.write(\"%s%s\\n\" %", "sender, event): self.text1 = self.box1.Text self.Close() l1 = [] if", "elif hasattr(x, \"__iter__\"): if not x: write_str(\"Empty List\", GCL) else:", "+ (width1 + 2) * 9 #character width seems to", "self.resize1 self.text1 = None self.button1 = Button() self.button1.Text = 'Close'", "width seems to vary between PCs self.Height = height1 +", "Color, Font from System.Windows.Forms import * from cStringIO import StringIO", "self.Controls.Add(self.button1) self.box1 = RichTextBox() self.box1.Multiline = True self.box1.Location = Point(5,", "Color.FromArgb(40,40,40) self.ControlBox = False self.TopMost = True self.FormBorderStyle = FormBorderStyle.Sizable", "Font(\"Calibri\", 10) self.button1.AutoSize = True self.button1.Width = 200 self.button1.ForeColor =", "str_content = str_file.getvalue() str_file.close() width1 = 100 form = WatchBox(str_content)", "Button() self.button1.Text = 'Close' self.button1.Font = Font(\"Calibri\", 10) self.button1.AutoSize =", "if width1 < 23 : width1 = 23 if width1", "10) self.button1.AutoSize = True self.button1.Width = 200 self.button1.ForeColor = Color.FromArgb(234,234,234)", "[30, 23] #height, width def tolist(obj1): if hasattr(obj1,\"__iter__\"): return obj1", "\"[%i] \" % i if writeInd else \" \" if", "12) self.box1.BackColor = Color.FromArgb(53,53,53) self.box1.ForeColor = 
Color.FromArgb(234,234,234) self.box1.DetectUrls = True", "80 self.button1.Location = Point(self.Width/2 - 103, self.Height - 70) def", ") def list2str(l1, writeInd, GCL=None, GCint=-1, size1=size1): if GCL is", "size1 = [30, 23] #height, width def tolist(obj1): if hasattr(obj1,\"__iter__\"):", "= 23 if width1 > 88: width1 = 88 self.Width", "if ln1 > size1[1]: size1[1] = ln1 str_file.write(\"%s%s\\n\" % (\"\".join(GCL),", "= Point(self.Width/2 - 103, self.Height - 70) def save(self, sender,", "self.Resize += self.resize1 self.text1 = None self.button1 = Button() self.button1.Text", "if GCL is None: GCL = [] GCint += 1", "= 800 self.box1.ScrollBars = RichTextBoxScrollBars.Vertical if width1 < 23 :", "x.Id), GCL) elif hasattr(x, \"__iter__\"): if not x: write_str(\"Empty List\",", "if self.Height < 120: self.Height = 120 self.box1.Width = self.Width", "else: list2str(x, writeInd, GCL, GCint, size1) elif x is None:", "size1[0] += 19 GCL.pop(GCint) GCint -= 1 class WatchBox(Form): def", "# www.badmonkeys.net import clr clr.AddReference('System.Windows.Forms') clr.AddReference('System.Drawing') from System.Drawing import Point,", "= t1 self.Controls.Add(self.box1) def adjust_controls(self, height1, width1): if height1 >", "= True self.box1.Text = t1 self.Controls.Add(self.box1) def adjust_controls(self, height1, width1):", "+ 90 self.box1.Width = self.Width - 17 self.box1.Height = self.Height", "17 self.box1.Height = self.Height - 80 self.button1.Location = Point(self.Width/2 -", "True self.box1.Text = t1 self.Controls.Add(self.box1) def adjust_controls(self, height1, width1): if", "- 103, self.Height - 70) def resize1(self, sender, event): if", "88 self.Width = 10 + (width1 + 2) * 9", "self.button1.Location = Point(self.Width/2 - 103, self.Height - 70) def save(self,", "www.badmonkeys.net import clr clr.AddReference('System.Windows.Forms') clr.AddReference('System.Drawing') from System.Drawing import Point, Color,", "= Color.FromArgb(234,234,234) self.button1.Click += 
self.save self.Controls.Add(self.button1) self.box1 = RichTextBox() self.box1.Multiline", "Watch Window\" self.BackColor = Color.FromArgb(40,40,40) self.ControlBox = False self.TopMost =", "True self.FormBorderStyle = FormBorderStyle.Sizable self.StartPosition = FormStartPosition.CenterScreen self.Resize += self.resize1", "str_file.close() width1 = 100 form = WatchBox(str_content) form.adjust_controls(*size1) Application.Run(form) OUT", "import StringIO str_file = StringIO() size1 = [30, 23] #height,", "to vary between PCs self.Height = height1 + 90 self.box1.Width", "Point(self.Width/2 - 103, self.Height - 70) def save(self, sender, event):", "%i\" % (x.ToString(), x.Id), GCL) elif hasattr(x, \"__iter__\"): if not", "WatchBox(Form): def __init__(self, t1): self.Text = \"SpringNodes: Expandable Watch Window\"", "GCint += 1 GCL.append(None) for i, x in enumerate(l1): GCL[GCint]", "Font from System.Windows.Forms import * from cStringIO import StringIO str_file", "size1=size1): ln1 = len(str1) if ln1 > size1[1]: size1[1] =", "width1 = 88 self.Width = 10 + (width1 + 2)", "class WatchBox(Form): def __init__(self, t1): self.Text = \"SpringNodes: Expandable Watch", "ln1 = len(str1) if ln1 > size1[1]: size1[1] = ln1", "event): if self.Width < 210: self.Width = 230 if self.Height", "= 120 self.box1.Width = self.Width - 17 self.box1.Height = self.Height", "self.button1.Text = 'Close' self.button1.Font = Font(\"Calibri\", 10) self.button1.AutoSize = True", "write_str(\"null\", GCL) else: write_str(x.ToString(), GCL) size1[0] += 19 GCL.pop(GCint) GCint", "> 88: width1 = 88 self.Width = 10 + (width1", "cStringIO import StringIO str_file = StringIO() size1 = [30, 23]", "height1 + 90 self.box1.Width = self.Width - 17 self.box1.Height =", "if hasattr(x, \"Id\"): #is element write_str(\"%s %i\" % (x.ToString(), x.Id),", "if writeInd else \" \" if hasattr(x, \"Id\"): #is element", "self.button1.Click += self.save self.Controls.Add(self.button1) self.box1 = RichTextBox() 
self.box1.Multiline = True", "<EMAIL> # www.badmonkeys.net import clr clr.AddReference('System.Windows.Forms') clr.AddReference('System.Drawing') from System.Drawing import", "= str_file.getvalue() str_file.close() width1 = 100 form = WatchBox(str_content) form.adjust_controls(*size1)", "103, self.Height - 70) def resize1(self, sender, event): if self.Width", "if self.Width < 210: self.Width = 230 if self.Height <", "self.button1.Location = Point(self.Width/2 - 103, self.Height - 70) def resize1(self,", "= 200 self.button1.ForeColor = Color.FromArgb(234,234,234) self.button1.Click += self.save self.Controls.Add(self.button1) self.box1", "True self.box1.Location = Point(5, 5) self.box1.Font = Font(\"Calibri\", 12) self.box1.BackColor", "width1 < 23 : width1 = 23 if width1 >", "clr.AddReference('System.Drawing') from System.Drawing import Point, Color, Font from System.Windows.Forms import", "enumerate(l1): GCL[GCint] = \"[%i] \" % i if writeInd else", "- 17 self.box1.Height = self.Height - 80 self.button1.Location = Point(self.Width/2", "Color.FromArgb(234,234,234) self.box1.DetectUrls = True self.box1.Text = t1 self.Controls.Add(self.box1) def adjust_controls(self,", "self.Width = 10 + (width1 + 2) * 9 #character", "= StringIO() size1 = [30, 23] #height, width def tolist(obj1):", "2017, <NAME> # @5devene, <EMAIL> # www.badmonkeys.net import clr clr.AddReference('System.Windows.Forms')", "return [obj1] def write_str(str1, GCL, str_file=str_file, size1=size1): ln1 = len(str1)", "event): self.text1 = self.box1.Text self.Close() l1 = [] if IN[0]", "\"SpringNodes: Expandable Watch Window\" self.BackColor = Color.FromArgb(40,40,40) self.ControlBox = False", "self.Close() l1 = [] if IN[0] is None else tolist(IN[0])", "self.Height - 80 self.button1.Location = Point(self.Width/2 - 103, self.Height -", "GCL) elif hasattr(x, \"__iter__\"): if not x: write_str(\"Empty List\", GCL)", "10 + (width1 + 2) * 9 #character width seems", "between PCs self.Height = height1 + 90 
self.box1.Width = self.Width", "def list2str(l1, writeInd, GCL=None, GCint=-1, size1=size1): if GCL is None:", "+= 1 GCL.append(None) for i, x in enumerate(l1): GCL[GCint] =", "< 210: self.Width = 230 if self.Height < 120: self.Height", "GCL.pop(GCint) GCint -= 1 class WatchBox(Form): def __init__(self, t1): self.Text", "<NAME> # @5devene, <EMAIL> # www.badmonkeys.net import clr clr.AddReference('System.Windows.Forms') clr.AddReference('System.Drawing')", "(\"\".join(GCL), str1) ) def list2str(l1, writeInd, GCL=None, GCint=-1, size1=size1): if", "+ 2) * 9 #character width seems to vary between", "StringIO() size1 = [30, 23] #height, width def tolist(obj1): if", "str_file.getvalue() str_file.close() width1 = 100 form = WatchBox(str_content) form.adjust_controls(*size1) Application.Run(form)", "= 'Close' self.button1.Font = Font(\"Calibri\", 10) self.button1.AutoSize = True self.button1.Width", "PCs self.Height = height1 + 90 self.box1.Width = self.Width -", "1 GCL.append(None) for i, x in enumerate(l1): GCL[GCint] = \"[%i]", "self.box1.Location = Point(5, 5) self.box1.Font = Font(\"Calibri\", 12) self.box1.BackColor =", "200 self.button1.ForeColor = Color.FromArgb(234,234,234) self.button1.Click += self.save self.Controls.Add(self.button1) self.box1 =", "self.box1.Height = self.Height - 80 self.button1.Location = Point(self.Width/2 - 103,", "\"Id\"): #is element write_str(\"%s %i\" % (x.ToString(), x.Id), GCL) elif", "adjust_controls(self, height1, width1): if height1 > 800: height1 = 800", "else \" \" if hasattr(x, \"Id\"): #is element write_str(\"%s %i\"", "self.button1 = Button() self.button1.Text = 'Close' self.button1.Font = Font(\"Calibri\", 10)", "StringIO str_file = StringIO() size1 = [30, 23] #height, width", "import clr clr.AddReference('System.Windows.Forms') clr.AddReference('System.Drawing') from System.Drawing import Point, Color, Font", "- 103, self.Height - 70) def save(self, sender, event): self.text1", "Color.FromArgb(53,53,53) self.box1.ForeColor 
= Color.FromArgb(234,234,234) self.box1.DetectUrls = True self.box1.Text = t1", "height1, width1): if height1 > 800: height1 = 800 self.box1.ScrollBars", "GCL.append(None) for i, x in enumerate(l1): GCL[GCint] = \"[%i] \"", "RichTextBox() self.box1.Multiline = True self.box1.Location = Point(5, 5) self.box1.Font =", "#character width seems to vary between PCs self.Height = height1", "if not x: write_str(\"Empty List\", GCL) else: list2str(x, writeInd, GCL,", "self.box1.ForeColor = Color.FromArgb(234,234,234) self.box1.DetectUrls = True self.box1.Text = t1 self.Controls.Add(self.box1)", "self.box1.Font = Font(\"Calibri\", 12) self.box1.BackColor = Color.FromArgb(53,53,53) self.box1.ForeColor = Color.FromArgb(234,234,234)", "Window\" self.BackColor = Color.FromArgb(40,40,40) self.ControlBox = False self.TopMost = True", "GCL) else: list2str(x, writeInd, GCL, GCint, size1) elif x is", "GCL) size1[0] += 19 GCL.pop(GCint) GCint -= 1 class WatchBox(Form):", "x is None: write_str(\"null\", GCL) else: write_str(x.ToString(), GCL) size1[0] +=", "self.Height - 70) def resize1(self, sender, event): if self.Width <", "hasattr(x, \"Id\"): #is element write_str(\"%s %i\" % (x.ToString(), x.Id), GCL)", "Font(\"Calibri\", 12) self.box1.BackColor = Color.FromArgb(53,53,53) self.box1.ForeColor = Color.FromArgb(234,234,234) self.box1.DetectUrls =", "23 if width1 > 88: width1 = 88 self.Width =", "False self.TopMost = True self.FormBorderStyle = FormBorderStyle.Sizable self.StartPosition = FormStartPosition.CenterScreen", "def resize1(self, sender, event): if self.Width < 210: self.Width =", "#is element write_str(\"%s %i\" % (x.ToString(), x.Id), GCL) elif hasattr(x,", "from System.Windows.Forms import * from cStringIO import StringIO str_file =", "self.button1.ForeColor = Color.FromArgb(234,234,234) self.button1.Click += self.save self.Controls.Add(self.button1) self.box1 = RichTextBox()", "Expandable Watch Window\" self.BackColor = Color.FromArgb(40,40,40) self.ControlBox = False 
self.TopMost", "self.Text = \"SpringNodes: Expandable Watch Window\" self.BackColor = Color.FromArgb(40,40,40) self.ControlBox", "= Point(self.Width/2 - 103, self.Height - 70) def resize1(self, sender,", "self.Height = height1 + 90 self.box1.Width = self.Width - 17", "width def tolist(obj1): if hasattr(obj1,\"__iter__\"): return obj1 else: return [obj1]", "GCint -= 1 class WatchBox(Form): def __init__(self, t1): self.Text =", "self.StartPosition = FormStartPosition.CenterScreen self.Resize += self.resize1 self.text1 = None self.button1", "\"__iter__\"): if not x: write_str(\"Empty List\", GCL) else: list2str(x, writeInd,", "self.box1.DetectUrls = True self.box1.Text = t1 self.Controls.Add(self.box1) def adjust_controls(self, height1,", "[] if IN[0] is None else tolist(IN[0]) list2str(l1, IN[1]) str_content", "obj1 else: return [obj1] def write_str(str1, GCL, str_file=str_file, size1=size1): ln1", "resize1(self, sender, event): if self.Width < 210: self.Width = 230", "self.ControlBox = False self.TopMost = True self.FormBorderStyle = FormBorderStyle.Sizable self.StartPosition", "t1 self.Controls.Add(self.box1) def adjust_controls(self, height1, width1): if height1 > 800:", "+= 19 GCL.pop(GCint) GCint -= 1 class WatchBox(Form): def __init__(self,", "width1): if height1 > 800: height1 = 800 self.box1.ScrollBars =", "vary between PCs self.Height = height1 + 90 self.box1.Width =", "Point, Color, Font from System.Windows.Forms import * from cStringIO import", "is None: GCL = [] GCint += 1 GCL.append(None) for", "clr clr.AddReference('System.Windows.Forms') clr.AddReference('System.Drawing') from System.Drawing import Point, Color, Font from", "from cStringIO import StringIO str_file = StringIO() size1 = [30,", "self.box1.Text = t1 self.Controls.Add(self.box1) def adjust_controls(self, height1, width1): if height1", "800 self.box1.ScrollBars = RichTextBoxScrollBars.Vertical if width1 < 23 : width1", "9 #character width seems to vary between PCs self.Height =", "- 80 
self.button1.Location = Point(self.Width/2 - 103, self.Height - 70)", "str_file.write(\"%s%s\\n\" % (\"\".join(GCL), str1) ) def list2str(l1, writeInd, GCL=None, GCint=-1,", "% (\"\".join(GCL), str1) ) def list2str(l1, writeInd, GCL=None, GCint=-1, size1=size1):", "write_str(x.ToString(), GCL) size1[0] += 19 GCL.pop(GCint) GCint -= 1 class", "ln1 > size1[1]: size1[1] = ln1 str_file.write(\"%s%s\\n\" % (\"\".join(GCL), str1)", "% i if writeInd else \" \" if hasattr(x, \"Id\"):", "-= 1 class WatchBox(Form): def __init__(self, t1): self.Text = \"SpringNodes:", "hasattr(x, \"__iter__\"): if not x: write_str(\"Empty List\", GCL) else: list2str(x,", "= True self.FormBorderStyle = FormBorderStyle.Sizable self.StartPosition = FormStartPosition.CenterScreen self.Resize +=", "% (x.ToString(), x.Id), GCL) elif hasattr(x, \"__iter__\"): if not x:", "tolist(obj1): if hasattr(obj1,\"__iter__\"): return obj1 else: return [obj1] def write_str(str1,", "clr.AddReference('System.Windows.Forms') clr.AddReference('System.Drawing') from System.Drawing import Point, Color, Font from System.Windows.Forms", "is None else tolist(IN[0]) list2str(l1, IN[1]) str_content = str_file.getvalue() str_file.close()", "def __init__(self, t1): self.Text = \"SpringNodes: Expandable Watch Window\" self.BackColor", "90 self.box1.Width = self.Width - 17 self.box1.Height = self.Height -", "[obj1] def write_str(str1, GCL, str_file=str_file, size1=size1): ln1 = len(str1) if", "None self.button1 = Button() self.button1.Text = 'Close' self.button1.Font = Font(\"Calibri\",", "l1 = [] if IN[0] is None else tolist(IN[0]) list2str(l1,", "GCL = [] GCint += 1 GCL.append(None) for i, x", "GCint, size1) elif x is None: write_str(\"null\", GCL) else: write_str(x.ToString(),", "Point(5, 5) self.box1.Font = Font(\"Calibri\", 12) self.box1.BackColor = Color.FromArgb(53,53,53) self.box1.ForeColor", "self.box1.ScrollBars = RichTextBoxScrollBars.Vertical if width1 < 23 : width1 =", "height1 = 800 self.box1.ScrollBars = 
RichTextBoxScrollBars.Vertical if width1 < 23", "= True self.button1.Width = 200 self.button1.ForeColor = Color.FromArgb(234,234,234) self.button1.Click +=", "size1=size1): if GCL is None: GCL = [] GCint +=", "= 100 form = WatchBox(str_content) form.adjust_controls(*size1) Application.Run(form) OUT = form.text1", "def tolist(obj1): if hasattr(obj1,\"__iter__\"): return obj1 else: return [obj1] def", "def write_str(str1, GCL, str_file=str_file, size1=size1): ln1 = len(str1) if ln1", "write_str(\"%s %i\" % (x.ToString(), x.Id), GCL) elif hasattr(x, \"__iter__\"): if", "t1): self.Text = \"SpringNodes: Expandable Watch Window\" self.BackColor = Color.FromArgb(40,40,40)", "element write_str(\"%s %i\" % (x.ToString(), x.Id), GCL) elif hasattr(x, \"__iter__\"):", "Copyright(c) 2017, <NAME> # @5devene, <EMAIL> # www.badmonkeys.net import clr", "+= self.save self.Controls.Add(self.button1) self.box1 = RichTextBox() self.box1.Multiline = True self.box1.Location", "tolist(IN[0]) list2str(l1, IN[1]) str_content = str_file.getvalue() str_file.close() width1 = 100", "= Color.FromArgb(53,53,53) self.box1.ForeColor = Color.FromArgb(234,234,234) self.box1.DetectUrls = True self.box1.Text =", "FormBorderStyle.Sizable self.StartPosition = FormStartPosition.CenterScreen self.Resize += self.resize1 self.text1 = None", "100 form = WatchBox(str_content) form.adjust_controls(*size1) Application.Run(form) OUT = form.text1 Application.Exit()", "= 230 if self.Height < 120: self.Height = 120 self.box1.Width", "* from cStringIO import StringIO str_file = StringIO() size1 =", "self.Height < 120: self.Height = 120 self.box1.Width = self.Width -", "self.box1 = RichTextBox() self.box1.Multiline = True self.box1.Location = Point(5, 5)", "GCL, GCint, size1) elif x is None: write_str(\"null\", GCL) else:", "120 self.box1.Width = self.Width - 17 self.box1.Height = self.Height -", "< 23 : width1 = 23 if width1 > 88:", "self.box1.Multiline = True self.box1.Location = Point(5, 5) self.box1.Font = 
Font(\"Calibri\",", "self.Width < 210: self.Width = 230 if self.Height < 120:", "System.Windows.Forms import * from cStringIO import StringIO str_file = StringIO()", "self.text1 = self.box1.Text self.Close() l1 = [] if IN[0] is", "GCL is None: GCL = [] GCint += 1 GCL.append(None)", "= FormBorderStyle.Sizable self.StartPosition = FormStartPosition.CenterScreen self.Resize += self.resize1 self.text1 =", "size1[1] = ln1 str_file.write(\"%s%s\\n\" % (\"\".join(GCL), str1) ) def list2str(l1,", "\" \" if hasattr(x, \"Id\"): #is element write_str(\"%s %i\" %", "GCL[GCint] = \"[%i] \" % i if writeInd else \"", "= True self.box1.Location = Point(5, 5) self.box1.Font = Font(\"Calibri\", 12)", "str1) ) def list2str(l1, writeInd, GCL=None, GCint=-1, size1=size1): if GCL", "'Close' self.button1.Font = Font(\"Calibri\", 10) self.button1.AutoSize = True self.button1.Width =", "= Color.FromArgb(40,40,40) self.ControlBox = False self.TopMost = True self.FormBorderStyle =", "ln1 str_file.write(\"%s%s\\n\" % (\"\".join(GCL), str1) ) def list2str(l1, writeInd, GCL=None,", "IN[1]) str_content = str_file.getvalue() str_file.close() width1 = 100 form =", "70) def resize1(self, sender, event): if self.Width < 210: self.Width", "800: height1 = 800 self.box1.ScrollBars = RichTextBoxScrollBars.Vertical if width1 <", "List\", GCL) else: list2str(x, writeInd, GCL, GCint, size1) elif x", "= \"SpringNodes: Expandable Watch Window\" self.BackColor = Color.FromArgb(40,40,40) self.ControlBox =", ": width1 = 23 if width1 > 88: width1 =", "elif x is None: write_str(\"null\", GCL) else: write_str(x.ToString(), GCL) size1[0]", "#height, width def tolist(obj1): if hasattr(obj1,\"__iter__\"): return obj1 else: return", "= [] GCint += 1 GCL.append(None) for i, x in", "self.Height = 120 self.box1.Width = self.Width - 17 self.box1.Height =", "self.button1.Width = 200 self.button1.ForeColor = Color.FromArgb(234,234,234) self.button1.Click += self.save self.Controls.Add(self.button1)", "= Button() 
self.button1.Text = 'Close' self.button1.Font = Font(\"Calibri\", 10) self.button1.AutoSize", "if IN[0] is None else tolist(IN[0]) list2str(l1, IN[1]) str_content =", "None: write_str(\"null\", GCL) else: write_str(x.ToString(), GCL) size1[0] += 19 GCL.pop(GCint)", "if height1 > 800: height1 = 800 self.box1.ScrollBars = RichTextBoxScrollBars.Vertical", "None: GCL = [] GCint += 1 GCL.append(None) for i,", "self.BackColor = Color.FromArgb(40,40,40) self.ControlBox = False self.TopMost = True self.FormBorderStyle", "sender, event): if self.Width < 210: self.Width = 230 if", "if hasattr(obj1,\"__iter__\"): return obj1 else: return [obj1] def write_str(str1, GCL,", "list2str(l1, IN[1]) str_content = str_file.getvalue() str_file.close() width1 = 100 form", "= Color.FromArgb(234,234,234) self.box1.DetectUrls = True self.box1.Text = t1 self.Controls.Add(self.box1) def", "str_file=str_file, size1=size1): ln1 = len(str1) if ln1 > size1[1]: size1[1]", "- 70) def resize1(self, sender, event): if self.Width < 210:", "list2str(l1, writeInd, GCL=None, GCint=-1, size1=size1): if GCL is None: GCL", "= 88 self.Width = 10 + (width1 + 2) *", "else tolist(IN[0]) list2str(l1, IN[1]) str_content = str_file.getvalue() str_file.close() width1 =", "in enumerate(l1): GCL[GCint] = \"[%i] \" % i if writeInd" ]
[ "remove, the last stone will always be removed by your", "are playing the following Nim Game with your friend: There", "a function to determine whether you can win the game", "matter 1, 2, or 3 stones you remove, the last", "be removed by your friend. Hint: If there are 5", "out a way to remove the stones such that you", "way to remove the stones such that you will always", "in the heap. For example, if there are 4 stones", "return True assert Solution().canWinNim(0) is True assert Solution().canWinNim(1) is True", "Both of you are very clever and have optimal strategies", "be the winner? Credits: Special thanks to @jianchao.li.fighter for adding", "assert Solution().canWinNim(5) is True assert Solution().canWinNim(6) is True assert Solution().canWinNim(7)", "in the heap, then you will never win the game:", "all test cases. Performance: 1. Total Accepted: 31755 Total Submissions:", "submissions. \"\"\" class Solution(object): def canWinNim(self, n): \"\"\" :type n:", "is True assert Solution().canWinNim(3) is True assert Solution().canWinNim(4) is False", "function to determine whether you can win the game given", "time one of you take turns to remove 1 to", "Hint: If there are 5 stones in the heap, could", "to determine whether you can win the game given the", "could you figure out a way to remove the stones", "to @jianchao.li.fighter for adding this problem and creating all test", "3 stones. The one who removes the last stone will", "playing the following Nim Game with your friend: There is", "each time one of you take turns to remove 1", "is True assert Solution().canWinNim(1) is True assert Solution().canWinNim(2) is True", "for the game. 
Write a function to determine whether you", "Solution().canWinNim(5) is True assert Solution().canWinNim(6) is True assert Solution().canWinNim(7) is", "Solution().canWinNim(4) is False assert Solution().canWinNim(5) is True assert Solution().canWinNim(6) is", "return True if n % 4 == 0: return False", "Write a function to determine whether you can win the", "to remove the stones such that you will always be", "or 3 stones you remove, the last stone will always", "the heap, then you will never win the game: no", "assert Solution().canWinNim(3) is True assert Solution().canWinNim(4) is False assert Solution().canWinNim(5)", "True assert Solution().canWinNim(6) is True assert Solution().canWinNim(7) is True assert", "def canWinNim(self, n): \"\"\" :type n: int :rtype: bool \"\"\"", ":type n: int :rtype: bool \"\"\" if n <= 3:", "stones on the table, each time one of you take", "1 to 3 stones. The one who removes the last", "the stones. Both of you are very clever and have", "You will take the first turn to remove the stones.", "n % 4 == 0: return False else: return True", "determine whether you can win the game given the number", "bool \"\"\" if n <= 3: return True if n", "no matter 1, 2, or 3 stones you remove, the", "last stone will always be removed by your friend. Hint:", "a way to remove the stones such that you will", "adding this problem and creating all test cases. Performance: 1.", "assert Solution().canWinNim(4) is False assert Solution().canWinNim(5) is True assert Solution().canWinNim(6)", "Solution().canWinNim(2) is True assert Solution().canWinNim(3) is True assert Solution().canWinNim(4) is", "that you will always be the winner? Credits: Special thanks", "Performance: 1. Total Accepted: 31755 Total Submissions: 63076 Difficulty: Easy", "table, each time one of you take turns to remove", "one who removes the last stone will be the winner.", "the last stone will be the winner. 
You will take", "heap of stones on the table, each time one of", "friend: There is a heap of stones on the table,", "strategies for the game. Write a function to determine whether", "n): \"\"\" :type n: int :rtype: bool \"\"\" if n", "whether you can win the game given the number of", "last stone will be the winner. You will take the", "\"\"\" :type n: int :rtype: bool \"\"\" if n <=", "True assert Solution().canWinNim(1) is True assert Solution().canWinNim(2) is True assert", "thanks to @jianchao.li.fighter for adding this problem and creating all", "the heap, could you figure out a way to remove", "Special thanks to @jianchao.li.fighter for adding this problem and creating", "stones in the heap. For example, if there are 4", "stone will be the winner. You will take the first", "your friend: There is a heap of stones on the", "clever and have optimal strategies for the game. Write a", "will be the winner. You will take the first turn", "by your friend. Hint: If there are 5 stones in", "there are 5 stones in the heap, could you figure", "0: return False else: return True assert Solution().canWinNim(0) is True", "== 0: return False else: return True assert Solution().canWinNim(0) is", "the game given the number of stones in the heap.", "win the game: no matter 1, 2, or 3 stones", "the table, each time one of you take turns to", "stones in the heap, then you will never win the", "always be removed by your friend. Hint: If there are", "if n % 4 == 0: return False else: return", "game. Write a function to determine whether you can win", "The one who removes the last stone will be the", "Total Accepted: 31755 Total Submissions: 63076 Difficulty: Easy 2. Your", "63076 Difficulty: Easy 2. Your runtime beats 43.52% of python", "assert Solution().canWinNim(6) is True assert Solution().canWinNim(7) is True assert Solution().canWinNim(8)", "Accepted: 31755 Total Submissions: 63076 Difficulty: Easy 2. 
Your runtime", "For example, if there are 4 stones in the heap,", "you will always be the winner? Credits: Special thanks to", "will never win the game: no matter 1, 2, or", "you remove, the last stone will always be removed by", "winner? Credits: Special thanks to @jianchao.li.fighter for adding this problem", "Easy 2. Your runtime beats 43.52% of python submissions. \"\"\"", "of stones in the heap. For example, if there are", "in the heap, could you figure out a way to", "assert Solution().canWinNim(0) is True assert Solution().canWinNim(1) is True assert Solution().canWinNim(2)", "you will never win the game: no matter 1, 2,", "with your friend: There is a heap of stones on", "of you take turns to remove 1 to 3 stones.", "is True assert Solution().canWinNim(6) is True assert Solution().canWinNim(7) is True", "of python submissions. \"\"\" class Solution(object): def canWinNim(self, n): \"\"\"", "Question: Nim Game My Submissions Question You are playing the", "the number of stones in the heap. For example, if", "stones you remove, the last stone will always be removed", "the winner. You will take the first turn to remove", "are 5 stones in the heap, could you figure out", "2, or 3 stones you remove, the last stone will", "of stones on the table, each time one of you", "will always be the winner? Credits: Special thanks to @jianchao.li.fighter", "your friend. Hint: If there are 5 stones in the", "You are playing the following Nim Game with your friend:", "win the game given the number of stones in the", "number of stones in the heap. For example, if there", "runtime beats 43.52% of python submissions. \"\"\" class Solution(object): def", "if n <= 3: return True if n % 4", "Total Submissions: 63076 Difficulty: Easy 2. Your runtime beats 43.52%", "never win the game: no matter 1, 2, or 3", "the game. 
Write a function to determine whether you can", "remove the stones such that you will always be the", "int :rtype: bool \"\"\" if n <= 3: return True", "is True assert Solution().canWinNim(7) is True assert Solution().canWinNim(8) is False", "friend. Hint: If there are 5 stones in the heap,", "are very clever and have optimal strategies for the game.", "True assert Solution().canWinNim(4) is False assert Solution().canWinNim(5) is True assert", "the winner? Credits: Special thanks to @jianchao.li.fighter for adding this", "stones. The one who removes the last stone will be", "take the first turn to remove the stones. Both of", "False assert Solution().canWinNim(5) is True assert Solution().canWinNim(6) is True assert", "one of you take turns to remove 1 to 3", "you take turns to remove 1 to 3 stones. The", "remove the stones. Both of you are very clever and", "this problem and creating all test cases. Performance: 1. Total", "problem and creating all test cases. Performance: 1. Total Accepted:", "python submissions. \"\"\" class Solution(object): def canWinNim(self, n): \"\"\" :type", "you can win the game given the number of stones", "the heap. For example, if there are 4 stones in", "beats 43.52% of python submissions. \"\"\" class Solution(object): def canWinNim(self,", "remove 1 to 3 stones. The one who removes the", "always be the winner? Credits: Special thanks to @jianchao.li.fighter for", "Nim Game My Submissions Question You are playing the following", "False else: return True assert Solution().canWinNim(0) is True assert Solution().canWinNim(1)", "are 4 stones in the heap, then you will never", "the last stone will always be removed by your friend.", "n <= 3: return True if n % 4 ==", "4 == 0: return False else: return True assert Solution().canWinNim(0)", "can win the game given the number of stones in", "4 stones in the heap, then you will never win", "1. 
Total Accepted: 31755 Total Submissions: 63076 Difficulty: Easy 2.", "assert Solution().canWinNim(1) is True assert Solution().canWinNim(2) is True assert Solution().canWinNim(3)", "heap, then you will never win the game: no matter", "to 3 stones. The one who removes the last stone", "take turns to remove 1 to 3 stones. The one", "for adding this problem and creating all test cases. Performance:", "Game with your friend: There is a heap of stones", "and have optimal strategies for the game. Write a function", "creating all test cases. Performance: 1. Total Accepted: 31755 Total", "Submissions: 63076 Difficulty: Easy 2. Your runtime beats 43.52% of", "\"\"\" class Solution(object): def canWinNim(self, n): \"\"\" :type n: int", "assert Solution().canWinNim(2) is True assert Solution().canWinNim(3) is True assert Solution().canWinNim(4)", "2. Your runtime beats 43.52% of python submissions. \"\"\" class", "first turn to remove the stones. Both of you are", "Nim Game with your friend: There is a heap of", "such that you will always be the winner? Credits: Special", "Game My Submissions Question You are playing the following Nim", "is True assert Solution().canWinNim(4) is False assert Solution().canWinNim(5) is True", "3 stones you remove, the last stone will always be", "game: no matter 1, 2, or 3 stones you remove,", "Credits: Special thanks to @jianchao.li.fighter for adding this problem and", "stones. Both of you are very clever and have optimal", "test cases. Performance: 1. Total Accepted: 31755 Total Submissions: 63076", "else: return True assert Solution().canWinNim(0) is True assert Solution().canWinNim(1) is", "is a heap of stones on the table, each time", "very clever and have optimal strategies for the game. Write", "to remove the stones. Both of you are very clever", "winner. You will take the first turn to remove the", "Solution().canWinNim(6) is True assert Solution().canWinNim(7) is True assert Solution().canWinNim(8) is", "be the winner. 
You will take the first turn to", "heap, could you figure out a way to remove the", "example, if there are 4 stones in the heap, then", "\"\"\" Question: Nim Game My Submissions Question You are playing", "1, 2, or 3 stones you remove, the last stone", "31755 Total Submissions: 63076 Difficulty: Easy 2. Your runtime beats", "My Submissions Question You are playing the following Nim Game", "the following Nim Game with your friend: There is a", "turns to remove 1 to 3 stones. The one who", "there are 4 stones in the heap, then you will", "3: return True if n % 4 == 0: return", "Submissions Question You are playing the following Nim Game with", "the first turn to remove the stones. Both of you", "a heap of stones on the table, each time one", "43.52% of python submissions. \"\"\" class Solution(object): def canWinNim(self, n):", "True assert Solution().canWinNim(0) is True assert Solution().canWinNim(1) is True assert", "given the number of stones in the heap. For example,", "True assert Solution().canWinNim(3) is True assert Solution().canWinNim(4) is False assert", "class Solution(object): def canWinNim(self, n): \"\"\" :type n: int :rtype:", "Solution(object): def canWinNim(self, n): \"\"\" :type n: int :rtype: bool", "5 stones in the heap, could you figure out a", "removed by your friend. Hint: If there are 5 stones", "of you are very clever and have optimal strategies for", "and creating all test cases. Performance: 1. Total Accepted: 31755", "Solution().canWinNim(1) is True assert Solution().canWinNim(2) is True assert Solution().canWinNim(3) is", "on the table, each time one of you take turns", "if there are 4 stones in the heap, then you", "Question You are playing the following Nim Game with your", "cases. Performance: 1. Total Accepted: 31755 Total Submissions: 63076 Difficulty:", "stones such that you will always be the winner? Credits:", "to remove 1 to 3 stones. 
The one who removes", "return False else: return True assert Solution().canWinNim(0) is True assert", "Solution().canWinNim(0) is True assert Solution().canWinNim(1) is True assert Solution().canWinNim(2) is", "True assert Solution().canWinNim(2) is True assert Solution().canWinNim(3) is True assert", "game given the number of stones in the heap. For", "optimal strategies for the game. Write a function to determine", "True if n % 4 == 0: return False else:", "is True assert Solution().canWinNim(2) is True assert Solution().canWinNim(3) is True", "who removes the last stone will be the winner. You", "Solution().canWinNim(3) is True assert Solution().canWinNim(4) is False assert Solution().canWinNim(5) is", "is False assert Solution().canWinNim(5) is True assert Solution().canWinNim(6) is True", "\"\"\" if n <= 3: return True if n %", "have optimal strategies for the game. Write a function to", "There is a heap of stones on the table, each", "figure out a way to remove the stones such that", "<= 3: return True if n % 4 == 0:", "the stones such that you will always be the winner?", "following Nim Game with your friend: There is a heap", "@jianchao.li.fighter for adding this problem and creating all test cases.", "heap. For example, if there are 4 stones in the", "the game: no matter 1, 2, or 3 stones you", "If there are 5 stones in the heap, could you", ":rtype: bool \"\"\" if n <= 3: return True if", "Difficulty: Easy 2. Your runtime beats 43.52% of python submissions.", "will take the first turn to remove the stones. Both", "canWinNim(self, n): \"\"\" :type n: int :rtype: bool \"\"\" if", "Your runtime beats 43.52% of python submissions. \"\"\" class Solution(object):", "stones in the heap, could you figure out a way", "you figure out a way to remove the stones such", "turn to remove the stones. 
Both of you are very", "n: int :rtype: bool \"\"\" if n <= 3: return", "then you will never win the game: no matter 1,", "you are very clever and have optimal strategies for the", "stone will always be removed by your friend. Hint: If", "% 4 == 0: return False else: return True assert", "will always be removed by your friend. Hint: If there", "removes the last stone will be the winner. You will" ]
[ "base class Test(base.BaseScriptTest, unittest.TestCase): command_line = \"./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m", "= \"./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5 -p mm8.\" input_stdin =", "class Test(base.BaseScriptTest, unittest.TestCase): command_line = \"./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5", "./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5 -p mm8.\" input_stdin = base.TestFile(filename=\"./test_data/maf_tests/dcking_ghp074.bed\") output_stdout", "\"./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5 -p mm8.\" input_stdin = base.TestFile(filename=\"./test_data/maf_tests/dcking_ghp074.bed\")", "unittest.TestCase): command_line = \"./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5 -p mm8.\"", "-m 5 -p mm8.\" input_stdin = base.TestFile(filename=\"./test_data/maf_tests/dcking_ghp074.bed\") output_stdout = base.TestFile(filename=\"./test_data/maf_tests/dcking_ghp074.maf\")", "command_line = \"./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5 -p mm8.\" input_stdin", "Test(base.BaseScriptTest, unittest.TestCase): command_line = \"./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c -m 5 -p", "-c -m 5 -p mm8.\" input_stdin = base.TestFile(filename=\"./test_data/maf_tests/dcking_ghp074.bed\") output_stdout =", "import base class Test(base.BaseScriptTest, unittest.TestCase): command_line = \"./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf -c", "unittest import base class Test(base.BaseScriptTest, unittest.TestCase): command_line = \"./scripts/maf_extract_ranges_indexed.py ./test_data/maf_tests/mm8_chr7_tiny.maf", "import unittest import base class Test(base.BaseScriptTest, unittest.TestCase): command_line = \"./scripts/maf_extract_ranges_indexed.py" ]
[ "import gca import itertools import string import numpy as np", "lCount + 1 return float(lCount) / lLen ''' Test function", "naDist2[:,0], naDist2[:,1], color='r' ) plt.ylabel( 'Feature 2' ) plt.xlabel( 'Feature", "ranking features quickly. It uses the knn implementation. @status: oneKNN", "+ 1 dResult = float(lCount) / naResults.size return dResult '''", "1 ] sPerm = perm[0] ''' stack other distributions on", "knn implementation. @status: oneKNN functions correctly, optimized to use n^2/2", "'1-KNN Value' ) #plt.xlabel( 'Distribution Merge' ) plt.title( '1-KNN Performance'", "dDistance = 0.0; ''' Loop through finding closest neighbors '''", "1, len(perm) ): sPerm = sPerm + str(perm[j]) naTest =", "function for 1KNN, return value is a double between 0", "), lfOneKnn ) plt.ylabel( '1-KNN Value' ) #plt.xlabel( 'Distribution Merge'", ", 0 ), arrowprops=dict(facecolor='black', shrink=0.05) ) plt.title( 'Data Distribution' )", "np.random.permutation(naTest) ) ) lsNames.append( sPerm ) ''' Plot results '''", "''' naResults = knn.query( naTest[:,:-1], 5, 'mode') ''' Count returns", "_plotDist( naDist1, naDist2, i ): plt.clf() plt.scatter( naDist1[:,0], naDist1[:,1] )", "np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )", "cOneRuntime + (clock() - t) t = clock() lfKnnResults.append( _knnResult(", "clock() cOneRuntime = t-t; cKnnRuntime = t-t; lfResults = []", "(-1,1) ) gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 ) gca().xaxis.set_ticklabels( lsNames )", "i in range( 15 ): #_plotDist( naTest1, naBoth[100:,:], i )", "time import clock ''' @summary: Query function for 1KNN, return", "): plt.clf() plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1], color='r'", "A 2D numpy array. 
Each row is a data point", "features used except for distribution 4 ''' distY = np.sin(", ") ''' Two distances to check, for i's best, and", "(naTest1, naTest2) ) ''' Keep track of runtimes ''' t", "distY[i] = 0 for i in range( 1, 6 ):", "dist5 ] ''' All features used except for distribution 4", "naTest ) ) lfVals.append( _knnResult( np.random.permutation(naTest) ) ) lsNames.append( sPerm", ").reshape( -1, 1 ) dist2 = np.random.uniform( -1, 1, 1000", "1.5 , 0 ), arrowprops=dict(facecolor='black', shrink=0.05) ) plt.title( 'Data Distribution'", "should have two dimensions\" ) lLen = naData.shape[0] ''' #", "released under the New BSD license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for", "lLen dDistance = 0.0; ''' Loop through finding closest neighbors", "''' for i in range(3): ''' Select one of three", "sPerm = sPerm + str(perm[j]) naTest = np.hstack( (naTest, lDists[", "dDistance = 0.0 for k in range( 0, lDim ):", "lsNames = [] lf1Vals = [] lfVals = [] for", "1KNN, return value is a double between 0 and 1.", "distY.reshape( -1, 1 ) for i, fVal in enumerate( distY", "of matching pairs ''' for i in range( lLen ):", "= [] for i in range( 15 ): #_plotDist( naTest1,", "between 0 and 1. 
@param naData: A 2D numpy array.", "of Technology @contact: <EMAIL> @summary: This is an implementation of", "naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) plt.ylabel( 'Feature 2'", "== 1: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack(", "performance of 1-KNN ''' def _test1(): ''' Generate three random", "loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2", "else: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1,", "''' # of dimensions, subtract one for classification ''' lDim", "lCount + 1 dResult = float(lCount) / naResults.size return dResult", "+ .2 ) gca().xaxis.set_ticklabels( lsNames ) plt.show() if __name__ ==", "in range( 0, lDim ): dDistance += (naData[i][k] - naData[j][k])**2", "''' Test function to plot results ''' def _plotResults( naDist1,", "plt2[0]), ('1-KNN', 'KNN, K=5') ) plt.ylabel('1-KNN Value/KNN Classification') plt.xlabel('Feature Set')", "Performance' ) plt.subplot(313) plt.plot( range( len(lf5Knn) ), lf5Knn ) plt.ylabel(", "-1, 1 ) dist3 = np.random.uniform( -1, 1, 1000 ).reshape(", "subtract one for classification ''' lDim = naData.shape[1] - 1", "Test function to plot results ''' def _plotResults( naDist1, naDist2,", ") plt.ylabel( '% Correct Classification' ) #plt.xlabel( 'Distribution Merge' )", "+ str(i) ) plt.show() ''' Function to test KNN performance", "naTest = naData[lSplit:, :] knn.addEvidence( naTrain.astype(float), 1 ); ''' Query", "15 ): #_plotDist( naTest1, naBoth[100:,:], i ) t = clock()", "cOneRuntime = cOneRuntime + (clock() - t) t = clock()", "= np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist4", "6 ): lsNames = [] lf1Vals = [] lfVals =", "test distribution to first element ''' naTest = lDists[ int(perm[0])", "if naData.ndim != 2: raise Exception( \"Data should have two", "distribution to first element ''' naTest = lDists[ int(perm[0]) -", "of 1-KNN compared to 
5KNN learner performance ''' np.random.seed( 12345", "len(lf5Knn) ), lf5Knn ) plt.ylabel( '% Correct Classification' ) #plt.xlabel(", ") plt.ylabel( 'Feature 2' ) plt.xlabel( 'Feature 1' ) plt.title(", "5 attributes ''' dist1 = np.random.uniform( -1, 1, 1000 ).reshape(", ") ''' Tests performance of 1-KNN ''' def _test2(): '''", "best, and j's best ''' if dDistance < ldDistances[i]: ldDistances[i]", "np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist3 =", "< 2: plt.xlim( (-1,1) ) gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 )", "@organization: Georgia Institute of Technology @contact: <EMAIL> @summary: This is", "cOneRuntime print 'Runtime 5-KNN:', cKnnRuntime _plotResults( naTest1, naTest2, lfResults, lfKnnResults", "'KNN, K=5') ) plt.ylabel('1-KNN Value/KNN Classification') plt.xlabel('Feature Set') plt.title('Combinations of", "test KNN performance ''' def _knnResult( naData ): ''' Split", "''' for j in range( 1, len(perm) ): sPerm =", "np.arange(len(lf1Vals)), lf1Vals, .2, color='r' ) plt2 = plt.bar( np.arange(len(lfVals)) +", "_test2(): ''' Generate three random samples to show the value", "str(i) ) plt.show() ''' Function to test KNN performance '''", "i in range( 1, 6 ): lsNames = [] lf1Vals", "(naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] ) naTest2", "np.sin( dist3 ) + np.sin( dist5 ) distY = distY.reshape(", "1' ) #gca().annotate( '', xy=( .8, 0 ), xytext=( -.3", "5, 'mode') ''' Count returns which are correct ''' lCount", "Classification') plt.xlabel('Feature Set') plt.title('Combinations of ' + str(i) + '", "dist2, dist3, dist4, dist5 ] ''' All features used except", "= naData.shape[0] ''' # of dimensions, subtract one for classification", "2011, 2012 Georgia Tech Research Corporation This source code is", "k in range( 0, lDim ): dDistance += (naData[i][k] -", "distributions for each of the 5 attributes ''' dist1 =", "math import knn from time import clock ''' @summary: Query", "= 
clock() cOneRuntime = t-t; cKnnRuntime = t-t; lfResults =", "(naTest, distY) ) lf1Vals.append( oneKnn( naTest ) ) lfVals.append( _knnResult(", "np.vstack( (naTest1, naTest2) ) naBoth = np.vstack( (naTest1, naTest2) )", "from time import clock ''' @summary: Query function for 1KNN,", "oneKnn( naTest ) ) lfVals.append( _knnResult( np.random.permutation(naTest) ) ) lsNames.append(", ".2 ) gca().xaxis.set_ticklabels( lsNames ) plt.show() if __name__ == '__main__':", "- t) t = clock() lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) )", "# of dimensions, subtract one for classification ''' lDim =", "lfKnnResults ) ''' Tests performance of 1-KNN ''' def _test2():", "''' @summary: Query function for 1KNN, return value is a", "range( 0, lDim ): dDistance += (naData[i][k] - naData[j][k])**2 dDistance", "naTest[:,:-1], 5, 'mode') ''' Count returns which are correct '''", "1 ) dist2 = np.random.uniform( -1, 1, 1000 ).reshape( -1,", "int(perm[j]) - 1 ] ) ) ''' finally stack y", "plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b' ) plt.legend( (plt1[0],", "1 return float(lCount) / lLen ''' Test function to plot", "ldDistances[j] = dDistance llIndexes[j] = i lCount = 0 '''", "plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') ) plt.ylabel('1-KNN Value/KNN Classification')", "two dimensions\" ) lLen = naData.shape[0] ''' # of dimensions,", ") naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] ) naTest2 = np.hstack( (naTest2,", "set test distribution to first element ''' naTest = lDists[", "distributions ''' def _plotDist( naDist1, naDist2, i ): plt.clf() plt.scatter(", "llIndexes[i] ][-1]: lCount = lCount + 1 return float(lCount) /", "one for classification ''' lDim = naData.shape[1] - 1 '''", "lLen = naData.shape[0] ''' # of dimensions, subtract one for", "'12345', i ): ''' set test distribution to first element", "dResult ''' Tests performance of 1-KNN ''' def _test1(): '''", ".8, 0 ), xytext=( -.3 , 0 ), arrowprops=dict(facecolor='red', 
shrink=0.05)", ") plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) plt.ylabel( 'Feature 2' )", "] ''' All features used except for distribution 4 '''", "each of the 5 attributes ''' dist1 = np.random.uniform( -1,", "plt.ylabel( '% Correct Classification' ) #plt.xlabel( 'Distribution Merge' ) plt.title(", "the classification. ''' def oneKnn( naData ): if naData.ndim !=", "= [] lfKnnResults = [] for i in range( 15", "Function to test KNN performance ''' def _knnResult( naData ):", "of three distributions ''' if i == 0: naTest1 =", "[-1] * lLen dDistance = 0.0; ''' Loop through finding", "''' plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' ) plt2", "plt.xlabel('Feature Set') plt.title('Combinations of ' + str(i) + ' Features')", "plt.clf() plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1], color='r' )", "is a data point with the final column containing the", "''' Tests performance of 1-KNN ''' def _test1(): ''' Generate", "j if dDistance < ldDistances[j]: ldDistances[j] = dDistance llIndexes[j] =", "plot results ''' def _plotResults( naDist1, naDist2, lfOneKnn, lf5Knn ):", "dist1, dist2, dist3, dist4, dist5 ] ''' All features used", ") plt.subplot(313) plt.plot( range( len(lf5Knn) ), lf5Knn ) plt.ylabel( '%", "numpy as np import math import knn from time import", "float(lCount) / lLen ''' Test function to plot results '''", "#_plotDist( naTest1, naBoth[100:,:], i ) t = clock() lfResults.append( oneKnn(", ") ''' Keep track of runtimes ''' t = clock()", ") lLen = naData.shape[0] ''' # of dimensions, subtract one", "oneKNN functions correctly, optimized to use n^2/2 algorithm. 
''' import", "+ np.sin( dist5 ) distY = distY.reshape( -1, 1 )", "to test KNN performance ''' def _knnResult( naData ): '''", "naData[i][-1] == naData[ llIndexes[i] ][-1]: lCount = lCount + 1", "data into training/testing ''' lSplit = naData.shape[0] * .7 naTrain", "naDist2[:,0], naDist2[:,1], color='r' ) #plt.ylabel( 'Feature 2' ) #plt.xlabel( 'Feature", ") if len(lf1Vals) < 2: plt.xlim( (-1,1) ) gca().xaxis.set_ticks( np.arange(len(lf1Vals))", "very large ''' ldDistances = [1E300] * lLen llIndexes =", "'mode') ''' Count returns which are correct ''' lCount =", ") naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) ) naOrig =", "= cKnnRuntime + (clock() - t) naBoth[500:,0] = naBoth[500:,0] -", "1 ) lDists = [ dist1, dist2, dist3, dist4, dist5", "naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) ) naOrig = np.vstack(", "license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. Created on Feb", "= 0.0 for k in range( 0, lDim ): dDistance", "= np.vstack( (naTest1, naTest2) ) naBoth = np.vstack( (naTest1, naTest2)", "def _test2(): ''' Generate three random samples to show the", ".7 naTrain = naData[:lSplit, :] naTest = naData[lSplit:, :] knn.addEvidence(", "naData ): ''' Split up data into training/testing ''' lSplit", "llIndexes = [-1] * lLen dDistance = 0.0; ''' Loop", "naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) plt.ylabel( 'Feature", "compared to 5KNN learner performance ''' for i in range(3):", "np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) else: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2]", "- 1 ''' Start best distances as very large '''", "color='r' ) plt.ylabel( 'Feature 2' ) plt.xlabel( 'Feature 1' )", ") ) cKnnRuntime = cKnnRuntime + (clock() - t) naBoth[500:,0]", "= dDistance llIndexes[j] = i lCount = 0 ''' Now", "Classification' ) #plt.xlabel( 'Distribution Merge' ) plt.title( '5-KNN Performance' )", "naDist1[:,0], naDist1[:,1] ) 
plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) #plt.ylabel( 'Feature", "): dDistance = 0.0 for k in range( 0, lDim", ") cKnnRuntime = cKnnRuntime + (clock() - t) naBoth[500:,0] =", "distances as very large ''' ldDistances = [1E300] * lLen", "for 1KNN, return value is a double between 0 and", ") naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 =", "np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] )", "np.hstack( (naTest, lDists[ int(perm[j]) - 1 ] ) ) '''", "== naTest[i,-1]: lCount = lCount + 1 dResult = float(lCount)", ") + np.sin( dist2 ) + np.sin( dist3 ) +", ") dist5 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1", "Technology @contact: <EMAIL> @summary: This is an implementation of the", "dResult = float(lCount) / naResults.size return dResult ''' Tests performance", "for j in range( 1, len(perm) ): sPerm = sPerm", "j in range( i+1, lLen ): dDistance = 0.0 for", "arrowprops=dict(facecolor='red', shrink=0.05) ) gca().annotate( '', xy=( .7, 0 ), xytext=(", "i lCount = 0 ''' Now count # of matching", "Feb 20, 2011 @author: <NAME> @organization: Georgia Institute of Technology", "= i lCount = 0 ''' Now count # of", "1000 ).reshape( -1, 1 ) dist2 = np.random.uniform( -1, 1,", "llIndexes[j] = i lCount = 0 ''' Now count #", "+ 1 return float(lCount) / lLen ''' Test function to", "= [ dist1, dist2, dist3, dist4, dist5 ] ''' All", "stack other distributions on ''' for j in range( 1,", "''' Keep track of runtimes ''' t = clock() cOneRuntime", "range( 1, len(perm) ): sPerm = sPerm + str(perm[j]) naTest", "''' lSplit = naData.shape[0] * .7 naTrain = naData[:lSplit, :]", "1000 ).reshape( -1, 1 ) dist4 = np.random.uniform( -1, 1,", "('1-KNN', 'KNN, K=5') ) plt.ylabel('1-KNN Value/KNN Classification') plt.xlabel('Feature Set') plt.title('Combinations", "Split up data into training/testing ''' lSplit = naData.shape[0] *", "OneKnn:', cOneRuntime print 'Runtime 5-KNN:', cKnnRuntime 
_plotResults( naTest1, naTest2, lfResults,", "1-KNN compared to 5KNN learner performance ''' np.random.seed( 12345 )", "] sPerm = perm[0] ''' stack other distributions on '''", "0, lDim ): dDistance += (naData[i][k] - naData[j][k])**2 dDistance =", ") plt.title( '5-KNN Performance' ) plt.subplots_adjust() plt.show() ''' Function to", "i == 1: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 =", "''' lCount = 0 for i, dVal in enumerate(naResults): if", "for classification ''' lDim = naData.shape[1] - 1 ''' Start", ") for i, fVal in enumerate( distY ): if fVal", "= 0 for i in range( 1, 6 ): lsNames", "lDists[ int(perm[0]) - 1 ] sPerm = perm[0] ''' stack", ") plt.show() ''' Function to test KNN performance ''' def", "learner performance ''' np.random.seed( 12345 ) ''' Create 5 distributions", "): for j in range( i+1, lLen ): dDistance =", "else: distY[i] = 0 for i in range( 1, 6", "and 1. @param naData: A 2D numpy array. Each row", "1 ) dist4 = np.random.uniform( -1, 1, 1000 ).reshape( -1,", ") ) naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] ) naTest2 = np.hstack(", "np.random.seed( 12345 ) ''' Create 5 distributions for each of", "i == 0: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 =", "= np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) )", "np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] ) naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) )", ") lsNames.append( sPerm ) ''' Plot results ''' plt1 =", "point with the final column containing the classification. 
''' def", "dDistance < ldDistances[i]: ldDistances[i] = dDistance llIndexes[i] = j if", "plt.subplots_adjust() plt.show() ''' Function to plot 2 distributions ''' def", "lDists = [ dist1, dist2, dist3, dist4, dist5 ] '''", "lDim ): dDistance += (naData[i][k] - naData[j][k])**2 dDistance = math.sqrt(", "np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] ) naTest2 =", "= plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' ) plt2 = plt.bar(", ").reshape( -1, 1 ) lDists = [ dist1, dist2, dist3,", "plt.plot( range( len(lf5Knn) ), lf5Knn ) plt.ylabel( '% Correct Classification'", "2D numpy array. Each row is a data point with", "functions correctly, optimized to use n^2/2 algorithm. ''' import matplotlib.pyplot", "): if naData.ndim != 2: raise Exception( \"Data should have", "plot 2 distributions ''' def _plotDist( naDist1, naDist2, i ):", "''' Split up data into training/testing ''' lSplit = naData.shape[0]", ") ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] ) naTest2 = np.hstack(", "best ''' if dDistance < ldDistances[i]: ldDistances[i] = dDistance llIndexes[i]", "for i in range(3): ''' Select one of three distributions", "(naTest2, np.ones(500).reshape(-1,1) ) ) else: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )", ") ) naOrig = np.vstack( (naTest1, naTest2) ) naBoth =", "cKnnRuntime = cKnnRuntime + (clock() - t) naBoth[500:,0] = naBoth[500:,0]", "optimized to use n^2/2 algorithm. 
''' import matplotlib.pyplot as plt", ") ''' Plot results ''' plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals,", "dist5 ) distY = distY.reshape( -1, 1 ) for i,", "np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b' ) plt.legend( (plt1[0], plt2[0]),", "color='r' ) #plt.ylabel( 'Feature 2' ) #plt.xlabel( 'Feature 1' )", ") plt.title( 'Iteration ' + str(i) ) plt.show() ''' Function", "2' ) #plt.xlabel( 'Feature 1' ) #gca().annotate( '', xy=( .8,", "0 ), arrowprops=dict(facecolor='black', shrink=0.05) ) plt.title( 'Data Distribution' ) plt.subplot(312)", "naTest2) ) naBoth = np.vstack( (naTest1, naTest2) ) ''' Keep", "cOneRuntime = t-t; cKnnRuntime = t-t; lfResults = [] lfKnnResults", "_test1(): ''' Generate three random samples to show the value", "range( 15 ): #_plotDist( naTest1, naBoth[100:,:], i ) t =", "5-KNN:', cKnnRuntime _plotResults( naTest1, naTest2, lfResults, lfKnnResults ) ''' Tests", "np.vstack( (naTest1, naTest2) ) ''' Keep track of runtimes '''", "1, 1000 ).reshape( -1, 1 ) lDists = [ dist1,", ") ) lfVals.append( _knnResult( np.random.permutation(naTest) ) ) lsNames.append( sPerm )", "correctly, optimized to use n^2/2 algorithm. 
''' import matplotlib.pyplot as", "performance of 1-KNN ''' def _test2(): ''' Generate three random", "str(i) + ' Features') plt.ylim( (0,1) ) if len(lf1Vals) <", "if len(lf1Vals) < 2: plt.xlim( (-1,1) ) gca().xaxis.set_ticks( np.arange(len(lf1Vals)) +", "Tests performance of 1-KNN ''' def _test2(): ''' Generate three", "* .7 naTrain = naData[:lSplit, :] naTest = naData[lSplit:, :]", "lCount = 0 for i, dVal in enumerate(naResults): if dVal", "arrowprops=dict(facecolor='black', shrink=0.05) ) plt.title( 'Data Distribution' ) plt.subplot(312) plt.plot( range(", "1, 1000 ).reshape( -1, 1 ) dist5 = np.random.uniform( -1,", "@contact: <EMAIL> @summary: This is an implementation of the 1-KNN", "distY[i] = 1 else: distY[i] = 0 for i in", "of runtimes ''' t = clock() cOneRuntime = t-t; cKnnRuntime", "i+1, lLen ): dDistance = 0.0 for k in range(", "''' Create 5 distributions for each of the 5 attributes", "see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. Created on Feb 20, 2011", "range( lLen ): for j in range( i+1, lLen ):", "in range( lLen ): if naData[i][-1] == naData[ llIndexes[i] ][-1]:", "to plot results ''' def _plotResults( naDist1, naDist2, lfOneKnn, lf5Knn", "xytext=( 1.5 , 0 ), arrowprops=dict(facecolor='black', shrink=0.05) ) plt.title( 'Data", "the 1-KNN algorithm for ranking features quickly. 
It uses the", "0.0; ''' Loop through finding closest neighbors ''' for i", "K=5') ) plt.ylabel('1-KNN Value/KNN Classification') plt.xlabel('Feature Set') plt.title('Combinations of '", "Plot results ''' plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r'", "(plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') ) plt.ylabel('1-KNN Value/KNN Classification') plt.xlabel('Feature", "len(perm) ): sPerm = sPerm + str(perm[j]) naTest = np.hstack(", ".2, color='b' ) plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') )", "results ''' plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' )", "gca import itertools import string import numpy as np import", "''' Function to test KNN performance ''' def _knnResult( naData", "] ) ) ''' finally stack y values ''' naTest", "-1, 1 ) lDists = [ dist1, dist2, dist3, dist4,", ") naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] ) naTest2 = np.hstack( (naTest2,", "np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] )", "= np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) elif i == 1:", "range( 1, 6 ): lsNames = [] lf1Vals = []", "Two distances to check, for i's best, and j's best", "random samples to show the value of 1-KNN compared to", "gca().annotate( '', xy=( .7, 0 ), xytext=( 1.5 , 0", "i in range( lLen ): for j in range( i+1,", "naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) #plt.ylabel( 'Feature 2'", "pylab import gca import itertools import string import numpy as", "naBoth ) ) cOneRuntime = cOneRuntime + (clock() - t)", "= 0 for i, dVal in enumerate(naResults): if dVal ==", "show the value of 1-KNN compared to 5KNN learner performance", "ldDistances = [1E300] * lLen llIndexes = [-1] * lLen", "itertools.combinations( '12345', i ): ''' set test distribution to first", "''' finally stack y values ''' naTest = np.hstack( (naTest,", "color='r' ) plt2 = plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2,", "'Distribution 
Merge' ) plt.title( '5-KNN Performance' ) plt.subplots_adjust() plt.show() '''", "of 1-KNN ''' def _test1(): ''' Generate three random samples", "= np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) ) naOrig = np.vstack( (naTest1,", "-1, 1 ) dist4 = np.random.uniform( -1, 1, 1000 ).reshape(", "for license details. Created on Feb 20, 2011 @author: <NAME>", "used except for distribution 4 ''' distY = np.sin( dist1", "naResults = knn.query( naTest[:,:-1], 5, 'mode') ''' Count returns which", "distY = np.sin( dist1 ) + np.sin( dist2 ) +", "Created on Feb 20, 2011 @author: <NAME> @organization: Georgia Institute", "= naData[:lSplit, :] naTest = naData[lSplit:, :] knn.addEvidence( naTrain.astype(float), 1", "classification ''' lDim = naData.shape[1] - 1 ''' Start best", "i, dVal in enumerate(naResults): if dVal == naTest[i,-1]: lCount =", "0.0 for k in range( 0, lDim ): dDistance +=", "perm in itertools.combinations( '12345', i ): ''' set test distribution", ") plt.title( 'Data Distribution' ) plt.subplot(312) plt.plot( range( len(lfOneKnn) ),", "= clock() lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) ) cKnnRuntime = cKnnRuntime", ") ) lsNames.append( sPerm ) ''' Plot results ''' plt1", "naTest1, naTest2, lfResults, lfKnnResults ) ''' Tests performance of 1-KNN", ") plt.xlabel( 'Feature 1' ) plt.title( 'Iteration ' + str(i)", "= np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2]", "naData.ndim != 2: raise Exception( \"Data should have two dimensions\"", "* lLen llIndexes = [-1] * lLen dDistance = 0.0;", "learner performance ''' for i in range(3): ''' Select one", "if i == 0: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1", ") plt.ylabel('1-KNN Value/KNN Classification') plt.xlabel('Feature Set') plt.title('Combinations of ' +", "+ np.sin( dist2 ) + np.sin( dist3 ) + np.sin(", "''' ldDistances = [1E300] * lLen llIndexes = [-1] *", "naData.shape[0] * .7 naTrain = 
naData[:lSplit, :] naTest = naData[lSplit:,", "plt.ylabel( '1-KNN Value' ) #plt.xlabel( 'Distribution Merge' ) plt.title( '1-KNN", "Tests performance of 1-KNN ''' def _test1(): ''' Generate three", "lfResults = [] lfKnnResults = [] for i in range(", "clock() lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) ) cKnnRuntime = cKnnRuntime +", "details. Created on Feb 20, 2011 @author: <NAME> @organization: Georgia", "import knn from time import clock ''' @summary: Query function", "t) t = clock() lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) ) cKnnRuntime", "dist4, dist5 ] ''' All features used except for distribution", "str(perm[j]) naTest = np.hstack( (naTest, lDists[ int(perm[j]) - 1 ]", "= naData[lSplit:, :] knn.addEvidence( naTrain.astype(float), 1 ); ''' Query with", "'Iteration ' + str(i) ) plt.show() ''' Function to test", "track of runtimes ''' t = clock() cOneRuntime = t-t;", "naDist2[:,1], color='r' ) #plt.ylabel( 'Feature 2' ) #plt.xlabel( 'Feature 1'", ") plt2 = plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b'", "dDistance llIndexes[i] = j if dDistance < ldDistances[j]: ldDistances[j] =", "[] lfVals = [] for perm in itertools.combinations( '12345', i", "naDist2, i ): plt.clf() plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0],", "Features') plt.ylim( (0,1) ) if len(lf1Vals) < 2: plt.xlim( (-1,1)", "= [] lf1Vals = [] lfVals = [] for perm", "): plt.clf() plt.subplot(311) plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1],", ") ) cOneRuntime = cOneRuntime + (clock() - t) t", "plt.xlim( (-1,1) ) gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 ) gca().xaxis.set_ticklabels( lsNames", "np.arange(len(lf1Vals)) + .2 ) gca().xaxis.set_ticklabels( lsNames ) plt.show() if __name__", "to first element ''' naTest = lDists[ int(perm[0]) - 1", "dist1 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )", "element ''' naTest = lDists[ int(perm[0]) - 1 ] sPerm", "-1, 1, 1000 
).reshape( -1, 1 ) dist4 = np.random.uniform(", "np.sin( dist5 ) distY = distY.reshape( -1, 1 ) for", "''' Tests performance of 1-KNN ''' def _test2(): ''' Generate", "if naData[i][-1] == naData[ llIndexes[i] ][-1]: lCount = lCount +", "dDistance = math.sqrt( dDistance ) ''' Two distances to check,", "''' for i in range( lLen ): for j in", ") ) else: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 =", "), arrowprops=dict(facecolor='red', shrink=0.05) ) gca().annotate( '', xy=( .7, 0 ),", "''' def oneKnn( naData ): if naData.ndim != 2: raise", "#plt.xlabel( 'Distribution Merge' ) plt.title( '5-KNN Performance' ) plt.subplots_adjust() plt.show()", "-1, 1, 1000 ).reshape( -1, 1 ) dist3 = np.random.uniform(", "dist3 ) + np.sin( dist5 ) distY = distY.reshape( -1,", "runtimes ''' t = clock() cOneRuntime = t-t; cKnnRuntime =", "1000 ).reshape( -1, 1 ) lDists = [ dist1, dist2,", "\"Data should have two dimensions\" ) lLen = naData.shape[0] '''", "lf5Knn ) plt.ylabel( '% Correct Classification' ) #plt.xlabel( 'Distribution Merge'", "naDist1, naDist2, i ): plt.clf() plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter(", "np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] ) naTest2 =", "_knnResult( naData ): ''' Split up data into training/testing '''", ") naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) else: naTest1", "= np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] ) naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) )", "np import math import knn from time import clock '''", "''' def _test2(): ''' Generate three random samples to show", ") naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] ) naTest2 = np.hstack( (naTest2,", ") #plt.xlabel( 'Distribution Merge' ) plt.title( '5-KNN Performance' ) plt.subplots_adjust()", "Create 5 distributions for each of the 5 attributes '''", "0: distY[i] = 1 else: distY[i] = 0 for i", "''' Query with last column omitted and 5 
nearest neighbors", "len(lf1Vals) < 2: plt.xlim( (-1,1) ) gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2", "naTest = np.hstack( (naTest, lDists[ int(perm[j]) - 1 ] )", "llIndexes[i] = j if dDistance < ldDistances[j]: ldDistances[j] = dDistance", "naData.shape[1] - 1 ''' Start best distances as very large", ":] knn.addEvidence( naTrain.astype(float), 1 ); ''' Query with last column", "plt.title( '5-KNN Performance' ) plt.subplots_adjust() plt.show() ''' Function to plot", "'Runtime OneKnn:', cOneRuntime print 'Runtime 5-KNN:', cKnnRuntime _plotResults( naTest1, naTest2,", "lfOneKnn ) plt.ylabel( '1-KNN Value' ) #plt.xlabel( 'Distribution Merge' )", "for i in range( lLen ): if naData[i][-1] == naData[", "dVal == naTest[i,-1]: lCount = lCount + 1 dResult =", "''' All features used except for distribution 4 ''' distY", "def _test1(): ''' Generate three random samples to show the", "''' Loop through finding closest neighbors ''' for i in", "Each row is a data point with the final column", "naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1)", "lsNames.append( sPerm ) ''' Plot results ''' plt1 = plt.bar(", "1 dResult = float(lCount) / naResults.size return dResult ''' Tests", "to 5KNN learner performance ''' np.random.seed( 12345 ) ''' Create", "up data into training/testing ''' lSplit = naData.shape[0] * .7", "if dVal == naTest[i,-1]: lCount = lCount + 1 dResult", "plt.title('Combinations of ' + str(i) + ' Features') plt.ylim( (0,1)", "lf1Vals.append( oneKnn( naTest ) ) lfVals.append( _knnResult( np.random.permutation(naTest) ) )", "lfOneKnn, lf5Knn ): plt.clf() plt.subplot(311) plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter(", "an implementation of the 1-KNN algorithm for ranking features quickly.", "range( len(lfOneKnn) ), lfOneKnn ) plt.ylabel( '1-KNN Value' ) #plt.xlabel(", "Corporation This source code is released under the New BSD", "_plotResults( naTest1, naTest2, lfResults, 
lfKnnResults ) ''' Tests performance of", "1-KNN compared to 5KNN learner performance ''' for i in", "implementation. @status: oneKNN functions correctly, optimized to use n^2/2 algorithm.", "dimensions, subtract one for classification ''' lDim = naData.shape[1] -", "elif i == 1: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1", "for distribution 4 ''' distY = np.sin( dist1 ) +", "(naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] ) naTest2", "naTest2) ) ''' Keep track of runtimes ''' t =", "1, 1000 ).reshape( -1, 1 ) dist2 = np.random.uniform( -1,", "oneKnn( naBoth ) ) cOneRuntime = cOneRuntime + (clock() -", "= t-t; lfResults = [] lfKnnResults = [] for i", "naDist2[:,1], color='r' ) plt.ylabel( 'Feature 2' ) plt.xlabel( 'Feature 1'", ") cOneRuntime = cOneRuntime + (clock() - t) t =", "): sPerm = sPerm + str(perm[j]) naTest = np.hstack( (naTest,", "distY ): if fVal >= 0: distY[i] = 1 else:", "(naTest, lDists[ int(perm[j]) - 1 ] ) ) ''' finally", "neighbors ''' for i in range( lLen ): for j", "for j in range( i+1, lLen ): dDistance = 0.0", "Query with last column omitted and 5 nearest neighbors '''", "= knn.query( naTest[:,:-1], 5, 'mode') ''' Count returns which are", "finally stack y values ''' naTest = np.hstack( (naTest, distY)", "= np.hstack( (naTest, lDists[ int(perm[j]) - 1 ] ) )", "0 and 1. @param naData: A 2D numpy array. 
Each", ") #plt.xlabel( 'Distribution Merge' ) plt.title( '1-KNN Performance' ) plt.subplot(313)", "dDistance += (naData[i][k] - naData[j][k])**2 dDistance = math.sqrt( dDistance )", "+ (clock() - t) t = clock() lfKnnResults.append( _knnResult( np.random.permutation(naBoth)", "dist5 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )", "dist1 ) + np.sin( dist2 ) + np.sin( dist3 )", "/ lLen ''' Test function to plot results ''' def", "lLen ): if naData[i][-1] == naData[ llIndexes[i] ][-1]: lCount =", "count # of matching pairs ''' for i in range(", "distributions ''' if i == 0: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2]", "lf1Vals = [] lfVals = [] for perm in itertools.combinations(", "plt.clf() plt.subplot(311) plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1], color='r'", "from pylab import gca import itertools import string import numpy", "@param naData: A 2D numpy array. Each row is a", "plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) plt.ylabel( 'Feature 2' ) plt.xlabel(", "attributes ''' dist1 = np.random.uniform( -1, 1, 1000 ).reshape( -1,", "final column containing the classification. 
''' def oneKnn( naData ):", "color='b' ) plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') ) plt.ylabel('1-KNN", ") ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] ) naTest2 = np.hstack(", "one of three distributions ''' if i == 0: naTest1", "_plotResults( naDist1, naDist2, lfOneKnn, lf5Knn ): plt.clf() plt.subplot(311) plt.scatter( naDist1[:,0],", "matching pairs ''' for i in range( lLen ): if", ") else: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack(", "on ''' for j in range( 1, len(perm) ): sPerm", "= math.sqrt( dDistance ) ''' Two distances to check, for", "1 ) dist5 = np.random.uniform( -1, 1, 1000 ).reshape( -1,", "naData[:lSplit, :] naTest = naData[lSplit:, :] knn.addEvidence( naTrain.astype(float), 1 );", "naData ): if naData.ndim != 2: raise Exception( \"Data should", ").reshape( -1, 1 ) dist3 = np.random.uniform( -1, 1, 1000", "1. @param naData: A 2D numpy array. Each row is", "ldDistances[j]: ldDistances[j] = dDistance llIndexes[j] = i lCount = 0", "performance ''' np.random.seed( 12345 ) ''' Create 5 distributions for", "''' lDim = naData.shape[1] - 1 ''' Start best distances", "naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) elif i ==", "Merge' ) plt.title( '1-KNN Performance' ) plt.subplot(313) plt.plot( range( len(lf5Knn)", "naData[ llIndexes[i] ][-1]: lCount = lCount + 1 return float(lCount)", "in range(3): ''' Select one of three distributions ''' if", "code is released under the New BSD license. 
Please see", "+= (naData[i][k] - naData[j][k])**2 dDistance = math.sqrt( dDistance ) '''", "This source code is released under the New BSD license.", "Tech Research Corporation This source code is released under the", "xy=( .8, 0 ), xytext=( -.3 , 0 ), arrowprops=dict(facecolor='red',", "naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1)", "def _plotDist( naDist1, naDist2, i ): plt.clf() plt.scatter( naDist1[:,0], naDist1[:,1]", "cKnnRuntime + (clock() - t) naBoth[500:,0] = naBoth[500:,0] - .1", "in enumerate( distY ): if fVal >= 0: distY[i] =", "distributions on ''' for j in range( 1, len(perm) ):", "raise Exception( \"Data should have two dimensions\" ) lLen =", "= plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b' ) plt.legend(", "@summary: Query function for 1KNN, return value is a double", "plt.title( '1-KNN Performance' ) plt.subplot(313) plt.plot( range( len(lf5Knn) ), lf5Knn", "import itertools import string import numpy as np import math", "'Feature 2' ) plt.xlabel( 'Feature 1' ) plt.title( 'Iteration '", "finding closest neighbors ''' for i in range( lLen ):", "y values ''' naTest = np.hstack( (naTest, distY) ) lf1Vals.append(", "= float(lCount) / naResults.size return dResult ''' Tests performance of", "= np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) lDists", "np.sin( dist2 ) + np.sin( dist3 ) + np.sin( dist5", "in enumerate(naResults): if dVal == naTest[i,-1]: lCount = lCount +", "is an implementation of the 1-KNN algorithm for ranking features", "except for distribution 4 ''' distY = np.sin( dist1 )", "* lLen dDistance = 0.0; ''' Loop through finding closest", "a data point with the final column containing the classification.", "stack y values ''' naTest = np.hstack( (naTest, distY) )", "lCount = lCount + 1 dResult = float(lCount) / naResults.size", "lLen ): for j in range( i+1, lLen ): dDistance", "< ldDistances[j]: ldDistances[j] = dDistance llIndexes[j] = i lCount 
=", "'Data Distribution' ) plt.subplot(312) plt.plot( range( len(lfOneKnn) ), lfOneKnn )", "5KNN learner performance ''' for i in range(3): ''' Select", "np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] )", "''' distY = np.sin( dist1 ) + np.sin( dist2 )", "= np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2]", "1 else: distY[i] = 0 for i in range( 1,", "naTrain.astype(float), 1 ); ''' Query with last column omitted and", "+ (clock() - t) naBoth[500:,0] = naBoth[500:,0] - .1 print", "lfResults, lfKnnResults ) ''' Tests performance of 1-KNN ''' def", "= t-t; cKnnRuntime = t-t; lfResults = [] lfKnnResults =", "fVal >= 0: distY[i] = 1 else: distY[i] = 0", "== 0: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack(", "): ''' Split up data into training/testing ''' lSplit =", "= 1 else: distY[i] = 0 for i in range(", "column omitted and 5 nearest neighbors ''' naResults = knn.query(", "Function to plot 2 distributions ''' def _plotDist( naDist1, naDist2,", "plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' ) plt2 = plt.bar( np.arange(len(lfVals))", "i in range(3): ''' Select one of three distributions '''", "plt.xlabel( 'Feature 1' ) plt.title( 'Iteration ' + str(i) )", "for i's best, and j's best ''' if dDistance <", "http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. Created on Feb 20, 2011 @author:", "dist2 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )", "1-KNN algorithm for ranking features quickly. 
It uses the knn", "into training/testing ''' lSplit = naData.shape[0] * .7 naTrain =", ").reshape( -1, 1 ) dist5 = np.random.uniform( -1, 1, 1000", "in range( 1, 6 ): lsNames = [] lf1Vals =", "[] for perm in itertools.combinations( '12345', i ): ''' set", "np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )", "''' naTest = lDists[ int(perm[0]) - 1 ] sPerm =", "value of 1-KNN compared to 5KNN learner performance ''' np.random.seed(", "np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist4 =", "range( i+1, lLen ): dDistance = 0.0 for k in", ") plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') ) plt.ylabel('1-KNN Value/KNN", "naBoth[500:,0] - .1 print 'Runtime OneKnn:', cOneRuntime print 'Runtime 5-KNN:',", "naTest2, lfResults, lfKnnResults ) ''' Tests performance of 1-KNN '''", "clock ''' @summary: Query function for 1KNN, return value is", ", 0 ), arrowprops=dict(facecolor='red', shrink=0.05) ) gca().annotate( '', xy=( .7,", "itertools import string import numpy as np import math import", "np.ones(500).reshape(-1,1) ) ) elif i == 1: naTest1 = np.random.normal(", "np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) ) naOrig = np.vstack( (naTest1, naTest2)", "): if naData[i][-1] == naData[ llIndexes[i] ][-1]: lCount = lCount", "on Feb 20, 2011 @author: <NAME> @organization: Georgia Institute of", "string import numpy as np import math import knn from", "numpy array. Each row is a data point with the", "best distances as very large ''' ldDistances = [1E300] *", "lf5Knn ): plt.clf() plt.subplot(311) plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0],", "0 for i, dVal in enumerate(naResults): if dVal == naTest[i,-1]:", "'1-KNN Performance' ) plt.subplot(313) plt.plot( range( len(lf5Knn) ), lf5Knn )", "- .1 print 'Runtime OneKnn:', cOneRuntime print 'Runtime 5-KNN:', cKnnRuntime", "containing the classification. 
''' def oneKnn( naData ): if naData.ndim", ") naBoth = np.vstack( (naTest1, naTest2) ) ''' Keep track", "= np.hstack( (naTest, distY) ) lf1Vals.append( oneKnn( naTest ) )", ".2, color='r' ) plt2 = plt.bar( np.arange(len(lfVals)) + 0.2, lfVals,", ") ) ''' finally stack y values ''' naTest =", "in itertools.combinations( '12345', i ): ''' set test distribution to", "= clock() lfResults.append( oneKnn( naBoth ) ) cOneRuntime = cOneRuntime", "np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] ) naTest2 =", "returns which are correct ''' lCount = 0 for i,", "1 ); ''' Query with last column omitted and 5", "1, 1000 ).reshape( -1, 1 ) dist4 = np.random.uniform( -1,", "dDistance < ldDistances[j]: ldDistances[j] = dDistance llIndexes[j] = i lCount", "values ''' naTest = np.hstack( (naTest, distY) ) lf1Vals.append( oneKnn(", "math.sqrt( dDistance ) ''' Two distances to check, for i's", "other distributions on ''' for j in range( 1, len(perm)", "to use n^2/2 algorithm. ''' import matplotlib.pyplot as plt from", "Query function for 1KNN, return value is a double between", ") plt.subplots_adjust() plt.show() ''' Function to plot 2 distributions '''", "uses the knn implementation. @status: oneKNN functions correctly, optimized to", "range(3): ''' Select one of three distributions ''' if i", "= np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) else: naTest1 = np.random.normal(", "''' def _knnResult( naData ): ''' Split up data into", "lCount = 0 ''' Now count # of matching pairs", "the 5 attributes ''' dist1 = np.random.uniform( -1, 1, 1000", "BSD license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. 
Created on", "gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 ) gca().xaxis.set_ticklabels( lsNames ) plt.show() if", "naDist2, lfOneKnn, lf5Knn ): plt.clf() plt.subplot(311) plt.scatter( naDist1[:,0], naDist1[:,1] )", "-1, 1, 1000 ).reshape( -1, 1 ) lDists = [", "with the final column containing the classification. ''' def oneKnn(", ".7, 0 ), xytext=( 1.5 , 0 ), arrowprops=dict(facecolor='black', shrink=0.05)", "distY) ) lf1Vals.append( oneKnn( naTest ) ) lfVals.append( _knnResult( np.random.permutation(naTest)", "KNN performance ''' def _knnResult( naData ): ''' Split up", "sPerm ) ''' Plot results ''' plt1 = plt.bar( np.arange(len(lf1Vals)),", "int(perm[0]) - 1 ] sPerm = perm[0] ''' stack other", "naTest = lDists[ int(perm[0]) - 1 ] sPerm = perm[0]", "j in range( 1, len(perm) ): sPerm = sPerm +", "1 ] ) ) ''' finally stack y values '''", ") plt.title( '1-KNN Performance' ) plt.subplot(313) plt.plot( range( len(lf5Knn) ),", "= lCount + 1 dResult = float(lCount) / naResults.size return", "dVal in enumerate(naResults): if dVal == naTest[i,-1]: lCount = lCount", ") + np.sin( dist5 ) distY = distY.reshape( -1, 1", "5 nearest neighbors ''' naResults = knn.query( naTest[:,:-1], 5, 'mode')", ") + np.sin( dist3 ) + np.sin( dist5 ) distY", "plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) #plt.ylabel( 'Feature 2' ) #plt.xlabel(", "correct ''' lCount = 0 for i, dVal in enumerate(naResults):", "as plt from pylab import gca import itertools import string", "lf1Vals, .2, color='r' ) plt2 = plt.bar( np.arange(len(lfVals)) + 0.2,", "Count returns which are correct ''' lCount = 0 for", "It uses the knn implementation. @status: oneKNN functions correctly, optimized", "return value is a double between 0 and 1. 
@param", "naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) else: naTest1 =", "i ) t = clock() lfResults.append( oneKnn( naBoth ) )", "= cOneRuntime + (clock() - t) t = clock() lfKnnResults.append(", "Institute of Technology @contact: <EMAIL> @summary: This is an implementation", "''' Count returns which are correct ''' lCount = 0", "- 1 ] sPerm = perm[0] ''' stack other distributions", "'Distribution Merge' ) plt.title( '1-KNN Performance' ) plt.subplot(313) plt.plot( range(", ") naOrig = np.vstack( (naTest1, naTest2) ) naBoth = np.vstack(", "i ): plt.clf() plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1],", ">= 0: distY[i] = 1 else: distY[i] = 0 for", "and 5 nearest neighbors ''' naResults = knn.query( naTest[:,:-1], 5,", "of the 1-KNN algorithm for ranking features quickly. It uses", "!= 2: raise Exception( \"Data should have two dimensions\" )", "knn from time import clock ''' @summary: Query function for", "/ naResults.size return dResult ''' Tests performance of 1-KNN '''", "loc=[1.5,0],scale=.25,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) elif", "Value/KNN Classification') plt.xlabel('Feature Set') plt.title('Combinations of ' + str(i) +", "float(lCount) / naResults.size return dResult ''' Tests performance of 1-KNN", "lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) ) cKnnRuntime = cKnnRuntime + (clock()", "t) naBoth[500:,0] = naBoth[500:,0] - .1 print 'Runtime OneKnn:', cOneRuntime", "= lDists[ int(perm[0]) - 1 ] sPerm = perm[0] '''", "for i, fVal in enumerate( distY ): if fVal >=", "performance ''' for i in range(3): ''' Select one of", "2: raise Exception( \"Data should have two dimensions\" ) lLen", "algorithm for ranking features quickly. 
It uses the knn implementation.", "as very large ''' ldDistances = [1E300] * lLen llIndexes", "= np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) )", "if fVal >= 0: distY[i] = 1 else: distY[i] =", "naTest1, naBoth[100:,:], i ) t = clock() lfResults.append( oneKnn( naBoth", "= perm[0] ''' stack other distributions on ''' for j", "three random samples to show the value of 1-KNN compared", ") lf1Vals.append( oneKnn( naTest ) ) lfVals.append( _knnResult( np.random.permutation(naTest) )", "+ str(i) + ' Features') plt.ylim( (0,1) ) if len(lf1Vals)", ") ''' finally stack y values ''' naTest = np.hstack(", "[] lf1Vals = [] lfVals = [] for perm in", "plt.title( 'Data Distribution' ) plt.subplot(312) plt.plot( range( len(lfOneKnn) ), lfOneKnn", "dimensions\" ) lLen = naData.shape[0] ''' # of dimensions, subtract", "value is a double between 0 and 1. @param naData:", "= np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) )", "pairs ''' for i in range( lLen ): if naData[i][-1]", "#plt.xlabel( 'Distribution Merge' ) plt.title( '1-KNN Performance' ) plt.subplot(313) plt.plot(", "matplotlib.pyplot as plt from pylab import gca import itertools import", "lLen ''' Test function to plot results ''' def _plotResults(", "2' ) plt.xlabel( 'Feature 1' ) plt.title( 'Iteration ' +", "first element ''' naTest = lDists[ int(perm[0]) - 1 ]", "naTest = np.hstack( (naTest, distY) ) lf1Vals.append( oneKnn( naTest )", "last column omitted and 5 nearest neighbors ''' naResults =", "data point with the final column containing the classification. 
'''", "plt from pylab import gca import itertools import string import", "(naTest2, np.ones(500).reshape(-1,1) ) ) elif i == 1: naTest1 =", "[] lfKnnResults = [] for i in range( 15 ):", "i in range( lLen ): if naData[i][-1] == naData[ llIndexes[i]", "1-KNN ''' def _test2(): ''' Generate three random samples to", "naResults.size return dResult ''' Tests performance of 1-KNN ''' def", "This is an implementation of the 1-KNN algorithm for ranking", ") plt.ylabel( '1-KNN Value' ) #plt.xlabel( 'Distribution Merge' ) plt.title(", "value of 1-KNN compared to 5KNN learner performance ''' for", "lLen ): dDistance = 0.0 for k in range( 0,", "<filename>qstklearn/1knn.py<gh_stars>100-1000 ''' (c) 2011, 2012 Georgia Tech Research Corporation This", "use n^2/2 algorithm. ''' import matplotlib.pyplot as plt from pylab", "lfKnnResults = [] for i in range( 15 ): #_plotDist(", "sPerm + str(perm[j]) naTest = np.hstack( (naTest, lDists[ int(perm[j]) -", "features quickly. It uses the knn implementation. @status: oneKNN functions", "j's best ''' if dDistance < ldDistances[i]: ldDistances[i] = dDistance", "2: plt.xlim( (-1,1) ) gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 ) gca().xaxis.set_ticklabels(", "): #_plotDist( naTest1, naBoth[100:,:], i ) t = clock() lfResults.append(", "Generate three random samples to show the value of 1-KNN", "is released under the New BSD license. 
Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License", ") ''' Create 5 distributions for each of the 5", "-1, 1 ) dist2 = np.random.uniform( -1, 1, 1000 ).reshape(", "plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) #plt.ylabel(", "if dDistance < ldDistances[i]: ldDistances[i] = dDistance llIndexes[i] = j", "dDistance ) ''' Two distances to check, for i's best,", "lfVals.append( _knnResult( np.random.permutation(naTest) ) ) lsNames.append( sPerm ) ''' Plot", "= [-1] * lLen dDistance = 0.0; ''' Loop through", ") dist2 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1", "plt.ylabel( 'Feature 2' ) plt.xlabel( 'Feature 1' ) plt.title( 'Iteration", "'5-KNN Performance' ) plt.subplots_adjust() plt.show() ''' Function to plot 2", "np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) lDists =", "Merge' ) plt.title( '5-KNN Performance' ) plt.subplots_adjust() plt.show() ''' Function", "''' def _test1(): ''' Generate three random samples to show", "#gca().annotate( '', xy=( .8, 0 ), xytext=( -.3 , 0", "distribution 4 ''' distY = np.sin( dist1 ) + np.sin(", "@summary: This is an implementation of the 1-KNN algorithm for", "import numpy as np import math import knn from time", "as np import math import knn from time import clock", "for i in range( 1, 6 ): lsNames = []", "- 1 ] ) ) ''' finally stack y values", "for each of the 5 attributes ''' dist1 = np.random.uniform(", "2012 Georgia Tech Research Corporation This source code is released", "): dDistance += (naData[i][k] - naData[j][k])**2 dDistance = math.sqrt( dDistance", "= j if dDistance < ldDistances[j]: ldDistances[j] = dDistance llIndexes[j]", ") distY = distY.reshape( -1, 1 ) for i, fVal", "dist3, dist4, dist5 ] ''' All features used except for", "performance ''' def _knnResult( naData ): ''' Split up data", "''' for i in range( lLen ): if naData[i][-1] ==", "Exception( \"Data should have two dimensions\" ) lLen = naData.shape[0]", "plt.subplot(311) 
plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1], color='r' )", "of ' + str(i) + ' Features') plt.ylim( (0,1) )", "= np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist2", "4 ''' distY = np.sin( dist1 ) + np.sin( dist2", "source code is released under the New BSD license. Please", "= np.vstack( (naTest1, naTest2) ) ''' Keep track of runtimes", "(naTest2, np.ones(250).reshape(-1,1) ) ) naOrig = np.vstack( (naTest1, naTest2) )", "plt.subplot(312) plt.plot( range( len(lfOneKnn) ), lfOneKnn ) plt.ylabel( '1-KNN Value'", "(naData[i][k] - naData[j][k])**2 dDistance = math.sqrt( dDistance ) ''' Two", "0.2, lfVals, .2, color='b' ) plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN,", "@status: oneKNN functions correctly, optimized to use n^2/2 algorithm. '''", "return float(lCount) / lLen ''' Test function to plot results", "training/testing ''' lSplit = naData.shape[0] * .7 naTrain = naData[:lSplit,", ") gca().xaxis.set_ticklabels( lsNames ) plt.show() if __name__ == '__main__': _test1()", "samples to show the value of 1-KNN compared to 5KNN", "(naTest1, naTest2) ) naBoth = np.vstack( (naTest1, naTest2) ) '''", "a double between 0 and 1. 
@param naData: A 2D", "@author: <NAME> @organization: Georgia Institute of Technology @contact: <EMAIL> @summary:", "np.hstack( (naTest, distY) ) lf1Vals.append( oneKnn( naTest ) ) lfVals.append(", "in range( 1, len(perm) ): sPerm = sPerm + str(perm[j])", "''' (c) 2011, 2012 Georgia Tech Research Corporation This source", "1 ) for i, fVal in enumerate( distY ): if", "0: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1,", "naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal(", ") gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 ) gca().xaxis.set_ticklabels( lsNames ) plt.show()", "0 ), xytext=( 1.5 , 0 ), arrowprops=dict(facecolor='black', shrink=0.05) )", "naData.shape[0] ''' # of dimensions, subtract one for classification '''", "Value' ) #plt.xlabel( 'Distribution Merge' ) plt.title( '1-KNN Performance' )", "check, for i's best, and j's best ''' if dDistance", "naTest[i,-1]: lCount = lCount + 1 dResult = float(lCount) /", "Now count # of matching pairs ''' for i in", "return dResult ''' Tests performance of 1-KNN ''' def _test1():", "+ np.sin( dist3 ) + np.sin( dist5 ) distY =", "distY = distY.reshape( -1, 1 ) for i, fVal in", "2 distributions ''' def _plotDist( naDist1, naDist2, i ): plt.clf()", "5 distributions for each of the 5 attributes ''' dist1", "i ): ''' set test distribution to first element '''", "t = clock() lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) ) cKnnRuntime =", ".1 print 'Runtime OneKnn:', cOneRuntime print 'Runtime 5-KNN:', cKnnRuntime _plotResults(", "lLen llIndexes = [-1] * lLen dDistance = 0.0; '''", "perm[0] ''' stack other distributions on ''' for j in", "Performance' ) plt.subplots_adjust() plt.show() ''' Function to plot 2 distributions", "i, fVal in enumerate( distY ): if fVal >= 0:", "the value of 1-KNN compared to 5KNN learner performance '''", "for perm in itertools.combinations( '12345', i ): ''' set test", "for i in range( 15 
): #_plotDist( naTest1, naBoth[100:,:], i", "' + str(i) ) plt.show() ''' Function to test KNN", "-1, 1 ) for i, fVal in enumerate( distY ):", "naBoth[100:,:], i ) t = clock() lfResults.append( oneKnn( naBoth )", "''' np.random.seed( 12345 ) ''' Create 5 distributions for each", "#plt.xlabel( 'Feature 1' ) #gca().annotate( '', xy=( .8, 0 ),", "-1, 1, 1000 ).reshape( -1, 1 ) dist2 = np.random.uniform(", "= distY.reshape( -1, 1 ) for i, fVal in enumerate(", "the final column containing the classification. ''' def oneKnn( naData", "+ 0.2, lfVals, .2, color='b' ) plt.legend( (plt1[0], plt2[0]), ('1-KNN',", "plt.ylim( (0,1) ) if len(lf1Vals) < 2: plt.xlim( (-1,1) )", "fVal in enumerate( distY ): if fVal >= 0: distY[i]", "<EMAIL> @summary: This is an implementation of the 1-KNN algorithm", "cKnnRuntime = t-t; lfResults = [] lfKnnResults = [] for", "); ''' Query with last column omitted and 5 nearest", "implementation of the 1-KNN algorithm for ranking features quickly. It", "''' stack other distributions on ''' for j in range(", "'Feature 2' ) #plt.xlabel( 'Feature 1' ) #gca().annotate( '', xy=(", "+ ' Features') plt.ylim( (0,1) ) if len(lf1Vals) < 2:", "1' ) plt.title( 'Iteration ' + str(i) ) plt.show() '''", "), xytext=( 1.5 , 0 ), arrowprops=dict(facecolor='black', shrink=0.05) ) plt.title(", "range( len(lf5Knn) ), lf5Knn ) plt.ylabel( '% Correct Classification' )", ") lDists = [ dist1, dist2, dist3, dist4, dist5 ]", "_knnResult( np.random.permutation(naBoth) ) ) cKnnRuntime = cKnnRuntime + (clock() -", "gca().xaxis.set_ticklabels( lsNames ) plt.show() if __name__ == '__main__': _test1() #_test2()", "== naData[ llIndexes[i] ][-1]: lCount = lCount + 1 return", "'Feature 1' ) #gca().annotate( '', xy=( .8, 0 ), xytext=(", "New BSD license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. Created", "n^2/2 algorithm. 
''' import matplotlib.pyplot as plt from pylab import", "'Runtime 5-KNN:', cKnnRuntime _plotResults( naTest1, naTest2, lfResults, lfKnnResults ) '''", ") ) elif i == 1: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2]", "< ldDistances[i]: ldDistances[i] = dDistance llIndexes[i] = j if dDistance", "''' t = clock() cOneRuntime = t-t; cKnnRuntime = t-t;", "(clock() - t) naBoth[500:,0] = naBoth[500:,0] - .1 print 'Runtime", "''' import matplotlib.pyplot as plt from pylab import gca import", "Georgia Tech Research Corporation This source code is released under", "- t) naBoth[500:,0] = naBoth[500:,0] - .1 print 'Runtime OneKnn:',", "- naData[j][k])**2 dDistance = math.sqrt( dDistance ) ''' Two distances", "xy=( .7, 0 ), xytext=( 1.5 , 0 ), arrowprops=dict(facecolor='black',", "license details. Created on Feb 20, 2011 @author: <NAME> @organization:", "enumerate( distY ): if fVal >= 0: distY[i] = 1", "_knnResult( np.random.permutation(naTest) ) ) lsNames.append( sPerm ) ''' Plot results", "compared to 5KNN learner performance ''' np.random.seed( 12345 ) '''", "np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist2 =", "lSplit = naData.shape[0] * .7 naTrain = naData[:lSplit, :] naTest", "plt2 = plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b' )", "[ dist1, dist2, dist3, dist4, dist5 ] ''' All features", "shrink=0.05) ) gca().annotate( '', xy=( .7, 0 ), xytext=( 1.5", "plt.show() ''' Function to test KNN performance ''' def _knnResult(", "naData[j][k])**2 dDistance = math.sqrt( dDistance ) ''' Two distances to", "enumerate(naResults): if dVal == naTest[i,-1]: lCount = lCount + 1", "np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )", "''' Select one of three distributions ''' if i ==", "cKnnRuntime _plotResults( naTest1, naTest2, lfResults, lfKnnResults ) ''' Tests performance", ") dist3 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1", "lfVals = [] for perm in itertools.combinations( 
'12345', i ):", "): if fVal >= 0: distY[i] = 1 else: distY[i]", "= [] for perm in itertools.combinations( '12345', i ): '''", "t = clock() lfResults.append( oneKnn( naBoth ) ) cOneRuntime =", "plt.scatter( naDist1[:,0], naDist1[:,1] ) plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) plt.ylabel(", "to show the value of 1-KNN compared to 5KNN learner", "def _knnResult( naData ): ''' Split up data into training/testing", "of 1-KNN compared to 5KNN learner performance ''' for i", "i's best, and j's best ''' if dDistance < ldDistances[i]:", "0 ''' Now count # of matching pairs ''' for", ") #plt.xlabel( 'Feature 1' ) #gca().annotate( '', xy=( .8, 0", "closest neighbors ''' for i in range( lLen ): for", "is a double between 0 and 1. @param naData: A", ") naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) elif i", "+ str(perm[j]) naTest = np.hstack( (naTest, lDists[ int(perm[j]) - 1", "# of matching pairs ''' for i in range( lLen", "lfResults.append( oneKnn( naBoth ) ) cOneRuntime = cOneRuntime + (clock()", "for k in range( 0, lDim ): dDistance += (naData[i][k]", "double between 0 and 1. 
@param naData: A 2D numpy", "Research Corporation This source code is released under the New", "large ''' ldDistances = [1E300] * lLen llIndexes = [-1]", "to 5KNN learner performance ''' for i in range(3): '''", "ldDistances[i] = dDistance llIndexes[i] = j if dDistance < ldDistances[j]:", "''' if dDistance < ldDistances[i]: ldDistances[i] = dDistance llIndexes[i] =", "''' Plot results ''' plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2,", "Distribution' ) plt.subplot(312) plt.plot( range( len(lfOneKnn) ), lfOneKnn ) plt.ylabel(", "2011 @author: <NAME> @organization: Georgia Institute of Technology @contact: <EMAIL>", "for i, dVal in enumerate(naResults): if dVal == naTest[i,-1]: lCount", "lfVals, .2, color='b' ) plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5')", "which are correct ''' lCount = 0 for i, dVal", "dist4 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )", "np.ones(250).reshape(-1,1) ) ) naOrig = np.vstack( (naTest1, naTest2) ) naBoth", "lCount = lCount + 1 return float(lCount) / lLen '''", "20, 2011 @author: <NAME> @organization: Georgia Institute of Technology @contact:", "of the 5 attributes ''' dist1 = np.random.uniform( -1, 1,", "of dimensions, subtract one for classification ''' lDim = naData.shape[1]", "), arrowprops=dict(facecolor='black', shrink=0.05) ) plt.title( 'Data Distribution' ) plt.subplot(312) plt.plot(", "loc=[1.5,0],scale=.25,size=[250,2] ) naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) ) naOrig", "of 1-KNN ''' def _test2(): ''' Generate three random samples", "dist2 ) + np.sin( dist3 ) + np.sin( dist5 )", "[1E300] * lLen llIndexes = [-1] * lLen dDistance =", "loc=[1.5,0],scale=.1,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) else:", "np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist5 =", "Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. Created on Feb 20,", "the knn implementation. 
@status: oneKNN functions correctly, optimized to use", "1 ''' Start best distances as very large ''' ldDistances", "= 0.0; ''' Loop through finding closest neighbors ''' for", "= lCount + 1 return float(lCount) / lLen ''' Test", "np.ones(500).reshape(-1,1) ) ) else: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1", "plt.plot( range( len(lfOneKnn) ), lfOneKnn ) plt.ylabel( '1-KNN Value' )", "''' Generate three random samples to show the value of", ") t = clock() lfResults.append( oneKnn( naBoth ) ) cOneRuntime", "shrink=0.05) ) plt.title( 'Data Distribution' ) plt.subplot(312) plt.plot( range( len(lfOneKnn)", "np.sin( dist1 ) + np.sin( dist2 ) + np.sin( dist3", "' Features') plt.ylim( (0,1) ) if len(lf1Vals) < 2: plt.xlim(", "if dDistance < ldDistances[j]: ldDistances[j] = dDistance llIndexes[j] = i", "lDim = naData.shape[1] - 1 ''' Start best distances as", ") #plt.ylabel( 'Feature 2' ) #plt.xlabel( 'Feature 1' ) #gca().annotate(", "'% Correct Classification' ) #plt.xlabel( 'Distribution Merge' ) plt.title( '5-KNN", ") lfVals.append( _knnResult( np.random.permutation(naTest) ) ) lsNames.append( sPerm ) '''", "5KNN learner performance ''' np.random.seed( 12345 ) ''' Create 5", "1, 1000 ).reshape( -1, 1 ) dist3 = np.random.uniform( -1,", "(0,1) ) if len(lf1Vals) < 2: plt.xlim( (-1,1) ) gca().xaxis.set_ticks(", ":] naTest = naData[lSplit:, :] knn.addEvidence( naTrain.astype(float), 1 ); '''", "(naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] ) naTest2", "= naBoth[500:,0] - .1 print 'Runtime OneKnn:', cOneRuntime print 'Runtime", "= [] lfVals = [] for perm in itertools.combinations( '12345',", "t-t; lfResults = [] lfKnnResults = [] for i in", "dDistance llIndexes[j] = i lCount = 0 ''' Now count", "plt.subplot(313) plt.plot( range( len(lf5Knn) ), lf5Knn ) plt.ylabel( '% Correct", "range( lLen ): if naData[i][-1] == naData[ llIndexes[i] ][-1]: lCount", "<NAME> @organization: Georgia Institute 
of Technology @contact: <EMAIL> @summary: This", "import matplotlib.pyplot as plt from pylab import gca import itertools", "), lf5Knn ) plt.ylabel( '% Correct Classification' ) #plt.xlabel( 'Distribution", "): ''' set test distribution to first element ''' naTest", "naData: A 2D numpy array. Each row is a data", "t-t; cKnnRuntime = t-t; lfResults = [] lfKnnResults = []", "12345 ) ''' Create 5 distributions for each of the", "naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] ) naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1)", "array. Each row is a data point with the final", "three distributions ''' if i == 0: naTest1 = np.random.normal(", "= naData.shape[0] * .7 naTrain = naData[:lSplit, :] naTest =", ") elif i == 1: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )", "''' Function to plot 2 distributions ''' def _plotDist( naDist1,", "ldDistances[i]: ldDistances[i] = dDistance llIndexes[i] = j if dDistance <", "Start best distances as very large ''' ldDistances = [1E300]", "np.random.permutation(naBoth) ) ) cKnnRuntime = cKnnRuntime + (clock() - t)", "naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] ) naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1)", ") #gca().annotate( '', xy=( .8, 0 ), xytext=( -.3 ,", "0 ), arrowprops=dict(facecolor='red', shrink=0.05) ) gca().annotate( '', xy=( .7, 0", "with last column omitted and 5 nearest neighbors ''' naResults", "' + str(i) + ' Features') plt.ylim( (0,1) ) if", "1, 6 ): lsNames = [] lf1Vals = [] lfVals", "for ranking features quickly. It uses the knn implementation. 
@status:", "(clock() - t) t = clock() lfKnnResults.append( _knnResult( np.random.permutation(naBoth) )", "naDist1, naDist2, lfOneKnn, lf5Knn ): plt.clf() plt.subplot(311) plt.scatter( naDist1[:,0], naDist1[:,1]", "plt.show() ''' Function to plot 2 distributions ''' def _plotDist(", "def oneKnn( naData ): if naData.ndim != 2: raise Exception(", "distances to check, for i's best, and j's best '''", "naData[lSplit:, :] knn.addEvidence( naTrain.astype(float), 1 ); ''' Query with last", "are correct ''' lCount = 0 for i, dVal in", "print 'Runtime 5-KNN:', cKnnRuntime _plotResults( naTest1, naTest2, lfResults, lfKnnResults )", ").reshape( -1, 1 ) dist4 = np.random.uniform( -1, 1, 1000", ") dist4 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1", "plt.ylabel('1-KNN Value/KNN Classification') plt.xlabel('Feature Set') plt.title('Combinations of ' + str(i)", "'', xy=( .8, 0 ), xytext=( -.3 , 0 ),", "algorithm. ''' import matplotlib.pyplot as plt from pylab import gca", "''' def _plotDist( naDist1, naDist2, i ): plt.clf() plt.scatter( naDist1[:,0],", "Loop through finding closest neighbors ''' for i in range(", "len(lfOneKnn) ), lfOneKnn ) plt.ylabel( '1-KNN Value' ) #plt.xlabel( 'Distribution", "= 0 ''' Now count # of matching pairs '''", "Georgia Institute of Technology @contact: <EMAIL> @summary: This is an", "0 for i in range( 1, 6 ): lsNames =", "print 'Runtime OneKnn:', cOneRuntime print 'Runtime 5-KNN:', cKnnRuntime _plotResults( naTest1,", "function to plot results ''' def _plotResults( naDist1, naDist2, lfOneKnn,", "in range( lLen ): for j in range( i+1, lLen", "(c) 2011, 2012 Georgia Tech Research Corporation This source code", "''' naTest = np.hstack( (naTest, distY) ) lf1Vals.append( oneKnn( naTest", "'', xy=( .7, 0 ), xytext=( 1.5 , 0 ),", "1000 ).reshape( -1, 1 ) dist3 = np.random.uniform( -1, 1,", "Correct Classification' ) #plt.xlabel( 'Distribution Merge' ) plt.title( '5-KNN Performance'", "''' dist1 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1", "): 
lsNames = [] lf1Vals = [] lfVals = []", "t = clock() cOneRuntime = t-t; cKnnRuntime = t-t; lfResults", "import string import numpy as np import math import knn", "''' if i == 0: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )", "row is a data point with the final column containing", "under the New BSD license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license", "naTrain = naData[:lSplit, :] naTest = naData[lSplit:, :] knn.addEvidence( naTrain.astype(float),", "oneKnn( naData ): if naData.ndim != 2: raise Exception( \"Data", "''' Two distances to check, for i's best, and j's", "Select one of three distributions ''' if i == 0:", "= np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist3", "1: naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] ) naTest1 = np.hstack( (naTest1,", "neighbors ''' naResults = knn.query( naTest[:,:-1], 5, 'mode') ''' Count", "All features used except for distribution 4 ''' distY =", "column containing the classification. 
''' def oneKnn( naData ): if", "= dDistance llIndexes[i] = j if dDistance < ldDistances[j]: ldDistances[j]", "1000 ).reshape( -1, 1 ) dist5 = np.random.uniform( -1, 1,", "''' def _plotResults( naDist1, naDist2, lfOneKnn, lf5Knn ): plt.clf() plt.subplot(311)", "in range( i+1, lLen ): dDistance = 0.0 for k", "def _plotResults( naDist1, naDist2, lfOneKnn, lf5Knn ): plt.clf() plt.subplot(311) plt.scatter(", "-.3 , 0 ), arrowprops=dict(facecolor='red', shrink=0.05) ) gca().annotate( '', xy=(", ") plt.scatter( naDist2[:,0], naDist2[:,1], color='r' ) #plt.ylabel( 'Feature 2' )", ") gca().annotate( '', xy=( .7, 0 ), xytext=( 1.5 ,", "naOrig = np.vstack( (naTest1, naTest2) ) naBoth = np.vstack( (naTest1,", "= naData.shape[1] - 1 ''' Start best distances as very", "to plot 2 distributions ''' def _plotDist( naDist1, naDist2, i", "'Feature 1' ) plt.title( 'Iteration ' + str(i) ) plt.show()", "= np.sin( dist1 ) + np.sin( dist2 ) + np.sin(", "1 ) dist3 = np.random.uniform( -1, 1, 1000 ).reshape( -1,", "''' set test distribution to first element ''' naTest =", "plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' ) plt2 =", "= sPerm + str(perm[j]) naTest = np.hstack( (naTest, lDists[ int(perm[j])", "), xytext=( -.3 , 0 ), arrowprops=dict(facecolor='red', shrink=0.05) ) gca().annotate(", "naBoth[500:,0] = naBoth[500:,0] - .1 print 'Runtime OneKnn:', cOneRuntime print", "dist3 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )", "np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) ) elif i == 1: naTest1", "= [1E300] * lLen llIndexes = [-1] * lLen dDistance", "knn.addEvidence( naTrain.astype(float), 1 ); ''' Query with last column omitted", "1-KNN ''' def _test1(): ''' Generate three random samples to", "naBoth = np.vstack( (naTest1, naTest2) ) ''' Keep track of", "import math import knn from time import clock ''' @summary:", "][-1]: lCount = lCount + 1 return float(lCount) / lLen", "[] for i in range( 15 ): #_plotDist( naTest1, naBoth[100:,:],", "and j's best ''' if 
dDistance < ldDistances[i]: ldDistances[i] =", "nearest neighbors ''' naResults = knn.query( naTest[:,:-1], 5, 'mode') '''", ") plt.subplot(312) plt.plot( range( len(lfOneKnn) ), lfOneKnn ) plt.ylabel( '1-KNN", "''' Start best distances as very large ''' ldDistances =", "lDists[ int(perm[j]) - 1 ] ) ) ''' finally stack", "quickly. It uses the knn implementation. @status: oneKNN functions correctly,", "import clock ''' @summary: Query function for 1KNN, return value", "plt.title( 'Iteration ' + str(i) ) plt.show() ''' Function to", "-1, 1 ) dist5 = np.random.uniform( -1, 1, 1000 ).reshape(", "for i in range( lLen ): for j in range(", "through finding closest neighbors ''' for i in range( lLen", "knn.query( naTest[:,:-1], 5, 'mode') ''' Count returns which are correct", "Set') plt.title('Combinations of ' + str(i) + ' Features') plt.ylim(", "= np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) ) naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2]", "clock() lfResults.append( oneKnn( naBoth ) ) cOneRuntime = cOneRuntime +", "xytext=( -.3 , 0 ), arrowprops=dict(facecolor='red', shrink=0.05) ) gca().annotate( '',", "sPerm = perm[0] ''' stack other distributions on ''' for", "Keep track of runtimes ''' t = clock() cOneRuntime =", "in range( 15 ): #_plotDist( naTest1, naBoth[100:,:], i ) t", "0 ), xytext=( -.3 , 0 ), arrowprops=dict(facecolor='red', shrink=0.05) )", "the New BSD license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details.", "to check, for i's best, and j's best ''' if", "''' Now count # of matching pairs ''' for i", "omitted and 5 nearest neighbors ''' naResults = knn.query( naTest[:,:-1],", "results ''' def _plotResults( naDist1, naDist2, lfOneKnn, lf5Knn ): plt.clf()", "classification. 
''' def oneKnn( naData ): if naData.ndim != 2:", "= np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 ) dist5", "-1, 1, 1000 ).reshape( -1, 1 ) dist5 = np.random.uniform(", "#plt.ylabel( 'Feature 2' ) #plt.xlabel( 'Feature 1' ) #gca().annotate( '',", "have two dimensions\" ) lLen = naData.shape[0] ''' # of" ]
[ "coordinates and species, etc ao_log : description of functions (either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Computes the matrix elements given by funct, for instance coulomb", "s2, f2, norbs) else: for i1 in range(s1,f1): for i2", "= \", count) #print(\"sum kernel: {0:.6f}\".format(np.sum(abs(res)))) #np.savetxt(\"kernel_pyscf.txt\", res) #import sys", "ao_log : description of functions (either orbitals or product basis", "use_numba: fill_triu_v2(oo2f, res, s1, f1, s2, f2, norbs) else: for", "Developers. All Rights Reserved. # # Licensed under the Apache", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "All Rights Reserved. # # Licensed under the Apache License,", "2.0 (the \"License\"); # you may not use this file", "file except in compliance with the License. # You may", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "nb from pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril use_numba = True except:", "import fill_triu_v2, fill_tril use_numba = True except: use_numba = False", "arrays of coordinates and species, etc ao_log : description of", "Copyright 2014-2018 The PySCF Developers. All Rights Reserved. 
# #", "from pyscf.nao.m_coulomb_am import coulomb_am import numpy as np try: import", "distributed under the License is distributed on an \"AS IS\"", "in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): if atom2>atom1: continue # skip oo2f = funct(me,sp1,rv1,sp2,rv2,", "Returns: matrix elements for the whole system in packed form", "me = ao_matelem_c(sv.ao_log) if ao_log is None else aome.init_one_set(ao_log) atom2s", "fill_triu_v2(oo2f, res, s1, f1, s2, f2, norbs) else: for i1", "skip oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs) if use_numba: fill_triu_v2(oo2f, res, s1,", "in range(s2, min(i1+1, f2)): res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2] #print(\"number call =", "the matrix elements given by funct, for instance coulomb interaction", "pyscf.nao.m_pack2den import ij2pack_l aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp) me = ao_matelem_c(sv.ao_log)", "atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp] norbs = atom2s[-1] res = np.zeros(norbs*(norbs+1)//2,", "False # # # def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs):", "the specific language governing permissions and # limitations under the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "if use_numba: fill_triu_v2(oo2f, res, s1, f1, s2, f2, norbs) else:", "have arrays of coordinates and species, etc ao_log : description", "print_function, division from pyscf.nao.m_coulomb_am import coulomb_am import numpy as np", "atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): if atom2>atom1: continue # skip oo2f =", "from pyscf.nao.m_pack2den import ij2pack_l aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp) me =", "express or implied. 
# See the License for the specific", "applicable law or agreed to in writing, software # distributed", "triangular part) \"\"\" from pyscf.nao.m_ao_matelem import ao_matelem_c from pyscf.nao.m_pack2den import", "#print(\"sum kernel: {0:.6f}\".format(np.sum(abs(res)))) #np.savetxt(\"kernel_pyscf.txt\", res) #import sys #sys.exit() return res,", "except in compliance with the License. # You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "functions (either orbitals or product basis functions) Returns: matrix elements", "np.zeros((sv.natm+1), dtype=np.int64) for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp] norbs = atom2s[-1]", "elements for the whole system in packed form (lower triangular", "for i1 in range(s1,f1): for i2 in range(s2, min(i1+1, f2)):", "# # def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs): \"\"\" Computes", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "dtype=np.float64, **kvargs): \"\"\" Computes the matrix elements given by funct,", "np.zeros(norbs*(norbs+1)//2, dtype=dtype) for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): #print(\"atom1 = {0}, rv1", "oo2f[i1-s1,i2-s2] #print(\"number call = \", count) #print(\"sum kernel: {0:.6f}\".format(np.sum(abs(res)))) #np.savetxt(\"kernel_pyscf.txt\",", "not use this file except in compliance with the License.", "i2 in range(s2, min(i1+1, f2)): res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2] #print(\"number call", "(either orbitals or product basis functions) Returns: matrix elements for", "= ao_matelem_c(sv.ao_log) if ao_log is None else aome.init_one_set(ao_log) atom2s =", "writing, software # distributed under the License is distributed on", "in writing, software # distributed under the License is distributed", "pyscf.nao.m_coulomb_am import coulomb_am import numpy as np try: import numba", "description of functions (either 
orbitals or product basis functions) Returns:", "norbs = atom2s[-1] res = np.zeros(norbs*(norbs+1)//2, dtype=dtype) for atom1,[sp1,rv1,s1,f1] in", "f1, s2, f2, norbs) else: for i1 in range(s1,f1): for", "you may not use this file except in compliance with", "s1, f1, s2, f2, norbs) else: for i1 in range(s1,f1):", "#print(\"number call = \", count) #print(\"sum kernel: {0:.6f}\".format(np.sum(abs(res)))) #np.savetxt(\"kernel_pyscf.txt\", res)", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "aome.init_one_set(ao_log) atom2s = np.zeros((sv.natm+1), dtype=np.int64) for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp]", "for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp] norbs = atom2s[-1] res =", "language governing permissions and # limitations under the License. from", "import numba as nb from pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril use_numba", "sv.ao_log.pp) me = ao_matelem_c(sv.ao_log) if ao_log is None else aome.init_one_set(ao_log)", "# limitations under the License. from __future__ import print_function, division", "use this file except in compliance with the License. 
#", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "True except: use_numba = False # # # def comp_coulomb_pack(sv,", "in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp] norbs = atom2s[-1] res = np.zeros(norbs*(norbs+1)//2, dtype=dtype)", "import print_function, division from pyscf.nao.m_coulomb_am import coulomb_am import numpy as", "orbitals or product basis functions) Returns: matrix elements for the", "the whole system in packed form (lower triangular part) \"\"\"", "pyscf.nao.m_ao_matelem import ao_matelem_c from pyscf.nao.m_pack2den import ij2pack_l aome = ao_matelem_c(sv.ao_log.rr,", "enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp] norbs = atom2s[-1] res = np.zeros(norbs*(norbs+1)//2, dtype=dtype) for", "for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): if atom2>atom1: continue # skip oo2f", "of functions (either orbitals or product basis functions) Returns: matrix", "instance coulomb interaction Args: sv : (System Variables), this must", "CONDITIONS OF ANY KIND, either express or implied. # See", "pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril use_numba = True except: use_numba =", "**kvargs): \"\"\" Computes the matrix elements given by funct, for", "matrix elements for the whole system in packed form (lower", "res = np.zeros(norbs*(norbs+1)//2, dtype=dtype) for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): #print(\"atom1 =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "dtype=dtype) for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): #print(\"atom1 = {0}, rv1 =", "import coulomb_am import numpy as np try: import numba as", "oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs) if use_numba: fill_triu_v2(oo2f, res, s1, f1,", "or implied. 
# See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "Rights Reserved. # # Licensed under the Apache License, Version", "governing permissions and # limitations under the License. from __future__", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "rv1 = {1}\".format(atom1, rv1)) for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): if atom2>atom1:", "f2)): res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2] #print(\"number call = \", count) #print(\"sum", "# You may obtain a copy of the License at", "# def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs): \"\"\" Computes the", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "import ao_matelem_c from pyscf.nao.m_pack2den import ij2pack_l aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp)", "use_numba = True except: use_numba = False # # #", "# Copyright 2014-2018 The PySCF Developers. All Rights Reserved. #", "<filename>pyscf/nao/m_comp_coulomb_pack.py<gh_stars>1-10 # Copyright 2014-2018 The PySCF Developers. 
All Rights Reserved.", "by funct, for instance coulomb interaction Args: sv : (System", "for i2 in range(s2, min(i1+1, f2)): res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2] #print(\"number", "under the License is distributed on an \"AS IS\" BASIS,", "ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp) me = ao_matelem_c(sv.ao_log) if ao_log is None else", "atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): #print(\"atom1 = {0}, rv1 = {1}\".format(atom1, rv1))", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "np try: import numba as nb from pyscf.nao.m_numba_utils import fill_triu_v2,", "min(i1+1, f2)): res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2] #print(\"number call = \", count)", "License for the specific language governing permissions and # limitations", "from pyscf.nao.m_ao_matelem import ao_matelem_c from pyscf.nao.m_pack2den import ij2pack_l aome =", "\", count) #print(\"sum kernel: {0:.6f}\".format(np.sum(abs(res)))) #np.savetxt(\"kernel_pyscf.txt\", res) #import sys #sys.exit()", "# # # def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs): \"\"\"", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", ": description of functions (either orbitals or product basis functions)", "funct=coulomb_am, dtype=np.float64, **kvargs): \"\"\" Computes the matrix elements given by", "in range(s1,f1): for i2 in range(s2, min(i1+1, f2)): res[ij2pack_l(i1,i2,norbs)] =", "Reserved. # # Licensed under the Apache License, Version 2.0", "PySCF Developers. All Rights Reserved. # # Licensed under the", "call = \", count) #print(\"sum kernel: {0:.6f}\".format(np.sum(abs(res)))) #np.savetxt(\"kernel_pyscf.txt\", res) #import", "2014-2018 The PySCF Developers. All Rights Reserved. 
# # Licensed", "range(s1,f1): for i2 in range(s2, min(i1+1, f2)): res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2]", "\"\"\" Computes the matrix elements given by funct, for instance", "use_numba = False # # # def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am,", "the License for the specific language governing permissions and #", "limitations under the License. from __future__ import print_function, division from", "= True except: use_numba = False # # # def", "etc ao_log : description of functions (either orbitals or product", "(the \"License\"); # you may not use this file except", "Apache License, Version 2.0 (the \"License\"); # you may not", "from __future__ import print_function, division from pyscf.nao.m_coulomb_am import coulomb_am import", "try: import numba as nb from pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril", "# you may not use this file except in compliance", "except: use_numba = False # # # def comp_coulomb_pack(sv, ao_log=None,", "ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs): \"\"\" Computes the matrix elements given", "either express or implied. # See the License for the", "(System Variables), this must have arrays of coordinates and species,", "(lower triangular part) \"\"\" from pyscf.nao.m_ao_matelem import ao_matelem_c from pyscf.nao.m_pack2den", "part) \"\"\" from pyscf.nao.m_ao_matelem import ao_matelem_c from pyscf.nao.m_pack2den import ij2pack_l", "= np.zeros((sv.natm+1), dtype=np.int64) for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp] norbs =", "continue # skip oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs) if use_numba: fill_triu_v2(oo2f,", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): if atom2>atom1: continue # skip oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs)", "whole system in packed form (lower triangular part) \"\"\" from", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "and # limitations under the License. from __future__ import print_function,", "ao_matelem_c from pyscf.nao.m_pack2den import ij2pack_l aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp) me", "the License is distributed on an \"AS IS\" BASIS, #", "from pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril use_numba = True except: use_numba", "ao_log is None else aome.init_one_set(ao_log) atom2s = np.zeros((sv.natm+1), dtype=np.int64) for", "atom2>atom1: continue # skip oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs) if use_numba:", "division from pyscf.nao.m_coulomb_am import coulomb_am import numpy as np try:", "**kvargs) if use_numba: fill_triu_v2(oo2f, res, s1, f1, s2, f2, norbs)", "in compliance with the License. # You may obtain a", "permissions and # limitations under the License. 
from __future__ import", "norbs) else: for i1 in range(s1,f1): for i2 in range(s2,", "else: for i1 in range(s1,f1): for i2 in range(s2, min(i1+1,", "software # distributed under the License is distributed on an", "basis functions) Returns: matrix elements for the whole system in", "dtype=np.int64) for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp] norbs = atom2s[-1] res", "as np try: import numba as nb from pyscf.nao.m_numba_utils import", "kernel: {0:.6f}\".format(np.sum(abs(res)))) #np.savetxt(\"kernel_pyscf.txt\", res) #import sys #sys.exit() return res, norbs", "Args: sv : (System Variables), this must have arrays of", "i1 in range(s1,f1): for i2 in range(s2, min(i1+1, f2)): res[ij2pack_l(i1,i2,norbs)]", "funct, for instance coulomb interaction Args: sv : (System Variables),", "for instance coulomb interaction Args: sv : (System Variables), this", "# # Unless required by applicable law or agreed to", "= atom2s[-1] res = np.zeros(norbs*(norbs+1)//2, dtype=dtype) for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):", "= False # # # def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64,", "range(s2, min(i1+1, f2)): res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2] #print(\"number call = \",", "is None else aome.init_one_set(ao_log) atom2s = np.zeros((sv.natm+1), dtype=np.int64) for atom,sp", "packed form (lower triangular part) \"\"\" from pyscf.nao.m_ao_matelem import ao_matelem_c", "system in packed form (lower triangular part) \"\"\" from pyscf.nao.m_ao_matelem", "{1}\".format(atom1, rv1)) for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): if atom2>atom1: continue #", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "def comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs): \"\"\" Computes the matrix", "in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): #print(\"atom1 = 
{0}, rv1 = {1}\".format(atom1, rv1)) for", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "\"\"\" from pyscf.nao.m_ao_matelem import ao_matelem_c from pyscf.nao.m_pack2den import ij2pack_l aome", "Version 2.0 (the \"License\"); # you may not use this", "coulomb interaction Args: sv : (System Variables), this must have", "import ij2pack_l aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp) me = ao_matelem_c(sv.ao_log) if", "# skip oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs) if use_numba: fill_triu_v2(oo2f, res,", "atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp] norbs = atom2s[-1] res = np.zeros(norbs*(norbs+1)//2, dtype=dtype) for atom1,[sp1,rv1,s1,f1]", "#print(\"atom1 = {0}, rv1 = {1}\".format(atom1, rv1)) for atom2,[sp2,rv2,s2,f2] in", "law or agreed to in writing, software # distributed under", "fill_triu_v2, fill_tril use_numba = True except: use_numba = False #", "the License. from __future__ import print_function, division from pyscf.nao.m_coulomb_am import", "this must have arrays of coordinates and species, etc ao_log", "{0}, rv1 = {1}\".format(atom1, rv1)) for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): if", "import numpy as np try: import numba as nb from", "comp_coulomb_pack(sv, ao_log=None, funct=coulomb_am, dtype=np.float64, **kvargs): \"\"\" Computes the matrix elements", "for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): #print(\"atom1 = {0}, rv1 = {1}\".format(atom1,", "rv1)) for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): if atom2>atom1: continue # skip", "if atom2>atom1: continue # skip oo2f = funct(me,sp1,rv1,sp2,rv2, **kvargs) if", "and species, etc ao_log : description of functions (either orbitals", "numba as nb from pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril use_numba =", "implied. 
# See the License for the specific language governing", "= {0}, rv1 = {1}\".format(atom1, rv1)) for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])):", "under the Apache License, Version 2.0 (the \"License\"); # you", "None else aome.init_one_set(ao_log) atom2s = np.zeros((sv.natm+1), dtype=np.int64) for atom,sp in", "else aome.init_one_set(ao_log) atom2s = np.zeros((sv.natm+1), dtype=np.int64) for atom,sp in enumerate(sv.atom2sp):", "\"License\"); # you may not use this file except in", "coulomb_am import numpy as np try: import numba as nb", "atom2s = np.zeros((sv.natm+1), dtype=np.int64) for atom,sp in enumerate(sv.atom2sp): atom2s[atom+1]=atom2s[atom]+me.ao1.sp2norbs[sp] norbs", "count) #print(\"sum kernel: {0:.6f}\".format(np.sum(abs(res)))) #np.savetxt(\"kernel_pyscf.txt\", res) #import sys #sys.exit() return", "= np.zeros(norbs*(norbs+1)//2, dtype=dtype) for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): #print(\"atom1 = {0},", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "f2, norbs) else: for i1 in range(s1,f1): for i2 in", "ij2pack_l aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp) me = ao_matelem_c(sv.ao_log) if ao_log", "in packed form (lower triangular part) \"\"\" from pyscf.nao.m_ao_matelem import", "elements given by funct, for instance coulomb interaction Args: sv", "functions) Returns: matrix elements for the whole system in packed", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. 
# See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): #print(\"atom1 = {0}, rv1 = {1}\".format(atom1, rv1)) for atom2,[sp2,rv2,s2,f2]", "= {1}\".format(atom1, rv1)) for atom2,[sp2,rv2,s2,f2] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): if atom2>atom1: continue", "The PySCF Developers. All Rights Reserved. # # Licensed under", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "= funct(me,sp1,rv1,sp2,rv2, **kvargs) if use_numba: fill_triu_v2(oo2f, res, s1, f1, s2,", "res[ij2pack_l(i1,i2,norbs)] = oo2f[i1-s1,i2-s2] #print(\"number call = \", count) #print(\"sum kernel:", "atom2s[-1] res = np.zeros(norbs*(norbs+1)//2, dtype=dtype) for atom1,[sp1,rv1,s1,f1] in enumerate(zip(sv.atom2sp,sv.atom2coord,atom2s,atom2s[1:])): #print(\"atom1", "Variables), this must have arrays of coordinates and species, etc", "the License. 
# You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", ": (System Variables), this must have arrays of coordinates and", "__future__ import print_function, division from pyscf.nao.m_coulomb_am import coulomb_am import numpy", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "for the whole system in packed form (lower triangular part)", "to in writing, software # distributed under the License is", "fill_tril use_numba = True except: use_numba = False # #", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "matrix elements given by funct, for instance coulomb interaction Args:", "given by funct, for instance coulomb interaction Args: sv :", "aome = ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp) me = ao_matelem_c(sv.ao_log) if ao_log is", "form (lower triangular part) \"\"\" from pyscf.nao.m_ao_matelem import ao_matelem_c from", "or product basis functions) Returns: matrix elements for the whole", "interaction Args: sv : (System Variables), this must have arrays", "= oo2f[i1-s1,i2-s2] #print(\"number call = \", count) #print(\"sum kernel: {0:.6f}\".format(np.sum(abs(res))))", "You may obtain a copy of the License at #", "License. from __future__ import print_function, division from pyscf.nao.m_coulomb_am import coulomb_am", "numpy as np try: import numba as nb from pyscf.nao.m_numba_utils", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "must have arrays of coordinates and species, etc ao_log :", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "with the License. 
# You may obtain a copy of", "product basis functions) Returns: matrix elements for the whole system", "this file except in compliance with the License. # You", "ao_matelem_c(sv.ao_log) if ao_log is None else aome.init_one_set(ao_log) atom2s = np.zeros((sv.natm+1),", "if ao_log is None else aome.init_one_set(ao_log) atom2s = np.zeros((sv.natm+1), dtype=np.int64)", "the Apache License, Version 2.0 (the \"License\"); # you may", "as nb from pyscf.nao.m_numba_utils import fill_triu_v2, fill_tril use_numba = True", "species, etc ao_log : description of functions (either orbitals or", "sv : (System Variables), this must have arrays of coordinates", "under the License. from __future__ import print_function, division from pyscf.nao.m_coulomb_am", "of coordinates and species, etc ao_log : description of functions", "funct(me,sp1,rv1,sp2,rv2, **kvargs) if use_numba: fill_triu_v2(oo2f, res, s1, f1, s2, f2,", "= ao_matelem_c(sv.ao_log.rr, sv.ao_log.pp) me = ao_matelem_c(sv.ao_log) if ao_log is None", "res, s1, f1, s2, f2, norbs) else: for i1 in" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "may obtain # a copy of the License at #", "True but there is some misconfiguration with the [service_user] section", "which makes KSA return None for the service user auth.", "the License. from keystoneauth1 import loading as ks_loading from keystoneauth1", "agreed to in writing, software # distributed under the License", "nova.conf from nova import context from nova import service_auth from", "Unless required by applicable law or agreed to in writing,", "def setUp(self): super(ServiceAuthTestCase, self).setUp() self.ctx = context.RequestContext('fake', 'fake') self.addCleanup(service_auth.reset_globals) @mock.patch.object(ks_loading,", "the case that send_service_user_token is True but there is some", "distributed under the License is distributed on an \"AS IS\"", "mock_load): \"\"\"Tests the case that send_service_user_token is True but there", "nova.conf.CONF class ServiceAuthTestCase(test.NoDBTestCase): def setUp(self): super(ServiceAuthTestCase, self).setUp() self.ctx = context.RequestContext('fake',", "self.assertEqual(\"fake\", result) mock_load.assert_not_called() @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_wraps(self, mock_load): self.flags(send_service_user_token=True, group='service_user')", "'fake') self.addCleanup(service_auth.reset_globals) @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_no_wraps(self, mock_load): context = mock.MagicMock()", "License, Version 2.0 (the \"License\"); you may # not use", "CONDITIONS OF ANY KIND, either express or implied. 
See the", "but there is some misconfiguration with the [service_user] section which", "obtain # a copy of the License at # #", "def test_get_auth_plugin_wraps(self, mock_load): self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper)", "applicable law or agreed to in writing, software # distributed", "section which makes KSA return None for the service user", "setUp(self): super(ServiceAuthTestCase, self).setUp() self.ctx = context.RequestContext('fake', 'fake') self.addCleanup(service_auth.reset_globals) @mock.patch.object(ks_loading, 'load_auth_from_conf_options')", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "service_auth.get_auth_plugin(self.ctx) self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper) @mock.patch.object(ks_loading, 'load_auth_from_conf_options', return_value=None) def test_get_auth_plugin_wraps_bad_config(self, mock_load): \"\"\"Tests", "context from nova import service_auth from nova import test CONF", "test_get_auth_plugin_wraps_bad_config(self, mock_load): \"\"\"Tests the case that send_service_user_token is True but", "Version 2.0 (the \"License\"); you may # not use this", "specific language governing permissions and limitations # under the License.", "return None for the service user auth. \"\"\" self.flags(send_service_user_token=True, group='service_user')", "# not use this file except in compliance with the", "not use this file except in compliance with the License.", "OF ANY KIND, either express or implied. 
See the #", "mock.MagicMock() context.get_auth_plugin.return_value = \"fake\" result = service_auth.get_auth_plugin(context) self.assertEqual(\"fake\", result) mock_load.assert_not_called()", "writing, software # distributed under the License is distributed on", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in writing, software # distributed under the License is distributed", "import service_token import mock import nova.conf from nova import context", "= service_auth.get_auth_plugin(context) self.assertEqual(\"fake\", result) mock_load.assert_not_called() @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_wraps(self, mock_load):", "in compliance with the License. You may obtain # a", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "License for the specific language governing permissions and limitations #", "service user auth. \"\"\" self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertEqual(1,", "test_get_auth_plugin_wraps(self, mock_load): self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper) @mock.patch.object(ks_loading,", "def test_get_auth_plugin_wraps_bad_config(self, mock_load): \"\"\"Tests the case that send_service_user_token is True", "some misconfiguration with the [service_user] section which makes KSA return", "@mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_no_wraps(self, mock_load): context = mock.MagicMock() context.get_auth_plugin.return_value =", "from nova import service_auth from nova import test CONF =", "@mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_wraps(self, mock_load): self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx)", "under the 
License. from keystoneauth1 import loading as ks_loading from", "the License. You may obtain # a copy of the", "def test_get_auth_plugin_no_wraps(self, mock_load): context = mock.MagicMock() context.get_auth_plugin.return_value = \"fake\" result", "mock_load): context = mock.MagicMock() context.get_auth_plugin.return_value = \"fake\" result = service_auth.get_auth_plugin(context)", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "use this file except in compliance with the License. You", "case that send_service_user_token is True but there is some misconfiguration", "You may obtain # a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "None for the service user auth. \"\"\" self.flags(send_service_user_token=True, group='service_user') result", "from keystoneauth1 import loading as ks_loading from keystoneauth1 import service_token", "self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper) @mock.patch.object(ks_loading, 'load_auth_from_conf_options', return_value=None) def test_get_auth_plugin_wraps_bad_config(self, mock_load): \"\"\"Tests the", "import test CONF = nova.conf.CONF class ServiceAuthTestCase(test.NoDBTestCase): def setUp(self): super(ServiceAuthTestCase,", "that send_service_user_token is True but there is some misconfiguration with", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "CONF = nova.conf.CONF class ServiceAuthTestCase(test.NoDBTestCase): def setUp(self): super(ServiceAuthTestCase, self).setUp() self.ctx", "nova import test CONF = nova.conf.CONF class ServiceAuthTestCase(test.NoDBTestCase): def setUp(self):", "group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper) @mock.patch.object(ks_loading, 'load_auth_from_conf_options', return_value=None) def", 
"self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper) @mock.patch.object(ks_loading, 'load_auth_from_conf_options', return_value=None)", "\"\"\" self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertEqual(1, mock_load.call_count) self.assertNotIsInstance(result, service_token.ServiceTokenAuthWrapper)", "service_token import mock import nova.conf from nova import context from", "either express or implied. See the # License for the", "as ks_loading from keystoneauth1 import service_token import mock import nova.conf", "= service_auth.get_auth_plugin(self.ctx) self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper) @mock.patch.object(ks_loading, 'load_auth_from_conf_options', return_value=None) def test_get_auth_plugin_wraps_bad_config(self, mock_load):", "under the License is distributed on an \"AS IS\" BASIS,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "context.get_auth_plugin.return_value = \"fake\" result = service_auth.get_auth_plugin(context) self.assertEqual(\"fake\", result) mock_load.assert_not_called() @mock.patch.object(ks_loading,", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "self).setUp() self.ctx = context.RequestContext('fake', 'fake') self.addCleanup(service_auth.reset_globals) @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_no_wraps(self,", "= \"fake\" result = service_auth.get_auth_plugin(context) self.assertEqual(\"fake\", result) mock_load.assert_not_called() @mock.patch.object(ks_loading, 'load_auth_from_conf_options')", "@mock.patch.object(ks_loading, 'load_auth_from_conf_options', return_value=None) def test_get_auth_plugin_wraps_bad_config(self, mock_load): \"\"\"Tests the case that", "may # not use this file except in compliance with", 
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "with the License. You may obtain # a copy of", "KIND, either express or implied. See the # License for", "# License for the specific language governing permissions and limitations", "keystoneauth1 import service_token import mock import nova.conf from nova import", "return_value=None) def test_get_auth_plugin_wraps_bad_config(self, mock_load): \"\"\"Tests the case that send_service_user_token is", "= nova.conf.CONF class ServiceAuthTestCase(test.NoDBTestCase): def setUp(self): super(ServiceAuthTestCase, self).setUp() self.ctx =", "you may # not use this file except in compliance", "\"License\"); you may # not use this file except in", "# under the License. from keystoneauth1 import loading as ks_loading", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "import service_auth from nova import test CONF = nova.conf.CONF class", "test_get_auth_plugin_no_wraps(self, mock_load): context = mock.MagicMock() context.get_auth_plugin.return_value = \"fake\" result =", "= context.RequestContext('fake', 'fake') self.addCleanup(service_auth.reset_globals) @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_no_wraps(self, mock_load): context", "from nova import context from nova import service_auth from nova", "express or implied. See the # License for the specific", "this file except in compliance with the License. You may", "language governing permissions and limitations # under the License. from", "compliance with the License. You may obtain # a copy", "the service user auth. 
\"\"\" self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx)", "the Apache License, Version 2.0 (the \"License\"); you may #", "service_auth from nova import test CONF = nova.conf.CONF class ServiceAuthTestCase(test.NoDBTestCase):", "result = service_auth.get_auth_plugin(context) self.assertEqual(\"fake\", result) mock_load.assert_not_called() @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_wraps(self,", "for the service user auth. \"\"\" self.flags(send_service_user_token=True, group='service_user') result =", "= mock.MagicMock() context.get_auth_plugin.return_value = \"fake\" result = service_auth.get_auth_plugin(context) self.assertEqual(\"fake\", result)", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "'load_auth_from_conf_options', return_value=None) def test_get_auth_plugin_wraps_bad_config(self, mock_load): \"\"\"Tests the case that send_service_user_token", "mock_load): self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper) @mock.patch.object(ks_loading, 'load_auth_from_conf_options',", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "import loading as ks_loading from keystoneauth1 import service_token import mock", "See the # License for the specific language governing permissions", "software # distributed under the License is distributed on an", "(the \"License\"); you may # not use this file except", "with the [service_user] section which makes KSA return None for", "context = mock.MagicMock() context.get_auth_plugin.return_value = \"fake\" result = service_auth.get_auth_plugin(context) self.assertEqual(\"fake\",", "ServiceAuthTestCase(test.NoDBTestCase): def setUp(self): super(ServiceAuthTestCase, self).setUp() self.ctx = context.RequestContext('fake', 'fake') 
self.addCleanup(service_auth.reset_globals)", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "the [service_user] section which makes KSA return None for the", "the # License for the specific language governing permissions and", "\"fake\" result = service_auth.get_auth_plugin(context) self.assertEqual(\"fake\", result) mock_load.assert_not_called() @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def", "result) mock_load.assert_not_called() @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_wraps(self, mock_load): self.flags(send_service_user_token=True, group='service_user') result", "service_token.ServiceTokenAuthWrapper) @mock.patch.object(ks_loading, 'load_auth_from_conf_options', return_value=None) def test_get_auth_plugin_wraps_bad_config(self, mock_load): \"\"\"Tests the case", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "# # Unless required by applicable law or agreed to", "KSA return None for the service user auth. \"\"\" self.flags(send_service_user_token=True,", "auth. \"\"\" self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertEqual(1, mock_load.call_count) self.assertNotIsInstance(result,", "self.ctx = context.RequestContext('fake', 'fake') self.addCleanup(service_auth.reset_globals) @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_no_wraps(self, mock_load):", "keystoneauth1 import loading as ks_loading from keystoneauth1 import service_token import", "'load_auth_from_conf_options') def test_get_auth_plugin_no_wraps(self, mock_load): context = mock.MagicMock() context.get_auth_plugin.return_value = \"fake\"", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "mock import nova.conf from nova import context from nova import", "file except in compliance with the License. 
You may obtain", "nova import context from nova import service_auth from nova import", "misconfiguration with the [service_user] section which makes KSA return None", "for the specific language governing permissions and limitations # under", "nova import service_auth from nova import test CONF = nova.conf.CONF", "law or agreed to in writing, software # distributed under", "from keystoneauth1 import service_token import mock import nova.conf from nova", "mock_load.assert_not_called() @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_wraps(self, mock_load): self.flags(send_service_user_token=True, group='service_user') result =", "OR CONDITIONS OF ANY KIND, either express or implied. See", "the specific language governing permissions and limitations # under the", "service_auth.get_auth_plugin(context) self.assertEqual(\"fake\", result) mock_load.assert_not_called() @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_wraps(self, mock_load): self.flags(send_service_user_token=True,", "user auth. \"\"\" self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertEqual(1, mock_load.call_count)", "License. from keystoneauth1 import loading as ks_loading from keystoneauth1 import", "under the Apache License, Version 2.0 (the \"License\"); you may", "except in compliance with the License. You may obtain #", "2.0 (the \"License\"); you may # not use this file", "implied. See the # License for the specific language governing", "import context from nova import service_auth from nova import test", "and limitations # under the License. from keystoneauth1 import loading", "loading as ks_loading from keystoneauth1 import service_token import mock import", "makes KSA return None for the service user auth. \"\"\"", "send_service_user_token is True but there is some misconfiguration with the", "License. 
You may obtain # a copy of the License", "super(ServiceAuthTestCase, self).setUp() self.ctx = context.RequestContext('fake', 'fake') self.addCleanup(service_auth.reset_globals) @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "ANY KIND, either express or implied. See the # License", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "'load_auth_from_conf_options') def test_get_auth_plugin_wraps(self, mock_load): self.flags(send_service_user_token=True, group='service_user') result = service_auth.get_auth_plugin(self.ctx) self.assertIsInstance(result,", "[service_user] section which makes KSA return None for the service", "# Unless required by applicable law or agreed to in", "self.addCleanup(service_auth.reset_globals) @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_no_wraps(self, mock_load): context = mock.MagicMock() context.get_auth_plugin.return_value", "from nova import test CONF = nova.conf.CONF class ServiceAuthTestCase(test.NoDBTestCase): def", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "permissions and limitations # under the License. 
from keystoneauth1 import", "\"\"\"Tests the case that send_service_user_token is True but there is", "to in writing, software # distributed under the License is", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "there is some misconfiguration with the [service_user] section which makes", "is some misconfiguration with the [service_user] section which makes KSA", "result = service_auth.get_auth_plugin(self.ctx) self.assertIsInstance(result, service_token.ServiceTokenAuthWrapper) @mock.patch.object(ks_loading, 'load_auth_from_conf_options', return_value=None) def test_get_auth_plugin_wraps_bad_config(self,", "or agreed to in writing, software # distributed under the", "governing permissions and limitations # under the License. from keystoneauth1", "import nova.conf from nova import context from nova import service_auth", "class ServiceAuthTestCase(test.NoDBTestCase): def setUp(self): super(ServiceAuthTestCase, self).setUp() self.ctx = context.RequestContext('fake', 'fake')", "import mock import nova.conf from nova import context from nova", "required by applicable law or agreed to in writing, software", "test CONF = nova.conf.CONF class ServiceAuthTestCase(test.NoDBTestCase): def setUp(self): super(ServiceAuthTestCase, self).setUp()", "is True but there is some misconfiguration with the [service_user]", "ks_loading from keystoneauth1 import service_token import mock import nova.conf from", "context.RequestContext('fake', 'fake') self.addCleanup(service_auth.reset_globals) @mock.patch.object(ks_loading, 'load_auth_from_conf_options') def test_get_auth_plugin_no_wraps(self, mock_load): context =", "limitations # under the License. from keystoneauth1 import loading as", "or implied. See the # License for the specific language", "Apache License, Version 2.0 (the \"License\"); you may # not" ]
[ "source == 'usps' or target == 'usps': return usps.Feature() elif", "target): if source == 'usps' or target == 'usps': return", "target, pixelda=False): if source == 'usps' or target == 'usps':", "== 'usps': return usps.Predictor() if source == 'svhn': return svhn2mnist.Predictor()", "import syndig2svhn def Generator(source, target, pixelda=False): if source == 'usps'", "== 'usps' or target == 'usps': return usps.Feature() elif source", "source == 'usps' or target == 'usps': return usps.Predictor() if", "usps.Predictor() if source == 'svhn': return svhn2mnist.Predictor() if source ==", "return usps.Predictor() if source == 'svhn': return svhn2mnist.Predictor() if source", "or target == 'usps': return usps.Feature() elif source == 'svhn':", "import usps import syn2gtrsb import syndig2svhn def Generator(source, target, pixelda=False):", "'synth': return syn2gtrsb.Feature() def Classifier(source, target): if source == 'usps'", "source == 'svhn': return svhn2mnist.Predictor() if source == 'synth': return", "== 'synth': return syn2gtrsb.Feature() def Classifier(source, target): if source ==", "return syn2gtrsb.Feature() def Classifier(source, target): if source == 'usps' or", "elif source == 'svhn': return svhn2mnist.Feature() elif source == 'synth':", "Generator(source, target, pixelda=False): if source == 'usps' or target ==", "elif source == 'synth': return syn2gtrsb.Feature() def Classifier(source, target): if", "import svhn2mnist import usps import syn2gtrsb import syndig2svhn def Generator(source,", "'usps' or target == 'usps': return usps.Predictor() if source ==", "'usps': return usps.Predictor() if source == 'svhn': return svhn2mnist.Predictor() if", "target == 'usps': return usps.Predictor() if source == 'svhn': return", "svhn2mnist.Feature() elif source == 'synth': return syn2gtrsb.Feature() def Classifier(source, target):", "syn2gtrsb import syndig2svhn def Generator(source, target, pixelda=False): if source ==", "syn2gtrsb.Feature() def Classifier(source, 
target): if source == 'usps' or target", "if source == 'usps' or target == 'usps': return usps.Predictor()", "'svhn': return svhn2mnist.Feature() elif source == 'synth': return syn2gtrsb.Feature() def", "svhn2mnist import usps import syn2gtrsb import syndig2svhn def Generator(source, target,", "'usps' or target == 'usps': return usps.Feature() elif source ==", "return svhn2mnist.Feature() elif source == 'synth': return syn2gtrsb.Feature() def Classifier(source,", "syndig2svhn def Generator(source, target, pixelda=False): if source == 'usps' or", "source == 'synth': return syn2gtrsb.Feature() def Classifier(source, target): if source", "usps.Feature() elif source == 'svhn': return svhn2mnist.Feature() elif source ==", "Classifier(source, target): if source == 'usps' or target == 'usps':", "or target == 'usps': return usps.Predictor() if source == 'svhn':", "if source == 'svhn': return svhn2mnist.Predictor() if source == 'synth':", "import syn2gtrsb import syndig2svhn def Generator(source, target, pixelda=False): if source", "return usps.Feature() elif source == 'svhn': return svhn2mnist.Feature() elif source", "== 'usps' or target == 'usps': return usps.Predictor() if source", "def Classifier(source, target): if source == 'usps' or target ==", "== 'svhn': return svhn2mnist.Predictor() if source == 'synth': return syn2gtrsb.Predictor()", "if source == 'usps' or target == 'usps': return usps.Feature()", "pixelda=False): if source == 'usps' or target == 'usps': return", "source == 'svhn': return svhn2mnist.Feature() elif source == 'synth': return", "target == 'usps': return usps.Feature() elif source == 'svhn': return", "def Generator(source, target, pixelda=False): if source == 'usps' or target", "usps import syn2gtrsb import syndig2svhn def Generator(source, target, pixelda=False): if", "'usps': return usps.Feature() elif source == 'svhn': return svhn2mnist.Feature() elif", "== 'svhn': return svhn2mnist.Feature() elif source == 'synth': return syn2gtrsb.Feature()", 
"== 'usps': return usps.Feature() elif source == 'svhn': return svhn2mnist.Feature()" ]
[ "= 0.7) -> Tensor: sim = cos_sim_matrix(z_origin, z_noisy) exp_sim =", "= a.norm(dim=1), b.norm(dim=1) a_norm = a / torch.clamp(a_n.unsqueeze(1), min=eps) b_norm", "-> None: \"\"\" Args: reduction (str) \"\"\" super().__init__(reduction=reduction) self.reduction =", "and <NAME>, \"Representation Learning with Contrastive Predictive Coding,\" ArXiv:1807.03748 [cs.LG],", "<reponame>pfnet-research/deep-table<filename>deep_table/nn/models/loss/info_nce_loss.py import torch from torch import Tensor from torch.nn.modules.loss import", "loss def cos_sim_matrix(a: Tensor, b: Tensor, eps: float = 1e-8)", "sim = cos_sim_matrix(z_origin, z_noisy) exp_sim = torch.exp(sim / t) loss", "cos_sim_matrix(z_origin, z_noisy) exp_sim = torch.exp(sim / t) loss = -torch.log(torch.diagonal(exp_sim)", "a / torch.clamp(a_n.unsqueeze(1), min=eps) b_norm = b / torch.clamp(b_n.unsqueeze(1), min=eps)", "= b / torch.clamp(b_n.unsqueeze(1), min=eps) sim_matrix = torch.mm(a_norm, b_norm.transpose(0, 1))", "torch import Tensor from torch.nn.modules.loss import _Loss class InfoNCELoss(_Loss): \"\"\"Info", "class InfoNCELoss(_Loss): \"\"\"Info NCE Loss. A type of contrastive loss", "ArXiv:1807.03748 [cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2> \"\"\" def __init__(self, reduction: str =", "0.7) -> Tensor: sim = cos_sim_matrix(z_origin, z_noisy) exp_sim = torch.exp(sim", "reduction: str = \"sum\") -> None: \"\"\" Args: reduction (str)", "b_norm = b / torch.clamp(b_n.unsqueeze(1), min=eps) sim_matrix = torch.mm(a_norm, b_norm.transpose(0,", "b_n = a.norm(dim=1), b.norm(dim=1) a_norm = a / torch.clamp(a_n.unsqueeze(1), min=eps)", "self.reduction = reduction def forward(self, z_origin: Tensor, z_noisy: Tensor, t:", "InfoNCELoss(_Loss): \"\"\"Info NCE Loss. A type of contrastive loss function", "Loss. A type of contrastive loss function used for self-supervised", "Tensor, z_noisy: Tensor, t: float = 0.7) -> Tensor: sim", "_Loss class InfoNCELoss(_Loss): \"\"\"Info NCE Loss. 
A type of contrastive", "= \"sum\") -> None: \"\"\" Args: reduction (str) \"\"\" super().__init__(reduction=reduction)", "loss = -torch.log(torch.diagonal(exp_sim) / exp_sim.sum(1)) if self.reduction == \"sum\": loss", "Predictive Coding,\" ArXiv:1807.03748 [cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2> \"\"\" def __init__(self, reduction:", "learning. References: <NAME>, <NAME>, and <NAME>, \"Representation Learning with Contrastive", "\"\"\" def __init__(self, reduction: str = \"sum\") -> None: \"\"\"", "\"Representation Learning with Contrastive Predictive Coding,\" ArXiv:1807.03748 [cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2>", "torch.nn.modules.loss import _Loss class InfoNCELoss(_Loss): \"\"\"Info NCE Loss. A type", "from torch.nn.modules.loss import _Loss class InfoNCELoss(_Loss): \"\"\"Info NCE Loss. A", "contrastive loss function used for self-supervised learning. References: <NAME>, <NAME>,", "eps: float = 1e-8) -> Tensor: a_n, b_n = a.norm(dim=1),", "loss = loss.mean() return loss def cos_sim_matrix(a: Tensor, b: Tensor,", "t) loss = -torch.log(torch.diagonal(exp_sim) / exp_sim.sum(1)) if self.reduction == \"sum\":", "super().__init__(reduction=reduction) self.reduction = reduction def forward(self, z_origin: Tensor, z_noisy: Tensor,", "Args: reduction (str) \"\"\" super().__init__(reduction=reduction) self.reduction = reduction def forward(self,", "= 1e-8) -> Tensor: a_n, b_n = a.norm(dim=1), b.norm(dim=1) a_norm", "torch from torch import Tensor from torch.nn.modules.loss import _Loss class", "<NAME>, \"Representation Learning with Contrastive Predictive Coding,\" ArXiv:1807.03748 [cs.LG], 2018.", "forward(self, z_origin: Tensor, z_noisy: Tensor, t: float = 0.7) ->", "2018. <https://arxiv.org/abs/1807.03748v2> \"\"\" def __init__(self, reduction: str = \"sum\") ->", "type of contrastive loss function used for self-supervised learning. 
References:", "import Tensor from torch.nn.modules.loss import _Loss class InfoNCELoss(_Loss): \"\"\"Info NCE", "<https://arxiv.org/abs/1807.03748v2> \"\"\" def __init__(self, reduction: str = \"sum\") -> None:", "function used for self-supervised learning. References: <NAME>, <NAME>, and <NAME>,", "reduction (str) \"\"\" super().__init__(reduction=reduction) self.reduction = reduction def forward(self, z_origin:", "Learning with Contrastive Predictive Coding,\" ArXiv:1807.03748 [cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2> \"\"\"", "== \"mean\": loss = loss.mean() return loss def cos_sim_matrix(a: Tensor,", "of contrastive loss function used for self-supervised learning. References: <NAME>,", "/ torch.clamp(b_n.unsqueeze(1), min=eps) sim_matrix = torch.mm(a_norm, b_norm.transpose(0, 1)) return sim_matrix", "z_noisy: Tensor, t: float = 0.7) -> Tensor: sim =", "loss = loss.sum() elif self.reduction == \"mean\": loss = loss.mean()", "a_norm = a / torch.clamp(a_n.unsqueeze(1), min=eps) b_norm = b /", "= -torch.log(torch.diagonal(exp_sim) / exp_sim.sum(1)) if self.reduction == \"sum\": loss =", "b: Tensor, eps: float = 1e-8) -> Tensor: a_n, b_n", "b / torch.clamp(b_n.unsqueeze(1), min=eps) sim_matrix = torch.mm(a_norm, b_norm.transpose(0, 1)) return", "self-supervised learning. References: <NAME>, <NAME>, and <NAME>, \"Representation Learning with", "\"sum\": loss = loss.sum() elif self.reduction == \"mean\": loss =", "\"\"\" Args: reduction (str) \"\"\" super().__init__(reduction=reduction) self.reduction = reduction def", "return loss def cos_sim_matrix(a: Tensor, b: Tensor, eps: float =", "with Contrastive Predictive Coding,\" ArXiv:1807.03748 [cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2> \"\"\" def", "None: \"\"\" Args: reduction (str) \"\"\" super().__init__(reduction=reduction) self.reduction = reduction", "str = \"sum\") -> None: \"\"\" Args: reduction (str) \"\"\"", "for self-supervised learning. 
References: <NAME>, <NAME>, and <NAME>, \"Representation Learning", "torch.clamp(a_n.unsqueeze(1), min=eps) b_norm = b / torch.clamp(b_n.unsqueeze(1), min=eps) sim_matrix =", "Tensor, eps: float = 1e-8) -> Tensor: a_n, b_n =", "<NAME>, <NAME>, and <NAME>, \"Representation Learning with Contrastive Predictive Coding,\"", "= a / torch.clamp(a_n.unsqueeze(1), min=eps) b_norm = b / torch.clamp(b_n.unsqueeze(1),", "= loss.mean() return loss def cos_sim_matrix(a: Tensor, b: Tensor, eps:", "Contrastive Predictive Coding,\" ArXiv:1807.03748 [cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2> \"\"\" def __init__(self,", "\"mean\": loss = loss.mean() return loss def cos_sim_matrix(a: Tensor, b:", "loss.mean() return loss def cos_sim_matrix(a: Tensor, b: Tensor, eps: float", "def __init__(self, reduction: str = \"sum\") -> None: \"\"\" Args:", "(str) \"\"\" super().__init__(reduction=reduction) self.reduction = reduction def forward(self, z_origin: Tensor,", "__init__(self, reduction: str = \"sum\") -> None: \"\"\" Args: reduction", "\"sum\") -> None: \"\"\" Args: reduction (str) \"\"\" super().__init__(reduction=reduction) self.reduction", "== \"sum\": loss = loss.sum() elif self.reduction == \"mean\": loss", "min=eps) b_norm = b / torch.clamp(b_n.unsqueeze(1), min=eps) sim_matrix = torch.mm(a_norm,", "used for self-supervised learning. 
References: <NAME>, <NAME>, and <NAME>, \"Representation", "/ t) loss = -torch.log(torch.diagonal(exp_sim) / exp_sim.sum(1)) if self.reduction ==", "self.reduction == \"mean\": loss = loss.mean() return loss def cos_sim_matrix(a:", "elif self.reduction == \"mean\": loss = loss.mean() return loss def", "Tensor, b: Tensor, eps: float = 1e-8) -> Tensor: a_n,", "import torch from torch import Tensor from torch.nn.modules.loss import _Loss", "-torch.log(torch.diagonal(exp_sim) / exp_sim.sum(1)) if self.reduction == \"sum\": loss = loss.sum()", "def cos_sim_matrix(a: Tensor, b: Tensor, eps: float = 1e-8) ->", "b.norm(dim=1) a_norm = a / torch.clamp(a_n.unsqueeze(1), min=eps) b_norm = b", "z_origin: Tensor, z_noisy: Tensor, t: float = 0.7) -> Tensor:", "Tensor, t: float = 0.7) -> Tensor: sim = cos_sim_matrix(z_origin,", "exp_sim = torch.exp(sim / t) loss = -torch.log(torch.diagonal(exp_sim) / exp_sim.sum(1))", "= loss.sum() elif self.reduction == \"mean\": loss = loss.mean() return", "A type of contrastive loss function used for self-supervised learning.", "def forward(self, z_origin: Tensor, z_noisy: Tensor, t: float = 0.7)", "loss function used for self-supervised learning. References: <NAME>, <NAME>, and", "a.norm(dim=1), b.norm(dim=1) a_norm = a / torch.clamp(a_n.unsqueeze(1), min=eps) b_norm =", "exp_sim.sum(1)) if self.reduction == \"sum\": loss = loss.sum() elif self.reduction", "Coding,\" ArXiv:1807.03748 [cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2> \"\"\" def __init__(self, reduction: str", "[cs.LG], 2018. <https://arxiv.org/abs/1807.03748v2> \"\"\" def __init__(self, reduction: str = \"sum\")", "z_noisy) exp_sim = torch.exp(sim / t) loss = -torch.log(torch.diagonal(exp_sim) /", "float = 0.7) -> Tensor: sim = cos_sim_matrix(z_origin, z_noisy) exp_sim", "<NAME>, and <NAME>, \"Representation Learning with Contrastive Predictive Coding,\" ArXiv:1807.03748", "\"\"\"Info NCE Loss. 
A type of contrastive loss function used", "\"\"\" super().__init__(reduction=reduction) self.reduction = reduction def forward(self, z_origin: Tensor, z_noisy:", "from torch import Tensor from torch.nn.modules.loss import _Loss class InfoNCELoss(_Loss):", "References: <NAME>, <NAME>, and <NAME>, \"Representation Learning with Contrastive Predictive", "cos_sim_matrix(a: Tensor, b: Tensor, eps: float = 1e-8) -> Tensor:", "Tensor: sim = cos_sim_matrix(z_origin, z_noisy) exp_sim = torch.exp(sim / t)", "-> Tensor: a_n, b_n = a.norm(dim=1), b.norm(dim=1) a_norm = a", "Tensor: a_n, b_n = a.norm(dim=1), b.norm(dim=1) a_norm = a /", "float = 1e-8) -> Tensor: a_n, b_n = a.norm(dim=1), b.norm(dim=1)", "self.reduction == \"sum\": loss = loss.sum() elif self.reduction == \"mean\":", "torch.exp(sim / t) loss = -torch.log(torch.diagonal(exp_sim) / exp_sim.sum(1)) if self.reduction", "if self.reduction == \"sum\": loss = loss.sum() elif self.reduction ==", "/ torch.clamp(a_n.unsqueeze(1), min=eps) b_norm = b / torch.clamp(b_n.unsqueeze(1), min=eps) sim_matrix", "reduction def forward(self, z_origin: Tensor, z_noisy: Tensor, t: float =", "= torch.exp(sim / t) loss = -torch.log(torch.diagonal(exp_sim) / exp_sim.sum(1)) if", "loss.sum() elif self.reduction == \"mean\": loss = loss.mean() return loss", "Tensor from torch.nn.modules.loss import _Loss class InfoNCELoss(_Loss): \"\"\"Info NCE Loss.", "/ exp_sim.sum(1)) if self.reduction == \"sum\": loss = loss.sum() elif", "= cos_sim_matrix(z_origin, z_noisy) exp_sim = torch.exp(sim / t) loss =", "t: float = 0.7) -> Tensor: sim = cos_sim_matrix(z_origin, z_noisy)", "1e-8) -> Tensor: a_n, b_n = a.norm(dim=1), b.norm(dim=1) a_norm =", "NCE Loss. A type of contrastive loss function used for", "import _Loss class InfoNCELoss(_Loss): \"\"\"Info NCE Loss. 
A type of", "a_n, b_n = a.norm(dim=1), b.norm(dim=1) a_norm = a / torch.clamp(a_n.unsqueeze(1),", "-> Tensor: sim = cos_sim_matrix(z_origin, z_noisy) exp_sim = torch.exp(sim /", "= reduction def forward(self, z_origin: Tensor, z_noisy: Tensor, t: float" ]
[ "in config}) return config def get(self, key, default=None): return self.__effective_configuration.get(key,", "_build_environment_configuration(): ret = defaultdict(dict) def _popenv(name): return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(),", "self._load_config_file() else: config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None) self._local_configuration = config_env and", "# 'exhibitor' inside 'zookeeper': if 'zookeeper' in config and 'exhibitor'", "deepcopy(value) elif name == 'standby_cluster': for name, value in (value", "config['restapi']: config['restapi']['auth'] = '{username}:{password}'.format(**config['restapi']['authentication']) # special treatment for old config", "in ('connect_address', 'listen', 'data_dir', 'pgpass', 'authentication'): config['postgresql'][name] = deepcopy(value) elif", "configuration file in the old format and converting it into", "False, 'synchronous_mode_strict': False, 'synchronous_node_count': 1, 'standby_cluster': { 'create_replica_methods': '', 'host':", "validator: error = validator(self._local_configuration) if error: raise ConfigParseError(error) self.__effective_configuration =", "logger.error('Can not close temporary file %s', tmpfile) if tmpfile and", "in ('max_queue_size', 'file_size', 'file_num'): value = ret.get('log', {}).pop(second, None) if", "DCS was accidentally wiped 3) Loading of configuration file in", "u in ('replication', 'superuser') if u in pg_config} # no", "path') overall_config = {} for fname in files: with open(fname)", "yaml.safe_load(value) except Exception: logger.exception('Exception when parsing dict %s', value) return", "= shutil.move(tmpfile, self._cache_file) self._cache_needs_saving = False except Exception: logger.exception('Exception when", "name not in config or name in ['watchdog']: config[name] =", "pg_config: pg_config['authentication']['superuser'] = pg_config['pg_rewind'] # handle setting additional connection parameters", "def default_validator(conf): if not conf: 
return \"Config is empty.\" class", "error = validator(self._local_configuration) if error: raise ConfigParseError(error) self.__effective_configuration = self._build_effective_configuration({},", "(value or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif name", "or _AUTH_ALLOWED_PARAMETERS[:2]: value = _popenv(name + '_' + param) if", "= {u: pg_config[u] for u in ('replication', 'superuser') if u", "self.__CACHE_FILENAME) self._load_cache() self._cache_needs_saving = False @property def config_file(self): return self._config_file", "`dict` interfaces to make it possible to work with it", "config or name in ['watchdog']: config[name] = deepcopy(value) if value", "patch_config(overall_config, config) return overall_config def _load_config_file(self): \"\"\"Loads config.yaml from filesystem", "patroni.utils import deep_compare, parse_bool, parse_int, patch_config logger = logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS", "'_' + param) if value: ret[section][param] = value _set_section_values('restapi', ['listen',", "1) + [''])[:2] # PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<option1,option2,...> # CREATE USER \"<username>\"", "'loggers']) _set_section_values('raft', ['data_dir', 'self_addr', 'partner_addrs', 'password', 'bind_addr']) for first, second", "if 'restapi' in config and 'authentication' in config['restapi']: config['restapi']['auth'] =", "'patroni.dynamic.json' __DEFAULT_CONFIG = { 'ttl': 30, 'loop_wait': 10, 'retry_timeout': 10,", "_load_config_path(self, path): \"\"\" If path is a file, loads the", "p, v in ConfigHandler.CMDLINE_OPTIONS.items() if p not in ('wal_keep_segments', 'wal_keep_size')})", "_fix_log_env(name, oldname) def _set_section_values(section, params): for param in params: value", "second in (('restapi', 'allowlist_include_members'), ('ctl', 'insecure')): value = ret.get(first, {}).pop(second,", "value = ret.get(first, {}).pop(second, 
None) if value: value = _parse_list(value)", "in ('etcd', 'etcd3'): if dcs in ret: ret[dcs].update(_get_auth(dcs)) users =", "self._load_config_path(self._config_file) patch_config(config, self.__environment_configuration) return config def _load_cache(self): if os.path.isfile(self._cache_file): try:", "order \"\"\" if os.path.isfile(path): files = [path] elif os.path.isdir(path): files", "%s', self._cache_file) def save_cache(self): if self._cache_needs_saving: tmpfile = fd =", "params or _AUTH_ALLOWED_PARAMETERS[:2]: value = _popenv(name + '_' + param)", "'create_replica_methods': '', 'host': '', 'port': '', 'primary_slot_name': '', 'restore_command': '',", "'', 'restore_command': '', 'archive_cleanup_command': '', 'recovery_min_apply_delay': '' }, 'postgresql': {", "'[' in value): value = '[{0}]'.format(value) try: return yaml.safe_load(value) except", "'primary_slot_name': '', 'restore_command': '', 'archive_cleanup_command': '', 'recovery_min_apply_delay': '' }, 'postgresql':", "in DCS * `local_configuration` -- configuration from `config.yml` or environment", "first, second in (('restapi', 'allowlist_include_members'), ('ctl', 'insecure')): value = ret.get(first,", "else: logger.error('config path %s is neither directory nor file', path)", "PATRONI_ENV_PREFIX + 'CONFIGURATION' __CACHE_FILENAME = 'patroni.dynamic.json' __DEFAULT_CONFIG = { 'ttl':", "for second in params: value = ret.get(first, {}).pop(second, None) if", "not value.strip().startswith('{'): value = '{{{0}}}'.format(value) try: return yaml.safe_load(value) except Exception:", "'PORT', 'USE_PROXIES', 'PROTOCOL', 'SRV', 'SRV_SUFFIX', 'URL', 'PROXY', 'CACERT', 'CERT', 'KEY',", "pg_config} # no 'superuser' in 'postgresql'.'authentication' if 'superuser' not in", "None for first, second in (('raft', 'partner_addrs'), ('restapi', 'allowlist')): value", "{}).items() if name not in ConfigHandler.CMDLINE_OPTIONS or not is_local and", "False except Exception: logger.exception('Exception when saving file: 
%s', self._cache_file) if", "updated_fields if p in config}) return config def get(self, key,", "{} for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): name, suffix =", "restapi.auth = 'username:password' if 'restapi' in config and 'authentication' in", "'KEY', 'VERIFY', 'TOKEN', 'CHECKS', 'DC', 'CONSISTENCY', 'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'NAMESPACE', 'CONTEXT',", "1) + [''])[:2] if suffix in ('HOST', 'HOSTS', 'PORT', 'USE_PROXIES',", "name not in ConfigHandler.CMDLINE_OPTIONS or not is_local and ConfigHandler.CMDLINE_OPTIONS[name][1](value)} def", "try: os.close(fd) except Exception: logger.error('Can not close temporary file %s',", "u in pg_config} # no 'superuser' in 'postgresql'.'authentication' if 'superuser'", "= value return ret restapi_auth = _get_auth('restapi') if restapi_auth: ret['restapi']['authentication']", "value = _parse_dict(value) elif suffix in ('USE_PROXIES', 'REGISTER_SERVICE', 'USE_ENDPOINTS', 'BYPASS_API_SERVICE',", "name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value, True)) elif name != 'use_slots': #", "param in params or _AUTH_ALLOWED_PARAMETERS[:2]: value = _popenv(name + '_'", "in ('USE_PROXIES', 'REGISTER_SERVICE', 'USE_ENDPOINTS', 'BYPASS_API_SERVICE', 'VERIFY'): value = parse_bool(value) if", "defaultdict(dict) def _popenv(name): return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None) for param", "configuration = configuration.data if not deep_compare(self._dynamic_configuration, configuration): try: self.__effective_configuration =", "neither directory nor file', path) raise ConfigParseError('invalid config path') overall_config", "if DCS was accidentally wiped 3) Loading of configuration file", "return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None) for param in ('name', 'namespace',", "False, 'master_start_timeout': 300, 'master_stop_timeout': 0, 'synchronous_mode': False, 'synchronous_mode_strict': False, 
'synchronous_node_count':", "parse_int(value) elif suffix in ('HOSTS', 'PORTS', 'CHECKS'): value = value", "in local_configuration.items(): if name == 'postgresql': for name, value in", "{ 'ttl': 30, 'loop_wait': 10, 'retry_timeout': 10, 'maximum_lag_on_failover': 1048576, 'maximum_lag_on_syncnode':", "saving file: %s', self._cache_file) if fd: try: os.close(fd) except Exception:", "config and 'name' in pg_config: config['name'] = pg_config['name'] updated_fields =", "from patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler from patroni.utils import deep_compare, parse_bool,", "or dict def set_dynamic_configuration(self, configuration): if isinstance(configuration, ClusterConfig): if self._modify_index", "the yml file pointed to by path. If path is", "tmpfile) # configuration could be either ClusterConfig or dict def", "0, 'synchronous_mode': False, 'synchronous_mode_strict': False, 'synchronous_node_count': 1, 'standby_cluster': { 'create_replica_methods':", "ret[section][param] = value _set_section_values('restapi', ['listen', 'connect_address', 'certfile', 'keyfile', 'keyfile_password', 'cafile',", "= options if users: ret['bootstrap']['users'] = users return ret def", "if value: ret[first][second] = value def _parse_dict(value): if not value.strip().startswith('{'):", "os.environ: os.environ[name] = value for name, oldname in (('level', 'loglevel'),", "} def __init__(self, configfile, validator=default_validator): self._modify_index = -1 self._dynamic_configuration =", "= None json.dump(self.dynamic_configuration, f) tmpfile = shutil.move(tmpfile, self._cache_file) self._cache_needs_saving =", "if self._config_file: self._local_configuration = self._load_config_file() else: config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None)", "self._data_dir = self.__effective_configuration.get('postgresql', {}).get('data_dir', \"\") self._cache_file = os.path.join(self._data_dir, self.__CACHE_FILENAME) self._load_cache()", "Exception: 
logger.error('Can not remove temporary file %s', tmpfile) # configuration", "to do self._modify_index = configuration.modify_index configuration = configuration.data if not", "config['zookeeper'].pop('exhibitor') config.pop('zookeeper') pg_config = config['postgresql'] # no 'authentication' in 'postgresql',", "= deepcopy(value) if value else {} # restapi server expects", "'bin_dir': '', 'use_slots': True, 'parameters': CaseInsensitiveDict({p: v[0] for p, v", "}, 'watchdog': { 'mode': 'automatic', } } def __init__(self, configfile,", "logger.exception('Exception when parsing list %s', value) return None for first,", "'master_start_timeout': 300, 'master_stop_timeout': 0, 'synchronous_mode': False, 'synchronous_mode_strict': False, 'synchronous_node_count': 1,", "may be available # in the configuration file, such as", "and ConfigHandler.CMDLINE_OPTIONS[name][1](value)} def _safe_copy_dynamic_configuration(self, dynamic_configuration): config = deepcopy(self.__DEFAULT_CONFIG) for name,", "= deepcopy(self.__DEFAULT_CONFIG) for name, value in dynamic_configuration.items(): if name ==", "`Config.__DEFAULT_CONFIG` -- some sane default values * `dynamic_configuration` -- configuration", "'PROTOCOL', 'SRV', 'SRV_SUFFIX', 'URL', 'PROXY', 'CACERT', 'CERT', 'KEY', 'VERIFY', 'TOKEN',", "'partner_addrs', 'password', 'bind_addr']) for first, second in (('restapi', 'allowlist_include_members'), ('ctl',", "value: value = _parse_list(value) if value: ret[first][second] = value def", "def _parse_dict(value): if not value.strip().startswith('{'): value = '{{{0}}}'.format(value) try: return", "overall_config = {} for fname in files: with open(fname) as", "possible to work with it as with the old `config`", "from patroni.exceptions import ConfigParseError from patroni.dcs import ClusterConfig from patroni.postgresql.config", "None) if value: value = _parse_dict(value) if value: ret[first][second] =", "options: users[name]['options'] = options if users: ret['bootstrap']['users'] = users 
return", "environment self._config_file = configfile and os.path.exists(configfile) and configfile if self._config_file:", "except Exception: logger.exception('Exception when reloading local configuration from %s', self.config_file)", "to `effective_configuration` from: * `Config.__DEFAULT_CONFIG` -- some sane default values", "access to `effective_configuration` from: * `Config.__DEFAULT_CONFIG` -- some sane default", "special treatment for old config # 'exhibitor' inside 'zookeeper': if", "value in (value or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value))", "old format and converting it into new format 4) Mimicking", "configuration items changed.') except Exception: logger.exception('Exception when reloading local configuration", "def reload_local_configuration(self): if self.config_file: try: configuration = self._load_config_file() if not", "(f.endswith('.yml') or f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))] else: logger.error('config path %s", "path. 
If path is a directory, loads all yml files", "CREATE USER \"<username>\" WITH <OPTIONS> PASSWORD '<password>' if name and", "of the `dict` interfaces to make it possible to work", "for name, value in dynamic_configuration.items(): if name == 'postgresql': for", "get(self, key, default=None): return self.__effective_configuration.get(key, default) def __contains__(self, key): return", "tmpfile) = tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir) with os.fdopen(fd, 'w') as f: fd", "= {} for fname in files: with open(fname) as f:", "config}) return config def get(self, key, default=None): return self.__effective_configuration.get(key, default)", "= value for dcs in ('etcd', 'etcd3'): if dcs in", "value: ret[first][second] = value def _parse_dict(value): if not value.strip().startswith('{'): value", "not in ConfigHandler.CMDLINE_OPTIONS or not is_local and ConfigHandler.CMDLINE_OPTIONS[name][1](value)} def _safe_copy_dynamic_configuration(self,", "if value is not None: ret['log'][second] = value def _parse_list(value):", "name = PATRONI_ENV_PREFIX + 'LOG_' + name.upper() if value and", "{}).pop(second, None) if value: value = _parse_list(value) if value: ret[first][second]", "config: # only variables present in __DEFAULT_CONFIG allowed to be", "('LABELS', 'SET_ACLS'): value = _parse_dict(value) elif suffix in ('USE_PROXIES', 'REGISTER_SERVICE',", "PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<option1,option2,...> # CREATE USER \"<username>\" WITH <OPTIONS> PASSWORD '<password>'", "( 'username', 'password', 'sslmode', 'sslcert', 'sslkey', 'sslpassword', 'sslrootcert', 'sslcrl', 'sslcrldir',", "{}).get('data_dir', \"\") self._cache_file = os.path.join(self._data_dir, self.__CACHE_FILENAME) self._load_cache() self._cache_needs_saving = False", "('name', 'namespace', 'scope'): value = _popenv(param) if value: ret[param] =", "True except Exception: logger.exception('Exception when setting dynamic_configuration') def 
reload_local_configuration(self): if", "CaseInsensitiveDict({p: v[0] for p, v in ConfigHandler.CMDLINE_OPTIONS.items() if p not", "were set via ENV\"\"\" config = self._load_config_path(self._config_file) patch_config(config, self.__environment_configuration) return", "value else {} # restapi server expects to get restapi.auth", "for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): # PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..) name, suffix", "param in ('name', 'namespace', 'scope'): value = _popenv(param) if value:", "shutil import tempfile import yaml from collections import defaultdict from", "for: 1) Building and giving access to `effective_configuration` from: *", "logger.error('config path %s is neither directory nor file', path) raise", "+ '_' + param) if value: ret[param] = value return", "pg_config['authentication'] = {u: pg_config[u] for u in ('replication', 'superuser') if", "= os.path.join(self._data_dir, self.__CACHE_FILENAME) self._load_cache() self._cache_needs_saving = False @property def config_file(self):", "def _load_config_file(self): \"\"\"Loads config.yaml from filesystem and applies some values", "os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None) self._local_configuration = config_env and yaml.safe_load(config_env) or self.__environment_configuration if", "= os.environ.pop(param[:-9] + '_OPTIONS', None) options = options and _parse_list(options)", "ConfigParseError from patroni.dcs import ClusterConfig from patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler", "changed there is nothing to do self._modify_index = configuration.modify_index configuration", "in ['watchdog']: config[name] = deepcopy(value) if value else {} #", "'file_num', 'loggers']) _set_section_values('raft', ['data_dir', 'self_addr', 'partner_addrs', 'password', 'bind_addr']) for first,", "'config_dir', 'data_dir', 'pgpass', 'bin_dir']) _set_section_values('log', ['level', 'traceback_level', 'format', 'dateformat', 
'max_queue_size',", "in (value or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value, True))", "dir=self._data_dir) with os.fdopen(fd, 'w') as f: fd = None json.dump(self.dynamic_configuration,", "= self._build_effective_configuration(configuration, self._local_configuration) self._dynamic_configuration = configuration self._cache_needs_saving = True return", "in ret: ret[dcs].update(_get_auth(dcs)) users = {} for param in list(os.environ.keys()):", "( 'name', 'scope', 'retry_timeout', 'synchronous_mode', 'synchronous_mode_strict', 'synchronous_node_count', 'maximum_lag_on_syncnode' ) pg_config.update({p:", "= False except Exception: logger.exception('Exception when saving file: %s', self._cache_file)", "import deep_compare, parse_bool, parse_int, patch_config logger = logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS =", "__DEFAULT_CONFIG = { 'ttl': 30, 'loop_wait': 10, 'retry_timeout': 10, 'maximum_lag_on_failover':", "return ret restapi_auth = _get_auth('restapi') if restapi_auth: ret['restapi']['authentication'] = restapi_auth", "+ 'CONFIGURATION' __CACHE_FILENAME = 'patroni.dynamic.json' __DEFAULT_CONFIG = { 'ttl': 30,", "values which were set via ENV\"\"\" config = self._load_config_path(self._config_file) patch_config(config,", "configfile if self._config_file: self._local_configuration = self._load_config_file() else: config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE,", "logger.exception('Exception when saving file: %s', self._cache_file) if fd: try: os.close(fd)", "the environment self._config_file = configfile and os.path.exists(configfile) and configfile if", "close temporary file %s', tmpfile) if tmpfile and os.path.exists(tmpfile): try:", "(param[8:].rsplit('_', 1) + [''])[:2] # PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<option1,option2,...> # CREATE USER", "['listen', 'connect_address', 'certfile', 'keyfile', 'keyfile_password', 'cafile', 'ciphers', 
'verify_client', 'http_extra_headers', 'https_extra_headers',", "ENV\"\"\" config = self._load_config_path(self._config_file) patch_config(config, self.__environment_configuration) return config def _load_cache(self):", "set via ENV\"\"\" config = self._load_config_path(self._config_file) patch_config(config, self.__environment_configuration) return config", "self._local_configuration = configuration self.__effective_configuration = new_configuration return True else: logger.info('No", "`config.yml` or environment 2) Saving and loading `dynamic_configuration` into 'patroni.dynamic.json'", "for fname in files: with open(fname) as f: config =", "value = parse_bool(value) if value: ret[name.lower()][suffix.lower()] = value for dcs", "param.startswith(PATRONI_ENV_PREFIX): name, suffix = (param[8:].rsplit('_', 1) + [''])[:2] # PATRONI_<username>_PASSWORD=<password>,", "_parse_list(value) elif suffix in ('LABELS', 'SET_ACLS'): value = _parse_dict(value) elif", "is_local=False): return {name: value for name, value in (parameters or", "['data_dir', 'self_addr', 'partner_addrs', 'password', 'bind_addr']) for first, second in (('restapi',", "configuration could be either ClusterConfig or dict def set_dynamic_configuration(self, configuration):", "return self.__effective_configuration.get(key, default) def __contains__(self, key): return key in self.__effective_configuration", "ConfigHandler.CMDLINE_OPTIONS or not is_local and ConfigHandler.CMDLINE_OPTIONS[name][1](value)} def _safe_copy_dynamic_configuration(self, dynamic_configuration): config", "+ 'LOG_' + name.upper() if value and name not in", "True return True except Exception: logger.exception('Exception when setting dynamic_configuration') def", "if value: value = _parse_dict(value) if value: ret[first][second] = value", "in (value or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif", "file, loads the yml file pointed to by path. 
If", "path) raise ConfigParseError('invalid config path') overall_config = {} for fname", "'synchronous_mode': False, 'synchronous_mode_strict': False, 'synchronous_node_count': 1, 'standby_cluster': { 'create_replica_methods': '',", "= False @property def config_file(self): return self._config_file @property def dynamic_configuration(self):", "as f: fd = None json.dump(self.dynamic_configuration, f) tmpfile = shutil.move(tmpfile,", "not in os.environ: os.environ[name] = value for name, oldname in", "ret = defaultdict(dict) def _popenv(name): return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None)", "new format 4) Mimicking some of the `dict` interfaces to", "for name, value in (value or {}).items(): if name ==", "param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): name, suffix = (param[8:].rsplit('_', 1)", "suffix in ('HOSTS', 'PORTS', 'CHECKS'): value = value and _parse_list(value)", "dynamic_configuration.items(): if name == 'postgresql': for name, value in (value", "= entry if authentication: ret['postgresql']['authentication'] = authentication for param in", "except Exception: logger.exception('Exception when loading file: %s', self._cache_file) def save_cache(self):", "value) return None for first, params in (('restapi', ('http_extra_headers', 'https_extra_headers')),", "and yaml.safe_load(config_env) or self.__environment_configuration if validator: error = validator(self._local_configuration) if", "value: ret[param] = value def _fix_log_env(name, oldname): value = _popenv(oldname)", "'exhibitor' inside 'zookeeper': if 'zookeeper' in config and 'exhibitor' in", "= configuration.modify_index configuration = configuration.data if not deep_compare(self._dynamic_configuration, configuration): try:", "dynamic_configuration): config = deepcopy(self.__DEFAULT_CONFIG) for name, value in dynamic_configuration.items(): if", "{ 'mode': 'automatic', } } def __init__(self, configfile, validator=default_validator): self._modify_index", 
"return None for first, params in (('restapi', ('http_extra_headers', 'https_extra_headers')), ('log',", "'gssencmode', 'channel_binding' ) def default_validator(conf): if not conf: return \"Config", "not in ('wal_keep_segments', 'wal_keep_size')}) }, 'watchdog': { 'mode': 'automatic', }", "from `config.yml` or environment 2) Saving and loading `dynamic_configuration` into", "= self.__effective_configuration.get('postgresql', {}).get('data_dir', \"\") self._cache_file = os.path.join(self._data_dir, self.__CACHE_FILENAME) self._load_cache() self._cache_needs_saving", "try: os.remove(tmpfile) except Exception: logger.error('Can not remove temporary file %s',", "== 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif name not in ('connect_address', 'listen', 'data_dir',", "self._build_effective_configuration(self._dynamic_configuration, configuration) self._local_configuration = configuration self.__effective_configuration = new_configuration return True", "key): return key in self.__effective_configuration def __getitem__(self, key): return self.__effective_configuration[key]", "except Exception: logger.exception('Exception when parsing dict %s', value) return None", "if authentication: ret['postgresql']['authentication'] = authentication for param in list(os.environ.keys()): if", "'PORTS', 'LABELS', 'BYPASS_API_SERVICE', 'KEY_PASSWORD', 'USE_SSL', 'SET_ACLS') and name: value =", "Exception: logger.exception('Exception when loading file: %s', self._cache_file) def save_cache(self): if", "\"<username>\" WITH <OPTIONS> PASSWORD '<password>' if name and suffix ==", "= yaml.safe_load(f) patch_config(overall_config, config) return overall_config def _load_config_file(self): \"\"\"Loads config.yaml", "_set_section_values('restapi', ['listen', 'connect_address', 'certfile', 'keyfile', 'keyfile_password', 'cafile', 'ciphers', 'verify_client', 'http_extra_headers',", "{}).pop(second, None) if value: value = _parse_dict(value) if 
value: ret[first][second]", "('connect_address', 'listen', 'data_dir', 'pgpass', 'authentication'): config['postgresql'][name] = deepcopy(value) elif name", "Patroni reads the configuration from the command-line argument if it", "PATRONI_ENV_PREFIX + 'LOG_' + name.upper() if value and name not", "Exception: logger.exception('Exception when saving file: %s', self._cache_file) if fd: try:", "nor file', path) raise ConfigParseError('invalid config path') overall_config = {}", "else: logger.info('No local configuration items changed.') except Exception: logger.exception('Exception when", "f in sorted(os.listdir(path)) if (f.endswith('.yml') or f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))]", "if value is not None: ret[first][second] = value for second", "local configuration items changed.') except Exception: logger.exception('Exception when reloading local", "overridden from DCS if name in ('synchronous_mode', 'synchronous_mode_strict'): config[name] =", "if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value, True)) elif name != 'use_slots':", "'authentication' in config['restapi']: config['restapi']['auth'] = '{username}:{password}'.format(**config['restapi']['authentication']) # special treatment for", "'REGISTER_SERVICE', 'USE_ENDPOINTS', 'BYPASS_API_SERVICE', 'VERIFY'): value = parse_bool(value) if value: ret[name.lower()][suffix.lower()]", "not None: ret['log'][second] = value def _parse_list(value): if not (value.strip().startswith('-')", "if value: value = parse_int(value) if value is not None:", "if value: ret[section][param] = value _set_section_values('restapi', ['listen', 'connect_address', 'certfile', 'keyfile',", "value: ret[first][second] = value def _get_auth(name, params=None): ret = {}", "ret def _build_effective_configuration(self, dynamic_configuration, local_configuration): config = self._safe_copy_dynamic_configuration(dynamic_configuration) for name,", "authentication = {} for 
user_type in ('replication', 'superuser', 'rewind'): entry", "pointed to by path. If path is a directory, loads", "os.path.isfile(self._cache_file): try: with open(self._cache_file) as f: self.set_dynamic_configuration(json.load(f)) except Exception: logger.exception('Exception", "suffix == 'PORT': value = value and parse_int(value) elif suffix", "{u: pg_config[u] for u in ('replication', 'superuser') if u in", "it as with the old `config` object. \"\"\" PATRONI_CONFIG_VARIABLE =", "'patroni.dynamic.json' file located in local_configuration['postgresql']['data_dir'] directory. This is necessary to", "PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX + 'CONFIGURATION' __CACHE_FILENAME = 'patroni.dynamic.json' __DEFAULT_CONFIG =", "self.config_file) @staticmethod def _process_postgresql_parameters(parameters, is_local=False): return {name: value for name,", "'DC', 'CONSISTENCY', 'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'NAMESPACE', 'CONTEXT', 'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL', 'POD_IP',", "for name, oldname in (('level', 'loglevel'), ('format', 'logformat'), ('dateformat', 'log_datefmt')):", "ret['bootstrap']['users'] = users return ret def _build_effective_configuration(self, dynamic_configuration, local_configuration): config", "elif name not in ('connect_address', 'listen', 'data_dir', 'pgpass', 'authentication'): config['postgresql'][name]", "ClusterConfig from patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler from patroni.utils import deep_compare,", "in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): # PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..) 
name, suffix = (param[8:].split('_',", "'synchronous_mode_strict'): config[name] = value else: config[name] = int(value) return config", "config def get(self, key, default=None): return self.__effective_configuration.get(key, default) def __contains__(self,", "not deep_compare(self._local_configuration, configuration): new_configuration = self._build_effective_configuration(self._dynamic_configuration, configuration) self._local_configuration = configuration", "v in value.items() if n in _AUTH_ALLOWED_PARAMETERS} # no 'name'", "ret: ret[dcs].update(_get_auth(dcs)) users = {} for param in list(os.environ.keys()): if", "yml files in that directory in alphabetical order \"\"\" if", "value and parse_int(value) elif suffix in ('HOSTS', 'PORTS', 'CHECKS'): value", "config = self._safe_copy_dynamic_configuration(dynamic_configuration) for name, value in local_configuration.items(): if name", "'VERIFY'): value = parse_bool(value) if value: ret[name.lower()][suffix.lower()] = value for", "%s', tmpfile) if tmpfile and os.path.exists(tmpfile): try: os.remove(tmpfile) except Exception:", "self.__effective_configuration = new_configuration return True else: logger.info('No local configuration items", "new_configuration return True else: logger.info('No local configuration items changed.') except", "def _set_section_values(section, params): for param in params: value = _popenv(section", "'{{{0}}}'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing dict", "['watchdog']: config[name] = deepcopy(value) if value else {} # restapi", "_load_config_file(self): \"\"\"Loads config.yaml from filesystem and applies some values which", "`local_configuration` -- configuration from `config.yml` or environment 2) Saving and", "value in local_configuration.items(): if name == 'postgresql': for name, value", "dict def set_dynamic_configuration(self, configuration): if isinstance(configuration, ClusterConfig): if self._modify_index ==", 
"value = value and parse_int(value) elif suffix in ('HOSTS', 'PORTS',", "_popenv(param) if value: ret[param] = value def _fix_log_env(name, oldname): value", "_parse_list(value) if value: ret[first][second] = value def _parse_dict(value): if not", "config['zookeeper']: config['exhibitor'] = config['zookeeper'].pop('exhibitor') config.pop('zookeeper') pg_config = config['postgresql'] # no", "{}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value, True)) elif name !=", "self._local_configuration) self._dynamic_configuration = configuration self._cache_needs_saving = True return True except", "value) return None for first, second in (('raft', 'partner_addrs'), ('restapi',", "= restapi_auth authentication = {} for user_type in ('replication', 'superuser',", "# no 'superuser' in 'postgresql'.'authentication' if 'superuser' not in pg_config['authentication']", "ret[first][second] = value def _parse_dict(value): if not value.strip().startswith('{'): value =", "= configuration self._cache_needs_saving = True return True except Exception: logger.exception('Exception", "setting dynamic_configuration') def reload_local_configuration(self): if self.config_file: try: configuration = self._load_config_file()", "= parse_bool(value) if value is not None: ret[first][second] = value", "environment 2) Saving and loading `dynamic_configuration` into 'patroni.dynamic.json' file located", "False @property def config_file(self): return self._config_file @property def dynamic_configuration(self): return", "pg_config['pg_rewind'] # handle setting additional connection parameters that may be", "'authentication'): config['postgresql'][name] = deepcopy(value) elif name == 'standby_cluster': for name,", "connection parameters for name, value in pg_config['authentication'].items(): pg_config['authentication'][name] = {n:", "self.__environment_configuration) return config def _load_cache(self): if os.path.isfile(self._cache_file): try: 
with open(self._cache_file)", "value = _parse_list(value) if value: ret[first][second] = value def _parse_dict(value):", "the old `config` object. \"\"\" PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX + 'CONFIGURATION'", "key, default=None): return self.__effective_configuration.get(key, default) def __contains__(self, key): return key", "patch_config(config, self.__environment_configuration) return config def _load_cache(self): if os.path.isfile(self._cache_file): try: with", "parameters that may be available # in the configuration file,", "if os.path.isfile(self._cache_file): try: with open(self._cache_file) as f: self.set_dynamic_configuration(json.load(f)) except Exception:", "not None: ret[first][second] = value for second in ('max_queue_size', 'file_size',", "param) if value: ret[param] = value return ret restapi_auth =", "{} for user_type in ('replication', 'superuser', 'rewind'): entry = _get_auth(user_type,", "__contains__(self, key): return key in self.__effective_configuration def __getitem__(self, key): return", "else: config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None) self._local_configuration = config_env and yaml.safe_load(config_env)", "patroni import PATRONI_ENV_PREFIX from patroni.exceptions import ConfigParseError from patroni.dcs import", "configuration.data if not deep_compare(self._dynamic_configuration, configuration): try: self.__effective_configuration = self._build_effective_configuration(configuration, self._local_configuration)", "_set_section_values('postgresql', ['listen', 'connect_address', 'config_dir', 'data_dir', 'pgpass', 'bin_dir']) _set_section_values('log', ['level', 'traceback_level',", "['listen', 'connect_address', 'config_dir', 'data_dir', 'pgpass', 'bin_dir']) _set_section_values('log', ['level', 'traceback_level', 'format',", "was accidentally wiped 3) Loading of configuration file in the", "in os.environ: os.environ[name] = value for name, oldname in (('level',", "config[name] = deepcopy(value) if value else 
{} # restapi server", "only variables present in __DEFAULT_CONFIG allowed to be overridden from", "== 'standby_cluster': for name, value in (value or {}).items(): if", "value _set_section_values('restapi', ['listen', 'connect_address', 'certfile', 'keyfile', 'keyfile_password', 'cafile', 'ciphers', 'verify_client',", "'etcd3'): if dcs in ret: ret[dcs].update(_get_auth(dcs)) users = {} for", "as f: config = yaml.safe_load(f) patch_config(overall_config, config) return overall_config def", "self.__effective_configuration.get('postgresql', {}).get('data_dir', \"\") self._cache_file = os.path.join(self._data_dir, self.__CACHE_FILENAME) self._load_cache() self._cache_needs_saving =", "_AUTH_ALLOWED_PARAMETERS[:2]: value = _popenv(name + '_' + param) if value:", "in files: with open(fname) as f: config = yaml.safe_load(f) patch_config(overall_config,", "= _popenv(name + '_' + param) if value: ret[param] =", "if p in config}) return config def get(self, key, default=None):", "%s', value) return None for first, second in (('raft', 'partner_addrs'),", "('http_extra_headers', 'https_extra_headers')), ('log', ('loggers',))): for second in params: value =", "users: ret['bootstrap']['users'] = users return ret def _build_effective_configuration(self, dynamic_configuration, local_configuration):", "= _popenv(oldname) name = PATRONI_ENV_PREFIX + 'LOG_' + name.upper() if", "config_env and yaml.safe_load(config_env) or self.__environment_configuration if validator: error = validator(self._local_configuration)", "default=None): return self.__effective_configuration.get(key, default) def __contains__(self, key): return key in", "= fd = None try: (fd, tmpfile) = tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir)", "configuration) self._local_configuration = configuration self.__effective_configuration = new_configuration return True else:", "except Exception: logger.exception('Exception when parsing list %s', value) return None", "value def _parse_dict(value): if not 
value.strip().startswith('{'): value = '{{{0}}}'.format(value) try:", "into 'patroni.dynamic.json' file located in local_configuration['postgresql']['data_dir'] directory. This is necessary", "name, value in local_configuration.items(): if name == 'postgresql': for name,", "'retry_timeout': 10, 'maximum_lag_on_failover': 1048576, 'maximum_lag_on_syncnode': -1, 'check_timeline': False, 'master_start_timeout': 300,", "with the old `config` object. \"\"\" PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX +", "'SERVICE_CHECK_INTERVAL', 'NAMESPACE', 'CONTEXT', 'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL', 'POD_IP', 'PORTS', 'LABELS', 'BYPASS_API_SERVICE',", "filesystem and applies some values which were set via ENV\"\"\"", "in alphabetical order \"\"\" if os.path.isfile(path): files = [path] elif", "`dynamic_configuration` -- configuration stored in DCS * `local_configuration` -- configuration", "old config # 'exhibitor' inside 'zookeeper': if 'zookeeper' in config", "= ( 'username', 'password', 'sslmode', 'sslcert', 'sslkey', 'sslpassword', 'sslrootcert', 'sslcrl',", "value in pg_config['authentication'].items(): pg_config['authentication'][name] = {n: v for n, v", "p in updated_fields if p in config}) return config def", "'PORT': value = value and parse_int(value) elif suffix in ('HOSTS',", "ret['log'][second] = value def _parse_list(value): if not (value.strip().startswith('-') or '['", "'file_size', 'file_num', 'loggers']) _set_section_values('raft', ['data_dir', 'self_addr', 'partner_addrs', 'password', 'bind_addr']) for", "params): for param in params: value = _popenv(section + '_'", "'sslpassword', 'sslrootcert', 'sslcrl', 'sslcrldir', 'gssencmode', 'channel_binding' ) def default_validator(conf): if", "= configfile and os.path.exists(configfile) and configfile if self._config_file: self._local_configuration =", "except Exception: logger.error('Can not close temporary file %s', tmpfile) if", "'CONFIGURATION' __CACHE_FILENAME = 'patroni.dynamic.json' __DEFAULT_CONFIG = 
{ 'ttl': 30, 'loop_wait':", "value is not None: ret['log'][second] = value def _parse_list(value): if", "pg_config['use_pg_rewind'] = 'pg_rewind' in pg_config pg_config['authentication'] = {u: pg_config[u] for", "in value.items() if n in _AUTH_ALLOWED_PARAMETERS} # no 'name' in", "[''])[:2] if suffix in ('HOST', 'HOSTS', 'PORT', 'USE_PROXIES', 'PROTOCOL', 'SRV',", "value = os.environ.pop(param) if suffix == 'PORT': value = value", "in config and 'exhibitor' in config['zookeeper']: config['exhibitor'] = config['zookeeper'].pop('exhibitor') config.pop('zookeeper')", "ret['postgresql']['authentication'] = authentication for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): #", "if fd: try: os.close(fd) except Exception: logger.error('Can not close temporary", "for param in params or _AUTH_ALLOWED_PARAMETERS[:2]: value = _popenv(name +", "{}).pop(second, None) if value: value = parse_bool(value) if value is", "= pg_config['name'] updated_fields = ( 'name', 'scope', 'retry_timeout', 'synchronous_mode', 'synchronous_mode_strict',", "return self._config_file @property def dynamic_configuration(self): return deepcopy(self._dynamic_configuration) def check_mode(self, mode):", "'synchronous_node_count': 1, 'standby_cluster': { 'create_replica_methods': '', 'host': '', 'port': '',", "import ConfigParseError from patroni.dcs import ClusterConfig from patroni.postgresql.config import CaseInsensitiveDict,", "ret[param] = value return ret restapi_auth = _get_auth('restapi') if restapi_auth:", "suffix in ('LABELS', 'SET_ACLS'): value = _parse_dict(value) elif suffix in", "os.close(fd) except Exception: logger.error('Can not close temporary file %s', tmpfile)", "with it as with the old `config` object. 
\"\"\" PATRONI_CONFIG_VARIABLE", "(value.strip().startswith('-') or '[' in value): value = '[{0}]'.format(value) try: return", "must be enabled/disabled globally config['postgresql'][name] = deepcopy(value) elif name not", "import CaseInsensitiveDict, ConfigHandler from patroni.utils import deep_compare, parse_bool, parse_int, patch_config", "if 'name' not in config and 'name' in pg_config: config['name']", "import json import logging import os import shutil import tempfile", "= { 'ttl': 30, 'loop_wait': 10, 'retry_timeout': 10, 'maximum_lag_on_failover': 1048576,", "path is a directory, loads all yml files in that", "value for name, oldname in (('level', 'loglevel'), ('format', 'logformat'), ('dateformat',", "yaml from collections import defaultdict from copy import deepcopy from", "ret.get(first, {}).pop(second, None) if value: value = _parse_dict(value) if value:", "server expects to get restapi.auth = 'username:password' if 'restapi' in", "30, 'loop_wait': 10, 'retry_timeout': 10, 'maximum_lag_on_failover': 1048576, 'maximum_lag_on_syncnode': -1, 'check_timeline':", "work with it as with the old `config` object. 
\"\"\"", "'CERT', 'KEY', 'VERIFY', 'TOKEN', 'CHECKS', 'DC', 'CONSISTENCY', 'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'NAMESPACE',", "self.set_dynamic_configuration(json.load(f)) except Exception: logger.exception('Exception when loading file: %s', self._cache_file) def", "import os import shutil import tempfile import yaml from collections", "in 'postgresql'.'authentication' if 'superuser' not in pg_config['authentication'] and 'pg_rewind' in", "additional connection parameters that may be available # in the", "is empty.\" class Config(object): \"\"\" This class is responsible for:", "%s', tmpfile) # configuration could be either ClusterConfig or dict", "fname in files: with open(fname) as f: config = yaml.safe_load(f)", "'name', 'scope', 'retry_timeout', 'synchronous_mode', 'synchronous_mode_strict', 'synchronous_node_count', 'maximum_lag_on_syncnode' ) pg_config.update({p: config[p]", "if p not in ('wal_keep_segments', 'wal_keep_size')}) }, 'watchdog': { 'mode':", "configuration.modify_index configuration = configuration.data if not deep_compare(self._dynamic_configuration, configuration): try: self.__effective_configuration", "'_' + param) if value: ret[param] = value return ret", "self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name] = deepcopy(value) elif name in config: # only", "for p in updated_fields if p in config}) return config", "in pg_config: pg_config['use_pg_rewind'] = 'pg_rewind' in pg_config pg_config['authentication'] = {u:", "# no 'name' in config if 'name' not in config", "to be able to restore `dynamic_configuration` if DCS was accidentally", "value: value = parse_bool(value) if value is not None: ret[first][second]", "_get_auth('restapi') if restapi_auth: ret['restapi']['authentication'] = restapi_auth authentication = {} for", "default values * `dynamic_configuration` -- configuration stored in DCS *", "from %s', self.config_file) @staticmethod def _process_postgresql_parameters(parameters, is_local=False): return 
{name: value", "'zookeeper': if 'zookeeper' in config and 'exhibitor' in config['zookeeper']: config['exhibitor']", "for first, second in (('raft', 'partner_addrs'), ('restapi', 'allowlist')): value =", "('wal_keep_segments', 'wal_keep_size')}) }, 'watchdog': { 'mode': 'automatic', } } def", "if suffix == 'PORT': value = value and parse_int(value) elif", "for name, value in (value or {}).items(): if name in", "fd: try: os.close(fd) except Exception: logger.error('Can not close temporary file", "# If the index didn't changed there is nothing to", "in ConfigHandler.CMDLINE_OPTIONS.items() if p not in ('wal_keep_segments', 'wal_keep_size')}) }, 'watchdog':", "f: self.set_dynamic_configuration(json.load(f)) except Exception: logger.exception('Exception when loading file: %s', self._cache_file)", "'traceback_level', 'format', 'dateformat', 'max_queue_size', 'dir', 'file_size', 'file_num', 'loggers']) _set_section_values('raft', ['data_dir',", "ret[first][second] = value def _get_auth(name, params=None): ret = {} for", "self.__effective_configuration = self._build_effective_configuration(configuration, self._local_configuration) self._dynamic_configuration = configuration self._cache_needs_saving = True", "dcs in ret: ret[dcs].update(_get_auth(dcs)) users = {} for param in", "or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif name not", "and _parse_list(options) if options: users[name]['options'] = options if users: ret['bootstrap']['users']", "value is not None: ret[first][second] = value for second in", "True, 'parameters': CaseInsensitiveDict({p: v[0] for p, v in ConfigHandler.CMDLINE_OPTIONS.items() if", "'allowlist', 'allowlist_include_members']) _set_section_values('ctl', ['insecure', 'cacert', 'certfile', 'keyfile', 'keyfile_password']) _set_section_values('postgresql', ['listen',", "# PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..) 
name, suffix = (param[8:].split('_', 1) + [''])[:2] if", "or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value, True)) elif name", "return deepcopy(self._dynamic_configuration) def check_mode(self, mode): return bool(parse_bool(self._dynamic_configuration.get(mode))) def _load_config_path(self, path):", "_parse_list(options) if options: users[name]['options'] = options if users: ret['bootstrap']['users'] =", "'postgresql': for name, value in (value or {}).items(): if name", "config path') overall_config = {} for fname in files: with", "value = '{{{0}}}'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception when", "None json.dump(self.dynamic_configuration, f) tmpfile = shutil.move(tmpfile, self._cache_file) self._cache_needs_saving = False", "config['postgresql'][name] = deepcopy(value) elif name == 'standby_cluster': for name, value", "via ENV\"\"\" config = self._load_config_path(self._config_file) patch_config(config, self.__environment_configuration) return config def", "not close temporary file %s', tmpfile) if tmpfile and os.path.exists(tmpfile):", "['level', 'traceback_level', 'format', 'dateformat', 'max_queue_size', 'dir', 'file_size', 'file_num', 'loggers']) _set_section_values('raft',", "'use_slots': # replication slots must be enabled/disabled globally config['postgresql'][name] =", "or not is_local and ConfigHandler.CMDLINE_OPTIONS[name][1](value)} def _safe_copy_dynamic_configuration(self, dynamic_configuration): config =", "or environment 2) Saving and loading `dynamic_configuration` into 'patroni.dynamic.json' file", "elif suffix in ('LABELS', 'SET_ACLS'): value = _parse_dict(value) elif suffix", "\"\") self._cache_file = os.path.join(self._data_dir, self.__CACHE_FILENAME) self._load_cache() self._cache_needs_saving = False @property", "ret restapi_auth = _get_auth('restapi') if restapi_auth: ret['restapi']['authentication'] = restapi_auth 
authentication", "that may be available # in the configuration file, such", "empty.\" class Config(object): \"\"\" This class is responsible for: 1)", "# replication slots must be enabled/disabled globally config['postgresql'][name] = deepcopy(value)", "from the environment self._config_file = configfile and os.path.exists(configfile) and configfile", "if name and suffix == 'PASSWORD': password = <PASSWORD>(param) if", "This is necessary to be able to restore `dynamic_configuration` if", "}, 'postgresql': { 'bin_dir': '', 'use_slots': True, 'parameters': CaseInsensitiveDict({p: v[0]", "config = deepcopy(self.__DEFAULT_CONFIG) for name, value in dynamic_configuration.items(): if name", "'automatic', } } def __init__(self, configfile, validator=default_validator): self._modify_index = -1", "as SSL connection parameters for name, value in pg_config['authentication'].items(): pg_config['authentication'][name]", "'data_dir', 'pgpass', 'authentication'): config['postgresql'][name] = deepcopy(value) elif name == 'standby_cluster':", "None: ret['log'][second] = value def _parse_list(value): if not (value.strip().startswith('-') or", "tempfile import yaml from collections import defaultdict from copy import", "password} options = os.environ.pop(param[:-9] + '_OPTIONS', None) options = options", "for p, v in ConfigHandler.CMDLINE_OPTIONS.items() if p not in ('wal_keep_segments',", "<PASSWORD>(param) if password: users[name] = {'password': password} options = os.environ.pop(param[:-9]", "os.path.isfile(path): files = [path] elif os.path.isdir(path): files = [os.path.join(path, f)", "# only variables present in __DEFAULT_CONFIG allowed to be overridden", "local configuration from %s', self.config_file) @staticmethod def _process_postgresql_parameters(parameters, is_local=False): return", "'keyfile_password']) _set_section_values('postgresql', ['listen', 'connect_address', 'config_dir', 'data_dir', 'pgpass', 'bin_dir']) _set_section_values('log', ['level',", 
"_set_section_values(section, params): for param in params: value = _popenv(section +", "values * `dynamic_configuration` -- configuration stored in DCS * `local_configuration`", "import shutil import tempfile import yaml from collections import defaultdict", "('etcd', 'etcd3'): if dcs in ret: ret[dcs].update(_get_auth(dcs)) users = {}", "# CREATE USER \"<username>\" WITH <OPTIONS> PASSWORD '<password>' if name", "'allowlist_include_members'), ('ctl', 'insecure')): value = ret.get(first, {}).pop(second, None) if value:", "sorted(os.listdir(path)) if (f.endswith('.yml') or f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))] else: logger.error('config", "files = [path] elif os.path.isdir(path): files = [os.path.join(path, f) for", "WITH <OPTIONS> PASSWORD '<password>' if name and suffix == 'PASSWORD':", "self._cache_needs_saving = True return True except Exception: logger.exception('Exception when setting", "from patroni.dcs import ClusterConfig from patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler from", "not in config and 'name' in pg_config: config['name'] = pg_config['name']", "+ param) if value: ret[section][param] = value _set_section_values('restapi', ['listen', 'connect_address',", "oldname in (('level', 'loglevel'), ('format', 'logformat'), ('dateformat', 'log_datefmt')): _fix_log_env(name, oldname)", "value = parse_bool(value) if value is not None: ret[first][second] =", "items changed.') except Exception: logger.exception('Exception when reloading local configuration from", "def _build_effective_configuration(self, dynamic_configuration, local_configuration): config = self._safe_copy_dynamic_configuration(dynamic_configuration) for name, value", "= authentication for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): # PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..)", "'watchdog': { 'mode': 'automatic', } } def __init__(self, configfile, validator=default_validator):", "not in 
('connect_address', 'listen', 'data_dir', 'pgpass', 'authentication'): config['postgresql'][name] = deepcopy(value)", "parse_bool(value) if value: ret[name.lower()][suffix.lower()] = value for dcs in ('etcd',", "self._cache_needs_saving = False @property def config_file(self): return self._config_file @property def", "None) if value: value = parse_bool(value) if value is not", "'', 'use_slots': True, 'parameters': CaseInsensitiveDict({p: v[0] for p, v in", "in 'postgresql', but 'replication' and 'superuser' if 'authentication' not in", "logger = logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS = ( 'username', 'password', 'sslmode', 'sslcert',", "('loggers',))): for second in params: value = ret.get(first, {}).pop(second, None)", "v in ConfigHandler.CMDLINE_OPTIONS.items() if p not in ('wal_keep_segments', 'wal_keep_size')}) },", "def check_mode(self, mode): return bool(parse_bool(self._dynamic_configuration.get(mode))) def _load_config_path(self, path): \"\"\" If", "(parameters or {}).items() if name not in ConfigHandler.CMDLINE_OPTIONS or not", "authentication[user_type] = entry if authentication: ret['postgresql']['authentication'] = authentication for param", "__init__(self, configfile, validator=default_validator): self._modify_index = -1 self._dynamic_configuration = {} self.__environment_configuration", "def get(self, key, default=None): return self.__effective_configuration.get(key, default) def __contains__(self, key):", "self._load_cache() self._cache_needs_saving = False @property def config_file(self): return self._config_file @property", "be available # in the configuration file, such as SSL", "in config or name in ['watchdog']: config[name] = deepcopy(value) if", "configuration from %s', self.config_file) @staticmethod def _process_postgresql_parameters(parameters, is_local=False): return {name:", "if password: users[name] = {'password': password} options = os.environ.pop(param[:-9] +", "# Patroni reads the configuration from the command-line 
argument if", "('ctl', 'insecure')): value = ret.get(first, {}).pop(second, None) if value: value", "value for second in ('max_queue_size', 'file_size', 'file_num'): value = ret.get('log',", "in the configuration file, such as SSL connection parameters for", "reload_local_configuration(self): if self.config_file: try: configuration = self._load_config_file() if not deep_compare(self._local_configuration,", "name == 'postgresql': for name, value in (value or {}).items():", "'', 'port': '', 'primary_slot_name': '', 'restore_command': '', 'archive_cleanup_command': '', 'recovery_min_apply_delay':", "None) for param in ('name', 'namespace', 'scope'): value = _popenv(param)", "patroni.dcs import ClusterConfig from patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler from patroni.utils", "if (f.endswith('.yml') or f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))] else: logger.error('config path", "for name, value in pg_config['authentication'].items(): pg_config['authentication'][name] = {n: v for", "_build_effective_configuration(self, dynamic_configuration, local_configuration): config = self._safe_copy_dynamic_configuration(dynamic_configuration) for name, value in", "file %s', tmpfile) if tmpfile and os.path.exists(tmpfile): try: os.remove(tmpfile) except", "ret[first][second] = value for second in ('max_queue_size', 'file_size', 'file_num'): value", "'format', 'dateformat', 'max_queue_size', 'dir', 'file_size', 'file_num', 'loggers']) _set_section_values('raft', ['data_dir', 'self_addr',", "value: ret[param] = value return ret restapi_auth = _get_auth('restapi') if", "value def _fix_log_env(name, oldname): value = _popenv(oldname) name = PATRONI_ENV_PREFIX", "be either ClusterConfig or dict def set_dynamic_configuration(self, configuration): if isinstance(configuration,", "@staticmethod def _build_environment_configuration(): ret = defaultdict(dict) def _popenv(name): return os.environ.pop(PATRONI_ENV_PREFIX", "'superuser', 'rewind'): 
entry = _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS) if entry: authentication[user_type] =", "def _get_auth(name, params=None): ret = {} for param in params", "logger.exception('Exception when reloading local configuration from %s', self.config_file) @staticmethod def", "in that directory in alphabetical order \"\"\" if os.path.isfile(path): files", "-- some sane default values * `dynamic_configuration` -- configuration stored", "options and _parse_list(options) if options: users[name]['options'] = options if users:", "ret[dcs].update(_get_auth(dcs)) users = {} for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX):", "yml file pointed to by path. If path is a", "'keyfile', 'keyfile_password']) _set_section_values('postgresql', ['listen', 'connect_address', 'config_dir', 'data_dir', 'pgpass', 'bin_dir']) _set_section_values('log',", "'https_extra_headers')), ('log', ('loggers',))): for second in params: value = ret.get(first,", "'[{0}]'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing list", "'connect_address', 'certfile', 'keyfile', 'keyfile_password', 'cafile', 'ciphers', 'verify_client', 'http_extra_headers', 'https_extra_headers', 'allowlist',", "'use_slots': True, 'parameters': CaseInsensitiveDict({p: v[0] for p, v in ConfigHandler.CMDLINE_OPTIONS.items()", "= {} for user_type in ('replication', 'superuser', 'rewind'): entry =", "value = ret.get('log', {}).pop(second, None) if value: value = parse_int(value)", "{}).pop(second, None) if value: value = parse_int(value) if value is", "self._cache_file) def save_cache(self): if self._cache_needs_saving: tmpfile = fd = None", "no 'superuser' in 'postgresql'.'authentication' if 'superuser' not in pg_config['authentication'] and", "_safe_copy_dynamic_configuration(self, dynamic_configuration): config = deepcopy(self.__DEFAULT_CONFIG) for name, value in dynamic_configuration.items():", "users return ret def 
_build_effective_configuration(self, dynamic_configuration, local_configuration): config = self._safe_copy_dynamic_configuration(dynamic_configuration)", "os import shutil import tempfile import yaml from collections import", "elif name != 'use_slots': # replication slots must be enabled/disabled", "def __contains__(self, key): return key in self.__effective_configuration def __getitem__(self, key):", "bool(parse_bool(self._dynamic_configuration.get(mode))) def _load_config_path(self, path): \"\"\" If path is a file,", "-- configuration stored in DCS * `local_configuration` -- configuration from", "not is_local and ConfigHandler.CMDLINE_OPTIONS[name][1](value)} def _safe_copy_dynamic_configuration(self, dynamic_configuration): config = deepcopy(self.__DEFAULT_CONFIG)", "(('raft', 'partner_addrs'), ('restapi', 'allowlist')): value = ret.get(first, {}).pop(second, None) if", "'SRV_SUFFIX', 'URL', 'PROXY', 'CACERT', 'CERT', 'KEY', 'VERIFY', 'TOKEN', 'CHECKS', 'DC',", "{'password': password} options = os.environ.pop(param[:-9] + '_OPTIONS', None) options =", "'CACERT', 'CERT', 'KEY', 'VERIFY', 'TOKEN', 'CHECKS', 'DC', 'CONSISTENCY', 'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL',", "name.upper(), None) for param in ('name', 'namespace', 'scope'): value =", "('replication', 'superuser') if u in pg_config} # no 'superuser' in", "= self._load_config_file() else: config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None) self._local_configuration = config_env", "deep_compare(self._local_configuration, configuration): new_configuration = self._build_effective_configuration(self._dynamic_configuration, configuration) self._local_configuration = configuration self.__effective_configuration", "self._modify_index = configuration.modify_index configuration = configuration.data if not deep_compare(self._dynamic_configuration, configuration):", "'insecure')): value = ret.get(first, {}).pop(second, None) if value: value =", "(fd, tmpfile) = 
tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir) with os.fdopen(fd, 'w') as f:", "and name not in os.environ: os.environ[name] = value for name,", "patch_config logger = logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS = ( 'username', 'password', 'sslmode',", "= None try: (fd, tmpfile) = tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir) with os.fdopen(fd,", "open(self._cache_file) as f: self.set_dynamic_configuration(json.load(f)) except Exception: logger.exception('Exception when loading file:", "@staticmethod def _process_postgresql_parameters(parameters, is_local=False): return {name: value for name, value", "value = value and _parse_list(value) elif suffix in ('LABELS', 'SET_ACLS'):", "'http_extra_headers', 'https_extra_headers', 'allowlist', 'allowlist_include_members']) _set_section_values('ctl', ['insecure', 'cacert', 'certfile', 'keyfile', 'keyfile_password'])", "expects to get restapi.auth = 'username:password' if 'restapi' in config", "{ 'create_replica_methods': '', 'host': '', 'port': '', 'primary_slot_name': '', 'restore_command':", "'sslcrl', 'sslcrldir', 'gssencmode', 'channel_binding' ) def default_validator(conf): if not conf:", "value and name not in os.environ: os.environ[name] = value for", "name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif name not in ('connect_address', 'listen',", "json.dump(self.dynamic_configuration, f) tmpfile = shutil.move(tmpfile, self._cache_file) self._cache_needs_saving = False except", "list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): name, suffix = (param[8:].rsplit('_', 1) + [''])[:2]", "in ConfigHandler.CMDLINE_OPTIONS or not is_local and ConfigHandler.CMDLINE_OPTIONS[name][1](value)} def _safe_copy_dynamic_configuration(self, dynamic_configuration):", "if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif name not in ('connect_address',", "in 
('synchronous_mode', 'synchronous_mode_strict'): config[name] = value else: config[name] = int(value)", "config and 'authentication' in config['restapi']: config['restapi']['auth'] = '{username}:{password}'.format(**config['restapi']['authentication']) # special", "{} for fname in files: with open(fname) as f: config", "the old format and converting it into new format 4)", "for user_type in ('replication', 'superuser', 'rewind'): entry = _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS)", "'SET_ACLS'): value = _parse_dict(value) elif suffix in ('USE_PROXIES', 'REGISTER_SERVICE', 'USE_ENDPOINTS',", "value in dynamic_configuration.items(): if name == 'postgresql': for name, value", "be enabled/disabled globally config['postgresql'][name] = deepcopy(value) elif name not in", "nothing to do self._modify_index = configuration.modify_index configuration = configuration.data if", "('log', ('loggers',))): for second in params: value = ret.get(first, {}).pop(second,", "when setting dynamic_configuration') def reload_local_configuration(self): if self.config_file: try: configuration =", "'cafile', 'ciphers', 'verify_client', 'http_extra_headers', 'https_extra_headers', 'allowlist', 'allowlist_include_members']) _set_section_values('ctl', ['insecure', 'cacert',", "None) options = options and _parse_list(options) if options: users[name]['options'] =", "alphabetical order \"\"\" if os.path.isfile(path): files = [path] elif os.path.isdir(path):", "'superuser' not in pg_config['authentication'] and 'pg_rewind' in pg_config: pg_config['authentication']['superuser'] =", "accidentally wiped 3) Loading of configuration file in the old", "patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler from patroni.utils import deep_compare, parse_bool, parse_int,", "= {n: v for n, v in value.items() if n", "pg_config[u] for u in ('replication', 'superuser') if u in pg_config}", "path %s is neither directory nor file', path) raise ConfigParseError('invalid", "value = ret.get(first, 
{}).pop(second, None) if value: value = parse_bool(value)", "'synchronous_mode', 'synchronous_mode_strict', 'synchronous_node_count', 'maximum_lag_on_syncnode' ) pg_config.update({p: config[p] for p in", "for second in ('max_queue_size', 'file_size', 'file_num'): value = ret.get('log', {}).pop(second,", "config_file(self): return self._config_file @property def dynamic_configuration(self): return deepcopy(self._dynamic_configuration) def check_mode(self,", "'maximum_lag_on_syncnode' ) pg_config.update({p: config[p] for p in updated_fields if p", "SSL connection parameters for name, value in pg_config['authentication'].items(): pg_config['authentication'][name] =", "deepcopy(value) elif name in config: # only variables present in", "'superuser' if 'authentication' not in pg_config: pg_config['use_pg_rewind'] = 'pg_rewind' in", "'loop_wait': 10, 'retry_timeout': 10, 'maximum_lag_on_failover': 1048576, 'maximum_lag_on_syncnode': -1, 'check_timeline': False,", "enabled/disabled globally config['postgresql'][name] = deepcopy(value) elif name not in config", "name, value in (value or {}).items(): if name == 'parameters':", "the index didn't changed there is nothing to do self._modify_index", "self._build_effective_configuration(configuration, self._local_configuration) self._dynamic_configuration = configuration self._cache_needs_saving = True return True", "value: value = parse_int(value) if value is not None: ret['log'][second]", "value = '[{0}]'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception when", "directory in alphabetical order \"\"\" if os.path.isfile(path): files = [path]", "= 'patroni.dynamic.json' __DEFAULT_CONFIG = { 'ttl': 30, 'loop_wait': 10, 'retry_timeout':", "def set_dynamic_configuration(self, configuration): if isinstance(configuration, ClusterConfig): if self._modify_index == configuration.modify_index:", "and giving access to `effective_configuration` from: * `Config.__DEFAULT_CONFIG` -- some", 
"'standby_cluster': { 'create_replica_methods': '', 'host': '', 'port': '', 'primary_slot_name': '',", "'sslkey', 'sslpassword', 'sslrootcert', 'sslcrl', 'sslcrldir', 'gssencmode', 'channel_binding' ) def default_validator(conf):", "= tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir) with os.fdopen(fd, 'w') as f: fd =", "configuration): try: self.__effective_configuration = self._build_effective_configuration(configuration, self._local_configuration) self._dynamic_configuration = configuration self._cache_needs_saving", "get restapi.auth = 'username:password' if 'restapi' in config and 'authentication'", "except Exception: logger.exception('Exception when saving file: %s', self._cache_file) if fd:", "_set_section_values('ctl', ['insecure', 'cacert', 'certfile', 'keyfile', 'keyfile_password']) _set_section_values('postgresql', ['listen', 'connect_address', 'config_dir',", "'file_size', 'file_num'): value = ret.get('log', {}).pop(second, None) if value: value", "is nothing to do self._modify_index = configuration.modify_index configuration = configuration.data", "if value else {} # restapi server expects to get", "overall_config def _load_config_file(self): \"\"\"Loads config.yaml from filesystem and applies some", "'file_num'): value = ret.get('log', {}).pop(second, None) if value: value =", "value def _parse_list(value): if not (value.strip().startswith('-') or '[' in value):", "'sslcert', 'sslkey', 'sslpassword', 'sslrootcert', 'sslcrl', 'sslcrldir', 'gssencmode', 'channel_binding' ) def", ") pg_config.update({p: config[p] for p in updated_fields if p in", "'self_addr', 'partner_addrs', 'password', 'bind_addr']) for first, second in (('restapi', 'allowlist_include_members'),", "[os.path.join(path, f) for f in sorted(os.listdir(path)) if (f.endswith('.yml') or f.endswith('.yaml'))", "password: users[name] = {'password': password} options = os.environ.pop(param[:-9] + '_OPTIONS',", "from the command-line argument if it exists, otherwise from the", 
"path is a file, loads the yml file pointed to", "remove temporary file %s', tmpfile) # configuration could be either", "entry: authentication[user_type] = entry if authentication: ret['postgresql']['authentication'] = authentication for", "f: config = yaml.safe_load(f) patch_config(overall_config, config) return overall_config def _load_config_file(self):", "'logformat'), ('dateformat', 'log_datefmt')): _fix_log_env(name, oldname) def _set_section_values(section, params): for param", "and configfile if self._config_file: self._local_configuration = self._load_config_file() else: config_env =", "value else: config[name] = int(value) return config @staticmethod def _build_environment_configuration():", "_parse_dict(value): if not value.strip().startswith('{'): value = '{{{0}}}'.format(value) try: return yaml.safe_load(value)", "'certfile', 'keyfile', 'keyfile_password', 'cafile', 'ciphers', 'verify_client', 'http_extra_headers', 'https_extra_headers', 'allowlist', 'allowlist_include_members'])", "'password', 'bind_addr']) for first, second in (('restapi', 'allowlist_include_members'), ('ctl', 'insecure')):", "when saving file: %s', self._cache_file) if fd: try: os.close(fd) except", "error: raise ConfigParseError(error) self.__effective_configuration = self._build_effective_configuration({}, self._local_configuration) self._data_dir = self.__effective_configuration.get('postgresql',", "= logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS = ( 'username', 'password', 'sslmode', 'sslcert', 'sslkey',", "file pointed to by path. 
If path is a directory,", "# handle setting additional connection parameters that may be available", "is not None: ret[first][second] = value for second in ('max_queue_size',", "self._modify_index = -1 self._dynamic_configuration = {} self.__environment_configuration = self._build_environment_configuration() #", "slots must be enabled/disabled globally config['postgresql'][name] = deepcopy(value) elif name", "== 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value, True)) elif name != 'use_slots': # replication", "config[name] = value else: config[name] = int(value) return config @staticmethod", "('replication', 'superuser', 'rewind'): entry = _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS) if entry: authentication[user_type]", "= options and _parse_list(options) if options: users[name]['options'] = options if", "def save_cache(self): if self._cache_needs_saving: tmpfile = fd = None try:", "Loading of configuration file in the old format and converting", "= '[{0}]'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing", "'certfile', 'keyfile', 'keyfile_password']) _set_section_values('postgresql', ['listen', 'connect_address', 'config_dir', 'data_dir', 'pgpass', 'bin_dir'])", "suffix = (param[8:].split('_', 1) + [''])[:2] if suffix in ('HOST',", "tmpfile and os.path.exists(tmpfile): try: os.remove(tmpfile) except Exception: logger.error('Can not remove", "config @staticmethod def _build_environment_configuration(): ret = defaultdict(dict) def _popenv(name): return", "'loglevel'), ('format', 'logformat'), ('dateformat', 'log_datefmt')): _fix_log_env(name, oldname) def _set_section_values(section, params):", "'pg_rewind' in pg_config pg_config['authentication'] = {u: pg_config[u] for u in", "in local_configuration['postgresql']['data_dir'] directory. 
This is necessary to be able to", "command-line argument if it exists, otherwise from the environment self._config_file", "deepcopy from patroni import PATRONI_ENV_PREFIX from patroni.exceptions import ConfigParseError from", "open(fname) as f: config = yaml.safe_load(f) patch_config(overall_config, config) return overall_config", "'CHECKS', 'DC', 'CONSISTENCY', 'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'NAMESPACE', 'CONTEXT', 'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL',", "to get restapi.auth = 'username:password' if 'restapi' in config and", "'', 'recovery_min_apply_delay': '' }, 'postgresql': { 'bin_dir': '', 'use_slots': True,", "second in (('raft', 'partner_addrs'), ('restapi', 'allowlist')): value = ret.get(first, {}).pop(second,", "value in (value or {}).items(): if name in self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name]", "format 4) Mimicking some of the `dict` interfaces to make", "`dynamic_configuration` if DCS was accidentally wiped 3) Loading of configuration", "None: ret[first][second] = value for second in ('max_queue_size', 'file_size', 'file_num'):", "= -1 self._dynamic_configuration = {} self.__environment_configuration = self._build_environment_configuration() # Patroni", "name == 'standby_cluster': for name, value in (value or {}).items():", "= os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None) self._local_configuration = config_env and yaml.safe_load(config_env) or self.__environment_configuration", "for first, second in (('restapi', 'allowlist_include_members'), ('ctl', 'insecure')): value =", "= users return ret def _build_effective_configuration(self, dynamic_configuration, local_configuration): config =", "or {}).items() if name not in ConfigHandler.CMDLINE_OPTIONS or not is_local", "value = _popenv(param) if value: ret[param] = value def _fix_log_env(name,", "value = _popenv(name + '_' + param) if value: ret[param]", "collections import defaultdict from copy import deepcopy from patroni import", "if name not 
in ConfigHandler.CMDLINE_OPTIONS or not is_local and ConfigHandler.CMDLINE_OPTIONS[name][1](value)}", "file %s', tmpfile) # configuration could be either ClusterConfig or", "If path is a directory, loads all yml files in", "= value else: config[name] = int(value) return config @staticmethod def", "name, value in pg_config['authentication'].items(): pg_config['authentication'][name] = {n: v for n,", "inside 'zookeeper': if 'zookeeper' in config and 'exhibitor' in config['zookeeper']:", "make it possible to work with it as with the", "format and converting it into new format 4) Mimicking some", "when parsing dict %s', value) return None for first, params", "in config and 'name' in pg_config: config['name'] = pg_config['name'] updated_fields", "index didn't changed there is nothing to do self._modify_index =", "name, value in (value or {}).items(): if name in self.__DEFAULT_CONFIG['standby_cluster']:", "in updated_fields if p in config}) return config def get(self,", "if n in _AUTH_ALLOWED_PARAMETERS} # no 'name' in config if", "self.__environment_configuration if validator: error = validator(self._local_configuration) if error: raise ConfigParseError(error)", "'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'NAMESPACE', 'CONTEXT', 'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL', 'POD_IP', 'PORTS', 'LABELS',", "authentication: ret['postgresql']['authentication'] = authentication for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX):", "config and 'exhibitor' in config['zookeeper']: config['exhibitor'] = config['zookeeper'].pop('exhibitor') config.pop('zookeeper') pg_config", "fd = None json.dump(self.dynamic_configuration, f) tmpfile = shutil.move(tmpfile, self._cache_file) self._cache_needs_saving", "if self.config_file: try: configuration = self._load_config_file() if not deep_compare(self._local_configuration, configuration):", "shutil.move(tmpfile, self._cache_file) self._cache_needs_saving = False except Exception: logger.exception('Exception 
when saving", "return config def get(self, key, default=None): return self.__effective_configuration.get(key, default) def", "'retry_timeout', 'synchronous_mode', 'synchronous_mode_strict', 'synchronous_node_count', 'maximum_lag_on_syncnode' ) pg_config.update({p: config[p] for p", "= _get_auth('restapi') if restapi_auth: ret['restapi']['authentication'] = restapi_auth authentication = {}", "value return ret restapi_auth = _get_auth('restapi') if restapi_auth: ret['restapi']['authentication'] =", "'bin_dir']) _set_section_values('log', ['level', 'traceback_level', 'format', 'dateformat', 'max_queue_size', 'dir', 'file_size', 'file_num',", "config['postgresql'][name].update(self._process_postgresql_parameters(value, True)) elif name != 'use_slots': # replication slots must", "('dateformat', 'log_datefmt')): _fix_log_env(name, oldname) def _set_section_values(section, params): for param in", "it into new format 4) Mimicking some of the `dict`", "is responsible for: 1) Building and giving access to `effective_configuration`", "2) Saving and loading `dynamic_configuration` into 'patroni.dynamic.json' file located in", "'host': '', 'port': '', 'primary_slot_name': '', 'restore_command': '', 'archive_cleanup_command': '',", "necessary to be able to restore `dynamic_configuration` if DCS was", "as f: self.set_dynamic_configuration(json.load(f)) except Exception: logger.exception('Exception when loading file: %s',", "'verify_client', 'http_extra_headers', 'https_extra_headers', 'allowlist', 'allowlist_include_members']) _set_section_values('ctl', ['insecure', 'cacert', 'certfile', 'keyfile',", "+ name.upper() if value and name not in os.environ: os.environ[name]", "os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None) for param in ('name', 'namespace', 'scope'):", "dict %s', value) return None for first, params in (('restapi',", "params in (('restapi', ('http_extra_headers', 'https_extra_headers')), ('log', ('loggers',))): for second in", "return \"Config is empty.\" 
class Config(object): \"\"\" This class is", "return None for first, second in (('raft', 'partner_addrs'), ('restapi', 'allowlist')):", "'' }, 'postgresql': { 'bin_dir': '', 'use_slots': True, 'parameters': CaseInsensitiveDict({p:", "'BYPASS_API_SERVICE', 'VERIFY'): value = parse_bool(value) if value: ret[name.lower()][suffix.lower()] = value", "= PATRONI_ENV_PREFIX + 'CONFIGURATION' __CACHE_FILENAME = 'patroni.dynamic.json' __DEFAULT_CONFIG = {", "_popenv(section + '_' + param) if value: ret[section][param] = value", "config['name'] = pg_config['name'] updated_fields = ( 'name', 'scope', 'retry_timeout', 'synchronous_mode',", "is not None: ret['log'][second] = value def _parse_list(value): if not", "config['standby_cluster'][name] = deepcopy(value) elif name in config: # only variables", "self._dynamic_configuration = {} self.__environment_configuration = self._build_environment_configuration() # Patroni reads the", "'LOG_' + name.upper() if value and name not in os.environ:", "import deepcopy from patroni import PATRONI_ENV_PREFIX from patroni.exceptions import ConfigParseError", "ConfigParseError(error) self.__effective_configuration = self._build_effective_configuration({}, self._local_configuration) self._data_dir = self.__effective_configuration.get('postgresql', {}).get('data_dir', \"\")", "if restapi_auth: ret['restapi']['authentication'] = restapi_auth authentication = {} for user_type", "restapi_auth: ret['restapi']['authentication'] = restapi_auth authentication = {} for user_type in", "This class is responsible for: 1) Building and giving access", "'wal_keep_size')}) }, 'watchdog': { 'mode': 'automatic', } } def __init__(self,", "directory nor file', path) raise ConfigParseError('invalid config path') overall_config =", "is_local and ConfigHandler.CMDLINE_OPTIONS[name][1](value)} def _safe_copy_dynamic_configuration(self, dynamic_configuration): config = deepcopy(self.__DEFAULT_CONFIG) for", "'_OPTIONS', None) options = options and _parse_list(options) 
if options: users[name]['options']", "in the old format and converting it into new format", "pg_config = config['postgresql'] # no 'authentication' in 'postgresql', but 'replication'", "available # in the configuration file, such as SSL connection", "logger.exception('Exception when loading file: %s', self._cache_file) def save_cache(self): if self._cache_needs_saving:", "'SET_ACLS') and name: value = os.environ.pop(param) if suffix == 'PORT':", "ClusterConfig or dict def set_dynamic_configuration(self, configuration): if isinstance(configuration, ClusterConfig): if", "for first, params in (('restapi', ('http_extra_headers', 'https_extra_headers')), ('log', ('loggers',))): for", "isinstance(configuration, ClusterConfig): if self._modify_index == configuration.modify_index: return False # If", "= config['postgresql'] # no 'authentication' in 'postgresql', but 'replication' and", "'BYPASS_API_SERVICE', 'KEY_PASSWORD', 'USE_SSL', 'SET_ACLS') and name: value = os.environ.pop(param) if", "if suffix in ('HOST', 'HOSTS', 'PORT', 'USE_PROXIES', 'PROTOCOL', 'SRV', 'SRV_SUFFIX',", "authentication for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): # PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..) 
name,", "value = _parse_dict(value) if value: ret[first][second] = value def _get_auth(name,", "restore `dynamic_configuration` if DCS was accidentally wiped 3) Loading of", "return overall_config def _load_config_file(self): \"\"\"Loads config.yaml from filesystem and applies", "'name' in pg_config: config['name'] = pg_config['name'] updated_fields = ( 'name',", "_parse_dict(value) if value: ret[first][second] = value def _get_auth(name, params=None): ret", "= os.environ.pop(param) if suffix == 'PORT': value = value and", "for param in params: value = _popenv(section + '_' +", "temporary file %s', tmpfile) if tmpfile and os.path.exists(tmpfile): try: os.remove(tmpfile)", "suffix in ('HOST', 'HOSTS', 'PORT', 'USE_PROXIES', 'PROTOCOL', 'SRV', 'SRV_SUFFIX', 'URL',", "config['postgresql'][name] = deepcopy(value) elif name not in config or name", "Building and giving access to `effective_configuration` from: * `Config.__DEFAULT_CONFIG` --", "PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..) 
name, suffix = (param[8:].split('_', 1) + [''])[:2] if suffix", "@property def config_file(self): return self._config_file @property def dynamic_configuration(self): return deepcopy(self._dynamic_configuration)", "in ('name', 'namespace', 'scope'): value = _popenv(param) if value: ret[param]", "'mode': 'automatic', } } def __init__(self, configfile, validator=default_validator): self._modify_index =", "'ciphers', 'verify_client', 'http_extra_headers', 'https_extra_headers', 'allowlist', 'allowlist_include_members']) _set_section_values('ctl', ['insecure', 'cacert', 'certfile',", "= 'pg_rewind' in pg_config pg_config['authentication'] = {u: pg_config[u] for u", "is neither directory nor file', path) raise ConfigParseError('invalid config path')", "name: value = os.environ.pop(param) if suffix == 'PORT': value =", "= deepcopy(value) elif name in config: # only variables present", "} } def __init__(self, configfile, validator=default_validator): self._modify_index = -1 self._dynamic_configuration", "config = self._load_config_path(self._config_file) patch_config(config, self.__environment_configuration) return config def _load_cache(self): if", "first, second in (('raft', 'partner_addrs'), ('restapi', 'allowlist')): value = ret.get(first,", "'', 'primary_slot_name': '', 'restore_command': '', 'archive_cleanup_command': '', 'recovery_min_apply_delay': '' },", "temporary file %s', tmpfile) # configuration could be either ClusterConfig", "self._config_file = configfile and os.path.exists(configfile) and configfile if self._config_file: self._local_configuration", "self.__effective_configuration.get(key, default) def __contains__(self, key): return key in self.__effective_configuration def", "'KEY_PASSWORD', 'USE_SSL', 'SET_ACLS') and name: value = os.environ.pop(param) if suffix", "validator=default_validator): self._modify_index = -1 self._dynamic_configuration = {} self.__environment_configuration = self._build_environment_configuration()", "parsing list %s', value) 
return None for first, second in", "config = yaml.safe_load(f) patch_config(overall_config, config) return overall_config def _load_config_file(self): \"\"\"Loads", "when loading file: %s', self._cache_file) def save_cache(self): if self._cache_needs_saving: tmpfile", "in ('HOST', 'HOSTS', 'PORT', 'USE_PROXIES', 'PROTOCOL', 'SRV', 'SRV_SUFFIX', 'URL', 'PROXY',", "value): value = '[{0}]'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception", "dynamic_configuration, local_configuration): config = self._safe_copy_dynamic_configuration(dynamic_configuration) for name, value in local_configuration.items():", "name, value in dynamic_configuration.items(): if name == 'postgresql': for name,", "_process_postgresql_parameters(parameters, is_local=False): return {name: value for name, value in (parameters", "configuration self.__effective_configuration = new_configuration return True else: logger.info('No local configuration", "= self._load_config_file() if not deep_compare(self._local_configuration, configuration): new_configuration = self._build_effective_configuration(self._dynamic_configuration, configuration)", "value in (value or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value,", "converting it into new format 4) Mimicking some of the", "elif suffix in ('HOSTS', 'PORTS', 'CHECKS'): value = value and", "not remove temporary file %s', tmpfile) # configuration could be", "users[name]['options'] = options if users: ret['bootstrap']['users'] = users return ret", "value = _popenv(section + '_' + param) if value: ret[section][param]", "raise ConfigParseError('invalid config path') overall_config = {} for fname in", "= deepcopy(value) elif name not in config or name in", "Saving and loading `dynamic_configuration` into 'patroni.dynamic.json' file located in local_configuration['postgresql']['data_dir']", "if not conf: return \"Config is empty.\" class Config(object): 
\"\"\"", "self._cache_file) if fd: try: os.close(fd) except Exception: logger.error('Can not close", "value: ret[section][param] = value _set_section_values('restapi', ['listen', 'connect_address', 'certfile', 'keyfile', 'keyfile_password',", "if not (value.strip().startswith('-') or '[' in value): value = '[{0}]'.format(value)", "list %s', value) return None for first, second in (('raft',", "_AUTH_ALLOWED_PARAMETERS) if entry: authentication[user_type] = entry if authentication: ret['postgresql']['authentication'] =", "self._local_configuration) self._data_dir = self.__effective_configuration.get('postgresql', {}).get('data_dir', \"\") self._cache_file = os.path.join(self._data_dir, self.__CACHE_FILENAME)", "10, 'retry_timeout': 10, 'maximum_lag_on_failover': 1048576, 'maximum_lag_on_syncnode': -1, 'check_timeline': False, 'master_start_timeout':", "config.pop('zookeeper') pg_config = config['postgresql'] # no 'authentication' in 'postgresql', but", "value def _get_auth(name, params=None): ret = {} for param in", "a directory, loads all yml files in that directory in", "_load_cache(self): if os.path.isfile(self._cache_file): try: with open(self._cache_file) as f: self.set_dynamic_configuration(json.load(f)) except", "list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): # PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..) name, suffix = (param[8:].split('_', 1)", "and 'exhibitor' in config['zookeeper']: config['exhibitor'] = config['zookeeper'].pop('exhibitor') config.pop('zookeeper') pg_config =", "= True return True except Exception: logger.exception('Exception when setting dynamic_configuration')", "= ret.get(first, {}).pop(second, None) if value: value = _parse_list(value) if", "params=None): ret = {} for param in params or _AUTH_ALLOWED_PARAMETERS[:2]:", "logging import os import shutil import tempfile import yaml from", "if param.startswith(PATRONI_ENV_PREFIX): # PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..) 
name, suffix = (param[8:].split('_', 1) +", "'URL', 'PROXY', 'CACERT', 'CERT', 'KEY', 'VERIFY', 'TOKEN', 'CHECKS', 'DC', 'CONSISTENCY',", "deepcopy(self._dynamic_configuration) def check_mode(self, mode): return bool(parse_bool(self._dynamic_configuration.get(mode))) def _load_config_path(self, path): \"\"\"", "if dcs in ret: ret[dcs].update(_get_auth(dcs)) users = {} for param", "return {name: value for name, value in (parameters or {}).items()", "if error: raise ConfigParseError(error) self.__effective_configuration = self._build_effective_configuration({}, self._local_configuration) self._data_dir =", "if 'superuser' not in pg_config['authentication'] and 'pg_rewind' in pg_config: pg_config['authentication']['superuser']", "from collections import defaultdict from copy import deepcopy from patroni", "yaml.safe_load(f) patch_config(overall_config, config) return overall_config def _load_config_file(self): \"\"\"Loads config.yaml from", "oldname): value = _popenv(oldname) name = PATRONI_ENV_PREFIX + 'LOG_' +", "in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): name, suffix = (param[8:].rsplit('_', 1) +", "= self._safe_copy_dynamic_configuration(dynamic_configuration) for name, value in local_configuration.items(): if name ==", "file: %s', self._cache_file) def save_cache(self): if self._cache_needs_saving: tmpfile = fd", "if os.path.isfile(path): files = [path] elif os.path.isdir(path): files = [os.path.join(path,", "= [os.path.join(path, f) for f in sorted(os.listdir(path)) if (f.endswith('.yml') or", "value.strip().startswith('{'): value = '{{{0}}}'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception", "import defaultdict from copy import deepcopy from patroni import PATRONI_ENV_PREFIX", "no 'name' in config if 'name' not in config and", "file in the old format and converting it into new", "return True except Exception: logger.exception('Exception when setting dynamic_configuration') def 
reload_local_configuration(self):", "some of the `dict` interfaces to make it possible to", "'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif name not in ('connect_address', 'listen', 'data_dir', 'pgpass',", "param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): # PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..) name, suffix =", "in _AUTH_ALLOWED_PARAMETERS} # no 'name' in config if 'name' not", "Mimicking some of the `dict` interfaces to make it possible", "%s', self._cache_file) if fd: try: os.close(fd) except Exception: logger.error('Can not", "all yml files in that directory in alphabetical order \"\"\"", "if 'zookeeper' in config and 'exhibitor' in config['zookeeper']: config['exhibitor'] =", "'max_queue_size', 'dir', 'file_size', 'file_num', 'loggers']) _set_section_values('raft', ['data_dir', 'self_addr', 'partner_addrs', 'password',", "applies some values which were set via ENV\"\"\" config =", "try: with open(self._cache_file) as f: self.set_dynamic_configuration(json.load(f)) except Exception: logger.exception('Exception when", "try: return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing list %s',", "parse_int, patch_config logger = logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS = ( 'username', 'password',", "'data_dir', 'pgpass', 'bin_dir']) _set_section_values('log', ['level', 'traceback_level', 'format', 'dateformat', 'max_queue_size', 'dir',", "to make it possible to work with it as with", "f) tmpfile = shutil.move(tmpfile, self._cache_file) self._cache_needs_saving = False except Exception:", "as with the old `config` object. 
\"\"\" PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX", "config.yaml from filesystem and applies some values which were set", "'scope', 'retry_timeout', 'synchronous_mode', 'synchronous_mode_strict', 'synchronous_node_count', 'maximum_lag_on_syncnode' ) pg_config.update({p: config[p] for", "= value for name, oldname in (('level', 'loglevel'), ('format', 'logformat'),", "self._local_configuration = config_env and yaml.safe_load(config_env) or self.__environment_configuration if validator: error", "'postgresql', but 'replication' and 'superuser' if 'authentication' not in pg_config:", "if value: ret[name.lower()][suffix.lower()] = value for dcs in ('etcd', 'etcd3'):", "yaml.safe_load(config_env) or self.__environment_configuration if validator: error = validator(self._local_configuration) if error:", "[''])[:2] # PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<option1,option2,...> # CREATE USER \"<username>\" WITH <OPTIONS>", "os.environ[name] = value for name, oldname in (('level', 'loglevel'), ('format',", "self._cache_file = os.path.join(self._data_dir, self.__CACHE_FILENAME) self._load_cache() self._cache_needs_saving = False @property def", "\"\"\" if os.path.isfile(path): files = [path] elif os.path.isdir(path): files =", "def _popenv(name): return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None) for param in", "'NAMESPACE', 'CONTEXT', 'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL', 'POD_IP', 'PORTS', 'LABELS', 'BYPASS_API_SERVICE', 'KEY_PASSWORD',", "os.path.isdir(path): files = [os.path.join(path, f) for f in sorted(os.listdir(path)) if", "config[name] = int(value) return config @staticmethod def _build_environment_configuration(): ret =", "in ('replication', 'superuser') if u in pg_config} # no 'superuser'", "= _parse_list(value) if value: ret[first][second] = value def _parse_dict(value): if", "_popenv(name + '_' + param) if value: ret[param] = value", "and converting it into new format 4) Mimicking some of", 
"self._safe_copy_dynamic_configuration(dynamic_configuration) for name, value in local_configuration.items(): if name == 'postgresql':", "reads the configuration from the command-line argument if it exists,", "mode): return bool(parse_bool(self._dynamic_configuration.get(mode))) def _load_config_path(self, path): \"\"\" If path is", "or name in ['watchdog']: config[name] = deepcopy(value) if value else", "and 'superuser' if 'authentication' not in pg_config: pg_config['use_pg_rewind'] = 'pg_rewind'", "patroni.exceptions import ConfigParseError from patroni.dcs import ClusterConfig from patroni.postgresql.config import", "directory. This is necessary to be able to restore `dynamic_configuration`", "'zookeeper' in config and 'exhibitor' in config['zookeeper']: config['exhibitor'] = config['zookeeper'].pop('exhibitor')", "return ret def _build_effective_configuration(self, dynamic_configuration, local_configuration): config = self._safe_copy_dynamic_configuration(dynamic_configuration) for", "!= 'use_slots': # replication slots must be enabled/disabled globally config['postgresql'][name]", "tmpfile) if tmpfile and os.path.exists(tmpfile): try: os.remove(tmpfile) except Exception: logger.error('Can", "restapi server expects to get restapi.auth = 'username:password' if 'restapi'", "'maximum_lag_on_syncnode': -1, 'check_timeline': False, 'master_start_timeout': 300, 'master_stop_timeout': 0, 'synchronous_mode': False,", "Exception: logger.error('Can not close temporary file %s', tmpfile) if tmpfile", "if value: ret[param] = value return ret restapi_auth = _get_auth('restapi')", "in config if 'name' not in config and 'name' in", "with open(self._cache_file) as f: self.set_dynamic_configuration(json.load(f)) except Exception: logger.exception('Exception when loading", "'SRV', 'SRV_SUFFIX', 'URL', 'PROXY', 'CACERT', 'CERT', 'KEY', 'VERIFY', 'TOKEN', 'CHECKS',", "raise ConfigParseError(error) self.__effective_configuration = self._build_effective_configuration({}, 
self._local_configuration) self._data_dir = self.__effective_configuration.get('postgresql', {}).get('data_dir',", "('HOST', 'HOSTS', 'PORT', 'USE_PROXIES', 'PROTOCOL', 'SRV', 'SRV_SUFFIX', 'URL', 'PROXY', 'CACERT',", "config) return overall_config def _load_config_file(self): \"\"\"Loads config.yaml from filesystem and", "replication slots must be enabled/disabled globally config['postgresql'][name] = deepcopy(value) elif", "loading `dynamic_configuration` into 'patroni.dynamic.json' file located in local_configuration['postgresql']['data_dir'] directory. This", "'log_datefmt')): _fix_log_env(name, oldname) def _set_section_values(section, params): for param in params:", "_set_section_values('raft', ['data_dir', 'self_addr', 'partner_addrs', 'password', 'bind_addr']) for first, second in", "there is nothing to do self._modify_index = configuration.modify_index configuration =", "of configuration file in the old format and converting it", "not (value.strip().startswith('-') or '[' in value): value = '[{0}]'.format(value) try:", "_get_auth(name, params=None): ret = {} for param in params or", "suffix == 'PASSWORD': password = <PASSWORD>(param) if password: users[name] =", "such as SSL connection parameters for name, value in pg_config['authentication'].items():", "= '{{{0}}}'.format(value) try: return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing", "in sorted(os.listdir(path)) if (f.endswith('.yml') or f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))] else:", "in pg_config pg_config['authentication'] = {u: pg_config[u] for u in ('replication',", "'connect_address', 'config_dir', 'data_dir', 'pgpass', 'bin_dir']) _set_section_values('log', ['level', 'traceback_level', 'format', 'dateformat',", "+ name.upper(), None) for param in ('name', 'namespace', 'scope'): value", "def _fix_log_env(name, oldname): value = _popenv(oldname) name = PATRONI_ENV_PREFIX +", "False # If the index didn't changed there is nothing", 
"local_configuration.items(): if name == 'postgresql': for name, value in (value", "for u in ('replication', 'superuser') if u in pg_config} #", "'USE_SSL', 'SET_ACLS') and name: value = os.environ.pop(param) if suffix ==", "user_type in ('replication', 'superuser', 'rewind'): entry = _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS) if", "configuration from `config.yml` or environment 2) Saving and loading `dynamic_configuration`", "file', path) raise ConfigParseError('invalid config path') overall_config = {} for", "'replication' and 'superuser' if 'authentication' not in pg_config: pg_config['use_pg_rewind'] =", "name in ['watchdog']: config[name] = deepcopy(value) if value else {}", "'ttl': 30, 'loop_wait': 10, 'retry_timeout': 10, 'maximum_lag_on_failover': 1048576, 'maximum_lag_on_syncnode': -1,", "_popenv(name): return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None) for param in ('name',", "for old config # 'exhibitor' inside 'zookeeper': if 'zookeeper' in", "otherwise from the environment self._config_file = configfile and os.path.exists(configfile) and", "-1, 'check_timeline': False, 'master_start_timeout': 300, 'master_stop_timeout': 0, 'synchronous_mode': False, 'synchronous_mode_strict':", "else: config[name] = int(value) return config @staticmethod def _build_environment_configuration(): ret", "in dynamic_configuration.items(): if name == 'postgresql': for name, value in", "it exists, otherwise from the environment self._config_file = configfile and", "import logging import os import shutil import tempfile import yaml", "and parse_int(value) elif suffix in ('HOSTS', 'PORTS', 'CHECKS'): value =", "name.upper() if value and name not in os.environ: os.environ[name] =", "`config` object. 
\"\"\" PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX + 'CONFIGURATION' __CACHE_FILENAME =", "'allowlist_include_members']) _set_section_values('ctl', ['insecure', 'cacert', 'certfile', 'keyfile', 'keyfile_password']) _set_section_values('postgresql', ['listen', 'connect_address',", "if tmpfile and os.path.exists(tmpfile): try: os.remove(tmpfile) except Exception: logger.error('Can not", "for n, v in value.items() if n in _AUTH_ALLOWED_PARAMETERS} #", "= value def _parse_dict(value): if not value.strip().startswith('{'): value = '{{{0}}}'.format(value)", "def dynamic_configuration(self): return deepcopy(self._dynamic_configuration) def check_mode(self, mode): return bool(parse_bool(self._dynamic_configuration.get(mode))) def", "(('restapi', ('http_extra_headers', 'https_extra_headers')), ('log', ('loggers',))): for second in params: value", "handle setting additional connection parameters that may be available #", "`dynamic_configuration` into 'patroni.dynamic.json' file located in local_configuration['postgresql']['data_dir'] directory. 
This is", "DCS if name in ('synchronous_mode', 'synchronous_mode_strict'): config[name] = value else:", "in self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name] = deepcopy(value) elif name in config: #", "Exception: logger.exception('Exception when parsing list %s', value) return None for", "some sane default values * `dynamic_configuration` -- configuration stored in", "1048576, 'maximum_lag_on_syncnode': -1, 'check_timeline': False, 'master_start_timeout': 300, 'master_stop_timeout': 0, 'synchronous_mode':", "in pg_config['authentication'] and 'pg_rewind' in pg_config: pg_config['authentication']['superuser'] = pg_config['pg_rewind'] #", "key in self.__effective_configuration def __getitem__(self, key): return self.__effective_configuration[key] def copy(self):", "= '{username}:{password}'.format(**config['restapi']['authentication']) # special treatment for old config # 'exhibitor'", "self._cache_needs_saving = False except Exception: logger.exception('Exception when saving file: %s',", "logger.error('Can not remove temporary file %s', tmpfile) # configuration could", "return config def _load_cache(self): if os.path.isfile(self._cache_file): try: with open(self._cache_file) as", "'synchronous_mode_strict': False, 'synchronous_node_count': 1, 'standby_cluster': { 'create_replica_methods': '', 'host': '',", "= self._build_effective_configuration({}, self._local_configuration) self._data_dir = self.__effective_configuration.get('postgresql', {}).get('data_dir', \"\") self._cache_file =", "= new_configuration return True else: logger.info('No local configuration items changed.')", "import ClusterConfig from patroni.postgresql.config import CaseInsensitiveDict, ConfigHandler from patroni.utils import", "'CHECKS'): value = value and _parse_list(value) elif suffix in ('LABELS',", "'dateformat', 'max_queue_size', 'dir', 'file_size', 'file_num', 'loggers']) _set_section_values('raft', ['data_dir', 'self_addr', 'partner_addrs',", "but 'replication' 
and 'superuser' if 'authentication' not in pg_config: pg_config['use_pg_rewind']", "configuration = self._load_config_file() if not deep_compare(self._local_configuration, configuration): new_configuration = self._build_effective_configuration(self._dynamic_configuration,", "'<password>' if name and suffix == 'PASSWORD': password = <PASSWORD>(param)", "deepcopy(value) elif name not in config or name in ['watchdog']:", "and applies some values which were set via ENV\"\"\" config", "class is responsible for: 1) Building and giving access to", "\"\"\" If path is a file, loads the yml file", "try: configuration = self._load_config_file() if not deep_compare(self._local_configuration, configuration): new_configuration =", "'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value, True)) elif name != 'use_slots': # replication slots", "'pgpass', 'authentication'): config['postgresql'][name] = deepcopy(value) elif name == 'standby_cluster': for", "set_dynamic_configuration(self, configuration): if isinstance(configuration, ClusterConfig): if self._modify_index == configuration.modify_index: return", "= configuration self.__effective_configuration = new_configuration return True else: logger.info('No local", "wiped 3) Loading of configuration file in the old format", "some values which were set via ENV\"\"\" config = self._load_config_path(self._config_file)", "+ [''])[:2] if suffix in ('HOST', 'HOSTS', 'PORT', 'USE_PROXIES', 'PROTOCOL',", "ret[name.lower()][suffix.lower()] = value for dcs in ('etcd', 'etcd3'): if dcs", "param in params: value = _popenv(section + '_' + param)", "not in config or name in ['watchdog']: config[name] = deepcopy(value)", "PATRONI_<username>_OPTIONS=<option1,option2,...> # CREATE USER \"<username>\" WITH <OPTIONS> PASSWORD '<password>' if", "users = {} for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): name,", "parsing dict %s', value) return None for first, params in", "= value and 
parse_int(value) elif suffix in ('HOSTS', 'PORTS', 'CHECKS'):", "configuration.modify_index: return False # If the index didn't changed there", "if not value.strip().startswith('{'): value = '{{{0}}}'.format(value) try: return yaml.safe_load(value) except", "'partner_addrs'), ('restapi', 'allowlist')): value = ret.get(first, {}).pop(second, None) if value:", "(value or {}).items(): if name in self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name] = deepcopy(value)", "= ret.get('log', {}).pop(second, None) if value: value = parse_int(value) if", "and os.path.isfile(os.path.join(path, f))] else: logger.error('config path %s is neither directory", "return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing dict %s', value)", "n in _AUTH_ALLOWED_PARAMETERS} # no 'name' in config if 'name'", "= config_env and yaml.safe_load(config_env) or self.__environment_configuration if validator: error =", "for f in sorted(os.listdir(path)) if (f.endswith('.yml') or f.endswith('.yaml')) and os.path.isfile(os.path.join(path,", "value for dcs in ('etcd', 'etcd3'): if dcs in ret:", "= self._build_effective_configuration(self._dynamic_configuration, configuration) self._local_configuration = configuration self.__effective_configuration = new_configuration return", "name not in os.environ: os.environ[name] = value for name, oldname", "v for n, v in value.items() if n in _AUTH_ALLOWED_PARAMETERS}", "configfile and os.path.exists(configfile) and configfile if self._config_file: self._local_configuration = self._load_config_file()", "[path] elif os.path.isdir(path): files = [os.path.join(path, f) for f in", "if name in self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name] = deepcopy(value) elif name in", "<OPTIONS> PASSWORD '<password>' if name and suffix == 'PASSWORD': password", "= int(value) return config @staticmethod def _build_environment_configuration(): ret = defaultdict(dict)", "pg_config 
pg_config['authentication'] = {u: pg_config[u] for u in ('replication', 'superuser')", "= PATRONI_ENV_PREFIX + 'LOG_' + name.upper() if value and name", "3) Loading of configuration file in the old format and", "config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None) self._local_configuration = config_env and yaml.safe_load(config_env) or", "restapi_auth = _get_auth('restapi') if restapi_auth: ret['restapi']['authentication'] = restapi_auth authentication =", "pg_config['authentication']['superuser'] = pg_config['pg_rewind'] # handle setting additional connection parameters that", "= config['zookeeper'].pop('exhibitor') config.pop('zookeeper') pg_config = config['postgresql'] # no 'authentication' in", "responsible for: 1) Building and giving access to `effective_configuration` from:", "config # 'exhibitor' inside 'zookeeper': if 'zookeeper' in config and", "in pg_config['authentication'].items(): pg_config['authentication'][name] = {n: v for n, v in", "in config['restapi']: config['restapi']['auth'] = '{username}:{password}'.format(**config['restapi']['authentication']) # special treatment for old", "the command-line argument if it exists, otherwise from the environment", "None try: (fd, tmpfile) = tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir) with os.fdopen(fd, 'w')", "return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing list %s', value)", "_fix_log_env(name, oldname): value = _popenv(oldname) name = PATRONI_ENV_PREFIX + 'LOG_'", "from copy import deepcopy from patroni import PATRONI_ENV_PREFIX from patroni.exceptions", "= value for second in ('max_queue_size', 'file_size', 'file_num'): value =", "suffix in ('USE_PROXIES', 'REGISTER_SERVICE', 'USE_ENDPOINTS', 'BYPASS_API_SERVICE', 'VERIFY'): value = parse_bool(value)", "= _parse_dict(value) elif suffix in ('USE_PROXIES', 'REGISTER_SERVICE', 'USE_ENDPOINTS', 'BYPASS_API_SERVICE', 'VERIFY'):", "== 'PASSWORD': password = <PASSWORD>(param) if password: 
users[name] = {'password':", "'POD_IP', 'PORTS', 'LABELS', 'BYPASS_API_SERVICE', 'KEY_PASSWORD', 'USE_SSL', 'SET_ACLS') and name: value", "CaseInsensitiveDict, ConfigHandler from patroni.utils import deep_compare, parse_bool, parse_int, patch_config logger", "pg_config['authentication'] and 'pg_rewind' in pg_config: pg_config['authentication']['superuser'] = pg_config['pg_rewind'] # handle", "= _parse_dict(value) if value: ret[first][second] = value def _get_auth(name, params=None):", "Config(object): \"\"\" This class is responsible for: 1) Building and", "self._build_environment_configuration() # Patroni reads the configuration from the command-line argument", "Exception: logger.exception('Exception when reloading local configuration from %s', self.config_file) @staticmethod", "_popenv(oldname) name = PATRONI_ENV_PREFIX + 'LOG_' + name.upper() if value", "defaultdict from copy import deepcopy from patroni import PATRONI_ENV_PREFIX from", "if name in ('synchronous_mode', 'synchronous_mode_strict'): config[name] = value else: config[name]", "self._build_effective_configuration({}, self._local_configuration) self._data_dir = self.__effective_configuration.get('postgresql', {}).get('data_dir', \"\") self._cache_file = os.path.join(self._data_dir,", "= {} for param in params or _AUTH_ALLOWED_PARAMETERS[:2]: value =", "ConfigHandler.CMDLINE_OPTIONS[name][1](value)} def _safe_copy_dynamic_configuration(self, dynamic_configuration): config = deepcopy(self.__DEFAULT_CONFIG) for name, value", "'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL', 'POD_IP', 'PORTS', 'LABELS', 'BYPASS_API_SERVICE', 'KEY_PASSWORD', 'USE_SSL', 'SET_ACLS')", "entry = _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS) if entry: authentication[user_type] = entry if", "ret[param] = value def _fix_log_env(name, oldname): value = _popenv(oldname) name", "and loading `dynamic_configuration` into 'patroni.dynamic.json' file located in local_configuration['postgresql']['data_dir'] directory.", "'channel_binding' ) 
def default_validator(conf): if not conf: return \"Config is", "{ 'bin_dir': '', 'use_slots': True, 'parameters': CaseInsensitiveDict({p: v[0] for p,", "or self.__environment_configuration if validator: error = validator(self._local_configuration) if error: raise", "if options: users[name]['options'] = options if users: ret['bootstrap']['users'] = users", "loading file: %s', self._cache_file) def save_cache(self): if self._cache_needs_saving: tmpfile =", "'postgresql': { 'bin_dir': '', 'use_slots': True, 'parameters': CaseInsensitiveDict({p: v[0] for", "# special treatment for old config # 'exhibitor' inside 'zookeeper':", "giving access to `effective_configuration` from: * `Config.__DEFAULT_CONFIG` -- some sane", "is necessary to be able to restore `dynamic_configuration` if DCS", "'SCOPE_LABEL', 'ROLE_LABEL', 'POD_IP', 'PORTS', 'LABELS', 'BYPASS_API_SERVICE', 'KEY_PASSWORD', 'USE_SSL', 'SET_ACLS') and", "not deep_compare(self._dynamic_configuration, configuration): try: self.__effective_configuration = self._build_effective_configuration(configuration, self._local_configuration) self._dynamic_configuration =", "def _build_environment_configuration(): ret = defaultdict(dict) def _popenv(name): return os.environ.pop(PATRONI_ENV_PREFIX +", "in (parameters or {}).items() if name not in ConfigHandler.CMDLINE_OPTIONS or", "if value: ret[first][second] = value def _get_auth(name, params=None): ret =", "import tempfile import yaml from collections import defaultdict from copy", "check_mode(self, mode): return bool(parse_bool(self._dynamic_configuration.get(mode))) def _load_config_path(self, path): \"\"\" If path", "%s', self.config_file) @staticmethod def _process_postgresql_parameters(parameters, is_local=False): return {name: value for", "in pg_config} # no 'superuser' in 'postgresql'.'authentication' if 'superuser' not", "== configuration.modify_index: return False # If the index didn't changed", "second in params: value = ret.get(first, {}).pop(second, None) if 
value:", "return bool(parse_bool(self._dynamic_configuration.get(mode))) def _load_config_path(self, path): \"\"\" If path is a", "is a file, loads the yml file pointed to by", "could be either ClusterConfig or dict def set_dynamic_configuration(self, configuration): if", "options = os.environ.pop(param[:-9] + '_OPTIONS', None) options = options and", "the configuration from the command-line argument if it exists, otherwise", "1, 'standby_cluster': { 'create_replica_methods': '', 'host': '', 'port': '', 'primary_slot_name':", "{name: value for name, value in (parameters or {}).items() if", "'https_extra_headers', 'allowlist', 'allowlist_include_members']) _set_section_values('ctl', ['insecure', 'cacert', 'certfile', 'keyfile', 'keyfile_password']) _set_section_values('postgresql',", "if name == 'postgresql': for name, value in (value or", "self._cache_needs_saving: tmpfile = fd = None try: (fd, tmpfile) =", "'w') as f: fd = None json.dump(self.dynamic_configuration, f) tmpfile =", "+ param) if value: ret[param] = value return ret restapi_auth", "= self._build_environment_configuration() # Patroni reads the configuration from the command-line", "save_cache(self): if self._cache_needs_saving: tmpfile = fd = None try: (fd,", "'keyfile_password', 'cafile', 'ciphers', 'verify_client', 'http_extra_headers', 'https_extra_headers', 'allowlist', 'allowlist_include_members']) _set_section_values('ctl', ['insecure',", "local_configuration['postgresql']['data_dir'] directory. This is necessary to be able to restore", "in pg_config: pg_config['authentication']['superuser'] = pg_config['pg_rewind'] # handle setting additional connection", "self.__environment_configuration = self._build_environment_configuration() # Patroni reads the configuration from the", "+ '_OPTIONS', None) options = options and _parse_list(options) if options:", "loads the yml file pointed to by path. 
If path", "for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): name, suffix = (param[8:].rsplit('_',", "no 'authentication' in 'postgresql', but 'replication' and 'superuser' if 'authentication'", "%s', value) return None for first, params in (('restapi', ('http_extra_headers',", "`effective_configuration` from: * `Config.__DEFAULT_CONFIG` -- some sane default values *", "'synchronous_node_count', 'maximum_lag_on_syncnode' ) pg_config.update({p: config[p] for p in updated_fields if", "import yaml from collections import defaultdict from copy import deepcopy", "and 'name' in pg_config: config['name'] = pg_config['name'] updated_fields = (", "'standby_cluster': for name, value in (value or {}).items(): if name", "dynamic_configuration(self): return deepcopy(self._dynamic_configuration) def check_mode(self, mode): return bool(parse_bool(self._dynamic_configuration.get(mode))) def _load_config_path(self,", "DCS * `local_configuration` -- configuration from `config.yml` or environment 2)", "1) Building and giving access to `effective_configuration` from: * `Config.__DEFAULT_CONFIG`", "interfaces to make it possible to work with it as", "to restore `dynamic_configuration` if DCS was accidentally wiped 3) Loading", "name in self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name] = deepcopy(value) elif name in config:", "os.remove(tmpfile) except Exception: logger.error('Can not remove temporary file %s', tmpfile)", "self._local_configuration = self._load_config_file() else: config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None) self._local_configuration =", "and os.path.exists(tmpfile): try: os.remove(tmpfile) except Exception: logger.error('Can not remove temporary", "f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))] else: logger.error('config path %s is neither", "Exception: logger.exception('Exception when parsing dict %s', value) return None for", "path): \"\"\" If path is a file, loads the yml", "= 
'username:password' if 'restapi' in config and 'authentication' in config['restapi']:", "new_configuration = self._build_effective_configuration(self._dynamic_configuration, configuration) self._local_configuration = configuration self.__effective_configuration = new_configuration", "'master_stop_timeout': 0, 'synchronous_mode': False, 'synchronous_mode_strict': False, 'synchronous_node_count': 1, 'standby_cluster': {", "(value or {}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value, True)) elif", "options if users: ret['bootstrap']['users'] = users return ret def _build_effective_configuration(self,", "= configuration.data if not deep_compare(self._dynamic_configuration, configuration): try: self.__effective_configuration = self._build_effective_configuration(configuration,", "which were set via ENV\"\"\" config = self._load_config_path(self._config_file) patch_config(config, self.__environment_configuration)", "pg_config['authentication'].items(): pg_config['authentication'][name] = {n: v for n, v in value.items()", "if u in pg_config} # no 'superuser' in 'postgresql'.'authentication' if", "if self._cache_needs_saving: tmpfile = fd = None try: (fd, tmpfile)", "return True else: logger.info('No local configuration items changed.') except Exception:", "variables present in __DEFAULT_CONFIG allowed to be overridden from DCS", "if value: value = parse_bool(value) if value is not None:", "'allowlist')): value = ret.get(first, {}).pop(second, None) if value: value =", "value: ret[name.lower()][suffix.lower()] = value for dcs in ('etcd', 'etcd3'): if", "param.startswith(PATRONI_ENV_PREFIX): # PATRONI_(ETCD|CONSUL|ZOOKEEPER|EXHIBITOR|...)_(HOSTS?|PORT|..) 
name, suffix = (param[8:].split('_', 1) + [''])[:2]", "def _process_postgresql_parameters(parameters, is_local=False): return {name: value for name, value in", "os.environ.pop(param[:-9] + '_OPTIONS', None) options = options and _parse_list(options) if", "def __init__(self, configfile, validator=default_validator): self._modify_index = -1 self._dynamic_configuration = {}", "('restapi', 'allowlist')): value = ret.get(first, {}).pop(second, None) if value: value", "v[0] for p, v in ConfigHandler.CMDLINE_OPTIONS.items() if p not in", "if not deep_compare(self._dynamic_configuration, configuration): try: self.__effective_configuration = self._build_effective_configuration(configuration, self._local_configuration) self._dynamic_configuration", "deep_compare, parse_bool, parse_int, patch_config logger = logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS = (", "if entry: authentication[user_type] = entry if authentication: ret['postgresql']['authentication'] = authentication", "+ [''])[:2] # PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<option1,option2,...> # CREATE USER \"<username>\" WITH", "= _popenv(section + '_' + param) if value: ret[section][param] =", "os.path.exists(tmpfile): try: os.remove(tmpfile) except Exception: logger.error('Can not remove temporary file", "= _popenv(param) if value: ret[param] = value def _fix_log_env(name, oldname):", "= value def _fix_log_env(name, oldname): value = _popenv(oldname) name =", "= value _set_section_values('restapi', ['listen', 'connect_address', 'certfile', 'keyfile', 'keyfile_password', 'cafile', 'ciphers',", "'PORTS', 'CHECKS'): value = value and _parse_list(value) elif suffix in", "= ret.get(first, {}).pop(second, None) if value: value = parse_bool(value) if", "'parameters': CaseInsensitiveDict({p: v[0] for p, v in ConfigHandler.CMDLINE_OPTIONS.items() if p", "# configuration could be either ClusterConfig or dict def set_dynamic_configuration(self,", "for dcs in ('etcd', 'etcd3'): if dcs in ret: 
ret[dcs].update(_get_auth(dcs))", "%s is neither directory nor file', path) raise ConfigParseError('invalid config", "for name, value in (parameters or {}).items() if name not", "self.config_file: try: configuration = self._load_config_file() if not deep_compare(self._local_configuration, configuration): new_configuration", "config['exhibitor'] = config['zookeeper'].pop('exhibitor') config.pop('zookeeper') pg_config = config['postgresql'] # no 'authentication'", "4) Mimicking some of the `dict` interfaces to make it", "treatment for old config # 'exhibitor' inside 'zookeeper': if 'zookeeper'", "'ROLE_LABEL', 'POD_IP', 'PORTS', 'LABELS', 'BYPASS_API_SERVICE', 'KEY_PASSWORD', 'USE_SSL', 'SET_ACLS') and name:", "in (value or {}).items(): if name in self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name] =", "it possible to work with it as with the old", "ConfigHandler.CMDLINE_OPTIONS.items() if p not in ('wal_keep_segments', 'wal_keep_size')}) }, 'watchdog': {", "if self._modify_index == configuration.modify_index: return False # If the index", "USER \"<username>\" WITH <OPTIONS> PASSWORD '<password>' if name and suffix", "in (('raft', 'partner_addrs'), ('restapi', 'allowlist')): value = ret.get(first, {}).pop(second, None)", "None) if value: value = parse_int(value) if value is not", "except Exception: logger.error('Can not remove temporary file %s', tmpfile) #", "for param in ('name', 'namespace', 'scope'): value = _popenv(param) if", "'listen', 'data_dir', 'pgpass', 'authentication'): config['postgresql'][name] = deepcopy(value) elif name ==", "f: fd = None json.dump(self.dynamic_configuration, f) tmpfile = shutil.move(tmpfile, self._cache_file)", "_AUTH_ALLOWED_PARAMETERS} # no 'name' in config if 'name' not in", "in ('HOSTS', 'PORTS', 'CHECKS'): value = value and _parse_list(value) elif", "and suffix == 'PASSWORD': password = <PASSWORD>(param) if password: users[name]", "param) if value: ret[section][param] = value _set_section_values('restapi', 
['listen', 'connect_address', 'certfile',", "ret.get('log', {}).pop(second, None) if value: value = parse_int(value) if value", "deep_compare(self._dynamic_configuration, configuration): try: self.__effective_configuration = self._build_effective_configuration(configuration, self._local_configuration) self._dynamic_configuration = configuration", "'port': '', 'primary_slot_name': '', 'restore_command': '', 'archive_cleanup_command': '', 'recovery_min_apply_delay': ''", "file: %s', self._cache_file) if fd: try: os.close(fd) except Exception: logger.error('Can", "not in pg_config: pg_config['use_pg_rewind'] = 'pg_rewind' in pg_config pg_config['authentication'] =", "oldname) def _set_section_values(section, params): for param in params: value =", "value: value = _parse_dict(value) if value: ret[first][second] = value def", "config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif name not in ('connect_address', 'listen', 'data_dir', 'pgpass', 'authentication'):", "globally config['postgresql'][name] = deepcopy(value) elif name not in config or", "parse_bool(value) if value is not None: ret[first][second] = value for", "{} self.__environment_configuration = self._build_environment_configuration() # Patroni reads the configuration from", "Exception: logger.exception('Exception when setting dynamic_configuration') def reload_local_configuration(self): if self.config_file: try:", "return key in self.__effective_configuration def __getitem__(self, key): return self.__effective_configuration[key] def", "== 'PORT': value = value and parse_int(value) elif suffix in", "value = ret.get(first, {}).pop(second, None) if value: value = _parse_dict(value)", "in ('wal_keep_segments', 'wal_keep_size')}) }, 'watchdog': { 'mode': 'automatic', } }", "= (param[8:].split('_', 1) + [''])[:2] if suffix in ('HOST', 'HOSTS',", "'name' not in config and 'name' in pg_config: config['name'] =", "second in ('max_queue_size', 'file_size', 'file_num'): value = 
ret.get('log', {}).pop(second, None)", "setting additional connection parameters that may be available # in", "in value): value = '[{0}]'.format(value) try: return yaml.safe_load(value) except Exception:", "parse_bool, parse_int, patch_config logger = logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS = ( 'username',", "default) def __contains__(self, key): return key in self.__effective_configuration def __getitem__(self,", "= ret.get(first, {}).pop(second, None) if value: value = _parse_dict(value) if", "name in config: # only variables present in __DEFAULT_CONFIG allowed", "@property def dynamic_configuration(self): return deepcopy(self._dynamic_configuration) def check_mode(self, mode): return bool(parse_bool(self._dynamic_configuration.get(mode)))", "= value def _get_auth(name, params=None): ret = {} for param", "name, suffix = (param[8:].split('_', 1) + [''])[:2] if suffix in", "in (('restapi', ('http_extra_headers', 'https_extra_headers')), ('log', ('loggers',))): for second in params:", "not conf: return \"Config is empty.\" class Config(object): \"\"\" This", "'archive_cleanup_command': '', 'recovery_min_apply_delay': '' }, 'postgresql': { 'bin_dir': '', 'use_slots':", "If path is a file, loads the yml file pointed", "pg_config: config['name'] = pg_config['name'] updated_fields = ( 'name', 'scope', 'retry_timeout',", "from filesystem and applies some values which were set via", "files = [os.path.join(path, f) for f in sorted(os.listdir(path)) if (f.endswith('.yml')", "when parsing list %s', value) return None for first, second", "_set_section_values('log', ['level', 'traceback_level', 'format', 'dateformat', 'max_queue_size', 'dir', 'file_size', 'file_num', 'loggers'])", "config def _load_cache(self): if os.path.isfile(self._cache_file): try: with open(self._cache_file) as f:", "def _safe_copy_dynamic_configuration(self, dynamic_configuration): config = deepcopy(self.__DEFAULT_CONFIG) for name, value in", "= value and _parse_list(value) elif suffix in 
('LABELS', 'SET_ACLS'): value", "in params or _AUTH_ALLOWED_PARAMETERS[:2]: value = _popenv(name + '_' +", "and _parse_list(value) elif suffix in ('LABELS', 'SET_ACLS'): value = _parse_dict(value)", "# no 'authentication' in 'postgresql', but 'replication' and 'superuser' if", "default_validator(conf): if not conf: return \"Config is empty.\" class Config(object):", "loads all yml files in that directory in alphabetical order", "None) self._local_configuration = config_env and yaml.safe_load(config_env) or self.__environment_configuration if validator:", "validator(self._local_configuration) if error: raise ConfigParseError(error) self.__effective_configuration = self._build_effective_configuration({}, self._local_configuration) self._data_dir", "f))] else: logger.error('config path %s is neither directory nor file',", "{} for param in params or _AUTH_ALLOWED_PARAMETERS[:2]: value = _popenv(name", "= (param[8:].rsplit('_', 1) + [''])[:2] # PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<option1,option2,...> # CREATE", "'restapi' in config and 'authentication' in config['restapi']: config['restapi']['auth'] = '{username}:{password}'.format(**config['restapi']['authentication'])", "self.__effective_configuration def __getitem__(self, key): return self.__effective_configuration[key] def copy(self): return deepcopy(self.__effective_configuration)", "\"\"\" PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX + 'CONFIGURATION' __CACHE_FILENAME = 'patroni.dynamic.json' __DEFAULT_CONFIG", "if isinstance(configuration, ClusterConfig): if self._modify_index == configuration.modify_index: return False #", "* `local_configuration` -- configuration from `config.yml` or environment 2) Saving", "def _parse_list(value): if not (value.strip().startswith('-') or '[' in value): value", "tmpfile = shutil.move(tmpfile, self._cache_file) self._cache_needs_saving = False except Exception: logger.exception('Exception", "in (('level', 'loglevel'), ('format', 'logformat'), 
('dateformat', 'log_datefmt')): _fix_log_env(name, oldname) def", "didn't changed there is nothing to do self._modify_index = configuration.modify_index", "by path. If path is a directory, loads all yml", "import PATRONI_ENV_PREFIX from patroni.exceptions import ConfigParseError from patroni.dcs import ClusterConfig", "from DCS if name in ('synchronous_mode', 'synchronous_mode_strict'): config[name] = value", "in params: value = _popenv(section + '_' + param) if", "name and suffix == 'PASSWORD': password = <PASSWORD>(param) if password:", "in config['zookeeper']: config['exhibitor'] = config['zookeeper'].pop('exhibitor') config.pop('zookeeper') pg_config = config['postgresql'] #", "restapi_auth authentication = {} for user_type in ('replication', 'superuser', 'rewind'):", "reloading local configuration from %s', self.config_file) @staticmethod def _process_postgresql_parameters(parameters, is_local=False):", "do self._modify_index = configuration.modify_index configuration = configuration.data if not deep_compare(self._dynamic_configuration,", "try: return yaml.safe_load(value) except Exception: logger.exception('Exception when parsing dict %s',", "in config and 'authentication' in config['restapi']: config['restapi']['auth'] = '{username}:{password}'.format(**config['restapi']['authentication']) #", "configuration stored in DCS * `local_configuration` -- configuration from `config.yml`", "10, 'maximum_lag_on_failover': 1048576, 'maximum_lag_on_syncnode': -1, 'check_timeline': False, 'master_start_timeout': 300, 'master_stop_timeout':", "'exhibitor' in config['zookeeper']: config['exhibitor'] = config['zookeeper'].pop('exhibitor') config.pop('zookeeper') pg_config = config['postgresql']", "params: value = ret.get(first, {}).pop(second, None) if value: value =", "'namespace', 'scope'): value = _popenv(param) if value: ret[param] = value", "\"Config is empty.\" class Config(object): \"\"\" This class is responsible", "with open(fname) as f: config = yaml.safe_load(f) 
patch_config(overall_config, config) return", "('USE_PROXIES', 'REGISTER_SERVICE', 'USE_ENDPOINTS', 'BYPASS_API_SERVICE', 'VERIFY'): value = parse_bool(value) if value:", "'username', 'password', 'sslmode', 'sslcert', 'sslkey', 'sslpassword', 'sslrootcert', 'sslcrl', 'sslcrldir', 'gssencmode',", "except Exception: logger.exception('Exception when setting dynamic_configuration') def reload_local_configuration(self): if self.config_file:", "value and _parse_list(value) elif suffix in ('LABELS', 'SET_ACLS'): value =", "exists, otherwise from the environment self._config_file = configfile and os.path.exists(configfile)", "'username:password' if 'restapi' in config and 'authentication' in config['restapi']: config['restapi']['auth']", "in params: value = ret.get(first, {}).pop(second, None) if value: value", "or '[' in value): value = '[{0}]'.format(value) try: return yaml.safe_load(value)", "{n: v for n, v in value.items() if n in", "users[name] = {'password': password} options = os.environ.pop(param[:-9] + '_OPTIONS', None)", "p not in ('wal_keep_segments', 'wal_keep_size')}) }, 'watchdog': { 'mode': 'automatic',", "None) if value: value = _parse_list(value) if value: ret[first][second] =", "ret = {} for param in params or _AUTH_ALLOWED_PARAMETERS[:2]: value", "'maximum_lag_on_failover': 1048576, 'maximum_lag_on_syncnode': -1, 'check_timeline': False, 'master_start_timeout': 300, 'master_stop_timeout': 0,", "name, value in (parameters or {}).items() if name not in", "value in (parameters or {}).items() if name not in ConfigHandler.CMDLINE_OPTIONS", "f) for f in sorted(os.listdir(path)) if (f.endswith('.yml') or f.endswith('.yaml')) and", "== 'postgresql': for name, value in (value or {}).items(): if", "elif name not in config or name in ['watchdog']: config[name]", "'superuser') if u in pg_config} # no 'superuser' in 'postgresql'.'authentication'", "def config_file(self): return self._config_file @property def dynamic_configuration(self): return 
deepcopy(self._dynamic_configuration) def", "\"\"\" This class is responsible for: 1) Building and giving", "-- configuration from `config.yml` or environment 2) Saving and loading", "present in __DEFAULT_CONFIG allowed to be overridden from DCS if", "sane default values * `dynamic_configuration` -- configuration stored in DCS", "'{username}:{password}'.format(**config['restapi']['authentication']) # special treatment for old config # 'exhibitor' inside", "config if 'name' not in config and 'name' in pg_config:", "either ClusterConfig or dict def set_dynamic_configuration(self, configuration): if isinstance(configuration, ClusterConfig):", "entry if authentication: ret['postgresql']['authentication'] = authentication for param in list(os.environ.keys()):", "'keyfile', 'keyfile_password', 'cafile', 'ciphers', 'verify_client', 'http_extra_headers', 'https_extra_headers', 'allowlist', 'allowlist_include_members']) _set_section_values('ctl',", "parameters for name, value in pg_config['authentication'].items(): pg_config['authentication'][name] = {n: v", "'restore_command': '', 'archive_cleanup_command': '', 'recovery_min_apply_delay': '' }, 'postgresql': { 'bin_dir':", "'sslrootcert', 'sslcrl', 'sslcrldir', 'gssencmode', 'channel_binding' ) def default_validator(conf): if not", "be overridden from DCS if name in ('synchronous_mode', 'synchronous_mode_strict'): config[name]", "password = <PASSWORD>(param) if password: users[name] = {'password': password} options", "_parse_dict(value) elif suffix in ('USE_PROXIES', 'REGISTER_SERVICE', 'USE_ENDPOINTS', 'BYPASS_API_SERVICE', 'VERIFY'): value", "params: value = _popenv(section + '_' + param) if value:", "conf: return \"Config is empty.\" class Config(object): \"\"\" This class", "suffix = (param[8:].rsplit('_', 1) + [''])[:2] # PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<option1,option2,...> #", "pg_config.update({p: config[p] for p in updated_fields if p in config})", "value = parse_int(value) if 
value is not None: ret['log'][second] =", "tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir) with os.fdopen(fd, 'w') as f: fd = None", "PATRONI_ENV_PREFIX from patroni.exceptions import ConfigParseError from patroni.dcs import ClusterConfig from", "os.path.exists(configfile) and configfile if self._config_file: self._local_configuration = self._load_config_file() else: config_env", "configuration from the command-line argument if it exists, otherwise from", "'CONTEXT', 'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL', 'POD_IP', 'PORTS', 'LABELS', 'BYPASS_API_SERVICE', 'KEY_PASSWORD', 'USE_SSL',", "# restapi server expects to get restapi.auth = 'username:password' if", "configfile, validator=default_validator): self._modify_index = -1 self._dynamic_configuration = {} self.__environment_configuration =", "-1 self._dynamic_configuration = {} self.__environment_configuration = self._build_environment_configuration() # Patroni reads", "a file, loads the yml file pointed to by path.", "def _load_cache(self): if os.path.isfile(self._cache_file): try: with open(self._cache_file) as f: self.set_dynamic_configuration(json.load(f))", "n, v in value.items() if n in _AUTH_ALLOWED_PARAMETERS} # no", "value.items() if n in _AUTH_ALLOWED_PARAMETERS} # no 'name' in config", "__CACHE_FILENAME = 'patroni.dynamic.json' __DEFAULT_CONFIG = { 'ttl': 30, 'loop_wait': 10,", "True else: logger.info('No local configuration items changed.') except Exception: logger.exception('Exception", "'cacert', 'certfile', 'keyfile', 'keyfile_password']) _set_section_values('postgresql', ['listen', 'connect_address', 'config_dir', 'data_dir', 'pgpass',", "'password', 'sslmode', 'sslcert', 'sslkey', 'sslpassword', 'sslrootcert', 'sslcrl', 'sslcrldir', 'gssencmode', 'channel_binding'", "argument if it exists, otherwise from the environment self._config_file =", "value for name, value in (parameters or {}).items() if name", "located in local_configuration['postgresql']['data_dir'] directory. 
This is necessary to be able", "into new format 4) Mimicking some of the `dict` interfaces", "options = options and _parse_list(options) if options: users[name]['options'] = options", "self._cache_file) self._cache_needs_saving = False except Exception: logger.exception('Exception when saving file:", "stored in DCS * `local_configuration` -- configuration from `config.yml` or", "from patroni import PATRONI_ENV_PREFIX from patroni.exceptions import ConfigParseError from patroni.dcs", "name, oldname in (('level', 'loglevel'), ('format', 'logformat'), ('dateformat', 'log_datefmt')): _fix_log_env(name,", "else {} # restapi server expects to get restapi.auth =", "+ '_' + param) if value: ret[section][param] = value _set_section_values('restapi',", "'PROXY', 'CACERT', 'CERT', 'KEY', 'VERIFY', 'TOKEN', 'CHECKS', 'DC', 'CONSISTENCY', 'REGISTER_SERVICE',", "and 'authentication' in config['restapi']: config['restapi']['auth'] = '{username}:{password}'.format(**config['restapi']['authentication']) # special treatment", "json import logging import os import shutil import tempfile import", "in pg_config: config['name'] = pg_config['name'] updated_fields = ( 'name', 'scope',", "= parse_int(value) if value is not None: ret['log'][second] = value", "self._dynamic_configuration = configuration self._cache_needs_saving = True return True except Exception:", "for name, value in local_configuration.items(): if name == 'postgresql': for", "os.environ.pop(param) if suffix == 'PORT': value = value and parse_int(value)", "'pg_rewind' in pg_config: pg_config['authentication']['superuser'] = pg_config['pg_rewind'] # handle setting additional", "copy import deepcopy from patroni import PATRONI_ENV_PREFIX from patroni.exceptions import", "class Config(object): \"\"\" This class is responsible for: 1) Building", "and os.path.exists(configfile) and configfile if self._config_file: self._local_configuration = self._load_config_file() else:", "= [path] elif os.path.isdir(path): files = 
[os.path.join(path, f) for f", "['insecure', 'cacert', 'certfile', 'keyfile', 'keyfile_password']) _set_section_values('postgresql', ['listen', 'connect_address', 'config_dir', 'data_dir',", "elif suffix in ('USE_PROXIES', 'REGISTER_SERVICE', 'USE_ENDPOINTS', 'BYPASS_API_SERVICE', 'VERIFY'): value =", "= self._load_config_path(self._config_file) patch_config(config, self.__environment_configuration) return config def _load_cache(self): if os.path.isfile(self._cache_file):", "'scope'): value = _popenv(param) if value: ret[param] = value def", ") def default_validator(conf): if not conf: return \"Config is empty.\"", "ret.get(first, {}).pop(second, None) if value: value = parse_bool(value) if value", "'recovery_min_apply_delay': '' }, 'postgresql': { 'bin_dir': '', 'use_slots': True, 'parameters':", "'TOKEN', 'CHECKS', 'DC', 'CONSISTENCY', 'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'NAMESPACE', 'CONTEXT', 'USE_ENDPOINTS', 'SCOPE_LABEL',", "'LABELS', 'BYPASS_API_SERVICE', 'KEY_PASSWORD', 'USE_SSL', 'SET_ACLS') and name: value = os.environ.pop(param)", "in (('restapi', 'allowlist_include_members'), ('ctl', 'insecure')): value = ret.get(first, {}).pop(second, None)", "if value: value = _parse_list(value) if value: ret[first][second] = value", "is a directory, loads all yml files in that directory", "'authentication' not in pg_config: pg_config['use_pg_rewind'] = 'pg_rewind' in pg_config pg_config['authentication']", "('max_queue_size', 'file_size', 'file_num'): value = ret.get('log', {}).pop(second, None) if value:", "if not deep_compare(self._local_configuration, configuration): new_configuration = self._build_effective_configuration(self._dynamic_configuration, configuration) self._local_configuration =", "name in ('synchronous_mode', 'synchronous_mode_strict'): config[name] = value else: config[name] =", "p in config}) return config def get(self, key, default=None): return", "files in that directory in alphabetical order \"\"\" if os.path.isfile(path):", 
"os.path.isfile(os.path.join(path, f))] else: logger.error('config path %s is neither directory nor", "self._modify_index == configuration.modify_index: return False # If the index didn't", "config[p] for p in updated_fields if p in config}) return", "'check_timeline': False, 'master_start_timeout': 300, 'master_stop_timeout': 0, 'synchronous_mode': False, 'synchronous_mode_strict': False,", "ret.get(first, {}).pop(second, None) if value: value = _parse_list(value) if value:", "\"\"\"Loads config.yaml from filesystem and applies some values which were", "object. \"\"\" PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX + 'CONFIGURATION' __CACHE_FILENAME = 'patroni.dynamic.json'", "'bind_addr']) for first, second in (('restapi', 'allowlist_include_members'), ('ctl', 'insecure')): value", "self._load_config_file() if not deep_compare(self._local_configuration, configuration): new_configuration = self._build_effective_configuration(self._dynamic_configuration, configuration) self._local_configuration", "to be overridden from DCS if name in ('synchronous_mode', 'synchronous_mode_strict'):", "('format', 'logformat'), ('dateformat', 'log_datefmt')): _fix_log_env(name, oldname) def _set_section_values(section, params): for", "in __DEFAULT_CONFIG allowed to be overridden from DCS if name", "('synchronous_mode', 'synchronous_mode_strict'): config[name] = value else: config[name] = int(value) return", "with os.fdopen(fd, 'w') as f: fd = None json.dump(self.dynamic_configuration, f)", "if value: ret[param] = value def _fix_log_env(name, oldname): value =", "if 'authentication' not in pg_config: pg_config['use_pg_rewind'] = 'pg_rewind' in pg_config", "= defaultdict(dict) def _popenv(name): return os.environ.pop(PATRONI_ENV_PREFIX + name.upper(), None) for", "configuration): if isinstance(configuration, ClusterConfig): if self._modify_index == configuration.modify_index: return False", "= {} for param in list(os.environ.keys()): if param.startswith(PATRONI_ENV_PREFIX): name, suffix", 
"True)) elif name != 'use_slots': # replication slots must be", "_parse_list(value): if not (value.strip().startswith('-') or '[' in value): value =", "{}).items(): if name == 'parameters': config['postgresql'][name].update(self._process_postgresql_parameters(value)) elif name not in", "that directory in alphabetical order \"\"\" if os.path.isfile(path): files =", "be able to restore `dynamic_configuration` if DCS was accidentally wiped", "able to restore `dynamic_configuration` if DCS was accidentally wiped 3)", "ConfigHandler from patroni.utils import deep_compare, parse_bool, parse_int, patch_config logger =", "# in the configuration file, such as SSL connection parameters", "in config: # only variables present in __DEFAULT_CONFIG allowed to", "'postgresql'.'authentication' if 'superuser' not in pg_config['authentication'] and 'pg_rewind' in pg_config:", "parse_int(value) if value is not None: ret['log'][second] = value def", "in ('LABELS', 'SET_ACLS'): value = _parse_dict(value) elif suffix in ('USE_PROXIES',", "'PASSWORD': password = <PASSWORD>(param) if password: users[name] = {'password': password}", "the `dict` interfaces to make it possible to work with", "'dir', 'file_size', 'file_num', 'loggers']) _set_section_values('raft', ['data_dir', 'self_addr', 'partner_addrs', 'password', 'bind_addr'])", "configuration): new_configuration = self._build_effective_configuration(self._dynamic_configuration, configuration) self._local_configuration = configuration self.__effective_configuration =", "config['postgresql'] # no 'authentication' in 'postgresql', but 'replication' and 'superuser'", "= value def _parse_list(value): if not (value.strip().startswith('-') or '[' in", "def _load_config_path(self, path): \"\"\" If path is a file, loads", "in ('replication', 'superuser', 'rewind'): entry = _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS) if entry:", "{} # restapi server expects to get restapi.auth = 'username:password'", "self._config_file: 
self._local_configuration = self._load_config_file() else: config_env = os.environ.pop(self.PATRONI_CONFIG_VARIABLE, None) self._local_configuration", "{}).items(): if name in self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name] = deepcopy(value) elif name", "fd = None try: (fd, tmpfile) = tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir) with", "file, such as SSL connection parameters for name, value in", "if validator: error = validator(self._local_configuration) if error: raise ConfigParseError(error) self.__effective_configuration", "directory, loads all yml files in that directory in alphabetical", "'VERIFY', 'TOKEN', 'CHECKS', 'DC', 'CONSISTENCY', 'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'NAMESPACE', 'CONTEXT', 'USE_ENDPOINTS',", "os.path.join(self._data_dir, self.__CACHE_FILENAME) self._load_cache() self._cache_needs_saving = False @property def config_file(self): return", "first, params in (('restapi', ('http_extra_headers', 'https_extra_headers')), ('log', ('loggers',))): for second", "changed.') except Exception: logger.exception('Exception when reloading local configuration from %s',", "= <PASSWORD>(param) if password: users[name] = {'password': password} options =", "local_configuration): config = self._safe_copy_dynamic_configuration(dynamic_configuration) for name, value in local_configuration.items(): if", "elif name in config: # only variables present in __DEFAULT_CONFIG", "when reloading local configuration from %s', self.config_file) @staticmethod def _process_postgresql_parameters(parameters,", "tmpfile = fd = None try: (fd, tmpfile) = tempfile.mkstemp(prefix=self.__CACHE_FILENAME,", "'CONSISTENCY', 'REGISTER_SERVICE', 'SERVICE_CHECK_INTERVAL', 'NAMESPACE', 'CONTEXT', 'USE_ENDPOINTS', 'SCOPE_LABEL', 'ROLE_LABEL', 'POD_IP', 'PORTS',", "(param[8:].split('_', 1) + [''])[:2] if suffix in ('HOST', 'HOSTS', 'PORT',", "'USE_ENDPOINTS', 'BYPASS_API_SERVICE', 'VERIFY'): value = parse_bool(value) if value: 
ret[name.lower()][suffix.lower()] =", "ret['restapi']['authentication'] = restapi_auth authentication = {} for user_type in ('replication',", "self.__effective_configuration = self._build_effective_configuration({}, self._local_configuration) self._data_dir = self.__effective_configuration.get('postgresql', {}).get('data_dir', \"\") self._cache_file", "300, 'master_stop_timeout': 0, 'synchronous_mode': False, 'synchronous_mode_strict': False, 'synchronous_node_count': 1, 'standby_cluster':", "PASSWORD '<password>' if name and suffix == 'PASSWORD': password =", "in self.__effective_configuration def __getitem__(self, key): return self.__effective_configuration[key] def copy(self): return", "int(value) return config @staticmethod def _build_environment_configuration(): ret = defaultdict(dict) def", "dynamic_configuration') def reload_local_configuration(self): if self.config_file: try: configuration = self._load_config_file() if", "yaml.safe_load(value) except Exception: logger.exception('Exception when parsing list %s', value) return", "_get_auth(user_type, _AUTH_ALLOWED_PARAMETERS) if entry: authentication[user_type] = entry if authentication: ret['postgresql']['authentication']", "configuration self._cache_needs_saving = True return True except Exception: logger.exception('Exception when", "None for first, params in (('restapi', ('http_extra_headers', 'https_extra_headers')), ('log', ('loggers',))):", "deepcopy(self.__DEFAULT_CONFIG) for name, value in dynamic_configuration.items(): if name == 'postgresql':", "elif name == 'standby_cluster': for name, value in (value or", "'USE_PROXIES', 'PROTOCOL', 'SRV', 'SRV_SUFFIX', 'URL', 'PROXY', 'CACERT', 'CERT', 'KEY', 'VERIFY',", "self._config_file @property def dynamic_configuration(self): return deepcopy(self._dynamic_configuration) def check_mode(self, mode): return", "return False # If the index didn't changed there is", "from patroni.utils import deep_compare, parse_bool, parse_int, patch_config logger = 
logging.getLogger(__name__)", "if param.startswith(PATRONI_ENV_PREFIX): name, suffix = (param[8:].rsplit('_', 1) + [''])[:2] #", "if it exists, otherwise from the environment self._config_file = configfile", "if value and name not in os.environ: os.environ[name] = value", "try: (fd, tmpfile) = tempfile.mkstemp(prefix=self.__CACHE_FILENAME, dir=self._data_dir) with os.fdopen(fd, 'w') as", "(('level', 'loglevel'), ('format', 'logformat'), ('dateformat', 'log_datefmt')): _fix_log_env(name, oldname) def _set_section_values(section,", "logger.exception('Exception when parsing dict %s', value) return None for first,", "pg_config['name'] updated_fields = ( 'name', 'scope', 'retry_timeout', 'synchronous_mode', 'synchronous_mode_strict', 'synchronous_node_count',", "* `Config.__DEFAULT_CONFIG` -- some sane default values * `dynamic_configuration` --", "False, 'synchronous_node_count': 1, 'standby_cluster': { 'create_replica_methods': '', 'host': '', 'port':", "and name: value = os.environ.pop(param) if suffix == 'PORT': value", "'sslmode', 'sslcert', 'sslkey', 'sslpassword', 'sslrootcert', 'sslcrl', 'sslcrldir', 'gssencmode', 'channel_binding' )", "logger.exception('Exception when setting dynamic_configuration') def reload_local_configuration(self): if self.config_file: try: configuration", "__DEFAULT_CONFIG allowed to be overridden from DCS if name in", "dcs in ('etcd', 'etcd3'): if dcs in ret: ret[dcs].update(_get_auth(dcs)) users", "or {}).items(): if name in self.__DEFAULT_CONFIG['standby_cluster']: config['standby_cluster'][name] = deepcopy(value) elif", "(('restapi', 'allowlist_include_members'), ('ctl', 'insecure')): value = ret.get(first, {}).pop(second, None) if", "if users: ret['bootstrap']['users'] = users return ret def _build_effective_configuration(self, dynamic_configuration,", "= pg_config['pg_rewind'] # handle setting additional connection parameters that may", "config['restapi']['auth'] = 
'{username}:{password}'.format(**config['restapi']['authentication']) # special treatment for old config #", "allowed to be overridden from DCS if name in ('synchronous_mode',", "name not in ('connect_address', 'listen', 'data_dir', 'pgpass', 'authentication'): config['postgresql'][name] =", "'rewind'): entry = _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS) if entry: authentication[user_type] = entry", "old `config` object. \"\"\" PATRONI_CONFIG_VARIABLE = PATRONI_ENV_PREFIX + 'CONFIGURATION' __CACHE_FILENAME", "ConfigParseError('invalid config path') overall_config = {} for fname in files:", "or f.endswith('.yaml')) and os.path.isfile(os.path.join(path, f))] else: logger.error('config path %s is", "updated_fields = ( 'name', 'scope', 'retry_timeout', 'synchronous_mode', 'synchronous_mode_strict', 'synchronous_node_count', 'maximum_lag_on_syncnode'", "('HOSTS', 'PORTS', 'CHECKS'): value = value and _parse_list(value) elif suffix", "name != 'use_slots': # replication slots must be enabled/disabled globally", "# PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<option1,option2,...> # CREATE USER \"<username>\" WITH <OPTIONS> PASSWORD", "elif os.path.isdir(path): files = [os.path.join(path, f) for f in sorted(os.listdir(path))", "'synchronous_mode_strict', 'synchronous_node_count', 'maximum_lag_on_syncnode' ) pg_config.update({p: config[p] for p in updated_fields", "deepcopy(value) if value else {} # restapi server expects to", "= parse_bool(value) if value: ret[name.lower()][suffix.lower()] = value for dcs in", "return config @staticmethod def _build_environment_configuration(): ret = defaultdict(dict) def _popenv(name):", "= {} self.__environment_configuration = self._build_environment_configuration() # Patroni reads the configuration", "If the index didn't changed there is nothing to do", "connection parameters that may be available # in the configuration", "to by path. 
If path is a directory, loads all", "= validator(self._local_configuration) if error: raise ConfigParseError(error) self.__effective_configuration = self._build_effective_configuration({}, self._local_configuration)", "to work with it as with the old `config` object.", "_AUTH_ALLOWED_PARAMETERS = ( 'username', 'password', 'sslmode', 'sslcert', 'sslkey', 'sslpassword', 'sslrootcert',", "from: * `Config.__DEFAULT_CONFIG` -- some sane default values * `dynamic_configuration`", "logging.getLogger(__name__) _AUTH_ALLOWED_PARAMETERS = ( 'username', 'password', 'sslmode', 'sslcert', 'sslkey', 'sslpassword',", "'', 'host': '', 'port': '', 'primary_slot_name': '', 'restore_command': '', 'archive_cleanup_command':", "and 'pg_rewind' in pg_config: pg_config['authentication']['superuser'] = pg_config['pg_rewind'] # handle setting", "the configuration file, such as SSL connection parameters for name,", "os.fdopen(fd, 'w') as f: fd = None json.dump(self.dynamic_configuration, f) tmpfile", "ClusterConfig): if self._modify_index == configuration.modify_index: return False # If the", "= deepcopy(value) elif name == 'standby_cluster': for name, value in", "= ( 'name', 'scope', 'retry_timeout', 'synchronous_mode', 'synchronous_mode_strict', 'synchronous_node_count', 'maximum_lag_on_syncnode' )", "'authentication' in 'postgresql', but 'replication' and 'superuser' if 'authentication' not", "'superuser' in 'postgresql'.'authentication' if 'superuser' not in pg_config['authentication'] and 'pg_rewind'", "not in pg_config['authentication'] and 'pg_rewind' in pg_config: pg_config['authentication']['superuser'] = pg_config['pg_rewind']", "= {'password': password} options = os.environ.pop(param[:-9] + '_OPTIONS', None) options", "pg_config['authentication'][name] = {n: v for n, v in value.items() if", "files: with open(fname) as f: config = yaml.safe_load(f) patch_config(overall_config, config)", "'', 'archive_cleanup_command': '', 'recovery_min_apply_delay': '' }, 'postgresql': { 
'bin_dir': '',", "'HOSTS', 'PORT', 'USE_PROXIES', 'PROTOCOL', 'SRV', 'SRV_SUFFIX', 'URL', 'PROXY', 'CACERT', 'CERT',", "'sslcrldir', 'gssencmode', 'channel_binding' ) def default_validator(conf): if not conf: return", "pg_config: pg_config['use_pg_rewind'] = 'pg_rewind' in pg_config pg_config['authentication'] = {u: pg_config[u]", "configuration file, such as SSL connection parameters for name, value", "= _get_auth(user_type, _AUTH_ALLOWED_PARAMETERS) if entry: authentication[user_type] = entry if authentication:", "'name' in config if 'name' not in config and 'name'", "try: self.__effective_configuration = self._build_effective_configuration(configuration, self._local_configuration) self._dynamic_configuration = configuration self._cache_needs_saving =", "name, suffix = (param[8:].rsplit('_', 1) + [''])[:2] # PATRONI_<username>_PASSWORD=<password>, PATRONI_<username>_OPTIONS=<option1,option2,...>", "value = _popenv(oldname) name = PATRONI_ENV_PREFIX + 'LOG_' + name.upper()", "'pgpass', 'bin_dir']) _set_section_values('log', ['level', 'traceback_level', 'format', 'dateformat', 'max_queue_size', 'dir', 'file_size',", "file located in local_configuration['postgresql']['data_dir'] directory. This is necessary to be", "logger.info('No local configuration items changed.') except Exception: logger.exception('Exception when reloading", "* `dynamic_configuration` -- configuration stored in DCS * `local_configuration` --" ]
[ "what is stored in their dirpath, FSDV's will do their", "= 'My Foovalue' fs.foo = test_foo self.assertEqual(fs.foo, test_foo) self.assertEqual(fs.__dict__['_real'].foo, test_foo)", "directory view is reading properties self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope') def test_ignored(self):", "test_DeleteFolder(self): # Make sure a deleted folder goes away self.assertTrue(hasattr(self.ob.fake_skin,", "a different class under the fake meta_type # \"FOLDER\" and", "provisions of the Zope Public License, # Version 2.1 (ZPL).", "that a folder inside the fake skin really is of", "\"\"\" def setUp(self): from Products.CMFCore.DirectoryView import addDirectoryViews from Products.CMFCore.DirectoryView import", "FSDVTest from .base.testcase import WritableFSDVTest class DirectoryViewPathTests(unittest.TestCase): \"\"\" These test", "def setUp(self): from Products.CMFCore.DirectoryView import _dirreg WritableFSDVTest.setUp(self) self.saved_cfg_debug_mode = getConfiguration().debug_mode", "DummyDirectoryViewSurrogate() registerMetaType('FOLDER', DummyDirectoryView) # In order to regenerate the FSDV", "Check that a warning was raised. self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning))", "'My Foovalue' fs.foo = test_foo self.assertEqual(fs.foo, test_foo) self.assertEqual(fs.__dict__['_real'].foo, test_foo) del", "Copyright (c) 2002 Zope Foundation and Contributors. # # This", "because there was # no equivalent __delattr__ on the surrogate", "text = ('DirectoryView fake_skin refers to a non-existing path %r'", "a folder inside the fake skin really is of type", "fs.foo self.assertRaises(AttributeError, getattr, fs, 'foo') self.assertRaises(AttributeError, getattr, fs.__dict__['_real'], 'foo') class", "should accompany this distribution. 
# THIS SOFTWARE IS PROVIDED \"AS", "test_DirectoryViewMetadata(self): # Test to determine if metadata shows up correctly", "'test2.3'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.3') def test_NewFolder(self): # See", "'test2')) def test_DeleteAddEditMethod(self): # Check that if we delete a", "weirdity. remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) # add method back to", "del fs.foo self.assertRaises(AttributeError, getattr, fs, 'foo') self.assertRaises(AttributeError, getattr, fs.__dict__['_real'], 'foo')", "test_EditMethod(self): # See if an edited method exhibits its new", "AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING,", "'test2.2'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.2') # edit method self._writeFile('test2.py',", "\"return 'test2.3'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.3') def test_NewFolder(self): #", "copy of the ZPL should accompany this distribution. 
# THIS", "back to the fake skin folder self._writeFile('test2.py', \"return 'test2.2'\", self.use_dir_mtime)", "def test_ignored(self): # Test that \"artifact\" files and dirs are", "remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) # add method back to the", "dirs are ignored, # even when a custom ignore list", "Products.CMFCore.DirectoryView import _findProductForPath cmfpath = sys.modules['Products.CMFCore'].__path__[0] self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', '')) cmfpath", "('DirectoryView fake_skin refers to a non-existing path %r' % file)", "self._writeFile('test1.py', \"return 'new test1'\") # add a new folder mkdir(join(self.skin_path_name,", "attributes on the non-persistent surrogate writes them # into the", "= True # initialise skins self._registerDirectory(self) # add a method", "appropriate skin and only do nothing in the case where", "registerMetaType('FOLDER', DummyDirectoryView) # In order to regenerate the FSDV data", "test1 method self._writeFile('test1.py', \"return 'new test1'\") # add a new", "by addDirectoryViews # appears as a DirectoryViewSurrogate due # to", "ignore list is also honored auto_ign = ('#test1', '.test1', 'test1~')", "# setting attributes on the non-persistent surrogate writes them #", "Products.CMFCore.DirectoryView import DirectoryViewSurrogate testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate)) def test_DirectoryViewFolderCustom(self):", "bad in situations # where you only want to store", "appropriate skin can't be found. \"\"\" def setUp(self): from Products.CMFCore.DirectoryView", "# right now... metatype_registry = DirectoryView._dirreg._meta_types if 'FOLDER' in metatype_registry:", "if given a really wacky path def test_UnhandleableExpandPath(self): file =", "= self.saved_cfg_debug_mode WritableFSDVTest.tearDown(self) def test_AddNewMethod(self): # See if a method", "and Contributors. 
# # This software is subject to the", "IS\" AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES", "\"artifact\" files and dirs are ignored for name in '#test1',", "fs.foo = test_foo self.assertEqual(fs.foo, test_foo) self.assertEqual(fs.__dict__['_real'].foo, test_foo) del fs.foo self.assertRaises(AttributeError,", "self.assertEqual(self.ob.fake_skin.test2(), 'test2.2') # edit method self._writeFile('test2.py', \"return 'test2.3'\", self.use_dir_mtime) #", "# check self.assertEqual(self.ob.fake_skin.test2(), 'test2.2') # edit method self._writeFile('test2.py', \"return 'test2.3'\",", "test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DirectoryViewPathTests)) suite.addTest(unittest.makeSuite(DirectoryViewTests)) suite.addTest(unittest.makeSuite(DirectoryViewIgnoreTests)) suite.addTest(unittest.makeSuite(DirectoryViewFolderTests)) suite.addTest(unittest.makeSuite(DebugModeTests)) return", "AND ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE", "test_ignored(self): # Test that \"artifact\" files and dirs are ignored", "self.assertEqual(fs.foo, test_foo) self.assertEqual(fs.__dict__['_real'].foo, test_foo) del fs.foo self.assertRaises(AttributeError, getattr, fs, 'foo')", "works self.assertEqual(self.ob.fake_skin.test1(), 'test1') def test_properties(self): # Make sure the directory", "test_DirectoryViewFolderCustom(self): # Now we register a different class under the", "self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate)) class DebugModeTests(WritableFSDVTest): def setUp(self): from Products.CMFCore.DirectoryView import", "subject to the provisions of the Zope Public License, #", "2002 Zope Foundation and Contributors. # # This software is", "PARTICULAR PURPOSE. 
# ############################################################################## \"\"\" Unit tests for DirectoryView module.", "DummyFolder from .base.testcase import FSDVTest from .base.testcase import WritableFSDVTest class", "a DirectoryViewSurrogate due # to Acquisition hackery. from Products.CMFCore.DirectoryView import", "self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate)) def test_DirectoryViewMethod(self): # Check if DirectoryView method works", "method, then add it back, # then edit it, the", "INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. # ##############################################################################", "sure a deleted method goes away remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2'))", "# FOR A PARTICULAR PURPOSE. # ############################################################################## \"\"\" Unit tests", "('CVS', 'SVN', 'test_manual_ignore.py') self._registerDirectory(self, ignore=self.manual_ign) def test_ignored(self): # Test that", "Foovalue' fs.foo = test_foo self.assertEqual(fs.foo, test_foo) self.assertEqual(fs.__dict__['_real'].foo, test_foo) del fs.foo", "right now... metatype_registry = DirectoryView._dirreg._meta_types if 'FOLDER' in metatype_registry: del", "in metatype_registry: del metatype_registry['FOLDER'] FSDVTest.tearDown(self) def test_DirectoryViewMetadata(self): # Test to", "\"return 'new test1'\") # add a new folder mkdir(join(self.skin_path_name, 'test3'))", "raised. self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) text = ('DirectoryView fake_skin refers", "# This software is subject to the provisions of the", ".metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.title, 'test_directory Title') def test_DirectoryViewMetadataOnPropertyManager(self):", "accompany this distribution. 
# THIS SOFTWARE IS PROVIDED \"AS IS\"", "is reading properties self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope') def test_ignored(self): # Test", "on the non-persistent surrogate writes them # into the persistent", "persistent DirectoryView as well. fs = self.ob.fake_skin test_foo = 'My", "skin really is of type # DirectoryViewSurrogate from Products.CMFCore.DirectoryView import", "edit the test1 method self._writeFile('test1.py', \"return 'new test1'\") # add", "from App.config import getConfiguration from . import _globals from .base.dummy", "method self._writeFile('test1.py', \"return 'new test1'\") # add a new folder", "ignored, # even when a custom ignore list is used;", "mktemp() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.ob.fake_skin.manage_properties(file) self.assertEqual(self.ob.fake_skin.objectIds(), []) #", "we need to remove and # register again, that way", "'FOLDER' in metatype_registry: del metatype_registry['FOLDER'] FSDVTest.tearDown(self) def test_DirectoryViewMetadata(self): # Test", "App.config import getConfiguration from . import _globals from .base.dummy import", "= self.manual_ign + auto_ign + ('test_manual_ignore',) visible = self.ob.fake_skin.objectIds() for", "Products.CMFCore.DirectoryView import addDirectoryViews from Products.CMFCore.DirectoryView import registerDirectory registerDirectory('fake_skins', _globals) self.ob", "DirectoryView._dirreg._meta_types if 'FOLDER' in metatype_registry: del metatype_registry['FOLDER'] FSDVTest.tearDown(self) def test_DirectoryViewMetadata(self):", "from .base.dummy import DummyFolder from .base.testcase import FSDVTest from .base.testcase", "this distribution. # THIS SOFTWARE IS PROVIDED \"AS IS\" AND", "possible to cause ZODB writes because # setting attributes on", "view is reading properties self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope') def test_ignored(self): #", "no way to unregister anything # right now... 
metatype_registry =", "DirectoryView # This is nasty, but there is no way", "register again, that way the newly registered meta_type is used", "skin folder can be found self.assertEqual(self.ob.fake_skin.test2(), 'test2') def test_EditMethod(self): #", "the DirectoryView notices. # This exercises yet another Win32 mtime", "def test_DirectoryViewFolderDefault(self): # Test that a folder inside the fake", "to delete first. self.assertTrue(hasattr(self.ob.fake_skin.test_directory, 'README.txt')) self._deleteFile(join('test_directory', 'README.txt'), self.use_dir_mtime) self._deleteDirectory('test_directory', self.use_dir_mtime)", "self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory')) def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DirectoryViewPathTests)) suite.addTest(unittest.makeSuite(DirectoryViewTests)) suite.addTest(unittest.makeSuite(DirectoryViewIgnoreTests))", "LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST", "dirs.keys()) self.assertEqual(self.ob.fake_skin.getDirPath(), 'Products.CMFCore.tests:fake_skins/fake_skin') class DirectoryViewTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def", "__delattr__ on the surrogate that would clean up # the", "tempfile import mktemp from App.config import getConfiguration from . 
import", "the newly registered meta_type is used self.ob._delObject('fake_skin') self._registerDirectory(self) testfolder =", "self.manual_ign = ('CVS', 'SVN', 'test_manual_ignore.py') self._registerDirectory(self, ignore=self.manual_ign) def test_ignored(self): #", "and remove them before the # transaction has ended -", "FSDVTest.tearDown(self) def test_DirectoryViewMetadata(self): # Test to determine if metadata shows", "that has a corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.title,", "= self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate)) class DebugModeTests(WritableFSDVTest): def setUp(self): from Products.CMFCore.DirectoryView", "registerDirectory registerDirectory('fake_skins', _globals) self.ob = DummyFolder() addDirectoryViews(self.ob, 'fake_skins', _globals) def", "# to Acquisition hackery. from Products.CMFCore.DirectoryView import DirectoryViewSurrogate self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate))", "See if an edited method exhibits its new behaviour self.assertEqual(self.ob.fake_skin.test1(),", "\"\"\" Unit tests for DirectoryView module. \"\"\" import sys import", "def test_DirectoryViewMethod(self): # Check if DirectoryView method works self.assertEqual(self.ob.fake_skin.test1(), 'test1')", "import DirectoryViewSurrogate testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate)) def test_DirectoryViewFolderCustom(self): #", "it, the DirectoryView notices. 
# This exercises yet another Win32", "to a non-existing path %r' % file) self.assertTrue(text in str(w[-1].message))", "from Products.CMFCore.DirectoryView import _generateKey key = _generateKey('Products.CMFCore', 'tests') self.assertEqual(key.split(':')[0], 'Products.CMFCore')", "from Products.CMFCore.DirectoryView import addDirectoryViews from Products.CMFCore.DirectoryView import registerDirectory registerDirectory('fake_skins', _globals)", "store markers and remove them before the # transaction has", "folder self._writeFile('test2.py', \"return 'test2.2'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.2') #", "be found. \"\"\" def setUp(self): from Products.CMFCore.DirectoryView import addDirectoryViews from", "_generateKey('Products.CMFCore', 'tests\\foo') self.assertTrue(subkey.startswith(key)) def test__findProductForPath(self): from Products.CMFCore.DirectoryView import _findProductForPath cmfpath", "be found self.assertEqual(self.ob.fake_skin.test2(), 'test2') def test_EditMethod(self): # See if an", "ANY AND ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED,", "FSDV's will do their best to find an appropriate skin", "again... from Products.CMFCore.DirectoryView import DirectoryView from Products.CMFCore.DirectoryView import registerMetaType class", "setUp(self): from Products.CMFCore.DirectoryView import addDirectoryViews from Products.CMFCore.DirectoryView import registerDirectory registerDirectory('fake_skins',", "def test_getDirectoryInfo(self): skin = self.ob.fake_skin skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin') self.assertTrue(hasattr(self.ob.fake_skin, 'test1'), self.ob.fake_skin.getDirPath()) #", "DirectoryView as well. 
fs = self.ob.fake_skin test_foo = 'My Foovalue'", "= self.ob.fake_skin.test_directory self.assertEqual(testfolder.getProperty('title'), 'test_directory Title') def test_DirectoryViewFolderDefault(self): # Test that", "auto_ign + ('test_manual_ignore',) visible = self.ob.fake_skin.objectIds() for name in must_ignore:", "delete first. self.assertTrue(hasattr(self.ob.fake_skin.test_directory, 'README.txt')) self._deleteFile(join('test_directory', 'README.txt'), self.use_dir_mtime) self._deleteDirectory('test_directory', self.use_dir_mtime) self.assertFalse(hasattr(self.ob.fake_skin,", "'test2')) # add method back to the fake skin folder", "order to regenerate the FSDV data we need to remove", "setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def test_addDirectoryViews(self): # Test addDirectoryViews # also", "Products.CMFCore.DirectoryView import DirectoryViewSurrogate self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate)) def test_DirectoryViewMethod(self): # Check if", "Public License, # Version 2.1 (ZPL). A copy of the", "'test1'), self.ob.fake_skin.getDirPath()) # Test we do nothing if given a", "self.assertTrue(hasattr(self.ob.fake_skin.test_directory, 'README.txt')) self._deleteFile(join('test_directory', 'README.txt'), self.use_dir_mtime) self._deleteDirectory('test_directory', self.use_dir_mtime) self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory')) def", "non-existing path %r' % file) self.assertTrue(text in str(w[-1].message)) # this", "self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', '')) cmfpath = join(cmfpath, 'tests') self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', 'tests'))", "self._registerDirectory(self, ignore=self.manual_ign) def test_ignored(self): # Test that \"artifact\" files and", "tests for DirectoryView module. 
\"\"\" import sys import unittest import", "class DummyDirectoryView(DirectoryView): def __of__(self, parent): return DummyDirectoryViewSurrogate() registerMetaType('FOLDER', DummyDirectoryView) #", "# add a new folder mkdir(join(self.skin_path_name, 'test3')) info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath)", "test__generateKey(self): from Products.CMFCore.DirectoryView import _generateKey key = _generateKey('Products.CMFCore', 'tests') self.assertEqual(key.split(':')[0],", "metatype_registry = DirectoryView._dirreg._meta_types if 'FOLDER' in metatype_registry: del metatype_registry['FOLDER'] FSDVTest.tearDown(self)", "determine if metadata shows up correctly on a # FSDV", "import _dirreg WritableFSDVTest.setUp(self) self.saved_cfg_debug_mode = getConfiguration().debug_mode getConfiguration().debug_mode = True #", "from .base.testcase import WritableFSDVTest class DirectoryViewPathTests(unittest.TestCase): \"\"\" These test that,", "= _generateKey('Products.CMFCore', 'tests') self.assertEqual(key.split(':')[0], 'Products.CMFCore') subkey = _generateKey('Products.CMFCore', 'tests\\foo') self.assertTrue(subkey.startswith(key))", "def test_AddNewMethod(self): # See if a method added to the", "method self._writeFile('test2.py', \"return 'test2.3'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.3') def", "registered meta_type is used self.ob._delObject('fake_skin') self._registerDirectory(self) testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder,", "and dirs are ignored for name in '#test1', 'CVS', '.test1',", "a really wacky path def test_UnhandleableExpandPath(self): file = mktemp() with", "path %r' % file) self.assertTrue(text in str(w[-1].message)) # this test", "self.assertTrue(hasattr(self.ob.fake_skin, 'test1'), self.ob.fake_skin.getDirPath()) # Test we do nothing if given", "# This exercises yet another Win32 mtime weirdity. 
remove(join(self.skin_path_name, 'test2.py'))", "test_foo) del fs.foo self.assertRaises(AttributeError, getattr, fs, 'foo') self.assertRaises(AttributeError, getattr, fs.__dict__['_real'],", "'fake_skins', _globals) def test__generateKey(self): from Products.CMFCore.DirectoryView import _generateKey key =", "# Test we do nothing if given a really wacky", "that registerDirectory creates keys in the right format. def test_registerDirectoryKeys(self):", "self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) text = ('DirectoryView fake_skin refers to", "self.assertEqual(self.ob.fake_skin.test2(), 'test2') def test_EditMethod(self): # See if an edited method", "- they never got removed because there was # no", "getConfiguration from . import _globals from .base.dummy import DummyFolder from", "for name in must_ignore: self.assertFalse(name in visible) class DirectoryViewFolderTests(FSDVTest): def", "self.saved_cfg_debug_mode = getConfiguration().debug_mode getConfiguration().debug_mode = True # initialise skins self._registerDirectory(self)", "method back to the fake skin folder self._writeFile('test2.py', \"return 'test2.2'\",", "list is used; and that the # custom ignore list", "'Products.CMFCore.tests:fake_skins/fake_skin') class DirectoryViewTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def test_addDirectoryViews(self): #", "import _generateKey key = _generateKey('Products.CMFCore', 'tests') self.assertEqual(key.split(':')[0], 'Products.CMFCore') subkey =", "the surrogate that would clean up # the persistent DirectoryView", "test_getDirectoryInfo(self): skin = self.ob.fake_skin skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin') self.assertTrue(hasattr(self.ob.fake_skin, 'test1'), self.ob.fake_skin.getDirPath()) # Test", "self.assertTrue(issubclass(w[-1].category, UserWarning)) text = ('DirectoryView fake_skin refers to a non-existing", "class DirectoryViewTests(FSDVTest): def 
setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def test_addDirectoryViews(self): # Test", "persistent DirectoryView as well. This is bad in situations #", "__of__(self, parent): return DummyDirectoryViewSurrogate() registerMetaType('FOLDER', DummyDirectoryView) # In order to", "= getConfiguration().debug_mode getConfiguration().debug_mode = True # initialise skins self._registerDirectory(self) #", "surrogate writes them # into the persistent DirectoryView as well.", "a corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.title, 'test_directory Title')", "can be found self.assertEqual(self.ob.fake_skin.test2(), 'test2') def test_EditMethod(self): # See if", "os import remove from os.path import join from tempfile import", "test_registerDirectoryKeys(self): from Products.CMFCore.DirectoryView import _dirreg dirs = _dirreg._directories self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in", "exhibits its new behaviour self.assertEqual(self.ob.fake_skin.test1(), 'new test1') def test_DeleteMethod(self): #", "in dirs, dirs.keys()) self.assertEqual(self.ob.fake_skin.getDirPath(), 'Products.CMFCore.tests:fake_skins/fake_skin') class DirectoryViewTests(FSDVTest): def setUp(self): FSDVTest.setUp(self)", "# appears as a DirectoryViewSurrogate due # to Acquisition hackery.", "equivalent __delattr__ on the surrogate that would clean up #", "of type # DirectoryViewSurrogate from Products.CMFCore.DirectoryView import DirectoryViewSurrogate testfolder =", "OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED", "test_DeleteAddEditMethod(self): # Check that if we delete a method, then", "that has a corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.getProperty('title'),", "also honored auto_ign = ('#test1', '.test1', 'test1~') must_ignore = self.manual_ign", "% name) def test_surrogate_writethrough(self): # CMF Collector 316: It is", "TO, 
THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT,", "to the fake skin folder self._writeFile('test2.py', \"return 'test2.2'\", self.use_dir_mtime) #", "'tests') self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', 'tests')) def test_getDirectoryInfo(self): skin = self.ob.fake_skin skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin')", "name in '#test1', 'CVS', '.test1', 'test1~': self.assertTrue(name not in self.ob.fake_skin.objectIds(),", "again, that way the newly registered meta_type is used self.ob._delObject('fake_skin')", "is used; and that the # custom ignore list is", "would clean up # the persistent DirectoryView as well. fs", "method exhibits its new behaviour self.assertEqual(self.ob.fake_skin.test1(), 'new test1') def test_DeleteMethod(self):", "warnings.simplefilter('always') self.ob.fake_skin.manage_properties(file) self.assertEqual(self.ob.fake_skin.objectIds(), []) # Check that a warning was", "def test_registerDirectoryKeys(self): from Products.CMFCore.DirectoryView import _dirreg dirs = _dirreg._directories self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin'", "if we delete a method, then add it back, #", "really wacky path def test_UnhandleableExpandPath(self): file = mktemp() with warnings.catch_warnings(record=True)", "import _dirreg dirs = _dirreg._directories self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs, dirs.keys()) self.assertEqual(self.ob.fake_skin.getDirPath(),", "= DummyFolder() addDirectoryViews(self.ob, 'fake_skins', _globals) def test__generateKey(self): from Products.CMFCore.DirectoryView import", "to the skin folder can be found self.assertEqual(self.ob.fake_skin.test2(), 'test2') def", "'test2'\") # edit the test1 method self._writeFile('test1.py', \"return 'new test1'\")", "Contributors. # # This software is subject to the provisions", "exercises yet another Win32 mtime weirdity. 
remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2'))", "add a new folder mkdir(join(self.skin_path_name, 'test3')) info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath) info.reload()", "DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES", "a method, then add it back, # then edit it,", "This is nasty, but there is no way to unregister", "(ZPL). A copy of the ZPL should accompany this distribution.", "is no way to unregister anything # right now... metatype_registry", "test__findProductForPath(self): from Products.CMFCore.DirectoryView import _findProductForPath cmfpath = sys.modules['Products.CMFCore'].__path__[0] self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore',", "A copy of the ZPL should accompany this distribution. #", "info.use_dir_mtime def tearDown(self): getConfiguration().debug_mode = self.saved_cfg_debug_mode WritableFSDVTest.tearDown(self) def test_AddNewMethod(self): #", "metatype_registry['FOLDER'] FSDVTest.tearDown(self) def test_DirectoryViewMetadata(self): # Test to determine if metadata", "Products.CMFCore.DirectoryView import registerMetaType class DummyDirectoryViewSurrogate: pass class DummyDirectoryView(DirectoryView): def __of__(self,", "found. 
\"\"\" def setUp(self): from Products.CMFCore.DirectoryView import addDirectoryViews from Products.CMFCore.DirectoryView", "the skin folder can be found self.assertEqual(self.ob.fake_skin.test2(), 'test2') def test_EditMethod(self):", "check self.assertEqual(self.ob.fake_skin.test2(), 'test2.2') # edit method self._writeFile('test2.py', \"return 'test2.3'\", self.use_dir_mtime)", "test that, no matter what is stored in their dirpath,", "testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.getProperty('title'), 'test_directory Title') def test_DirectoryViewFolderDefault(self): # Test", "used self.ob._delObject('fake_skin') self._registerDirectory(self) testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate)) class DebugModeTests(WritableFSDVTest):", ".base.dummy import DummyFolder from .base.testcase import FSDVTest from .base.testcase import", "tearDown(self): getConfiguration().debug_mode = self.saved_cfg_debug_mode WritableFSDVTest.tearDown(self) def test_AddNewMethod(self): # See if", "we delete a method, then add it back, # then", "a method to the fake skin folder self._writeFile('test2.py', \"return 'test2'\")", "import join from tempfile import mktemp from App.config import getConfiguration", "dirs are ignored for name in '#test1', 'CVS', '.test1', 'test1~':", "the # transaction has ended - they never got removed", "used; and that the # custom ignore list is also", "def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def tearDown(self): from Products.CMFCore import DirectoryView", "def __of__(self, parent): return DummyDirectoryViewSurrogate() registerMetaType('FOLDER', DummyDirectoryView) # In order", "THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL", "ZODB writes because # setting attributes on the non-persistent surrogate", "want to store markers and remove them before the #", "fake skin really is of type # DirectoryViewSurrogate from 
Products.CMFCore.DirectoryView", "% file) self.assertTrue(text in str(w[-1].message)) # this test tests that", "barf pass def test_DirectoryViewExists(self): # Check DirectoryView added by addDirectoryViews", "ignored for name in '#test1', 'CVS', '.test1', 'test1~': self.assertTrue(name not", "to the provisions of the Zope Public License, # Version", "Products.CMFCore.DirectoryView import _dirreg dirs = _dirreg._directories self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs, dirs.keys())", "OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A", "setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def tearDown(self): from Products.CMFCore import DirectoryView #", "to find an appropriate skin and only do nothing in", "the test1 method self._writeFile('test1.py', \"return 'new test1'\") # add a", "# Now we register a different class under the fake", "if a method added to the skin folder can be", "See if a new folder shows up self.assertFalse(hasattr(self.ob.fake_skin, 'test3')) def", "'tests') self.assertEqual(key.split(':')[0], 'Products.CMFCore') subkey = _generateKey('Products.CMFCore', 'tests\\foo') self.assertTrue(subkey.startswith(key)) def test__findProductForPath(self):", "'new test1'\") # add a new folder mkdir(join(self.skin_path_name, 'test3')) info", "file) self.assertTrue(text in str(w[-1].message)) # this test tests that registerDirectory", "from Products.CMFCore.DirectoryView import registerMetaType class DummyDirectoryViewSurrogate: pass class DummyDirectoryView(DirectoryView): def", "def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def test_addDirectoryViews(self): # Test addDirectoryViews #", "self.assertFalse(hasattr(self.ob.fake_skin, 'test3')) def test_DeleteFolder(self): # Make sure a deleted folder", "way the newly registered meta_type is used self.ob._delObject('fake_skin') self._registerDirectory(self) testfolder", "self.assertTrue(name not in self.ob.fake_skin.objectIds(), '%s 
not ignored' % name) def", "# Copyright (c) 2002 Zope Foundation and Contributors. # #", "def setUp(self): FSDVTest.setUp(self) self.manual_ign = ('CVS', 'SVN', 'test_manual_ignore.py') self._registerDirectory(self, ignore=self.manual_ign)", "the right format. def test_registerDirectoryKeys(self): from Products.CMFCore.DirectoryView import _dirreg dirs", "# Test that \"artifact\" files and dirs are ignored for", "writes because # setting attributes on the non-persistent surrogate writes", "A PARTICULAR PURPOSE. # ############################################################################## \"\"\" Unit tests for DirectoryView", "setUp(self): FSDVTest.setUp(self) self.manual_ign = ('CVS', 'SVN', 'test_manual_ignore.py') self._registerDirectory(self, ignore=self.manual_ign) def", "nothing in the case where an appropriate skin can't be", "add a method to the fake skin folder self._writeFile('test2.py', \"return", "WritableFSDVTest.tearDown(self) def test_AddNewMethod(self): # See if a method added to", "new behaviour self.assertEqual(self.ob.fake_skin.test1(), 'new test1') def test_DeleteMethod(self): # Make sure", "folder shows up self.assertFalse(hasattr(self.ob.fake_skin, 'test3')) def test_DeleteFolder(self): # Make sure", "mtime weirdity. 
remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) # add method back", "do their best to find an appropriate skin and only", "'.test1', 'test1~') must_ignore = self.manual_ign + auto_ign + ('test_manual_ignore',) visible", "= self.ob.fake_skin.test_directory self.assertEqual(testfolder.title, 'test_directory Title') def test_DirectoryViewMetadataOnPropertyManager(self): # Test to", "'foo') class DirectoryViewIgnoreTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self.manual_ign = ('CVS', 'SVN',", "registerDirectory('fake_skins', _globals) self.ob = DummyFolder() addDirectoryViews(self.ob, 'fake_skins', _globals) def test__generateKey(self):", "# Make sure a deleted method goes away remove(join(self.skin_path_name, 'test2.py'))", "path def test_UnhandleableExpandPath(self): file = mktemp() with warnings.catch_warnings(record=True) as w:", "self.manual_ign + auto_ign + ('test_manual_ignore',) visible = self.ob.fake_skin.objectIds() for name", "cause ZODB writes because # setting attributes on the non-persistent", "well. 
This is bad in situations # where you only", "def test_DirectoryViewMetadata(self): # Test to determine if metadata shows up", "type # DirectoryViewSurrogate from Products.CMFCore.DirectoryView import DirectoryViewSurrogate testfolder = self.ob.fake_skin.test_directory", "mkdir(join(self.skin_path_name, 'test3')) info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath) info.reload() self.use_dir_mtime = info.use_dir_mtime def", "check self.assertEqual(self.ob.fake_skin.test2(), 'test2.3') def test_NewFolder(self): # See if a new", "_dirreg WritableFSDVTest.setUp(self) self.saved_cfg_debug_mode = getConfiguration().debug_mode getConfiguration().debug_mode = True # initialise", "class DirectoryViewPathTests(unittest.TestCase): \"\"\" These test that, no matter what is", "These test that, no matter what is stored in their", "def setUp(self): from Products.CMFCore.DirectoryView import addDirectoryViews from Products.CMFCore.DirectoryView import registerDirectory", "# # This software is subject to the provisions of", "DirectoryView added by addDirectoryViews # appears as a DirectoryViewSurrogate due", "self.ob.fake_skin.objectIds(), '%s not ignored' % name) def test_surrogate_writethrough(self): # CMF", "Unit tests for DirectoryView module. \"\"\" import sys import unittest", "'%s not ignored' % name) def test_surrogate_writethrough(self): # CMF Collector", "as well. This is bad in situations # where you", "# initialise skins self._registerDirectory(self) # add a method to the", "test_UnhandleableExpandPath(self): file = mktemp() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.ob.fake_skin.manage_properties(file)", "test_DeleteMethod(self): # Make sure a deleted method goes away remove(join(self.skin_path_name,", "import DirectoryView from Products.CMFCore.DirectoryView import registerMetaType class DummyDirectoryViewSurrogate: pass class", "\"FOLDER\" and test again... 
from Products.CMFCore.DirectoryView import DirectoryView from Products.CMFCore.DirectoryView", "'tests')) def test_getDirectoryInfo(self): skin = self.ob.fake_skin skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin') self.assertTrue(hasattr(self.ob.fake_skin, 'test1'), self.ob.fake_skin.getDirPath())", "from Products.CMFCore.DirectoryView import _dirreg WritableFSDVTest.setUp(self) self.saved_cfg_debug_mode = getConfiguration().debug_mode getConfiguration().debug_mode =", "self.assertEqual(key.split(':')[0], 'Products.CMFCore') subkey = _generateKey('Products.CMFCore', 'tests\\foo') self.assertTrue(subkey.startswith(key)) def test__findProductForPath(self): from", "ALL EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT", "# Test that a folder inside the fake skin really", "def test_NewFolder(self): # See if a new folder shows up", "getattr, fs, 'foo') self.assertRaises(AttributeError, getattr, fs.__dict__['_real'], 'foo') class DirectoryViewIgnoreTests(FSDVTest): def", "THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND", "nasty, but there is no way to unregister anything #", "new folder shows up self.assertFalse(hasattr(self.ob.fake_skin, 'test3')) def test_DeleteFolder(self): # Make", "registerDirectory creates keys in the right format. 
def test_registerDirectoryKeys(self): from", "correctly on a # FSDV that has a corresponding .metadata", "self.ob.fake_skin.objectIds() for name in must_ignore: self.assertFalse(name in visible) class DirectoryViewFolderTests(FSDVTest):", "to regenerate the FSDV data we need to remove and", "there was # no equivalent __delattr__ on the surrogate that", "tearDown(self): from Products.CMFCore import DirectoryView # This is nasty, but", "self.use_dir_mtime = info.use_dir_mtime def tearDown(self): getConfiguration().debug_mode = self.saved_cfg_debug_mode WritableFSDVTest.tearDown(self) def", "found self.assertEqual(self.ob.fake_skin.test2(), 'test2') def test_EditMethod(self): # See if an edited", "import getConfiguration from . import _globals from .base.dummy import DummyFolder", "where you only want to store markers and remove them", "and that the # custom ignore list is also honored", "+ ('test_manual_ignore',) visible = self.ob.fake_skin.objectIds() for name in must_ignore: self.assertFalse(name", "1) self.assertTrue(issubclass(w[-1].category, UserWarning)) text = ('DirectoryView fake_skin refers to a", "we register a different class under the fake meta_type #", "sure a deleted folder goes away self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory')) # It", "self._writeFile('test2.py', \"return 'test2.2'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.2') # edit", "has ended - they never got removed because there was", "import DummyFolder from .base.testcase import FSDVTest from .base.testcase import WritableFSDVTest", "DirectoryViewSurrogate from Products.CMFCore.DirectoryView import DirectoryViewSurrogate testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate))", "the fake skin really is of type # DirectoryViewSurrogate from", "deleted folder goes away self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory')) # It has a", "test_foo) 
self.assertEqual(fs.__dict__['_real'].foo, test_foo) del fs.foo self.assertRaises(AttributeError, getattr, fs, 'foo') self.assertRaises(AttributeError,", "fs, 'foo') self.assertRaises(AttributeError, getattr, fs.__dict__['_real'], 'foo') class DirectoryViewIgnoreTests(FSDVTest): def setUp(self):", "where an appropriate skin can't be found. \"\"\" def setUp(self):", "from tempfile import mktemp from App.config import getConfiguration from .", "list is also honored auto_ign = ('#test1', '.test1', 'test1~') must_ignore", "DirectoryView from Products.CMFCore.DirectoryView import registerMetaType class DummyDirectoryViewSurrogate: pass class DummyDirectoryView(DirectoryView):", "yet another Win32 mtime weirdity. remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) #", "self.assertTrue(subkey.startswith(key)) def test__findProductForPath(self): from Products.CMFCore.DirectoryView import _findProductForPath cmfpath = sys.modules['Products.CMFCore'].__path__[0]", "its new behaviour self.assertEqual(self.ob.fake_skin.test1(), 'new test1') def test_DeleteMethod(self): # Make", "Title') def test_DirectoryViewFolderDefault(self): # Test that a folder inside the", "has a file, which we need to delete first. self.assertTrue(hasattr(self.ob.fake_skin.test_directory,", "test_addDirectoryViews(self): # Test addDirectoryViews # also test registration of directory", "it back, # then edit it, the DirectoryView notices. 
#", "cmfpath = join(cmfpath, 'tests') self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', 'tests')) def test_getDirectoryInfo(self): skin", "goes away self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory')) # It has a file, which", "visible) class DirectoryViewFolderTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def tearDown(self): from", "self.assertRaises(AttributeError, getattr, fs, 'foo') self.assertRaises(AttributeError, getattr, fs.__dict__['_real'], 'foo') class DirectoryViewIgnoreTests(FSDVTest):", "the fake skin folder self._writeFile('test2.py', \"return 'test2'\") # edit the", "_dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath) info.reload() self.use_dir_mtime = info.use_dir_mtime def tearDown(self): getConfiguration().debug_mode = self.saved_cfg_debug_mode", "= ('CVS', 'SVN', 'test_manual_ignore.py') self._registerDirectory(self, ignore=self.manual_ign) def test_ignored(self): # Test", "tests that registerDirectory creates keys in the right format. def", "addDirectoryViews # also test registration of directory views doesn't barf", "creates keys in the right format. def test_registerDirectoryKeys(self): from Products.CMFCore.DirectoryView", "of the Zope Public License, # Version 2.1 (ZPL). 
A", "= _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath) info.reload() self.use_dir_mtime = info.use_dir_mtime def tearDown(self): getConfiguration().debug_mode =", "'test_directory')) def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DirectoryViewPathTests)) suite.addTest(unittest.makeSuite(DirectoryViewTests)) suite.addTest(unittest.makeSuite(DirectoryViewIgnoreTests)) suite.addTest(unittest.makeSuite(DirectoryViewFolderTests))", "# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE", "testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.title, 'test_directory Title') def test_DirectoryViewMetadataOnPropertyManager(self): # Test", "they never got removed because there was # no equivalent", "their best to find an appropriate skin and only do", "name) def test_surrogate_writethrough(self): # CMF Collector 316: It is possible", "Collector 316: It is possible to cause ZODB writes because", "'test_directory Title') def test_DirectoryViewMetadataOnPropertyManager(self): # Test to determine if metadata", "test again... from Products.CMFCore.DirectoryView import DirectoryView from Products.CMFCore.DirectoryView import registerMetaType", "auto_ign = ('#test1', '.test1', 'test1~') must_ignore = self.manual_ign + auto_ign", "2.1 (ZPL). A copy of the ZPL should accompany this", "# also test registration of directory views doesn't barf pass", ".base.testcase import WritableFSDVTest class DirectoryViewPathTests(unittest.TestCase): \"\"\" These test that, no", "due # to Acquisition hackery. 
from Products.CMFCore.DirectoryView import DirectoryViewSurrogate self.assertTrue(isinstance(self.ob.fake_skin,", "parent): return DummyDirectoryViewSurrogate() registerMetaType('FOLDER', DummyDirectoryView) # In order to regenerate", "Products.CMFCore.DirectoryView import _dirreg WritableFSDVTest.setUp(self) self.saved_cfg_debug_mode = getConfiguration().debug_mode getConfiguration().debug_mode = True", "= ('#test1', '.test1', 'test1~') must_ignore = self.manual_ign + auto_ign +", "away remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) def test_DeleteAddEditMethod(self): # Check that", "= self.ob.fake_skin skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin') self.assertTrue(hasattr(self.ob.fake_skin, 'test1'), self.ob.fake_skin.getDirPath()) # Test we do", "this test tests that registerDirectory creates keys in the right", "corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.getProperty('title'), 'test_directory Title') def", "# ############################################################################## \"\"\" Unit tests for DirectoryView module. 
\"\"\" import", "test_DirectoryViewMetadataOnPropertyManager(self): # Test to determine if metadata shows up correctly", "folder mkdir(join(self.skin_path_name, 'test3')) info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath) info.reload() self.use_dir_mtime = info.use_dir_mtime", "import registerMetaType class DummyDirectoryViewSurrogate: pass class DummyDirectoryView(DirectoryView): def __of__(self, parent):", "# DirectoryViewSurrogate from Products.CMFCore.DirectoryView import DirectoryViewSurrogate testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder,", "# Make sure a deleted folder goes away self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory'))", "also test registration of directory views doesn't barf pass def", "self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.2') # edit method self._writeFile('test2.py', \"return", "that \"artifact\" files and dirs are ignored for name in", "different class under the fake meta_type # \"FOLDER\" and test", "import addDirectoryViews from Products.CMFCore.DirectoryView import registerDirectory registerDirectory('fake_skins', _globals) self.ob =", "remove from os.path import join from tempfile import mktemp from", "the ZPL should accompany this distribution. 
# THIS SOFTWARE IS", "test_foo self.assertEqual(fs.foo, test_foo) self.assertEqual(fs.__dict__['_real'].foo, test_foo) del fs.foo self.assertRaises(AttributeError, getattr, fs,", "Test we do nothing if given a really wacky path", "Now we register a different class under the fake meta_type", "self.ob._delObject('fake_skin') self._registerDirectory(self) testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate)) class DebugModeTests(WritableFSDVTest): def", "import WritableFSDVTest class DirectoryViewPathTests(unittest.TestCase): \"\"\" These test that, no matter", "file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.title, 'test_directory Title') def test_DirectoryViewMetadataOnPropertyManager(self): #", ".base.testcase import FSDVTest from .base.testcase import WritableFSDVTest class DirectoryViewPathTests(unittest.TestCase): \"\"\"", "is used self.ob._delObject('fake_skin') self._registerDirectory(self) testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate)) class", "anything # right now... metatype_registry = DirectoryView._dirreg._meta_types if 'FOLDER' in", "DirectoryViewSurrogate)) def test_DirectoryViewFolderCustom(self): # Now we register a different class", "def test__findProductForPath(self): from Products.CMFCore.DirectoryView import _findProductForPath cmfpath = sys.modules['Products.CMFCore'].__path__[0] self.assertEqual(_findProductForPath(cmfpath),", "mktemp from App.config import getConfiguration from . 
import _globals from", "_generateKey('Products.CMFCore', 'tests') self.assertEqual(key.split(':')[0], 'Products.CMFCore') subkey = _generateKey('Products.CMFCore', 'tests\\foo') self.assertTrue(subkey.startswith(key)) def", "= ('DirectoryView fake_skin refers to a non-existing path %r' %", "them before the # transaction has ended - they never", "ended - they never got removed because there was #", "class DirectoryViewFolderTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def tearDown(self): from Products.CMFCore", "a deleted method goes away remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) def", ".metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.getProperty('title'), 'test_directory Title') def test_DirectoryViewFolderDefault(self):", "'test3')) def test_DeleteFolder(self): # Make sure a deleted folder goes", "join from tempfile import mktemp from App.config import getConfiguration from", "import warnings from os import mkdir from os import remove", "info.reload() self.use_dir_mtime = info.use_dir_mtime def tearDown(self): getConfiguration().debug_mode = self.saved_cfg_debug_mode WritableFSDVTest.tearDown(self)", "'test1~': self.assertTrue(name not in self.ob.fake_skin.objectIds(), '%s not ignored' % name)", "Zope Public License, # Version 2.1 (ZPL). 
A copy of", "on a # FSDV that has a corresponding .metadata file", "in situations # where you only want to store markers", "setUp(self): from Products.CMFCore.DirectoryView import _dirreg WritableFSDVTest.setUp(self) self.saved_cfg_debug_mode = getConfiguration().debug_mode getConfiguration().debug_mode", "'README.txt'), self.use_dir_mtime) self._deleteDirectory('test_directory', self.use_dir_mtime) self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory')) def test_suite(): suite =", "self.ob.fake_skin skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin') self.assertTrue(hasattr(self.ob.fake_skin, 'test1'), self.ob.fake_skin.getDirPath()) # Test we do nothing", "FSDVTest.setUp(self) self._registerDirectory(self) def test_addDirectoryViews(self): # Test addDirectoryViews # also test", "self.assertEqual(testfolder.getProperty('title'), 'test_directory Title') def test_DirectoryViewFolderDefault(self): # Test that a folder", "dirs = _dirreg._directories self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs, dirs.keys()) self.assertEqual(self.ob.fake_skin.getDirPath(), 'Products.CMFCore.tests:fake_skins/fake_skin') class", "Title') def test_DirectoryViewMetadataOnPropertyManager(self): # Test to determine if metadata shows", "'Products.CMFCore') subkey = _generateKey('Products.CMFCore', 'tests\\foo') self.assertTrue(subkey.startswith(key)) def test__findProductForPath(self): from Products.CMFCore.DirectoryView", "DummyDirectoryView(DirectoryView): def __of__(self, parent): return DummyDirectoryViewSurrogate() registerMetaType('FOLDER', DummyDirectoryView) # In", "even when a custom ignore list is used; and that", "('test_manual_ignore',) visible = self.ob.fake_skin.objectIds() for name in must_ignore: self.assertFalse(name in", "# no equivalent __delattr__ on the surrogate that would clean", "corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.title, 'test_directory Title') def", 
"an appropriate skin and only do nothing in the case", "+ auto_ign + ('test_manual_ignore',) visible = self.ob.fake_skin.objectIds() for name in", "= _generateKey('Products.CMFCore', 'tests\\foo') self.assertTrue(subkey.startswith(key)) def test__findProductForPath(self): from Products.CMFCore.DirectoryView import _findProductForPath", "find an appropriate skin and only do nothing in the", "DirectoryViewFolderTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def tearDown(self): from Products.CMFCore import", "ignored' % name) def test_surrogate_writethrough(self): # CMF Collector 316: It", "self.ob.fake_skin.getDirPath()) # Test we do nothing if given a really", "skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin') self.assertTrue(hasattr(self.ob.fake_skin, 'test1'), self.ob.fake_skin.getDirPath()) # Test we do nothing if", "data we need to remove and # register again, that", "another Win32 mtime weirdity. remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) # add", "= info.use_dir_mtime def tearDown(self): getConfiguration().debug_mode = self.saved_cfg_debug_mode WritableFSDVTest.tearDown(self) def test_AddNewMethod(self):", "# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND", "IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS", "self.ob.fake_skin test_foo = 'My Foovalue' fs.foo = test_foo self.assertEqual(fs.foo, test_foo)", "def test_EditMethod(self): # See if an edited method exhibits its", "is nasty, but there is no way to unregister anything", "warning was raised. self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) text = ('DirectoryView", "registerMetaType class DummyDirectoryViewSurrogate: pass class DummyDirectoryView(DirectoryView): def __of__(self, parent): return", "when a custom ignore list is used; and that the", "This exercises yet another Win32 mtime weirdity. 
remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin,", "AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE. #", "Make sure a deleted method goes away remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin,", "Pope') def test_ignored(self): # Test that \"artifact\" files and dirs", "[]) # Check that a warning was raised. self.assertEqual(len(w), 1)", "are ignored for name in '#test1', 'CVS', '.test1', 'test1~': self.assertTrue(name", "WritableFSDVTest.setUp(self) self.saved_cfg_debug_mode = getConfiguration().debug_mode getConfiguration().debug_mode = True # initialise skins", "self.assertEqual(fs.__dict__['_real'].foo, test_foo) del fs.foo self.assertRaises(AttributeError, getattr, fs, 'foo') self.assertRaises(AttributeError, getattr,", "'foo') self.assertRaises(AttributeError, getattr, fs.__dict__['_real'], 'foo') class DirectoryViewIgnoreTests(FSDVTest): def setUp(self): FSDVTest.setUp(self)", "self.assertEqual(self.ob.fake_skin.test2(), 'test2.3') def test_NewFolder(self): # See if a new folder", "from os.path import join from tempfile import mktemp from App.config", "= join(cmfpath, 'tests') self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', 'tests')) def test_getDirectoryInfo(self): skin =", "Test addDirectoryViews # also test registration of directory views doesn't", "test_surrogate_writethrough(self): # CMF Collector 316: It is possible to cause", "IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR", "# transaction has ended - they never got removed because", "# Check that a warning was raised. 
self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category,", "is subject to the provisions of the Zope Public License,", "added by addDirectoryViews # appears as a DirectoryViewSurrogate due #", "'test1') def test_properties(self): # Make sure the directory view is", "FSDV data we need to remove and # register again,", "do nothing in the case where an appropriate skin can't", "up self.assertFalse(hasattr(self.ob.fake_skin, 'test3')) def test_DeleteFolder(self): # Make sure a deleted", "sys import unittest import warnings from os import mkdir from", "'.test1', 'test1~': self.assertTrue(name not in self.ob.fake_skin.objectIds(), '%s not ignored' %", "# into the persistent DirectoryView as well. This is bad", "and test again... from Products.CMFCore.DirectoryView import DirectoryView from Products.CMFCore.DirectoryView import", "is possible to cause ZODB writes because # setting attributes", "self.saved_cfg_debug_mode WritableFSDVTest.tearDown(self) def test_AddNewMethod(self): # See if a method added", "in the case where an appropriate skin can't be found.", "the provisions of the Zope Public License, # Version 2.1", "addDirectoryViews(self.ob, 'fake_skins', _globals) def test__generateKey(self): from Products.CMFCore.DirectoryView import _generateKey key", "# See if a new folder shows up self.assertFalse(hasattr(self.ob.fake_skin, 'test3'))", "and # register again, that way the newly registered meta_type", "cmfpath = sys.modules['Products.CMFCore'].__path__[0] self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', '')) cmfpath = join(cmfpath, 'tests')", "do nothing if given a really wacky path def test_UnhandleableExpandPath(self):", "def test_DirectoryViewExists(self): # Check DirectoryView added by addDirectoryViews # appears", "# register again, that way the newly registered meta_type is", "'test_directory')) # It has a file, which we need to", "test_ignored(self): # Test that \"artifact\" files and dirs are ignored,", "need to remove and 
# register again, that way the", "'README.txt')) self._deleteFile(join('test_directory', 'README.txt'), self.use_dir_mtime) self._deleteDirectory('test_directory', self.use_dir_mtime) self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory')) def test_suite():", "fs.__dict__['_real'], 'foo') class DirectoryViewIgnoreTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self.manual_ign = ('CVS',", "# CMF Collector 316: It is possible to cause ZODB", "BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE,", "unittest import warnings from os import mkdir from os import", "_globals from .base.dummy import DummyFolder from .base.testcase import FSDVTest from", "= self.ob.fake_skin test_foo = 'My Foovalue' fs.foo = test_foo self.assertEqual(fs.foo,", "skins self._registerDirectory(self) # add a method to the fake skin", "DirectoryViewPathTests(unittest.TestCase): \"\"\" These test that, no matter what is stored", "('#test1', '.test1', 'test1~') must_ignore = self.manual_ign + auto_ign + ('test_manual_ignore',)", "WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED", "FSDVTest.setUp(self) self._registerDirectory(self) def tearDown(self): from Products.CMFCore import DirectoryView # This", "add it back, # then edit it, the DirectoryView notices.", "for DirectoryView module. \"\"\" import sys import unittest import warnings", "only do nothing in the case where an appropriate skin", "# where you only want to store markers and remove", "the case where an appropriate skin can't be found. \"\"\"", "folder can be found self.assertEqual(self.ob.fake_skin.test2(), 'test2') def test_EditMethod(self): # See", "in their dirpath, FSDV's will do their best to find", "= self.ob.fake_skin.objectIds() for name in must_ignore: self.assertFalse(name in visible) class", "# FSDV that has a corresponding .metadata file testfolder =", "unregister anything # right now... metatype_registry = DirectoryView._dirreg._meta_types if 'FOLDER'", "to Acquisition hackery. 
from Products.CMFCore.DirectoryView import DirectoryViewSurrogate self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate)) def", "is bad in situations # where you only want to", "\"artifact\" files and dirs are ignored, # even when a", "self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.3') def test_NewFolder(self): # See if", "# Test to determine if metadata shows up correctly on", "os.path import join from tempfile import mktemp from App.config import", "= test_foo self.assertEqual(fs.foo, test_foo) self.assertEqual(fs.__dict__['_real'].foo, test_foo) del fs.foo self.assertRaises(AttributeError, getattr,", "inside the fake skin really is of type # DirectoryViewSurrogate", "has a corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.getProperty('title'), 'test_directory", "subkey = _generateKey('Products.CMFCore', 'tests\\foo') self.assertTrue(subkey.startswith(key)) def test__findProductForPath(self): from Products.CMFCore.DirectoryView import", "self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope') def test_ignored(self): # Test that \"artifact\" files", "# custom ignore list is also honored auto_ign = ('#test1',", "there is no way to unregister anything # right now...", "self._writeFile('test2.py', \"return 'test2'\") # edit the test1 method self._writeFile('test1.py', \"return", "a method added to the skin folder can be found", "import _findProductForPath cmfpath = sys.modules['Products.CMFCore'].__path__[0] self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', '')) cmfpath =", "added to the skin folder can be found self.assertEqual(self.ob.fake_skin.test2(), 'test2')", "from os import mkdir from os import remove from os.path", "regenerate the FSDV data we need to remove and #", "got removed because there was # no equivalent __delattr__ on", "def test_DeleteFolder(self): # Make sure a deleted folder goes away", "a custom ignore list is used; and 
that the #", "not ignored' % name) def test_surrogate_writethrough(self): # CMF Collector 316:", "Win32 mtime weirdity. remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) # add method", "from .base.testcase import FSDVTest from .base.testcase import WritableFSDVTest class DirectoryViewPathTests(unittest.TestCase):", "self.assertEqual(self.ob.fake_skin.getDirPath(), 'Products.CMFCore.tests:fake_skins/fake_skin') class DirectoryViewTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def test_addDirectoryViews(self):", "self._writeFile('test2.py', \"return 'test2.3'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.3') def test_NewFolder(self):", "Products.CMFCore import DirectoryView # This is nasty, but there is", "setting attributes on the non-persistent surrogate writes them # into", "PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED", "can't be found. \"\"\" def setUp(self): from Products.CMFCore.DirectoryView import addDirectoryViews", "'test2.3') def test_NewFolder(self): # See if a new folder shows", "is also honored auto_ign = ('#test1', '.test1', 'test1~') must_ignore =", "remove them before the # transaction has ended - they", "class under the fake meta_type # \"FOLDER\" and test again...", "import DirectoryView # This is nasty, but there is no", "def test__generateKey(self): from Products.CMFCore.DirectoryView import _generateKey key = _generateKey('Products.CMFCore', 'tests')", "= mktemp() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.ob.fake_skin.manage_properties(file) self.assertEqual(self.ob.fake_skin.objectIds(), [])", "method works self.assertEqual(self.ob.fake_skin.test1(), 'test1') def test_properties(self): # Make sure the", "must_ignore = self.manual_ign + auto_ign + ('test_manual_ignore',) visible = self.ob.fake_skin.objectIds()", "now... 
metatype_registry = DirectoryView._dirreg._meta_types if 'FOLDER' in metatype_registry: del metatype_registry['FOLDER']", "self.assertEqual(testfolder.title, 'test_directory Title') def test_DirectoryViewMetadataOnPropertyManager(self): # Test to determine if", "their dirpath, FSDV's will do their best to find an", "import remove from os.path import join from tempfile import mktemp", "if metadata shows up correctly on a # FSDV that", "really is of type # DirectoryViewSurrogate from Products.CMFCore.DirectoryView import DirectoryViewSurrogate", "meta_type is used self.ob._delObject('fake_skin') self._registerDirectory(self) testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate))", "del metatype_registry['FOLDER'] FSDVTest.tearDown(self) def test_DirectoryViewMetadata(self): # Test to determine if", "wacky path def test_UnhandleableExpandPath(self): file = mktemp() with warnings.catch_warnings(record=True) as", "the non-persistent surrogate writes them # into the persistent DirectoryView", "method to the fake skin folder self._writeFile('test2.py', \"return 'test2'\") #", "def test_addDirectoryViews(self): # Test addDirectoryViews # also test registration of", "directory views doesn't barf pass def test_DirectoryViewExists(self): # Check DirectoryView", "DirectoryView as well. This is bad in situations # where", "import mkdir from os import remove from os.path import join", "edit method self._writeFile('test2.py', \"return 'test2.3'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.3')", "nothing if given a really wacky path def test_UnhandleableExpandPath(self): file", "Test that \"artifact\" files and dirs are ignored for name", "skin can't be found. \"\"\" def setUp(self): from Products.CMFCore.DirectoryView import", "we do nothing if given a really wacky path def", "under the fake meta_type # \"FOLDER\" and test again... 
from", "a new folder mkdir(join(self.skin_path_name, 'test3')) info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath) info.reload() self.use_dir_mtime", "the FSDV data we need to remove and # register", "It has a file, which we need to delete first.", "self.use_dir_mtime) self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory')) def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DirectoryViewPathTests)) suite.addTest(unittest.makeSuite(DirectoryViewTests))", "test_NewFolder(self): # See if a new folder shows up self.assertFalse(hasattr(self.ob.fake_skin,", "skin folder self._writeFile('test2.py', \"return 'test2.2'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.2')", "('Products.CMFCore', 'tests')) def test_getDirectoryInfo(self): skin = self.ob.fake_skin skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin') self.assertTrue(hasattr(self.ob.fake_skin, 'test1'),", "self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate)) def test_DirectoryViewFolderCustom(self): # Now we register a", "sys.modules['Products.CMFCore'].__path__[0] self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', '')) cmfpath = join(cmfpath, 'tests') self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore',", "# See if an edited method exhibits its new behaviour", "import DirectoryViewSurrogate self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate)) def test_DirectoryViewMethod(self): # Check if DirectoryView", "add method back to the fake skin folder self._writeFile('test2.py', \"return", "in self.ob.fake_skin.objectIds(), '%s not ignored' % name) def test_surrogate_writethrough(self): #", "Products.CMFCore.DirectoryView import _generateKey key = _generateKey('Products.CMFCore', 'tests') self.assertEqual(key.split(':')[0], 'Products.CMFCore') subkey", "DirectoryViewSurrogate self.assertTrue(isinstance(self.ob.fake_skin, 
DirectoryViewSurrogate)) def test_DirectoryViewMethod(self): # Check if DirectoryView method", "register a different class under the fake meta_type # \"FOLDER\"", "folder inside the fake skin really is of type #", "no matter what is stored in their dirpath, FSDV's will", "Check DirectoryView added by addDirectoryViews # appears as a DirectoryViewSurrogate", "if a new folder shows up self.assertFalse(hasattr(self.ob.fake_skin, 'test3')) def test_DeleteFolder(self):", "self.ob.fake_skin.test_directory self.assertEqual(testfolder.title, 'test_directory Title') def test_DirectoryViewMetadataOnPropertyManager(self): # Test to determine", "\"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED #", "\"return 'test2.2'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(), 'test2.2') # edit method", "############################################################################## \"\"\" Unit tests for DirectoryView module. \"\"\" import sys", "warnings from os import mkdir from os import remove from", "getConfiguration().debug_mode getConfiguration().debug_mode = True # initialise skins self._registerDirectory(self) # add", "markers and remove them before the # transaction has ended", "# It has a file, which we need to delete", "%r' % file) self.assertTrue(text in str(w[-1].message)) # this test tests", ". import _globals from .base.dummy import DummyFolder from .base.testcase import", "# Test addDirectoryViews # also test registration of directory views", "to unregister anything # right now... metatype_registry = DirectoryView._dirreg._meta_types if", "up # the persistent DirectoryView as well. fs = self.ob.fake_skin", "test_DirectoryViewMethod(self): # Check if DirectoryView method works self.assertEqual(self.ob.fake_skin.test1(), 'test1') def", "# # Copyright (c) 2002 Zope Foundation and Contributors. #", "import mktemp from App.config import getConfiguration from . 
import _globals", "as w: warnings.simplefilter('always') self.ob.fake_skin.manage_properties(file) self.assertEqual(self.ob.fake_skin.objectIds(), []) # Check that a", "from Products.CMFCore.DirectoryView import DirectoryViewSurrogate self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate)) def test_DirectoryViewMethod(self): # Check", "class DebugModeTests(WritableFSDVTest): def setUp(self): from Products.CMFCore.DirectoryView import _dirreg WritableFSDVTest.setUp(self) self.saved_cfg_debug_mode", "'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) def test_DeleteAddEditMethod(self): # Check that if we", "skin = self.ob.fake_skin skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin') self.assertTrue(hasattr(self.ob.fake_skin, 'test1'), self.ob.fake_skin.getDirPath()) # Test we", "self.assertTrue(text in str(w[-1].message)) # this test tests that registerDirectory creates", "software is subject to the provisions of the Zope Public", "from Products.CMFCore.DirectoryView import DirectoryViewSurrogate testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate)) def", "'new test1') def test_DeleteMethod(self): # Make sure a deleted method", "# add a method to the fake skin folder self._writeFile('test2.py',", "to the fake skin folder self._writeFile('test2.py', \"return 'test2'\") # edit", "a corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.getProperty('title'), 'test_directory Title')", "sure the directory view is reading properties self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope')", "up correctly on a # FSDV that has a corresponding", "mkdir from os import remove from os.path import join from", "we need to delete first. 
self.assertTrue(hasattr(self.ob.fake_skin.test_directory, 'README.txt')) self._deleteFile(join('test_directory', 'README.txt'), self.use_dir_mtime)", "method added to the skin folder can be found self.assertEqual(self.ob.fake_skin.test2(),", "test1') def test_DeleteMethod(self): # Make sure a deleted method goes", "metadata shows up correctly on a # FSDV that has", "Foundation and Contributors. # # This software is subject to", "the persistent DirectoryView as well. This is bad in situations", "meta_type # \"FOLDER\" and test again... from Products.CMFCore.DirectoryView import DirectoryView", "'test_manual_ignore.py') self._registerDirectory(self, ignore=self.manual_ign) def test_ignored(self): # Test that \"artifact\" files", "self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate)) class DebugModeTests(WritableFSDVTest): def setUp(self): from Products.CMFCore.DirectoryView import _dirreg", "surrogate that would clean up # the persistent DirectoryView as", "is of type # DirectoryViewSurrogate from Products.CMFCore.DirectoryView import DirectoryViewSurrogate testfolder", "FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## \"\"\" Unit", "self.assertEqual(self.ob.fake_skin.objectIds(), []) # Check that a warning was raised. self.assertEqual(len(w),", "from Products.CMFCore.DirectoryView import _dirreg dirs = _dirreg._directories self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs,", "Test that a folder inside the fake skin really is", "honored auto_ign = ('#test1', '.test1', 'test1~') must_ignore = self.manual_ign +", "\"return 'test2'\") # edit the test1 method self._writeFile('test1.py', \"return 'new", "INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF", "AND FITNESS # FOR A PARTICULAR PURPOSE. # ############################################################################## \"\"\"", "# Version 2.1 (ZPL). 
A copy of the ZPL should", "Test that \"artifact\" files and dirs are ignored, # even", "folder goes away self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory')) # It has a file,", "Check if DirectoryView method works self.assertEqual(self.ob.fake_skin.test1(), 'test1') def test_properties(self): #", "test_DirectoryViewExists(self): # Check DirectoryView added by addDirectoryViews # appears as", "writes them # into the persistent DirectoryView as well. This", "you only want to store markers and remove them before", "class DummyDirectoryViewSurrogate: pass class DummyDirectoryView(DirectoryView): def __of__(self, parent): return DummyDirectoryViewSurrogate()", "Make sure a deleted folder goes away self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory')) #", "# the persistent DirectoryView as well. fs = self.ob.fake_skin test_foo", "NOT LIMITED TO, THE IMPLIED # WARRANTIES OF TITLE, MERCHANTABILITY,", "need to delete first. self.assertTrue(hasattr(self.ob.fake_skin.test_directory, 'README.txt')) self._deleteFile(join('test_directory', 'README.txt'), self.use_dir_mtime) self._deleteDirectory('test_directory',", "self._registerDirectory(self) def tearDown(self): from Products.CMFCore import DirectoryView # This is", "never got removed because there was # no equivalent __delattr__", "This is bad in situations # where you only want", "folder self._writeFile('test2.py', \"return 'test2'\") # edit the test1 method self._writeFile('test1.py',", "remove and # register again, that way the newly registered", "UserWarning)) text = ('DirectoryView fake_skin refers to a non-existing path", "def test_DirectoryViewFolderCustom(self): # Now we register a different class under", "files and dirs are ignored for name in '#test1', 'CVS',", "_findProductForPath cmfpath = sys.modules['Products.CMFCore'].__path__[0] self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', '')) cmfpath = join(cmfpath,", "delete a method, then add it back, # then edit", "as well. 
fs = self.ob.fake_skin test_foo = 'My Foovalue' fs.foo", "if 'FOLDER' in metatype_registry: del metatype_registry['FOLDER'] FSDVTest.tearDown(self) def test_DirectoryViewMetadata(self): #", "def test_properties(self): # Make sure the directory view is reading", "no equivalent __delattr__ on the surrogate that would clean up", "initialise skins self._registerDirectory(self) # add a method to the fake", "fake skin folder self._writeFile('test2.py', \"return 'test2.2'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(),", "on the surrogate that would clean up # the persistent", "'test3')) info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath) info.reload() self.use_dir_mtime = info.use_dir_mtime def tearDown(self):", "clean up # the persistent DirectoryView as well. fs =", "that a warning was raised. self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) text", "testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate)) def test_DirectoryViewFolderCustom(self): # Now we", "test_AddNewMethod(self): # See if a method added to the skin", "file, which we need to delete first. self.assertTrue(hasattr(self.ob.fake_skin.test_directory, 'README.txt')) self._deleteFile(join('test_directory',", "that \"artifact\" files and dirs are ignored, # even when", "dirpath, FSDV's will do their best to find an appropriate", "a non-existing path %r' % file) self.assertTrue(text in str(w[-1].message)) #", "def test_surrogate_writethrough(self): # CMF Collector 316: It is possible to", "def test_DirectoryViewMetadataOnPropertyManager(self): # Test to determine if metadata shows up", "test registration of directory views doesn't barf pass def test_DirectoryViewExists(self):", "Version 2.1 (ZPL). A copy of the ZPL should accompany", "case where an appropriate skin can't be found. 
\"\"\" def", "self.assertEqual(self.ob.fake_skin.test1(), 'test1') def test_properties(self): # Make sure the directory view", "removed because there was # no equivalent __delattr__ on the", "testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate)) class DebugModeTests(WritableFSDVTest): def setUp(self): from", "that way the newly registered meta_type is used self.ob._delObject('fake_skin') self._registerDirectory(self)", "newly registered meta_type is used self.ob._delObject('fake_skin') self._registerDirectory(self) testfolder = self.ob.fake_skin.test_directory", "getConfiguration().debug_mode = True # initialise skins self._registerDirectory(self) # add a", "'test1~') must_ignore = self.manual_ign + auto_ign + ('test_manual_ignore',) visible =", "'test_directory Title') def test_DirectoryViewFolderDefault(self): # Test that a folder inside", "that would clean up # the persistent DirectoryView as well.", "# even when a custom ignore list is used; and", "\"\"\" These test that, no matter what is stored in", "stored in their dirpath, FSDV's will do their best to", "# \"FOLDER\" and test again... from Products.CMFCore.DirectoryView import DirectoryView from", "back, # then edit it, the DirectoryView notices. 
# This", "of directory views doesn't barf pass def test_DirectoryViewExists(self): # Check", "WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR", "pass def test_DirectoryViewExists(self): # Check DirectoryView added by addDirectoryViews #", "refers to a non-existing path %r' % file) self.assertTrue(text in", "then add it back, # then edit it, the DirectoryView", "DirectoryViewSurrogate testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate)) def test_DirectoryViewFolderCustom(self): # Now", "DebugModeTests(WritableFSDVTest): def setUp(self): from Products.CMFCore.DirectoryView import _dirreg WritableFSDVTest.setUp(self) self.saved_cfg_debug_mode =", "custom ignore list is also honored auto_ign = ('#test1', '.test1',", "behaviour self.assertEqual(self.ob.fake_skin.test1(), 'new test1') def test_DeleteMethod(self): # Make sure a", "notices. # This exercises yet another Win32 mtime weirdity. remove(join(self.skin_path_name,", "away self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory')) # It has a file, which we", "fake meta_type # \"FOLDER\" and test again... from Products.CMFCore.DirectoryView import", "DirectoryViewTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def test_addDirectoryViews(self): # Test addDirectoryViews", "reading properties self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope') def test_ignored(self): # Test that", "keys in the right format. 
def test_registerDirectoryKeys(self): from Products.CMFCore.DirectoryView import", "shows up self.assertFalse(hasattr(self.ob.fake_skin, 'test3')) def test_DeleteFolder(self): # Make sure a", "_globals) def test__generateKey(self): from Products.CMFCore.DirectoryView import _generateKey key = _generateKey('Products.CMFCore',", "properties self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope') def test_ignored(self): # Test that \"artifact\"", "Products.CMFCore.DirectoryView import DirectoryView from Products.CMFCore.DirectoryView import registerMetaType class DummyDirectoryViewSurrogate: pass", "'test2') def test_EditMethod(self): # See if an edited method exhibits", "them # into the persistent DirectoryView as well. This is", "fs = self.ob.fake_skin test_foo = 'My Foovalue' fs.foo = test_foo", "that the # custom ignore list is also honored auto_ign", "self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate)) def test_DirectoryViewFolderCustom(self): # Now we register a different", "(c) 2002 Zope Foundation and Contributors. # # This software", "the persistent DirectoryView as well. 
fs = self.ob.fake_skin test_foo =", "metatype_registry: del metatype_registry['FOLDER'] FSDVTest.tearDown(self) def test_DirectoryViewMetadata(self): # Test to determine", "str(w[-1].message)) # this test tests that registerDirectory creates keys in", "to cause ZODB writes because # setting attributes on the", "key = _generateKey('Products.CMFCore', 'tests') self.assertEqual(key.split(':')[0], 'Products.CMFCore') subkey = _generateKey('Products.CMFCore', 'tests\\foo')", "os import mkdir from os import remove from os.path import", "_dirreg dirs = _dirreg._directories self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs, dirs.keys()) self.assertEqual(self.ob.fake_skin.getDirPath(), 'Products.CMFCore.tests:fake_skins/fake_skin')", "# Check if DirectoryView method works self.assertEqual(self.ob.fake_skin.test1(), 'test1') def test_properties(self):", "to store markers and remove them before the # transaction", "self.ob.fake_skin.manage_properties(file) self.assertEqual(self.ob.fake_skin.objectIds(), []) # Check that a warning was raised.", "before the # transaction has ended - they never got", "edited method exhibits its new behaviour self.assertEqual(self.ob.fake_skin.test1(), 'new test1') def", "self.assertRaises(AttributeError, getattr, fs.__dict__['_real'], 'foo') class DirectoryViewIgnoreTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self.manual_ign", "self._deleteDirectory('test_directory', self.use_dir_mtime) self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory')) def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DirectoryViewPathTests))", "hackery. from Products.CMFCore.DirectoryView import DirectoryViewSurrogate self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate)) def test_DirectoryViewMethod(self): #", "a file, which we need to delete first. 
self.assertTrue(hasattr(self.ob.fake_skin.test_directory, 'README.txt'))", "import sys import unittest import warnings from os import mkdir", "def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DirectoryViewPathTests)) suite.addTest(unittest.makeSuite(DirectoryViewTests)) suite.addTest(unittest.makeSuite(DirectoryViewIgnoreTests)) suite.addTest(unittest.makeSuite(DirectoryViewFolderTests)) suite.addTest(unittest.makeSuite(DebugModeTests))", "edit it, the DirectoryView notices. # This exercises yet another", "must_ignore: self.assertFalse(name in visible) class DirectoryViewFolderTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self)", "DirectoryView method works self.assertEqual(self.ob.fake_skin.test1(), 'test1') def test_properties(self): # Make sure", "a deleted folder goes away self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory')) # It has", "way to unregister anything # right now... metatype_registry = DirectoryView._dirreg._meta_types", "'SVN', 'test_manual_ignore.py') self._registerDirectory(self, ignore=self.manual_ign) def test_ignored(self): # Test that \"artifact\"", "_generateKey key = _generateKey('Products.CMFCore', 'tests') self.assertEqual(key.split(':')[0], 'Products.CMFCore') subkey = _generateKey('Products.CMFCore',", "non-persistent surrogate writes them # into the persistent DirectoryView as", "visible = self.ob.fake_skin.objectIds() for name in must_ignore: self.assertFalse(name in visible)", "CMF Collector 316: It is possible to cause ZODB writes", "test_DirectoryViewFolderDefault(self): # Test that a folder inside the fake skin", "warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.ob.fake_skin.manage_properties(file) self.assertEqual(self.ob.fake_skin.objectIds(), []) # Check that", "test1'\") # add a new folder mkdir(join(self.skin_path_name, 'test3')) info =", "if an edited method exhibits its new behaviour 
self.assertEqual(self.ob.fake_skin.test1(), 'new", "goes away remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) def test_DeleteAddEditMethod(self): # Check", "fake skin folder self._writeFile('test2.py', \"return 'test2'\") # edit the test1", "= _dirreg._directories self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs, dirs.keys()) self.assertEqual(self.ob.fake_skin.getDirPath(), 'Products.CMFCore.tests:fake_skins/fake_skin') class DirectoryViewTests(FSDVTest):", "self.use_dir_mtime) self._deleteDirectory('test_directory', self.use_dir_mtime) self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory')) def test_suite(): suite = unittest.TestSuite()", "def tearDown(self): getConfiguration().debug_mode = self.saved_cfg_debug_mode WritableFSDVTest.tearDown(self) def test_AddNewMethod(self): # See", "custom ignore list is used; and that the # custom", "distribution. # THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY", "of the ZPL should accompany this distribution. # THIS SOFTWARE", "that if we delete a method, then add it back,", "self._deleteFile(join('test_directory', 'README.txt'), self.use_dir_mtime) self._deleteDirectory('test_directory', self.use_dir_mtime) self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory')) def test_suite(): suite", "IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,", "right format. 
def test_registerDirectoryKeys(self): from Products.CMFCore.DirectoryView import _dirreg dirs =", "Make sure the directory view is reading properties self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope", "316: It is possible to cause ZODB writes because #", "FSDVTest.setUp(self) self.manual_ign = ('CVS', 'SVN', 'test_manual_ignore.py') self._registerDirectory(self, ignore=self.manual_ign) def test_ignored(self):", "ignore list is used; and that the # custom ignore", "DummyDirectoryView) # In order to regenerate the FSDV data we", "an edited method exhibits its new behaviour self.assertEqual(self.ob.fake_skin.test1(), 'new test1')", "from Products.CMFCore.DirectoryView import _findProductForPath cmfpath = sys.modules['Products.CMFCore'].__path__[0] self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', ''))", "_dirreg._directories self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs, dirs.keys()) self.assertEqual(self.ob.fake_skin.getDirPath(), 'Products.CMFCore.tests:fake_skins/fake_skin') class DirectoryViewTests(FSDVTest): def", "DummyFolder() addDirectoryViews(self.ob, 'fake_skins', _globals) def test__generateKey(self): from Products.CMFCore.DirectoryView import _generateKey", "was raised. 
self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) text = ('DirectoryView fake_skin", "'#test1', 'CVS', '.test1', 'test1~': self.assertTrue(name not in self.ob.fake_skin.objectIds(), '%s not", "# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS #", "def tearDown(self): from Products.CMFCore import DirectoryView # This is nasty,", "shows up correctly on a # FSDV that has a", "return DummyDirectoryViewSurrogate() registerMetaType('FOLDER', DummyDirectoryView) # In order to regenerate the", "# This is nasty, but there is no way to", "self.assertTrue(hasattr(self.ob.fake_skin, 'test_directory')) # It has a file, which we need", "getattr, fs.__dict__['_real'], 'foo') class DirectoryViewIgnoreTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self.manual_ign =", "is stored in their dirpath, FSDV's will do their best", "info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath) info.reload() self.use_dir_mtime = info.use_dir_mtime def tearDown(self): getConfiguration().debug_mode", "self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) def test_DeleteAddEditMethod(self): # Check that if we delete", "matter what is stored in their dirpath, FSDV's will do", "an appropriate skin can't be found. \"\"\" def setUp(self): from", "import _globals from .base.dummy import DummyFolder from .base.testcase import FSDVTest", "was # no equivalent __delattr__ on the surrogate that would", "self._registerDirectory(self) testfolder = self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DummyDirectoryViewSurrogate)) class DebugModeTests(WritableFSDVTest): def setUp(self):", "\"\"\" import sys import unittest import warnings from os import", "fake_skin refers to a non-existing path %r' % file) self.assertTrue(text", "# add method back to the fake skin folder self._writeFile('test2.py',", "FOR A PARTICULAR PURPOSE. 
# ############################################################################## \"\"\" Unit tests for", "will do their best to find an appropriate skin and", "ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #", "# In order to regenerate the FSDV data we need", "w: warnings.simplefilter('always') self.ob.fake_skin.manage_properties(file) self.assertEqual(self.ob.fake_skin.objectIds(), []) # Check that a warning", "but there is no way to unregister anything # right", "= self.ob.fake_skin.test_directory self.assertTrue(isinstance(testfolder, DirectoryViewSurrogate)) def test_DirectoryViewFolderCustom(self): # Now we register", "in str(w[-1].message)) # this test tests that registerDirectory creates keys", "files and dirs are ignored, # even when a custom", "a # FSDV that has a corresponding .metadata file testfolder", "addDirectoryViews # appears as a DirectoryViewSurrogate due # to Acquisition", "test tests that registerDirectory creates keys in the right format.", "pass class DummyDirectoryView(DirectoryView): def __of__(self, parent): return DummyDirectoryViewSurrogate() registerMetaType('FOLDER', DummyDirectoryView)", "the fake skin folder self._writeFile('test2.py', \"return 'test2.2'\", self.use_dir_mtime) # check", "transaction has ended - they never got removed because there", "appears as a DirectoryViewSurrogate due # to Acquisition hackery. from", "Test to determine if metadata shows up correctly on a", "True # initialise skins self._registerDirectory(self) # add a method to", "# then edit it, the DirectoryView notices. 
# This exercises", "Check that if we delete a method, then add it", "'test2.2') # edit method self._writeFile('test2.py', \"return 'test2.3'\", self.use_dir_mtime) # check", "self.assertFalse(name in visible) class DirectoryViewFolderTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def", "import unittest import warnings from os import mkdir from os", "join(cmfpath, 'tests') self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', 'tests')) def test_getDirectoryInfo(self): skin = self.ob.fake_skin", "ignore=self.manual_ign) def test_ignored(self): # Test that \"artifact\" files and dirs", "FSDV that has a corresponding .metadata file testfolder = self.ob.fake_skin.test_directory", "# this test tests that registerDirectory creates keys in the", "PURPOSE. # ############################################################################## \"\"\" Unit tests for DirectoryView module. \"\"\"", "as a DirectoryViewSurrogate due # to Acquisition hackery. 
from Products.CMFCore.DirectoryView", "MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR PURPOSE.", "self.assertEqual(self.ob.fake_skin.test1(), 'new test1') def test_DeleteMethod(self): # Make sure a deleted", "file = mktemp() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.ob.fake_skin.manage_properties(file) self.assertEqual(self.ob.fake_skin.objectIds(),", "# Check that if we delete a method, then add", "WritableFSDVTest class DirectoryViewPathTests(unittest.TestCase): \"\"\" These test that, no matter what", "'Zope Pope') def test_ignored(self): # Test that \"artifact\" files and", "See if a method added to the skin folder can", "test_properties(self): # Make sure the directory view is reading properties", "has a corresponding .metadata file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.title, 'test_directory", "self.ob = DummyFolder() addDirectoryViews(self.ob, 'fake_skins', _globals) def test__generateKey(self): from Products.CMFCore.DirectoryView", "import registerDirectory registerDirectory('fake_skins', _globals) self.ob = DummyFolder() addDirectoryViews(self.ob, 'fake_skins', _globals)", "which we need to delete first. 
self.assertTrue(hasattr(self.ob.fake_skin.test_directory, 'README.txt')) self._deleteFile(join('test_directory', 'README.txt'),", "addDirectoryViews from Products.CMFCore.DirectoryView import registerDirectory registerDirectory('fake_skins', _globals) self.ob = DummyFolder()", "In order to regenerate the FSDV data we need to", "self.assertTrue('Products.CMFCore.tests:fake_skins/fake_skin' in dirs, dirs.keys()) self.assertEqual(self.ob.fake_skin.getDirPath(), 'Products.CMFCore.tests:fake_skins/fake_skin') class DirectoryViewTests(FSDVTest): def setUp(self):", "because # setting attributes on the non-persistent surrogate writes them", "SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS", "class DirectoryViewIgnoreTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self.manual_ign = ('CVS', 'SVN', 'test_manual_ignore.py')", "# Test that \"artifact\" files and dirs are ignored, #", "# edit the test1 method self._writeFile('test1.py', \"return 'new test1'\") #", "# check self.assertEqual(self.ob.fake_skin.test2(), 'test2.3') def test_NewFolder(self): # See if a", "DirectoryViewSurrogate)) def test_DirectoryViewMethod(self): # Check if DirectoryView method works self.assertEqual(self.ob.fake_skin.test1(),", "to remove and # register again, that way the newly", "in '#test1', 'CVS', '.test1', 'test1~': self.assertTrue(name not in self.ob.fake_skin.objectIds(), '%s", "('Products.CMFCore', '')) cmfpath = join(cmfpath, 'tests') self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', 'tests')) def", "registration of directory views doesn't barf pass def test_DirectoryViewExists(self): #", "test_foo = 'My Foovalue' fs.foo = test_foo self.assertEqual(fs.foo, test_foo) self.assertEqual(fs.__dict__['_real'].foo,", "'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) # add method back to the fake", "suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DirectoryViewPathTests)) suite.addTest(unittest.makeSuite(DirectoryViewTests)) 
suite.addTest(unittest.makeSuite(DirectoryViewIgnoreTests)) suite.addTest(unittest.makeSuite(DirectoryViewFolderTests)) suite.addTest(unittest.makeSuite(DebugModeTests)) return suite", "Zope Foundation and Contributors. # # This software is subject", "well. fs = self.ob.fake_skin test_foo = 'My Foovalue' fs.foo =", "to determine if metadata shows up correctly on a #", "deleted method goes away remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) def test_DeleteAddEditMethod(self):", "TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS # FOR A PARTICULAR", "and only do nothing in the case where an appropriate", "getConfiguration().debug_mode = self.saved_cfg_debug_mode WritableFSDVTest.tearDown(self) def test_AddNewMethod(self): # See if a", "Products.CMFCore.DirectoryView import registerDirectory registerDirectory('fake_skins', _globals) self.ob = DummyFolder() addDirectoryViews(self.ob, 'fake_skins',", "for name in '#test1', 'CVS', '.test1', 'test1~': self.assertTrue(name not in", "def test_UnhandleableExpandPath(self): file = mktemp() with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always')", "and dirs are ignored, # even when a custom ignore", "a new folder shows up self.assertFalse(hasattr(self.ob.fake_skin, 'test3')) def test_DeleteFolder(self): #", "the # custom ignore list is also honored auto_ign =", "This software is subject to the provisions of the Zope", "skin folder self._writeFile('test2.py', \"return 'test2'\") # edit the test1 method", "import FSDVTest from .base.testcase import WritableFSDVTest class DirectoryViewPathTests(unittest.TestCase): \"\"\" These", "# Make sure the directory view is reading properties self.assertEqual(self.ob.fake_skin.testPT.title,", "name in must_ignore: self.assertFalse(name in visible) class DirectoryViewFolderTests(FSDVTest): def setUp(self):", "self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', 'tests')) def 
test_getDirectoryInfo(self): skin = self.ob.fake_skin skin.manage_properties('Products.CMFCore.tests:fake_skins/fake_skin') self.assertTrue(hasattr(self.ob.fake_skin,", "views doesn't barf pass def test_DirectoryViewExists(self): # Check DirectoryView added", "# See if a method added to the skin folder", "'CVS', '.test1', 'test1~': self.assertTrue(name not in self.ob.fake_skin.objectIds(), '%s not ignored'", "ZPL should accompany this distribution. # THIS SOFTWARE IS PROVIDED", "DirectoryView module. \"\"\" import sys import unittest import warnings from", "remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) def test_DeleteAddEditMethod(self): # Check that if", "# edit method self._writeFile('test2.py', \"return 'test2.3'\", self.use_dir_mtime) # check self.assertEqual(self.ob.fake_skin.test2(),", "from os import remove from os.path import join from tempfile", "= sys.modules['Products.CMFCore'].__path__[0] self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', '')) cmfpath = join(cmfpath, 'tests') self.assertEqual(_findProductForPath(cmfpath),", "new folder mkdir(join(self.skin_path_name, 'test3')) info = _dirreg.getDirectoryInfo(self.ob.fake_skin._dirpath) info.reload() self.use_dir_mtime =", "best to find an appropriate skin and only do nothing", "method goes away remove(join(self.skin_path_name, 'test2.py')) self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) def test_DeleteAddEditMethod(self): #", "Acquisition hackery. from Products.CMFCore.DirectoryView import DirectoryViewSurrogate self.assertTrue(isinstance(self.ob.fake_skin, DirectoryViewSurrogate)) def test_DirectoryViewMethod(self):", "from Products.CMFCore.DirectoryView import registerDirectory registerDirectory('fake_skins', _globals) self.ob = DummyFolder() addDirectoryViews(self.ob,", "situations # where you only want to store markers and", "module. 
\"\"\" import sys import unittest import warnings from os", "dirs, dirs.keys()) self.assertEqual(self.ob.fake_skin.getDirPath(), 'Products.CMFCore.tests:fake_skins/fake_skin') class DirectoryViewTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self)", "format. def test_registerDirectoryKeys(self): from Products.CMFCore.DirectoryView import _dirreg dirs = _dirreg._directories", "with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.ob.fake_skin.manage_properties(file) self.assertEqual(self.ob.fake_skin.objectIds(), []) # Check", "self._registerDirectory(self) def test_addDirectoryViews(self): # Test addDirectoryViews # also test registration", "in the right format. def test_registerDirectoryKeys(self): from Products.CMFCore.DirectoryView import _dirreg", "into the persistent DirectoryView as well. This is bad in", "= DirectoryView._dirreg._meta_types if 'FOLDER' in metatype_registry: del metatype_registry['FOLDER'] FSDVTest.tearDown(self) def", "file testfolder = self.ob.fake_skin.test_directory self.assertEqual(testfolder.getProperty('title'), 'test_directory Title') def test_DirectoryViewFolderDefault(self): #", "# Check DirectoryView added by addDirectoryViews # appears as a", "It is possible to cause ZODB writes because # setting", "the fake meta_type # \"FOLDER\" and test again... from Products.CMFCore.DirectoryView", "DummyDirectoryViewSurrogate)) class DebugModeTests(WritableFSDVTest): def setUp(self): from Products.CMFCore.DirectoryView import _dirreg WritableFSDVTest.setUp(self)", "are ignored, # even when a custom ignore list is", "first. 
self.assertTrue(hasattr(self.ob.fake_skin.test_directory, 'README.txt')) self._deleteFile(join('test_directory', 'README.txt'), self.use_dir_mtime) self._deleteDirectory('test_directory', self.use_dir_mtime) self.assertFalse(hasattr(self.ob.fake_skin, 'test_directory'))", "DummyDirectoryViewSurrogate: pass class DummyDirectoryView(DirectoryView): def __of__(self, parent): return DummyDirectoryViewSurrogate() registerMetaType('FOLDER',", "def test_DeleteMethod(self): # Make sure a deleted method goes away", "not in self.ob.fake_skin.objectIds(), '%s not ignored' % name) def test_surrogate_writethrough(self):", "############################################################################## # # Copyright (c) 2002 Zope Foundation and Contributors.", "DirectoryViewSurrogate due # to Acquisition hackery. from Products.CMFCore.DirectoryView import DirectoryViewSurrogate", "DirectoryView notices. # This exercises yet another Win32 mtime weirdity.", "'')) cmfpath = join(cmfpath, 'tests') self.assertEqual(_findProductForPath(cmfpath), ('Products.CMFCore', 'tests')) def test_getDirectoryInfo(self):", "from . 
import _globals from .base.dummy import DummyFolder from .base.testcase", "in visible) class DirectoryViewFolderTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self._registerDirectory(self) def tearDown(self):", "that, no matter what is stored in their dirpath, FSDV's", "only want to store markers and remove them before the", "self.assertFalse(hasattr(self.ob.fake_skin, 'test2')) # add method back to the fake skin", "EXPRESS OR IMPLIED # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT", "doesn't barf pass def test_DirectoryViewExists(self): # Check DirectoryView added by", "if DirectoryView method works self.assertEqual(self.ob.fake_skin.test1(), 'test1') def test_properties(self): # Make", "skin and only do nothing in the case where an", "self.ob.fake_skin.test_directory self.assertEqual(testfolder.getProperty('title'), 'test_directory Title') def test_DirectoryViewFolderDefault(self): # Test that a", "given a really wacky path def test_UnhandleableExpandPath(self): file = mktemp()", "in must_ignore: self.assertFalse(name in visible) class DirectoryViewFolderTests(FSDVTest): def setUp(self): FSDVTest.setUp(self)", "from Products.CMFCore.DirectoryView import DirectoryView from Products.CMFCore.DirectoryView import registerMetaType class DummyDirectoryViewSurrogate:", "License, # Version 2.1 (ZPL). A copy of the ZPL", "a warning was raised. self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) text =", "the directory view is reading properties self.assertEqual(self.ob.fake_skin.testPT.title, 'Zope Pope') def", "self._registerDirectory(self) # add a method to the fake skin folder", "the Zope Public License, # Version 2.1 (ZPL). 
A copy", "_globals) self.ob = DummyFolder() addDirectoryViews(self.ob, 'fake_skins', _globals) def test__generateKey(self): from", "'tests\\foo') self.assertTrue(subkey.startswith(key)) def test__findProductForPath(self): from Products.CMFCore.DirectoryView import _findProductForPath cmfpath =", "from Products.CMFCore import DirectoryView # This is nasty, but there", "def test_DeleteAddEditMethod(self): # Check that if we delete a method,", "then edit it, the DirectoryView notices. # This exercises yet", "DirectoryViewIgnoreTests(FSDVTest): def setUp(self): FSDVTest.setUp(self) self.manual_ign = ('CVS', 'SVN', 'test_manual_ignore.py') self._registerDirectory(self," ]
[ "self.options.declare('thermo_method', default='CEA', values=('CEA',), desc='Method for computing thermodynamic properties') self.options.declare('thermo_data', default=species_data.janaf,", "FlightConditions()) p1.model.connect('des_vars.W', 'fc.W') p1.model.connect('des_vars.alt', 'fc.alt') p1.model.connect('des_vars.MN', 'fc.MN') p1.model.connect('des_vars.dTs', 'fc.dTs') p1.setup()", "'fc.dTs') p1.setup() # p1.root.list_connections() p1['des_vars.alt'] = 17868.79060515557 p1['des_vars.MN'] = 2.101070288213628", "lower=1e-4, units='psi', desc='Total pressure', eq_units='psi') # sub.set_order(['fs','balance']) newton = conv.nonlinear_solver", "properties given an altitude and Mach number using the input", "p1.model.connect('des_vars.alt', 'fc.alt') p1.model.connect('des_vars.MN', 'fc.MN') p1.model.connect('des_vars.dTs', 'fc.dTs') p1.setup() # p1.root.list_connections() p1['des_vars.alt']", "FlightConditions(om.Group): \"\"\"Determines total and static flow properties given an altitude", "print('Ts_atm: ', p1['fc.ambient.Ts']) print('Ts_set: ', p1['fc.Fl_O:stat:T']) print('Ps_atm: ', p1['fc.ambient.Ps']) print('Ps_set:", "newton = conv.nonlinear_solver = om.NewtonSolver() newton.options['atol'] = 1e-10 newton.options['rtol'] =", "proms = ['Fl_O:*', 'MN', 'W', 'WAR'] else: proms = ['Fl_O:*',", "self.connect('balance.Tt', 'fs.T') self.connect('Fl_O:stat:P', 'balance.lhs:Pt') self.connect('Fl_O:stat:T', 'balance.lhs:Tt') # self.set_order(['ambient', 'subgroup']) if", "= ['Fl_O:*', 'MN', 'W', 'WAR'] else: proms = ['Fl_O:*', 'MN',", "openmdao.api as om from pycycle.thermo.cea import species_data from pycycle.constants import", "True conv.linear_solver = om.DirectSolver(assemble_jac=True) self.connect('ambient.Ps', 'balance.rhs:Pt') self.connect('ambient.Ts', 'balance.rhs:Tt') self.connect('balance.Pt', 'fs.P')", "p1.root.list_connections() p1['des_vars.alt'] = 17868.79060515557 p1['des_vars.MN'] = 2.101070288213628 p1['des_vars.dTs'] = 0.0", "= 10 
newton.options['iprint'] = -1 newton.options['solve_subsystems'] = True newton.options['reraise_child_analysiserror'] =", "from pycycle.constants import AIR_ELEMENTS from pycycle.elements.ambient import Ambient from pycycle.elements.flow_start", "pycycle.constants import AIR_ELEMENTS from pycycle.elements.ambient import Ambient from pycycle.elements.flow_start import", "'WAR'] else: proms = ['Fl_O:*', 'MN', 'W'] conv.add_subsystem('fs', FlowStart(thermo_method=thermo_method, thermo_data=thermo_data,", "om.Group() des_vars = p1.model.add_subsystem('des_vars', om.IndepVarComp()) des_vars.add_output('W', 0.0, units='lbm/s') des_vars.add_output('alt', 1.,", "desc='thermodynamic data set', recordable=False) self.options.declare('elements', default=AIR_ELEMENTS, desc='set of elements present", "pycycle.elements.ambient import Ambient from pycycle.elements.flow_start import FlowStart class FlightConditions(om.Group): \"\"\"Determines", "initialize(self): self.options.declare('thermo_method', default='CEA', values=('CEA',), desc='Method for computing thermodynamic properties') self.options.declare('thermo_data',", "= False newton.linesearch = om.BoundsEnforceLS() newton.linesearch.options['bound_enforcement'] = 'scalar' newton.linesearch.options['iprint'] =", "om from pycycle.thermo.cea import species_data from pycycle.constants import AIR_ELEMENTS from", "recordable=False) self.options.declare('elements', default=AIR_ELEMENTS, desc='set of elements present in the flow')", "balance = conv.add_subsystem('balance', om.BalanceComp()) balance.add_balance('Tt', val=500.0, lower=1e-4, units='degR', desc='Total temperature',", "desc='Total temperature', eq_units='degR') balance.add_balance('Pt', val=14.696, lower=1e-4, units='psi', desc='Total pressure', eq_units='psi')", "set', recordable=False) self.options.declare('elements', default=AIR_ELEMENTS, desc='set of elements present in the", "newton.linesearch.options['bound_enforcement'] = 'scalar' 
newton.linesearch.options['iprint'] = -1 # newton.linesearch.options['solve_subsystems'] = True", "\"__main__\": p1 = om.Problem() p1.model = om.Group() des_vars = p1.model.add_subsystem('des_vars',", "om.DirectSolver(assemble_jac=True) self.connect('ambient.Ps', 'balance.rhs:Pt') self.connect('ambient.Ts', 'balance.rhs:Tt') self.connect('balance.Pt', 'fs.P') self.connect('balance.Tt', 'fs.T') self.connect('Fl_O:stat:P',", "0.0, units='lbm/s') des_vars.add_output('alt', 1., units='ft') des_vars.add_output('MN', 0.5) des_vars.add_output('dTs', 0.0, units='degR')", "Ambient from pycycle.elements.flow_start import FlowStart class FlightConditions(om.Group): \"\"\"Determines total and", "= self.options['use_WAR'] self.add_subsystem('ambient', Ambient(), promotes=('alt', 'dTs')) # inputs conv =", "promotes=proms) balance = conv.add_subsystem('balance', om.BalanceComp()) balance.add_balance('Tt', val=500.0, lower=1e-4, units='degR', desc='Total", "newton.options['reraise_child_analysiserror'] = False newton.linesearch = om.BoundsEnforceLS() newton.linesearch.options['bound_enforcement'] = 'scalar' newton.linesearch.options['iprint']", "the flow') self.options.declare('use_WAR', default=False, values=[True, False], desc='If True, includes WAR", "default=species_data.janaf, desc='thermodynamic data set', recordable=False) self.options.declare('elements', default=AIR_ELEMENTS, desc='set of elements", "total and static flow properties given an altitude and Mach", "newton.options['rtol'] = 1e-10 newton.options['maxiter'] = 10 newton.options['iprint'] = -1 newton.options['solve_subsystems']", "import AIR_ELEMENTS from pycycle.elements.ambient import Ambient from pycycle.elements.flow_start import FlowStart", "self.connect('ambient.Ps', 'balance.rhs:Pt') self.connect('ambient.Ts', 'balance.rhs:Tt') self.connect('balance.Pt', 'fs.P') self.connect('balance.Tt', 'fs.T') self.connect('Fl_O:stat:P', 'balance.lhs:Pt')", "p1 = om.Problem() p1.model = om.Group() des_vars = 
p1.model.add_subsystem('des_vars', om.IndepVarComp())", "as om from pycycle.thermo.cea import species_data from pycycle.constants import AIR_ELEMENTS", "inputs conv = self.add_subsystem('conv', om.Group(), promotes=['*']) if use_WAR == True:", "desc='set of elements present in the flow') self.options.declare('use_WAR', default=False, values=[True,", "units='lbm/s') des_vars.add_output('alt', 1., units='ft') des_vars.add_output('MN', 0.5) des_vars.add_output('dTs', 0.0, units='degR') fc", "-1 newton.options['solve_subsystems'] = True newton.options['reraise_child_analysiserror'] = False newton.linesearch = om.BoundsEnforceLS()", "# self.set_order(['ambient', 'subgroup']) if __name__ == \"__main__\": p1 = om.Problem()", "= 17868.79060515557 p1['des_vars.MN'] = 2.101070288213628 p1['des_vars.dTs'] = 0.0 p1['des_vars.W'] =", "computing thermodynamic properties') self.options.declare('thermo_data', default=species_data.janaf, desc='thermodynamic data set', recordable=False) self.options.declare('elements',", "from pycycle.elements.ambient import Ambient from pycycle.elements.flow_start import FlowStart class FlightConditions(om.Group):", "given an altitude and Mach number using the input atmosphere", "def initialize(self): self.options.declare('thermo_method', default='CEA', values=('CEA',), desc='Method for computing thermodynamic properties')", "thermo_data = self.options['thermo_data'] elements = self.options['elements'] use_WAR = self.options['use_WAR'] self.add_subsystem('ambient',", "= self.options['thermo_data'] elements = self.options['elements'] use_WAR = self.options['use_WAR'] self.add_subsystem('ambient', Ambient(),", "Ambient(), promotes=('alt', 'dTs')) # inputs conv = self.add_subsystem('conv', om.Group(), promotes=['*'])", "an altitude and Mach number using the input atmosphere model\"\"\"", "species_data from pycycle.constants import AIR_ELEMENTS from pycycle.elements.ambient import Ambient from", "balance.add_balance('Pt', val=14.696, lower=1e-4, 
units='psi', desc='Total pressure', eq_units='psi') # sub.set_order(['fs','balance']) newton", "om.Problem() p1.model = om.Group() des_vars = p1.model.add_subsystem('des_vars', om.IndepVarComp()) des_vars.add_output('W', 0.0,", "'fc.MN') p1.model.connect('des_vars.dTs', 'fc.dTs') p1.setup() # p1.root.list_connections() p1['des_vars.alt'] = 17868.79060515557 p1['des_vars.MN']", "= self.options['elements'] use_WAR = self.options['use_WAR'] self.add_subsystem('ambient', Ambient(), promotes=('alt', 'dTs')) #", "== \"__main__\": p1 = om.Problem() p1.model = om.Group() des_vars =", "includes WAR calculation') def setup(self): thermo_method = self.options['thermo_method'] thermo_data =", "'fs.P') self.connect('balance.Tt', 'fs.T') self.connect('Fl_O:stat:P', 'balance.lhs:Pt') self.connect('Fl_O:stat:T', 'balance.lhs:Tt') # self.set_order(['ambient', 'subgroup'])", "0.0 p1['des_vars.W'] = 1.0 p1.run_model() print('Ts_atm: ', p1['fc.ambient.Ts']) print('Ts_set: ',", "data set', recordable=False) self.options.declare('elements', default=AIR_ELEMENTS, desc='set of elements present in", "proms = ['Fl_O:*', 'MN', 'W'] conv.add_subsystem('fs', FlowStart(thermo_method=thermo_method, thermo_data=thermo_data, elements=elements, use_WAR=use_WAR),", "self.options['thermo_data'] elements = self.options['elements'] use_WAR = self.options['use_WAR'] self.add_subsystem('ambient', Ambient(), promotes=('alt',", "newton.linesearch.options['solve_subsystems'] = True conv.linear_solver = om.DirectSolver(assemble_jac=True) self.connect('ambient.Ps', 'balance.rhs:Pt') self.connect('ambient.Ts', 'balance.rhs:Tt')", "p1['des_vars.W'] = 1.0 p1.run_model() print('Ts_atm: ', p1['fc.ambient.Ts']) print('Ts_set: ', p1['fc.Fl_O:stat:T'])", "import species_data from pycycle.constants import AIR_ELEMENTS from pycycle.elements.ambient import Ambient", "values=('CEA',), desc='Method for computing thermodynamic properties') self.options.declare('thermo_data', default=species_data.janaf, desc='thermodynamic 
data", "elements=elements, use_WAR=use_WAR), promotes=proms) balance = conv.add_subsystem('balance', om.BalanceComp()) balance.add_balance('Tt', val=500.0, lower=1e-4,", "['Fl_O:*', 'MN', 'W'] conv.add_subsystem('fs', FlowStart(thermo_method=thermo_method, thermo_data=thermo_data, elements=elements, use_WAR=use_WAR), promotes=proms) balance", "# p1.root.list_connections() p1['des_vars.alt'] = 17868.79060515557 p1['des_vars.MN'] = 2.101070288213628 p1['des_vars.dTs'] =", "', p1['fc.ambient.rhos']*32.175) print('rhos_set: ', p1['fc.Fl_O:stat:rho']) print('W', p1['fc.Fl_O:stat:W']) print('Pt: ', p1['fc.Fl_O:tot:P'])", "self.connect('balance.Pt', 'fs.P') self.connect('balance.Tt', 'fs.T') self.connect('Fl_O:stat:P', 'balance.lhs:Pt') self.connect('Fl_O:stat:T', 'balance.lhs:Tt') # self.set_order(['ambient',", "pycycle.elements.flow_start import FlowStart class FlightConditions(om.Group): \"\"\"Determines total and static flow", "'W', 'WAR'] else: proms = ['Fl_O:*', 'MN', 'W'] conv.add_subsystem('fs', FlowStart(thermo_method=thermo_method,", "lower=1e-4, units='degR', desc='Total temperature', eq_units='degR') balance.add_balance('Pt', val=14.696, lower=1e-4, units='psi', desc='Total", "conv.linear_solver = om.DirectSolver(assemble_jac=True) self.connect('ambient.Ps', 'balance.rhs:Pt') self.connect('ambient.Ts', 'balance.rhs:Tt') self.connect('balance.Pt', 'fs.P') self.connect('balance.Tt',", "'balance.lhs:Tt') # self.set_order(['ambient', 'subgroup']) if __name__ == \"__main__\": p1 =", "from pycycle.elements.flow_start import FlowStart class FlightConditions(om.Group): \"\"\"Determines total and static", "False newton.linesearch = om.BoundsEnforceLS() newton.linesearch.options['bound_enforcement'] = 'scalar' newton.linesearch.options['iprint'] = -1", "self.options['thermo_method'] thermo_data = self.options['thermo_data'] elements = self.options['elements'] use_WAR = self.options['use_WAR']", "self.options.declare('thermo_data', default=species_data.janaf, 
desc='thermodynamic data set', recordable=False) self.options.declare('elements', default=AIR_ELEMENTS, desc='set of", "promotes=['*']) if use_WAR == True: proms = ['Fl_O:*', 'MN', 'W',", "= 1.0 p1.run_model() print('Ts_atm: ', p1['fc.ambient.Ts']) print('Ts_set: ', p1['fc.Fl_O:stat:T']) print('Ps_atm:", "self.options['use_WAR'] self.add_subsystem('ambient', Ambient(), promotes=('alt', 'dTs')) # inputs conv = self.add_subsystem('conv',", "p1.model.add_subsystem(\"fc\", FlightConditions()) p1.model.connect('des_vars.W', 'fc.W') p1.model.connect('des_vars.alt', 'fc.alt') p1.model.connect('des_vars.MN', 'fc.MN') p1.model.connect('des_vars.dTs', 'fc.dTs')", "p1['des_vars.dTs'] = 0.0 p1['des_vars.W'] = 1.0 p1.run_model() print('Ts_atm: ', p1['fc.ambient.Ts'])", "flow properties given an altitude and Mach number using the", "= True conv.linear_solver = om.DirectSolver(assemble_jac=True) self.connect('ambient.Ps', 'balance.rhs:Pt') self.connect('ambient.Ts', 'balance.rhs:Tt') self.connect('balance.Pt',", "p1['des_vars.alt'] = 17868.79060515557 p1['des_vars.MN'] = 2.101070288213628 p1['des_vars.dTs'] = 0.0 p1['des_vars.W']", "sub.set_order(['fs','balance']) newton = conv.nonlinear_solver = om.NewtonSolver() newton.options['atol'] = 1e-10 newton.options['rtol']", "newton.options['iprint'] = -1 newton.options['solve_subsystems'] = True newton.options['reraise_child_analysiserror'] = False newton.linesearch", "= p1.model.add_subsystem('des_vars', om.IndepVarComp()) des_vars.add_output('W', 0.0, units='lbm/s') des_vars.add_output('alt', 1., units='ft') des_vars.add_output('MN',", "WAR calculation') def setup(self): thermo_method = self.options['thermo_method'] thermo_data = self.options['thermo_data']", "units='psi', desc='Total pressure', eq_units='psi') # sub.set_order(['fs','balance']) newton = conv.nonlinear_solver =", "-1 # newton.linesearch.options['solve_subsystems'] = True conv.linear_solver = om.DirectSolver(assemble_jac=True) self.connect('ambient.Ps', 
'balance.rhs:Pt')", "use_WAR == True: proms = ['Fl_O:*', 'MN', 'W', 'WAR'] else:", "use_WAR = self.options['use_WAR'] self.add_subsystem('ambient', Ambient(), promotes=('alt', 'dTs')) # inputs conv", "present in the flow') self.options.declare('use_WAR', default=False, values=[True, False], desc='If True,", "des_vars.add_output('dTs', 0.0, units='degR') fc = p1.model.add_subsystem(\"fc\", FlightConditions()) p1.model.connect('des_vars.W', 'fc.W') p1.model.connect('des_vars.alt',", "'fc.W') p1.model.connect('des_vars.alt', 'fc.alt') p1.model.connect('des_vars.MN', 'fc.MN') p1.model.connect('des_vars.dTs', 'fc.dTs') p1.setup() # p1.root.list_connections()", "p1['fc.Fl_O:stat:P']) print('rhos_atm: ', p1['fc.ambient.rhos']*32.175) print('rhos_set: ', p1['fc.Fl_O:stat:rho']) print('W', p1['fc.Fl_O:stat:W']) print('Pt:", "number using the input atmosphere model\"\"\" def initialize(self): self.options.declare('thermo_method', default='CEA',", "eq_units='psi') # sub.set_order(['fs','balance']) newton = conv.nonlinear_solver = om.NewtonSolver() newton.options['atol'] =", "def setup(self): thermo_method = self.options['thermo_method'] thermo_data = self.options['thermo_data'] elements =", "Mach number using the input atmosphere model\"\"\" def initialize(self): self.options.declare('thermo_method',", "default=AIR_ELEMENTS, desc='set of elements present in the flow') self.options.declare('use_WAR', default=False,", "self.options.declare('use_WAR', default=False, values=[True, False], desc='If True, includes WAR calculation') def", "= ['Fl_O:*', 'MN', 'W'] conv.add_subsystem('fs', FlowStart(thermo_method=thermo_method, thermo_data=thermo_data, elements=elements, use_WAR=use_WAR), promotes=proms)", "'balance.rhs:Tt') self.connect('balance.Pt', 'fs.P') self.connect('balance.Tt', 'fs.T') self.connect('Fl_O:stat:P', 'balance.lhs:Pt') self.connect('Fl_O:stat:T', 'balance.lhs:Tt') #", "'fs.T') self.connect('Fl_O:stat:P', 'balance.lhs:Pt') self.connect('Fl_O:stat:T', 
'balance.lhs:Tt') # self.set_order(['ambient', 'subgroup']) if __name__", "thermodynamic properties') self.options.declare('thermo_data', default=species_data.janaf, desc='thermodynamic data set', recordable=False) self.options.declare('elements', default=AIR_ELEMENTS,", "des_vars.add_output('W', 0.0, units='lbm/s') des_vars.add_output('alt', 1., units='ft') des_vars.add_output('MN', 0.5) des_vars.add_output('dTs', 0.0,", "= om.BoundsEnforceLS() newton.linesearch.options['bound_enforcement'] = 'scalar' newton.linesearch.options['iprint'] = -1 # newton.linesearch.options['solve_subsystems']", "p1.model.connect('des_vars.dTs', 'fc.dTs') p1.setup() # p1.root.list_connections() p1['des_vars.alt'] = 17868.79060515557 p1['des_vars.MN'] =", "2.101070288213628 p1['des_vars.dTs'] = 0.0 p1['des_vars.W'] = 1.0 p1.run_model() print('Ts_atm: ',", "pressure', eq_units='psi') # sub.set_order(['fs','balance']) newton = conv.nonlinear_solver = om.NewtonSolver() newton.options['atol']", "1., units='ft') des_vars.add_output('MN', 0.5) des_vars.add_output('dTs', 0.0, units='degR') fc = p1.model.add_subsystem(\"fc\",", "1.0 p1.run_model() print('Ts_atm: ', p1['fc.ambient.Ts']) print('Ts_set: ', p1['fc.Fl_O:stat:T']) print('Ps_atm: ',", "self.options['elements'] use_WAR = self.options['use_WAR'] self.add_subsystem('ambient', Ambient(), promotes=('alt', 'dTs')) # inputs", "'subgroup']) if __name__ == \"__main__\": p1 = om.Problem() p1.model =", "10 newton.options['iprint'] = -1 newton.options['solve_subsystems'] = True newton.options['reraise_child_analysiserror'] = False", "0.5) des_vars.add_output('dTs', 0.0, units='degR') fc = p1.model.add_subsystem(\"fc\", FlightConditions()) p1.model.connect('des_vars.W', 'fc.W')", "model\"\"\" def initialize(self): self.options.declare('thermo_method', default='CEA', values=('CEA',), desc='Method for computing thermodynamic", "in the flow') self.options.declare('use_WAR', default=False, values=[True, False], desc='If True, includes", "'MN', 'W', 
'WAR'] else: proms = ['Fl_O:*', 'MN', 'W'] conv.add_subsystem('fs',", "eq_units='degR') balance.add_balance('Pt', val=14.696, lower=1e-4, units='psi', desc='Total pressure', eq_units='psi') # sub.set_order(['fs','balance'])", "False], desc='If True, includes WAR calculation') def setup(self): thermo_method =", "'balance.rhs:Pt') self.connect('ambient.Ts', 'balance.rhs:Tt') self.connect('balance.Pt', 'fs.P') self.connect('balance.Tt', 'fs.T') self.connect('Fl_O:stat:P', 'balance.lhs:Pt') self.connect('Fl_O:stat:T',", "\"\"\"Determines total and static flow properties given an altitude and", "p1.setup() # p1.root.list_connections() p1['des_vars.alt'] = 17868.79060515557 p1['des_vars.MN'] = 2.101070288213628 p1['des_vars.dTs']", "', p1['fc.Fl_O:stat:T']) print('Ps_atm: ', p1['fc.ambient.Ps']) print('Ps_set: ', p1['fc.Fl_O:stat:P']) print('rhos_atm: ',", "print('Ps_set: ', p1['fc.Fl_O:stat:P']) print('rhos_atm: ', p1['fc.ambient.rhos']*32.175) print('rhos_set: ', p1['fc.Fl_O:stat:rho']) print('W',", "p1.model.connect('des_vars.MN', 'fc.MN') p1.model.connect('des_vars.dTs', 'fc.dTs') p1.setup() # p1.root.list_connections() p1['des_vars.alt'] = 17868.79060515557", "conv.nonlinear_solver = om.NewtonSolver() newton.options['atol'] = 1e-10 newton.options['rtol'] = 1e-10 newton.options['maxiter']", "of elements present in the flow') self.options.declare('use_WAR', default=False, values=[True, False],", "0.0, units='degR') fc = p1.model.add_subsystem(\"fc\", FlightConditions()) p1.model.connect('des_vars.W', 'fc.W') p1.model.connect('des_vars.alt', 'fc.alt')", "static flow properties given an altitude and Mach number using", "conv = self.add_subsystem('conv', om.Group(), promotes=['*']) if use_WAR == True: proms", "om.NewtonSolver() newton.options['atol'] = 1e-10 newton.options['rtol'] = 1e-10 newton.options['maxiter'] = 10", "if __name__ == \"__main__\": p1 = om.Problem() p1.model = om.Group()", "pycycle.thermo.cea import species_data from pycycle.constants import 
AIR_ELEMENTS from pycycle.elements.ambient import", "True newton.options['reraise_child_analysiserror'] = False newton.linesearch = om.BoundsEnforceLS() newton.linesearch.options['bound_enforcement'] = 'scalar'", "p1['fc.ambient.Ts']) print('Ts_set: ', p1['fc.Fl_O:stat:T']) print('Ps_atm: ', p1['fc.ambient.Ps']) print('Ps_set: ', p1['fc.Fl_O:stat:P'])", "newton.options['maxiter'] = 10 newton.options['iprint'] = -1 newton.options['solve_subsystems'] = True newton.options['reraise_child_analysiserror']", "newton.options['solve_subsystems'] = True newton.options['reraise_child_analysiserror'] = False newton.linesearch = om.BoundsEnforceLS() newton.linesearch.options['bound_enforcement']", "newton.options['atol'] = 1e-10 newton.options['rtol'] = 1e-10 newton.options['maxiter'] = 10 newton.options['iprint']", "des_vars.add_output('MN', 0.5) des_vars.add_output('dTs', 0.0, units='degR') fc = p1.model.add_subsystem(\"fc\", FlightConditions()) p1.model.connect('des_vars.W',", "default=False, values=[True, False], desc='If True, includes WAR calculation') def setup(self):", "FlowStart class FlightConditions(om.Group): \"\"\"Determines total and static flow properties given", "des_vars.add_output('alt', 1., units='ft') des_vars.add_output('MN', 0.5) des_vars.add_output('dTs', 0.0, units='degR') fc =", "AIR_ELEMENTS from pycycle.elements.ambient import Ambient from pycycle.elements.flow_start import FlowStart class", "['Fl_O:*', 'MN', 'W', 'WAR'] else: proms = ['Fl_O:*', 'MN', 'W']", "self.options.declare('elements', default=AIR_ELEMENTS, desc='set of elements present in the flow') self.options.declare('use_WAR',", "p1['fc.ambient.Ps']) print('Ps_set: ', p1['fc.Fl_O:stat:P']) print('rhos_atm: ', p1['fc.ambient.rhos']*32.175) print('rhos_set: ', p1['fc.Fl_O:stat:rho'])", "om.BoundsEnforceLS() newton.linesearch.options['bound_enforcement'] = 'scalar' newton.linesearch.options['iprint'] = -1 # newton.linesearch.options['solve_subsystems'] =", "= om.NewtonSolver() 
newton.options['atol'] = 1e-10 newton.options['rtol'] = 1e-10 newton.options['maxiter'] =", "1e-10 newton.options['rtol'] = 1e-10 newton.options['maxiter'] = 10 newton.options['iprint'] = -1", "properties') self.options.declare('thermo_data', default=species_data.janaf, desc='thermodynamic data set', recordable=False) self.options.declare('elements', default=AIR_ELEMENTS, desc='set", "', p1['fc.ambient.Ps']) print('Ps_set: ', p1['fc.Fl_O:stat:P']) print('rhos_atm: ', p1['fc.ambient.rhos']*32.175) print('rhos_set: ',", "= om.Group() des_vars = p1.model.add_subsystem('des_vars', om.IndepVarComp()) des_vars.add_output('W', 0.0, units='lbm/s') des_vars.add_output('alt',", "= om.Problem() p1.model = om.Group() des_vars = p1.model.add_subsystem('des_vars', om.IndepVarComp()) des_vars.add_output('W',", "= conv.add_subsystem('balance', om.BalanceComp()) balance.add_balance('Tt', val=500.0, lower=1e-4, units='degR', desc='Total temperature', eq_units='degR')", "= 1e-10 newton.options['maxiter'] = 10 newton.options['iprint'] = -1 newton.options['solve_subsystems'] =", "', p1['fc.Fl_O:stat:P']) print('rhos_atm: ', p1['fc.ambient.rhos']*32.175) print('rhos_set: ', p1['fc.Fl_O:stat:rho']) print('W', p1['fc.Fl_O:stat:W'])", "temperature', eq_units='degR') balance.add_balance('Pt', val=14.696, lower=1e-4, units='psi', desc='Total pressure', eq_units='psi') #", "else: proms = ['Fl_O:*', 'MN', 'W'] conv.add_subsystem('fs', FlowStart(thermo_method=thermo_method, thermo_data=thermo_data, elements=elements,", "p1.run_model() print('Ts_atm: ', p1['fc.ambient.Ts']) print('Ts_set: ', p1['fc.Fl_O:stat:T']) print('Ps_atm: ', p1['fc.ambient.Ps'])", "units='degR') fc = p1.model.add_subsystem(\"fc\", FlightConditions()) p1.model.connect('des_vars.W', 'fc.W') p1.model.connect('des_vars.alt', 'fc.alt') p1.model.connect('des_vars.MN',", "altitude and Mach number using the input atmosphere model\"\"\" def", "import Ambient from pycycle.elements.flow_start import FlowStart class 
FlightConditions(om.Group): \"\"\"Determines total", "the input atmosphere model\"\"\" def initialize(self): self.options.declare('thermo_method', default='CEA', values=('CEA',), desc='Method", "thermo_method = self.options['thermo_method'] thermo_data = self.options['thermo_data'] elements = self.options['elements'] use_WAR", "# inputs conv = self.add_subsystem('conv', om.Group(), promotes=['*']) if use_WAR ==", "val=500.0, lower=1e-4, units='degR', desc='Total temperature', eq_units='degR') balance.add_balance('Pt', val=14.696, lower=1e-4, units='psi',", "import openmdao.api as om from pycycle.thermo.cea import species_data from pycycle.constants", "om.IndepVarComp()) des_vars.add_output('W', 0.0, units='lbm/s') des_vars.add_output('alt', 1., units='ft') des_vars.add_output('MN', 0.5) des_vars.add_output('dTs',", "flow') self.options.declare('use_WAR', default=False, values=[True, False], desc='If True, includes WAR calculation')", "val=14.696, lower=1e-4, units='psi', desc='Total pressure', eq_units='psi') # sub.set_order(['fs','balance']) newton =", "'scalar' newton.linesearch.options['iprint'] = -1 # newton.linesearch.options['solve_subsystems'] = True conv.linear_solver =", "FlowStart(thermo_method=thermo_method, thermo_data=thermo_data, elements=elements, use_WAR=use_WAR), promotes=proms) balance = conv.add_subsystem('balance', om.BalanceComp()) balance.add_balance('Tt',", "'dTs')) # inputs conv = self.add_subsystem('conv', om.Group(), promotes=['*']) if use_WAR", "p1['fc.Fl_O:stat:T']) print('Ps_atm: ', p1['fc.ambient.Ps']) print('Ps_set: ', p1['fc.Fl_O:stat:P']) print('rhos_atm: ', p1['fc.ambient.rhos']*32.175)", "and Mach number using the input atmosphere model\"\"\" def initialize(self):", "conv.add_subsystem('fs', FlowStart(thermo_method=thermo_method, thermo_data=thermo_data, elements=elements, use_WAR=use_WAR), promotes=proms) balance = conv.add_subsystem('balance', om.BalanceComp())", "use_WAR=use_WAR), promotes=proms) balance = 
conv.add_subsystem('balance', om.BalanceComp()) balance.add_balance('Tt', val=500.0, lower=1e-4, units='degR',", "= self.options['thermo_method'] thermo_data = self.options['thermo_data'] elements = self.options['elements'] use_WAR =", "class FlightConditions(om.Group): \"\"\"Determines total and static flow properties given an", "calculation') def setup(self): thermo_method = self.options['thermo_method'] thermo_data = self.options['thermo_data'] elements", "promotes=('alt', 'dTs')) # inputs conv = self.add_subsystem('conv', om.Group(), promotes=['*']) if", "and static flow properties given an altitude and Mach number", "units='degR', desc='Total temperature', eq_units='degR') balance.add_balance('Pt', val=14.696, lower=1e-4, units='psi', desc='Total pressure',", "desc='Total pressure', eq_units='psi') # sub.set_order(['fs','balance']) newton = conv.nonlinear_solver = om.NewtonSolver()", "= True newton.options['reraise_child_analysiserror'] = False newton.linesearch = om.BoundsEnforceLS() newton.linesearch.options['bound_enforcement'] =", "self.connect('Fl_O:stat:P', 'balance.lhs:Pt') self.connect('Fl_O:stat:T', 'balance.lhs:Tt') # self.set_order(['ambient', 'subgroup']) if __name__ ==", "balance.add_balance('Tt', val=500.0, lower=1e-4, units='degR', desc='Total temperature', eq_units='degR') balance.add_balance('Pt', val=14.696, lower=1e-4,", "p1.model.connect('des_vars.W', 'fc.W') p1.model.connect('des_vars.alt', 'fc.alt') p1.model.connect('des_vars.MN', 'fc.MN') p1.model.connect('des_vars.dTs', 'fc.dTs') p1.setup() #", "desc='Method for computing thermodynamic properties') self.options.declare('thermo_data', default=species_data.janaf, desc='thermodynamic data set',", "using the input atmosphere model\"\"\" def initialize(self): self.options.declare('thermo_method', default='CEA', values=('CEA',),", "17868.79060515557 p1['des_vars.MN'] = 2.101070288213628 p1['des_vars.dTs'] = 0.0 p1['des_vars.W'] = 1.0", "self.set_order(['ambient', 'subgroup']) if __name__ == 
\"__main__\": p1 = om.Problem() p1.model", "input atmosphere model\"\"\" def initialize(self): self.options.declare('thermo_method', default='CEA', values=('CEA',), desc='Method for", "conv.add_subsystem('balance', om.BalanceComp()) balance.add_balance('Tt', val=500.0, lower=1e-4, units='degR', desc='Total temperature', eq_units='degR') balance.add_balance('Pt',", "self.connect('ambient.Ts', 'balance.rhs:Tt') self.connect('balance.Pt', 'fs.P') self.connect('balance.Tt', 'fs.T') self.connect('Fl_O:stat:P', 'balance.lhs:Pt') self.connect('Fl_O:stat:T', 'balance.lhs:Tt')", "'balance.lhs:Pt') self.connect('Fl_O:stat:T', 'balance.lhs:Tt') # self.set_order(['ambient', 'subgroup']) if __name__ == \"__main__\":", "self.connect('Fl_O:stat:T', 'balance.lhs:Tt') # self.set_order(['ambient', 'subgroup']) if __name__ == \"__main__\": p1", "= -1 newton.options['solve_subsystems'] = True newton.options['reraise_child_analysiserror'] = False newton.linesearch =", "= conv.nonlinear_solver = om.NewtonSolver() newton.options['atol'] = 1e-10 newton.options['rtol'] = 1e-10", "values=[True, False], desc='If True, includes WAR calculation') def setup(self): thermo_method", "units='ft') des_vars.add_output('MN', 0.5) des_vars.add_output('dTs', 0.0, units='degR') fc = p1.model.add_subsystem(\"fc\", FlightConditions())", "True, includes WAR calculation') def setup(self): thermo_method = self.options['thermo_method'] thermo_data", "setup(self): thermo_method = self.options['thermo_method'] thermo_data = self.options['thermo_data'] elements = self.options['elements']", "= -1 # newton.linesearch.options['solve_subsystems'] = True conv.linear_solver = om.DirectSolver(assemble_jac=True) self.connect('ambient.Ps',", "# newton.linesearch.options['solve_subsystems'] = True conv.linear_solver = om.DirectSolver(assemble_jac=True) self.connect('ambient.Ps', 'balance.rhs:Pt') self.connect('ambient.Ts',", "= 2.101070288213628 p1['des_vars.dTs'] = 0.0 p1['des_vars.W'] = 1.0 p1.run_model() 
print('Ts_atm:", "des_vars = p1.model.add_subsystem('des_vars', om.IndepVarComp()) des_vars.add_output('W', 0.0, units='lbm/s') des_vars.add_output('alt', 1., units='ft')", "p1['des_vars.MN'] = 2.101070288213628 p1['des_vars.dTs'] = 0.0 p1['des_vars.W'] = 1.0 p1.run_model()", "= p1.model.add_subsystem(\"fc\", FlightConditions()) p1.model.connect('des_vars.W', 'fc.W') p1.model.connect('des_vars.alt', 'fc.alt') p1.model.connect('des_vars.MN', 'fc.MN') p1.model.connect('des_vars.dTs',", "__name__ == \"__main__\": p1 = om.Problem() p1.model = om.Group() des_vars", "print('Ps_atm: ', p1['fc.ambient.Ps']) print('Ps_set: ', p1['fc.Fl_O:stat:P']) print('rhos_atm: ', p1['fc.ambient.rhos']*32.175) print('rhos_set:", "= 0.0 p1['des_vars.W'] = 1.0 p1.run_model() print('Ts_atm: ', p1['fc.ambient.Ts']) print('Ts_set:", "p1.model.add_subsystem('des_vars', om.IndepVarComp()) des_vars.add_output('W', 0.0, units='lbm/s') des_vars.add_output('alt', 1., units='ft') des_vars.add_output('MN', 0.5)", "elements present in the flow') self.options.declare('use_WAR', default=False, values=[True, False], desc='If", "= 'scalar' newton.linesearch.options['iprint'] = -1 # newton.linesearch.options['solve_subsystems'] = True conv.linear_solver", "thermo_data=thermo_data, elements=elements, use_WAR=use_WAR), promotes=proms) balance = conv.add_subsystem('balance', om.BalanceComp()) balance.add_balance('Tt', val=500.0,", "= om.DirectSolver(assemble_jac=True) self.connect('ambient.Ps', 'balance.rhs:Pt') self.connect('ambient.Ts', 'balance.rhs:Tt') self.connect('balance.Pt', 'fs.P') self.connect('balance.Tt', 'fs.T')", "fc = p1.model.add_subsystem(\"fc\", FlightConditions()) p1.model.connect('des_vars.W', 'fc.W') p1.model.connect('des_vars.alt', 'fc.alt') p1.model.connect('des_vars.MN', 'fc.MN')", "atmosphere model\"\"\" def initialize(self): self.options.declare('thermo_method', default='CEA', values=('CEA',), desc='Method for computing", "import FlowStart class FlightConditions(om.Group): 
\"\"\"Determines total and static flow properties", "True: proms = ['Fl_O:*', 'MN', 'W', 'WAR'] else: proms =", "'W'] conv.add_subsystem('fs', FlowStart(thermo_method=thermo_method, thermo_data=thermo_data, elements=elements, use_WAR=use_WAR), promotes=proms) balance = conv.add_subsystem('balance',", "newton.linesearch = om.BoundsEnforceLS() newton.linesearch.options['bound_enforcement'] = 'scalar' newton.linesearch.options['iprint'] = -1 #", "= 1e-10 newton.options['rtol'] = 1e-10 newton.options['maxiter'] = 10 newton.options['iprint'] =", "1e-10 newton.options['maxiter'] = 10 newton.options['iprint'] = -1 newton.options['solve_subsystems'] = True", "', p1['fc.ambient.Ts']) print('Ts_set: ', p1['fc.Fl_O:stat:T']) print('Ps_atm: ', p1['fc.ambient.Ps']) print('Ps_set: ',", "from pycycle.thermo.cea import species_data from pycycle.constants import AIR_ELEMENTS from pycycle.elements.ambient", "om.BalanceComp()) balance.add_balance('Tt', val=500.0, lower=1e-4, units='degR', desc='Total temperature', eq_units='degR') balance.add_balance('Pt', val=14.696,", "# sub.set_order(['fs','balance']) newton = conv.nonlinear_solver = om.NewtonSolver() newton.options['atol'] = 1e-10", "for computing thermodynamic properties') self.options.declare('thermo_data', default=species_data.janaf, desc='thermodynamic data set', recordable=False)", "== True: proms = ['Fl_O:*', 'MN', 'W', 'WAR'] else: proms", "default='CEA', values=('CEA',), desc='Method for computing thermodynamic properties') self.options.declare('thermo_data', default=species_data.janaf, desc='thermodynamic", "p1.model = om.Group() des_vars = p1.model.add_subsystem('des_vars', om.IndepVarComp()) des_vars.add_output('W', 0.0, units='lbm/s')", "newton.linesearch.options['iprint'] = -1 # newton.linesearch.options['solve_subsystems'] = True conv.linear_solver = om.DirectSolver(assemble_jac=True)", "= self.add_subsystem('conv', om.Group(), promotes=['*']) if use_WAR == True: proms =", "print('rhos_atm: ', 
p1['fc.ambient.rhos']*32.175) print('rhos_set: ', p1['fc.Fl_O:stat:rho']) print('W', p1['fc.Fl_O:stat:W']) print('Pt: ',", "self.add_subsystem('conv', om.Group(), promotes=['*']) if use_WAR == True: proms = ['Fl_O:*',", "self.add_subsystem('ambient', Ambient(), promotes=('alt', 'dTs')) # inputs conv = self.add_subsystem('conv', om.Group(),", "'fc.alt') p1.model.connect('des_vars.MN', 'fc.MN') p1.model.connect('des_vars.dTs', 'fc.dTs') p1.setup() # p1.root.list_connections() p1['des_vars.alt'] =", "if use_WAR == True: proms = ['Fl_O:*', 'MN', 'W', 'WAR']", "desc='If True, includes WAR calculation') def setup(self): thermo_method = self.options['thermo_method']", "print('Ts_set: ', p1['fc.Fl_O:stat:T']) print('Ps_atm: ', p1['fc.ambient.Ps']) print('Ps_set: ', p1['fc.Fl_O:stat:P']) print('rhos_atm:", "'MN', 'W'] conv.add_subsystem('fs', FlowStart(thermo_method=thermo_method, thermo_data=thermo_data, elements=elements, use_WAR=use_WAR), promotes=proms) balance =", "elements = self.options['elements'] use_WAR = self.options['use_WAR'] self.add_subsystem('ambient', Ambient(), promotes=('alt', 'dTs'))", "om.Group(), promotes=['*']) if use_WAR == True: proms = ['Fl_O:*', 'MN'," ]
[ "AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm from django.contrib.auth import update_session_auth_hash, login, authenticate from", "PasswordChangeForm, UserCreationForm from django.contrib.auth import update_session_auth_hash, login, authenticate from django.contrib", "request.user prepare_user(user) token,_ = Token.objects.get_or_create(user=user) url = \"travel://?token=\" + token.key", "= HttpResponse(url, status=302) response['Location'] = url return response @login_required def", "twitter_login = None try: facebook_login = user.social_auth.get(provider='facebook') except UserSocialAuth.DoesNotExist: facebook_login", "django.contrib import messages from django.shortcuts import render, redirect from social_django.models", "django.shortcuts import render, redirect from social_django.models import UserSocialAuth from django.http", "django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm from django.contrib.auth import update_session_auth_hash, login,", "was successfully updated!') return redirect('password') else: messages.error(request, 'Please correct the", "provider='facebook') return HttpResponse(str(q.extra_data)) def signup(request): return render(request, 'signup.html') @login_required def", "redirect('password') else: messages.error(request, 'Please correct the error below.') else: form", "rest_framework.authtoken.models import Token from app.methods import prepare_user def get_token(request): if", "'Please correct the error below.') else: form = PasswordForm(request.user) return", "= PasswordForm(request.user, request.POST) if form.is_valid(): form.save() update_session_auth_hash(request, form.user) messages.success(request, 'Your", "password(request): if request.user.has_usable_password(): PasswordForm = PasswordChangeForm else: PasswordForm = AdminPasswordChangeForm", "user = request.user prepare_user(user) token,_ = Token.objects.get_or_create(user=user) url = \"travel://?token=\"", 
"'home.html') @login_required def settings(request): user = request.user try: github_login =", "import render from django.contrib.auth.decorators import login_required from django.contrib.auth.forms import AdminPasswordChangeForm,", "try: facebook_login = user.social_auth.get(provider='facebook') except UserSocialAuth.DoesNotExist: facebook_login = None can_disconnect", "login, authenticate from django.contrib import messages from django.shortcuts import render,", "password was successfully updated!') return redirect('password') else: messages.error(request, 'Please correct", "None try: twitter_login = user.social_auth.get(provider='twitter') except UserSocialAuth.DoesNotExist: twitter_login = None", "import UserSocialAuth from django.http import HttpResponse from django.shortcuts import get_object_or_404,", "def signup(request): return render(request, 'signup.html') @login_required def home(request): return render(request,", "user.has_usable_password()) return render(request, 'settings.html', { 'facebook_login': facebook_login, 'can_disconnect': can_disconnect })", "successfully updated!') return redirect('password') else: messages.error(request, 'Please correct the error", "+ '&id=' + str(user.id) else: url = \"travel://error\" response =", "import HttpResponse from django.shortcuts import get_object_or_404, redirect from rest_framework.authtoken.models import", "user.social_auth.get(provider='facebook') except UserSocialAuth.DoesNotExist: facebook_login = None can_disconnect = (user.social_auth.count() >", "from social_django.models import UserSocialAuth from django.http import HttpResponse from django.shortcuts", "<reponame>mashaka/TravelHelper from django.shortcuts import render from django.contrib.auth.decorators import login_required from", "user=request.user, provider='facebook') return HttpResponse(str(q.extra_data)) def signup(request): return render(request, 'signup.html') @login_required", "render(request, 'home.html') @login_required def 
settings(request): user = request.user try: github_login", "response['Location'] = url return response @login_required def get_facebook_token(request): q =", "HttpResponse(url, status=302) response['Location'] = url return response @login_required def get_facebook_token(request):", "def home(request): return render(request, 'home.html') @login_required def settings(request): user =", "messages.error(request, 'Please correct the error below.') else: form = PasswordForm(request.user)", "= user.social_auth.get(provider='github') except UserSocialAuth.DoesNotExist: github_login = None try: twitter_login =", "= user.social_auth.get(provider='facebook') except UserSocialAuth.DoesNotExist: facebook_login = None can_disconnect = (user.social_auth.count()", "form.save() update_session_auth_hash(request, form.user) messages.success(request, 'Your password was successfully updated!') return", "if request.user.has_usable_password(): PasswordForm = PasswordChangeForm else: PasswordForm = AdminPasswordChangeForm if", "signup(request): return render(request, 'signup.html') @login_required def home(request): return render(request, 'home.html')", "(user.social_auth.count() > 1 or user.has_usable_password()) return render(request, 'settings.html', { 'facebook_login':", "from rest_framework.authtoken.models import Token from app.methods import prepare_user def get_token(request):", "else: PasswordForm = AdminPasswordChangeForm if request.method == 'POST': form =", "below.') else: form = PasswordForm(request.user) return render(request, 'password.html', {'form': form})", "except UserSocialAuth.DoesNotExist: github_login = None try: twitter_login = user.social_auth.get(provider='twitter') except", "render from django.contrib.auth.decorators import login_required from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm,", "@login_required def get_facebook_token(request): q = get_object_or_404(UserSocialAuth, user=request.user, provider='facebook') return 
HttpResponse(str(q.extra_data))", "messages.success(request, 'Your password was successfully updated!') return redirect('password') else: messages.error(request,", "request.user.has_usable_password(): PasswordForm = PasswordChangeForm else: PasswordForm = AdminPasswordChangeForm if request.method", "{ 'facebook_login': facebook_login, 'can_disconnect': can_disconnect }) @login_required def password(request): if", "= \"travel://error\" response = HttpResponse(url, status=302) response['Location'] = url return", "else: messages.error(request, 'Please correct the error below.') else: form =", "except UserSocialAuth.DoesNotExist: twitter_login = None try: facebook_login = user.social_auth.get(provider='facebook') except", "render, redirect from social_django.models import UserSocialAuth from django.http import HttpResponse", "get_facebook_token(request): q = get_object_or_404(UserSocialAuth, user=request.user, provider='facebook') return HttpResponse(str(q.extra_data)) def signup(request):", "PasswordForm = PasswordChangeForm else: PasswordForm = AdminPasswordChangeForm if request.method ==", "from django.http import HttpResponse from django.shortcuts import get_object_or_404, redirect from", "PasswordForm(request.user, request.POST) if form.is_valid(): form.save() update_session_auth_hash(request, form.user) messages.success(request, 'Your password", "UserSocialAuth.DoesNotExist: twitter_login = None try: facebook_login = user.social_auth.get(provider='facebook') except UserSocialAuth.DoesNotExist:", "get_object_or_404, redirect from rest_framework.authtoken.models import Token from app.methods import prepare_user", "render(request, 'signup.html') @login_required def home(request): return render(request, 'home.html') @login_required def", "str(user.id) else: url = \"travel://error\" response = HttpResponse(url, status=302) response['Location']", "return render(request, 'signup.html') @login_required def home(request): return render(request, 'home.html') 
@login_required", "or user.has_usable_password()) return render(request, 'settings.html', { 'facebook_login': facebook_login, 'can_disconnect': can_disconnect", "import update_session_auth_hash, login, authenticate from django.contrib import messages from django.shortcuts", "from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm from django.contrib.auth import update_session_auth_hash,", "import Token from app.methods import prepare_user def get_token(request): if request.user:", "request.user: user = request.user prepare_user(user) token,_ = Token.objects.get_or_create(user=user) url =", "= None try: facebook_login = user.social_auth.get(provider='facebook') except UserSocialAuth.DoesNotExist: facebook_login =", "request.user try: github_login = user.social_auth.get(provider='github') except UserSocialAuth.DoesNotExist: github_login = None", "url = \"travel://error\" response = HttpResponse(url, status=302) response['Location'] = url", "status=302) response['Location'] = url return response @login_required def get_facebook_token(request): q", "> 1 or user.has_usable_password()) return render(request, 'settings.html', { 'facebook_login': facebook_login,", "= request.user prepare_user(user) token,_ = Token.objects.get_or_create(user=user) url = \"travel://?token=\" +", "HttpResponse(str(q.extra_data)) def signup(request): return render(request, 'signup.html') @login_required def home(request): return", "import get_object_or_404, redirect from rest_framework.authtoken.models import Token from app.methods import", "PasswordChangeForm else: PasswordForm = AdminPasswordChangeForm if request.method == 'POST': form", "get_token(request): if request.user: user = request.user prepare_user(user) token,_ = Token.objects.get_or_create(user=user)", "facebook_login = None can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password())", "'can_disconnect': can_disconnect }) @login_required def password(request): if 
request.user.has_usable_password(): PasswordForm =", "get_object_or_404(UserSocialAuth, user=request.user, provider='facebook') return HttpResponse(str(q.extra_data)) def signup(request): return render(request, 'signup.html')", "try: github_login = user.social_auth.get(provider='github') except UserSocialAuth.DoesNotExist: github_login = None try:", "1 or user.has_usable_password()) return render(request, 'settings.html', { 'facebook_login': facebook_login, 'can_disconnect':", "django.shortcuts import get_object_or_404, redirect from rest_framework.authtoken.models import Token from app.methods", "if form.is_valid(): form.save() update_session_auth_hash(request, form.user) messages.success(request, 'Your password was successfully", "= \"travel://?token=\" + token.key + '&id=' + str(user.id) else: url", "from django.contrib.auth.decorators import login_required from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm", "def settings(request): user = request.user try: github_login = user.social_auth.get(provider='github') except", "response = HttpResponse(url, status=302) response['Location'] = url return response @login_required", "request.method == 'POST': form = PasswordForm(request.user, request.POST) if form.is_valid(): form.save()", "from app.methods import prepare_user def get_token(request): if request.user: user =", "def password(request): if request.user.has_usable_password(): PasswordForm = PasswordChangeForm else: PasswordForm =", "'&id=' + str(user.id) else: url = \"travel://error\" response = HttpResponse(url,", "github_login = user.social_auth.get(provider='github') except UserSocialAuth.DoesNotExist: github_login = None try: twitter_login", "return HttpResponse(str(q.extra_data)) def signup(request): return render(request, 'signup.html') @login_required def home(request):", "= url return response @login_required def get_facebook_token(request): q = get_object_or_404(UserSocialAuth,", 
"UserSocialAuth.DoesNotExist: github_login = None try: twitter_login = user.social_auth.get(provider='twitter') except UserSocialAuth.DoesNotExist:", "django.contrib.auth import update_session_auth_hash, login, authenticate from django.contrib import messages from", "None try: facebook_login = user.social_auth.get(provider='facebook') except UserSocialAuth.DoesNotExist: facebook_login = None", "return render(request, 'settings.html', { 'facebook_login': facebook_login, 'can_disconnect': can_disconnect }) @login_required", "error below.') else: form = PasswordForm(request.user) return render(request, 'password.html', {'form':", "HttpResponse from django.shortcuts import get_object_or_404, redirect from rest_framework.authtoken.models import Token", "app.methods import prepare_user def get_token(request): if request.user: user = request.user", "user.social_auth.get(provider='github') except UserSocialAuth.DoesNotExist: github_login = None try: twitter_login = user.social_auth.get(provider='twitter')", "+ token.key + '&id=' + str(user.id) else: url = \"travel://error\"", "token.key + '&id=' + str(user.id) else: url = \"travel://error\" response", "AdminPasswordChangeForm if request.method == 'POST': form = PasswordForm(request.user, request.POST) if", "\"travel://?token=\" + token.key + '&id=' + str(user.id) else: url =", "except UserSocialAuth.DoesNotExist: facebook_login = None can_disconnect = (user.social_auth.count() > 1", "from django.shortcuts import render from django.contrib.auth.decorators import login_required from django.contrib.auth.forms", "def get_token(request): if request.user: user = request.user prepare_user(user) token,_ =", "form.is_valid(): form.save() update_session_auth_hash(request, form.user) messages.success(request, 'Your password was successfully updated!')", "+ str(user.id) else: url = \"travel://error\" response = HttpResponse(url, status=302)", "update_session_auth_hash, login, authenticate from django.contrib import messages from 
django.shortcuts import", "updated!') return redirect('password') else: messages.error(request, 'Please correct the error below.')", "else: url = \"travel://error\" response = HttpResponse(url, status=302) response['Location'] =", "user.social_auth.get(provider='twitter') except UserSocialAuth.DoesNotExist: twitter_login = None try: facebook_login = user.social_auth.get(provider='facebook')", "@login_required def home(request): return render(request, 'home.html') @login_required def settings(request): user", "from django.contrib.auth import update_session_auth_hash, login, authenticate from django.contrib import messages", "def get_facebook_token(request): q = get_object_or_404(UserSocialAuth, user=request.user, provider='facebook') return HttpResponse(str(q.extra_data)) def", "request.POST) if form.is_valid(): form.save() update_session_auth_hash(request, form.user) messages.success(request, 'Your password was", "redirect from social_django.models import UserSocialAuth from django.http import HttpResponse from", "= AdminPasswordChangeForm if request.method == 'POST': form = PasswordForm(request.user, request.POST)", "Token from app.methods import prepare_user def get_token(request): if request.user: user", "prepare_user(user) token,_ = Token.objects.get_or_create(user=user) url = \"travel://?token=\" + token.key +", "= (user.social_auth.count() > 1 or user.has_usable_password()) return render(request, 'settings.html', {", "None can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password()) return render(request,", "facebook_login, 'can_disconnect': can_disconnect }) @login_required def password(request): if request.user.has_usable_password(): PasswordForm", "from django.shortcuts import render, redirect from social_django.models import UserSocialAuth from", "login_required from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm from django.contrib.auth import", "return redirect('password') else: 
messages.error(request, 'Please correct the error below.') else:", "import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm from django.contrib.auth import update_session_auth_hash, login, authenticate", "settings(request): user = request.user try: github_login = user.social_auth.get(provider='github') except UserSocialAuth.DoesNotExist:", "return response @login_required def get_facebook_token(request): q = get_object_or_404(UserSocialAuth, user=request.user, provider='facebook')", "= get_object_or_404(UserSocialAuth, user=request.user, provider='facebook') return HttpResponse(str(q.extra_data)) def signup(request): return render(request,", "render(request, 'settings.html', { 'facebook_login': facebook_login, 'can_disconnect': can_disconnect }) @login_required def", "token,_ = Token.objects.get_or_create(user=user) url = \"travel://?token=\" + token.key + '&id='", "home(request): return render(request, 'home.html') @login_required def settings(request): user = request.user", "can_disconnect }) @login_required def password(request): if request.user.has_usable_password(): PasswordForm = PasswordChangeForm", "authenticate from django.contrib import messages from django.shortcuts import render, redirect", "twitter_login = user.social_auth.get(provider='twitter') except UserSocialAuth.DoesNotExist: twitter_login = None try: facebook_login", "update_session_auth_hash(request, form.user) messages.success(request, 'Your password was successfully updated!') return redirect('password')", "== 'POST': form = PasswordForm(request.user, request.POST) if form.is_valid(): form.save() update_session_auth_hash(request,", "if request.method == 'POST': form = PasswordForm(request.user, request.POST) if form.is_valid():", "messages from django.shortcuts import render, redirect from social_django.models import UserSocialAuth", "@login_required def password(request): if request.user.has_usable_password(): PasswordForm = PasswordChangeForm else: PasswordForm", "= 
PasswordChangeForm else: PasswordForm = AdminPasswordChangeForm if request.method == 'POST':", "= user.social_auth.get(provider='twitter') except UserSocialAuth.DoesNotExist: twitter_login = None try: facebook_login =", "social_django.models import UserSocialAuth from django.http import HttpResponse from django.shortcuts import", "django.http import HttpResponse from django.shortcuts import get_object_or_404, redirect from rest_framework.authtoken.models", "return render(request, 'home.html') @login_required def settings(request): user = request.user try:", "= None try: twitter_login = user.social_auth.get(provider='twitter') except UserSocialAuth.DoesNotExist: twitter_login =", "UserSocialAuth.DoesNotExist: facebook_login = None can_disconnect = (user.social_auth.count() > 1 or", "= Token.objects.get_or_create(user=user) url = \"travel://?token=\" + token.key + '&id=' +", "'settings.html', { 'facebook_login': facebook_login, 'can_disconnect': can_disconnect }) @login_required def password(request):", "'signup.html') @login_required def home(request): return render(request, 'home.html') @login_required def settings(request):", "form = PasswordForm(request.user, request.POST) if form.is_valid(): form.save() update_session_auth_hash(request, form.user) messages.success(request,", "UserCreationForm from django.contrib.auth import update_session_auth_hash, login, authenticate from django.contrib import", "import render, redirect from social_django.models import UserSocialAuth from django.http import", "@login_required def settings(request): user = request.user try: github_login = user.social_auth.get(provider='github')", "try: twitter_login = user.social_auth.get(provider='twitter') except UserSocialAuth.DoesNotExist: twitter_login = None try:", "'POST': form = PasswordForm(request.user, request.POST) if form.is_valid(): form.save() update_session_auth_hash(request, form.user)", "q = get_object_or_404(UserSocialAuth, user=request.user, provider='facebook') return 
HttpResponse(str(q.extra_data)) def signup(request): return", "github_login = None try: twitter_login = user.social_auth.get(provider='twitter') except UserSocialAuth.DoesNotExist: twitter_login", "form.user) messages.success(request, 'Your password was successfully updated!') return redirect('password') else:", "prepare_user def get_token(request): if request.user: user = request.user prepare_user(user) token,_", "if request.user: user = request.user prepare_user(user) token,_ = Token.objects.get_or_create(user=user) url", "url return response @login_required def get_facebook_token(request): q = get_object_or_404(UserSocialAuth, user=request.user,", "django.shortcuts import render from django.contrib.auth.decorators import login_required from django.contrib.auth.forms import", "user = request.user try: github_login = user.social_auth.get(provider='github') except UserSocialAuth.DoesNotExist: github_login", "}) @login_required def password(request): if request.user.has_usable_password(): PasswordForm = PasswordChangeForm else:", "facebook_login = user.social_auth.get(provider='facebook') except UserSocialAuth.DoesNotExist: facebook_login = None can_disconnect =", "import login_required from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm from django.contrib.auth", "from django.shortcuts import get_object_or_404, redirect from rest_framework.authtoken.models import Token from", "= None can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password()) return", "import messages from django.shortcuts import render, redirect from social_django.models import", "\"travel://error\" response = HttpResponse(url, status=302) response['Location'] = url return response", "from django.contrib import messages from django.shortcuts import render, redirect from", "PasswordForm = AdminPasswordChangeForm if request.method == 'POST': form = PasswordForm(request.user,", "'facebook_login': facebook_login, 'can_disconnect': 
can_disconnect }) @login_required def password(request): if request.user.has_usable_password():", "can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password()) return render(request, 'settings.html',", "= request.user try: github_login = user.social_auth.get(provider='github') except UserSocialAuth.DoesNotExist: github_login =", "response @login_required def get_facebook_token(request): q = get_object_or_404(UserSocialAuth, user=request.user, provider='facebook') return", "import prepare_user def get_token(request): if request.user: user = request.user prepare_user(user)", "the error below.') else: form = PasswordForm(request.user) return render(request, 'password.html',", "redirect from rest_framework.authtoken.models import Token from app.methods import prepare_user def", "url = \"travel://?token=\" + token.key + '&id=' + str(user.id) else:", "correct the error below.') else: form = PasswordForm(request.user) return render(request,", "django.contrib.auth.decorators import login_required from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm from", "Token.objects.get_or_create(user=user) url = \"travel://?token=\" + token.key + '&id=' + str(user.id)", "UserSocialAuth from django.http import HttpResponse from django.shortcuts import get_object_or_404, redirect", "'Your password was successfully updated!') return redirect('password') else: messages.error(request, 'Please" ]
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "data[\"name\"] in train_names: train_data.append(data) elif data[\"name\"] in valid_names: valid_data.append(data) elif", "under \"/person_split\". It will generate new files with the following", "generate new files with the following structure: ├──person_split │ ├──", "\"hyw\", \"shiyun\", \"tangsy\", \"dengyl\", \"jiangyh\", \"xunkai\", \"negative3\", \"negative4\", \"negative5\", \"negative6\"", "saved separately under \"/person_split\". It will generate new files with", "# Copyright 2019 The TensorFlow Authors. All Rights Reserved. #", "# # Licensed under the Apache License, Version 2.0 (the", "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "All Rights Reserved. # # Licensed under the Apache License,", "2.0 (the \"License\"); # you may not use this file", "file except in compliance with the License. # You may", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "would be saved separately under \"/person_split\". 
It will generate new", "valid_data.append(data) elif data[\"name\"] in test_names: test_data.append(data) print(\"train_length:\" + str(len(train_data))) print(\"valid_length:\"", "data as validation, and the rest ones' data as test.", "test_names: test_data.append(data) print(\"train_length:\" + str(len(train_data))) print(\"valid_length:\" + str(len(valid_data))) print(\"test_length:\" +", "[\"lsj\", \"pengxl\", \"negative2\", \"negative7\"] test_names = [\"liucx\", \"zhangxy\", \"negative1\", \"negative8\"]", "│ └──valid \"\"\" from __future__ import absolute_import from __future__ import", "distributed under the License is distributed on an \"AS IS\"", "\"negative4\", \"negative5\", \"negative6\" ] valid_names = [\"lsj\", \"pengxl\", \"negative2\", \"negative7\"]", "people's data as train, some other people's data as validation,", "# limitations under the License. # ============================================================================== \"\"\"Split data into", "random from data_split import read_data from data_split import write_data def", "data by person.\"\"\" random.seed(30) random.shuffle(whole_data) train_data = [] valid_data =", "ones' data as test. These data would be saved separately", "the specific language governing permissions and # limitations under the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# coding=utf-8 # Copyright 2019 The TensorFlow Authors. All Rights", "limitations under the License. # ============================================================================== \"\"\"Split data into train,", "valid_names = [\"lsj\", \"pengxl\", \"negative2\", \"negative7\"] test_names = [\"liucx\", \"zhangxy\",", "str(len(valid_data))) print(\"test_length:\" + str(len(test_data))) return train_data, valid_data, test_data if __name__", "express or implied. 
# See the License for the specific", "applicable law or agreed to in writing, software # distributed", "person.\"\"\" random.seed(30) random.shuffle(whole_data) train_data = [] valid_data = [] test_data", "python3 # coding=utf-8 # Copyright 2019 The TensorFlow Authors. All", "except in compliance with the License. # You may obtain", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "separately under \"/person_split\". It will generate new files with the", "__future__ import division from __future__ import print_function import os import", "# ============================================================================== \"\"\"Split data into train, validation and test dataset", "\"zhangxy\", \"negative1\", \"negative8\"] train_data, valid_data, test_data = person_split(data, train_names, valid_names,", "print(\"test_length:\" + str(len(test_data))) return train_data, valid_data, test_data if __name__ ==", "\"/person_split\". It will generate new files with the following structure:", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "# pylint: disable=unused-variable if data[\"name\"] in train_names: train_data.append(data) elif data[\"name\"]", "data would be saved separately under \"/person_split\". It will generate", "not use this file except in compliance with the License.", "into train, validation and test dataset according to person. That", "other people's data as validation, and the rest ones' data", "valid_names: valid_data.append(data) elif data[\"name\"] in test_names: test_data.append(data) print(\"train_length:\" + str(len(train_data)))", "validation, and the rest ones' data as test. These data", "import write_data def person_split(whole_data, train_names, valid_names, test_names): \"\"\"Split data by", "person. 
That is, use some people's data as train, some", "+ str(len(test_data))) return train_data, valid_data, test_data if __name__ == \"__main__\":", "writing, software # distributed under the License is distributed on", "\"negative7\"] test_names = [\"liucx\", \"zhangxy\", \"negative1\", \"negative8\"] train_data, valid_data, test_data", "in writing, software # distributed under the License is distributed", "write_data def person_split(whole_data, train_names, valid_names, test_names): \"\"\"Split data by person.\"\"\"", "you may not use this file except in compliance with", "for idx, data in enumerate(whole_data): # pylint: disable=unused-variable if data[\"name\"]", "train_data, valid_data, test_data = person_split(data, train_names, valid_names, test_names) if not", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "train, validation and test dataset according to person. That is,", "\"jiangyh\", \"xunkai\", \"negative3\", \"negative4\", \"negative5\", \"negative6\" ] valid_names = [\"lsj\",", "from __future__ import division from __future__ import print_function import os", "= [] for idx, data in enumerate(whole_data): # pylint: disable=unused-variable", "data in enumerate(whole_data): # pylint: disable=unused-variable if data[\"name\"] in train_names:", "data_split import write_data def person_split(whole_data, train_names, valid_names, test_names): \"\"\"Split data", "# Lint as: python3 # coding=utf-8 # Copyright 2019 The", "= read_data(\"./data/complete_data\") train_names = [ \"hyw\", \"shiyun\", \"tangsy\", \"dengyl\", \"jiangyh\",", "train_names = [ \"hyw\", \"shiyun\", \"tangsy\", \"dengyl\", \"jiangyh\", \"xunkai\", \"negative3\",", "test_names = [\"liucx\", \"zhangxy\", \"negative1\", \"negative8\"] train_data, valid_data, test_data =", "data_split import read_data from data_split import write_data def person_split(whole_data, train_names,", "in train_names: train_data.append(data) elif data[\"name\"] in valid_names: 
valid_data.append(data) elif data[\"name\"]", "use this file except in compliance with the License. #", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "is, use some people's data as train, some other people's", "import division from __future__ import print_function import os import random", "│ ├── train │ └──valid \"\"\" from __future__ import absolute_import", "read_data(\"./data/complete_data\") train_names = [ \"hyw\", \"shiyun\", \"tangsy\", \"dengyl\", \"jiangyh\", \"xunkai\",", "CONDITIONS OF ANY KIND, either express or implied. # See", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "as test. These data would be saved separately under \"/person_split\".", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "Rights Reserved. # # Licensed under the Apache License, Version", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "some other people's data as validation, and the rest ones'", "under the License. # ============================================================================== \"\"\"Split data into train, validation", "in valid_names: valid_data.append(data) elif data[\"name\"] in test_names: test_data.append(data) print(\"train_length:\" +", "The TensorFlow Authors. All Rights Reserved. # # Licensed under", "== \"__main__\": data = read_data(\"./data/complete_data\") train_names = [ \"hyw\", \"shiyun\",", "# You may obtain a copy of the License at", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "random.seed(30) random.shuffle(whole_data) train_data = [] valid_data = [] test_data =", "train_data.append(data) elif data[\"name\"] in valid_names: valid_data.append(data) elif data[\"name\"] in test_names:", "with the following structure: ├──person_split │ ├── test │ ├──", "from data_split import write_data def person_split(whole_data, train_names, valid_names, test_names): \"\"\"Split", "+ str(len(train_data))) print(\"valid_length:\" + str(len(valid_data))) print(\"test_length:\" + str(len(test_data))) return train_data,", "under the License is distributed on an \"AS IS\" BASIS,", "from data_split import read_data from data_split import write_data def person_split(whole_data,", "read_data from data_split import write_data def person_split(whole_data, train_names, valid_names, test_names):", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "License for the specific language governing permissions and # limitations", "Authors. All Rights Reserved. # # Licensed under the Apache", "train_data, valid_data, test_data if __name__ == \"__main__\": data = read_data(\"./data/complete_data\")", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "These data would be saved separately under \"/person_split\". It will", "files with the following structure: ├──person_split │ ├── test │", "Reserved. # # Licensed under the Apache License, Version 2.0", "if __name__ == \"__main__\": data = read_data(\"./data/complete_data\") train_names = [", "structure: ├──person_split │ ├── test │ ├── train │ └──valid", "train │ └──valid \"\"\" from __future__ import absolute_import from __future__", "as validation, and the rest ones' data as test. 
These", "\"\"\"Split data by person.\"\"\" random.seed(30) random.shuffle(whole_data) train_data = [] valid_data", "\"negative8\"] train_data, valid_data, test_data = person_split(data, train_names, valid_names, test_names) if", "train, some other people's data as validation, and the rest", "governing permissions and # limitations under the License. # ==============================================================================", "the License for the specific language governing permissions and #", "(the \"License\"); # you may not use this file except", "permissions and # limitations under the License. # ============================================================================== \"\"\"Split", "TensorFlow Authors. All Rights Reserved. # # Licensed under the", "Apache License, Version 2.0 (the \"License\"); # you may not", "\"tangsy\", \"dengyl\", \"jiangyh\", \"xunkai\", \"negative3\", \"negative4\", \"negative5\", \"negative6\" ] valid_names", "# you may not use this file except in compliance", "test. These data would be saved separately under \"/person_split\". It", "enumerate(whole_data): # pylint: disable=unused-variable if data[\"name\"] in train_names: train_data.append(data) elif", "either express or implied. # See the License for the", "import print_function import os import random from data_split import read_data", "def person_split(whole_data, train_names, valid_names, test_names): \"\"\"Split data by person.\"\"\" random.seed(30)", "OR CONDITIONS OF ANY KIND, either express or implied. #", "= [] test_data = [] for idx, data in enumerate(whole_data):", "[\"liucx\", \"zhangxy\", \"negative1\", \"negative8\"] train_data, valid_data, test_data = person_split(data, train_names,", "the License. 
# ============================================================================== \"\"\"Split data into train, validation and", "data as train, some other people's data as validation, and", "data = read_data(\"./data/complete_data\") train_names = [ \"hyw\", \"shiyun\", \"tangsy\", \"dengyl\",", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "============================================================================== \"\"\"Split data into train, validation and test dataset according", "the License is distributed on an \"AS IS\" BASIS, #", "from __future__ import absolute_import from __future__ import division from __future__", "\"__main__\": data = read_data(\"./data/complete_data\") train_names = [ \"hyw\", \"shiyun\", \"tangsy\",", "idx, data in enumerate(whole_data): # pylint: disable=unused-variable if data[\"name\"] in", "in compliance with the License. # You may obtain a", "train_data = [] valid_data = [] test_data = [] for", "data into train, validation and test dataset according to person.", "= [ \"hyw\", \"shiyun\", \"tangsy\", \"dengyl\", \"jiangyh\", \"xunkai\", \"negative3\", \"negative4\",", "elif data[\"name\"] in test_names: test_data.append(data) print(\"train_length:\" + str(len(train_data))) print(\"valid_length:\" +", "│ ├── test │ ├── train │ └──valid \"\"\" from", "software # distributed under the License is distributed on an", "from __future__ import print_function import os import random from data_split", "some people's data as train, some other people's data as", "valid_data, test_data if __name__ == \"__main__\": data = read_data(\"./data/complete_data\") train_names", "and the rest ones' data as test. These data would", "validation and test dataset according to person. That is, use", "data as test. These data would be saved separately under", "# # Unless required by applicable law or agreed to", "the rest ones' data as test. 
These data would be", "data[\"name\"] in test_names: test_data.append(data) print(\"train_length:\" + str(len(train_data))) print(\"valid_length:\" + str(len(valid_data)))", "the following structure: ├──person_split │ ├── test │ ├── train", "disable=unused-variable if data[\"name\"] in train_names: train_data.append(data) elif data[\"name\"] in valid_names:", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "by person.\"\"\" random.seed(30) random.shuffle(whole_data) train_data = [] valid_data = []", "\"negative2\", \"negative7\"] test_names = [\"liucx\", \"zhangxy\", \"negative1\", \"negative8\"] train_data, valid_data,", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "str(len(test_data))) return train_data, valid_data, test_data if __name__ == \"__main__\": data", "\"shiyun\", \"tangsy\", \"dengyl\", \"jiangyh\", \"xunkai\", \"negative3\", \"negative4\", \"negative5\", \"negative6\" ]", "test_data.append(data) print(\"train_length:\" + str(len(train_data))) print(\"valid_length:\" + str(len(valid_data))) print(\"test_length:\" + str(len(test_data)))", "Version 2.0 (the \"License\"); # you may not use this", "rest ones' data as test. These data would be saved", "os import random from data_split import read_data from data_split import", "valid_data, test_data = person_split(data, train_names, valid_names, test_names) if not os.path.exists(\"./person_split\"):", "law or agreed to in writing, software # distributed under", "and # limitations under the License. 
# ============================================================================== \"\"\"Split data", "+ str(len(valid_data))) print(\"test_length:\" + str(len(test_data))) return train_data, valid_data, test_data if", "\"\"\" from __future__ import absolute_import from __future__ import division from", "test_data if __name__ == \"__main__\": data = read_data(\"./data/complete_data\") train_names =", "├── train │ └──valid \"\"\" from __future__ import absolute_import from", "import os import random from data_split import read_data from data_split", "That is, use some people's data as train, some other", "to person. That is, use some people's data as train,", "division from __future__ import print_function import os import random from", "absolute_import from __future__ import division from __future__ import print_function import", "train_names, valid_names, test_names): \"\"\"Split data by person.\"\"\" random.seed(30) random.shuffle(whole_data) train_data", "= person_split(data, train_names, valid_names, test_names) if not os.path.exists(\"./person_split\"): os.makedirs(\"./person_split\") write_data(train_data,", "person_split(data, train_names, valid_names, test_names) if not os.path.exists(\"./person_split\"): os.makedirs(\"./person_split\") write_data(train_data, \"./person_split/train\")", "__future__ import absolute_import from __future__ import division from __future__ import", "└──valid \"\"\" from __future__ import absolute_import from __future__ import division", "person_split(whole_data, train_names, valid_names, test_names): \"\"\"Split data by person.\"\"\" random.seed(30) random.shuffle(whole_data)", "implied. 
# See the License for the specific language governing", "elif data[\"name\"] in valid_names: valid_data.append(data) elif data[\"name\"] in test_names: test_data.append(data)", "print(\"valid_length:\" + str(len(valid_data))) print(\"test_length:\" + str(len(test_data))) return train_data, valid_data, test_data", "\"dengyl\", \"jiangyh\", \"xunkai\", \"negative3\", \"negative4\", \"negative5\", \"negative6\" ] valid_names =", "people's data as validation, and the rest ones' data as", "2019 The TensorFlow Authors. All Rights Reserved. # # Licensed", "under the Apache License, Version 2.0 (the \"License\"); # you", "\"License\"); # you may not use this file except in", "as: python3 # coding=utf-8 # Copyright 2019 The TensorFlow Authors.", "[] test_data = [] for idx, data in enumerate(whole_data): #", "= [\"liucx\", \"zhangxy\", \"negative1\", \"negative8\"] train_data, valid_data, test_data = person_split(data,", "[] for idx, data in enumerate(whole_data): # pylint: disable=unused-variable if", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "new files with the following structure: ├──person_split │ ├── test", "test_names): \"\"\"Split data by person.\"\"\" random.seed(30) random.shuffle(whole_data) train_data = []", "test │ ├── train │ └──valid \"\"\" from __future__ import", "\"negative5\", \"negative6\" ] valid_names = [\"lsj\", \"pengxl\", \"negative2\", \"negative7\"] test_names", "in test_names: test_data.append(data) print(\"train_length:\" + str(len(train_data))) print(\"valid_length:\" + str(len(valid_data))) print(\"test_length:\"", "print(\"train_length:\" + str(len(train_data))) print(\"valid_length:\" + str(len(valid_data))) print(\"test_length:\" + str(len(test_data))) return", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. 
# See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "[] valid_data = [] test_data = [] for idx, data", "in enumerate(whole_data): # pylint: disable=unused-variable if data[\"name\"] in train_names: train_data.append(data)", "__name__ == \"__main__\": data = read_data(\"./data/complete_data\") train_names = [ \"hyw\",", "may obtain a copy of the License at # #", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "\"xunkai\", \"negative3\", \"negative4\", \"negative5\", \"negative6\" ] valid_names = [\"lsj\", \"pengxl\",", "= [\"lsj\", \"pengxl\", \"negative2\", \"negative7\"] test_names = [\"liucx\", \"zhangxy\", \"negative1\",", "if data[\"name\"] in train_names: train_data.append(data) elif data[\"name\"] in valid_names: valid_data.append(data)", "import random from data_split import read_data from data_split import write_data", "\"negative3\", \"negative4\", \"negative5\", \"negative6\" ] valid_names = [\"lsj\", \"pengxl\", \"negative2\",", "\"pengxl\", \"negative2\", \"negative7\"] test_names = [\"liucx\", \"zhangxy\", \"negative1\", \"negative8\"] train_data,", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "coding=utf-8 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.", "for the specific language governing permissions and # limitations under", "import read_data from data_split import write_data def person_split(whole_data, train_names, valid_names,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "use some people's data as train, some other people's data", "test dataset according to person. 
That is, use some people's", "as train, some other people's data as validation, and the", "├── test │ ├── train │ └──valid \"\"\" from __future__", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "according to person. That is, use some people's data as", "print_function import os import random from data_split import read_data from", "and test dataset according to person. That is, use some", "random.shuffle(whole_data) train_data = [] valid_data = [] test_data = []", "\"negative1\", \"negative8\"] train_data, valid_data, test_data = person_split(data, train_names, valid_names, test_names)", "You may obtain a copy of the License at #", "├──person_split │ ├── test │ ├── train │ └──valid \"\"\"", "language governing permissions and # limitations under the License. #", "valid_names, test_names) if not os.path.exists(\"./person_split\"): os.makedirs(\"./person_split\") write_data(train_data, \"./person_split/train\") write_data(valid_data, \"./person_split/valid\")", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "will generate new files with the following structure: ├──person_split │", "] valid_names = [\"lsj\", \"pengxl\", \"negative2\", \"negative7\"] test_names = [\"liucx\",", "be saved separately under \"/person_split\". It will generate new files", "test_data = [] for idx, data in enumerate(whole_data): # pylint:", "test_names) if not os.path.exists(\"./person_split\"): os.makedirs(\"./person_split\") write_data(train_data, \"./person_split/train\") write_data(valid_data, \"./person_split/valid\") write_data(test_data,", "required by applicable law or agreed to in writing, software", "Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
# #", "following structure: ├──person_split │ ├── test │ ├── train │", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "License. # ============================================================================== \"\"\"Split data into train, validation and test", "\"\"\"Split data into train, validation and test dataset according to", "valid_names, test_names): \"\"\"Split data by person.\"\"\" random.seed(30) random.shuffle(whole_data) train_data =", "str(len(train_data))) print(\"valid_length:\" + str(len(valid_data))) print(\"test_length:\" + str(len(test_data))) return train_data, valid_data,", "with the License. # You may obtain a copy of", "Lint as: python3 # coding=utf-8 # Copyright 2019 The TensorFlow", "this file except in compliance with the License. # You", "= [] valid_data = [] test_data = [] for idx,", "if not os.path.exists(\"./person_split\"): os.makedirs(\"./person_split\") write_data(train_data, \"./person_split/train\") write_data(valid_data, \"./person_split/valid\") write_data(test_data, \"./person_split/test\")", "the Apache License, Version 2.0 (the \"License\"); # you may", "data[\"name\"] in valid_names: valid_data.append(data) elif data[\"name\"] in test_names: test_data.append(data) print(\"train_length:\"", "return train_data, valid_data, test_data if __name__ == \"__main__\": data =", "import absolute_import from __future__ import division from __future__ import print_function", "test_data = person_split(data, train_names, valid_names, test_names) if not os.path.exists(\"./person_split\"): os.makedirs(\"./person_split\")", "pylint: disable=unused-variable if data[\"name\"] in train_names: train_data.append(data) elif data[\"name\"] in", "train_names, valid_names, test_names) if not os.path.exists(\"./person_split\"): os.makedirs(\"./person_split\") write_data(train_data, \"./person_split/train\") write_data(valid_data,", "valid_data = [] test_data = [] for 
idx, data in", "It will generate new files with the following structure: ├──person_split", "[ \"hyw\", \"shiyun\", \"tangsy\", \"dengyl\", \"jiangyh\", \"xunkai\", \"negative3\", \"negative4\", \"negative5\",", "\"negative6\" ] valid_names = [\"lsj\", \"pengxl\", \"negative2\", \"negative7\"] test_names =", "train_names: train_data.append(data) elif data[\"name\"] in valid_names: valid_data.append(data) elif data[\"name\"] in", "dataset according to person. That is, use some people's data", "__future__ import print_function import os import random from data_split import" ]