Columns:

    code         string  lengths 22 to 1.05M
    apis         list    lengths 1 to 3.31k
    extract_api  string  lengths 75 to 3.25M
import numpy as np
from scipy.stats import rankdata
from sklearn.datasets import load_iris

from utilities.rank_data import rank_data


def test_rank_data():
    data = load_iris().data
    # rank the data all at once
    output = rank_data(data)
    # check each column versus scipy equivalent
    for i in range(data.shape[1]):
        feature = data[:, i]
        expected = rankdata(feature)
        assert np.allclose(expected, output[:, i])


if __name__ == '__main__':
    import pytest
    pytest.main()
[ "sklearn.datasets.load_iris", "numpy.allclose", "scipy.stats.rankdata", "pytest.main", "utilities.rank_data.rank_data" ]
[((232, 247), 'utilities.rank_data.rank_data', 'rank_data', (['data'], {}), '(data)\n', (241, 247), False, 'from utilities.rank_data import rank_data\n'), ((500, 513), 'pytest.main', 'pytest.main', ([], {}), '()\n', (511, 513), False, 'import pytest\n'), ((169, 180), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (178, 180), False, 'from sklearn.datasets import load_iris\n'), ((380, 397), 'scipy.stats.rankdata', 'rankdata', (['feature'], {}), '(feature)\n', (388, 397), False, 'from scipy.stats import rankdata\n'), ((413, 448), 'numpy.allclose', 'np.allclose', (['expected', 'output[:, i]'], {}), '(expected, output[:, i])\n', (424, 448), True, 'import numpy as np\n')]
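A note on consuming these rows: each extract_api cell appears to be a Python literal, a list of eight-field tuples pairing an API call's character span in the code cell with its fully qualified name, the callee text as written, the source text of its positional and keyword arguments, the argument text and span, a flag that seems to mark module-attribute calls, and the originating import. A minimal decoding sketch under those assumptions; the field names are inferred from the sample rows, not a documented schema.

import ast


def decode_extract_api(cell: str):
    """Parse one extract_api cell into per-call records.

    The eight-field layout below is an assumption inferred from the
    sample rows in this preview, not a published schema.
    """
    records = []
    for entry in ast.literal_eval(cell):
        (call_span, api, callee, (args, kwargs), arg_text,
         arg_span, via_attribute, import_stmt) = entry
        records.append({
            "span": call_span,    # (start, end) offsets into the code cell
            "api": api,           # fully qualified name, e.g. 'scipy.stats.rankdata'
            "callee": callee,     # the name as written at the call site
            "args": args,         # source text of positional arguments
            "kwargs": kwargs,     # source text of keyword arguments
            "via_attribute": via_attribute,  # True for calls like np.allclose (assumed meaning)
            "import": import_stmt.strip(),
        })
    return records

Applied to the row above, this would yield five records, one per extracted call.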
""" MIT License Copyright (c) 2018-Present NeuroAssassin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import json from typing import Union import aiohttp import discord from redbot.core import checks, commands from redbot.core.utils.menus import DEFAULT_CONTROLS, menu from .image import ( HUMANDESCRIPTION, IMAGES, PLANETDESCRIPTION, PLANETS, PLANETTHUMBNAIL, SPECIESDESCRIPTION, SPECIESTHUMBNAIL, STARSHIPDESCRIPTIONS, STARSHIPSIMAGES, VEHICLEDESCRIPTION, VEHICLEIMAGE, ) class SW(commands.Cog): """Interact with the Star Wars API""" def __init__(self, bot): self.bot = bot self.session = aiohttp.ClientSession() def cog_unload(self): self.__unload() def __unload(self): self.session.detach() async def red_delete_data_for_user(self, **kwargs): """This cog does not store user data""" return @checks.bot_has_permissions(embed_links=True) @commands.group(name="swapi", aliases=["starwars"]) async def starwars(self, ctx): """Group command for interacting with the Star Wars API""" pass @starwars.command() async def person(self, ctx, person_id: Union[int, str]): """Gets the profile of a person by their ID""" if isinstance(person_id, int): async with ctx.typing(): response = await self.session.get( r"https://swapi.dev/api/people/" + str(person_id) ) if response.status == 404: return await ctx.send("Invalid Person ID.") person = json.loads(await response.text()) embed = discord.Embed( title=f"Person: {person['name']}", description=HUMANDESCRIPTION[person["name"]], color=0x32CD32, ) embed.add_field(name="ID:", value=str(person_id)) for key, value in person.items(): if key in [ "name", "homeworld", "films", "species", "vehicles", "starships", "created", "edited", "url", ]: continue embed.add_field(name=key.replace("_", " ").title(), value=value.title()) embed.set_thumbnail(url=IMAGES[person["name"]]) homeworld_num = int(person["homeworld"].split(r"/")[-2]) homeworld = await self.session.get(person["homeworld"]) homeworld = json.loads(await homeworld.text()) embed.add_field( name="Homeworld", value=f"Name: {homeworld['name']}; ID: {str(homeworld_num)}", ) films = [] for film in person["films"]: film_num = int(film.split(r"/")[-2]) response = await self.session.get(film) film = json.loads(await response.text()) films.append(f"Title: {film['title']}; ID: {str(film_num)}") if len(films) != 0: embed.add_field(name="Films:", value="\n".join(films)) if person["species"]: species_num = int(person["species"][0].split(r"/")[-2]) species = await self.session.get(person["species"][0]) species = json.loads(await species.text()) embed.add_field( name="Species", 
value=f"Name: {species['name']}; ID: {str(species_num)}", ) else: embed.add_field(name="Species", value="Name: Unknown") vehicles = [] for vehicle in person["vehicles"]: vehicle_num = int(vehicle.split(r"/")[-2]) response = await self.session.get(vehicle) vehicle = json.loads(await response.text()) vehicles.append(f"Name: {vehicle['name']}; ID: {str(vehicle_num)}") if len(vehicles) != 0: embed.add_field(name="Vehicles:", value="\n".join(vehicles)) starships = [] for starship in person["starships"]: starship_num = int(starship.split(r"/")[-2]) response = await self.session.get(starship) starship = json.loads(await response.text()) starships.append(f"Name: {starship['name']}; ID: {str(starship_num)}") if len(starships) != 0: embed.add_field(name="Starships:", value="\n".join(starships)) await ctx.send(embed=embed) else: async with ctx.typing(): response = await self.session.get( r"https://swapi.dev/api/people/?search=" + str(person_id) ) if response.status == 404: return await ctx.send("Invalid Person ID.") person = json.loads(await response.text()) name = person["results"][0]["name"] embed = discord.Embed( title=f"Person: {name}", description=HUMANDESCRIPTION[name], color=0x32CD32, ) for key, value in person["results"][0].items(): if key in [ "name", "homeworld", "films", "species", "vehicles", "starships", "created", "edited", "url", ]: continue embed.add_field(name=key.replace("_", " ").title(), value=value.title()) embed.set_thumbnail(url=IMAGES[name]) homeworld_num = int(person["results"][0]["homeworld"].split(r"/")[-2]) homeworld = await self.session.get(person["results"][0]["homeworld"]) homeworld = json.loads(await homeworld.text()) embed.add_field( name="Homeworld", value=f"Name: {homeworld['name']}; ID: {str(homeworld_num)}", ) films = [] for film in person["results"][0]["films"]: film_num = int(film.split(r"/")[-2]) response = await self.session.get(film) film = json.loads(await response.text()) films.append(f"Title: {film['title']}; ID: {str(film_num)}") if len(films) != 0: embed.add_field(name="Films:", value="\n".join(films)) if person["results"][0]["species"]: species_num = int(person["results"][0]["species"][0].split(r"/")[-2]) species = await self.session.get(person["species"][0]) species = json.loads(await species.text()) embed.add_field( name="Species", value=f"Name: {species['name']}; ID: {str(species_num)}", ) else: embed.add_field(name="Species", value="Name: Unknown") vehicles = [] for vehicle in person["results"][0]["vehicles"]: vehicle_num = int(vehicle.split(r"/")[-2]) response = await self.session.get(vehicle) vehicle = json.loads(await response.text()) vehicles.append(f"Name: {vehicle['name']}; ID: {str(vehicle_num)}") if len(vehicles) != 0: embed.add_field(name="Vehicles:", value="\n".join(vehicles)) starships = [] for starship in person["results"][0]["starships"]: starship_num = int(starship.split(r"/")[-2]) response = await self.session.get(starship) starship = json.loads(await response.text()) starships.append(f"Name: {starship['name']}; ID: {str(starship_num)}") if len(starships) != 0: embed.add_field(name="Starships:", value="\n".join(starships)) await ctx.send(embed=embed) @starwars.command() async def planet(self, ctx, planet_id: Union[int, str]): """Gets the profile of a planet by their ID""" if isinstance(planet_id, int): async with ctx.typing(): response = await self.session.get( r"https://swapi.dev/api/planets/" + str(planet_id) ) if response.status == 404: return await ctx.send("Invalid Planet ID.") planet = json.loads(await response.text()) embed = 
discord.Embed( title=f"Planet: {planet['name']}", description=PLANETDESCRIPTION[planet["name"]], color=0x800080, ) embed.add_field(name="ID:", value=str(planet_id)) for key, value in planet.items(): if key in [ "name", "residents", "films", "edited", "created", "url", ]: continue embed.add_field(name=key.replace("_", " ").title(), value=value.title()) embed.set_thumbnail(url=PLANETTHUMBNAIL[planet["name"]]) embed.set_image(url=PLANETS[planet["name"]]) films = [] for film in planet["films"]: film_num = int(film.split(r"/")[-2]) response = await self.session.get(film) film = json.loads(await response.text()) films.append(f"Title: {film['title']}; ID: {str(film_num)}") if len(films) != 0: embed.add_field(name="Films:", value="\n".join(films)) residents = [] for resident in planet["residents"]: resident_num = int(resident.split(r"/")[-2]) response = await self.session.get(resident) resident = json.loads(await response.text()) residents.append(f"Name: {resident['name']}; ID: {str(resident_num)}") if len(residents) != 0: embed.add_field(name="Residents:", value="\n".join(residents)) await ctx.send(embed=embed) else: async with ctx.typing(): response = await self.session.get( r"https://swapi.dev/api/planets/?search=" + str(planet_id) ) if response.status == 404: return await ctx.send("Invalid Planet ID.") planet = json.loads(await response.text()) name = planet["results"][0]["name"] embed = discord.Embed( title=f"Planet: {name}", description=PLANETDESCRIPTION[name], color=0x800080, ) for key, value in planet["results"][0].items(): if key in [ "name", "residents", "films", "edited", "created", "url", ]: continue embed.add_field(name=key.replace("_", " ").title(), value=value.title()) embed.set_thumbnail(url=PLANETTHUMBNAIL[name]) embed.set_image(url=PLANETS[name]) films = [] for film in planet["results"][0]["films"]: film_num = int(film.split(r"/")[-2]) response = await self.session.get(film) film = json.loads(await response.text()) films.append(f"Title: {film['title']}; ID: {str(film_num)}") if len(films) != 0: embed.add_field(name="Films:", value="\n".join(films)) residents = [] for resident in planet["results"][0]["residents"]: resident_num = int(resident.split(r"/")[-2]) response = await self.session.get(resident) resident = json.loads(await response.text()) residents.append(f"Name: {resident['name']}; ID: {str(resident_num)}") if len(residents) != 0: embed.add_field(name="Residents:", value="\n".join(residents)) await ctx.send(embed=embed) @starwars.command() async def film(self, ctx, film_id: Union[int, str]): """Gets the info about a film by their ID""" if isinstance(film_id, int): async with ctx.typing(): response = await self.session.get(r"https://swapi.dev/api/films/" + str(film_id)) if response.status == 404: return await ctx.send("Invalid Film ID.") film = json.loads(await response.text()) embed = discord.Embed(title=f"Film: {film['title']}; Page 1/4", color=0x0000FF) embed.add_field(name="ID:", value=str(film_id)) for key, value in film.items(): if key in [ "name", "characters", "planets", "starships", "vehicles", "species", "created", "edited", "url", "opening_crawl", ]: continue value = value.title() if hasattr(value, "title") else value embed.add_field(name=key.replace("_", " ").title(), value=value) embed2 = discord.Embed(title=f"Film: {film['title']}; Page 2/4", color=0x0000FF) embed2.add_field(name="Opening Crawl", value=film["opening_crawl"]) embed3 = discord.Embed(title=f"Film: {film['title']}; Page 3/4", color=0x0000FF) residents = [] for resident in film["characters"]: 
resident_num = int(resident.split(r"/")[-2]) response = await self.session.get(resident) resident = json.loads(await response.text()) residents.append(f"Name: {resident['name']}; ID: {str(resident_num)}") if len(residents) != 0: embed3.add_field(name="Characters:", value="\n".join(residents)) planets = [] for planet in film["planets"]: planet_num = int(planet.split(r"/")[-2]) response = await self.session.get(planet) planet = json.loads(await response.text()) planets.append(f"Name: {planet['name']}; ID: {str(planet_num)}") if len(planets) != 0: embed3.add_field(name="Planets:", value="\n".join(planets)) embed4 = discord.Embed(title=f"Film: {film['title']}; Page 4/4", color=0x0000FF) objects = [] for entry in film["starships"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: embed4.add_field(name="Starships:", value="\n".join(objects)) objects = [] for entry in film["vehicles"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: embed4.add_field(name="Vehicles:", value="\n".join(objects)) objects = [] for entry in film["species"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: embed4.add_field(name="Species:", value="\n".join(objects)) embeds = [embed, embed2, embed3, embed4] await menu(ctx, embeds, DEFAULT_CONTROLS) else: async with ctx.typing(): response = await self.session.get( r"https://swapi.dev/api/films/?search=" + str(film_id) ) if response.status == 404: return await ctx.send("Invalid Film ID.") film = json.loads(await response.text()) name = film["results"][0]["title"] embed = discord.Embed(title=f"Film: {name}; Page 1/4", color=0x0000FF) for key, value in film["results"][0].items(): if key in [ "name", "characters", "planets", "starships", "vehicles", "species", "created", "edited", "url", "opening_crawl", ]: continue value = value.title() if hasattr(value, "title") else value embed.add_field(name=key.replace("_", " ").title(), value=value) embed2 = discord.Embed(title=f"Film: {name}; Page 2/4", color=0x0000FF) embed2.add_field(name="Opening Crawl", value=film["results"][0]["opening_crawl"]) embed3 = discord.Embed(title=f"Film: {name}; Page 3/4", color=0x0000FF) residents = [] for resident in film["results"][0]["characters"]: resident_num = int(resident.split(r"/")[-2]) response = await self.session.get(resident) resident = json.loads(await response.text()) residents.append(f"Name: {resident['name']}; ID: {str(resident_num)}") if len(residents) != 0: embed3.add_field(name="Characters:", value="\n".join(residents)) planets = [] for planet in film["results"][0]["planets"]: planet_num = int(planet.split(r"/")[-2]) response = await self.session.get(planet) planet = json.loads(await response.text()) planets.append(f"Name: {planet['name']}; ID: {str(planet_num)}") if len(planets) != 0: embed3.add_field(name="Planets:", value="\n".join(planets)) embed4 = discord.Embed(title=f"Film: {name}; Page 4/4", color=0x0000FF) objects = [] for entry in film["results"][0]["starships"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: 
{entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: embed4.add_field(name="Starships:", value="\n".join(objects)) objects = [] for entry in film["results"][0]["vehicles"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: embed4.add_field(name="Vehicles:", value="\n".join(objects)) objects = [] for entry in film["results"][0]["species"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: embed4.add_field(name="Species:", value="\n".join(objects)) embeds = [embed, embed2, embed3, embed4] await menu(ctx, embeds, DEFAULT_CONTROLS) @starwars.command() async def starship(self, ctx, starship_id: Union[int, str]): """Gets the profile of a starship by its ID""" if isinstance(starship_id, int): async with ctx.typing(): response = await self.session.get( r"https://swapi.dev/api/starships/" + str(starship_id) ) if response.status == 404: return await ctx.send("Invalid Starship ID.") starship = json.loads(await response.text()) embed = discord.Embed( title=f"Starship: {starship['name']}", description=STARSHIPDESCRIPTIONS[starship["name"]], color=0x000000, ) embed.add_field(name="ID:", value=str(starship_id)) for key, value in starship.items(): if key in ["name", "films", "edited", "created", "url", "pilots"]: continue embed.add_field(name=key.replace("_", " ").title(), value=value.title()) embed.set_image(url=STARSHIPSIMAGES[starship["name"]]) objects = [] for entry in starship["films"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['title']}; ID: {str(entry_num)}") if len(objects) != 0: embed.add_field(name="Films:", value="\n".join(objects)) objects = [] for entry in starship["pilots"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: embed.add_field(name="Pilots:", value="\n".join(objects)) await ctx.send(embed=embed) else: async with ctx.typing(): response = await self.session.get( r"https://swapi.dev/api/starships/?search=" + str(starship_id) ) if response.status == 404: return await ctx.send("Invalid Starship ID.") starship = json.loads(await response.text()) name = starship["results"][0]["name"] embed = discord.Embed( title=f"Starship: {name}", description=STARSHIPDESCRIPTIONS[name], color=0x000000, ) for key, value in starship["results"][0].items(): if key in ["name", "films", "edited", "created", "url", "pilots"]: continue embed.add_field(name=key.replace("_", " ").title(), value=value.title()) objects = [] embed.set_image(url=STARSHIPSIMAGES[name]) for entry in starship["results"][0]["films"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['title']}; ID: {str(entry_num)}") if len(objects) != 0: embed.add_field(name="Films:", value="\n".join(objects)) objects = [] for entry in starship["results"][0]["pilots"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: 
embed.add_field(name="Pilots:", value="\n".join(objects)) await ctx.send(embed=embed) @starwars.command() async def vehicle(self, ctx, vehicle_id: Union[int, str]): """Gets the profile of a vehicle by its ID""" if isinstance(vehicle_id, int): async with ctx.typing(): response = await self.session.get( r"https://swapi.dev/api/vehicles/" + str(vehicle_id) ) if response.status == 404: return await ctx.send("Invalid Vehicle ID.") vehicle = json.loads(await response.text()) embed = discord.Embed( title=f"Vehicle: {vehicle['name']}", description=VEHICLEDESCRIPTION[vehicle["name"]], color=0x228B22, ) embed.add_field(name="ID:", value=str(vehicle_id)) for key, value in vehicle.items(): if key in ["name", "films", "edited", "created", "url", "pilots"]: continue embed.add_field(name=key.replace("_", " ").title(), value=value.title()) objects = [] embed.set_image(url=VEHICLEIMAGE[vehicle["name"]]) for entry in vehicle["films"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['title']}; ID: {str(entry_num)}") if len(objects) != 0: embed.add_field(name="Films:", value="\n".join(objects)) objects = [] for entry in vehicle["pilots"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: embed.add_field(name="Pilots:", value="\n".join(objects)) await ctx.send(embed=embed) else: async with ctx.typing(): response = await self.session.get( r"https://swapi.dev/api/vehicles/?search=" + str(vehicle_id) ) if response.status == 404: return await ctx.send("Invalid Vehicle ID.") vehicle = json.loads(await response.text()) name = vehicle["results"][0]["name"] embed = discord.Embed( title=f"Vehicle: {name}", description=VEHICLEDESCRIPTION[name], color=0x228B22, ) for key, value in vehicle["results"][0].items(): if key in ["name", "films", "edited", "created", "url", "pilots"]: continue embed.add_field(name=key.replace("_", " ").title(), value=value.title()) objects = [] embed.set_image(url=VEHICLEIMAGE[name]) for entry in vehicle["results"][0]["films"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['title']}; ID: {str(entry_num)}") if len(objects) != 0: embed.add_field(name="Films:", value="\n".join(objects)) objects = [] for entry in vehicle["results"][0]["pilots"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: embed.add_field(name="Pilots:", value="\n".join(objects)) await ctx.send(embed=embed) @starwars.command() async def species(self, ctx, species_id: Union[int, str]): """Gets the profile of a species by its ID""" if isinstance(species_id, int): async with ctx.typing(): response = await self.session.get( r"https://swapi.dev/api/species/" + str(species_id) ) if response.status == 404: return await ctx.send("Invalid Species ID.") species = json.loads(await response.text()) embed = discord.Embed( title=f"Species: {species['name']}", description=SPECIESDESCRIPTION[species["name"]], color=0xD2B48C, ) embed.add_field(name="ID:", value=str(species_id)) for key, value in species.items(): if key in [ "name", "homeworld", "films", "people", "edited", "created", "url", ]: continue 
embed.add_field(name=key.replace("_", " ").title(), value=value.title()) embed.set_thumbnail(url=SPECIESTHUMBNAIL[species["name"]]) homeworld_num = int(species["homeworld"].split(r"/")[-2]) homeworld = await self.session.get(species["homeworld"]) homeworld = json.loads(await homeworld.text()) embed.add_field( name="Homeworld", value=f"Name: {homeworld['name']}; ID: {str(homeworld_num)}", ) objects = [] for entry in species["films"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['title']}; ID: {str(entry_num)}") if len(objects) != 0: embed.add_field(name="Films:", value="\n".join(objects)) objects = [] for entry in species["people"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: embed.add_field(name="People:", value="\n".join(objects)) await ctx.send(embed=embed) else: async with ctx.typing(): response = await self.session.get( r"https://swapi.dev/api/species/?search=" + str(species_id) ) if response.status == 404: return await ctx.send("Invalid Species ID.") species = json.loads(await response.text()) name = species["results"][0]["name"] embed = discord.Embed( title=f"Species: {name}", description=SPECIESDESCRIPTION[name], color=0xD2B48C, ) embed.add_field(name="ID:", value=str(species_id)) for key, value in species["results"][0].items(): if key in [ "name", "homeworld", "films", "people", "edited", "created", "url", ]: continue embed.add_field(name=key.replace("_", " ").title(), value=value.title()) embed.set_thumbnail(url=SPECIESTHUMBNAIL[name]) homeworld_num = int(species["results"][0]["homeworld"].split(r"/")[-2]) homeworld = await self.session.get(species["results"][0]["homeworld"]) homeworld = json.loads(await homeworld.text()) embed.add_field( name="Homeworld", value=f"Name: {homeworld['name']}; ID: {str(homeworld_num)}", ) objects = [] for entry in species["results"][0]["films"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['title']}; ID: {str(entry_num)}") if len(objects) != 0: embed.add_field(name="Films:", value="\n".join(objects)) objects = [] for entry in species["results"][0]["people"]: entry_num = int(entry.split(r"/")[-2]) response = await self.session.get(entry) entry = json.loads(await response.text()) objects.append(f"Name: {entry['name']}; ID: {str(entry_num)}") if len(objects) != 0: embed.add_field(name="People:", value="\n".join(objects)) await ctx.send(embed=embed) @starwars.group(name="all") async def _all_group(self, ctx): """Get all people, planets, starships, vehicles, species or films of star wars""" pass @_all_group.command() async def people(self, ctx): """Grabs all people in the star wars API. 
This command does take a bit.""" async with ctx.typing(): data = [] query = "https://swapi.dev/api/people" while True: response = await self.session.get(query) text = json.loads(await response.text()) data_two = text["results"] data += data_two if bool(text["next"]): query = text["next"] else: break persons_list = [] for person in data: embed = discord.Embed(title=f"Person: {person['name']}", color=0x32CD32) num = int(person["url"].split(r"/")[-2]) embed.add_field(name="ID:", value=str(num)) for key, value in person.items(): if key in [ "name", "homeworld", "films", "species", "vehicles", "starships", "created", "edited", "url", ]: continue embed.add_field(name=key.replace("_", " ").title(), value=value.title()) homeworld_num = int(person["homeworld"].split(r"/")[-2]) embed.add_field(name="Homeworld", value=f"ID: {str(homeworld_num)}") persons_list.append(embed) persons_list.sort( key=lambda x: int( [field for field in x.to_dict()["fields"] if field["name"] == "ID:"][0][ "value" ] ) ) await menu(ctx, persons_list, DEFAULT_CONTROLS) @_all_group.command() async def planets(self, ctx): """Grabs all planets in the star wars API. This command does take a bit.""" async with ctx.typing(): data = [] query = "https://swapi.dev/api/planets" while True: response = await self.session.get(query) text = json.loads(await response.text()) data_two = text["results"] data += data_two if bool(text["next"]): query = text["next"] else: break planets_list = [] for planet in data: embed = discord.Embed(title=f"Planet: {planet['name']}", color=0x800080) num = int(planet["url"].split(r"/")[-2]) embed.add_field(name="ID:", value=str(num)) for key, value in planet.items(): if key in [ "name", "residents", "films", "edited", "created", "url", ]: continue embed.add_field(name=key.replace("_", " ").title(), value=value.title()) planets_list.append(embed) planets_list.sort( key=lambda x: int( [field for field in x.to_dict()["fields"] if field["name"] == "ID:"][0][ "value" ] ) ) await menu(ctx, planets_list, DEFAULT_CONTROLS) @_all_group.command() async def films(self, ctx): """Grabs all films in the star wars API. This command does take a bit.""" async with ctx.typing(): data = [] query = "https://swapi.dev/api/films" while True: response = await self.session.get(query) text = json.loads(await response.text()) data_two = text["results"] data += data_two if bool(text["next"]): query = text["next"] else: break films_list = [] for film in data: embed = discord.Embed(title=f"Film: {film['title']}", color=0xD2B48C) num = int(film["url"].split(r"/")[-2]) embed.add_field(name="ID:", value=str(num)) for key, value in film.items(): if key in [ "name", "characters", "planets", "starships", "vehicles", "species", "created", "edited", "url", "opening_crawl", ]: continue value = value.title() if hasattr(value, "title") else value embed.add_field(name=key.replace("_", " ").title(), value=value) films_list.append(embed) films_list.sort( key=lambda x: int( [field for field in x.to_dict()["fields"] if field["name"] == "ID:"][0][ "value" ] ) ) await menu(ctx, films_list, DEFAULT_CONTROLS) @_all_group.command() async def starships(self, ctx): """Grabs all starships in the star wars API. 
This command does take a bit.""" async with ctx.typing(): data = [] query = "https://swapi.dev/api/starships" while True: response = await self.session.get(query) text = json.loads(await response.text()) data_two = text["results"] data += data_two if bool(text["next"]): query = text["next"] else: break starships_list = [] for starship in data: embed = discord.Embed(title=f"Starship: {starship['name']}", color=0x000000) num = int(starship["url"].split(r"/")[-2]) embed.add_field(name="ID:", value=str(num)) for key, value in starship.items(): if key in ["name", "films", "edited", "created", "url", "pilots"]: continue value = value.title() if hasattr(value, "title") else value embed.add_field(name=key.replace("_", " ").title(), value=value) starships_list.append(embed) starships_list.sort( key=lambda x: int( [field for field in x.to_dict()["fields"] if field["name"] == "ID:"][0][ "value" ] ) ) await menu(ctx, starships_list, DEFAULT_CONTROLS) @_all_group.command() async def vehicles(self, ctx): """Grabs all vehicles in the star wars API. This command does take a bit.""" async with ctx.typing(): data = [] query = "https://swapi.dev/api/vehicles" while True: response = await self.session.get(query) text = json.loads(await response.text()) data_two = text["results"] data += data_two if bool(text["next"]): query = text["next"] else: break vehicles_list = [] for vehicle in data: embed = discord.Embed(title=f"Vehicle: {vehicle['name']}", color=0x228B22) num = int(vehicle["url"].split(r"/")[-2]) embed.add_field(name="ID:", value=str(num)) for key, value in vehicle.items(): if key in ["name", "films", "edited", "created", "url", "pilots"]: continue value = value.title() if hasattr(value, "title") else value embed.add_field(name=key.replace("_", " ").title(), value=value) vehicles_list.append(embed) vehicles_list.sort( key=lambda x: int( [field for field in x.to_dict()["fields"] if field["name"] == "ID:"][0][ "value" ] ) ) await menu(ctx, vehicles_list, DEFAULT_CONTROLS) @_all_group.command(name="species") async def _all_species(self, ctx): """Grabs all vehicles in the star wars API. This command does take a bit.""" async with ctx.typing(): data = [] query = "https://swapi.dev/api/species" while True: response = await self.session.get(query) text = json.loads(await response.text()) data_two = text["results"] data += data_two if bool(text["next"]): query = text["next"] else: break species_list = [] for species in data: embed = discord.Embed(title=f"Species: {species['name']}", color=0xD2B48C) num = int(species["url"].split(r"/")[-2]) embed.add_field(name="ID:", value=str(num)) for key, value in species.items(): if key in [ "name", "homeworld", "films", "people", "edited", "created", "url", ]: continue value = value.title() if hasattr(value, "title") else value embed.add_field(name=key.replace("_", " ").title(), value=value) species_list.append(embed) species_list.sort( key=lambda x: int( [field for field in x.to_dict()["fields"] if field["name"] == "ID:"][0][ "value" ] ) ) await menu(ctx, species_list, DEFAULT_CONTROLS)
[ "discord.Embed", "redbot.core.checks.bot_has_permissions", "redbot.core.commands.group", "aiohttp.ClientSession", "redbot.core.utils.menus.menu" ]
[((1969, 2013), 'redbot.core.checks.bot_has_permissions', 'checks.bot_has_permissions', ([], {'embed_links': '(True)'}), '(embed_links=True)\n', (1995, 2013), False, 'from redbot.core import checks, commands\n'), ((2020, 2070), 'redbot.core.commands.group', 'commands.group', ([], {'name': '"""swapi"""', 'aliases': "['starwars']"}), "(name='swapi', aliases=['starwars'])\n", (2034, 2070), False, 'from redbot.core import checks, commands\n'), ((1701, 1724), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1722, 1724), False, 'import aiohttp\n'), ((38219, 38260), 'redbot.core.utils.menus.menu', 'menu', (['ctx', 'persons_list', 'DEFAULT_CONTROLS'], {}), '(ctx, persons_list, DEFAULT_CONTROLS)\n', (38223, 38260), False, 'from redbot.core.utils.menus import DEFAULT_CONTROLS, menu\n'), ((39910, 39951), 'redbot.core.utils.menus.menu', 'menu', (['ctx', 'planets_list', 'DEFAULT_CONTROLS'], {}), '(ctx, planets_list, DEFAULT_CONTROLS)\n', (39914, 39951), False, 'from redbot.core.utils.menus import DEFAULT_CONTROLS, menu\n'), ((41809, 41848), 'redbot.core.utils.menus.menu', 'menu', (['ctx', 'films_list', 'DEFAULT_CONTROLS'], {}), '(ctx, films_list, DEFAULT_CONTROLS)\n', (41813, 41848), False, 'from redbot.core.utils.menus import DEFAULT_CONTROLS, menu\n'), ((43416, 43459), 'redbot.core.utils.menus.menu', 'menu', (['ctx', 'starships_list', 'DEFAULT_CONTROLS'], {}), '(ctx, starships_list, DEFAULT_CONTROLS)\n', (43420, 43459), False, 'from redbot.core.utils.menus import DEFAULT_CONTROLS, menu\n'), ((45016, 45058), 'redbot.core.utils.menus.menu', 'menu', (['ctx', 'vehicles_list', 'DEFAULT_CONTROLS'], {}), '(ctx, vehicles_list, DEFAULT_CONTROLS)\n', (45020, 45058), False, 'from redbot.core.utils.menus import DEFAULT_CONTROLS, menu\n'), ((46841, 46882), 'redbot.core.utils.menus.menu', 'menu', (['ctx', 'species_list', 'DEFAULT_CONTROLS'], {}), '(ctx, species_list, DEFAULT_CONTROLS)\n', (46845, 46882), False, 'from redbot.core.utils.menus import DEFAULT_CONTROLS, menu\n'), ((2748, 2862), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Person: {person[\'name\']}"""', 'description': "HUMANDESCRIPTION[person['name']]", 'color': '(3329330)'}), '(title=f"Person: {person[\'name\']}", description=\n HUMANDESCRIPTION[person[\'name\']], color=3329330)\n', (2761, 2862), False, 'import discord\n'), ((6441, 6534), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Person: {name}"""', 'description': 'HUMANDESCRIPTION[name]', 'color': '(3329330)'}), "(title=f'Person: {name}', description=HUMANDESCRIPTION[name],\n color=3329330)\n", (6454, 6534), False, 'import discord\n'), ((10217, 10332), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Planet: {planet[\'name\']}"""', 'description': "PLANETDESCRIPTION[planet['name']]", 'color': '(8388736)'}), '(title=f"Planet: {planet[\'name\']}", description=\n PLANETDESCRIPTION[planet[\'name\']], color=8388736)\n', (10230, 10332), False, 'import discord\n'), ((12505, 12599), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Planet: {name}"""', 'description': 'PLANETDESCRIPTION[name]', 'color': '(8388736)'}), "(title=f'Planet: {name}', description=PLANETDESCRIPTION[name],\n color=8388736)\n", (12518, 12599), False, 'import discord\n'), ((14739, 14805), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Film: {film[\'title\']}; Page 1/4"""', 'color': '(255)'}), '(title=f"Film: {film[\'title\']}; Page 1/4", color=255)\n', (14752, 14805), False, 'import discord\n'), ((15573, 15639), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Film: {film[\'title\']}; Page 2/4"""', 'color': '(255)'}), '(title=f"Film: {film[\'title\']}; Page 2/4", color=255)\n', (15586, 15639), False, 'import discord\n'), ((15756, 15822), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Film: {film[\'title\']}; Page 3/4"""', 'color': '(255)'}), '(title=f"Film: {film[\'title\']}; Page 3/4", color=255)\n', (15769, 15822), False, 'import discord\n'), ((16828, 16894), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Film: {film[\'title\']}; Page 4/4"""', 'color': '(255)'}), '(title=f"Film: {film[\'title\']}; Page 4/4", color=255)\n', (16841, 16894), False, 'import discord\n'), ((18863, 18920), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Film: {name}; Page 1/4"""', 'color': '(255)'}), "(title=f'Film: {name}; Page 1/4', color=255)\n", (18876, 18920), False, 'import discord\n'), ((19637, 19694), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Film: {name}; Page 2/4"""', 'color': '(255)'}), "(title=f'Film: {name}; Page 2/4', color=255)\n", (19650, 19694), False, 'import discord\n'), ((19825, 19882), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Film: {name}; Page 3/4"""', 'color': '(255)'}), "(title=f'Film: {name}; Page 3/4', color=255)\n", (19838, 19882), False, 'import discord\n'), ((20916, 20973), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Film: {name}; Page 4/4"""', 'color': '(255)'}), "(title=f'Film: {name}; Page 4/4', color=255)\n", (20929, 20973), False, 'import discord\n'), ((23116, 23234), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Starship: {starship[\'name\']}"""', 'description': "STARSHIPDESCRIPTIONS[starship['name']]", 'color': '(0)'}), '(title=f"Starship: {starship[\'name\']}", description=\n STARSHIPDESCRIPTIONS[starship[\'name\']], color=0)\n', (23129, 23234), False, 'import discord\n'), ((25169, 25263), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Starship: {name}"""', 'description': 'STARSHIPDESCRIPTIONS[name]', 'color': '(0)'}), "(title=f'Starship: {name}', description=STARSHIPDESCRIPTIONS[\n name], color=0)\n", (25182, 25263), False, 'import discord\n'), ((27264, 27383), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Vehicle: {vehicle[\'name\']}"""', 'description': "VEHICLEDESCRIPTION[vehicle['name']]", 'color': '(2263842)'}), '(title=f"Vehicle: {vehicle[\'name\']}", description=\n VEHICLEDESCRIPTION[vehicle[\'name\']], color=2263842)\n', (27277, 27383), False, 'import discord\n'), ((29299, 29396), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Vehicle: {name}"""', 'description': 'VEHICLEDESCRIPTION[name]', 'color': '(2263842)'}), "(title=f'Vehicle: {name}', description=VEHICLEDESCRIPTION[name\n ], color=2263842)\n", (29312, 29396), False, 'import discord\n'), ((31342, 31462), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Species: {species[\'name\']}"""', 'description': "SPECIESDESCRIPTION[species['name']]", 'color': '(13808780)'}), '(title=f"Species: {species[\'name\']}", description=\n SPECIESDESCRIPTION[species[\'name\']], color=13808780)\n', (31355, 31462), False, 'import discord\n'), ((33963, 34061), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Species: {name}"""', 'description': 'SPECIESDESCRIPTION[name]', 'color': '(13808780)'}), "(title=f'Species: {name}', description=SPECIESDESCRIPTION[name\n ], color=13808780)\n", (33976, 34061), False, 'import discord\n'), ((37009, 37072), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Person: {person[\'name\']}"""', 'color': '(3329330)'}), '(title=f"Person: {person[\'name\']}", color=3329330)\n', (37022, 37072), False, 'import discord\n'), ((38971, 39034), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Planet: {planet[\'name\']}"""', 'color': '(8388736)'}), '(title=f"Planet: {planet[\'name\']}", color=8388736)\n', (38984, 39034), False, 'import discord\n'), ((40652, 40713), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Film: {film[\'title\']}"""', 'color': '(13808780)'}), '(title=f"Film: {film[\'title\']}", color=13808780)\n', (40665, 40713), False, 'import discord\n'), ((42569, 42630), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Starship: {starship[\'name\']}"""', 'color': '(0)'}), '(title=f"Starship: {starship[\'name\']}", color=0)\n', (42582, 42630), False, 'import discord\n'), ((44175, 44240), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Vehicle: {vehicle[\'name\']}"""', 'color': '(2263842)'}), '(title=f"Vehicle: {vehicle[\'name\']}", color=2263842)\n', (44188, 44240), False, 'import discord\n'), ((45790, 45856), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""Species: {species[\'name\']}"""', 'color': '(13808780)'}), '(title=f"Species: {species[\'name\']}", color=13808780)\n', (45803, 45856), False, 'import discord\n'), ((18385, 18420), 'redbot.core.utils.menus.menu', 'menu', (['ctx', 'embeds', 'DEFAULT_CONTROLS'], {}), '(ctx, embeds, DEFAULT_CONTROLS)\n', (18389, 18420), False, 'from redbot.core.utils.menus import DEFAULT_CONTROLS, menu\n'), ((22506, 22541), 'redbot.core.utils.menus.menu', 'menu', (['ctx', 'embeds', 'DEFAULT_CONTROLS'], {}), '(ctx, embeds, DEFAULT_CONTROLS)\n', (22510, 22541), False, 'from redbot.core.utils.menus import DEFAULT_CONTROLS, menu\n')]
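A usage note on the cog above: Red-DiscordBot loads cogs through a setup entry point in the cog package's __init__.py, which is not part of this row. The sketch below is an assumption based on Red's cog-loading convention of this era, not code from the dataset; the sw module name is hypothetical.

# Hypothetical __init__.py for the cog package; not included in the row above.
from .sw import SW  # assumes the module above is saved as sw.py


def setup(bot):
    bot.add_cog(SW(bot))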
# -*- coding: utf-8 -*-
"""
The :mod:`parsimony.algorithms.bases` module includes several base classes
for using and creating algorithms.

Algorithms may not store states. I.e., if they are classes, do not keep
references to objects with state in the algorithm objects. It should be
possible to copy and share algorithms between e.g. estimators, and thus they
should not depend on any state.

Created on Thu Feb 20 17:42:16 2014

Copyright (c) 2013-2017, CEA/DSV/I2BM/Neurospin. All rights reserved.

@author:  <NAME>, <NAME>
@email:   <EMAIL>, <EMAIL>
@license: BSD 3-clause.
"""
from six import with_metaclass

import abc
import functools

import parsimony.utils.consts as consts
import parsimony.functions.properties as properties

__all__ = ["BaseAlgorithm", "check_compatibility",
           "ImplicitAlgorithm", "ExplicitAlgorithm",
           "IterativeAlgorithm", "InformationAlgorithm",
           "KernelAlgorithm"]


class BaseAlgorithm(with_metaclass(abc.ABCMeta, object)):

    @staticmethod
    def check_compatibility(function, required_properties):
        """Check if the function considered implements the given properties.
        """
        if not isinstance(function, (list, tuple)):
            function = [function]
        for f in function:
            for prop in required_properties:
                if isinstance(prop, properties.OR):
                    if not prop.evaluate(f):
                        raise ValueError("%s does not implement all "
                                         "properties %s" % (str(f), str(prop)))
                elif not isinstance(f, prop):
                    raise ValueError("%s does not implement interface %s"
                                     % (str(f), str(prop)))

    def set_params(self, **kwargs):
        for k in kwargs:
            self.__setattr__(k, kwargs[k])

    def get_params(self):
        raise NotImplementedError('Method "get_params" has not been '
                                  'implemented.')

    def reset(self):
        """Resets the algorithm so that it is as if just created.

        Override in order to reset more things, but remember to call the base
        class' reset() method.
        """
        # TODO: Keep this list up to date!
        if isinstance(self, IterativeAlgorithm):
            self.iter_reset()
        if isinstance(self, InformationAlgorithm):
            self.info_reset()


# TODO: Replace the one in BaseAlgorithm.
def check_compatibility(f):
    """Automatically checks if a function implements a given set of
    properties.
    """
    @functools.wraps(f)
    def wrapper(self, function, *args, **kwargs):
        BaseAlgorithm.check_compatibility(function, self.INTERFACES)
        return f(self, function, *args, **kwargs)

    return wrapper


def force_reset(f):
    """Decorate run with this method to force a reset of your algorithm.

    Automatically resets an algorithm by checking the implementing classes
    and calling the appropriate reset methods.
    """
    @functools.wraps(f)
    def wrapper(self, function, *args, **kwargs):
        # Add more subclasses here if necessary.
        if isinstance(self, IterativeAlgorithm):
            self.iter_reset()
        if isinstance(self, InformationAlgorithm):
            self.info_reset()
        return f(self, function, *args, **kwargs)

    return wrapper


class ImplicitAlgorithm(with_metaclass(abc.ABCMeta, BaseAlgorithm)):
    """Implicit algorithms are algorithms that do not utilise a loss
    function.

    Implicit algorithms instead minimise or maximise some underlying function
    implicitly, usually from the data.

    Parameters
    ----------
    X : numpy.ndarray or list of numpy.ndarray
        One or more data matrices.
    """
    @abc.abstractmethod
    def run(X, **kwargs):
        raise NotImplementedError('Abstract method "run" must be '
                                  'specialised!')


class ExplicitAlgorithm(with_metaclass(abc.ABCMeta, BaseAlgorithm)):
    """Explicit algorithms are algorithms that minimises a given function.

    The function is explicitly minimised from properties of said function.

    Implementing classes should update the INTERFACES class variable with
    the properties that function must implement. Defaults to a list with one
    element, the Function.
    """
    INTERFACES = [properties.Function]

    @abc.abstractmethod
    def run(function, x, **kwargs):
        """This function obtains a minimiser of a give function.

        Parameters
        ----------
        function : parsimony.functions.properties.Function
            The function to minimise.

        x : numpy.ndarray or list of numpy.ndarray
            A starting point.
        """
        raise NotImplementedError('Abstract method "run" must be '
                                  'specialised!')


class IterativeAlgorithm(object):
    """Algorithms that require iterative steps to achieve the goal.

    Parameters
    ----------
    max_iter : int, optional
        A non-negative integer. The maximum number of allowed iterations.
        Default is consts.MAX_ITER.

    min_iter : int, optional
        A non-negative integer. The minimum number of required iterations.
        Default is 1.

    callback : Callable, optional
        A callable that accepts a dictionary with parameters and their
        values. Usually callback will be called with the output of locals()
        at each iteration of the algorithm.

    Fields
    ------
    max_iter : int
        Non-negative integer. The maximum number of allowed iterations.

    min_iter : int
        Non-negative integer less than or equal to max_iter. The minimum
        number of iterations that must be performed. Default is 1.

    num_iter : int
        Non-negative integer greater than or equal to min_iter. The number
        of iterations performed by the iterative algorithm. All algorithms
        that inherit from IterativeAlgortihm MUST call iter_reset before
        every run.
    """
    def __init__(self, max_iter=consts.MAX_ITER, min_iter=1, callback=None,
                 **kwargs):
        super(IterativeAlgorithm, self).__init__(**kwargs)

        self.min_iter = max(0, int(min_iter))
        self.max_iter = max(self.min_iter, int(max_iter))
        if (callback is None) or hasattr(callback, "__call__"):
            self.callback = callback
        else:
            raise ValueError("The callback must be callable, or None.")
        self.num_iter = 0

        self.iter_reset()

    def iter_reset(self):
        self.num_iter = 0


class InformationAlgorithm(object):
    """Algorithms that produce information about their run.

    Implementing classes should update the INFO_PROVIDED class variable with
    the information provided by the algorithm. Defauls to an empty list.

    ALL algorithms that inherit from InformationAlgorithm MUST add
    force_reset as a decorator to the run method.

    Parameters
    ----------
    info : list or tuple of utils.Info, optional
        The identifiers for the run information to return. Default is an
        empty list.

    Fields
    ------
    info_ret : dict
        The algorithm outputs are collected in this dictionary.

    info : list of utils.Info
        The identifiers for the requested information outputs. The
        algorithms will store the requested outputs in self.info.

    INFO_PROVIDED : list of utils.Info
        The allowed output identifiers. The implementing class should update
        this list with the provided/allowed outputs.

    Examples
    --------
    >>> import numpy as np
    >>> import parsimony.algorithms as algorithms
    >>> from parsimony.algorithms.utils import Info
    >>> from parsimony.functions.losses import LinearRegression
    >>> np.random.seed(42)
    >>>
    >>> gd = algorithms.gradient.GradientDescent(info=[Info.fvalue])
    >>> gd.info_copy()
    ['fvalue']
    >>> lr = LinearRegression(X=np.random.rand(10,15), y=np.random.rand(10,1))
    >>> beta = gd.run(lr, np.random.rand(15, 1))
    >>> fvalue = gd.info_get(Info.fvalue)
    >>> fvalue[0]  # doctest: +ELLIPSIS
    0.06851092...
    >>> fvalue[-1]  # doctest: +ELLIPSIS
    1.88...e-12
    """
    INFO_PROVIDED = []

    def __init__(self, info=[], **kwargs):
        super(InformationAlgorithm, self).__init__(**kwargs)

        if not isinstance(info, (list, tuple)):
            self.info = [info]
        else:
            self.info = list(info)
        self.info_ret = dict()

        self.check_info_compatibility(self.info)

    def info_get(self, nfo=None):
        """Returns the computed information about the algorithm run.

        Parameters
        ----------
        nfo : utils.Info
            The identifier to return information about. If nfo is None, all
            information is returned in a dictionary.
        """
        if nfo is None:
            return self.info_ret
        else:
            return self.info_ret[nfo]

    def info_set(self, nfo, value):
        """Sets the computed information about the algorithm run identified
        by nfo.

        Parameters
        ----------
        nfo : utils.Info
            The identifier to for the computed information about.

        value : object
            The value to associate with nfo.
        """
        self.info_ret[nfo] = value

    def info_provided(self, nfo):
        """Returns true if the current algorithm provides the given
        information, and False otherwise.
        """
        return nfo in self.INFO_PROVIDED

    def info_requested(self, nfo):
        """Returns true if the the given information was requested, and
        False otherwise.
        """
        return nfo in self.info

    def info_add_request(self, nfo):
        """Add a request to the algorithm's list of requested info.
        """
        return self.info.append(nfo)

    def info_reset(self):
        """Resets the information saved in the previous run. The info_ret
        field, a dictionary, is cleared.
        """
        self.info_ret.clear()

    def info_copy(self):
        """Returns a shallow copy of the requested information.
        """
        return list(self.info)

    def check_info_compatibility(self, info):
        """Check if the requested information is provided.

        Parameters
        ----------
        info : list of utils.Info
            The identifiers for information that should be computed.
        """
        for i in info:
            if not self.info_provided(i):
                raise ValueError("Requested information (%s) not provided."
                                 % (str(i),))


class KernelAlgorithm(object):
    """Algorithms that use Mercer kernels.

    Implementing classes should have a field kernel and supply a get_kernel
    method.

    Fields
    ------
    kernel_get : kernel object, optional
        Returns the kernel. Default is a linear kernel.

    Examples
    --------
    >>> import parsimony.algorithms.algorithms as algorithms
    >>> import parsimony.algorithms.utils as utils
    >>>
    >>> K = utils.LinearKernel()
    >>> smo = algorithms.SequentialMinimalOptimization(1.0, kernel=K)
    >>> # smo.kernel_get()
    """
    def __init__(self, kernel=None, **kwargs):
        """
        Parameters
        ----------
        kernel : kernel object, optional
            The kernel to use. Default is a linear kernel.
        """
        super(KernelAlgorithm, self).__init__(**kwargs)

        self.kernel = kernel

    def kernel_get(self):
        """Returns the kernel.
        """
        return self.kernel


if __name__ == "__main__":
    import doctest
    doctest.testmod()
[ "doctest.testmod", "functools.wraps", "six.with_metaclass" ]
[((936, 971), 'six.with_metaclass', 'with_metaclass', (['abc.ABCMeta', 'object'], {}), '(abc.ABCMeta, object)\n', (950, 971), False, 'from six import with_metaclass\n'), ((3376, 3418), 'six.with_metaclass', 'with_metaclass', (['abc.ABCMeta', 'BaseAlgorithm'], {}), '(abc.ABCMeta, BaseAlgorithm)\n', (3390, 3418), False, 'from six import with_metaclass\n'), ((3931, 3973), 'six.with_metaclass', 'with_metaclass', (['abc.ABCMeta', 'BaseAlgorithm'], {}), '(abc.ABCMeta, BaseAlgorithm)\n', (3945, 3973), False, 'from six import with_metaclass\n'), ((2559, 2577), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (2574, 2577), False, 'import functools\n'), ((3000, 3018), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (3015, 3018), False, 'import functools\n'), ((11627, 11644), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (11642, 11644), False, 'import doctest\n')]
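The bases in the row above are meant to be mixed into concrete algorithms. Below is a minimal sketch of how an implementing class might compose them, assuming parsimony's Gradient property (with a grad method) exists as in the shipped library; ToyDescent and its step parameter are hypothetical, not part of the module.

import parsimony.functions.properties as properties
from parsimony.algorithms.bases import (ExplicitAlgorithm,
                                         IterativeAlgorithm,
                                         InformationAlgorithm,
                                         check_compatibility, force_reset)


class ToyDescent(ExplicitAlgorithm, IterativeAlgorithm, InformationAlgorithm):
    """Hypothetical minimiser illustrating how the base classes compose."""

    INTERFACES = [properties.Function, properties.Gradient]
    INFO_PROVIDED = []  # extend with utils.Info identifiers as outputs are added

    @force_reset            # resets iteration/info state before each run
    @check_compatibility    # verifies `function` implements INTERFACES
    def run(self, function, x, step=0.01, **kwargs):
        for it in range(1, self.max_iter + 1):
            self.num_iter = it
            x = x - step * function.grad(x)  # plain gradient step
            if self.callback is not None:
                self.callback(locals())
        return x

Cooperative __init__ chaining (each mixin calls super().__init__(**kwargs)) is what lets ToyDescent(max_iter=100, info=[]) initialise both the iteration and information state.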
#!/usr/bin/env python

import rospy

from mongodb_store.message_store import MessageStoreProxy

from bayes_people_tracker.msg import PeopleTracker
from topological_logging_manager.msg import LoggingManager
from bayes_people_tracker_logging.msg import PeopleTrackerLogging
from geometry_msgs.msg import Pose, PoseStamped

import message_filters
import tf
from std_msgs.msg import String


class SaveLocations():
    def __init__(self):
        rospy.logdebug("Intialising logging")
        self.robot_pose = Pose()
        self.current_node = "none"
        self.current_edge = "none"
        self.closest_node = "none"
        self.tfl = tf.TransformListener()
        self.dataset_name = "tracks"
        self.target_frame = "/map"
        self.msg_store = MessageStoreProxy(collection="people_perception")
        manager_topic = rospy.get_param("~manager_topic", "")
        rospy.Subscriber(
            "/robot_pose",
            Pose,
            callback=self.pose_callback,
            queue_size=10
        )
        rospy.Subscriber(
            "/current_node",
            String,
            callback=self.node_callback,
            queue_size=10
        )
        rospy.Subscriber(
            "/current_edge",
            String,
            callback=self.edge_callback,
            queue_size=10
        )
        rospy.Subscriber(
            "/closest_node",
            String,
            callback=self.closest_callback,
            queue_size=10
        )
        subs = [
            message_filters.Subscriber(
                "/people_tracker/positions",
                PeopleTracker
            )
        ]
        if not manager_topic == '':
            subs += [message_filters.Subscriber(manager_topic, LoggingManager)]
        ts = message_filters.ApproximateTimeSynchronizer(
            subs,
            10,
            0.5
        )
        ts.registerCallback(self.people_callback)

    def transform(self, pose, target_frame):
        try:
            self.tfl.waitForTransform(
                target_frame,
                pose.header.frame_id,
                pose.header.stamp,
                rospy.Duration(3.0)
            )
            return self.tfl.transformPose(target_frame=target_frame, ps=pose)
        except (
            tf.Exception,
            tf.ConnectivityException,
            tf.LookupException,
            tf.ExtrapolationException
        ) as e:
            rospy.logwarn(e)
            return None
        return None

    def people_callback(self, pl, *mgr):
        if len(mgr) and not mgr[0].log:
            return
        if not len(pl.distances):
            return
        meta = {}
        meta["people"] = self.dataset_name
        rospy.logdebug(
            "Person detected. "
            "Logging to people_perception collection."
        )
        insert = PeopleTrackerLogging()
        insert.header = pl.header
        insert.uuids = pl.uuids
        for p in pl.poses:
            tp = self.transform(PoseStamped(header=pl.header, pose=p), self.target_frame)
            if tp:
                insert.people.append(tp)
        insert.robot = self.robot_pose
        insert.people_tracker = pl
        insert.closest_node = self.closest_node
        insert.current_edge = self.current_edge
        insert.current_node = self.current_node
        self.msg_store.insert(insert, meta)

    def pose_callback(self, pose):
        self.robot_pose = pose

    def node_callback(self, msg):
        self.current_node = msg.data

    def edge_callback(self, msg):
        self.current_edge = msg.data

    def closest_callback(self, msg):
        self.closest_node = msg.data


if __name__ == '__main__':
    rospy.init_node('save_people_locations')
    sl = SaveLocations()
    rospy.spin()
[ "rospy.logwarn", "geometry_msgs.msg.PoseStamped", "mongodb_store.message_store.MessageStoreProxy", "rospy.Subscriber", "message_filters.ApproximateTimeSynchronizer", "bayes_people_tracker_logging.msg.PeopleTrackerLogging", "rospy.get_param", "rospy.logdebug", "rospy.init_node", "message_filters.Subscriber", "rospy.spin", "geometry_msgs.msg.Pose", "tf.TransformListener", "rospy.Duration" ]
[((3684, 3724), 'rospy.init_node', 'rospy.init_node', (['"""save_people_locations"""'], {}), "('save_people_locations')\n", (3699, 3724), False, 'import rospy\n'), ((3754, 3766), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3764, 3766), False, 'import rospy\n'), ((440, 477), 'rospy.logdebug', 'rospy.logdebug', (['"""Intialising logging"""'], {}), "('Intialising logging')\n", (454, 477), False, 'import rospy\n'), ((504, 510), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (508, 510), False, 'from geometry_msgs.msg import Pose, PoseStamped\n'), ((635, 657), 'tf.TransformListener', 'tf.TransformListener', ([], {}), '()\n', (655, 657), False, 'import tf\n'), ((755, 804), 'mongodb_store.message_store.MessageStoreProxy', 'MessageStoreProxy', ([], {'collection': '"""people_perception"""'}), "(collection='people_perception')\n", (772, 804), False, 'from mongodb_store.message_store import MessageStoreProxy\n'), ((830, 867), 'rospy.get_param', 'rospy.get_param', (['"""~manager_topic"""', '""""""'], {}), "('~manager_topic', '')\n", (845, 867), False, 'import rospy\n'), ((877, 962), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/robot_pose"""', 'Pose'], {'callback': 'self.pose_callback', 'queue_size': '(10)'}), "('/robot_pose', Pose, callback=self.pose_callback,\n queue_size=10)\n", (893, 962), False, 'import rospy\n'), ((1025, 1114), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_node"""', 'String'], {'callback': 'self.node_callback', 'queue_size': '(10)'}), "('/current_node', String, callback=self.node_callback,\n queue_size=10)\n", (1041, 1114), False, 'import rospy\n'), ((1177, 1266), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_edge"""', 'String'], {'callback': 'self.edge_callback', 'queue_size': '(10)'}), "('/current_edge', String, callback=self.edge_callback,\n queue_size=10)\n", (1193, 1266), False, 'import rospy\n'), ((1329, 1421), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/closest_node"""', 'String'], {'callback': 'self.closest_callback', 'queue_size': '(10)'}), "('/closest_node', String, callback=self.closest_callback,\n queue_size=10)\n", (1345, 1421), False, 'import rospy\n'), ((1758, 1816), 'message_filters.ApproximateTimeSynchronizer', 'message_filters.ApproximateTimeSynchronizer', (['subs', '(10)', '(0.5)'], {}), '(subs, 10, 0.5)\n', (1801, 1816), False, 'import message_filters\n'), ((2708, 2783), 'rospy.logdebug', 'rospy.logdebug', (['"""Person detected. Logging to people_perception collection."""'], {}), "('Person detected. Logging to people_perception collection.')\n", (2722, 2783), False, 'import rospy\n'), ((2838, 2860), 'bayes_people_tracker_logging.msg.PeopleTrackerLogging', 'PeopleTrackerLogging', ([], {}), '()\n', (2858, 2860), False, 'from bayes_people_tracker_logging.msg import PeopleTrackerLogging\n'), ((1506, 1576), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""/people_tracker/positions"""', 'PeopleTracker'], {}), "('/people_tracker/positions', PeopleTracker)\n", (1532, 1576), False, 'import message_filters\n'), ((1686, 1743), 'message_filters.Subscriber', 'message_filters.Subscriber', (['manager_topic', 'LoggingManager'], {}), '(manager_topic, LoggingManager)\n', (1712, 1743), False, 'import message_filters\n'), ((2130, 2149), 'rospy.Duration', 'rospy.Duration', (['(3.0)'], {}), '(3.0)\n', (2144, 2149), False, 'import rospy\n'), ((2421, 2437), 'rospy.logwarn', 'rospy.logwarn', (['e'], {}), '(e)\n', (2434, 2437), False, 'import rospy\n'), ((2986, 3023), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {'header': 'pl.header', 'pose': 'p'}), '(header=pl.header, pose=p)\n', (2997, 3023), False, 'from geometry_msgs.msg import Pose, PoseStamped\n')]
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver

cls = get_driver(Provider.JOYENT)
conn = cls(host='us-east-1.docker.joyent.com', port=2376,
           key_file='key.pem', cert_file='~/.sdc/docker/admin/ca.pem')
conn.list_images()
[ "libcloud.container.providers.get_driver" ]
[((105, 132), 'libcloud.container.providers.get_driver', 'get_driver', (['Provider.JOYENT'], {}), '(Provider.JOYENT)\n', (115, 132), False, 'from libcloud.container.providers import get_driver\n')]
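The libcloud snippet above ends with a bare list_images() call whose result is discarded. A minimal, hypothetical continuation (assuming only the documented libcloud container API, in which list_images() returns ContainerImage objects carrying id and name attributes) could print what the driver returns:

# Hypothetical continuation of the snippet above: show the returned images.
for image in conn.list_images():
    print(image.id, image.name)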
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-14 08:58
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields


class Migration(migrations.Migration):

    dependencies = [
        ('v1', '0148_relatedresource'),
        ('ask_cfpb', '0021_rssfeed_improvements'),
    ]

    operations = [
        migrations.DeleteModel(
            name='EnglishAnswerProxy',
        ),
        migrations.DeleteModel(
            name='SpanishAnswerProxy',
        ),
        migrations.AddField(
            model_name='answerpage',
            name='answer_id',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='answerpage',
            name='category',
            field=models.ManyToManyField(blank=True, help_text='Categorize this answer. Avoid putting into more than one category.', to='ask_cfpb.Category'),
        ),
        migrations.AddField(
            model_name='answerpage',
            name='featured',
            field=models.BooleanField(default=False, help_text='Check to make this one of two featured answers on the landing page.'),
        ),
        migrations.AddField(
            model_name='answerpage',
            name='featured_rank',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='answerpage',
            name='redirect_to_page',
            field=models.ForeignKey(blank=True, help_text='Choose another AnswerPage to redirect this page to', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='redirect_to_pages', to='ask_cfpb.AnswerPage'),
        ),
        migrations.AddField(
            model_name='answerpage',
            name='related_questions',
            field=modelcluster.fields.ParentalManyToManyField(blank=True, help_text='Maximum of 3', related_name='related_question', to='ask_cfpb.AnswerPage'),
        ),
        migrations.AddField(
            model_name='answerpage',
            name='related_resource',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='v1.RelatedResource'),
        ),
        migrations.AddField(
            model_name='answerpage',
            name='subcategory',
            field=models.ManyToManyField(blank=True, help_text='Choose only subcategories that belong to one of the categories checked above.', to='ask_cfpb.SubCategory'),
        ),
        migrations.AlterField(
            model_name='answerpage',
            name='search_tags',
            field=models.CharField(blank=True, help_text='Search words or phrases, separated by commas', max_length=1000),
        ),
    ]
[ "django.db.models.ManyToManyField", "django.db.models.ForeignKey", "django.db.models.CharField", "django.db.models.BooleanField", "django.db.migrations.DeleteModel", "django.db.models.IntegerField" ]
[((404, 453), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""EnglishAnswerProxy"""'}), "(name='EnglishAnswerProxy')\n", (426, 453), False, 'from django.db import migrations, models\n'), ((486, 535), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""SpanishAnswerProxy"""'}), "(name='SpanishAnswerProxy')\n", (508, 535), False, 'from django.db import migrations, models\n'), ((674, 704), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (693, 704), False, 'from django.db import migrations, models\n'), ((830, 977), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'help_text': '"""Categorize this answer. Avoid putting into more than one category."""', 'to': '"""ask_cfpb.Category"""'}), "(blank=True, help_text=\n 'Categorize this answer. Avoid putting into more than one category.',\n to='ask_cfpb.Category')\n", (852, 977), False, 'from django.db import migrations, models\n'), ((1094, 1214), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Check to make this one of two featured answers on the landing page."""'}), "(default=False, help_text=\n 'Check to make this one of two featured answers on the landing page.')\n", (1113, 1214), False, 'from django.db import migrations, models\n'), ((1340, 1382), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1359, 1382), False, 'from django.db import migrations, models\n'), ((1516, 1740), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'help_text': '"""Choose another AnswerPage to redirect this page to"""', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""redirect_to_pages"""', 'to': '"""ask_cfpb.AnswerPage"""'}), "(blank=True, help_text=\n 'Choose another AnswerPage to redirect this page to', null=True,\n on_delete=django.db.models.deletion.SET_NULL, related_name=\n 'redirect_to_pages', to='ask_cfpb.AnswerPage')\n", (1533, 1740), False, 'from django.db import migrations, models\n'), ((2135, 2251), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': '"""v1.RelatedResource"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, to='v1.RelatedResource')\n", (2152, 2251), False, 'from django.db import migrations, models\n'), ((2375, 2537), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'help_text': '"""Choose only subcategories that belong to one of the categories checked above."""', 'to': '"""ask_cfpb.SubCategory"""'}), "(blank=True, help_text=\n 'Choose only subcategories that belong to one of the categories checked above.'\n , to='ask_cfpb.SubCategory')\n", (2397, 2537), False, 'from django.db import migrations, models\n'), ((2658, 2766), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Search words or phrases, separated by commas"""', 'max_length': '(1000)'}), "(blank=True, help_text=\n 'Search words or phrases, separated by commas', max_length=1000)\n", (2674, 2766), False, 'from django.db import migrations, models\n')]
# --------------
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings

warnings.filterwarnings('ignore')

# Code starts here
# load the file using pandas
df = pd.read_csv(path)

# observe the top rows of the dataset
print(df.head())

# split the data into features and labels
X = df.drop(columns=['insuranceclaim'], axis=1)
y = df['insuranceclaim']

# split into train and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=6, test_size=0.2)

# print the shapes of train and test set
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)

# Code ends here


# --------------
import matplotlib.pyplot as plt

# Code starts here
# plotting box plot for train features
sns.boxplot(y='bmi', data=X_train)

# set the quantile equal to 0.95 for X_train['bmi']
q_value = X_train['bmi'].quantile(q=0.95)

# check the value counts of y_train
print("Distribution of labels in y_train is:", y_train.value_counts())

# Code ends here


# --------------
# Code starts here
# find the correlation between features
relation = X_train.corr()
print(relation)

# plot the pairplot between all the features of the train set
sns.pairplot(X_train)

# Code ends here


# --------------
import seaborn as sns
import matplotlib.pyplot as plt

# Code starts here
# identifying the columns for countplot
cols = ['children', 'sex', 'region', 'smoker']

# defining the subplots
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 8))

# looping through the rows and cols
for i, row in enumerate(axes):
    for j, ax in enumerate(row):
        # plotting countplot
        col = cols[i * 2 + j]
        sns.countplot(x=col, data=X_train, hue=y_train, ax=axes[i, j])

# Code ends here


# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

# parameters for grid search
parameters = {'C': [0.1, 0.5, 1, 5]}

# Code starts here
# initialize the logistic regression
lr = LogisticRegression(random_state=9)

# using GridSearchCV, do an exhaustive search over the specified parameter values for the estimator
grid = GridSearchCV(estimator=lr, param_grid=parameters)

# fit the model to train data
grid.fit(X_train, y_train)

# do the predictions
y_pred = grid.predict(X_test)

# calculate the accuracy
accuracy = accuracy_score(y_test, y_pred)
print(accuracy)

# Code ends here


# --------------
from sklearn.metrics import roc_auc_score
from sklearn import metrics
from sklearn.metrics import roc_curve

# Code starts here
# calculate the roc-auc score
score = roc_auc_score(y_test, y_pred)
print('roc_auc_score is:', score)

# calculate the probability using grid search with the best found parameters
y_pred_proba = grid.predict_proba(X_test)[:, 1]

# calculate the false positive rate and true positive rate using the roc curve
fpr, tpr, _ = roc_curve(y_test, y_pred_proba)

# calculate the roc_auc score of the predicted probabilities
roc_auc = roc_auc_score(y_test, y_pred_proba)

# plot the auc curve
plt.plot(fpr, tpr, label='Logistic Model, auc' + str(roc_auc))

# book-keeping
plt.title('Receiver Operating Characteristic')
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()

# Code ends here
[ "matplotlib.pyplot.title", "sklearn.model_selection.GridSearchCV", "pandas.read_csv", "sklearn.model_selection.train_test_split", "sklearn.metrics.accuracy_score", "seaborn.pairplot", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "sklearn.metrics.roc_auc_score", "sklearn.linear_model.LogisticRegression", "seaborn.boxplot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlim", "sklearn.metrics.roc_curve", "warnings.filterwarnings", "matplotlib.pyplot.plot", "seaborn.countplot", "matplotlib.pyplot.xlabel" ]
[((170, 203), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (193, 203), False, 'import warnings\n'), ((258, 275), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (269, 275), True, 'import pandas as pd\n'), ((516, 569), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': '(6)', 'test_size': '(0.2)'}), '(X, y, random_state=6, test_size=0.2)\n', (532, 569), False, 'from sklearn.model_selection import train_test_split\n'), ((803, 837), 'seaborn.boxplot', 'sns.boxplot', ([], {'y': '"""bmi"""', 'data': 'X_train'}), "(y='bmi', data=X_train)\n", (814, 837), True, 'import seaborn as sns\n'), ((1236, 1257), 'seaborn.pairplot', 'sns.pairplot', (['X_train'], {}), '(X_train)\n', (1248, 1257), True, 'import seaborn as sns\n'), ((1489, 1536), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(10, 8)'}), '(nrows=2, ncols=2, figsize=(10, 8))\n', (1501, 1536), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2128), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(9)'}), '(random_state=9)\n', (2112, 2128), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2228, 2277), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', ([], {'estimator': 'lr', 'param_grid': 'parameters'}), '(estimator=lr, param_grid=parameters)\n', (2240, 2277), False, 'from sklearn.model_selection import GridSearchCV, RandomizedSearchCV\n'), ((2426, 2456), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2440, 2456), False, 'from sklearn.metrics import accuracy_score\n'), ((2676, 2705), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2689, 2705), False, 'from sklearn.metrics import roc_auc_score\n'), ((2959, 2990), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_pred_proba'], {}), '(y_test, y_pred_proba)\n', (2968, 2990), False, 'from sklearn.metrics import roc_curve\n'), ((3037, 3072), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_test', 'y_pred_proba'], {}), '(y_test, y_pred_proba)\n', (3050, 3072), False, 'from sklearn.metrics import roc_auc_score\n'), ((3173, 3219), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver Operating Characteristic"""'], {}), "('Receiver Operating Characteristic')\n", (3182, 3219), True, 'import matplotlib.pyplot as plt\n'), ((3220, 3249), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (3230, 3249), True, 'import matplotlib.pyplot as plt\n'), ((3252, 3283), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""r--"""'], {}), "([0, 1], [0, 1], 'r--')\n", (3260, 3283), True, 'import matplotlib.pyplot as plt\n'), ((3283, 3299), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (3291, 3299), True, 'import matplotlib.pyplot as plt\n'), ((3300, 3316), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (3308, 3316), True, 'import matplotlib.pyplot as plt\n'), ((3317, 3349), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (3327, 3349), True, 'import matplotlib.pyplot as plt\n'), ((3350, 3383), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (3360, 3383), True, 'import matplotlib.pyplot as plt\n'), ((3384, 3394), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3392, 3394), True, 'import matplotlib.pyplot as plt\n'), ((1702, 1764), 'seaborn.countplot', 'sns.countplot', ([], {'x': 'col', 'data': 'X_train', 'hue': 'y_train', 'ax': 'axes[i, j]'}), '(x=col, data=X_train, hue=y_train, ax=axes[i, j])\n', (1715, 1764), True, 'import seaborn as sns\n')]
# -*- coding: utf-8 -*-
'''Example 01

Shows how to create simple geometry from splines and ellipse arcs, and how
to mesh a quad mesh in GmshMesher. Also demonstrates drawGeometry(),
drawMesh, and drawing texts and labels in a figure.
'''

import numpy as np

import calfem.geometry as cfg
import calfem.mesh as cfm
import calfem.vis_mpl as cfv
import calfem.core as cfc
import calfem.utils as cfu

cfu.enableLogging()

# ---- Problem constants ----------------------------------------------------

kx1 = 100
ky1 = 100
t = 1.0

# Gauss points or integration points
n = 2
ep = [t, n]

D = np.matrix([
    [kx1, 0.],
    [0., ky1]
])

# ---- Define geometry ------------------------------------------------------

g = cfg.Geometry()  # Create a GeoData object that holds the geometry.

g.point([0, 0])
g.point([2, 0])
g.point([2, 1])
g.point([0, 1])
g.point([0.5, 0.3])
g.point([0.3, 0.7])
g.point([0.7, 0.7])
g.point([0.8, 0.5])
g.point([1.7, 0.5])
g.point([1.5, 0.5])
g.point([1.7, 0.7])

id_hole1 = 50
id_hole2 = 60
id_outer = 80

g.ellipse([7, 8, 9, 10], marker=id_hole1)
g.spline([0, 1], marker=id_outer)
g.spline([2, 1], marker=id_outer)
g.spline([3, 2], marker=id_outer)
g.spline([0, 3], marker=id_outer)
g.spline([7, 9], marker=id_hole1)
g.spline([10, 9], marker=id_hole1)
g.spline([4, 5, 6, 4], marker=id_hole2)

g.surface([4, 3, 2, 1], [[7], [5, 6, 0]])

# ---- Generate mesh --------------------------------------------------------

mesh = cfm.GmshMesh(g)

mesh.el_type = 16
mesh.dofs_per_node = 1      # Degrees of freedom per node.
mesh.el_size_factor = 0.05  # Factor that changes element sizes.

coords, edof, dofs, bdofs, element_markers = mesh.create()

print(edof)

# ---- Solve problem --------------------------------------------------------

print("Assembling system matrix...")

n_dofs = np.size(dofs)
ex, ey = cfc.coordxtr(edof, coords, dofs)

K = np.zeros([n_dofs, n_dofs])

for el_topo, elx, ely, marker in zip(edof, ex, ey, element_markers):

    # Calc element stiffness matrix: Conductivity matrix D is taken
    # from Ddict and depends on which region (which marker) the element is in.

    if mesh.el_type == 2:
        Ke = cfc.flw2te(elx, ely, ep, D)
    elif mesh.el_type == 3:
        Ke = cfc.flw2i4e(elx, ely, ep, D)
    elif mesh.el_type == 16:
        Ke = cfc.flw2i8e(elx, ely, ep, D)
    else:
        print("Element type not supported")

    cfc.assem(el_topo, K, Ke)

print("Solving equation system...")

f = np.zeros([n_dofs, 1])

bc = np.array([], 'i')
bc_val = np.array([], 'f')

bc, bc_val = cfu.applybc(bdofs, bc, bc_val, id_outer, 30.0)
bc, bc_val = cfu.applybc(bdofs, bc, bc_val, id_hole1, 300.0)
bc, bc_val = cfu.applybc(bdofs, bc, bc_val, id_hole2, 400.0)

a, r = cfc.solveq(K, f, bc, bc_val)

# ---- Compute element forces -----------------------------------------------

print("Computing element forces...")

ed = cfc.extract_eldisp(edof, a)

for i in range(np.shape(ex)[0]):
    if mesh.el_type == 2:
        es, et = cfc.flw2ts(ex[i, :], ey[i, :], D, ed[i, :])
    elif mesh.el_type == 3:
        es, et, eci = cfc.flw2i4s(ex[i, :], ey[i, :], ep, D, ed[i, :])
    elif mesh.el_type == 16:
        es, et, eci = cfc.flw2i8s(ex[i, :], ey[i, :], ep, D, ed[i, :])
    else:
        print("Element type not supported.")

    # Do something with es, et, eci here.

# ---- Visualise mesh -------------------------------------------------------

# Hold left mouse button to pan.
# Hold right mouse button to zoom.

# Draw the geometry. Note that surfaces and volumes are not drawn at all by
# this function.
cfv.draw_geometry(g)

# New figure window
cfv.figure()

# Draw the mesh.
cfv.draw_mesh(
    coords=coords,
    edof=edof,
    dofs_per_node=mesh.dofs_per_node,
    el_type=mesh.el_type,
    filled=True,
    title="Example 01"
)

cfv.figure()
cfv.draw_nodal_values_shaded(a, coords, edof, title="Temperature")
cfv.colorbar()

cfv.figure()
cfv.draw_nodal_values_contourf(a, coords, edof, title="Temperature",
                               dofs_per_node=mesh.dofs_per_node,
                               el_type=mesh.el_type, draw_elements=True)
cfv.colorbar()

cfv.figure()
cfv.draw_nodal_values_contour(a, coords, edof)
cfv.colorbar()

# cfv.addText("This is a Text", pos=(1, -0.3), angle=45)  # Adds a text in world space

# ourLabel = cfv.label("This is a Label", pos=(100,200), angle=-45)  # Adds a label in the screen space
# ourLabel.text = "Label, changed."  # We can change the attributes of labels and texts, such as color, text, and position.
# ourLabel.textColor = 'r'  # Make it red. (1,0,0) would also have worked.
# ourLabel.position = (20,30)

# Enter main loop:
cfv.showAndWait()
[ "calfem.vis_mpl.figure", "calfem.vis_mpl.draw_nodal_values_contourf", "calfem.vis_mpl.draw_nodal_values_contour", "calfem.vis_mpl.draw_mesh", "calfem.core.coordxtr", "numpy.shape", "calfem.core.flw2i8s", "calfem.core.solveq", "calfem.geometry.Geometry", "calfem.vis_mpl.draw_geometry", "calfem.mesh.GmshMesh", "calfem.core.assem", "calfem.utils.applybc", "calfem.core.flw2i4s", "numpy.size", "calfem.vis_mpl.draw_nodal_values_shaded", "calfem.core.extract_eldisp", "calfem.core.flw2i8e", "calfem.core.flw2ts", "numpy.matrix", "calfem.vis_mpl.colorbar", "numpy.zeros", "calfem.core.flw2te", "numpy.array", "calfem.core.flw2i4e", "calfem.utils.enableLogging", "calfem.vis_mpl.showAndWait" ]
[((400, 419), 'calfem.utils.enableLogging', 'cfu.enableLogging', ([], {}), '()\n', (417, 419), True, 'import calfem.utils as cfu\n'), ((590, 625), 'numpy.matrix', 'np.matrix', (['[[kx1, 0.0], [0.0, ky1]]'], {}), '([[kx1, 0.0], [0.0, ky1]])\n', (599, 625), True, 'import numpy as np\n'), ((718, 732), 'calfem.geometry.Geometry', 'cfg.Geometry', ([], {}), '()\n', (730, 732), True, 'import calfem.geometry as cfg\n'), ((1451, 1466), 'calfem.mesh.GmshMesh', 'cfm.GmshMesh', (['g'], {}), '(g)\n', (1463, 1466), True, 'import calfem.mesh as cfm\n'), ((1806, 1819), 'numpy.size', 'np.size', (['dofs'], {}), '(dofs)\n', (1813, 1819), True, 'import numpy as np\n'), ((1829, 1861), 'calfem.core.coordxtr', 'cfc.coordxtr', (['edof', 'coords', 'dofs'], {}), '(edof, coords, dofs)\n', (1841, 1861), True, 'import calfem.core as cfc\n'), ((1867, 1893), 'numpy.zeros', 'np.zeros', (['[n_dofs, n_dofs]'], {}), '([n_dofs, n_dofs])\n', (1875, 1893), True, 'import numpy as np\n'), ((2448, 2469), 'numpy.zeros', 'np.zeros', (['[n_dofs, 1]'], {}), '([n_dofs, 1])\n', (2456, 2469), True, 'import numpy as np\n'), ((2476, 2493), 'numpy.array', 'np.array', (['[]', '"""i"""'], {}), "([], 'i')\n", (2484, 2493), True, 'import numpy as np\n'), ((2503, 2520), 'numpy.array', 'np.array', (['[]', '"""f"""'], {}), "([], 'f')\n", (2511, 2520), True, 'import numpy as np\n'), ((2535, 2581), 'calfem.utils.applybc', 'cfu.applybc', (['bdofs', 'bc', 'bc_val', 'id_outer', '(30.0)'], {}), '(bdofs, bc, bc_val, id_outer, 30.0)\n', (2546, 2581), True, 'import calfem.utils as cfu\n'), ((2595, 2642), 'calfem.utils.applybc', 'cfu.applybc', (['bdofs', 'bc', 'bc_val', 'id_hole1', '(300.0)'], {}), '(bdofs, bc, bc_val, id_hole1, 300.0)\n', (2606, 2642), True, 'import calfem.utils as cfu\n'), ((2656, 2703), 'calfem.utils.applybc', 'cfu.applybc', (['bdofs', 'bc', 'bc_val', 'id_hole2', '(400.0)'], {}), '(bdofs, bc, bc_val, id_hole2, 400.0)\n', (2667, 2703), True, 'import calfem.utils as cfu\n'), ((2712, 2740), 'calfem.core.solveq', 'cfc.solveq', (['K', 'f', 'bc', 'bc_val'], {}), '(K, f, bc, bc_val)\n', (2722, 2740), True, 'import calfem.core as cfc\n'), ((2864, 2891), 'calfem.core.extract_eldisp', 'cfc.extract_eldisp', (['edof', 'a'], {}), '(edof, a)\n', (2882, 2891), True, 'import calfem.core as cfc\n'), ((3553, 3573), 'calfem.vis_mpl.draw_geometry', 'cfv.draw_geometry', (['g'], {}), '(g)\n', (3570, 3573), True, 'import calfem.vis_mpl as cfv\n'), ((3596, 3608), 'calfem.vis_mpl.figure', 'cfv.figure', ([], {}), '()\n', (3606, 3608), True, 'import calfem.vis_mpl as cfv\n'), ((3628, 3760), 'calfem.vis_mpl.draw_mesh', 'cfv.draw_mesh', ([], {'coords': 'coords', 'edof': 'edof', 'dofs_per_node': 'mesh.dofs_per_node', 'el_type': 'mesh.el_type', 'filled': '(True)', 'title': '"""Example 01"""'}), "(coords=coords, edof=edof, dofs_per_node=mesh.dofs_per_node,\n el_type=mesh.el_type, filled=True, title='Example 01')\n", (3641, 3760), True, 'import calfem.vis_mpl as cfv\n'), ((3784, 3796), 'calfem.vis_mpl.figure', 'cfv.figure', ([], {}), '()\n', (3794, 3796), True, 'import calfem.vis_mpl as cfv\n'), ((3797, 3863), 'calfem.vis_mpl.draw_nodal_values_shaded', 'cfv.draw_nodal_values_shaded', (['a', 'coords', 'edof'], {'title': '"""Temperature"""'}), "(a, coords, edof, title='Temperature')\n", (3825, 3863), True, 'import calfem.vis_mpl as cfv\n'), ((3864, 3878), 'calfem.vis_mpl.colorbar', 'cfv.colorbar', ([], {}), '()\n', (3876, 3878), True, 'import calfem.vis_mpl as cfv\n'), ((3880, 3892), 'calfem.vis_mpl.figure', 'cfv.figure', ([], {}), '()\n', (3890, 3892), True, 'import calfem.vis_mpl as cfv\n'), ((3893, 4041), 'calfem.vis_mpl.draw_nodal_values_contourf', 'cfv.draw_nodal_values_contourf', (['a', 'coords', 'edof'], {'title': '"""Temperature"""', 'dofs_per_node': 'mesh.dofs_per_node', 'el_type': 'mesh.el_type', 'draw_elements': '(True)'}), "(a, coords, edof, title='Temperature',\n dofs_per_node=mesh.dofs_per_node, el_type=mesh.el_type, draw_elements=True)\n", (3923, 4041), True, 'import calfem.vis_mpl as cfv\n'), ((4038, 4052), 'calfem.vis_mpl.colorbar', 'cfv.colorbar', ([], {}), '()\n', (4050, 4052), True, 'import calfem.vis_mpl as cfv\n'), ((4054, 4066), 'calfem.vis_mpl.figure', 'cfv.figure', ([], {}), '()\n', (4064, 4066), True, 'import calfem.vis_mpl as cfv\n'), ((4067, 4113), 'calfem.vis_mpl.draw_nodal_values_contour', 'cfv.draw_nodal_values_contour', (['a', 'coords', 'edof'], {}), '(a, coords, edof)\n', (4096, 4113), True, 'import calfem.vis_mpl as cfv\n'), ((4114, 4128), 'calfem.vis_mpl.colorbar', 'cfv.colorbar', ([], {}), '()\n', (4126, 4128), True, 'import calfem.vis_mpl as cfv\n'), ((4565, 4582), 'calfem.vis_mpl.showAndWait', 'cfv.showAndWait', ([], {}), '()\n', (4580, 4582), True, 'import calfem.vis_mpl as cfv\n'), ((2380, 2405), 'calfem.core.assem', 'cfc.assem', (['el_topo', 'K', 'Ke'], {}), '(el_topo, K, Ke)\n', (2389, 2405), True, 'import calfem.core as cfc\n'), ((2152, 2179), 'calfem.core.flw2te', 'cfc.flw2te', (['elx', 'ely', 'ep', 'D'], {}), '(elx, ely, ep, D)\n', (2162, 2179), True, 'import calfem.core as cfc\n'), ((2908, 2920), 'numpy.shape', 'np.shape', (['ex'], {}), '(ex)\n', (2916, 2920), True, 'import numpy as np\n'), ((2969, 3012), 'calfem.core.flw2ts', 'cfc.flw2ts', (['ex[i, :]', 'ey[i, :]', 'D', 'ed[i, :]'], {}), '(ex[i, :], ey[i, :], D, ed[i, :])\n', (2979, 3012), True, 'import calfem.core as cfc\n'), ((2221, 2249), 'calfem.core.flw2i4e', 'cfc.flw2i4e', (['elx', 'ely', 'ep', 'D'], {}), '(elx, ely, ep, D)\n', (2232, 2249), True, 'import calfem.core as cfc\n'), ((3063, 3111), 'calfem.core.flw2i4s', 'cfc.flw2i4s', (['ex[i, :]', 'ey[i, :]', 'ep', 'D', 'ed[i, :]'], {}), '(ex[i, :], ey[i, :], ep, D, ed[i, :])\n', (3074, 3111), True, 'import calfem.core as cfc\n'), ((2292, 2320), 'calfem.core.flw2i8e', 'cfc.flw2i8e', (['elx', 'ely', 'ep', 'D'], {}), '(elx, ely, ep, D)\n', (2303, 2320), True, 'import calfem.core as cfc\n'), ((3163, 3211), 'calfem.core.flw2i8s', 'cfc.flw2i8s', (['ex[i, :]', 'ey[i, :]', 'ep', 'D', 'ed[i, :]'], {}), '(ex[i, :], ey[i, :], ep, D, ed[i, :])\n', (3174, 3211), True, 'import calfem.core as cfc\n')]
# Generated by Django 3.0.8 on 2020-07-08 22:35

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('full_name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=50, unique=True)),
                ('user_type', models.CharField(choices=[('client', 'Client'), ('dev', 'Developer')], max_length=10)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='client', to='main_app.Profile')),
                ('dev', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dev', to='main_app.Profile')),
            ],
        ),
        migrations.CreateModel(
            name='Sprint',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main_app.Project')),
            ],
        ),
        migrations.CreateModel(
            name='Wireframe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='')),
                ('description', models.TextField(max_length=200)),
                ('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main_app.Page')),
            ],
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(max_length=200)),
                ('completed', models.BooleanField(default=False)),
                ('sprint', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main_app.Sprint')),
            ],
        ),
        migrations.AddField(
            model_name='page',
            name='project',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main_app.Project'),
        ),
    ]
[ "django.db.models.TextField", "django.db.migrations.swappable_dependency", "django.db.models.ForeignKey", "django.db.models.CharField", "django.db.models.BooleanField", "django.db.models.EmailField", "django.db.models.AutoField", "django.db.models.ImageField" ]
[((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((3131, 3221), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""main_app.Project"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'main_app.Project')\n", (3148, 3221), False, 'from django.db import migrations, models\n'), ((433, 526), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (449, 526), False, 'from django.db import migrations, models\n'), ((551, 582), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (567, 582), False, 'from django.db import migrations, models\n'), ((715, 808), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (731, 808), False, 'from django.db import migrations, models\n'), ((837, 868), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (853, 868), False, 'from django.db import migrations, models\n'), ((897, 942), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(50)', 'unique': '(True)'}), '(max_length=50, unique=True)\n', (914, 942), False, 'from django.db import migrations, models\n'), ((975, 1064), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('client', 'Client'), ('dev', 'Developer')]", 'max_length': '(10)'}), "(choices=[('client', 'Client'), ('dev', 'Developer')],\n max_length=10)\n", (991, 1064), False, 'from django.db import migrations, models\n'), ((1088, 1184), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1105, 1184), False, 'from django.db import migrations, models\n'), ((1312, 1405), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1328, 1405), False, 'from django.db import migrations, models\n'), ((1430, 1461), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1446, 1461), False, 'from django.db import migrations, models\n'), ((1491, 1604), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""client"""', 'to': '"""main_app.Profile"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='client', to='main_app.Profile')\n", (1508, 1604), False, 'from django.db import migrations, models\n'), ((1626, 1736), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""dev"""', 'to': '"""main_app.Profile"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='dev', to='main_app.Profile')\n", (1643, 1736), False, 'from django.db import migrations, models\n'), ((1863, 1956), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1879, 1956), False, 'from django.db import migrations, models\n'), ((1983, 2073), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""main_app.Project"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'main_app.Project')\n", (2000, 2073), False, 'from django.db import migrations, models\n'), ((2203, 2296), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2219, 2296), False, 'from django.db import migrations, models\n'), ((2321, 2352), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '""""""'}), "(upload_to='')\n", (2338, 2352), False, 'from django.db import migrations, models\n'), ((2387, 2419), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2403, 2419), False, 'from django.db import migrations, models\n'), ((2447, 2534), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""main_app.Page"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'main_app.Page')\n", (2464, 2534), False, 'from django.db import migrations, models\n'), ((2659, 2752), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2675, 2752), False, 'from django.db import migrations, models\n'), ((2783, 2815), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2799, 2815), False, 'from django.db import migrations, models\n'), ((2848, 2882), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2867, 2882), False, 'from django.db import migrations, models\n'), ((2912, 3001), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""main_app.Sprint"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'main_app.Sprint')\n", (2929, 3001), False, 'from django.db import migrations, models\n')]
#%%
import time
import math
import sys
import argparse
import cPickle as pickle
import codecs

import numpy as np
from chainer import cuda, Variable, FunctionSet
import chainer.functions as F
from CharRNN import CharRNN, make_initial_state

sys.stdout = codecs.getwriter('utf_8')(sys.stdout)

#%% arguments
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True)
parser.add_argument('--vocabulary', type=str, required=True)
parser.add_argument('--seed', type=int, default=123)
parser.add_argument('--sample', type=int, default=1)
parser.add_argument('--primetext', type=str, default='')
parser.add_argument('--length', type=int, default=2000)
parser.add_argument('--gpu', type=int, default=-1)
args = parser.parse_args()

np.random.seed(args.seed)

# load vocabulary
vocab = pickle.load(open(args.vocabulary, 'rb'))
ivocab = {}
for c, i in vocab.items():
    ivocab[i] = c

# load model
model = pickle.load(open(args.model, 'rb'))
n_units = model.embed.W.data.shape[1]
if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()

# initialize generator
state = make_initial_state(n_units, batchsize=1, train=False)
if args.gpu >= 0:
    for key, value in state.items():
        value.data = cuda.to_gpu(value.data)

prev_char = np.array([0], dtype=np.int32)
if args.gpu >= 0:
    prev_char = cuda.to_gpu(prev_char)

if len(args.primetext) > 0:
    for i in unicode(args.primetext, 'utf-8'):
        sys.stdout.write(i)
        prev_char = np.ones((1,), dtype=np.int32) * vocab[i]
        if args.gpu >= 0:
            prev_char = cuda.to_gpu(prev_char)
        state, prob = model.forward_one_step(prev_char, prev_char, state, train=False)

for i in xrange(args.length):
    state, prob = model.forward_one_step(prev_char, prev_char, state, train=False)

    if args.sample > 0:
        probability = cuda.to_cpu(prob.data)[0].astype(np.float64)
        probability /= np.sum(probability)
        index = np.random.choice(range(len(probability)), p=probability)
    else:
        index = np.argmax(cuda.to_cpu(prob.data))

    sys.stdout.write(ivocab[index])

    prev_char = np.array([index], dtype=np.int32)
    if args.gpu >= 0:
        prev_char = cuda.to_gpu(prev_char)

print
[ "sys.stdout.write", "numpy.random.seed", "argparse.ArgumentParser", "numpy.sum", "chainer.cuda.get_device", "codecs.getwriter", "numpy.ones", "chainer.cuda.to_cpu", "numpy.array", "chainer.cuda.to_gpu", "CharRNN.make_initial_state" ]
[((316, 341), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (339, 341), False, 'import argparse\n'), ((801, 826), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (815, 826), True, 'import numpy as np\n'), ((1154, 1207), 'CharRNN.make_initial_state', 'make_initial_state', (['n_units'], {'batchsize': '(1)', 'train': '(False)'}), '(n_units, batchsize=1, train=False)\n', (1172, 1207), False, 'from CharRNN import CharRNN, make_initial_state\n'), ((1321, 1350), 'numpy.array', 'np.array', (['[0]'], {'dtype': 'np.int32'}), '([0], dtype=np.int32)\n', (1329, 1350), True, 'import numpy as np\n'), ((254, 279), 'codecs.getwriter', 'codecs.getwriter', (['"""utf_8"""'], {}), "('utf_8')\n", (270, 279), False, 'import codecs\n'), ((1385, 1407), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['prev_char'], {}), '(prev_char)\n', (1396, 1407), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((2120, 2151), 'sys.stdout.write', 'sys.stdout.write', (['ivocab[index]'], {}), '(ivocab[index])\n', (2136, 2151), False, 'import sys\n'), ((2169, 2202), 'numpy.array', 'np.array', (['[index]'], {'dtype': 'np.int32'}), '([index], dtype=np.int32)\n', (2177, 2202), True, 'import numpy as np\n'), ((1284, 1307), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['value.data'], {}), '(value.data)\n', (1295, 1307), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((1492, 1511), 'sys.stdout.write', 'sys.stdout.write', (['i'], {}), '(i)\n', (1508, 1511), False, 'import sys\n'), ((1963, 1982), 'numpy.sum', 'np.sum', (['probability'], {}), '(probability)\n', (1969, 1982), True, 'import numpy as np\n'), ((2245, 2267), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['prev_char'], {}), '(prev_char)\n', (2256, 2267), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((1071, 1096), 'chainer.cuda.get_device', 'cuda.get_device', (['args.gpu'], {}), '(args.gpu)\n', (1086, 1096), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((1532, 1561), 'numpy.ones', 'np.ones', (['(1,)'], {'dtype': 'np.int32'}), '((1,), dtype=np.int32)\n', (1539, 1561), True, 'import numpy as np\n'), ((1623, 1645), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['prev_char'], {}), '(prev_char)\n', (1634, 1645), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((2092, 2114), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['prob.data'], {}), '(prob.data)\n', (2103, 2114), False, 'from chainer import cuda, Variable, FunctionSet\n'), ((1895, 1917), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['prob.data'], {}), '(prob.data)\n', (1906, 1917), False, 'from chainer import cuda, Variable, FunctionSet\n')]
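Since the script above is an argparse command-line tool, it is meant to be run from a shell rather than imported. A hypothetical invocation (all file names here are placeholders, not values taken from this row) would look like: python sample.py --model cv/latest.chainermodel --vocabulary data/vocab.bin --primetext hello --length 500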
# -*- coding: UTF-8 -*-
# Copyright 2014 <NAME>
# Copyright 2015 Modified by <NAME>
# License: BSD (see file COPYING for details)

from __future__ import unicode_literals
from __future__ import print_function

from commondata.utils import Place, PlaceGenerator


class PlaceInEgypt(Place):
    def __unicode__(self):
        return self.name


class Country(PlaceInEgypt):
    value = 0


class Governorate(Country):
    value = 1


class City(Governorate):
    value = 2


class Region(City):
    value = 3


def root():
    p = PlaceGenerator()
    p.install(Country, Governorate, City, Region)
    p.set_args('name_en name_ar zip_code')
    egypt = p.country("Egypt", "مصر")
    from .cairo import populate; populate(p)
    from .giza import populate; populate(p)
    from .alexandria import populate; populate(p)
    from .damietta import populate; populate(p)
    from .port_said import populate; populate(p)
    return egypt
[ "commondata.utils.PlaceGenerator" ]
[((528, 544), 'commondata.utils.PlaceGenerator', 'PlaceGenerator', ([], {}), '()\n', (542, 544), False, 'from commondata.utils import Place, PlaceGenerator\n')]
# from envs.envs_assistive.scratch_itch_envs import ScratchItchPR2MeshEnv, ScratchItchBaxterMeshEnv, ScratchItchSawyerMeshEnv, ScratchItchJacoMeshEnv, ScratchItchStretchMeshEnv, ScratchItchPandaMeshEnv
from envs.envs_assistive.drinking_envs import DrinkingPR2Env, DrinkingBaxterEnv, DrinkingSawyerEnv, DrinkingJacoEnv, DrinkingStretchEnv, DrinkingPandaEnv, DrinkingPR2HumanEnv, DrinkingBaxterHumanEnv, DrinkingSawyerHumanEnv, DrinkingJacoHumanEnv, DrinkingStretchHumanEnv, DrinkingPandaHumanEnv
from envs.envs_assistive.feeding_envs import FeedingPR2Env, FeedingBaxterEnv, FeedingSawyerEnv, FeedingJacoEnv, FeedingStretchEnv, FeedingPandaEnv, FeedingPR2HumanEnv, FeedingBaxterHumanEnv, FeedingSawyerHumanEnv, FeedingJacoHumanEnv, FeedingStretchHumanEnv, FeedingPandaHumanEnv
from envs.envs_assistive.feeding_envs import FeedingPR2MeshEnv, FeedingBaxterMeshEnv, FeedingSawyerMeshEnv, FeedingJacoMeshEnv, FeedingStretchMeshEnv, FeedingPandaMeshEnv
from envs.envs_assistive.human_testing import HumanTestingEnv
# from envs.envs_assistive.smplx_testing import SMPLXTestingEnv

from gym.envs.registration import register

tasks = ['ScratchItch', 'BedBathing', 'Feeding', 'Drinking', 'Dressing', 'ArmManipulation']
robots = ['PR2', 'Jaco', 'Baxter', 'Sawyer', 'Stretch', 'Panda']

for task in tasks:
    for robot in robots:
        register(
            id='%s%s-v1' % (task, robot),
            entry_point='assistive_gym.envs:%s%sEnv' % (task, robot),
            max_episode_steps=200,
        )

for task in ['ScratchItch', 'Feeding']:
    for robot in robots:
        register(
            id='%s%sMesh-v1' % (task, robot),
            entry_point='assistive_gym.envs:%s%sMeshEnv' % (task, robot),
            max_episode_steps=200,
        )

register(
    id='HumanTesting-v1',
    entry_point='assistive_gym.envs:HumanTestingEnv',
    max_episode_steps=200,
)

register(
    id='SMPLXTesting-v1',
    entry_point='assistive_gym.envs:SMPLXTestingEnv',
    max_episode_steps=200,
)
[ "gym.envs.registration.register" ]
[((1744, 1852), 'gym.envs.registration.register', 'register', ([], {'id': '"""HumanTesting-v1"""', 'entry_point': '"""assistive_gym.envs:HumanTestingEnv"""', 'max_episode_steps': '(200)'}), "(id='HumanTesting-v1', entry_point=\n 'assistive_gym.envs:HumanTestingEnv', max_episode_steps=200)\n", (1752, 1852), False, 'from gym.envs.registration import register\n'), ((1864, 1972), 'gym.envs.registration.register', 'register', ([], {'id': '"""SMPLXTesting-v1"""', 'entry_point': '"""assistive_gym.envs:SMPLXTestingEnv"""', 'max_episode_steps': '(200)'}), "(id='SMPLXTesting-v1', entry_point=\n 'assistive_gym.envs:SMPLXTestingEnv', max_episode_steps=200)\n", (1872, 1972), False, 'from gym.envs.registration import register\n'), ((1327, 1451), 'gym.envs.registration.register', 'register', ([], {'id': "('%s%s-v1' % (task, robot))", 'entry_point': "('assistive_gym.envs:%s%sEnv' % (task, robot))", 'max_episode_steps': '(200)'}), "(id='%s%s-v1' % (task, robot), entry_point=\n 'assistive_gym.envs:%s%sEnv' % (task, robot), max_episode_steps=200)\n", (1335, 1451), False, 'from gym.envs.registration import register\n'), ((1568, 1700), 'gym.envs.registration.register', 'register', ([], {'id': "('%s%sMesh-v1' % (task, robot))", 'entry_point': "('assistive_gym.envs:%s%sMeshEnv' % (task, robot))", 'max_episode_steps': '(200)'}), "(id='%s%sMesh-v1' % (task, robot), entry_point=\n 'assistive_gym.envs:%s%sMeshEnv' % (task, robot), max_episode_steps=200)\n", (1576, 1700), False, 'from gym.envs.registration import register\n')]
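Once the register() calls above have run (which assumes the assistive_gym package is installed so the entry points resolve, an assumption about the surrounding project rather than something shown in this row), any generated id can be instantiated through Gym's standard factory. For example, the Feeding/Jaco combination produced by the first loop:

# Hypothetical usage of one of the ids registered above, via Gym's standard API.
import gym

env = gym.make('FeedingJaco-v1')  # id built by the task/robot loop above
observation = env.reset()
observation, reward, done, info = env.step(env.action_space.sample())
env.close()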
# coding=utf-8
#
# Copyright (c) 2013-2015 First Flamingo Enterprise B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# TAManager.py
# firstflamingo/treinenaapje
#
# Created by <NAME> on 21-Feb-13.
#

import webapp2
import logging

from datetime import timedelta

from google.appengine.ext import db
from google.appengine.api import memcache, taskqueue

from ffe import config
from ffe.gae import issue_tasks, task_name
from ffe.ffe_time import now_utc

from TSStation import TSStation
from TASeries import TASeries
from TAMission import TAMission


class TARequestHandler(webapp2.RequestHandler):
    _instruction = None

    def get(self):
        if self.instruction == 'create_avt_tasks':
            self.create_and_issue_tasks(TSStation, config.STATION_AVT_DURATION, 'avt')
        elif self.instruction == 'update_stations':
            TSStation.update_stations()
        elif self.instruction == 'new_day':
            self.create_and_issue_tasks(TASeries, config.SERIES_CONSOLIDATION_DURATION, 'new_day')
        elif self.instruction == 'remove_orphans':
            self.remove_orphans()

    @staticmethod
    def create_and_issue_tasks(target_class, period, instruction):
        tasks = []
        ids = target_class.active_ids()
        if ids:
            issue_time = now_utc() + timedelta(seconds=config.WAIT_BEFORE_FIRST_TASK)
            period *= 60.0
            eta_delta = period / len(ids)
            for identifier in ids:
                url = '%s/%s' % (target_class.agent_url, identifier)
                logging.info('Create task for %s at %s UTC' % (url, issue_time.strftime('%H:%M:%S')))
                task = taskqueue.Task(name=task_name(issue_time, instruction),
                                      url=url,
                                      params={'inst': instruction},
                                      eta=issue_time)
                tasks.append(task)
                issue_time += timedelta(seconds=eta_delta)
            issue_tasks(tasks)

    @staticmethod
    def remove_orphans():
        mission_keys = db.Query(TAMission, keys_only=True).filter('series_id =', 'orphan').fetch(1000)
        mission_ids = []
        for key in mission_keys:
            mission_ids.append(key.name())
        logging.info('Remove %d orphan missions' % len(mission_keys))
        memcache.delete_multi(mission_ids, namespace='TAMission')
        db.delete(mission_keys)

    @property
    def instruction(self):
        if self._instruction is None:
            comps = self.request.path.split('/')
            if len(comps) == 3:
                self._instruction = comps[2]
        return self._instruction


# WSGI Application
URL_SCHEMA = [('/TAManager.*', TARequestHandler)]
app = webapp2.WSGIApplication(URL_SCHEMA, debug=True)
[ "TSStation.TSStation.update_stations", "ffe.ffe_time.now_utc", "google.appengine.ext.db.Query", "ffe.gae.task_name", "datetime.timedelta", "google.appengine.api.memcache.delete_multi", "webapp2.WSGIApplication", "ffe.gae.issue_tasks", "google.appengine.ext.db.delete" ]
[((3342, 3389), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (['URL_SCHEMA'], {'debug': '(True)'}), '(URL_SCHEMA, debug=True)\n', (3365, 3389), False, 'import webapp2\n'), ((2589, 2607), 'ffe.gae.issue_tasks', 'issue_tasks', (['tasks'], {}), '(tasks)\n', (2600, 2607), False, 'from ffe.gae import issue_tasks, task_name\n'), ((2935, 2992), 'google.appengine.api.memcache.delete_multi', 'memcache.delete_multi', (['mission_ids'], {'namespace': '"""TAMission"""'}), "(mission_ids, namespace='TAMission')\n", (2956, 2992), False, 'from google.appengine.api import memcache, taskqueue\n'), ((3001, 3024), 'google.appengine.ext.db.delete', 'db.delete', (['mission_keys'], {}), '(mission_keys)\n', (3010, 3024), False, 'from google.appengine.ext import db\n'), ((1460, 1487), 'TSStation.TSStation.update_stations', 'TSStation.update_stations', ([], {}), '()\n', (1485, 1487), False, 'from TSStation import TSStation\n'), ((1904, 1913), 'ffe.ffe_time.now_utc', 'now_utc', ([], {}), '()\n', (1911, 1913), False, 'from ffe.ffe_time import now_utc\n'), ((1916, 1964), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'config.WAIT_BEFORE_FIRST_TASK'}), '(seconds=config.WAIT_BEFORE_FIRST_TASK)\n', (1925, 1964), False, 'from datetime import timedelta\n'), ((2552, 2580), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'eta_delta'}), '(seconds=eta_delta)\n', (2561, 2580), False, 'from datetime import timedelta\n'), ((2283, 2317), 'ffe.gae.task_name', 'task_name', (['issue_time', 'instruction'], {}), '(issue_time, instruction)\n', (2292, 2317), False, 'from ffe.gae import issue_tasks, task_name\n'), ((2676, 2711), 'google.appengine.ext.db.Query', 'db.Query', (['TAMission'], {'keys_only': '(True)'}), '(TAMission, keys_only=True)\n', (2684, 2711), False, 'from google.appengine.ext import db\n')]
#!/usr/bin/env python
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

import os
import sys

import aurora_postgres_monitoring


def lambda_handler(event, context):
    # resolve the configuration from the sources required
    config_sources = [event, os.environ]
    aurora_postgres_monitoring.monitor_cluster(config_sources)
    return 'Finished'


if __name__ == "__main__":
    lambda_handler(sys.argv[0], None)
[ "aurora_postgres_monitoring.monitor_cluster" ]
[((326, 384), 'aurora_postgres_monitoring.monitor_cluster', 'aurora_postgres_monitoring.monitor_cluster', (['config_sources'], {}), '(config_sources)\n', (368, 384), False, 'import aurora_postgres_monitoring\n')]
from celery.schedules import crontab
from django.conf import settings

from mycelery.main import app

# Broker URL: where the task queue lives.
# broker_url = '<queue-software-type>://<server-address>:<port>/<database>'
broker_url = 'redis://dba:123.com@127.0.0.1:6379/15'
# Result backend URL: where task results are stored.
result_backend = 'redis://dba:123.com@127.0.0.1:6379/14'

app.conf.timezone = settings.TIME_ZONE

app.conf.beat_schedule = {
    # List of periodic tasks.
    'pub-article-every-two-minute': {
        'task': 'interval_pub_article',  # the async task to run on this schedule
        # 'schedule': crontab(),  # interval: every minute
        # 'schedule': 120.0,      # interval in seconds (the default unit)
        'schedule': crontab(minute='*/2'),  # interval: every 2 minutes
        # 'args': (16, 16)  # if the task takes fixed arguments, put them in args
    },
}
[ "celery.schedules.crontab" ]
[((584, 605), 'celery.schedules.crontab', 'crontab', ([], {'minute': '"""*/2"""'}), "(minute='*/2')\n", (591, 605), False, 'from celery.schedules import crontab\n')]
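The beat entry above schedules a task purely by its registered name, interval_pub_article; the task body is not part of this configuration file. A sketch of what the matching registration might look like elsewhere in the project (the function body is hypothetical; only the task name comes from the schedule above):

# Hypothetical task definition matching the beat schedule's task name.
from mycelery.main import app

@app.task(name='interval_pub_article')
def interval_pub_article():
    # Publish pending articles; the real logic lives elsewhere in the project.
    pass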
from datetime import datetime
from typing import Literal, TypedDict, List

NotificationType = Literal["primary", "secondary", "success", "warning", "danger", "info"]


class Notification(TypedDict):
    msg_type: NotificationType
    msg_header: str
    msg_content: str
    timestamp: str


def post_notification(
    notifications: List[Notification],
    msg_type: NotificationType,
    msg_header: str,
    msg_content: str
):
    notifications.append(Notification(
        msg_type=msg_type,
        msg_header=msg_header,
        msg_content=msg_content,
        timestamp=datetime.now().isoformat()
    ))
[ "datetime.datetime.now" ]
[((595, 609), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (607, 609), False, 'from datetime import datetime\n')]
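post_notification simply appends a timestamped Notification dict to a caller-owned list, so a usage sketch needs nothing beyond the definitions above (the message text here is made up):

# Usage sketch: accumulate notifications in a plain list.
notifications: List[Notification] = []
post_notification(notifications, "success", "Saved", "Your changes were stored.")
print(notifications[0]["msg_header"])  # -> Saved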
import tensorflow as tf
from tensorflow.keras.initializers import random_normal
from tensorflow.python.keras.utils import data_utils

from .residual_block import Bottleneck, WEIGHTS_HASHES, BASE_WEIGHTS_PATH


class ResNetBackbone(tf.keras.Model):
    def __init__(self, resnet_type):
        resnet_spec = {50: (Bottleneck, [3, 4, 6, 3], [64, 256, 512, 1024, 2048], 'resnet50v2'),
                       101: (Bottleneck, [3, 4, 23, 3], [64, 256, 512, 1024, 2048], 'resnet101v2'),
                       152: (Bottleneck, [3, 8, 36, 3], [64, 256, 512, 1024, 2048], 'resnet152v2')}
        block, layers, channels, name = resnet_spec[resnet_type]
        self.net_name = name
        self.inplanes = 64
        super(ResNetBackbone, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(64, kernel_size=7, strides=2, padding="same", use_bias=False,
                                            kernel_initializer=random_normal(mean=0, stddev=0.001))
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.maxpool = tf.keras.layers.MaxPooling2D(pool_size=3, strides=2, padding="same")
        self.layer1 = self._make_layer(block, 64, blocks=layers[0])
        self.layer2 = self._make_layer(block, 128, blocks=layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, blocks=layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, blocks=layers[3], stride=2)

    def _make_layer(self, block, planes, blocks, stride=1):
        layers = tf.keras.Sequential()
        layers.add(block(self.inplanes, planes, stride))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.add(block(self.inplanes, planes))
        return layers

    def call(self, inputs, **kwargs):
        training = False
        if len(inputs) == 2:
            x, training = inputs
        else:
            x = inputs
        x = self.conv1(x)
        x = self.bn1(x)
        x = tf.nn.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x, training=training)
        x = self.layer2(x, training=training)
        x = self.layer3(x, training=training)
        x = self.layer4(x, training=training)
        return x

    def init_weights(self):
        # Use the resnet spec name ('resnet50v2', ...) rather than the Keras
        # model name so the file name matches the published weight files.
        file_name = self.net_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'
        file_hash = WEIGHTS_HASHES[self.net_name][1]
        weights_path = data_utils.get_file(
            file_name,
            BASE_WEIGHTS_PATH + file_name,
            cache_subdir='models',
            file_hash=file_hash)
        self.load_weights(weights_path)
        print("Initialize resnet from model zoo")
[ "tensorflow.nn.relu", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.BatchNormalization", "tensorflow.python.keras.utils.data_utils.get_file", "tensorflow.keras.Sequential", "tensorflow.keras.initializers.random_normal" ]
[((979, 1015), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (1013, 1015), True, 'import tensorflow as tf\n'), ((1039, 1107), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(3)', 'strides': '(2)', 'padding': '"""same"""'}), "(pool_size=3, strides=2, padding='same')\n", (1067, 1107), True, 'import tensorflow as tf\n'), ((1491, 1512), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {}), '()\n', (1510, 1512), True, 'import tensorflow as tf\n'), ((1955, 1968), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (1965, 1968), True, 'import tensorflow as tf\n'), ((2383, 2493), 'tensorflow.python.keras.utils.data_utils.get_file', 'data_utils.get_file', (['file_name', '(BASE_WEIGHTS_PATH + file_name)'], {'cache_subdir': '"""models"""', 'file_hash': 'file_hash'}), "(file_name, BASE_WEIGHTS_PATH + file_name, cache_subdir=\n 'models', file_hash=file_hash)\n", (2402, 2493), False, 'from tensorflow.python.keras.utils import data_utils\n'), ((923, 958), 'tensorflow.keras.initializers.random_normal', 'random_normal', ([], {'mean': '(0)', 'stddev': '(0.001)'}), '(mean=0, stddev=0.001)\n', (936, 958), False, 'from tensorflow.keras.initializers import random_normal\n')]
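A hedged usage sketch for the backbone above: the 224x224 input size is a conventional choice rather than anything the class requires, and init_weights() is called after a first forward pass so that the variables exist before load_weights() runs:

import tensorflow as tf

# Hypothetical usage: build the 50-layer variant and run a dummy batch.
backbone = ResNetBackbone(50)
features = backbone(tf.zeros((1, 224, 224, 3)))  # single-tensor path of call()
backbone.init_weights()  # fetches the resnet50v2 notop weights
print(features.shape)  # -> (1, 7, 7, 2048)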
import math

from geopandas import GeoDataFrame
from matplotlib import pyplot as plt

from map_poster_creator.geojson import MapGeometry
from map_poster_creator.logs import log_processing


def road_width(speed: int) -> float:
    if speed in range(0, 30):
        return 0.05
    if speed in range(30, 50):
        return 0.1
    if speed in range(50, 90):
        return 0.2
    if speed in range(90, 200):
        return 0.3
    return 0.4


def plot_and_save(
        roads: GeoDataFrame,
        water: GeoDataFrame,
        greens: GeoDataFrame,
        color: dict,
        geometry: MapGeometry,
        path: str,
        dpi: int = 300,
) -> None:
    ax = set_subplot(color)
    plot_water(ax, color, water)
    plot_greens(ax, color, greens)
    plot_roads(ax, color, roads)
    aspect = 1 / math.cos(math.pi / 180 * geometry.center[0])
    ax.set_aspect(aspect)
    ax.set_ylim((geometry.bottom, geometry.top))
    ax.set_xlim((geometry.left, geometry.right))
    plt.axis('off')
    save_image(dpi, path)


@log_processing
def save_image(dpi: int, path: str) -> None:
    plt.savefig(path, bbox_inches='tight', dpi=dpi)


@log_processing
def set_subplot(color: dict) -> plt.subplot:
    plt.clf()
    f, ax = plt.subplots(1, figsize=(19, 19), facecolor=color['facecolor'])
    return ax


@log_processing
def plot_water(ax: plt.subplot, color: dict, water: GeoDataFrame) -> None:
    water.plot(ax=ax, color=color['water'], linewidth=0.1)


@log_processing
def plot_greens(ax: plt.subplot, color: dict, greens: GeoDataFrame) -> None:
    greens.plot(ax=ax, color=color['greens'], linewidth=0.1)


@log_processing
def plot_roads(ax: plt.subplot, color: dict, roads: GeoDataFrame) -> None:
    roads.plot(
        ax=ax,
        color=color['roads'],
        linewidth=[road_width(d) for d in roads.speeds]
    )
[ "matplotlib.pyplot.clf", "matplotlib.pyplot.axis", "math.cos", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig" ]
[((982, 997), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (990, 997), True, 'from matplotlib import pyplot as plt\n'), ((1091, 1138), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'bbox_inches': '"""tight"""', 'dpi': 'dpi'}), "(path, bbox_inches='tight', dpi=dpi)\n", (1102, 1138), True, 'from matplotlib import pyplot as plt\n'), ((1206, 1215), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1213, 1215), True, 'from matplotlib import pyplot as plt\n'), ((1228, 1291), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(19, 19)', 'facecolor': "color['facecolor']"}), "(1, figsize=(19, 19), facecolor=color['facecolor'])\n", (1240, 1291), True, 'from matplotlib import pyplot as plt\n'), ((809, 853), 'math.cos', 'math.cos', (['(math.pi / 180 * geometry.center[0])'], {}), '(math.pi / 180 * geometry.center[0])\n', (817, 853), False, 'import math\n')]
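A sketch of driving plot_and_save (the color keys are exactly the ones the helpers read; the frames, geometry and hex values are placeholders loaded elsewhere):

color = {
    'facecolor': '#0f0f0f',   # figure background
    'water': '#30475e',
    'greens': '#2e4a2e',
    'roads': '#e8e8e8',
}
# roads must expose a `speeds` column -- road_width() turns each speed into a line width
plot_and_save(roads, water, greens, color, geometry, path='poster.png', dpi=300)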
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File    : server.py
# @Author  : edgar.chen
# @Create Date  : 2020/11/12-2:47 PM

import os
import json
import uuid
from werkzeug.wrappers import Response
from werkzeug.exceptions import BadRequest
from traceback import format_exc

from conf import config
from utils.log import logger, init_log
from utils.errors import ServerProcessError, ParameterError
from decorator import timer, expose
from api import TestApi


class Server(object):
    def __init__(self):
        self._pid = os.getpid()
        os.makedirs(config.log_path) if not os.path.exists(config.log_path) else None
        log_name = "{}.{}".format(config.log_name, self._pid)
        init_log(config.log_path, log_name)
        logger.debug('server is running...')

    def _rsp_encode(self, rsp):
        return json.dumps(rsp, separators=(',', ':'))

    def _req_decode(self, req):
        try:
            data = json.loads(req)
            return data
        except Exception:
            raise ParameterError("post data not json string!")

    def _errcode(self, code=0, msg='ok'):
        return dict(errCode=code, errMsg=msg, err_code=code, err_msg=msg)

    def _response(self, response):
        response['version'] = config.version
        encode_response = self._rsp_encode(response)
        return Response(encode_response, mimetype='application/json')

    def _get_query_args(self, data, apply_detail):
        try:
            apply_detail['order_id'] = int(data["order_id"])
        except:
            raise ParameterError('request params not complete or format not right')

    @expose('/test', methods=['POST'])
    @timer
    def api_test(self, request):
        req_id = ''
        order_id = None  # defined up front so the error handlers below can always log them
        try:
            req_id = str(uuid.uuid1())
            request_data = request.get_data()
            if not request_data:
                raise BadRequest()
            logger.debug('req_id: [{}] - request:{}'.format(req_id, request_data))
            # decode the request body into a parameter dict
            data = self._req_decode(request_data)
            # pull out the required request parameters
            apply_detail = dict()
            apply_detail['req_id'] = req_id
            self._get_query_args(data, apply_detail)
            order_id = apply_detail["order_id"]
            # run the actual processing
            res_data = TestApi(req_data=apply_detail).process()
            # build the response
            response = self._errcode(0)
            result = {"order_id": order_id, "uuid": req_id, "data": res_data}
            response.update(result)
            logger.debug('req_id: [%s] - order_id: %s, predict_org, response:%s', req_id, order_id, response)
        except BadRequest:
            logger.error('bad request, request params needed!')
            response = self._errcode(-2, 'bad request, request params needed!')
        except ParameterError as e:
            logger.error(str(e))
            response = self._errcode(-4, str(e))
        except ServerProcessError as e:
            logger.error('req_id: [%s] - apply_id: [%s] except: %s' % (req_id, order_id, str(e)))
            response = self._errcode(-3, str(e))
        except:
            logger.error('req_id: [%s] - apply_id: [%s] except: %s' % (req_id, order_id, format_exc()))
            response = self._errcode(-1, 'server error')
        finally:
            return self._response(response)

g_server = Server()
[ "utils.log.logger.debug", "os.getpid", "os.makedirs", "json.loads", "utils.errors.ParameterError", "werkzeug.wrappers.Response", "werkzeug.exceptions.BadRequest", "utils.log.logger.error", "os.path.exists", "utils.log.init_log", "json.dumps", "uuid.uuid1", "traceback.format_exc", "decorator.expose", "api.TestApi" ]
[((1612, 1645), 'decorator.expose', 'expose', (['"""/test"""'], {'methods': "['POST']"}), "('/test', methods=['POST'])\n", (1618, 1645), False, 'from decorator import timer, expose\n'), ((528, 539), 'os.getpid', 'os.getpid', ([], {}), '()\n', (537, 539), False, 'import os\n'), ((697, 732), 'utils.log.init_log', 'init_log', (['config.log_path', 'log_name'], {}), '(config.log_path, log_name)\n', (705, 732), False, 'from utils.log import logger, init_log\n'), ((741, 777), 'utils.log.logger.debug', 'logger.debug', (['"""server is running..."""'], {}), "('server is running...')\n", (753, 777), False, 'from utils.log import logger, init_log\n'), ((826, 864), 'json.dumps', 'json.dumps', (['rsp'], {'separators': "(',', ':')"}), "(rsp, separators=(',', ':'))\n", (836, 864), False, 'import json\n'), ((1325, 1379), 'werkzeug.wrappers.Response', 'Response', (['encode_response'], {'mimetype': '"""application/json"""'}), "(encode_response, mimetype='application/json')\n", (1333, 1379), False, 'from werkzeug.wrappers import Response\n'), ((549, 577), 'os.makedirs', 'os.makedirs', (['config.log_path'], {}), '(config.log_path)\n', (560, 577), False, 'import os\n'), ((930, 945), 'json.loads', 'json.loads', (['req'], {}), '(req)\n', (940, 945), False, 'import json\n'), ((2528, 2629), 'utils.log.logger.debug', 'logger.debug', (['"""req_id: [%s] - order_id: %s, predict_org, response:%s"""', 'req_id', 'order_id', 'response'], {}), "('req_id: [%s] - order_id: %s, predict_org, response:%s',\n req_id, order_id, response)\n", (2540, 2629), False, 'from utils.log import logger, init_log\n'), ((585, 616), 'os.path.exists', 'os.path.exists', (['config.log_path'], {}), '(config.log_path)\n', (599, 616), False, 'import os\n'), ((1014, 1058), 'utils.errors.ParameterError', 'ParameterError', (['"""post data not json string!"""'], {}), "('post data not json string!')\n", (1028, 1058), False, 'from utils.errors import ServerProcessError, ParameterError\n'), ((1540, 1605), 'utils.errors.ParameterError', 'ParameterError', (['"""request params not complete or format not right"""'], {}), "('request params not complete or format not right')\n", (1554, 1605), False, 'from utils.errors import ServerProcessError, ParameterError\n'), ((1729, 1741), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1739, 1741), False, 'import uuid\n'), ((1844, 1856), 'werkzeug.exceptions.BadRequest', 'BadRequest', ([], {}), '()\n', (1854, 1856), False, 'from werkzeug.exceptions import BadRequest\n'), ((2666, 2717), 'utils.log.logger.error', 'logger.error', (['"""bad request, request params needed!"""'], {}), "('bad request, request params needed!')\n", (2678, 2717), False, 'from utils.log import logger, init_log\n'), ((2211, 2241), 'api.TestApi', 'TestApi', ([], {'req_data': 'apply_detail'}), '(req_data=apply_detail)\n', (2218, 2241), False, 'from api import TestApi\n'), ((3212, 3224), 'traceback.format_exc', 'format_exc', ([], {}), '()\n', (3222, 3224), False, 'from traceback import format_exc\n')]
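The /test endpoint above expects a JSON body carrying an integer-coercible order_id; a sketch of a client call (the host and port are assumptions -- the WSGI wiring behind expose() is not shown here):

import requests
resp = requests.post('http://127.0.0.1:8000/test', json={'order_id': 123})
print(resp.json())  # e.g. {'errCode': 0, 'errMsg': 'ok', ..., 'order_id': 123, 'uuid': '...'}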
from __future__ import print_function from __future__ import absolute_import import unittest import csv from owmeta_core.context import Context from owmeta_core.command import OWM from owmeta_core.bundle import Bundle from owmeta.worm import Worm from owmeta.cell import Cell from owmeta.neuron import Neuron from owmeta.connection import Connection import rdflib as R import pytest @pytest.mark.inttest @pytest.mark.data_bundle class DataIntegrityTest(unittest.TestCase): """ Integration tests that read from the database and ensure that basic queries have expected answers, as a way to keep data quality high. """ @classmethod def setUpClass(cls): # grab the list of the names of the 302 neurons csvfile = open('tests/neurons.csv', 'r') reader = csv.reader(csvfile, delimiter=';', quotechar='|') # array that holds the names of the 302 neurons at class-level scope cls.neurons = [] for row in reader: if len(row[0]) > 0: # Only saves valid neuron names cls.neurons.append(row[0]) def setUp(self): self.bnd = Bundle('openworm/owmeta-data') self.bnd.initdb() self.conn = self.bnd.connection self.conf = self.conn.conf self.g = self.conf["rdf.graph"] self.context = self.conn(Context)(ident="http://openworm.org/data") self.qctx = self.context.stored def tearDown(self): self.conn.disconnect() def test_correct_neuron_number(self): """ This test verifies that the worm model has exactly 302 neurons. """ # FIXME: Test execution is not properly isolated -- it fails if # test_compare_to_xls fails. Other conditions may cause # it to pass net = self.qctx(Worm).query().get_neuron_network() self.assertEqual(302, net.neuron.count()) def test_correct_muscle_number(self): """ This test verifies that the worm model has exactly 158 muscles. 95 body wall muscles, 37 Pharynx muscles, 26 other muscles See counts on row 3 here: https://docs.google.com/spreadsheets/d/1NDx9LRF_B2phR5w4HlEtxJzxx1ZIPT2gA0ZmNmozjos/edit#gid=1 """ self.assertEqual(158, self.qctx(Worm).query().muscle.count()) def test_INS_26_neuropeptide_neuron_list(self): """ This test verifies that the set of neurons which contain the neuropeptide INS-26 is correct (the list is given below). """ neuronlist = self.qctx(Neuron)() neuronlist.neuropeptide("INS-26") thlist = set(x.name() for x in neuronlist.load()) self.assertEqual({'ASEL', 'ASER', 'ASIL', 'ASIR'}, thlist) def test_bentley_expr_data(self): """ This verifies that the data in Bentley et. al (2016) receptor expression has been incorporated, by checking that one of the novel receptor expression patterns is in the worm. """ va9 = self.qctx(Neuron).query('VA9') self.assertIn('LGC-53', va9.receptors()) def test_unique_neuron_node(self): """ There should one and only one unique RDF node for every neuron. If more than one is present for a given cell name, then our data is inconsistent. If there is not at least one present, then we are missing neurons. """ results = {} for n in self.neurons: # Create a SPARQL query per neuron that looks for all RDF nodes # that have text matching the name of the neuron qres = self.g.query( f""" SELECT distinct ?n WHERE {{ ?n <{Cell.name.link}> {R.Literal(n).n3()} }} LIMIT 5 """) results[n] = (len(qres), [x[0] for x in qres]) # If there is not only one result back, then there is more than one RDF # node. 
        more_than_one = [(x, results[x]) for x in results if results[x][0] > 1]
        less_than_one = [(x, results[x]) for x in results if results[x][0] < 1]
        self.assertEqual(
            0,
            len(more_than_one),
            "Some neurons have more than 1 node: " + "\n".join(
                str(x) for x in more_than_one))
        self.assertEqual(
            0,
            len(less_than_one),
            "Some neurons have no node: " + "\n".join(
                str(x) for x in less_than_one))

    def test_neurons_have_types(self):
        """
        Every Neuron should have a non-blank type
        """
        results = set()
        for n in self.neurons:
            s = f'''SELECT ?v WHERE {{
                ?k <{Cell.name.link}> {R.Literal(n).n3()} .
                ?k <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <{Neuron.rdf_type}> .
                ?k <{Neuron.type.link}> ?v .
            }}'''
            qres = self.g.query(s)
            for x in qres:
                v = x[0]
                if isinstance(v, R.Literal):
                    results.add(n)
        self.assertEqual(len(results), len(self.neurons),
                         "Some neurons are missing a type: {}".format(set(self.neurons) - results))

    def test_neuron_GJ_degree(self):
        """ Get the number of gap junctions from a representation """
        # was 81 -- now returning 44 -- are we sure this is correct?
        self.assertEqual(self.qctx(Neuron).query(name='AVAL').GJ_degree(), 44)

    def test_neuron_Syn_degree(self):
        """ Get the number of chemical synapses from a representation """
        # was 187 -- now returning 105 -- are we sure this is correct?
        self.assertEqual(self.qctx(Neuron).query(name='AVAL').Syn_degree(), 105)

    @unittest.skip("have not yet defined asserts")
    def test_what_nodes_get_type_info(self):
        qres = self.g.query("""SELECT ?o ?p ?s WHERE {{
                ?o <http://openworm.org/entities/SimpleProperty/value> "motor".
                ?o ?p ?s # for that type ?o, get its value ?v
                }} LIMIT 10
                """)
        for row in qres:
            print(row)

    def test_all_cells_have_wormbaseID(self):
        """ This test verifies that every cell has a Wormbase ID. """
        cells = set(self.qctx(Cell)().load())
        for cell in cells:
            assert cell.wormbaseID() is not None

    def test_all_neurons_have_wormbaseID(self):
        """ This test verifies that every neuron has a Wormbase ID. """
        net = self.qctx(Worm).query().get_neuron_network()
        for neuron_object in net.neurons():
            assert neuron_object.wormbaseID() is not None

    def test_all_muscles_have_wormbaseID(self):
        """ This test verifies that every muscle has a Wormbase ID. """
        muscles = self.qctx(Worm).query().muscles()
        for muscle_object in muscles:
            assert muscle_object.wormbaseID() is not None

    def test_all_neurons_are_cells(self):
        """ This test verifies that all Neuron objects are also Cell objects. """
        net = self.qctx(Worm).query().get_neuron_network()
        for neuron_object in net.neurons():
            self.assertIsInstance(neuron_object, Cell)

    def test_all_muscles_are_cells(self):
        """ This test verifies that all Muscle objects are also Cell objects. """
        muscles = self.qctx(Worm).query().muscles()
        for muscle_object in muscles:
            self.assertIsInstance(muscle_object, Cell)

    def test_correct_connections_number(self):
        """ This test verifies that there are exactly 7319 connections. """
        net = self.qctx(Worm).query().get_neuron_network()
        # XXX: The synapses contain some cells that aren't neurons
        self.assertEqual(7319, net.synapses.count())

    def test_number_neuron_to_neuron(self):
        """
        This test verifies that the worm model has exactly 5805 neuron to neuron connections.
        """
        synapse = self.qctx(Connection)()
        synapse.termination('neuron')
        self.qctx(Worm).query().get_neuron_network().synapse(synapse)
        self.assertEqual(5805, synapse.count())

    def test_number_neuron_to_muscle(self):
        """
        This test verifies that the worm model has exactly 1111 neuron to muscle connections.
""" synapse = self.qctx(Connection)() synapse.termination('muscle') self.qctx(Worm).query().get_neuron_network().synapse(synapse) self.assertEqual(1111, synapse.count()) def test_correct_number_unique_neurons(self): """ This test verifies that the worm model has exactly 300 unique neurons making connections. """ synapse = self.qctx(Connection)() pre = self.qctx(Neuron)() synapse.pre_cell(pre) self.qctx(Worm).query().get_neuron_network().synapse(synapse) self.assertEqual(300, pre.count()) def test_unconnected_neurons(self): """ This test verifies that there are exactly 2 unconnected neurons, i.e., CANL and CANR, in the new connectome. """ # In previous tests, there is a check for exactly 302 neurons in total. # There is also a test for exactly 300 unique neurons making connections. # That means it should be enough to check that the set {CANL, CANR} and # the set of neurons making connections are disjoint. neuron = self.qctx(Neuron)() synapse = self.qctx(Connection)() synapse.pre_cell(neuron) self.qctx(Worm).query().get_neuron_network().synapse(synapse) connected_neurons = set() unconnected_neurons = {'CANL', 'CANR'} for name in neuron.name.get(): connected_neurons.add(name) self.assertTrue(connected_neurons.isdisjoint(unconnected_neurons)) def test_neuron_lineage_names(self): """ Neurons should have lineage names in the bundle """ neuron = self.qctx(Neuron)() self.qctx(Worm).query().get_neuron_network().neuron(neuron) for n in neuron.load(): assert set(n.lineageName.get())
[ "unittest.skip", "owmeta_core.bundle.Bundle", "csv.reader", "rdflib.Literal" ]
[((5846, 5891), 'unittest.skip', 'unittest.skip', (['"""have not yet defined asserts"""'], {}), "('have not yet defined asserts')\n", (5859, 5891), False, 'import unittest\n'), ((804, 853), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""";"""', 'quotechar': '"""|"""'}), "(csvfile, delimiter=';', quotechar='|')\n", (814, 853), False, 'import csv\n'), ((1133, 1163), 'owmeta_core.bundle.Bundle', 'Bundle', (['"""openworm/owmeta-data"""'], {}), "('openworm/owmeta-data')\n", (1139, 1163), False, 'from owmeta_core.bundle import Bundle\n'), ((4788, 4800), 'rdflib.Literal', 'R.Literal', (['n'], {}), '(n)\n', (4797, 4800), True, 'import rdflib as R\n'), ((3766, 3778), 'rdflib.Literal', 'R.Literal', (['n'], {}), '(n)\n', (3775, 3778), True, 'import rdflib as R\n')]
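The same bundle queries can be run outside unittest; a sketch that mirrors setUp/tearDown above (it assumes the openworm/owmeta-data bundle has already been fetched locally):

from owmeta_core.bundle import Bundle
from owmeta_core.context import Context
from owmeta.worm import Worm

bnd = Bundle('openworm/owmeta-data')
bnd.initdb()
conn = bnd.connection
qctx = conn(Context)(ident='http://openworm.org/data').stored
net = qctx(Worm).query().get_neuron_network()
print(net.neuron.count())  # 302, per test_correct_neuron_number
conn.disconnect()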
from django.urls import path from feedzero.feeds import views app_name = "feeds" urlpatterns = [ path("", views.InboxView.as_view(), name="inbox"), path("feeds/add/", views.FeedAddView.as_view(), name="add"), ]
[ "feedzero.feeds.views.FeedAddView.as_view", "feedzero.feeds.views.InboxView.as_view" ]
[((113, 138), 'feedzero.feeds.views.InboxView.as_view', 'views.InboxView.as_view', ([], {}), '()\n', (136, 138), False, 'from feedzero.feeds import views\n'), ((178, 205), 'feedzero.feeds.views.FeedAddView.as_view', 'views.FeedAddView.as_view', ([], {}), '()\n', (203, 205), False, 'from feedzero.feeds import views\n')]
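Because app_name is set, callers reverse these routes through the "feeds" namespace (standard Django behavior, assuming this urlconf is included in the project urls):

from django.urls import reverse
reverse('feeds:inbox')  # -> '' relative to wherever this urlconf is included
reverse('feeds:add')    # -> 'feeds/add/'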
from django.urls import path from django.views.decorators.csrf import csrf_exempt from . import views urlpatterns = [ path('yougotmsg/', csrf_exempt(views.telegram_data), name='message_from_telegram') ]
[ "django.views.decorators.csrf.csrf_exempt" ]
[((143, 175), 'django.views.decorators.csrf.csrf_exempt', 'csrf_exempt', (['views.telegram_data'], {}), '(views.telegram_data)\n', (154, 175), False, 'from django.views.decorators.csrf import csrf_exempt\n')]
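views.telegram_data is not shown here; csrf_exempt is needed because Telegram's servers POST updates without a CSRF token. A plausible minimal handler (everything below is an assumption about its shape, not the project's actual code):

import json
from django.http import JsonResponse

def telegram_data(request):
    update = json.loads(request.body)  # Telegram POSTs a JSON Update object
    # ... handle update.get('message') here ...
    return JsonResponse({'ok': True})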
import numpy as np


def dichoto(function, p0, max_depth=10, eps=1e-10):
    """Bisection (dichotomy) root search on the bracket p0 = (a, b).

    Assumes function(a) < 0 < function(b); stops early once |function(c)| <= eps.
    """
    a, b = p0
    i = 0
    while i < max_depth:
        c = 0.5 * (a + b)
        if np.abs(function(c)) <= eps:
            return c
        if function(c) < 0:
            a = c
        if function(c) > 0:
            b = c
        i = i + 1
    return c


# Find the golden ratio
f = lambda x: x**2 - 1 - x
x = dichoto(f, (1, 2))
print("The golden ratio is : {}".format(x))

# Find the solution to tan(x) = 1
f = lambda x: np.tan(x) - 1
x = dichoto(f, (0.5, 3.15/4))
print("The solution to tan(x)=1 is : {}".format(x))

# Find the root of (x-2)^2
f = lambda x: (x - 2)**2
x = dichoto(f, (1, 3))
print("The solution to (x-2)^2=0 is : {}".format(x))
[ "numpy.tan" ]
[((472, 481), 'numpy.tan', 'np.tan', (['x'], {}), '(x)\n', (478, 481), True, 'import numpy as np\n')]
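Each pass halves the bracket, so after max_depth iterations the midpoint is within (b - a) / 2**(max_depth + 1) of the sign change -- about 5e-4 on a unit interval with the default max_depth=10. The golden-ratio run above therefore stops on the depth limit (|f(c)| never reaches the 1e-10 eps there), while the (x-2)^2 run happens to land exactly on the root. Tightening is just a matter of depth, e.g.:

f = lambda x: x**2 - 1 - x
x = dichoto(f, (1, 2), max_depth=60)  # bracket width ~ (2 - 1) / 2**60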
# %%
import set_base_path
import numpy as np
import pandas as pd
from IPython.display import display
import plotly.figure_factory as ff
import plotly.graph_objects as go
from enum import Enum, auto
from typing import List, Sequence, Tuple
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
import warnings
from src.constants import RAW_DATA_PATH, INTERIM_DATA_PATH
import src.utils.preprocess as pp
import src.utils.eda as eu
import src.utils.common as cmn

# Ignore warnings
warnings.filterwarnings('ignore')
# %% [markdown]
'''
## Data load
'''
# %%
# Load Data
metadata: pd.DataFrame = pd.read_csv(RAW_DATA_PATH/'metadata.zip')
# %% [markdown]
'''
## Pandas settings
'''
# %%
pd.options.display.max_columns = None
pd.options.display.max_rows = 500
# pd.options.display.width = None
# pd.options.display.max_colwidth = 100
pd.options.display.precision = 3
# %% [markdown]
'''
### Collect less frequent categories
'''
# TODO: Collect less frequent categories
# %%
# metadata.head().style.set_properties(**{'text-align': 'left'}).hide_index()
metadata.head(1).style.hide_index()
# %%
metadata.shape
# %%
metadata.isnull().sum()
# %%
eu.print_null_percents(metadata)
# %%
eu.count_of_uniques(metadata, display_res=True)
# %%
metadata.drop("mag_id", axis=1, inplace=True)
# %%
cols_with_nulls = eu.print_null_percents(metadata)
# %%
replace_value_map = {c: "NOT_PROVIDED" for c in cols_with_nulls.index}
# %%
for col, value in replace_value_map.items():
    metadata[col] = metadata[col].fillna(value)
# %%
eu.print_null_percents(metadata)
# %%
metadata.to_csv(INTERIM_DATA_PATH/'metadata.zip', index=False, quoting=1, compression='zip')
# %%
[ "pandas.read_csv", "src.utils.eda.print_null_percents", "src.utils.eda.count_of_uniques", "warnings.filterwarnings" ]
[((643, 676), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (666, 676), False, 'import warnings\n'), ((781, 824), 'pandas.read_csv', 'pd.read_csv', (["(RAW_DATA_PATH / 'metadata.zip')"], {}), "(RAW_DATA_PATH / 'metadata.zip')\n", (792, 824), True, 'import pandas as pd\n'), ((1334, 1366), 'src.utils.eda.print_null_percents', 'eu.print_null_percents', (['metadata'], {}), '(metadata)\n', (1356, 1366), True, 'import src.utils.eda as eu\n'), ((1372, 1419), 'src.utils.eda.count_of_uniques', 'eu.count_of_uniques', (['metadata'], {'display_res': '(True)'}), '(metadata, display_res=True)\n', (1391, 1419), True, 'import src.utils.eda as eu\n'), ((1496, 1528), 'src.utils.eda.print_null_percents', 'eu.print_null_percents', (['metadata'], {}), '(metadata)\n', (1518, 1528), True, 'import src.utils.eda as eu\n'), ((1709, 1741), 'src.utils.eda.print_null_percents', 'eu.print_null_percents', (['metadata'], {}), '(metadata)\n', (1731, 1741), True, 'import src.utils.eda as eu\n')]
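src.utils.eda is project-internal and not shown; a rough pandas stand-in for print_null_percents (an assumption about its behavior, inferred from how the result is reused as cols_with_nulls.index above):

def print_null_percents_sketch(df):
    # percentage of nulls per column, keeping only columns that have any
    nulls = df.isnull().mean().mul(100)
    nulls = nulls[nulls > 0].sort_values(ascending=False)
    print(nulls)
    return nulls  # indexable like cols_with_nulls above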
import unittest import pandas as pd import numpy as np from dask import dataframe as dd from .normalize_functions import ( encode_objects_general, normalize_general, normalize_chex, ) class NormalizeTests(unittest.TestCase): def test_encode_objects_general(self): strings_feats = "alpha bravo charlie delta echo".split(" ") strings_feats_rev = strings_feats[::-1] int_feats = list(range(len(strings_feats))) colnames = "A B C".split(" ") data_dict = { colname: data for colname, data in zip( colnames, (strings_feats, strings_feats_rev, int_feats) ) } sequence = np.arange(0, 5) test_array = np.column_stack((sequence.T, sequence[::-1].T, sequence.T)) with self.subTest("Pandas test"): mock_df = pd.DataFrame.from_dict(data=data_dict) encoded_df = encode_objects_general(mock_df, "A B".split(" ")) df_test_groundtruth = pd.DataFrame( test_array, columns=colnames, ) self.assertTrue(encoded_df.eq(df_test_groundtruth).all(axis=None)) with self.subTest("Dask test"): mock_df = dd.from_pandas( pd.DataFrame.from_dict(data=data_dict), npartitions=1 ) encoded_df = encode_objects_general(mock_df, "A B".split(" ")) df_test_groundtruth = dd.from_array( test_array, columns=colnames, ) self.assertTrue(encoded_df.eq(df_test_groundtruth).compute().all(axis=None)) def test_normalize_general(self): sequence = np.arange(0, 5) test_array = np.column_stack((sequence.T, sequence[::-1].T, sequence.T)) colnames = "A B C".split(" ") gt_sequence = np.arange(-1, 1.5, 0.5) gt_array = np.column_stack((gt_sequence.T, gt_sequence[::-1].T, gt_sequence.T)) with self.subTest("Pandas test"): df = pd.DataFrame( test_array, columns=colnames, ) df_norm = normalize_general(df, colnames) gt_df = pd.DataFrame( gt_array, columns=colnames, ) self.assertTrue(df_norm.eq(gt_df).all(axis=None)) with self.subTest("Dask test"): df = dd.from_array( test_array, columns=colnames, ) df_norm = normalize_general(df, colnames) gt_df = dd.from_array( gt_array, columns=colnames, ) self.assertTrue(df_norm.eq(gt_df).compute().all(axis=None)) if __name__ == "__main__": unittest.main()
[ "unittest.main", "pandas.DataFrame", "pandas.DataFrame.from_dict", "numpy.arange", "numpy.column_stack", "dask.dataframe.from_array" ]
[((2737, 2752), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2750, 2752), False, 'import unittest\n'), ((690, 705), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (699, 705), True, 'import numpy as np\n'), ((727, 786), 'numpy.column_stack', 'np.column_stack', (['(sequence.T, sequence[::-1].T, sequence.T)'], {}), '((sequence.T, sequence[::-1].T, sequence.T))\n', (742, 786), True, 'import numpy as np\n'), ((1679, 1694), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (1688, 1694), True, 'import numpy as np\n'), ((1716, 1775), 'numpy.column_stack', 'np.column_stack', (['(sequence.T, sequence[::-1].T, sequence.T)'], {}), '((sequence.T, sequence[::-1].T, sequence.T))\n', (1731, 1775), True, 'import numpy as np\n'), ((1836, 1859), 'numpy.arange', 'np.arange', (['(-1)', '(1.5)', '(0.5)'], {}), '(-1, 1.5, 0.5)\n', (1845, 1859), True, 'import numpy as np\n'), ((1879, 1947), 'numpy.column_stack', 'np.column_stack', (['(gt_sequence.T, gt_sequence[::-1].T, gt_sequence.T)'], {}), '((gt_sequence.T, gt_sequence[::-1].T, gt_sequence.T))\n', (1894, 1947), True, 'import numpy as np\n'), ((852, 890), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', ([], {'data': 'data_dict'}), '(data=data_dict)\n', (874, 890), True, 'import pandas as pd\n'), ((1000, 1042), 'pandas.DataFrame', 'pd.DataFrame', (['test_array'], {'columns': 'colnames'}), '(test_array, columns=colnames)\n', (1012, 1042), True, 'import pandas as pd\n'), ((1440, 1483), 'dask.dataframe.from_array', 'dd.from_array', (['test_array'], {'columns': 'colnames'}), '(test_array, columns=colnames)\n', (1453, 1483), True, 'from dask import dataframe as dd\n'), ((2007, 2049), 'pandas.DataFrame', 'pd.DataFrame', (['test_array'], {'columns': 'colnames'}), '(test_array, columns=colnames)\n', (2019, 2049), True, 'import pandas as pd\n'), ((2171, 2211), 'pandas.DataFrame', 'pd.DataFrame', (['gt_array'], {'columns': 'colnames'}), '(gt_array, columns=colnames)\n', (2183, 2211), True, 'import pandas as pd\n'), ((2378, 2421), 'dask.dataframe.from_array', 'dd.from_array', (['test_array'], {'columns': 'colnames'}), '(test_array, columns=colnames)\n', (2391, 2421), True, 'from dask import dataframe as dd\n'), ((2543, 2584), 'dask.dataframe.from_array', 'dd.from_array', (['gt_array'], {'columns': 'colnames'}), '(gt_array, columns=colnames)\n', (2556, 2584), True, 'from dask import dataframe as dd\n'), ((1263, 1301), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', ([], {'data': 'data_dict'}), '(data=data_dict)\n', (1285, 1301), True, 'import pandas as pd\n')]
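The ground-truth arrays pin down what normalize_general must do: a min-max rescale to [-1, 1], x' = 2 * (x - min) / (max - min) - 1. A quick check against the test's sequence (assuming that formula):

import numpy as np
seq = np.arange(0, 5)
print(2 * (seq - seq.min()) / (seq.max() - seq.min()) - 1)  # [-1.  -0.5  0.   0.5  1. ]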
from bson.objectid import ObjectId

from CoordenacaoFacil import db


class UseOfAbstracts():
    def __init__(self, origin=None, destiny=None, student=None, menu=None, createdAt=""):
        self.origin = origin
        self.destiny = destiny
        self.student = student
        self.menu = menu
        self.status = "Processamento"  # initial workflow status ("Processing")
        self.createdAt = createdAt

    def create(self, uoa=None):
        try:
            db.useOfAbstracts.insert({
                "origin": uoa.origin,
                "destiny": uoa.destiny,
                "student": uoa.student,
                "menu": uoa.menu,
                "status": self.status,
                "createdAt": self.createdAt
            })
            return True
        except:
            print("Problem while creating the use-of-abstracts record.")
            return False

    def getAllUOA(self):
        try:
            uoas = db.useOfAbstracts.find({})
            return uoas
        except:
            print("There was a problem returning UOAs.")
            return None

    def getAllUOAByCourse(self, course=""):
        try:
            uoas = db.useOfAbstracts.find({"student.course.code": course})
            return uoas
        except:
            print("There was a problem returning UOAs.")
            return None

    def getByCode(self, code=""):
        try:
            uoa = db.useOfAbstracts.find_one({"_id": ObjectId(code)})
            return uoa
        except:
            print("There was a problem returning UOAs.")
            return None

    def getUOAByCode(self, code):
        uoa = db.useOfAbstracts.find_one({"_id": ObjectId(code)})
        return uoa

    def delete(self, id):
        try:
            db.useOfAbstracts.remove({"_id": ObjectId(id)})
            return True
        except:
            print("There was a problem getting the UOA.")
            return False
[ "CoordenacaoFacil.db.useOfAbstracts.insert", "CoordenacaoFacil.db.useOfAbstracts.find", "bson.objectid.ObjectId" ]
[((429, 603), 'CoordenacaoFacil.db.useOfAbstracts.insert', 'db.useOfAbstracts.insert', (["{'origin': uoa.origin, 'destiny': uoa.destiny, 'student': uoa.student,\n 'menu': uoa.menu, 'status': self.status, 'createdAt': self.createdAt}"], {}), "({'origin': uoa.origin, 'destiny': uoa.destiny,\n 'student': uoa.student, 'menu': uoa.menu, 'status': self.status,\n 'createdAt': self.createdAt})\n", (453, 603), False, 'from CoordenacaoFacil import db\n'), ((897, 923), 'CoordenacaoFacil.db.useOfAbstracts.find', 'db.useOfAbstracts.find', (['{}'], {}), '({})\n', (919, 923), False, 'from CoordenacaoFacil import db\n'), ((1123, 1178), 'CoordenacaoFacil.db.useOfAbstracts.find', 'db.useOfAbstracts.find', (["{'student.course.code': course}"], {}), "({'student.course.code': course})\n", (1145, 1178), False, 'from CoordenacaoFacil import db\n'), ((1625, 1639), 'bson.objectid.ObjectId', 'ObjectId', (['code'], {}), '(code)\n', (1633, 1639), False, 'from bson.objectid import ObjectId\n'), ((1402, 1416), 'bson.objectid.ObjectId', 'ObjectId', (['code'], {}), '(code)\n', (1410, 1416), False, 'from bson.objectid import ObjectId\n'), ((1747, 1759), 'bson.objectid.ObjectId', 'ObjectId', (['id'], {}), '(id)\n', (1755, 1759), False, 'from bson.objectid import ObjectId\n')]
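Note the slightly unusual create() API: it persists the fields of the passed-in uoa argument but reads status and createdAt from the receiver, so the safe call is on the instance itself (field values below are placeholders):

uoa = UseOfAbstracts(origin='...', destiny='...',
                     student={'course': {'code': '...'}}, menu={},
                     createdAt='2020-01-01')
uoa.create(uoa=uoa)  # status/createdAt come from self, the rest from the argument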
import unittest from katas.kyu_7.credit_card_checker import valid_card class ValidCardTestCase(unittest.TestCase): def test_true_1(self): self.assertTrue(valid_card('5457 6238 9823 4311')) def test_true_2(self): self.assertTrue(valid_card('2222 2222 2222 2224')) def test_true_3(self): self.assertTrue(valid_card('9999 9999 9999 9995')) def test_true_4(self): self.assertTrue(valid_card('4444 4444 4444 4448')) def test_true_5(self): self.assertTrue(valid_card('3333 3333 3333 3331')) def test_true_6(self): self.assertTrue(valid_card('6666 6666 6666 6664')) def test_true_7(self): self.assertTrue(valid_card('0000 0000 0000 0000')) def test_true_8(self): self.assertTrue(valid_card('5457 6238 9823 4311')) def test_true_9(self): self.assertTrue(valid_card('8888 8888 8888 8888')) def test_true_10(self): self.assertTrue(valid_card('1111 1111 1111 1117')) def test_true_11(self): self.assertTrue(valid_card('1234 5678 9012 3452')) def test_true_12(self): self.assertTrue(valid_card('5555 5555 5555 5557')) def test_false_1(self): self.assertFalse(valid_card('8895 6238 9323 4311')) def test_false_2(self): self.assertFalse(valid_card('5457 6238 5568 4311')) def test_false_3(self): self.assertFalse(valid_card('5457 6238 9323 4311')) def test_false_4(self): self.assertFalse(valid_card('5457 1125 9323 4311')) def test_false_5(self): self.assertFalse(valid_card('1252 6238 9323 4311')) def test_false_6(self): self.assertFalse(valid_card('0000 0300 0000 0000')) def test_false_7(self): self.assertFalse(valid_card('5457 6238 9323 1252')) def test_false_8(self): self.assertFalse(valid_card('5457 6238 1251 4311')) def test_false_9(self): self.assertFalse(valid_card('5457 6238 0254 4311')) def test_false_10(self): self.assertFalse(valid_card('5457 1111 9323 4311')) def test_false_11(self): self.assertFalse(valid_card('1145 6238 9323 4311')) def test_false_12(self): self.assertFalse(valid_card('0025 2521 9323 4311')) def test_false_13(self): self.assertFalse(valid_card('5457 6238 9323 4311')) def test_false_14(self): self.assertFalse(valid_card('5458 4444 9323 4311')) def test_false_15(self): self.assertFalse(valid_card('5457 6238 3333 4311')) def test_false_16(self): self.assertFalse(valid_card('0123 4567 8901 2345'))
[ "katas.kyu_7.credit_card_checker.valid_card" ]
[((169, 202), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5457 6238 9823 4311"""'], {}), "('5457 6238 9823 4311')\n", (179, 202), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((256, 289), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""2222 2222 2222 2224"""'], {}), "('2222 2222 2222 2224')\n", (266, 289), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((343, 376), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""9999 9999 9999 9995"""'], {}), "('9999 9999 9999 9995')\n", (353, 376), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((430, 463), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""4444 4444 4444 4448"""'], {}), "('4444 4444 4444 4448')\n", (440, 463), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((517, 550), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""3333 3333 3333 3331"""'], {}), "('3333 3333 3333 3331')\n", (527, 550), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((604, 637), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""6666 6666 6666 6664"""'], {}), "('6666 6666 6666 6664')\n", (614, 637), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((691, 724), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""0000 0000 0000 0000"""'], {}), "('0000 0000 0000 0000')\n", (701, 724), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((778, 811), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5457 6238 9823 4311"""'], {}), "('5457 6238 9823 4311')\n", (788, 811), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((865, 898), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""8888 8888 8888 8888"""'], {}), "('8888 8888 8888 8888')\n", (875, 898), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((953, 986), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""1111 1111 1111 1117"""'], {}), "('1111 1111 1111 1117')\n", (963, 986), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((1041, 1074), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""1234 5678 9012 3452"""'], {}), "('1234 5678 9012 3452')\n", (1051, 1074), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((1129, 1162), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5555 5555 5555 5557"""'], {}), "('5555 5555 5555 5557')\n", (1139, 1162), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((1218, 1251), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""8895 6238 9323 4311"""'], {}), "('8895 6238 9323 4311')\n", (1228, 1251), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((1307, 1340), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5457 6238 5568 4311"""'], {}), "('5457 6238 5568 4311')\n", (1317, 1340), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((1396, 1429), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5457 6238 9323 4311"""'], {}), "('5457 6238 9323 4311')\n", (1406, 1429), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((1485, 1518), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5457 1125 9323 4311"""'], {}), "('5457 1125 9323 4311')\n", (1495, 1518), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((1574, 1607), 
'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""1252 6238 9323 4311"""'], {}), "('1252 6238 9323 4311')\n", (1584, 1607), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((1663, 1696), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""0000 0300 0000 0000"""'], {}), "('0000 0300 0000 0000')\n", (1673, 1696), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((1752, 1785), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5457 6238 9323 1252"""'], {}), "('5457 6238 9323 1252')\n", (1762, 1785), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((1841, 1874), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5457 6238 1251 4311"""'], {}), "('5457 6238 1251 4311')\n", (1851, 1874), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((1930, 1963), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5457 6238 0254 4311"""'], {}), "('5457 6238 0254 4311')\n", (1940, 1963), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((2020, 2053), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5457 1111 9323 4311"""'], {}), "('5457 1111 9323 4311')\n", (2030, 2053), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((2110, 2143), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""1145 6238 9323 4311"""'], {}), "('1145 6238 9323 4311')\n", (2120, 2143), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((2200, 2233), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""0025 2521 9323 4311"""'], {}), "('0025 2521 9323 4311')\n", (2210, 2233), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((2290, 2323), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5457 6238 9323 4311"""'], {}), "('5457 6238 9323 4311')\n", (2300, 2323), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((2380, 2413), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5458 4444 9323 4311"""'], {}), "('5458 4444 9323 4311')\n", (2390, 2413), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((2470, 2503), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""5457 6238 3333 4311"""'], {}), "('5457 6238 3333 4311')\n", (2480, 2503), False, 'from katas.kyu_7.credit_card_checker import valid_card\n'), ((2560, 2593), 'katas.kyu_7.credit_card_checker.valid_card', 'valid_card', (['"""0123 4567 8901 2345"""'], {}), "('0123 4567 8901 2345')\n", (2570, 2593), False, 'from katas.kyu_7.credit_card_checker import valid_card\n')]
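The kata's valid_card implementation isn't included; the expectations above are all consistent with a standard Luhn check over the 16 digits (a sketch, not the kata's actual code):

def luhn_valid(card: str) -> bool:
    digits = [int(ch) for ch in card if ch.isdigit()]
    total = 0
    # from the rightmost digit, double every second digit, subtracting 9 when > 9
    for i, d in enumerate(reversed(digits)):
        if i % 2 == 1:
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0

# e.g. luhn_valid('1234 5678 9012 3452') -> True, luhn_valid('0123 4567 8901 2345') -> False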
from selenium import webdriver driver = webdriver.Chrome('chromedriver.exe') driver.get('https://www.google.com/recaptcha/api2/demo') captcha_iframe = driver.find_element_by_css_selector("iframe[src^='https://www.google.com/recaptcha/api2/']") # switch to captcha iframe driver.switch_to.frame(captcha_iframe) # this is specific to a recaptcha iframe check_box = driver.find_element_by_class_name('recaptcha-checkbox-checkmark') # opens image selection menu check_box.click() #"Select all squares with a sidewalk, If there are none, click skip" #"Select all images with roads, Click verify once there are none left." #"Select all images with a store front"
[ "selenium.webdriver.Chrome" ]
[((40, 76), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['"""chromedriver.exe"""'], {}), "('chromedriver.exe')\n", (56, 76), False, 'from selenium import webdriver\n')]
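Clicking immediately after switching frames is race-prone; a common hardening step is an explicit wait (standard Selenium API, added here as a suggestion rather than part of the original script):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

wait = WebDriverWait(driver, 10)
check_box = wait.until(
    EC.element_to_be_clickable((By.CLASS_NAME, 'recaptcha-checkbox-checkmark')))
check_box.click()
driver.switch_to.default_content()  # return to the main document afterwards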
''' Community Application URLs community/urls.py @author <NAME> (810Teams) ''' from django.urls import path, include from rest_framework.routers import DefaultRouter from community.views import CommunityViewSet, ClubViewSet, EventViewSet, CommunityEventViewSet, LabViewSet from community.views import MyCommunityView, MyClubView, MyEventView, MyCommunityEventView, MyLabView router = DefaultRouter() router.register('community', CommunityViewSet) router.register('club', ClubViewSet) router.register('event/community', CommunityEventViewSet) router.register('event', EventViewSet) router.register('lab', LabViewSet) urlpatterns = [ path('community/me/', MyCommunityView.as_view()), path('club/me/', MyClubView.as_view()), path('event/me/', MyEventView.as_view()), path('event/community/me/', MyCommunityEventView.as_view()), path('lab/me/', MyLabView.as_view()), path('', include(router.urls)) ]
[ "community.views.MyCommunityView.as_view", "community.views.MyClubView.as_view", "django.urls.include", "community.views.MyEventView.as_view", "community.views.MyCommunityEventView.as_view", "community.views.MyLabView.as_view", "rest_framework.routers.DefaultRouter" ]
[((400, 415), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (413, 415), False, 'from rest_framework.routers import DefaultRouter\n'), ((675, 700), 'community.views.MyCommunityView.as_view', 'MyCommunityView.as_view', ([], {}), '()\n', (698, 700), False, 'from community.views import MyCommunityView, MyClubView, MyEventView, MyCommunityEventView, MyLabView\n'), ((724, 744), 'community.views.MyClubView.as_view', 'MyClubView.as_view', ([], {}), '()\n', (742, 744), False, 'from community.views import MyCommunityView, MyClubView, MyEventView, MyCommunityEventView, MyLabView\n'), ((769, 790), 'community.views.MyEventView.as_view', 'MyEventView.as_view', ([], {}), '()\n', (788, 790), False, 'from community.views import MyCommunityView, MyClubView, MyEventView, MyCommunityEventView, MyLabView\n'), ((825, 855), 'community.views.MyCommunityEventView.as_view', 'MyCommunityEventView.as_view', ([], {}), '()\n', (853, 855), False, 'from community.views import MyCommunityView, MyClubView, MyEventView, MyCommunityEventView, MyLabView\n'), ((878, 897), 'community.views.MyLabView.as_view', 'MyLabView.as_view', ([], {}), '()\n', (895, 897), False, 'from community.views import MyCommunityView, MyClubView, MyEventView, MyCommunityEventView, MyLabView\n'), ((913, 933), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (920, 933), False, 'from django.urls import path, include\n')]
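The explicit me/ paths are listed before the router include on purpose: otherwise a request to community/me/ would be captured by the router's {pk} detail route. For reference, DefaultRouter expands each registration into list and detail routes (standard DRF behavior, not project-specific code), roughly:

community/ and community/<pk>/                -> CommunityViewSet
club/ and club/<pk>/                          -> ClubViewSet
event/community/ and event/community/<pk>/    -> CommunityEventViewSet
event/ and event/<pk>/                        -> EventViewSet
lab/ and lab/<pk>/                            -> LabViewSet

plus a browsable API root at ''.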
# coding: utf-8 # Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. import oci # noqa: F401 from oci.util import WAIT_RESOURCE_NOT_FOUND # noqa: F401 class CloudGuardClientCompositeOperations(object): """ This class provides a wrapper around :py:class:`~oci.cloud_guard.CloudGuardClient` and offers convenience methods for operations that would otherwise need to be chained together. For example, instead of performing an action on a resource (e.g. launching an instance, creating a load balancer) and then using a waiter to wait for the resource to enter a given state, you can call a single method in this class to accomplish the same functionality """ def __init__(self, client, **kwargs): """ Creates a new CloudGuardClientCompositeOperations object :param CloudGuardClient client: The service client which will be wrapped by this object """ self.client = client def create_data_mask_rule_and_wait_for_state(self, create_data_mask_rule_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.cloud_guard.CloudGuardClient.create_data_mask_rule` and waits for the :py:class:`~oci.cloud_guard.models.DataMaskRule` acted upon to enter the given state(s). :param oci.cloud_guard.models.CreateDataMaskRuleDetails create_data_mask_rule_details: (required) Definition for the new Data Mask Rule. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.DataMaskRule.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.create_data_mask_rule` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_data_mask_rule(create_data_mask_rule_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_data_mask_rule(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def create_detector_recipe_and_wait_for_state(self, create_detector_recipe_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.cloud_guard.CloudGuardClient.create_detector_recipe` and waits for the :py:class:`~oci.cloud_guard.models.DetectorRecipe` acted upon to enter the given state(s). :param oci.cloud_guard.models.CreateDetectorRecipeDetails create_detector_recipe_details: (required) Details for the new DetectorRecipe. :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.cloud_guard.models.DetectorRecipe.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.create_detector_recipe` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_detector_recipe(create_detector_recipe_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_detector_recipe(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def create_managed_list_and_wait_for_state(self, create_managed_list_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.cloud_guard.CloudGuardClient.create_managed_list` and waits for the :py:class:`~oci.cloud_guard.models.ManagedList` acted upon to enter the given state(s). :param oci.cloud_guard.models.CreateManagedListDetails create_managed_list_details: (required) Details for the new ManagedList. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.ManagedList.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.create_managed_list` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_managed_list(create_managed_list_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_managed_list(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def create_responder_recipe_and_wait_for_state(self, create_responder_recipe_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.cloud_guard.CloudGuardClient.create_responder_recipe` and waits for the :py:class:`~oci.cloud_guard.models.ResponderRecipe` acted upon to enter the given state(s). :param oci.cloud_guard.models.CreateResponderRecipeDetails create_responder_recipe_details: (required) Details for ResponderRecipe. :param list[str] wait_for_states: An array of states to wait on. 
These should be valid values for :py:attr:`~oci.cloud_guard.models.ResponderRecipe.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.create_responder_recipe` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_responder_recipe(create_responder_recipe_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_responder_recipe(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def create_target_and_wait_for_state(self, create_target_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.cloud_guard.CloudGuardClient.create_target` and waits for the :py:class:`~oci.cloud_guard.models.Target` acted upon to enter the given state(s). :param oci.cloud_guard.models.CreateTargetDetails create_target_details: (required) Details for the new Target. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.Target.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.create_target` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_target(create_target_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_target(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def create_target_detector_recipe_and_wait_for_state(self, target_id, attach_target_detector_recipe_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.cloud_guard.CloudGuardClient.create_target_detector_recipe` and waits for the :py:class:`~oci.cloud_guard.models.TargetDetectorRecipe` acted upon to enter the given state(s). 
:param str target_id: (required) OCID of target :param oci.cloud_guard.models.AttachTargetDetectorRecipeDetails attach_target_detector_recipe_details: (required) Details for associating DetectorRecipe to Target :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.TargetDetectorRecipe.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.create_target_detector_recipe` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.create_target_detector_recipe(target_id, attach_target_detector_recipe_details, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.data.id try: waiter_result = oci.wait_until( self.client, self.client.get_target_detector_recipe(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def delete_data_mask_rule_and_wait_for_state(self, data_mask_rule_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.cloud_guard.CloudGuardClient.delete_data_mask_rule` and waits for the :py:class:`~oci.cloud_guard.models.DataMaskRule` acted upon to enter the given state(s). :param str data_mask_rule_id: (required) OCID of dataMaskRule :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.DataMaskRule.lifecycle_state` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.delete_data_mask_rule` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. 
For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        initial_get_result = self.client.get_data_mask_rule(data_mask_rule_id)
        operation_result = None
        try:
            operation_result = self.client.delete_data_mask_rule(data_mask_rule_id, **operation_kwargs)
        except oci.exceptions.ServiceError as e:
            if e.status == 404:
                return WAIT_RESOURCE_NOT_FOUND
            else:
                raise e

        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        try:
            waiter_result = oci.wait_until(
                self.client,
                initial_get_result,
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                succeed_on_not_found=True,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def delete_detector_recipe_and_wait_for_state(self, detector_recipe_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.delete_detector_recipe` and waits for the :py:class:`~oci.cloud_guard.models.DetectorRecipe` acted upon
        to enter the given state(s).

        :param str detector_recipe_id: (required)
            DetectorRecipe OCID

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.DetectorRecipe.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.delete_detector_recipe`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        initial_get_result = self.client.get_detector_recipe(detector_recipe_id)
        operation_result = None
        try:
            operation_result = self.client.delete_detector_recipe(detector_recipe_id, **operation_kwargs)
        except oci.exceptions.ServiceError as e:
            if e.status == 404:
                return WAIT_RESOURCE_NOT_FOUND
            else:
                raise e

        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        try:
            waiter_result = oci.wait_until(
                self.client,
                initial_get_result,
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                succeed_on_not_found=True,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def delete_managed_list_and_wait_for_state(self, managed_list_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.delete_managed_list` and waits for the :py:class:`~oci.cloud_guard.models.ManagedList` acted upon
        to enter the given state(s).

        :param str managed_list_id: (required)
            The cloudguard list OCID to be passed in the request.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.ManagedList.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.delete_managed_list`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        initial_get_result = self.client.get_managed_list(managed_list_id)
        operation_result = None
        try:
            operation_result = self.client.delete_managed_list(managed_list_id, **operation_kwargs)
        except oci.exceptions.ServiceError as e:
            if e.status == 404:
                return WAIT_RESOURCE_NOT_FOUND
            else:
                raise e

        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        try:
            waiter_result = oci.wait_until(
                self.client,
                initial_get_result,
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                succeed_on_not_found=True,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def delete_responder_recipe_and_wait_for_state(self, responder_recipe_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.delete_responder_recipe` and waits for the :py:class:`~oci.cloud_guard.models.ResponderRecipe` acted upon
        to enter the given state(s).

        :param str responder_recipe_id: (required)
            OCID of ResponderRecipe

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.ResponderRecipe.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.delete_responder_recipe`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        initial_get_result = self.client.get_responder_recipe(responder_recipe_id)
        operation_result = None
        try:
            operation_result = self.client.delete_responder_recipe(responder_recipe_id, **operation_kwargs)
        except oci.exceptions.ServiceError as e:
            if e.status == 404:
                return WAIT_RESOURCE_NOT_FOUND
            else:
                raise e

        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        try:
            waiter_result = oci.wait_until(
                self.client,
                initial_get_result,
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                succeed_on_not_found=True,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def delete_target_and_wait_for_state(self, target_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.delete_target` and waits for the :py:class:`~oci.cloud_guard.models.Target` acted upon
        to enter the given state(s).

        :param str target_id: (required)
            OCID of target

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.Target.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.delete_target`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        initial_get_result = self.client.get_target(target_id)
        operation_result = None
        try:
            operation_result = self.client.delete_target(target_id, **operation_kwargs)
        except oci.exceptions.ServiceError as e:
            if e.status == 404:
                return WAIT_RESOURCE_NOT_FOUND
            else:
                raise e

        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        try:
            waiter_result = oci.wait_until(
                self.client,
                initial_get_result,
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                succeed_on_not_found=True,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_data_mask_rule_and_wait_for_state(self, data_mask_rule_id, update_data_mask_rule_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.update_data_mask_rule` and waits for the :py:class:`~oci.cloud_guard.models.DataMaskRule` acted upon
        to enter the given state(s).

        :param str data_mask_rule_id: (required)
            OCID of dataMaskRule

        :param oci.cloud_guard.models.UpdateDataMaskRuleDetails update_data_mask_rule_details: (required)
            The information to be updated.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.DataMaskRule.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.update_data_mask_rule`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_data_mask_rule(data_mask_rule_id, update_data_mask_rule_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_data_mask_rule(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_detector_recipe_and_wait_for_state(self, detector_recipe_id, update_detector_recipe_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.update_detector_recipe` and waits for the :py:class:`~oci.cloud_guard.models.DetectorRecipe` acted upon
        to enter the given state(s).

        :param str detector_recipe_id: (required)
            DetectorRecipe OCID

        :param oci.cloud_guard.models.UpdateDetectorRecipeDetails update_detector_recipe_details: (required)
            Details for the DetectorRecipe to be updated

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.DetectorRecipe.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.update_detector_recipe`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_detector_recipe(detector_recipe_id, update_detector_recipe_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_detector_recipe(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_detector_recipe_detector_rule_and_wait_for_state(self, detector_recipe_id, detector_rule_id, update_detector_recipe_detector_rule_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.update_detector_recipe_detector_rule` and waits for the :py:class:`~oci.cloud_guard.models.DetectorRecipeDetectorRule` acted upon
        to enter the given state(s).

        :param str detector_recipe_id: (required)
            DetectorRecipe OCID

        :param str detector_rule_id: (required)
            The key of Detector Rule.

        :param oci.cloud_guard.models.UpdateDetectorRecipeDetectorRuleDetails update_detector_recipe_detector_rule_details: (required)
            The details to be updated for DetectorRule.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.DetectorRecipeDetectorRule.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.update_detector_recipe_detector_rule`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_detector_recipe_detector_rule(detector_recipe_id, detector_rule_id, update_detector_recipe_detector_rule_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_detector_recipe_detector_rule(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_managed_list_and_wait_for_state(self, managed_list_id, update_managed_list_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.update_managed_list` and waits for the :py:class:`~oci.cloud_guard.models.ManagedList` acted upon
        to enter the given state(s).

        :param str managed_list_id: (required)
            The cloudguard list OCID to be passed in the request.

        :param oci.cloud_guard.models.UpdateManagedListDetails update_managed_list_details: (required)
            Details for the ManagedList to be updated

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.ManagedList.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.update_managed_list`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_managed_list(managed_list_id, update_managed_list_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_managed_list(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_problem_status_and_wait_for_state(self, problem_id, update_problem_status_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.update_problem_status` and waits for the :py:class:`~oci.cloud_guard.models.Problem` acted upon
        to enter the given state(s).

        :param str problem_id: (required)
            OCID of the problem.

        :param oci.cloud_guard.models.UpdateProblemStatusDetails update_problem_status_details: (required)
            The additional details for the problem.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.Problem.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.update_problem_status`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_problem_status(problem_id, update_problem_status_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_problem(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_responder_recipe_and_wait_for_state(self, responder_recipe_id, update_responder_recipe_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.update_responder_recipe` and waits for the :py:class:`~oci.cloud_guard.models.ResponderRecipe` acted upon
        to enter the given state(s).

        :param str responder_recipe_id: (required)
            OCID of ResponderRecipe

        :param oci.cloud_guard.models.UpdateResponderRecipeDetails update_responder_recipe_details: (required)
            The details to be updated.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.ResponderRecipe.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.update_responder_recipe`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_responder_recipe(responder_recipe_id, update_responder_recipe_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_responder_recipe(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_responder_recipe_responder_rule_and_wait_for_state(self, responder_recipe_id, responder_rule_id, update_responder_recipe_responder_rule_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.update_responder_recipe_responder_rule` and waits for the :py:class:`~oci.cloud_guard.models.ResponderRecipeResponderRule` acted upon
        to enter the given state(s).

        :param str responder_recipe_id: (required)
            OCID of ResponderRecipe

        :param str responder_rule_id: (required)
            The id of ResponderRule

        :param oci.cloud_guard.models.UpdateResponderRecipeResponderRuleDetails update_responder_recipe_responder_rule_details: (required)
            The details to be updated for ResponderRule.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.ResponderRecipeResponderRule.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.update_responder_recipe_responder_rule`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_responder_recipe_responder_rule(responder_recipe_id, responder_rule_id, update_responder_recipe_responder_rule_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_responder_recipe_responder_rule(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_target_and_wait_for_state(self, target_id, update_target_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.update_target` and waits for the :py:class:`~oci.cloud_guard.models.Target` acted upon
        to enter the given state(s).

        :param str target_id: (required)
            OCID of target

        :param oci.cloud_guard.models.UpdateTargetDetails update_target_details: (required)
            The information to be updated.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.Target.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.update_target`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_target(target_id, update_target_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_target(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_target_detector_recipe_and_wait_for_state(self, target_id, target_detector_recipe_id, update_target_detector_recipe_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.update_target_detector_recipe` and waits for the :py:class:`~oci.cloud_guard.models.TargetDetectorRecipe` acted upon
        to enter the given state(s).

        :param str target_id: (required)
            OCID of target

        :param str target_detector_recipe_id: (required)
            OCID of TargetDetectorRecipe

        :param oci.cloud_guard.models.UpdateTargetDetectorRecipeDetails update_target_detector_recipe_details: (required)
            The details to be updated.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.TargetDetectorRecipe.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.update_target_detector_recipe`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_target_detector_recipe(target_id, target_detector_recipe_id, update_target_detector_recipe_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_target_detector_recipe(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_target_detector_recipe_detector_rule_and_wait_for_state(self, target_id, target_detector_recipe_id, detector_rule_id, update_target_detector_recipe_detector_rule_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.update_target_detector_recipe_detector_rule` and waits for the :py:class:`~oci.cloud_guard.models.TargetDetectorRecipeDetectorRule` acted upon
        to enter the given state(s).

        :param str target_id: (required)
            OCID of target

        :param str target_detector_recipe_id: (required)
            OCID of TargetDetectorRecipe

        :param str detector_rule_id: (required)
            The id of DetectorRule

        :param oci.cloud_guard.models.UpdateTargetDetectorRecipeDetectorRuleDetails update_target_detector_recipe_detector_rule_details: (required)
            The details to be updated for DetectorRule.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.TargetDetectorRecipeDetectorRule.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.update_target_detector_recipe_detector_rule`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_target_detector_recipe_detector_rule(target_id, target_detector_recipe_id, detector_rule_id, update_target_detector_recipe_detector_rule_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_target_detector_recipe_detector_rule(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)

    def update_target_responder_recipe_responder_rule_and_wait_for_state(self, target_id, target_responder_recipe_id, responder_rule_id, update_target_responder_recipe_responder_rule_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}):
        """
        Calls :py:func:`~oci.cloud_guard.CloudGuardClient.update_target_responder_recipe_responder_rule` and waits for the :py:class:`~oci.cloud_guard.models.TargetResponderRecipeResponderRule` acted upon
        to enter the given state(s).

        :param str target_id: (required)
            OCID of target

        :param str target_responder_recipe_id: (required)
            OCID of TargetResponderRecipe

        :param str responder_rule_id: (required)
            The id of ResponderRule

        :param oci.cloud_guard.models.UpdateTargetResponderRecipeResponderRuleDetails update_target_responder_recipe_responder_rule_details: (required)
            The details to be updated for ResponderRule.

        :param list[str] wait_for_states:
            An array of states to wait on. These should be valid values for :py:attr:`~oci.cloud_guard.models.TargetResponderRecipeResponderRule.lifecycle_state`

        :param dict operation_kwargs:
            A dictionary of keyword arguments to pass to :py:func:`~oci.cloud_guard.CloudGuardClient.update_target_responder_recipe_responder_rule`

        :param dict waiter_kwargs:
            A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_wait_seconds``
            as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait
        """
        operation_result = self.client.update_target_responder_recipe_responder_rule(target_id, target_responder_recipe_id, responder_rule_id, update_target_responder_recipe_responder_rule_details, **operation_kwargs)
        if not wait_for_states:
            return operation_result

        lowered_wait_for_states = [w.lower() for w in wait_for_states]
        wait_for_resource_id = operation_result.data.id

        try:
            waiter_result = oci.wait_until(
                self.client,
                self.client.get_target_responder_recipe_responder_rule(wait_for_resource_id),
                evaluate_response=lambda r: getattr(r.data, 'lifecycle_state') and getattr(r.data, 'lifecycle_state').lower() in lowered_wait_for_states,
                **waiter_kwargs
            )
            result_to_return = waiter_result

            return result_to_return
        except Exception as e:
            raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e)
[ "oci.exceptions.CompositeOperationError" ]
[((3249, 3336), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (3287, 3336), False, 'import oci\n'), ((5365, 5452), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (5403, 5452), False, 'import oci\n'), ((7445, 7532), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (7483, 7532), False, 'import oci\n'), ((9565, 9652), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (9603, 9652), False, 'import oci\n'), ((11577, 11664), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (11615, 11664), False, 'import oci\n'), ((13871, 13958), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (13909, 13958), False, 'import oci\n'), ((16129, 16216), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (16167, 16216), False, 'import oci\n'), ((18399, 18486), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (18437, 18486), False, 'import oci\n'), ((20670, 20757), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (20708, 20757), False, 'import oci\n'), ((22955, 23042), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (22993, 23042), False, 'import oci\n'), ((25123, 25210), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (25161, 25210), False, 'import oci\n'), ((27341, 27428), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (27379, 27428), False, 'import oci\n'), ((29589, 29676), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (29627, 29676), False, 'import oci\n'), ((32107, 32194), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (32145, 
32194), False, 'import oci\n'), ((34344, 34431), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (34382, 34431), False, 'import oci\n'), ((36534, 36621), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (36572, 36621), False, 'import oci\n'), ((38782, 38869), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (38820, 38869), False, 'import oci\n'), ((41331, 41418), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (41369, 41418), False, 'import oci\n'), ((43437, 43524), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (43475, 43524), False, 'import oci\n'), ((45862, 45949), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (45900, 45949), False, 'import oci\n'), ((48572, 48659), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (48610, 48659), False, 'import oci\n'), ((51313, 51400), 'oci.exceptions.CompositeOperationError', 'oci.exceptions.CompositeOperationError', ([], {'partial_results': '[operation_result]', 'cause': 'e'}), '(partial_results=[operation_result],\n cause=e)\n', (51351, 51400), False, 'import oci\n')]
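# --- Hedged usage sketch (added for illustration; not part of the source) ---
# Shows how one of the composite operations above might be invoked, following
# the usual oci-python-sdk pattern. The OCID below is a hypothetical
# placeholder, and 'DELETED' is assumed to be a terminal Target lifecycle state.
import oci

config = oci.config.from_file()  # default profile from ~/.oci/config
client = oci.cloud_guard.CloudGuardClient(config)
composite = oci.cloud_guard.CloudGuardClientCompositeOperations(client)

target_ocid = 'ocid1.cloudguardtarget.oc1..example'  # hypothetical OCID
result = composite.delete_target_and_wait_for_state(
    target_ocid,
    wait_for_states=['DELETED'],
    # oci.wait_until kwargs: poll every 10 seconds, give up after 5 minutes
    waiter_kwargs={'max_interval_seconds': 10, 'max_wait_seconds': 300},
)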
import random
import secrets
import os
from datetime import datetime

from flask import Flask, render_template, redirect, url_for, send_file, request
from flask_sqlalchemy import SQLAlchemy
from flaskwebgui import FlaskUI
from win10toast import ToastNotifier
from flask_uploads import UploadSet, configure_uploads, ALL
import requests
import wikipedia

print("Imported Modules")

app = Flask(__name__)
[ "flask.Flask" ]
[((384, 399), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (389, 399), False, 'from flask import Flask, render_template, redirect, url_for, send_file, request\n')]
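# --- Hedged usage sketch (added; not from the source): the app above has no
# routes yet, so a minimal placeholder view is shown so it serves something.
# The route body is an assumption; the original app presumably registers its
# own views elsewhere.
@app.route('/')
def index():
    return 'Hello from the app shell'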
import os

import torchvision

from examples.NIPS.generate_data_utils import mnist_true_values, gather_examples, get_digit_for_initiated_at, \
    get_correct_digit_for_initiated_at, default_noises, default_folder_name

trainset = torchvision.datasets.MNIST(root='../../../../../data/MNIST', train=True, download=True)
testset = torchvision.datasets.MNIST(root='../../../../../data/MNIST', train=False, download=True)


def generate_data(noises_function=default_noises, folder_name=default_folder_name,
                  scenario_function=get_digit_for_initiated_at, test_function=get_correct_digit_for_initiated_at,
                  relevant_digits=1, training_set=trainset, testing_set=testset,
                  start_sequence=None, end_sequence=None, get_true_values=mnist_true_values,
                  network_clause='digit'):
    in_train_data = 'in_train_data.txt'
    init_train_data = 'init_train_data.txt'
    holds_train_data = 'holds_train_data.txt'
    digits_train_data = '{}s_train_data.txt'.format(network_clause)
    init_digit_train_data = 'init_{}_train_data.txt'.format(network_clause)

    in_test_data = 'in_test_data.txt'
    init_test_data = 'init_test_data.txt'
    holds_test_data = 'holds_test_data.txt'
    digits_test_data = '{}s_test_data.txt'.format(network_clause)
    init_digit_test_data = 'init_{}_test_data.txt'.format(network_clause)

    for noise in noises_function():
        folder = folder_name(noise)
        if not os.path.exists(folder):
            os.makedirs(folder)

        iteration_in_train_data = folder + in_train_data
        iteration_init_train_data = folder + init_train_data
        iteration_holds_train_data = folder + holds_train_data
        iteration_digits_train_data = folder + digits_train_data
        iteration_init_digit_train_data = folder + init_digit_train_data

        iteration_in_test_data = folder + in_test_data
        iteration_init_test_data = folder + init_test_data
        iteration_holds_test_data = folder + holds_test_data
        iteration_digits_test_data = folder + digits_test_data
        iteration_init_digit_test_data = folder + init_digit_test_data

        gather_examples(
            dataset=training_set,
            in_filename=iteration_in_train_data,
            initiated_filename=iteration_init_train_data,
            holds_filename=iteration_holds_train_data,
            network_filename=iteration_digits_train_data,
            init_network_filename=iteration_init_digit_train_data,
            threshold=noise,
            scenario_function=scenario_function,
            relevant_digits=relevant_digits,
            start_sequence=start_sequence,
            end_sequence=end_sequence,
            get_true_values=get_true_values,
            network_clause=network_clause
        )

        gather_examples(
            dataset=testing_set,
            in_filename=iteration_in_test_data,
            initiated_filename=iteration_init_test_data,
            holds_filename=iteration_holds_test_data,
            network_filename=iteration_digits_test_data,
            init_network_filename=iteration_init_digit_test_data,
            threshold=0.0,
            scenario_function=test_function,
            relevant_digits=relevant_digits,
            start_sequence=start_sequence,
            end_sequence=end_sequence,
            get_true_values=get_true_values,
            network_clause=network_clause
        )


if __name__ == '__main__':
    generate_data()
[ "os.makedirs", "torchvision.datasets.MNIST", "examples.NIPS.generate_data_utils.gather_examples", "os.path.exists" ]
[((230, 321), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': '"""../../../../../data/MNIST"""', 'train': '(True)', 'download': '(True)'}), "(root='../../../../../data/MNIST', train=True,\n download=True)\n", (256, 321), False, 'import torchvision\n'), ((328, 420), 'torchvision.datasets.MNIST', 'torchvision.datasets.MNIST', ([], {'root': '"""../../../../../data/MNIST"""', 'train': '(False)', 'download': '(True)'}), "(root='../../../../../data/MNIST', train=False,\n download=True)\n", (354, 420), False, 'import torchvision\n'), ((2136, 2643), 'examples.NIPS.generate_data_utils.gather_examples', 'gather_examples', ([], {'dataset': 'training_set', 'in_filename': 'iteration_in_train_data', 'initiated_filename': 'iteration_init_train_data', 'holds_filename': 'iteration_holds_train_data', 'network_filename': 'iteration_digits_train_data', 'init_network_filename': 'iteration_init_digit_train_data', 'threshold': 'noise', 'scenario_function': 'scenario_function', 'relevant_digits': 'relevant_digits', 'start_sequence': 'start_sequence', 'end_sequence': 'end_sequence', 'get_true_values': 'get_true_values', 'network_clause': 'network_clause'}), '(dataset=training_set, in_filename=iteration_in_train_data,\n initiated_filename=iteration_init_train_data, holds_filename=\n iteration_holds_train_data, network_filename=\n iteration_digits_train_data, init_network_filename=\n iteration_init_digit_train_data, threshold=noise, scenario_function=\n scenario_function, relevant_digits=relevant_digits, start_sequence=\n start_sequence, end_sequence=end_sequence, get_true_values=\n get_true_values, network_clause=network_clause)\n', (2151, 2643), False, 'from examples.NIPS.generate_data_utils import mnist_true_values, gather_examples, get_digit_for_initiated_at, get_correct_digit_for_initiated_at, default_noises, default_folder_name\n'), ((2784, 3270), 'examples.NIPS.generate_data_utils.gather_examples', 'gather_examples', ([], {'dataset': 'testing_set', 'in_filename': 'iteration_in_test_data', 'initiated_filename': 'iteration_init_test_data', 'holds_filename': 'iteration_holds_test_data', 'network_filename': 'iteration_digits_test_data', 'init_network_filename': 'iteration_init_digit_test_data', 'threshold': '(0.0)', 'scenario_function': 'test_function', 'relevant_digits': 'relevant_digits', 'start_sequence': 'start_sequence', 'end_sequence': 'end_sequence', 'get_true_values': 'get_true_values', 'network_clause': 'network_clause'}), '(dataset=testing_set, in_filename=iteration_in_test_data,\n initiated_filename=iteration_init_test_data, holds_filename=\n iteration_holds_test_data, network_filename=iteration_digits_test_data,\n init_network_filename=iteration_init_digit_test_data, threshold=0.0,\n scenario_function=test_function, relevant_digits=relevant_digits,\n start_sequence=start_sequence, end_sequence=end_sequence,\n get_true_values=get_true_values, network_clause=network_clause)\n', (2799, 3270), False, 'from examples.NIPS.generate_data_utils import mnist_true_values, gather_examples, get_digit_for_initiated_at, get_correct_digit_for_initiated_at, default_noises, default_folder_name\n'), ((1442, 1464), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1456, 1464), False, 'import os\n'), ((1478, 1497), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (1489, 1497), False, 'import os\n')]
import os

import rpyc
import rpyc.utils
import rpyc.utils.zerodeploy
import plumbum.machines.paramiko_machine

import fiepipelib.localuser.routines.localuser


class client(object):
    """Local client for the fiepipeserver server"""

    _localUser = None
    _hostname = None
    _username = None
    _policy = None

    def __init__(self, hostname, username, localUser, autoAddHosts=False):
        """@param autoAddHosts: If true, automatically adds hosts to the list of trusted hosts
        if it hasn't seen them before.  If false, it rejects them.
        """
        assert isinstance(localUser, fiepipelib.localuser.routines.localuser.LocalUserRoutines)
        self._localUser = localUser
        self._hostname = hostname
        self._username = username
        self._connections = []
        if autoAddHosts:
            self._policy = plumbum.machines.paramiko_machine.paramiko.AutoAddPolicy
        else:
            self._policy = plumbum.machines.paramiko_machine.paramiko.RejectPolicy

    _machine = None
    _server = None
    _connections = None

    def GetHostsFilePath(self):
        return os.path.join(self._localUser.get_pipe_configuration_dir(), "fiepipeclient_known_hosts.txt")

    def RemoveKnownHost(self):
        hosts = plumbum.machines.paramiko_machine.paramiko.HostKeys(self.GetHostsFilePath())
        if hosts.lookup(self._hostname) != None:
            hosts.pop(self._hostname)
            hosts.save(self.GetHostsFilePath())

    def getConnection(self):
        """Warning: the missing-host policy may be auto-add (see __init__).  The first time you
        connect to this thing, make sure you actually trust your DNS and network.
        Subsequent reconnections should be secure.
        """
        if len(self._connections) != 0:
            return self._connections.pop()
        else:
            if self._machine == None:
                self._machine = plumbum.machines.paramiko_machine.ParamikoMachine(host=self._hostname, user=self._username, missing_host_policy=self._policy, keyfile=self.GetHostsFilePath())
            if self._server == None:
                self._server = rpyc.utils.zerodeploy.DeployedServer(remote_machine=self._machine, server_class='fiepipelib.fiepipeserver.server.server')
            connection = self._server.connect()
            return connection

    def returnConnection(self, connection):
        if not connection.closed:
            self._connections.append(connection)

    def close(self):
        if self._server != None:
            self._server.close()
        for c in self._connections:
            c.close()
        self._connections.clear()

    def get_all_registered_sites(self, connection, fqdn):
        """Usually, this data is harmless if spoofed.  Annoying for sure, but harmless.
        All warnings about signatures should be heeded when one uses this info to connect
        to a site later, however.
        """
        return connection.get_all_registered_sites(fqdn)

    def get_all_regestered_legal_entities(self, connection):
        """This can be a good legal entity distribution mechanism as long as the user knows
        how to verify a secure connection the first time they pull.  See getConnection for
        the technical explanation.  Ultimately, the question is: do you trust the server
        you logged into originally?
        """
        # NB: the "regestered" spelling is kept because it matches the server-side method name.
        return connection.get_all_regestered_legal_entities()

    def get_all_registered_containers(self, connection):
        """This can be a good container distribution mechanism as long as the user knows how
        to verify a secure connection the first time they pull.  See getConnection for the
        technical explanation.  Ultimately, the question is: do you trust the server you
        logged into originally?

        Consider using a site state-server method instead, as you can validate that the
        legal entity trusts the state server even if you've never seen it before.
        """
        return connection.get_all_registered_containers()

    def get_registered_containers_by_fqdn(self, connection, fqdn):
        """See get_all_registered_containers.

        @param fqdn: the fqdn to restrict the search to.
        """
        return connection.get_registered_containers_by_fqdn(fqdn)

    def set_registered_containers(self, connection, containers):
        """Sets the given containers in the registry on the server.  Used to push containers."""
        return connection.set_registered_containers(containers)

    def ping(self, connection):
        return connection.ping()
[ "rpyc.utils.zerodeploy.DeployedServer" ]
[((2097, 2222), 'rpyc.utils.zerodeploy.DeployedServer', 'rpyc.utils.zerodeploy.DeployedServer', ([], {'remote_machine': 'self._machine', 'server_class': '"""fiepipelib.fiepipeserver.server.server"""'}), "(remote_machine=self._machine,\n server_class='fiepipelib.fiepipeserver.server.server')\n", (2133, 2222), False, 'import rpyc\n')]
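# --- Hedged usage sketch (added for illustration; not part of the source) ---
# Shows the connection-pooling flow the client above implies. How to construct
# a LocalUserRoutines instance is not shown in this file, so `local_user` is
# left as a parameter here, and the hostname/username are placeholders.
def example_session(local_user):
    c = client('pipe.example.com', 'someuser', local_user, autoAddHosts=True)
    conn = c.getConnection()
    try:
        c.ping(conn)
        entities = c.get_all_regestered_legal_entities(conn)
        print(entities)
    finally:
        c.returnConnection(conn)  # hand the connection back to the pool
        c.close()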
import os

import numpy as np
import h5py as h5
import tensorflow as tf


def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def _floats_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))


# Input data
folder_in = "/home/flo/PycharmProjects/21cm/Data/high_res/Numpy/Downscaled"
stages = range(1, 8)

for stage in stages:
    this_file = os.path.join(folder_in, "fl" + str(stage) + "_shuffled.h5")
    with h5.File(this_file, 'r') as hf:
        Y = np.asarray(hf["data"])
        X = np.asarray(hf["params"])
        print("File '" + this_file + "' loaded. Size of image array in memory: " + str(Y.nbytes // 1e6) + " MB.")

    name = "train.tfrecords_" + str(stage)
    filename = os.path.join(folder_in, name)
    tfrecord_writer = tf.python_io.TFRecordWriter(filename)

    n_samples = X.shape[0]
    rows = Y.shape[1]
    cols = Y.shape[2]

    for index in range(n_samples):
        # 1. Convert data into tf.train.Feature
        Y_raw = Y[index].flatten()  # .tostring()
        X_raw = X[index].flatten()  # .tostring()
        feature = {
            'params_raw': _floats_feature(X_raw),
            'image_raw': _floats_feature(Y_raw)
        }

        # 2. Create a tf.train.Features
        features = tf.train.Features(feature=feature)

        # 3. Create an example protocol
        example = tf.train.Example(features=features)

        # 4. Serialize the Example to string
        example_to_string = example.SerializeToString()

        # 5. Write to TFRecord
        tfrecord_writer.write(example_to_string)

# Test
# filename = '/home/flo/PycharmProjects/21cm/Data/high_res/Numpy/Downscaled/train.tfrecords_1'

# def decode(serialized_example):
#     # 1. define a parser
#     features = tf.parse_single_example(
#         serialized_example,
#         # Defaults are not specified since both keys are required.
#         features={
#             'params_raw': tf.VarLenFeature(tf.float32),
#             'image_raw': tf.VarLenFeature(tf.float32),
#         })
#
#     # 2. Convert the data
#     image = tf.sparse_tensor_to_dense(features['image_raw'], default_value=0)
#     params = tf.sparse_tensor_to_dense(features['params_raw'], default_value=0)
#
#     # 3. Reshape
#     image.set_shape((8))
#     image = tf.reshape(image, [1, 8])
#     params.set_shape(3)
#     return image, params
#
# dataset = tf.data.TFRecordDataset(filename)
# dataset = dataset.map(decode)
[ "tensorflow.train.BytesList", "h5py.File", "tensorflow.python_io.TFRecordWriter", "tensorflow.train.Int64List", "tensorflow.train.Example", "numpy.asarray", "tensorflow.train.Features", "tensorflow.train.FloatList", "os.path.join" ]
[((590, 613), 'h5py.File', 'h5.File', (['this_file', '"""r"""'], {}), "(this_file, 'r')\n", (597, 613), True, 'import h5py as h5\n'), ((633, 655), 'numpy.asarray', 'np.asarray', (["hf['data']"], {}), "(hf['data'])\n", (643, 655), True, 'import numpy as np\n'), ((668, 692), 'numpy.asarray', 'np.asarray', (["hf['params']"], {}), "(hf['params'])\n", (678, 692), True, 'import numpy as np\n'), ((874, 903), 'os.path.join', 'os.path.join', (['folder_in', 'name'], {}), '(folder_in, name)\n', (886, 903), False, 'import os\n'), ((930, 967), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['filename'], {}), '(filename)\n', (957, 967), True, 'import tensorflow as tf\n'), ((136, 169), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (154, 169), True, 'import tensorflow as tf\n'), ((236, 269), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (254, 269), True, 'import tensorflow as tf\n'), ((339, 370), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'value'}), '(value=value)\n', (357, 370), True, 'import tensorflow as tf\n'), ((1463, 1497), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (1480, 1497), True, 'import tensorflow as tf\n'), ((1563, 1598), 'tensorflow.train.Example', 'tf.train.Example', ([], {'features': 'features'}), '(features=features)\n', (1579, 1598), True, 'import tensorflow as tf\n')]
import FWCore.ParameterSet.Config as cms

process = cms.Process("GEN")
# this will run the plug-in energy-flat random particle gun
# and puts particles (HepMCProduct) into edm::Event
process.load("SimGeneral.HepPDTESSource.pdt_cfi")

process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
    moduleSeeds = cms.PSet(
        generator = cms.untracked.uint32(456789)
    ),
    sourceSeed = cms.untracked.uint32(54321)
)

process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(2000)
)

process.source = cms.Source("EmptySource")

process.generator = cms.EDProducer("FlatRandomEGunProducer",
    PGunParameters = cms.PSet(
        PartID = cms.vint32(211),
        MinEta = cms.double(3.5765),
        MaxEta = cms.double(3.5765),
        MinPhi = cms.double(0.6109),
        MaxPhi = cms.double(0.6109),
        MinE = cms.double(100.0),
        MaxE = cms.double(100.0)
    ),
    AddAntiParticle = cms.bool(False),
    psethack = cms.string('single pion 100GeV on fwd hcal'),
    Verbosity = cms.untracked.int32(0),  ## for printouts, set it to 1 (or greater)
    firstRun = cms.untracked.uint32(1)
)

process.GEN = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('mc_pi+100_etaphi344.root')
)

process.p1 = cms.Path(process.generator)
process.p2 = cms.EndPath(process.GEN)
[ "FWCore.ParameterSet.Config.string", "FWCore.ParameterSet.Config.untracked.int32", "FWCore.ParameterSet.Config.double", "FWCore.ParameterSet.Config.vint32", "FWCore.ParameterSet.Config.untracked.string", "FWCore.ParameterSet.Config.EndPath", "FWCore.ParameterSet.Config.Process", "FWCore.ParameterSet.Config.bool", "FWCore.ParameterSet.Config.Source", "FWCore.ParameterSet.Config.untracked.uint32", "FWCore.ParameterSet.Config.Path" ]
[((52, 70), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""GEN"""'], {}), "('GEN')\n", (63, 70), True, 'import FWCore.ParameterSet.Config as cms\n'), ((543, 568), 'FWCore.ParameterSet.Config.Source', 'cms.Source', (['"""EmptySource"""'], {}), "('EmptySource')\n", (553, 568), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1298, 1325), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['process.generator'], {}), '(process.generator)\n', (1306, 1325), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1339, 1363), 'FWCore.ParameterSet.Config.EndPath', 'cms.EndPath', (['process.GEN'], {}), '(process.GEN)\n', (1350, 1363), True, 'import FWCore.ParameterSet.Config as cms\n'), ((414, 441), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(54321)'], {}), '(54321)\n', (434, 441), True, 'import FWCore.ParameterSet.Config as cms\n'), ((497, 522), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(2000)'], {}), '(2000)\n', (516, 522), True, 'import FWCore.ParameterSet.Config as cms\n'), ((944, 959), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (952, 959), True, 'import FWCore.ParameterSet.Config as cms\n'), ((983, 1027), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""single pion 100GeV on fwd hcal"""'], {}), "('single pion 100GeV on fwd hcal')\n", (993, 1027), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1051, 1073), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(0)'], {}), '(0)\n', (1070, 1073), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1140, 1163), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(1)'], {}), '(1)\n', (1160, 1163), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1233, 1281), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""mc_pi+100_etaphi344.root"""'], {}), "('mc_pi+100_etaphi344.root')\n", (1253, 1281), True, 'import FWCore.ParameterSet.Config as cms\n'), ((361, 389), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', (['(456789)'], {}), '(456789)\n', (381, 389), True, 'import FWCore.ParameterSet.Config as cms\n'), ((679, 694), 'FWCore.ParameterSet.Config.vint32', 'cms.vint32', (['(211)'], {}), '(211)\n', (689, 694), True, 'import FWCore.ParameterSet.Config as cms\n'), ((713, 731), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(3.5765)'], {}), '(3.5765)\n', (723, 731), True, 'import FWCore.ParameterSet.Config as cms\n'), ((750, 768), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(3.5765)'], {}), '(3.5765)\n', (760, 768), True, 'import FWCore.ParameterSet.Config as cms\n'), ((787, 805), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.6109)'], {}), '(0.6109)\n', (797, 805), True, 'import FWCore.ParameterSet.Config as cms\n'), ((824, 842), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.6109)'], {}), '(0.6109)\n', (834, 842), True, 'import FWCore.ParameterSet.Config as cms\n'), ((861, 878), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(100.0)'], {}), '(100.0)\n', (871, 878), True, 'import FWCore.ParameterSet.Config as cms\n'), ((897, 914), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(100.0)'], {}), '(100.0)\n', (907, 914), True, 'import FWCore.ParameterSet.Config as cms\n')]
from math import sqrt

primes = [2]
sp = [2]  # Selected primes: candidates for the circular-prime check
limit = 1000000


def check_sp(a):
    # A multi-digit circular prime can only contain the digits 1, 3, 7 and 9;
    # an even digit or a 5 would make some rotation divisible by 2 or 5.
    if a < 10:
        return True
    for c in str(a):
        if c in ['0', '2', '4', '5', '6', '8']:
            return False
    return True


def is_prime(a):
    b = int(sqrt(a)) + 1
    for p in primes:
        if a % p == 0:
            return False
        if p > b:
            return True
    return True


for i in range(3, limit, 2):
    if is_prime(i):
        primes.append(i)
        if check_sp(i):
            sp.append(i)

print("Number of primes found : ", len(primes))
# print(primes)
print("# of specials:", len(sp))
# print(sp)

# Check rotation
spp = []


def rotation(a):
    # Returns every left rotation of a's digits (ending with a itself);
    # single-digit numbers have no rotations to check.
    r = []
    if a < 10:
        return r
    s = str(a)
    p = s
    for i in range(len(s)):
        n = p[-1] + p[:-1]
        p = n
        r.append(int(n))
    return r


# print("test: ", rotation(197))

for p in sp:
    f = 0
    for i in rotation(p):
        if i not in sp:
            f = 1
            break
    if f == 0:
        spp.append(p)

print("total circular prime = ", len(spp))
# print(spp)
# input()
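# Worked check (added for illustration): rotation(197) -> [719, 971, 197],
# and 719, 971 and 197 are all prime, so 197 is counted as circular.
# By contrast rotation(19) -> [91, 19], and 91 = 7 * 13, so 19 is rejected.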
[ "math.sqrt" ]
[((276, 283), 'math.sqrt', 'sqrt', (['a'], {}), '(a)\n', (280, 283), False, 'from math import sqrt\n')]
from datetime import date

atual = date.today().year
nacs = int(input('Enter your year of birth: '))
idade = atual - nacs
if idade <= 9:
    print('You are {} years old and swim in the MIRIM class.'.format(idade))
elif idade <= 14:
    print('You are {} years old and swim in the INFANTIL class.'.format(idade))
elif idade <= 19:
    print('You are {} years old and swim in the JUNIOR class.'.format(idade))
elif idade == 20:
    print('You are {} years old and swim in the SÊNIOR class.'.format(idade))
elif idade > 20:
    print('You are {} years old and swim in the MASTER class.'.format(idade))
print('THE END!!!')
[ "datetime.date.today" ]
[((34, 46), 'datetime.date.today', 'date.today', ([], {}), '()\n', (44, 46), False, 'from datetime import date\n')]
from base import DatadogBaseAction
from datadog import api


class DatadogCreateComment(DatadogBaseAction):
    def _run(self, **kwargs):
        return api.Comment.create(**kwargs)


class DatadogDeleteComment(DatadogBaseAction):
    def _run(self, **kwargs):
        return api.Comment.delete(kwargs.pop("comment_id"))


class DatadogEditComment(DatadogBaseAction):
    def _run(self, **kwargs):
        return api.Comment.update(kwargs.pop("comment_id"), **kwargs)
[ "datadog.api.Comment.create" ]
[((153, 181), 'datadog.api.Comment.create', 'api.Comment.create', ([], {}), '(**kwargs)\n', (171, 181), False, 'from datadog import api\n')]
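# --- Hedged usage sketch (added for illustration; not part of the source) ---
# The actions above are thin wrappers over datadog.api.Comment, which only
# works after datadog.initialize() has been called with credentials; the keys
# below are placeholders. DatadogCreateComment._run simply forwards its kwargs
# to this call.
import datadog
from datadog import api

datadog.initialize(api_key='<DD_API_KEY>', app_key='<DD_APP_KEY>')
comment = api.Comment.create(message='Deploy finished')
print(comment)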
from flask import Flask

from snakeeyes.blueprints.page import page


def create_app(settings_override=None):
    """
    Create a Flask application using the app factory pattern.

    :param settings_override: Override settings
    :return: Flask app
    """
    app = Flask(__name__, instance_relative_config=True)

    app.config.from_object('config.settings')
    app.config.from_pyfile('settings.py', silent=True)

    if settings_override:
        app.config.update(settings_override)

    app.register_blueprint(page)

    return app
[ "flask.Flask" ]
[((270, 316), 'flask.Flask', 'Flask', (['__name__'], {'instance_relative_config': '(True)'}), '(__name__, instance_relative_config=True)\n', (275, 316), False, 'from flask import Flask\n')]
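# --- Hedged usage sketch (added for illustration; not part of the source) ---
# settings_override exists mainly so tests can force deterministic settings.
# This assumes the config.settings module and instance settings.py referenced
# above are importable; TESTING is a standard Flask flag, not something this
# snippet defines.
app = create_app(settings_override={'TESTING': True})

with app.test_client() as http:
    response = http.get('/')
    print(response.status_code)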
from setuptools import setup

SHORT_DESCRIPTION = """
A tool to get your member's overtime from King of Time for every N weeks.""".strip()

DEPENDENCIES = [
    'fire==0.1.2',
    'beautifultable==0.4.0',
    'requests==2.18.4',
]

TEST_DEPENDENCIES = [
]

VERSION = '0.1.4'
URL = 'https://github.com/showwin/scrum-overtime-kot'

setup(
    name='sokot',
    version=VERSION,
    description=SHORT_DESCRIPTION,
    url=URL,
    author='showwin',
    author_email='<EMAIL>',
    license='Apache Software License',
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    entry_points={
        'console_scripts': 'sokot = sokot.main:main'
    },
    keywords='overtime king-of-time scrum python tool cli',
    packages=['sokot'],
    install_requires=DEPENDENCIES,
    tests_require=TEST_DEPENDENCIES,
)
[ "setuptools.setup" ]
[((330, 895), 'setuptools.setup', 'setup', ([], {'name': '"""sokot"""', 'version': 'VERSION', 'description': 'SHORT_DESCRIPTION', 'url': 'URL', 'author': '"""showwin"""', 'author_email': '"""<EMAIL>"""', 'license': '"""Apache Software License"""', 'classifiers': "['Programming Language :: Python', 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6']", 'entry_points': "{'console_scripts': 'sokot = sokot.main:main'}", 'keywords': '"""overtime king-of-time scrum python tool cli"""', 'packages': "['sokot']", 'install_requires': 'DEPENDENCIES', 'tests_require': 'TEST_DEPENDENCIES'}), "(name='sokot', version=VERSION, description=SHORT_DESCRIPTION, url=URL,\n author='showwin', author_email='<EMAIL>', license=\n 'Apache Software License', classifiers=[\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'], entry_points={\n 'console_scripts': 'sokot = sokot.main:main'}, keywords=\n 'overtime king-of-time scrum python tool cli', packages=['sokot'],\n install_requires=DEPENDENCIES, tests_require=TEST_DEPENDENCIES)\n", (335, 895), False, 'from setuptools import setup\n')]
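# Note (added): because of the console_scripts entry point above, installing
# this package (e.g. `pip install .`) exposes a `sokot` command that
# dispatches to sokot.main:main.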
# This implementation is based on codes of <NAME> & <NAME>
import time
import random

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tqdm import tqdm
import pickle

from base.rl import ActorCriticRLAlgorithm
from base.replay_buffer import ReplayBuffer


@tf.function
def clip_with_gradient(x, low=-1, high=1):
    clip_high = tf.cast(x > high, tf.float32)
    clip_low = tf.cast(x < low, tf.float32)
    return x + tf.stop_gradient((high - x) * clip_high + (low - x) * clip_low)


@tf.function
def apply_squashing_func(sample, logp):
    """
    Squash the output of the gaussian distribution and account for that in the log probability.

    :param sample: (tf.Tensor) Action sampled from Gaussian distribution
    :param logp: (tf.Tensor) Log probability before squashing
    """
    # Squash the output
    squashed_action = tf.tanh(sample)
    squashed_action_logp = \
        logp - tf.reduce_sum(tf.math.log(
            clip_with_gradient(1 - squashed_action ** 2, low=0, high=1) + 1e-6), axis=1)  # incurred by change of variable
    return squashed_action, squashed_action_logp


class SquashedGaussianActor(tf.keras.layers.Layer):
    def __init__(self, env):
        super(SquashedGaussianActor, self).__init__()
        self.obs_shape = env.observation_space.shape
        self.action_dim = env.action_space.shape[0]
        self.max_action = env.action_space.high[0]

        # Actor parameters
        self.l1 = tf.keras.layers.Dense(64, activation='relu', name='f0', input_shape=(None,) + self.obs_shape)
        self.l2 = tf.keras.layers.Dense(64, activation='relu', name='f1')
        self.l3_mu = tf.keras.layers.Dense(self.action_dim, name='f2_mu')
        self.l3_log_std = tf.keras.layers.Dense(self.action_dim, name='f2_log_std')

    @tf.function
    def call(self, inputs, **kwargs):
        h = self.l1(inputs)
        h = self.l2(h)
        mean = self.l3_mu(h)
        log_std = self.l3_log_std(h)
        std = tf.exp(log_std)
        dist = tfp.distributions.MultivariateNormalDiag(mean, std)
        sampled_action = dist.sample()
        sampled_action_logp = dist.log_prob(sampled_action)
        squashed_action, squashed_action_logp = apply_squashing_func(sampled_action, sampled_action_logp)
        return squashed_action, tf.reshape(squashed_action_logp, (-1, 1))

    def dist(self, inputs):
        h = self.l1(inputs)
        h = self.l2(h)
        mean = self.l3_mu(h)
        log_std = self.l3_log_std(h)
        std = tf.exp(log_std)
        dist = tfp.distributions.MultivariateNormalDiag(mean, std)
        return dist

    def step(self, obs, deterministic=False):
        if deterministic:
            dist = self.dist(obs)
            mean_action = dist.mean().numpy()
            mean_action = np.nan_to_num(mean_action)
            squashed_action = np.tanh(mean_action)
        else:
            squashed_action, _ = self.call(obs)
            squashed_action = np.nan_to_num(squashed_action)
        return squashed_action * self.max_action


class VNetwork(tf.keras.layers.Layer):
    def __init__(self, obs_shape, output_dim=1):
        super(VNetwork, self).__init__()
        self.v_l0 = tf.keras.layers.Dense(64, activation='relu', name='v/f0', input_shape=(None,) + obs_shape)
        self.v_l1 = tf.keras.layers.Dense(64, activation='relu', name='v/f1')
        self.v_l2 = tf.keras.layers.Dense(output_dim, name='v/f2')

    @tf.function
    def call(self, inputs, **kwargs):
        h = self.v_l0(inputs)
        h = self.v_l1(h)
        v = self.v_l2(h)
        return v


class QNetwork(tf.keras.layers.Layer):
    def __init__(self, obs_shape, num_critics=2):
        super(QNetwork, self).__init__()
        self.num_critics = num_critics
        self.qs_l0, self.qs_l1, self.qs_l2 = [], [], []
        for i in range(self.num_critics):
            self.qs_l0.append(tf.keras.layers.Dense(64, activation='relu', name='q%d/f0' % i,
                                                    input_shape=(None,) + obs_shape))
            self.qs_l1.append(tf.keras.layers.Dense(64, activation='relu', name='q%d/f1' % i))
            self.qs_l2.append(tf.keras.layers.Dense(1, name='q%d/f2' % i))

    @tf.function
    def call(self, inputs, **kwargs):
        obs, action = inputs
        obs_action = tf.concat([obs, action], axis=1)
        qs = []
        for i in range(self.num_critics):
            h = self.qs_l0[i](obs_action)
            h = self.qs_l1[i](h)
            q = self.qs_l2[i](h)
            qs.append(q)
        return qs


class SAC(ActorCriticRLAlgorithm):
    def __init__(self, env, test_env, policy_class=SquashedGaussianActor, ent_coef='auto', reward_scale=1, seed=0):
        super(SAC, self).__init__(policy_class=policy_class, env=env, test_env=test_env)
        self.seed = seed
        tf.random.set_seed(seed)
        np.random.seed(seed)
        random.seed(seed)

        self.env = env
        self.test_env = test_env
        self.max_action = self.env.action_space.high[0]
        self.reward_scale = reward_scale
        self.obs_shape = self.env.observation_space.shape
        self.state_dim = self.env.observation_space.shape[0]
        self.action_dim = self.env.action_space.shape[0]
        self.replay_buffer = ReplayBuffer(size=64000)

        self.num_critics = 2
        self.gamma = 0.99
        self.tau = 0.05
        self.learning_rate = 3e-4
        self.batch_size = 256
        self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32)
        self.ent_coef = ent_coef
        self.info_labels = ['actor_loss', 'v_loss', 'q_loss', 'mean(v)', 'mean(qs)',
                            'ent_coef', 'entropy', 'logp_pi']

        # Entropy coefficient (auto or fixed)
        if isinstance(self.ent_coef, str) and self.ent_coef == 'auto':
            # Default initial value of ent_coef when learned
            init_value = 1.0
            self.log_ent_coef = tf.keras.backend.variable(init_value, dtype=tf.float32, name='log_ent_coef')
            self.ent_coefficient = tf.exp(self.log_ent_coef)
            self.entropy_variables = [self.log_ent_coef]
        else:
            self.log_ent_coef = tf.math.log(self.ent_coef)
            self.ent_coefficient = tf.constant(self.ent_coef)

        # Actor, Critic Networks
        self.actor = policy_class(self.env)
        self.v = VNetwork(self.obs_shape)
        self.q = QNetwork(self.obs_shape, num_critics=self.num_critics)
        self.v_target = VNetwork(self.obs_shape)

        self.actor_variables = self.actor.trainable_variables
        self.critic_variables = self.v.trainable_variables + self.q.trainable_variables

        self.actor_optimizer = tf.keras.optimizers.Adam(self.learning_rate)
        self.critic_optimizer = tf.keras.optimizers.Adam(self.learning_rate)
        if isinstance(ent_coef, str) and ent_coef == 'auto':
            self.entropy_optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)

        self.optimizer_variables = self.actor.trainable_variables + self.v.trainable_variables + \
                                   self.q.trainable_variables + self.v_target.trainable_variables

    # @tf.function
    def update_target(self, target_params, source_params):
        # Polyak averaging of the value-network parameters into the target network
        for target, source in zip(target_params, source_params):
            tf.keras.backend.set_value(target, (1 - self.tau) * target + self.tau * source)

    @tf.function
    def initialize_variables(self):
        # Run a dummy forward pass so all layers build their weights
        zero_like_state = tf.zeros((1,) + self.obs_shape)
        zero_like_action = tf.zeros((1, self.action_dim))
        self.actor(zero_like_state)
        self.v(zero_like_state)
        self.v_target(zero_like_state)
        self.q(inputs=(zero_like_state, zero_like_action))

    @tf.function
    def train(self, obs, action, reward, next_obs, done):
        # Casting from float64 to float32
        obs = tf.cast(obs, tf.float32)
        action = tf.cast(action, tf.float32) / self.max_action
        reward = tf.cast(reward, tf.float32)[:, None] * self.reward_scale
        next_obs = tf.cast(next_obs, tf.float32)
        done = tf.cast(done, tf.float32)[:, None]

        dist = self.actor.dist(obs)

        with tf.GradientTape() as tape_actor:
            # Actor training (pi)
            action_pi, logp_pi = self.actor.call(obs)
            qs_pi = self.q.call(inputs=(obs, action_pi))
            # The policy objective uses the first critic (as in the original SAC);
            # the minimum over critics is used for the value backup below.
            actor_loss = tf.reduce_mean(tf.math.exp(self.log_ent_coef) * logp_pi - qs_pi[0])

        actor_variables = self.actor.trainable_variables
        grads_actor = tape_actor.gradient(actor_loss, actor_variables)
        actor_op = self.actor_optimizer.apply_gradients(zip(grads_actor, actor_variables))

        with tf.control_dependencies([actor_op]):
            v_target = self.v_target(next_obs)
            min_q_pi = tf.reduce_min(qs_pi, axis=0)  # (batch, 1)
            v_backup = tf.stop_gradient(min_q_pi - tf.math.exp(self.log_ent_coef) * logp_pi)  # (batch, 1)
            q_backup = tf.stop_gradient(reward + (1 - done) * self.gamma * v_target)  # (batch, 1)

            with tf.GradientTape() as tape_critic:
                # Critic training (V, Q)
                v = self.v(obs)
                v_loss = 0.5 * tf.reduce_mean((v_backup - v) ** 2)  # MSE, scalar

                qs = self.q(inputs=(obs, action))
                q_losses = [0.5 * tf.reduce_mean((q_backup - qs[k]) ** 2) for k in range(self.num_critics)]  # list of scalars
                q_loss = tf.reduce_sum(q_losses, axis=0)  # scalar

                value_loss = v_loss + q_loss

            critic_variables = self.v.trainable_variables + self.q.trainable_variables
            grads_critic = tape_critic.gradient(value_loss, critic_variables)
            self.critic_optimizer.apply_gradients(zip(grads_critic, critic_variables))

        if isinstance(self.ent_coef, str) and self.ent_coef == 'auto':
            with tf.GradientTape() as tape_ent:
                ent_coef_loss = -tf.reduce_mean(self.log_ent_coef * tf.stop_gradient(logp_pi + self.target_entropy))
            entropy_variables = [self.log_ent_coef]
            grads_ent = tape_ent.gradient(ent_coef_loss, entropy_variables)
            self.entropy_optimizer.apply_gradients(zip(grads_ent, entropy_variables))

        return actor_loss, tf.reduce_mean(v_loss), tf.reduce_mean(q_loss), tf.reduce_mean(v), tf.reduce_mean(qs), \
               tf.math.exp(self.log_ent_coef), tf.reduce_mean(dist.entropy()), tf.reduce_mean(logp_pi)

    def learn(self, total_timesteps, log_interval=640, callback=None, verbose=1,
              eval_interval=5000, eval_rollout=True, save_path=None, save_interval=500000):
        self.initialize_variables()
        # Hard-copy the value network into the target network before training
        for target, source in zip(self.v_target.trainable_variables, self.v.trainable_variables):
            tf.keras.backend.set_value(target, source.numpy())

        start_time = time.time()
        episode_rewards = []
        eval_rewards = []
        obs = self.env.reset()
        current_episode_reward = 0

        for step in tqdm(range(total_timesteps), desc='SAC', ncols=70):
            if callback is not None:
                if callback(locals(), globals()) is False:
                    break

            # Take an action
            action = np.reshape(self.predict(np.array([obs]), deterministic=False)[0], -1)
            next_obs, reward, done, _ = self.env.step(action)

            # Store transition in the replay buffer.
            self.replay_buffer.add(obs, action, reward, next_obs, float(done))
            obs = next_obs
            current_episode_reward += reward

            if done:
                obs = self.env.reset()
                episode_rewards.append(current_episode_reward)
                current_episode_reward = 0.0

            if self.replay_buffer.can_sample(self.batch_size):
                obss, actions, rewards, next_obss, dones = self.replay_buffer.sample(self.batch_size)  # actions are normalized inside train()
                step_info = self.train(obss, actions, rewards, next_obss, dones)

                if verbose >= 1 and step % log_interval == 0:
                    print('\n============================')
                    print('%15s: %10.6f' % ('10ep_rewmean', np.mean(episode_rewards[-10:])))
                    for i, label in enumerate(self.info_labels):
                        print('%15s: %10.6f' % (label, step_info[i].numpy()))
                    print('============================\n')

                self.update_target(self.v_target.trainable_variables, self.v.trainable_variables)

            if step % eval_interval == 0:
                if eval_rollout:
                    eval_rewards.append(self.evaluate(1))
                else:
                    eval_rewards.append(episode_rewards[-1])

            if step % save_interval == 0 and save_path is not None:
                print('** Saving models and evaluation returns..')
                np.save(save_path + "/%s_rews_seed%d_iter%d.npy" % (self.env.spec.id, self.seed, step),
                        np.array(eval_rewards))
                self.save(save_path + "/%s_model_seed%d.zip" % (self.env.spec.id, self.seed))

        return eval_rewards

    def predict(self, obs, deterministic=False):
        obs_rank = len(obs.shape)
        if len(obs.shape) == 1:
            obs = np.array([obs])
        assert len(obs.shape) == 2
        action = self.actor.step(obs, deterministic=deterministic)
        # action = np.clip(action, self.action_space.low, self.action_space.high)
        if obs_rank == 1:
            return action[0], None
        else:
            return action, None

    def load(self, filepath):
        self.initialize_variables()
        with open(filepath, 'rb') as f:
            parameters = pickle.load(f)
        self.load_parameters(parameters)
[ "tensorflow.random.set_seed", "tensorflow.keras.backend.set_value", "tensorflow.reduce_sum", "numpy.random.seed", "numpy.nan_to_num", "tensorflow.keras.layers.Dense", "tensorflow.reshape", "pickle.load", "numpy.mean", "base.replay_buffer.ReplayBuffer", "numpy.prod", "tensorflow.math.log", "tensorflow.concat", "tensorflow.cast", "tensorflow.exp", "random.seed", "tensorflow.keras.optimizers.Adam", "tensorflow.reduce_min", "tensorflow.keras.backend.variable", "tensorflow.control_dependencies", "numpy.tanh", "tensorflow.stop_gradient", "tensorflow.reduce_mean", "tensorflow.constant", "tensorflow.math.exp", "tensorflow_probability.distributions.MultivariateNormalDiag", "time.time", "tensorflow.zeros", "numpy.array", "tensorflow.tanh", "tensorflow.GradientTape" ]
[((361, 390), 'tensorflow.cast', 'tf.cast', (['(x > high)', 'tf.float32'], {}), '(x > high, tf.float32)\n', (368, 390), True, 'import tensorflow as tf\n'), ((406, 434), 'tensorflow.cast', 'tf.cast', (['(x < low)', 'tf.float32'], {}), '(x < low, tf.float32)\n', (413, 434), True, 'import tensorflow as tf\n'), ((860, 875), 'tensorflow.tanh', 'tf.tanh', (['sample'], {}), '(sample)\n', (867, 875), True, 'import tensorflow as tf\n'), ((450, 513), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['((high - x) * clip_high + (low - x) * clip_low)'], {}), '((high - x) * clip_high + (low - x) * clip_low)\n', (466, 513), True, 'import tensorflow as tf\n'), ((1498, 1595), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""f0"""', 'input_shape': '((None,) + self.obs_shape)'}), "(64, activation='relu', name='f0', input_shape=(None,) +\n self.obs_shape)\n", (1519, 1595), True, 'import tensorflow as tf\n'), ((1610, 1665), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""f1"""'}), "(64, activation='relu', name='f1')\n", (1631, 1665), True, 'import tensorflow as tf\n'), ((1687, 1739), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.action_dim'], {'name': '"""f2_mu"""'}), "(self.action_dim, name='f2_mu')\n", (1708, 1739), True, 'import tensorflow as tf\n'), ((1766, 1823), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['self.action_dim'], {'name': '"""f2_log_std"""'}), "(self.action_dim, name='f2_log_std')\n", (1787, 1823), True, 'import tensorflow as tf\n'), ((2019, 2034), 'tensorflow.exp', 'tf.exp', (['log_std'], {}), '(log_std)\n', (2025, 2034), True, 'import tensorflow as tf\n'), ((2060, 2111), 'tensorflow_probability.distributions.MultivariateNormalDiag', 'tfp.distributions.MultivariateNormalDiag', (['mean', 'std'], {}), '(mean, std)\n', (2100, 2111), True, 'import tensorflow_probability as tfp\n'), ((2559, 2574), 'tensorflow.exp', 'tf.exp', (['log_std'], {}), '(log_std)\n', (2565, 2574), True, 'import tensorflow as tf\n'), ((2590, 2641), 'tensorflow_probability.distributions.MultivariateNormalDiag', 'tfp.distributions.MultivariateNormalDiag', (['mean', 'std'], {}), '(mean, std)\n', (2630, 2641), True, 'import tensorflow_probability as tfp\n'), ((3315, 3410), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""v/f0"""', 'input_shape': '((None,) + obs_shape)'}), "(64, activation='relu', name='v/f0', input_shape=(None\n ,) + obs_shape)\n", (3336, 3410), True, 'import tensorflow as tf\n'), ((3426, 3483), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""v/f1"""'}), "(64, activation='relu', name='v/f1')\n", (3447, 3483), True, 'import tensorflow as tf\n'), ((3504, 3550), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['output_dim'], {'name': '"""v/f2"""'}), "(output_dim, name='v/f2')\n", (3525, 3550), True, 'import tensorflow as tf\n'), ((4391, 4423), 'tensorflow.concat', 'tf.concat', (['[obs, action]'], {'axis': '(1)'}), '([obs, action], axis=1)\n', (4400, 4423), True, 'import tensorflow as tf\n'), ((4937, 4961), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (4955, 4961), True, 'import tensorflow as tf\n'), ((4978, 4998), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4992, 4998), True, 'import numpy as np\n'), ((5007, 5024), 'random.seed', 'random.seed', (['seed'], {}), 
'(seed)\n', (5018, 5024), False, 'import random\n'), ((5392, 5416), 'base.replay_buffer.ReplayBuffer', 'ReplayBuffer', ([], {'size': '(64000)'}), '(size=64000)\n', (5404, 5416), False, 'from base.replay_buffer import ReplayBuffer\n'), ((6873, 6917), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['self.learning_rate'], {}), '(self.learning_rate)\n', (6897, 6917), True, 'import tensorflow as tf\n'), ((6950, 6994), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['self.learning_rate'], {}), '(self.learning_rate)\n', (6974, 6994), True, 'import tensorflow as tf\n'), ((7692, 7723), 'tensorflow.zeros', 'tf.zeros', (['((1,) + self.obs_shape)'], {}), '((1,) + self.obs_shape)\n', (7700, 7723), True, 'import tensorflow as tf\n'), ((7751, 7781), 'tensorflow.zeros', 'tf.zeros', (['(1, self.action_dim)'], {}), '((1, self.action_dim))\n', (7759, 7781), True, 'import tensorflow as tf\n'), ((8110, 8134), 'tensorflow.cast', 'tf.cast', (['obs', 'tf.float32'], {}), '(obs, tf.float32)\n', (8117, 8134), True, 'import tensorflow as tf\n'), ((8292, 8321), 'tensorflow.cast', 'tf.cast', (['next_obs', 'tf.float32'], {}), '(next_obs, tf.float32)\n', (8299, 8321), True, 'import tensorflow as tf\n'), ((11309, 11320), 'time.time', 'time.time', ([], {}), '()\n', (11318, 11320), False, 'import time\n'), ((2358, 2399), 'tensorflow.reshape', 'tf.reshape', (['squahsed_action_logp', '(-1, 1)'], {}), '(squahsed_action_logp, (-1, 1))\n', (2368, 2399), True, 'import tensorflow as tf\n'), ((2854, 2880), 'numpy.nan_to_num', 'np.nan_to_num', (['mean_action'], {}), '(mean_action)\n', (2867, 2880), True, 'import numpy as np\n'), ((2911, 2931), 'numpy.tanh', 'np.tanh', (['mean_action'], {}), '(mean_action)\n', (2918, 2931), True, 'import numpy as np\n'), ((3025, 3055), 'numpy.nan_to_num', 'np.nan_to_num', (['squashed_action'], {}), '(squashed_action)\n', (3038, 3055), True, 'import numpy as np\n'), ((6110, 6186), 'tensorflow.keras.backend.variable', 'tf.keras.backend.variable', (['init_value'], {'dtype': 'tf.float32', 'name': '"""log_ent_coef"""'}), "(init_value, dtype=tf.float32, name='log_ent_coef')\n", (6135, 6186), True, 'import tensorflow as tf\n'), ((6222, 6247), 'tensorflow.exp', 'tf.exp', (['self.log_ent_coef'], {}), '(self.log_ent_coef)\n', (6228, 6247), True, 'import tensorflow as tf\n'), ((6352, 6378), 'tensorflow.math.log', 'tf.math.log', (['self.ent_coef'], {}), '(self.ent_coef)\n', (6363, 6378), True, 'import tensorflow as tf\n'), ((6414, 6440), 'tensorflow.constant', 'tf.constant', (['self.ent_coef'], {}), '(self.ent_coef)\n', (6425, 6440), True, 'import tensorflow as tf\n'), ((7102, 7160), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (7126, 7160), True, 'import tensorflow as tf\n'), ((7524, 7603), 'tensorflow.keras.backend.set_value', 'tf.keras.backend.set_value', (['target', '((1 - self.tau) * target + self.tau * source)'], {}), '(target, (1 - self.tau) * target + self.tau * source)\n', (7550, 7603), True, 'import tensorflow as tf\n'), ((8152, 8179), 'tensorflow.cast', 'tf.cast', (['action', 'tf.float32'], {}), '(action, tf.float32)\n', (8159, 8179), True, 'import tensorflow as tf\n'), ((8337, 8362), 'tensorflow.cast', 'tf.cast', (['done', 'tf.float32'], {}), '(done, tf.float32)\n', (8344, 8362), True, 'import tensorflow as tf\n'), ((8423, 8440), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8438, 8440), True, 'import tensorflow as tf\n'), ((9009, 9044), 
'tensorflow.control_dependencies', 'tf.control_dependencies', (['[actor_op]'], {}), '([actor_op])\n', (9032, 9044), True, 'import tensorflow as tf\n'), ((9128, 9156), 'tensorflow.reduce_min', 'tf.reduce_min', (['qs_pi'], {'axis': '(0)'}), '(qs_pi, axis=0)\n', (9141, 9156), True, 'import tensorflow as tf\n'), ((9299, 9360), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(reward + (1 - done) * self.gamma * v_target)'], {}), '(reward + (1 - done) * self.gamma * v_target)\n', (9315, 9360), True, 'import tensorflow as tf\n'), ((10713, 10735), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['v_loss'], {}), '(v_loss)\n', (10727, 10735), True, 'import tensorflow as tf\n'), ((10737, 10759), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['q_loss'], {}), '(q_loss)\n', (10751, 10759), True, 'import tensorflow as tf\n'), ((10761, 10778), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['v'], {}), '(v)\n', (10775, 10778), True, 'import tensorflow as tf\n'), ((10780, 10798), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['qs'], {}), '(qs)\n', (10794, 10798), True, 'import tensorflow as tf\n'), ((10817, 10847), 'tensorflow.math.exp', 'tf.math.exp', (['self.log_ent_coef'], {}), '(self.log_ent_coef)\n', (10828, 10847), True, 'import tensorflow as tf\n'), ((10881, 10904), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['logp_pi'], {}), '(logp_pi)\n', (10895, 10904), True, 'import tensorflow as tf\n'), ((13937, 13952), 'numpy.array', 'np.array', (['[obs]'], {}), '([obs])\n', (13945, 13952), True, 'import numpy as np\n'), ((14407, 14421), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (14418, 14421), False, 'import pickle\n'), ((4013, 4114), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': "('q%d/f0' % i)", 'input_shape': '((None,) + obs_shape)'}), "(64, activation='relu', name='q%d/f0' % i, input_shape\n =(None,) + obs_shape)\n", (4034, 4114), True, 'import tensorflow as tf\n'), ((4141, 4204), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': "('q%d/f1' % i)"}), "(64, activation='relu', name='q%d/f1' % i)\n", (4162, 4204), True, 'import tensorflow as tf\n'), ((4236, 4279), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'name': "('q%d/f2' % i)"}), "(1, name='q%d/f2' % i)\n", (4257, 4279), True, 'import tensorflow as tf\n'), ((8216, 8243), 'tensorflow.cast', 'tf.cast', (['reward', 'tf.float32'], {}), '(reward, tf.float32)\n', (8223, 8243), True, 'import tensorflow as tf\n'), ((9393, 9410), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (9408, 9410), True, 'import tensorflow as tf\n'), ((9843, 9874), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['q_losses'], {'axis': '(0)'}), '(q_losses, axis=0)\n', (9856, 9874), True, 'import tensorflow as tf\n'), ((5592, 5628), 'numpy.prod', 'np.prod', (['self.env.action_space.shape'], {}), '(self.env.action_space.shape)\n', (5599, 5628), True, 'import numpy as np\n'), ((9547, 9582), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((v_backup - v) ** 2)'], {}), '((v_backup - v) ** 2)\n', (9561, 9582), True, 'import tensorflow as tf\n'), ((10298, 10315), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (10313, 10315), True, 'import tensorflow as tf\n'), ((8714, 8744), 'tensorflow.math.exp', 'tf.math.exp', (['self.log_ent_coef'], {}), '(self.log_ent_coef)\n', (8725, 8744), True, 'import tensorflow as tf\n'), ((9221, 9251), 'tensorflow.math.exp', 'tf.math.exp', (['self.log_ent_coef'], {}), '(self.log_ent_coef)\n', 
(9232, 9251), True, 'import tensorflow as tf\n'), ((9731, 9770), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((q_backup - qs[k]) ** 2)'], {}), '((q_backup - qs[k]) ** 2)\n', (9745, 9770), True, 'import tensorflow as tf\n'), ((11713, 11728), 'numpy.array', 'np.array', (['[obs]'], {}), '([obs])\n', (11721, 11728), True, 'import numpy as np\n'), ((13651, 13673), 'numpy.array', 'np.array', (['eval_rewards'], {}), '(eval_rewards)\n', (13659, 13673), True, 'import numpy as np\n'), ((10401, 10448), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(logp_pi + self.target_entropy)'], {}), '(logp_pi + self.target_entropy)\n', (10417, 10448), True, 'import tensorflow as tf\n'), ((12733, 12763), 'numpy.mean', 'np.mean', (['episode_rewards[-10:]'], {}), '(episode_rewards[-10:])\n', (12740, 12763), True, 'import numpy as np\n')]
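A minimal usage sketch for the SAC implementation above. This is an illustration under stated assumptions, not part of the original record: the module name `sac`, the `Pendulum-v1` environment, and the classic Gym step API (4-tuple returns, as the `learn()` loop expects) are all assumed.

import gym
from sac import SAC  # assumed module name for the file above

env = gym.make('Pendulum-v1')        # any continuous-action env with a classic Gym API
test_env = gym.make('Pendulum-v1')
agent = SAC(env, test_env, ent_coef='auto', reward_scale=1, seed=0)
# learn() returns the evaluation returns collected every `eval_interval` steps
eval_rewards = agent.learn(total_timesteps=100000, eval_interval=5000, verbose=1)
action, _ = agent.predict(env.reset(), deterministic=True)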
# Entered the Keuroba (크로바) apartment page; printing the apartment name, address
# and details succeeded, but exporting to CSV did not work yet.
import requests, re, time
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.common.keys import Keys

# The raw string alone is the valid driver path; extra literal quotes would break it
driver = webdriver.Chrome(r'D:\user\Documents\chromedriver.exe')
driver.get('https://www.naver.com')
[ "selenium.webdriver.Chrome" ]
[((202, 261), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['""""D:\\\\user\\\\Documents\\\\chromedriver.exe\\""""'], {}), '(\'"D:\\\\user\\\\Documents\\\\chromedriver.exe"\')\n', (218, 261), False, 'from selenium import webdriver\n')]
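The comment in the record above notes that the CSV step was still unfinished. Below is a minimal sketch of how it might be completed with the imports already present (BeautifulSoup and the standard csv module); the CSS selector and column name are hypothetical, since the target page structure is not shown.

import csv

soup = bs(driver.page_source, 'html.parser')
# hypothetical selector for the listing entries
rows = [[item.get_text(strip=True)] for item in soup.select('.apartment_item')]

with open('apartments.csv', 'w', newline='', encoding='utf-8-sig') as f:
    writer = csv.writer(f)
    writer.writerow(['info'])
    writer.writerows(rows)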
from runners.output_parser import OutputParser def setup(oBenchmarkRunner, cores, phys_cores, scenario, memory): oBenchmarkRunner.configure("caf", "caf/build/bin", memory, args = ["--parseable", "--scheduler.max-threads=" + str(cores)] + scenario) def gnuplot(cores, files, results): OutputParser(files).parse(cores, results)
[ "runners.output_parser.OutputParser" ]
[((290, 309), 'runners.output_parser.OutputParser', 'OutputParser', (['files'], {}), '(files)\n', (302, 309), False, 'from runners.output_parser import OutputParser\n')]
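The two hooks above act as plugin entry points: `setup` points a benchmark runner at the CAF binary with a scheduler thread cap, and `gnuplot` hands the result files to the shared OutputParser. A hypothetical driver might invoke them as follows; the `BenchmarkRunner` import, the scenario flags, and the memory format are assumptions, since the runner itself is not shown.

from runners.benchmark_runner import BenchmarkRunner  # hypothetical import

runner = BenchmarkRunner()
setup(runner, cores=8, phys_cores=4, scenario=['--bench', 'mixed_case'], memory='4G')
results = {}
gnuplot(cores=8, files=['out/mixed_case.txt'], results=results)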
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/7/17 15:22
# @Author  : kimmy-pan
# @File    : GET_DATA.py
from flask import Flask, request, jsonify

app = Flask(__name__)


@app.route('/getdata', methods=['POST', 'GET'])
def getdata():
    if request.method == 'POST':
        try:
            token = request.headers['Token']
        except Exception:
            return jsonify({'message': u'Data submission failed! No Token value was sent',
                            'data': '', 'code': 201})
        params = ['task_id', 'result_lists']
        for i in params:
            # .get() so a missing field is reported the same way as an empty one
            if request.json.get(i, "") == "":
                return jsonify({'message': u'Data submission failed! {} cannot be empty'.format(i),
                                'data': '', 'code': 201})
        if type(request.json['result_lists']) is not list:
            return jsonify({'message': u'Data submission failed! result_lists has the wrong data format',
                            'data': '', 'code': 201})
        params = ['case_id', 'case_result', 'case_time', 'case_executor', 'case_exetype']
        for i in request.json['result_lists']:
            # Collect the keys per result item; resetting the list for each item keeps
            # keys from a previous item from masking fields missing in the current one
            key = []
            for k, v in i.items():
                key.append(k)
            for j in params:
                if j not in key:
                    return jsonify({'message': 'case_id:' + str(i["case_id"]) +
                                               ' data submission failed! {} was not sent'.format(j),
                                    'data': '', 'code': 201})
        return jsonify({'message': 'success', 'data': '', 'code': 200})
    elif request.method == 'GET':
        return jsonify({'message': 'Wrong request method', 'data': '', 'code': 201})


if __name__ == '__main__':
    app.run(threaded=True, debug=True)
[ "flask.jsonify", "flask.Flask" ]
[((176, 191), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (181, 191), False, 'from flask import Flask, request, jsonify\n'), ((1541, 1597), 'flask.jsonify', 'jsonify', (["{'message': 'success', 'data': '', 'code': 200}"], {}), "({'message': 'success', 'data': '', 'code': 200})\n", (1548, 1597), False, 'from flask import Flask, request, jsonify\n'), ((879, 954), 'flask.jsonify', 'jsonify', (["{'message': u'数据发送失败!result_lists数据格式错误', 'data': '', 'code': 201}"], {}), "({'message': u'数据发送失败!result_lists数据格式错误', 'data': '', 'code': 201})\n", (886, 954), False, 'from flask import Flask, request, jsonify\n'), ((1647, 1702), 'flask.jsonify', 'jsonify', (["{'message': '请求方式错误', 'data': '', 'code': 201}"], {}), "({'message': '请求方式错误', 'data': '', 'code': 201})\n", (1654, 1702), False, 'from flask import Flask, request, jsonify\n'), ((401, 467), 'flask.jsonify', 'jsonify', (["{'message': u'数据发送失败!没有传Token值', 'data': '', 'code': 201}"], {}), "({'message': u'数据发送失败!没有传Token值', 'data': '', 'code': 201})\n", (408, 467), False, 'from flask import Flask, request, jsonify\n')]
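A client-side sketch of a request that satisfies every check in the /getdata handler above: the Token header is present, task_id and result_lists are non-empty, and each result item carries all five required case fields. The host, port, and token value are assumptions.

import requests

payload = {
    'task_id': '42',
    'result_lists': [{
        'case_id': 1,
        'case_result': 'pass',
        'case_time': '2018-07-17 15:22:00',
        'case_executor': 'kimmy-pan',
        'case_exetype': 'auto',
    }],
}
resp = requests.post('http://127.0.0.1:5000/getdata',
                     json=payload, headers={'Token': 'example-token'})
print(resp.json())  # {'code': 200, 'data': '', 'message': 'success'}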
# coding=utf-8
# Copyright (c) 2018 <NAME>
import logging

from django.db.models import Count
from django.db.models.functions import TruncDate
from rest_framework import permissions
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet, ModelViewSet, ViewSet

from .. import models
from . import serializers

log = logging.getLogger(__name__)


class SharedFileViewSet(ModelViewSet, GenericViewSet):
    queryset = models.SharedFile.objects.all()
    serializer_class = serializers.SharedFileSerializer

    def perform_create(self, serializer):
        serializer.validated_data['author_id'] = self.request.user.pk
        super(SharedFileViewSet, self).perform_create(serializer)


class SharedUrlViewSet(ModelViewSet, GenericViewSet):
    queryset = models.SharedUrl.objects.all()
    serializer_class = serializers.SharedUrlSerializer

    def perform_create(self, serializer):
        serializer.validated_data['author_id'] = self.request.user.pk
        super(SharedUrlViewSet, self).perform_create(serializer)


class StatView(ViewSet):
    """
    In addition, a secured endpoint should be created to provide information
    on the number of items of each type, added every day, that have been
    visited at least once (see example).

    Example:
    October 25, 2017, added:
        file that you have visited 5 times
        the link that has been visited 2 times
    October 26, 2017, added:
        file that has been visited 2 times
        another file that has not been visited even once
        a link that has not been visited even once

    The result of the query should be:
    {
        "2017-10-25": {
            "files": 1,
            "links": 1
        },
        "2017-10-26": {
            "files": 1,
            "links": 0
        },
    }
    """
    # authentication_classes = [authentication.TokenAuthentication]
    permission_classes = [permissions.IsAdminUser]

    def list(self, request, *args, **kwargs):
        """
        Return access stats
        """
        stats = {}
        # Record each model's counts under its own key, so links and files
        # show up side by side as in the docstring example
        self.update_stats(stats, models.SharedUrl, 'links')
        self.update_stats(stats, models.SharedFile, 'files')
        return Response(stats)

    def update_stats(self, stats, model, key):
        for item in self.get_query(model):
            stats.setdefault(str(item['day']), {})[key] = item['count']

    # noinspection PyMethodMayBeStatic
    def get_query(self, model):
        # Group by calendar day before counting; annotating Count('pk') directly
        # on the base queryset would count each row individually. order_by()
        # keeps the GROUP BY clause limited to the day.
        data = model.objects.filter(
            access_counter__gt=0
        ).annotate(
            day=TruncDate('created'),
        ).values(
            'day'
        ).annotate(
            count=Count('pk'),
        ).order_by('day')
        return data
[ "django.db.models.Count", "rest_framework.response.Response", "django.db.models.functions.TruncDate", "logging.getLogger" ]
[((358, 385), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (375, 385), False, 'import logging\n'), ((2172, 2187), 'rest_framework.response.Response', 'Response', (['stats'], {}), '(stats)\n', (2180, 2187), False, 'from rest_framework.response import Response\n'), ((2528, 2548), 'django.db.models.functions.TruncDate', 'TruncDate', (['"""created"""'], {}), "('created')\n", (2537, 2548), False, 'from django.db.models.functions import TruncDate\n'), ((2568, 2579), 'django.db.models.Count', 'Count', (['"""pk"""'], {}), "('pk')\n", (2573, 2579), False, 'from django.db.models import Count\n')]
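StatView is guarded by IsAdminUser, so once it is wired into a router an admin credential can fetch the per-day counts shown in its docstring. A sketch under stated assumptions: the route and credentials below are hypothetical, and HTTP basic auth is assumed to be enabled.

import requests

resp = requests.get('http://127.0.0.1:8000/api/stats/',  # hypothetical route
                    auth=('admin', 'admin-password'))
print(resp.json())  # e.g. {"2017-10-25": {"files": 1, "links": 1}, ...}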
from django.contrib.admin.apps import AdminConfig from django.apps import AppConfig import sys, logging class MyAdminConfig(AdminConfig): default_site = 'homeauto.admin.MyAdminSite' logger = logging.getLogger(__name__) class HomeautoConfig(AppConfig): name = 'homeauto' verbose_name = "House Devices and Configuations" if 'runserver' in sys.argv: def ready(self): logger.info('Starting '+self.verbose_name+' App') import homeauto.jobs as jobs jobs.start()
[ "homeauto.jobs.start", "logging.getLogger" ]
[((198, 225), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (215, 225), False, 'import sys, logging\n'), ((507, 519), 'homeauto.jobs.start', 'jobs.start', ([], {}), '()\n', (517, 519), True, 'import homeauto.jobs as jobs\n')]
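HomeautoConfig.ready() only calls homeauto.jobs.start() under `runserver`, so management commands such as `migrate` never spawn background work. The jobs module itself is not shown; below is a minimal sketch of what its start() might look like, assuming APScheduler (the library choice and the job body are assumptions).

from apscheduler.schedulers.background import BackgroundScheduler


def start():
    scheduler = BackgroundScheduler()
    # placeholder job: poll device state once a minute
    scheduler.add_job(lambda: None, 'interval', minutes=1, id='poll_devices')
    scheduler.start()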
import io import json import math import re import typing import zipfile import tempfile from urllib.parse import urljoin from base64 import b64encode from io import BufferedIOBase from typing import Optional, List import textwrap import graphviz import requests import svg_stack from graphviz import escape from flask import current_app from pdfminer import high_level from bioc.biocjson import BioCJsonIterWriter, fromJSON as biocFromJSON, toJSON as biocToJSON from jsonlines import Reader as BioCJsonIterReader, Writer as BioCJsonIterWriter import os import bioc from marshmallow import ValidationError from PyPDF4 import PdfFileWriter, PdfFileReader from PIL import Image from lxml import etree from neo4japp.models import Files from neo4japp.schemas.formats.drawing_tool import validate_map from neo4japp.schemas.formats.enrichment_tables import validate_enrichment_table from neo4japp.schemas.formats.graph import validate_graph from neo4japp.services.file_types.exports import FileExport, ExportFormatError from neo4japp.services.file_types.service import BaseFileTypeProvider from neo4japp.utils.logger import EventLog from neo4japp.constants import ( ANNOTATION_STYLES_DICT, ARROW_STYLE_DICT, BORDER_STYLES_DICT, DEFAULT_BORDER_COLOR, DEFAULT_FONT_SIZE, DEFAULT_NODE_WIDTH, DEFAULT_NODE_HEIGHT, MAX_LINE_WIDTH, BASE_ICON_DISTANCE, IMAGE_HEIGHT_INCREMENT, FONT_SIZE_MULTIPLIER, SCALING_FACTOR, FILE_MIME_TYPE_DIRECTORY, FILE_MIME_TYPE_PDF, FILE_MIME_TYPE_BIOC, FILE_MIME_TYPE_MAP, FILE_MIME_TYPE_GRAPH, FILE_MIME_TYPE_ENRICHMENT_TABLE, ICON_SIZE, FRONTEND_URL, BYTE_ENCODING, DEFAULT_DPI, POINT_TO_PIXEL, HORIZONTAL_TEXT_PADDING, LABEL_OFFSET, MAP_ICON_OFFSET, PDF_MARGIN, NAME_NODE_OFFSET, TRANSPARENT_PIXEL, VERTICAL_NODE_PADDING, NAME_LABEL_FONT_AVERAGE_WIDTH, NAME_LABEL_PADDING_MULTIPLIER, FILENAME_LABEL_MARGIN, FILENAME_LABEL_FONT_SIZE, IMAGES_RE, ASSETS_PATH, ICON_NODES, RELATION_NODES, DETAIL_TEXT_LIMIT, DEFAULT_IMAGE_NODE_WIDTH, DEFAULT_IMAGE_NODE_HEIGHT, LogEventType, IMAGE_BORDER_SCALE, WATERMARK_DISTANCE, WATERMARK_WIDTH, WATERMARK_ICON_SIZE ) # This file implements handlers for every file type that we have in Lifelike so file-related # code can use these handlers to figure out how to handle different file types from neo4japp.utils.string import extract_text extension_mime_types = { '.pdf': 'application/pdf', '.llmap': 'vnd.lifelike.document/map', '.svg': 'image/svg+xml', '.png': 'image/png', '.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', # TODO: Use a mime type library? } def is_valid_doi(doi): try: # not [bad request, not found] but yes to 403 - no access return requests.get(doi, headers={ # sometimes request is filtered if there is no user-agent header "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) " "AppleWebKit/537.36 " "(KHTML, like Gecko) Chrome/51.0.2704.103 " "Safari/537.36" } ).status_code not in [400, 404] except Exception as e: return False # ref: https://stackoverflow.com/a/10324802 # Has a good breakdown of the DOI specifications, # in case need to play around with the regex in the future doi_re = re.compile( # match label pointing that it is DOI rb'(doi[\W]*)?' # match url to doi.org # doi might contain subdomain or 'www' etc. rb'((?:https?:\/\/)(?:[-A-z0-9]*\.)*doi\.org\/)?' 
# match folder (10) and register name rb'(10\.[0-9]{3,}(?:[\.][0-9]+)*\/)' # try match commonly used DOI format rb'([-A-z0-9]*)' # match up to first space (values after # are ~ignored anyway) rb'([^ \n\f#]*)' # match up to 20 characters in the same line (values after # are ~ignored anyway) rb'([^\n\f#]{0,20})', flags=re.IGNORECASE ) # noqa protocol_re = re.compile(r'https?:\/\/') unusual_characters_re = re.compile(r'([^-A-z0-9]+)') characters_groups_re = re.compile(r'([a-z]+|[A-Z]+|[0-9]+|-+|[^-A-z0-9]+)') common_escape_patterns_re = re.compile(rb'\\') dash_types_re = re.compile(bytes("[‐᠆﹣-⁃−¬]+", BYTE_ENCODING)) # Used to match the links in maps during the export SANKEY_RE = re.compile(r'^ */projects/.+/sankey/.+$') MAIL_RE = re.compile(r'^ *mailto:.+$') ENRICHMENT_TABLE_RE = re.compile(r'^ */projects/.+/enrichment-table/.+$') DOCUMENT_RE = re.compile(r'^ */projects/.+/files/.+$') BIOC_RE = re.compile(r'^ */projects/.+/bioc/.+$') ANY_FILE_RE = re.compile(r'^ */files/.+$') # As other links begin with "projects" as well, we are looking for those without additional slashes # looking like /projects/Example or /projects/COVID-19 PROJECTS_RE = re.compile(r'^ */projects/(?!.*/.+).*') ICON_DATA: dict = {} PDF_PAD = 1.0 def _search_doi_in(content: bytes) -> Optional[str]: doi: Optional[str] try: for match in doi_re.finditer(content): label, url, folderRegistrant, likelyDOIName, tillSpace, DOISuffix = \ [s.decode(BYTE_ENCODING, errors='ignore') if s else '' for s in match.groups()] certainly_doi = label + url url = 'https://doi.org/' # is whole match a DOI? (finished on \n, trimmed whitespaces) doi = ((url + folderRegistrant + likelyDOIName + tillSpace + DOISuffix).strip()) if is_valid_doi(doi): return doi # is match till space a DOI? doi = (url + folderRegistrant + likelyDOIName + tillSpace) if is_valid_doi(doi): return doi # make deep search only if there was clear indicator that it is a doi if certainly_doi: # if contains escape patterns try substitute them if common_escape_patterns_re.search(match.group()): doi = _search_doi_in( common_escape_patterns_re.sub( b'', match.group() ) ) if is_valid_doi(doi): return doi # try substitute different dash types if dash_types_re.search(match.group()): doi = _search_doi_in( dash_types_re.sub( b'-', match.group() ) ) if is_valid_doi(doi): return doi # we iteratively start cutting off suffix on each group of # unusual characters try: reversedDOIEnding = (tillSpace + DOISuffix)[::-1] while reversedDOIEnding: _, _, reversedDOIEnding = characters_groups_re.split( reversedDOIEnding, 1) doi = ( url + folderRegistrant + likelyDOIName + reversedDOIEnding[::-1] ) if is_valid_doi(doi): return doi except Exception: pass # we iteratively start cutting off suffix on each group of either # lowercase letters # uppercase letters # numbers try: reversedDOIEnding = (likelyDOIName + tillSpace)[::-1] while reversedDOIEnding: _, _, reversedDOIEnding = characters_groups_re.split( reversedDOIEnding, 1) doi = ( url + folderRegistrant + reversedDOIEnding[::-1] ) if is_valid_doi(doi): return doi except Exception: pass # yield 0 matches on test case # # is it a DOI in common format? 
# doi = (url + folderRegistrant + likelyDOIName) # if self._is_valid_doi(doi): # print('match by common format xxx') # return doi # in very rare cases there is \n in text containing doi try: end_of_match_idx = match.end(0) first_char_after_match = content[end_of_match_idx:end_of_match_idx + 1] if first_char_after_match == b'\n': doi = _search_doi_in( # new input = match + 50 chars after new line match.group() + content[end_of_match_idx + 1:end_of_match_idx + 1 + 50] ) if is_valid_doi(doi): return doi except Exception as e: pass except Exception as e: pass return None class DirectoryTypeProvider(BaseFileTypeProvider): MIME_TYPE = FILE_MIME_TYPE_DIRECTORY SHORTHAND = 'directory' mime_types = (MIME_TYPE,) def can_create(self) -> bool: return True def validate_content(self, buffer: BufferedIOBase): # Figure out file size buffer.seek(0, io.SEEK_END) size = buffer.tell() if size > 0: raise ValueError("Directories can't have content") class PDFTypeProvider(BaseFileTypeProvider): MIME_TYPE = FILE_MIME_TYPE_PDF SHORTHAND = 'pdf' mime_types = (MIME_TYPE,) def detect_mime_type(self, buffer: BufferedIOBase) -> List[typing.Tuple[float, str]]: return [(0, self.MIME_TYPE)] if buffer.read(5) == b'%PDF-' else [] def can_create(self) -> bool: return True def validate_content(self, buffer: BufferedIOBase): # TODO: Actually validate PDF content pass def extract_doi(self, buffer: BufferedIOBase) -> Optional[str]: data = buffer.read() buffer.seek(0) # Attempt 1: search through the first N bytes (most probably containing only metadata) chunk = data[:2 ** 17] doi = _search_doi_in(chunk) if doi is not None: return doi # Attempt 2: search through the first two pages of text (no metadata) fp = io.BytesIO(data) text = high_level.extract_text(fp, page_numbers=[0, 1], caching=False) doi = _search_doi_in(bytes(text, encoding='utf8')) return doi def _is_valid_doi(self, doi): try: # not [bad request, not found] but yes to 403 - no access return requests.get(doi, headers={ # sometimes request is filtered if there is no user-agent header "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) " "AppleWebKit/537.36 " "(KHTML, like Gecko) Chrome/51.0.2704.103 " "Safari/537.36" } ).status_code not in [400, 404] except Exception as e: return False # ref: https://stackoverflow.com/a/10324802 # Has a good breakdown of the DOI specifications, # in case need to play around with the regex in the future doi_re = re.compile( # match label pointing that it is DOI rb'(doi[\W]*)?' # match url to doi.org # doi might contain subdomain or 'www' etc. rb'((?:https?:\/\/)(?:[-A-z0-9]*\.)*doi\.org\/)?' 
# match folder (10) and register name rb'(10\.[0-9]{3,}(?:[\.][0-9]+)*\/)' # try match commonly used DOI format rb'([-A-z0-9]*)' # match up to first space (values after # are ~ignored anyway) rb'([^ \n\f#]*)' # match up to 20 characters in the same line (values after # are ~ignored anyway) rb'([^\n\f#]{0,20})', flags=re.IGNORECASE ) # noqa protocol_re = re.compile(r'https?:\/\/') unusual_characters_re = re.compile(r'([^-A-z0-9]+)') characters_groups_re = re.compile(r'([a-z]+|[A-Z]+|[0-9]+|-+|[^-A-z0-9]+)') common_escape_patterns_re = re.compile(rb'\\') dash_types_re = re.compile(bytes("[‐᠆﹣-⁃−¬]+", BYTE_ENCODING)) def to_indexable_content(self, buffer: BufferedIOBase): return buffer # Elasticsearch can index PDF files directly def should_highlight_content_text_matches(self) -> bool: return True class BiocTypeProvider(BaseFileTypeProvider): MIME_TYPE = FILE_MIME_TYPE_BIOC SHORTHAND = 'BioC' mime_types = (MIME_TYPE,) ALLOWED_TYPES = ['.xml', '.bioc'] def detect_mime_type(self, buffer: BufferedIOBase) -> List[typing.Tuple[float, str]]: try: # If it is xml file and bioc self.check_xml_and_bioc(buffer) return [(0, self.MIME_TYPE)] except BaseException: return [] finally: buffer.seek(0) def handles(self, file: Files) -> bool: ext = os.path.splitext(file.filename)[1].lower() return super().handles(file) and ext in self.ALLOWED_TYPES def can_create(self) -> bool: return True def validate_content(self, buffer: BufferedIOBase): with BioCJsonIterReader(buffer) as reader: for obj in reader: passage = biocFromJSON(obj, level=bioc.DOCUMENT) def extract_doi(self, buffer: BufferedIOBase) -> Optional[str]: data = buffer.read() buffer.seek(0) chunk = data[:2 ** 17] doi = _search_doi_in(chunk) return doi def convert(self, buffer): # assume it is xml collection = bioc.load(buffer) buffer.stream = io.BytesIO() with BioCJsonIterWriter(buffer) as writer: for doc in collection.documents: writer.write(biocToJSON(doc)) buffer.seek(0) def check_xml_and_bioc(self, buffer: BufferedIOBase): tree = etree.parse(buffer) system_url: str = tree.docinfo.system_url result = system_url.lower().find('bioc') if result < 0: raise ValueError() def substitute_svg_images(map_content: io.BytesIO, images: list, zip_file: zipfile.ZipFile, folder_name: str): """ Match every link inside SVG file and replace it with raw PNG data of icons or images from zip file. 
This has to be done after the graphviz call, as base64 PNG data is often longer than graphviz max length limit (~16k chars) params: :param map_content: bytes of the exported map :param images: list containing names of the images to embed :param zip_file: zip containing images :param folder_name: uuid of a temporary folder containing the images :returns: a modified svg file containing embedded images """ icon_data = get_icons_data() text_content = map_content.read().decode(BYTE_ENCODING) text_content = IMAGES_RE.sub(lambda match: icon_data[match.group(0)], text_content) for image in images: text_content = text_content.replace( folder_name + '/' + image, 'data:image/png;base64,' + b64encode( zip_file.read("".join(['images/', image]))).decode(BYTE_ENCODING)) return io.BytesIO(bytes(text_content, BYTE_ENCODING)) def get_icons_data(): """ Lazy loading of the byte icon data from the PNG files """ if ICON_DATA: return ICON_DATA else: for key in ['map', 'link', 'email', 'sankey', 'document', 'enrichment_table', 'note', 'ms-word', 'ms-excel', 'ms-powerpoint', 'cytoscape', 'lifelike']: icon_path = os.path.join(ASSETS_PATH, f'{key}.png') with open(icon_path, 'rb') as file: ICON_DATA[icon_path] = 'data:image/png;base64,' \ + b64encode(file.read()) \ .decode(BYTE_ENCODING) return ICON_DATA def create_default_node(node): """ Creates a param dict with all the parameters required to create a simple text node or saving a baseline for more complex node - like map/note/link nodes :params: :param node: a dictionary containing the information about currently rendered node :return: baseline dict with Graphviz paramaters """ style = node.get('style', {}) # Ensure that display name is of type string, as it can be None display_name = node['display_name'] or "" return { 'name': node['hash'], # Graphviz offer no text break utility - it has to be done outside of it 'label': escape('\n'.join(textwrap.TextWrapper( width=min(10 + len(display_name) // 4, MAX_LINE_WIDTH), replace_whitespace=False).wrap(display_name))), # We have to inverse the y axis, as Graphviz coordinate system origin is at the bottom 'pos': ( f"{node['data']['x'] / SCALING_FACTOR}," f"{-node['data']['y'] / SCALING_FACTOR}!" 
), # Resize the node base on font size, as otherwise the margin would be smaller than # in the Lifelike map editor 'width': f"{node['data'].get('width', DEFAULT_NODE_WIDTH) / SCALING_FACTOR}", 'height': f"{node['data'].get('height', DEFAULT_NODE_HEIGHT) / SCALING_FACTOR}", 'shape': 'box', 'style': 'rounded,' + BORDER_STYLES_DICT.get(style.get('lineType'), ''), 'color': style.get('strokeColor') or DEFAULT_BORDER_COLOR, 'fontcolor': style.get('fillColor') or ANNOTATION_STYLES_DICT.get( node['label'], {'color': 'black'}).get('color'), 'fontname': 'sans-serif', 'margin': "0.2,0.0", 'fontsize': f"{style.get('fontSizeScale', 1.0) * DEFAULT_FONT_SIZE}", # Setting penwidth to 0 removes the border 'penwidth': f"{style.get('lineWidthScale', 1.0)}" if style.get('lineType') != 'none' else '0.0' } def create_image_label(node): """ Creates a node acting as a label for the image :params: :param node: dict containing the node data :returns: label params """ style = node.get('style', {}) height = node['data'].get('height', DEFAULT_IMAGE_NODE_HEIGHT) width = node['data'].get('width', DEFAULT_IMAGE_NODE_WIDTH) border_width = style.get('lineWidthScale', 1.0) if style.get('lineType') != 'none' else 0.0 # Try to match the front-end max width by assuming that average font width is equal to 50%% # of the height - and adjusting the text to be roughly of the image width label_font_size = style.get('fontSizeScale', 1.0) * DEFAULT_FONT_SIZE label = escape('\n'.join(textwrap.TextWrapper( width=int(width / (label_font_size * 0.5)), replace_whitespace=False).wrap(node['display_name'] or ""))) label_offset = -height / 2.0 - LABEL_OFFSET - (label_font_size / 2.0 * (1 + label.count('\n'))) - border_width return { 'label': label, 'pos': ( f"{node['data']['x'] / SCALING_FACTOR}," f"{(-node['data']['y'] + label_offset) / SCALING_FACTOR + FILENAME_LABEL_MARGIN}!" ), 'fontsize': f"{label_font_size}", 'penwidth': '0.0', 'fontcolor': style.get('fillColor') or 'black', 'fontname': 'sans-serif', 'name': node['hash'] + '_label' } def create_image_node(node, params): """ Add parameters specific to the image label. :params: :param node: dict containing the node data :param params: dict containing baseline parameters :returns: modified params """ style = node.get('style', {}) # Remove the label generated in 'create_default_node' - we will add it as separate node params['label'] = "" height = node['data'].get('height', DEFAULT_IMAGE_NODE_HEIGHT) width = node['data'].get('width', DEFAULT_IMAGE_NODE_WIDTH) params['penwidth'] = f"{style.get('lineWidthScale', 1.0) * IMAGE_BORDER_SCALE}" \ if style.get('lineType') != 'none' else '0.0' params['width'] = f"{width / SCALING_FACTOR}" params['height'] = f"{height / SCALING_FACTOR}" params['fixedsize'] = 'true' params['imagescale'] = 'both' params['shape'] = 'rect' params['style'] = 'bold,' + BORDER_STYLES_DICT.get(style.get('lineType'), '') params['color'] = style.get('strokeColor') or 'white' return params def create_detail_node(node, params): """ Add parameters specific to the nodes which has a 'show detail text instead of a label' property. Due to the copyright, we limit the text in detail nodes dragged from the pdfs to 250 characters - see https://sbrgsoftware.atlassian.net/browse/LL-3387 for details on problems. 
    Newline characters can appear in the detail text and need to be replaced with
    Graphviz's left-align escape (a backslash followed by the letter l, which can't
    be written literally here due to the pep8 check), so the text has to be escaped
    carefully.
    :params:
    :param node: dict containing the node data
    :param params: dict containing baseline parameters that have to be altered
    :returns: modified params dict
    TODO: Mimic the text metric and text breaking from the drawing-tool
    """
    params['style'] += ',filled'
    detail_text = node['data'].get('detail', '')
    if detail_text:
        if node['data'].get('sources'):
            # Check if the node was dragged from the pdf - if so, it will have a source link
            if any(DOCUMENT_RE.match(src.get('url')) for src in node['data'].get('sources')):
                detail_text = detail_text[:DETAIL_TEXT_LIMIT]
    detail_text = detail_text.rstrip('\\')
    # Split lines to inspect their length and replace them with '\l' later
    # Use regex to split, otherwise \n (text, not new lines) are matched as well
    lines = re.split("\n", detail_text)
    # Escape the characters and break lines longer than max line width
    lines = map(lambda x: r' \l '.join(textwrap.TextWrapper(width=MAX_LINE_WIDTH)
                                       .wrap(escape(x))), lines)
    # '\l' is graphviz special new line, which placed at the end of the line will align it
    # to the left - we use that instead of \n (and add one at the end to align last line)
    detail_text = r"\l".join(lines) + r'\l'
    params['label'] = detail_text
    params['fillcolor'] = ANNOTATION_STYLES_DICT.get(node['label'],
                                                     {'bgcolor': 'black'}
                                                     ).get('bgcolor')
    if not node.get('style', {}).get('strokeColor'):
        # No border by default
        params['penwidth'] = '0.0'
    return params


def get_link_icon_type(node):
    """
    Evaluate the icon that a link node should have (document, sankey, enrichment
    table, mail or plain link).
    If the link is valid, save it and use it later when setting the node href;
    otherwise return None.
:params: :param node: dict containing the node data :returns: the correct label for the icon and a corresponding URL - if valid """ data = node['data'].get('sources', []) + node['data'].get('hyperlinks', []) for link in data: # TODO: This is getting bigger and bigger - refactor this for some clarity if ENRICHMENT_TABLE_RE.match(link['url']): return 'enrichment_table', link['url'] elif SANKEY_RE.match(link['url']): return 'sankey', link['url'] elif DOCUMENT_RE.match(link['url']): doi_src = next( (src for src in node['data'].get('sources') if src.get( 'domain') == "DOI"), None) # If there is a valid doi, link to DOI if doi_src and is_valid_doi(doi_src['url']): return 'document', doi_src['url'] # If the links point to internal document, remove it from the node data so it would # not became exported as node url - as that might violate copyrights if link in node['data'].get('sources', []): node['data']['sources'].remove(link) else: node['data']['hyperlinks'].remove(link) return 'document', None elif PROJECTS_RE.match(link['url']): return 'project', link['url'] elif BIOC_RE.match(link['url']): return 'bioc', link['url'] elif MAIL_RE.match(link['url']): return 'email', link['url'] elif ANY_FILE_RE.match(link['url']): domain = link.get('domain', "").strip() if domain: # Do not return url, as we are not creating links to files that we not create on LL if domain.endswith('.docx') or domain.endswith('.doc'): return 'ms-word', None elif domain.endswith('.pptx') or domain.endswith('.ppt'): return 'ms-powerpoint', None elif domain.endswith('.xlsx') or domain.endswith('.xls'): return 'ms-excel', None elif domain.endswith('.cys'): return 'cytoscape', None return 'link', None def create_icon_node(node, params): """ Alters the params dict with the values suitable for creation of the nodes with icons and creates additional parameters dict storing the information about the icon node :params: :param node: dict containing the node data :param params: dict containing baseline parameters that have to be altered :returns: modified params dict descriping icon label and a new dict describing the icon itself. Additionally, returns computed height of icon + label to set it to a proper value """ style = node.get('style', {}) label = escape(node['label']) # remove border around icon label params['penwidth'] = '0.0' # Calculate the distance between icon and the label center distance_from_the_label = BASE_ICON_DISTANCE + params['label'].count('\n') \ * IMAGE_HEIGHT_INCREMENT + FONT_SIZE_MULTIPLIER * (style.get('fontSizeScale', 1.0) - 1.0) node_height = distance_from_the_label * 2 + float(ICON_SIZE) node_height *= SCALING_FACTOR # Move the label below to make space for the icon node params['pos'] = ( f"{node['data']['x'] / SCALING_FACTOR}," f"{-node['data']['y'] / SCALING_FACTOR - distance_from_the_label}!" ) # Create a separate node which will hold the image icon_params = { 'name': "icon_" + node['hash'], 'pos': ( f"{node['data']['x'] / SCALING_FACTOR}," f"{-node['data']['y'] / SCALING_FACTOR}!" 
), 'label': "" } default_icon_color = ANNOTATION_STYLES_DICT.get(node['label'], {'defaultimagecolor': 'black'} )['defaultimagecolor'] custom_icons = ANNOTATION_STYLES_DICT.get('custom_icons', {}) if label == 'link': label, link = get_link_icon_type(node) # Save the link for later usage node['link'] = link custom_icons = ANNOTATION_STYLES_DICT.get('custom_icons', {}) # If label is microsoft icon, we set default text color to its color for consistent look if label in custom_icons.keys(): default_icon_color = custom_icons.get(label, default_icon_color) icon_params['image'] = ( os.path.join(ASSETS_PATH, f'{label}.png') ) if label not in custom_icons.keys(): # We are setting the icon color by using 'inverse' icon images and colorful background # But not for microsoft icons, as those are always in the same color icon_params['fillcolor'] = style.get("fillColor") or default_icon_color icon_params['style'] = 'filled' icon_params['shape'] = 'box' icon_params['height'] = ICON_SIZE icon_params['width'] = ICON_SIZE icon_params['fixedsize'] = 'true' icon_params['imagescale'] = 'true' icon_params['penwidth'] = '0.0' params['fontcolor'] = style.get("fillColor") or default_icon_color return params, icon_params, node_height def create_relation_node(node, params): """ Adjusts the node into the relation node (purple ones) :params: :param node: dict containing the node data :param params: dict containing Graphviz parameters that will be altered :returns: altered params dict """ style = node.get('style', {}) default_color = ANNOTATION_STYLES_DICT.get( node['label'], {'color': 'black'})['color'] params['color'] = style.get('strokeColor') or default_color if style.get('fillColor'): params['color'] = style.get('strokeColor') or DEFAULT_BORDER_COLOR # Changing font color changes background to white params['fillcolor'] = 'white' if style.get('fillColor') else default_color params['fontcolor'] = style.get('fillColor') or 'black' params['style'] += ',filled' return params def set_node_href(node): """ Evaluates and sets the href for the node. 
If link parameter was not set previously, we are dealing with entity node (or icon node without any sources) - so we prioritize the hyperlinks here :params: :param node: dict containing the node data :returns: string with URL to which node should point - or empty string """ href = '' if node.get('link'): href = node['link'] elif node['data'].get('hyperlinks'): href = node['data']['hyperlinks'][0].get('url') elif node['data'].get('sources'): href = node['data']['sources'][0].get('url') # Whitespaces will break the link if we prepend the domain current_link = href.strip() # If url points to internal file, prepend it with the domain address if current_link.startswith('/'): # Remove Lifelike links to files that we do not create - due to the possible copyrights if ANY_FILE_RE.match(current_link): # Remove the link from the dictionary if node.get('link'): del node['link'] elif node['data'].get('hyperlinks'): del node['data']['hyperlinks'][0] else: del node['data']['sources'][0] # And search again href = set_node_href(node) else: href = urljoin(FRONTEND_URL, current_link) return href def create_map_name_node(): """ Creates the baseline dict for map name node :retuns: dict describing the name node with Graphviz parameters """ return { 'fontcolor': ANNOTATION_STYLES_DICT.get('map', {'defaultimagecolor': 'black'} )['defaultimagecolor'], 'fontsize': str(FILENAME_LABEL_FONT_SIZE), 'shape': 'box', 'style': 'rounded', 'margin': f'{FILENAME_LABEL_MARGIN * 2},{FILENAME_LABEL_MARGIN}' } def create_edge(edge, node_hash_type_dict): """ Creates a dict with parameters required to render an edge :params: :param edge: dict containing the edge information :param node_hash_type_dict: lookup dict allowing to quickly check whether either head or tail is pointing to link or note (as this changes the default edge style) :returns: dict describing the edge with Graphviz parameters """ style = edge.get('style', {}) default_line_style = 'solid' default_arrow_head = 'arrow' edge_data = edge.get('data', {}) url_data = edge_data.get('hyperlinks', []) + edge_data.get('sources', []) url = url_data[-1]['url'] if len(url_data) else '' if any(item in [node_hash_type_dict[edge['from']], node_hash_type_dict[edge['to']]] for item in ['link', 'note']): default_line_style = 'dashed' default_arrow_head = 'none' return { 'tail_name': edge['from'], 'head_name': edge['to'], # Pristine edges have 'label: null' - so we have to check them as escaping None type gives # error. Do not use .get() with default, as the key exist - it's the content that is missing 'label': escape(edge['label']) if edge['label'] else "", 'dir': 'both', 'color': style.get('strokeColor') or DEFAULT_BORDER_COLOR, 'arrowtail': ARROW_STYLE_DICT.get(style.get('sourceHeadType') or 'none'), 'arrowhead': ARROW_STYLE_DICT.get( style.get('targetHeadType') or default_arrow_head), 'penwidth': str(style.get('lineWidthScale', 1.0)) if style.get( 'lineType') != 'none' else '0.0', 'fontsize': str(style.get('fontSizeScale', 1.0) * DEFAULT_FONT_SIZE), 'style': BORDER_STYLES_DICT.get(style.get('lineType') or default_line_style), 'URL': url } def create_watermark(x_center, y): """ Create a Lifelike watermark (icon, text, hyperlink) below the pdf. We need to ensure that the lowest node is not intersecting it - if so, we push it even lower. 
    :params:
    :param x_center: middle of the pdf
    :param y: position of the lowest node bottom on the pdf
    :returns: 3 dictionaries - one for each of the watermark elements
    """
    y += WATERMARK_DISTANCE
    label_params = {
        'name': 'watermark_node',
        'label': 'Created by Lifelike',
        'pos': (
            f"{x_center / SCALING_FACTOR},"
            f"{-y / SCALING_FACTOR}!"
        ),
        'width': f"{WATERMARK_WIDTH / SCALING_FACTOR}",
        'height': f"{DEFAULT_NODE_HEIGHT / SCALING_FACTOR}",
        'fontcolor': 'black',
        'fontname': 'sans-serif',
        'margin': "0.2,0.0",
        'fontsize': f"{DEFAULT_FONT_SIZE}",
        'penwidth': '0.0',
    }
    url_params = {
        'name': 'watermark_hyper',
        'label': 'lifelike.bio',
        'href': 'https://lifelike.bio',
        'pos': (
            f"{x_center / SCALING_FACTOR},"
            f"{-(y + DEFAULT_NODE_HEIGHT / 2.0) / SCALING_FACTOR}!"
        ),
        'width': f"{WATERMARK_WIDTH / SCALING_FACTOR}",
        'height': f"{DEFAULT_NODE_HEIGHT / SCALING_FACTOR}",
        'fontcolor': 'blue',
        'fontname': 'sans-serif',
        'margin': "0.2,0.0",
        'fontsize': f"{DEFAULT_FONT_SIZE - 2}",
        'penwidth': '0.0'
    }
    icon_params = {
        'name': 'watermark_icon',
        'label': '',
        'pos': (
            f"{(x_center - WATERMARK_WIDTH / 2.0 + WATERMARK_ICON_SIZE) / SCALING_FACTOR},"
            f"{-y / SCALING_FACTOR}!"
        ),
        'fixedsize': 'true',
        'imagescale': 'both',
        'shape': 'rect',
        'image': ASSETS_PATH + 'lifelike.png',
        'width': f"{WATERMARK_ICON_SIZE / SCALING_FACTOR}",
        'height': f"{WATERMARK_ICON_SIZE / SCALING_FACTOR}",
        'penwidth': '0.0'
    }
    return label_params, url_params, icon_params


class MapTypeProvider(BaseFileTypeProvider):
    MIME_TYPE = FILE_MIME_TYPE_MAP
    SHORTHAND = 'map'
    mime_types = (MIME_TYPE,)

    def detect_mime_type(self, buffer: BufferedIOBase) -> List[typing.Tuple[float, str]]:
        try:
            # If the data validates, I guess it's a map?
            self.validate_content(buffer)
            return [(0, self.MIME_TYPE)]
        except ValueError:
            return []
        finally:
            buffer.seek(0)

    def can_create(self) -> bool:
        return True

    def validate_content(self, buffer: BufferedIOBase):
        """
        Validates whether the uploaded file is a Lifelike map - a zip containing a
        graph.json file describing the map and, optionally, a folder with the images.
        If any images are specified in the json graph, their presence and conformance
        to the PNG standard are verified.
:params: :param buffer: buffer containing the bytes of the file that has to be tested :raises ValueError: if the file is not a proper map file """ zipped_map = buffer.read() try: with zipfile.ZipFile(io.BytesIO(zipped_map)) as zip_file: # Test zip returns the name of the first invalid file inside the archive; if any if zip_file.testzip(): raise ValueError json_graph = json.loads(zip_file.read('graph.json')) validate_map(json_graph) for node in json_graph['nodes']: if node.get('image_id'): zip_file.read("".join(['images/', node.get('image_id'), '.png'])) except (zipfile.BadZipFile, KeyError): raise ValueError def to_indexable_content(self, buffer: BufferedIOBase): # Do not catch exceptions here - there are handled in elastic_service.py zip_file = zipfile.ZipFile(io.BytesIO(buffer.read())) content_json = json.loads(zip_file.read('graph.json')) content = io.StringIO() string_list = [] for node in content_json.get('nodes', []): node_data = node.get('data', {}) display_name = node.get('display_name', '') detail = node_data.get('detail', '') if node_data else '' string_list.append('' if display_name is None else display_name) string_list.append('' if detail is None else detail) for edge in content_json.get('edges', []): edge_data = edge.get('data', {}) label = edge.get('label', '') detail = edge_data.get('detail', '') if edge_data else '' string_list.append('' if label is None else label) string_list.append('' if detail is None else detail) content.write(' '.join(string_list)) return typing.cast(BufferedIOBase, io.BytesIO(content.getvalue().encode(BYTE_ENCODING))) def generate_export(self, file: Files, format: str, self_contained_export=False) -> FileExport: """ Generates the map as a file in provided format. While working with this, remember that: - Most of the node parameters is optional (including width and height). - Graphviz y-axis is inverted (starts at the top) - SVG requires separate image embedding mechanism (get_icons_data) """ if format not in ('png', 'svg', 'pdf'): raise ExportFormatError() # This should handle the naming and removal of the temporary directory folder = tempfile.TemporaryDirectory() try: zip_file = zipfile.ZipFile(io.BytesIO(file.content.raw_file)) json_graph = json.loads(zip_file.read('graph.json')) except KeyError: current_app.logger.info( f'Invalid map file: {file.hash_id} Cannot find map graph inside the zip!', extra=EventLog( event_type=LogEventType.MAP_EXPORT_FAILURE.value).to_dict() ) raise ValidationError('Cannot retrieve contents of the file - it might be corrupted') except zipfile.BadZipFile: current_app.logger.info( f'Invalid map file: {file.hash_id} File is a bad zipfile.', extra=EventLog( event_type=LogEventType.MAP_EXPORT_FAILURE.value).to_dict() ) raise ValidationError('Cannot retrieve contents of the file - it might be corrupted') graph_attr = [('margin', f'{PDF_MARGIN}'), ('outputorder', 'nodesfirst'), ('pad', f'{PDF_PAD}')] if format == 'png': graph_attr.append(('dpi', '100')) graph = graphviz.Digraph( escape(file.filename), # New lines are not permitted in the comment - they will crash the export. 
            # Replace them with spaces until we find a different solution
            comment=file.description.replace('\n', ' ') if file.description else None,
            engine='neato',
            graph_attr=graph_attr,
            format=format)

        node_hash_type_dict = {}
        x_values, y_values = [], []
        images = []
        nodes = json_graph['nodes']
        # Sort the images to the front of the list to ensure that they do not cover other nodes
        nodes.sort(key=lambda n: n.get('label', "") == 'image', reverse=True)

        for i, node in enumerate(nodes):
            # Store the coordinates of each node, since the map name node and the watermark
            # are positioned based on them
            x_values.append(node['data']['x'])
            y_values.append(node['data']['y'])
            # Store node hash->label for faster edge default type evaluation
            node_hash_type_dict[node['hash']] = node['label']
            style = node.get('style', {})
            params = create_default_node(node)

            if node['label'] == 'image':
                try:
                    image_name = node.get('image_id') + '.png'
                    images.append(image_name)
                    im = zip_file.read("".join(['images/', image_name]))
                    file_path = os.path.sep.join([folder.name, image_name])
                    with open(file_path, "wb") as f:
                        f.write(im)
                # Note: Add placeholder images instead?
                except KeyError:
                    name = node.get('image_id') + '.png'
                    current_app.logger.info(
                        f'Invalid map file: {file.hash_id} Cannot retrieve image {name}.',
                        extra=EventLog(
                            event_type=LogEventType.MAP_EXPORT_FAILURE.value).to_dict()
                    )
                    raise ValidationError(
                        f"Cannot retrieve image: {name} - file might be corrupted")
                params = create_image_node(node, params)
                if node['display_name']:
                    graph.node(**create_image_label(node))
                params['image'] = file_path

            if node['label'] in ICON_NODES:
                # map and note should point to the first source, or to the first hyperlink
                # if there are no sources
                link_data = node['data'].get('sources', []) + node['data'].get('hyperlinks', [])
                node['link'] = link_data[0].get('url') if link_data else None
                if style.get('showDetail'):
                    params = create_detail_node(node, params)
                else:
                    params, icon_params, node_height = create_icon_node(node, params)
                    # Save the height to ensure that the watermark does not intersect
                    # the node in some edge cases
                    nodes[i]['data']['height'] = node_height
                    # Create a separate node with the icon
                    graph.node(**icon_params)

            if node['label'] in RELATION_NODES:
                params = create_relation_node(node, params)

            params['href'] = set_node_href(node)
            graph.node(**params)

        min_x = min(x_values, default=0)
        min_y = min(y_values, default=0)

        if self_contained_export:
            # Add the name of the map in the top-left corner to ease map recognition
            # in the linked export
            name_node_params = create_map_name_node()
            # Set outside of the function to avoid unnecessary copying of potentially big variables
            name_node_params['name'] = file.filename
            name_node_params['pos'] = (
                f"{(min_x - NAME_NODE_OFFSET) / SCALING_FACTOR},"
                f"{(-min_y - NAME_NODE_OFFSET) / SCALING_FACTOR}!"
            )
            graph.node(**name_node_params)

        lower_ys = list(map(lambda x: x['data']['y'] + x['data'].get(
            'height', DEFAULT_NODE_HEIGHT) / 2.0, nodes))
        max_x = max(x_values, default=0)
        x_center = min_x + (max_x - min_x) / 2.0

        for params in create_watermark(x_center, max(lower_ys, default=0)):
            graph.node(**params)

        for edge in json_graph['edges']:
            edge_params = create_edge(edge, node_hash_type_dict)
            graph.edge(**edge_params)

        ext = f".{format}"
        content = io.BytesIO(graph.pipe())

        if format == 'svg':
            content = substitute_svg_images(content, images, zip_file, folder.name)

        return FileExport(
            content=content,
            mime_type=extension_mime_types[ext],
            filename=f"{file.filename}{ext}"
        )

    def merge(self, files: list, requested_format: str, links=None):
        """
        Export, merge and prepare as FileExport the list of files.
        :param files: list of Files objects; the first entry is always the main map
        :param requested_format: export format
        :param links: list of dict objects storing info about the links that should be embedded:
                      x: x pos; y: y pos; page_origin: page containing the icon;
                      page_destination: page the link should lead to
        :return: an exportable file.
        :raises ValidationError: if the provided format is invalid
        """
        if requested_format == 'png':
            merger = self.merge_pngs_vertically
        elif requested_format == 'pdf':
            merger = self.merge_pdfs
        elif requested_format == 'svg':
            merger = self.merge_svgs
        else:
            raise ValidationError("Unknown or invalid export format for the requested file.",
                                  requested_format)

        ext = f'.{requested_format}'

        if len(files) > 1:
            content = merger(files, links)
        else:
            content = self.get_file_export(files[0], requested_format)

        return FileExport(
            content=content,
            mime_type=extension_mime_types[ext],
            filename=f"{files[0].filename}{ext}"
        )

    def get_file_export(self, file, format):
        """
        Get the exported version of the file in the requested format.
        A wrapper around the abstract export method that adds map-specific params
        and translates export exceptions.
        :param file: map file to export
        :param format: wanted format
        :raises ValidationError: when the provided format is invalid
        :return: exported map as BytesIO
        """
        try:
            return io.BytesIO(self.generate_export(file, format, self_contained_export=True)
                              .content.getvalue())
        except ExportFormatError:
            raise ValidationError("Unknown or invalid export "
                                  "format for the requested file.", format)

    def merge_pngs_vertically(self, files, _=None):
        """
        Append pngs vertically.
        params:
        :param files: list of files to export
        :param _: links - unused for png; present to match the merge_pdfs signature
        :returns: maps concatenated vertically
        :raises SystemError: when one of the images exceeds PILLOW decompression bomb size limits
        """
        final_bytes = io.BytesIO()
        try:
            images = [Image.open(self.get_file_export(file, 'png')) for file in files]
        except Image.DecompressionBombError as e:
            raise SystemError('One of the files exceeds the maximum size - it cannot be exported '
                              'as part of the linked export') from e
        cropped_images = [image.crop(image.getbbox()) for image in images]
        widths, heights = zip(*(i.size for i in cropped_images))
        max_width = max(widths)
        total_height = sum(heights)
        new_im = Image.new('RGBA', (max_width, total_height), TRANSPARENT_PIXEL)
        y_offset = 0

        for im in cropped_images:
            x_offset = int((max_width - im.size[0]) / 2)
            new_im.paste(im, (x_offset, y_offset))
            y_offset += im.size[1]

        new_im.save(final_bytes, format='PNG')
        return final_bytes

    def merge_pdfs(self, files: list, links=None):
        """
        Merge pdfs and add links to the map.
        params:
        :param files: list of files to export.
        :param links: list of dicts describing internal map links
        """
        links = links or []
        final_bytes = io.BytesIO()
        writer = PdfFileWriter()
        half_size = int(ICON_SIZE) * DEFAULT_DPI / 2.0

        for out_file in files:
            out_file = self.get_file_export(out_file, 'pdf')
            reader = PdfFileReader(out_file, strict=False)
            writer.appendPagesFromReader(reader)

        for link in links:
            file_index = link['page_origin']
            coord_offset, pixel_offset = get_content_offsets(files[file_index])
            x_base = ((link['x'] - coord_offset[0]) / SCALING_FACTOR * POINT_TO_PIXEL) + \
                PDF_MARGIN * DEFAULT_DPI + pixel_offset[0]
            y_base = ((-1 * link['y'] - coord_offset[1]) / SCALING_FACTOR * POINT_TO_PIXEL) + \
                PDF_MARGIN * DEFAULT_DPI - pixel_offset[1]
            writer.addLink(file_index, link['page_destination'],
                           [x_base - half_size, y_base - half_size - LABEL_OFFSET,
                            x_base + half_size, y_base + half_size])

        writer.write(final_bytes)
        return final_bytes

    def merge_svgs(self, files: list, _=None):
        """
        Merge svg files together with svg_stack.
        params:
        :param files: list of files to be merged
        :param _: links - unused for svg; present to match the merge_pdfs signature
        """
        doc = svg_stack.Document()
        layout = svg_stack.VBoxLayout()
        # A StringIO is used, since svg_stack cannot save to BytesIO - it raises an error
        result_string = io.StringIO()

        for file in files:
            layout.addSVG(self.get_file_export(file, 'svg'), alignment=svg_stack.AlignCenter)

        doc.setLayout(layout)
        doc.save(result_string)
        return io.BytesIO(result_string.getvalue().encode(BYTE_ENCODING))


class GraphTypeProvider(BaseFileTypeProvider):
    MIME_TYPE = FILE_MIME_TYPE_GRAPH
    SHORTHAND = 'Graph'
    mime_types = (MIME_TYPE,)

    def detect_mime_type(self, buffer: BufferedIOBase) -> List[typing.Tuple[float, str]]:
        try:
            # The content is plain JSON, so detection is based on the file extension
            if os.path.splitext(str(
                # buffer here is actually a wrapper of BufferedIOBase and it carries a
                # filename even if the type check fails
                buffer.filename  # type: ignore[attr-defined]
            ))[1] == '.graph':
                return [(0, self.MIME_TYPE)]
            else:
                return []
        except (ValueError, AttributeError):
            return []
        finally:
            buffer.seek(0)

    def can_create(self) -> bool:
        return True

    def validate_content(self, buffer: BufferedIOBase):
        data = json.loads(buffer.read())
        validate_graph(data)

    def to_indexable_content(self, buffer: BufferedIOBase):
        content_json = json.load(buffer)
        content = io.StringIO()
        string_list = set(extract_text(content_json))
        content.write(' '.join(list(string_list)))
        return typing.cast(BufferedIOBase, io.BytesIO(content.getvalue().encode(BYTE_ENCODING)))

    def extract_metadata_from_content(self, file: Files, buffer: BufferedIOBase):
        if not file.description:
            data = json.loads(buffer.read())
            description = data['graph']['description']
            file.description = description


class EnrichmentTableTypeProvider(BaseFileTypeProvider):
    MIME_TYPE = FILE_MIME_TYPE_ENRICHMENT_TABLE
    SHORTHAND = 'enrichment-table'
    mime_types = (MIME_TYPE,)

    def detect_mime_type(self, buffer: BufferedIOBase) -> List[typing.Tuple[float, str]]:
        try:
            # If the content validates, assume it is an enrichment table.
            # The enrichment table schema is very simple, so this check is simplistic
            # and may cause problems in the future
            self.validate_content(buffer)
            return [(0, self.MIME_TYPE)]
        except ValueError:
            return []
        finally:
            buffer.seek(0)

    def can_create(self) -> bool:
        return True

    def validate_content(self, buffer: BufferedIOBase):
        data = json.loads(buffer.read())
        validate_enrichment_table(data)

    def to_indexable_content(self, buffer: BufferedIOBase):
        data = json.load(buffer)
        content = io.StringIO()
        genes = data['data']['genes'].split(',')
        organism = data['data']['organism']
        content.write(', '.join(genes))
        content.write('\r\n\r\n')
        content.write(organism)
        content.write('\r\n\r\n')

        if 'result' in data:
            genes = data['result']['genes']
            for gene in genes:
                content.write('\u2022 ')
                content.write(gene['imported'])
                if 'matched' in gene:
                    content.write(': ')
                    content.write(gene['matched'])
                if 'fullName' in gene:
                    content.write(' (')
                    content.write(gene['fullName'])
                    content.write(')')
                if 'domains' in gene:
                    for gene_domain in gene['domains'].values():
                        for value in gene_domain.values():
                            if len(value['text']):
                                content.write('\n\u2192 ')
                                content.write(value['text'])
                content.write('.\r\n\r\n')

        return typing.cast(BufferedIOBase, io.BytesIO(content.getvalue().encode(BYTE_ENCODING)))

    def should_highlight_content_text_matches(self) -> bool:
        return True

    def handle_content_update(self, file: Files):
        file.enrichment_annotations = None


def get_content_offsets(file):
    """
    Gets the offset box of the map, allowing translation of the map coordinates into
    pixels of the pdf generated by graphviz.
    *params*
    file: a Files object of the map to be analyzed
    Return: two pairs of coordinates: x & y.
    The first denotes the offset to the pdf origin (in the units used by the front-end renderer).
    The second denotes the offset created by the map name node (from which the margin is
    calculated) in pixels.
    """
    x_values, y_values = [], []
    zip_file = zipfile.ZipFile(io.BytesIO(file.content.raw_file))
    try:
        json_graph = json.loads(zip_file.read('graph.json'))
    except KeyError:
        raise ValidationError('Cannot find graph.json inside the map archive')

    for node in json_graph['nodes']:
        x_values.append(node['data']['x'])
        y_values.append(-node['data']['y'])
        if node['label'] in ICON_NODES:
            # If the node is an icon node, the label sits lower than the pos indicates
            # due to the addition of the icon node
            y_values[-1] -= BASE_ICON_DISTANCE + math.ceil(len(node['display_name']) / min(15 + len(
                node['display_name']) // 3, MAX_LINE_WIDTH)) \
                * IMAGE_HEIGHT_INCREMENT + FONT_SIZE_MULTIPLIER * \
                (node.get('style', {}).get('fontSizeScale', 1.0) - 1.0)

    x_offset = max(len(file.filename), 0) * NAME_LABEL_FONT_AVERAGE_WIDTH / 2.0 - \
        MAP_ICON_OFFSET + HORIZONTAL_TEXT_PADDING * NAME_LABEL_PADDING_MULTIPLIER
    y_offset = VERTICAL_NODE_PADDING
    return (min(x_values), min(y_values)), (x_offset, y_offset)
[ "PIL.Image.new", "jsonlines.Reader", "urllib.parse.urljoin", "PyPDF4.PdfFileWriter", "neo4japp.services.file_types.exports.ExportFormatError", "bioc.biocjson.toJSON", "os.path.join", "svg_stack.Document", "os.path.sep.join", "PyPDF4.PdfFileReader", "tempfile.TemporaryDirectory", "svg_stack.VBoxLayout", "marshmallow.ValidationError", "bioc.biocjson.fromJSON", "requests.get", "lxml.etree.parse", "neo4japp.utils.logger.EventLog", "neo4japp.schemas.formats.enrichment_tables.validate_enrichment_table", "io.BytesIO", "io.StringIO", "re.split", "graphviz.escape", "textwrap.TextWrapper", "re.compile", "json.load", "neo4japp.constants.ANNOTATION_STYLES_DICT.get", "neo4japp.services.file_types.exports.FileExport", "neo4japp.schemas.formats.graph.validate_graph", "neo4japp.utils.string.extract_text", "neo4japp.schemas.formats.drawing_tool.validate_map", "pdfminer.high_level.extract_text", "os.path.splitext", "jsonlines.Writer", "bioc.load" ]
[((3595, 3782), 're.compile', 're.compile', (["b'(doi[\\\\W]*)?((?:https?:\\\\/\\\\/)(?:[-A-z0-9]*\\\\.)*doi\\\\.org\\\\/)?(10\\\\.[0-9]{3,}(?:[\\\\.][0-9]+)*\\\\/)([-A-z0-9]*)([^ \\\\n\\\\f#]*)([^\\\\n\\\\f#]{0,20})'"], {'flags': 're.IGNORECASE'}), "(\n b'(doi[\\\\W]*)?((?:https?:\\\\/\\\\/)(?:[-A-z0-9]*\\\\.)*doi\\\\.org\\\\/)?(10\\\\.[0-9]{3,}(?:[\\\\.][0-9]+)*\\\\/)([-A-z0-9]*)([^ \\\\n\\\\f#]*)([^\\\\n\\\\f#]{0,20})'\n , flags=re.IGNORECASE)\n", (3605, 3782), False, 'import re\n'), ((4191, 4218), 're.compile', 're.compile', (['"""https?:\\\\/\\\\/"""'], {}), "('https?:\\\\/\\\\/')\n", (4201, 4218), False, 'import re\n'), ((4242, 4269), 're.compile', 're.compile', (['"""([^-A-z0-9]+)"""'], {}), "('([^-A-z0-9]+)')\n", (4252, 4269), False, 'import re\n'), ((4294, 4345), 're.compile', 're.compile', (['"""([a-z]+|[A-Z]+|[0-9]+|-+|[^-A-z0-9]+)"""'], {}), "('([a-z]+|[A-Z]+|[0-9]+|-+|[^-A-z0-9]+)')\n", (4304, 4345), False, 'import re\n'), ((4375, 4394), 're.compile', 're.compile', (["b'\\\\\\\\'"], {}), "(b'\\\\\\\\')\n", (4385, 4394), False, 'import re\n'), ((4521, 4561), 're.compile', 're.compile', (['"""^ */projects/.+/sankey/.+$"""'], {}), "('^ */projects/.+/sankey/.+$')\n", (4531, 4561), False, 'import re\n'), ((4573, 4600), 're.compile', 're.compile', (['"""^ *mailto:.+$"""'], {}), "('^ *mailto:.+$')\n", (4583, 4600), False, 'import re\n'), ((4624, 4674), 're.compile', 're.compile', (['"""^ */projects/.+/enrichment-table/.+$"""'], {}), "('^ */projects/.+/enrichment-table/.+$')\n", (4634, 4674), False, 'import re\n'), ((4690, 4729), 're.compile', 're.compile', (['"""^ */projects/.+/files/.+$"""'], {}), "('^ */projects/.+/files/.+$')\n", (4700, 4729), False, 'import re\n'), ((4741, 4779), 're.compile', 're.compile', (['"""^ */projects/.+/bioc/.+$"""'], {}), "('^ */projects/.+/bioc/.+$')\n", (4751, 4779), False, 'import re\n'), ((4795, 4822), 're.compile', 're.compile', (['"""^ */files/.+$"""'], {}), "('^ */files/.+$')\n", (4805, 4822), False, 'import re\n'), ((4993, 5031), 're.compile', 're.compile', (['"""^ */projects/(?!.*/.+).*"""'], {}), "('^ */projects/(?!.*/.+).*')\n", (5003, 5031), False, 'import re\n'), ((11704, 11891), 're.compile', 're.compile', (["b'(doi[\\\\W]*)?((?:https?:\\\\/\\\\/)(?:[-A-z0-9]*\\\\.)*doi\\\\.org\\\\/)?(10\\\\.[0-9]{3,}(?:[\\\\.][0-9]+)*\\\\/)([-A-z0-9]*)([^ \\\\n\\\\f#]*)([^\\\\n\\\\f#]{0,20})'"], {'flags': 're.IGNORECASE'}), "(\n b'(doi[\\\\W]*)?((?:https?:\\\\/\\\\/)(?:[-A-z0-9]*\\\\.)*doi\\\\.org\\\\/)?(10\\\\.[0-9]{3,}(?:[\\\\.][0-9]+)*\\\\/)([-A-z0-9]*)([^ \\\\n\\\\f#]*)([^\\\\n\\\\f#]{0,20})'\n , flags=re.IGNORECASE)\n", (11714, 11891), False, 'import re\n'), ((12364, 12391), 're.compile', 're.compile', (['"""https?:\\\\/\\\\/"""'], {}), "('https?:\\\\/\\\\/')\n", (12374, 12391), False, 'import re\n'), ((12419, 12446), 're.compile', 're.compile', (['"""([^-A-z0-9]+)"""'], {}), "('([^-A-z0-9]+)')\n", (12429, 12446), False, 'import re\n'), ((12475, 12526), 're.compile', 're.compile', (['"""([a-z]+|[A-Z]+|[0-9]+|-+|[^-A-z0-9]+)"""'], {}), "('([a-z]+|[A-Z]+|[0-9]+|-+|[^-A-z0-9]+)')\n", (12485, 12526), False, 'import re\n'), ((12560, 12579), 're.compile', 're.compile', (["b'\\\\\\\\'"], {}), "(b'\\\\\\\\')\n", (12570, 12579), False, 'import re\n'), ((26232, 26253), 'graphviz.escape', 'escape', (["node['label']"], {}), "(node['label'])\n", (26238, 26253), False, 'from graphviz import escape\n'), ((27398, 27444), 'neo4japp.constants.ANNOTATION_STYLES_DICT.get', 'ANNOTATION_STYLES_DICT.get', (['"""custom_icons"""', '{}'], {}), "('custom_icons', {})\n", 
(27424, 27444), False, 'from neo4japp.constants import ANNOTATION_STYLES_DICT, ARROW_STYLE_DICT, BORDER_STYLES_DICT, DEFAULT_BORDER_COLOR, DEFAULT_FONT_SIZE, DEFAULT_NODE_WIDTH, DEFAULT_NODE_HEIGHT, MAX_LINE_WIDTH, BASE_ICON_DISTANCE, IMAGE_HEIGHT_INCREMENT, FONT_SIZE_MULTIPLIER, SCALING_FACTOR, FILE_MIME_TYPE_DIRECTORY, FILE_MIME_TYPE_PDF, FILE_MIME_TYPE_BIOC, FILE_MIME_TYPE_MAP, FILE_MIME_TYPE_GRAPH, FILE_MIME_TYPE_ENRICHMENT_TABLE, ICON_SIZE, FRONTEND_URL, BYTE_ENCODING, DEFAULT_DPI, POINT_TO_PIXEL, HORIZONTAL_TEXT_PADDING, LABEL_OFFSET, MAP_ICON_OFFSET, PDF_MARGIN, NAME_NODE_OFFSET, TRANSPARENT_PIXEL, VERTICAL_NODE_PADDING, NAME_LABEL_FONT_AVERAGE_WIDTH, NAME_LABEL_PADDING_MULTIPLIER, FILENAME_LABEL_MARGIN, FILENAME_LABEL_FONT_SIZE, IMAGES_RE, ASSETS_PATH, ICON_NODES, RELATION_NODES, DETAIL_TEXT_LIMIT, DEFAULT_IMAGE_NODE_WIDTH, DEFAULT_IMAGE_NODE_HEIGHT, LogEventType, IMAGE_BORDER_SCALE, WATERMARK_DISTANCE, WATERMARK_WIDTH, WATERMARK_ICON_SIZE\n'), ((27907, 27948), 'os.path.join', 'os.path.join', (['ASSETS_PATH', 'f"""{label}.png"""'], {}), "(ASSETS_PATH, f'{label}.png')\n", (27919, 27948), False, 'import os\n'), ((10581, 10597), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (10591, 10597), False, 'import io\n'), ((10613, 10676), 'pdfminer.high_level.extract_text', 'high_level.extract_text', (['fp'], {'page_numbers': '[0, 1]', 'caching': '(False)'}), '(fp, page_numbers=[0, 1], caching=False)\n', (10636, 10676), False, 'from pdfminer import high_level\n'), ((14074, 14091), 'bioc.load', 'bioc.load', (['buffer'], {}), '(buffer)\n', (14083, 14091), False, 'import bioc\n'), ((14116, 14128), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (14126, 14128), False, 'import io\n'), ((14368, 14387), 'lxml.etree.parse', 'etree.parse', (['buffer'], {}), '(buffer)\n', (14379, 14387), False, 'from lxml import etree\n'), ((22272, 22299), 're.split', 're.split', (['"""\n"""', 'detail_text'], {}), "('\\n', detail_text)\n", (22280, 22299), False, 'import re\n'), ((27179, 27252), 'neo4japp.constants.ANNOTATION_STYLES_DICT.get', 'ANNOTATION_STYLES_DICT.get', (["node['label']", "{'defaultimagecolor': 'black'}"], {}), "(node['label'], {'defaultimagecolor': 'black'})\n", (27205, 27252), False, 'from neo4japp.constants import ANNOTATION_STYLES_DICT, ARROW_STYLE_DICT, BORDER_STYLES_DICT, DEFAULT_BORDER_COLOR, DEFAULT_FONT_SIZE, DEFAULT_NODE_WIDTH, DEFAULT_NODE_HEIGHT, MAX_LINE_WIDTH, BASE_ICON_DISTANCE, IMAGE_HEIGHT_INCREMENT, FONT_SIZE_MULTIPLIER, SCALING_FACTOR, FILE_MIME_TYPE_DIRECTORY, FILE_MIME_TYPE_PDF, FILE_MIME_TYPE_BIOC, FILE_MIME_TYPE_MAP, FILE_MIME_TYPE_GRAPH, FILE_MIME_TYPE_ENRICHMENT_TABLE, ICON_SIZE, FRONTEND_URL, BYTE_ENCODING, DEFAULT_DPI, POINT_TO_PIXEL, HORIZONTAL_TEXT_PADDING, LABEL_OFFSET, MAP_ICON_OFFSET, PDF_MARGIN, NAME_NODE_OFFSET, TRANSPARENT_PIXEL, VERTICAL_NODE_PADDING, NAME_LABEL_FONT_AVERAGE_WIDTH, NAME_LABEL_PADDING_MULTIPLIER, FILENAME_LABEL_MARGIN, FILENAME_LABEL_FONT_SIZE, IMAGES_RE, ASSETS_PATH, ICON_NODES, RELATION_NODES, DETAIL_TEXT_LIMIT, DEFAULT_IMAGE_NODE_WIDTH, DEFAULT_IMAGE_NODE_HEIGHT, LogEventType, IMAGE_BORDER_SCALE, WATERMARK_DISTANCE, WATERMARK_WIDTH, WATERMARK_ICON_SIZE\n'), ((27607, 27653), 'neo4japp.constants.ANNOTATION_STYLES_DICT.get', 'ANNOTATION_STYLES_DICT.get', (['"""custom_icons"""', '{}'], {}), "('custom_icons', {})\n", (27633, 27653), False, 'from neo4japp.constants import ANNOTATION_STYLES_DICT, ARROW_STYLE_DICT, BORDER_STYLES_DICT, DEFAULT_BORDER_COLOR, DEFAULT_FONT_SIZE, DEFAULT_NODE_WIDTH, DEFAULT_NODE_HEIGHT, MAX_LINE_WIDTH, 
BASE_ICON_DISTANCE, IMAGE_HEIGHT_INCREMENT, FONT_SIZE_MULTIPLIER, SCALING_FACTOR, FILE_MIME_TYPE_DIRECTORY, FILE_MIME_TYPE_PDF, FILE_MIME_TYPE_BIOC, FILE_MIME_TYPE_MAP, FILE_MIME_TYPE_GRAPH, FILE_MIME_TYPE_ENRICHMENT_TABLE, ICON_SIZE, FRONTEND_URL, BYTE_ENCODING, DEFAULT_DPI, POINT_TO_PIXEL, HORIZONTAL_TEXT_PADDING, LABEL_OFFSET, MAP_ICON_OFFSET, PDF_MARGIN, NAME_NODE_OFFSET, TRANSPARENT_PIXEL, VERTICAL_NODE_PADDING, NAME_LABEL_FONT_AVERAGE_WIDTH, NAME_LABEL_PADDING_MULTIPLIER, FILENAME_LABEL_MARGIN, FILENAME_LABEL_FONT_SIZE, IMAGES_RE, ASSETS_PATH, ICON_NODES, RELATION_NODES, DETAIL_TEXT_LIMIT, DEFAULT_IMAGE_NODE_WIDTH, DEFAULT_IMAGE_NODE_HEIGHT, LogEventType, IMAGE_BORDER_SCALE, WATERMARK_DISTANCE, WATERMARK_WIDTH, WATERMARK_ICON_SIZE\n'), ((28964, 29025), 'neo4japp.constants.ANNOTATION_STYLES_DICT.get', 'ANNOTATION_STYLES_DICT.get', (["node['label']", "{'color': 'black'}"], {}), "(node['label'], {'color': 'black'})\n", (28990, 29025), False, 'from neo4japp.constants import ANNOTATION_STYLES_DICT, ARROW_STYLE_DICT, BORDER_STYLES_DICT, DEFAULT_BORDER_COLOR, DEFAULT_FONT_SIZE, DEFAULT_NODE_WIDTH, DEFAULT_NODE_HEIGHT, MAX_LINE_WIDTH, BASE_ICON_DISTANCE, IMAGE_HEIGHT_INCREMENT, FONT_SIZE_MULTIPLIER, SCALING_FACTOR, FILE_MIME_TYPE_DIRECTORY, FILE_MIME_TYPE_PDF, FILE_MIME_TYPE_BIOC, FILE_MIME_TYPE_MAP, FILE_MIME_TYPE_GRAPH, FILE_MIME_TYPE_ENRICHMENT_TABLE, ICON_SIZE, FRONTEND_URL, BYTE_ENCODING, DEFAULT_DPI, POINT_TO_PIXEL, HORIZONTAL_TEXT_PADDING, LABEL_OFFSET, MAP_ICON_OFFSET, PDF_MARGIN, NAME_NODE_OFFSET, TRANSPARENT_PIXEL, VERTICAL_NODE_PADDING, NAME_LABEL_FONT_AVERAGE_WIDTH, NAME_LABEL_PADDING_MULTIPLIER, FILENAME_LABEL_MARGIN, FILENAME_LABEL_FONT_SIZE, IMAGES_RE, ASSETS_PATH, ICON_NODES, RELATION_NODES, DETAIL_TEXT_LIMIT, DEFAULT_IMAGE_NODE_WIDTH, DEFAULT_IMAGE_NODE_HEIGHT, LogEventType, IMAGE_BORDER_SCALE, WATERMARK_DISTANCE, WATERMARK_WIDTH, WATERMARK_ICON_SIZE\n'), ((37394, 37407), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (37405, 37407), False, 'import io\n'), ((38898, 38927), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (38925, 38927), False, 'import tempfile\n'), ((44706, 44809), 'neo4japp.services.file_types.exports.FileExport', 'FileExport', ([], {'content': 'content', 'mime_type': 'extension_mime_types[ext]', 'filename': 'f"""{file.filename}{ext}"""'}), "(content=content, mime_type=extension_mime_types[ext], filename=\n f'{file.filename}{ext}')\n", (44716, 44809), False, 'from neo4japp.services.file_types.exports import FileExport, ExportFormatError\n'), ((46073, 46180), 'neo4japp.services.file_types.exports.FileExport', 'FileExport', ([], {'content': 'content', 'mime_type': 'extension_mime_types[ext]', 'filename': 'f"""{files[0].filename}{ext}"""'}), "(content=content, mime_type=extension_mime_types[ext], filename=\n f'{files[0].filename}{ext}')\n", (46083, 46180), False, 'from neo4japp.services.file_types.exports import FileExport, ExportFormatError\n'), ((47392, 47404), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (47402, 47404), False, 'import io\n'), ((47942, 48005), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(max_width, total_height)', 'TRANSPARENT_PIXEL'], {}), "('RGBA', (max_width, total_height), TRANSPARENT_PIXEL)\n", (47951, 48005), False, 'from PIL import Image\n'), ((48567, 48579), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (48577, 48579), False, 'import io\n'), ((48597, 48612), 'PyPDF4.PdfFileWriter', 'PdfFileWriter', ([], {}), '()\n', (48610, 48612), False, 'from PyPDF4 import PdfFileWriter, PdfFileReader\n'), 
((49897, 49917), 'svg_stack.Document', 'svg_stack.Document', ([], {}), '()\n', (49915, 49917), False, 'import svg_stack\n'), ((49936, 49958), 'svg_stack.VBoxLayout', 'svg_stack.VBoxLayout', ([], {}), '()\n', (49956, 49958), False, 'import svg_stack\n'), ((50066, 50079), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (50077, 50079), False, 'import io\n'), ((51282, 51302), 'neo4japp.schemas.formats.graph.validate_graph', 'validate_graph', (['data'], {}), '(data)\n', (51296, 51302), False, 'from neo4japp.schemas.formats.graph import validate_graph\n'), ((51387, 51404), 'json.load', 'json.load', (['buffer'], {}), '(buffer)\n', (51396, 51404), False, 'import json\n'), ((51423, 51436), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (51434, 51436), False, 'import io\n'), ((52726, 52757), 'neo4japp.schemas.formats.enrichment_tables.validate_enrichment_table', 'validate_enrichment_table', (['data'], {}), '(data)\n', (52751, 52757), False, 'from neo4japp.schemas.formats.enrichment_tables import validate_enrichment_table\n'), ((52834, 52851), 'json.load', 'json.load', (['buffer'], {}), '(buffer)\n', (52843, 52851), False, 'import json\n'), ((52870, 52883), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (52881, 52883), False, 'import io\n'), ((54842, 54875), 'io.BytesIO', 'io.BytesIO', (['file.content.raw_file'], {}), '(file.content.raw_file)\n', (54852, 54875), False, 'import io\n'), ((13652, 13678), 'jsonlines.Reader', 'BioCJsonIterReader', (['buffer'], {}), '(buffer)\n', (13670, 13678), True, 'from jsonlines import Reader as BioCJsonIterReader, Writer as BioCJsonIterWriter\n'), ((14142, 14168), 'jsonlines.Writer', 'BioCJsonIterWriter', (['buffer'], {}), '(buffer)\n', (14160, 14168), True, 'from jsonlines import Reader as BioCJsonIterReader, Writer as BioCJsonIterWriter\n'), ((16055, 16094), 'os.path.join', 'os.path.join', (['ASSETS_PATH', 'f"""{key}.png"""'], {}), "(ASSETS_PATH, f'{key}.png')\n", (16067, 16094), False, 'import os\n'), ((22849, 22912), 'neo4japp.constants.ANNOTATION_STYLES_DICT.get', 'ANNOTATION_STYLES_DICT.get', (["node['label']", "{'bgcolor': 'black'}"], {}), "(node['label'], {'bgcolor': 'black'})\n", (22875, 22912), False, 'from neo4japp.constants import ANNOTATION_STYLES_DICT, ARROW_STYLE_DICT, BORDER_STYLES_DICT, DEFAULT_BORDER_COLOR, DEFAULT_FONT_SIZE, DEFAULT_NODE_WIDTH, DEFAULT_NODE_HEIGHT, MAX_LINE_WIDTH, BASE_ICON_DISTANCE, IMAGE_HEIGHT_INCREMENT, FONT_SIZE_MULTIPLIER, SCALING_FACTOR, FILE_MIME_TYPE_DIRECTORY, FILE_MIME_TYPE_PDF, FILE_MIME_TYPE_BIOC, FILE_MIME_TYPE_MAP, FILE_MIME_TYPE_GRAPH, FILE_MIME_TYPE_ENRICHMENT_TABLE, ICON_SIZE, FRONTEND_URL, BYTE_ENCODING, DEFAULT_DPI, POINT_TO_PIXEL, HORIZONTAL_TEXT_PADDING, LABEL_OFFSET, MAP_ICON_OFFSET, PDF_MARGIN, NAME_NODE_OFFSET, TRANSPARENT_PIXEL, VERTICAL_NODE_PADDING, NAME_LABEL_FONT_AVERAGE_WIDTH, NAME_LABEL_PADDING_MULTIPLIER, FILENAME_LABEL_MARGIN, FILENAME_LABEL_FONT_SIZE, IMAGES_RE, ASSETS_PATH, ICON_NODES, RELATION_NODES, DETAIL_TEXT_LIMIT, DEFAULT_IMAGE_NODE_WIDTH, DEFAULT_IMAGE_NODE_HEIGHT, LogEventType, IMAGE_BORDER_SCALE, WATERMARK_DISTANCE, WATERMARK_WIDTH, WATERMARK_ICON_SIZE\n'), ((30830, 30865), 'urllib.parse.urljoin', 'urljoin', (['FRONTEND_URL', 'current_link'], {}), '(FRONTEND_URL, current_link)\n', (30837, 30865), False, 'from urllib.parse import urljoin\n'), ((31078, 31143), 'neo4japp.constants.ANNOTATION_STYLES_DICT.get', 'ANNOTATION_STYLES_DICT.get', (['"""map"""', "{'defaultimagecolor': 'black'}"], {}), "('map', {'defaultimagecolor': 'black'})\n", (31104, 31143), False, 'from neo4japp.constants 
import ANNOTATION_STYLES_DICT, ARROW_STYLE_DICT, BORDER_STYLES_DICT, DEFAULT_BORDER_COLOR, DEFAULT_FONT_SIZE, DEFAULT_NODE_WIDTH, DEFAULT_NODE_HEIGHT, MAX_LINE_WIDTH, BASE_ICON_DISTANCE, IMAGE_HEIGHT_INCREMENT, FONT_SIZE_MULTIPLIER, SCALING_FACTOR, FILE_MIME_TYPE_DIRECTORY, FILE_MIME_TYPE_PDF, FILE_MIME_TYPE_BIOC, FILE_MIME_TYPE_MAP, FILE_MIME_TYPE_GRAPH, FILE_MIME_TYPE_ENRICHMENT_TABLE, ICON_SIZE, FRONTEND_URL, BYTE_ENCODING, DEFAULT_DPI, POINT_TO_PIXEL, HORIZONTAL_TEXT_PADDING, LABEL_OFFSET, MAP_ICON_OFFSET, PDF_MARGIN, NAME_NODE_OFFSET, TRANSPARENT_PIXEL, VERTICAL_NODE_PADDING, NAME_LABEL_FONT_AVERAGE_WIDTH, NAME_LABEL_PADDING_MULTIPLIER, FILENAME_LABEL_MARGIN, FILENAME_LABEL_FONT_SIZE, IMAGES_RE, ASSETS_PATH, ICON_NODES, RELATION_NODES, DETAIL_TEXT_LIMIT, DEFAULT_IMAGE_NODE_WIDTH, DEFAULT_IMAGE_NODE_HEIGHT, LogEventType, IMAGE_BORDER_SCALE, WATERMARK_DISTANCE, WATERMARK_WIDTH, WATERMARK_ICON_SIZE\n'), ((32595, 32616), 'graphviz.escape', 'escape', (["edge['label']"], {}), "(edge['label'])\n", (32601, 32616), False, 'from graphviz import escape\n'), ((38781, 38800), 'neo4japp.services.file_types.exports.ExportFormatError', 'ExportFormatError', ([], {}), '()\n', (38798, 38800), False, 'from neo4japp.services.file_types.exports import FileExport, ExportFormatError\n'), ((40080, 40101), 'graphviz.escape', 'escape', (['file.filename'], {}), '(file.filename)\n', (40086, 40101), False, 'from graphviz import escape\n'), ((48795, 48832), 'PyPDF4.PdfFileReader', 'PdfFileReader', (['out_file'], {'strict': '(False)'}), '(out_file, strict=False)\n', (48808, 48832), False, 'from PyPDF4 import PdfFileWriter, PdfFileReader\n'), ((51463, 51489), 'neo4japp.utils.string.extract_text', 'extract_text', (['content_json'], {}), '(content_json)\n', (51475, 51489), False, 'from neo4japp.utils.string import extract_text\n'), ((2839, 2998), 'requests.get', 'requests.get', (['doi'], {'headers': "{'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'\n }"}), "(doi, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'\n })\n", (2851, 2998), False, 'import requests\n'), ((13747, 13785), 'bioc.biocjson.fromJSON', 'biocFromJSON', (['obj'], {'level': 'bioc.DOCUMENT'}), '(obj, level=bioc.DOCUMENT)\n', (13759, 13785), True, 'from bioc.biocjson import BioCJsonIterWriter, fromJSON as biocFromJSON, toJSON as biocToJSON\n'), ((36823, 36847), 'neo4japp.schemas.formats.drawing_tool.validate_map', 'validate_map', (['json_graph'], {}), '(json_graph)\n', (36835, 36847), False, 'from neo4japp.schemas.formats.drawing_tool import validate_map\n'), ((38981, 39014), 'io.BytesIO', 'io.BytesIO', (['file.content.raw_file'], {}), '(file.content.raw_file)\n', (38991, 39014), False, 'import io\n'), ((39378, 39457), 'marshmallow.ValidationError', 'ValidationError', (['"""Cannot retrieve contents of the file - it might be corrupted"""'], {}), "('Cannot retrieve contents of the file - it might be corrupted')\n", (39393, 39457), False, 'from marshmallow import ValidationError\n'), ((39750, 39829), 'marshmallow.ValidationError', 'ValidationError', (['"""Cannot retrieve contents of the file - it might be corrupted"""'], {}), "('Cannot retrieve contents of the file - it might be corrupted')\n", (39765, 39829), False, 'from marshmallow import ValidationError\n'), ((46852, 46939), 'marshmallow.ValidationError', 'ValidationError', (['"""Unknown or invalid export format for the requested file."""', 
'format'], {}), "('Unknown or invalid export format for the requested file.',\n format)\n", (46867, 46939), False, 'from marshmallow import ValidationError\n'), ((10893, 11052), 'requests.get', 'requests.get', (['doi'], {'headers': "{'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'\n }"}), "(doi, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'\n })\n", (10905, 11052), False, 'import requests\n'), ((13417, 13448), 'os.path.splitext', 'os.path.splitext', (['file.filename'], {}), '(file.filename)\n', (13433, 13448), False, 'import os\n'), ((14254, 14269), 'bioc.biocjson.toJSON', 'biocToJSON', (['doc'], {}), '(doc)\n', (14264, 14269), True, 'from bioc.biocjson import BioCJsonIterWriter, fromJSON as biocFromJSON, toJSON as biocToJSON\n'), ((17936, 17997), 'neo4japp.constants.ANNOTATION_STYLES_DICT.get', 'ANNOTATION_STYLES_DICT.get', (["node['label']", "{'color': 'black'}"], {}), "(node['label'], {'color': 'black'})\n", (17962, 17997), False, 'from neo4japp.constants import ANNOTATION_STYLES_DICT, ARROW_STYLE_DICT, BORDER_STYLES_DICT, DEFAULT_BORDER_COLOR, DEFAULT_FONT_SIZE, DEFAULT_NODE_WIDTH, DEFAULT_NODE_HEIGHT, MAX_LINE_WIDTH, BASE_ICON_DISTANCE, IMAGE_HEIGHT_INCREMENT, FONT_SIZE_MULTIPLIER, SCALING_FACTOR, FILE_MIME_TYPE_DIRECTORY, FILE_MIME_TYPE_PDF, FILE_MIME_TYPE_BIOC, FILE_MIME_TYPE_MAP, FILE_MIME_TYPE_GRAPH, FILE_MIME_TYPE_ENRICHMENT_TABLE, ICON_SIZE, FRONTEND_URL, BYTE_ENCODING, DEFAULT_DPI, POINT_TO_PIXEL, HORIZONTAL_TEXT_PADDING, LABEL_OFFSET, MAP_ICON_OFFSET, PDF_MARGIN, NAME_NODE_OFFSET, TRANSPARENT_PIXEL, VERTICAL_NODE_PADDING, NAME_LABEL_FONT_AVERAGE_WIDTH, NAME_LABEL_PADDING_MULTIPLIER, FILENAME_LABEL_MARGIN, FILENAME_LABEL_FONT_SIZE, IMAGES_RE, ASSETS_PATH, ICON_NODES, RELATION_NODES, DETAIL_TEXT_LIMIT, DEFAULT_IMAGE_NODE_WIDTH, DEFAULT_IMAGE_NODE_HEIGHT, LogEventType, IMAGE_BORDER_SCALE, WATERMARK_DISTANCE, WATERMARK_WIDTH, WATERMARK_ICON_SIZE\n'), ((36528, 36550), 'io.BytesIO', 'io.BytesIO', (['zipped_map'], {}), '(zipped_map)\n', (36538, 36550), False, 'import io\n'), ((41479, 41522), 'os.path.sep.join', 'os.path.sep.join', (['[folder.name, image_name]'], {}), '([folder.name, image_name])\n', (41495, 41522), False, 'import os\n'), ((45738, 45835), 'marshmallow.ValidationError', 'ValidationError', (['"""Unknown or invalid export format for the requested file."""', 'requested_format'], {}), "('Unknown or invalid export format for the requested file.',\n requested_format)\n", (45753, 45835), False, 'from marshmallow import ValidationError\n'), ((22531, 22540), 'graphviz.escape', 'escape', (['x'], {}), '(x)\n', (22537, 22540), False, 'from graphviz import escape\n'), ((42089, 42164), 'marshmallow.ValidationError', 'ValidationError', (['f"""Cannot retrieve image: {name} - file might be corrupted"""'], {}), "(f'Cannot retrieve image: {name} - file might be corrupted')\n", (42104, 42164), False, 'from marshmallow import ValidationError\n'), ((22418, 22460), 'textwrap.TextWrapper', 'textwrap.TextWrapper', ([], {'width': 'MAX_LINE_WIDTH'}), '(width=MAX_LINE_WIDTH)\n', (22438, 22460), False, 'import textwrap\n'), ((39256, 39314), 'neo4japp.utils.logger.EventLog', 'EventLog', ([], {'event_type': 'LogEventType.MAP_EXPORT_FAILURE.value'}), '(event_type=LogEventType.MAP_EXPORT_FAILURE.value)\n', (39264, 39314), False, 'from neo4japp.utils.logger import EventLog\n'), ((39628, 39686), 'neo4japp.utils.logger.EventLog', 'EventLog', 
([], {'event_type': 'LogEventType.MAP_EXPORT_FAILURE.value'}), '(event_type=LogEventType.MAP_EXPORT_FAILURE.value)\n', (39636, 39686), False, 'from neo4japp.utils.logger import EventLog\n'), ((41943, 42001), 'neo4japp.utils.logger.EventLog', 'EventLog', ([], {'event_type': 'LogEventType.MAP_EXPORT_FAILURE.value'}), '(event_type=LogEventType.MAP_EXPORT_FAILURE.value)\n', (41951, 42001), False, 'from neo4japp.utils.logger import EventLog\n')]
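The trickiest part of the record above is the link-placement arithmetic in merge_pdfs, which translates front-end map coordinates into pixel coordinates on the generated pdf page. A minimal runnable sketch of the same math; the constant values here are hypothetical stand-ins for the ones defined in neo4japp.constants:

# A minimal sketch of the merge_pdfs link-placement math. All constants below
# are assumed, illustrative values, not the real ones from neo4japp.constants.
SCALING_FACTOR = 1.0       # assumed: front-end units per graphviz point
POINT_TO_PIXEL = 96 / 72   # assumed: points to pixels at 96 dpi
PDF_MARGIN = 0.5           # assumed: page margin in inches
DEFAULT_DPI = 96           # assumed


def link_anchor(link_x, link_y, coord_offset, pixel_offset):
    # Mirrors the x_base/y_base expressions above: shift into the pdf's
    # coordinate space, convert units, then add the page margin. Note the
    # y-axis flip (-link_y), since graphviz's y-axis starts at the top.
    x = ((link_x - coord_offset[0]) / SCALING_FACTOR * POINT_TO_PIXEL
         + PDF_MARGIN * DEFAULT_DPI + pixel_offset[0])
    y = ((-link_y - coord_offset[1]) / SCALING_FACTOR * POINT_TO_PIXEL
         + PDF_MARGIN * DEFAULT_DPI - pixel_offset[1])
    return x, y


# Hypothetical link at map position (200, -100) with zero content offset
# and a (12, 8) pixel offset from the map name node:
print(link_anchor(200.0, -100.0, (0.0, 0.0), (12.0, 8.0)))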
from puzzle8 import Puzzle
from time import sleep


def main():
    puzzle8 = Puzzle([1, 2, 3, 4, 5, 6, 7, 8, 0])
    print('Your shuffled puzzle')
    puzzle8.print_matrix(puzzle8.get_initial_state())
    sleep(5)
    puzzle8.clear_terminal()
    print('Solving your puzzle')
    puzzle8.print_matrix(puzzle8.get_initial_state())
    sleep(5)
    puzzle8.clear_terminal()
    print('Solved in {} attempts via breadth-first search'.format(puzzle8.solve_puzzle_bfs()))
    best_way, attempts = puzzle8.solve_puzzle_manhattan()
    print('Solved in {} attempts via Manhattan distance\nThe minimal solution requires {} '
          'attempts and is shown above'.format(attempts, best_way))


if __name__ == "__main__":
    main()
[ "puzzle8.Puzzle", "time.sleep" ]
[((74, 109), 'puzzle8.Puzzle', 'Puzzle', (['[1, 2, 3, 4, 5, 6, 7, 8, 0]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 0])\n', (80, 109), False, 'from puzzle8 import Puzzle\n'), ((190, 198), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (195, 198), False, 'from time import sleep\n'), ((311, 319), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (316, 319), False, 'from time import sleep\n')]
from functools import wraps
import time


def retry(func, max_retry=5, backoff=300):
    """Retry decorator: call ``func`` up to ``max_retry`` times, sleeping
    ``backoff`` seconds after each failure, and re-raise the last exception
    if every attempt fails."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        last_exc = None
        for _ in range(max_retry):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                last_exc = e
                time.sleep(backoff)
        raise last_exc
    return wrapper
[ "functools.wraps", "time.sleep" ]
[((90, 101), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (95, 101), False, 'from functools import wraps\n'), ((333, 352), 'time.sleep', 'time.sleep', (['backoff'], {}), '(backoff)\n', (343, 352), False, 'import time\n')]
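The decorator above takes the wrapped function as its first positional argument, so it can be applied bare (``@retry``) to get the defaults, or called directly to customize the settings. A minimal usage sketch; flaky_fetch and its failure mode are hypothetical:

import random

# Assumes the retry() defined above is in scope.


def flaky_fetch():
    # Hypothetical unreliable operation: fails about two thirds of the time.
    if random.random() < 0.66:
        raise ConnectionError("transient failure")
    return "ok"


# The bare form (@retry above the def) keeps the defaults: 5 attempts with
# 300-second pauses. Calling retry() directly allows custom settings:
fetch_with_retry = retry(flaky_fetch, max_retry=3, backoff=1)
print(fetch_with_retry())  # re-raises ConnectionError if all 3 attempts fail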
from specter.test.test_psf import TestPixPSF, TestSpotPSF from specter.test.test_specio import TestSpecIO from specter.test.test_throughput import TestThroughput from specter.test.test_util import TestUtil from specter.test.test_extract import TestExtract from specter.test.test_pixspline import TestPixSpline from specter.test.test_binscripts import TestBinScripts def test(): """ Run a suite of specter tests """ import unittest load = unittest.defaultTestLoader.loadTestsFromTestCase tests = list() tests.append(load(TestPixPSF)) tests.append(load(TestSpotPSF)) tests.append(load(TestSpecIO)) tests.append(load(TestThroughput)) tests.append(load(TestUtil)) tests.append(load(TestExtract)) tests.append(load(TestPixSpline)) tests.append(load(TestBinScripts)) suite = unittest.TestSuite(tests) unittest.TextTestRunner(verbosity=2).run(suite)
[ "unittest.TextTestRunner", "unittest.TestSuite" ]
[((832, 857), 'unittest.TestSuite', 'unittest.TestSuite', (['tests'], {}), '(tests)\n', (850, 857), False, 'import unittest\n'), ((862, 898), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (885, 898), False, 'import unittest\n')]
import pyfolio as pyf
import pandas as pd
import yfinance as yf

# tickers to analyze
tickers = ['AAPL', 'MSFT', 'AMZN', 'WMT']

# create a dataframe placeholder
data = pd.DataFrame(columns=tickers)

# download five years of adjusted close prices for each ticker
for ticker in tickers:
    data[ticker] = yf.download(ticker, period='5y')['Adj Close']

# compute daily mean returns, i.e. the daily return of an equal-weighted
# portfolio of the four stocks above
data = data.pct_change().dropna().mean(axis=1)
print(data.head())

pyf.create_full_tear_sheet(data)
[ "pandas.DataFrame", "pyfolio.create_full_tear_sheet", "yfinance.download" ]
[((164, 193), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'tickers'}), '(columns=tickers)\n', (176, 193), True, 'import pandas as pd\n'), ((489, 521), 'pyfolio.create_full_tear_sheet', 'pyf.create_full_tear_sheet', (['data'], {}), '(data)\n', (515, 521), True, 'import pyfolio as pyf\n'), ((247, 279), 'yfinance.download', 'yf.download', (['ticker'], {'period': '"""5y"""'}), "(ticker, period='5y')\n", (258, 279), True, 'import yfinance as yf\n')]
from timeit import default_timer as timer from opensfm import io from opensfm import tracking from opensfm.dataset_base import DataSetBase def run_dataset(data: DataSetBase): """Link matches pair-wise matches into tracks.""" start = timer() features, colors, segmentations, instances = tracking.load_features( data, data.images() ) features_end = timer() matches = tracking.load_matches(data, data.images()) matches_end = timer() tracks_manager = tracking.create_tracks_manager( features, colors, segmentations, instances, matches, data.config, ) tracks_end = timer() data.save_tracks_manager(tracks_manager) write_report( data, tracks_manager, features_end - start, matches_end - features_end, tracks_end - matches_end, ) def write_report( data: DataSetBase, tracks_manager, features_time, matches_time, tracks_time ): view_graph = [ (k[0], k[1], v) for k, v in tracks_manager.get_all_pairs_connectivity().items() ] report = { "wall_times": { "load_features": features_time, "load_matches": matches_time, "compute_tracks": tracks_time, }, "wall_time": features_time + matches_time + tracks_time, "num_images": tracks_manager.num_shots(), "num_tracks": tracks_manager.num_tracks(), "view_graph": view_graph, } data.save_report(io.json_dumps(report), "tracks.json")
[ "timeit.default_timer", "opensfm.io.json_dumps", "opensfm.tracking.create_tracks_manager" ]
[((245, 252), 'timeit.default_timer', 'timer', ([], {}), '()\n', (250, 252), True, 'from timeit import default_timer as timer\n'), ((379, 386), 'timeit.default_timer', 'timer', ([], {}), '()\n', (384, 386), True, 'from timeit import default_timer as timer\n'), ((462, 469), 'timeit.default_timer', 'timer', ([], {}), '()\n', (467, 469), True, 'from timeit import default_timer as timer\n'), ((491, 591), 'opensfm.tracking.create_tracks_manager', 'tracking.create_tracks_manager', (['features', 'colors', 'segmentations', 'instances', 'matches', 'data.config'], {}), '(features, colors, segmentations, instances,\n matches, data.config)\n', (521, 591), False, 'from opensfm import tracking\n'), ((660, 667), 'timeit.default_timer', 'timer', ([], {}), '()\n', (665, 667), True, 'from timeit import default_timer as timer\n'), ((1498, 1519), 'opensfm.io.json_dumps', 'io.json_dumps', (['report'], {}), '(report)\n', (1511, 1519), False, 'from opensfm import io\n')]
"""Python Cookbook Chapter 9, recipe 10. """ import logging import sys from logging import Formatter from pathlib import Path def create_log(): PROD_LOG_FORMAT = ('[{asctime}]' ' {levelname} in {module}: {message}' ) with Path('sample.log').open('w') as sample_log_file: logging.basicConfig( stream=sample_log_file, level=logging.DEBUG ) logger = logging.getLogger() for handler in logger.handlers: handler.setFormatter(Formatter(PROD_LOG_FORMAT, style='{')) logger.info("Sample Message One") logger.debug("Debugging") logger.warn("Something might have gone wrong") import re from pathlib import Path import csv log_pattern = re.compile( r"\[(?P<timestamp>.*?)\]" r"\s(?P<levelname>\w+)" r"\sin\s(?P<module>[\w\._]+):" r"\s(?P<message>.*)") def extract_row_iter(source_log_file): for line in source_log_file: match = log_pattern.match(line) if match is None: continue yield match.groupdict() def parse_log(): summary_path = Path('summary_log.csv') with summary_path.open('w') as summary_file: writer = csv.DictWriter(summary_file, ['timestamp', 'levelname', 'module', 'message']) writer.writeheader() source_log_dir = Path('.') for source_log_path in source_log_dir.glob('*.log'): with source_log_path.open() as source_log_file: writer.writerows( extract_row_iter(source_log_file) ) print('Converted', source_log_path, 'to', summary_path) def counting_extract_row_iter(counts, source_log_file): for line in source_log_file: match = log_pattern.match(line) if match is None: counts['non-match'] += 1 continue counts['valid'] += 1 yield match.groupdict() from collections import Counter def parse_log2(): summary_path = Path('summary_log.csv') with summary_path.open('w') as summary_file: writer = csv.DictWriter(summary_file, ['timestamp', 'levelname', 'module', 'message']) writer.writeheader() source_log_dir = Path('.') for source_log_path in source_log_dir.glob('*.log'): counts = Counter() with source_log_path.open() as source_log_file: writer.writerows( counting_extract_row_iter(counts, source_log_file) ) print('Converted', source_log_path, 'to', summary_path) print(counts) if __name__ == "__main__": create_log() parse_log2()
[ "logging.basicConfig", "re.compile", "logging.Formatter", "pathlib.Path", "collections.Counter", "logging.getLogger", "csv.DictWriter" ]
[((717, 836), 're.compile', 're.compile', (['"""\\\\[(?P<timestamp>.*?)\\\\]\\\\s(?P<levelname>\\\\w+)\\\\sin\\\\s(?P<module>[\\\\w\\\\._]+):\\\\s(?P<message>.*)"""'], {}), "(\n '\\\\[(?P<timestamp>.*?)\\\\]\\\\s(?P<levelname>\\\\w+)\\\\sin\\\\s(?P<module>[\\\\w\\\\._]+):\\\\s(?P<message>.*)'\n )\n", (727, 836), False, 'import re\n'), ((1065, 1088), 'pathlib.Path', 'Path', (['"""summary_log.csv"""'], {}), "('summary_log.csv')\n", (1069, 1088), False, 'from pathlib import Path\n'), ((1956, 1979), 'pathlib.Path', 'Path', (['"""summary_log.csv"""'], {}), "('summary_log.csv')\n", (1960, 1979), False, 'from pathlib import Path\n'), ((306, 370), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sample_log_file', 'level': 'logging.DEBUG'}), '(stream=sample_log_file, level=logging.DEBUG)\n', (325, 370), False, 'import logging\n'), ((391, 410), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (408, 410), False, 'import logging\n'), ((1156, 1233), 'csv.DictWriter', 'csv.DictWriter', (['summary_file', "['timestamp', 'levelname', 'module', 'message']"], {}), "(summary_file, ['timestamp', 'levelname', 'module', 'message'])\n", (1170, 1233), False, 'import csv\n'), ((1301, 1310), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (1305, 1310), False, 'from pathlib import Path\n'), ((2047, 2124), 'csv.DictWriter', 'csv.DictWriter', (['summary_file', "['timestamp', 'levelname', 'module', 'message']"], {}), "(summary_file, ['timestamp', 'levelname', 'module', 'message'])\n", (2061, 2124), False, 'import csv\n'), ((2192, 2201), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (2196, 2201), False, 'from pathlib import Path\n'), ((2284, 2293), 'collections.Counter', 'Counter', ([], {}), '()\n', (2291, 2293), False, 'from collections import Counter\n'), ((249, 267), 'pathlib.Path', 'Path', (['"""sample.log"""'], {}), "('sample.log')\n", (253, 267), False, 'from pathlib import Path\n'), ((484, 521), 'logging.Formatter', 'Formatter', (['PROD_LOG_FORMAT'], {'style': '"""{"""'}), "(PROD_LOG_FORMAT, style='{')\n", (493, 521), False, 'from logging import Formatter\n')]
# Generated by Django 3.2.2 on 2021-07-04 21:33 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('blweb', '0007_vehicleconfig'), ] operations = [ migrations.AlterField( model_name='vehicleconfig', name='config_name', field=models.CharField(blank=True, default=None, help_text='The name of this vehicle configuration', max_length=200, null=True), ), ]
[ "django.db.models.CharField" ]
[((341, 467), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': 'None', 'help_text': '"""The name of this vehicle configuration"""', 'max_length': '(200)', 'null': '(True)'}), "(blank=True, default=None, help_text=\n 'The name of this vehicle configuration', max_length=200, null=True)\n", (357, 467), False, 'from django.db import migrations, models\n')]
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------

import importlib
import re
import logging
import os
import warnings

from ._version import get_versions
__version__ = get_versions()['version']
del get_versions

from .source import registry
from .catalog.base import Catalog

imports = {
    "DataSource": "intake.source.base:DataSource",
    'Schema': "intake.source.base:Schema",
    "load_combo_catalog": "intake.catalog.default:load_combo_catalog",
    "upload": "intake.container:upload",
    "gui": "intake.interface:instance",
    "cat": "intake.catalog:builtin",
    "output_notebook": "intake.interface:output_notebook",
    "register_driver": "intake.source:register_driver",
    "unregister_driver": "intake.source:unregister_driver",
}
openers = set()

logger = logging.getLogger('intake')


def __getattr__(attr):
    """Lazy attribute propagator

    Defers imports of functions until they are needed, according to the
    contents of the ``imports`` dict (submodules and classes) and the
    ``openers`` set (functions which instantiate data sources directly).

    All names in ``openers`` must start with "open_", else they will be
    ignored.
    """
    gl = globals()
    if attr in openers and attr[:5] == "open_":
        driver = registry[attr[5:]]  # "open_..."
        gl[attr] = driver
    else:
        if attr in gl:
            return gl[attr]
        elif attr in imports:
            dest = imports[attr]
            modname = dest.split(":", 1)[0]
            logger.debug("Importing: %s" % modname)
            mod = importlib.import_module(modname)
            if ":" in dest:
                gl[attr] = getattr(mod, dest.split(":")[1])
            else:
                gl[attr] = mod
    if attr == "__all__":
        return __dir__()
    try:
        return gl[attr]
    except KeyError:
        raise AttributeError(attr)


def __dir__(*_, **__):
    return sorted(list(globals()) + list(openers) + list(imports))


def make_open_functions():
    """From the current state of ``registry``, create open_* functions"""
    from .source.discovery import drivers
    for name in drivers.enabled_plugins():
        func_name = 'open_' + name
        if not func_name.isidentifier():
            # primitive name normalization
            func_name = re.sub('[-=~^&|@+]', '_', func_name)
        if func_name.isidentifier():
            # stash name for dir() and later fetch
            openers.add(func_name)
        else:
            warnings.warn('Invalid Intake plugin name "%s" found.' % name,
                          stacklevel=2)


make_open_functions()


def open_catalog(uri=None, **kwargs):
    """Create a Catalog object

    Can load YAML catalog files, connect to an intake server, or create any
    arbitrary Catalog subclass instance. In the general case, the user should
    supply ``driver=`` with a value from the plugins registry which has a
    container type of catalog. File locations can generally be remote, if
    specifying a URL protocol.

    The default behaviour if not specifying the driver is as follows:

    - if ``uri`` is a single string ending in "yml" or "yaml", open it as a
      catalog file
    - if ``uri`` is a list of strings, a string containing a glob character
      ("*") or a string not ending in "y(a)ml", open as a set of catalog
      files. In the latter case, assume it is a directory.
    - if ``uri`` begins with protocol ``"intake:"``, connect to a remote
      Intake server
    - if ``uri`` is ``None`` or missing, create a base Catalog object without
      entries.

    Parameters
    ----------
    uri: str or pathlib.Path
        Designator for the location of the catalog.
    kwargs:
        passed to subclass instance, see documentation of the individual
        catalog classes.
        For example, ``yaml_files_cat`` (when specifying multiple uris or a
        glob string) takes the additional parameter ``flatten=True|False``,
        specifying whether all data sources are merged in a single namespace,
        or each file becomes a sub-catalog.

    See also
    --------
    intake.open_yaml_files_cat, intake.open_yaml_file_cat,
    intake.open_intake_remote
    """
    driver = kwargs.pop('driver', None)
    if isinstance(uri, os.PathLike):
        uri = os.fspath(uri)
    if driver is None:
        if uri:
            if ((isinstance(uri, str) and "*" in uri)
                    or ((isinstance(uri, (list, tuple))) and len(uri) > 1)):
                # glob string or list of files/globs
                driver = 'yaml_files_cat'
            elif isinstance(uri, (list, tuple)) and len(uri) == 1:
                uri = uri[0]
                if "*" in uri:
                    # single glob string in a list
                    driver = 'yaml_files_cat'
                else:
                    # single filename in a list
                    driver = 'yaml_file_cat'
            elif isinstance(uri, str):
                # single URL
                if uri.startswith('intake:'):
                    # server
                    driver = 'intake_remote'
                else:
                    if uri.endswith(('.yml', '.yaml')):
                        driver = 'yaml_file_cat'
                    else:
                        uri = uri.rstrip('/') + '/*.y*ml'
                        driver = 'yaml_files_cat'
            else:
                raise ValueError("URI not understood: %s" % uri)
        else:
            # empty cat
            driver = 'catalog'
    if '_file' not in driver:
        kwargs.pop('fs', None)
    if driver not in registry:
        raise ValueError('Unknown catalog driver (%s), supply one of: %s'
                         % (driver, list(sorted(registry))))
    return registry[driver](uri, **kwargs)
[ "importlib.import_module", "os.fspath", "warnings.warn", "re.sub", "logging.getLogger" ]
[((1050, 1077), 'logging.getLogger', 'logging.getLogger', (['"""intake"""'], {}), "('intake')\n", (1067, 1077), False, 'import logging\n'), ((4511, 4525), 'os.fspath', 'os.fspath', (['uri'], {}), '(uri)\n', (4520, 4525), False, 'import os\n'), ((2552, 2588), 're.sub', 're.sub', (['"""[-=~^&|@+]"""', '"""_"""', 'func_name'], {}), "('[-=~^&|@+]', '_', func_name)\n", (2558, 2588), False, 'import re\n'), ((2738, 2813), 'warnings.warn', 'warnings.warn', (['"""Invalid Intake plugin name "%s" found."""', 'name'], {'stacklevel': '(2)'}), '(\'Invalid Intake plugin name "%s" found.\', name, stacklevel=2)\n', (2751, 2813), False, 'import warnings\n'), ((1817, 1849), 'importlib.import_module', 'importlib.import_module', (['modname'], {}), '(modname)\n', (1840, 1849), False, 'import importlib\n')]
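The intake record above leans on PEP 562: defining ``__getattr__`` and ``__dir__`` at module level lets a package defer expensive imports until an attribute is first accessed. A stripped-down standalone sketch of the same pattern; the package and attribute names here are hypothetical:

# mypkg/__init__.py - hypothetical package using PEP 562 lazy imports.
import importlib

_imports = {
    "heavy_helper": "mypkg._heavy:heavy_helper",  # "module:attribute"
}


def __getattr__(attr):
    if attr in _imports:
        modname, _, qualname = _imports[attr].partition(":")
        mod = importlib.import_module(modname)
        value = getattr(mod, qualname) if qualname else mod
        globals()[attr] = value  # cache so __getattr__ is not hit again
        return value
    raise AttributeError(attr)


def __dir__():
    return sorted(list(globals()) + list(_imports))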
from __future__ import print_function, unicode_literals try: import http.server as http_server except ImportError: # Python 2.7 import BaseHTTPServer as http_server root_path = '/' intan_status_path = '/intan_status.html' OK = 200 NO_CONTENT = 204 NOT_FOUND = 404 METHOD_NOT_ALLOWED = 405 class RCBLVDSRequestHandler(http_server.BaseHTTPRequestHandler): def do_GET(self): if self.path == root_path: self.send_response(METHOD_NOT_ALLOWED) self.send_header('Allow', 'POST') self.end_headers() elif self.path != intan_status_path: self.send_error(NOT_FOUND) else: self.send_response(OK) self.send_header('Content-Type', 'text/plain;charset=utf-8') self.end_headers() self.wfile.write(('This is a simulated RCS-LVDS ' 'device\n').encode('utf-8')) def do_POST(self): if self.path == intan_status_path: self.send_response(METHOD_NOT_ALLOWED) self.send_header('Allow', 'GET') self.end_headers() elif self.path != root_path: self.send_error(NOT_FOUND) else: content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length) self.send_response(NO_CONTENT) self.log_message('Request body: %s' % body.decode('utf-8')) self.end_headers() server = http_server.HTTPServer(('localhost', 8080), RCBLVDSRequestHandler) try: server.serve_forever() except KeyboardInterrupt: print() # Add final newline
[ "BaseHTTPServer.HTTPServer" ]
[((1472, 1538), 'BaseHTTPServer.HTTPServer', 'http_server.HTTPServer', (["('localhost', 8080)", 'RCBLVDSRequestHandler'], {}), "(('localhost', 8080), RCBLVDSRequestHandler)\n", (1494, 1538), True, 'import BaseHTTPServer as http_server\n')]
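The simulated device above can be exercised with a small standard-library client. A sketch, assuming the server is running locally on port 8080; the POST payload is hypothetical:

from urllib import request

# POST to the root path; the handler logs the body and returns 204 No Content.
req = request.Request("http://localhost:8080/",
                      data=b"intan_enable=1", method="POST")
with request.urlopen(req) as resp:
    print(resp.status)  # expected: 204

# GET the status page; the handler returns a plain-text description.
with request.urlopen("http://localhost:8080/intan_status.html") as resp:
    print(resp.read().decode("utf-8"))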
import socket
import multiprocessing
import re
import time

import mini_framework


class WSGIServer:
    def __init__(self):
        """Initialization: create the socket, bind, listen, etc."""
        # Create a server socket
        self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Socket address-reuse option: 1 to enable, 0 to disable
        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind
        self.server_socket.bind(('', 9999))
        # Listen: make the socket passive and set the length of the
        # completed-three-way-handshake queue
        self.server_socket.listen(128)

    def request_handler(self, client_socket):
        """Serve a single client."""
        recv_data = client_socket.recv(4096)
        if not recv_data:
            print("The client has disconnected")
            client_socket.close()
            return

        # Decode the received data
        request_str_data = recv_data.decode()
        # data_list = request_str_data.split("\r\n")
        # request_line = data_list[0]

        # The first item of the request line is the user's requested resource path
        # request_line
        # GET /index.html HTTP/1.1
        # POST /index.html HTTP/1.1
        # Extracting the path with a regular expression is more convenient
        # ret = re.match(r"[^/]+([^ ]+)", request_line)
        ret = re.match(r"[^/]+([^ ]+)", request_str_data)
        if ret:
            path_info = ret.group(1)  # /index.html
            print(">"*30, path_info)
        else:
            path_info = "/"
        print("The requested path is %s" % path_info)

        # If the extracted url is /, the home page is being requested;
        # the home page is usually named /index.html
        if path_info == '/':
            path_info = '/index.html'

        # Use an if statement to distinguish dynamic from static requests:
        # paths ending in .py are treated as dynamic requests
        if not path_info.endswith(".py"):
            # Static request (does not end in .py)
            try:
                # ./html/index.html
                with open("./html" + path_info, "rb") as f:
                    file_data = f.read()
            except Exception as e:
                # The requested path could not be served
                # Response line
                response_line = "HTTP/1.1 404 Not Found\r\n"
                # Response headers
                response_header = "Server: PythonWebServer2.0\r\n"
                # Response body
                response_body = "ERROR!!!!!"
                # Assemble the message
                response_data = response_line + response_header + "\r\n" + response_body
                # Send
                client_socket.send(response_data.encode())
            else:
                # Reply to the client with an HTTP response:
                # response line + response headers + blank line + response body
                # Response headers (response_header)
                response_header = "HTTP/1.1 200 OK\r\n"
                response_header += "Server: PythonWebServer1.0\r\n"
                response_header += "\r\n"
                # Response body (response_body)
                response_body = file_data
                # Assemble the message
                response = response_header.encode("utf-8") + response_body
                # Send
                client_socket.send(response)
            finally:
                # Close the socket
                client_socket.close()
        else:
            # Dynamic request (ends in .py)
            # Response headers (response_header)
            """
            response_header = "HTTP/1.1 200 OK\r\n"
            response_header += "Server: PythonWebServer1.0\r\n"
            response_header += "Content-Type: text/html; charset=UTF-8\r\n"
            response_header += "\r\n"
            """
            env = dict()
            env['PATH_INFO'] = path_info  # /index.py

            # Response body (response_body)
            response_body = mini_framework.application(env, self.set_headers)
            """
            if path_info == "/index.py":
                response_body = mini_framework.index()
            elif path_info == "/center.py":
                response_body = mini_framework.center()
            else:
                response_body = "-----not found your page-----"
            """
            # Assemble the response
            response = self.response_header + response_body
            # Send
            client_socket.send(response.encode("utf-8"))

    def set_headers(self, status, headers):
        print("----- web_server.py set_headers was called -----")
        # status ---> 200 OK
        # headers ---> [("Content-Type", "text/html;")]
        response_header = "HTTP/1.1 %s\r\n" % status
        for temp in headers:
            response_header += "%s: %s\r\n" % (temp[0], temp[1])
        response_header += "\r\n"
        self.response_header = response_header

    def run(self):
        """Wait for client connections, then create a child process to serve each one."""
        while True:
            # Take a client socket off the queue to serve it
            client_socket, client_addr = self.server_socket.accept()
            p = multiprocessing.Process(target=self.request_handler,
                                        args=(client_socket,))
            p.start()
            client_socket.close()


def main():
    # 1. Create a server object
    wsgi_server = WSGIServer()
    # 2. Call the object's run method
    wsgi_server.run()


if __name__ == '__main__':
    main()
[ "multiprocessing.Process", "socket.socket", "re.match", "mini_framework.application" ]
[((207, 256), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (220, 256), False, 'import socket\n'), ((1107, 1149), 're.match', 're.match', (['"""[^/]+([^ ]+)"""', 'request_str_data'], {}), "('[^/]+([^ ]+)', request_str_data)\n", (1115, 1149), False, 'import re\n'), ((3407, 3456), 'mini_framework.application', 'mini_framework.application', (['env', 'self.set_headers'], {}), '(env, self.set_headers)\n', (3433, 3456), False, 'import mini_framework\n'), ((4535, 4610), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'self.request_handler', 'args': '(client_socket,)'}), '(target=self.request_handler, args=(client_socket,))\n', (4558, 4610), False, 'import multiprocessing\n')]
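# Hypothetical sketch: the server above delegates dynamic requests to
# mini_framework.application(env, set_headers), which is not included here.
# A minimal module satisfying the implied contract (a dict with PATH_INFO in,
# a start_response-style callback, a string body out); all names below are
# assumptions, not the real mini_framework.
def index():
    return "<h1>index page</h1>"


def center():
    return "<h1>center page</h1>"


ROUTES = {"/index.py": index, "/center.py": center}


def application(env, set_headers):
    path = env.get("PATH_INFO", "/")
    handler = ROUTES.get(path)
    if handler is None:
        set_headers("404 Not Found", [("Content-Type", "text/html; charset=UTF-8")])
        return "-----not found you page-----"
    set_headers("200 OK", [("Content-Type", "text/html; charset=UTF-8")])
    return handler()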
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 27 14:20:08 2018

@author: haider.raheem-ug
"""

import random
import matplotlib.pyplot as plt

numberofparticles = int(input("Enter number of particles: "))
nofsteps = int(input("Enter number of steps: "))
prob = float(input("Enter the probability of going right: "))


def move(n_steps, prob):
    """Walk n_steps and return the final position."""
    npos = 0
    for i in range(n_steps):
        npos += takeStep(prob)
    return npos


def takeStep(prob):
    """Return +1 with probability prob (to a 0.1 resolution), else -1."""
    stepChoices = []
    for i in range(int(10 * prob)):
        stepChoices.append(1)
    for i in range(int(10 - 10 * prob)):
        stepChoices.append(-1)
    return random.choice(stepChoices)


fp = []
sumfp = 0
for i in range(numberofparticles):
    fp.append(move(nofsteps, prob))
for i in range(len(fp)):
    sumfp += fp[i]
average = sumfp / numberofparticles
print("mean: {:.2f}".format(average))

plt.clf()
plt.plot(fp, "ro")
plt.ylabel("final position")
plt.xlabel("particle index")
plt.title("Final positions of " + str(numberofparticles) + " particles in " + str(nofsteps)
          + " steps, prob(going right) = " + str(prob))

plt.figure()
plt.hist(fp)
plt.xlabel("particle final position")
plt.ylabel("count of particles")
plt.title("Histogram of final positions of " + str(numberofparticles) + " particles in "
          + str(nofsteps) + " steps")
[ "matplotlib.pyplot.plot", "matplotlib.pyplot.clf", "matplotlib.pyplot.hist", "random.choice", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel" ]
[((1008, 1017), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1015, 1017), True, 'import matplotlib.pyplot as plt\n'), ((1019, 1037), 'matplotlib.pyplot.plot', 'plt.plot', (['fp', '"""ro"""'], {}), "(fp, 'ro')\n", (1027, 1037), True, 'import matplotlib.pyplot as plt\n'), ((1039, 1067), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""final position"""'], {}), "('final position')\n", (1049, 1067), True, 'import matplotlib.pyplot as plt\n'), ((1069, 1097), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Practice index"""'], {}), "('Practice index')\n", (1079, 1097), True, 'import matplotlib.pyplot as plt\n'), ((1232, 1244), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1242, 1244), True, 'import matplotlib.pyplot as plt\n'), ((1246, 1258), 'matplotlib.pyplot.hist', 'plt.hist', (['fp'], {}), '(fp)\n', (1254, 1258), True, 'import matplotlib.pyplot as plt\n'), ((1260, 1297), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""particle final position"""'], {}), "('particle final position')\n", (1270, 1297), True, 'import matplotlib.pyplot as plt\n'), ((1299, 1331), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""count of particles"""'], {}), "('count of particles')\n", (1309, 1331), True, 'import matplotlib.pyplot as plt\n'), ((744, 770), 'random.choice', 'random.choice', (['stepChoices'], {}), '(stepChoices)\n', (757, 770), False, 'import random\n')]
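# Quick sanity check on the simulation above: a walk of n unit steps that go
# right with probability p has expected final position n * (2p - 1), so the
# printed mean should be close to this value. (Sketch; numbers are illustrative.)
def expected_mean(n_steps, prob):
    return n_steps * (2 * prob - 1)


print(expected_mean(100, 0.7))  # 40.0: 100 steps at p = 0.7 should average near +40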
from mlpproc.conditions import *
from mlpproc.defaults import Preprocessor


def test_lexer():
    test = [
        ("a==b", ["a", "==", "b"]),
        ("not a\tand\nb!=notop", ["not", "a", "and", "b", "!=", "notop"]),
        ("allo=(d))not(b and c)",
         ["allo=", "(", "d", ")", ")", "not", "(", "b", "and", "c", ")"]),
    ]
    for string, tokenlist in test:
        assert condition_lexer(string) == tokenlist


def test_conditions():
    preproc = Preprocessor()
    test = [
        ("true", True),
        ("1", True),
        ("\"\"", False),
        ("false", False),
        ("0", False),
        (" hello == \thello", True),
        (" hi == hello", False),
        (" hello != \thello", False),
        (" hi != hello", True),
        (" def label", True),
        (" def qffqfze", False),
        (" ndef label", False),
        (" ndef qffqfze", True),
        ("(def label)", True),
        ("(a == a and b != a)", True),
    ]
    for string, result in test:
        assert condition_eval(preproc, string) == result
        assert condition_eval(preproc, "not " + string) != result
        for o_string, o_result in test:
            assert condition_eval(preproc, string + " and " + o_string) == (result and o_result)
            assert condition_eval(preproc, string + " or " + o_string) == (result or o_result)
[ "mlpproc.defaults.Preprocessor" ]
[((414, 428), 'mlpproc.defaults.Preprocessor', 'Preprocessor', ([], {}), '()\n', (426, 428), False, 'from mlpproc.defaults import Preprocessor\n')]
import subprocess
import sys
import difflib


def prepend(itm):
    return '\'' + itm


master = sys.argv[1].replace('%0A', '\n')
branch = sys.argv[2].replace('%0A', '\n')

diff = ''.join(difflib.Differ().compare(master.splitlines(True), branch.splitlines(True)))
lines = [line for line in diff.splitlines() if line[0] == '+' or line[0] == '-' or line[0] == '?']
print('<br />'.join(map(prepend, lines)), end="")
[ "difflib.Differ" ]
[((185, 201), 'difflib.Differ', 'difflib.Differ', ([], {}), '()\n', (199, 201), False, 'import difflib\n')]
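# Differ prefixes every output line with a two-character code ('- ', '+ ',
# '  ', or '? '), which is exactly what the marker filter above keys on.
# The classic example from the difflib documentation:
import difflib

text1 = 'one\ntwo\nthree\n'.splitlines(keepends=True)
text2 = 'ore\ntree\nemu\n'.splitlines(keepends=True)
for line in difflib.Differ().compare(text1, text2):
    print(line, end='')
# - one
# ?  ^
# + ore
# ? ^
# - two
# - three
# ?  -
# + tree
# + emu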
# Import the extension classes
from flask_bootstrap import Bootstrap
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy

# Instantiate the extension objects
bootstrap = Bootstrap()
mail = Mail()
db = SQLAlchemy()


# Wrapper function that completes initialization
def config_extensions(app):
    bootstrap.init_app(app=app)
    mail.init_app(app=app)
    db.init_app(app=app)
[ "flask_sqlalchemy.SQLAlchemy", "flask_bootstrap.Bootstrap", "flask_mail.Mail" ]
[((137, 148), 'flask_bootstrap.Bootstrap', 'Bootstrap', ([], {}), '()\n', (146, 148), False, 'from flask_bootstrap import Bootstrap\n'), ((156, 162), 'flask_mail.Mail', 'Mail', ([], {}), '()\n', (160, 162), False, 'from flask_mail import Mail\n'), ((168, 180), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (178, 180), False, 'from flask_sqlalchemy import SQLAlchemy\n')]
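# Hypothetical usage sketch: binding the extensions from an application
# factory. The factory name and the config value are assumptions, not part of
# the module above.
from flask import Flask


def create_app():
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'  # assumed config
    config_extensions(app)
    return app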
import torch

float_tensor = torch.tensor([-1.0, 0.0, 1.0, 2.0])
q_made_per_tensor = torch.quantize_per_tensor(float_tensor, 0.1, 10, torch.quint8)

# Dequantize
dequantized_tensor = q_made_per_tensor.dequantize()

# Quantized Tensor supports slicing like usual Tensors do
s = q_made_per_tensor[2]  # a quantized Tensor with the same scale and zero_point
# that contains the values of the 2nd row of the original quantized Tensor
# same as q_made_per_tensor[2, :]

# Assignment
q_made_per_tensor[0] = 3.5  # quantize 3.5 and store the int value in the quantized Tensor

# Copy
# we can copy from a quantized Tensor of the same size and dtype
# but different scale and zero_point
scale1, zero_point1 = 1e-1, 0
scale2, zero_point2 = 1, -1
q1 = torch._empty_affine_quantized(
    [2, 3], scale=scale1, zero_point=zero_point1, dtype=torch.qint8
)
q2 = torch._empty_affine_quantized(
    [2, 3], scale=scale2, zero_point=zero_point2, dtype=torch.qint8
)
q2.copy_(q1)

# Permutation
q1.transpose(0, 1)  # see https://pytorch.org/docs/stable/torch.html#torch.transpose
q1.permute([1, 0])  # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.permute
q1.contiguous()  # Convert to contiguous Tensor

# Serialization and Deserialization
import tempfile

with tempfile.NamedTemporaryFile() as f:
    torch.save(q2, f)
    f.seek(0)
    q3 = torch.load(f)
[ "tempfile.NamedTemporaryFile", "torch.load", "torch.save", "torch._empty_affine_quantized", "torch.quantize_per_tensor", "torch.tensor" ]
[((29, 64), 'torch.tensor', 'torch.tensor', (['[-1.0, 0.0, 1.0, 2.0]'], {}), '([-1.0, 0.0, 1.0, 2.0])\n', (41, 64), False, 'import torch\n'), ((85, 147), 'torch.quantize_per_tensor', 'torch.quantize_per_tensor', (['float_tensor', '(0.1)', '(10)', 'torch.quint8'], {}), '(float_tensor, 0.1, 10, torch.quint8)\n', (110, 147), False, 'import torch\n'), ((737, 835), 'torch._empty_affine_quantized', 'torch._empty_affine_quantized', (['[2, 3]'], {'scale': 'scale1', 'zero_point': 'zero_point1', 'dtype': 'torch.qint8'}), '([2, 3], scale=scale1, zero_point=zero_point1,\n dtype=torch.qint8)\n', (766, 835), False, 'import torch\n'), ((843, 941), 'torch._empty_affine_quantized', 'torch._empty_affine_quantized', (['[2, 3]'], {'scale': 'scale2', 'zero_point': 'zero_point2', 'dtype': 'torch.qint8'}), '([2, 3], scale=scale2, zero_point=zero_point2,\n dtype=torch.qint8)\n', (872, 941), False, 'import torch\n'), ((1252, 1281), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (1279, 1281), False, 'import tempfile\n'), ((1292, 1309), 'torch.save', 'torch.save', (['q2', 'f'], {}), '(q2, f)\n', (1302, 1309), False, 'import torch\n'), ((1333, 1346), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (1343, 1346), False, 'import torch\n')]
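# The affine scheme stores integers and recovers floats as
# (int - zero_point) * scale; int_repr() exposes the stored integers, so the
# round trip can be checked directly (a small sketch reusing the tensor above).
q = torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
ints = q.int_repr()  # tensor([ 0, 10, 20, 30], dtype=torch.uint8)
recovered = (ints.float() - q.q_zero_point()) * q.q_scale()
assert torch.allclose(recovered, q.dequantize())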
""" LoopFullUnrolling fully unrolls loops with static bounds. """ from pythran import metadata from pythran.analyses import HasBreak, HasContinue, NodeCount from pythran.openmp import OMPDirective from pythran.passmanager import Transformation from copy import deepcopy import gast as ast from functools import reduce class LoopFullUnrolling(Transformation): ''' Fully unroll loops with static bounds >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse('for j in [1,2,3]: i += j') >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(LoopFullUnrolling, node) >>> print pm.dump(backend.Python, node) j = 1 i += j j = 2 i += j j = 3 i += j ''' MAX_NODE_COUNT = 1024 def visit_For(self, node): # first unroll children if needed or possible self.generic_visit(node) # if the user added some OpenMP directive, trust him and no unroll has_omp = metadata.get(node, OMPDirective) # a break or continue in the loop prevents unrolling too has_break = any(self.passmanager.gather(HasBreak, n, self.ctx) for n in node.body) has_cont = any(self.passmanager.gather(HasContinue, n, self.ctx) for n in node.body) # do not unroll too much to prevent code growth node_count = self.passmanager.gather(NodeCount, node, self.ctx) if isinstance(node.iter, ast.List): isvalid = not(has_omp or has_break or has_cont) total_count = node_count * len(node.iter.elts) issmall = total_count < LoopFullUnrolling.MAX_NODE_COUNT if isvalid and issmall: def unroll(elt): return ([ast.Assign([deepcopy(node.target)], elt)] + deepcopy(node.body)) self.update = True return sum([unroll(elt) for elt in node.iter.elts], []) return node
[ "copy.deepcopy", "pythran.metadata.get" ]
[((995, 1027), 'pythran.metadata.get', 'metadata.get', (['node', 'OMPDirective'], {}), '(node, OMPDirective)\n', (1007, 1027), False, 'from pythran import metadata\n'), ((1855, 1874), 'copy.deepcopy', 'deepcopy', (['node.body'], {}), '(node.body)\n', (1863, 1874), False, 'from copy import deepcopy\n'), ((1795, 1816), 'copy.deepcopy', 'deepcopy', (['node.target'], {}), '(node.target)\n', (1803, 1816), False, 'from copy import deepcopy\n')]
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-08 07:14
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0004_auto_20171108_0659'),
    ]

    operations = [
        migrations.AlterField(
            model_name='listing',
            name='host_since',
            field=models.DateField(default='1998-01-02'),
        ),
        migrations.AlterField(
            model_name='listing',
            name='lat',
            field=models.DecimalField(decimal_places=15, default=0.0, max_digits=19),
        ),
        migrations.AlterField(
            model_name='listing',
            name='long',
            field=models.DecimalField(decimal_places=15, default=0.0, max_digits=19),
        ),
        migrations.AlterField(
            model_name='listing',
            name='price_cleaning',
            field=models.DecimalField(decimal_places=3, default=0.0, max_digits=8),
        ),
        migrations.AlterField(
            model_name='listing',
            name='price_night',
            field=models.DecimalField(decimal_places=3, default=0.0, max_digits=8),
        ),
    ]
[ "django.db.models.DateField", "django.db.models.DecimalField" ]
[((403, 441), 'django.db.models.DateField', 'models.DateField', ([], {'default': '"""1998-01-02"""'}), "(default='1998-01-02')\n", (419, 441), False, 'from django.db import migrations, models\n'), ((561, 627), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(15)', 'default': '(0.0)', 'max_digits': '(19)'}), '(decimal_places=15, default=0.0, max_digits=19)\n', (580, 627), False, 'from django.db import migrations, models\n'), ((748, 814), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(15)', 'default': '(0.0)', 'max_digits': '(19)'}), '(decimal_places=15, default=0.0, max_digits=19)\n', (767, 814), False, 'from django.db import migrations, models\n'), ((945, 1009), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(3)', 'default': '(0.0)', 'max_digits': '(8)'}), '(decimal_places=3, default=0.0, max_digits=8)\n', (964, 1009), False, 'from django.db import migrations, models\n'), ((1137, 1201), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(3)', 'default': '(0.0)', 'max_digits': '(8)'}), '(decimal_places=3, default=0.0, max_digits=8)\n', (1156, 1201), False, 'from django.db import migrations, models\n')]
from random import randint

import pytest
from flask import url_for

from registry.donor.models import (
    DonationCenter,
    DonorsOverride,
    DonorsOverview,
    IgnoredDonors,
)

from .fixtures import sample_of_rc, skip_if_ignored
from .helpers import login


class TestDonorsOverview:
    @pytest.mark.parametrize("rodne_cislo", sample_of_rc(100))
    def test_refresh_overview(self, rodne_cislo, test_data_df):
        skip_if_ignored(rodne_cislo)
        # Check of the total amount of donations
        donor_overview = DonorsOverview.query.filter_by(rodne_cislo=rodne_cislo).first()
        last_imports = (
            test_data_df[test_data_df.RC == rodne_cislo]
            .sort_values(by="DATUM_IMPORTU")
            .drop_duplicates(["MISTO_ODBERU"], keep="last")
        )
        total_donations = last_imports.POCET_ODBERU.sum()
        assert donor_overview.donation_count_total == total_donations
        # Check of the partial amounts of donations for each donation center
        donation_centers = DonationCenter.query.all()
        for donation_center_slug in [dc.slug for dc in donation_centers] + ["manual"]:
            try:
                dc_last_count = last_imports.loc[
                    last_imports.MISTO_ODBERU == donation_center_slug, "POCET_ODBERU"
                ].values[0]
            except IndexError:
                dc_last_count = 0
            do_last_count = getattr(
                donor_overview, f"donation_count_{donation_center_slug}"
            )
            assert dc_last_count == do_last_count
        # Check of all other attributes
        last_import = last_imports.tail(1)
        override = DonorsOverride.query.get(rodne_cislo)
        for csv_column, attr in (
            ("JMENO", "first_name"),
            ("PRIJMENI", "last_name"),
            ("ULICE", "address"),
            ("MESTO", "city"),
            ("PSC", "postal_code"),
            ("POJISTOVNA", "kod_pojistovny"),
        ):
            if override and getattr(override, attr):
                assert getattr(donor_overview, attr) == getattr(override, attr)
            else:
                assert last_import[csv_column].values[0] == getattr(
                    donor_overview, attr
                )


class TestIgnore:
    @pytest.mark.parametrize("rodne_cislo", sample_of_rc(10))
    def test_ignore(self, user, testapp, rodne_cislo):
        skip_if_ignored(rodne_cislo)
        login(user, testapp)
        res = testapp.get(url_for("donor.show_ignored"))
        random_reason = str(randint(11111111, 99999999))
        form = res.forms[0]
        form.fields["rodne_cislo"][0].value = rodne_cislo
        form.fields["reason"][0].value = random_reason
        res = form.submit().follow()
        assert rodne_cislo in res.text
        assert random_reason in res.text
        assert "Dárce ignorován." in res.text
        do = testapp.get(url_for("donor.detail", rc=rodne_cislo), status=302)
        assert do.status_code == 302
        res = do.follow()
        assert res.status_code == 200
        assert "Dárce je ignorován" in res.text
        for _, form in res.forms.items():
            if form.fields["rodne_cislo"][0].value == rodne_cislo:
                unignore_form = form
        res = unignore_form.submit().follow()
        assert rodne_cislo not in res.text
        assert random_reason not in res.text
        assert "Dárce již není ignorován." in res.text
        do = testapp.get(url_for("donor.detail", rc=rodne_cislo), status=200)
        assert do.status_code == 200

    def test_ignore_already_ignored(self, user, testapp):
        login(user, testapp)
        ignored_count = IgnoredDonors.query.count()
        already_ignored_rc = IgnoredDonors.query.first().rodne_cislo
        res = testapp.get(url_for("donor.show_ignored"))
        form = res.forms[0]
        form.fields["rodne_cislo"][0].value = already_ignored_rc
        form.fields["reason"][0].value = "foobarbaz"
        res = form.submit().follow()
        assert "Dárce již je v seznamu ignorovaných" in res
        assert ignored_count == IgnoredDonors.query.count()

    def test_ignore_no_reason(self, user, testapp):
        login(user, testapp)
        ignored_count = IgnoredDonors.query.count()
        rodne_cislo = DonorsOverview.query.order_by(
            DonorsOverview.rodne_cislo.desc()
        ).first()
        res = testapp.get(url_for("donor.show_ignored"))
        form = res.forms[0]
        form.fields["rodne_cislo"][0].value = rodne_cislo
        form.fields["reason"][0].value = ""
        res = form.submit().follow()
        assert "Při přidávání do ignorovaných došlo k chybě" in res
        assert ignored_count == IgnoredDonors.query.count()

    def test_unignore_not_ignored(self, user, testapp):
        login(user, testapp)
        ignored_count = IgnoredDonors.query.count()
        rodne_cislo = DonorsOverview.query.order_by(
            DonorsOverview.rodne_cislo.desc()
        ).first()
        res = testapp.get(url_for("donor.show_ignored"))
        form = res.forms[1]
        form.fields["rodne_cislo"][0].value = rodne_cislo
        res = form.submit().follow()
        assert "Při odebírání ze seznamu ignorovaných dárců došlo k chybě" in res
        assert ignored_count == IgnoredDonors.query.count()


class TestOverride:
    @pytest.mark.parametrize("rodne_cislo", sample_of_rc(5))
    def test_override(self, user, testapp, rodne_cislo):
        skip_if_ignored(rodne_cislo)
        login(user, testapp)
        res = testapp.get(url_for("donor.detail", rc=rodne_cislo))
        old_data = DonorsOverview.query.get(rodne_cislo)
        # Test save
        form = res.forms["donorsOverrideForm"]
        form["first_name"] = "--First--"
        form["last_name"] = "--Last--"
        res = form.submit("save_btn").follow()
        assert "Výjimka uložena" in res
        assert "Jméno: --First--" in res
        assert "Příjmení: --Last--" in res
        # Test repeated save
        form = res.forms["donorsOverrideForm"]
        res = form.submit("save_btn").follow()
        assert "Výjimka uložena" in res
        assert "Jméno: --First--" in res
        assert "Příjmení: --Last--" in res
        # Test removing one field's value but keeping the other
        form = res.forms["donorsOverrideForm"]
        form["first_name"] = ""
        res = form.submit("save_btn").follow()
        assert "Výjimka uložena" in res
        assert ("Jméno: " + str(old_data.first_name)) in res
        assert "Příjmení: --Last--" in res
        # Test deleting the override
        form = res.forms["donorsOverrideForm"]
        res = form.submit("delete_btn").follow()
        assert "Výjimka smazána" in res
        assert ("Jméno: " + str(old_data.first_name)) in res
        assert ("Příjmení: " + str(old_data.last_name)) in res

    def test_get_overrides_json_endpoint(self, user, testapp):
        login(user, testapp)
        res = testapp.get(url_for("donor.get_overrides"))
        overrides = DonorsOverride.query.all()
        assert len(overrides) == len(res.json)
        for override in res.json.values():
            assert len(DonorsOverview.basic_fields) == len(override)

    @pytest.mark.parametrize("rodne_cislo", sample_of_rc(1))
    def test_incorrect_override(self, user, testapp, rodne_cislo):
        skip_if_ignored(rodne_cislo)
        login(user, testapp)
        res = testapp.get(url_for("donor.detail", rc=rodne_cislo))
        form = res.forms["donorsOverrideForm"]
        form.fields["rodne_cislo"][0].value = "9999999999"
        res = form.submit(name="delete_btn").follow()
        # First follow above tries to redirect us to non-existing donor detail
        # so the second one gives us HTTP/404 and then the home
        # page with the error messages.
        res = res.follow(status=404)
        assert "Není co mazat" in res
        assert "Stránka, kterou hledáte, neexistuje" in res

    @pytest.mark.parametrize("rodne_cislo", sample_of_rc(1))
    def test_form_errors(self, user, testapp, rodne_cislo):
        skip_if_ignored(rodne_cislo)
        login(user, testapp)
        res = testapp.get(url_for("donor.detail", rc=rodne_cislo))
        form = res.forms["donorsOverrideForm"]
        form.fields["postal_code"][0].value = "7380X"
        form.fields["kod_pojistovny"][0].value = "1X0"
        res = form.submit(name="save_btn").follow()
        assert "PSČ - Pole musí obsahovat pouze číslice" in res
        assert "Pojišťovna - Pole musí obsahovat pouze číslice" in res
[ "registry.donor.models.DonorsOverview.rodne_cislo.desc", "registry.donor.models.DonorsOverview.query.get", "registry.donor.models.DonationCenter.query.all", "random.randint", "registry.donor.models.DonorsOverview.query.filter_by", "registry.donor.models.DonorsOverride.query.get", "flask.url_for", "registry.donor.models.IgnoredDonors.query.count", "registry.donor.models.DonorsOverride.query.all", "registry.donor.models.IgnoredDonors.query.first" ]
[((1027, 1053), 'registry.donor.models.DonationCenter.query.all', 'DonationCenter.query.all', ([], {}), '()\n', (1051, 1053), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((1666, 1703), 'registry.donor.models.DonorsOverride.query.get', 'DonorsOverride.query.get', (['rodne_cislo'], {}), '(rodne_cislo)\n', (1690, 1703), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((3670, 3697), 'registry.donor.models.IgnoredDonors.query.count', 'IgnoredDonors.query.count', ([], {}), '()\n', (3695, 3697), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((4233, 4260), 'registry.donor.models.IgnoredDonors.query.count', 'IgnoredDonors.query.count', ([], {}), '()\n', (4258, 4260), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((4840, 4867), 'registry.donor.models.IgnoredDonors.query.count', 'IgnoredDonors.query.count', ([], {}), '()\n', (4865, 4867), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((5600, 5637), 'registry.donor.models.DonorsOverview.query.get', 'DonorsOverview.query.get', (['rodne_cislo'], {}), '(rodne_cislo)\n', (5624, 5637), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((7013, 7039), 'registry.donor.models.DonorsOverride.query.all', 'DonorsOverride.query.all', ([], {}), '()\n', (7037, 7039), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((2481, 2510), 'flask.url_for', 'url_for', (['"""donor.show_ignored"""'], {}), "('donor.show_ignored')\n", (2488, 2510), False, 'from flask import url_for\n'), ((2540, 2567), 'random.randint', 'randint', (['(11111111)', '(99999999)'], {}), '(11111111, 99999999)\n', (2547, 2567), False, 'from random import randint\n'), ((2902, 2941), 'flask.url_for', 'url_for', (['"""donor.detail"""'], {'rc': 'rodne_cislo'}), "('donor.detail', rc=rodne_cislo)\n", (2909, 2941), False, 'from flask import url_for\n'), ((3468, 3507), 'flask.url_for', 'url_for', (['"""donor.detail"""'], {'rc': 'rodne_cislo'}), "('donor.detail', rc=rodne_cislo)\n", (3475, 3507), False, 'from flask import url_for\n'), ((3727, 3754), 'registry.donor.models.IgnoredDonors.query.first', 'IgnoredDonors.query.first', ([], {}), '()\n', (3752, 3754), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((3793, 3822), 'flask.url_for', 'url_for', (['"""donor.show_ignored"""'], {}), "('donor.show_ignored')\n", (3800, 3822), False, 'from flask import url_for\n'), ((4099, 4126), 'registry.donor.models.IgnoredDonors.query.count', 'IgnoredDonors.query.count', ([], {}), '()\n', (4124, 4126), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((4404, 4433), 'flask.url_for', 'url_for', (['"""donor.show_ignored"""'], {}), "('donor.show_ignored')\n", (4411, 4433), False, 'from flask import url_for\n'), ((4702, 4729), 'registry.donor.models.IgnoredDonors.query.count', 'IgnoredDonors.query.count', ([], {}), '()\n', (4727, 4729), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((5011, 5040), 'flask.url_for', 'url_for', (['"""donor.show_ignored"""'], {}), "('donor.show_ignored')\n", (5018, 5040), False, 'from flask import url_for\n'), ((5279, 5306), 'registry.donor.models.IgnoredDonors.query.count', 'IgnoredDonors.query.count', ([], {}), '()\n', (5304, 5306), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((5539, 5578), 'flask.url_for', 'url_for', (['"""donor.detail"""'], {'rc': 'rodne_cislo'}), "('donor.detail', rc=rodne_cislo)\n", (5546, 5578), False, 'from flask import url_for\n'), ((6961, 6991), 'flask.url_for', 'url_for', (['"""donor.get_overrides"""'], {}), "('donor.get_overrides')\n", (6968, 6991), False, 'from flask import url_for\n'), ((7421, 7460), 'flask.url_for', 'url_for', (['"""donor.detail"""'], {'rc': 'rodne_cislo'}), "('donor.detail', rc=rodne_cislo)\n", (7428, 7460), False, 'from flask import url_for\n'), ((8154, 8193), 'flask.url_for', 'url_for', (['"""donor.detail"""'], {'rc': 'rodne_cislo'}), "('donor.detail', rc=rodne_cislo)\n", (8161, 8193), False, 'from flask import url_for\n'), ((532, 587), 'registry.donor.models.DonorsOverview.query.filter_by', 'DonorsOverview.query.filter_by', ([], {'rodne_cislo': 'rodne_cislo'}), '(rodne_cislo=rodne_cislo)\n', (562, 587), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((4326, 4359), 'registry.donor.models.DonorsOverview.rodne_cislo.desc', 'DonorsOverview.rodne_cislo.desc', ([], {}), '()\n', (4357, 4359), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n'), ((4933, 4966), 'registry.donor.models.DonorsOverview.rodne_cislo.desc', 'DonorsOverview.rodne_cislo.desc', ([], {}), '()\n', (4964, 4966), False, 'from registry.donor.models import DonationCenter, DonorsOverride, DonorsOverview, IgnoredDonors\n')]
import json

from rest_framework import serializers

from fastrunner import models
from fastrunner.utils.parser import Parse


class ProjectSerializer(serializers.ModelSerializer):
    """Serialize project information"""

    class Meta:
        model = models.Project
        fields = ['id', 'name', 'desc', 'responsible', 'update_time']


class TeamSerializer(serializers.ModelSerializer):
    """Serialize project team members"""
    permission = serializers.CharField(source="get_permission_display")
    project = serializers.CharField(source="project.name")

    class Meta:
        model = models.Team
        fields = ["id", "account", "permission", "project"]


class DataBaseSerializer(serializers.ModelSerializer):
    """Serialize database information"""
    # type = serializers.CharField(source="get_type_display")

    class Meta:
        model = models.DataBase
        fields = '__all__'


class DebugTalkSerializer(serializers.ModelSerializer):
    """Serialize the debugtalk driver code"""

    class Meta:
        model = models.Debugtalk
        fields = ['id', 'code']


class RelationSerializer(serializers.ModelSerializer):
    """Serialize the tree structure"""

    class Meta:
        model = models.Relation
        fields = '__all__'


class APISerializer(serializers.ModelSerializer):
    """Serialize API information"""
    body = serializers.SerializerMethodField()

    class Meta:
        model = models.API
        fields = ['id', 'name', 'url', 'method', 'project', 'relation', 'body']

    def get_body(self, obj):
        parse = Parse(eval(obj.body))
        parse.parse_http()
        return parse.testcase


class CaseSerializer(serializers.ModelSerializer):
    """Serialize test case information"""

    class Meta:
        model = models.Case
        fields = '__all__'


class CaseStepSerializer(serializers.ModelSerializer):
    """Serialize test case steps"""
    body = serializers.SerializerMethodField()

    class Meta:
        model = models.CaseStep
        fields = ['id', 'name', 'url', 'method', 'body', 'case']
        depth = 1

    def get_body(self, obj):
        parse = Parse(eval(obj.body))
        parse.parse_http()
        return parse.testcase


class ConfigSerializer(serializers.ModelSerializer):
    """Serialize configuration information"""
    body = serializers.SerializerMethodField()

    class Meta:
        model = models.Config
        fields = ['id', 'base_url', 'body', 'name', 'update_time']
        depth = 1

    def get_body(self, obj):
        parse = Parse(eval(obj.body), level='config')
        parse.parse_http()
        return parse.testcase


class ReportSerializer(serializers.ModelSerializer):
    """Serialize report information"""
    type = serializers.CharField(source="get_type_display")
    project = serializers.CharField(source="project.name")
    summary = serializers.SerializerMethodField()

    class Meta:
        model = models.Report
        fields = ["id", "name", "type", "project", "summary"]

    def get_summary(self, obj):
        return json.loads(obj.summary)
[ "rest_framework.serializers.CharField", "json.loads", "rest_framework.serializers.SerializerMethodField" ]
[((424, 478), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""get_permission_display"""'}), "(source='get_permission_display')\n", (445, 478), False, 'from rest_framework import serializers\n'), ((493, 537), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""project.name"""'}), "(source='project.name')\n", (514, 537), False, 'from rest_framework import serializers\n'), ((1288, 1323), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (1321, 1323), False, 'from rest_framework import serializers\n'), ((1822, 1857), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (1855, 1857), False, 'from rest_framework import serializers\n'), ((2209, 2244), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (2242, 2244), False, 'from rest_framework import serializers\n'), ((2612, 2660), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""get_type_display"""'}), "(source='get_type_display')\n", (2633, 2660), False, 'from rest_framework import serializers\n'), ((2675, 2719), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'source': '"""project.name"""'}), "(source='project.name')\n", (2696, 2719), False, 'from rest_framework import serializers\n'), ((2734, 2769), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (2767, 2769), False, 'from rest_framework import serializers\n'), ((2927, 2950), 'json.loads', 'json.loads', (['obj.summary'], {}), '(obj.summary)\n', (2937, 2950), False, 'import json\n')]
import pytest
from django.contrib import auth as django_auth

UserModel = django_auth.get_user_model()


@pytest.fixture
def context(mocker):
    class Session(dict):
        def cycle_key(self):
            pass

        def flush(self):
            pass

    context = mocker.Mock()
    context.request.session = Session()
    django_auth.logout(context.request)
    return context


@pytest.fixture
def user(db, group, tag):
    user = UserModel.objects.create_user(username="user", password="password")
    return user
[ "django.contrib.auth.logout", "django.contrib.auth.get_user_model" ]
[((75, 103), 'django.contrib.auth.get_user_model', 'django_auth.get_user_model', ([], {}), '()\n', (101, 103), True, 'from django.contrib import auth as django_auth\n'), ((330, 365), 'django.contrib.auth.logout', 'django_auth.logout', (['context.request'], {}), '(context.request)\n', (348, 365), True, 'from django.contrib import auth as django_auth\n')]
import io

EXAMPLE = """[({(<(())[]>[[{[]{<()<>>
[(()[<>])]({[<{<<[]>>(
{([(<{}[<>[]}>{[]{[(<()>
(((({<>}<{<{<>}{[]{[]{}
[[<[([]))<([[{}[[()]]]
[{[{({}]{}}([{[{{{}}([]
{<[[]]>}<{[{[{[]{()[[[]
[<(<(<(<{}))><([]([]()
<{([([[(<>()){}]>(<<{{
<{([{{}}[<[[[<>{}]]]>[]]"""

PAIRS = {
    "(": ")",
    "[": "]",
    "{": "}",
    "<": ">",
}

SCORES = {
    ")": 3,
    "]": 57,
    "}": 1197,
    ">": 25137,
}


def error_score(line: str) -> int:
    stack = []
    for char in line:
        if char in PAIRS:
            stack.append(PAIRS[char])
        elif len(stack) > 0 and stack[-1] == char:
            stack.pop()
        else:
            return SCORES[char]
    return 0


def solve(reader: io.TextIOBase) -> int:
    total_score = 0
    for line in reader.readlines():
        total_score += error_score(line.strip())
    return total_score


assert solve(io.StringIO(EXAMPLE)) == 26397


def main():
    with open("input/day10.txt") as file:
        result = solve(file)
    print(f"The total syntax error score is {result}")


if __name__ == "__main__":
    main()
[ "io.StringIO" ]
[((864, 884), 'io.StringIO', 'io.StringIO', (['EXAMPLE'], {}), '(EXAMPLE)\n', (875, 884), False, 'import io\n')]
"""Global constants for the app """ import pathlib LOGFILENAME = pathlib.Path(__file__).parent / "data/weightlog.csv" FIELDS = ["date", "weight in lbs"] GRAPH_FILENAME = pathlib.Path(__file__).parent / "data/weightlog.png" NOTRENDFLAG = 10000
[ "pathlib.Path" ]
[((66, 88), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (78, 88), False, 'import pathlib\n'), ((171, 193), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (183, 193), False, 'import pathlib\n')]
import tensorflow as tf
from keras import backend as K
from keras_segmentation.custom_losses import smooth_l1_loss


def bounding_box_iou_based_network_loss(y_true, y_pred):
    pred_x1 = y_pred[:, 0]
    pred_y1 = y_pred[:, 1]
    pred_x2 = pred_x1 + y_pred[:, 2]
    pred_y2 = pred_y1 + y_pred[:, 3]

    true_x1 = y_true[:, 0]
    true_y1 = y_true[:, 1]
    true_x2 = true_x1 + y_true[:, 2]
    true_y2 = true_y1 + y_true[:, 3]

    intersection_x1 = tf.maximum(pred_x1, true_x1)
    intersection_y1 = tf.maximum(pred_y1, true_y1)
    intersection_x2 = tf.minimum(pred_x2, true_x2)
    intersection_y2 = tf.minimum(pred_y2, true_y2)

    intersection_area = tf.maximum(
        tf.zeros_like(intersection_x1),
        intersection_x2 - intersection_x1 + tf.ones_like(intersection_x1)) * tf.maximum(
        tf.zeros_like(intersection_y1),
        intersection_y2 - intersection_y1 + tf.ones_like(intersection_y1))

    pred_area = (pred_x2 - pred_x1 + tf.ones_like(pred_x1)) * (pred_y2 - pred_y1 + tf.ones_like(pred_y1))
    true_area = (true_x2 - true_x1 + tf.ones_like(true_x1)) * (true_y2 - true_y1 + tf.ones_like(true_y1))
    union_area = pred_area + true_area - intersection_area

    iou = intersection_area / tf.maximum(union_area, K.epsilon())
    iou = K.clip(iou, 0.0 + K.epsilon(), 1.0)

    iou_loss = -tf.log(iou)
    # convert loss (?,) to (1)
    iou_loss = tf.reduce_sum(iou_loss, axis=-1)
    l1_loss = smooth_l1_loss(y_true, y_pred)
    return iou_loss + l1_loss


def bounding_box_iou_based_network_metric(y_true, y_pred):
    pred_x1 = y_pred[:, 0]
    pred_y1 = y_pred[:, 1]
    pred_x2 = pred_x1 + y_pred[:, 2]
    pred_y2 = pred_y1 + y_pred[:, 3]

    true_x1 = y_true[:, 0]
    true_y1 = y_true[:, 1]
    true_x2 = true_x1 + y_true[:, 2]
    true_y2 = true_y1 + y_true[:, 3]

    intersection_x1 = tf.maximum(pred_x1, true_x1)
    intersection_y1 = tf.maximum(pred_y1, true_y1)
    intersection_x2 = tf.minimum(pred_x2, true_x2)
    intersection_y2 = tf.minimum(pred_y2, true_y2)

    intersection_area = tf.maximum(
        tf.zeros_like(intersection_x1),
        intersection_x2 - intersection_x1 + tf.ones_like(intersection_x1)) * tf.maximum(
        tf.zeros_like(intersection_y1),
        intersection_y2 - intersection_y1 + tf.ones_like(intersection_y1))

    pred_area = (pred_x2 - pred_x1 + tf.ones_like(pred_x1)) * (pred_y2 - pred_y1 + tf.ones_like(pred_y1))
    true_area = (true_x2 - true_x1 + tf.ones_like(true_x1)) * (true_y2 - true_y1 + tf.ones_like(true_y1))
    union_area = pred_area + true_area - intersection_area

    iou = intersection_area / tf.maximum(union_area, K.epsilon())
    return iou
[ "tensorflow.reduce_sum", "tensorflow.maximum", "keras_segmentation.custom_losses.smooth_l1_loss", "keras.backend.epsilon", "tensorflow.zeros_like", "tensorflow.minimum", "tensorflow.ones_like", "tensorflow.log" ]
[((455, 483), 'tensorflow.maximum', 'tf.maximum', (['pred_x1', 'true_x1'], {}), '(pred_x1, true_x1)\n', (465, 483), True, 'import tensorflow as tf\n'), ((506, 534), 'tensorflow.maximum', 'tf.maximum', (['pred_y1', 'true_y1'], {}), '(pred_y1, true_y1)\n', (516, 534), True, 'import tensorflow as tf\n'), ((557, 585), 'tensorflow.minimum', 'tf.minimum', (['pred_x2', 'true_x2'], {}), '(pred_x2, true_x2)\n', (567, 585), True, 'import tensorflow as tf\n'), ((608, 636), 'tensorflow.minimum', 'tf.minimum', (['pred_y2', 'true_y2'], {}), '(pred_y2, true_y2)\n', (618, 636), True, 'import tensorflow as tf\n'), ((1391, 1423), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['iou_loss'], {'axis': '(-1)'}), '(iou_loss, axis=-1)\n', (1404, 1423), True, 'import tensorflow as tf\n'), ((1439, 1469), 'keras_segmentation.custom_losses.smooth_l1_loss', 'smooth_l1_loss', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1453, 1469), False, 'from keras_segmentation.custom_losses import smooth_l1_loss\n'), ((1842, 1870), 'tensorflow.maximum', 'tf.maximum', (['pred_x1', 'true_x1'], {}), '(pred_x1, true_x1)\n', (1852, 1870), True, 'import tensorflow as tf\n'), ((1893, 1921), 'tensorflow.maximum', 'tf.maximum', (['pred_y1', 'true_y1'], {}), '(pred_y1, true_y1)\n', (1903, 1921), True, 'import tensorflow as tf\n'), ((1944, 1972), 'tensorflow.minimum', 'tf.minimum', (['pred_x2', 'true_x2'], {}), '(pred_x2, true_x2)\n', (1954, 1972), True, 'import tensorflow as tf\n'), ((1995, 2023), 'tensorflow.minimum', 'tf.minimum', (['pred_y2', 'true_y2'], {}), '(pred_y2, true_y2)\n', (2005, 2023), True, 'import tensorflow as tf\n'), ((1332, 1343), 'tensorflow.log', 'tf.log', (['iou'], {}), '(iou)\n', (1338, 1343), True, 'import tensorflow as tf\n'), ((673, 703), 'tensorflow.zeros_like', 'tf.zeros_like', (['intersection_x1'], {}), '(intersection_x1)\n', (686, 703), True, 'import tensorflow as tf\n'), ((829, 859), 'tensorflow.zeros_like', 'tf.zeros_like', (['intersection_y1'], {}), '(intersection_y1)\n', (842, 859), True, 'import tensorflow as tf\n'), ((966, 987), 'tensorflow.ones_like', 'tf.ones_like', (['pred_x1'], {}), '(pred_x1)\n', (978, 987), True, 'import tensorflow as tf\n'), ((1012, 1033), 'tensorflow.ones_like', 'tf.ones_like', (['pred_y1'], {}), '(pred_y1)\n', (1024, 1033), True, 'import tensorflow as tf\n'), ((1072, 1093), 'tensorflow.ones_like', 'tf.ones_like', (['true_x1'], {}), '(true_x1)\n', (1084, 1093), True, 'import tensorflow as tf\n'), ((1118, 1139), 'tensorflow.ones_like', 'tf.ones_like', (['true_y1'], {}), '(true_y1)\n', (1130, 1139), True, 'import tensorflow as tf\n'), ((1255, 1266), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1264, 1266), True, 'from keras import backend as K\n'), ((1297, 1308), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (1306, 1308), True, 'from keras import backend as K\n'), ((2060, 2090), 'tensorflow.zeros_like', 'tf.zeros_like', (['intersection_x1'], {}), '(intersection_x1)\n', (2073, 2090), True, 'import tensorflow as tf\n'), ((2216, 2246), 'tensorflow.zeros_like', 'tf.zeros_like', (['intersection_y1'], {}), '(intersection_y1)\n', (2229, 2246), True, 'import tensorflow as tf\n'), ((2353, 2374), 'tensorflow.ones_like', 'tf.ones_like', (['pred_x1'], {}), '(pred_x1)\n', (2365, 2374), True, 'import tensorflow as tf\n'), ((2399, 2420), 'tensorflow.ones_like', 'tf.ones_like', (['pred_y1'], {}), '(pred_y1)\n', (2411, 2420), True, 'import tensorflow as tf\n'), ((2459, 2480), 'tensorflow.ones_like', 'tf.ones_like', (['true_x1'], {}), '(true_x1)\n', (2471, 2480), True, 'import tensorflow as tf\n'), ((2505, 2526), 'tensorflow.ones_like', 'tf.ones_like', (['true_y1'], {}), '(true_y1)\n', (2517, 2526), True, 'import tensorflow as tf\n'), ((2642, 2653), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (2651, 2653), True, 'from keras import backend as K\n'), ((776, 805), 'tensorflow.ones_like', 'tf.ones_like', (['intersection_x1'], {}), '(intersection_x1)\n', (788, 805), True, 'import tensorflow as tf\n'), ((897, 926), 'tensorflow.ones_like', 'tf.ones_like', (['intersection_y1'], {}), '(intersection_y1)\n', (909, 926), True, 'import tensorflow as tf\n'), ((2163, 2192), 'tensorflow.ones_like', 'tf.ones_like', (['intersection_x1'], {}), '(intersection_x1)\n', (2175, 2192), True, 'import tensorflow as tf\n'), ((2284, 2313), 'tensorflow.ones_like', 'tf.ones_like', (['intersection_y1'], {}), '(intersection_y1)\n', (2296, 2313), True, 'import tensorflow as tf\n')]
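# Hedged usage sketch: plugging the IoU-based loss and metric above into a
# Keras model that predicts (x, y, w, h) boxes. The model architecture below
# is purely illustrative.
from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(4, input_shape=(16,))])  # 4 outputs: x, y, w, h
model.compile(optimizer="adam",
              loss=bounding_box_iou_based_network_loss,
              metrics=[bounding_box_iou_based_network_metric])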
from __future__ import absolute_import
from __future__ import unicode_literals

from django.utils import timezone

import factory
from factory import fuzzy


class UserFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = 'auth.User'

    username = fuzzy.FuzzyText()
    password = factory.PostGenerationMethodCall("set_password", "<PASSWORD>")
    is_active = True
    email = fuzzy.FuzzyText(suffix="@bar.it")


class AssopyUserFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = 'assopy.User'

    user = factory.SubFactory(UserFactory)


class OrderFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = "assopy.Order"

    user = factory.SubFactory(AssopyUserFactory)
    payment = "cc"


class OrderItemFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = "assopy.OrderItem"


class TicketFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = "conference.Ticket"


class VatFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = "assopy.Vat"

    value = 21


class VatFareFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = "assopy.VatFare"

    vat = factory.SubFactory(VatFactory)
    fare = None


class FareFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = "conference.Fare"

    vats = factory.RelatedFactory(VatFareFactory, "fare")
    conference = "testconf"
    code = "TOSP"
    name = fuzzy.FuzzyText()
    price = 10

    @factory.lazy_attribute
    def start_validity(self):
        return timezone.now() - timezone.timedelta(days=10)

    @factory.lazy_attribute
    def end_validity(self):
        return timezone.now() + timezone.timedelta(days=10)
[ "factory.RelatedFactory", "factory.fuzzy.FuzzyText", "django.utils.timezone.now", "factory.SubFactory", "django.utils.timezone.timedelta", "factory.PostGenerationMethodCall" ]
[((272, 289), 'factory.fuzzy.FuzzyText', 'fuzzy.FuzzyText', ([], {}), '()\n', (287, 289), False, 'from factory import fuzzy\n'), ((305, 367), 'factory.PostGenerationMethodCall', 'factory.PostGenerationMethodCall', (['"""set_password"""', '"""<PASSWORD>"""'], {}), "('set_password', '<PASSWORD>')\n", (337, 367), False, 'import factory\n'), ((401, 434), 'factory.fuzzy.FuzzyText', 'fuzzy.FuzzyText', ([], {'suffix': '"""@bar.it"""'}), "(suffix='@bar.it')\n", (416, 434), False, 'from factory import fuzzy\n'), ((555, 586), 'factory.SubFactory', 'factory.SubFactory', (['UserFactory'], {}), '(UserFactory)\n', (573, 586), False, 'import factory\n'), ((703, 740), 'factory.SubFactory', 'factory.SubFactory', (['AssopyUserFactory'], {}), '(AssopyUserFactory)\n', (721, 740), False, 'import factory\n'), ((1217, 1247), 'factory.SubFactory', 'factory.SubFactory', (['VatFactory'], {}), '(VatFactory)\n', (1235, 1247), False, 'import factory\n'), ((1382, 1428), 'factory.RelatedFactory', 'factory.RelatedFactory', (['VatFareFactory', '"""fare"""'], {}), "(VatFareFactory, 'fare')\n", (1404, 1428), False, 'import factory\n'), ((1486, 1503), 'factory.fuzzy.FuzzyText', 'fuzzy.FuzzyText', ([], {}), '()\n', (1501, 1503), False, 'from factory import fuzzy\n'), ((1593, 1607), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1605, 1607), False, 'from django.utils import timezone\n'), ((1610, 1637), 'django.utils.timezone.timedelta', 'timezone.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (1628, 1637), False, 'from django.utils import timezone\n'), ((1710, 1724), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1722, 1724), False, 'from django.utils import timezone\n'), ((1727, 1754), 'django.utils.timezone.timedelta', 'timezone.timedelta', ([], {'days': '(10)'}), '(days=10)\n', (1745, 1754), False, 'from django.utils import timezone\n')]
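# Usage sketch: because of the SubFactory chain, a single OrderFactory() call
# builds the whole object graph (auth.User -> assopy.User -> Order). This only
# runs inside a test with database access; the calls below are illustrative.
order = OrderFactory()        # creates the auth and assopy users behind the scenes
fare = FareFactory(price=25)  # RelatedFactory attaches a VatFare after creation
assert fare.start_validity < fare.end_validity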
from django.shortcuts import get_object_or_404
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView

from v1.accounts.models.profile import Profile
from v1.accounts.models.user import User
from v1.accounts.models.libraries import Library
from v1.accounts.serializers.user import UserSerializer, UserSerializerCreate, UserSerializerLogin, UserSerializerUpdate
from v1.utils import constants
from v1.utils.permissions import is_administrator, is_moderator


# users
class UserView(APIView):

    @staticmethod
    def get(request):
        """List users"""
        users = User.objects.all()
        return Response(UserSerializer(users, many=True).data)

    @staticmethod
    def post(request):
        """Create user"""
        serializer = UserSerializerCreate(data=request.data, context={'request': request})

        if serializer.is_valid():
            user = serializer.save()
            user.set_password(serializer.validated_data['password'])
            user.update(signup_ip=request.META['REMOTE_ADDR'])
            user.save()
            Profile(user=user).save()
            Library(user=user).save()
            return Response(UserSerializer(user).data, status=status.HTTP_201_CREATED)

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)


# users/{user_id}
class UserDetail(APIView):

    @staticmethod
    def get(request, user_id):
        """View individual user"""
        user = get_object_or_404(User, pk=user_id)
        return Response(UserSerializer(user).data)

    @staticmethod
    def patch(request, user_id):
        """Update authenticated user"""
        user = get_object_or_404(User, pk=user_id)

        if user != request.user:
            return Response(status=status.HTTP_401_UNAUTHORIZED)

        serializer = UserSerializerUpdate(user, data=request.data, context={'request': request}, partial=True)

        if serializer.is_valid():
            serializer.save()
            return Response(UserSerializerLogin(serializer.instance).data)

        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    @staticmethod
    def delete(request, user_id):
        """Delete user"""
        user = get_object_or_404(User, pk=user_id)

        if is_administrator(user) or user.is_superuser:
            return Response({
                constants.ERROR: 'That user can not be deleted'
            }, status=status.HTTP_401_UNAUTHORIZED)

        if is_moderator(user) and not is_administrator(request.user):
            return Response({
                constants.ERROR: 'Admin permissions needed to delete moderators'
            }, status=status.HTTP_401_UNAUTHORIZED)

        if not is_moderator(request.user):
            return Response({
                constants.ERROR: 'Moderator permissions needed to delete users'
            }, status=status.HTTP_401_UNAUTHORIZED)

        user.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
[ "v1.utils.permissions.is_moderator", "v1.accounts.serializers.user.UserSerializer", "v1.accounts.serializers.user.UserSerializerCreate", "django.shortcuts.get_object_or_404", "rest_framework.response.Response", "v1.accounts.models.libraries.Library", "v1.accounts.serializers.user.UserSerializerUpdate", "v1.utils.permissions.is_administrator", "v1.accounts.models.profile.Profile", "v1.accounts.serializers.user.UserSerializerLogin", "v1.accounts.models.user.User.objects.all" ]
[((656, 674), 'v1.accounts.models.user.User.objects.all', 'User.objects.all', ([], {}), '()\n', (672, 674), False, 'from v1.accounts.models.user import User\n'), ((846, 915), 'v1.accounts.serializers.user.UserSerializerCreate', 'UserSerializerCreate', ([], {'data': 'request.data', 'context': "{'request': request}"}), "(data=request.data, context={'request': request})\n", (866, 915), False, 'from v1.accounts.serializers.user import UserSerializer, UserSerializerCreate, UserSerializerLogin, UserSerializerUpdate\n'), ((1321, 1384), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (1329, 1384), False, 'from rest_framework.response import Response\n'), ((1551, 1586), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'pk': 'user_id'}), '(User, pk=user_id)\n', (1568, 1586), False, 'from django.shortcuts import get_object_or_404\n'), ((1764, 1799), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'pk': 'user_id'}), '(User, pk=user_id)\n', (1781, 1799), False, 'from django.shortcuts import get_object_or_404\n'), ((1919, 2012), 'v1.accounts.serializers.user.UserSerializerUpdate', 'UserSerializerUpdate', (['user'], {'data': 'request.data', 'context': "{'request': request}", 'partial': '(True)'}), "(user, data=request.data, context={'request': request},\n partial=True)\n", (1939, 2012), False, 'from v1.accounts.serializers.user import UserSerializer, UserSerializerCreate, UserSerializerLogin, UserSerializerUpdate\n'), ((2163, 2226), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (2171, 2226), False, 'from rest_framework.response import Response\n'), ((2340, 2375), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'pk': 'user_id'}), '(User, pk=user_id)\n', (2357, 2375), False, 'from django.shortcuts import get_object_or_404\n'), ((3053, 3096), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (3061, 3096), False, 'from rest_framework.response import Response\n'), ((1852, 1897), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_401_UNAUTHORIZED'}), '(status=status.HTTP_401_UNAUTHORIZED)\n', (1860, 1897), False, 'from rest_framework.response import Response\n'), ((2387, 2409), 'v1.utils.permissions.is_administrator', 'is_administrator', (['user'], {}), '(user)\n', (2403, 2409), False, 'from v1.utils.permissions import is_administrator, is_moderator\n'), ((2451, 2552), 'rest_framework.response.Response', 'Response', (["{constants.ERROR: 'That user can not be deleted'}"], {'status': 'status.HTTP_401_UNAUTHORIZED'}), "({constants.ERROR: 'That user can not be deleted'}, status=status.\n HTTP_401_UNAUTHORIZED)\n", (2459, 2552), False, 'from rest_framework.response import Response\n'), ((2589, 2607), 'v1.utils.permissions.is_moderator', 'is_moderator', (['user'], {}), '(user)\n', (2601, 2607), False, 'from v1.utils.permissions import is_administrator, is_moderator\n'), ((2667, 2784), 'rest_framework.response.Response', 'Response', (["{constants.ERROR: 'Admin permissions needed to delete moderators'}"], {'status': 'status.HTTP_401_UNAUTHORIZED'}), "({constants.ERROR: 'Admin permissions needed to delete moderators'},\n status=status.HTTP_401_UNAUTHORIZED)\n", (2675, 2784), False, 'from rest_framework.response import Response\n'), ((2826, 2852), 'v1.utils.permissions.is_moderator', 'is_moderator', (['request.user'], {}), '(request.user)\n', (2838, 2852), False, 'from v1.utils.permissions import is_administrator, is_moderator\n'), ((2873, 2989), 'rest_framework.response.Response', 'Response', (["{constants.ERROR: 'Moderator permissions needed to delete users'}"], {'status': 'status.HTTP_401_UNAUTHORIZED'}), "({constants.ERROR: 'Moderator permissions needed to delete users'},\n status=status.HTTP_401_UNAUTHORIZED)\n", (2881, 2989), False, 'from rest_framework.response import Response\n'), ((699, 731), 'v1.accounts.serializers.user.UserSerializer', 'UserSerializer', (['users'], {'many': '(True)'}), '(users, many=True)\n', (713, 731), False, 'from v1.accounts.serializers.user import UserSerializer, UserSerializerCreate, UserSerializerLogin, UserSerializerUpdate\n'), ((1611, 1631), 'v1.accounts.serializers.user.UserSerializer', 'UserSerializer', (['user'], {}), '(user)\n', (1625, 1631), False, 'from v1.accounts.serializers.user import UserSerializer, UserSerializerCreate, UserSerializerLogin, UserSerializerUpdate\n'), ((2616, 2646), 'v1.utils.permissions.is_administrator', 'is_administrator', (['request.user'], {}), '(request.user)\n', (2632, 2646), False, 'from v1.utils.permissions import is_administrator, is_moderator\n'), ((1155, 1173), 'v1.accounts.models.profile.Profile', 'Profile', ([], {'user': 'user'}), '(user=user)\n', (1162, 1173), False, 'from v1.accounts.models.profile import Profile\n'), ((1193, 1211), 'v1.accounts.models.libraries.Library', 'Library', ([], {'user': 'user'}), '(user=user)\n', (1200, 1211), False, 'from v1.accounts.models.libraries import Library\n'), ((1247, 1267), 'v1.accounts.serializers.user.UserSerializer', 'UserSerializer', (['user'], {}), '(user)\n', (1261, 1267), False, 'from v1.accounts.serializers.user import UserSerializer, UserSerializerCreate, UserSerializerLogin, UserSerializerUpdate\n'), ((2101, 2141), 'v1.accounts.serializers.user.UserSerializerLogin', 'UserSerializerLogin', (['serializer.instance'], {}), '(serializer.instance)\n', (2120, 2141), False, 'from v1.accounts.serializers.user import UserSerializer, UserSerializerCreate, UserSerializerLogin, UserSerializerUpdate\n')]
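# A sketch of how these views could be wired into Django's URLconf; the module
# path and URL shapes are assumptions, not part of the code above.
from django.urls import path
# from v1.accounts.views.users import UserView, UserDetail  # hypothetical path

urlpatterns = [
    path('users', UserView.as_view()),
    path('users/<int:user_id>', UserDetail.as_view()),
]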
from django import forms
from django.forms import widgets


class ChooseGameForm(forms.Form):
    game_choice = forms.ChoiceField(
        label="New or existing Game?",
        widget=widgets.RadioSelect(),
        choices=(
            ("solo", "New Solo Game"),
            ("new", "New Game"),
            ("id", "Continue Game"),
        ),
    )
    game_id = forms.CharField(label="Continue Game by id", required=False)
[ "django.forms.widgets.RadioSelect", "django.forms.CharField" ]
[((366, 426), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Continue Game by id"""', 'required': '(False)'}), "(label='Continue Game by id', required=False)\n", (381, 426), False, 'from django import forms\n'), ((185, 206), 'django.forms.widgets.RadioSelect', 'widgets.RadioSelect', ([], {}), '()\n', (204, 206), False, 'from django.forms import widgets\n')]
from http.server import HTTPServer
from typing import Callable, Dict, Optional

from .routes import Route
from .request_handler import ReqHandler


class API():
    def __init__(self) -> None:
        self.server: HTTPServer
        self.serverinfo: Dict = {}
        self.routes = {}

    def get(self, path: str, status_code: int = 200):  # , name: Optional[str]):
        def decorated(func: Callable):
            route = Route(
                path=path,
                endpoint=func,
                # name=name,
                method='GET',
                status_code=status_code
            )
            self.routes[route.path] = route
            return func
        return decorated

    def run(self, host: str, port: int = 8000):
        ReqHandler.add_routes(self.routes)
        self.server = HTTPServer(
            server_address=(host, port),
            RequestHandlerClass=ReqHandler
        )
        self.serverinfo["host"] = host
        self.serverinfo["port"] = port
        try:
            self.server.serve_forever()
        except KeyboardInterrupt as e:
            print("\nShutting down server.")
            self.server.shutdown()
[ "http.server.HTTPServer" ]
[((811, 882), 'http.server.HTTPServer', 'HTTPServer', ([], {'server_address': '(host, port)', 'RequestHandlerClass': 'ReqHandler'}), '(server_address=(host, port), RequestHandlerClass=ReqHandler)\n', (821, 882), False, 'from http.server import HTTPServer\n')]
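# Usage sketch for the API class above. The handler signature is an
# assumption: Route and ReqHandler are not shown here, so how the endpoint is
# invoked is inferred from the decorator alone.
api = API()


@api.get("/hello", status_code=200)
def hello():  # hypothetical handler
    return "hello, world"


api.run("localhost", 8000)  # serves until interrupted with Ctrl-C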
import numpy as np

from code.waveform import Triangle, Sawtooth, SquareWave, Sine


# discrete integration where s is your signal as array and l is your first entry
def running_sum(s, l):
    y = np.zeros(len(s))
    y[0] = s[0] + l
    for n in range(1, len(s)):
        y[n] = s[n] + y[n - 1]
    return y


def normalize(y):
    return (y - y.min(axis=0)) / (y.max(axis=0) - y.min(axis=0))


def current_fourier_wf(label, i, fm, x):
    if label == 'Triangle':
        return Triangle.fourier_series(int(i), fm, x)
    elif label == 'Sawtooth':
        return Sawtooth.fourier_series(int(i), fm, x)
    elif label == 'Square Wave':
        return SquareWave.fourier_series(int(i), fm, x)


def current_trigon_wf(label, a, fm, x, c, lfo=0):
    if label == 'Triangle':
        return Triangle.trigonometric(a, fm, x, c, lfo)
    elif label == 'Sawtooth':
        return Sawtooth.trigonometric(a, fm, x, c, lfo)
    elif label == 'Square Wave':
        return SquareWave.trigonometric(a, fm, x, c, lfo)
    elif label == 'Sine':
        return Sine.trigonometric(a, fm, x, c, lfo)


def equation_type(wf, title):
    if title == 'Fourier series':
        return wf.equation_fourier()
    elif title == 'Trigonometric function':
        return wf.equation_trigon()


def current_equation(label, title):
    if label == 'Triangle':
        return equation_type(Triangle, title)
    elif label == 'Sawtooth':
        return equation_type(Sawtooth, title)
    elif label == 'Square Wave':
        return equation_type(SquareWave, title)
[ "code.waveform.Sine.trigonometric", "code.waveform.Triangle.trigonometric", "code.waveform.Sawtooth.trigonometric", "code.waveform.SquareWave.trigonometric" ]
[((786, 826), 'code.waveform.Triangle.trigonometric', 'Triangle.trigonometric', (['a', 'fm', 'x', 'c', 'lfo'], {}), '(a, fm, x, c, lfo)\n', (808, 826), False, 'from code.waveform import Triangle, Sawtooth, SquareWave, Sine\n'), ((872, 912), 'code.waveform.Sawtooth.trigonometric', 'Sawtooth.trigonometric', (['a', 'fm', 'x', 'c', 'lfo'], {}), '(a, fm, x, c, lfo)\n', (894, 912), False, 'from code.waveform import Triangle, Sawtooth, SquareWave, Sine\n'), ((961, 1003), 'code.waveform.SquareWave.trigonometric', 'SquareWave.trigonometric', (['a', 'fm', 'x', 'c', 'lfo'], {}), '(a, fm, x, c, lfo)\n', (985, 1003), False, 'from code.waveform import Triangle, Sawtooth, SquareWave, Sine\n'), ((1045, 1081), 'code.waveform.Sine.trigonometric', 'Sine.trigonometric', (['a', 'fm', 'x', 'c', 'lfo'], {}), '(a, fm, x, c, lfo)\n', (1063, 1081), False, 'from code.waveform import Triangle, Sawtooth, SquareWave, Sine\n')]
# -*- coding: utf-8 -*-
import logging

from django.apps import AppConfig

logger = logging.getLogger('inventree')


class CommonConfig(AppConfig):
    name = 'common'

    def ready(self):
        self.clear_restart_flag()

    def clear_restart_flag(self):
        """Clear the SERVER_RESTART_REQUIRED setting"""
        try:
            import common.models
            if common.models.InvenTreeSetting.get_setting('SERVER_RESTART_REQUIRED'):
                logger.info("Clearing SERVER_RESTART_REQUIRED flag")
                common.models.InvenTreeSetting.set_setting('SERVER_RESTART_REQUIRED', False, None)
        except:
            pass
[ "logging.getLogger" ]
[((86, 116), 'logging.getLogger', 'logging.getLogger', (['"""inventree"""'], {}), "('inventree')\n", (103, 116), False, 'import logging\n')]
from IDQCorrelator import *

sys.path.append("D:/Control/PythonPackages/")
import pbec_analysis

# Parameters
expo_time = 0.5
channel_mask = 1+2+8  # 7
buffer_size = 2e5
##auto_col_channel=1
elec_trig_ch = 0
optical_trig_ch = 1
signal_ch = 3

# Setup the correlator device: seems to be needed each time we retrieve the timestamps
correlator = CorrelatorControl()
correlator.setExposureTime(expo_time)  # 1 second exposure
correlator.enableChannels(channel_mask)  # channels 1, 2, and 3
correlator.setTimestampBufferSize(int(buffer_size))
timebase = correlator._getTimebase_()

# Acquire the data
timestamps, channels = correlator.getLastTimeStamps(expo_time/1.3)  # there's something dodgy here...
correlator.close()
pbec_ts = pbec_analysis.make_timestamp()
ts_ch = zip(timestamps, channels)

# Put the data into a Data object
corr_data = CorrelatorData(pbec_ts)
corr_data.setData((timestamps, channels))

# For checking the optical trigger
tmin, tmax = 1945.5e-9, 1947e-9
corr_data.plotHistogram(1*timebase, tmin, tmax, elec_trig_ch, optical_trig_ch, fignum=432)

# Checking signal channel
#tmin, tmax=1945e-9, 1948e-9
corr_data.plotHistogram(1*timebase, tmin, tmax, elec_trig_ch, signal_ch, fignum=433)

#corr_data.plotAutoCorrelation(10*timebase,tmin,tmax,auto_col_channel)
#corr_data.plotCoincidences()
# This will fail massively
#####corr_data.saveData()

'''
ex = pbec_analysis.ExperimentalDataSet(pbec_ts)
ex.dataset = {"correlation_data":corr_data}
ex.meta.comments="Still testing"
ex.meta.parameters={"expo_time":expo_time, "channel_mask":channel_mask,
    "buffer_size":buffer_size, "timebase":timebase,
    "Output Amplitude":output_amplitude,
    "Amplifier Control Value":amplifier_control_value,
    "Output Repetition Rate":output_repetition_rate}
ex.saveAllData()
'''
[ "pbec_analysis.make_timestamp" ]
[((735, 765), 'pbec_analysis.make_timestamp', 'pbec_analysis.make_timestamp', ([], {}), '()\n', (763, 765), False, 'import pbec_analysis\n')]
from Products.ATContentTypes.content import schemata
from Products.Archetypes import atapi
from AccessControl import ClassSecurityInfo
from DateTime import DateTime
from Products.ATExtensions.ateapi import DateTimeField, DateTimeWidget, RecordsField
from Products.Archetypes.config import REFERENCE_CATALOG
from Products.Archetypes.public import *
from Products.CMFCore.permissions import ListFolderContents, View
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import safe_unicode
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.config import PROJECTNAME
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from zope.interface import implements

schema = BikaSchema.copy() + Schema((
    ComputedField('RequestID',
        expression = 'here.getRequestID()',
        widget = ComputedWidget(
            visible = True,
        ),
    ),
    FileField('AttachmentFile',
        widget = FileWidget(
            label=_("Attachment"),
        ),
    ),
    ReferenceField('AttachmentType',
        required = 0,
        allowed_types = ('AttachmentType',),
        relationship = 'AttachmentAttachmentType',
        widget = ReferenceWidget(
            label=_("Attachment Type"),
        ),
    ),
    StringField('AttachmentKeys',
        searchable = True,
        widget = StringWidget(
            label=_("Attachment Keys"),
        ),
    ),
    DateTimeField('DateLoaded',
        required = 1,
        default_method = 'current_date',
        widget = DateTimeWidget(
            label=_("Date Loaded"),
        ),
    ),
    ComputedField('AttachmentTypeUID',
        expression="context.getAttachmentType().UID() if context.getAttachmentType() else ''",
        widget = ComputedWidget(
            visible = False,
        ),
    ),
    ComputedField('ClientUID',
        expression = 'here.aq_parent.UID()',
        widget = ComputedWidget(
            visible = False,
        ),
    ),
),
)

schema['id'].required = False
schema['title'].required = False


class Attachment(BaseFolder):
    security = ClassSecurityInfo()
    displayContentsTab = False
    schema = schema

    _at_rename_after_creation = True

    def _renameAfterCreation(self, check_auto_id=False):
        from bika.lims.idserver import renameAfterCreation
        renameAfterCreation(self)

    def Title(self):
        """ Return the Id """
        return safe_unicode(self.getId()).encode('utf-8')

    def getTextTitle(self):
        """ Return the request and possibly analysis title as title """
        requestid = self.getRequestID()
        if requestid:
            analysis = self.getAnalysis()
            if analysis:
                return '%s - %s' % (requestid, analysis.Title())
            else:
                return requestid
        else:
            return None

    def getRequest(self):
        """ Return the AR to which this is linked """
        """ there is a short time between creation and linking """
        """ when it is not linked """
        tool = getToolByName(self, REFERENCE_CATALOG)
        uids = [uid for uid in
                tool.getBackReferences(self, 'AnalysisRequestAttachment')]
        if len(uids) == 1:
            reference = uids[0]
            ar = tool.lookupObject(reference.sourceUID)
            return ar
        else:
            uids = [uid for uid in
                    tool.getBackReferences(self, 'AnalysisAttachment')]
            if len(uids) == 1:
                reference = uids[0]
                analysis = tool.lookupObject(reference.sourceUID)
                ar = analysis.aq_parent
                return ar
        return None

    def getRequestID(self):
        """ Return the ID of the request to which this is linked """
        ar = self.getRequest()
        if ar:
            return ar.getRequestID()
        else:
            return None

    def getAnalysis(self):
        """ Return the analysis to which this is linked """
        """ it may not be linked to an analysis """
        tool = getToolByName(self, REFERENCE_CATALOG)
        uids = [uid for uid in
                tool.getBackReferences(self, 'AnalysisAttachment')]
        if len(uids) == 1:
            reference = uids[0]
            analysis = tool.lookupObject(reference.sourceUID)
            return analysis
        return None

    def getParentState(self):
        """ Return the review state of the object - analysis or AR """
        """ to which this is linked """
        tool = getToolByName(self, REFERENCE_CATALOG)
        uids = [uid for uid in
                tool.getBackReferences(self, 'AnalysisAttachment')]
        if len(uids) == 1:
            reference = uids[0]
            parent = tool.lookupObject(reference.sourceUID)
        else:
            uids = [uid for uid in
                    tool.getBackReferences(self, 'AnalysisRequestAttachment')]
            if len(uids) == 1:
                reference = uids[0]
                parent = tool.lookupObject(reference.sourceUID)
        workflow = getToolByName(self, 'portal_workflow')
        return workflow.getInfoFor(parent, 'review_state', '')

    security.declarePublic('current_date')
    def current_date(self):
        """ return current date """
        return DateTime()


atapi.registerType(Attachment, PROJECTNAME)
[ "bika.lims.bikaMessageFactory", "bika.lims.idserver.renameAfterCreation", "DateTime.DateTime", "Products.CMFCore.utils.getToolByName", "Products.Archetypes.atapi.registerType", "AccessControl.ClassSecurityInfo", "bika.lims.content.bikaschema.BikaSchema.copy" ]
[((5278, 5321), 'Products.Archetypes.atapi.registerType', 'atapi.registerType', (['Attachment', 'PROJECTNAME'], {}), '(Attachment, PROJECTNAME)\n', (5296, 5321), False, 'from Products.Archetypes import atapi\n'), ((729, 746), 'bika.lims.content.bikaschema.BikaSchema.copy', 'BikaSchema.copy', ([], {}), '()\n', (744, 746), False, 'from bika.lims.content.bikaschema import BikaSchema\n'), ((2090, 2109), 'AccessControl.ClassSecurityInfo', 'ClassSecurityInfo', ([], {}), '()\n', (2107, 2109), False, 'from AccessControl import ClassSecurityInfo\n'), ((2323, 2348), 'bika.lims.idserver.renameAfterCreation', 'renameAfterCreation', (['self'], {}), '(self)\n', (2342, 2348), False, 'from bika.lims.idserver import renameAfterCreation\n'), ((3044, 3082), 'Products.CMFCore.utils.getToolByName', 'getToolByName', (['self', 'REFERENCE_CATALOG'], {}), '(self, REFERENCE_CATALOG)\n', (3057, 3082), False, 'from Products.CMFCore.utils import getToolByName\n'), ((4041, 4079), 'Products.CMFCore.utils.getToolByName', 'getToolByName', (['self', 'REFERENCE_CATALOG'], {}), '(self, REFERENCE_CATALOG)\n', (4054, 4079), False, 'from Products.CMFCore.utils import getToolByName\n'), ((4505, 4543), 'Products.CMFCore.utils.getToolByName', 'getToolByName', (['self', 'REFERENCE_CATALOG'], {}), '(self, REFERENCE_CATALOG)\n', (4518, 4543), False, 'from Products.CMFCore.utils import getToolByName\n'), ((5040, 5078), 'Products.CMFCore.utils.getToolByName', 'getToolByName', (['self', '"""portal_workflow"""'], {}), "(self, 'portal_workflow')\n", (5053, 5078), False, 'from Products.CMFCore.utils import getToolByName\n'), ((5265, 5275), 'DateTime.DateTime', 'DateTime', ([], {}), '()\n', (5273, 5275), False, 'from DateTime import DateTime\n'), ((991, 1006), 'bika.lims.bikaMessageFactory', '_', (['"""Attachment"""'], {}), "('Attachment')\n", (992, 1006), True, 'from bika.lims import bikaMessageFactory as _\n'), ((1233, 1253), 'bika.lims.bikaMessageFactory', '_', (['"""Attachment Type"""'], {}), "('Attachment Type')\n", (1234, 1253), True, 'from bika.lims import bikaMessageFactory as _\n'), ((1383, 1403), 'bika.lims.bikaMessageFactory', '_', (['"""Attachment Keys"""'], {}), "('Attachment Keys')\n", (1384, 1403), True, 'from bika.lims import bikaMessageFactory as _\n'), ((1569, 1585), 'bika.lims.bikaMessageFactory', '_', (['"""Date Loaded"""'], {}), "('Date Loaded')\n", (1570, 1585), True, 'from bika.lims import bikaMessageFactory as _\n')]
import os
import sys

from pip.req import parse_requirements
from pip.download import PipSession
from setuptools import find_packages

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# reading requirements
install_reqs = parse_requirements('requirements.txt', session=PipSession())
reqs = [str(ir.req) for ir in install_reqs]

sys.path.insert(0, os.path.dirname(__file__))

version = '0.1.1'

setup(
    name='agua',
    version=version,
    packages=find_packages(),
    install_requires=reqs,
    include_package_data=True,
    license='MIT',
    description='Compare data in columns with other columns with the help of comparator functions',
    keywords = ['agua', 'testing', 'data', 'csv'],
    url='https://github.com/CompileInc/agua',
    download_url = 'https://github.com/CompileInc/agua/archive/v{version}.tar.gz'.format(version=version),
    entry_points='''
        [console_scripts]
        agua=agua:cli
    '''
)
[ "pip.download.PipSession", "os.path.dirname", "setuptools.find_packages" ]
[((393, 418), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (408, 418), False, 'import os\n'), ((316, 328), 'pip.download.PipSession', 'PipSession', ([], {}), '()\n', (326, 328), False, 'from pip.download import PipSession\n'), ((496, 511), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (509, 511), False, 'from setuptools import find_packages\n')]
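Note that `pip.req` and `pip.download` are pip internals that were removed in pip 10, so the row above only installs under old pip versions. A dependency-free way to read the same requirements file, should the row need updating:

# Reads requirements.txt directly instead of importing pip internals.
with open('requirements.txt') as f:
    reqs = [line.strip() for line in f
            if line.strip() and not line.startswith('#')]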
import logging
import re

logging.basicConfig(level=logging.DEBUG)


def _clear_str(s):
    return ''.join([c for c in s if c not in '\\;"\'\n'])


class BaseCol():
    """Base class for table col type"""

    def __init__(self, **kwargs):
        """
        :param kwargs: 'name' - column name,
            'type' - column type ('int', 'text', 'char(...)', 'varchar(...)'),
            'required' - is column required or not,
            'pk' - if True column is primary key,
            'fk' - if True column is foreign key,
            'fk_table' - table name for foreign key,
            'fk_col' - column name in foreign table for foreign key
        """
        self.name = kwargs.get('name', None)
        self.type = kwargs.get('type', None)
        self.is_required = kwargs.get('required', False)
        self.is_pk = kwargs.get('pk', False)
        if self.is_pk:
            self.is_required = True
        self.is_fk = kwargs.get('fk', False)
        self.fk_ref_table = kwargs.get('fk_table', None)
        self.fk_ref_col = kwargs.get('fk_col', None)

    def name_to_create(self):
        """
        returns string for using in the SQL statement
        for current column creation
        """
        s = '{} {}'.format(self.name, self.type)
        if self.is_pk:
            s += ' PRIMARY KEY'
        if self.is_required:
            s += ' NOT NULL'
        if self.is_fk:
            s += ',\nFOREIGN KEY ({}) REFERENCES {}({})'.format(
                self.name, self.fk_ref_table, self.fk_ref_col)
        return s


class Base():
    """Base orm_lite class"""

    connection = None
    table_cols = None
    is_values_passed = False
    is_all_required = False
    parse_error = False
    values_list = []

    def _get_table_name(self):
        # returns current table name
        table_name = self.__class__.__dict__.get('__tablename__', None)
        return table_name

    def _get_table_cols(self):
        # returns column names of the current table
        return [x for x in self.__class__.__dict__.keys()
                if not x.startswith('__')]

    def _check_tablename_connection(self):
        # check if connection to DB is set AND if tablename is set.
        # Returns False if not
        if not self._get_table_name() or not self.connection:
            logging.error('__tablename__ or connection missed')
            return
        return True

    def _parse_table_cols(self):
        # Parses class variables and set up table columns
        if self.table_cols:
            return
        self.table_cols = []
        for col_name in self._get_table_cols():
            params = self.__class__.__dict__.get(col_name)
            col_req = False
            col_pk = False
            col_fk = False
            col_fk_table = None
            col_fk_col = None
            if not len(params):
                continue
            col_type = _clear_str(params[0])
            if len(params) == 2:
                if params[1] == 'pk':
                    col_pk = True
                elif params[1] == 'required':
                    col_req = True
            elif len(params) > 2:
                if params[1] == 'fk' and ('.' in params[2]):
                    col_fk = True
                    col_fk_table = _clear_str((params[2]).split('.')[0])
                    col_fk_col = _clear_str((params[2]).split('.')[1])
            col = BaseCol(name=col_name, type=col_type, required=col_req,
                          pk=col_pk, fk=col_fk, fk_table=col_fk_table,
                          fk_col=col_fk_col)
            self.table_cols.append(col)

    def _filter_kwargs(self, **kwargs):
        # filters passed args and verifies the data types correctness
        str_type = [r'^TEXT$', r'^VARCHAR\(\d+\)$', r'^CHAR\(\d+\)$']
        is_values_passed = False
        values_list = []
        parse_error = False
        for col in self.table_cols:
            if col.name in kwargs.keys():
                value = None
                if col.type.upper() == 'INT':
                    try:
                        value = int(kwargs[col.name])
                    except ValueError:
                        logging.error('Incorrect value type for {}({})'.format(
                            col.name, col.type))
                        parse_error = True
                        return
                    value = kwargs[col.name]
                else:
                    str_type_found = False
                    for s in str_type:
                        match = re.search(s, col.type.upper())
                        if match is not None:
                            str_type_found = True
                            value = "'" + str(kwargs[col.name]) + "'"
                            break
                    if not str_type_found:
                        logging.error('Unrecognized type {}'.format(col.type))
                        parse_error = True
                        return
                values_list.append((col.name, value))
                is_values_passed = True
        is_all_required = True
        fln = [c[0] for c in values_list]
        for col in self.table_cols:
            if col.is_required and col.name not in fln:
                is_all_required = False
        return is_values_passed, values_list[:], is_all_required, \
            parse_error

    def _execute_sql(self, sql_stmt, error_msg):
        # execute single SQL statement
        cur = self.connection.cursor()
        try:
            cur.execute(sql_stmt)
            self.connection.commit()
        except Exception as err:
            logging.error('Error SQL query executing: {}'.format(error_msg))
            logging.error(err)
        cur.close()

    def _execute_sql_with_result(self, sql_stmt, error_msg):
        # execute single SQL statement with return value
        cur = self.connection.cursor()
        res = None
        try:
            cur.execute(sql_stmt)
            res = cur.fetchall()
        except Exception as err:
            logging.error('Error SQL query executing: {}'.format(error_msg))
            logging.error(err)
        cur.close()
        return res

    def _set_conn_and_parse(self, **kwargs):
        # setup connection, parse table columns and passed args
        self.connection = kwargs.get('connection', self.connection)
        self._parse_table_cols()
        self.is_values_passed, self.values_list, self.is_all_required, \
            self.parse_error = self._filter_kwargs(**kwargs)

    def __call__(self, **kwargs):
        self._set_conn_and_parse(**kwargs)
        return self

    def __init__(self, **kwargs):
        self._set_conn_and_parse(**kwargs)

    def add(self):
        """
        Add row to the table. Perform INSERT SQL statement
        :return: None
        """
        if not self._check_tablename_connection():
            return
        if not self.is_all_required:
            logging.error('Required fields missed')
            return
        if self.parse_error:
            logging.error('Value parsing error')
            return
        cols = ', '.join(c[0] for c in self.values_list)
        values = ', '.join(str(c[1]) for c in self.values_list)
        sql_stmt = 'INSERT INTO {} ({}) VALUES ({});'.format(
            self._get_table_name(), cols, values)
        self._execute_sql(sql_stmt, 'add')

    def update(self, **kwargs):
        """
        Update row data. Perform UPDATE SQL statement
        :param kwargs: update column names and values
            for example: username='Tom'
        :return: None
        """
        upd_is_values_passed, upd_values_list, upd_is_all_required, \
            upd_parse_error = self._filter_kwargs(**kwargs)
        if not upd_is_values_passed:
            logging.warning('Nothing to update')
            return
        if upd_parse_error or self.parse_error:
            logging.error('Values parsing error')
            return
        if upd_values_list:
            upd_args = ', '.join(c[0] + " = " + str(c[1])
                                 for c in upd_values_list)
            sql_stmt = 'UPDATE {} SET {}'.format(
                self._get_table_name(), upd_args)
            if self.values_list:
                cond_args = ' AND '.join(c[0] + " = " + str(c[1])
                                         for c in self.values_list)
                sql_stmt += ' WHERE {}'.format(cond_args)
            sql_stmt += ';'
            self._execute_sql(sql_stmt, 'update')

    def delete(self):
        """
        Delete row in the table. Perform DELETE SQL statement
        :return: None
        """
        if not self._check_tablename_connection():
            return
        if self.parse_error:
            logging.error('Value parsing error')
            return
        table_name = self._get_table_name()
        if not self.values_list:
            sql_stmt = 'DELETE FROM {};'.format(table_name)
        else:
            args = ', '.join(c[0] + '=' + str(c[1]) for c in self.values_list)
            sql_stmt = 'DELETE FROM {} WHERE {};'.format(table_name, args)
        self._execute_sql(sql_stmt, 'delete')

    def is_exists(self):
        """
        Checks existence of the table.
        :return: True if the table exists, False if not
        """
        if not self._check_tablename_connection():
            return
        table_name = self._get_table_name()
        sql_stmt = "SELECT name FROM sqlite_master WHERE name = '{}';".format(
            table_name)
        return self._execute_sql_with_result(sql_stmt, 'is_exists') != []

    def create(self):
        """
        Creates a table with certain parameters (table name,
        column names and types). Perform CREATE TABLE SQL statement
        :return: None
        """
        if not self._check_tablename_connection():
            return
        table_name = self._get_table_name()
        argstr = ', '.join(c.name_to_create() for c in self.table_cols)
        sql_stmt = 'CREATE TABLE IF NOT EXISTS {} ({});'.format(
            table_name, argstr)
        self._execute_sql(sql_stmt, 'create')

    def drop(self):
        """
        Deletes a table if it exists. Perform DROP TABLE SQL statement
        :return: None
        """
        if not self._check_tablename_connection():
            return
        table_name = self._get_table_name()
        sql_stmt = 'DROP TABLE IF EXISTS {};'.format(table_name)
        self._execute_sql(sql_stmt, 'drop')

    def select_all(self, select_cols='*'):
        """
        Selects and returns rows from table. Perform SELECT FROM SQL statement
        :param select_cols: column names for selection
        :return: list of tuples with rows data
        """
        if not self._check_tablename_connection():
            return
        sql_stmt = 'SELECT {} FROM {}'.format(
            select_cols, self._get_table_name())
        table_join = ''
        for col in self.table_cols:
            if col.is_fk:
                table_join += ' INNER JOIN {} ON {}.{}={}.{}'.format(
                    col.fk_ref_table, col.fk_ref_table, col.fk_ref_col,
                    self._get_table_name(), col.name)
        sql_stmt += table_join
        if self.is_values_passed:
            args = ', '.join(c[0] + '=' + str(c[1]) for c in self.values_list)
            sql_stmt += ' WHERE {}'.format(args)
        sql_stmt += ';'
        return self._execute_sql_with_result(sql_stmt, 'select_all')

    def select(self, *args):
        """
        Filters args for select_all with params.
        :param args: list of column names
        :return: list of tuples with rows data
        """
        if not args:
            return self.select_all()
        else:
            cols_arg = []
            for arg in args:
                append_arg = arg if '.' in arg else \
                    self._get_table_name() + '.' + arg
                cols_arg.append(append_arg)
            return self.select_all(', '.join(cols_arg))
[ "logging.warning", "logging.error", "logging.basicConfig" ]
[((26, 66), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (45, 66), False, 'import logging\n'), ((2285, 2336), 'logging.error', 'logging.error', (['"""__tablename__ or connection missed"""'], {}), "('__tablename__ or connection missed')\n", (2298, 2336), False, 'import logging\n'), ((6911, 6950), 'logging.error', 'logging.error', (['"""Required fields missed"""'], {}), "('Required fields missed')\n", (6924, 6950), False, 'import logging\n'), ((7011, 7047), 'logging.error', 'logging.error', (['"""Value parsing error"""'], {}), "('Value parsing error')\n", (7024, 7047), False, 'import logging\n'), ((7761, 7797), 'logging.warning', 'logging.warning', (['"""Nothing to update"""'], {}), "('Nothing to update')\n", (7776, 7797), False, 'import logging\n'), ((7877, 7914), 'logging.error', 'logging.error', (['"""Values parsing error"""'], {}), "('Values parsing error')\n", (7890, 7914), False, 'import logging\n'), ((8732, 8768), 'logging.error', 'logging.error', (['"""Value parsing error"""'], {}), "('Value parsing error')\n", (8745, 8768), False, 'import logging\n'), ((5662, 5680), 'logging.error', 'logging.error', (['err'], {}), '(err)\n', (5675, 5680), False, 'import logging\n'), ((6080, 6098), 'logging.error', 'logging.error', (['err'], {}), '(err)\n', (6093, 6098), False, 'import logging\n')]
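A hedged usage sketch of the mini-ORM above against an in-memory sqlite3 database; the `User` model is illustrative and follows the tuple convention `_parse_table_cols` expects (type string first, then an optional 'pk'/'required'/'fk' marker):

import sqlite3

conn = sqlite3.connect(':memory:')

class User(Base):
    __tablename__ = 'users'
    id = ('INT', 'pk')                  # -> "id INT PRIMARY KEY NOT NULL"
    name = ('VARCHAR(50)', 'required')

user = User(connection=conn)
user.create()                           # CREATE TABLE IF NOT EXISTS users (...)
User(connection=conn, id=1, name='Tom').add()
print(user.select('name'))             # -> [('Tom',)]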
import io
import os
import sys
import copy
import subprocess
import importlib

from dotenv import load_dotenv
from timeout import timeout

dotenv_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '.env'))
load_dotenv(dotenv_path)

PYTHON_EXE = os.environ.get('PYTHON_EXE')

# might bring this back next year, don't know yet...
# def check_console(test_file, q_name, args, answers):
#     proc = subprocess.Popen([PYTHON_EXE, '-u', os.path.abspath(os.path.join(os.path.dirname(__file__), '..', test_file))], stdout=subprocess.PIPE)
#     results = []
#     messages = []
#     status = 'Successful!'
#     anss = []
#     for i, line in enumerate(proc.stdout):
#         ans = line.strip('\n')
#         anss.append(ans)
#         if ans == '':
#             break
#         else:
#             if int(ans) == answers[i]:
#                 results.append(True)
#                 messages.append('<p>Nice one! Was expecting {0} and got {1}</p>'.format(answers[i], ans))
#             else:
#                 results.append(False)
#                 messages.append('<p>Hmm not quite. Was expecting {0} but got {1}</p>'.format(answers[i], ans))
#     if False in results:
#         status = 'Unsuccessful'
#     return {'q_id': q_id, 'question_name': q_name, 'answers': anss, 'status': status, 'results': results, 'messages': messages}


def check_functions(file_path, function_name, args, answers, time_out,
                    no_unpack=False, nested=False, unbracket=False):
    result = False
    try:
        # get the function out of the file submitted
        filename = os.path.basename(file_path).split('.')[0]
        function = getattr(importlib.import_module('uploads.{0}'.format(filename)),
                           function_name)
        func = timeout(timeout=time_out)(function)

        # make a copy to submit to function in case the argument is mutable
        args_in = copy.copy(args)

        # check the number of args and call the function accordingly
        if len(args_in) == 0:
            ans = func()
        else:
            if no_unpack:
                ans = func(args_in)
            else:
                ans = func(*args_in)

        # if answer is within an array
        if nested:
            if ans == answers:
                result = True
        else:
            if [ans] == answers:
                result = True

        # remove brackets for display in markerbot
        if unbracket:
            answers = answers[0]

        return {'input': args, 'result': result, 'output': ans, 'expected': answers}
    except Exception as e:
        # str(e) replaces the Python-2-only e.message attribute
        return {'input': args, 'result': result, 'output': str(e), 'expected': answers}
[ "timeout.timeout", "os.path.basename", "os.path.dirname", "copy.copy", "dotenv.load_dotenv", "os.environ.get" ]
[((223, 247), 'dotenv.load_dotenv', 'load_dotenv', (['dotenv_path'], {}), '(dotenv_path)\n', (234, 247), False, 'from dotenv import load_dotenv\n'), ((261, 289), 'os.environ.get', 'os.environ.get', (['"""PYTHON_EXE"""'], {}), "('PYTHON_EXE')\n", (275, 289), False, 'import os\n'), ((181, 206), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (196, 206), False, 'import os\n'), ((1856, 1871), 'copy.copy', 'copy.copy', (['args'], {}), '(args)\n', (1865, 1871), False, 'import copy\n'), ((1728, 1753), 'timeout.timeout', 'timeout', ([], {'timeout': 'time_out'}), '(timeout=time_out)\n', (1735, 1753), False, 'from timeout import timeout\n'), ((1572, 1599), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (1588, 1599), False, 'import os\n')]
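The `timeout` helper imported above comes from a local module that is not part of this row; a minimal signal-based sketch of what such a decorator could look like (Unix-only, and the names are assumptions matched to the call site `timeout(timeout=time_out)(function)`):

import signal
import functools

def timeout(timeout):
    """Raise TimeoutError if the wrapped call runs longer than `timeout` seconds."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            def _handler(signum, frame):
                raise TimeoutError('call timed out after {}s'.format(timeout))
            old = signal.signal(signal.SIGALRM, _handler)
            signal.alarm(int(timeout))   # schedule SIGALRM
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)          # cancel the pending alarm
                signal.signal(signal.SIGALRM, old)
        return wrapper
    return decorator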
""" A helper module that initializes the display and buttons for the Adafruit PyGamer game console. """ import board import analogio import stage import displayio import busio import time import keypad K_X = 0x01 K_O = 0x02 K_START = 0x04 K_SELECT = 0x08 K_DOWN = 0x10 K_LEFT = 0x20 K_RIGHT = 0x40 K_UP = 0x80 # re-initialize the display for correct rotation and RGB mode _TFT_INIT = ( b"\x01\x80\x96" # SWRESET and Delay 150ms b"\x11\x80\xff" # SLPOUT and Delay b"\xb1\x03\x01\x2C\x2D" # _FRMCTR1 b"\xb2\x03\x01\x2C\x2D" # _FRMCTR2 b"\xb3\x06\x01\x2C\x2D\x01\x2C\x2D" # _FRMCTR3 b"\xb4\x01\x07" # _INVCTR line inversion b"\xc0\x03\xa2\x02\x84" # _PWCTR1 GVDD = 4.7V, 1.0uA b"\xc1\x01\xc5" # _PWCTR2 VGH=14.7V, VGL=-7.35V b"\xc2\x02\x0a\x00" # _PWCTR3 Opamp current small, Boost frequency b"\xc3\x02\x8a\x2a" b"\xc4\x02\x8a\xee" b"\xc5\x01\x0e" # _VMCTR1 VCOMH = 4V, VOML = -1.1V b"\x20\x00" # _INVOFF b"\x36\x01\xa0" # _MADCTL # 1 clk cycle nonoverlap, 2 cycle gate rise, 3 sycle osc equalie, # fix on VTL b"\x3a\x01\x05" # COLMOD - 16bit color b"\xe0\x10\x02\x1c\x07\x12\x37\x32\x29\x2d\x29\x25\x2B\x39\x00\x01\x03\x10" # _GMCTRP1 Gamma b"\xe1\x10\x03\x1d\x07\x06\x2E\x2C\x29\x2D\x2E\x2E\x37\x3F\x00\x00\x02\x10" # _GMCTRN1 b"\x13\x80\x0a" # _NORON b"\x29\x80\x64" # _DISPON ) class _Buttons: def __init__(self): self.keys = keypad.ShiftRegisterKeys(clock=board.BUTTON_CLOCK, data=board.BUTTON_OUT, latch=board.BUTTON_LATCH, key_count=4, interval=0.05) self.last_state = 0 self.event = keypad.Event(0, False) self.last_z_press = None self.joy_x = analogio.AnalogIn(board.JOYSTICK_X) self.joy_y = analogio.AnalogIn(board.JOYSTICK_Y) def get_pressed(self): buttons = self.last_state events = self.keys.events while events: if events.get_into(self.event): bit = 1 << self.event.key_number if self.event.pressed: buttons |= bit self.last_state |= bit else: self.last_state &= ~bit if buttons & K_START: now = time.monotonic() if self.last_z_press: if now - self.last_z_press > 2: supervisor.set_next_code_file(None) supervisor.reload() else: self.last_z_press = now else: self.last_z_press = None dead = 15000 x = self.joy_x.value - 32767 if x < -dead: buttons |= K_LEFT elif x > dead: buttons |= K_RIGHT y = self.joy_y.value - 32767 if y < -dead: buttons |= K_UP elif y > dead: buttons |= K_DOWN return buttons displayio.release_displays() _tft_spi = busio.SPI(clock=board.TFT_SCK, MOSI=board.TFT_MOSI) _fourwire = displayio.FourWire(_tft_spi, command=board.TFT_DC, chip_select=board.TFT_CS, reset=board.TFT_RST) display = displayio.Display(_fourwire, _TFT_INIT, width=160, height=128, rotation=0, backlight_pin=board.TFT_LITE, auto_refresh=False, auto_brightness=True) del _TFT_INIT buttons = _Buttons() audio = stage.Audio(board.SPEAKER, board.SPEAKER_ENABLE)
[ "displayio.FourWire", "displayio.Display", "stage.Audio", "keypad.Event", "keypad.ShiftRegisterKeys", "analogio.AnalogIn", "time.monotonic", "busio.SPI", "displayio.release_displays" ]
[((2876, 2904), 'displayio.release_displays', 'displayio.release_displays', ([], {}), '()\n', (2902, 2904), False, 'import displayio\n'), ((2916, 2967), 'busio.SPI', 'busio.SPI', ([], {'clock': 'board.TFT_SCK', 'MOSI': 'board.TFT_MOSI'}), '(clock=board.TFT_SCK, MOSI=board.TFT_MOSI)\n', (2925, 2967), False, 'import busio\n'), ((2980, 3081), 'displayio.FourWire', 'displayio.FourWire', (['_tft_spi'], {'command': 'board.TFT_DC', 'chip_select': 'board.TFT_CS', 'reset': 'board.TFT_RST'}), '(_tft_spi, command=board.TFT_DC, chip_select=board.TFT_CS,\n reset=board.TFT_RST)\n', (2998, 3081), False, 'import displayio\n'), ((3119, 3269), 'displayio.Display', 'displayio.Display', (['_fourwire', '_TFT_INIT'], {'width': '(160)', 'height': '(128)', 'rotation': '(0)', 'backlight_pin': 'board.TFT_LITE', 'auto_refresh': '(False)', 'auto_brightness': '(True)'}), '(_fourwire, _TFT_INIT, width=160, height=128, rotation=0,\n backlight_pin=board.TFT_LITE, auto_refresh=False, auto_brightness=True)\n', (3136, 3269), False, 'import displayio\n'), ((3365, 3413), 'stage.Audio', 'stage.Audio', (['board.SPEAKER', 'board.SPEAKER_ENABLE'], {}), '(board.SPEAKER, board.SPEAKER_ENABLE)\n', (3376, 3413), False, 'import stage\n'), ((1430, 1561), 'keypad.ShiftRegisterKeys', 'keypad.ShiftRegisterKeys', ([], {'clock': 'board.BUTTON_CLOCK', 'data': 'board.BUTTON_OUT', 'latch': 'board.BUTTON_LATCH', 'key_count': '(4)', 'interval': '(0.05)'}), '(clock=board.BUTTON_CLOCK, data=board.BUTTON_OUT,\n latch=board.BUTTON_LATCH, key_count=4, interval=0.05)\n', (1454, 1561), False, 'import keypad\n'), ((1631, 1653), 'keypad.Event', 'keypad.Event', (['(0)', '(False)'], {}), '(0, False)\n', (1643, 1653), False, 'import keypad\n'), ((1708, 1743), 'analogio.AnalogIn', 'analogio.AnalogIn', (['board.JOYSTICK_X'], {}), '(board.JOYSTICK_X)\n', (1725, 1743), False, 'import analogio\n'), ((1765, 1800), 'analogio.AnalogIn', 'analogio.AnalogIn', (['board.JOYSTICK_Y'], {}), '(board.JOYSTICK_Y)\n', (1782, 1800), False, 'import analogio\n'), ((2243, 2259), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2257, 2259), False, 'import time\n')]
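A hedged sketch of consuming the module above from CircuitPython game code; the module name `setup` and the loop body are illustrative:

import setup  # assuming the row above is saved as setup.py on the device

while True:
    pressed = setup.buttons.get_pressed()
    if pressed & setup.K_LEFT:
        pass  # e.g. move the player sprite left
    if pressed & setup.K_O:
        pass  # e.g. action button
    setup.display.refresh()  # auto_refresh is False, so refresh explicitly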
import time
import picamera
import numpy as np
import cv2

with picamera.PiCamera() as camera:
    camera.resolution = (320, 240)
    camera.framerate = 24
    time.sleep(2)
    image = np.empty((240 * 320 * 3,), dtype=np.uint8)
    camera.capture(image, 'bgr')
    image = image.reshape((240, 320, 3))
[ "numpy.empty", "time.sleep", "picamera.PiCamera" ]
[((64, 83), 'picamera.PiCamera', 'picamera.PiCamera', ([], {}), '()\n', (81, 83), False, 'import picamera\n'), ((160, 173), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (170, 173), False, 'import time\n'), ((186, 228), 'numpy.empty', 'np.empty', (['(240 * 320 * 3,)'], {'dtype': 'np.uint8'}), '((240 * 320 * 3,), dtype=np.uint8)\n', (194, 228), True, 'import numpy as np\n')]
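Since the capture above already uses 'bgr' channel ordering, the array can go straight into OpenCV; a small, hedged continuation (filename is illustrative):

# The BGR array is directly usable by cv2.
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imwrite('frame.png', image)  # writes the 320x240 capture to disk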
import pandas as pd
import yfinance as yf
from ta.others import DailyReturnIndicator, CumulativeReturnIndicator

# get data
gspc = yf.Ticker("^GSPC")
df = gspc.history(start='2007-01-01', end='2020-12-31')

# yfinance capitalizes column names, so the close column is "Close"
daily_returns = DailyReturnIndicator(close=df["Close"])
df["daily_returns"] = daily_returns.daily_return()

df.to_csv('sp500_returns.csv')
[ "ta.others.DailyReturnIndicator", "yfinance.Ticker" ]
[((130, 148), 'yfinance.Ticker', 'yf.Ticker', (['"""^GSPC"""'], {}), "('^GSPC')\n", (139, 148), True, 'import yfinance as yf\n'), ((223, 262), 'ta.others.DailyReturnIndicator', 'DailyReturnIndicator', ([], {'close': "df['close']"}), "(close=df['close'])\n", (243, 262), False, 'from ta.others import DailyReturnIndicator, CumulativeReturnIndicator\n')]
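`CumulativeReturnIndicator` is imported in the row above but never used; assuming the same ta API shape, it would slot in like this (the column name is illustrative):

cum_returns = CumulativeReturnIndicator(close=df["Close"])
df["cumulative_returns"] = cum_returns.cumulative_return()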
import paho.mqtt.client as paho
import config
# from transitions import Machine
import os, sys, inspect
from transitions import *
from transitions.extensions import GraphMachine
from IPython.display import Image, display, display_png
from threading import Timer
from time import sleep
import logging
import socket
import uuid
import json
import video_control

logging.basicConfig(format='[%(levelname)s] %(asctime)s %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)

items = {}
topics = []

# The states
# (self, source, dest, conditions=None, unless=None, before=None, after=None, prepare=None)
states = ['new', 'offline', 'online', 'calling', 'ringing', 'caller', 'callee']

transitions = [
    {'trigger': 'initialize', 'source': 'new', 'dest': 'offline', 'prepare': 'init'},
    {'trigger': 'connect', 'source': 'offline', 'dest': 'online', 'conditions': 'toconnect'},
    {'trigger': 'disconnect', 'source': 'online', 'dest': 'offline', 'before': 'todisconnect'},
    {'trigger': 'ring', 'source': 'online', 'dest': 'ringing', 'after': 'toring'},
    {'trigger': 'caller', 'source': 'calling', 'dest': 'caller'},
    {'trigger': 'rejected', 'source': 'caller', 'dest': 'online', 'after': 'torejected'},
    {'trigger': 'hangup', 'source': 'caller', 'dest': 'online'},
    {'trigger': 'hangup', 'source': 'callee', 'dest': 'online'},
    {'trigger': 'call', 'source': 'online', 'dest': 'calling', 'conditions': 'tocall'},
    {'trigger': 'pickup', 'source': 'ringing', 'dest': 'callee'},
    {'trigger': 'reject', 'source': 'ringing', 'dest': 'online'},
]


# These helpers originally referenced an undefined `self`; they now take the
# sender's own id explicitly.
def setupCall(client, own_id, target_id):
    data = {"id": own_id}
    client.publish("%s/%s/call" % (config.topic_root, target_id), json.dumps(data))


def rejectCall(client, own_id, target_id):
    data = {"id": own_id, "action": "reject"}
    client.publish("%s/%s/answer" % (config.topic_root, target_id), json.dumps(data))


def acceptCall(client, own_id, target_id, janusip):
    data = {"id": own_id, "action": "accept", "janus": janusip}
    client.publish("%s/%s/answer" % (config.topic_root, target_id), json.dumps(data))


def hangupCall(client, own_id, target_id):
    data = {"id": own_id, "action": "hangup"}
    client.publish("%s/%s/answer" % (config.topic_root, target_id), json.dumps(data))


class EWindow(object):
    nodes_dict = {}
    # renamed from `caller` so it does not shadow the `caller` trigger method
    # that the state machine attaches to this model
    caller_id = None
    callee = None

    def init(self):
        self.uuid = str(uuid.uuid4())
        self.node_path = config.topic_root + "/" + self.uuid
        logging.info("Initializing MQTT (%s)", self.uuid)
        self.mqttc = paho.Client(self.uuid)
        self.mqttc.username_pw_set(config.broker["user"], config.broker["password"])
        self.mqttc.on_connect = self.on_mqttconnect
        self.mqttc.on_message = self.on_mqttmessage
        self.mqttc.on_disconnect = self.on_mqttdisconnect
        self.mqttc.on_subscribe = self.on_mqttsubscribe
        self.mqttc.on_log = self.on_mqttlog
        # Set will to delete node id from addressbook
        self.mqttc.will_set(self.node_path, None, 0, True)

    def toconnect(self):
        logging.info("Connecting to Broker %s:%d", config.broker["hostname"], config.broker["port"])
        try:
            self.mqttc.connect(config.broker["hostname"], config.broker["port"], keepalive=config.keepalive)
            self.mqttc.loop_start()
        except:
            logging.warning("Connection failed.")
            return False
        return True

    def todisconnect(self):
        logging.info("Disconnecting")
        try:
            self.mqttc.publish(self.node_path, None, 1, True)
            self.mqttc.loop_stop()
            self.mqttc.disconnect()
        except:
            logging.warning("Disconnect failed.")
            return False
        return True

    def tocall(self, target):
        logging.info("Outgoing call to %s", target)
        self.callee = target
        return True  # used as a condition, so it must return a truthy value

    def toring(self, target):
        logging.info("Incoming call from %s", target)
        self.caller_id = target
        # TODO : callback to visualize ring

    def torejected(self):
        logging.info("Rejecting call from %s", self.callee)
        self.callee = None

    def on_mqttconnect(self, mosq, obj, rc, test):
        logging.info("Connect with RC %s", str(rc))
        status = {
            "ip": socket.gethostbyname(socket.gethostname()),
            "status": "ready",
            "name": config.node_name
        }
        self.mqttc.publish(self.node_path, json.dumps(status), 1, True)
        self.mqttc.subscribe(config.topic_root + "/+", 1)
        self.mqttc.subscribe("%s/%s" % (self.node_path, config.node_call), 1)
        self.mqttc.subscribe("%s/%s" % (self.node_path, config.node_answer), 1)

    def on_mqttmessage(self, mosq, obj, msg):
        logging.info("Message %s [%s]: %s", msg.topic, str(msg.qos), str(msg.payload))
        if not msg.topic.startswith(config.topic_root):
            return
        node = msg.topic[len(config.topic_root) + 1:].split("/")
        node_uuid = node[0]

        # managing the phonebook
        if node_uuid != self.uuid:
            if len(msg.payload) > 0:
                # add node to local addressbook
                self.nodes_dict[node_uuid] = json.loads(msg.payload)
            else:
                # remove old node from dict
                try:
                    self.nodes_dict.pop(node_uuid)
                except NameError:
                    pass
            return

        # call interaction
        if len(node) == 2:
            action_node = node[1]
            caller_data = json.loads(msg.payload)
            if action_node == "call":
                if self.state in ("calling", "ringing", "caller", "callee"):
                    # TODO : knocking -- busy, so reject the incoming call
                    rejectCall(self.mqttc, self.uuid, caller_data["id"])
                elif self.state == "online":
                    self.ring(caller_data["id"])
            elif action_node == "answer":
                if self.state != "caller":
                    logging.warning("%s is answering without call request", caller_data["id"])
                    return
                if self.callee != caller_data["id"]:
                    logging.warning("%s is answering but %s was called", caller_data["id"], self.callee)
                    return
                if caller_data["action"] == "reject":
                    self.rejected()
                elif caller_data["action"] == "accept":
                    self.caller()
                elif caller_data["action"] == "hangup":
                    self.hangup()

    def on_mqttlog(self, client, userdata, level, buf):
        logging.debug(buf)

    def on_mqttsubscribe(self, mosq, obj, mid, granted_qos):
        logging.info("Subscribed: %s %s", str(mid), str(granted_qos))

    def on_mqttdisconnect(self, client, userdata, rc):
        logging.warning("Disconnected (RC %s)", str(rc))


client = EWindow()

# Initialize; GraphMachine (imported above) is required for diagram export.
machine = GraphMachine(client, states=states, transitions=transitions, initial='new')
client.get_graph().draw('my_state_diagram.png', prog='dot')
[ "uuid.uuid4", "logging.debug", "json.loads", "logging.basicConfig", "logging.warning", "json.dumps", "logging.info", "socket.gethostname", "paho.mqtt.client.Client" ]
[((363, 485), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(levelname)s] %(asctime)s %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""', 'level': 'logging.INFO'}), "(format='[%(levelname)s] %(asctime)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)\n", (382, 485), False, 'import logging\n'), ((1762, 1778), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1772, 1778), False, 'import json\n'), ((1922, 1938), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1932, 1938), False, 'import json\n'), ((2109, 2125), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2119, 2125), False, 'import json\n'), ((2269, 2285), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2279, 2285), False, 'import json\n'), ((2498, 2547), 'logging.info', 'logging.info', (['"""Initializing MQTT (%s)"""', 'self.uuid'], {}), "('Initializing MQTT (%s)', self.uuid)\n", (2510, 2547), False, 'import logging\n'), ((2569, 2591), 'paho.mqtt.client.Client', 'paho.Client', (['self.uuid'], {}), '(self.uuid)\n', (2580, 2591), True, 'import paho.mqtt.client as paho\n'), ((3101, 3197), 'logging.info', 'logging.info', (['"""Connecting to Broker %s:%d"""', "config.broker['hostname']", "config.broker['port']"], {}), "('Connecting to Broker %s:%d', config.broker['hostname'],\n config.broker['port'])\n", (3113, 3197), False, 'import logging\n'), ((3502, 3531), 'logging.info', 'logging.info', (['"""Disconnecting"""'], {}), "('Disconnecting')\n", (3514, 3531), False, 'import logging\n'), ((3830, 3873), 'logging.info', 'logging.info', (['"""Outgoing call to %s"""', 'target'], {}), "('Outgoing call to %s', target)\n", (3842, 3873), False, 'import logging\n'), ((3942, 3987), 'logging.info', 'logging.info', (['"""Incoming call from %s"""', 'target'], {}), "('Incoming call from %s', target)\n", (3954, 3987), False, 'import logging\n'), ((4096, 4147), 'logging.info', 'logging.info', (['"""Rejecting call from %s"""', 'self.callee'], {}), "('Rejecting call from %s', self.callee)\n", (4108, 4147), False, 'import logging\n'), ((7194, 7212), 'logging.debug', 'logging.debug', (['buf'], {}), '(buf)\n', (7207, 7212), False, 'import logging\n'), ((2414, 2426), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2424, 2426), False, 'import uuid\n'), ((4482, 4500), 'json.dumps', 'json.dumps', (['status'], {}), '(status)\n', (4492, 4500), False, 'import json\n'), ((5580, 5603), 'json.loads', 'json.loads', (['msg.payload'], {}), '(msg.payload)\n', (5590, 5603), False, 'import json\n'), ((3381, 3418), 'logging.warning', 'logging.warning', (['"""Connection failed."""'], {}), "('Connection failed.')\n", (3396, 3418), False, 'import logging\n'), ((3707, 3744), 'logging.warning', 'logging.warning', (['"""Disconnect failed."""'], {}), "('Disconnect failed.')\n", (3722, 3744), False, 'import logging\n'), ((4338, 4358), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (4356, 4358), False, 'import socket\n'), ((5229, 5252), 'json.loads', 'json.loads', (['msg.payload'], {}), '(msg.payload)\n', (5239, 5252), False, 'import json\n')]
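A hedged sketch of driving the state machine above interactively; broker parameters come from the local `config` module (not shown), and the peer uuid is made up:

# Illustrative only; assumes a reachable MQTT broker configured in config.py.
client.initialize()                # new -> offline, builds the paho client
if client.connect():               # offline -> online, starts the MQTT loop
    client.call("peer-uuid-1234")  # online -> calling; the peer replies on .../answer
    sleep(10)                      # let on_mqttmessage drive further transitions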
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from common import constants
from common import exceptions
from gae_libs import appengine_util
from gae_libs.http import auth_util


def IsPrivilegedUser(user_email, is_admin):
  """Returns True if the given email account is authorized for access."""
  return is_admin or (user_email and user_email.endswith('@google.com'))


def IsWhitelistedClientId(client_id):
  """Returns True if the given client id is whitelisted."""
  return client_id in constants.WHITELISTED_CLIENT_IDS


def CanTriggerNewAnalysis(user_email, is_admin):
  """Returns True if the given email account could trigger a new analysis."""
  if not appengine_util.IsStaging():
    whitelisted_app_accounts = constants.WHITELISTED_APP_ACCOUNTS
  else:
    whitelisted_app_accounts = constants.WHITELISTED_STAGING_APP_ACCOUNTS
  return IsPrivilegedUser(user_email, is_admin) or (
      user_email in whitelisted_app_accounts)


def ValidateOauthUserForNewAnalysis():
  """Validates whether the oauth user is authorized to trigger a new analysis.

  Returns:
    A tuple (user_email, is_admin).
    user_email (str): The email address of the oauth user.
    is_admin (bool): True if the oauth user is an Admin.

  Raises:
    common.exceptions.UnauthorizedException if the user has no permission.
  """
  user_email = auth_util.GetOauthUserEmail()
  if not user_email:
    raise exceptions.UnauthorizedException('Unknown user.')

  # For Google service accounts, no need to whitelist client ids for them,
  # since email address uniquely identifies credentials used.
  # At some point someone might want to use Findit API from a GCE project
  # (*@<EMAIL> accounts) or from some script that use
  # service account keys (*@*.<EMAIL> accounts).
  if not user_email.endswith('@appspot.gserviceaccount.com'):
    client_id = auth_util.GetOauthClientId()
    if not IsWhitelistedClientId(client_id):
      raise exceptions.UnauthorizedException(
          'Unknown client id %s.' % client_id)

  is_admin = auth_util.IsCurrentOauthUserAdmin()
  if not CanTriggerNewAnalysis(user_email, is_admin):
    raise exceptions.UnauthorizedException('Unknown email %s' % user_email)

  return user_email, is_admin
[ "common.exceptions.UnauthorizedException", "gae_libs.http.auth_util.GetOauthUserEmail", "gae_libs.http.auth_util.GetOauthClientId", "gae_libs.appengine_util.IsStaging", "gae_libs.http.auth_util.IsCurrentOauthUserAdmin" ]
[((1465, 1494), 'gae_libs.http.auth_util.GetOauthUserEmail', 'auth_util.GetOauthUserEmail', ([], {}), '()\n', (1492, 1494), False, 'from gae_libs.http import auth_util\n'), ((2151, 2186), 'gae_libs.http.auth_util.IsCurrentOauthUserAdmin', 'auth_util.IsCurrentOauthUserAdmin', ([], {}), '()\n', (2184, 2186), False, 'from gae_libs.http import auth_util\n'), ((780, 806), 'gae_libs.appengine_util.IsStaging', 'appengine_util.IsStaging', ([], {}), '()\n', (804, 806), False, 'from gae_libs import appengine_util\n'), ((1527, 1576), 'common.exceptions.UnauthorizedException', 'exceptions.UnauthorizedException', (['"""Unknown user."""'], {}), "('Unknown user.')\n", (1559, 1576), False, 'from common import exceptions\n'), ((1970, 1998), 'gae_libs.http.auth_util.GetOauthClientId', 'auth_util.GetOauthClientId', ([], {}), '()\n', (1996, 1998), False, 'from gae_libs.http import auth_util\n'), ((2251, 2316), 'common.exceptions.UnauthorizedException', 'exceptions.UnauthorizedException', (["('Unknown email %s' % user_email)"], {}), "('Unknown email %s' % user_email)\n", (2283, 2316), False, 'from common import exceptions\n'), ((2056, 2125), 'common.exceptions.UnauthorizedException', 'exceptions.UnauthorizedException', (["('Unknown client id %s.' % client_id)"], {}), "('Unknown client id %s.' % client_id)\n", (2088, 2125), False, 'from common import exceptions\n')]
# --coding:utf-8--
#
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License.

import time

from tests.common.nebula_test_suite import NebulaTestSuite


class TestBigInt(NebulaTestSuite):
    @classmethod
    def prepare(self):
        resp = self.execute(
            'CREATE SPACE IF NOT EXISTS BigInt2031(partition_num={partition_num}, replica_factor={replica_factor})'
            .format(partition_num=self.partition_num,
                    replica_factor=self.replica_factor))
        self.check_resp_succeeded(resp)
        time.sleep(self.delay)
        resp = self.execute('USE BigInt2031')
        self.check_resp_succeeded(resp)

    def test_issue2031(self):
        time.sleep(self.delay)
        resp = self.execute(
            'CREATE TAG person1(name string, age bigint)')
        self.check_resp_failed(resp)

        resp = self.execute(
            'CREATE TAG person2(name string, age bigint DEFAULT 100)')
        self.check_resp_failed(resp)

        resp = self.execute(
            'CREATE TAG person3(name string, age Bigint)')
        self.check_resp_failed(resp)

        resp = self.execute(
            'CREATE TAG person4(name string, age BIGINT)')
        self.check_resp_failed(resp)

    @classmethod
    def cleanup(self):
        resp = self.execute('drop space BigInt2031')
        self.check_resp_succeeded(resp)
[ "time.sleep" ]
[((587, 609), 'time.sleep', 'time.sleep', (['self.delay'], {}), '(self.delay)\n', (597, 609), False, 'import time\n'), ((735, 757), 'time.sleep', 'time.sleep', (['self.delay'], {}), '(self.delay)\n', (745, 757), False, 'import time\n')]
""" Create a Workset Creates a Workset - Revit 2017+ TESTED REVIT API: 2017 Author: <NAME> | github.com/gtalarico This file is shared on www.revitapidocs.com For more information visit http://github.com/gtalarico/revitapidocs License: http://github.com/gtalarico/revitapidocs/blob/master/LICENSE.md """ import clr clr.AddReference("RevitAPI") from Autodesk.Revit.DB import Workset, Transaction doc = __revit__.ActiveUIDocument.Document workset_name = 'Point Clouds' t = Transaction(doc) t.Start('Create Workset') Workset.Create(doc, workset_name) t.Commit()
[ "clr.AddReference", "Autodesk.Revit.DB.Transaction", "Autodesk.Revit.DB.Workset.Create" ]
[((318, 346), 'clr.AddReference', 'clr.AddReference', (['"""RevitAPI"""'], {}), "('RevitAPI')\n", (334, 346), False, 'import clr\n'), ((475, 491), 'Autodesk.Revit.DB.Transaction', 'Transaction', (['doc'], {}), '(doc)\n', (486, 491), False, 'from Autodesk.Revit.DB import Workset, Transaction\n'), ((518, 551), 'Autodesk.Revit.DB.Workset.Create', 'Workset.Create', (['doc', 'workset_name'], {}), '(doc, workset_name)\n', (532, 551), False, 'from Autodesk.Revit.DB import Workset, Transaction\n')]
#!/usr/bin/python3
# -*- coding: utf-8 -*-

"""
This program implements a viewer for the LPO data structure.
A LPO is a partial ordered set of events. If two events are ordered this
order is represented by an arrow. If there is an arrow from an event a to
an event b this means event b occurs after event a.

Usage:
python lpo_viewer.py [<lpo-file>]
"""

import math          # calculation of intersection point (fabs, ...)
import sys           # sys.argv
import partialorder  # LPO parser and data structure
import os            # demo file

from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QAction, qApp, QFileDialog, QTabWidget
from PyQt5.QtCore import Qt, QPoint
from PyQt5.QtGui import QPainter, QColor, QPen, QFont


class LpoWidget(QWidget):
    """ This class implements a LPO widget.

    This class uses a canvas to draw the Hasse diagram of a LPO.
    """

    def __init__(self):
        """ This method creates a new, empty LpoView. """
        super().__init__()
        self.__initUI()
        self.__lpo = None

    def __initUI(self):
        """ Set up user interface. """
        self.setMinimumSize(100, 100)

    def showLpo(self, lpo_to_show):
        """ This method shows the given LPO in this LpoView. """
        self.__lpo = lpo_to_show
        self.updateSize()
        self.repaint()
        self.setToolTip(lpo_to_show.name)

    def updateSize(self):
        """ Update the size of the widget to fit the size of the LPO. """
        size = [50, 50]
        for id, event in self.__lpo.events.items():
            if event.position[0] > size[0]:
                size[0] = event.position[0]
            if event.position[1] > size[1]:
                size[1] = event.position[1]
        self.setMinimumSize(size[0] + 50, size[1] + 50)
        self.resize(size[0] + 50, size[1] + 50)

    def paintEvent(self, e):
        qp = QPainter()
        qp.begin(self)
        if self.__lpo != None:
            self.__drawLpo(qp)
        qp.end()

    def __drawLpo(self, qp):
        """ Draw LPO. """
        for arc in self.__lpo.arcs:
            if arc.user_drawn == True:
                self.__drawArc(qp, arc)
        for id, event in self.__lpo.events.items():
            self.__drawEvent(qp, event)

    def __setEventPen(self, qp):
        """ Set up painter for drawing events. """
        qp.setPen(QPen(QColor(0, 0, 0), 2, Qt.SolidLine))
        qp.setBrush(QColor(180, 180, 180))
        font = QFont('Serif', 7, QFont.Light)
        qp.setFont(font)
        qp.setRenderHint(QPainter.HighQualityAntialiasing)

    def __drawEvent(self, qp, event):
        """ Draw the given event. """
        self.__setEventPen(qp)
        qp.drawRect(event.position[0] - 10, event.position[1] - 10, 20, 20)
        metrics = qp.fontMetrics()
        fw = metrics.width(event.label)
        fh = metrics.height()
        qp.drawText(event.position[0] - fw / 2, event.position[1] + 12 + fh, event.label)

    def __setArcPen(self, qp):
        """ Set up painter for drawing arcs. """
        qp.setPen(QPen(QColor(0, 0, 0), 2, Qt.SolidLine))
        qp.setRenderHint(QPainter.HighQualityAntialiasing)

    def __setTipPen(self, qp):
        """ Set up painter for drawing arc tips. """
        qp.setPen(QPen(QColor(0, 0, 0), 1, Qt.SolidLine))
        qp.setBrush(QColor(0, 0, 0))
        qp.setRenderHint(QPainter.HighQualityAntialiasing)

    def __drawArc(self, qp, arc):
        """ Draw the given arc. """
        self.__setArcPen(qp)
        start_event = self.__lpo.events[arc.source]
        end_event = self.__lpo.events[arc.target]
        intersections = self.__calculateIntersections(start_event, end_event)
        start = (start_event.position[0] + intersections[0][0],
                 start_event.position[1] + intersections[0][1])
        end = (end_event.position[0] + intersections[1][0],
               end_event.position[1] + intersections[1][1])
        qp.drawLine(start[0], start[1], end[0], end[1])
        tip = self.__calculateTip(start_event, end_event)
        self.__setTipPen(qp)
        qp.drawPolygon(QPoint(end[0], end[1]),
                       QPoint(end[0] + tip[0][0], end[1] + tip[0][1]),
                       QPoint(end[0] + tip[1][0], end[1] + tip[1][1]))

    def __calculateTip(self, start, end):
        vector = (float(start.position[0] - end.position[0]),
                  float(start.position[1] - end.position[1]))
        vector_length = math.sqrt(vector[0] ** 2 + vector[1] ** 2)
        vector_sized = vector[0] * 10 / vector_length, vector[1] * 10 / vector_length

        alpha = 30 * 2 * math.pi / 360
        sin_alpha = math.sin(alpha)
        cos_alpha = math.cos(alpha)
        tip1 = (vector_sized[0] * cos_alpha - vector_sized[1] * sin_alpha,
                vector_sized[0] * sin_alpha + vector_sized[1] * cos_alpha)
        sin_alpha = math.sin(-alpha)
        cos_alpha = math.cos(-alpha)
        tip2 = (vector_sized[0] * cos_alpha - vector_sized[1] * sin_alpha,
                vector_sized[0] * sin_alpha + vector_sized[1] * cos_alpha)
        return tip1, tip2

    def __calculateIntersections(self, start, end):
        """ Calculate intersection point of start and end events with the arc.

        This method calculates two vectors which describe the intersection
        point of the arc from the given start event to the given end event.
        """
        # vector from the center of the start event to the center of the end event
        vector = float(end.position[0] - start.position[0]), float(end.position[1] - start.position[1])

        # calculate a factor to scale the x-component to 10px (half of side length)
        fact = 1
        if vector[0] != 0:
            fact = 10 / math.fabs(vector[0])

        # scale the vector
        start_vector = vector[0] * fact, vector[1] * fact

        # if y-component of vector is larger than 10px or x-component is 0, scale with y-component
        if math.fabs(start_vector[1]) > 10 or vector[0] == 0:
            fact = 10 / math.fabs(vector[1])
            start_vector = vector[0] * fact, vector[1] * fact

        # calculate intersection for arc end
        if vector[0] != 0:
            fact = 10 / math.fabs(vector[0])
        end_vector = -vector[0] * fact, -vector[1] * fact
        if math.fabs(end_vector[1]) > 10 or vector[0] == 0:
            fact = 10 / math.fabs(vector[1])
            end_vector = -vector[0] * fact, -vector[1] * fact

        return start_vector, end_vector


class LpoViewer(QMainWindow):
    """ This class implements the window of the Lpo viewer.

    This class implements a menu and tab management for the LpoView.
    """

    def __init__(self):
        """ Create new window. """
        super().__init__()
        self.__initUI()

    def __initUI(self):
        """ Set up UI. """
        openAction = QAction('&Open', self)
        openAction.setShortcut('Ctrl+O')
        openAction.setStatusTip('Load LPO from XML file')
        openAction.triggered.connect(self.__onOpen)

        closeAction = QAction('&Close', self)
        closeAction.setShortcut('Ctrl+C')
        closeAction.setStatusTip('Close selected tab')
        closeAction.triggered.connect(self.__onClose)

        exitAction = QAction('&Exit', self)
        exitAction.setShortcut('Ctrl+X')
        exitAction.setStatusTip('Exit application')
        exitAction.triggered.connect(self.__onQuit)

        menubar = self.menuBar()
        filemenu = menubar.addMenu('&File')
        filemenu.addAction(openAction)
        filemenu.addAction(closeAction)
        filemenu.addAction(exitAction)

        self.__tabs = QTabWidget()
        self.setCentralWidget(self.__tabs)

        self.statusBar()
        self.setGeometry(500, 500, 200, 200)
        self.setWindowTitle("Lpo Viewer")
        self.show()

    def openLpo(self, file):
        """ This method shows the LPO contained in the given file as new tab. """
        lpos = partialorder.parse_lpo_file(file)
        self.showLpo(lpos[0])

    def showLpo(self, lpo):
        """ Show the given LPO in a new tab. """
        view = LpoWidget()
        index = self.__tabs.addTab(view, lpo.name)
        self.__tabs.setCurrentIndex(index)
        view.showLpo(lpo)
        self.statusBar().showMessage(lpo.name)

    def __onClose(self):
        # was self.tabs, which does not exist -- the attribute is self.__tabs
        index = self.__tabs.currentIndex()
        self.__tabs.removeTab(index)

    def __onOpen(self):
        fname = QFileDialog.getOpenFileName(self, 'Open LPO')
        # was lpoparser.parse_lpo_file, but the module is imported as partialorder
        parsed_lpos = partialorder.parse_lpo_file(fname[0])
        self.showLpo(parsed_lpos[0])

    def __onQuit(self):
        self.hide()
        qApp.quit()
        sys.exit(0)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    viewer = LpoViewer()
    if len(sys.argv) > 1:
        # load LPO if file is given as parameter
        viewer.openLpo(sys.argv[1])
    if os.path.exists("../abcabc.lpo"):
        # debug/demo file
        viewer.openLpo("../abcabc.lpo")
    sys.exit(app.exec_())
[ "PyQt5.QtGui.QPainter", "math.sqrt", "PyQt5.QtGui.QColor", "math.fabs", "os.path.exists", "PyQt5.QtGui.QFont", "math.sin", "PyQt5.QtWidgets.QFileDialog.getOpenFileName", "partialorder.parse_lpo_file", "PyQt5.QtWidgets.qApp.quit", "math.cos", "PyQt5.QtCore.QPoint", "PyQt5.QtWidgets.QApplication", "PyQt5.QtWidgets.QAction", "PyQt5.QtWidgets.QTabWidget", "sys.exit" ]
[((8763, 8785), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (8775, 8785), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QAction, qApp, QFileDialog, QTabWidget\n'), ((8923, 8954), 'os.path.exists', 'os.path.exists', (['"""../abcabc.lpo"""'], {}), "('../abcabc.lpo')\n", (8937, 8954), False, 'import os\n'), ((1859, 1869), 'PyQt5.QtGui.QPainter', 'QPainter', ([], {}), '()\n', (1867, 1869), False, 'from PyQt5.QtGui import QPainter, QColor, QPen, QFont\n'), ((2438, 2468), 'PyQt5.QtGui.QFont', 'QFont', (['"""Serif"""', '(7)', 'QFont.Light'], {}), "('Serif', 7, QFont.Light)\n", (2443, 2468), False, 'from PyQt5.QtGui import QPainter, QColor, QPen, QFont\n'), ((4424, 4466), 'math.sqrt', 'math.sqrt', (['(vector[0] ** 2 + vector[1] ** 2)'], {}), '(vector[0] ** 2 + vector[1] ** 2)\n', (4433, 4466), False, 'import math\n'), ((4614, 4629), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (4622, 4629), False, 'import math\n'), ((4650, 4665), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (4658, 4665), False, 'import math\n'), ((4838, 4854), 'math.sin', 'math.sin', (['(-alpha)'], {}), '(-alpha)\n', (4846, 4854), False, 'import math\n'), ((4875, 4891), 'math.cos', 'math.cos', (['(-alpha)'], {}), '(-alpha)\n', (4883, 4891), False, 'import math\n'), ((6829, 6851), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""&Open"""', 'self'], {}), "('&Open', self)\n", (6836, 6851), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QAction, qApp, QFileDialog, QTabWidget\n'), ((7026, 7049), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""&Close"""', 'self'], {}), "('&Close', self)\n", (7033, 7049), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QAction, qApp, QFileDialog, QTabWidget\n'), ((7231, 7253), 'PyQt5.QtWidgets.QAction', 'QAction', (['"""&Exit"""', 'self'], {}), "('&Exit', self)\n", (7238, 7253), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QAction, qApp, QFileDialog, QTabWidget\n'), ((7626, 7638), 'PyQt5.QtWidgets.QTabWidget', 'QTabWidget', ([], {}), '()\n', (7636, 7638), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QAction, qApp, QFileDialog, QTabWidget\n'), ((7967, 8000), 'partialorder.parse_lpo_file', 'partialorder.parse_lpo_file', (['file'], {}), '(file)\n', (7994, 8000), False, 'import partialorder\n'), ((8474, 8519), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['self', '"""Open LPO"""'], {}), "(self, 'Open LPO')\n", (8501, 8519), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QAction, qApp, QFileDialog, QTabWidget\n'), ((8675, 8686), 'PyQt5.QtWidgets.qApp.quit', 'qApp.quit', ([], {}), '()\n', (8684, 8686), False, 'from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QAction, qApp, QFileDialog, QTabWidget\n'), ((8695, 8706), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8703, 8706), False, 'import sys\n'), ((2399, 2420), 'PyQt5.QtGui.QColor', 'QColor', (['(180)', '(180)', '(180)'], {}), '(180, 180, 180)\n', (2405, 2420), False, 'from PyQt5.QtGui import QPainter, QColor, QPen, QFont\n'), ((3321, 3336), 'PyQt5.QtGui.QColor', 'QColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3327, 3336), False, 'from PyQt5.QtGui import QPainter, QColor, QPen, QFont\n'), ((4086, 4108), 'PyQt5.QtCore.QPoint', 'QPoint', (['end[0]', 'end[1]'], {}), '(end[0], end[1])\n', (4092, 4108), False, 'from PyQt5.QtCore import Qt, QPoint\n'), ((4133, 4179), 'PyQt5.QtCore.QPoint', 'QPoint', 
(['(end[0] + tip[0][0])', '(end[1] + tip[0][1])'], {}), '(end[0] + tip[0][0], end[1] + tip[0][1])\n', (4139, 4179), False, 'from PyQt5.QtCore import Qt, QPoint\n'), ((4204, 4250), 'PyQt5.QtCore.QPoint', 'QPoint', (['(end[0] + tip[1][0])', '(end[1] + tip[1][1])'], {}), '(end[0] + tip[1][0], end[1] + tip[1][1])\n', (4210, 4250), False, 'from PyQt5.QtCore import Qt, QPoint\n'), ((2344, 2359), 'PyQt5.QtGui.QColor', 'QColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (2350, 2359), False, 'from PyQt5.QtGui import QPainter, QColor, QPen, QFont\n'), ((3055, 3070), 'PyQt5.QtGui.QColor', 'QColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3061, 3070), False, 'from PyQt5.QtGui import QPainter, QColor, QPen, QFont\n'), ((3266, 3281), 'PyQt5.QtGui.QColor', 'QColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3272, 3281), False, 'from PyQt5.QtGui import QPainter, QColor, QPen, QFont\n'), ((5705, 5725), 'math.fabs', 'math.fabs', (['vector[0]'], {}), '(vector[0])\n', (5714, 5725), False, 'import math\n'), ((5922, 5948), 'math.fabs', 'math.fabs', (['start_vector[1]'], {}), '(start_vector[1])\n', (5931, 5948), False, 'import math\n'), ((5997, 6017), 'math.fabs', 'math.fabs', (['vector[1]'], {}), '(vector[1])\n', (6006, 6017), False, 'import math\n'), ((6175, 6195), 'math.fabs', 'math.fabs', (['vector[0]'], {}), '(vector[0])\n', (6184, 6195), False, 'import math\n'), ((6267, 6291), 'math.fabs', 'math.fabs', (['end_vector[1]'], {}), '(end_vector[1])\n', (6276, 6291), False, 'import math\n'), ((6340, 6360), 'math.fabs', 'math.fabs', (['vector[1]'], {}), '(vector[1])\n', (6349, 6360), False, 'import math\n')]
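# The record above includes only the extracted calls, not the source program.
# Those calls (math.sqrt on an edge vector, math.sin/math.cos at +alpha and
# -alpha, QPoint construction at end + tip offsets) suggest arrowheads drawn
# by rotating the reversed, normalized edge direction by a fixed angle in both
# directions. The following is only an illustrative sketch of that pattern;
# the names `arrow_tip_offsets`, `alpha`, and `tip_length` are assumptions,
# not identifiers from the missing source.
import math

def arrow_tip_offsets(vector, alpha=0.5, tip_length=10.0):
    """Return the two offsets from an edge's end point that form an arrowhead.

    `vector` is the (dx, dy) direction of the edge; the head is built by
    rotating the reversed, normalized direction by +alpha and -alpha radians.
    """
    length = math.sqrt(vector[0] ** 2 + vector[1] ** 2)
    # Reversed unit vector, scaled to the desired tip length.
    ux = -vector[0] / length * tip_length
    uy = -vector[1] / length * tip_length
    # Standard 2D rotation by +alpha and -alpha.
    left = (ux * math.cos(alpha) - uy * math.sin(alpha),
            ux * math.sin(alpha) + uy * math.cos(alpha))
    right = (ux * math.cos(-alpha) - uy * math.sin(-alpha),
             ux * math.sin(-alpha) + uy * math.cos(-alpha))
    return left, right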
# -*- coding:UTF-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys, os
import time

import numpy as np
import tensorflow as tf
from glob import glob
from os import path
import logging
import shutil

sys.path.append(".")
#sys.path.append("/home/lhj/PHICOMM/Project/label_model/modelTest/label_model")

def load_graph(model_file):
  graph = tf.Graph()
  graph_def = tf.GraphDef()

  with open(model_file, "rb") as f:
    graph_def.ParseFromString(f.read())
  with graph.as_default():
    tf.import_graph_def(graph_def)

  return graph

def read_tensor_from_image_file(input_height=299, input_width=299,
                                input_mean=0, input_std=255):
  input_name = "file_reader"
  output_name = "normalized"
  # [NEW] make file_name a placeholder so the decode graph is built once and
  # reused for every image.
  file_name_placeholder = tf.placeholder("string", name="fname")
  file_reader = tf.read_file(file_name_placeholder, input_name)
  # if file_name.endswith(".png"):
  #   image_reader = tf.image.decode_png(file_reader, channels = 3,
  #                                      name='png_reader')
  # elif file_name.endswith(".gif"):
  #   image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
  #                                                  name='gif_reader'))
  # elif file_name.endswith(".bmp"):
  #   image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
  # else:
  #   image_reader = tf.image.decode_jpeg(file_reader, channels = 3,
  #                                        name='jpeg_reader')
  image_reader = tf.image.decode_jpeg(file_reader, channels=3, name='jpeg_reader')
  float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
  resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
  normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
  #sess = tf.Session()
  #result = sess.run(normalized)
  #return result
  return normalized

def sort_dict(dict_words, index):
    """
    Sort a dict into a list of (key, value) pairs, in descending order.

    :param dict_words: the dict to sort
    :param index: "value" to sort by value, anything else to sort by key
    :return: sorted list of (key, value) tuples
    """
    keys = dict_words.keys()
    values = dict_words.values()
    list_one = [(key, val) for key, val in zip(keys, values)]
    if index == "value":
        list_sort = sorted(list_one, key=lambda x: x[1], reverse=True)
    else:
        list_sort = sorted(list_one, key=lambda x: x[0], reverse=True)
    return list_sort

def mymovefile(srcfile, dstfile):
    if not os.path.isfile(srcfile):
        print("%s not exist!" % (srcfile))
    else:
        fpath, fname = os.path.split(dstfile)  # split into directory and file name
        if not os.path.exists(fpath):
            os.makedirs(fpath)           # create the directory
        shutil.move(srcfile, dstfile)        # move the file
        print("move %s -> %s" % (srcfile, dstfile))

def renameAndMovefile(srcfile, dstfile, prob):
    if not os.path.isfile(srcfile):
        print("%s not exist!" % (srcfile))
    else:
        fpath, fname = os.path.split(dstfile)  # split into directory and file name
        if not os.path.exists(fpath):
            os.makedirs(fpath)           # create the directory
        print(os.path.splitext(srcfile)[0], os.path.splitext(srcfile)[1])
        # new name: append the probability before the file extension
        newname = os.path.splitext(srcfile)[0] + "__" + "%.2f" % prob + os.path.splitext(srcfile)[1]
        os.rename(srcfile, newname)
        shutil.move(newname, dstfile)        # move the file
        print("move %s -> %s" % (newname, dstfile))

def mycopyfile(srcfile, dstfile):
    if not os.path.isfile(srcfile):
        print("%s not exist!" % (srcfile))
    else:
        fpath, fname = os.path.split(dstfile)  # split into directory and file name
        if not os.path.exists(fpath):
            os.makedirs(fpath)           # create the directory
        shutil.copyfile(srcfile, dstfile)    # copy the file
        print("copy %s -> %s" % (srcfile, dstfile))

def load_labels(label_file):
  label = []
  proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
  for l in proto_as_ascii_lines:
    label.append(l.rstrip())
  return label

if __name__ == "__main__":
  file_name = "tf_files/flower_photos/daisy/3475870145_685a19116d.jpg"
  model_file = "tf_files/retrained_graph.pb"
  label_file = "tf_files/retrained_labels.txt"
  input_height = 224
  input_width = 224
  input_mean = 128
  input_std = 128
  input_layer = "input"
  output_layer = "final_result"
  label_test = 0
  label_test_ref = 0.99
  label_test_ref_bottom = 0.05
  image_path = "./dataset"
  #path_pattern = "*.[jJ][Pp]*"
  path_pattern = "*.jpg"
  #lable_test_fail_dir = "./dataset/potato_total/label_test_fail_img/"
  #lable_test_check_dir = "./dataset/potato_total/label_test_check_img/"
  #lable_test_pass_dir = "./dataset/potato_total/label_test_pass_img"
  process_mode = "move"

  parser = argparse.ArgumentParser()
  parser.add_argument("--image", help="image to be processed")
  parser.add_argument("--graph", help="graph/model to be executed")
  parser.add_argument("--labels", help="name of file containing labels")
  parser.add_argument("--input_height", type=int, help="input height")
  parser.add_argument("--input_width", type=int, help="input width")
  parser.add_argument("--input_mean", type=int, help="input mean")
  parser.add_argument("--input_std", type=int, help="input std")
  parser.add_argument("--input_layer", help="name of input layer")
  parser.add_argument("--output_layer", help="name of output layer")
  parser.add_argument("--label_test", help="name of label to test")
  parser.add_argument("--label_test_ref", help="upper threshold of the label test reference")
  parser.add_argument("--label_test_ref_bottom", help="lower threshold of the label test reference")
  parser.add_argument("--image_path", help="image_path to be processed")
  parser.add_argument("--pattern", help="file search pattern for glob")
  parser.add_argument("--lable_test_fail_dir", help="lable_test_fail_dir")
  parser.add_argument("--lable_test_check_dir", help="lable_test_check_dir")
  parser.add_argument("--lable_test_pass_dir", help="lable_test_pass_dir")
  parser.add_argument("--process_mode", help="copy or move")
  args = parser.parse_args()

  if args.graph:
    model_file = args.graph
  # if args.image:
  #   file_name = args.image
  if args.labels:
    label_file = args.labels
  if args.input_height:
    input_height = args.input_height
  if args.input_width:
    input_width = args.input_width
  if args.input_mean:
    input_mean = args.input_mean
  if args.input_std:
    input_std = args.input_std
  if args.input_layer:
    input_layer = args.input_layer
  if args.output_layer:
    output_layer = args.output_layer
  if args.label_test:
    label_test = args.label_test
  if args.label_test_ref:
    label_test_ref = args.label_test_ref
  if args.label_test_ref_bottom:
    label_test_ref_bottom = args.label_test_ref_bottom
  if args.image_path:
    image_path = args.image_path
  if args.pattern:
    path_pattern = args.pattern
  if args.lable_test_fail_dir:
    lable_test_fail_dir = args.lable_test_fail_dir
  if args.lable_test_check_dir:
    lable_test_check_dir = args.lable_test_check_dir
  if args.lable_test_pass_dir:
    lable_test_pass_dir = args.lable_test_pass_dir

  if lable_test_pass_dir[len(lable_test_pass_dir)-1] != '/':
    lable_test_pass_dir = lable_test_pass_dir + '/'
  if lable_test_check_dir[len(lable_test_check_dir)-1] != '/':
    lable_test_check_dir = lable_test_check_dir + '/'
  if lable_test_fail_dir[len(lable_test_fail_dir)-1] != '/':
    lable_test_fail_dir = lable_test_fail_dir + '/'
  if args.process_mode:
    process_mode = args.process_mode

  # Get a logger instance; with an empty argument this would return the root logger.
  logger = logging.getLogger("labelimage")
  # Specify the logger's output format.
  formatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')

  # File log handler.
  file_handler = logging.FileHandler("test.log")
  file_handler.setFormatter(formatter)  # output format can be set via setFormatter

  # Console log handler.
  console_handler = logging.StreamHandler(sys.stdout)
  console_handler.formatter = formatter  # the formatter attribute can also be assigned directly

  # Attach the handlers to the logger.
  logger.addHandler(file_handler)
  logger.addHandler(console_handler)

  # Set the minimum output level; the default is WARN.
  logger.setLevel(logging.DEBUG)
  logger.debug('This is debug message')

  all_files = glob(path.join(image_path, path_pattern))
  #all_files.sort()
  print('Found {} files in {} folder'.format(len(all_files), image_path))
  #print(all_files)

  graph = load_graph(model_file)

  input_name = "import/" + input_layer
  output_name = "import/" + output_layer
  input_operation = graph.get_operation_by_name(input_name)
  output_operation = graph.get_operation_by_name(output_name)

  file_probability = {}
  file_probability_aftersort = {}
  start = time.time()
  with tf.Session(graph=graph) as sess:
    read_tensor_from_image_file_op = read_tensor_from_image_file(
        input_height=input_height,
        input_width=input_width,
        input_mean=input_mean,
        input_std=input_std)
    for file_name in all_files:
      #start = time.time()
      t = sess.run(read_tensor_from_image_file_op, feed_dict={"fname:0": file_name})
      #t = sess.run(read_tensor_from_image_file_op, feed_dict={file_name_placeholder: file_name})
      results = sess.run(output_operation.outputs[0],
                         {input_operation.outputs[0]: t})
      #end = time.time()
      results = np.squeeze(results)

      top_k = results.argsort()[-5:][::-1]
      labels = load_labels(label_file)

      #logger.debug('\nEvaluation image:' + str(file_name))
      #print('\nEvaluation image:' + str(file_name))
      #logger.debug('Evaluation time (1-image): {:.3f}s\n'.format(end-start))
      # top_k holds label indices, so index labels/results with i directly.
      label_index = 0
      for i in top_k:
        #logger.debug("label: %s , %.2f ", labels[i], results[i])
        #print("label: %s , %.2f " % (labels[i], results[i]))
        if str(labels[i]) == str(label_test):
          label_index = i
      #logger.debug("evaluating label: %s , %d " % (str(labels[label_index]), label_index))
      #print("evaluating label: %s , %d " % (str(labels[label_index]), label_index))

      #logger.debug(" label_eval:%s probability:%.2f ExpectLabel:%s Hthresh:%s Lthresh:%s" % (labels[label_index], results[label_index], label_test, label_test_ref, label_test_ref_bottom))
      #print(" label_eval:%s probability:%.2f ExpectLabel:%s Hthresh:%s Lthresh:%s" % (labels[label_index], results[label_index], label_test, label_test_ref, label_test_ref_bottom))
      file_probability[file_name] = results[label_index]
      if float(results[label_index]) >= float(label_test_ref):
        if process_mode == "move":
          mymovefile(file_name, lable_test_pass_dir)
        else:
          image_name = file_name.split("/")[-1]
          mycopyfile(file_name, lable_test_pass_dir + "/" + image_name)
        #renameAndMovefile(file_name, lable_test_pass_dir, file_probability[file_name])
      elif float(results[label_index]) <= float(label_test_ref_bottom):
        if process_mode == "move":
          mymovefile(file_name, lable_test_fail_dir)
        else:
          image_name = file_name.split("/")[-1]
          mycopyfile(file_name, lable_test_fail_dir + "/" + image_name)
        #renameAndMovefile(file_name, lable_test_fail_dir, file_probability[file_name])
      else:
        if process_mode == "move":
          mymovefile(file_name, lable_test_check_dir)
        else:
          image_name = file_name.split("/")[-1]
          mycopyfile(file_name, lable_test_check_dir + "/" + image_name)
        #renameAndMovefile(file_name, lable_test_check_dir, file_probability[file_name])

  #print(file_probability)
  #file_probability_aftersort = sorted(file_probability.items(), key=lambda x: x[0], reverse=True)
  file_probability_aftersort = sort_dict(file_probability, "key")
  #print(file_probability_aftersort)
  for key, value in file_probability_aftersort:
    logger.debug("file name:%s,probability:%s" % (key, value))
  end = time.time()
  logger.debug('Evaluation time (1-image): {:.3f}s\n'.format(end - start))

  # Remove the log handlers when done.
  logger.removeHandler(file_handler)
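# A note on the preprocessing pattern in the script above:
# `read_tensor_from_image_file` builds the decode/resize subgraph once and
# exposes the file name through the "fname" placeholder, so the per-image loop
# only feeds a new string instead of adding new ops to the graph on every
# iteration. A minimal, self-contained sketch of the same TF1-style pattern;
# the names below are illustrative, not part of the script:
import tensorflow as tf

def build_preprocess_op(height=224, width=224, mean=128, std=128):
    fname = tf.placeholder(tf.string, name="sketch_fname")
    image = tf.image.decode_jpeg(tf.read_file(fname), channels=3)
    batch = tf.expand_dims(tf.cast(image, tf.float32), 0)
    resized = tf.image.resize_bilinear(batch, [height, width])
    return fname, tf.divide(tf.subtract(resized, [mean]), [std])

# Usage: every file is fed through the same graph, so no ops accumulate.
# with tf.Session() as sess:
#     fname, preprocess = build_preprocess_op()
#     for p in ["a.jpg", "b.jpg"]:
#         batch = sess.run(preprocess, feed_dict={fname: p})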
[ "argparse.ArgumentParser", "logging.Formatter", "os.path.isfile", "os.path.join", "sys.path.append", "logging.FileHandler", "tensorflow.subtract", "os.path.exists", "tensorflow.placeholder", "tensorflow.cast", "shutil.copyfile", "tensorflow.GraphDef", "os.rename", "logging.StreamHandler", "tensorflow.Session", "tensorflow.gfile.GFile", "tensorflow.Graph", "numpy.squeeze", "tensorflow.read_file", "tensorflow.import_graph_def", "tensorflow.expand_dims", "os.makedirs", "time.time", "os.path.splitext", "shutil.move", "tensorflow.image.decode_jpeg", "os.path.split", "logging.getLogger", "tensorflow.image.resize_bilinear" ]
[((980, 1000), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (995, 1000), False, 'import sys, os\n'), ((1120, 1130), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1128, 1130), True, 'import tensorflow as tf\n'), ((1145, 1158), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1156, 1158), True, 'import tensorflow as tf\n'), ((1548, 1586), 'tensorflow.placeholder', 'tf.placeholder', (['"""string"""'], {'name': '"""fname"""'}), "('string', name='fname')\n", (1562, 1586), True, 'import tensorflow as tf\n'), ((1604, 1651), 'tensorflow.read_file', 'tf.read_file', (['file_name_placeholder', 'input_name'], {}), '(file_name_placeholder, input_name)\n', (1616, 1651), True, 'import tensorflow as tf\n'), ((2246, 2311), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['file_reader'], {'channels': '(3)', 'name': '"""jpeg_reader"""'}), "(file_reader, channels=3, name='jpeg_reader')\n", (2266, 2311), True, 'import tensorflow as tf\n'), ((2372, 2405), 'tensorflow.cast', 'tf.cast', (['image_reader', 'tf.float32'], {}), '(image_reader, tf.float32)\n', (2379, 2405), True, 'import tensorflow as tf\n'), ((2424, 2455), 'tensorflow.expand_dims', 'tf.expand_dims', (['float_caster', '(0)'], {}), '(float_caster, 0)\n', (2438, 2455), True, 'import tensorflow as tf\n'), ((2469, 2537), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['dims_expander', '[input_height, input_width]'], {}), '(dims_expander, [input_height, input_width])\n', (2493, 2537), True, 'import tensorflow as tf\n'), ((5409, 5434), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5432, 5434), False, 'import argparse\n'), ((8243, 8274), 'logging.getLogger', 'logging.getLogger', (['"""labelimage"""'], {}), "('labelimage')\n", (8260, 8274), False, 'import logging\n'), ((8307, 8368), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)-8s: %(message)s"""'], {}), "('%(asctime)s %(levelname)-8s: %(message)s')\n", (8324, 8368), False, 'import logging\n'), ((8396, 8427), 'logging.FileHandler', 'logging.FileHandler', (['"""test.log"""'], {}), "('test.log')\n", (8415, 8427), False, 'import logging\n'), ((8524, 8557), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (8545, 8557), False, 'import logging\n'), ((9290, 9301), 'time.time', 'time.time', ([], {}), '()\n', (9299, 9301), False, 'import time\n'), ((12702, 12713), 'time.time', 'time.time', ([], {}), '()\n', (12711, 12713), False, 'import time\n'), ((1267, 1297), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {}), '(graph_def)\n', (1286, 1297), True, 'import tensorflow as tf\n'), ((2563, 2597), 'tensorflow.subtract', 'tf.subtract', (['resized', '[input_mean]'], {}), '(resized, [input_mean])\n', (2574, 2597), True, 'import tensorflow as tf\n'), ((3170, 3193), 'os.path.isfile', 'os.path.isfile', (['srcfile'], {}), '(srcfile)\n', (3184, 3193), False, 'import sys, os\n'), ((3267, 3289), 'os.path.split', 'os.path.split', (['dstfile'], {}), '(dstfile)\n', (3280, 3289), False, 'import sys, os\n'), ((3401, 3430), 'shutil.move', 'shutil.move', (['srcfile', 'dstfile'], {}), '(srcfile, dstfile)\n', (3412, 3430), False, 'import shutil\n'), ((3553, 3576), 'os.path.isfile', 'os.path.isfile', (['srcfile'], {}), '(srcfile)\n', (3567, 3576), False, 'import sys, os\n'), ((3650, 3672), 'os.path.split', 'os.path.split', (['dstfile'], {}), '(dstfile)\n', (3663, 3672), False, 'import sys, os\n'), ((3993, 4020), 'os.rename', 'os.rename', (['srcfile', 'newname'], 
{}), '(srcfile, newname)\n', (4002, 4020), False, 'import sys, os\n'), ((4028, 4057), 'shutil.move', 'shutil.move', (['newname', 'dstfile'], {}), '(newname, dstfile)\n', (4039, 4057), False, 'import shutil\n'), ((4168, 4191), 'os.path.isfile', 'os.path.isfile', (['srcfile'], {}), '(srcfile)\n', (4182, 4191), False, 'import sys, os\n'), ((4265, 4287), 'os.path.split', 'os.path.split', (['dstfile'], {}), '(dstfile)\n', (4278, 4287), False, 'import sys, os\n'), ((4399, 4432), 'shutil.copyfile', 'shutil.copyfile', (['srcfile', 'dstfile'], {}), '(srcfile, dstfile)\n', (4414, 4432), False, 'import shutil\n'), ((8832, 8867), 'os.path.join', 'path.join', (['image_path', 'path_pattern'], {}), '(image_path, path_pattern)\n', (8841, 8867), False, 'from os import path\n'), ((9309, 9332), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (9319, 9332), True, 'import tensorflow as tf\n'), ((3318, 3339), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (3332, 3339), False, 'import sys, os\n'), ((3353, 3371), 'os.makedirs', 'os.makedirs', (['fpath'], {}), '(fpath)\n', (3364, 3371), False, 'import sys, os\n'), ((3701, 3722), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (3715, 3722), False, 'import sys, os\n'), ((3736, 3754), 'os.makedirs', 'os.makedirs', (['fpath'], {}), '(fpath)\n', (3747, 3754), False, 'import sys, os\n'), ((4316, 4337), 'os.path.exists', 'os.path.exists', (['fpath'], {}), '(fpath)\n', (4330, 4337), False, 'import sys, os\n'), ((4351, 4369), 'os.makedirs', 'os.makedirs', (['fpath'], {}), '(fpath)\n', (4362, 4369), False, 'import sys, os\n'), ((4562, 4588), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['label_file'], {}), '(label_file)\n', (4576, 4588), True, 'import tensorflow as tf\n'), ((10003, 10022), 'numpy.squeeze', 'np.squeeze', (['results'], {}), '(results)\n', (10013, 10022), True, 'import numpy as np\n'), ((3790, 3815), 'os.path.splitext', 'os.path.splitext', (['srcfile'], {}), '(srcfile)\n', (3806, 3815), False, 'import sys, os\n'), ((3819, 3844), 'os.path.splitext', 'os.path.splitext', (['srcfile'], {}), '(srcfile)\n', (3835, 3844), False, 'import sys, os\n'), ((3948, 3973), 'os.path.splitext', 'os.path.splitext', (['srcfile'], {}), '(srcfile)\n', (3964, 3973), False, 'import sys, os\n'), ((3897, 3922), 'os.path.splitext', 'os.path.splitext', (['srcfile'], {}), '(srcfile)\n', (3913, 3922), False, 'import sys, os\n')]
import multiprocessing

import numpy

from smqtk.representation import DescriptorElement
from smqtk.utils.postgres import norm_psql_cmd_string, PsqlConnectionHelper

# Try to import required modules
try:
    import psycopg2
except ImportError:
    psycopg2 = None


PSQL_TABLE_CREATE_RLOCK = multiprocessing.RLock()


# noinspection SqlNoDataSourceInspection
class PostgresDescriptorElement(DescriptorElement):
    """
    Descriptor element whose vector is stored in a Postgres database.

    We assume a Postgres version of at least 9.4 (the versions this has been
    tested against).

    Efficient connection pooling may be achieved via external utilities like
    PgBouncer.
    """

    ARRAY_DTYPE = numpy.float64

    UPSERT_TABLE_TMPL = norm_psql_cmd_string("""
        CREATE TABLE IF NOT EXISTS {table_name:s} (
          {type_col:s} TEXT NOT NULL,
          {uuid_col:s} TEXT NOT NULL,
          {binary_col:s} BYTEA NOT NULL,
          PRIMARY KEY ({type_col:s}, {uuid_col:s})
        );
    """)

    SELECT_TMPL = norm_psql_cmd_string("""
        SELECT {binary_col:s}
          FROM {table_name:s}
          WHERE {type_col:s} = %(type_val)s
            AND {uuid_col:s} = %(uuid_val)s
        ;
    """)

    UPSERT_TMPL = norm_psql_cmd_string("""
        WITH upsert AS (
          UPDATE {table_name:s}
            SET {binary_col:s} = %(binary_val)s
            WHERE {type_col:s} = %(type_val)s
              AND {uuid_col:s} = %(uuid_val)s
            RETURNING *
          )
        INSERT INTO {table_name:s}
          ({type_col:s}, {uuid_col:s}, {binary_col:s})
          SELECT %(type_val)s, %(uuid_val)s, %(binary_val)s
            WHERE NOT EXISTS (SELECT * FROM upsert);
    """)

    @classmethod
    def is_usable(cls):
        if psycopg2 is None:
            cls.get_logger().warning("Not usable. Requires psycopg2 module")
            return False
        return True

    def __init__(self, type_str, uuid,
                 table_name='descriptors',
                 uuid_col='uid', type_col='type_str', binary_col='vector',
                 db_name='postgres', db_host=None, db_port=None, db_user=None,
                 db_pass=None, create_table=True):
        """
        Initialize a new PostgresDescriptorElement attached to some database
        credentials.

        We require that storage tables treat the uuid AND type string columns
        as primary keys. The type and uuid columns should be of the 'text'
        type. The binary column should be of the 'bytea' type.

        Default argument values assume a local PostgreSQL database with a
        table created via the
        ``etc/smqtk/postgres/descriptor_element/example_table_init.sql``
        file (relative to the SMQTK source tree or install root).

        NOTES:
            - Not all uuid types used here are necessarily of the
              ``uuid.UUID`` type, thus the recommendation to use a ``text``
              type for the column. For certain specific use cases they may be
              proper ``uuid.UUID`` instances or strings, but this cannot be
              generally assumed.

        :param type_str: Type of descriptor. This is usually the name of the
            content descriptor that generated this vector.
        :type type_str: str

        :param uuid: Unique ID reference of the descriptor.
        :type uuid: collections.Hashable

        :param table_name: String label of the database table to use.
        :type table_name: str

        :param uuid_col: The column label for descriptor UUID storage.
        :type uuid_col: str

        :param type_col: The column label for descriptor type string storage.
        :type type_col: str

        :param binary_col: The column label for descriptor vector binary
            storage.
        :type binary_col: str

        :param db_host: Host address of the Postgres server. If None, we
            assume the server is on the local machine and use the UNIX socket.
            This might be a required field on Windows machines (not tested
            yet).
        :type db_host: str | None

        :param db_port: Port the Postgres server is exposed on. If None, we
            assume the default port (5432).
        :type db_port: int | None

        :param db_name: The name of the database to connect to.
        :type db_name: str

        :param db_user: Postgres user to connect as. If None, postgres
            defaults to using the current accessing user account name on the
            operating system.
        :type db_user: str | None

        :param db_pass: Password for the user we're connecting as. This may be
            None if no password is to be used.
        :type db_pass: str | None

        :param create_table: If this instance should try to create the storing
            table before actions are performed against it. If the configured
            user does not have sufficient permissions to create the table and
            it does not currently exist, an exception will be raised.
        :type create_table: bool

        """
        super(PostgresDescriptorElement, self).__init__(type_str, uuid)

        self.table_name = table_name
        self.uuid_col = uuid_col
        self.type_col = type_col
        self.binary_col = binary_col
        self.create_table = create_table

        self.db_name = db_name
        self.db_host = db_host
        self.db_port = db_port
        self.db_user = db_user
        self.db_pass = db_pass

        self._psql_helper = None

    def __getstate__(self):
        """
        Construct serialization state.

        Because the psql_helper contains a lock, it cannot be serialized.
        This is OK because we create the helper on demand. The cost of
        discarding the instance upon serialization is that, once deserialized
        elsewhere, the helper instance has to be created again. Since this
        creation only happens once post-deserialization, it is acceptable.
        """
        state = super(PostgresDescriptorElement, self).__getstate__()
        state.update({
            "table_name": self.table_name,
            "uuid_col": self.uuid_col,
            "type_col": self.type_col,
            "binary_col": self.binary_col,
            "create_table": self.create_table,
            "db_name": self.db_name,
            "db_host": self.db_host,
            "db_port": self.db_port,
            "db_user": self.db_user,
            "db_pass": self.db_pass,
        })
        return state

    def __setstate__(self, state):
        # Base DescriptorElement parts
        super(PostgresDescriptorElement, self).__setstate__(state)
        # Our parts
        self.table_name = state['table_name']
        self.uuid_col = state['uuid_col']
        self.type_col = state['type_col']
        self.binary_col = state['binary_col']
        self.create_table = state['create_table']
        self.db_name = state['db_name']
        self.db_host = state['db_host']
        self.db_port = state['db_port']
        self.db_user = state['db_user']
        self.db_pass = state['db_pass']
        self._psql_helper = None

    def _get_psql_helper(self):
        """
        Internal method to create, on demand, the PSQL connection helper
        class.

        :return: PsqlConnectionHelper utility.
        :rtype: PsqlConnectionHelper
        """
        # The helper is created lazily (and reset to None on deserialization)
        # for backwards compatibility with elements serialized before the
        # inclusion of this helper class.
        if self._psql_helper is None:
            # Only using a transport iteration size of 1 since this element is
            # only meant to refer to a single entry in the associated table.
            self._psql_helper = PsqlConnectionHelper(
                self.db_name, self.db_host, self.db_port, self.db_user,
                self.db_pass, itersize=1,
                table_upsert_lock=PSQL_TABLE_CREATE_RLOCK
            )
            # Register table upsert command
            if self.create_table:
                self._psql_helper.set_table_upsert_sql(
                    self.UPSERT_TABLE_TMPL.format(
                        table_name=self.table_name,
                        type_col=self.type_col,
                        uuid_col=self.uuid_col,
                        binary_col=self.binary_col,
                    )
                )
        return self._psql_helper

    def get_config(self):
        return {
            "table_name": self.table_name,
            "uuid_col": self.uuid_col,
            "type_col": self.type_col,
            "binary_col": self.binary_col,
            "create_table": self.create_table,
            "db_name": self.db_name,
            "db_host": self.db_host,
            "db_port": self.db_port,
            "db_user": self.db_user,
            "db_pass": self.db_pass,
        }

    def has_vector(self):
        """
        Check if the target database has a vector for our keys.

        :return: Whether or not this container currently has a descriptor
            vector stored.
        :rtype: bool
        """
        # Very similar to the vector query, but replacing the vector binary
        # return with a true/null return. Saves a little time compared to
        # fetching and testing the vector itself.
        # OLD: return self.vector() is not None
        # Using the static value 'true' for the binary "column" to reduce the
        # data return volume.
        q_select = self.SELECT_TMPL.format(**{
            'binary_col': 'true',
            'table_name': self.table_name,
            'type_col': self.type_col,
            'uuid_col': self.uuid_col,
        })
        q_select_values = {
            "type_val": self.type(),
            "uuid_val": str(self.uuid())
        }

        def cb(cursor):
            cursor.execute(q_select, q_select_values)

        # Should either yield one or zero rows.
        psql_helper = self._get_psql_helper()
        return bool(list(psql_helper.single_execute(
            cb, yield_result_rows=True
        )))

    def vector(self):
        """
        Return this element's vector, or None if we don't have one.

        :return: The stored descriptor vector as a numpy array. This returns
            None if there is no vector stored in this container.
        :rtype: numpy.ndarray or None
        """
        q_select = self.SELECT_TMPL.format(**{
            "binary_col": self.binary_col,
            "table_name": self.table_name,
            "type_col": self.type_col,
            "uuid_col": self.uuid_col,
        })
        q_select_values = {
            "type_val": self.type(),
            "uuid_val": str(self.uuid())
        }

        # query execution callback
        # noinspection PyProtectedMember
        def cb(cursor):
            # type: (psycopg2._psycopg.cursor) -> None
            cursor.execute(q_select, q_select_values)

        # This should only fetch a single row: more than one cannot be
        # yielded due to the use of primary keys.
        psql_helper = self._get_psql_helper()
        r = list(psql_helper.single_execute(cb, yield_result_rows=True))
        if not r:
            return None
        else:
            b = r[0][0]
            v = numpy.frombuffer(b, self.ARRAY_DTYPE)
            return v

    def set_vector(self, new_vec):
        """
        Set the contained vector.

        If this container already stores a descriptor vector, this will
        overwrite it, both here and on the database side.

        :raises ValueError: ``new_vec`` could not be converted into a vector
            of this element's array dtype.

        :param new_vec: New vector to contain. This must be a numpy array or
            convertible to one.
        :type new_vec: numpy.ndarray

        :returns: Self.
        :rtype: PostgresDescriptorElement

        """
        if not isinstance(new_vec, numpy.ndarray):
            new_vec = numpy.copy(new_vec)

        if new_vec.dtype != self.ARRAY_DTYPE:
            try:
                new_vec = new_vec.astype(self.ARRAY_DTYPE)
            except TypeError:
                raise ValueError("Could not convert input to a vector of type "
                                 "%s." % self.ARRAY_DTYPE)

        q_upsert = self.UPSERT_TMPL.strip().format(**{
            "table_name": self.table_name,
            "binary_col": self.binary_col,
            "type_col": self.type_col,
            "uuid_col": self.uuid_col,
        })
        q_upsert_values = {
            "binary_val": psycopg2.Binary(new_vec),
            "type_val": self.type(),
            "uuid_val": str(self.uuid()),
        }

        # query execution callback
        # noinspection PyProtectedMember
        def cb(cursor):
            # type: (psycopg2._psycopg.cursor) -> None
            cursor.execute(q_upsert, q_upsert_values)

        # No return, but iteration must be forced for the query to execute.
        psql_helper = self._get_psql_helper()
        list(psql_helper.single_execute(cb, yield_result_rows=False))
        return self
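# The UPSERT_TMPL above uses the writable-CTE idiom because it predates native
# upsert support. On PostgreSQL 9.5+ the same effect can be had with
# INSERT ... ON CONFLICT, which is shorter and race-free; the table's
# (type, uuid) primary key supplies the required unique constraint. This is a
# sketch of what that template could look like, not part of the class above:
ON_CONFLICT_UPSERT_TMPL = """
    INSERT INTO {table_name:s} ({type_col:s}, {uuid_col:s}, {binary_col:s})
    VALUES (%(type_val)s, %(uuid_val)s, %(binary_val)s)
    ON CONFLICT ({type_col:s}, {uuid_col:s})
    DO UPDATE SET {binary_col:s} = EXCLUDED.{binary_col:s};
"""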
[ "smqtk.utils.postgres.PsqlConnectionHelper", "numpy.copy", "numpy.frombuffer", "smqtk.utils.postgres.norm_psql_cmd_string", "psycopg2.Binary", "multiprocessing.RLock" ]
[((292, 315), 'multiprocessing.RLock', 'multiprocessing.RLock', ([], {}), '()\n', (313, 315), False, 'import multiprocessing\n'), ((748, 1022), 'smqtk.utils.postgres.norm_psql_cmd_string', 'norm_psql_cmd_string', (['"""\n CREATE TABLE IF NOT EXISTS {table_name:s} (\n {type_col:s} TEXT NOT NULL,\n {uuid_col:s} TEXT NOT NULL,\n {binary_col:s} BYTEA NOT NULL,\n PRIMARY KEY ({type_col:s}, {uuid_col:s})\n );\n """'], {}), '(\n """\n CREATE TABLE IF NOT EXISTS {table_name:s} (\n {type_col:s} TEXT NOT NULL,\n {uuid_col:s} TEXT NOT NULL,\n {binary_col:s} BYTEA NOT NULL,\n PRIMARY KEY ({type_col:s}, {uuid_col:s})\n );\n """\n )\n', (768, 1022), False, 'from smqtk.utils.postgres import norm_psql_cmd_string, PsqlConnectionHelper\n'), ((1032, 1233), 'smqtk.utils.postgres.norm_psql_cmd_string', 'norm_psql_cmd_string', (['"""\n SELECT {binary_col:s}\n FROM {table_name:s}\n WHERE {type_col:s} = %(type_val)s\n AND {uuid_col:s} = %(uuid_val)s\n ;\n """'], {}), '(\n """\n SELECT {binary_col:s}\n FROM {table_name:s}\n WHERE {type_col:s} = %(type_val)s\n AND {uuid_col:s} = %(uuid_val)s\n ;\n """\n )\n', (1052, 1233), False, 'from smqtk.utils.postgres import norm_psql_cmd_string, PsqlConnectionHelper\n'), ((1243, 1712), 'smqtk.utils.postgres.norm_psql_cmd_string', 'norm_psql_cmd_string', (['"""\n WITH upsert AS (\n UPDATE {table_name:s}\n SET {binary_col:s} = %(binary_val)s\n WHERE {type_col:s} = %(type_val)s\n AND {uuid_col:s} = %(uuid_val)s\n RETURNING *\n )\n INSERT INTO {table_name:s} ({type_col:s}, {uuid_col:s}, {binary_col:s})\n SELECT %(type_val)s, %(uuid_val)s, %(binary_val)s\n WHERE NOT EXISTS (SELECT * FROM upsert);\n """'], {}), '(\n """\n WITH upsert AS (\n UPDATE {table_name:s}\n SET {binary_col:s} = %(binary_val)s\n WHERE {type_col:s} = %(type_val)s\n AND {uuid_col:s} = %(uuid_val)s\n RETURNING *\n )\n INSERT INTO {table_name:s} ({type_col:s}, {uuid_col:s}, {binary_col:s})\n SELECT %(type_val)s, %(uuid_val)s, %(binary_val)s\n WHERE NOT EXISTS (SELECT * FROM upsert);\n """\n )\n', (1263, 1712), False, 'from smqtk.utils.postgres import norm_psql_cmd_string, PsqlConnectionHelper\n'), ((7734, 7883), 'smqtk.utils.postgres.PsqlConnectionHelper', 'PsqlConnectionHelper', (['self.db_name', 'self.db_host', 'self.db_port', 'self.db_user', 'self.db_pass'], {'itersize': '(1)', 'table_upsert_lock': 'PSQL_TABLE_CREATE_RLOCK'}), '(self.db_name, self.db_host, self.db_port, self.db_user,\n self.db_pass, itersize=1, table_upsert_lock=PSQL_TABLE_CREATE_RLOCK)\n', (7754, 7883), False, 'from smqtk.utils.postgres import norm_psql_cmd_string, PsqlConnectionHelper\n'), ((11416, 11453), 'numpy.frombuffer', 'numpy.frombuffer', (['b', 'self.ARRAY_DTYPE'], {}), '(b, self.ARRAY_DTYPE)\n', (11432, 11453), False, 'import numpy\n'), ((12378, 12397), 'numpy.copy', 'numpy.copy', (['new_vec'], {}), '(new_vec)\n', (12388, 12397), False, 'import numpy\n'), ((12975, 12999), 'psycopg2.Binary', 'psycopg2.Binary', (['new_vec'], {}), '(new_vec)\n', (12990, 12999), False, 'import psycopg2\n')]
""" Tests of responsiveness of sample merger """ import os from collections import Iterable, Mapping import pytest import yaml from peppy import * from peppy.const import * from peppy.project import NEW_PIPES_KEY ANNS_NAME = "ann.csv" PLIF_NAME = "plif.yaml" SUBPROJECT_NAME = "changed_output" @pytest.fixture def pl_iface(tmpdir): """ Provide test case with path to written pipeline interface file. """ data = {} fp = tmpdir.join(PLIF_NAME).strpath with open(fp, "w") as f: yaml.dump(data, f) return fp @pytest.fixture def annsdata(tmpdir): """ Provide a test case with path to written annotations file. """ lines = """sample_name,protocol,filename,data_source,read_type ATAC-seq_human_PE,ATAC-seq,atac-seq_PE.bam,microtest,paired ATAC-seq_human_SE,ATAC-seq,atac-seq_SE.bam,microtest,single ChIP-seq_human_CTCF_PE,ChIP-seq,chip-seq_PE.bam,microtest,paired ChIP-seq_human_CTCF_SE,ChIP-seq,chip-seq_SE.bam,microtest,single ChIP-seq_human_H3K27ac_PE,ChIP-seq,chip-seq_PE.bam,microtest,paired ChIP-seq_human_H3K27ac_SE,ChIP-seq,chip-seq_SE.bam,microtest,single ChIPmentation_human_CTCF_PE,ChIPmentation,chipmentation_PE.bam,microtest,paired ChIPmentation_human_CTCF_SE,ChIPmentation,chipmentation_SE.bam,microtest,single""".splitlines( True ) return _makefile(tmpdir, ANNS_NAME, lines, newline=False) @pytest.fixture def conf_file(tmpdir, annsdata, pl_iface): """ Provide test case with project config data. """ mainout, subout = [tmpdir.join(f).strpath for f in ["this", "that"]] os.makedirs(mainout) os.makedirs(subout) constant = "sleep" data = { METADATA_KEY: { OUTDIR_KEY: mainout, SAMPLE_ANNOTATIONS_KEY: annsdata, NEW_PIPES_KEY: pl_iface, }, DERIVATIONS_DECLARATION: "data_source", CONSTANTS_DECLARATION: {constant: 0.1}, DATA_SOURCES_SECTION: { "microtest": os.path.join( "..", "..", "microtest-master", "data", "{filename}" ) }, SUBPROJECTS_SECTION: { SUBPROJECT_NAME: { METADATA_KEY: {OUTDIR_KEY: subout}, CONSTANTS_DECLARATION: {constant: 0.5}, } }, } return _makefile(tmpdir, "conf.yaml", data) def test_subproject_activation_preserves_derived_path(tmpdir, conf_file): """ When a subproject changes no data relevant to a sample attribute, it shouldn't change. """ old_prj = Project(conf_file) old_path = old_prj.samples[0].data_path new_prj = old_prj.activate_subproject(SUBPROJECT_NAME) new_path = new_prj.samples[0].data_path assert old_path == new_path def _infer_write(data, newline=False): """ Infer function with which to write a data structure. :param str | Mapping | Iterable[str] data: the data to write to disk :param bool newline: whether to add newline to text lines :return function(object, file) -> object: function that writes data to a file stream, possibly returning a value """ if isinstance(data, Mapping): def write(d, f): yaml.dump(d, f) else: make_line = (lambda l: l + "\n") if newline else (lambda l: l) if isinstance(data, str): def write(d, f): f.write(make_line(d)) elif isinstance(data, Iterable): def write(d, f): for l in d: f.write(make_line(l)) else: raise TypeError("Unexpected data structure type: {}".format(type(data))) return write def _makefile(tmp, filename, data, newline=False): """ Write data to a file and return the filepath. :param py.path.local.LocalPath tmp: tempfolder from a test case :param str filename: name f :param str | Mapping | Iterable[str] data: :param bool newline: whether to add newline to text lines :return str: path to the file created """ fp = tmp.join(filename).strpath write = _infer_write(data, newline) with open(fp, "w") as f: write(data, f) return fp
[ "yaml.dump", "os.path.join", "os.makedirs" ]
[((1552, 1572), 'os.makedirs', 'os.makedirs', (['mainout'], {}), '(mainout)\n', (1563, 1572), False, 'import os\n'), ((1577, 1596), 'os.makedirs', 'os.makedirs', (['subout'], {}), '(subout)\n', (1588, 1596), False, 'import os\n'), ((505, 523), 'yaml.dump', 'yaml.dump', (['data', 'f'], {}), '(data, f)\n', (514, 523), False, 'import yaml\n'), ((1937, 2003), 'os.path.join', 'os.path.join', (['""".."""', '""".."""', '"""microtest-master"""', '"""data"""', '"""{filename}"""'], {}), "('..', '..', 'microtest-master', 'data', '{filename}')\n", (1949, 2003), False, 'import os\n'), ((3128, 3143), 'yaml.dump', 'yaml.dump', (['d', 'f'], {}), '(d, f)\n', (3137, 3143), False, 'import yaml\n')]
import pytest import wn @pytest.mark.usefixtures('empty_db') def test_lexicons_empty(): assert len(wn.lexicons()) == 0 @pytest.mark.usefixtures('mini_db') def test_lexicons_mini(): assert len(wn.lexicons()) == 2 assert all(isinstance(lex, wn.Lexicon) for lex in wn.lexicons()) results = wn.lexicons(lang='en') assert len(results) == 1 and results[0].language == 'en' results = wn.lexicons(lang='es') assert len(results) == 1 and results[0].language == 'es' results = wn.lexicons(lexicon='*') assert len(results) == 2 results = wn.lexicons(lexicon='*:1') assert len(results) == 2 results = wn.lexicons(lexicon='test-en') assert len(results) == 1 and results[0].language == 'en' results = wn.lexicons(lexicon='test-en:1') assert len(results) == 1 and results[0].language == 'en' results = wn.lexicons(lexicon='test-en:*') assert len(results) == 1 and results[0].language == 'en' assert wn.lexicons(lexicon='test-en')[0].specifier() == 'test-en:1' assert wn.lexicons(lexicon='test-es')[0].specifier() == 'test-es:1' assert wn.lexicons(lexicon='test-en')[0].requires() == {} assert wn.lexicons(lexicon='test-es')[0].requires() == {} @pytest.mark.usefixtures('mini_db') def test_lexicons_unknown(): results = wn.lexicons(lang='unk') assert len(results) == 0 results = wn.lexicons(lexicon='test-unk') assert len(results) == 0 @pytest.mark.usefixtures('empty_db') def test_words_empty(): assert len(wn.words()) == 0 @pytest.mark.usefixtures('mini_db') def test_words_mini(): assert len(wn.words()) == 15 assert all(isinstance(w, wn.Word) for w in wn.words()) words = wn.words('information') # search lemma assert len(words) == 1 assert words[0].lemma() == 'information' assert words[0].lemma().script == 'Latn' assert words[0].lemma().tags() == [wn.Tag('tag-text', 'tag-category')] words = wn.words('exemplifies') # search secondary form assert len(words) == 1 assert words[0].lemma() == 'exemplify' assert len(wn.words(pos='n')) == 10 assert all(w.pos == 'n' for w in wn.words(pos='n')) assert len(wn.words(pos='v')) == 5 assert len(wn.words(pos='q')) == 0 # fake pos assert len(wn.words(lang='en')) == 9 assert len(wn.words(lang='es')) == 6 assert len(wn.words(lexicon='test-en')) == 9 assert len(wn.words(lexicon='test-es')) == 6 assert len(wn.words(lang='en', lexicon='test-en')) == 9 assert len(wn.words(pos='v', lang='en')) == 3 assert len(wn.words('information', lang='en')) == 1 assert len(wn.words('information', lang='es')) == 0 with pytest.raises(wn.Error): wn.words(lang='unk') with pytest.raises(wn.Error): wn.words(lexicon='test-unk') @pytest.mark.usefixtures('empty_db') def test_word_empty(): with pytest.raises(wn.Error): assert wn.word('test-es-información-n') @pytest.mark.usefixtures('mini_db') def test_word_mini(): assert wn.word('test-es-información-n') assert wn.word('test-es-información-n', lang='es') assert wn.word('test-es-información-n', lexicon='test-es') with pytest.raises(wn.Error): assert wn.word('test-es-información-n', lang='en') with pytest.raises(wn.Error): assert wn.word('test-es-información-n', lexicon='test-en') with pytest.raises(wn.Error): assert wn.word('test-es-información-n', lang='unk') with pytest.raises(wn.Error): assert wn.word('test-es-información-n', lexicon='test-unk') @pytest.mark.usefixtures('empty_db') def test_senses_empty(): assert len(wn.senses()) == 0 @pytest.mark.usefixtures('mini_db') def test_senses_mini(): assert len(wn.senses()) == 16 assert all(isinstance(s, wn.Sense) for s in wn.senses()) senses = wn.senses('information') # search lemma assert len(senses) == 1 assert 
senses[0].word().lemma() == 'information' assert senses[0].counts() == [3] senses = wn.senses('exemplifies') # search secondary form assert len(senses) == 1 assert senses[0].word().lemma() == 'exemplify' assert len(wn.senses(pos='n')) == 11 assert len(wn.senses(pos='v')) == 5 assert len(wn.senses(pos='q')) == 0 # fake pos assert len(wn.senses(lang='en')) == 10 assert len(wn.senses(lang='es')) == 6 assert len(wn.senses(lexicon='test-en')) == 10 assert len(wn.senses(lexicon='test-es')) == 6 assert len(wn.senses(lang='en', lexicon='test-en')) == 10 assert len(wn.senses(pos='v', lang='en')) == 3 assert len(wn.senses('information', lang='en')) == 1 assert len(wn.senses('information', lang='es')) == 0 with pytest.raises(wn.Error): wn.senses(lang='unk') with pytest.raises(wn.Error): wn.senses(lexicon='test-unk') @pytest.mark.usefixtures('empty_db') def test_sense_empty(): with pytest.raises(wn.Error): assert wn.sense('test-es-información-n-0001-01') @pytest.mark.usefixtures('mini_db') def test_sense_mini(): assert wn.sense('test-es-información-n-0001-01') assert wn.sense('test-es-información-n-0001-01', lang='es') assert wn.sense('test-es-información-n-0001-01', lexicon='test-es') with pytest.raises(wn.Error): assert wn.sense('test-es-información-n-0001-01', lang='en') with pytest.raises(wn.Error): assert wn.sense('test-es-información-n-0001-01', lexicon='test-en') with pytest.raises(wn.Error): assert wn.sense('test-es-información-n-0001-01', lang='unk') with pytest.raises(wn.Error): assert wn.sense('test-es-información-n-0001-01', lexicon='test-unk') @pytest.mark.usefixtures('empty_db') def test_synsets_empty(): assert len(wn.synsets()) == 0 @pytest.mark.usefixtures('mini_db') def test_synsets_mini(): assert len(wn.synsets()) == 12 assert all(isinstance(ss, wn.Synset) for ss in wn.synsets()) synsets = wn.synsets('information') # search lemma assert len(synsets) == 1 assert 'information' in synsets[0].lemmas() synsets = wn.synsets('exemplifies') # search secondary form assert len(synsets) == 1 assert 'exemplify' in synsets[0].lemmas() assert len(wn.synsets(pos='n')) == 9 assert len(wn.synsets(pos='v')) == 3 assert len(wn.synsets(pos='q')) == 0 # fake pos assert len(wn.synsets(ili='i67469')) == 2 assert len(wn.synsets(ili='i67468')) == 0 assert len(wn.synsets(lang='en')) == 8 assert len(wn.synsets(lang='es')) == 4 assert len(wn.synsets(lexicon='test-en')) == 8 assert len(wn.synsets(lexicon='test-es')) == 4 assert len(wn.synsets(lang='en', lexicon='test-en')) == 8 assert len(wn.synsets(pos='v', lang='en')) == 2 assert len(wn.synsets('information', lang='en')) == 1 assert len(wn.synsets('information', lang='es')) == 0 assert len(wn.synsets(ili='i67469', lang='es')) == 1 with pytest.raises(wn.Error): wn.synsets(lang='unk') with pytest.raises(wn.Error): wn.synsets(lexicon='test-unk') @pytest.mark.usefixtures('empty_db') def test_synset_empty(): with pytest.raises(wn.Error): assert wn.synset('test-es-0001-n') @pytest.mark.usefixtures('mini_db') def test_synset_mini(): assert wn.synset('test-es-0001-n') assert wn.synset('test-es-0001-n', lang='es') assert wn.synset('test-es-0001-n', lexicon='test-es') with pytest.raises(wn.Error): assert wn.synset('test-es-0001-n', lang='en') with pytest.raises(wn.Error): assert wn.synset('test-es-0001-n', lexicon='test-en') with pytest.raises(wn.Error): assert wn.synset('test-es-0001-n', lang='unk') with pytest.raises(wn.Error): assert wn.synset('test-es-0001-n', lexicon='test-unk') @pytest.mark.usefixtures('mini_db_1_1') def test_mini_1_1(): assert len(wn.lexicons()) 
== 4 assert len(wn.lexicons(lang='en')) == 2 assert len(wn.lexicons(lang='ja')) == 1 assert wn.lexicons(lang='ja')[0].logo == 'logo.svg' w = wn.Wordnet(lang='en') assert len(w.lexicons()) == 2 assert len(w.expanded_lexicons()) == 0 assert len(w.word('test-en-exemplify-v').lemma().tags()) == 1 w = wn.Wordnet(lang='ja') assert len(w.lexicons()) == 1 assert len(w.expanded_lexicons()) == 1 assert len(w.synsets('例え')[0].hypernyms()) == 1 assert w.synsets('例え')[0].lexfile() == 'noun.cognition' assert len(w.word('test-ja-例え-n').lemma().pronunciations()) == 1 assert w.word('test-ja-例え-n').forms()[1].id == 'test-ja-例え-n-たとえ' p = w.word('test-ja-例え-n').lemma().pronunciations()[0] assert p.value == 'tatoe' assert p.variety == 'standard' assert p.notation == 'ipa' assert p.phonemic assert p.audio == 'tatoe.wav' w = wn.Wordnet(lang='ja', expand='') assert len(w.lexicons()) == 1 assert len(w.expanded_lexicons()) == 0 assert len(w.synsets('例え')[0].hypernyms()) == 0 w = wn.Wordnet(lexicon='test-en test-en-ext') assert len(w.lexicons()) == 2 assert len(w.expanded_lexicons()) == 0 assert len(w.synsets('fire')[0].hyponyms()) == 1
[ "wn.sense", "wn.Wordnet", "wn.senses", "wn.word", "wn.Tag", "wn.words", "pytest.raises", "wn.lexicons", "wn.synset", "pytest.mark.usefixtures", "wn.synsets" ]
[((29, 64), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""empty_db"""'], {}), "('empty_db')\n", (52, 64), False, 'import pytest\n'), ((130, 164), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""mini_db"""'], {}), "('mini_db')\n", (153, 164), False, 'import pytest\n'), ((1226, 1260), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""mini_db"""'], {}), "('mini_db')\n", (1249, 1260), False, 'import pytest\n'), ((1435, 1470), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""empty_db"""'], {}), "('empty_db')\n", (1458, 1470), False, 'import pytest\n'), ((1530, 1564), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""mini_db"""'], {}), "('mini_db')\n", (1553, 1564), False, 'import pytest\n'), ((2788, 2823), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""empty_db"""'], {}), "('empty_db')\n", (2811, 2823), False, 'import pytest\n'), ((2932, 2966), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""mini_db"""'], {}), "('mini_db')\n", (2955, 2966), False, 'import pytest\n'), ((3544, 3579), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""empty_db"""'], {}), "('empty_db')\n", (3567, 3579), False, 'import pytest\n'), ((3641, 3675), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""mini_db"""'], {}), "('mini_db')\n", (3664, 3675), False, 'import pytest\n'), ((4801, 4836), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""empty_db"""'], {}), "('empty_db')\n", (4824, 4836), False, 'import pytest\n'), ((4955, 4989), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""mini_db"""'], {}), "('mini_db')\n", (4978, 4989), False, 'import pytest\n'), ((5631, 5666), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""empty_db"""'], {}), "('empty_db')\n", (5654, 5666), False, 'import pytest\n'), ((5730, 5764), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""mini_db"""'], {}), "('mini_db')\n", (5753, 5764), False, 'import pytest\n'), ((7014, 7049), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""empty_db"""'], {}), "('empty_db')\n", (7037, 7049), False, 'import pytest\n'), ((7155, 7189), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""mini_db"""'], {}), "('mini_db')\n", (7178, 7189), False, 'import pytest\n'), ((7734, 7772), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""mini_db_1_1"""'], {}), "('mini_db_1_1')\n", (7757, 7772), False, 'import pytest\n'), ((310, 332), 'wn.lexicons', 'wn.lexicons', ([], {'lang': '"""en"""'}), "(lang='en')\n", (321, 332), False, 'import wn\n'), ((408, 430), 'wn.lexicons', 'wn.lexicons', ([], {'lang': '"""es"""'}), "(lang='es')\n", (419, 430), False, 'import wn\n'), ((507, 531), 'wn.lexicons', 'wn.lexicons', ([], {'lexicon': '"""*"""'}), "(lexicon='*')\n", (518, 531), False, 'import wn\n'), ((575, 601), 'wn.lexicons', 'wn.lexicons', ([], {'lexicon': '"""*:1"""'}), "(lexicon='*:1')\n", (586, 601), False, 'import wn\n'), ((645, 675), 'wn.lexicons', 'wn.lexicons', ([], {'lexicon': '"""test-en"""'}), "(lexicon='test-en')\n", (656, 675), False, 'import wn\n'), ((751, 783), 'wn.lexicons', 'wn.lexicons', ([], {'lexicon': '"""test-en:1"""'}), "(lexicon='test-en:1')\n", (762, 783), False, 'import wn\n'), ((859, 891), 'wn.lexicons', 'wn.lexicons', ([], {'lexicon': '"""test-en:*"""'}), "(lexicon='test-en:*')\n", (870, 891), False, 'import wn\n'), ((1304, 1327), 'wn.lexicons', 'wn.lexicons', ([], {'lang': '"""unk"""'}), "(lang='unk')\n", (1315, 1327), False, 'import wn\n'), ((1371, 1402), 'wn.lexicons', 'wn.lexicons', ([], 
{'lexicon': '"""test-unk"""'}), "(lexicon='test-unk')\n", (1382, 1402), False, 'import wn\n'), ((1693, 1716), 'wn.words', 'wn.words', (['"""information"""'], {}), "('information')\n", (1701, 1716), False, 'import wn\n'), ((1939, 1962), 'wn.words', 'wn.words', (['"""exemplifies"""'], {}), "('exemplifies')\n", (1947, 1962), False, 'import wn\n'), ((3000, 3032), 'wn.word', 'wn.word', (['"""test-es-información-n"""'], {}), "('test-es-información-n')\n", (3007, 3032), False, 'import wn\n'), ((3044, 3087), 'wn.word', 'wn.word', (['"""test-es-información-n"""'], {'lang': '"""es"""'}), "('test-es-información-n', lang='es')\n", (3051, 3087), False, 'import wn\n'), ((3099, 3150), 'wn.word', 'wn.word', (['"""test-es-información-n"""'], {'lexicon': '"""test-es"""'}), "('test-es-información-n', lexicon='test-es')\n", (3106, 3150), False, 'import wn\n'), ((3809, 3833), 'wn.senses', 'wn.senses', (['"""information"""'], {}), "('information')\n", (3818, 3833), False, 'import wn\n'), ((3982, 4006), 'wn.senses', 'wn.senses', (['"""exemplifies"""'], {}), "('exemplifies')\n", (3991, 4006), False, 'import wn\n'), ((5024, 5065), 'wn.sense', 'wn.sense', (['"""test-es-información-n-0001-01"""'], {}), "('test-es-información-n-0001-01')\n", (5032, 5065), False, 'import wn\n'), ((5077, 5129), 'wn.sense', 'wn.sense', (['"""test-es-información-n-0001-01"""'], {'lang': '"""es"""'}), "('test-es-información-n-0001-01', lang='es')\n", (5085, 5129), False, 'import wn\n'), ((5141, 5201), 'wn.sense', 'wn.sense', (['"""test-es-información-n-0001-01"""'], {'lexicon': '"""test-es"""'}), "('test-es-información-n-0001-01', lexicon='test-es')\n", (5149, 5201), False, 'import wn\n'), ((5905, 5930), 'wn.synsets', 'wn.synsets', (['"""information"""'], {}), "('information')\n", (5915, 5930), False, 'import wn\n'), ((6039, 6064), 'wn.synsets', 'wn.synsets', (['"""exemplifies"""'], {}), "('exemplifies')\n", (6049, 6064), False, 'import wn\n'), ((7225, 7252), 'wn.synset', 'wn.synset', (['"""test-es-0001-n"""'], {}), "('test-es-0001-n')\n", (7234, 7252), False, 'import wn\n'), ((7264, 7302), 'wn.synset', 'wn.synset', (['"""test-es-0001-n"""'], {'lang': '"""es"""'}), "('test-es-0001-n', lang='es')\n", (7273, 7302), False, 'import wn\n'), ((7314, 7360), 'wn.synset', 'wn.synset', (['"""test-es-0001-n"""'], {'lexicon': '"""test-es"""'}), "('test-es-0001-n', lexicon='test-es')\n", (7323, 7360), False, 'import wn\n'), ((7982, 8003), 'wn.Wordnet', 'wn.Wordnet', ([], {'lang': '"""en"""'}), "(lang='en')\n", (7992, 8003), False, 'import wn\n'), ((8156, 8177), 'wn.Wordnet', 'wn.Wordnet', ([], {'lang': '"""ja"""'}), "(lang='ja')\n", (8166, 8177), False, 'import wn\n'), ((8726, 8758), 'wn.Wordnet', 'wn.Wordnet', ([], {'lang': '"""ja"""', 'expand': '""""""'}), "(lang='ja', expand='')\n", (8736, 8758), False, 'import wn\n'), ((8897, 8938), 'wn.Wordnet', 'wn.Wordnet', ([], {'lexicon': '"""test-en test-en-ext"""'}), "(lexicon='test-en test-en-ext')\n", (8907, 8938), False, 'import wn\n'), ((2660, 2683), 'pytest.raises', 'pytest.raises', (['wn.Error'], {}), '(wn.Error)\n', (2673, 2683), False, 'import pytest\n'), ((2693, 2713), 'wn.words', 'wn.words', ([], {'lang': '"""unk"""'}), "(lang='unk')\n", (2701, 2713), False, 'import wn\n'), ((2723, 2746), 'pytest.raises', 'pytest.raises', (['wn.Error'], {}), '(wn.Error)\n', (2736, 2746), False, 'import pytest\n'), ((2756, 2784), 'wn.words', 'wn.words', ([], {'lexicon': '"""test-unk"""'}), "(lexicon='test-unk')\n", (2764, 2784), False, 'import wn\n'), ((2856, 2879), 'pytest.raises', 'pytest.raises', (['wn.Error'], 
#!/usr/bin/env python3 import argparse import mock import json import unittest from letsdebughelper import letsdebug class TestLetsdebug(unittest.TestCase): def setUp(self): self.url = 'https://letsdebug.net' self.domain = 'jeffistotallyawesome.space' self.test_id = 359646 self.post_data = {"method": "http-01", "domain": self.domain} self.bad_post_data = {"metho": "http=01", "domain": self.domain} self.test_id_url = '{}/{}/{}'.format(self.url, self.domain, str(self.test_id)) self.get_bad_result = 'Invalid request parameters.\n' self.post_result_text = '{"Domain":"jeffistotallyawesome.space","ID":359640}\n' self.post_bad_result = 'Please provide a valid domain name and validation method.\n' self.get_result_text = '{"id":359646,"domain":"jeffistotallyawesome.space","method":"http-01",\ "status":"Complete","created_at":"2020-11-16T20:39:19.970198Z","started_at":"2020-11-16T20:39:19.973775Z",\ "completed_at":"2020-11-16T20:39:22.855617Z","result":{"problems":[{"name":"CloudflareCDN","explanation":"The \ domain jeffistotallyawesome.space is being served through Cloudflare CDN. Any Let\'s Encrypt certificate installed \ on the origin server will only encrypt traffic between the server and Cloudflare. It is strongly recommended that \ the SSL option \'Full SSL (strict)\' be enabled.","detail":"https://support.cloudflare.com/hc/en-us/articles/\ 200170416-What-do-the-SSL-options-mean-","severity":"Warning"}]}}\n' self.get_result_dict = {'id': 359646, 'domain': 'jeffistotallyawesome.space', 'method': 'http-01', 'status': 'Complete', 'created_at': '2020-11-16T20:39:19.970198Z', 'started_at': '2020-11-16T20:39:19.973775Z', 'completed_at': '2020-11-16T20:39:22.855617Z', 'result': {'problems': [{'name': 'CloudflareCDN', 'explanation': "The domain jeffistotallyawesome.space is being \ served through Cloudflare CDN. Any Let's Encrypt certificate installed on the origin server will only encrypt \ traffic between the server and Cloudflare. 
It is strongly recommended that the SSL option 'Full SSL (strict)' \ be enabled.", 'detail': 'https://support.cloudflare.com/hc/en-us/articles/200170416-What-do-the-SSL-options-mean-', 'severity': 'Warning'}]}} def _mock_response(self, status=200, text=None, json_data=None): mock_resp = mock.Mock() # set status code and content mock_resp.status_code = status # add json data if provided mock_resp.json = mock.Mock(return_value=json_data) mock_resp.text = text return mock_resp @mock.patch('requests.get') def test_le_get_call(self, mock_get): mock_resp = self._mock_response(text=self.get_result_text) mock_get.return_value = mock_resp result = letsdebug.le_get_call(self.test_id_url) self.assertEqual(result.text, self.get_result_text) @mock.patch('requests.get') def test_fail_le_get_call(self, mock_get): mock_resp = self._mock_response(status=400, text=self.get_bad_result) mock_get.return_value = mock_resp result = letsdebug.le_get_call(self.test_id_url) self.assertEqual(result.text, self.get_bad_result) @mock.patch('requests.post') def test_le_post_call(self, mock_post): mock_resp = self._mock_response(text=self.post_result_text) mock_post.return_value = mock_resp result = letsdebug.le_post_call(self.post_data) self.assertEqual(result.text, self.post_result_text) @mock.patch('requests.post') def test_fail_le_post_call(self, mock_post): mock_resp = self._mock_response(status=400, text=self.post_bad_result) mock_post.return_value = mock_resp result = letsdebug.le_post_call(self.bad_post_data) self.assertEqual(result.text, self.post_bad_result) @mock.patch('requests.get') def test_success_decode_result(self, mock_get): mock_resp = self._mock_response(json_data=json.loads(self.get_result_text)) mock_get.return_value = mock_resp result = letsdebug.le_get_call(self.post_data) actual = letsdebug.decode_result(result) self.assertEqual(actual, self.get_result_dict) def test_fail_decode_result(self): with self.assertRaises(SystemExit): letsdebug.decode_result('Bad Data') @mock.patch('requests.get') def test_success_check_status(self, mock_get): mock_resp = self._mock_response(text=self.get_result_text) mock_get.return_value = mock_resp result = letsdebug.le_get_call(self.post_data) actual = letsdebug.check_status(result, self.get_bad_result) self.assertIsNone(actual) @mock.patch('requests.get') def test_fail_check_status(self, mock_get): mock_resp = self._mock_response(status=400, text=self.get_bad_result) mock_get.return_value = mock_resp result = letsdebug.le_get_call(self.bad_post_data) with self.assertRaises(SystemExit): letsdebug.check_status(result, self.get_bad_result) @mock.patch('argparse.ArgumentParser.parse_args') def test_parse_args(self, mock_args): mock_args.return_value = argparse.Namespace(domain='jeditest.com') expected = {'domain': 'jeditest.com'} actual = letsdebug.parse_args() actual_dict = vars(actual) self.assertEqual(actual_dict, expected) @mock.patch('argparse.ArgumentParser.parse_args') def test_parse_args_none(self, mock_args): mock_args.return_value = argparse.Namespace() with mock.patch('argparse._sys.argv', ['letsdebug.py']): with self.assertRaises(SystemExit): letsdebug.parse_args()
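# --- Added sketch (not part of the original file): a standard unittest entry
# point so the suite above can be run directly, e.g. `python test_letsdebug.py`
# (the filename is an assumption).
if __name__ == '__main__':
    unittest.main()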
[ "argparse.Namespace", "letsdebughelper.letsdebug.check_status", "letsdebughelper.letsdebug.parse_args", "json.loads", "mock.patch", "mock.Mock", "letsdebughelper.letsdebug.le_get_call", "letsdebughelper.letsdebug.decode_result", "letsdebughelper.letsdebug.le_post_call" ]
from ckeditor.fields import RichTextField
from django.db import models

from src.accounts.models import User


class Company(models.Model):
    TYPE_CHOICE = (
        ('per', 'Personal'),
        ('pre', 'Premium'),
        ('ent', 'Enterprise'),
    )
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=255, default="Name")
    tag_line = models.CharField(max_length=255, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    business_type = models.CharField(max_length=255, choices=TYPE_CHOICE, default='per')
    contact_number = models.CharField(max_length=20, null=True, blank=True)
    contact_email = models.CharField(max_length=255, null=True, blank=True)
    contact_address = models.TextField(null=True, blank=True)

    class Meta:
        verbose_name_plural = "Companies"

    def __str__(self):
        return self.name


# CATEGORY
class Category(models.Model):
    name = models.CharField(max_length=255)

    class Meta:
        verbose_name_plural = "Categories"

    def __str__(self):
        return self.name


# JOB
class Job(models.Model):
    STATUS_CHOICE = (
        ('o', 'Open'),
        ('c', 'Closed')
    )
    title = models.CharField(max_length=255)
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    description = models.TextField()
    detailed_description = RichTextField(null=True, blank=True)
    company = models.ForeignKey('Company', related_name='job_provider', on_delete=models.CASCADE, blank=True)
    start_time = models.DateTimeField(null=True, blank=True)
    end_time = models.DateTimeField(null=True, blank=True)
    likes = models.ManyToManyField(User, related_name='likes')
    candidates = models.ManyToManyField(User, related_name='candidates', through='Candidate')
    status = models.CharField(max_length=1, choices=STATUS_CHOICE, default='o')
    is_active = models.BooleanField(default=True)
    created_on = models.DateTimeField(auto_now=False, auto_now_add=True)

    class Meta:
        ordering = ['-pk']

    def __str__(self):
        return self.title


# CANDIDATE
class Candidate(models.Model):
    STATUS_CHOICE = (
        ('acc', 'Accepted'),
        ('pen', 'Pending'),
        ('app', 'Applied'),
    )
    user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True)
    bachelor_degree = models.CharField(
        max_length=255,
        help_text="Name of the field in which you completed your bachelor's degree - leave blank if you don't have one"
    )
    experience = models.PositiveIntegerField(default=0, help_text="Years of working experience")
    about = models.TextField(
        null=True, blank=True,
        help_text="Say something about yourself - we'd love to hear from you: briefly describe yourself, your interests, etc."
    )
    previous_company = models.CharField(max_length=255, null=True, blank=True, help_text="Leave blank if not applicable")
    job = models.ForeignKey(Job, on_delete=models.CASCADE, blank=True)
    status = models.CharField(max_length=3, choices=STATUS_CHOICE, default='app')
    cv = models.FileField(upload_to='company/candidates/files/', blank=True, null=True)
    created_on = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.job.title


# FEEDBACK
class Feedback(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    company = models.ForeignKey(Company, on_delete=models.CASCADE)
    description = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.company.name
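# --- Added usage sketch (not part of the original models file): how these
# models compose in ORM queries. The category name and experience threshold
# are illustrative assumptions.
def open_jobs_with_applicants(category_name='Engineering', min_experience=2):
    open_jobs = Job.objects.filter(status='o', is_active=True,
                                   category__name=category_name)
    applicants = Candidate.objects.filter(job__in=open_jobs, status='app',
                                             experience__gte=min_experience)
    return open_jobs, applicants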
[ "django.db.models.FileField", "django.db.models.OneToOneField", "django.db.models.TextField", "django.db.models.ManyToManyField", "django.db.models.CharField", "django.db.models.ForeignKey", "django.db.models.PositiveIntegerField", "django.db.models.BooleanField", "ckeditor.fields.RichTextField", "django.db.models.DateTimeField" ]
import logging
import mimetypes

import six
from confy import env
from django.conf import settings
from django.core.mail import EmailMultiAlternatives, EmailMessage
from django.core.urlresolvers import reverse
from django.template import loader, Template
from django.utils.encoding import smart_text
from django.utils.html import strip_tags

from ledger.accounts.models import Document
from mooringlicensing.settings import SYSTEM_NAME

logger = logging.getLogger(__name__)


def _render(template, context):
    if isinstance(context, dict):
        context.update({'settings': settings})
    if isinstance(template, six.string_types):
        template = Template(template)
    return template.render(context)


def host_reverse(name, args=None, kwargs=None):
    return "{}{}".format(settings.DEFAULT_HOST, reverse(name, args=args, kwargs=kwargs))


class TemplateEmailBase(object):
    subject = ''
    html_template = 'mooringlicensing/emails/base_email.html'
    # txt_template can be None; in that case a tag-stripped version of the html body is sent instead (see send()).
    txt_template = 'mooringlicensing/emails/base-email.txt'

    def __init__(self, subject='', html_template='', txt_template=''):
        # Allow per-instance overrides of the class-level defaults.
        self.subject = subject if subject else self.subject
        self.html_template = html_template if html_template else self.html_template
        self.txt_template = txt_template if txt_template else self.txt_template

    def send_to_user(self, user, context=None):
        return self.send(user.email, context=context)

    def send(self, to_addresses, from_address=None, context=None, attachments=None, cc=None, bcc=None):
        """
        Send an email using EmailMultiAlternatives with text and html.
        :param to_addresses: a string or a list of addresses
        :param from_address: if None the settings.DEFAULT_FROM_EMAIL is used
        :param context: a dictionary or a Context object used for rendering the templates.
        :param attachments: a list of (filepath, content, mimetype) triples
            (see https://docs.djangoproject.com/en/1.9/topics/email/) or Documents
        :param bcc:
        :param cc:
        :return: the message object on success, or None if sending failed
        """
        email_instance = env('EMAIL_INSTANCE', 'DEV')
        # The next line will throw a TemplateDoesNotExist if the html template cannot be found
        html_template = loader.get_template(self.html_template)
        # render html
        html_body = _render(html_template, context)
        if self.txt_template is not None:
            txt_template = loader.get_template(self.txt_template)
            txt_body = _render(txt_template, context)
        else:
            txt_body = strip_tags(html_body)

        # build message
        if isinstance(to_addresses, six.string_types):
            to_addresses = [to_addresses]
        if attachments is None:
            attachments = []
        elif not isinstance(attachments, list):
            attachments = list(attachments)

        # Convert Documents to (filename, content, mime) attachment triples
        _attachments = []
        for attachment in attachments:
            if isinstance(attachment, Document):
                filename = str(attachment)
                content = attachment.file.read()
                mime = mimetypes.guess_type(attachment.filename)[0]
                _attachments.append((filename, content, mime))
            else:
                _attachments.append(attachment)
        msg = EmailMultiAlternatives(self.subject, txt_body, from_email=from_address, to=to_addresses,
                                     attachments=_attachments, cc=cc, bcc=bcc,
                                     headers={'System-Environment': email_instance})
        msg.attach_alternative(html_body, 'text/html')
        try:
            if not settings.DISABLE_EMAIL:
                msg.send(fail_silently=False)
            return msg
        except Exception as e:
            logger.exception("Error while sending email to {}: {}".format(to_addresses, e))
            return None


def _extract_email_headers(email_message, sender=None):
    if isinstance(email_message, (EmailMultiAlternatives, EmailMessage,)):
        # TODO: this logs the plain text body; should we log the html instead?
        text = email_message.body
        subject = email_message.subject
        fromm = smart_text(sender) if sender else smart_text(email_message.from_email)
        # the to email is normally a list
        if isinstance(email_message.to, list):
            to = ','.join(email_message.to)
        else:
            to = smart_text(email_message.to)
        # we log the cc and bcc in the same cc field of the log entry as a
        # comma-separated string
        all_ccs = []
        if email_message.cc:
            all_ccs += list(email_message.cc)
        if email_message.bcc:
            all_ccs += list(email_message.bcc)
        all_ccs = ','.join(all_ccs)
    else:
        text = smart_text(email_message)
        subject = ''
        to = ''
        fromm = smart_text(sender) if sender else SYSTEM_NAME
        all_ccs = ''

    email_data = {
        'subject': subject,
        'text': text,
        'to': to,
        'fromm': fromm,
        'cc': all_ccs
    }
    return email_data
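# --- Added usage sketch (not part of the original module): a concrete subclass
# and a send helper; the template path and context keys are illustrative
# assumptions.
class ApplicationSubmittedEmail(TemplateEmailBase):
    subject = 'Your application has been submitted'
    html_template = 'mooringlicensing/emails/application_submitted.html'
    txt_template = None  # fall back to the tag-stripped html body


def send_application_submitted_email(user, application):
    email = ApplicationSubmittedEmail()
    msg = email.send_to_user(user, context={'application': application})
    return _extract_email_headers(msg, sender=settings.DEFAULT_FROM_EMAIL) if msg else None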
[ "django.core.urlresolvers.reverse", "confy.env", "mimetypes.guess_type", "django.core.mail.EmailMultiAlternatives", "django.utils.html.strip_tags", "django.utils.encoding.smart_text", "django.template.Template", "logging.getLogger", "django.template.loader.get_template" ]
""" Routines that manipulate, read and convert lists of dependencies. """ # This file is part of the Snakefood open source package. # See http://furius.ca/snakefood/ for licensing details. import sys, logging from operator import itemgetter def read_depends(f): "Generator for the dependencies read from the given file object." for line in f: try: yield eval(line) except Exception: logging.warning("Invalid line: '%s'" % line) def output_depends(depdict): """Given a dictionary of (from -> list of targets), generate an appropriate output file.""" # Output the dependencies. write = sys.stdout.write for (from_root, from_), targets in sorted(depdict.iteritems(), key=itemgetter(0)): for to_root, to_ in sorted(targets): write(repr( ((from_root, from_), (to_root, to_)) )) write('\n') def eliminate_redundant_depends(depends): "Remove the redundant dependencies." alluniq = set() outdeps = [] for dep in depends: if dep in alluniq: continue alluniq.add(dep) outdeps.append(dep) return outdeps def flatten_depends(depends): """Yield the list of dependency pairs to a single list of (root, relfn) pairs, in the order that they appear. The list is guaranteed to be unique (we remove duplicates).""" seen = set([(None, None)]) for dep in depends: for pair in dep: if pair in seen: continue seen.add(pair) yield pair
[ "logging.warning", "operator.itemgetter" ]
from __future__ import unicode_literals import logging from django.conf import settings from push import mobile_module from push import pusher_module logger = logging.getLogger() def send_all(message_type, message, crowd, user=None): if settings.PUSH_DISABLED: logger.info('Push disabled, skipping push') return if settings.PUSH_MODULE == 'pusher': pusher_module.send_all(message_type, message, crowd, user) # Send to all the mobile devices attached to the user (or all if user # is None) mobile_module.send_all(message_type, message, crowd, user)
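# --- Added usage sketch (not part of the original module): the message type,
# payload shape, and crowd object are illustrative assumptions.
def broadcast_build_result(crowd):
    send_all('build_finished', {'status': 'ok'}, crowd, user=None)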
[ "push.pusher_module.send_all", "push.mobile_module.send_all", "logging.getLogger" ]
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import gradcheck
from torch.autograd import Function
from torch.autograd import Variable
import numpy as np

###################################
######### loss functions ##########
###################################
class BiasReduceLoss(nn.Module):
    def __init__(self, opt):
        super(BiasReduceLoss, self).__init__()
        self.opt = opt
        self.criterion = nn.MSELoss()
    def forward(self, x, y, weight=1):
        # build the scalar weight on CPU and move it to the GPU only when requested
        w = torch.FloatTensor(1).fill_(weight)
        if self.opt.cuda:
            w = w.cuda()
        w = Variable(w, requires_grad=False)
        self.avg = torch.mean(x, 0).unsqueeze(0)
        self.loss = w * self.criterion(self.avg, y)
        return self.loss

class TotalVaryLoss(nn.Module):
    def __init__(self, opt):
        super(TotalVaryLoss, self).__init__()
        self.opt = opt
    def forward(self, x, weight=1):
        w = torch.FloatTensor(1).fill_(weight)
        if self.opt.cuda:
            w = w.cuda()
        w = Variable(w, requires_grad=False)
        self.loss = w * (torch.sum(torch.abs(x[:, :, :, :-1] - x[:, :, :, 1:])) +
            torch.sum(torch.abs(x[:, :, :-1, :] - x[:, :, 1:, :])))
        return self.loss

class SelfSmoothLoss2(nn.Module):
    def __init__(self, opt):
        super(SelfSmoothLoss2, self).__init__()
        self.opt = opt
    def forward(self, x, weight=1):
        w = torch.FloatTensor(1).fill_(weight)
        if self.opt.cuda:
            w = w.cuda()
        w = Variable(w, requires_grad=False)
        self.x_diff = x[:, :, :, :-1] - x[:, :, :, 1:]
        self.y_diff = x[:, :, :-1, :] - x[:, :, 1:, :]
        self.loss = torch.sum(torch.mul(self.x_diff, self.x_diff)) + torch.sum(torch.mul(self.y_diff, self.y_diff))
        self.loss = w * self.loss
        return self.loss

###################################
#########  basic blocks  ##########
###################################
# a mixer (linear layer)
class waspMixer(nn.Module):
    def __init__(self, opt, ngpu=1, nin=128, nout=128):
        super(waspMixer, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # simply a linear layer followed by a sigmoid
            nn.Linear(nin, nout),
            nn.Sigmoid()
        )

    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        return output

# shading * albedo = texture (image)
class waspIntrinsicComposer(nn.Module):
    def __init__(self, opt):
        super(waspIntrinsicComposer, self).__init__()
        self.ngpu = opt.ngpu
        self.nc = opt.nc
    def forward(self, shading, albedo):
        # replicate the single-channel shading across the albedo's channels
        self.shading = shading.repeat(1, self.nc, 1, 1)
        self.img = torch.mul(self.shading, albedo)
        return self.img

# warp an image according to the sampling grid
class waspWarper(nn.Module):
    def __init__(self, opt):
        super(waspWarper, self).__init__()
        self.opt = opt
        self.batchSize = opt.batchSize
        self.imgSize = opt.imgSize

    def forward(self, input_img, input_grid):
        self.warp = input_grid.permute(0, 2, 3, 1)
        self.output = F.grid_sample(input_img, self.warp)
        return self.output

# integrate over the predicted grid offsets to get the grid (deformation field)
class waspGridSpatialIntegral(nn.Module):
    def __init__(self, opt):
        super(waspGridSpatialIntegral, self).__init__()
        self.opt = opt
        self.w = self.opt.imgSize
        # all-ones filters implement a cumulative sum along x and y via transposed convolution
        self.filterx = torch.FloatTensor(1, 1, 1, self.w).fill_(1)
        self.filtery = torch.FloatTensor(1, 1, self.w, 1).fill_(1)
        if self.opt.cuda:
            self.filterx = self.filterx.type(torch.cuda.FloatTensor)
            self.filtery = self.filtery.type(torch.cuda.FloatTensor)
        self.filterx = Variable(self.filterx, requires_grad=False)
        self.filtery = Variable(self.filtery, requires_grad=False)
        if opt.cuda:
            self.filterx.cuda()
self.filtery.cuda() def forward(self, input_diffgrid): #print(input_diffgrid.size()) fullx = F.conv_transpose2d(input_diffgrid[:,0,:,:].unsqueeze(1), self.filterx, stride=1, padding=0) fully = F.conv_transpose2d(input_diffgrid[:,1,:,:].unsqueeze(1), self.filtery, stride=1, padding=0) output_grid = torch.cat((fullx[:,:,0:self.w,0:self.w], fully[:,:,0:self.w,0:self.w]),1) return output_grid # an encoder architecture class waspEncoder(nn.Module): def __init__(self, opt, ngpu=1, nc=1, ndf = 32, ndim = 128): super(waspEncoder, self).__init__() self.ngpu = ngpu self.ndim = ndim self.main = nn.Sequential( # input is (nc) x 64 x 64 nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, False), # state size. (ndf) x 32 x 32 nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 2), nn.LeakyReLU(0.2, False), # state size. (ndf*2) x 16 x 16 nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 4), nn.LeakyReLU(0.2, False), # state size. (ndf*4) x 8 x 8 nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 8), nn.LeakyReLU(0.2, False), # state size. (ndf*8) x 4 x 4 nn.Conv2d(ndf * 8, ndim, 4, 4, 0, bias=False), nn.Sigmoid() ) def forward(self, input): output = self.main(input).view(-1,self.ndim) #print(output.size()) return output # a decoder architecture class waspDecoder(nn.Module): def __init__(self, opt, ngpu=1, nz=128, nc=1, ngf=32, lb=0, ub=1): super(waspDecoder, self).__init__() self.ngpu = ngpu self.main = nn.Sequential( # input is Z, going into a convolution nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf * 8), nn.ReLU(True), # state size. (ngf*8) x 4 x 4 nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 4), nn.ReLU(True), # state size. (ngf*4) x 8 x 8 nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True), # state size. (ngf*2) x 16 x 16 nn.ConvTranspose2d(ngf * 2,ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), # state size. (ngf) x 32 x 32 nn.ConvTranspose2d(ngf, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), # state size. (nc) x 64 x 64 nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False), nn.Hardtanh(lb,ub) ) def forward(self, input): if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1: output = nn.parallel.data_parallel(self.main, input, range(self.ngpu)) else: output = self.main(input) return output # a decoder architecture class waspDecoderTanh(nn.Module): def __init__(self, opt, ngpu=1, nz=128, nc=1, ngf=32, lb=0, ub=1): super(waspDecoderTanh, self).__init__() self.ngpu = ngpu self.main = nn.Sequential( # input is Z, going into a convolution nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf * 8), nn.Tanh(), # state size. (ngf*8) x 4 x 4 nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 4), nn.Tanh(), # state size. (ngf*4) x 8 x 8 nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.Tanh(), # state size. (ngf*2) x 16 x 16 nn.ConvTranspose2d(ngf * 2,ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.Tanh(), # state size. (ngf) x 32 x 32 nn.ConvTranspose2d(ngf, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.Tanh(), # state size. 
(nc) x 64 x 64
            nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False),
            #nn.Hardtanh(lb,ub),
            nn.Sigmoid()
        )
    def forward(self, input):
        if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
            output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
        else:
            output = self.main(input)
        return output

###################################
########  densenet blocks #########
###################################
class DenseBlockEncoder(nn.Module):
    def __init__(self, n_channels, n_convs, activation=nn.ReLU, args=[False]):
        super(DenseBlockEncoder, self).__init__()
        assert(n_convs > 0)

        self.n_channels = n_channels
        self.n_convs = n_convs
        self.layers = nn.ModuleList()
        for i in range(n_convs):
            self.layers.append(nn.Sequential(
                nn.BatchNorm2d(n_channels),
                activation(*args),
                nn.Conv2d(n_channels, n_channels, 3, stride=1, padding=1, bias=False),))

    def forward(self, inputs):
        outputs = []
        for i, layer in enumerate(self.layers):
            if i > 0:
                # dense connectivity: each layer consumes the sum of all
                # previous layers' outputs
                next_output = 0
                for no in outputs:
                    next_output = next_output + no
                outputs.append(layer(next_output))
            else:
                outputs.append(layer(inputs))
        return outputs[-1]

class DenseBlockDecoder(nn.Module):
    def __init__(self, n_channels, n_convs, activation=nn.ReLU, args=[False]):
        super(DenseBlockDecoder, self).__init__()
        assert(n_convs > 0)

        self.n_channels = n_channels
        self.n_convs = n_convs
        self.layers = nn.ModuleList()
        for i in range(n_convs):
            self.layers.append(nn.Sequential(
                # nn.BatchNorm2d(n_channels),
                nn.InstanceNorm2d(n_channels),
                activation(*args),
                nn.ConvTranspose2d(n_channels, n_channels, 3, stride=1, padding=1, bias=False),))

    def forward(self, inputs):
        outputs = []
        for i, layer in enumerate(self.layers):
            if i > 0:
                # dense connectivity, mirroring DenseBlockEncoder
                next_output = 0
                for no in outputs:
                    next_output = next_output + no
                outputs.append(layer(next_output))
            else:
                outputs.append(layer(inputs))
        return outputs[-1]

class DenseTransitionBlockEncoder(nn.Module):
    def __init__(self, n_channels_in, n_channels_out, mp, activation=nn.ReLU, args=[False]):
        super(DenseTransitionBlockEncoder, self).__init__()
        self.n_channels_in = n_channels_in
        self.n_channels_out = n_channels_out
        self.mp = mp
        self.main = nn.Sequential(
            nn.BatchNorm2d(n_channels_in),
            activation(*args),
            nn.Conv2d(n_channels_in, n_channels_out, 1, stride=1, padding=0, bias=False),
            nn.MaxPool2d(mp),
        )
    def forward(self, inputs):
        return self.main(inputs)


class DenseTransitionBlockDecoder(nn.Module):
    def __init__(self, n_channels_in, n_channels_out, activation=nn.ReLU, args=[False]):
        super(DenseTransitionBlockDecoder, self).__init__()
        self.n_channels_in = n_channels_in
        self.n_channels_out = n_channels_out
        self.main = nn.Sequential(
            nn.BatchNorm2d(n_channels_in),
            activation(*args),
            nn.ConvTranspose2d(n_channels_in, n_channels_out, 4, stride=2, padding=1, bias=False),
        )
    def forward(self, inputs):
        return self.main(inputs)

class waspDenseEncoder(nn.Module):
    def __init__(self, opt, ngpu=1, nc=1, ndf=32, ndim=128, activation=nn.LeakyReLU, args=[0.2, False], f_activation=nn.Sigmoid, f_args=[]):
        super(waspDenseEncoder, self).__init__()
        self.ngpu = ngpu
        self.ndim = ndim

        self.main = nn.Sequential(
                # input is (nc) x 64 x 64
                # nn.BatchNorm2d(nc),
                nn.InstanceNorm2d(nc),
                nn.ReLU(True),
                nn.Conv2d(nc, ndf, 4, stride=2, padding=1),

                # state size. (ndf) x 32 x 32
                DenseBlockEncoder(ndf, 6),
                DenseTransitionBlockEncoder(ndf, ndf*2, 2, activation=activation, args=args),

                # state size. (ndf*2) x 16 x 16
                DenseBlockEncoder(ndf*2, 12),
                DenseTransitionBlockEncoder(ndf*2, ndf*4, 2, activation=activation, args=args),

                # state size.
(ndf*4) x 8 x 8
                DenseBlockEncoder(ndf*4, 24),
                DenseTransitionBlockEncoder(ndf*4, ndf*8, 2, activation=activation, args=args),

                # state size. (ndf*8) x 4 x 4
                DenseBlockEncoder(ndf*8, 16),
                DenseTransitionBlockEncoder(ndf*8, ndim, 4, activation=activation, args=args),
                f_activation(*f_args),
        )

    def forward(self, input):
        output = self.main(input).view(-1, self.ndim)
        return output

'''
class LightingTransfer(nn.Module):
    def __init__(self, opt, ngpu=1, nz=128, activation=nn.ReLU, args=[False]):
        super(LightingTransfer, self).__init__()
        self.ngpu = ngpu
        self.generate_light_space = nn.Sequential(
            nn.Linear(1, opt.sdim),
            nn.BatchNorm1d(opt.sdim),
            nn.ReLU(*args),
            nn.Linear(opt.sdim, opt.sdim),
            nn.BatchNorm1d(opt.sdim),
            nn.ReLU(*args),
            nn.Linear(opt.sdim, opt.sdim)
        )
        self.main = nn.Sequential(
            nn.Linear(opt.sdim*2, opt.sdim*2),
            nn.BatchNorm1d(opt.sdim*2),
            nn.ReLU(*args),
            nn.Linear(opt.sdim*2, opt.sdim),
            nn.BatchNorm1d(opt.sdim),
            nn.ReLU(*args),
            nn.Linear(opt.sdim, opt.sdim)
        )

    def forward(self, light_direction, encoded_shading):
        light_space = self.generate_light_space(light_direction)
        print('Generation Done:', light_space.shape, encoded_shading.shape)
        new_input = torch.cat((light_space, encoded_shading), 1)
        return self.main(new_input)
'''

# One-hot illumination masks for lighting transfer
class LightingTransfer(nn.Module):
    def __init__(self, opt, ngpu=1, nz=49, activation=nn.ReLU, args=[False]):
        super(LightingTransfer, self).__init__()
        self.ngpu = ngpu
        self.illumination_map = {}
        self.fill_illumination_map()

        # kept from the continuous-direction variant above; unused in the
        # one-hot forward path below
        self.generate_light_space = nn.Sequential(
            nn.Linear(19, nz),
            nn.BatchNorm1d(nz),
            nn.ReLU(*args),
            nn.Linear(nz, nz),
            nn.BatchNorm1d(nz),
            nn.ReLU(*args),
            nn.Linear(nz, nz)
        )
        self.main = nn.Sequential(
            nn.Linear(nz + opt.sdim, nz),
            nn.BatchNorm1d(nz),
            nn.ReLU(*args),
            nn.Linear(nz, nz),
            nn.BatchNorm1d(nz),
            nn.ReLU(*args),
            nn.Linear(nz, opt.sdim)
        )

    def fill_illumination_map(self):
        # each mask is a flattened 7x7 (49-entry) grid of lit/unlit cells
        self.illumination_map[0] = [0] * 49
        self.illumination_map[1] = ([0] * 6 + [1] * 1) * 7
        self.illumination_map[2] = ([0] * 5 + [1] * 2) * 7
        self.illumination_map[3] = ([0] * 4 + [1] * 3) * 7
        self.illumination_map[4] = ([0] * 3 + [1] * 4) * 7
        self.illumination_map[5] = ([0] * 2 + [1] * 5) * 7
        self.illumination_map[6] = ([0] * 1 + [1] * 6) * 7
        self.illumination_map[7] = ([0] * 0 + [1] * 7) * 7
        self.illumination_map[8] = ([1] * 6 + [0] * 1) * 7
        self.illumination_map[9] = ([1] * 5 + [0] * 2) * 7
        self.illumination_map[10] = ([1] * 4 + [0] * 3) * 7
        self.illumination_map[11] = ([1] * 3 + [0] * 4) * 7
        self.illumination_map[12] = ([1] * 2 + [0] * 5) * 7
        self.illumination_map[13] = ([1] * 1 + [0] * 6) * 7
        # the middle row pattern repeats five times (7 + 5*7 + 7 = 49 entries)
        self.illumination_map[14] = [0] * 3 + [1] * 4 + ([0] * 2 + [1] * 5) * 5 + [0] * 3 + [1] * 4
        self.illumination_map[15] = [0] * 3 + [1] * 4 + ([0] * 1 + [1] * 6) * 5 + [0] * 3 + [1] * 4
        self.illumination_map[16] = ([0] * 1 + [1] * 5 + [0] * 1) * 5 + ([0] * 2 + [1] * 3 + [0] * 2) * 2
        self.illumination_map[17] = [1] * 4 + [0] * 3 + ([1] * 5 + [0] * 2) * 5 + [1] * 4 + [0] * 3
        self.illumination_map[18] = [1] * 3 + [0] * 4 + ([1] * 6 + [0] * 1) * 5 + [1] * 3 + [0] * 4
        self.illumination_map[19] = [0] * 49
        self.illumination_map[20] = [0] * 49

    def forward(self, light_direction, encoded_shading):
        # the map stores plain Python lists; torch.cat needs a tensor, so
        # build one on the same device as the shading code and tile it over
        # the batch (assumes a single scalar light direction per batch)
        mask = torch.FloatTensor(self.illumination_map[int(light_direction)])
        if encoded_shading.is_cuda:
            mask = mask.cuda()
        mask = Variable(mask.unsqueeze(0).expand(encoded_shading.size(0), -1), requires_grad=False)
        # light_space = self.generate_light_space(light_direction)
        new_input = torch.cat((mask, encoded_shading), 1)
        return self.main(new_input)


class waspDenseDecoder(nn.Module):
    def __init__(self, opt, ngpu=1, nz=128, nc=1, ngf=32, lb=0, ub=1, activation=nn.ReLU,
args=[False], f_activation=nn.Hardtanh, f_args=[0,1]):
        super(waspDenseDecoder, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
                # input is Z, going into convolution
                # nn.BatchNorm2d(nz),
                nn.InstanceNorm2d(nz),
                activation(*args),
                nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),

                # state size. (ngf*8) x 4 x 4
                DenseBlockDecoder(ngf*8, 16),
                DenseTransitionBlockDecoder(ngf*8, ngf*4),

                # state size. (ngf*4) x 8 x 8
                DenseBlockDecoder(ngf*4, 24),
                DenseTransitionBlockDecoder(ngf*4, ngf*2),

                # state size. (ngf*2) x 16 x 16
                DenseBlockDecoder(ngf*2, 12),
                DenseTransitionBlockDecoder(ngf*2, ngf),

                # state size. (ngf) x 32 x 32
                DenseBlockDecoder(ngf, 6),
                DenseTransitionBlockDecoder(ngf, ngf),

                # state size. (ngf) x 64 x 64
                # nn.BatchNorm2d(ngf),
                nn.InstanceNorm2d(ngf),
                activation(*args),
                nn.ConvTranspose2d(ngf, nc, 3, stride=1, padding=1, bias=False),
                f_activation(*f_args),
        )
    def forward(self, inputs):
        return self.main(inputs)


###################################
###### encoders and decoders ######
###################################
#### The encoders ####
# encoders of DAE
class Encoders(nn.Module):
    def __init__(self, opt):
        super(Encoders, self).__init__()
        self.ngpu = opt.ngpu
        self.encoder = waspEncoder(opt, ngpu=1, nc=opt.nc, ndf=opt.ndf, ndim=opt.zdim)
        self.zImixer = waspMixer(opt, ngpu=1, nin=opt.zdim, nout=opt.idim)
        self.zWmixer = waspMixer(opt, ngpu=1, nin=opt.zdim, nout=opt.wdim)

    def forward(self, input):
        self.z = self.encoder(input)
        self.zImg = self.zImixer(self.z)
        self.zWarp = self.zWmixer(self.z)
        return self.z, self.zImg, self.zWarp

# encoders of intrinsic DAE
class Encoders_Intrinsic(nn.Module):
    def __init__(self, opt):
        super(Encoders_Intrinsic, self).__init__()
        self.ngpu = opt.ngpu
        self.encoder = waspEncoder(opt, ngpu=1, nc=opt.nc, ndf=opt.ndf, ndim=opt.zdim)
        #self.zImixer = waspMixer(opt, ngpu=1, nin=opt.zdim, nout=opt.idim)
        self.zSmixer = waspMixer(opt, ngpu=1, nin=opt.zdim, nout=opt.sdim)
        self.zTmixer = waspMixer(opt, ngpu=1, nin=opt.zdim, nout=opt.tdim)
        self.zWmixer = waspMixer(opt, ngpu=1, nin=opt.zdim, nout=opt.wdim)

    def forward(self, input):
        self.z = self.encoder(input)
        #self.zImg = self.zImixer(self.z)
        self.zShade = self.zSmixer(self.z)
        self.zTexture = self.zTmixer(self.z)
        self.zWarp = self.zWmixer(self.z)
        return self.z, self.zShade, self.zTexture, self.zWarp

# encoders of DAE, using DenseNet architecture
class Dense_Encoders(nn.Module):
    def __init__(self, opt):
        super(Dense_Encoders, self).__init__()
        self.ngpu = opt.ngpu
        self.encoder = waspDenseEncoder(opt, ngpu=1, nc=opt.nc, ndf=opt.ndf, ndim=opt.zdim)
        self.zImixer = waspMixer(opt, ngpu=1, nin=opt.zdim, nout=opt.idim)
        self.zWmixer = waspMixer(opt, ngpu=1, nin=opt.zdim, nout=opt.wdim)

    def forward(self, input):
        self.z = self.encoder(input)
        self.zImg = self.zImixer(self.z)
        self.zWarp = self.zWmixer(self.z)
        return self.z, self.zImg, self.zWarp

# encoders of intrinsic DAE, using DenseNet architecture
class Dense_Encoders_Intrinsic(nn.Module):
    def __init__(self, opt):
        super(Dense_Encoders_Intrinsic, self).__init__()
        self.ngpu = opt.ngpu
        self.encoder = waspDenseEncoder(opt, ngpu=1, nc=opt.nc, ndf=opt.ndf, ndim=opt.zdim)
        self.zSmixer = waspMixer(opt, ngpu=1, nin=opt.zdim, nout=opt.sdim)
        self.zTmixer = waspMixer(opt, ngpu=1, nin=opt.zdim, nout=opt.tdim)
        self.zWmixer = waspMixer(opt, ngpu=1, nin=opt.zdim, nout=opt.wdim)

    def forward(self, input):
        self.z = self.encoder(input)
        self.zShade = self.zSmixer(self.z)
        self.zTexture = self.zTmixer(self.z)
        self.zWarp =
self.zWmixer(self.z)
        return self.z, self.zShade, self.zTexture, self.zWarp

#### The decoders ####

# decoders of DAE
class DecodersIntegralWarper2(nn.Module):
    def __init__(self, opt):
        super(DecodersIntegralWarper2, self).__init__()
        self.imagedimension = opt.imgSize
        self.ngpu = opt.ngpu
        self.idim = opt.idim
        self.wdim = opt.wdim
        self.decoderI = waspDecoder(opt, ngpu=self.ngpu, nz=opt.idim, nc=opt.nc, ngf=opt.ngf, lb=0, ub=1)
        self.decoderW = waspDecoderTanh(opt, ngpu=self.ngpu, nz=opt.wdim, nc=2, ngf=opt.ngf, lb=0, ub=0.1)
        self.warper = waspWarper(opt)
        self.integrator = waspGridSpatialIntegral(opt)
        self.cutter = nn.Hardtanh(-1, 1)

    def forward(self, zI, zW, basegrid):
        # decode the texture and the differential warping field, integrate the
        # field into a sampling grid, then warp the texture into the output
        self.texture = self.decoderI(zI.view(-1, self.idim, 1, 1))
        self.differentialWarping = self.decoderW(zW.view(-1, self.wdim, 1, 1)) * (5.0 / self.imagedimension)
        self.warping = self.integrator(self.differentialWarping) - 1.2
        self.warping = self.cutter(self.warping)
        self.resWarping = self.warping - basegrid
        self.output = self.warper(self.texture, self.warping)
        return self.texture, self.resWarping, self.output, self.warping

# decoders of intrinsic DAE
class DecodersIntegralWarper2_Intrinsic(nn.Module):
    def __init__(self, opt):
        super(DecodersIntegralWarper2_Intrinsic, self).__init__()
        self.imagedimension = opt.imgSize
        self.ngpu = opt.ngpu
        self.idim = opt.idim
        self.sdim = opt.sdim
        self.tdim = opt.tdim
        self.wdim = opt.wdim
        self.lightNet = LightingTransfer(opt)
        self.decoderS = waspDecoder(opt, ngpu=self.ngpu, nz=opt.sdim, nc=1, ngf=opt.ngf, lb=0, ub=1)
        self.decoderT = waspDecoder(opt, ngpu=self.ngpu, nz=opt.tdim, nc=opt.nc, ngf=opt.ngf, lb=0, ub=1)
        self.decoderW = waspDecoderTanh(opt, ngpu=self.ngpu, nz=opt.wdim, nc=2, ngf=opt.ngf, lb=0, ub=0.1)
        self.intrinsicComposer = waspIntrinsicComposer(opt)
        self.warper = waspWarper(opt)
        self.integrator = waspGridSpatialIntegral(opt)
        self.cutter = nn.Hardtanh(-1, 1)

    def forward(self, lightDirection, zS, zT, zW, basegrid):
        ld = lightDirection.type(torch.cuda.FloatTensor)
        newZS = self.lightNet(ld, zS)
        self.shading = self.decoderS(newZS.view(-1, self.sdim, 1, 1))
        self.texture = self.decoderT(zT.view(-1, self.tdim, 1, 1))
        self.img = self.intrinsicComposer(self.shading, self.texture)
        self.differentialWarping = self.decoderW(zW.view(-1, self.wdim, 1, 1)) * (5.0 / self.imagedimension)
        self.warping = self.integrator(self.differentialWarping) - 1.2
        self.warping = self.cutter(self.warping)
        self.resWarping = self.warping - basegrid
        self.output = self.warper(self.img, self.warping)
        return self.shading, self.texture, self.img, self.resWarping, self.output, self.warping

# decoders of DAE, using DenseNet architecture
class Dense_DecodersIntegralWarper2(nn.Module):
    def __init__(self, opt):
        super(Dense_DecodersIntegralWarper2, self).__init__()
        self.imagedimension = opt.imgSize
        self.ngpu = opt.ngpu
        self.idim = opt.idim
        self.wdim = opt.wdim
        self.decoderI = waspDenseDecoder(opt, ngpu=self.ngpu, nz=opt.idim, nc=opt.nc, ngf=opt.ngf, lb=0, ub=1)
        self.decoderW = waspDenseDecoder(opt, ngpu=self.ngpu, nz=opt.wdim, nc=2, ngf=opt.ngf, lb=0, ub=1,
                                          activation=nn.Tanh, args=[], f_activation=nn.Sigmoid, f_args=[])
        self.warper = waspWarper(opt)
        self.integrator = waspGridSpatialIntegral(opt)
        self.cutter = nn.Hardtanh(-1, 1)

    def forward(self, zI, zW, basegrid):
        self.img = self.decoderI(zI.view(-1, self.idim, 1, 1))
        self.differentialWarping = self.decoderW(zW.view(-1, self.wdim, 1, 1)) * (5.0 / self.imagedimension)
        self.warping = self.integrator(self.differentialWarping) - 1.2
        self.warping = self.cutter(self.warping)
        self.resWarping = self.warping - basegrid
        self.output = self.warper(self.img, self.warping)
        return self.img, self.resWarping, self.output, self.warping

# decoders of Intrinsic DAE, using DenseNet architecture
class Dense_DecodersIntegralWarper2_Intrinsic(nn.Module):
    def __init__(self, opt):
        super(Dense_DecodersIntegralWarper2_Intrinsic, self).__init__()
        self.imagedimension = opt.imgSize
        self.ngpu = opt.ngpu
        self.idim = opt.idim
        self.sdim = opt.sdim + 1  # one extra slot for the lighting scalar
        self.tdim = opt.tdim
        self.wdim = opt.wdim
        # Lighting Net
        # self.lightNet = LightingTransfer(opt)
        # shading decoder
        self.decoderS = waspDenseDecoder(opt, ngpu=self.ngpu, nz=self.sdim, nc=1, ngf=opt.ngf, lb=0, ub=1)
        # albedo decoder
        self.decoderT = waspDenseDecoder(opt, ngpu=self.ngpu, nz=opt.tdim, nc=opt.nc, ngf=opt.ngf, lb=0, ub=1)
        # deformation decoder
        self.decoderW = waspDenseDecoder(opt, ngpu=self.ngpu, nz=opt.wdim, nc=2, ngf=opt.ngf, lb=0, ub=1,
                                          activation=nn.Tanh, args=[], f_activation=nn.Sigmoid, f_args=[])
        # shading * albedo = texture
        self.intrinsicComposer = waspIntrinsicComposer(opt)
        # deformation offset decoder
        self.warper = waspWarper(opt)
        # spatial integrator for the deformation field
        self.integrator = waspGridSpatialIntegral(opt)
        self.cutter = nn.Hardtanh(-1, 1)

    def forward(self, lightingDirection, zS, zT, zW, basegrid):
        ld = lightingDirection.type(torch.cuda.FloatTensor)
        ld = ld.reshape(ld.shape[0], 1)
        newZS = torch.cat((zS, ld), 1)
        # print('LD SHAPE:', zT.shape, newZS.shape)
        # newZS = self.lightNet(ld, zS)
        self.shading = self.decoderS(newZS.view(-1, self.sdim, 1, 1))
        self.texture = self.decoderT(zT.view(-1, self.tdim, 1, 1))
        self.img = self.intrinsicComposer(self.shading, self.texture)
        self.differentialWarping = self.decoderW(zW.view(-1, self.wdim, 1, 1)) * (5.0 / self.imagedimension)
        self.warping = self.integrator(self.differentialWarping) - 1.2
        self.warping = self.cutter(self.warping)
        self.resWarping = self.warping - basegrid
        self.output = self.warper(self.img, self.warping)
        return self.shading, self.texture, self.img, self.resWarping, self.output, self.warping
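# --- Usage sketch (editorial addition, not part of the original module) ---
# A minimal, hedged example of driving Dense_DecodersIntegralWarper2_Intrinsic
# defined above. All `opt` field values below are illustrative assumptions, as
# is the basegrid layout (an identity sampling grid in [-1, 1], channel order
# (x, y), shape (N, 2, H, W)); a CUDA device is assumed because forward()
# casts the lighting direction to torch.cuda.FloatTensor.
if __name__ == "__main__":
    from types import SimpleNamespace

    opt = SimpleNamespace(imgSize=64, ngpu=1, idim=128, sdim=15, tdim=128,
                          wdim=128, nc=3, ngf=32)  # hypothetical settings
    model = Dense_DecodersIntegralWarper2_Intrinsic(opt).cuda()

    n = 4
    light = torch.rand(n, device="cuda")            # one lighting scalar per sample
    zS = torch.randn(n, opt.sdim, device="cuda")    # shading code (light is appended inside forward)
    zT = torch.randn(n, opt.tdim, device="cuda")    # albedo/texture code
    zW = torch.randn(n, opt.wdim, device="cuda")    # deformation code

    # Identity warping grid; the integrator's output is compared against this.
    xs = torch.linspace(-1, 1, opt.imgSize, device="cuda")
    gy, gx = torch.meshgrid(xs, xs, indexing="ij")  # indexing kwarg needs torch >= 1.10
    basegrid = torch.stack([gx, gy]).unsqueeze(0).expand(n, -1, -1, -1)

    shading, texture, img, res_warp, out, warp = model(light, zS, zT, zW, basegrid)
    print(out.shape)  # expected: (n, opt.nc, opt.imgSize, opt.imgSize)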
[ "torch.cat", "torch.nn.InstanceNorm2d", "torch.nn.MSELoss", "torch.nn.functional.grid_sample", "torch.cuda.FloatTensor", "torch.FloatTensor", "torch.nn.Linear", "torch.mean", "torch.nn.ModuleList", "torch.nn.Tanh", "torch.autograd.Variable", "torch.nn.Conv2d", "torch.nn.BatchNorm1d", "torch.mul", "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d", "torch.nn.LeakyReLU", "torch.nn.Sigmoid", "torch.nn.Hardtanh", "torch.nn.ReLU", "torch.nn.ConvTranspose2d", "torch.abs" ]
[((573, 585), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (583, 585), True, 'import torch.nn as nn\n'), ((736, 768), 'torch.autograd.Variable', 'Variable', (['w'], {'requires_grad': '(False)'}), '(w, requires_grad=False)\n', (744, 768), False, 'from torch.autograd import Variable\n'), ((1169, 1201), 'torch.autograd.Variable', 'Variable', (['w'], {'requires_grad': '(False)'}), '(w, requires_grad=False)\n', (1177, 1201), False, 'from torch.autograd import Variable\n'), ((1659, 1691), 'torch.autograd.Variable', 'Variable', (['w'], {'requires_grad': '(False)'}), '(w, requires_grad=False)\n', (1667, 1691), False, 'from torch.autograd import Variable\n'), ((2994, 3025), 'torch.mul', 'torch.mul', (['self.shading', 'albedo'], {}), '(self.shading, albedo)\n', (3003, 3025), False, 'import torch\n'), ((3401, 3436), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['input_img', 'self.warp'], {}), '(input_img, self.warp)\n', (3414, 3436), True, 'import torch.nn.functional as F\n'), ((4041, 4084), 'torch.autograd.Variable', 'Variable', (['self.filterx'], {'requires_grad': '(False)'}), '(self.filterx, requires_grad=False)\n', (4049, 4084), False, 'from torch.autograd import Variable\n'), ((4108, 4151), 'torch.autograd.Variable', 'Variable', (['self.filtery'], {'requires_grad': '(False)'}), '(self.filtery, requires_grad=False)\n', (4116, 4151), False, 'from torch.autograd import Variable\n'), ((4552, 4637), 'torch.cat', 'torch.cat', (['(fullx[:, :, 0:self.w, 0:self.w], fully[:, :, 0:self.w, 0:self.w])', '(1)'], {}), '((fullx[:, :, 0:self.w, 0:self.w], fully[:, :, 0:self.w, 0:self.w]), 1\n )\n', (4561, 4637), False, 'import torch\n'), ((9281, 9296), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (9294, 9296), True, 'import torch.nn as nn\n'), ((10223, 10238), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (10236, 10238), True, 'import torch.nn as nn\n'), ((17297, 17349), 'torch.cat', 'torch.cat', (['(illumination_vector, encoded_shading)', '(1)'], {}), '((illumination_vector, encoded_shading), 1)\n', (17306, 17349), False, 'import torch\n'), ((22489, 22507), 'torch.nn.Hardtanh', 'nn.Hardtanh', (['(-1)', '(1)'], {}), '(-1, 1)\n', (22500, 22507), True, 'import torch.nn as nn\n'), ((23912, 23930), 'torch.nn.Hardtanh', 'nn.Hardtanh', (['(-1)', '(1)'], {}), '(-1, 1)\n', (23923, 23930), True, 'import torch.nn as nn\n'), ((25423, 25441), 'torch.nn.Hardtanh', 'nn.Hardtanh', (['(-1)', '(1)'], {}), '(-1, 1)\n', (25434, 25441), True, 'import torch.nn as nn\n'), ((27181, 27199), 'torch.nn.Hardtanh', 'nn.Hardtanh', (['(-1)', '(1)'], {}), '(-1, 1)\n', (27192, 27199), True, 'import torch.nn as nn\n'), ((27406, 27428), 'torch.cat', 'torch.cat', (['(zS, ld)', '(1)'], {}), '((zS, ld), 1)\n', (27415, 27428), False, 'import torch\n'), ((2348, 2368), 'torch.nn.Linear', 'nn.Linear', (['nin', 'nout'], {}), '(nin, nout)\n', (2357, 2368), True, 'import torch.nn as nn\n'), ((2382, 2394), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2392, 2394), True, 'import torch.nn as nn\n'), ((4954, 4993), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'ndf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(nc, ndf, 4, 2, 1, bias=False)\n', (4963, 4993), True, 'import torch.nn as nn\n'), ((5007, 5031), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(False)'], {}), '(0.2, False)\n', (5019, 5031), True, 'import torch.nn as nn\n'), ((5087, 5131), 'torch.nn.Conv2d', 'nn.Conv2d', (['ndf', '(ndf * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf, ndf * 2, 4, 2, 1, bias=False)\n', (5096, 5131), True, 'import 
torch.nn as nn\n'), ((5145, 5168), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 2)'], {}), '(ndf * 2)\n', (5159, 5168), True, 'import torch.nn as nn\n'), ((5182, 5206), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(False)'], {}), '(0.2, False)\n', (5194, 5206), True, 'import torch.nn as nn\n'), ((5264, 5312), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 2)', '(ndf * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf * 2, ndf * 4, 4, 2, 1, bias=False)\n', (5273, 5312), True, 'import torch.nn as nn\n'), ((5326, 5349), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 4)'], {}), '(ndf * 4)\n', (5340, 5349), True, 'import torch.nn as nn\n'), ((5363, 5387), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(False)'], {}), '(0.2, False)\n', (5375, 5387), True, 'import torch.nn as nn\n'), ((5443, 5491), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 4)', '(ndf * 8)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ndf * 4, ndf * 8, 4, 2, 1, bias=False)\n', (5452, 5491), True, 'import torch.nn as nn\n'), ((5505, 5528), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 8)'], {}), '(ndf * 8)\n', (5519, 5528), True, 'import torch.nn as nn\n'), ((5542, 5566), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(False)'], {}), '(0.2, False)\n', (5554, 5566), True, 'import torch.nn as nn\n'), ((5622, 5667), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 8)', 'ndim', '(4)', '(4)', '(0)'], {'bias': '(False)'}), '(ndf * 8, ndim, 4, 4, 0, bias=False)\n', (5631, 5667), True, 'import torch.nn as nn\n'), ((5681, 5693), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5691, 5693), True, 'import torch.nn as nn\n'), ((6138, 6190), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nz', '(ngf * 8)', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(nz, ngf * 8, 4, 1, 0, bias=False)\n', (6156, 6190), True, 'import torch.nn as nn\n'), ((6204, 6227), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 8)'], {}), '(ngf * 8)\n', (6218, 6227), True, 'import torch.nn as nn\n'), ((6241, 6254), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6248, 6254), True, 'import torch.nn as nn\n'), ((6310, 6367), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 8)', '(ngf * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 8, ngf * 4, 4, 2, 1, bias=False)\n', (6328, 6367), True, 'import torch.nn as nn\n'), ((6381, 6404), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 4)'], {}), '(ngf * 4)\n', (6395, 6404), True, 'import torch.nn as nn\n'), ((6418, 6431), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6425, 6431), True, 'import torch.nn as nn\n'), ((6487, 6544), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 4)', '(ngf * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 4, ngf * 2, 4, 2, 1, bias=False)\n', (6505, 6544), True, 'import torch.nn as nn\n'), ((6558, 6581), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 2)'], {}), '(ngf * 2)\n', (6572, 6581), True, 'import torch.nn as nn\n'), ((6595, 6608), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6602, 6608), True, 'import torch.nn as nn\n'), ((6666, 6719), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 2)', 'ngf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 2, ngf, 4, 2, 1, bias=False)\n', (6684, 6719), True, 'import torch.nn as nn\n'), ((6732, 6751), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ngf'], {}), '(ngf)\n', (6746, 6751), True, 'import torch.nn as nn\n'), ((6765, 6778), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6772, 6778), True, 'import torch.nn as 
nn\n'), ((6834, 6883), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'ngf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf, ngf, 4, 2, 1, bias=False)\n', (6852, 6883), True, 'import torch.nn as nn\n'), ((6897, 6916), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ngf'], {}), '(ngf)\n', (6911, 6916), True, 'import torch.nn as nn\n'), ((6930, 6943), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (6937, 6943), True, 'import torch.nn as nn\n'), ((6998, 7046), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'nc', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(ngf, nc, 3, 1, 1, bias=False)\n', (7016, 7046), True, 'import torch.nn as nn\n'), ((7060, 7079), 'torch.nn.Hardtanh', 'nn.Hardtanh', (['lb', 'ub'], {}), '(lb, ub)\n', (7071, 7079), True, 'import torch.nn as nn\n'), ((7657, 7709), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nz', '(ngf * 8)', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(nz, ngf * 8, 4, 1, 0, bias=False)\n', (7675, 7709), True, 'import torch.nn as nn\n'), ((7723, 7746), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 8)'], {}), '(ngf * 8)\n', (7737, 7746), True, 'import torch.nn as nn\n'), ((7760, 7769), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (7767, 7769), True, 'import torch.nn as nn\n'), ((7825, 7882), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 8)', '(ngf * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 8, ngf * 4, 4, 2, 1, bias=False)\n', (7843, 7882), True, 'import torch.nn as nn\n'), ((7896, 7919), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 4)'], {}), '(ngf * 4)\n', (7910, 7919), True, 'import torch.nn as nn\n'), ((7933, 7942), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (7940, 7942), True, 'import torch.nn as nn\n'), ((7998, 8055), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 4)', '(ngf * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 4, ngf * 2, 4, 2, 1, bias=False)\n', (8016, 8055), True, 'import torch.nn as nn\n'), ((8069, 8092), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 2)'], {}), '(ngf * 2)\n', (8083, 8092), True, 'import torch.nn as nn\n'), ((8106, 8115), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (8113, 8115), True, 'import torch.nn as nn\n'), ((8173, 8226), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 2)', 'ngf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 2, ngf, 4, 2, 1, bias=False)\n', (8191, 8226), True, 'import torch.nn as nn\n'), ((8239, 8258), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ngf'], {}), '(ngf)\n', (8253, 8258), True, 'import torch.nn as nn\n'), ((8272, 8281), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (8279, 8281), True, 'import torch.nn as nn\n'), ((8337, 8386), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'ngf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf, ngf, 4, 2, 1, bias=False)\n', (8355, 8386), True, 'import torch.nn as nn\n'), ((8400, 8419), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ngf'], {}), '(ngf)\n', (8414, 8419), True, 'import torch.nn as nn\n'), ((8433, 8442), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (8440, 8442), True, 'import torch.nn as nn\n'), ((8497, 8545), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'nc', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(ngf, nc, 3, 1, 1, bias=False)\n', (8515, 8545), True, 'import torch.nn as nn\n'), ((8592, 8604), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (8602, 8604), True, 'import torch.nn as nn\n'), ((11322, 11351), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_channels_in'], 
{}), '(n_channels_in)\n', (11336, 11351), True, 'import torch.nn as nn\n'), ((11404, 11480), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_channels_in', 'n_channels_out', '(1)'], {'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(n_channels_in, n_channels_out, 1, stride=1, padding=0, bias=False)\n', (11413, 11480), True, 'import torch.nn as nn\n'), ((11498, 11514), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['mp'], {}), '(mp)\n', (11510, 11514), True, 'import torch.nn as nn\n'), ((11937, 11966), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_channels_in'], {}), '(n_channels_in)\n', (11951, 11966), True, 'import torch.nn as nn\n'), ((12019, 12108), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['n_channels_in', 'n_channels_out', '(4)'], {'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(n_channels_in, n_channels_out, 4, stride=2, padding=1,\n bias=False)\n', (12037, 12108), True, 'import torch.nn as nn\n'), ((12592, 12613), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['nc'], {}), '(nc)\n', (12609, 12613), True, 'import torch.nn as nn\n'), ((12631, 12644), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (12638, 12644), True, 'import torch.nn as nn\n'), ((12662, 12704), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'ndf', '(4)'], {'stride': '(2)', 'padding': '(1)'}), '(nc, ndf, 4, stride=2, padding=1)\n', (12671, 12704), True, 'import torch.nn as nn\n'), ((15095, 15112), 'torch.nn.Linear', 'nn.Linear', (['(19)', 'nz'], {}), '(19, nz)\n', (15104, 15112), True, 'import torch.nn as nn\n'), ((15126, 15144), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['nz'], {}), '(nz)\n', (15140, 15144), True, 'import torch.nn as nn\n'), ((15158, 15172), 'torch.nn.ReLU', 'nn.ReLU', (['*args'], {}), '(*args)\n', (15165, 15172), True, 'import torch.nn as nn\n'), ((15186, 15203), 'torch.nn.Linear', 'nn.Linear', (['nz', 'nz'], {}), '(nz, nz)\n', (15195, 15203), True, 'import torch.nn as nn\n'), ((15217, 15235), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['nz'], {}), '(nz)\n', (15231, 15235), True, 'import torch.nn as nn\n'), ((15249, 15263), 'torch.nn.ReLU', 'nn.ReLU', (['*args'], {}), '(*args)\n', (15256, 15263), True, 'import torch.nn as nn\n'), ((15277, 15294), 'torch.nn.Linear', 'nn.Linear', (['nz', 'nz'], {}), '(nz, nz)\n', (15286, 15294), True, 'import torch.nn as nn\n'), ((15352, 15380), 'torch.nn.Linear', 'nn.Linear', (['(nz + opt.sdim)', 'nz'], {}), '(nz + opt.sdim, nz)\n', (15361, 15380), True, 'import torch.nn as nn\n'), ((15392, 15410), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['nz'], {}), '(nz)\n', (15406, 15410), True, 'import torch.nn as nn\n'), ((15424, 15438), 'torch.nn.ReLU', 'nn.ReLU', (['*args'], {}), '(*args)\n', (15431, 15438), True, 'import torch.nn as nn\n'), ((15452, 15469), 'torch.nn.Linear', 'nn.Linear', (['nz', 'nz'], {}), '(nz, nz)\n', (15461, 15469), True, 'import torch.nn as nn\n'), ((15483, 15501), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['nz'], {}), '(nz)\n', (15497, 15501), True, 'import torch.nn as nn\n'), ((15515, 15529), 'torch.nn.ReLU', 'nn.ReLU', (['*args'], {}), '(*args)\n', (15522, 15529), True, 'import torch.nn as nn\n'), ((15543, 15566), 'torch.nn.Linear', 'nn.Linear', (['nz', 'opt.sdim'], {}), '(nz, opt.sdim)\n', (15552, 15566), True, 'import torch.nn as nn\n'), ((17775, 17796), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['nz'], {}), '(nz)\n', (17792, 17796), True, 'import torch.nn as nn\n'), ((17841, 17893), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nz', '(ngf * 8)', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(nz, ngf * 8, 4, 1, 0, 
bias=False)\n', (17859, 17893), True, 'import torch.nn as nn\n'), ((18537, 18559), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['ngf'], {}), '(ngf)\n', (18554, 18559), True, 'import torch.nn as nn\n'), ((18604, 18667), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'nc', '(3)'], {'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(ngf, nc, 3, stride=1, padding=1, bias=False)\n', (18622, 18667), True, 'import torch.nn as nn\n'), ((637, 662), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(1)'], {}), '(1)\n', (659, 662), False, 'import torch\n'), ((788, 804), 'torch.mean', 'torch.mean', (['x', '(0)'], {}), '(x, 0)\n', (798, 804), False, 'import torch\n'), ((1070, 1095), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(1)'], {}), '(1)\n', (1092, 1095), False, 'import torch\n'), ((1560, 1585), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(1)'], {}), '(1)\n', (1582, 1585), False, 'import torch\n'), ((1832, 1867), 'torch.mul', 'torch.mul', (['self.x_diff', 'self.x_diff'], {}), '(self.x_diff, self.x_diff)\n', (1841, 1867), False, 'import torch\n'), ((1881, 1916), 'torch.mul', 'torch.mul', (['self.y_diff', 'self.y_diff'], {}), '(self.y_diff, self.y_diff)\n', (1890, 1916), False, 'import torch\n'), ((3749, 3783), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', '(1)', '(1)', 'self.w'], {}), '(1, 1, 1, self.w)\n', (3766, 3783), False, 'import torch\n'), ((3813, 3847), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', '(1)', 'self.w', '(1)'], {}), '(1, 1, self.w, 1)\n', (3830, 3847), False, 'import torch\n'), ((1237, 1280), 'torch.abs', 'torch.abs', (['(x[:, :, :, :-1] - x[:, :, :, 1:])'], {}), '(x[:, :, :, :-1] - x[:, :, :, 1:])\n', (1246, 1280), False, 'import torch\n'), ((1307, 1350), 'torch.abs', 'torch.abs', (['(x[:, :, :-1, :] - x[:, :, 1:, :])'], {}), '(x[:, :, :-1, :] - x[:, :, 1:, :])\n', (1316, 1350), False, 'import torch\n'), ((9396, 9422), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_channels'], {}), '(n_channels)\n', (9410, 9422), True, 'import torch.nn as nn\n'), ((9483, 9552), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_channels', 'n_channels', '(3)'], {'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(n_channels, n_channels, 3, stride=1, padding=1, bias=False)\n', (9492, 9552), True, 'import torch.nn as nn\n'), ((10388, 10417), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['n_channels'], {}), '(n_channels)\n', (10405, 10417), True, 'import torch.nn as nn\n'), ((10478, 10556), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['n_channels', 'n_channels', '(3)'], {'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(n_channels, n_channels, 3, stride=1, padding=1, bias=False)\n', (10496, 10556), True, 'import torch.nn as nn\n')]
# -*- coding: utf-8 -*-
"""Install binary dependencies."""
# standard library imports
import os
import shutil
import sys
import tempfile
from pathlib import Path

# first-party imports
import click
import sh
from packaging import version
from progressbar import DataTransferBar
from requests_download import ProgressTracker
from requests_download import download as request_download

# module imports
from . import cli

INSTALL_ENVIRON_VAR = (  # installs go into "/bin" and other subdirs of this directory
    "BIONORM_INSTALL_DIR"
)
if INSTALL_ENVIRON_VAR in os.environ:
    INSTALL_PATH = Path(os.environ[INSTALL_ENVIRON_VAR])
else:
    INSTALL_PATH = Path(sys.executable).parent.parent
BIN_PATH = INSTALL_PATH / "bin"
SAMTOOLS_VER = "1.10"
GFFREAD_VER = "0.11.8"
GT_VER = "1.6.1"
HTSLIB_DIR = "htslib-" + SAMTOOLS_VER
DEPENDENCY_DICT = {
    "gffread": {
        "binaries": ["gffread"],
        "git_list": [
            "https://github.com/gpertea/gffread.git",
            "https://github.com/gpertea/gclib",
        ],
        "dir": "gffread",
        "version": version.parse(GFFREAD_VER),
        "version_command": ["--version"],
        "make": ["release"],
        "copy_binaries": ["gffread"],
    },
    "samtools": {
        "binaries": ["samtools", "tabix", "bgzip"],
        "tarball": (
            "https://github.com/samtools/samtools/releases/download/"
            + SAMTOOLS_VER
            + "/samtools-"
            + SAMTOOLS_VER
            + ".tar.bz2"
        ),
        "dir": "samtools-" + SAMTOOLS_VER,
        "version": version.parse(SAMTOOLS_VER),
        "version_command": ["--version"],
        "make": [],
        "make_extra_dirs": [HTSLIB_DIR],
        "configure": [],
        "configure_extra_dirs": [HTSLIB_DIR],
        "copy_binaries": [
            "samtools",
            HTSLIB_DIR + "/bgzip",
            HTSLIB_DIR + "/tabix",
            HTSLIB_DIR + "/htsfile",
        ],
    }
    # "genometools": {
    #     "binaries": ["gt"],
    #     "tarball": (
    #         "https://github.com/genometools/genometools/archive/v"
    #         + GT_VER
    #         + ".tar.gz"
    #     ),
    #     "dir": "genometools-" + GT_VER,
    #     "version": version.parse(GT_VER),
    #     "version_command": ["--version"],
    #     "make": [
    #         "install",
    #         "prefix=" + str(INSTALL_PATH),
    #         "cairo=no",
    #         "useshared=no",
    #     ],
    # },
}


class DependencyInstaller(object):
    """Install and check binary dependencies."""

    def __init__(self, dependency_dict, force=False):
        """Initialize dictionary of dependencies."""
        self.dependency_dict = dependency_dict
        self.force = force
        self.dependencies = tuple(dependency_dict.keys())
        self.versions_checked = False
        self.bin_path = BIN_PATH
        self.bin_path_exists = self.bin_path.exists()
        self.bin_path_writable = os.access(self.bin_path, os.W_OK)
        self.bin_path_in_path = str(self.bin_path) in os.environ["PATH"].split(
            ":"
        )

    def check_all(self, verbose=True):
        """Check all dependencies for existence and version."""
        for dep in self.dependencies:
            target_version = self.dependency_dict[dep]["version"]
            version_command = self.dependency_dict[dep]["version_command"]
            self.dependency_dict[dep]["installed"] = not self.force
            for bin in self.dependency_dict[dep]["binaries"]:
                if sh.which(bin) is None:
                    if verbose:
                        print(
                            f"Binary {bin} of dependency {dep} is not"
                            " installed"
                        )
                    self.dependency_dict[dep]["installed"] = False
                else:
                    exe = sh.Command(bin)
                    ver_out = exe(*version_command, _err_to_out=True)
                    installed_version = version.parse(
                        (ver_out.split("\n")[0]).split()[-1]
                    )
                    if installed_version == target_version:
                        ver_str = (
                            f"{bin} version at recommended version"
                            f" {installed_version}."
                        )
                    elif installed_version < target_version:
                        ver_str = (
                            f"{bin} installed {installed_version} < target"
                            f" {target_version}."
                        )
                        self.dependency_dict[dep]["installed"] = False
                    elif installed_version > target_version:
                        ver_str = (
                            f"installed {installed_version} exceeds target"
                            f" {target_version}."
                        )
                    if verbose:
                        print(f"{dep}: {exe} {ver_str}")
        self.versions_checked = True
        # Check that bin directory exists and is writable.
        if self.bin_path_exists:
            bin_path_state = "exists, "
        else:
            bin_path_state = "doesn't exist, "
        if self.bin_path_writable:
            bin_path_state += "writable, "
        else:
            bin_path_state += "not writable, "
        if self.bin_path_in_path:
            bin_path_state += "in path."
        else:
            bin_path_state += "not in path."
        if verbose:
            print(f"Bin dir '{self.bin_path}' {bin_path_state}")

    def install_list(self, deplist):
        """Install needed dependencies from a list."""
        self.check_all(verbose=False)
        if deplist == ("all",):
            deplist = self.dependencies
        install_list = [
            dep for dep in deplist if not self.dependency_dict[dep]["installed"]
        ]
        if len(install_list):
            if not self.bin_path_exists:
                print(
                    f"ERROR--Installation directory {self.bin_path} does not"
                    " exist."
                )
                sys.exit(1)
            if not self.bin_path_writable:
                print(
                    f"ERROR--Installation directory {self.bin_path} is not"
                    " writable."
                )
                sys.exit(1)
            if not self.bin_path_in_path:
                print(
                    f"ERROR--Installation directory {self.bin_path} is not in"
                    " PATH."
                )
                sys.exit(1)
        for dep in install_list:
            self.install(dep)

    def _git(self, dep, dep_dict, verbose):
        """Git clone from list."""
        from sh import git  # isort:skip

        for repo in dep_dict["git_list"]:
            if verbose:
                print(f"  cloning {dep} repo {repo}")
            git.clone(repo)

    def _download_untar(self, dep, dep_dict, verbose, progressbar=True):
        """Download and untar tarball."""
        from sh import tar  # isort:skip

        download_url = dep_dict["tarball"]
        dlname = download_url.split("/")[-1]
        download_path = Path(".") / dlname
        if verbose:
            print(f"downloading {download_url} to {dlname}")
        if progressbar:
            trackers = (ProgressTracker(DataTransferBar()),)
        else:
            trackers = None
        request_download(download_url, download_path, trackers=trackers)
        if verbose:
            print(
                f"downloaded file {download_path}, size"
                f" {download_path.stat().st_size}"
            )
        tar_output = tar("xvf", download_path)
        if verbose:
            print(tar_output)
            print("untar done")

    def _configure(self, dep, dep_dict, verbose):
        """Run configure to set up the build."""
        if verbose:
            print(f"  configuring {dep} in {Path.cwd()}")
        configure = sh.Command("./configure")
        try:
            configure_out = configure()
        except sh.ErrorReturnCode:
            print("ERROR--configure failed.")
            sys.exit(1)
        if verbose:
            print(configure_out)

    def _make(self, dep, dep_dict, verbose):
        """Run make to build package."""
        from sh import make  # isort:skip

        if verbose:
            print(f"  making {dep} in {Path.cwd()}")
        try:
            make_out = make(dep_dict["make"])
        except sh.ErrorReturnCode:
            print("ERROR--make failed.")
            sys.exit(1)
        if verbose:
            print(make_out)

    def _make_install(self, dep, dep_dict, verbose):
        """Run make install to install a package."""
        from sh import make  # isort:skip

        print(f"  installing {dep} into {self.bin_path}")
        install_out = make.install(dep_dict["make_install"])
        if verbose:
            print(install_out)

    def _copy_binaries(self, dep, dep_dict, verbose):
        """Copy the named binaries to the bin directory."""
        print(f"  copying {dep} into {self.bin_path}")
        for bin in dep_dict["copy_binaries"]:
            binpath = Path(bin)
            shutil.copy2(binpath, self.bin_path / binpath.name)

    def install(self, dep, verbose=True):
        """Install a particular dependency."""
        print(f"installing {dep}")
        dep_dict = self.dependency_dict[dep]
        with tempfile.TemporaryDirectory() as tmp:
            tmppath = Path(tmp)
            if verbose:
                print(f'build directory: "{tmppath}"')
            os.chdir(tmppath)
            #
            # Get the sources.  Alternatives are git or download.
            #
            if "git_list" in dep_dict:
                self._git(dep, dep_dict, verbose)
            elif "tarball" in dep_dict:
                self._download_untar(dep, dep_dict, verbose)
            #
            # Change to the work directory.
            #
            if verbose:
                print(f'building in directory {dep_dict["dir"]}')
            dirpath = Path(".") / dep_dict["dir"]
            if not dirpath.exists():
                raise ValueError(f'directory "{dirpath}" does not exist.')
            if not dirpath.is_dir():
                raise ValueError(f'directory "{dirpath}" is not a directory.')
            os.chdir(dirpath)
            workpath = Path.cwd()
            #
            # Build the executables.
            #
            if "configure" in dep_dict:
                self._configure(dep, dep_dict, verbose)
            if "configure_extra_dirs" in dep_dict:
                for newdir in dep_dict["configure_extra_dirs"]:
                    os.chdir(workpath / newdir)
                    self._configure(dep, dep_dict, verbose)
                    os.chdir(workpath)
            if "make" in dep_dict:
                self._make(dep, dep_dict, verbose)
            if "make_extra_dirs" in dep_dict:
                for newdir in dep_dict["make_extra_dirs"]:
                    os.chdir(workpath / newdir)
                    self._make(dep, dep_dict, verbose)
                    os.chdir(workpath)
            #
            # Install the executables.
            #
            if "make_install" in dep_dict:
                self._make_install(dep, dep_dict, verbose)
            elif "copy_binaries" in dep_dict:
                self._copy_binaries(dep, dep_dict, verbose)


@cli.command()
@click.option(
    "--force/--no-force",
    help="Force overwrites of existing binaries.",
    default=False,
)
@click.argument("dependencies", nargs=-1)
def install(dependencies, force):
    """Check for/install binary dependencies.

    \b
    Example:
        bionorm install all

    """
    installer = DependencyInstaller(DEPENDENCY_DICT, force=force)
    if dependencies == ():
        installer.check_all()
        return
    installer.install_list(dependencies)
    print("installer done")
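# --- Usage sketch (editorial addition, not part of the original module) ---
# A hedged example of exercising DependencyInstaller programmatically instead
# of through the click CLI. The import path `bionorm.installer` is a
# hypothetical assumption; only DependencyInstaller, DEPENDENCY_DICT, and the
# "samtools" key are taken from the module above.
#
# from bionorm.installer import DependencyInstaller, DEPENDENCY_DICT
#
# installer = DependencyInstaller(DEPENDENCY_DICT, force=False)
# installer.check_all()                  # report versions and bin-dir state
# installer.install_list(("samtools",))  # build and copy only what is missing
#
# Equivalent CLI calls (entry point name per the docstring example above):
#   $ bionorm install samtools
#   $ BIONORM_INSTALL_DIR=$HOME/.local bionorm install all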
[ "tempfile.TemporaryDirectory", "click.argument", "sh.make.install", "packaging.version.parse", "shutil.copy2", "click.option", "sh.which", "sh.Command", "sh.tar", "pathlib.Path", "progressbar.DataTransferBar", "requests_download.download", "os.chdir", "sh.git.clone", "sh.make", "pathlib.Path.cwd", "os.access", "sys.exit" ]
[((11399, 11500), 'click.option', 'click.option', (['"""--force/--no-force"""'], {'help': '"""Force overwrites of existing binaries."""', 'default': '(False)'}), "('--force/--no-force', help=\n 'Force overwrites of existing binaries.', default=False)\n", (11411, 11500), False, 'import click\n'), ((11512, 11552), 'click.argument', 'click.argument', (['"""dependencies"""'], {'nargs': '(-1)'}), "('dependencies', nargs=-1)\n", (11526, 11552), False, 'import click\n'), ((592, 629), 'pathlib.Path', 'Path', (['os.environ[INSTALL_ENVIRON_VAR]'], {}), '(os.environ[INSTALL_ENVIRON_VAR])\n', (596, 629), False, 'from pathlib import Path\n'), ((1073, 1099), 'packaging.version.parse', 'version.parse', (['GFFREAD_VER'], {}), '(GFFREAD_VER)\n', (1086, 1099), False, 'from packaging import version\n'), ((1557, 1584), 'packaging.version.parse', 'version.parse', (['SAMTOOLS_VER'], {}), '(SAMTOOLS_VER)\n', (1570, 1584), False, 'from packaging import version\n'), ((2909, 2942), 'os.access', 'os.access', (['self.bin_path', 'os.W_OK'], {}), '(self.bin_path, os.W_OK)\n', (2918, 2942), False, 'import os\n'), ((7456, 7520), 'requests_download.download', 'request_download', (['download_url', 'download_path'], {'trackers': 'trackers'}), '(download_url, download_path, trackers=trackers)\n', (7472, 7520), True, 'from requests_download import download as request_download\n'), ((7703, 7728), 'sh.tar', 'tar', (['"""xvf"""', 'download_path'], {}), "('xvf', download_path)\n", (7706, 7728), False, 'from sh import tar\n'), ((8002, 8027), 'sh.Command', 'sh.Command', (['"""./configure"""'], {}), "('./configure')\n", (8012, 8027), False, 'import sh\n'), ((8800, 8838), 'sh.make.install', 'make.install', (["dep_dict['make_install']"], {}), "(dep_dict['make_install'])\n", (8812, 8838), False, 'from sh import make\n'), ((655, 675), 'pathlib.Path', 'Path', (['sys.executable'], {}), '(sys.executable)\n', (659, 675), False, 'from pathlib import Path\n'), ((6882, 6897), 'sh.git.clone', 'git.clone', (['repo'], {}), '(repo)\n', (6891, 6897), False, 'from sh import git\n'), ((7168, 7177), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (7172, 7177), False, 'from pathlib import Path\n'), ((8460, 8482), 'sh.make', 'make', (["dep_dict['make']"], {}), "(dep_dict['make'])\n", (8464, 8482), False, 'from sh import make\n'), ((9127, 9136), 'pathlib.Path', 'Path', (['bin'], {}), '(bin)\n', (9131, 9136), False, 'from pathlib import Path\n'), ((9149, 9200), 'shutil.copy2', 'shutil.copy2', (['binpath', '(self.bin_path / binpath.name)'], {}), '(binpath, self.bin_path / binpath.name)\n', (9161, 9200), False, 'import shutil\n'), ((9384, 9413), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (9411, 9413), False, 'import tempfile\n'), ((9444, 9453), 'pathlib.Path', 'Path', (['tmp'], {}), '(tmp)\n', (9448, 9453), False, 'from pathlib import Path\n'), ((9545, 9562), 'os.chdir', 'os.chdir', (['tmppath'], {}), '(tmppath)\n', (9553, 9562), False, 'import os\n'), ((10298, 10315), 'os.chdir', 'os.chdir', (['dirpath'], {}), '(dirpath)\n', (10306, 10315), False, 'import os\n'), ((10339, 10349), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (10347, 10349), False, 'from pathlib import Path\n'), ((6112, 6123), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6120, 6123), False, 'import sys\n'), ((6333, 6344), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6341, 6344), False, 'import sys\n'), ((6552, 6563), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6560, 6563), False, 'import sys\n'), ((8155, 8166), 'sys.exit', 
'sys.exit', (['(1)'], {}), '(1)\n', (8163, 8166), False, 'import sys\n'), ((8552, 8563), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8560, 8563), False, 'import sys\n'), ((10030, 10039), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (10034, 10039), False, 'from pathlib import Path\n'), ((3480, 3493), 'sh.which', 'sh.which', (['bin'], {}), '(bin)\n', (3488, 3493), False, 'import sh\n'), ((3819, 3834), 'sh.Command', 'sh.Command', (['bin'], {}), '(bin)\n', (3829, 3834), False, 'import sh\n'), ((7385, 7402), 'progressbar.DataTransferBar', 'DataTransferBar', ([], {}), '()\n', (7400, 7402), False, 'from progressbar import DataTransferBar\n'), ((10646, 10673), 'os.chdir', 'os.chdir', (['(workpath / newdir)'], {}), '(workpath / newdir)\n', (10654, 10673), False, 'import os\n'), ((10754, 10772), 'os.chdir', 'os.chdir', (['workpath'], {}), '(workpath)\n', (10762, 10772), False, 'import os\n'), ((10984, 11011), 'os.chdir', 'os.chdir', (['(workpath / newdir)'], {}), '(workpath / newdir)\n', (10992, 11011), False, 'import os\n'), ((11087, 11105), 'os.chdir', 'os.chdir', (['workpath'], {}), '(workpath)\n', (11095, 11105), False, 'import os\n'), ((7968, 7978), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (7976, 7978), False, 'from pathlib import Path\n'), ((8410, 8420), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (8418, 8420), False, 'from pathlib import Path\n')]