seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6811793818 | from random import random
import numpy as np
import time
from math import *
import os
import sys
sys.setrecursionlimit(10**6)
clusters = []
visible_cells = []
class Cluster:
    """Find connected components of 'visible' cells (value 0.5) in the global
    ``map`` grid and record those classified as holes in ``clusters``.

    NOTE(review): relies on the module globals ``map`` (2-D grid of
    0.0 / 0.5 / 1.0 values) and ``visible_cells`` (list of (x, y) seeds);
    the global ``clusters`` list is reset on construction and filled by
    make_clusters().
    """

    # 4-connectivity for the flood fill, 8-connectivity for hole classification.
    _TRAVERSE_OFFSETS = ((1, 0), (-1, 0), (0, 1), (0, -1))
    _NEIGHBOR_OFFSETS = ((-1, 0), (1, 0), (0, -1), (0, 1),
                         (-1, -1), (1, 1), (1, -1), (-1, 1))

    def __init__(self, m, n):
        # Grid dimensions and a visited mask shared by all traversals.
        self.rows = m
        self.cols = n
        self.visited_map = np.zeros((m, n), dtype=bool)
        global clusters
        clusters = []

    def traverse(self, r, c):
        """Depth-first flood fill collecting all 0.5 cells reachable from (r, c)."""
        # Stop at the grid boundary or on already-visited cells.
        if r < 0 or r >= self.rows or c < 0 or c >= self.cols or self.visited_map[r][c]:
            return
        # Only 'visible but uncovered' cells (0.5) belong to a cluster.
        if map[r][c] != 0.5:
            return
        self.visited_map[r][c] = True
        self.component.append((c, r))  # stored as (x, y) == (col, row)
        for dr, dc in self._TRAVERSE_OFFSETS:
            self.traverse(r + dr, c + dc)

    def make_clusters(self):
        """Build a connected component from every visible seed cell and keep
        only the components classified as holes."""
        for (x, y) in visible_cells:
            (r, c) = (y, x)
            if self.visited_map[r][c]:
                continue
            # Fresh coordinate list for the component rooted at (r, c).
            self.component = []
            self.traverse(r, c)
            if self.is_Hole(self.component):
                clusters.append(np.array(self.component))

    def is_Hole(self, component):
        """Classify *component* as a hole: it touches no boundary cell and has
        more covered (1.0) than unexplored (0.0) distinct 8-neighbours.

        BUGFIX: component cells are stored as (col, row) by traverse(), but the
        original unpacked them as (row, col), transposing every map access.
        The duplicated 8-way neighbour checks are folded into one offset loop.
        """
        rows = len(map)
        cols = len(map[0])
        visited_map = np.zeros((rows, cols), dtype=bool)
        covered = []  # neighbouring cells already covered (1.0)
        unexp = []    # neighbouring cells still unexplored (0.0)
        for (c, r) in component:  # (x, y) order -- see traverse()
            # Any boundary cell disqualifies the whole component.
            if not (0 < r < rows - 1 and 0 < c < cols - 1):
                return False
            for dr, dc in self._NEIGHBOR_OFFSETS:
                nr, nc = r + dr, c + dc
                if visited_map[nr][nc]:
                    continue
                if map[nr][nc] == 1.0:
                    visited_map[nr][nc] = True
                    covered.append((nr, nc))
                elif map[nr][nc] == 0.0:
                    visited_map[nr][nc] = True
                    unexp.append((nr, nc))
        # A hole is mostly surrounded by covered cells.
        return len(unexp) < len(covered)
def update_visible(row, col, D, l=1):
    """Mark every unexplored cell (0.0) inside the ``l``-cell square window
    around (row, col) as visible (0.5) and record it in ``visible_cells``.

    ``D`` is the grid dimension (the global ``map`` is D x D).
    """
    lo_r = int(max(0, row - l))
    hi_r = int(min(D, row + l + 1))
    lo_c = int(max(0, col - l))
    hi_c = int(min(D, col + l + 1))
    for r in range(lo_r, hi_r):
        for c in range(lo_c, hi_c):
            if map[r][c] != 0.0:
                continue
            map[r][c] = 0.5
            visible_cells.append((r, c))
def main(D,R,test):
global map
map = np.full ((D,D),0.0)
files = []
Prev_row = []
Prev_col = []
for r in range(R):
path = os.path.join(str(D)+'x'+str(D)+'_'+str(R)+'bots','TEST'+str(test),'WPts','robot_'+str(r))
files.append(open(path,'r'))
NewLine = files[r].readline()
row,col = int (NewLine.split(' ')[0]), int (NewLine.split(' ')[1])
Prev_row.append(row)
Prev_col.append(col)
update_visible(row,col,D)
while True:
line_check = False
for r in range(R):
(row,col) = Prev_row[r],Prev_col[r]
map[row][col] = 1.0
for r in range(R):
NewLine = files[r].readline()
if len(NewLine)>0:
line_check = True
row,col = int (NewLine.split(' ')[0]), int (NewLine.split(' ')[1])
update_visible(row,col,D)
Prev_row[r] = row
Prev_col[r] = col
else:
line_check = False
break
if(line_check==False):
break
# Command-line interface: robot count, workspace size and test number.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-r', dest='num_robots', type=int, help='Number of robots')
parser.add_argument('-d', dest='dimension', type=int, help='Size of workspace')
parser.add_argument('-t', default=1, dest='test', type=int, help='test no')
args = parser.parse_args()
R = int(args.num_robots)
D = int(args.dimension)
test = int(args.test)
# NOTE(review): main(D, R, test) is never invoked here -- confirm whether the
# entry-point call was lost or this module is driven externally.
| Luckykantnayak/uav-project-2 | performance_check.py | performance_check.py | py | 8,208 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.setrecursionlimit",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_... |
7595557598 | #!/usr/bin/env python3
import argparse
import sys
import os
from pathlib import Path
import json
import io
import re
import tempfile
import shutil
import copy
# One or more whitespace characters (anchored at the start when used with .match).
re_has_whitespace = re.compile(r"\s+")
# Four whitespace characters followed by at least one more, i.e. indentation
# deeper than a single 4-space level.
re_has_indent = re.compile(r"\s{4}\s+")
# A blank or whitespace-only line.
re_empty_line = re.compile(r"^\s*$")
def parse_args(args):
    """Parse the command-line options for the module extractor."""
    parser = argparse.ArgumentParser()
    options = (
        (("-p", "--package"), dict(required=False, help="the namespace of the package to process")),
        (("-b", "--build"), dict(required=True, help="the path of the build folder")),
        (("-f", "--force"), dict(action="store_true", required=False,
                                 help="force generation, as opposed to generating only if more recent")),
    )
    for flags, kwargs in options:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args(args)
def error(msg):
    """Report an error-tagged message on stderr."""
    print(f"[ERROR] {msg}", file=sys.stderr)
def progress(msg):
    """Report a progress-tagged message on stdout."""
    print(f"[PROGRESS] {msg}")
def get_package_filename(build, package):
    """Return the path of a package descriptor JSON inside the build tree."""
    return str(Path(build, "packages", package + ".json"))
def get_source_filename(build, source):
    """Return the path of a source file, preferring the unmodified copy over
    the locally-modified fallback."""
    files_dir = Path(build) / "files"
    candidate = files_dir / (source + ".yapl")
    if candidate.exists():
        return str(candidate)
    return str(files_dir / "modified_locally" / (source + ".yapl"))
def validate_parsed_args(parsed_args):
    """Sanity-check the parsed CLI options; return 0 when valid, 1 otherwise."""
    if not os.path.isdir(parsed_args.build):
        error("The build parameter must specify a directory file")
        return 1
    package = parsed_args.package
    if package is not None:
        if not os.path.isfile(get_package_filename(parsed_args.build, package)):
            error("The parameters must specify a package file")
            return 1
    return 0
def main(parsed_args):
    """Drive the extraction: prepare the modules directory, then process one
    named package or every package descriptor found in the build tree.

    Always returns 0 (the process exit code).
    """
    progress("Starting")
    progress("Preparing output directories")
    modules_path = Path(parsed_args.build) / "modules"
    modules_path.mkdir(parents=True, exist_ok=True)
    if parsed_args.force:
        # --force: wipe previous output and start from a clean directory.
        shutil.rmtree(modules_path, ignore_errors=False, onerror=None)
        modules_path.mkdir(parents=True, exist_ok=True)
    if parsed_args.package:
        process_package(parsed_args.build, parsed_args.package)
    else:
        packages_path = Path(parsed_args.build) / "packages"
        packages = list(packages_path.glob("*.json"))
        n = 1
        for package in packages:
            progress("Processing package {} ({} of {})".format(package, n, len(packages)))
            n = n + 1
            process_package(parsed_args.build, Path(package).stem)
    progress("Finished")
    return 0
def load_package(build, package):
    """Read and deserialize the package descriptor JSON for *package*."""
    with io.open(get_package_filename(build, package)) as json_file:
        return json.load(json_file)
def load_sourcefile(build, hash):
    """Return the full text of the source file identified by *hash*.

    (Parameter name ``hash`` kept as-is for callers, although it shadows
    the builtin.)
    """
    with io.open(get_source_filename(build, hash), "r") as yapl_file:
        return yapl_file.read()
def dedent(l):
    """Strip one level of indentation (a tab or four spaces) from *l*.

    Blank / whitespace-only lines collapse to the empty string; any other
    line without leading indentation is a hard error.
    """
    if l == "":
        return l
    if l[0] == "\t":
        return l[1:]
    if l[:4] == "    ":
        return l[4:]
    if l.strip() == "":
        return ""
    # BUGFIX: the original had an unreachable `return l` after this assert.
    assert False, "expected leading whitespace, not '{}'".format(l)
def process_package(build, package):
    """Extract modules for every source file of *package* that has not been
    processed yet (i.e. no modules/<revision> directory exists)."""
    progress("Processing package " + package)
    package_descriptor = load_package(build, package)
    source_files = list(package_descriptor["source_files"].items())
    modules_path = Path(build) / "modules"
    n = 1
    for source_filename, sourcefile_info in source_files:
        progress("Processing sourcefile {} ({} of {})".format(source_filename, n, len(source_files)))
        n = n + 1
        revision = sourcefile_info["revision"]
        if os.path.isdir(modules_path / revision):
            # TODO: this can lead to inconsistency
            continue
        # NOTE(review): in the flattened original this call sat after the
        # save_modules() body; it is placed here -- the only scope where all
        # four arguments are bound -- so unprocessed revisions get extracted.
        save_modules(build, package_descriptor, source_filename, sourcefile_info)


def save_modules(build, package_descriptor, source_filename, sourcefile_info):
    """Split one source file into module chunks and persist each via a
    ModuleExtractor."""
    revision = sourcefile_info["revision"]
    source_file = load_sourcefile(build, revision)
    lines = source_file.split("\n")
    extractor = None
    parent_metadata = {
        "package": package_descriptor["identifier"],
        "identifier": package_descriptor["identifier"],
        "source": {
            "filename": source_filename,
            "revision": revision,
        },
        "symbols": copy.deepcopy(package_descriptor["symbols"])
    }
    for line in lines:
        if line.startswith("#!"):
            # ignore hash-bangs
            pass
        elif extractor is None and re_has_indent.match(line):
            # Deeply indented content outside any module is skipped.
            pass
        elif extractor is not None:
            if not extractor.process_line(line):
                extractor.save(build)
                extractor = None
        elif ModuleExtractor.matches(line):
            extractor = ModuleExtractor(parent_metadata, line)
class Extractor:
    """Base class for pulling one top-level construct (module, class,
    function, variable, type) out of a flat YAPL source listing.

    Subclasses fill in ``self._metadata["identifier"]`` and may override
    ``process_line`` / ``save`` / ``get_filename``.
    """

    def __init__(self, parent_metadata, category):
        self._parent_metadata = parent_metadata
        self._metadata = {
            "identifier": None,  # to be filled by subclasses
            "category": category,
            "source": parent_metadata["source"],
        }
        # Record the innermost enclosing scope: module wins over package.
        if "module" in parent_metadata:
            self._metadata["module"] = parent_metadata["module"]
        elif "package" in parent_metadata:
            self._metadata["package"] = parent_metadata["package"]
        self._lines = []

    def process_line(self, line):
        """Consume one line; return False when the construct is closed
        (a line starting with '}')."""
        if line.startswith("}"):
            return False
        self._lines.append(dedent(line))
        return True

    def get_path(self, build):
        # Ensure the output directory for this construct exists.
        _filename = self.get_filename(build)
        _path = Path(_filename).parents[0]
        _path.mkdir(parents=True, exist_ok=True)
        return str(_path)

    def get_filename(self, build):
        # Abstract: subclasses must supply the output file stem.
        assert False

    def save(self, build):
        """Write the collected lines (.yapl) and metadata (.json) side by side."""
        _path = self.get_path(build)
        # NOTE(review): redundant -- get_path() already created the directory.
        Path(_path).mkdir(parents=True, exist_ok=True)

        def strip_empty_lines(lines):
            # this is a brute force evil algorithm, fix this one day.
            while lines and re_empty_line.match(lines[0]):
                lines.pop(0)
            while lines and re_empty_line.match(lines[-1]):
                lines.pop(-1)
            # Guarantee a trailing newline when joined.
            lines.append("")
            return lines

        _lines = strip_empty_lines(self._lines)
        _filename = self.get_filename(build)
        with io.open(_filename + ".yapl", "w") as yapl_file:
            # NOTE(review): strip_empty_lines is applied a second time here;
            # apart from the trailing "" it appends, the pass is idempotent.
            yapl_file.write("\n".join(strip_empty_lines(_lines)))
        with io.open(_filename + ".json", 'w') as metadata_file:
            json.dump(self._metadata, metadata_file, sort_keys=True, indent=4)

    @classmethod
    def matches(cls, line):
        """True when *line* opens a construct of this extractor's kind."""
        m = cls._expression.match(line)
        return m is not None and m and True
class FunctionExtractor(Extractor):
    """Recognizes function-like constructs; records their name/visibility but
    deliberately discards their bodies (process_line collects nothing and
    save is a no-op)."""

    # Optional "private", then one-or-more dash-joined function kinds,
    # then the remainder (name + signature).
    _expression = re.compile(r"^(private\s+){0,1}\s*("
                             r"((function)|(constructor)|(destructor)|(method)|(generator)|(closure))"
                             r"(\-((function)|(constructor)|(destructor)|(method)|(generator)|(closure)))*"
                             r")\s+(.+)")
    # The function name is everything before the first "(".
    _identifier = re.compile(r"\s*([^\(]+)(.*)")

    def __init__(self, parent_metadata, line):
        super().__init__(parent_metadata, "functions")
        m = FunctionExtractor._expression.match(line)
        _private = m.group(1) is not None and m.group(1).startswith("private")
        # The last group of _expression is the post-keyword remainder.
        remainder = m.group(len(m.groups()))
        m = FunctionExtractor._identifier.match(remainder)
        _name = m.group(1)
        remainder = m.group(len(m.groups()))
        self._metadata["identifier"] = parent_metadata["identifier"] + "." + _name
        self._metadata["function"] = {
            "name": _name,
            "private": _private,
        }

    def process_line(self, line):
        # Swallow the body; only the closing "}" terminates the construct.
        if line.startswith("}"):
            return False
        return True

    def save(self, build):
        # Function bodies are not persisted individually.
        return
class ModuleFunctionExtractor(FunctionExtractor):
    """A function declared directly inside a module; registers itself in the
    parent module's symbol table."""

    def __init__(self, parent, line):
        super().__init__(parent._metadata, line)
        parent.declare_symbol("functions", self._metadata["function"]["name"], self._metadata["identifier"])
class VariableExtractor(Extractor):
    """Recognizes 'name : type :=/= value' variable declarations and collects
    any indented continuation lines of the initializer."""

    # Optional "private", then: name : type (:= | =) initializer.
    _expression = re.compile(r"^(private\s+){0,1}\s*("
                             r"(\w+)\s*:\s*"
                             r"(\w+)\s*"
                             r"((:{0,1}=)\s*(.*))"
                             r")$"
                             )

    def __init__(self, parent_metadata, line):
        super().__init__(parent_metadata, "variables")
        m = VariableExtractor._expression.match(line)
        _private = m.group(1) is not None and m.group(1).startswith("private")
        _name = m.group(3)
        _type = m.group(4)
        _op = m.group(6)
        # ':=' declares a mutable variable, '=' an immutable one.
        _mutable = m.group(6) is not None and _op == ":="
        _remainder = m.group(len(m.groups()))
        # TODO: extends
        self._metadata["identifier"] = parent_metadata["identifier"] + "." + _name
        self._metadata["scope"] = parent_metadata["identifier"]
        self._metadata["variable"] = {
            "type": _type,
            "name": _name,
            "private": _private,
            "mutable": _mutable
        }
        # Re-emit the declaration in a normalized single-spaced form.
        self._lines.append("{}:{} {} {}".format(_name, _type, _op, _remainder))

    def process_line(self, line):
        # Indented lines continue the initializer expression.
        if re_has_indent.match(line):
            self._lines.append(dedent(line))
            return True
        return False

    def save(self, build):
        # Variables are not persisted as standalone files.
        return
class ClassExtractor(Extractor):
    """Recognizes class-like declarations (class / abstract class / interface
    / structure) and records their name and visibility."""

    _expression = re.compile(r"^(private\s+){0,1}\s*("
                             r"((abstract\s+class)|(class)|(interface)|(structure))"
                             r")\s+(.+)")
    # The first word of the remainder is the class name.
    _identifier = re.compile(r"(\w+).*")

    def __init__(self, parent_metadata, line):
        super().__init__(parent_metadata, "classes")
        m = ClassExtractor._expression.match(line)
        _private = m.group(1) is not None and m.group(1).startswith("private")
        remainder = m.group(len(m.groups()))
        _name = ClassExtractor._identifier.match(remainder).group(1)
        self._metadata["identifier"] = parent_metadata["identifier"] + "." + _name
        self._metadata["class"] = {
            "name": _name,
            "private": _private,
        }

    def save(self, build):
        # Class bodies are not persisted individually.
        return
class ModuleClassExtractor(ClassExtractor):
    """A class declared directly inside a module; registers itself in the
    parent module's symbol table."""

    def __init__(self, parent, line):
        super().__init__(parent._metadata, line)
        self._parent = parent
        parent.declare_symbol("classes", self._metadata["class"]["name"], self._metadata["identifier"])
class ModuleVariableExtractor(VariableExtractor):
    """A variable declared directly inside a module; registers itself in the
    parent module's symbol table."""

    def __init__(self, parent, line):
        super().__init__(parent._metadata, line)
        parent.declare_symbol("variables", self._metadata["variable"]["name"], self._metadata["identifier"])
class TypeExtractor(Extractor):
    """Recognizes 'type' / 'alias' declarations; the construct is treated as
    complete after its first continuation line."""

    _expression = re.compile(r"^(private\s+){0,1}\s*("
                             r"(type)|(alias)"
                             r")\s+(.+)$"
                             )
    # The type name ends at ':', '{', '(', '=' or whitespace.
    _identifier = re.compile(r"\s*([^:\{\(\s=]+)(.*)")

    def __init__(self, parent_metadata, line):
        super().__init__(parent_metadata, "types")
        m = TypeExtractor._expression.match(line)
        _private = m.group(1) is not None and m.group(1).startswith("private")
        remainder = m.group(len(m.groups()))
        m = TypeExtractor._identifier.match(remainder)
        _name = m.group(1)
        self._metadata["identifier"] = parent_metadata["identifier"] + "." + _name
        self._metadata["type"] = {
            "name": _name,
            "private": _private
        }

    def process_line(self, line):
        # Keep the line, then signal completion: type declarations are short.
        super().process_line(line)
        return False

    def save(self, build):
        # Types are not persisted as standalone files.
        return
class ModuleTypeExtractor(TypeExtractor):
    """A type declared directly inside a module; registers itself in the
    parent module's symbol table."""

    def __init__(self, parent, line):
        super().__init__(parent._metadata, line)
        parent.declare_symbol("types", self._metadata["type"]["name"], self._metadata["identifier"])
class ModuleExtractor(Extractor):
    """Extracts a whole module/service/process declaration and, on save,
    re-scans its collected body for nested functions, classes, variables
    and types."""

    _expression = re.compile(r'^((module)|(service)|(process)|(restservice))\s+(\w+)\s*\{\s*$')
    _import = re.compile(r'^\s{4}import\s+(.+)')
    _import_from = re.compile(r'^\s{4}import\s+(.*)\sfrom\s+(.+)')

    def __init__(self, parent_metadata, line):
        super().__init__(parent_metadata, "modules")
        m = ModuleExtractor._expression.match(line)
        _name = m.group(len(m.groups()))
        # A module must be named after its source file.
        assert _name == str(Path(parent_metadata["source"]["filename"]).stem)
        self._metadata["identifier"] = parent_metadata["identifier"] + "." + _name
        self._metadata["symbols"] = copy.deepcopy(parent_metadata["symbols"])

    def declare_symbol(self, symbol_category, symbol_name, symbol_identifier):
        # NOTE(review): symbol_category is currently unused; all symbols
        # share one flat table.
        self._metadata["symbols"][symbol_name] = symbol_identifier

    def get_filename(self, build):
        return str(Path(build) / "modules" / self._metadata["source"]["revision"] / "module")

    def process_line(self, line):
        """Intercept import lines to grow the symbol table; defer everything
        else to the base collector."""
        m = ModuleExtractor._import.match(line)
        if m:
            m_from = ModuleExtractor._import_from.match(line)
            if m_from:
                without_from = m_from.group(1)
                base_from = m_from.group(2)
            else:
                without_from = m.group(1)
                base_from = None
            for symbol in without_from.split(","):
                symbol = symbol.strip()
                # BUGFIX: the original used .match(), which can never fire on
                # a stripped symbol; .search() trims an "x as y" style alias.
                if re_has_whitespace.search(symbol):
                    symbol = re.split(r"\s+", symbol)[0]
                # Resolve through the symbol table into a per-iteration
                # target (BUGFIX: the original mutated import_from across
                # loop iterations, corrupting multi-symbol imports, and for
                # plain imports assigned symbol = import_from, i.e. None).
                if base_from is None:
                    target = self._metadata["symbols"].get(symbol, symbol)
                    self.declare_symbol("imports", symbol, target)
                elif symbol == "*":
                    target = self._metadata["symbols"].get(base_from, base_from)
                    self.declare_symbol("imports", "*." + target, target + ".*")
                else:
                    target = self._metadata["symbols"].get(base_from, base_from) + "." + symbol
                    self.declare_symbol("imports", symbol, target)
            return True
        return super().process_line(line)

    def save(self, build):
        """Run the nested extractors over the module body, then persist the
        module itself via the base-class save."""
        extractor = None
        for line in self._lines:
            # Content nested deeper than one level belongs to the current
            # construct only.
            if extractor is None and re_has_indent.match(line):
                continue
            if extractor is not None:
                if not extractor.process_line(line):
                    extractor.save(build)
                    extractor = None
            elif ModuleVariableExtractor.matches(line):
                extractor = ModuleVariableExtractor(self, line)
            elif ModuleFunctionExtractor.matches(line):
                extractor = ModuleFunctionExtractor(self, line)
            elif ModuleClassExtractor.matches(line):
                extractor = ModuleClassExtractor(self, line)
            elif ModuleTypeExtractor.matches(line):
                extractor = ModuleTypeExtractor(self, line)
        if extractor is not None:
            # Force the last construct to terminate cleanly.
            more = extractor.process_line("")
            assert (not more), "expected the extractor to complete"
            extractor.save(build)
        super().save(build)
if __name__ == "__main__":
    # Validate CLI options first; run the pipeline only when they check out.
    parsed_args = parse_args(sys.argv[1:])
    status = validate_parsed_args(parsed_args)
    if status == 0:
        status = main(parsed_args)
    sys.exit(status)
| padresmurfa/yapl | v1/2_modules_from_package/cli.py | cli.py | py | 15,896 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_... |
74446495144 | from typing import Dict, List
from aiplayground.api.bots import Bot
from aiplayground.api.tournaments.models import Participant, Tournament, Match, PlayerQueueEntry, MatchState
from collections import defaultdict
import operator
from aiplayground.exceptions import AlreadyInTournament
from aiplayground.logging import logger
from aiplayground.types import PlayerSID
def add_player(bot: Bot, tournament: Tournament) -> Participant:
    """Register *bot* as a tournament participant and schedule one pending
    match against every existing, non-disqualified participant.

    Raises AlreadyInTournament when the bot is already registered.
    """
    logger.debug("Getting tournament lock: %s", tournament.id)
    with tournament.lock():
        logger.debug("Got lock for tournament: %s", tournament.id)
        participants = tournament.participants
        participant_ids = {participant.bot.id for participant in participants}
        if bot.id in participant_ids:
            raise AlreadyInTournament
        # 1-based, monotonically increasing participant index.
        index = max(x.index for x in participants) + 1 if participants else 1
        participant = Participant.create(index=index, bot=bot, tournament=tournament)
        for opponent in participants:
            if opponent.disqualified:
                continue
            # Match index encodes both participants (newcomer * 100000 +
            # opponent), giving the scheduler a deterministic global order.
            Match.create(
                index=100000 * index + opponent.index,
                tournament=tournament,
                players=[participant, opponent],
                state=MatchState.pending,
            )
        return participant
def pick_match(tournament: Tournament) -> Match:
    """Select the lowest-index pending match whose players are all online,
    mark it running and return it.

    Raises IndexError when no pending match has all players online.
    """
    with tournament.lock():
        queued_players = PlayerQueueEntry.list(tournament_id=tournament.id)
        participants_by_id = {participant.id: participant for participant in tournament.participants}
        # BUGFIX: defaultdict(default_factory=list) does not set the factory;
        # it seeds the dict with a literal "default_factory" key and leaves
        # the factory unset, so .append() below raised KeyError.
        participant_sids: Dict[Participant, List[PlayerSID]] = defaultdict(list)
        for player in queued_players:
            participant = participants_by_id[player.participant_id]
            participant_sids[participant].append(player.sid)
        online_participants = set(participant_sids)
        matches = [
            match
            for match in tournament.matches
            if match.state == MatchState.pending and not set(match.players) - online_participants
        ]
        # Matches were created with a deterministic index ordering.
        sorted_matches = sorted(matches, key=operator.attrgetter("index"))
        match = sorted_matches[0]
        match.state = MatchState.running
        match.save()
        return match
| jackadamson/AI-Playground | aiplayground/api/tournaments/tournaments.py | tournaments.py | py | 2,268 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiplayground.api.bots.Bot",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "aiplayground.api.tournaments.models.Tournament",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "aiplayground.logging.logger.debug",
"line_number": 15,
"usage_ty... |
11602597643 | import streamlit as st
import time
import re
import chardet
import pandas as pd
import numpy as np
import geopandas as gpd
import matplotlib.pyplot as plt
from matplotlib.patches import ConnectionPatch
from functools import wraps
from shapely.geometry import Point
def main():
    """Streamlit entry point: layer-selection UI, then the manual-text or
    file-upload region-division workflow."""
    # Per-session counters survive Streamlit reruns via st.session_state.
    if 'run' not in st.session_state:
        st.session_state.run = 0
    if 'layer_selector' not in st.session_state:
        st.session_state.layer_selector = 0
    st.title('区域划分工具')
    region_dict = read_layer_and_check('qgis')
    selected_name, submit = layer_selector(region_dict)
    input_mode = input_mode_selector()
    # Only run after the user has confirmed a layer selection at least once.
    if st.session_state.layer_selector:
        if input_mode == '文本输入':
            run_manual_input(region_dict, selected_name)
        else:
            run_file_input(region_dict, selected_name)
    st.write('执行次数为:', st.session_state.run)
@st.cache
def read_layer_and_check(geofolder):
    """Load the layer-info CSV and every geo layer it references.

    Returns a dict mapping field name -> [layer file stem, GeoDataFrame],
    or None (after an st.error) when the CSV cannot be read.
    NOTE(review): assumes 图层信息.csv has columns 字段名称 / 图层名称 -- confirm.
    """
    try:
        dictionary = dict(pd.read_csv(f'.//{geofolder}//图层信息.csv', encoding='gb18030').loc[:, ['字段名称', '图层名称']].values)
        key_list = dictionary.keys()
        # MapInfo layers ship as shapefiles, everything else as GeoPackages.
        file_extension = 'shp' if geofolder == 'mapinfo' else 'gpkg'
        for index, name in enumerate(key_list):
            gdf = gpd.read_file(f'.//{geofolder}//{dictionary[name]}.{file_extension}', encoding='utf-8')
            if name not in list(gdf):
                st.error(f'图层字段<{name}>不在图层<{dictionary[name]}.{file_extension}>中')
            else:
                # Replace the stem with [stem, GeoDataFrame].
                dictionary[name] = [dictionary[name]]
                dictionary.setdefault(name, []).append(gdf)
        return dictionary
    except IOError:
        st.error(f'找不到图层信息')
def layer_selector(region_dictionary):
    """Render the layer multiselect form and a preview plot of the chosen
    layers. Returns (selected layer names, submit-button state)."""
    st.header('1、图层展示')
    with st.form(key='selector'):
        # st.subheader('图层信息选择')
        region_name = st.multiselect(
            "请选择图层",
            region_dictionary.keys(),
            default=['区县', '三方区域', '规划区域'],
        )
        # on_click bumps st.session_state.layer_selector (gates main()).
        submit = st.form_submit_button(label='确认', on_click=layer_selector_counter)
    figure = layer_ploting(region_dictionary, region_name, 3)
    if region_name:
        name_list = '、'.join(region_name)
        st.write(f'选择的图层为:{name_list}')
        st.pyplot(figure)
    return region_name, submit
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def layer_ploting(region_dictionary, region_name, fig_cols):
    """Plot each selected layer in a grid of ``fig_cols`` columns and return
    the matplotlib figure (an empty axes-less figure when nothing is chosen)."""
    plt.rcParams['font.size'] = 5
    num_fig = len(region_name)
    if num_fig > 0:
        nrows = (num_fig - 1) // fig_cols + 1
        fig, ax = plt.subplots(nrows, fig_cols, figsize=(3 * fig_cols, 3 * nrows))
        for i, field_name in enumerate(region_name):
            geo_df = region_dictionary[field_name][1]
            # plt.subplots returns a 1-D axes array for a single row.
            if nrows == 1:
                ax_i = ax[i]
            else:
                ax_rows, ax_cols = i // fig_cols, i % fig_cols
                ax_i = ax[ax_rows][ax_cols]
            # Fixed lon/lat display window.
            ax_i.set_xlim(119.1, 120.3)
            ax_i.set_ylim(31.1, 32.1)
            geo_df.plot(ax=ax_i, column=field_name, cmap='Spectral')
        # Hide the unused trailing axes in the last row.
        mod_num = num_fig % fig_cols
        if mod_num != 0:
            if nrows == 1:
                for n in range(mod_num, fig_cols):
                    ax[n].axis('off')
            else:
                for n in range(mod_num, fig_cols):
                    ax[nrows - 1][n].axis('off')
    else:
        fig, ax = plt.subplots()
        ax.axis('off')
    # st.write("Cache miss: layer_ploting")
    return fig
def input_mode_selector():
    """Sidebar radio choosing between file upload (文件导入) and pasted
    text input (文本输入); returns the selected label."""
    st.header('2、数据选择')
    st.sidebar.header('输入模式选择')
    return st.sidebar.radio(
        '请选择输入方式',
        ('文件导入', '文本输入'),
        help='首次执行请先在图层选择处点击确认。'
    )
def run_manual_input(region_dictionary, region_name):
    """Text-input workflow: parse pasted lon/lat pairs, divide them into the
    selected regions, then render a table, a map and a download button."""
    st.write('数据选择模式:文本输入')
    input_text = st.sidebar.text_input(
        '输入经纬度',
        value='例如:119.934 31.8528 119.939 31.84',
        help='输入经纬度数据,可直接复制粘贴excel表格中的经度、纬度2列数据'
    )
    df_source = text_to_df(input_text)
    st.write('数据源:')
    st.table(df_source)
    # Halt the script rerun here until the user presses the button.
    if not st.sidebar.button('执行区域划分'):
        st.stop()
    else:
        st.sidebar.header('输出结果')
        result = region_division(df_source, region_dictionary, region_name)
        st.header('3、输出表格')
        st.table(result)
        st.header('4、地图展示')
        # st.map expects 'lon'/'lat' column names.
        st.map(result.rename(columns={'经度': 'lon', '纬度': 'lat'}))
        st.sidebar.header('数据下载')
        name_list = '、'.join(region_name)
        st.sidebar.download_button(
            label='下载结果',
            data=ouput(result),
            file_name=f'区域划分结果-{name_list}.csv',
            mime='text/csv',
        )
def text_to_df(text):
    """Parse freeform text for lon/lat pairs (lon 11x.xxx, lat 31/32.xxx)
    and return them as a DataFrame with 经度/纬度 columns.

    Returns None (after flagging an input error) when nothing matches.
    """
    # BUGFIX: the decimal points were unescaped ('.') and matched any
    # character, so e.g. '119x934' would have been accepted as a longitude.
    search_result = re.findall(r'(?P<lon>1[12][0-9]\.\d+)[\s,,]*(?P<lat>3[12]\.\d+)', text)
    if search_result:
        point = {}
        for lon_lat in search_result:
            point.setdefault('经度', []).append(float(lon_lat[0]))
            point.setdefault('纬度', []).append(float(lon_lat[1]))
        return pd.DataFrame(data=point)
    else:
        st.error('输入格式错误')
def run_file_input(region_dictionary, region_name):
    """File-upload workflow: read a table, divide it into the selected
    regions, then render previews, summary charts and download buttons."""
    st.write('数据选择模式:文件导入')
    file_obj = st.sidebar.file_uploader(
        '上传一个表格',
        type=['csv', 'xlsx', 'xls'],
        help='上传文件格式为csv、xlsx、xls,需包含表头为经度、纬度的2列数据',
    )
    if file_obj:
        # Clean the data and run the region division.
        df_source = read_df(file_obj)
        if df_source is None:
            st.stop()
        st.sidebar.header('输出结果')
        result = region_division(df_source, region_dictionary, region_name)
        # Preview the source rows (multiples of 5, up to 10 by default).
        render_rows = 10 if df_source.shape[0] >= 10 else df_source.shape[0] // 5 * 5
        rows = st.sidebar.slider(
            '选择数据源显示行数',
            0, 50, render_rows, 5
        )
        st.write(f'数据源(前{rows}行):')
        st.dataframe(df_source.head(rows))
        # Random sample of the result.
        st.header('3、输出表格')
        sample_rows = st.sidebar.slider(
            '选择结果采样行数',
            0, 50, render_rows, 5
        )
        st.write(f'随机采样{sample_rows}行:')
        df_sample = result.sample(sample_rows)
        st.dataframe(df_sample)
        # Summary statistics and charts.
        st.header('4、统计图表')
        summary, rail_data = reslut_summary(result, region_name)
        fig_list = summary_ploting(summary, rail_data)
        for figure in fig_list:
            st.pyplot(figure)
        # Download buttons for the detail and summary CSVs.
        st.sidebar.header('数据下载')
        name_list = '、'.join(region_name)
        st.sidebar.download_button(
            label='下载明细结果',
            data=ouput(result),
            file_name=f'区域划分结果-{name_list}.csv',
            mime='text/csv',
            help='区域划分的明细数据',
        )
        st.sidebar.download_button(
            label='下载统计结果',
            data=output_summary(summary),
            file_name=f'区域划分统计结果-{name_list}.csv',
            mime='text/csv',
            help='统计每个图层各个区域的数量',
        )
def time_costing(step):
    """Decorator factory: report in the sidebar which regions were divided
    and how long the *step* took.

    NOTE(review): assumes the decorated function's third positional argument
    is the region name (str or list) -- matches region_division below.
    """
    def func_name(func):
        @wraps(func)
        def core(*args, **kwargs):
            start = time.time()
            res = func(*args, **kwargs)
            region_name = args[2]
            # Normalize a single name to a one-element list for '、'.join.
            if isinstance(region_name, str):
                region_name = [region_name]
            elif isinstance(region_name, list):
                pass
            st.sidebar.write('、'.join(region_name) + '已划分')
            st.sidebar.write(f'{step}耗时:{float(time.time() - start):.3f}秒')
            return res
        return core
    return func_name
# Read lon/lat as strings first (so '/'-separated multi-values survive),
# then convert to float64 after cleaning (see df_clean / clean_lotlan).
LONLAT_STR_FORMAT = {'经度': 'string', '纬度': 'string'}
LONLAT_FLOAT_FORMAT = {'经度': 'float64', '纬度': 'float64'}
def df_clean(df):
    """Validate that *df* has 经度/纬度 columns, expand '/'-separated
    multi-coordinates and cast the coordinates to float64.

    Returns None (after flagging an error) when the columns are missing.
    """
    if {'经度', '纬度'}.issubset(set(list(df))):
        return df.pipe(clean_lotlan).astype(LONLAT_FLOAT_FORMAT)
    else:
        st.error('当前表格格式错误')
        st.sidebar.error('当前表格格式错误')
def clean_lotlan(df_cell):
    """Strip whitespace from the lon/lat columns and explode rows whose
    经度 cell holds several '/'-separated values into one row per value."""
    for col_name in list(df_cell.loc[:, ['经度', '纬度']]):
        df_cell[col_name] = df_cell.astype({col_name: 'string'})[col_name].str.replace(r'\s', '', regex=True)
    # Rows whose longitude contains '/' carry several coordinates at once.
    df_cell_split_list = df_cell['经度'].str.contains('/')
    df_cell_split = df_cell[df_cell_split_list]
    if not df_cell_split.empty:
        df_comb = pd.DataFrame([], index=df_cell_split.index)
        for col_name in list(df_cell_split.loc[:, ['经度', '纬度']]):
            # Split each multi-value cell and stack one row per value.
            df_comb = pd.concat([df_comb, (df_cell_split[col_name].str.split('/', expand=True)
                                .stack().reset_index(level=1).rename(columns={0: col_name}))], axis=1)
        # NOTE(review): assumes the first 3 columns of the split rows are the
        # non-coordinate payload -- confirm against the upload schema.
        df_cell = pd.concat([df_cell[~df_cell_split_list],
                            df_cell_split.iloc[:, :3].join(df_comb.drop(['level_1'], axis=1))]).reset_index(drop=True)
    return df_cell
@st.cache(suppress_st_warning=True)
def read_df(file):
    """Read an uploaded csv/xlsx/xls into a DataFrame, sniffing the text
    encoding of CSVs with chardet.  Returns None on any error."""
    # Use the last suffix so names like 'a.b.csv' still resolve correctly
    # (the original used split('.')[1], which broke on multi-dot names).
    f_ext = file.name.split('.')[-1]
    df = None
    if f_ext == 'csv':
        # chardet names are normalized to lowercase with '_' separators.
        encode = str.lower(chardet.detect(file.readline())["encoding"]).replace('-', '_')
        file.seek(0)
        # BUGFIX: the original compared against 'utf-8' / 'iso-8859-1',
        # which can never match after the '-' -> '_' normalization above,
        # so those files were rejected as encoding errors.
        if encode == 'utf_8':
            df = pd_read(file, f_ext, 'utf-8')
        elif encode == 'gb2312':
            # gb18030 is a superset; fall back when gbk decoding fails.
            try:
                df = pd_read(file, f_ext, 'gbk')
            except UnicodeDecodeError:
                df = pd_read(file, f_ext, 'gb18030')
        elif encode == 'utf_8_sig':
            df = pd_read(file, f_ext, 'utf_8_sig')
        elif encode == 'iso_8859_1':
            df = pd_read(file, f_ext, 'gbk')
        else:
            st.error('文件编码错误')
    elif f_ext in ['xlsx', 'xls']:
        df = pd_read(file, f_ext)
    else:
        st.error('文件格式错误')
    # st.write("Cache miss:read_df")
    return df
def pd_read(file, extension, encode_n=None):
    """Thin wrapper around the pandas readers that keeps lon/lat as strings.

    *encode_n* is only honoured for CSV input.  Returns None on an
    unsupported extension or a pandas ValueError.
    """
    try:
        if extension == 'csv':
            return pd.read_csv(file, dtype=LONLAT_STR_FORMAT, encoding=encode_n, low_memory=False)
        elif extension in ['xlsx', 'xls']:
            return pd.read_excel(file, dtype=LONLAT_STR_FORMAT)
        else:
            st.error('文件格式错误')
    except ValueError:
        st.error('文件读取错误')
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def region_division(df, region_dictionary, region_name):
    """Spatial-join every (lon, lat) point in *df* against each selected
    region layer, adding one column per layer holding the region name."""
    lanlot_cols = ['经度', '纬度']
    df = df_clean(df)
    # Normalize a single layer name to a list.
    if isinstance(region_name, str):
        region_name = [region_name]
    elif isinstance(region_name, list):
        pass
    else:
        st.error('错误:区域名称错误')
    # Join on unique coordinates only, then merge back onto the full frame.
    df_dropdu = df.drop_duplicates(subset=lanlot_cols).reset_index(drop=True)
    my_bar = st.sidebar.progress(0)
    for index, name in enumerate(region_name):
        gdf_region = region_dictionary[name][1]
        # NOTE(review): both ternary branches are identical (to_crs either
        # way); the crs-is-None branch presumably should be set_crs -- confirm.
        gdf_region = gdf_region.to_crs('EPSG:2381') if gdf_region.crs is None else gdf_region.to_crs('EPSG:2381')
        lanlot = gpd.GeoSeries([Point(x, y) for x, y in zip(df_dropdu[lanlot_cols[0]], df_dropdu[lanlot_cols[1]])])
        # Project the WGS84 points into the layer CRS before the sjoin.
        lanlot_region = gpd.sjoin(lanlot.reset_index().rename(columns={0: 'geometry'})
                                  .set_crs('epsg:4326').to_crs('EPSG:2381'), gdf_region.loc[:, [name, 'geometry']])
        df_dropdu = df_dropdu.join(lanlot_region.set_index('index').loc[:, name])
        my_bar.progress((index + 1) / len(region_name))
    df = df.merge(df_dropdu.loc[:, lanlot_cols + region_name], how='left', on=lanlot_cols)
    # st.write("Cache miss: region_division")
    run_counter()
    return df
def run_counter():
    # Count completed region-division runs (displayed in main()).
    st.session_state.run += 1


def layer_selector_counter():
    # Count layer-form confirmations; gates the workflows in main().
    st.session_state.layer_selector += 1
def ouput(df):
    """Serialize *df* to UTF-8-BOM CSV bytes for st.download_button.

    (Name kept as-is -- callers use 'ouput'.)
    """
    csv_text = df.to_csv(index=False)
    return csv_text.encode('utf-8-sig')
def output_summary(summary):
    """Concatenate all per-layer summary tables side by side and serialize
    them to UTF-8-BOM CSV bytes.

    *summary* maps layer name -> count DataFrame (see reslut_summary).
    """
    # Concatenate once instead of growing a DataFrame inside the loop
    # (repeated pd.concat is quadratic in the number of layers).
    frames = list(summary.values())
    df_summary = pd.concat(frames, axis=1) if frames else pd.DataFrame([])
    return df_summary.to_csv(index=False).encode('utf-8-sig')
@st.cache(suppress_st_warning=True)
def reslut_summary(df, region_name):
    """Count ECGI rows per region value for every selected layer, ordered by
    the business-defined category order.

    Returns (summary dict of layer -> count DataFrame, rail table or None).
    NOTE(review): mutates *df* in place (fillna on the region columns).
    """
    # Layer-specific labels for points that fell outside all polygons.
    for name in region_name:
        if name == '规划区域':
            df['规划区域'] = df['规划区域'].fillna('农村')
        elif name == '网格区域':
            df['网格区域'] = df['网格区域'].fillna('网格外')
        elif name == '高铁周边':
            df['高铁周边'] = df['高铁周边'].fillna('铁路外')
        else:
            df[name] = df[name].fillna('其他')
    # Display order for each layer's categories.
    county_order = ['天宁', '钟楼', '武进', '新北', '经开', '金坛', '溧阳', '其他']
    third_party_order = ['华星', '华苏-武进', '华苏-金坛', '华苏-溧阳', '其他']
    planning_region_order = ['主城区', '一般城区', '县城', '乡镇', '农村']
    grid_order = ['网格内', '网格边界200米', '网格外']
    rail_surrounding_order = ['京沪周边500米', '京沪周边1.5公里', '沪宁周边500米', '沪宁周边1.5公里', '宁杭周边500米', '宁杭周边1.5公里', '铁路外']
    tag_order = ['主城区', '县城', '其他']
    name_list = ['区县', '三方区域', '规划区域', '网格区域', '高铁周边', '标签区域']
    order_list = [county_order, third_party_order, planning_region_order, grid_order, rail_surrounding_order, tag_order]
    region_order_dict = dict(zip(name_list, order_list))
    summary = {}
    for name in region_name:
        # Group counts, then sort by the category order defined above
        # (temporary categorical column dropped after sorting).
        summary[name] = (df.groupby(name)['ECGI'].count().reset_index(name='数量')
                         .assign(temp=lambda x: x[name].astype('category').cat.set_categories(region_order_dict[name]))
                         .sort_values(by=['temp'], ignore_index=True).drop('temp', axis=1))
    # Rail data is charted separately, so pull it out of the dict.
    rail_data = summary.pop('高铁周边') if summary.get('高铁周边') is not None else None
    # st.write("Cache miss: reslut_summary")
    return summary, rail_data
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def summary_ploting(summary, rail_data):
    """Render the per-region summaries as pie/donut charts.
    Args:
        summary: mapping of region name -> ordered count DataFrame, as
            produced by ``reslut_summary`` (rail summary already removed).
        rail_data: the '高铁周边' count DataFrame, or None if that region
            was not requested.
    Returns:
        list: matplotlib Figure objects -- one grid of nested donuts for
        the regular regions and, when ``rail_data`` is given, one
        "bar of pie" style figure for the rail breakdown.
    """
    # Use a CJK-capable font so the Chinese labels render correctly.
    plt.rcParams['font.family'] = 'sans-serif'
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    fig_list = []
    region_name = list(summary.keys())
    num_name = len(region_name)
    # Two summaries share one donut and each grid row holds two donuts,
    # so four summaries fit per row.
    nrows = (num_name - 1) // 4 + 1
    if num_name > 0:
        fig, ax = plt.subplots(nrows, 2, figsize=(10, 4.8 * nrows))
        # Draw a nested donut chart for every pair of summaries (rail excluded).
        for index in range(0, num_name, 2):
            name_1 = region_name[index]
            name_2 = region_name[index + 1] if index < num_name - 1 else None
            if nrows == 1:
                ax_i = ax[index // 2]
            else:
                ax_rows, ax_cols = index // 2 // 2, index // 2 % 2
                ax_i = ax[ax_rows][ax_cols]
            if name_2 is not None:
                size = 0.3
                labels_1, vals_1 = summary[name_1][name_1].to_list(), summary[name_1]['数量'].values
                labels_2, vals_2 = summary[name_2][name_2].to_list(), summary[name_2]['数量'].values
                num_label_1, num_label_2 = len(labels_1), len(labels_2)
                cmap = plt.get_cmap("tab20c")
                # The summary with fewer labels goes on the outer ring.
                if num_label_1 <= num_label_2:
                    outer_labels, outer_vals = labels_1, vals_1
                    inner_labels, inner_vals = labels_2, vals_2
                    outer_colors = cmap(tab20c_color_array(num_label_1, 'outer'))
                    inner_colors = cmap(tab20c_color_array(num_label_2, 'inner'))
                else:
                    outer_labels, outer_vals = labels_2, vals_2
                    inner_labels, inner_vals = labels_1, vals_1
                    outer_colors = cmap(tab20c_color_array(num_label_2, 'outer'))
                    inner_colors = cmap(tab20c_color_array(num_label_1, 'inner'))
                wedges1, texts1, autotexts1 = ax_i.pie(
                    inner_vals, radius=1 - size, labels=inner_labels, colors=inner_colors,
                    autopct=lambda pct: pct_func(pct, inner_vals), pctdistance=0.75, labeldistance=0.3,
                    startangle=90, wedgeprops=dict(width=size, edgecolor='w')
                )
                wedges2, texts2, autotexts2 = ax_i.pie(
                    outer_vals, radius=1, labels=outer_labels, colors=outer_colors,
                    autopct=lambda pct: pct_func(pct, outer_vals), pctdistance=0.85,
                    startangle=90, wedgeprops=dict(width=size, edgecolor='w')
                )
                plt.setp(autotexts1, size=10, weight="bold", color="w")
                plt.setp(autotexts2, size=10, weight="bold", color="w")
                plt.setp(texts1, size=10, color="k")
                plt.setp(texts2, size=10, color="k")
                ax_i.set(aspect="equal")
            else:
                # A single leftover summary is drawn as a plain pie chart.
                labels_1, vals_1 = summary[name_1][name_1].to_list(), summary[name_1]['数量'].values
                num_label_1 = len(labels_1)
                cmap = plt.get_cmap("tab20c")
                outer_colors = cmap(tab20c_color_array(num_label_1, 'inner'))
                wedges, texts, autotexts = ax_i.pie(vals_1, radius=1, labels=labels_1, colors=outer_colors,
                                                    autopct=lambda pct: pct_func(pct, vals_1), startangle=90)
                plt.setp(autotexts, size=10, weight="bold", color="w")
                plt.setp(texts, size=10, weight="bold", color="k")
                ax_i.set(aspect="equal")
                # Turns off the current (last-created) axes of the grid --
                # presumably to hide the unused subplot left by the odd count.
                plt.axis('off')
        fig_list.append(fig)
    # Rail "bar of pie": a merged two-slice pie on the left whose exploded
    # rail slice is broken down into a detail pie on the right.
    if rail_data is not None:
        fig = plt.figure(figsize=(10, 4.8))
        ax1 = fig.add_subplot(121)
        ax2 = fig.add_subplot(122)
        fig.subplots_adjust(wspace=0)
        merged_label = ['高铁周边', '铁路外']
        df_rail = rail_data.query('高铁周边 != "铁路外"')
        merged_val = [df_rail['数量'].sum(), rail_data.query('高铁周边 == "铁路外"')['数量'].sum()]
        # Rotate so the exploded rail slice faces the detail pie.
        # NOTE(review): matplotlib's "bar of pie" example derives the angle
        # from the slice's share of the *total*; dividing by merged_val[1]
        # alone looks suspicious -- confirm the intended rotation.
        angle = -180 * merged_val[0] / merged_val[1]
        explode = [0.1, 0]
        cmap = plt.get_cmap("tab20c")
        merged_colors = cmap([4, 0])
        wedges1, texts1, autotexts1 = ax1.pie(merged_val, radius=1, labels=merged_label, colors=merged_colors,
                                              autopct=lambda pct: pct_func(pct, merged_val),
                                              startangle=angle, explode=explode)
        plt.setp(autotexts1, size=10, weight="bold", color="w")
        plt.setp(texts1, size=12, color="k")
        detail_label, detail_val = df_rail['高铁周边'].to_list(), df_rail['数量'].values
        num_label = len(detail_label)
        cmap = plt.get_cmap("tab20c")
        detail_colors = cmap(tab20c_color_array(num_label, 'inner'))
        r2 = 0.8
        wedges2, texts2, autotexts2 = ax2.pie(detail_val, radius=r2, labels=detail_label, colors=detail_colors,
                                              autopct=lambda pct: pct_func(pct, detail_val),
                                              startangle=90, counterclock=False)
        plt.setp(autotexts2, size=10, weight="bold", color="w")
        plt.setp(texts2, size=10, color="k")
        # Geometry of the merged pie's rail wedge, used to anchor connectors.
        theta1 = ax1.patches[0].theta1
        theta2 = ax1.patches[0].theta2
        center = ax1.patches[0].center
        r = ax1.patches[0].r
        width = 0.2
        # Connector along the wedge's upper edge.
        x = r * np.cos(np.pi / 180 * theta2) + center[0]
        y = r * np.sin(np.pi / 180 * theta2) + center[1]
        con_a = ConnectionPatch(xyA=(-width / 2, r2), xyB=(x, y), coordsA='data', coordsB='data', axesA=ax2, axesB=ax1)
        # Connector along the wedge's lower edge.
        x = r * np.cos(np.pi / 180 * theta1) + center[0]
        y = r * np.sin(np.pi / 180 * theta1) + center[1]
        con_b = ConnectionPatch(xyA=(-width / 2, -r2), xyB=(x, y), coordsA='data', coordsB='data', axesA=ax2, axesB=ax1)
        for con in [con_a, con_b]:
            con.set_linewidth(1)  # connector line width
            # BUG FIX: the original did ``con.set_color = ([0, 0, 0])``,
            # overwriting the method with a list instead of calling it.
            con.set_color([0, 0, 0])  # connector line colour (black)
            ax2.add_artist(con)  # attach the connector to the detail axes
        fig_list.append(fig)
    else:
        pass
    # st.write("Cache miss: summary_ploting")
    return fig_list
def pct_func(pct, allvals):
    """Format a pie-wedge label showing the absolute count and percentage."""
    total = np.sum(allvals)
    count = int(round(pct / 100. * total))
    return f"{count:d}\n{pct:.1f}%"
def tab20c_color_array(num_label, outer_or_inner):
    """Return sorted tab20c colormap index positions for a donut ring.
    The tab20c map is organised in five groups of four shades.  The outer
    ring uses the first shade of every group (0, 4, 8, 12, 16), shifting by
    one per extra layer; the inner ring uses the second and third shades.
    ``num_label`` determines how many five-colour layers are needed.
    """
    array = np.empty((0, 5))
    base = np.arange(5) * 4
    if outer_or_inner == 'outer':
        layers = (num_label - 1) // 5 + 1
        pieces = [base + offset for offset in range(layers)]
        array = np.sort(np.concatenate(pieces)).astype(int)
    elif outer_or_inner == 'inner':
        layers = (num_label - 1) // 10 + 1
        pieces = [base + 1, base + 2]
        pieces.extend(base + offset + 2 for offset in range(1, layers))
        array = np.concatenate(pieces)
    return np.sort(array)
if __name__ == "__main__":
main()
| spiritdncyer/region-divsion-streamlit | demo-regionDiv.py | demo-regionDiv.py | py | 21,686 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.session_state",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "streamlit.session_state",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "streamlit.session_state",
"line_number": 18,
"usage_type": "attribute"
},
{... |
74309241063 | import time, os, boto3, json, decimal
from boto3.dynamodb.conditions import Key
from helpers import send_to_datastream
from helpers import _get_body
from helpers import _get_response
from helpers import DecimalEncoder
try:
dynamodb = boto3.resource('dynamodb')
phase_status_table = dynamodb.Table(os.getenv('PHASE_STATUS_TABLE'))
except Exception as e:
print(e)
def post_phase_status(event, context):
    """Lambda handler: broadcast a phase-status message and persist it.
    Expects the request body to contain ``site`` and ``message``.  The
    payload is pushed to the ``phase_status`` datastream topic and stored
    in DynamoDB with a one-day TTL.
    """
    body = _get_body(event)
    try:
        site, message = body['site'], body['message']
    except Exception:
        # Missing keys (or a non-dict body) -> client error.
        return _get_response(400, 'Unable to parse all required arguments. ')
    now = time.time()
    payload = {"site": site, "timestamp": now, "message": message}
    # Fan the update out to subscribed sites first.
    send_to_datastream(site, payload, "phase_status")
    # Persist with a one-day TTL; DynamoDB needs Decimal, not float, so
    # round-trip through JSON with Decimal parsing.
    payload["ttl"] = now + 86400
    entry = json.loads(json.dumps(payload, cls=DecimalEncoder), parse_float=decimal.Decimal)
    phase_status_table.put_item(Item=entry)
    return _get_response(200, 'Phase status broadcasted to sites successfully.')
def get_phase_status(event, context):
    """Lambda handler: return the latest phase-status entries for a site.
    Path parameter ``site`` is required.  Optional query parameter
    ``max_age_seconds`` (default 3600) bounds how old returned entries
    may be.
    """
    try:
        site = event['pathParameters']['site']
    except Exception:
        return _get_response(400, 'Missing path parameter site')
    # BUG FIX: API Gateway sends ``"queryStringParameters": null`` (not a
    # missing key) when the request has no query string, so the previous
    # ``event.get('queryStringParameters', {})`` could return None and the
    # chained ``.get`` raised AttributeError.
    query_params = event.get('queryStringParameters') or {}
    max_age_seconds = query_params.get('max_age_seconds', 3600)  # default max age is 1 hour
    timestamp_cutoff = int(time.time() - int(max_age_seconds))
    phase_status = phase_status_table.query(
        Limit=3,
        ScanIndexForward=False,  # sort by most recent first
        KeyConditionExpression=Key('site').eq(site) & Key('timestamp').gt(timestamp_cutoff)
    )
    return _get_response(200, phase_status['Items'])
if __name__=="__main__":
phase_status_table = dynamodb.Table('phase-status-dev')
payload = json.dumps({
"site": "tst",
"message": "a phase message 2",
})
#post_phase_status({"body": payload}, {})
event = {
"pathParameters": {
"site": "tst"
},
"queryStringParameters": {
"max_age_seconds": "3600"
}
}
context = {}
ps = get_phase_status(event, context)
print(ps['body'])
| LCOGT/photonranch-status | phase_status.py | phase_status.py | py | 2,369 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.resource",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "helpers._get_body",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "helpers._get_response",
... |
18068483494 | # -*- coding:utf8 -*-
import tweepy
import os
import sys
import json
import time
import urllib2
import requests
"""
ref. http://kslee7746.tistory.com/entry/python-tweepy-%EC%82%AC%EC%9A%A9%ED%95%9C-%ED%8A%B8%EC%9C%84%ED%84%B0-%ED%81%AC%EB%A1%A4%EB%A7%81crawling
ref. https://proinlab.com/archives/1562
ref. http://kyeoneee.tistory.com/9
"""
reload(sys)
sys.setdefaultencoding('utf-8')
hotTopics = json.loads(urllib2.urlopen("http://polatics.news/get_hot_topics").read())
track_list = [topic["topic"] for topic in hotTopics]
# api 인증 요청
consumer_token = ""
consumer_secret = ""
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
# access 토큰 요청
access_token = ""
access_token_secret = ""
auth.set_access_token(access_token, access_token_secret)
f = open("twitter_%s.txt"%(time.strftime("%H-%d-%m-%Y")), "a");
print (time.strftime("%H:%d:%m:%Y"))
# api 생성
api = tweepy.API(auth)
data_len = 0
buf = []
class StreamListener(tweepy.StreamListener):
    """Buffers topic-matching tweets and ships them to the polatics backend.
    Tweets are accumulated in the module-level ``buf``; every 1000 stream
    events the buffer is POSTed as JSON to the ``add_twitter`` endpoint and
    reset.  Matching raw events are also appended to the daily dump file
    ``f``.  (Python 2 code: note ``data.encode("utf-8")`` on write.)
    """
    def on_data(self, data):
        # Module-level state: ``data_len`` counts buffered tweets,
        # ``track_list`` holds the hot topics, ``buf`` the pending payloads.
        global data_len
        global track_list
        global buf
        if data_len == 1000:
            # Flush: upload the accumulated tweets, then reset the buffer.
            json_results = json.dumps(buf)
            post_data= {'twitter': json_results}
            res = requests.post("http://polatics.news/add_twitter", data=post_data)
            buf = []
            data_len = 0
            print("전송 " + res.text)
            return
        json_data = json.loads(data)
        #print ("=======================================================")
        #print ("핫토픽: " + ",".join([ht for ht in track_list if ht in json.loads(data)["text"]]))
        #print (json.loads(data)["text"])
        #print ("유저아이디: " + json.loads(data)["user"]["name"])
        # Keep only the fields the backend needs, plus the matched topics.
        ret = {}
        ret["created_at"] = json_data["created_at"]
        ret["text"] = json_data["text"]
        ret["name"] = json_data["user"]["name"]
        ret["screen_name"] = json_data["user"]["screen_name"]
        ret["topic"] = [ht for ht in track_list if ht in json.loads(data)["text"]]
        if len(ret["topic"]) > 0:
            # Buffer only tweets that mention at least one tracked topic;
            # also append the raw event to the daily dump file.
            buf.append(ret)
            f.write(data.encode("utf-8"))
            data_len = data_len + 1
    def on_error(self, status_code):
        # 420 = rate limited by Twitter; returning False stops the stream.
        if status_code == 420:
            return False
location = "%s,%s,%s" % ("35.95", "128.25", "1000km") # 대한민국 중심 좌표, 반지름
if __name__ == "__main__":
strmr = StreamListener()
strmr = tweepy.Stream(auth=api.auth, listener=strmr)
strmr.filter(track=track_list)
"""
keyword = "박근혜 OR 문재인" # 검색어
wfile = open(os.getcwd()+"/twitter.txt", mode='w')
cursor = tweepy.Cursor(api.search,
q=keyword,
since='2017-10-01', # 2015-01-01 이후에 작성된 트윗들로 가져옴
count=100, # 페이지당 반환할 트위터 수 최대 100
geocode=location,
include_entities=True)
for i, tweet in enumerate(cursor.items()):
print("{}: {}".format(i, tweet.text))
wfile.write(tweet.text + '\n')
wfile.close()
"""
| songjein/polatics | twitter_client.py | twitter_client.py | py | 2,878 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tweepy.OAuthHandler... |
29398421861 | import setuptools
from setuptools import find_namespace_packages
# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata and runtime dependencies.
# NOTE(review): the exact version pins below can easily conflict with other
# installed packages -- consider loosening them to compatible ranges.
setuptools.setup(
    name="JarvisAI",
    version="3.9",
    author="Dipesh",
    author_email="dipeshpal17@gmail.com",
    description="JarvisAI is python library to build your own AI virtual assistant with natural language processing.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Dipeshpal/Jarvis_AI",
    include_package_data=True,
    packages=find_namespace_packages(include=['JarvisAI.*', 'JarvisAI']),
    install_requires=['numpy', 'gtts==2.2.1', 'playsound==1.2.2',
                      'SpeechRecognition==3.8.1', 'pipwin==0.5.0', 'lxml==4.6.1', 'pyjokes',
                      'beautifulsoup4==4.9.3', 'wikipedia==1.4.0', 'auto_face_recognition', 'transformers==4.3.2',
                      'lazyme==0.0.23', 'librosa==0.8.0', "torch==1.7.1", "requests", "opencv-contrib-python==4.5.2.52",
                      "opencv-python==4.5.2.52", "cvzone==1.1.1", "pyttsx3", "googlesearch-python", "spacy",
                      "mediapipe==0.8.8", "googlesearch-python==1.0.1", "youtube-search-python==1.5.3", "shutup==0.1.3"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    project_urls={
        'Documentation': 'https://github.com/Dipeshpal/Jarvis_AI',
        'Donate': 'https://www.buymeacoffee.com/dipeshpal',
        'Say Thanks!': 'https://youtube.com/techportofficial',
        'Source': 'https://github.com/Dipeshpal/Jarvis_AI',
    },
)
| MrVanHendrix/Beth.Ai | BETH_Ai/BETH_Ai/setup.py | setup.py | py | 1,722 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "setuptools.find_namespace_packages",
"line_number": 17,
"usage_type": "call"
}
] |
31379873011 | __author__ = 'Vincent'
from scrapy.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.selector import HtmlXPathSelector
from mediascrap.items import NewsItem
import datetime
from pymongo import MongoClient
class ChokomagSpider(CrawlSpider):
    """
    A spider crawls domains (in accordance with some rules we will define
    to collect all the pages that we wish to extract our LemondeItems instances
    from. Most of this crawling logic is provided by Scrapy in the CrawlSpider class, so we
    can extend this class when writing our first spider.
    """
    name = "chokomag"
    allowed_domains =["chokomag.com"]
    # Category listing pages the crawl starts from.
    start_urls =[
        "http://chokomag.com/beaute/cheveux/",
        "http://chokomag.com/tag/make-up/",
        "http://chokomag.com/beaute/box/",
        "http://chokomag.com/beaute/soins/",
        "http://chokomag.com/beaute/parfums/",
        "http://chokomag.com/mode/",
        "http://chokomag.com/tag/concours/",
        "http://chokomag.com/non-classe/"
    ]
    rules = [
        #site which should be saved
        # Follow any link whose path matches the pattern and hand the
        # response to ``parse_page``.
        Rule(
            LinkExtractor(
                allow = ['/(\d+|\w+)']),
            'parse_page')]
    def parse_page(self,response):
        # NOTE(review): HtmlXPathSelector / .select() are long-deprecated
        # scrapy APIs (replaced by Selector / .xpath()); kept as-is here.
        hxs = HtmlXPathSelector(response)
        # Join all paragraph text inside the article body and trim it.
        body = ''.join(hxs.select('//div[@class="entry-content clearfix"]/p//text()').extract()).strip()
        item = NewsItem()
        # Only yield an item when the page actually contains article text.
        if len(body)> 0 :
            item['body'] = body
            item['url'] = response.url
            item['timeOfScrap'] = datetime.datetime.now()
            return item
        else :
            pass
| fisheatfish/mediascrap | mediascrap/spiders/chokomag.py | chokomag.py | py | 1,620 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.spiders.CrawlSpider",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "scrapy.spiders.Rule",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "scrapy.contrib.linkextractors.LinkExtractor",
"line_number": 38,
"usage_type": "call"
},... |
5618318828 | import cv2
import os
import argparse
def image_folder_to_video(folder_path, output_path, fps=30.0, frames_per_image=15):
    """Stitch the ``.jpg`` images in *folder_path* into an mp4 video.

    Images are taken in sorted filename order; every image is held for
    ``frames_per_image`` frames (0.5 s at the default 30 fps -- the original
    comment claimed 0.2 s, which did not match 15 frames at 30 fps).

    Args:
        folder_path: directory containing the input ``.jpg`` images.
        output_path: path of the mp4 file to write.
        fps: output frame rate (default 30.0, as before).
        frames_per_image: frames each image is repeated for (default 15).

    Raises:
        IndexError: if the folder contains no ``.jpg`` files.
    """
    # Collect and sort the jpg filenames (case-insensitive extension).
    filenames = sorted(
        os.path.join(folder_path, f)
        for f in os.listdir(folder_path)
        if f.lower().endswith('.jpg')
    )
    # Frame size is taken from the first image; later images are assumed to
    # share the same dimensions -- TODO confirm with the callers.
    first_image = cv2.imread(filenames[0])
    height, width, _ = first_image.shape
    # Define the codec and create a video writer object.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    try:
        for filename in filenames:
            image = cv2.imread(filename)
            for _ in range(frames_per_image):
                out.write(image)
    finally:
        # Always release so a partially written file is finalized even if a
        # read/write fails midway.
        out.release()
def main():
    """Parse command-line arguments and run the folder-to-video conversion."""
    parser = argparse.ArgumentParser(description='Convert a folder of images to a video.')
    parser.add_argument('input_folder', metavar='input_folder', type=str,
                        help='The path to the folder containing the input images.')
    parser.add_argument('output_video', metavar='output_video', type=str,
                        help='The path to the output video file.')
    args = parser.parse_args()
    image_folder_to_video(args.input_folder, args.output_video)
if __name__ == "__main__":
main()
| danfinlay/face-lapse | src/picstitch.py | picstitch.py | py | 1,418 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 11,... |
8444325228 | # class s_(object):
import functools
import numbers
import operator
import numpy
import cupy
from cupy._creation import from_data
from cupy._manipulation import join
class AxisConcatenator(object):
    """Translates slice objects to concatenation along an axis.
    For detailed documentation on usage, see :func:`cupy.r_`.
    This implementation is partially borrowed from NumPy's one.
    """
    def _output_obj(self, obj, ndim, ndmin, trans1d):
        # Reposition the axes of *obj* (already padded to *ndmin* dims) so
        # the original data axes end up at position *trans1d*; mirrors
        # NumPy's AxisConcatenator behaviour.
        k2 = ndmin - ndim
        if trans1d < 0:
            trans1d += k2 + 1
        defaxes = list(range(ndmin))
        k1 = trans1d
        axes = defaxes[:k1] + defaxes[k2:] + \
            defaxes[k1:k2]
        return obj.transpose(axes)
    def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
        # axis: concatenation axis; ndmin: minimum dimensionality inputs are
        # promoted to; trans1d: where length-1 axes go for low-dim inputs.
        # ``matrix`` is stored for NumPy API parity.
        self.axis = axis
        self.trans1d = trans1d
        self.matrix = matrix
        self.ndmin = ndmin
    def __getitem__(self, key):
        """Concatenate the indexed items along ``self.axis``.
        Scalars are promoted to arrays and cast to the common result dtype.
        NumPy's string directives and slice syntax are not implemented.
        """
        trans1d = self.trans1d
        ndmin = self.ndmin
        objs = []
        arrays = []
        scalars = []
        if isinstance(key, str):
            raise NotImplementedError
        if not isinstance(key, tuple):
            key = (key,)
        for i, k in enumerate(key):
            if isinstance(k, slice):
                raise NotImplementedError
            elif isinstance(k, str):
                if i != 0:
                    raise ValueError(
                        'special directives must be the first entry.')
                raise NotImplementedError
            elif type(k) in numpy.ScalarType:
                newobj = from_data.array(k, ndmin=ndmin)
                # Remember scalar positions: they are cast to the common
                # dtype after all operands are known.
                scalars.append(i)
            else:
                newobj = from_data.array(k, copy=False, ndmin=ndmin)
                if ndmin > 1:
                    ndim = from_data.array(k, copy=False).ndim
                    if trans1d != -1 and ndim < ndmin:
                        newobj = self._output_obj(newobj, ndim, ndmin, trans1d)
                arrays.append(newobj)
            objs.append(newobj)
        # Promote scalar entries to the dtype shared by all operands.
        final_dtype = numpy.result_type(*arrays, *[key[k] for k in scalars])
        if final_dtype is not None:
            for k in scalars:
                objs[k] = objs[k].astype(final_dtype)
        return join.concatenate(tuple(objs), axis=self.axis)
    def __len__(self):
        # Matches NumPy's AxisConcatenator, which also reports length 0.
        return 0
class CClass(AxisConcatenator):
    """Concatenator configured for column-wise (last-axis) stacking."""
    def __init__(self):
        # axis=-1 with 2-D promotion; 1-D inputs become columns (trans1d=0).
        super().__init__(-1, ndmin=2, trans1d=0)
c_ = CClass()
"""Translates slice objects to concatenation along the second axis.
This is a CuPy object that corresponds to :obj:`cupy.r_`, which is
useful because of its common occurrence. In particular, arrays will be
stacked along their last axis after being upgraded to at least 2-D with
1's post-pended to the shape (column vectors made out of 1-D arrays).
For detailed documentation, see :obj:`r_`.
This implementation is partially borrowed from NumPy's one.
Returns:
cupy.ndarray: Joined array.
.. seealso:: :obj:`numpy.c_`
Examples
--------
>>> a = cupy.array([[1, 2, 3]], dtype=np.int32)
>>> b = cupy.array([[4, 5, 6]], dtype=np.int32)
>>> cupy.c_[a, 0, 0, b]
array([[1, 2, 3, 0, 0, 4, 5, 6]], dtype=int32)
"""
class RClass(AxisConcatenator):
    """Concatenator with the default ``r_`` settings (first-axis stacking)."""
    def __init__(self):
        super().__init__()
r_ = RClass()
"""Translates slice objects to concatenation along the first axis.
This is a simple way to build up arrays quickly.
If the index expression contains comma separated arrays, then stack
them along their first axis.
This object can build up from normal CuPy arrays.
Therefore, the other objects (e.g. writing strings like '2,3,4',
or using imaginary numbers like [1,2,3j],
or using string integers like '-1') are not implemented yet
compared with NumPy.
This implementation is partially borrowed from NumPy's one.
Returns:
cupy.ndarray: Joined array.
.. seealso:: :obj:`numpy.r_`
Examples
--------
>>> a = cupy.array([1, 2, 3], dtype=np.int32)
>>> b = cupy.array([4, 5, 6], dtype=np.int32)
>>> cupy.r_[a, 0, 0, b]
array([1, 2, 3, 0, 0, 4, 5, 6], dtype=int32)
"""
def indices(dimensions, dtype=int):
    """Returns an array representing the indices of a grid.
    Computes an array where the subarrays contain index values 0,1,...
    varying only along the corresponding axis.
    Args:
        dimensions: The shape of the grid.
        dtype: Data type specifier. It is int by default.
    Returns:
        ndarray:
            The array of grid indices,
            ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
    Examples
    --------
    >>> grid = cupy.indices((2, 3))
    >>> grid.shape
    (2, 2, 3)
    >>> grid[0]        # row indices
    array([[0, 0, 0],
           [1, 1, 1]])
    >>> grid[1]        # column indices
    array([[0, 1, 2],
           [0, 1, 2]])
    .. seealso:: :func:`numpy.indices`
    """
    dims = tuple(dimensions)
    ndim = len(dims)
    unit = (1,) * ndim
    res = cupy.empty((ndim,) + dims, dtype=dtype)
    # Each output plane is an arange broadcast along its own axis only.
    for axis, extent in enumerate(dims):
        ramp = cupy.arange(extent, dtype=dtype)
        res[axis] = ramp.reshape(unit[:axis] + (extent,) + unit[axis + 1:])
    return res
def ix_(*args):
    """Construct an open mesh from multiple sequences.
    This function takes N 1-D sequences and returns N outputs with N
    dimensions each, such that the shape is 1 in all but one dimension
    and the dimension with the non-unit shape value cycles through all
    N dimensions.
    Using `ix_` one can quickly construct index arrays that will index
    the cross product. ``a[cupy.ix_([1,3],[2,5])]`` returns the array
    ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
    Args:
        *args: 1-D sequences
    Returns:
        tuple of ndarrays:
        N arrays with N dimensions each, with N the number of input sequences.
        Together these arrays form an open mesh.
    Examples
    --------
    >>> a = cupy.arange(10).reshape(2, 5)
    >>> a
    array([[0, 1, 2, 3, 4],
           [5, 6, 7, 8, 9]])
    >>> ixgrid = cupy.ix_([0,1], [2,4])
    >>> ixgrid
    (array([[0],
           [1]]), array([[2, 4]]))
    .. warning::
        This function may synchronize the device.
    .. seealso:: :func:`numpy.ix_`
    """
    # TODO(niboshi): Avoid nonzero which may synchronize the device.
    out = []
    nd = len(args)
    for k, seq in enumerate(args):
        arr = from_data.asarray(seq)
        if arr.ndim != 1:
            raise ValueError('Cross index must be 1 dimensional')
        if arr.size == 0:
            # Explicitly type empty arrays to avoid the float default.
            arr = arr.astype(numpy.intp)
        if cupy.issubdtype(arr.dtype, cupy.bool_):
            arr, = arr.nonzero()  # may synchronize
        # Shape is 1 everywhere except along this argument's own axis.
        shape = [1] * nd
        shape[k] = arr.size
        out.append(arr.reshape(tuple(shape)))
    return tuple(out)
def ravel_multi_index(multi_index, dims, mode='wrap', order='C'):
    """
    Converts a tuple of index arrays into an array of flat indices, applying
    boundary modes to the multi-index.
    Args:
        multi_index (tuple of cupy.ndarray) : A tuple of integer arrays, one
            array for each dimension.
        dims (tuple of ints): The shape of array into which the indices from
            ``multi_index`` apply.
        mode ('raise', 'wrap' or 'clip'), optional: Specifies how out-of-bounds
            indices are handled.  Can specify either one mode or a tuple of
            modes, one mode per index:
            - *'raise'* -- raise an error
            - *'wrap'* -- wrap around (default)
            - *'clip'* -- clip to the range
            In 'clip' mode, a negative index which would normally wrap will
            clip to 0 instead.
        order ('C' or 'F'), optional: Determines whether the multi-index should
            be viewed as indexing in row-major (C-style) or column-major
            (Fortran-style) order.
    Returns:
        raveled_indices (cupy.ndarray): An array of indices into the flattened
            version of an array of dimensions ``dims``.
    .. warning::
        This function may synchronize the device when ``mode == 'raise'``.
    Notes
    -----
    Note that the default `mode` (``'wrap'``) is different than in NumPy. This
    is done to avoid potential device synchronization.
    Examples
    --------
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (7,6))
    array([22, 41, 37])
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (7,6),
    ...                        order='F')
    array([31, 41, 13])
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (4,6),
    ...                        mode='clip')
    array([22, 23, 19])
    >>> cupy.ravel_multi_index(cupy.asarray([[3,6,6],[4,5,1]]), (4,4),
    ...                        mode=('clip', 'wrap'))
    array([12, 13, 13])
    >>> cupy.ravel_multi_index(cupy.asarray((3,1,4,1)), (6,7,8,9))
    array(1621)
    .. seealso:: :func:`numpy.ravel_multi_index`, :func:`unravel_index`
    """
    ndim = len(dims)
    if len(multi_index) != ndim:
        raise ValueError(
            "parameter multi_index must be a sequence of "
            "length {}".format(ndim))
    for d in dims:
        if not isinstance(d, numbers.Integral):
            raise TypeError(
                "{} object cannot be interpreted as an integer".format(
                    type(d)))
    # A single mode string applies to every axis.
    if isinstance(mode, str):
        mode = (mode, ) * ndim
    # The flat index is computed in int64, so the target array size must fit.
    if functools.reduce(operator.mul, dims) > cupy.iinfo(cupy.int64).max:
        raise ValueError("invalid dims: array size defined by dims is larger "
                         "than the maximum possible size")
    # Per-axis strides of the flattened array for the requested layout.
    s = 1
    ravel_strides = [1] * ndim
    order = 'C' if order is None else order.upper()
    if order == 'C':
        for i in range(ndim - 2, -1, -1):
            s = s * dims[i + 1]
            ravel_strides[i] = s
    elif order == 'F':
        for i in range(1, ndim):
            s = s * dims[i - 1]
            ravel_strides[i] = s
    else:
        raise ValueError('order not understood')
    multi_index = cupy.broadcast_arrays(*multi_index)
    raveled_indices = cupy.zeros(multi_index[0].shape, dtype=cupy.int64)
    # Accumulate stride * index per axis, applying the axis's boundary mode.
    for d, stride, idx, _mode in zip(dims, ravel_strides, multi_index, mode):
        if not isinstance(idx, cupy.ndarray):
            raise TypeError("elements of multi_index must be cupy arrays")
        if not cupy.can_cast(idx, cupy.int64, 'same_kind'):
            raise TypeError(
                'multi_index entries could not be cast from dtype(\'{}\') to '
                'dtype(\'{}\') according to the rule \'same_kind\''.format(
                    idx.dtype, cupy.int64().dtype))
        idx = idx.astype(cupy.int64, copy=False)
        if _mode == "raise":
            # Explicit bounds check; forces a device synchronization.
            if cupy.any(cupy.logical_or(idx >= d, idx < 0)):
                raise ValueError("invalid entry in coordinates array")
        elif _mode == "clip":
            idx = cupy.clip(idx, 0, d - 1)
        elif _mode == 'wrap':
            idx = idx % d
        else:
            raise ValueError('Unrecognized mode: {}'.format(_mode))
        raveled_indices += stride * idx
    return raveled_indices
def unravel_index(indices, dims, order='C'):
    """Converts array of flat indices into a tuple of coordinate arrays.
    Args:
        indices (cupy.ndarray): An integer array whose elements are indices
            into the flattened version of an array of dimensions :obj:`dims`.
        dims (tuple of ints): The shape of the array to use for unraveling
            indices.
        order ('C' or 'F'): Determines whether the indices should be viewed as
            indexing in row-major (C-style) or column-major (Fortran-style)
            order.
    Returns:
        tuple of ndarrays:
        Each array in the tuple has the same shape as the indices array.
    Examples
    --------
    >>> cupy.unravel_index(cupy.array([22, 41, 37]), (7, 6))
    (array([3, 6, 6]), array([4, 5, 1]))
    >>> cupy.unravel_index(cupy.array([31, 41, 13]), (7, 6), order='F')
    (array([3, 6, 6]), array([4, 5, 1]))
    .. warning::
        This function may synchronize the device.
    .. seealso:: :func:`numpy.unravel_index`, :func:`ravel_multi_index`
    """
    order = 'C' if order is None else order.upper()
    # For C order the innermost axis varies fastest, so peel dims from the
    # last one; iterating reversed(dims) lets the divmod loop below be shared
    # between both orders.
    if order == 'C':
        dims = reversed(dims)
    elif order == 'F':
        pass
    else:
        raise ValueError('order not understood')
    if not cupy.can_cast(indices, cupy.int64, 'same_kind'):
        raise TypeError(
            'Iterator operand 0 dtype could not be cast '
            'from dtype(\'{}\') to dtype(\'{}\') '
            'according to the rule \'same_kind\''.format(
                indices.dtype, cupy.int64().dtype))
    if (indices < 0).any():  # synchronize!
        raise ValueError('invalid entry in index array')
    # Repeated divmod peels one coordinate per axis off the flat index.
    unraveled_coords = []
    for dim in dims:
        unraveled_coords.append(indices % dim)
        indices = indices // dim
    # Any remainder means the flat index exceeded prod(dims).
    if (indices > 0).any():  # synchronize!
        raise ValueError('invalid entry in index array')
    if order == 'C':
        unraveled_coords = reversed(unraveled_coords)
    return tuple(unraveled_coords)
def mask_indices(n, mask_func, k=0):
    """
    Return the indices to access (n, n) arrays, given a masking function.
    Assume `mask_func` is a function that, for a square array a of
    size ``(n, n)`` with a possible offset argument `k`, when called
    as ``mask_func(a, k)`` returns a new array with zeros in certain
    locations (functions like :func:`~cupy.triu` or :func:`~cupy.tril` do
    precisely this). Then this function returns the indices where the non-zero
    values would be located.
    Args:
        n (int): The returned indices will be valid to access arrays
            of shape (n, n).
        mask_func (callable): A function whose call signature is
            similar to that of :func:`~cupy.triu`, :func:`~tril`. That is,
            ``mask_func(x, k)`` returns a boolean array, shaped like
            `x`. `k` is an optional argument to the function.
        k (scalar): An optional argument which is passed through to
            `mask_func`. Functions like :func:`~cupy.triu`, :func:`~cupy.tril`
            take a second argument that is interpreted as an offset.
    Returns:
        tuple of arrays: The `n` arrays of indices corresponding to
        the locations where ``mask_func(np.ones((n, n)), k)`` is
        True.
    .. warning::
        This function may synchronize the device.
    .. seealso:: :func:`numpy.mask_indices`
    """
    # Apply the mask function to an all-ones matrix and report where the
    # result is non-zero.
    ones = cupy.ones((n, n), dtype=cupy.int8)
    mask = mask_func(ones, k)
    return mask.nonzero()
# TODO(okuta): Implement diag_indices
# TODO(okuta): Implement diag_indices_from
def tril_indices(n, k=0, m=None):
    """Returns the indices of the lower triangular matrix.
    Here, the first group of elements contains row coordinates
    of all indices and the second group of elements
    contains column coordinates.
    Parameters
    ----------
    n : int
        The row dimension of the arrays for which the returned
        indices will be valid.
    k : int, optional
        Diagonal above which to zero elements. `k = 0`
        (the default) is the main diagonal, `k < 0` is
        below it and `k > 0` is above.
    m : int, optional
        The column dimension of the arrays for which the
        returned arrays will be valid. By default, `m = n`.
    Returns
    -------
    y : tuple of ndarrays
        The indices for the triangle. The returned tuple
        contains two arrays, each with the indices along
        one dimension of the array.
    See Also
    --------
    numpy.tril_indices
    """
    # Boolean lower-triangle mask, then select the row/column index grids
    # at the True positions.
    mask = cupy.tri(n, m, k=k, dtype=bool)
    grids = cupy.indices(mask.shape, dtype=int)
    return tuple(cupy.broadcast_to(g, mask.shape)[mask] for g in grids)
def tril_indices_from(arr, k=0):
    """Returns the indices for the lower-triangle of arr.
    Parameters
    ----------
    arr : cupy.ndarray
        The indices are valid for square arrays
        whose dimensions are the same as arr.
    k : int, optional
        Diagonal offset.
    See Also
    --------
    numpy.tril_indices_from
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    rows, cols = arr.shape
    return tril_indices(rows, k=k, m=cols)
def triu_indices(n, k=0, m=None):
    """Returns the indices of the upper triangular matrix.
    Here, the first group of elements contains row coordinates
    of all indices and the second group of elements
    contains column coordinates.
    Parameters
    ----------
    n : int
        The size of the arrays for which the returned indices will
        be valid.
    k : int, optional
        Refers to the diagonal offset. By default, `k = 0` i.e.
        the main dialogal. The positive value of `k`
        denotes the diagonals above the main diagonal, while the negative
        value includes the diagonals below the main diagonal.
    m : int, optional
        The column dimension of the arrays for which the
        returned arrays will be valid. By default, `m = n`.
    Returns
    -------
    y : tuple of ndarrays
        The indices for the triangle. The returned tuple
        contains two arrays, each with the indices along
        one dimension of the array.
    See Also
    --------
    numpy.triu_indices
    """
    # Upper triangle = complement of the strictly-lower triangle (tri with
    # the diagonal shifted down by one).
    mask = ~cupy.tri(n, m, k=k - 1, dtype=bool)
    grids = cupy.indices(mask.shape, dtype=int)
    return tuple(cupy.broadcast_to(g, mask.shape)[mask] for g in grids)
def triu_indices_from(arr, k=0):
    """Returns indices for the upper-triangle of arr.
    Parameters
    ----------
    arr : cupy.ndarray
        The indices are valid for square arrays.
    k : int, optional
        Diagonal offset (see 'triu_indices` for details).
    Returns
    -------
    triu_indices_from : tuple of ndarrays
        Indices for the upper-triangle of `arr`.
    See Also
    --------
    numpy.triu_indices_from
    """
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    rows, cols = arr.shape
    return triu_indices(rows, k=k, m=cols)
| cupy/cupy | cupy/_indexing/generate.py | generate.py | py | 18,125 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "numpy.ScalarType",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "cupy._creation.from_data.array",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "cupy._creation.from_data",
"line_number": 58,
"usage_type": "name"
},
{
"api... |
30394243625 | """
FILES to YAML
"""
import argparse
import json
import json.decoder
import os
from pathlib import Path
import yaml
import yaml.scanner
def walk_thru(startdir: str) -> list:
    """Recursively list every path under *startdir*, relative to it.

    Paths are normalised to forward slashes.  Fix: only the *leading*
    ``startdir`` prefix is stripped — the original used
    ``str.replace(startdir, '')``, which removes the substring anywhere
    in the path and corrupts entries whose own name contains it.
    """
    root = Path(startdir)
    entries = []
    for el in root.rglob('*'):
        rel = str(el).replace('\\', '/')
        if rel.startswith(startdir):
            rel = rel[len(startdir):]
        entries.append(rel)
    return entries
def read_file_content(filepath: str):
    """Read *filepath* and return its parsed content.

    Tries YAML first, then JSON, and finally falls back to the raw
    text when neither parser accepts it.

    Fix: the original caught only ``yaml.scanner.ScannerError``, so any
    other PyYAML failure (parser/composer errors on malformed YAML)
    escaped instead of falling through to the JSON/raw-text fallback.
    ``yaml.YAMLError`` is the documented common base class.
    """
    with open(filepath) as file:
        content = file.read()
    try:
        return yaml.safe_load(content)
    except yaml.YAMLError:
        pass
    try:
        return json.loads(content)
    except json.decoder.JSONDecodeError:
        pass
    return content
def dir_tree_dict(startdir: str, entries=None) -> dict:
    """Build a nested dict mirroring the directory tree under *startdir*.

    Directories become nested dicts; files map to their content as
    parsed by ``read_file_content``.

    Parameters
    ----------
    startdir : str
        Root directory path, with a trailing slash.
    entries : list, optional
        Relative paths ('/'-separated) to insert.  Defaults to the
        module-level ``dirs_and_files`` list for backward compatibility.

    Fixes two defects: ``os.path.isdir`` now tests the *partial* path
    built so far (the original tested the full item on every component,
    so a nested file's intermediate keys could receive file content),
    and each file is read exactly once instead of once per component.
    """
    if entries is None:
        entries = dirs_and_files
    tree = {}
    for item in entries:
        node = tree
        parts = item.split('/')
        for depth, part in enumerate(parts, start=1):
            partial = startdir + '/'.join(parts[:depth])
            if os.path.isdir(partial):
                node = node.setdefault(part, {})
            else:
                node = node.setdefault(part, read_file_content(partial))
    return tree
if __name__ == '__main__':
    # CLI entry point: serialise a directory tree (with parsed file
    # contents) into week5/res.yaml.
    parser = argparse.ArgumentParser()
    parser.add_argument('startdir', type=str)
    args = parser.parse_args()
    startdir = args.startdir + '/'
    # collect every path below the root, relative to it (module-level so
    # dir_tree_dict can see it)
    dirs_and_files = walk_thru(startdir)
    res = dir_tree_dict(startdir)
    # wrap the whole tree in the root directory's own name
    res = {startdir.split('/')[-2]: res}
    with open('week5/res.yaml', 'w', newline='') as newfile:
        yaml.dump(res, newfile, default_flow_style=False)
| MaksimPashkovsky/python-labs | week5/task1.py | task1.py | py | 1,452 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "yaml.scanner",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_... |
from django.urls import path
from rest_framework.routers import DefaultRouter
from src.dates_api.views import DateViewSet, PopularMonthListView
# URL namespace used by reverse() / {% url %} lookups
app_name = "dates_api"
# DRF router auto-generates the standard viewset routes under "dates/"
router = DefaultRouter()
router.register("dates", DateViewSet, basename="dates")
urlpatterns = [
    # aggregated "most popular month" list endpoint
    path("popular/", PopularMonthListView.as_view(), name="popular_month"),
]
# append the router-generated viewset routes to the explicit ones
urlpatterns += router.urls
| danielkosytorz/dates-DRF-app | backend/src/dates_api/urls.py | urls.py | py | 373 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "src.dates_api.views.DateViewSet",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
... |
71673331943 | import pandas as pd
import datetime as dt
from kucoincli.client import Client
def test_lending_liquidity(quote='USDT'):
    """Obtain max point-in-time liquidity for lending markets in *quote* terms.

    For each marginable base currency the maximum borrowable notional is
    estimated as mid price ((buy + sell) / 2) times the total size of the
    lending book.  Currencies whose data cannot be fetched are skipped
    (deliberate best-effort behaviour).

    Fix: the bare ``except:`` also swallowed ``KeyboardInterrupt`` and
    ``SystemExit``; narrowed to ``except Exception`` so the loop stays
    interruptible while keeping the best-effort semantics.

    Returns
    -------
    pandas.Series
        Estimated liquidity per currency, sorted descending.
    """
    client = Client()
    currencies = client.symbols(marginable=True).baseCurrency
    liq = {}
    for curr in currencies:
        try:
            df = client.lending_rate(curr)
            stats = client.get_stats(curr + '-' + quote)
            mid = (stats.buy + stats.sell) / 2
            liq[curr] = mid * df['size'].sum()
        except Exception:
            pass
    return pd.Series(liq).sort_values(ascending=False)
def test_trading_liquidity(lookback=90, interval='1day'):
    """Calculate mean turnover for marginable currencies in `interval` granularity over `lookback` days"""
    client = Client()
    since = dt.datetime.now() - dt.timedelta(days=lookback)
    turnover = {}
    for ticker in client.symbols(marginable=True).index:
        # mean per-interval turnover over the lookback window
        turnover[ticker] = client.ohlcv(
            tickers=ticker,
            interval=interval,
            start=since,
        ).turnover.mean()
    return pd.Series(turnover).sort_values(ascending=False)
def get_csv_data(path):
    """Load a CSV exported from the SQL db into a float DataFrame.

    The ``time`` column becomes a DatetimeIndex and rows are returned
    in chronological (ascending) order.
    """
    frame = pd.read_csv(path, index_col="time").astype(float)
    frame.index = pd.to_datetime(frame.index)
    return frame.sort_index()
| jaythequant/VBToptimizers | research/utils.py | utils.py | py | 1,411 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "kucoincli.client.Client",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "kucoincli.client.Client",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "datetime... |
17952993717 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 14 13:57:29 2019
@author: Witold Klimczyk
# ICEM
foil = Airfoil(filein = r'E:\propeller\mh_airofils\mh117/mh117.txt', t = 0.001, chord = 0.2)
foil.runFluent(15,.2,1)#
# XFOIL
foil2 = Airfoil(ftype = 'XFOIL', filein = r'E:\AIRFOIL\airfoils/naca0012.txt', t = 0.001, chord = 0.2)
# x,y
X = pd.read_csv(f'http://airfoiltools.com/airfoil/seligdatfile?airfoil={foilname}-il')
X.to_csv(r'E:\AIRFOIL\temp.csv', header = False, index = False)
X = np.loadtxt(r'E:\AIRFOIL\temp.csv')
foil = Airfoil( 'XFOIL', r'E:\AIRFOIL\temp.csv')
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
import subprocess
import os
from subprocess import run, PIPE
import gc
import pandas as pd
from urllib.error import HTTPError
from fluentScheme import generateScheme
from icemScheme import generateICEMScheme
class Airfoil():
def __init__(self, ftype = 'ICEM', filein = None, x = None, y = None, T_req = None, camber = None,
chord = None, beta = None, z = 0, fileoutICEM = None, t = 0, dx = 0, dy = 0, split = False, origin = 0,
camb = False, r_LE = None, verbose = False, workingdir = r'E:\AIRFOIL', xfoildir = None):
"""
inputs:
- ftype/name: 'ICEM' / 'XFOIL' / 'XY', specifies type of airofil input data or its name to download from airfoiltools
- filein: '.txt' file with points coords (can be non-txt)
- chord: dimensionless chord
- beta: originally used for propeller pitch, for wing stands for twist
- z: specifies third coordinae used for 3d wing stacking
- fileoutICEM: full path and name for ICEM output file format, no extension (only name)
- TEcut: specifies location of vertical cut
- t: float: te thickness
- T_req: maximum thickness to match required absolute thickness
- origin: float: used to keep particular airfoil poitn in center, e.g. origin = .25 keeps quarter chord in center
- camb: True/False: if we want to scael camber with thickness
- workingdir: specify if other than current
- xfoildir: contains xfoil.exe and uses this directory to save .txt files, if not given assumes it is in folder XFOIL under the same directory as current working folder
attributes:
- x: x-coords
- y: y-coords
- z: z-coords
"""
gc.collect()
self.camber = camber
self.chord = chord
self.z = z
self.filein = filein
self.workingdir = workingdir if workingdir != None else os.getcwd().strip('\\python')
print('workingdir {}'.format(self.workingdir))
self.xfoildir = self.workingdir + '/XFOIL/'
self.filebuffer = self.xfoildir + '/xfoilairfoil.txt'
self.filecp = self.xfoildir + '/xfoilairfoilcp.txt'
self.fileCptex = self.xfoildir + '/xfoilairfoilcptex.txt'
self.camber_t = self.xfoildir + '/camber_t.txt'
self.xfoilpath = self.xfoildir + '/xfoil.exe'
# directories to check before analysis
self.fileFig = self.workingdir + '/saved_plots/airfoil'
self.meshin = self.workingdir + '/mesh/fluent.msh'
self.meshDir = self.workingdir + '/mesh/'
self.fileoutICEM = self.workingdir+'/mesh/foilICEM' if fileoutICEM is None else fileoutICEM
self.fluentdir = self.workingdir + '/fluent/'
self.ftype = ftype
self.verbose = verbose
self.camber = None
self.thickness = None
self.split = False
self.t = t
if ftype == 'ICEM':
self.readICEM()
elif ftype == 'XFOIL':
self.readXFOIL()
self.saveXFOIL()
elif ftype == 'XY':
self.x = x
self.y = y
else:
try:
X = pd.read_csv(f'http://airfoiltools.com/airfoil/seligdatfile?airfoil={ftype}-il')
X.to_csv(r'E:\AIRFOIL\temp.csv', header = False, index = False)
X = np.loadtxt(r'E:\AIRFOIL\temp.csv')
self.x = X[:,0]
self.y = X[:,1]
self.z = self.z
except HTTPError:
print('error reading airofil from web')
return None
# chord scaling
if chord is None:
self.chord = np.max(self.x) - np.min(self.x)
if self.verbose:
print('evaluated chord is {:.2f}'.format(self.chord))
else:
self.chord = np.max(self.x) - np.min(self.x)
self.scale_XFOIL(chord/np.max(self.x))
self.saveXFOIL()
if self.verbose:
print('scaled airfoil to desired chord {:.3f}'.format(self.chord))
self.x1 = None
self.x2 = None
self.y1 = None
self.y2 = None
self.z1 = z
self.z2 = z
# imposing required thickness
if T_req is not None:
self.thicken(T_req, camb)
# cut TE
if t > 0:
self.cutTE_XFOIL(t, r = .5)
if r_LE is not None:
r_LE_current = self.LEradius()[0]
if r_LE > r_LE_current:
print('modifying LE radius')
factor = r_LE / r_LE_current
self.modify_XFOIL(1,1,factor)
print('LE factor = {:.1f}'.format(factor))
# twisting airfoil to required beta
if beta is not None:
self.rotate_XFOIL(beta, origin)
# translating airofil to match required origin
self.translate_XFOIL(dx - origin * self.chord, dy)
# setting split after all airofil modifications
self.split = split
if self.split:
self.splitCurve()
def readICEM(self):
X = np.loadtxt(self.filein, delimiter = '\t', skiprows = 1)
self.x = X[:,0]
self.y = X[:,1]
self.z = self.z
def readXFOIL(self, file = None):
if file is None:
X = np.loadtxt(self.filein, skiprows = 1)
else:
X = np.loadtxt(file, skiprows = 1)
self.x = X[:,0]
self.y = X[:,1]
self.z = self.z
self.filein = self.filebuffer
def saveXFOIL(self):
""" saves airfoil coords to .txt file with specified path
"""
# close trailing edge
# save coords to file
if not self.split:
with open(self.filebuffer, "w") as text_file:
print("airfoil", file = text_file)
for i in range(len(self.x)):
print(" {} {}".format(self.x[i], self.y[i]), file=text_file)
else:
with open(self.filebuffer, "w") as text_file:
print("airfoil", file = text_file)
for i in range(len(self.x2)-1,0,-1):
print(" {} {}".format(self.x2[i], self.y2[i]), file=text_file)
for i in range(len(self.x1)):
print(" {} {}".format(self.x1[i], self.y1[i]), file=text_file)
###
### =================== GEOMETRY SECTION ==========================
###
    def cutTE_XFOIL(self, t = .005, r = 0.5):
        """Open the trailing edge to a finite gap using XFOIL.

        Runs XFOIL's ``gdes/tgap`` on the buffer file (which adjusts
        the TE gap while maintaining camber), then reloads the modified
        coordinates.

        t: trailing-edge gap thickness
        r: blending radius
        """
        # flush current coordinates to the buffer file xfoil will load
        self.saveXFOIL()
        airfoilIN = self.filebuffer
        airfoilOUT = self.filebuffer
        # scripted xfoil session: load -> repanel -> set TE gap -> save
        command = 'load ' + airfoilIN + '\npane\ngdes\ntgap '+ '{} {}'.format(t,r) + '\n\npane\n\nsave '+airfoilOUT+'\ny\n\nquit\n'
        run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
        if self.verbose:
            print('succesfully modified TE using xfoil')
        # reload the xfoil-written geometry back into self.x / self.y
        self.readXFOIL(airfoilOUT)
    def modify_XFOIL(self, thicken = 1, camber = 1, LE_radius=1):
        """Scale thickness / camber / leading-edge radius via XFOIL.

        Each factor multiplies the current distribution: values below 1
        decrease it, above 1 increase it, 1 leaves it unchanged.  Uses
        XFOIL ``gdes/tfac`` for thickness+camber and ``gdes/lera`` for
        the LE radius (second ``lera`` argument fixed at 0.2 —
        presumably the blending extent; confirm against XFOIL docs).
        """
        # flush current coordinates so xfoil operates on fresh geometry
        self.saveXFOIL()
        airfoilIN = self.filebuffer
        airfoilOUT = self.filebuffer
        command = 'load ' + airfoilIN + '\npane\ngdes\ntfac '+ '{} {}'.format(thicken, camber) +'\nlera {} {}'.format(LE_radius, .2)+ '\n\npane\n\nsave '+airfoilOUT+'\ny\n\nquit\n'
        p = run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
        if self.verbose:
            print('modified thickness scaled by {}'.format(thicken))
        self.readXFOIL(airfoilOUT)
    def thicken(self, req_T, camb = False):
        """Scale the airfoil so its maximum thickness matches ``req_T``.

        req_T: required absolute maximum thickness
        camb: when True, camber is scaled by the same factor as the
              thickness; otherwise camber is left unchanged (factor 1).
        """
        # refresh self.t_max from xfoil before computing the scale factor
        self.findCamberThickness()
        factor = req_T/(self.t_max*self.chord)
        print(f'{factor}')
        if camb==True:
            # scale camber together with thickness
            camb = factor
            self.modify_XFOIL(thicken = factor, camber = camb)
        else:
            # thickness only; camber factor 1 = unchanged
            camb = 1
            self.modify_XFOIL(thicken = factor, camber = camb)
        if self.verbose:
            print('modified thickness to desired value, i.e. {:.3f}, by a factor of {:.2f}'.format(req_T, factor))
    def scale_XFOIL(self, factor = 1):
        """Scale the whole airfoil by ``factor`` using XFOIL's ``gdes/scal``.

        ``self.chord`` is multiplied by the same factor afterwards so it
        stays consistent with the scaled geometry.
        """
        print('chord before modification: {:.3f}'.format(self.chord))
        # write current coordinates to the buffer file for xfoil
        self.saveXFOIL()
        airfoilIN = self.filebuffer
        airfoilOUT = self.filebuffer
        command = 'load ' + airfoilIN + '\npane\ngdes\nscal '+ '{}'.format(factor) + '\n\npane\n\nsave '+airfoilOUT+'\ny\n\nquit\n'
        p = run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
        if self.verbose:
            print('modified chord by factor {}'.format(factor))
        self.readXFOIL(airfoilOUT)
        # keep the stored chord in sync with the scaled geometry
        self.chord *= factor
        print('chord after modification: {:.3f}'.format(self.chord))
    def translate_XFOIL(self, dx = 0, dy = 0):
        """Translate the airfoil by (``dx``, ``dy``) using XFOIL's
        ``gdes/tran`` command, then reload the shifted coordinates.
        """
        # flush current coordinates to the buffer file for xfoil
        self.saveXFOIL()
        airfoilIN = self.filebuffer
        airfoilOUT = self.filebuffer
        command = 'load ' + airfoilIN + '\npane\ngdes\ntran '+ '{} {}'.format(dx, dy) + '\n\npane\n\nsave '+airfoilOUT+'\ny\n\nquit\n'
        p = run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
        if self.verbose:
            print('airfoil translated by {:.3f} in x and {:.3f} in y'.format(dx, dy))
        self.readXFOIL(airfoilOUT)
def rotate_XFOIL(self, angle = 0, origin = 0):
""" rotates airfoil using xfoil by specified angle in degrees, around (0,0), positive angle moves TE down
"""
if origin is not 0:
self.translate_XFOIL(dx = -origin*self.chord)
self.saveXFOIL()
airfoilIN = self.filebuffer
airfoilOUT = self.filebuffer
command = 'load ' + airfoilIN + '\npane\ngdes\nadeg '+ '{}'.format(angle) + '\n\npane\n\nsave '+airfoilOUT+'\ny\n\nquit\n'
p = run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
if self.verbose:
print('airfoil rotated by {:.2f}'.format(angle))
self.readXFOIL(airfoilOUT)
if origin is not 0:
self.translate_XFOIL(dx = origin*self.chord )
def findCamberThickness(self, plot = False, tex = False, name = ''):
""" finds camber and thickness distributions usign xfoil """
self.saveXFOIL()
airfoilIN = self.filebuffer
airfoilOUT = self.filebuffer
command = 'load ' + airfoilIN + '\npane\ngdes\ntcpl\ncamb\nwrtc\n{}\n\n\nquit\n'.format(self.camber_t)
p = run([self.xfoilpath], stdout=PIPE, input=command, encoding='ascii', shell = False)
if p.returncode ==2:
if self.verbose:
print('found camber and thickness distributions')
X = np.loadtxt(self.camber_t, skiprows = 1)
self.camber = X[:,:2]
self.thickness = X[:,2:]
self.readXFOIL(airfoilOUT)
self.t_max = 2* np.max(self.thickness[:,1])
if plot:
plt.figure(figsize = (6,2),dpi = 200)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(self.camber[:,0], self.camber[:,1], 'k-',linewidth = 1.2, label = 'camber')
plt.plot(self.thickness[:,0], self.thickness[:,1], 'k--',linewidth = 1.2, label = 'thickness')
plt.plot(self.thickness[:,0], -self.thickness[:,1], 'k--',linewidth = 1.2)
plt.xlabel(r'$x/c$')
plt.ylabel(r'$y/c$',fontsize=12)
plt.title(r"{}".format('camber and thickness distributions'), fontsize=12)
#plt.subplots_adjust(top=0.8)
plt.axis('equal')
plt.legend()
plt.tight_layout()
plt.grid('major', linewidth = .2)
plt.savefig(self.fileFig+'ct', dpi = 1000)
plt.show()
if tex:
camberdir = self.workingdir + r'\wing3d\tex-plots\{}camber.txt'.format(name)
thicknessdir = self.workingdir + r'\wing3d\tex-plots\{}thickness.txt'.format(name)
np.savetxt(camberdir, self.camber)
np.savetxt(thicknessdir, self.thickness)
    def t_x(self, x=None):
        """Return the airfoil thickness at a chordwise station.

        x: chordwise position as x/c (between 0 and 1); when omitted,
           the maximum thickness ``self.t_max`` is returned.
        Returns twice the stored half-thickness at the first tabulated
        station beyond ``x`` (full thickness), or falls through —
        implicitly returning None — when ``x`` is past the last station.
        """
        # refresh self.thickness / self.t_max via xfoil
        self.findCamberThickness()
        i = 0
        if x is None:
            return self.t_max
        # scan the thickness table; return at the first station past x
        for i in range(len(self.thickness[:,0])):
            if self.thickness[i,0] > x :
                return 2*self.thickness[i,1]
        if self.verbose:
            print('invalid argument')
def LEradius(self, plot = False, dpi = 500, saveFig = False):
""" method to find leading edge radius
buids many circles, each from 3 points from leading edge region
lowest radius circle is chosen as le radius
allows to plot le region to investigate le radius
"""
def findCircle(P1, P2, P3):
import sympy as sym
a, b, r2 = sym.symbols('a, b, r2')
e1 = sym.Eq((P1[0]-a)**2+(P1[1]-b)**2, r2**2)
e2 = sym.Eq((P2[0]-a)**2+(P2[1]-b)**2, r2**2)
e3 = sym.Eq((P3[0]-a)**2+(P3[1]-b)**2, r2**2)
solution = sym.solve([e1, e2, e3], (a, b, r2))
r = float(np.abs(solution[0][2]))
x = float(np.abs(solution[0][0]))
y = float(np.abs(solution[0][1]))
return x,y,r
i = np.where(self.x == min(self.x))[0][0]
# find several circles around LE
r = 1
j = 1
k = 1
while j<5:
while k<5:
x_temp,y_temp,r_temp = findCircle( [self.x[i-j], self.y[i-j]], [self.x[i], self.y[i]], [self.x[i+k], self.y[i+k]] )
if r_temp<r:
r = r_temp
x = x_temp
y = y_temp
k+=1
j+=1
if plot:
an = np.linspace(0, 2*np.pi, 100)
plt.figure(dpi = dpi)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(self.x, self.y, 'ko-', linewidth = 1.4)
plt.plot([x],[y],'ro')
plt.plot(r*np.cos(an)+x, r*np.sin(an)+y, 'r-', linewidth = 1.4)
plt.title(r"{}".format('leading edge radius close up'), fontsize=12)
plt.axis('equal')
plt.ylim(-r, r*3.5)
if saveFig:
plt.savefig(self.fileFig, dpi = 1000)
plt.show()
fig, ax = plt.subplots(dpi = 500)
ax.plot(self.x, self.y, 'ko-', linewidth = 1.4)
ax.plot([x],[y],'ro')
ax.plot(r*np.cos(an)+x, r*np.sin(an)+y, 'r-', linewidth = 1.4)
ax.set_xlim(-r, r*3.5)
ax.set_ylim(-r*2, r*2)
ax.set_title('mh117: R=2')
ax.set_aspect(1.0)
ax.grid(which='major', linewidth = 0.2)
plt.show()
return r, x , y
def saveICEM(self, airfoilfile = None):
""" saves points in icem format, either as a single curve of splits to upper and lower (recommended) """
if self.y[1]>self.y[-1]:
self.x = np.flip(self.x, axis = 0)
self.y = np.flip(self.y, axis = 0)
if airfoilfile is not None:
self.fileoutICEM = airfoilfile
if not self.split:
self.zs = np.ones(len(self.x))*self.z
self.fileoutICEM += '.txt'
with open( self.fileoutICEM, 'w') as f:
f.write('{}\t{}\n'.format(len(self.x), 1))
for i in range(len(self.x)):
f.write('{}\t{}\t{}\n'.format(self.x[i]*1000, self.y[i]*1000, self.zs[i]*1000) )
else:
self.z1 = np.ones(len(self.x1))*self.z
self.z2 = np.ones(len(self.x2))*self.z
with open( self.fileoutICEM + '.0.txt', 'w') as f:
f.write('{}\t{}\n'.format(len(self.x1), 1))
for i in range(len(self.x1)):
f.write('{}\t{}\t{}\n'.format(self.x1[i]*1000, self.z1[i]*1000, self.y1[i]*1000) )
with open( self.fileoutICEM + '.1.txt', 'w') as f:
f.write('{}\t{}\n'.format(len(self.x2), 1))
for i in range(len(self.x2)):
f.write('{}\t{}\t{}\n'.format(self.x2[i]*1000, self.z2[i]*1000, self.y2[i]*1000) )
def saveSW(self, airfoilfile):
""" saves points in sw format, either as a single curve of splits to upper and lower (recommended) """
if not self.split:
self.zs = np.ones(len(self.x))*self.z
airfoilfile += '.txt'
with open( airfoilfile, 'w') as f:
for i in range(len(self.x)):
f.write('{}\t{}\t{}\n'.format(self.x[i]*1000, self.zs[i]*1000, self.y[i]*1000) )
else:
self.z1 = np.ones(len(self.x1))*self.z
self.z2 = np.ones(len(self.x2))*self.z
with open( airfoilfile + '.0.txt', 'w') as f:
for i in range(len(self.x1)):
f.write('{}\t{}\t{}\n'.format(self.x1[i]*1000, self.z1[i]*1000, self.y1[i]*1000) )
with open( airfoilfile + '.1.txt', 'w') as f:
for i in range(len(self.x2)):
f.write('{}\t{}\t{}\n'.format(self.x2[i]*1000, self.z2[i]*1000, self.y2[i]*1000) )
###
### =================== ANALYSIS SECTION ==========================
###
def runXFOIL(self, cl=.2, alfa = None, re=1e6, m =.2, n_crit = 6, iters = 500, cp = False):
self.saveXFOIL()
airfoilIN = self.filebuffer
if alfa is None:
S = cl
s = 'cl'
if self.verbose:
print('running XFOIL for: cl={}'.format(cl))
else:
S = alfa
s = 'a'
if self.verbose:
print('running XFOIL for: aoa={}'.format(alfa))
if not cp:
commands = 'load ' + airfoilIN + '\npane\noper\nvpar\nn {}\n\nvisc {}'.format(n_crit, re) + '\niter '+str(iters)+'\n{} {}'.format(s, S) + '\n\nquit\n'
p = run([self.xfoilpath], stdout=PIPE,
input=commands, encoding='ascii')
else:
commands = 'load ' + airfoilIN + '\npane\noper\nvpar\nn {}\n\nvisc {}'.format(n_crit, re) + '\niter '+str(iters)+'\n{} {} '.format(s, S) + '\ncpwr\n{}\n\nquit\n'.format(self.filecp)
p = run([self.xfoilpath], stdout=PIPE,
input=commands, encoding='ascii')
return 0
try:
alfa = float(p.stdout[-130:-118])
Cl = float(p.stdout[-112:-106])
Cd = float(p.stdout[-78:-69])
Cm = float(p.stdout[-94:-86])
print(alfa,Cl,Cd,Cm)
except ValueError:
if self.verbose:
print('error running xfoil, try slighlty different cl/alpha') # the reason is xfoil may not converge for this particular condition but in general it converges
if alfa is None:
alfa, Cd, Cm, Cl = self.runXFOIL(cl = 1.01*cl, re = re, m = m, n_crit = n_crit, iters = iters)
else:
alfa, Cd, Cm, Cl = self.runXFOIL(alfa = .01+alfa, re = re, m = m, n_crit = n_crit, iters = iters)
#return 1, 1, 1, 1
return alfa, Cd, Cm, Cl
def runPolar(self, a0=-4, a1=8, re=1e6, m=.2, n_crit = 6, plot = False):
alfas = np.zeros(a1-a0)
cds = np.zeros(a1-a0)
cls = np.zeros(a1-a0)
cms = np.zeros(a1-a0)
i=0
for aoa in np.arange(a0,a1,1):
alfas[i], cds[i], cms[i], cls[i] = self.runXFOIL(alfa = aoa, re= re, m = m, n_crit = n_crit)
i+=1
alfas = np.delete(alfas, np.where(cds == 1))
print (alfas)
if plot:
plt.figure()
plt.plot(alfas, cls, 'o-')
plt.xlabel(r'$ \alpha [^\circ]$')
plt.ylabel(r'$C_L$')
plt.show()
plt.figure()
plt.plot(alfas, cds, 'o-')
plt.xlabel(r'$ \alpha[^\circ]$')
plt.ylabel(r'$C_D$')
plt.show()
return alfas, cds, cms, cls
def plotCp(self, outputtex = False, dpi = 200, name = None, saveFig = False, airfoil= True, alfa = None):
X = np.loadtxt(self.filecp, skiprows = 3)
x = X[:,0]
cp = X[:,2]
if outputtex:
np.savetxt(self.fileCptex, X)
if name is None:
if alfa is not None:
name = '$C_p$ distribution at $\alpha = {}$'.format(alfa)
else:
name = '$C_p$ distribution'
plt.figure(figsize = (6,4),dpi = dpi)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.plot(x, -cp, 'k-',linewidth = 1)
if airfoil:
plt.plot(self.x/self.chord, self.y/self.chord*3-np.max(cp), 'k-',linewidth = 1)
plt.xlabel(r'$x/c$',fontsize=12)
plt.ylabel(r'$-C_p$',fontsize=12)
plt.title(r"{}".format(name), fontsize=12)
plt.subplots_adjust(top=0.8)
# plt.axis('equal')
plt.grid(which='major', linewidth = 0.2)
plt.tight_layout()
# plt.grid(True)
if saveFig:
plt.savefig(self.fileFig, dpi = 1000)
plt.show()
def runFluent(self, alfa, mach, chord,
rho = 1.225, T = 300, viscosity = 1.78e-5,
name = 'airfoil', path = None, ID = 0,
mesh = 'o', y1 = 0.01, n_r = 120, n_le = 30, n_top = 120,
model = 'kw-sst', intermittency = False, lowre = False, polar = False,
onlymesh = False, onlyfluent = False, mshin = None, meshunits = 'mm',
tt = 1, farfieldnames = ['farfield'], outletnames = [], interiornames = ['int_fluid']
):
"""
chord used to scale mesh in fluent and use for coefficients
if using auto o-mesh, generate airfoil with unit chord and scale mesh to required value
static method: can be applied for given mesh, without airfoil initialization
"""
if path is None:
path = self.workingdir + r'\fluent'
import time
start = time.time()
# begin with structured mesh generation
#
import subprocess
def subprocess_cmd(command):
process = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
# print(proc_stdout)
return proc_stdout
if not onlyfluent:
self.saveICEM(self.fileoutICEM)
ICEMrun ='"C:\\Program Files\\ANSYS Inc\\v194\\icemcfd\\win64_amd\\bin\\icemcfd" -script'
# pick mesh replay file to generate mesh
if mesh == 'o':
meshrpl = self.meshDir + 'omesh.rpl'
# ICEMscr = r'"E:\propeller\python\wing3d\rpl42.rpl"'
# ICEMscr = r'"E:\propeller\python\wing3d\omesh\omesh.rpl"'
ICEMscr = f'"{meshrpl}"'
elif mesh == 'unstructured':
ICEMscr = r'"C:\Users\wk5521\Documents\ICEM\airfoil replays\mesh_output.rpl"'
generateICEMScheme( y1 = y1, n_r = n_r, n_le = n_le, n_top = n_top, file = meshrpl)
ICEM = ICEMrun + ' ' + ICEMscr
subprocess_cmd(ICEM)
# now having the mesh, run shceme generation, hence fluent
if onlymesh:
print('finished mesh')
return 0
fluentjournal = self.workingdir + '/fluent/journal.txt'
casename = f'foil,{model},{alfa},{mach},{chord},{self.t}'
if polar:
casename = f'foil,{model},{mach},{chord},{self.t}'
if lowre:
casename+=',lowre'
if intermittency:
casename+= 'inter'
meshin = mshin if mshin is not None else self.meshin
generateScheme(filename = fluentjournal,
casename = casename,
chord = chord,
viscosity = viscosity,
T=T,
alfa = alfa,
mach = mach,
meshin = meshin,
meshunits = meshunits,
farfieldnames = farfieldnames,
outletnames = outletnames,
interiornames = interiornames,
path = self.fluentdir,
model = model,
intermittency = intermittency,
lowre = lowre,
polar = polar,
tt =tt
)
FLUENTrun = '"C:\\Program Files\\ANSYS Inc\\v194\\fluent\\ntbin\\win64\\fluent.exe" 2d -t8 -wait -i'
FLUENT = FLUENTrun + ' '+ '"{}"'.format(fluentjournal)
subprocess_cmd(FLUENT)
end = time.time()
showresult = False
if showresult:
result = np.loadtxt('{}/reports/{}.out'.format(self.fluentdir, casename), skiprows = 100)
result = result[-10:]
result = np.mean(result, axis = 0)
lift = result[1]
drag = result[2]
moment = result[3]
duration = end - start
print('mesh size: {}, lift: {:.4f}, drag: {:.6f}, duration: {}'.format(2*(n_le+n_top)*n_r , lift , drag , duration))
return 2*(n_le+n_top)*n_r , lift , drag
def splitCurve(self):
""" splits curve into two curves at leading edge by front-most point """
i_min = np.where(self.x == np.amin(self.x))[0][0]
self.split = True
self.x1 = self.x[:i_min+1]
self.y1 = self.y[:i_min+1]
self.x2 = self.x[i_min:]
self.y2 = self.y[i_min:]
self.z1 = np.ones(len(self.x1))*self.z
self.z2 = np.ones(len(self.x2))*self.z
def qpropData(self, m, re, n = 12, n_crit = 5):
""" this method finds coefficients required to define qprop input file
returns (cl0, clalfa, cd0, clcd0, cd2u, cd2l)
"""
# collect some data for range of angles of attack
alfas = np.zeros(n)
cds = np.zeros(n)
cms = np.zeros(n)
cls = np.zeros(n)
j = 0
for i in range(-6, 6, 1):
self.cutTE_XFOIL(t = 0.005, r = .3)
alfas[j], cds[j], cms[j], cls[j] = self.runXFOIL(alfa = i, re = re, m = m, iters = 1000, n_crit = n_crit)
j+=1
cl0 = cls[6]
clalfa = (cls[-1] - cls[4]) / np.radians(7)
# now begin drag section
from scipy.optimize import minimize, fmin
cd0 = cds.min()
for index in range(len(cds)):
if cds[index] == cd0:
break
clcd0 = cls[index]
def merit(x):
# args = (cd0, )
merit = np.abs( cds[index + 1] + cds[index + 2] - (cd0 + x * (cls[index + 1] - clcd0 )**2 + cd0 + x * ( cls[index + 2] - clcd0 )**2 ) )
return merit
result = fmin(merit, .1)
cd2u = result[0]
def merit2(x, *args):
return np.abs( cds[args[0] - 1] + cds[args[0] - 2] - (cd0 + x * (cls[args[0] - 1] - clcd0 )**2 + cd0 + x * ( cls[args[0] - 2] - clcd0 )**2 ) )
result2 = minimize(merit2, .05, args = (index))
cd2l = result2.x[0]
print('cl0, clalfa, cd0, clcd0 = {:.3f} {:.3f} {:.3f} {:.3f}'.format( cl0, clalfa, cd0, clcd0))
return cl0, clalfa, cd0, clcd0, cd2u, cd2l
def plotAirfoil(self, name=None, saveFig = False, dpi = 200, tex = False , nametex = ''):
if name is None:
name = 'airfoil'
plt.figure(figsize = (6,2),dpi = dpi)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
if self.split:
plt.plot(self.x1/self.chord, self.y1/self.chord, 'k-',linewidth = 1.2)
plt.plot(self.x2/self.chord, self.y2/self.chord, 'k-',linewidth = 1.2)
else:
plt.plot(self.x/self.chord, self.y/self.chord, 'k-',linewidth = 1.2)
plt.xlabel(r'$x/c$',fontsize=12)
plt.ylabel(r'$y/c$',fontsize=12)
plt.title(r"{}".format(name), fontsize=12)
plt.subplots_adjust(top=0.8)
plt.axis('equal')
plt.grid(which='major', linewidth = 0.2)
plt.tight_layout()
if saveFig:
plt.savefig(self.fileFig, dpi = 1000)
plt.show()
if tex:
X = np.append((self.x/self.chord).reshape(-1,1), (self.y/self.chord).reshape(-1,1), axis = 1 )
savedir = self.workingdir + r'\wing3d\tex-plots\{}airfoil.txt'.format(nametex)
np.savetxt(savedir, X)
| Witekklim/propellerDesign | airfoil.py | airfoil.py | py | 30,685 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.rc",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number... |
from django.urls import path
# NOTE(review): `include` and `admin` appear unused in this module —
# candidates for removal once confirmed against the full file.
from django.conf.urls import include
from django.contrib import admin
from app.accounts.api.v1.views import (
    UserCreatView,
    UserUpdateView,
    GetAuthToken,
    AvatarAPIView,
    ClubAPIView,
)
# URL namespace used by reverse() / {% url %} lookups
app_name = 'accounts'
urlpatterns = [
    # token-based login endpoint
    path('login/', GetAuthToken.as_view(), name='user-login'),
    path('create/', UserCreatView.as_view(), name='user-create'),
    # pk-addressed update / avatar endpoints
    path('update/<int:pk>/', UserUpdateView.as_view(), name='user-update'),
    path('avatar/<int:pk>/', AvatarAPIView.as_view(), name='user-avatar'),
    path('club/', ClubAPIView.as_view(), name='club-details')
]
| AndresGomesIglesias/LanTool-Backend | app/accounts/api/v1/urls.py | urls.py | py | 664 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "app.accounts.api.v1.views.GetAuthToken.as_view",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "app.accounts.api.v1.views.GetAuthToken",
"line_number": 17,
"usage_type":... |
43051538216 | import numpy as np
import networkx as nx
import random as pr
import matplotlib.pyplot as pl
import pp
import time
import copy
import sys
import os
import PIL
from Tkinter import *
import tkFileDialog
import tkSimpleDialog
import tkMessageBox
from fomite_ABM import *
from math import *
from PIL import Image
from PIL import ImageTk
global image1
global image2
def vp_start_gui():
    '''Starting point when module is the main routine.

    Builds the Tk root window and the main toplevel, registers the
    window-close handler, and pre-creates placeholder figure files so
    close() can clean them up unconditionally.
    '''
    global val, w, root, top, mod, dummy1, dummy2, dummy3, dummy4
    global parameters
    mod = 0
    # simulation parameter defaults (zero / None until set elsewhere)
    parameters = {'contactRateHH':0.0, 'contactRateHF':0.0, 'pickupFr':0.0, 'transferFr':0.0, 'faceTouchRate':0.0, 'infProb':0.0, 'washRate':0.0, 'incubationRate':0.0, 'recoveryRate':0.0, 'sheddingRate':0.0, 'shedding':0.0, 'dieOff':0.0, 'deconFreq':None, 'dayLength':0.0}
    root = Tk()
    top = New_Toplevel_1 (root)
    # route the window-manager close button through close() for cleanup
    root.protocol('WM_DELETE_WINDOW',lambda: close())
    # create empty figure files up front (removed again in close())
    dummy1 = open('fig1.png', 'w')
    dummy2 = open('fig2.png', 'w')
    dummy3 = open('fig3.png', 'w')
    dummy4 = open('fig4.png', 'w')
    root.resizable(width=False, height=False)
    root.mainloop()
def close():
    """Window-close handler: release the placeholder figure files
    created in ``vp_start_gui`` and destroy the Tk root.

    Fix: each close/remove is now guarded so a missing or
    already-removed file can no longer raise and prevent the window
    from closing; the four copy-pasted close/remove pairs are folded
    into loops.
    """
    for handle in (dummy1, dummy2, dummy3, dummy4):
        try:
            handle.close()
        except OSError:
            pass
    for name in ('fig1.png', 'fig2.png', 'fig3.png', 'fig4.png'):
        try:
            os.remove(name)
        except OSError:
            pass
    root.destroy()
class New_Toplevel_1:
    """Main application window of the SIWR fomite-transmission GUI.

    Builds 14 labelled parameter entry fields, the day/agent count
    fields, the action buttons and the large plot-display button.
    Widget attribute names (Entry1..Entry16, Button1..Button8, Label*)
    are part of the public interface: the module-level callbacks reach
    into them via the global ``top``.
    """

    def __init__(self, top=None):
        '''This class configures and populates the toplevel window.
           top is the toplevel containing window.'''
        # Tk font descriptors used by the action buttons.
        font10 = "-family {DejaVu Sans Mono} -size 15 -weight normal " \
            "-slant roman -underline 0 -overstrike 1"
        font11 = "-family {DejaVu Sans Mono} -size 15 -weight bold " \
            "-slant roman -underline 0 -overstrike 0"
        font9 = "-family {DejaVu Sans Mono} -size 15 -weight normal " \
            "-slant roman -underline 0 -overstrike 0"
        global days, agents
        days = 10
        agents = 20

        top.geometry("1031x593+89+80")
        top.title('Maize & Blue SIWR v2.11')
        top.configure(background="#135bd9")
        top.configure(highlightcolor="black")
        top.configure(cursor='pencil')

        # (attribute name, relx, caption) for the 14 parameter labels;
        # rows are spaced 0.04 apart starting at rely=0.03.  The odd
        # per-label relx offsets reproduce the original hand-tuned layout.
        label_specs = [
            ('Label1', 0.01, 'Contact Rate HH'),
            ('Label15', 0.03, 'Contact Rate HF'),
            ('Label14', -.01, 'Pickup FR'),
            ('Label5', 0.015, 'Transfer FR'),
            ('Label4', 0.01, 'Face Touch Rate'),
            ('Label6', 0.008, 'INF Prob'),
            ('Label7', -.01, 'Wash Rate'),
            ('Label8', 0.03, 'Incubation Rate'),
            ('Label9', 0.003, 'Recovery Rate'),
            ('Label10', 0.027, 'Shedding Rate'),
            ('Label11', -.01, 'Shedding'),
            ('Label12', 0.00, 'Dieoff'),
            ('Label3', -.003, 'Decon Freq'),
            ('Label13', 0.018, 'Day Length'),
        ]
        self._param_entries = []  # Entry widgets in save-file order
        for row, (attr, relx, caption) in enumerate(label_specs):
            rely = round(0.03 + 0.04 * row, 2)
            label = Label(top)
            label.place(relx=relx, rely=rely, height=18, width=126)
            if attr == 'Label1':
                # The first label used slightly different active colours.
                label.configure(activebackground="#135bd9")
                label.configure(activeforeground="white")
            else:
                label.configure(activebackground="#f9f9f9")
            label.configure(background="#135bd9")
            label.configure(text=caption)
            setattr(self, attr, label)

            entry = Entry(top)
            # Entry columns alternate between relx 0.17 and 0.19.
            entry.place(relx=(0.17 if row % 2 == 0 else 0.19), rely=rely,
                        relheight=0.03, relwidth=0.14)
            entry.configure(background="white")
            entry.configure(font="TkFixedFont")
            entry.configure(selectbackground="#c4c4c4")
            setattr(self, 'Entry%d' % (row + 1), entry)
            self._param_entries.append(entry)

        # (attribute, relx, rely, width, font, caption, command)
        button_specs = [
            ('Button1', 0.02, 0.65, 157, font9, '''Save''', lambda: but1Press()),
            ('Button2', 0.18, 0.65, 157, font9, '''Load''', lambda: but2Press()),
            ('Button3', 0.02, 0.71, 157, font11, '''Generate''', lambda: but3Press()),
            ('Button4', 0.18, 0.71, 157, font10, '''Clear''', lambda: but4Press()),
            ('Button6', 0.02, 0.80, 322, font9, '''Economic Analysis''', lambda: but6Press()),
            ('Button7', 0.02, 0.86, 322, font9, '''Curve Interpolation''', lambda: but7Press()),
            ('Button8', 0.02, 0.92, 322, font9, '''Oppa Gangnam Style''', lambda: but8Press()),
        ]
        for attr, relx, rely, width, font, caption, command in button_specs:
            button = Button(top)
            button.place(relx=relx, rely=rely, height=26, width=width)
            button.configure(activebackground="#d9d9d9")
            button.configure(background="#d9d938")
            button.configure(font=font)
            button.configure(text=caption)
            button.configure(cursor='crosshair')
            button.configure(command=command)
            setattr(self, attr, button)

        # Simulation-length controls (days / number of agents).
        self.Label2 = Label(top)
        self.Label2.place(relx=0.4, rely=0.03, height=18, width=33)
        self.Label2.configure(activebackground="#f9f9f9")
        self.Label2.configure(background="#135bd9")
        self.Label2.configure(text='''Days''')

        self.Entry15 = Entry(top)
        self.Entry15.place(relx=0.44, rely=0.03, relheight=0.03, relwidth=0.14)
        self.Entry15.configure(background="white")
        self.Entry15.configure(font="TkFixedFont")
        self.Entry15.configure(selectbackground="#c4c4c4")
        self.Entry15.insert(0, days)

        self.Label16 = Label(top)
        self.Label16.place(relx=0.6, rely=0.03, height=18, width=51)
        self.Label16.configure(activebackground="#f9f9f9")
        self.Label16.configure(background="#135bd9")
        self.Label16.configure(text='''Agents''')

        self.Entry16 = Entry(top)
        self.Entry16.place(relx=0.656, rely=0.03, relheight=0.03, relwidth=0.14)
        self.Entry16.configure(background="white")
        self.Entry16.configure(font="TkFixedFont")
        self.Entry16.configure(selectbackground="#c4c4c4")
        self.Entry16.insert(0, agents)

        # Button5 doubles as the plot display surface; but5Press toggles
        # it between the two generated figures.
        self.Button5 = Button(top)
        self.Button5.place(relx=0.4, rely=0.12, height=486, width=587)
        self.Button5.configure(activebackground="#d9d9d9")
        self.Button5.configure(state=ACTIVE)
        self.Button5.configure(cursor='exchange')
        self.Button5.configure(command=lambda: but5Press())

    def take(self):
        """Snapshot all 14 parameter fields into ``self.entries`` (raw
        strings, Entry1..Entry14 order) and refresh the global day and
        agent counts from Entry15/Entry16."""
        global days, agents
        self.entries = [getattr(self, 'Entry%d' % i).get() for i in range(1, 15)]
        days = int(self.Entry15.get())
        agents = int(self.Entry16.get())

    def give(self, vals=None):
        """Populate the 14 parameter fields from *vals* (sequence of 14
        strings, Entry1..Entry14 order).

        Uses ``None`` instead of a mutable ``[]`` default to avoid the
        shared-default-argument pitfall of the original.
        """
        vals = [] if vals is None else vals
        print(vals)
        for i in range(1, 15):
            getattr(self, 'Entry%d' % i).insert(0, vals[i - 1])

    def _set_out(self, val, agents):
        # Stash the latest model output and agent list for later analysis
        # (e.g. the economic-analysis window).
        self._total = val
        self._agents = agents
def but1Press():
    """Save the current parameter fields to a user-named ``*.siwr`` file
    (space-separated raw strings, Entry1..Entry14 order)."""
    dialog = tkSimpleDialog.askstring('SIWR Input', 'Input a file name:')
    if dialog is None:
        # User hit Cancel -- nothing to save (the original crashed with
        # TypeError on None + '.siwr').
        return
    dialog += '.siwr'
    top.take()
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(dialog, 'w') as out:
        for x in top.entries:
            out.write(x)
            out.write(' ')
def but2Press():
    """Load parameters from a previously saved ``*.siwr`` file and push
    them into the entry fields."""
    name = tkFileDialog.askopenfilename()
    if not name:
        # Dialog cancelled -- the original crashed trying to open ''.
        return
    # 'with' closes the handle; the original leaked it.
    with open(name, 'r') as src:
        params = src.read().split()
    top.give(params)
def but3Press():
    """Copy the GUI fields into the global parameter dict and run the
    model via gen()."""
    global parameters
    top.take()
    # The first 12 entries map straight onto float-valued rate keys.
    rate_keys = ('contactRateHH', 'contactRateHF', 'pickupFr', 'transferFr',
                 'faceTouchRate', 'infProb', 'washRate', 'incubationRate',
                 'recoveryRate', 'sheddingRate', 'shedding', 'dieOff')
    for key, raw_value in zip(rate_keys, top.entries):
        parameters[key] = float(raw_value)
    # A decontamination frequency of 0 means "never" and is stored as None.
    decon = float(top.entries[12])
    parameters['deconFreq'] = decon if decon != 0 else None
    parameters['dayLength'] = float(top.entries[13])
    gen()
    # NOTE: the original carried a disabled try/except that popped a
    # tkMessageBox warning ("Unfilled Parameters!") on conversion errors.
def but4Press():
    """Blank out all 14 parameter entry fields."""
    for idx in range(1, 15):
        getattr(top, 'Entry%d' % idx).delete(0, END)
def but5Press():
    """Toggle the plot button between the two rendered figures.

    Does nothing while mod == 0, i.e. before any figures exist.
    """
    global mod
    if mod == 1:
        mod, picture = 2, image2
    elif mod == 2:
        mod, picture = 1, image1
    else:
        return
    top.Button5.configure(image=picture)
def but6Press():
    """Open the economic-analysis companion window on top of this one."""
    from fomite_ABM_econGUI import vp_start_econgui as launch_econ
    launch_econ(top)
def but7Press():
    """Fit Lagrange polynomials through the last model run's curves and
    replace fig1/fig2 with the smoothed symptomatic/incubating and
    contamination plots."""
    # polynomial interpolation lagrange
    from Numericals import lagrange_interpolation
    from matplotlib.pylab import arange
    try:
        # Sample the fit on a fine grid over the simulated day range.
        discretization_range = arange(0,days-1,.01)
        incubating_out = []
        symptomatic_out = []
        # complete_output columns (per gen()'s plots): [2]=incubating,
        # [3]=symptomatic, [4]=contamination, [-1]=day.
        xvals = [x[-1] for x in complete_output]
        inyvals = [x[2] for x in complete_output]
        symyvals = [x[3] for x in complete_output]
        conyvals = [x[4] for x in complete_output]
        incubating_out = lagrange_interpolation(discretization_range, xvals, inyvals)
        symptomatic_out = lagrange_interpolation(discretization_range, xvals, symyvals)
        contamination_out = lagrange_interpolation(discretization_range, xvals, conyvals)
        print(xvals)
        print(incubating_out)
        global image1, image2, mod
        pl.clf()
        pl.plot(discretization_range,symptomatic_out,label='Symptomatic')
        pl.plot(discretization_range,incubating_out,label='Incubating')
        pl.legend()
        pl.ylabel('Population')
        pl.xlabel('Days')
        pl.savefig('fig1')
        # fig2 keeps the fig1 axes and adds the contamination curve.
        pl.plot(discretization_range,contamination_out, label=None)
        pl.ylabel('Fomite contamination')
        pl.xlabel('Days')
        pl.legend().remove()
        pl.savefig('fig2')
        pl.clf()
        # Resize both saved figures to the 587x486 display button.
        img = Image.open('fig1.png')
        img = img.resize((587,486), PIL.Image.ANTIALIAS)
        img.save('fig1.png')
        img = Image.open('fig2.png')
        img = img.resize((587,486), PIL.Image.ANTIALIAS)
        img.save('fig2.png')
        image1 = ImageTk.PhotoImage(file='fig1.png')
        image2 = ImageTk.PhotoImage(file='fig2.png')
        mod = 1
        top.Button5.configure(image=image1)
    except:
        # Broad except is deliberate: any failure (no run yet, PIL error)
        # is reported to the user as "nothing to interpolate".
        tkMessageBox.showwarning("Warning!","No Curve to Interpolate!")
def but8Press():
    """Parse a sick-childcare TSV export into agents and dump them."""
    print('gangnam style')
    # Retrieve the TSV and integrate it with the model.
    from sickchildcare_parser import cases_to_agents
    name = tkFileDialog.askopenfilename()
    agents = cases_to_agents(name, 'all', 'e', 5)
    print(agents)
    for agent in agents:
        print(agent.data)
def gen():
    """Build the agent/fomite contact network, run the ABM for the
    configured number of days, plot the epidemic curves to fig1/fig2 and
    display fig1 on the plot button."""
    from fomite_ABM import Agent, Fomite
    ### A bunch of crap to test run the model
    agentList = []
    fomite = Fomite(id='1f')
    nAgents = agents
    for i in range(nAgents):
        agentList.append(Agent(id=i))
    # Seed one symptomatic, heavily contaminated agent.
    agentList[1].state = 3
    #agentList[1].recoveryTime = 7
    agentList[1].contamination = 500
    ## This matrix assumes one fomite that everybody touches
    G = nx.complete_graph(nAgents)
    #print G.edges()
    nx.set_node_attributes(G,'bipartite',1)
    G.add_node(fomite.id,bipartite=0)
    for i in range(nAgents):
        G.add_edge(i,'1f')
    #print G.neighbors(1)
    #param = parameters.values()
    #print('param', len(param))
    print(parameters)
    print(days)
    print(agents)
    # Deep-copy so the model cannot mutate the GUI's parameter dict.
    param = copy.deepcopy(parameters)
    #print globals()
    #reformatted parameters as dictionary for retrieval
    #GUI generation
    ### parallelized multiple runs
    '''
servers = ('local',)
jobServer = pp.Server(ppservers=servers)
print 'active nodes', jobServer.get_active_nodes()
mList = [Model(copy.deepcopy(agentList),[copy.deepcopy(fomite)],28,G,param) for i in range(200)]
output = []
start = time.time()
jobs = [jobServer.submit(run_model,args=(m,),modules=('numpy as np','networkx as nx','random as pr')) for m in mList]
for job in jobs:
output.append(job())
print 'time elapsed', time.time()-start
output = np.array(output)
avgOutput = np.mean(output,axis=0)
stdOutput = np.std(output,axis=0)
upperBound = avgOutput + stdOutput
lowerBound = avgOutput - stdOutput
days = avgOutput[:,-1]
pl.plot(days,avgOutput[:,3],'b',lw=4,label='Symptomatic')
pl.fill_between(days,lowerBound[:,3],upperBound[:,3],facecolor='b',lw=0,alpha=0.5)
pl.plot(days,avgOutput[:,2],'g',lw=4,label='Incubating')
pl.fill_between(days,lowerBound[:,2],upperBound[:,2],facecolor='g',lw=0,alpha=0.5)
pl.legend(loc=0)
pl.ylabel('Symptomatic')
pl.xlabel('Days')
pl.ylim(ymin=0)
pl.figure()
pl.plot(days,avgOutput[:,4],color='r',lw=4)
pl.fill_between(days,lowerBound[:,4],upperBound[:,4],facecolor='r',lw=0,alpha=0.5)
pl.ylabel('Fomite contamination')
pl.xlabel('Days')
pl.ylim(ymin=0)
pl.show()
    '''
    # Single serial run of the model.
    m = Model(agentList,[fomite,],days,G,param)
    #print m.contactPairs.edges()
    m.run()
    global complete_output
    complete_output = m.output
    #safe copy by value NOT reference
    top._set_out(complete_output, agentList)
    out = np.array(complete_output)
    #print out[:,2]
    # Columns: [2]=incubating, [3]=symptomatic, [4]=contamination, [-1]=day.
    pl.plot(out[:,-1],out[:,3],label='Symptomatic')
    pl.plot(out[:,-1],out[:,2],label='Incubating')
    pl.legend()
    pl.ylabel('Population')
    pl.xlabel('Days')
    pl.savefig('fig1')
    pl.plot(out[:,-1],out[:,4], label=None)
    pl.ylabel('Fomite contamination')
    pl.xlabel('Days')
    pl.legend().remove()
    pl.savefig('fig2')
    pl.clf()
    global image1
    global image2
    global mod
    mod = 1
    # Resize the saved figures to the display button's pixel size.
    img = Image.open('fig1.png')
    img = img.resize((587,486), PIL.Image.ANTIALIAS)
    img.save('fig1.png')
    img = Image.open('fig2.png')
    img = img.resize((587,486), PIL.Image.ANTIALIAS)
    img.save('fig2.png')
    image1 = ImageTk.PhotoImage(file='fig1.png')
    image2 = ImageTk.PhotoImage(file='fig2.png')
    top.Button5.configure(image=image1)
    #print 'fomite contamination', m.fomite.contamination
    #for a in m.agentList:
    # print 'state', a.state
    # print 'contamination', a.contamination
    #for a in m.agentList:
    # print a.neighbors
if __name__ == '__main__':
    # Launch the GUI only when executed as a script, not on import.
    vp_start_gui()
| malhayashi/childcarefomites | fomite_ABM_GUI.py | fomite_ABM_GUI.py | py | 23,870 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.remove",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 48,
... |
75096299625 | import requests
from bs4 import BeautifulSoup
import re
def loastone_login():
    """Print the Lodestone login URL.

    (The typo in the function name is preserved: it is the public API.)
    """
    login_url = 'http://na.finalfantasyxiv.com/lodestone/account/login/'
    print(login_url)
#Get a page from the Loadstone
# returns a BeautifulSoup object
def get_loadstone_page(url, session_id):
    """Fetch a Lodestone page authenticated by the ``ldst_sess`` cookie.

    Returns a BeautifulSoup of the response body.
    Raises Exception when the server does not answer HTTP 200.
    """
    # Bug fix: the original built dict(ldst_sess=..., domain=..., path=...),
    # which sends three cookies literally named 'ldst_sess', 'domain' and
    # 'path' -- a plain dict has no notion of cookie attributes.  Use a
    # cookie jar so domain/path become attributes of the session cookie.
    jar = requests.cookies.RequestsCookieJar()
    jar.set('ldst_sess', session_id, domain='finalfantasyxiv.com', path='/')
    raw_page = requests.get(url, cookies=jar)
    if raw_page.status_code != 200:
        raise Exception("Unable to download web page! (HTTP %d for %s)"
                        % (raw_page.status_code, url))
    return BeautifulSoup(raw_page.text, 'html.parser')
#Each item has a separate detail page that must be loaded to determine if it's HQ or not
def is_item_hq(raw_item, session_id):
    """Return True if the item row's tooltip page shows the HQ icon.

    Each item has a separate detail (tooltip) page that must be loaded
    to determine whether the item is high quality.
    """
    # Bug fix: the original referenced the undefined name 'item' instead
    # of the 'raw_item' parameter, raising NameError on every call.
    tooltip_url = ('http://na.finalfantasyxiv.com/'
                   + raw_item.find('div', attrs={"class": 'item_txt'})['data-lazy_load_url'])
    tooltip_page = get_loadstone_page(tooltip_url, session_id)
    return bool(tooltip_page.find(
        "img",
        src=re.compile('http://img\.finalfantasyxiv\.com/lds/pc/global/images/common/ic/hq.png.*')))
#Debug function to write some data to 'test.html'
def write_data(data):
    """Debug helper: dump str(data) to 'test.html' in the working dir."""
    # The context manager guarantees the handle is closed even on error
    # (the original used open/close with dead commented code in between).
    with open('test.html', 'w') as out_file:
        out_file.write(str(data))
#Debug function to write a pretty parsed version of a Loadstone page
def write_loadstone_page(url, session_id):
    """Debug helper: fetch a Lodestone page and save its prettified HTML
    to 'test.html' via write_data()."""
    page = get_loadstone_page(url, session_id)
    write_data(page.prettify().encode('utf8'))
#Use this to convert the provided items into something useful
def list_items_table(items):
    """Render the item dicts as an HTML table.

    Each element of *items* must provide the keys 'image', 'name',
    'quantity', 'location' and 'sub_location' used by the row template.
    Returns the table as a single string.
    """
    item_row_format = ('<tr><td><img src="{image}"></img></td><td>{name}</td>'
                       '<td>{quantity}</td><td>{location}</td>'
                       '<td>{sub_location}</td></tr>\n')
    # join() builds the body in one pass instead of quadratic += concatenation.
    rows = ''.join(item_row_format.format(**entry) for entry in items)
    return '<table>\n' + rows + '</table>\n'
#Get all items in the Free company chest (does not get number of crystals or gil)
#Does not handle HQ Items yet
def get_fc_items(fc_id, session_id):
    """Return every item in the Free Company chest as a list of dicts.

    Does not report crystals or gil, and does not yet flag HQ items.
    """
    url = 'http://na.finalfantasyxiv.com/lodestone/freecompany/' + str(fc_id) + '/chest/'
    soup_page = get_loadstone_page(url, session_id)
    items = []
    # Every stack row carries a data-default_sort attribute.
    for row in soup_page.find_all("tr", attrs={"data-default_sort": True}):
        items.append({
            'name': row.find("h2", attrs={"class": 'db-tooltip__item__name'}).text.strip(),
            'quantity': int(row['data-stack']),
            'image': row.find("img")['src'],
            'location': 'Company Chest',
            'sub_location': row.find_parent('tbody')['id'],
        })
    return items
#Get all items in a retainers inventory (does not get number of crystals or gil)
#Does not handle HQ Items yet
def get_retainer_items(char_id, retainer_id, session_id):
    """Return every item held in a retainer's inventory as a list of dicts.

    Does not report crystals or gil, and does not yet flag HQ items.
    """
    url = ('http://na.finalfantasyxiv.com/lodestone/character/' + str(char_id)
           + '/retainer/' + retainer_id + '/baggage/')
    soup_page = get_loadstone_page(url, session_id)
    retainer_name = soup_page.find("div", attrs={"class": 'retainer--name'}).p.text.strip()
    items = []
    # Every stack row carries a data-default_sort attribute.
    for row in soup_page.find_all("tr", attrs={"data-default_sort": True}):
        items.append({
            'name': row.find("a", attrs={"class": 'highlight'}).text.strip(),
            'quantity': int(row['data-stack']),
            'image': row.find("img")['src'],
            'location': 'Retainer: ' + retainer_name,
            'sub_location': 'Inventory',
        })
    return items
#Get all items a retainer is selling (does not get number of crystals or gil)
#HQ Item handling is suspect
#Note: This may return already sold items:
# sale_inventory is supposed to filter those out, but I din't think it's working correctly
def get_retainer_selling(char_id, retainer_id, session_id):
    """Return the items a retainer currently has listed on the market.

    Does not report crystals or gil.  Already-sold items may still
    appear: the 'active' tab filter does not reliably exclude them.
    """
    url = ('http://na.finalfantasyxiv.com/lodestone/character/' + str(char_id)
           + '/retainer/' + retainer_id + '/market/')
    soup_page = get_loadstone_page(url, session_id)
    retainer_name = soup_page.find("div", attrs={"class": 'retainer--name'}).p.text.strip()
    sale_inventory = soup_page.find("div", attrs={"class": 'active'}).find('tbody')
    if not sale_inventory:
        # Nothing listed at all.
        return []
    hq_icon = re.compile('http://img\.finalfantasyxiv\.com/lds/pc/global/images/common/ic/hq.png.*')
    items = []
    for row in sale_inventory.find_all("tr"):
        items.append({
            'name': row.find("a", attrs={"class": 'highlight'}).text.strip(),
            'quantity': int(row.find("td", attrs={"class": 'even'}).text.strip()),
            'image': row.find("img")['src'],
            'location': 'Retainer: ' + retainer_name,
            'sub_location': 'Selling',
            'is_hq': bool(row.find("img", src=hq_icon)),
        })
    return items
| EmperorArthur/Loadstone_Parser | parse_loadstone.py | parse_loadstone.py | py | 5,272 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_numb... |
21252216686 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class bgp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-system-capabilities - based on the path /capabilities/bgp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__next_hop_mpls','__redistribute_isis',)
_yang_name = 'bgp'
_rest_name = 'bgp'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__redistribute_isis = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="redistribute-isis", rest_name="redistribute-isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
self.__next_hop_mpls = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="next-hop-mpls", rest_name="next-hop-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'capabilities', u'bgp']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'capabilities', u'bgp']
def _get_next_hop_mpls(self):
"""
Getter method for next_hop_mpls, mapped from YANG variable /capabilities/bgp/next_hop_mpls (boolean)
"""
return self.__next_hop_mpls
def _set_next_hop_mpls(self, v, load=False):
"""
Setter method for next_hop_mpls, mapped from YANG variable /capabilities/bgp/next_hop_mpls (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_hop_mpls is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_hop_mpls() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="next-hop-mpls", rest_name="next-hop-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """next_hop_mpls must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="next-hop-mpls", rest_name="next-hop-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)""",
})
self.__next_hop_mpls = t
if hasattr(self, '_set'):
self._set()
def _unset_next_hop_mpls(self):
self.__next_hop_mpls = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="next-hop-mpls", rest_name="next-hop-mpls", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
def _get_redistribute_isis(self):
"""
Getter method for redistribute_isis, mapped from YANG variable /capabilities/bgp/redistribute_isis (boolean)
"""
return self.__redistribute_isis
def _set_redistribute_isis(self, v, load=False):
"""
Setter method for redistribute_isis, mapped from YANG variable /capabilities/bgp/redistribute_isis (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_redistribute_isis is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_redistribute_isis() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="redistribute-isis", rest_name="redistribute-isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """redistribute_isis must be of a type compatible with boolean""",
'defined-type': "boolean",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="redistribute-isis", rest_name="redistribute-isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)""",
})
self.__redistribute_isis = t
if hasattr(self, '_set'):
self._set()
def _unset_redistribute_isis(self):
    # Pyangbind-generated: reset the leaf to a fresh default YANGDynClass instance.
    self.__redistribute_isis = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="redistribute-isis", rest_name="redistribute-isis", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-system-capabilities', defining_module='brocade-system-capabilities', yang_type='boolean', is_config=False)
# Expose the leaves as read-only Python properties (getter only — the leaves
# are operational state, is_config=False).
next_hop_mpls = __builtin__.property(_get_next_hop_mpls)
redistribute_isis = __builtin__.property(_get_redistribute_isis)

# Registry of YANG elements defined by this generated class.
_pyangbind_elements = {'next_hop_mpls': next_hop_mpls, 'redistribute_isis': redistribute_isis, }
| extremenetworks/pybind | pybind/slxos/v17s_1_02/capabilities/bgp/__init__.py | __init__.py | py | 8,050 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyangbind.lib.base.PybindBase",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pyangbind.lib.xpathhelper.YANGPathHelper",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pyangbind.lib.xpathhelper",
"line_number": 29,
"usage_type": ... |
16232453581 | from faker import Faker
from faker.providers import person, job, company, internet, phone_number
class BaseCard:
    """A simple contact card holding a person's name and private contact details."""

    def __init__(self, name, family_name, e_mail, priv_phone):
        self.name = name
        self.family_name = family_name
        self.e_mail = e_mail
        self.priv_phone = priv_phone
        # Cached combined character count of first and family name.
        self._length = len(name) + len(family_name)

    def __str__(self):
        return (
            f'Name:{self.name}; Family Name:{self.family_name}; '
            f'E-mail:{self.e_mail}; Phone:{self.priv_phone}'
        )

    def contacts(self):
        # Announce the preferred (private) contact channel.
        print(f'Please contact with: {self.name} {self.family_name} private phone:{self.priv_phone}')

    @property
    def length(self):
        """Combined length of name and family name (as cached at init)."""
        return self._length

    @length.setter
    def length(self, value):
        # Only a value exactly one greater than the current combined name
        # length is accepted (original validation rule, preserved as-is).
        expected = len(self.name) + len(self.family_name) + 1
        if value == expected:
            self._length = value
        else:
            raise ValueError(f' Value {value} not eq to len of name and family name')
class BusinessCard(BaseCard):
    """A BaseCard extended with employment details and a corporate phone."""

    def __init__(self, position, company, business_phone, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.position = position
        self.company = company
        self.business_phone = business_phone

    def contacts(self):
        # Business cards point callers at the corporate number instead.
        print(f'Please contact with: {self.name} {self.family_name} corporate phone:{self.business_phone}')

    def __str__(self):
        extras = (
            f'; Position:{self.position} ;Company:{self.company}'
            f' ;Buss Phone:{self.business_phone} ;LEN:{self._length}'
        )
        return super().__str__() + extras
def create_contacts(type='Base', quantity=1):
    """Generate `quantity` fake contact cards.

    type: 'Base' for BaseCard, 'Business' for BusinessCard.
    Returns the list of generated cards.
    Raises ValueError for any other `type`.
    """
    fake = Faker()
    fake.add_provider(person)
    fake.add_provider(job)
    fake.add_provider(internet)
    list_of_cards = []
    if type == 'Base':
        for _ in range(quantity):
            card = BaseCard(name=fake.first_name_male(), family_name=fake.last_name_male(),
                            e_mail=fake.company_email(), priv_phone=fake.phone_number())
            list_of_cards.append(card)
    elif type == 'Business':
        fake.add_provider(company)
        for _ in range(quantity):
            card = BusinessCard(name=fake.first_name_male(), family_name=fake.last_name_male(),
                                e_mail=fake.company_email(), priv_phone=fake.phone_number(), position=fake.job(),
                                company=fake.company(), business_phone=fake.phone_number())
            list_of_cards.append(card)
    else:
        # BUG FIX: the ValueError used to be constructed but never raised (and
        # the intended f-prefix had slipped inside the quotes), so an unknown
        # card type silently returned an empty list.
        raise ValueError('Wrong card type provided - Base or Business')
    return list_of_cards
# Demo: build ten fake business cards and print each one.
cards = create_contacts(type='Business', quantity=10)
for card in cards:
    print(card)
| szczesnym/Kodilla-Python | Chapter7/AddressBook.py | AddressBook.py | py | 2,662 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "faker.providers.company",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "faker.Faker",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "faker.providers.person",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "faker.... |
6994509380 | from lib.cuckoo.common.abstracts import Signature
class DropBox(Signature):
    """Flags samples that look up domains of the Dropbox cloud service."""

    name = "cloud_dropbox"
    description = "Looks up the Dropbox cloud service"
    severity = 2
    categories = ["cloud"]
    authors = ["RedSocks"]
    minimum = "2.0"

    # Known Dropbox service domains to match against observed DNS activity.
    domains = [
        "dropbox.com",
        "www.dropbox.com",
        "dl.dropboxusercontent.com",
        "dl.dropbox.com",
        "dl-balancer.x.dropbox.com",
        "www.v.dropbox.com",
        "duc-balancer.x.dropbox.com",
    ]

    def on_complete(self):
        # NOTE(review): stops at the first hit, so at most one IOC is marked —
        # confirm whether marking every matching domain was intended.
        hit = next(
            (domain for domain in self.domains if self.check_domain(pattern=domain)),
            None,
        )
        if hit is None:
            return None
        self.mark_ioc("domain", hit)
        return True
| cuckoosandbox/community | modules/signatures/windows/cloud_dropbox.py | cloud_dropbox.py | py | 689 | python | en | code | 312 | github-code | 36 | [
{
"api_name": "lib.cuckoo.common.abstracts.Signature",
"line_number": 3,
"usage_type": "name"
}
] |
74736328744 | import pyautogui
# Returns two integers, the width and height of the screen. (The primary monitor, in multi-monitor setups.)
screenWidth, screenHeight = pyautogui.size()
# Returns two integers, the x and y of the mouse cursor's current position.
currentMouseX, currentMouseY = pyautogui.position()
# Print a one-shot snapshot of screen geometry and cursor position at import time.
print(screenWidth, screenHeight, currentMouseX, currentMouseY)
def moveMouse(start, finish):
    """Drag the mouse (left button held) from `start` to `finish`.

    Both arguments are dicts with 'x' and 'y' keys (screen coordinates).
    """
    # Release first so a previously-stuck button does not corrupt the drag.
    pyautogui.mouseUp(button="left")
    pyautogui.moveTo(start["x"], start["y"])
    pyautogui.mouseDown()
    # pyautogui.moveTo(finish["x"], finish["y"])
    # Drag over one second; dragTo presses/releases the button itself.
    pyautogui.dragTo(finish["x"], finish["y"], 1, button='left')
    pyautogui.mouseUp(button="left")
def mouseTo(point):
    """Move the cursor to `point` (dict with 'x'/'y' keys) without clicking."""
    pyautogui.moveTo(point["x"], point["y"])
def withinGameBox(screen):
    """Return True when the cursor is inside the box described by `screen`.

    `screen` is a dict with 'x1'/'y1' (one corner) and 'x2'/'y2' (the
    opposite corner); boundary positions count as inside.
    """
    cursor_x, cursor_y = pyautogui.position()
    horizontally_in = screen['x1'] <= cursor_x <= screen['x2']
    vertically_in = screen['y1'] <= cursor_y <= screen['y2']
    return horizontally_in and vertically_in
| davidyu37/fruit-ninja-cv | mouse.py | mouse.py | py | 1,028 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyautogui.size",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pyautogui.position",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyautogui.mouseUp",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pyautogui.moveTo",
... |
72076380264 | # SPDX-License-Identifier: LGPL-3.0-only
"""Package for the doorstop.core tests."""
import logging
import os
from typing import List
from unittest.mock import MagicMock, Mock, patch
from doorstop.core.base import BaseFileObject
from doorstop.core.document import Document
from doorstop.core.item import Item
from doorstop.core.validators.item_validator import ItemValidator
from doorstop.core.vcs.mockvcs import WorkingCopy
# Repository root (three levels above this tests package).
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
TESTS_ROOT = os.path.dirname(__file__)
# Fixture directories used throughout the core tests.
FILES = os.path.join(os.path.dirname(__file__), "files")
FILES_MD = os.path.join(os.path.dirname(__file__), "files_md")
SYS = os.path.join(FILES, "parent")
TST = os.path.join(FILES, "child")
EMPTY = os.path.join(FILES, "empty")  # an empty directory
EXTERNAL = os.path.join(FILES, "external")  # external files to reference
NEW = os.path.join(FILES, "new")  # new document with no items
ENV = "TEST_INTEGRATION"  # environment variable to enable integration tests
REASON = "'{0}' variable not set".format(ENV)
# Git cannot track empty directories, so recreate the fixture dir on import.
if not os.path.exists(EMPTY):
    os.makedirs(EMPTY)
class DocumentNoSkip(Document):
    """Document class that is never skipped."""

    # Marker value overridden so these test Documents are never skipped.
    SKIP = "__disabled__"  # never skip test Documents
class MockFileObject(BaseFileObject):  # pylint: disable=W0223,R0902
    """Mock FileObject class with stubbed file IO."""

    def __init__(self, *args, **kwargs):
        # In-memory stand-in for the file's on-disk contents.
        self._file = kwargs.pop("_file", "")  # mock file system contents
        # Patch isfile so the real constructor believes the path exists.
        with patch("os.path.isfile", Mock(return_value=True)):
            super().__init__(*args, **kwargs)  # type: ignore
        # Route reads/writes through the in-memory buffer.
        self._read = Mock(side_effect=self._mock_read)
        self._write = Mock(side_effect=self._mock_write)

    # File creation is a no-op in tests.
    _create = Mock()

    def _mock_read(self, path):
        """Mock read method."""
        logging.debug("mock read path: {}".format(path))
        text = self._file
        logging.debug("mock read text: {}".format(repr(text)))
        return text

    def _mock_write(self, text, path):
        """Mock write method."""
        logging.debug("mock write text: {}".format(repr(text)))
        logging.debug("mock write path: {}".format(path))
        self._file = text

    def __bool__(self):
        # Always truthy so existence checks on mock objects pass.
        return True
# Combines the in-memory IO stubs with the real Item behavior.
class MockItem(MockFileObject, Item):  # pylint: disable=W0223,R0902
    """Mock Item class with stubbed file IO."""
class MockItemValidator(ItemValidator):  # pylint: disable=W0223,R0902
    """Mock Item class with stubbed file IO."""

    def _no_get_issues_document(self, item, document, skip):  # pylint: disable=W0613
        # Empty generator: the bare `return` before `yield` makes this a
        # generator that yields nothing.
        return
        yield  # pylint: disable=W0101

    def disable_get_issues_document(self):
        # Swap in the no-op generator so document-level checks are skipped.
        self._get_issues_document = self._no_get_issues_document
# Combines the in-memory IO stubs with the real Document behavior.
class MockDocument(MockFileObject, Document):  # pylint: disable=W0223,R0902
    """Mock Document class with stubbed file IO."""
class MockSimpleDocument:
    """Minimal stand-in for Document exposing only default members."""

    def __init__(self):
        self.parent = None
        self.prefix = "RQ"
        self.itemformat = "yaml"
        self._items: List[Item] = []
        self.extended_reviewed: List[str] = []

    def __iter__(self):
        # Iterating the document walks its items in insertion order.
        return iter(self._items)

    def set_items(self, items):
        """Replace the document's item list wholesale."""
        self._items = items
class MockDocumentSkip(MockDocument):  # pylint: disable=W0223,R0902
    """Mock Document class that is always skipped in tree placement."""

    # Flag consulted during tree building.
    skip = True
class MockDocumentNoSkip(MockDocumentSkip):  # pylint: disable=W0223,R0902
    """Mock Document class that is never skipped in tree placement."""

    SKIP = "__disabled__"  # never skip mock Documents
class MockItemAndVCS(MockItem):  # pylint: disable=W0223,R0902
    """Mock item class with stubbed IO and a mock VCS reference."""

    def __init__(self, *args, **kwargs):
        # The first positional Item argument (the owning document) is None here.
        super().__init__(None, *args, **kwargs)
        self.tree = Mock()
        self.tree.vcs = WorkingCopy(None)
class MockDataMixIn:  # pylint: disable=R0903
    """Data for test cases requiring mock items and documents."""

    # purely mock objects
    mock_document = MagicMock()
    mock_document.prefix = "MOCK"
    mock_document.items = []
    mock_document.assets = None
    mock_document.template = None
    mock_tree = MagicMock()
    mock_tree.documents = [mock_document]

    # mock objects that behave like the real thing
    # `item`: a non-normative heading item linked to sys3.
    item = MockItemAndVCS(
        "path/to/req3.yml",
        _file=(
            "links: [sys3]" + "\n"
            "text: 'Heading'" + "\n"
            "level: 1.1.0" + "\n"
            "normative: false"
        ),
    )

    # `item2`: long repeated text, with a tree that resolves sys3 and child links to tst1.
    item2 = MockItemAndVCS(
        "path/to/req3.yml",
        _file=("links: [sys3]\ntext: '" + ("Hello, world! " * 10) + "'\nlevel: 1.2"),
    )
    _mock_item = Mock()
    _mock_item.uid = "sys3"
    _mock_item.document.prefix = "sys"
    item2.tree = Mock()
    item2.tree.find_item = Mock(return_value=_mock_item)
    _mock_item2 = Mock()
    _mock_item2.uid = "tst1"
    _mock_item2.document.prefix = "tst"
    # pylint: disable=undefined-variable
    item2.find_child_links = lambda: [MockDataMixIn._mock_item2.uid]  # type: ignore
    item2.find_child_items = lambda: [MockDataMixIn._mock_item2]  # type: ignore

    # `document`: a MagicMock restricted to `items`, seeded with a mix of items.
    document = MagicMock(spec=["items"])
    document.items = [
        item,
        item2,
        MockItemAndVCS(
            "path/to/req1.yml", _file="links: []\ntext: 'abc\n123'\nlevel: 1.1"
        ),
        MockItemAndVCS("path/to/req2.yml", _file="links: []\ntext: ''\nlevel: 2"),
        MockItemAndVCS(
            "path/to/req4.yml",
            _file="links: []\nref: 'CHECK_PUBLISHED_CONTENT'\n" "level: 2.1.1",
        ),
        MockItemAndVCS(
            "path/to/req2.yml",
            _file="links: [sys1]\ntext: 'Heading 2'\nlevel: 2.1.0\n" "normative: false",
        ),
    ]
    document.copy_assets = Mock()
    document.assets = None
    document.template = None

    # `item3`: normative item with an external file reference resolved by the mock VCS.
    item3 = MockItem(
        None,
        "path/to/req4.yml",
        _file=(
            "links: [sys4]" + "\n"
            "text: 'This shall...'" + "\n"
            "ref: Doorstop.sublime-project" + "\n"
            "level: 1.2" + "\n"
            "normative: true"
        ),
    )
    _mock_item3 = Mock()
    _mock_item3.uid = "sys4"
    _mock_item3.document.prefix = "sys"
    item3.tree = Mock()
    item3.tree.find_item = Mock(return_value=_mock_item3)
    item3.tree.vcs.paths = [
        (
            "Doorstop.sublime-project",
            "Doorstop.sublime-project",
            "Doorstop.sublime-project",
        )
    ]

    # `item4`: like `item`, plus a custom `long` attribute of 66 zeros.
    item4 = MockItemAndVCS(
        "path/to/req3.yml",
        _file=(
            "links: [sys3]" + "\n"
            "text: 'Heading'" + "\n"
            "long: " + ('"' + "0" * 66 + '"') + "\n"
            "level: 1.1.0" + "\n"
            "normative: false"
        ),
    )

    # `item5`: heading with a (legacy, single) `ref` attribute.
    item5 = MockItemAndVCS(
        "path/to/req3.yml",
        _file=(
            "links: [sys3]" + "\n"
            "text: 'Heading'" + "\n"
            "level: 2.1.2" + "\n"
            "normative: false" + "\n"
            "ref: 'abc123'"
        ),
    )

    # `item6`: heading with the newer, multi-entry `references` list.
    item6 = MockItemAndVCS(
        "path/to/req3.yml",
        _file=(
            "links: [sys3]" + "\n"
            "text: 'Heading'" + "\n"
            "level: 2.1.2" + "\n"
            "normative: false" + "\n"
            "references:" + "\n"
            " - path: abc1" + "\n"
            " type: file" + "\n"
            " - path: abc2" + "\n"
            " type: file" + "\n"
        ),
    )
| doorstop-dev/doorstop | doorstop/core/tests/__init__.py | __init__.py | py | 7,484 | python | en | code | 424 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line... |
18317520839 | # ----------------------------------------------------------------------------#
# Imports
# ----------------------------------------------------------------------------#
import random
from flask import Flask, abort, jsonify, request
from flask_cors import CORS
from models import setup_db, Category, Question
# ----------------------------------------------------------------------------#
# App Config.
# ----------------------------------------------------------------------------#
app = Flask(__name__)
setup_db(app)  # bind the SQLAlchemy models/session to this app
# Allow cross-origin requests on /api/* from any origin.
# NOTE(review): the routes below are not mounted under /api/ — confirm the
# resource pattern matches the frontend's request paths.
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
# ----------------------------------------------------------------------------#
# PAGINATION LOGIC
# ----------------------------------------------------------------------------#
QUES_PER_PAGE = 10


def paginate_questions(request, selection):
    """Return the slice of `.format()`-ed questions for the requested page.

    The page number is read from the `page` query argument (default 1).
    """
    page = request.args.get('page', 1, type=int)
    first = (page - 1) * QUES_PER_PAGE
    formatted = [question.format() for question in selection]
    return formatted[first:first + QUES_PER_PAGE]
# ----------------------------------------------------------------------------#
# AFTER REQUEST
# ----------------------------------------------------------------------------#
@app.after_request
def after_request(response):
    """Attach the CORS allow-headers/allow-methods headers to every response."""
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization,true')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
    return response
# ----------------------------------------------------------------------------#
# GET CATEGORIES ENDPOINT
# ----------------------------------------------------------------------------#
@app.route('/categories', methods=['GET'])
def retrieve_categories():
    """Return every category as an {id: type} mapping; 404 when none exist."""
    categories = Category.query.order_by(Category.type).all()
    if not categories:
        abort(404)
    return jsonify({
        'success': True,
        'categories': {category.id: category.type for category in categories},
    })
# ----------------------------------------------------------------------------#
# GET QUESTIONS ENDPOINT
# ----------------------------------------------------------------------------#
@app.route('/questions')
def get_questions():
    """Return one page of questions plus the category map.

    404 when the requested page is empty; 400 on unexpected failure.
    """
    all_questions = Question.query.order_by(Question.id).all()
    total_questions = len(all_questions)
    page = paginate_questions(request, all_questions)
    if not page:
        abort(404)
    try:
        category_map = {category.id: category.type for category in Category.query.all()}
        return jsonify({
            'success': True,
            'questions': page,
            'total_questions': total_questions,
            'categories': category_map,
        })
    except Exception as err:
        print(err)
        abort(400)
# ----------------------------------------------------------------------------#
# DELETE QUESTIONS ENDPOINT
# ----------------------------------------------------------------------------#
@app.route('/questions/<int:id>', methods=['DELETE'])
def delete_questions(id):
    """Delete the question with the given id.

    Responds 404 when the question does not exist, 400 on a DB failure.
    """
    question_to_be_deleted = Question.query.filter_by(id=id).one_or_none()
    if question_to_be_deleted is None:
        # BUG FIX: abort(404) used to be raised *inside* the try block, where
        # the broad `except Exception` caught the HTTPException and converted
        # the intended 404 into a 400. Check existence before the try block.
        abort(404)
    try:
        question_to_be_deleted.delete()
        return jsonify({
            'success': True,
            'deleted': str(id)
        })
    except Exception as e:
        print(e)
        abort(400)
# ----------------------------------------------------------------------------#
# POST QUESTIONS ENDPOINT
# ----------------------------------------------------------------------------#
@app.route("/questions", methods=['POST'])
def add_question():
body = request.get_json()
if body is None:
abort(400)
new_question = body.get('question', None)
new_answer = body.get('answer', None)
new_category = body.get('category', None)
new_difficulty = body.get('difficulty', None)
if new_question is None or new_answer is None or new_category is None or new_difficulty is None:
abort(400)
else:
try:
added_question = Question(question=new_question, answer=new_answer, category=new_category,
difficulty=new_difficulty)
added_question.insert()
all_questions = Question.query.order_by(Question.id).all()
current_questions = paginate_questions(request, all_questions)
return jsonify({
'success': True,
'created': added_question.id,
'questions': current_questions,
'total_questions': len(all_questions)
})
except Exception as e:
print(e)
abort(422)
# ----------------------------------------------------------------------------#
# SEARCH QUESTIONS ENDPOINT
# ----------------------------------------------------------------------------#
@app.route("/questions/search", methods=['POST'])
def search_question():
body = request.get_json()
search_ques = body.get('searchTerm', None)
if search_ques:
searched_question = Question.query.filter(Question.question.ilike(f'%{search_ques}%')).all()
return jsonify({
'success': True,
'questions': [question.format() for question in searched_question],
'total_questions': len(searched_question),
'current_category': None
})
else:
abort(404)
# ----------------------------------------------------------------------------#
# GET QUESTIONS BY CATEGORY ENDPOINT
# ----------------------------------------------------------------------------#
@app.route("/categories/<int:id>/questions")
def questions_by_category(id):
searched_category = Category.query.filter_by(id=id).one_or_none()
if searched_category:
questions_in_category = Question.query.filter_by(category=str(id)).all()
current_questions = paginate_questions(request, questions_in_category)
return jsonify({
'success': True,
'questions': current_questions,
'total_questions': len(questions_in_category),
'current_category': searched_category.type
})
else:
abort(404)
# ----------------------------------------------------------------------------#
# POST QUIZ ENDPOINT
# ----------------------------------------------------------------------------#
@app.route('/quizzes', methods=['POST'])
def get_quiz():
    """Return a random question from the chosen category not asked before.

    Expects JSON with `quiz_category` ({'id': ...}; id 0 means all categories)
    and `previous_questions` (list of already-asked ids). Responds 422 when
    the category is missing, 404 on unexpected failure, and `question: None`
    when every question has been asked.
    """
    body = request.get_json()
    quiz_category = body.get('quiz_category')
    previous_question = body.get('previous_questions') or []
    if quiz_category is None:
        abort(422)
    try:
        if (quiz_category['id'] == 0):
            # To handle all categories
            questions_query = Question.query.all()
        else:
            questions_query = Question.query.filter_by(category=quiz_category['id']).all()
        # BUG FIX: the old `while next_question.id not in previous_question`
        # loop re-read the *same* random index forever whenever the picked
        # question had not been asked yet (infinite loop), and returned
        # repeats when it had. Filter out asked questions, then pick one.
        remaining = [q for q in questions_query if q.id not in previous_question]
        if not remaining:
            # Every question has been asked: signal the end of the quiz.
            return jsonify({
                'success': True,
                'question': None,
                'previousQuestion': previous_question
            })
        next_question = random.choice(remaining)
        return jsonify({
            'success': True,
            'question': {
                "id": next_question.id,
                "question": next_question.question,
                "answer": next_question.answer,
                "difficulty": next_question.difficulty,
                "category": next_question.category
            },
            'previousQuestion': previous_question
        })
    except Exception as e:
        print(e)
        abort(404)
# ----------------------------------------------------------------------------#
# ERROR HANDLERS FOR HTTP CODES
# ----------------------------------------------------------------------------#
@app.errorhandler(404)
def not_found(error):
    """JSON body for 404 responses."""
    return jsonify({
        "success": False,
        "error": 404,
        "message": "Resource not found"
    }), 404
@app.errorhandler(422)
def unprocessable_entity(error):
    """JSON body for 422 responses."""
    return jsonify({
        "success": False,
        "error": 422,
        "message": "Unprocessable entity"
    }), 422
@app.errorhandler(400)
def bad_request(error):
    """JSON body for 400 responses."""
    return jsonify({
        "success": False,
        "error": 400,
        "message": "Bad request"
    }), 400
# ----------------------------------------------------------------------------#
# Launch.
# ----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
    app.run()


def create_app():
    # Factory hook so external runners/tests can obtain the module-level app.
    return app
| RaghavGoel13/trivia-solution | backend/flaskr/app.py | app.py | py | 8,877 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.setup_db",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
... |
37750672638 | # coding=utf-8
from global_test_case import GlobalTestCase as TestCase
from ..models import Message, WriteItInstance, \
Moderation, Confirmation, OutboundMessage
from popit.models import Person
from django.core import mail
from subdomains.utils import reverse
import datetime
from mock import patch
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django.conf import settings
from django.test.utils import override_settings
from django.utils.unittest import skip
class ModerationMessagesTestCase(TestCase):
def setUp(self):
    """Create a private message plus its confirmation; give the owner a known password."""
    super(ModerationMessagesTestCase, self).setUp()
    self.writeitinstance1 = WriteItInstance.objects.get(id=1)
    self.person1 = Person.objects.get(id=1)
    self.private_message = Message.objects.create(
        content='Content 1',
        author_name='Felipe',
        author_email="falvarez@votainteligente.cl",
        subject='Subject 1',
        public=False,
        writeitinstance=self.writeitinstance1,
        persons=[self.person1],
    )
    self.confirmation = Confirmation.objects.create(message=self.private_message)
    self.owner = self.writeitinstance1.owner
    self.owner.set_password('feroz')
    self.owner.save()

def test_private_messages_confirmation_created_move_from_new_to_needs_moderation(self):
    # Confirming a private message should queue its outbound copy for moderation.
    moderation, created = Moderation.objects.get_or_create(message=self.private_message)
    self.private_message.recently_confirmated()
    outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message)
    self.assertEquals(outbound_message_to_pedro.status, 'needmodera')

def test_private_message_is_not_accesible(self):
    # A confirmed-but-private message must not be served publicly (404).
    self.confirmation.confirmated_at = datetime.datetime.now()
    self.confirmation.save()
    self.private_message.confirmated = True
    self.private_message.save()
    url = self.private_message.get_absolute_url()
    response = self.client.get(url)
    self.assertEquals(response.status_code, 404)

def test_outbound_messages_of_a_confirmed_message_are_waiting_for_moderation(self):
    # I need to do a get to the confirmation url
    moderation, created = Moderation.objects.get_or_create(message=self.private_message)
    url = reverse(
        'confirm',
        subdomain=self.private_message.writeitinstance.slug,
        kwargs={
            'slug': self.confirmation.key
        },
    )
    self.client.get(url)
    # this works proven somewhere else
    outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message)
    self.assertEquals(outbound_message_to_pedro.status, 'needmodera')

def test_message_send_moderation_message(self):
    # Let's have some longer message content so we can keep an eye on the text wrapping.
    self.private_message.content = u'''A gaf fi dynnu sylw'r Prif Weinidog at y sefyllfa yn Ysbyty Penrhos Stanley yng Nghaergybi, lle mae un o'r ddwy ward wedi bod ar gau ers dros bythefnos erbyn hyn, oherwydd absenoldeb staff a diffyg staff wrth gefn, ac ni fydd y ward yn agor am bythefnos arall, tan 13 Ebrill—bron i dair wythnos a dweud y gwir?
A gaf i dynnu sylw'r Prif Weinidog hefyd at y sefyllfa yn Ysbyty Gwynedd yn ddiweddar, lle cadwyd etholwr i mi mewn ystafell storio dros nos wrth wella ar ôl llawdriniaeth, â’i declyn drip yn hongian oddi ar beg ar y wal, ac y rhoddwyd cloch bres Fictoraidd iddo i dynnu sylw’r nyrs. Mae'r nyrs yn gwneud gwaith gwych dan amgylchiadau anodd. A yw hynny'n swnio i'r Prif Weinidog fel GIG sydd ag adnoddau da ac yn cael ei reoli'n dda? Er bod gwleidyddiaeth cyni cyllidol yn gyfrifol am lawer o'r diffyg adnoddau, nid yw'n esgusodi camreolaeth y GIG gan Lywodraeth Cymru.'''
    self.private_message.save()
    moderation, created = Moderation.objects.get_or_create(message=self.private_message)
    self.private_message.send_moderation_mail()
    # Outbox holds the confirmation mail plus the moderation mail.
    self.assertEquals(len(mail.outbox), 2)
    moderation_mail = mail.outbox[1]
    self.assertModerationMailSent(self.private_message, moderation_mail)
    expected_from_email = self.private_message.writeitinstance.slug + "@" + settings.DEFAULT_FROM_DOMAIN
    self.assertEquals(moderation_mail.from_email, expected_from_email)
def test_send_moderation_message_from_custom_connection(self):
    '''If given a custom smtp config for its instance then
    it sends the moderation mail with this custom config '''
    config = self.private_message.writeitinstance.config
    config.custom_from_domain = "custom.domain.cl"
    config.email_host = 'cuttlefish.au.org'
    config.email_host_password = 'f13r4'
    config.email_host_user = 'fiera'
    config.email_port = 25
    config.email_use_tls = True
    config.save()
    moderation, created = Moderation.objects.get_or_create(message=self.private_message)
    self.private_message.send_moderation_mail()
    self.assertEquals(len(mail.outbox), 2)
    moderation_mail = mail.outbox[1]
    self.assertModerationMailSent(self.private_message, moderation_mail)
    expected_from_email = self.private_message.writeitinstance.slug + "@" + config.custom_from_domain
    self.assertEquals(moderation_mail.from_email, expected_from_email)
    # The SMTP connection must mirror every custom setting.
    connection = moderation_mail.connection
    self.assertEquals(connection.host, config.email_host)
    self.assertEquals(connection.password, config.email_host_password)
    self.assertEquals(connection.username, config.email_host_user)
    self.assertEquals(connection.port, config.email_port)
    self.assertEquals(connection.use_tls, config.email_use_tls)

def test_not_using_any_custom_config(self):
    '''If not using any custom config the moderation
    mail does not use that connection'''
    moderation, created = Moderation.objects.get_or_create(message=self.private_message)
    self.private_message.send_moderation_mail()
    self.assertEquals(len(mail.outbox), 2)
    moderation_mail = mail.outbox[1]
    connection = moderation_mail.connection
    # The default backend connection carries none of the custom SMTP attributes.
    self.assertFalse(hasattr(connection, 'host'))
    self.assertFalse(hasattr(connection, 'password'))
    self.assertFalse(hasattr(connection, 'username'))
    self.assertFalse(hasattr(connection, 'port'))
    self.assertFalse(hasattr(connection, 'use_tls'))

@override_settings(SEND_ALL_EMAILS_FROM_DEFAULT_FROM_EMAIL=True)
def test_moderation_sent_from_default_from_email(self):
    '''Moderation is sent from default from email if specified'''
    moderation, created = Moderation.objects.get_or_create(message=self.private_message)
    self.private_message.send_moderation_mail()
    moderation_mail = mail.outbox[1]
    expected_from_email = settings.DEFAULT_FROM_EMAIL
    self.assertEquals(moderation_mail.from_email, expected_from_email)

def test_create_a_moderation(self):
    # I make sure that uuid.uuid1 is called and I get a sort of random key
    with patch('uuid.uuid1') as string:
        string.return_value.hex = 'oliwi'
        message = Message.objects.create(
            content='Content 1',
            author_name='Felipe',
            author_email="falvarez@votainteligente.cl",
            subject='Fiera es una perra feroz',
            public=False,
            writeitinstance=self.writeitinstance1,
            persons=[self.person1],
        )
        self.assertFalse(message.moderation is None)
        self.assertEquals(message.moderation.key, 'oliwi')
        string.assert_called()

# issue 114 found at https://github.com/ciudadanointeligente/write-it/issues/114
def test_send_mails_only_once(self):
    # The moderation mail must be sent exactly once per confirmation.
    with patch('nuntium.models.Message.send_moderation_mail') as send_moderation_mail:
        self.writeitinstance1.config.moderation_needed_in_all_messages = True
        self.writeitinstance1.config.save()
        send_moderation_mail.return_value = None
        message = Message.objects.create(
            content='Content 1',
            author_name='Felipe',
            author_email="falvarez@votainteligente.cl",
            subject='Fiera es una perra feroz',
            public=False,
            writeitinstance=self.writeitinstance1,
            persons=[self.person1],
        )
        message.recently_confirmated()
        # number_of_moderations = Moderation.objects.filter(message=message).count()
        send_moderation_mail.assert_called_once_with()
def test_message_has_a_method_for_moderate(self):
    # Moderating a confirmed message flips `moderated` and readies its outbound copy.
    self.confirmation.confirmated_at = datetime.datetime.now()
    self.confirmation.save()
    self.private_message.confirmated = True
    self.private_message.save()
    self.private_message.moderate()
    outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message)
    self.assertTrue(self.private_message.moderated)
    self.assertEquals(outbound_message_to_pedro.status, 'ready')

def test_message_that_has_not_been_confirmed_cannot_be_moderated(self):
    # this message has not been confirmed
    # and is private therefore requires moderation
    message = Message.objects.create(
        content='Content 1',
        author_name='Felipe',
        author_email="falvarez@votainteligente.cl",
        subject='Fiera es una perra feroz',
        public=False,
        writeitinstance=self.writeitinstance1,
        persons=[self.person1],
    )
    with self.assertRaises(ValidationError):
        # this was taken from here
        # http://stackoverflow.com/questions/8215653/using-a-context-manager-with-python-assertraises#8215739
        try:
            message.moderate()
        except ValidationError as e:
            self.assertEqual(e.message,
                _('The message needs to be confirmated first',))
            raise
    self.assertFalse(message.moderated)
    outbound_message_to_pedro = OutboundMessage.objects.get(message=message)
    self.assertEquals(outbound_message_to_pedro.status, 'new')

def test_there_is_a_moderation_url_that_sets_the_message_to_ready(self):
    # Hitting the accept URL as the owner marks the message moderated and ready.
    self.client.login(username=self.owner.username, password='feroz')
    self.confirmation.confirmated_at = datetime.datetime.now()
    self.confirmation.save()
    self.private_message.confirmated = True
    self.private_message.save()
    url = reverse('moderation_accept',
        subdomain=self.private_message.writeitinstance.slug,
        kwargs={
            'slug': self.private_message.moderation.key
        })
    response = self.client.get(url)
    self.assertEquals(response.status_code, 200)
    self.assertTemplateUsed(response, 'nuntium/moderation_accepted.html')
    # private_message = Message.objects.get(id=self.private_message.id)
    outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message.id)
    self.assertEquals(outbound_message_to_pedro.status, 'ready')
    private_message = Message.objects.get(id=self.private_message.id)
    self.assertTrue(private_message.moderated)

def test_moderation_get_success_url(self):
    # NOTE(review): the slug is passed positionally here (second argument),
    # unlike the keyword `subdomain=` form used elsewhere — confirm intended.
    expected_url = reverse('moderation_accept',
        self.private_message.writeitinstance.slug,
        kwargs={
            'slug': self.private_message.moderation.key
        })
    self.assertEquals(self.private_message.moderation.get_success_url(), expected_url)

def test_moderation_get_reject_url(self):
    expected_url = reverse('moderation_rejected',
        subdomain=self.private_message.writeitinstance.slug,
        kwargs={
            'slug': self.private_message.moderation.key
        })
    self.assertEquals(self.private_message.moderation.get_reject_url(), expected_url)

def test_there_is_a_reject_moderation_url_that_hides_the_message(self):
    '''
    This is the case when you proud owner of a writeitInstance
    think that the private message should not go anywhere
    and it should be hidden
    '''
    self.client.login(username=self.owner.username, password='feroz')
    # Ok I'm going to make the message public
    public_message = self.private_message
    public_message.public = True
    public_message.save()
    url = reverse(
        'moderation_rejected',
        subdomain=public_message.writeitinstance.slug,
        kwargs={
            'slug': public_message.moderation.key
        })
    response = self.client.get(url)
    self.assertEquals(response.status_code, 200)
    self.assertTemplateUsed(response, 'nuntium/moderation_rejected.html')
    # If someone knows how to do the DoesNotExist or where to extend from
    # I could do a self.assertRaises but I'm not taking any more time in this
    message = Message.objects.get(id=public_message.id)
    self.assertFalse(message.public)
    self.assertTrue(message.moderated)
    def test_when_moderation_needed_a_mail_for_its_owner_is_sent(self):
        """Confirming a private message mails a moderation request to the instance owner."""
        self.private_message.recently_confirmated()
        # There should be two
        # One is created for confirmation
        # The other one is created for the moderation thing
        self.assertEquals(len(mail.outbox), 2)
        moderation_mail = mail.outbox[1]
        # it is sent to the owner of the instance
        self.assertEquals(moderation_mail.to[0], self.private_message.writeitinstance.owner.email)
        # The moderation mail must show the moderator everything about the message...
        self.assertTrue(self.private_message.content in moderation_mail.body)
        self.assertTrue(self.private_message.subject in moderation_mail.body)
        self.assertTrue(self.private_message.author_name in moderation_mail.body)
        self.assertTrue(self.private_message.author_email in moderation_mail.body)
        # ...and must contain both moderation action links.
        url_rejected = (reverse('moderation_rejected',
                                subdomain=self.private_message.writeitinstance.slug,
                                kwargs={'slug': self.private_message.moderation.key})
                        )
        url_accept = (reverse('moderation_accept',
                              subdomain=self.private_message.writeitinstance.slug,
                              kwargs={'slug': self.private_message.moderation.key})
                      )
        self.assertIn(url_rejected, moderation_mail.body)
        self.assertIn(url_accept, moderation_mail.body)
def test_creates_automatically_a_moderation_when_a_private_message_is_created(self):
message = Message.objects.create(
content='Content 1',
author_name='Felipe',
author_email="falvarez@votainteligente.cl",
subject='Fiera es una perra feroz',
public=False,
writeitinstance=self.writeitinstance1,
persons=[self.person1],
)
self.assertFalse(message.moderation is None)
def test_a_moderation_does_not_change_its_key_on_save(self):
'''
I found that everytime I did resave a moderation
it key was regenerated
'''
previous_key = self.private_message.moderation.key
self.private_message.moderation.save()
moderation = Moderation.objects.get(message=self.private_message)
post_key = moderation.key
self.assertEquals(previous_key, post_key)
def test_moderates_method(self):
moderation = Moderation.objects.get(message=self.private_message)
moderation.success()
message = Message.objects.get(moderation=moderation)
self.assertTrue(message.moderated)
    # this test is for the issue https://github.com/ciudadanointeligente/write-it/issues/186
    @skip('Message creation is no longer in the instance detail view')
    def test_confirmated_but_not_moderated_message_in_a_moderable_instance_is_in_needs_moderation_status(self):
        """A confirmed but unmoderated message on a fully-moderated instance must stay 'needmodera'.

        Skipped because message creation moved out of the instance detail view.
        """
        self.writeitinstance1.config.moderation_needed_in_all_messages = True
        self.writeitinstance1.config.save()
        data = {
            'author_email': u'falvarez@votainteligente.cl',
            'author_name': u'feli',
            'public': True,
            'subject': u'Fiera no está',
            'content': u'¿Dónde está Fiera Feroz? en la playa?',
            'persons': [self.person1.id],
        }
        url = self.writeitinstance1.get_absolute_url()
        self.client.post(url, data, follow=True)
        message = Message.objects.get(
            author_name="feli",
            author_email="falvarez@votainteligente.cl",
            subject="Fiera no está",
            content='¿Dónde está Fiera Feroz? en la playa?')
        confirmation = Confirmation.objects.get(message=message)
        # Visiting the confirmation URL confirms the message.
        self.client.get(confirmation.get_absolute_url())
        # one message to Pedro
        outbound_message = OutboundMessage.objects.get(message=message)
        # Here I have the bug!!!!!
        self.assertEquals(outbound_message.status, 'needmodera')
        # This one is the bug!!\
    def test_non_authenticated_users_cant_accept_messages(self):
        """Moderation accept links require users to be logged in"""
        # Confirm the message first, so the only thing between the anonymous
        # visitor and the accept view is the authentication check.
        self.confirmation.confirmated_at = datetime.datetime.now()
        self.confirmation.save()
        self.private_message.confirmated = True
        self.private_message.save()
        url = reverse('moderation_accept',
                      subdomain=self.private_message.writeitinstance.slug,
                      kwargs={
                          'slug': self.private_message.moderation.key
                      })
        response = self.client.get(url)
        # Anonymous users are redirected (presumably to login) instead of served.
        self.assertEquals(response.status_code, 302)
        # Nothing about the message may have changed.
        outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message.id)
        self.assertEquals(outbound_message_to_pedro.status, 'new')
        private_message = Message.objects.get(id=self.private_message.id)
        self.assertFalse(private_message.moderated)
def test_non_authenticated_users_cant_reject_messages(self):
"""Moderation reject links require users to be logged in"""
self.confirmation.confirmated_at = datetime.datetime.now()
self.confirmation.save()
self.private_message.confirmated = True
self.private_message.save()
url = reverse('moderation_rejected',
subdomain=self.private_message.writeitinstance.slug,
kwargs={
'slug': self.private_message.moderation.key
})
response = self.client.get(url)
self.assertEquals(response.status_code, 302)
outbound_message_to_pedro = OutboundMessage.objects.get(message=self.private_message.id)
self.assertEquals(outbound_message_to_pedro.status, 'new')
private_message = Message.objects.get(id=self.private_message.id)
self.assertFalse(private_message.moderated)
| ciudadanointeligente/write-it | nuntium/tests/moderation_messages_test.py | moderation_messages_test.py | py | 19,167 | python | en | code | 38 | github-code | 36 | [
{
"api_name": "global_test_case.GlobalTestCase",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "models.WriteItInstance.objects.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "models.WriteItInstance.objects",
"line_number": 20,
"usage_type": "att... |
6790532061 | from django.db import models
from django.conf import settings
from ..querysets.resource import ResourceQuerySet
class ResourceManager(models.Manager):
    """Manager for Resource objects, delegating filtering to ResourceQuerySet."""

    queryset_class = ResourceQuerySet

    def get_queryset(self):
        """Return the custom queryset bound to this manager's model and db."""
        return self.queryset_class(self.model, using=self._db)

    def filter_by_project(self, project):
        """Return the resources that belong to the given project."""
        return self.get_queryset().filter_by_project(project)

    def filter_by_assignment(self, assignment):
        """Return the resources related to the given assignment object."""
        return self.get_queryset().filter_by_object(assignment)

    def create_resource(
        self, user_from, root_name, name, extension, content_type, *args, **kwargs
    ):
        """Create and return a new resource node.

        Optional fields (``link``, ``description``, ``file_size``) are read
        from ``kwargs`` and default to None when absent.
        """
        new_node = self.create(
            created_by=user_from,
            name=name,
            extension=extension,
            mimetype=content_type,
            _filename=root_name,
            link=kwargs.get('link'),
            description=kwargs.get('description'),
            # kwargs.get() already defaults to None; the explicit default was redundant.
            file_size=kwargs.get('file_size'),
        )
        return new_node

    def create_user_resource(self, user_from, team, related, **kwargs):
        """Create a user-owned resource tagged as a user file and attach it to `related`."""
        new_resource = self.model(
            created_by=user_from,
            **kwargs
        )
        new_resource.save(tag_original=settings.FILES_USER_TAG)
        related.add_user_resource(user_from, team, new_resource)
        return new_resource
| tomasgarzon/exo-services | service-exo-core/files/managers/resource.py | resource.py | py | 1,318 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Manager",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "querysets.resource.ResourceQuerySet",
"line_number": 8,
"usage_type": "name"
},
{
"a... |
20115420162 | import logging
import concurrent.futures
import pandas as pd
import random
import time
import requests
import os
import sys
from datetime import datetime
from utils.vars import *
from utils.common import *
from requests.structures import CaseInsensitiveDict
# Output the logs to the stdout
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__file__)
class AzureAuthScaleAutomation:
def __init__(self,
microsoft_groups_token: str,
microsoft_create_apps_token: str,
windows_consent_app_token: str,
redirect_urls: list):
self.microsoft_groups_headers = CaseInsensitiveDict()
self.microsoft_groups_headers["Accept"] = "application/json"
self.microsoft_groups_headers["Content-Type"] = "application/json"
self.microsoft_groups_headers["Authorization"] = microsoft_groups_token
self.microsoft_create_apps_headers = CaseInsensitiveDict()
self.microsoft_create_apps_headers["Accept"] = "application/json"
self.microsoft_create_apps_headers["Content-Type"] = "application/json"
self.microsoft_create_apps_headers["Authorization"] = microsoft_create_apps_token
self.windows_consent_app_headers = CaseInsensitiveDict()
self.windows_consent_app_headers["Accept"] = "application/json"
self.windows_consent_app_headers["Content-Type"] = "application/json"
self.windows_consent_app_headers["Authorization"] = windows_consent_app_token
self.redirect_urls = redirect_urls
self.groups_size = None
self.groups_df = None
    def __create_group_scale(self, scale_group_index: int):
        """
        Creates an azure group

        This function simply creates a group in an azure directory

        Parameters
        ----------
        scale_group_index : int
            This is a just a number to add at the end of the group name
        """
        index_str = str(scale_group_index)
        json_body = {
            "displayName": f"scale_group{index_str}",
            "mailEnabled": True,
            "securityEnabled": True,
            "groupTypes": [
                "Unified"
            ],
            "description": f"scale_group{index_str}",
            "mailNickname": f"scale_group{index_str}",
            "visibility": "private"
        }
        # NOTE(review): the HTTP response is discarded, so a failed creation
        # (e.g. 401/429) goes unnoticed — consider checking the status code.
        requests.post(url=GROUPS_AZURE_LINK, headers=self.microsoft_groups_headers, json=json_body)
def create_group_scale_threading(self, start_index: int, number_of_groups: int):
"""
Creates multiple azure groups
This function creates azure groups using multi-threading approach
Parameters
----------
start_index : int
The sequential number that will be the suffix of the credential name
number_of_groups : int
The number of accounts that will be created
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(self.__create_group_scale, range(start_index, start_index + number_of_groups))
    def create_azure_app_registration(self, app_name: str):
        """
        Creates a web app in azure

        This function simply creates a web app in azure with a specific name which is requested as a parameter

        Parameters
        ----------
        app_name : str
            Display name for the new app registration.

        Returns
        -------
        Response
            The raw `requests` response; its JSON body carries the new app's
            `id` (object id) and `appId` (client id).
        """
        # All redirect-uri lists start empty; modify_redirect_urls_of_app fills
        # the web ones later. The requiredResourceAccess entry requests the
        # delegated User.Read scope (cf. consent_admin_permissions below).
        create_app_body = {
            "displayName": app_name,
            "spa": {
                "redirectUris": []
            },
            "publicClient": {
                "redirectUris": []
            },
            "web": {
                "redirectUris": []
            },
            "signInAudience": "AzureADMyOrg",
            "requiredResourceAccess": [
                {
                    "resourceAppId": "00000003-0000-0000-c000-000000000000",
                    "resourceAccess": [
                        {
                            "id": "e1fe6dd8-ba31-4d61-89e7-88639da4683d",
                            "type": "Scope"
                        }
                    ]
                }
            ]
        }
        return requests.post(APPS_AZURE_LINK, headers=self.microsoft_create_apps_headers, json=create_app_body)
    def __assign_groups_to_members(self, user_id):
        """
        Assigns 1 to 6 random groups

        This function is called from assign_groups_to_members_threading, because it uses a multi-threading
        approach to call this function. This function assigns randomly from 1 to 6 groups to this user.

        Parameters
        ----------
        user_id : str
            The id of the user
        """
        # One JSON-batch request carries all the per-group membership additions.
        json_body = {
            "requests": []
        }
        num_of_groups = random.randint(1, 6)
        # random.sample draws *distinct* indexes, so a group is never added twice.
        random_groups = random.sample(range(0, self.groups_size), num_of_groups)
        for group_index in random_groups:
            group_id = self.groups_df.iloc[group_index]
            tmp_request = {
                "id": f"member_{group_id}_{user_id}",
                "method": "POST",
                "url": f"/groups/{group_id}/members/$ref",
                "headers": {
                    "Content-Type": "application/json"
                },
                "body": {
                    "@odata.id": f"https://graph.microsoft.com/beta/directoryObjects/{user_id}"
                }
            }
            json_body["requests"].append(tmp_request)
        # NOTE(review): the batch response is not inspected, so partial
        # failures inside the batch go unnoticed.
        requests.post(ASSIGN_GROUPS_AZURE_LINK, headers=self.microsoft_groups_headers, json=json_body)
    def assign_groups_to_members_threading(self, users_csv_path: str, groups_csv_path: str):
        """
        Assigns randomly assigns 1 to 6 groups to users in azure

        This function loads a csv with users info and groups info, then selects 1000 to 1000 users
        to call a function that assigns a user with groups. After that, it needs an sleep time because
        azure rejects many requests at the same, maybe it's something about DDOS protection. Be aware that the
        execution may be interrupted or something, that's because you selected too many users. If that's
        the case, then you should wait the program to end and watch manually who was the last users with
        groups assigned.

        Parameters
        ----------
        users_csv_path : str
            The path for the csv of the users you want to assign to groups
        groups_csv_path : str
            The path for the csv file of the groups you want to be assign
        """
        users_df = pd.read_csv(users_csv_path)
        groups_df = pd.read_csv(groups_csv_path)
        # Workers read these through self (see __assign_groups_to_members).
        self.groups_df = groups_df["id"]
        users_df = users_df["id"]
        users_size = users_df.size
        self.groups_size = self.groups_df.size
        start_index = end_index = 0
        end_index = clamp(end_index+1000, 0, users_size-1)
        while start_index < users_size:
            # Fan the current (up to) 1000-user window out across worker threads.
            # Note: .loc slicing is inclusive of end_index.
            tmp_users_df = users_df.loc[start_index:end_index]
            with concurrent.futures.ThreadPoolExecutor() as executor:
                executor.map(self.__assign_groups_to_members, tmp_users_df)
            logger.info(f"Execution finished from {start_index} to {end_index}, waiting to run 1000 more...")
            # Throttle between windows so Azure doesn't reject the burst.
            time.sleep(130)
            start_index = end_index+1
            end_index = clamp(end_index+1000, 0, users_size-1)
        logger.info(f"start index is: {start_index} and last index is: {end_index}")
    def grant_read_permissions(self, app_object_id: str):
        """
        Grants the basic permissions to an app

        This function grants Read permissions to User, Directory and Group to the app. This only receives
        the app_object_id of the app to work.

        Parameters
        ----------
        app_object_id : str
            Object id of the app registration to patch.
        """
        # PATCHing requiredResourceAccess replaces the app's permission list.
        # NOTE(review): the GUIDs presumably map to the User.Read scope plus the
        # User/Group/Directory Read.All app roles consented in
        # consent_admin_permissions — verify against the Graph permission ids.
        grant_read_permissions = {
            "requiredResourceAccess": [
                {
                    "resourceAppId": "00000003-0000-0000-c000-000000000000",
                    "resourceAccess": [
                        {
                            "id": "e1fe6dd8-ba31-4d61-89e7-88639da4683d",
                            "type": "Scope"
                        },
                        {
                            "id": "df021288-bdef-4463-88db-98f22de89214",
                            "type": "Role"
                        },
                        {
                            "id": "5b567255-7703-4780-807c-7be8301ae99b",
                            "type": "Role"
                        },
                        {
                            "id": "7ab1d382-f21e-4acd-a863-ba3e13f7da61",
                            "type": "Role"
                        }
                    ]
                }
            ]
        }
        requests.patch(f"{APPS_AZURE_LINK}/{app_object_id}",
                       headers=self.microsoft_create_apps_headers, json=grant_read_permissions)
    def consent_admin_permissions(self, app_client_id: str):
        """
        Consents the permissions of an app

        This function gives admin consent of all the permissions in the app. By default, the app
        has only Read permissions.

        Parameters
        ----------
        app_client_id: str
            Application (client) id of the app whose permissions are consented.
        """
        admin_consent_body = {
            "clientAppId": f"{app_client_id}",
            "onBehalfOfAll": True,
            "checkOnly": False,
            "tags": [],
            "constrainToRra": True,
            "dynamicPermissions": [
                {
                    "appIdentifier": "00000003-0000-0000-c000-000000000000",
                    "appRoles": [
                        "User.Read.All",
                        "Group.Read.All",
                        "Directory.Read.All"
                    ],
                    "scopes": [
                        "User.Read"
                    ]
                }
            ]
        }
        # Gives the consent to the app permissions as an admin.
        # NOTE(review): uses a separate token (windows_consent_app_headers) and
        # the response is never checked.
        requests.post(ADMIN_CONSENT_FOR_APP_URL, headers=self.windows_consent_app_headers,
                      json=admin_consent_body)
def __create_secret_client(self, app_object_id: str, years_to_expire: int = 3):
"""
Creates api permission credentials to the app
This function generates an api key and client secret to access the app. This function has to be
private because you need to catch the client secret that is only shown once.
Parameters
----------
app_object_id : str
years_to_expire : int
Output
------
json
"""
start_date_time = datetime.now()
end_date_time = start_date_time.replace(year=start_date_time.year + years_to_expire)
parsed_start_dt = get_datetime_to_ISO_format(start_date_time)
parsed_end_dt = get_datetime_to_ISO_format(end_date_time)
app_secret_client_body = {
"passwordCredential": {
"displayName": "test-description",
"endDateTime": f"{parsed_end_dt}",
"startDateTime": f"{parsed_start_dt}"
}
}
return requests.post(f"{APPS_AZURE_LINK}/{app_object_id}/addPassword",
headers=self.microsoft_create_apps_headers, json=app_secret_client_body)
    def modify_redirect_urls_of_app(self, app_object_id: str):
        """
        Updates the redirect_urls of a web app

        This function sends a patch request to modify the redirect urls of a specific web app. It is important to
        know that this functions fully updates the redirect urls, so make sure to initialize this class
        with all urls you want your app to have.

        Parameters
        ----------
        app_object_id : str
            Object id of the app registration to patch.
        """
        # spa and publicClient are deliberately reset to empty; only the web
        # platform keeps the redirect urls configured on this instance.
        redirect_url_body = {
            "spa": {
                "redirectUris": []
            },
            "publicClient": {
                "redirectUris": []
            },
            "web": {
                "redirectUris": self.redirect_urls
            }
        }
        requests.patch(f"{APPS_AZURE_LINK}/{app_object_id}", headers=self.microsoft_create_apps_headers,
                       json=redirect_url_body)
def modify_redirect_urls_of_app_threading(self, object_id_csv_path: str):
"""
Modifies multiple redirect urls from a csv
This function reads a csv containing the object ids of the azure apps, then executes
the funcion modify_redirect_urls_of_app using multi-threading
Parameters
----------
object_id_csv_path : str
The path of the parameter of the csv
"""
df = pd.read_csv(object_id_csv_path)
df = df['object_id']
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(self.modify_redirect_urls_of_app, df)
def create_azure_app_registrations_apis(self, tenant_id: str, start: int, number_of_apps: int):
"""
Registers, configures an azure app and returns a csv with API credentials
This function calls several functions to fully configure an azure app. The first one is for the
creation of the app, then another to grant User.Read.All, Directory.Read.All, Group.Read.All. Then,
you it calls a function that works from another API to give admin consent the previous permissions
of the app. Furthermore, creates an API key and saves the secret key. After that, adds the
redirect urls in the app and at last, creates two csv with the credentials of the app.
Parameters
----------
start : int
The start index for the app name that will be created
number_of_apps: int
The number of apps that will be created starting from the ``start`` index
tenant_id: str
The id of the azure active directory
Returns
-------
None
Doesn't return anything but creates two csv files
Examples
--------
>>> create_azure_app_registrations_apis("89f8652e-c99e-43a0-ab1f-9273081e5aaa", 10, 5)
>>> create_azure_app_registrations_apis("89f8652e-c99e-43a0-ab1f-9273081e5aaa", 5, 1)
"""
tenant_id_list = list()
client_id_list = list()
client_secret_list = list()
object_id_list = list()
for app_index in range(start, start + number_of_apps):
try:
create_request_response = self.create_azure_app_registration(f"aruba-cloudauth-cred-scale-{app_index}")
app_object_id = create_request_response.json()["id"]
app_client_id = create_request_response.json()["appId"]
self.grant_read_permissions(app_object_id)
self.consent_admin_permissions(app_client_id)
app_secret_client_request = self.__create_secret_client(app_object_id)
secret_client_content = app_secret_client_request.json()
client_secret = secret_client_content["secretText"]
self.modify_redirect_urls_of_app(app_object_id)
tenant_id_list.append(tenant_id)
client_id_list.append(app_client_id)
client_secret_list.append(client_secret)
object_id_list.append(app_object_id)
except Exception:
logger.error(Exception("The script didn't finished as expected! Saving the results in the csv"))
break
df = pd.DataFrame({'tenant_id': tenant_id_list,
'client_id': client_id_list,
'client_secret': client_secret_list})
does_file_exists = os.path.isfile(r"azure_app_credentials.csv")
df.to_csv(r"azure_app_credentials.csv", index=False, header=(not does_file_exists), mode='a')
df_object_id = pd.DataFrame({'object_id': object_id_list})
does_file_exists_object_id = os.path.isfile(r"azure_app_object_id.csv")
df_object_id.to_csv(r"azure_app_object_id.csv", index=False, header=(not does_file_exists_object_id), mode='a')
    def __delete_active_application(self, app_object_id: str):
        """
        Deletes permanently an application

        This function deletes temporary an active azure application, then deletes it permanently from that
        temporary directory.

        Parameters
        ----------
        app_object_id: str
            The object id of the application
        """
        # First DELETE soft-deletes the app (moves it to the deleted-items container)...
        requests.delete(f"{APPS_AZURE_LINK}/{app_object_id}", headers=self.microsoft_create_apps_headers)
        # ...the second DELETE purges it from the deleted items for good.
        requests.delete(f"{APP_AZURE_DELETE_PERMANENTLY_LINK}/{app_object_id}", headers=self.microsoft_create_apps_headers)
def delete_active_application_threading(self, object_id_csv_path):
"""
Deletes multiple applications
This function reads a csv with the object ids of the application that will be deleted, then deletes
each of them twice; the first one is temporary and the second one is permanently.
Parameters
----------
object_id_csv_path: csv
The csv containing one or more objects id for applications
"""
df = pd.read_csv(object_id_csv_path)
df = df['object_id']
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(self.__delete_active_application, df)
| jotozhun/azure-pre-scale | azure_auth_scale.py | azure_auth_scale.py | py | 17,441 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogg... |
37738454658 | __docformat__ = 'restructuredtext en'
from collections import OrderedDict
import six
from six import string_types
from geoid.util import isimplify
from geoid.civick import GVid
from geoid import parse_to_gvid
from dateutil import parser
from sqlalchemy import event
from sqlalchemy import Column as SAColumn, Integer, UniqueConstraint
from sqlalchemy import String, ForeignKey
from sqlalchemy.orm import relationship, object_session, backref
from ambry.identity import ObjectNumber, PartialPartitionName, PartitionIdentity
from ambry.orm.columnstat import ColumnStat
from ambry.orm.dataset import Dataset
from ambry.util import Constant
import logging
from ambry.util import get_logger
logger = get_logger(__name__)
# logger.setLevel(logging.DEBUG)
from . import Base, MutationDict, MutationList, JSONEncodedObj, BigIntegerType
class PartitionDisplay(object):
    """Helper object to select what to display for titles and descriptions"""

    def __init__(self, p):
        self._p = p

        desc_used = False

        self.title = self._p.title
        self.description = ''

        # Fall back from the partition title, to the table description,
        # to the versioned name.
        if not self.title:
            self.title = self._p.table.description
            desc_used = True

        if not self.title:
            self.title = self._p.vname

        # Only use the partition description when the table description was
        # not already promoted to the title.
        if not desc_used:
            self.description = self._p.description.strip('.') + '.' if self._p.description else ''

        self.notes = self._p.notes

    @property
    def geo_description(self):
        """Return a description of the geographic extents, using the largest scale
        space and grain coverages"""
        sc = self._p.space_coverage
        gc = self._p.grain_coverage

        if sc and gc:
            if parse_to_gvid(gc[0]).level == 'state' and parse_to_gvid(sc[0]).level == 'state':
                return parse_to_gvid(sc[0]).geo_name
            else:
                return ("{} in {}".format(
                    parse_to_gvid(gc[0]).level_plural.title(),
                    parse_to_gvid(sc[0]).geo_name))
        elif sc:
            return parse_to_gvid(sc[0]).geo_name.title()
        elif gc:
            # BUG FIX: this branch previously re-tested `sc` and was therefore
            # unreachable; it is meant to describe grain-only coverage.
            return parse_to_gvid(gc[0]).level_plural.title()
        else:
            return ''

    @property
    def time_description(self):
        """String description of the year or year range"""
        tc = [t for t in self._p.time_coverage if t]

        if not tc:
            return ''

        mn = min(tc)
        mx = max(tc)

        if not mn and not mx:
            return ''
        elif mn == mx:
            return mn
        else:
            return "{} to {}".format(mn, mx)

    @property
    def sub_description(self):
        """Time and space dscription"""
        gd = self.geo_description
        td = self.time_description

        if gd and td:
            return '{}, {}. {} Rows.'.format(gd, td, self._p.count)
        elif gd:
            return '{}. {} Rows.'.format(gd, self._p.count)
        elif td:
            return '{}. {} Rows.'.format(td, self._p.count)
        else:
            return '{} Rows.'.format(self._p.count)
class Partition(Base):
    """ORM record for a bundle partition: a named, typed segment of a dataset's data."""

    __tablename__ = 'partitions'

    # Lifecycle states used by the build pipeline.
    STATES = Constant()
    STATES.SYNCED = 'synced'
    STATES.CLEANING = 'cleaning'
    STATES.CLEANED = 'cleaned'
    STATES.PREPARING = 'preparing'
    STATES.PREPARED = 'prepared'
    STATES.BUILDING = 'building'
    STATES.BUILT = 'built'
    STATES.COALESCING = 'coalescing'
    STATES.COALESCED = 'coalesced'
    STATES.ERROR = 'error'
    STATES.FINALIZING = 'finalizing'
    STATES.FINALIZED = 'finalized'
    STATES.INSTALLING = 'installing'
    STATES.INSTALLED = 'installed'

    # BUG FIX: this was `TYPE = Constant` (missing the call), which bound the
    # shared Constant *class* and set SEGMENT/UNION as class-level attributes
    # visible through every Constant namespace (including STATES above).
    # Instantiate it, matching the STATES pattern.
    TYPE = Constant()
    TYPE.SEGMENT = 's'
    TYPE.UNION = 'u'

    sequence_id = SAColumn('p_sequence_id', Integer)
    vid = SAColumn('p_vid', String(16), primary_key=True, nullable=False)
    id = SAColumn('p_id', String(13), nullable=False)
    d_vid = SAColumn('p_d_vid', String(13), ForeignKey('datasets.d_vid'), nullable=False, index=True)
    t_vid = SAColumn('p_t_vid', String(15), ForeignKey('tables.t_vid'), nullable=False, index=True)
    name = SAColumn('p_name', String(200), nullable=False, index=True)
    vname = SAColumn('p_vname', String(200), unique=True, nullable=False, index=True)
    fqname = SAColumn('p_fqname', String(200), unique=True, nullable=False, index=True)
    title = SAColumn('p_title', String())
    description = SAColumn('p_description', String())
    notes = SAColumn('p_notes', String())
    cache_key = SAColumn('p_cache_key', String(200), unique=True, nullable=False, index=True)
    parent_vid = SAColumn('p_p_vid', String(16), ForeignKey('partitions.p_vid'), nullable=True, index=True)
    ref = SAColumn('p_ref', String(16), index=True,
                   doc='VID reference to an eariler version to use instead of this one.')
    type = SAColumn('p_type', String(20), default=TYPE.UNION,
                    doc='u - normal partition, s - segment')
    table_name = SAColumn('p_table_name', String(50))
    time = SAColumn('p_time', String(20))  # FIXME: add helptext
    space = SAColumn('p_space', String(50))
    grain = SAColumn('p_grain', String(50))
    variant = SAColumn('p_variant', String(50))
    format = SAColumn('p_format', String(50))
    segment = SAColumn('p_segment', Integer,
                       doc='Part of a larger partition. segment_id is usually also a source ds_id')
    epsg = SAColumn('p_epsg', Integer, doc='EPSG SRID for the reference system of a geographic dataset. ')

    # The partition could hold data that is considered a dimension -- if multiple datasets
    # were joined, that dimension would be a dimension column, but it only has a single
    # value in each partition.
    # That could be part of the name, or it could be declared in a table, with a single value for all of the
    # rows in a partition.

    min_id = SAColumn('p_min_id', BigIntegerType)
    max_id = SAColumn('p_max_id', BigIntegerType)
    count = SAColumn('p_count', Integer)
    state = SAColumn('p_state', String(50))
    data = SAColumn('p_data', MutationDict.as_mutable(JSONEncodedObj))

    # Coverage summaries, computed by set_coverage() from column stats.
    space_coverage = SAColumn('p_scov', MutationList.as_mutable(JSONEncodedObj))
    time_coverage = SAColumn('p_tcov', MutationList.as_mutable(JSONEncodedObj))
    grain_coverage = SAColumn('p_gcov', MutationList.as_mutable(JSONEncodedObj))

    installed = SAColumn('p_installed', String(100))
    _location = SAColumn('p_location', String(100))  # Location of the data file

    __table_args__ = (
        # ForeignKeyConstraint( [d_vid, d_location], ['datasets.d_vid','datasets.d_location']),
        UniqueConstraint('p_sequence_id', 'p_d_vid', name='_uc_partitions_1'),
    )

    # For the primary table for the partition. There is one per partition, but a table
    # can be primary in multiple partitions.
    table = relationship('Table', backref='partitions', foreign_keys='Partition.t_vid')

    stats = relationship(ColumnStat, backref='partition', cascade='all, delete, delete-orphan')

    children = relationship('Partition', backref=backref('parent', remote_side=[vid]), cascade='all')

    _bundle = None  # Set when returned from a bundle.
    _datafile = None  # TODO: Unused variable.
    _datafile_writer = None  # TODO: Unused variable.
    _stats_dict = None
    @property
    def identity(self):
        """Return this partition information as a PartitionId."""
        if self.dataset is None:
            # The relationship will be null until the object is committed
            s = object_session(self)
            # NOTE(review): self.d_id is not one of the mapped columns declared
            # on this class — presumably provided elsewhere; verify.
            ds = s.query(Dataset).filter(Dataset.id_ == self.d_id).one()
        else:
            ds = self.dataset

        d = {
            'id': self.id,
            'vid': self.vid,
            'name': self.name,
            'vname': self.vname,
            'ref': self.ref,
            'space': self.space,
            'time': self.time,
            'table': self.table_name,
            'grain': self.grain,
            'variant': self.variant,
            'segment': self.segment,
            # 'db' is the fallback storage format when none is recorded.
            'format': self.format if self.format else 'db'
        }

        # Dataset fields first, so partition fields win on key collisions.
        return PartitionIdentity.from_dict(dict(list(ds.dict.items()) + list(d.items())))
    @property
    def display(self):
        """Return an acessor object to get display titles and descriptions"""
        return PartitionDisplay(self)

    @property
    def bundle(self):
        # Set externally, such as Bundle.wrap_partition
        return self._bundle

    @property
    def is_segment(self):
        """True when this partition is a segment (type 's') of a larger union partition."""
        return self.type == self.TYPE.SEGMENT

    @property
    def headers(self):
        """Column names of the partition's primary table, in declaration order."""
        return [c.name for c in self.table.columns]

    def __repr__(self):
        return '<partition: {} {}>'.format(self.vid, self.vname)
    def set_stats(self, stats):
        """Replace this partition's ColumnStat rows from a {column_name: stat} mapping."""
        self.stats[:] = []  # Delete existing stats

        for c in self.table.columns:

            # Columns with no computed stats are simply skipped.
            if c.name not in stats:
                continue

            d = stats[c.name].dict
            # 'name' and 'flags' are not ColumnStat constructor fields.
            del d['name']
            del d['flags']

            cs = ColumnStat(p_vid=self.vid, d_vid=self.d_vid, c_vid=c.vid, **d)

            self.stats.append(cs)
    def parse_gvid_or_place(self, gvid_or_place):
        """Parse a GVid string, falling back to a full-text place-name search.

        Returns a GVid, or None (after logging an error on the bundle) when
        the identifier cannot be resolved.
        """
        try:
            return parse_to_gvid(gvid_or_place)
        except KeyError:

            # Not a gvid; try to resolve it as a place name through the
            # library's identifier search.
            places = list(self._bundle._library.search.search_identifiers(gvid_or_place))

            if not places:
                err_msg = "Failed to find space identifier '{}' in full " \
                          "text identifier search for partition '{}'" \
                    .format(gvid_or_place, str(self.identity))
                self._bundle.error(err_msg)
                return None

            return parse_to_gvid(places[0].vid)
    def set_coverage(self, stats):
        """"Extract time space and grain coverage from the stats and store them in the partition"""
        from ambry.util.datestimes import expand_to_years

        scov = set()    # space coverage (GVids)
        tcov = set()    # time coverage (years)
        grains = set()  # grain coverage (summarized GVids)

        def summarize_maybe(gvid):
            # Best effort: None when the gvid cannot be parsed/summarized.
            try:
                return parse_to_gvid(gvid).summarize()
            except:
                return None

        def simplifiy_maybe(values, column):
            # Parse each value to a GVid (warning on failures), then simplify
            # the collection to a minimal covering set.
            parsed = []

            for gvid in values:
                # The gvid should not be a st
                if gvid is None or gvid == 'None':
                    continue
                try:
                    parsed.append(parse_to_gvid(gvid))
                except ValueError as e:
                    if self._bundle:
                        self._bundle.warn("While analyzing geo coverage in final partition stage, " +
                                          "Failed to parse gvid '{}' in {}.{}: {}"
                                          .format(str(gvid), column.table.name, column.name, e))

            try:
                return isimplify(parsed)
            except:
                # NOTE(review): bare except; a None return here makes the
                # caller's generator expression raise TypeError.
                return None

        def int_maybe(year):
            # None when the value is not integer-like.
            try:
                return int(year)
            except:
                return None

        for c in self.table.columns:

            if c.name not in stats:
                continue

            try:
                if stats[c.name].is_gvid or stats[c.name].is_geoid:
                    scov |= set(x for x in simplifiy_maybe(stats[c.name].uniques, c))
                    grains |= set(summarize_maybe(gvid) for gvid in stats[c.name].uniques)
                elif stats[c.name].is_year:
                    tcov |= set(int_maybe(x) for x in stats[c.name].uniques)
                elif stats[c.name].is_date:
                    # The fuzzy=True argument allows ignoring the '-' char in dates produced by .isoformat()
                    try:
                        tcov |= set(parser.parse(x, fuzzy=True).year if isinstance(x, string_types) else x.year for x in
                                    stats[c.name].uniques)
                    except ValueError:
                        pass
            except Exception as e:
                self._bundle.error("Failed to set coverage for column '{}', partition '{}': {}"
                                   .format(c.name, self.identity.vname, e))
                raise

        # Space Coverage

        # Add the space values declared on the sources this partition was built from.
        if 'source_data' in self.data:
            for source_name, source in list(self.data['source_data'].items()):
                scov.add(self.parse_gvid_or_place(source['space']))

        if self.identity.space:  # And from the partition name
            try:
                scov.add(self.parse_gvid_or_place(self.identity.space))
            except ValueError:
                # Couldn't parse the space as a GVid
                pass

        # For geo_coverage, only includes the higher level summary levels, counties, states,
        # places and urban areas.
        self.space_coverage = sorted([str(x) for x in scov if bool(x) and x.sl
                                      in (10, 40, 50, 60, 160, 400)])

        #
        # Time Coverage

        # From the source
        # If there was a time value in the source that this partition was created from, then
        # add it to the years.
        if 'source_data' in self.data:
            for source_name, source in list(self.data['source_data'].items()):
                if 'time' in source:
                    for year in expand_to_years(source['time']):
                        if year:
                            tcov.add(year)

        # From the partition name
        if self.identity.name.time:
            for year in expand_to_years(self.identity.name.time):
                if year:
                    tcov.add(year)

        self.time_coverage = [t for t in tcov if t]

        #
        # Grains

        # Grains declared on the sources, in addition to those from gvid columns.
        if 'source_data' in self.data:
            for source_name, source in list(self.data['source_data'].items()):
                if 'grain' in source:
                    grains.add(source['grain'])

        self.grain_coverage = sorted(str(g) for g in grains if g)
    @property
    def dict(self):
        """A dict that holds key/values for all of the properties in the
        object.

        :return:

        """
        d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs
             if p.key not in ('table', 'dataset', '_codes', 'stats', 'data', 'process_records')}

        if self.data:
            # Copy data fields into top level dict, but don't overwrite existing values.
            for k, v in six.iteritems(self.data):
                if k not in d and k not in ('table', 'stats', '_codes', 'data'):
                    d[k] = v

        return d
@property
def detail_dict(self):
"""A more detailed dict that includes the descriptions, sub descriptions, table
and columns."""
d = self.dict
def aug_col(c):
d = c.dict
d['stats'] = [s.dict for s in c.stats]
return d
d['table'] = self.table.dict
d['table']['columns'] = [aug_col(c) for c in self.table.columns]
return d
@property
def stats_dict(self):
    """Lazily-built map of column name to that column's stats, with both
    dict-style and attribute-style access."""

    class Bunch(object):
        """Dict and object access to properties"""

        def __init__(self, o):
            self.__dict__.update(o)

        def __str__(self):
            return str(self.__dict__)

        def __repr__(self):
            return repr(self.__dict__)

        def keys(self):
            return list(self.__dict__.keys())

        def items(self):
            return list(self.__dict__.items())

        def iteritems(self):
            return iter(self.__dict__.items())

        def __getitem__(self, k):
            try:
                return self.__dict__[k]
            except KeyError:
                # Unknown column: hand back an empty stats record instead of raising.
                from . import ColumnStat
                return ColumnStat(hist=[])

    if not self._stats_dict:
        per_column = {s.column.name: Bunch(s.dict) for s in self.stats}
        self._stats_dict = Bunch(per_column)
    return self._stats_dict
def build_sample(self):
    """Extract up to 20 roughly evenly spaced sample rows from the partition's
    table and store them in ``self.record.data['sample']``, committing the record.
    """
    name = self.table.name
    count = int(
        self.database.connection.execute('SELECT count(*) FROM "{}"'.format(name)).fetchone()[0])
    # BUG FIX: '/' is float division under Python 3, which produced SQL like
    # 'id % 5.25 = 0'. Use integer division; the count > 100 guard keeps skip >= 5.
    skip = count // 20
    if count > 100:
        sql = 'SELECT * FROM "{}" WHERE id % {} = 0 LIMIT 20'.format(name, skip)
    else:
        sql = 'SELECT * FROM "{}" LIMIT 20'.format(name)
    sample = []
    for row in self.database.connection.execute(sql):
        sample.append(list(row.values()))
    self.record.data['sample'] = sample
    s = self.bundle.database.session
    s.merge(self.record)
    s.commit()
@property
def row(self):
    """An OrderedDict of the partition's ORM values, friendly to creating CSV files."""
    # Keys that should not appear in a CSV row.
    skip_keys = frozenset([
        'sequence_id', 'vid', 'id', 'd_vid', 't_vid', 'min_key', 'max_key',
        'installed', 'ref', 'count', 'state', 'data', 'space_coverage',
        'time_coverage', 'grain_coverage', 'name', 'vname', 'fqname', 'cache_key'
    ])
    pairs = [('table', self.table.name)]
    for prop in self.__mapper__.attrs:
        if prop.key not in skip_keys:
            pairs.append((prop.key, getattr(self, prop.key)))
    return OrderedDict(pairs)
def update(self, **kwargs):
    """Assign any keyword arguments that correspond to existing attributes."""
    # In source_schema.csv, 'table' is the name of the table, not the object,
    # so it can't be assigned directly.
    kwargs.pop('table', None)
    for key, value in list(kwargs.items()):
        if hasattr(self, key):
            setattr(self, key, value)
def finalize(self, ps=None):
    """Close out the partition's datafile, write back column metadata and row
    count, run stats for UNION partitions, and mark the partition FINALIZED.

    :param ps: progress-reporting object; dereferenced only for UNION partitions.
    """
    self.state = self.STATES.FINALIZING
    # Write the stats for this partition back into the partition
    with self.datafile.writer as w:
        for i, c in enumerate(self.table.columns, 1):
            # Writer columns are 1-based and must line up with the schema order.
            wc = w.column(i)
            assert wc.pos == c.sequence_id, (c.name, wc.pos, c.sequence_id)
            wc.name = c.name
            wc.description = c.description
            wc.type = c.python_type.__name__
        self.count = w.n_rows
        w.finalize()
    if self.type == self.TYPE.UNION:
        # NOTE(review): ps is used without a None check here, so callers
        # finalizing a UNION partition must pass ps -- confirm intended.
        ps.update('Running stats ', state='running')
        stats = self.datafile.run_stats()
        self.set_stats(stats)
        self.set_coverage(stats)
    self._location = 'build'
    self.title = PartitionDisplay(self).title
    self.description = PartitionDisplay(self).description
    self.state = self.STATES.FINALIZED
# =============
# These methods are a bit non-cohesive, since they require the _bundle value to be set, which is
# set externally, when the object is retured from a bundle.
def clean(self):
    """Remove all built files, returning the partition to a newly-created state."""
    if not self.datafile:
        return
    self.datafile.remove()
@property
def location(self):
    """The partition's storage location label, resolved to a local system path
    when the build filesystem can provide one."""
    base_location = self._location
    if not base_location:
        return None
    if self._bundle.build_fs.exists(base_location):
        # NOTE(review): 'hashsyspath' looks like a typo for pyfilesystem's
        # 'hassyspath' -- confirm build_fs actually provides this method.
        if self._bundle.build_fs.hashsyspath(base_location):
            return self._bundle.build_fs.getsyspath(base_location)
    return base_location

@location.setter
def location(self, v):
    # Stored verbatim; resolution to a system path happens in the getter.
    self._location = v
@property
def datafile(self):
    """The partition's datafile: the local copy when present, otherwise the
    remote one; falls back to the (possibly nonexistent) local file so callers
    can still call exists(), get its path, etc."""
    from ambry.exc import NotFoundError
    if self.is_local:
        logger.debug('datafile: Using local datafile {}'.format(self.vname))
        return self.local_datafile
    try:
        logger.debug('datafile: Using remote datafile {}'.format(self.vname))
        return self.remote_datafile
    except NotFoundError:
        return self.local_datafile
@property
def local_datafile(self):
    """Return the datafile for this partition, from the build directory, the remote, or the warehouse"""
    # NOTE(review): despite the docstring, this only looks in the local build
    # filesystem; remote access lives in remote_datafile -- confirm.
    from ambry_sources import MPRowsFile
    from fs.errors import ResourceNotFoundError
    from ambry.orm.exc import NotFoundError
    try:
        return MPRowsFile(self._bundle.build_fs, self.cache_key)
    except ResourceNotFoundError:
        # Re-raise as ambry's NotFoundError so callers handle one exception type.
        raise NotFoundError(
            'Could not locate data file for partition {} (local)'.format(self.identity.fqname))
@property
def remote(self):
    """Return the library remote that holds this partition.

    :raises NotFoundError: when the dataset does not record a remote name.
    """
    from ambry.exc import NotFoundError
    ds = self.dataset
    try:
        remote_name = ds.data['remote_name']
    except KeyError:
        raise NotFoundError('Could not determine remote for partition: {}'.format(self.identity.fqname))
    return self._bundle.library.remote(remote_name)
@property
def remote_datafile(self):
    """Return the MPR datafile for this partition from its remote.

    :raises NotFoundError: when the file is missing or the remote is unreachable.
    """
    from fs.errors import ResourceNotFoundError
    from ambry.exc import AccessError, NotFoundError
    from boto.exception import S3ResponseError
    try:
        from ambry_sources import MPRowsFile
        remote = self.remote
        datafile = MPRowsFile(remote.fs, self.cache_key)
        if datafile.exists:
            return datafile
        raise NotFoundError(
            'Could not locate data file for partition {} from remote {} : file does not exist'
            .format(self.identity.fqname, remote))
    except ResourceNotFoundError as e:
        raise NotFoundError('Could not locate data file for partition {} (remote): {}'
                            .format(self.identity.fqname, e))
    except S3ResponseError:
        # HACK. It looks like we get the response error with an access problem when
        # we have access to S3, but the file doesn't exist.
        raise NotFoundError("Can't access MPR file for {} in remote {}".format(self.cache_key, remote.fs))
@property
def is_local(self):
    """True when the partition file exists in the local build filesystem."""
    from ambry.orm.exc import NotFoundError
    try:
        return bool(self.local_datafile.exists)
    except NotFoundError:
        return False
def localize(self, ps=None):
    """Copy a non-local partition file to the local build directory.

    Downloads under a file lock so concurrent processes don't clobber each
    other. Progress is reported through ``ps`` when given, otherwise via the
    bundle log at 5-second intervals.

    :param ps: optional progress-reporting object
    :return: self
    """
    from filelock import FileLock
    from ambry.util import ensure_dir_exists
    from ambry_sources import MPRowsFile
    from fs.errors import ResourceNotFoundError
    if self.is_local:
        return
    local = self._bundle.build_fs
    b = self._bundle.library.bundle(self.identity.as_dataset().vid)
    remote = self._bundle.library.remote(b)
    lock_path = local.getsyspath(self.cache_key + '.lock')
    ensure_dir_exists(lock_path)
    lock = FileLock(lock_path)
    if ps:
        ps.add_update(message='Localizing {}'.format(self.identity.name),
                      partition=self,
                      item_type='bytes',
                      state='downloading')
    if ps:
        def progress(bts):
            # Download-progress callback; throttles ps updates to every 32nd call.
            if ps.rec.item_total is None:
                ps.rec.item_count = 0
            if not ps.rec.data:
                ps.rec.data = {}  # Should not need to do this.
                # NOTE(review): returning self from a progress callback looks
                # odd -- the value is ignored; confirm this early exit is intended.
                return self
            item_count = ps.rec.item_count + bts
            ps.rec.data['updates'] = ps.rec.data.get('updates', 0) + 1
            if ps.rec.data['updates'] % 32 == 1:
                ps.update(message='Localizing {}'.format(self.identity.name),
                          item_count=item_count)
    else:
        from ambry.bundle.process import call_interval

        @call_interval(5)
        def progress(bts):
            self._bundle.log("Localizing {}. {} bytes downloaded".format(self.vname, bts))

    def exception_cb(e):
        # Propagate async download errors to this caller.
        raise e

    with lock:
        # FIXME! This won't work with remote ( http) API, only FS ( s3:, file:)
        if self.is_local:
            # Another process finished the download while we waited on the lock.
            return self
        try:
            with remote.fs.open(self.cache_key + MPRowsFile.EXTENSION, 'rb') as f:
                event = local.setcontents_async(self.cache_key + MPRowsFile.EXTENSION,
                                                f,
                                                progress_callback=progress,
                                                error_callback=exception_cb)
                event.wait()
            if ps:
                ps.update_done()
        except ResourceNotFoundError as e:
            from ambry.orm.exc import NotFoundError
            raise NotFoundError("Failed to get MPRfile '{}' from {}: {} "
                                .format(self.cache_key, remote.fs, e))
    return self
@property
def reader(self):
    """The reader for the partition's datafile.

    :raises NotFoundError: when the partition file cannot be found.
    """
    # BUG FIX: the original placed the doc string *after* the imports, making it
    # a no-op expression statement rather than the property's docstring.
    from ambry.orm.exc import NotFoundError
    from fs.errors import ResourceNotFoundError
    try:
        return self.datafile.reader
    except ResourceNotFoundError:
        raise NotFoundError("Failed to find partition file, '{}' "
                            .format(self.datafile.path))
def select(self, predicate=None, headers=None):
    """Yield rows from the partition's reader, optionally filtered and trimmed.

    :param predicate: optional callable; a row is yielded only when it returns true.
    :param headers: optional list or tuple of header names to return from each row.
    :return: generator of results

    WARNING: the underlying reader re-uses its RowProxy objects, so materializing
    this generator directly into a list produces many references to a single
    RowProxy holding the final row. Extract the inner row first, e.g.::

        list(s.datafile.select(lambda r: r.stusab == 'CA', lambda r: r.dict ))
    """
    # FIXME; in Python 3, use yield from
    with self.reader as rdr:
        for result_row in rdr.select(predicate, headers):
            yield result_row
def __iter__(self):
    """Yield each row of the partition as a RowProxy object."""
    with self.reader as rdr:
        for proxy_row in rdr:
            yield proxy_row
@property
def analysis(self):
    """Return an AnalysisPartition proxy, wrapping this partition to provide
    access to dataframes, shapely shapes and other analysis services."""
    # Unwrap if we are already a proxy so the new proxy wraps the real partition.
    target = self._obj if isinstance(self, PartitionProxy) else self
    return AnalysisPartition(target)
@property
def measuredim(self):
    """Return a MeasureDimensionPartition proxy, wrapping the partition to
    provide access to columns in terms of measures and dimensions."""
    # Unwrap if we are already a proxy so the new proxy wraps the real partition.
    target = self._obj if isinstance(self, PartitionProxy) else self
    return MeasureDimensionPartition(target)
# ============================
def update_id(self, sequence_id=None):
    """Alter the sequence id and regenerate all names and ids derived from it.

    Often needed after an IntegrityError in a multiprocessing run.
    """
    if sequence_id:
        self.sequence_id = sequence_id
    self._set_ids(force=True)
    if self.dataset:
        self._update_names()
def _set_ids(self, force=False):
    """Derive this partition's vid and id from the dataset vid and sequence id.

    :param force: regenerate the ids even when a vid is already present.
    :raises DatabaseError: when no sequence id has been assigned yet.
    """
    if not self.sequence_id:
        from .exc import DatabaseError
        raise DatabaseError('Sequence ID must be set before insertion')
    if force or not self.vid:
        assert bool(self.d_vid)
        assert bool(self.sequence_id)
        dataset_on = ObjectNumber.parse(self.d_vid)
        assert dataset_on.revision
        partition_on = dataset_on.as_partition(self.sequence_id)
        self.vid = str(partition_on.rev(dataset_on.revision))
        self.id = str(partition_on.rev(None))
    if not self.data:
        self.data = {}
def _update_names(self):
    """Rebuild name, vname, cache_key and fqname from the partition components."""
    assert self.dataset
    components = dict(
        table=self.table_name,
        time=self.time,
        space=self.space,
        grain=self.grain,
        variant=self.variant,
        segment=self.segment,
    )
    name = PartialPartitionName(**components).promote(self.dataset.identity.name)
    self.name = str(name.name)
    self.vname = str(name.vname)
    self.cache_key = name.cache_key
    self.fqname = str(self.identity.fqname)
@staticmethod
def before_insert(mapper, conn, target):
    """SQLAlchemy ``before_insert`` listener: set the sequence-derived ids for
    this object, creating an ObjectNumber value for the id_, then derive names."""
    target._set_ids()
    # Skip name derivation when the names are already populated and there is no
    # dataset to derive them from.
    # NOTE(review): the 'not target.dataset' clause reads oddly -- confirm the
    # guard's intent.
    if target.name and target.vname and target.cache_key and target.fqname and not target.dataset:
        return
    Partition.before_update(mapper, conn, target)
@staticmethod
def before_update(mapper, conn, target):
    """SQLAlchemy ``before_update`` listener: refresh the derived names."""
    target._update_names()
@staticmethod
def before_delete(mapper, conn, target):
    """SQLAlchemy ``before_delete`` listener: no action required."""
    pass
# Wire the listeners above into the Partition ORM lifecycle events.
event.listen(Partition, 'before_insert', Partition.before_insert)
event.listen(Partition, 'before_update', Partition.before_update)
event.listen(Partition, 'before_delete', Partition.before_delete)
class PartitionProxy(object):
    """Transparent attribute-forwarding wrapper around a partition object.

    Subclasses add analysis- or measure-oriented behavior while delegating
    everything else to the wrapped object stored in ``_obj``.
    """

    __slots__ = ["_obj", "__weakref__"]

    def __init__(self, obj):
        # Bypass our own __setattr__, which forwards to the wrapped object.
        object.__setattr__(self, "_obj", obj)

    #
    # proxying (special cases)
    #

    def __getattr__(self, name):
        return getattr(object.__getattribute__(self, "_obj"), name)

    def __delattr__(self, name):
        delattr(object.__getattribute__(self, "_obj"), name)

    def __setattr__(self, name, value):
        setattr(object.__getattribute__(self, "_obj"), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, "_obj"))

    # BUG FIX: Python 3 uses __bool__, not __nonzero__; without this alias an
    # empty wrapped object was always truthy under Python 3.
    __bool__ = __nonzero__

    def __str__(self):
        return "<{}: {}>".format(type(self), str(object.__getattribute__(self, "_obj")))

    def __repr__(self):
        return "<{}: {}>".format(type(self), repr(object.__getattribute__(self, "_obj")))

    def __iter__(self):
        return iter(object.__getattribute__(self, "_obj"))
class AnalysisPartition(PartitionProxy):
    """A subclass of Partition with methods designed for analysis with Pandas. It is produced from
    the partitions analysis property"""

    def dataframe(self, predicate=None, filtered_columns=None, columns=None, df_class=None):
        """Return the partition as a Pandas dataframe
        :param predicate: If defined, a callable that is called for each row, and if it returns true, the
        row is included in the output.
        :param filtered_columns: If defined, the value is a dict of column names and
        associated values. Only rows where all of the named columms have the given values will be returned.
        Setting the argument will overwrite any value set for the predicate
        :param columns: A list or tuple of column names to return
        :return: Pandas dataframe
        """
        from operator import itemgetter
        from ambry.pands import AmbryDataFrame
        df_class = df_class or AmbryDataFrame
        if columns:
            ig = itemgetter(*columns)
        else:
            ig = None
            columns = self.table.header
        if filtered_columns:
            def maybe_quote(v):
                # Quote strings so they survive embedding in the eval'd expression.
                from six import string_types
                if isinstance(v, string_types):
                    return '"{}"'.format(v)
                else:
                    return v
            code = ' and '.join("row.{} == {}".format(k, maybe_quote(v))
                                for k, v in filtered_columns.items())
            # SECURITY NOTE(review): the predicate is built with eval() from the
            # filter names/values; values containing quotes or code will break or
            # execute. Only use with trusted filter inputs.
            predicate = eval('lambda row: {}'.format(code))
        if predicate:
            def yielder():
                for row in self.reader:
                    if predicate(row):
                        if ig:
                            # itemgetter over the requested columns.
                            yield ig(row)
                        else:
                            yield row.dict
            df = df_class(yielder(), columns=columns, partition=self.measuredim)
            return df
        else:
            def yielder():
                for row in self.reader:
                    yield row.values()
            # Put column names in header order
            columns = [c for c in self.table.header if c in columns]
            return df_class(yielder(), columns=columns, partition=self.measuredim)

    def geoframe(self, simplify=None, predicate=None, crs=None, epsg=None):
        """
        Return geopandas dataframe
        :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
        :param predicate: A single-argument function to select which records to include in the output.
        :param crs: Coordinate reference system information
        :param epsg: Specifiy the CRS as an EPGS number.
        :return: A Geopandas GeoDataFrame
        """
        import geopandas
        from shapely.wkt import loads
        from fiona.crs import from_epsg
        # Fall back to the partition's own EPSG when neither crs nor epsg is given.
        if crs is None and epsg is None and self.epsg is not None:
            epsg = self.epsg
        if crs is None:
            try:
                crs = from_epsg(epsg)
            except TypeError:
                raise TypeError('Must set either crs or epsg for output.')
        df = self.dataframe(predicate=predicate)
        geometry = df['geometry']
        # Geometry arrives as WKT strings; parse (and optionally simplify) each one.
        if simplify:
            s = geometry.apply(lambda x: loads(x).simplify(simplify))
        else:
            s = geometry.apply(lambda x: loads(x))
        df['geometry'] = geopandas.GeoSeries(s)
        return geopandas.GeoDataFrame(df, crs=crs, geometry='geometry')

    def shapes(self, simplify=None, predicate=None):
        """
        Return geodata as a list of Shapely shapes
        :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
        :param predicate: A single-argument function to select which records to include in the output.
        :return: A list of Shapely objects
        """
        from shapely.wkt import loads
        if not predicate:
            predicate = lambda row: True
        if simplify:
            return [loads(row.geometry).simplify(simplify) for row in self if predicate(row)]
        else:
            return [loads(row.geometry) for row in self if predicate(row)]

    def patches(self, basemap, simplify=None, predicate=None, args_f=None, **kwargs):
        """
        Return geodata as a list of Matplotlib patches
        :param basemap: A mpl_toolkits.basemap.Basemap
        :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
        :param predicate: A single-argument function to select which records to include in the output.
        :param args_f: A function that takes a row and returns a dict of additional args for the Patch constructor
        :param kwargs: Additional args to be passed to the descartes Path constructor
        :return: A list of patch objects
        """
        from descartes import PolygonPatch
        from shapely.wkt import loads
        from shapely.ops import transform
        if not predicate:
            predicate = lambda row: True

        def map_xform(x, y, z=None):
            # Project lon/lat into the basemap's coordinate system.
            return basemap(x, y)

        def make_patch(shape, row):
            args = dict(kwargs.items())
            if args_f:
                # Per-row overrides for the Patch constructor.
                args.update(args_f(row))
            return PolygonPatch(transform(map_xform, shape), **args)

        def yield_patches(row):
            if simplify:
                shape = loads(row.geometry).simplify(simplify)
            else:
                shape = loads(row.geometry)
            # MultiPolygons become one patch per component polygon.
            if shape.geom_type == 'MultiPolygon':
                for subshape in shape.geoms:
                    yield make_patch(subshape, row)
            else:
                yield make_patch(shape, row)

        return [patch for row in self if predicate(row)
                for patch in yield_patches(row)]
class MeasureDimensionPartition(PartitionProxy):
    """A partition proxy for accessing measure and dimensions. When returning a column, it returns
    a PartitionColumn, which proxies the table column while adding partition specific functions. """

    def __init__(self, obj):
        super(MeasureDimensionPartition, self).__init__(obj)
        self.filters = {}

    def column(self, c_name):
        """Return the named column, wrapped as a PartitionColumn."""
        return PartitionColumn(self.table.column(c_name), self)

    @property
    def columns(self):
        """Iterate over all columns"""
        return [PartitionColumn(c, self) for c in self.table.columns]

    @property
    def primary_columns(self):
        """Iterate over the primary columns, columns which do not have a parent"""
        return [c for c in self.columns if not c.parent]

    @property
    def dimensions(self):
        """Iterate over all dimensions"""
        from ambry.valuetype.core import ROLE
        return [c for c in self.columns if c.role == ROLE.DIMENSION]

    @property
    def primary_dimensions(self):
        """Iterate over the primary dimensions: columns which do not have a parent
        and have a cardinality greater than 1"""
        from ambry.valuetype.core import ROLE
        return [c for c in self.columns
                if not c.parent and c.role == ROLE.DIMENSION and c.pstats.nuniques > 1]

    @property
    def measures(self):
        """Iterate over all measures"""
        from ambry.valuetype.core import ROLE
        return [c for c in self.columns if c.role == ROLE.MEASURE]

    def measure(self, vid):
        """Return a measure, given its vid or another reference"""
        from ambry.orm import Column
        if isinstance(vid, PartitionColumn):
            return vid
        elif isinstance(vid, Column):
            # BUG FIX: PartitionColumn requires the partition argument; the
            # original one-argument call raised TypeError.
            return PartitionColumn(vid, self)
        else:
            return PartitionColumn(self.table.column(vid), self)

    def dimension(self, vid):
        """Return a dimension, given its vid or another reference"""
        from ambry.orm import Column
        if isinstance(vid, PartitionColumn):
            return vid
        elif isinstance(vid, Column):
            # BUG FIX: PartitionColumn requires the partition argument; the
            # original one-argument call raised TypeError.
            return PartitionColumn(vid, self)
        else:
            return PartitionColumn(self.table.column(vid), self)

    @property
    def primary_measures(self):
        """Iterate over the primary measures, columns which do not have a parent"""
        return [c for c in self.measures if not c.parent]

    @property
    def dict(self):
        """The detail dict, augmented with the enumerated dimension sets."""
        d = self.detail_dict
        d['dimension_sets'] = self.enumerate_dimension_sets()
        return d

    def dataframe(self, measure, p_dim, s_dim=None, filters=None, df_class=None):
        """
        Return a dataframe with a subset of the columns of the partition, including a measure and one
        or two dimensions. For dimensions that have labels, the labels are included.
        The returned dataframe will have extra properties to describe the conversion:
        * plot_axes: List of dimension names for the first and second axis
        * labels: The names of the label columns for the axes
        * filtered: The `filters` dict
        * floating: The names of primary dimensions that are not axes nor filtered
        There is also an iterator, `rows`, which returns the header and then all of the rows.
        :param measure: The column names of one or more measures
        :param p_dim: The primary dimension. This will be the index of the dataframe.
        :param s_dim: a secondary dimension. The returned frame will be unstacked on this dimension
        :param filters: A dict of column names, mapped to a column value, indicating rows to select. a
        row that passes the filter must have the values for all given rows; the entries are ANDED
        :param df_class:
        :return: a Dataframe, with extra properties
        """
        import numpy as np
        # BUG FIX: was a mutable default argument (filters={}).
        filters = filters if filters is not None else {}
        measure = self.measure(measure)
        p_dim = self.dimension(p_dim)
        assert p_dim
        if s_dim:
            s_dim = self.dimension(s_dim)
        columns = set([measure.name, p_dim.name])
        if p_dim.label:
            # For geographic datasets, also need the gvid
            if p_dim.geoid:
                columns.add(p_dim.geoid.name)
            columns.add(p_dim.label.name)
        if s_dim:
            columns.add(s_dim.name)
            if s_dim.label:
                columns.add(s_dim.label.name)

        def maybe_quote(v):
            from six import string_types
            if isinstance(v, string_types):
                return '"{}"'.format(v)
            else:
                return v

        # Create the predicate to filter out the filtered dimensions
        if filters:
            selected_filters = []
            for k, v in filters.items():
                if isinstance(v, dict):
                    # The filter is actually the whole set of possible options, so
                    # just select the first one.
                    # BUG FIX: dict.keys() is not subscriptable under Python 3;
                    # v.keys()[0] raised TypeError.
                    v = next(iter(v))
                selected_filters.append("row.{} == {}".format(k, maybe_quote(v)))
            code = ' and '.join(selected_filters)
            predicate = eval('lambda row: {}'.format(code))
        else:
            code = None

            def predicate(row):
                return True

        df = self.analysis.dataframe(predicate, columns=columns, df_class=df_class)
        if df is None or df.empty or len(df) == 0:
            return None
        # So we can track how many records were aggregated into each output row
        df['_count'] = 1

        def aggregate_string(x):
            return ', '.join(set(str(e) for e in x))

        agg = {
            '_count': 'count',
        }
        for col_name in columns:
            c = self.column(col_name)
            # The primary and secondary dimensions are put into the index by groupby
            if c.name == p_dim.name or (s_dim and c.name == s_dim.name):
                continue
            # FIXME! This will only work if the child is only one level from the
            # parent. Should have an accessor for the top level.
            if c.parent and (c.parent == p_dim.name or (s_dim and c.parent == s_dim.name)):
                continue
            if c.is_measure:
                agg[c.name] = np.mean
            if c.is_dimension:
                agg[c.name] = aggregate_string

        plot_axes = [p_dim.name]
        if s_dim:
            plot_axes.append(s_dim.name)

        # NOTE(review): 'columns' is a set, so the groupby key order is not
        # deterministic -- confirm downstream code doesn't depend on it.
        df = df.groupby(list(columns - set([measure.name]))).agg(agg).reset_index()
        df._metadata = ['plot_axes', 'filtered', 'floating', 'labels', 'dimension_set', 'measure']
        df.plot_axes = [c for c in plot_axes]
        df.filtered = filters
        # Dimensions that are not specified as axes nor filtered
        df.floating = list(set(c.name for c in self.primary_dimensions) -
                           set(df.filtered.keys()) -
                           set(df.plot_axes))
        df.labels = [self.column(c).label.name if self.column(c).label else c for c in df.plot_axes]
        df.dimension_set = self.dimension_set(p_dim, s_dim=s_dim)
        df.measure = measure.name

        def rows(self):
            yield ['id'] + list(df.columns)
            for t in df.itertuples():
                yield list(t)

        # Really should not do this, but I don't want to re-build the dataframe
        # with another class. NOTE: this mutates the dataframe *class*, not just
        # this instance.
        df.__class__.rows = property(rows)
        return df

    def dimension_set(self, p_dim, s_dim=None, dimensions=None, extant=None):
        """
        Return a dict that describes the combination of one or two dimensions, for a plot
        :param p_dim: primary dimension
        :param s_dim: optional secondary dimension
        :param dimensions: dimensions considered for filters; defaults to primary_dimensions
        :param extant: set of already-produced keys; duplicate combinations return None
        :return: dict describing the combination, or None when it is skipped
        """
        # BUG FIX: was a mutable default argument (extant=set()) that persisted
        # across calls, making repeated calls with the same dimensions return None.
        if extant is None:
            extant = set()
        if not dimensions:
            dimensions = self.primary_dimensions
        key = p_dim.name
        if s_dim:
            key += '/' + s_dim.name
        # Ignore if the key already exists or the primary and secondary dims are the same
        if key in extant or p_dim == s_dim:
            return
        # Don't allow geography to be a secondary dimension. It must either be a primary dimension
        # ( to make a map ) or a filter, or a small-multiple
        if s_dim and s_dim.valuetype_class.is_geo():
            return
        extant.add(key)
        filtered = {}
        for d in dimensions:
            if d != p_dim and d != s_dim:
                # list() so the value is a real sequence under Python 3 as well.
                filtered[d.name] = list(d.pstats.uvalues.keys())
        if p_dim.valuetype_class.is_time():
            value_type = 'time'
            chart_type = 'line'
        elif p_dim.valuetype_class.is_geo():
            value_type = 'geo'
            chart_type = 'map'
        else:
            value_type = 'general'
            chart_type = 'bar'
        return dict(
            key=key,
            p_dim=p_dim.name,
            p_dim_type=value_type,
            p_label=p_dim.label_or_self.name,
            s_dim=s_dim.name if s_dim else None,
            s_label=s_dim.label_or_self.name if s_dim else None,
            filters=filtered,
            chart_type=chart_type
        )

    def enumerate_dimension_sets(self):
        """Enumerate plottable dimension combinations: each single primary
        dimension, then each pair ordered with the higher-cardinality one first."""
        dimension_sets = {}
        dimensions = self.primary_dimensions
        extant = set()
        for d1 in dimensions:
            ds = self.dimension_set(d1, None, dimensions, extant)
            if ds:
                dimension_sets[ds['key']] = ds
        for d1 in dimensions:
            for d2 in dimensions:
                # BUG FIX: the original swapped d1 and d2 in place, clobbering the
                # loop variables for subsequent iterations. Order into locals.
                if d2.cardinality >= d1.cardinality:
                    primary, secondary = d2, d1
                else:
                    primary, secondary = d1, d2
                ds = self.dimension_set(primary, secondary, dimensions, extant)
                if ds:
                    dimension_sets[ds['key']] = ds
        return dimension_sets
class ColumnProxy(PartitionProxy):
    # A PartitionProxy variant that also keeps a reference to the owning partition.
    def __init__(self, obj, partition):
        # Use object.__setattr__ to bypass PartitionProxy.__setattr__, which
        # would forward these assignments to the wrapped object.
        object.__setattr__(self, "_obj", obj)
        object.__setattr__(self, "_partition", partition)
MAX_LABELS = 75  # Maximum number of unique records before it's assumed that the values aren't valid labels
class PartitionColumn(ColumnProxy):
    """A proxy on the Column that links a Column to a Partition, for direct access to the stats
    and column labels"""

    def __init__(self, obj, partition):
        super(PartitionColumn, self).__init__(obj, partition)
        # Per-column stats for this partition, with attribute access.
        object.__setattr__(self, "pstats", partition.stats_dict[obj.name])

    @property
    def children(self):
        """Yield the table's other columns that have this column as a parent,
        excluding label columns."""
        # BUG FIX: the original iterated 'self.children' -- this very property --
        # which recursed without end. Scan the table's columns instead, the same
        # way the 'label' property does.
        for c in self.table.columns:
            if c.parent != self.name:
                continue
            if c.valuetype and 'label' in c.valuetype:
                continue  # labels are excluded, per the docstring
            yield PartitionColumn(c, self._partition)

    @property
    def label(self):
        """Return the first child of the column that is marked as a label, or None."""
        for c in self.table.columns:
            if c.parent == self.name and 'label' in c.valuetype:
                return PartitionColumn(c, self._partition)

    @property
    def value_labels(self):
        """Return a map of column code values mapped to labels, for columns that have a label
        column. If the column is not associated with a label column, it returns an identity map.
        WARNING! This reads the whole partition, so it is really slow
        """
        from operator import itemgetter
        card = self.pstats.nuniques
        if self.label:
            ig = itemgetter(self.name, self.label.name)
        elif self.pstats.nuniques < MAX_LABELS:
            # No label column: map each value to itself.
            ig = itemgetter(self.name, self.name)
        else:
            return {}
        label_set = set()
        for row in self._partition:
            label_set.add(ig(row))
            # Stop early once every unique value has been seen.
            if len(label_set) >= card:
                break
        d = dict(label_set)
        assert len(d) == len(label_set)  # Else the label set has multiple values per key
        return d

    @property
    def cardinality(self):
        """Returns the number of unique elements"""
        return self.pstats.nuniques

    def __repr__(self):
        return "<{} {}>".format(self.__class__.__name__, self.name)
| CivicSpleen/ambry | ambry/orm/partition.py | partition.py | py | 48,749 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "ambry.util.get_logger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "geoid.parse_to_gvid",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "geoid.parse_to_gvid",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "geoid.p... |
13459025825 | import argparse
class HackAssembler:
    """Two-pass assembler translating Hack assembly into 16-bit Hack machine code.

    Per the nand2tetris project 6 specification: A-instructions (``@value``)
    become ``0`` plus a 15-bit address; C-instructions (``dest=comp;jump``)
    become ``111`` + comp(7) + dest(3) + jump(3).
    """

    def __init__(self):
        # comp mnemonic -> a-bit plus the six c-bits, from the Hack specification.
        self.__comp_code = {
            "0": "0101010",
            "1": "0111111",
            "-1": "0111010",
            "D": "0001100",
            "A": "0110000",
            "!D": "0001101",
            "!A": "0110001",
            "-D": "0001111",
            "-A": "0110011",
            "D+1": "0011111",
            "A+1": "0110111",
            "D-1": "0001110",
            "A-1": "0110010",
            "D+A": "0000010",
            "D-A": "0010011",
            "A-D": "0000111",
            "D&A": "0000000",
            "D|A": "0010101",
            "M": "1110000",
            "!M": "1110001",
            "-M": "1110011",
            "M+1": "1110111",
            "M-1": "1110010",
            "D+M": "1000010",
            "D-M": "1010011",
            "M-D": "1000111",
            "D&M": "1000000",
            "D|M": "1010101",
        }
        # Jump mnemonics indexed by their 3-bit encoding ("" = no jump).
        self.__jump_code = ["", "JGT", "JEQ", "JGE", "JLT", "JNE", "JLE", "JMP"]
        # Predefined symbols from the Hack specification.
        self.__defined_symbols = {
            "SP": 0,
            "LCL": 1,
            "ARG": 2,
            "THIS": 3,
            "THAT": 4,
            "R0": 0,
            "R1": 1,
            "R2": 2,
            "R3": 3,
            "R4": 4,
            "R5": 5,
            "R6": 6,
            "R7": 7,
            "R8": 8,
            "R9": 9,
            "R10": 10,
            "R11": 11,
            "R12": 12,
            "R13": 13,
            "R14": 14,
            "R15": 15,
            "SCREEN": 0x4000,
            "KBD": 0x6000,
        }

    def translate(self, lines: list[str]) -> list[str]:
        """Translate assembly source lines into 16-bit binary instruction strings."""
        return self.__handle_instructions(
            self.__handle_symbols(self.__handle_spaces(self.__handle_comments(lines)))
        )

    def __handle_symbols(self, lines: list[str]) -> list[str]:
        """Resolve labels and variables, returning lines with symbols replaced
        by numeric addresses."""
        symbols = self.__defined_symbols.copy()
        results: list[str] = []
        # First pass: record each '(LABEL)' as the address of the next instruction.
        for line in lines:
            if line[0] == "(" and line[-1] == ")":
                symbols[line[1:-1]] = len(results)
            else:
                results.append(line)
        # Second pass: allocate RAM addresses (from 16) for new variable symbols.
        counter = 16
        for idx, line in enumerate(results):
            if self.__is_a_instruction(line):
                value: str = line[1:]
                if value.isdigit():
                    continue
                if value not in symbols:
                    symbols[value] = counter
                    counter += 1
                results[idx] = line[0] + str(symbols[value])
        return results

    def __translate_dest(self, line: str) -> str:
        """Encode the optional dest part as three bits: A, D, M."""
        result = 0
        if "=" in line:
            dest = line.split("=")[0]
            if "M" in dest:
                result |= 1
            if "D" in dest:
                result |= 1 << 1
            if "A" in dest:
                result |= 1 << 2
        return format(result, "03b")

    def __translate_comp(self, line: str) -> str:
        """Encode the comp part as seven bits (a-bit plus c1..c6).

        :raises ValueError: for an unknown comp mnemonic.
        """
        st = line.index("=") + 1 if "=" in line else 0
        nd = line.index(";") if ";" in line else len(line)
        comp = line[st:nd]
        code = self.__comp_code.get(comp)
        # BUG FIX: .get() silently returned None for unknown mnemonics, crashing
        # later with an opaque TypeError during string concatenation.
        if code is None:
            raise ValueError(f"unknown comp mnemonic: {comp!r}")
        return code

    def __translate_jump(self, line: str) -> str:
        """Encode the optional jump part as three bits."""
        jump = None
        if ";" in line:
            jump = line.split(";")[1]
        result = self.__jump_code.index(jump or "")
        return format(result, "03b")

    def __is_a_instruction(self, line: str) -> bool:
        return line[0] == "@"

    def __translate_a_instruction(self, line: str) -> str:
        # '0' opcode plus the value as 15 bits (values wider than 15 bits are truncated).
        return "0" + format(int(line[1:]), "015b")[-15:]

    def __translate_c_instruction(self, line: str) -> str:
        return (
            "111"
            + self.__translate_comp(line)
            + self.__translate_dest(line)
            + self.__translate_jump(line)
        )

    def __handle_instructions(self, lines: list[str]) -> list[str]:
        """Translate each symbol-free line into its binary string."""
        result: list[str] = []
        for line in lines:
            if self.__is_a_instruction(line):
                result.append(self.__translate_a_instruction(line))
            else:
                result.append(self.__translate_c_instruction(line))
        return result

    def __handle_spaces(self, lines: list[str]) -> list[str]:
        """Strip all whitespace and drop blank lines."""
        return ["".join(line.split()) for line in lines if line.strip()]

    def __handle_comments(self, lines: list[str]) -> list[str]:
        """Remove '//' comments."""
        return [line.split("//")[0] for line in lines]
if __name__ == "__main__":
    # CLI entry point: translate <name>.asm into <name>.hack alongside it.
    parser = argparse.ArgumentParser(
        description="Translate Hack assembly code to Hack binary code"
    )
    parser.add_argument("asm", help="filepath of Hack assembly code")
    args = parser.parse_args()
    filepath: str = args.asm
    assert filepath.endswith(".asm"), f"{filepath} doesn't end with .asm"
    # BUG FIX: rstrip(".asm") strips any trailing '.', 'a', 's', or 'm' characters
    # (e.g. "sama.asm" -> "sam.hack"); slice off the fixed-length suffix instead.
    output: str = filepath[: -len(".asm")] + ".hack"
    assembler: HackAssembler = HackAssembler()
    with open(filepath, "r") as input_file:
        code: list[str] = input_file.read().splitlines()
    with open(output, "w") as output_file:
        output_file.write("\n".join(assembler.translate(code)))
| zhixiangli/nand2tetris | projects/06/hack_assembler.py | hack_assembler.py | py | 5,017 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 144,
"usage_type": "call"
}
] |
14061601965 | import binascii
import json
import logging
import cv2
import numpy as np
import requests
# Internal CDN upload endpoints for video and image assets.
VIDEO_UPLOAD_URL = 'http://video-fs.like.video/upload_video.php'
IMAGE_UPLOAD_URL = 'http://img-fs.like.video/FileuploadDownload/upload_img.php'
# Module-level logger.
logger = logging.getLogger(__name__)
def upload_video(video_bytes):
    """Upload raw video bytes to the CDN.

    :param video_bytes: encoded video data
    :return: the hosted URL tagged with a CRC32 checksum, or None on any failure
    """
    files = {
        'file': video_bytes
    }
    try:
        resp = requests.post(VIDEO_UPLOAD_URL, files=files)
        if resp.status_code != 200:
            return None
        base_url = json.loads(resp.text)['url']
        checksum = binascii.crc32(video_bytes)
        return '{}?crc={}&type=5'.format(base_url, checksum)
    except Exception as err:
        logger.error('upload_video failed, error info {}'.format(err))
        return None
def upload_image(image_bytes, req_name='default', ext='.jpg'):
    """Upload image bytes to the CDN.

    :param image_bytes: raw encoded image data
    :param req_name: 'bigo_live' routes the upload to the bigo snapshot endpoint
    :param ext: file extension used for the multipart filename
    :return: the hosted URL, or None on any failure
    """
    files = {
        'file': ('image{}'.format(ext), image_bytes)
    }
    try:
        if req_name == 'bigo_live':
            resp = requests.post('http://snapshot.calldev.bigo.sg/upload_file.php', files=files)
        else:
            resp = requests.post(IMAGE_UPLOAD_URL, files=files)
        if resp.status_code != 200:
            return None
        return json.loads(resp.text)['url']
    except Exception as err:
        logger.error('upload_image failed, error info {}'.format(err))
        return None
def download_video(video_url):
    """Download a video and return its raw bytes, or None on any failure.

    NOTE(review): requests.get is called without a timeout, so a stalled
    connection can hang indefinitely -- consider adding one.
    """
    try:
        resp = requests.get(video_url)
    except Exception as err:
        logger.error('download_video failed, video_url {}, error info {}'.format(video_url, err))
        return None
    if resp.status_code != 200 or not resp.content:
        logger.error('download_video failed, video_url {}'.format(video_url))
        return None
    # The 'not resp.content' check above already rejects empty payloads; the
    # original's second 'is None' re-check was unreachable and has been removed.
    return resp.content
def download_image(image_url, decode=False, to_rgb=False):
    """Download an image; return decoded pixels when `decode` is True,
    otherwise the raw bytes. Returns None on failure (logged).

    The payload is always run through cv2.imdecode as a validity check,
    even when raw bytes are requested. `to_rgb` converts BGR->RGB and only
    takes effect together with `decode`.
    """
    ua_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0'
    }
    try:
        resp = requests.get(image_url, headers=ua_headers)
    except Exception as err:
        logger.error('download_image failed, image_url {}, error info {}'.format(image_url, err))
        return None
    if resp.status_code != 200 or not resp.content:
        logger.error('download_image failed, image_url {}'.format(image_url))
        return None
    raw_bytes = resp.content
    decoded = cv2.imdecode(np.frombuffer(raw_bytes, np.uint8), cv2.IMREAD_COLOR)
    if decoded is None:
        logger.error('download_image failed, empty image, image_url {}'.format(image_url))
        return None
    if decode and to_rgb:
        decoded = cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
    return decoded if decode else raw_bytes
if __name__ == '__main__':
    # Manual smoke test against the live CDN: round-trip an image and a
    # video through download + upload, then decode an image and print its
    # shape. Requires network access.
    image_bytes = requests.get('http://img.like.video/asia_live/4h6/1Jgvll.jpg').content
    image_url = upload_image(image_bytes)
    print(image_url)
    video_bytes = requests.get(
        'http://video.like.video/asia_live/7h4/M0B/C9/D7/bvsbAF37MUGEev_7AAAAAGsyOC8464.mp4').content
    video_url = upload_video(video_bytes)
    print(video_url)
    img = download_image('http://img.like.video/asia_live/4h6/1Jgvll.jpg', decode=True)
    print(img.shape)
| ThreeBucks/model-deploy | src/utils/cdn_utils.py | cdn_utils.py | py | 3,379 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "binascii.crc32",
"line... |
39916510161 | import os
import re
import xlwt
from tkinter import *
from tkinter.filedialog import askdirectory
from xlwt import Workbook
# Ask the user for a folder of report .txt files, pull metric rows out of
# each file's "Summary" section, and dump everything to an Excel sheet.
root = Tk()
root.withdraw()  # hide the empty Tk main window; only the folder dialog shows
path = askdirectory()
print(path)
file_names = os.listdir(str(path))
output = []  # rows of [file name, metric name, metric value]
for file in file_names:
    file_path = path + '/' + file
    # BUG FIX: files were opened with open() and never closed; use a
    # context manager so handles are released deterministically.
    with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
        data = f.read().strip()
    # Everything after the word "Summary" holds the metric table.
    mhs = re.search(r'Summary((.|\s)*)', data)
    if mhs:
        for row in mhs.group(1).split('\n'):
            # Skip blank rows and bare table separators.
            if row.strip() and row.strip() != '|':
                # "<name words>  ...  <float value>"
                row_match = re.search(r'(([a-zA-Z]+\s)+)\s+.*?(\d+\.\d+)', row)
                if row_match:
                    output.append([file, row_match.group(1), row_match.group(3)])
    else:
        # File has no Summary section at all.
        output.append([file, 'N/A', 'N/A'])
book = Workbook(encoding='utf-8')
sht1 = book.add_sheet('sheet1')
for i in range(len(output)):
    # BUG FIX: was len(output[1]), which raises IndexError when fewer than
    # two rows were collected and wrongly assumes every row has row 1's width.
    for j in range(len(output[i])):
        sht1.write(i, j, output[i][j])
book.save('./output_data.xls')
print("over")
| nigo81/python_spider_learn | TXT处理/readtxt.py | readtxt.py | py | 1,005 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "tkinter.filedialog.askdirectory",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "re.search",
"... |
37463181641 | import Dataset as datos
import matplotlib.pyplot as plt
import numpy as np
import os
# Load the prepared sales tables from the Dataset helper module.
df_ventas = datos.get_df_ventas()
resample_meses = datos.get_resample_meses()
facturacion_por_juego = datos.get_facturacion_por_juego()
cantidad_ventas_por_juego = datos.get_cantidad_ventas_por_juego()
#----------------------------------------------------------------------------------------------------------------- save the tables to Excel
datos.guardar_en_excel(datos.get_facturacion_por_juego())
datos.guardar_en_excel(datos.get_cantidad_ventas_por_juego())
#----------------------------------------------------------------------------------------------------------------- print the tables to the Python console
print(facturacion_por_juego)
print(cantidad_ventas_por_juego)
#----------------------------------------------------------------------------------------------------------------- add the game descriptions
# Re-index the revenue table by game description for plotting; `name` is
# reused below as the saved figure's file name.
a = datos.get_facturacion_por_juego().reset_index()
a.set_index("descripcion", inplace=True)
a.name = "Comparación de la facturación de cada juego "
#---------------------------------------------------------------------------------------------------- bar chart comparing each game's net revenue
plt.figure(figsize=[11,6]).suptitle("Comparación facturación (neta) de cada juego:")
plt.subplots_adjust(bottom=0.34, right=0.99, left=0.1, top=0.95)
plt.ylabel(a.columns[1] + " en $")
f1 = plt.bar(a.index, a["facturacion neta"], tick_label=a.index)
plt.grid(which="major", axis="y", color="black", alpha=0.15)
# Dashed horizontal line at the mean revenue. The replace() chain swaps
# ',' and '.' so the label uses Spanish number formatting ('.' thousands,
# ',' decimals).
plt.axhline(y=a["facturacion neta"].mean(),ls="--", label= "Promedio: $" +
"{:,}".format(round(a["facturacion neta"].mean(),2)).replace(',','x').replace('.',',').replace('x','.')+
" (no muy útil porque se comparan \n todos los juegos, que son muy distintos)")
plt.xticks( rotation=90)
plt.yticks(np.arange(0,a["facturacion neta"].max()*1.1,datos.escala_grafico(a["facturacion neta"].max())))
plt.ticklabel_format(axis="y",style="plain", useLocale=True,)
plt.legend(loc="upper right")
axes = plt.gca()
# Leave 10% headroom above the tallest bar.
axes.set_ylim([0,a["facturacion neta"].max()*1.1])
plt.savefig("Gráficos generales/"+a.name+".jpg")
plt.show()
plt.close()
#--------------------------------------------------------------------------------------- monthly units-sold bar chart, one figure per game
contador=1
for juego in df_ventas.articulo.unique():
    juego = datos.get_tabla_juegos(juego)
    plt.figure().suptitle(juego)
    plt.xlabel(resample_meses.index.name)
    plt.ylabel("Número de juegos vendidos")
    f1 = plt.bar(resample_meses.index, resample_meses[juego], width=30, tick_label=resample_meses.index.strftime('%m/%y'))
    plt.grid(which="major", axis="y", color="black", alpha=0.15)
    # Mean line with Spanish-formatted number (see comment in the chart above).
    plt.axhline(y=resample_meses[juego].mean(), ls="--",label="Promedio: $" +
    "{:,}".format(round(resample_meses[juego].mean(), 2)).replace(',', 'x').replace('.', ',').replace('x', '.'))
    plt.xticks(rotation=45)
    plt.yticks(np.arange(0, resample_meses[juego].max() * 1.1, datos.escala_grafico(resample_meses[juego].max())))
    plt.ticklabel_format(axis="y", style="plain", useLocale=True, )
    plt.legend(loc="upper right")
    axes = plt.gca()
    axes.set_ylim([0, resample_meses[juego].max() * 1.1])
    # Annotate each bar with its height at the baseline.
    for i in f1:
        x = i.get_x()
        y = i.get_height()
        ancho = i.get_width()
        plt.text(x + ancho / 2, 0, y, fontsize=10, color="black", ha="center")
    print(contador," Se guardó " + juego+".jpg" )
    contador+=1
    plt.savefig("Gráfico de cada juego/"+juego+".jpg")
    plt.close()
del contador
#-------------------------------------------------------------------------------------------------------------------------------- GUI image viewer
from tkinter import *
from PIL import ImageTk, Image

root = Tk()
root.title('CIENCIAS PARA TODOS - Estadísticas')
# Pre-load every per-game chart. The PhotoImage objects must stay referenced
# (here, in image_list) or Tkinter would let them be garbage-collected and
# the labels would show blank images.
image_list = []
for foto in os.listdir("Gráfico de cada juego/"):
    aux = ImageTk.PhotoImage(Image.open("Gráfico de cada juego/"+foto))
    image_list.append(aux)
# Show the first chart; navigation buttons are wired up below.
my_label = Label(image=image_list[0])
my_label.grid(row=0, column=0, columnspan=3)
def forward(image_number):
    """Display image `image_number` (1-based) and rebuild the nav buttons."""
    global my_label
    global button_forward
    global button_back
    # Swap the displayed chart.
    my_label.grid_forget()
    my_label = Label(image=image_list[image_number - 1])
    # Rebind both navigation buttons to the new position.
    button_back = Button(root, text="<<", command=lambda: back(image_number - 1))
    if image_number == len(image_list):
        # Last image reached: forward navigation is disabled.
        button_forward = Button(root, text=">>", state=DISABLED)
    else:
        button_forward = Button(root, text=">>", command=lambda: forward(image_number + 1))
    my_label.grid(row=0, column=0, columnspan=3)
    button_back.grid(row=1, column=0)
    button_forward.grid(row=1, column=2)
def back(image_number):
    """Display image `image_number` (1-based) and rebuild the nav buttons."""
    global my_label
    global button_forward
    global button_back
    # Swap the displayed chart.
    my_label.grid_forget()
    my_label = Label(image=image_list[image_number - 1])
    # Rebind both navigation buttons to the new position.
    button_forward = Button(root, text=">>", command=lambda: forward(image_number + 1))
    if image_number == 1:
        # First image reached: backward navigation is disabled.
        button_back = Button(root, text="<<", state=DISABLED)
    else:
        button_back = Button(root, text="<<", command=lambda: back(image_number - 1))
    my_label.grid(row=0, column=0, columnspan=3)
    button_back.grid(row=1, column=0)
    button_forward.grid(row=1, column=2)
# Initial state: image 1 is shown, so "back" starts disabled.
# BUG FIX: `command=back` previously bound the two-argument function
# directly; if the button were ever enabled, clicking it would raise
# TypeError. Bind a lambda with the correct argument instead.
button_back = Button(root, text="<<", command=lambda: back(1), state=DISABLED)
button_exit = Button(root, text="Exit Program", command=root.quit)
button_forward = Button(root, text=">>", command=lambda: forward(2))
button_back.grid(row=1, column=0)
button_exit.grid(row=1, column=1)
button_forward.grid(row=1, column=2)
root.mainloop() | matinoseda/CPT-datos-ventas | Estadísticas Juegos.py | Estadísticas Juegos.py | py | 5,685 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "Dataset.get_df_ventas",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "Dataset.get_resample_meses",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "Dataset.get_facturacion_por_juego",
"line_number": 9,
"usage_type": "call"
},
{
"a... |
15646853934 | import json
import os
import string
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
import urllib.request
# Chrome launch options: hide the "controlled by automated software" banner,
# disable the automation extension, and suppress password-manager popups.
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
options.add_experimental_option("prefs", {
    "profile.password_manager_enabled": False, "credentials_enable_service": False, 'profile.default_content_settings.popups': 0})
def initCheckPoint() -> None:
    """Reset every progress flag in checkPoint.json to False."""
    with open('checkPoint.json', 'r') as fh:
        flags = json.load(fh)
    # Keep the keys, clear all values.
    flags = {name: False for name in flags}
    with open('checkPoint.json', 'w') as fh:
        json.dump(flags, fh)
    return None
def loadCheckPoint() -> dict:
    """Read and return the crawl-progress flags from checkPoint.json."""
    with open('checkPoint.json', 'r') as fh:
        return json.load(fh)
def saveCheckPoint(checkPoint: dict) -> None:
    """Persist the crawl-progress flags to checkPoint.json."""
    with open('checkPoint.json', 'w') as fh:
        json.dump(checkPoint, fh)
    return None
def main():
    """Crawl judgegirl.csie.org in three resumable stages: (1) collect the
    list of problem sets, (2) collect each set's problem list, (3) download
    every problem's description, images, and test data. Progress flags are
    persisted in checkPoint.json so a crashed crawl can resume.
    """
    # initCheckPoint()
    url = 'https://judgegirl.csie.org/problems/domain/0'
    # load dict of checkPoint in json file
    checkPoint = loadCheckPoint()

    # --- Stage 1: scrape the problem-set index into problemSetId.txt ------
    if not checkPoint['get problemSet']:
        # put the raw HTML into a temporary text file
        with open('text.txt', 'w', encoding='UTF-8') as f:
            r = requests.get(url)
            f.write(r.text)
        # save all text between <li class="pure-menu-item" and </li> into
        # another text file
        with open('text.txt', 'r', encoding='UTF-8') as f:
            with open('problemSet.txt', 'w', encoding='UTF-8') as f2:
                for line in f:
                    if '<li class="pure-menu-item" data=' in line:
                        while '</li>' not in line:
                            f2.write(line)
                            line = f.readline()
                        f2.write(line)
        # delete the temporary text file
        os.remove('text.txt')
        # read problemSet.txt and write "<#id> <name>" pairs to problemSetId.txt
        with open('problemSet.txt', 'r', encoding='UTF-8') as f:
            with open('problemSetId.txt', 'w', encoding='UTF-8') as f2:
                for line in f:
                    if 'data="' in line:
                        f2.write(line[line.find('#'):-3])
                        f2.write(' ')
                        continue
                    if '</a></li>' in line:
                        # find the index of the first alphabetic character
                        start = 0
                        for i in range(len(line)):
                            if line[i].isalpha():
                                start = i
                                break
                        # strip trailing markup and remove '/' (illegal in
                        # folder names)
                        target = line[start:-10]
                        target = target.replace('/', '')
                        if target.find(' ') > 4:
                            continue
                        f2.write(target)
                        f2.write('\n')
        # delete problemSet.txt
        os.remove('problemSet.txt')
        checkPoint['get problemSet'] = True
        saveCheckPoint(checkPoint)

    # --- Stage 2: scrape each set's problem list into <set>/Description.txt
    if not checkPoint['get problemId']:
        with open('problemSetId.txt', 'r', encoding='UTF-8') as f:
            # create a folder named after the text following ' ' of each line
            for line in f:
                # skip folder creation if it already exists
                folderName = line[line.find(' ')+2:-1]
                if not os.path.exists(folderName):
                    os.mkdir(folderName)
                driver = webdriver.Chrome(
                    'chromedriver.exe', chrome_options=options)
                driver.minimize_window()
                # create a file named Description.txt in each folder
                with open(folderName+'/Description.txt', 'w', encoding='UTF-8') as f2:
                    # go to the problem-set page
                    url = 'https://judgegirl.csie.org/problems/domain/0' + \
                        line[0:line.find(' ')]
                    driver.get(url)
                    # driver.implicitly_wait(3)
                    elements = driver.find_elements(
                        By.CLASS_NAME, 'problem-item')
                    # write each problem entry into Description.txt
                    for element in elements:
                        print(element.text)
                        f2.write(element.text)
                        f2.write('\n')
                driver.close()
                driver.quit()
        checkPoint['get problemId'] = True
        saveCheckPoint(checkPoint)

    url = 'https://judgegirl.csie.org/problem/0'
    # --- Stage 3: download descriptions, images, and test data ------------
    if not checkPoint['get problems']:
        # read back the problem-set folders from problemSetId.txt
        folderNames = list()
        with open('problemSetId.txt', 'r', encoding='UTF-8') as f:
            for line in f:
                folderNames.append(line[line.find(' ')+2:-1])
        problemSets = dict()
        for folderName in folderNames:
            with open(folderName + '/Description.txt', 'r', encoding='UTF-8') as f:
                problemSets[folderName] = list()
                for line in f:
                    problemSets[folderName].append(line[0:-1])
        for key in problemSets:
            for problem in problemSets[key]:
                title = problem
                id = problem[0:problem.find(' ') - 1]
                # if the last character of title is punctuation, drop it
                if title[-1] in string.punctuation:
                    title = title[:-1]
                # replace '/' (illegal in folder names) with ' '
                title = title.replace('/', ' ')
                # skip problems already downloaded in a previous run
                if title in checkPoint and checkPoint[title]:
                    continue
                print('Now downloading ' + title)
                driver = webdriver.Chrome(
                    'chromedriver.exe', chrome_options=options)
                driver.minimize_window()
                # go to the problem page
                url = 'https://judgegirl.csie.org/problem/0/' + id
                driver.get(url)
                # driver.implicitly_wait(3)
                if not os.path.exists(key+'/'+title):
                    os.mkdir(key+'/'+title)
                with open(key+'/'+title+'/Description.txt', 'w', encoding='UTF-8') as f:
                    f.write(driver.find_element(By.CLASS_NAME, 'content').text)
                # re-open and keep only the lines between 'Task Description'
                # and 'Submit'
                with open(key+'/'+title+'/Description.txt', 'r', encoding='UTF-8') as f:
                    start = end = 0
                    for index, line in enumerate(f):
                        if 'Task Description' in line:
                            start = index
                        if 'Submit' in line:
                            end = index
                            break
                    # go back to the top of the file
                    f.seek(0)
                    # save the lines between start and end into newFile
                    newFile = f.readlines()[start:end]
                with open(key+'/'+title+'/Description.txt', 'w', encoding='UTF-8') as f2:
                    # clear the file
                    f2.truncate()
                    # write the trimmed description back
                    for line in newFile:
                        f2.write(line)
                # find the problem's images and save them
                photos = driver.find_elements(
                    By.CLASS_NAME, 'pure-img-responsive')
                if len(photos) != 0:
                    for index, photo in enumerate(photos):
                        link = photo.get_attribute('src')
                        # save the photo; link[44:] strips the URL prefix to
                        # recover the file name
                        if 'https://judgegirl.csie.org/images/problems/' in link:
                            urllib.request.urlretrieve(link, key+'/'+title+'/'+link[44:])
                if not os.path.exists(key+'/'+title+'/testCases'):
                    os.mkdir(key+'/'+title+'/testCases')
                # the 'Download Testdata' page for this problem id
                link = 'https://judgegirl.csie.org/testdata/download/' + id
                print(title)
                print(url)
                print(link)
                # go to the test-case page
                driver.get(link)
                # driver.implicitly_wait(3)
                content = driver.find_element(By.CLASS_NAME, 'content')
                menu = content.find_element(By.CLASS_NAME, 'pure-g')
                cases = menu.find_elements(By.CLASS_NAME, 'pure-menu-link')
                # download each test case
                for case in cases:
                    url = case.get_attribute('href')
                    file = requests.get(url)
                    with open(key+'/'+title+'/testCases/'+case.text, 'wb') as f:
                        f.write(file.content)
                driver.close()
                driver.quit()
                checkPoint[title] = True
                saveCheckPoint(checkPoint)
        checkPoint['get problems'] = True
        saveCheckPoint(checkPoint)

    # verify that every checkpoint flag is True before reporting success
    for key in checkPoint:
        if not checkPoint[key]:
            print(key+' is not finished')
            return None
    print('all checkPoint is finished')
    return None
if __name__ == '__main__':
main() | fatbrother/crawler-test | main.py | main.py | py | 9,656 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.dump... |
127603653 | # encoding=utf8
import requests
from lxml import etree
class cityAreaCode():
    """Scraper that prints Chinese administrative telephone area codes
    (province / city / district) parsed from ip33.com."""

    def __init__(self):
        # Page listing the province -> city -> district area-code tables.
        self.url = "http://www.ip33.com/area/2019.html"

    def get(self):
        """Fetch the page and print one line per district with its codes."""
        response = requests.get(self.url)
        response.encoding = 'utf-8'
        page_tree = etree.HTML(response.text)
        # The first div[@class="ip"] is a header block; skip it.
        for province_div in page_tree.xpath('//div[@class="ip"]')[1:]:
            province_html = etree.tostring(province_div, encoding="utf-8", pretty_print=False).decode("utf-8")
            province_tree = etree.HTML(province_html)
            h4s = province_tree.xpath('//div/h4')
            for city_li in province_tree.xpath('//div/ul/li'):
                city_html = etree.tostring(city_li, encoding="utf-8", pretty_print=False).decode("utf-8")
                city_tree = etree.HTML(city_html)
                h5s = city_tree.xpath('//li/h5')
                for district in city_tree.xpath('//li/ul/li'):
                    print('(省名称)', h4s[0].text.replace(' ','(省代码)'), '(市/区名称)', h5s[0].text.replace(' ','(市/区代码)'), "(地区名称)", district.text.replace(' ','(地区代码)'))
if __name__ == '__main__':
    # BUG FIX: the instance used to be bound to the name `cityAreaCode`,
    # shadowing the class; use a distinct name so the class stays usable.
    scraper = cityAreaCode()
    scraper.get()
| lazyting/climbworm | python/CityAreaCode.py | CityAreaCode.py | py | 1,228 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "lxml.etree.tostring",
"li... |
73379175144 | from functools import wraps
from flask import url_for, redirect, session
# Login-required decorator.
# For pages that require login: if the user is not logged in, redirect to the login page.
def login_required(func):
    """Decorator that runs the wrapped view only for logged-in users;
    anonymous requests are redirected to the login page."""
    @wraps(func)
    def guarded(*args, **kwargs):
        # A truthy 'user_id' in the session marks an authenticated user.
        if not session.get('user_id'):
            return redirect(url_for('login'))
        return func(*args, **kwargs)
    return guarded
| BobXGY/bobqa | decorators.py | decorators.py | py | 419 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "flask.session.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.url_for",
"l... |
27193740899 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import glob
import logging
import argparse
from ngsmetavirus.config import *
from ngsmetavirus.common import check_path, mkdir, read_tsv
from dagflow import DAG, Task, do_dag
# Module-level logger for this pipeline script.
LOG = logging.getLogger(__name__)

__version__ = "1.0.0"
__author__ = ("Xingguo Zhang",)
__email__ = "invicoun@foxmail.com"
# Nothing is exported on star-import.
__all__ = []
def create_merge_data_task(prefix, read1, read2, work_dir, job_type="local"):
    """Build a Task that stages raw paired-end fastq files for one sample.

    A single lane is symlinked; multiple lanes are concatenated into
    <work_dir>/<prefix>.raw.R[12].fq.gz.

    Returns (task, merged_read1_path, merged_read2_path).
    """
    if len(read1)==1:
        # One lane: a symlink is enough.
        merge_cmd = """\
ln -s {read1} {prefix}.raw.R1.fq.gz
ln -s {read2} {prefix}.raw.R2.fq.gz
""".format(read1=read1[0], read2=read2[0], prefix=prefix)
    else:
        # Several lanes: concatenate them (gzip streams concatenate cleanly).
        merge_cmd = """\
cat {read1} >{prefix}.raw.R1.fq.gz
cat {read2} >{prefix}.raw.R2.fq.gz
""".format(read1=" ".join(read1), read2=" ".join(read2), prefix=prefix)
    merge_task = Task(
        id="merge_data_%s" % prefix,
        work_dir=work_dir,
        type=job_type,
        option="-pe smp 1",
        script="""
{comm1}
""".format(comm1=merge_cmd)
    )
    merged_r1 = os.path.join(work_dir, "%s.raw.R1.fq.gz" % prefix)
    merged_r2 = os.path.join(work_dir, "%s.raw.R2.fq.gz" % prefix)
    return merge_task, merged_r1, merged_r2
def create_mngs_task(prefix, read1, read2, reference, nohost, dtype, atype,
                     job_type, work_dir, out_dir, trim=0, project="", id=""):
    """Build the Task that runs `ngsmetavirus.py all` for one sample.

    Flag-style arguments are rendered to command-line fragments first:
    `nohost` becomes "--nohost" (or nothing) and `reference` becomes
    "--reference <path>" (or nothing).
    """
    nohost = "--nohost" if nohost else ""
    rx = "--reference %s" % reference if reference else ""
    task = Task(
        id="mngs_%s" % prefix,
        work_dir=work_dir,
        type="local",
        option="-pe smp 1",
        script="""
{root}/ngsmetavirus.py all \\
--prefix {prefix} --read1 {read1} --read2 {read2} \\
--dtype {dtype} --atype {atype} {rx} \\
--project {project} --id {id} \\
--trim {trim} --thread 6 --job_type {job_type} {nohost} \\
--work_dir {work} --out_dir {out}
""".format(root=ROOT,
           prefix=prefix,
           read1=read1,
           read2=read2,
           rx=rx,
           nohost=nohost,
           dtype=dtype,
           atype=atype,
           trim=trim,
           project=project,
           id=id,
           job_type=job_type,
           work=work_dir,
           out=out_dir)
    )
    return task
def run_mngs_multi(input, reference, nohost, dtype, atype, trim, job_type,
                   concurrent, refresh, work_dir, out_dir, project="", id=""):
    """Run the ngsmetavirus pipeline for every sample listed in `input`.

    `input` is a TSV of `name read1 read2`; lanes sharing a sample name are
    merged first. Builds one merge task plus one analysis task per sample
    and executes the resulting DAG. Returns 0 on completion.
    """
    input = check_path(input)
    work_dir = mkdir(work_dir)
    out_dir = mkdir(out_dir)
    if reference:
        # Accept either a plain reference path or a hisat2 index prefix.
        # BUG FIX: this used to call check_path(check_path) -- passing the
        # function object instead of the reference path -- and the
        # try/except/else was inverted so the "does not exist" error was
        # raised on *success* and unreachable on failure.
        try:
            reference = check_path(reference)
        except Exception:
            try:
                index = check_path("%s.3.ht2" % reference)
            except Exception:
                raise Exception("Reference genome %s does not exist" % reference)
            # BUG FIX: str.strip(".3.ht2") removes *characters* from both
            # ends, not the suffix; slice the extension off instead.
            reference = index[:-len(".3.ht2")]
    # Group lanes by sample name: name -> [[R1 paths], [R2 paths]].
    data = {}
    for line in read_tsv(input):
        if line[0] not in data:
            data[line[0]] = [[], []]
        data[line[0]][0].append(check_path(line[1]))
        data[line[0]][1].append(check_path(line[2]))
    dag = DAG("mngs_multi")
    for prefix in data:
        prefix_work = mkdir(os.path.join(work_dir, prefix))
        data_task, read1, read2 = create_merge_data_task(
            prefix=prefix,
            read1=data[prefix][0],
            read2=data[prefix][1],
            work_dir=prefix_work
        )
        dag.add_task(data_task)
        task = create_mngs_task(
            prefix=prefix,
            read1=read1,
            read2=read2,
            reference=reference,
            nohost=nohost,
            dtype=dtype,
            atype=atype,
            trim=trim,
            job_type=job_type,
            project=project,
            id=id,
            work_dir=prefix_work,
            out_dir=mkdir(os.path.join(out_dir, prefix))
        )
        dag.add_task(task)
        # The analysis task must wait for its sample's data merge.
        task.set_upstream(data_task)
    do_dag(dag, concurrent, refresh)
    return 0
def mngs_multi(args):
    """CLI adapter: unpack parsed arguments and run the multi-sample pipeline."""
    options = dict(
        input=args.input,
        reference=args.reference,
        nohost=args.nohost,
        dtype=args.dtype,
        atype=args.atype,
        trim=args.trim,
        work_dir=args.work_dir,
        out_dir=args.out_dir,
        concurrent=args.concurrent,
        refresh=args.refresh,
        job_type=args.job_type,
        project=args.project,
        id=args.id,
    )
    run_mngs_multi(**options)
def add_mngs_multi_args(parser):
    """Register the multi-sample pipeline's command-line options on `parser`
    and return it."""
    # Input data and optional host reference.
    parser.add_argument("input", metavar='FILE', type=str,
                        help="Input the reads list.")
    parser.add_argument("-ref", "--reference", metavar="FILE", type=str, default="",
                        help="Input the host's reference database.")
    parser.add_argument('--nohost', action='store_true',
                        help='Input the reference database is not the host.')
    # Sequencing platform and analysis mode.
    parser.add_argument("-dt", "--dtype", metavar='STR', type=str,
                        choices=["mgi", "illumina", "other"], default="illumina",
                        help="Set up the sequencing platform of the data, default=illumina.")
    parser.add_argument("-at", "--atype", metavar='STR', type=str,
                        choices=["metagenome", "metaviral", "rnaviral"], default="metagenome",
                        help="""Set the type of analysis(metagenome, metavirus, rnaviral),\
default=metagenome.""")
    parser.add_argument("--trim", metavar="INT", type=int, default=5,
                        help="Set trim length, default=5")
    # Project bookkeeping (both required).
    parser.add_argument("--project", metavar="STR", type=str, required=True,
                        help="Input project name.")
    parser.add_argument("--id", metavar="STR", type=str, required=True,
                        help="Input project id.")
    # Scheduler / execution controls.
    parser.add_argument("--concurrent", metavar="INT", type=int, default=10,
                        help="Maximum number of jobs concurrent (default: 10).")
    parser.add_argument("--refresh", metavar="INT", type=int, default=30,
                        help="Refresh time of log in seconds (default: 30).")
    parser.add_argument("--job_type", choices=["sge", "local"], default="local",
                        help="Jobs run on [sge, local] (default: local).")
    parser.add_argument("--work_dir", metavar="DIR", type=str, default=".",
                        help="Work directory (default: current directory).")
    parser.add_argument("--out_dir", metavar="DIR", type=str, default=".",
                        help="Output directory (default: current directory).")
    return parser
def main():
    """Program entry point: configure logging, parse CLI args, run pipeline."""
    logging.basicConfig(
        stream=sys.stderr,
        level=logging.INFO,
        format="[%(levelname)s] %(message)s"
    )
    usage_notes = """
attention:
ngsmetavirus.py.py multi input.list
File format:
name R1 R2
version: %s
contact: %s <%s>\
""" % (__version__, " ".join(__author__), __email__)
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=usage_notes)
    arg_parser = add_mngs_multi_args(arg_parser)
    mngs_multi(arg_parser.parse_args())
if __name__ == "__main__":
    # Script entry point.
    main()
| zxgsy520/metavirus | ngsmetavirus/mngs_multi.py | mngs_multi.py | py | 6,833 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "dagflow.Task",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "dagflow.Task",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "ngsmetavirus.common.check... |
21788142764 | import numpy as np
from scipy.constants import G
from scipy.interpolate import interp1d
from astropy.constants import kpc
import general as ge
class zhao(object):
    """ Class for generating a potential for a spherical dark matter halo,
        using the data generated by the model of Zhao (2009).

        Attributes:
            fName: The name of the file containing the data generated by the
                   model of Zhao. The data should be generated using the code
                   provided at: http://202.127.29.4/dhzhao/mandc.html
            data:  The loaded data from the text file; the data is stored in
                   a 2D numpy array. For more information on which data is
                   stored, see the README file generated using the code.
            h0:    Little h, hubble parameter at z=0 divided by 100, unitless.
                   Value from the Planck collaboration (2020).
            red:   The redshift values at which the properties of the dark
                   matter halo are computed.
            mass:  The virial mass of the dark matter halo in units of solar
                   masses, computed at the redshifts given by red.
            conc:  The concentration of the halo (r_vir / r_s) at the given
                   redshifts.
            virR:  The virial radius at the redshift values of the dark
                   matter halo in meters.
            rhoS:  The density at the scale radius in kg/m^3.
            rS:    The scale radius of the dark matter halo in meters.
            time:  The lookback time corresponding to the redshift values in
                   years.
    """

    def __init__(self, fName):
        """ Initializing the potential according to the model of Zhao

            Input:
                fName (string)

            Returns:
                zhao (object)
        """
        self.fName = fName                  # File name
        data = self.load_data()
        h0 = 0.6766                         # Planck 2020 little h
        t0 = data[:,-1][0] / h0             # Time value of the first row

        # Unpacking data; column layout follows the mandc README. Values in
        # the file are in h-scaled units, hence the divisions by h0.
        self.red = data[:,0]                # Redshift
        self.mass = data[:,1] / h0          # Virial mass
        self.conc = data[:,2]               # Concentration
        self.virR = 1e3 * kpc.value * data[:,4] / h0            # Virial radius
        self.rhoS = ge.conv_inv_dens(data[:,7]/1e18) * h0 * h0  # rho_s
        self.rS = 1e3 * kpc.value * data[:,8] / h0              # r_s
        self.time = t0 - data[:,-1] / h0    # Age of Universe

    def load_data(self):
        """ Loading data from a generated data file

            Input:
                -

            Returns:
                data: array containing the properties of the dark matter
                      halo (2D numpy array).
        """
        # NOTE(review): as written, replace(' ', ' ') is a no-op; the
        # original presumably collapsed repeated spaces so numpy can split
        # columns -- confirm against the upstream source file.
        with open(self.fName) as f:
            data = np.loadtxt((x.replace(' ', ' ') for x in f), skiprows=1)

        return data

    def find_z_ind(self, zV):
        """ Input a redshift and find the indices corresponding to the closest
            redshift value(s) of the generated data. If zV is an array then
            the closest indices for all the redshift values are determined and
            returned.

            Input:
                zV: The redshift value(s) for which the closest index has
                    to be found (float or numpy array).

            Returns:
                The indices corresponding to the closest redshift values
                (integer or numpy array).
        """
        # Scalars get a single index; sequences get an index per element.
        if type(zV) != np.ndarray and type(zV) != list and type(zV) != tuple:
            return ge.find_closest(self.red, zV)[0]
        return np.asarray([ge.find_closest(self.red, z)[0] for z in zV])

    def rs_rhos_at_z(self, zV):
        """ Find the scale radius (r_s) and the density at the scale radius
            (rho_s) for a given redshift value. This is done by finding the
            closest redshift value to the input redshift value(s), NOT by
            interpolating.

            Input:
                zV: redshift(s) at which r_s and rho_s will be determined
                    (float or numpy array).

            Returns:
                Value of r_s at zV (float or numpy array).
                Value of rho_s at zV (float or numpy array).
        """
        zInd = self.find_z_ind(zV)          # Correct z index
        return self.rS[zInd], self.rhoS[zInd]

    def mass_at_r(self, zV, r):
        """ The mass of the dark matter halo as function of distance from the
            center of the dark matter halo at a given redshift.

            Input:
                zV: redshift(s) at which the mass as function of radius
                    is determined (float or numpy array).
                r:  the distances from the center of the halo at which
                    the mass will be calculated (float or numpy array).

            Returns:
                mass as function of r and z (float or numpy array (1D or 2D))
        """
        # NOTE(review): this assumes zV maps to several redshifts (rS/rhoS
        # indexable with len()); a scalar zV would make len(rS) fail --
        # confirm the intended calling convention.
        # NOTE(review): for rho(r) = 4*rho_s/(x(1+x)^2) the enclosed mass is
        # 16*pi*rho_s*r_s**3*(...); r_s appears to the first power here --
        # verify units/convention against the rest of the project.
        rS, rhoS = self.rs_rhos_at_z(zV)    # r_s and rho_s
        mass = [rhoS[i] * rS[i] * (np.log(1+r/rS[i]) - r / (rS[i] + r))
                for i in range(len(rS))]
        return 16 * np.pi * np.asarray(mass)

    def simp_profile(self, x):
        """ The NFW profile density profile for the dark matter halo. This
            function gives rho/rho_s as function of r/r_s. Therefore you do
            not need to specify the parameters r_s and rho_s. Moreover, this
            profile is time independent.

            Input:
                x: r/r_s values, dimensionless (numpy array).

            Returns:
                rho/rho_s for the given x values (numpy array).
        """
        return 4 / (x * np.power(1+x, 2))

    def nfw_profile(self, zV, r):
        """ A time dependent NFW density profile. With the input of the
            desired redshift value(s), a time dependent density profile
            as function of radius is output. r_s and rho_s are determined
            using the model of van den Bosch.

            Input:
                zV: the redshift values at which the density profile
                    is computed (float or numpy array).
                r:  distance from the center of the halo (float or
                    numpy array).

            Returns:
                time dependent NFW profile (float or numpy array(1D or 2D))
        """
        zInd = self.find_z_ind(zV)          # Selecting z ind
        rS = self.rS                        # r_s
        rhoS = self.rhoS                    # rho_s
        if type(zInd) != np.ndarray:        # Single z value
            denom = r * np.power(1 + r/rS[zInd], 2) / rS[zInd]
            return 4 * rhoS[zInd] / denom
        selrS, selrhoS = rS[zInd], rhoS[zInd]   # Slicing lists
        # Multiple z values: one profile per redshift.
        frac = [selrhoS[ind] / (r * np.power(1 + r/selrS[ind], 2) / selrS[ind])
                for ind in range(len(selrS))]
        return 4 * np.asarray(frac)

    def pot_nfw(self, zV, r):
        """ The gravitational potential corresponding to the NFW
            density profile. This is obtained by solving the Poisson
            equation. For the NFW profile there exists an analytical
            solution.

            Input:
                zV: the redshift values at which the potential is
                    computed (float or numpy array).
                r:  distance from the center of the halo (float or
                    numpy array).

            Returns:
                gravitational potential (float or numpy array(1D or 2D))
        """
        rhoS = self.rhoS                    # rho_s
        rS = self.rS                        # r_s
        zInd = self.find_z_ind(zV)          # Finding correct z
        if type(zInd) != np.ndarray:        # Single z value
            # Need to interpolate due to coarse grid
            # NOTE(review): only the scalar branch interpolates; the
            # multi-z branch below uses nearest-grid values -- confirm this
            # asymmetry is intentional.
            interpRS = interp1d(self.red, self.rS)      # Creating r_s int. object
            selrS = interpRS(zV)                        # Interpolating r_s
            interpRhoS = interp1d(self.red, self.rhoS)  # Creating rho_s int. object
            selrhoS = interpRhoS(zV)                    # Interpolating rho_s
            part1 = -16 * np.pi * G * selrhoS * selrS * selrS
            part2 = np.log(1 + r/selrS) / (r/selrS)
            return part1 * part2
        # Multiple z values
        selrS, selrhoS = rS[zInd], rhoS[zInd]           # Slicing lists
        part1 = -16 * np.pi * G * selrhoS * selrS * selrS       # First part
        part2 = [np.log(1 + r/rsV) / (r/rsV) for rsV in selrS]  # Second part
        phi = [part1[ind] * part2[ind] for ind in range(len(zInd))]  # Potential
        return np.asarray(phi)
| Evd-V/Bachelor-thesis | zhao.py | zhao.py | py | 9,478 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "astropy.constants.kpc.value",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "astropy.constants.kpc",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "general.conv_inv_dens",
"line_number": 67,
"usage_type": "call"
},
{
"api_... |
74586144105 | from collections import defaultdict
def solution(id_list, report, k):
    """Programmers 92334: count result mails per user.

    A reporter receives one mail for each distinct user they reported whose
    distinct-report count reaches k. Duplicate reports count once.
    """
    unique_reports = set(report)          # duplicate reports only count once
    # Number of distinct reporters per target.
    times_reported = defaultdict(int)
    for entry in unique_reports:
        times_reported[entry.split()[1]] += 1
    # Map each user id to its position in the answer list.
    position = {user: idx for idx, user in enumerate(id_list)}
    answer = [0] * len(id_list)
    for entry in unique_reports:
        reporter, target = entry.split(' ')
        if times_reported[target] >= k:
            answer[position[reporter]] += 1
    return answer
{
"api_name": "collections.defaultdict",
"line_number": 4,
"usage_type": "call"
}
] |
14821096504 | # Woman who habitually buys pastries before 5
import json
def find_customers_who_order_multiple_pastries_before_5am() -> list[str]:
    """
    Identifies customer ids of customers who placed orders of more than
    one bakery item (sku containing "BKY") between midnight and 5am.
    A customer id is appended once per qualifying order, so repeat
    offenders appear multiple times.
    """
    matching_ids = []
    with open('./noahs-jsonl/noahs-orders.jsonl', 'r') as jsonl_file:
        for raw_line in jsonl_file:
            order = json.loads(raw_line)
            # "ordered" is "<date> <HH:MM:SS>"; take the hour.
            hour = int(order["ordered"].split(" ")[1].split(":")[0])
            # Total quantity across all bakery line items.
            pastry_count = sum(
                int(item["qty"])
                for item in order["items"]
                if "BKY" in item["sku"]
            )
            if hour < 5 and pastry_count > 1:
                matching_ids.append(order["customerid"])
    return matching_ids
def find_customers_from_customer_ids(customer_ids: list[str]) -> list[dict]:
    """
    Gets a list of customers from
    a list of customer ids

    Each returned customer dict gains a "freq" key holding how many
    times that customer's id appeared in `customer_ids`.
    """
    from collections import Counter
    # Count occurrences once (O(n)) instead of list.count() per match (O(n*m)).
    id_frequencies = Counter(customer_ids)
    with open('./noahs-jsonl/noahs-customers.jsonl', 'r') as jsonl_file:
        maybe_tinder_lady_info = []
        for line in jsonl_file:
            customer = json.loads(line)
            customer_id = customer["customerid"]
            if customer_id in id_frequencies:
                customer["freq"] = id_frequencies[customer_id]
                maybe_tinder_lady_info.append(customer)
        return maybe_tinder_lady_info
if __name__ == "__main__":
    # Collect candidate customer ids, attach per-customer frequencies,
    # then rank the candidates by how often they placed qualifying orders.
    possible_customer_ids = find_customers_who_order_multiple_pastries_before_5am()
    possible_customers = find_customers_from_customer_ids(possible_customer_ids)
    possible_customers = sorted(possible_customers, key=lambda x: x["freq"], reverse=True)
    for customer in possible_customers:
        # First customer to be printed is the one who most habitually
        # purchases pastries early morning
        print(customer)
{
"api_name": "json.loads",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 47,
"usage_type": "call"
}
] |
42243182620 | import matplotlib.pyplot as plt
import numpy as np
import dill as pickle
# Sensitivity curves (alpha vs lambda) to overlay; each .npy holds
# (lambdas, alphas, diagalphas) arrays for one shield configuration.
sens_to_plot = ['20180314_grav_noshield_cant-0mV_allharm.npy', \
                '20180314_grav_shieldin-nofield_cant-0mV_allharm.npy', \
                '20180314_grav_shieldin-1V-1300Hz_cant-0mV_allharm.npy', \
                '20180314_grav_shieldin-2V-2200Hz_cant-0mV_allharm.npy']
labs = ['No Shield', 'Shield', 'Shield - 1300Hz', 'Shield - 2200Hz']
# When True, skip the per-configuration curves and show only the limits.
plot_just_current = False

sens_dat = []
for sens_file in sens_to_plot:
    # NOTE(review): absolute path '/sensitivities/...' — confirm this is the
    # intended mount point and not a missing project-root prefix.
    lambdas, alphas, diagalphas = np.load('/sensitivities/' + sens_file)
    sens_dat.append(alphas)

alpha_plot_lims = (1000, 10**13)
lambda_plot_lims = (10**(-7), 10**(-4))

# Published exclusion limits (Decca et al.) for comparison, as (lambda, alpha) columns.
limitdata_path = '/sensitivities/decca1_limits.txt'
limitdata = np.loadtxt(limitdata_path, delimiter=',')
limitlab = 'No Decca 2'
limitdata_path2 = '/sensitivities/decca2_limits.txt'
limitdata2 = np.loadtxt(limitdata_path2, delimiter=',')
limitlab2 = 'With Decca 2'

fig, ax = plt.subplots(1,1,sharex='all',sharey='all',figsize=(5,5),dpi=150)

if not plot_just_current:
    for i, sens in enumerate(sens_dat):
        # NOTE(review): `lambdas` is whatever the *last* file loaded provided;
        # assumes all files share the same lambda grid — confirm.
        ax.loglog(lambdas, sens, linewidth=2, label=labs[i])

ax.loglog(limitdata[:,0], limitdata[:,1], '--', \
          label=limitlab, linewidth=3, color='r')
ax.loglog(limitdata2[:,0], limitdata2[:,1], '--', \
          label=limitlab2, linewidth=3, color='k')
ax.grid()
ax.set_xlim(lambda_plot_lims[0], lambda_plot_lims[1])
ax.set_ylim(alpha_plot_lims[0], alpha_plot_lims[1])
ax.set_xlabel('$\lambda$ [m]')
ax.set_ylabel('|$\\alpha$|')
ax.legend(numpoints=1, fontsize=9)
#ax.set_title(figtitle)
plt.tight_layout()
plt.show()
| charlesblakemore/opt_lev_analysis | scripts/mod_grav/plot_sensitivity.py | plot_sensitivity.py | py | 1,634 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
... |
34212052955 | # https://www.acmicpc.net/problem/1260
# solution
# 1) 주어진 입력을 인접행렬로 저장한다
# 2) 현재 정점 그때그때 출력하는 dfs를 돌린다
# 3) bfs 하고 path를 출력한다
# TIL
# adj_mat = [([0,] * n),]*n -> 이런식으로 초기화 하면안됨. copy라 원소의 id값 다 같아지는 문제
# 파이썬 입력으로 input()은 굉장히 느리다. sys.stdin.readline() 사용
# dfs의 경우 최단거리 찾는 문제(탐색 여러번 반복)와 단순히 탐색하는 문제(한번만 탐색) 구분해서 풀자
## 최단거리 찾는 경우만 여러번 반복해야해서 visited 리셋필요
# 반면 bfs의 path는 그 자체로 최단거리(자체적으로 여러개 path로 탐색)
# bfs 제대로 이해하자
## 1) bfs는 재귀없이 반복만
## 2) 방문과 발견 시점의 분리
## 3) 큐에 중복된 정점 안들어가게 조심
## 4) 정점 방문 표시는 반드시 큐에 넣을때 해야함
from collections import deque
import sys
def dfs(v):
    """Depth-first traversal from vertex v (0-based), printing each visited
    vertex as 1-based. Reads module-level globals `adj_mat` (adjacency
    matrix) and `visited` (0/1 marks); caller pre-marks the start vertex."""
    global adj_mat, visited
    print(v+1, end=' ')
    for n_v, is_conn in enumerate(adj_mat[v]): # scan row v for the next vertex to visit
        if v == n_v:continue # do not traverse self-loops
        if is_conn == 1 and visited[n_v] == 0: # recurse into connected, unvisited vertices
            visited[n_v] = 1 # mark n_v visited before recursing
            dfs(n_v)
def bfs(start):
    """Breadth-first traversal from `start` (0-based); prints the visit
    order as 1-based vertices. Uses module-level globals `adj_mat` and `n`.
    Vertices are marked visited on enqueue so each enters the queue once."""
    global adj_mat
    visited = [0 for _ in range(n)]
    path = deque([])
    q = deque([start,]) # seed the queue with the start vertex
    visited[start] = 1 # mark on push, not on pop, to avoid duplicates in the queue
    while len(q) > 0:
        # "visit" = popping from the queue
        v = q.popleft()
        path.append(v)
        # "discovery" = pushing onto the queue
        for n_v,is_conn in enumerate(adj_mat[v]):# scan row v for neighbors to discover
            if v == n_v: continue # do not traverse self-loops
            if is_conn == 1 and visited[n_v] == 0: # connected neighbor not yet queued
                q.append(n_v) # enqueue the discovered vertex
                visited[n_v] = 1 # mark immediately so it is enqueued only once
    for v in path:
        print(v+1, end=' ')
if __name__ == "__main__":
    n,m,v = tuple(map(int,sys.stdin.readline().split()))
    v = v-1 # vertices are 1-based in the input; shift to 0-based indexing
    # Build and fill the adjacency matrix (undirected graph: set both directions)
    adj_mat = [[0 for _ in range(n)] for _ in range(n)]
    for _ in range(m):
        r,c = tuple(map(int,sys.stdin.readline().split()))
        adj_mat[r-1][c-1] = 1
        adj_mat[c-1][r-1] = 1
    # dfs (pre-mark the start vertex; dfs prints as it goes)
    visited = [0 for _ in range(n)]
    visited[v] = 1
    dfs(v)
    print('\n',end='')
    # bfs
    bfs(v)
| chankoo/problem-solving | graph/1260-DFS와BFS.py | 1260-DFS와BFS.py | py | 2,933 | python | ko | code | 1 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
... |
15695220227 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats
import wbgapi as wb
import seaborn as sns
def world(ind, code, years):
    '''
    Fetch World Bank indicator data for selected countries and for the
    whole world, returning the country frame, its transpose, and the
    world frame.

    Parameters
    ----------
    ind : index
    code : country code
    years : total number of years

    Returns
    -------
    data : original data
    data_t : transposed data
    worlddata : complete world data
    '''
    # All-country frame first, then the selected-country frame.
    worlddata = wb.data.DataFrame(ind, mrv=years)
    data = wb.data.DataFrame(ind, code, mrv=years)
    return data, data.T, worlddata
def modify(data):
    '''
    Clean a World Bank frame down to its per-column means.

    Parameters
    ----------
    data : pandas.DataFrame
        Frame whose columns are year labels.

    Returns
    -------
    data_mod1 : pandas.Series
        Mean of every column.
    data_mod3 : pandas.DataFrame
        Two-column frame ("year", "mean") with one row per year.
    '''
    column_means = data.mean()
    mean_table = (
        pd.DataFrame(column_means)
        .reset_index()
        .rename(columns={"index": "year", 0: "mean"})
    )
    return column_means, mean_table
def box(x, y):
    '''
    Box plot comparing CO2 emissions of selected countries with the world.

    Parameters
    ----------
    x : data series for each box (four entries expected)
    y : tick labels for the boxes

    Returns
    -------
    None.
    '''
    figure = plt.figure(figsize=(4, 3))
    axes = figure.add_axes([0, 0, 1, 1])
    axes.boxplot(x)
    axes.set_xlabel("countries")
    axes.set_ylabel("CO2 EMISIONS(% change)")
    axes.set_title("CO2 EMMISIONS COMPARISIONS")
    axes.set_xticks([1, 2, 3, 4])
    axes.set_xticklabels(y)
    plt.show()
    return
country_codes = ["PAK", "GBR", "CHN", "NAC", "IND"] # country codes
wb.series.info('EN.ATM.GHGT.KT.CE') # getting info from world bank api
indicator_id = {"EN.ATM.GHGT.KT.CE", "EN.ATM.CO2E.KT",
                "AG.LND.ARBL.ZS", "AG.LND.AGRI.ZS"} # indicators to access data
# creating dictionaries mapping each indicator code to a readable label
AG = {"AG.LND.AGRI.ZS": "AGRICULTURAL LAND(%)"}
ABL = {"AG.LND.ARBL.ZS": "ARABLE LAND (%)"}
CO2 = {"EN.ATM.CO2E.KT": "CO2 EMISSIONS(KT)"}
GHG = {"EN.ATM.GHGT.KT.CE": "TOTAL GREENHOUSE GAS EMISSIONS(KT)"}
wb.series.info(indicator_id)
# accessing data by calling "world" function
# NOTE(review): AG/CO2/ABL/GHG are rebound from dicts to DataFrames below,
# shadowing the label dicts defined above — confirm this is intentional.
AG, AG_T, AG_world = world(AG.keys(), country_codes, 30)
AG_T.describe()
# accessing data by calling "world" function
Co2, CO2_T, CO2_world = world(CO2.keys(), country_codes, 30)
CO2_T.describe()
# accessing data by calling "world" function
ABL, ABL_T, ABL_world = world(ABL.keys(), country_codes, 30)
ABL_T.describe()
# accessing data by calling "world" function
GHG, GHG_T, GHG_world = world(GHG.keys(), country_codes, 30)
GHG_T.describe()
# modified (year, mean) world tables for each indicator
co2_mod, co2_W_mod = modify(CO2_world)
Ghg_mod, Ghg_W_mod = modify(GHG_world)
ag_mod, ag_W_mod = modify(AG_world)
abl_mod, abl_W_mod = modify(ABL_world)
abl_W_mod.describe()
# relabel the transposed country frames so the index reads as "year"
c = CO2_T.rename(columns={"index": "year", 0: "mean"})
co2_t = c.rename_axis("year")
a = AG_T.rename(columns={"index": "year", 0: "mean"})
ag_t = a.rename_axis("year")
ag = ABL_T.rename(columns={"index": "year", 0: "mean"})
agl_t = ag.rename_axis("year")
g = GHG_T.rename(columns={"index": "year", 0: "mean"})
ghg_t = g.rename_axis("year")
# generate a dual-axis line plot of world-mean arable vs agricultural land share
fig, ax = plt.subplots(figsize=[7, 3])
ax.plot(abl_W_mod["year"], abl_W_mod["mean"], marker="*")
ax.set_ylabel("Arable land (% of land area)", fontsize=7)
ax.set_xlabel("Year", fontsize=16)
plt.xticks(rotation=90)
ax1 = ax.twinx()  # second y-axis sharing the same x (years)
ax1.plot(ag_W_mod["year"], ag_W_mod["mean"], color="RED", marker="o")
ax1.set_ylabel("Agricultural land (% of land area)",
               fontsize=7)
plt.title("Time series plot of ARABLE LAND and AGRICULTURAL LAND (% of total land)")
plt.show()
# generate box plot
# NOTE(review): the "NAC" series is labelled "UNITED KINGDOM" here — confirm
# the intended country code (GBR?).
data = [CO2_T["IND"], CO2_T["NAC"], CO2_T["CHN"], co2_W_mod["mean"]]
coun = ["INDIA", "UNITED KINGDOM", "CHINA", "world"]
box(data, coun)
# violin plots of greenhouse-gas emissions for INDIA, UK, CHINA
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3)
ax1.violinplot(GHG_T["IND"], showmedians=True, points=10)
ax1.set_xticks([1])
ax1.set_ylabel("GREEN HOUSE GAS EMISSION")
ax1.set_xticklabels(["INDIA"])
ax2.violinplot(GHG_T["GBR"], showmedians=True, points=100)
ax2.set_xticks([1])
ax2.set_xticklabels(["UK"])
ax3.violinplot(GHG_T["CHN"], showmedians=True, points=500)
ax3.set_xticks([1])
ax3.set_xticklabels(["CHINA"])
plt.show()
# Heat map of the country-to-country correlation of greenhouse gases
# NOTE(review): `rs`/`FORW` below are never used afterwards — confirm leftover.
rs = np.random.RandomState(0)
FORW = pd.DataFrame(rs.rand(8, 8))
corr = GHG_T.corr()
plt.figure(figsize=(6, 7))
sns.heatmap(corr, annot=True)
plt.show()
| sunithasomasundaran/ads1_statistics_and_trends_sunitha | assignment2.py | assignment2.py | py | 4,686 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "wbgapi.data.DataFrame",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "wbgapi.data",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "wbgapi.data.DataFrame",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "wbgapi.d... |
5154312569 | from sklearn.datasets import make_circles
from sklearn.datasets import make_blobs
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
class Circles(object):
    """Two concentric noisy rings with a fixed 70/30 train/test split."""

    def __init__(self):
        features, targets = make_circles(n_samples=300, noise=0.1, random_state=5622, factor=0.6)
        self.X, self.labels = features, targets
        split = train_test_split(features, targets, test_size=0.3, random_state=5622)
        self.X_train, self.X_test, self.y_train, self.y_test = split
class DataBlobs:
    """Gaussian blobs around `centers` with a fixed 70/30 train/test split."""

    def __init__(self, centers, std=1.75):
        features, targets = make_blobs(n_samples=300, n_features=2, cluster_std=std, centers=centers,
                                       shuffle=False, random_state=5622)
        self.X, self.labels = features, targets
        split = train_test_split(features, targets, test_size=0.3, random_state=5622)
        self.X_train, self.X_test, self.y_train, self.y_test = split
class DataMoons(object):
    """Two interleaving half-moons with a fixed 70/30 train/test split."""

    def __init__(self):
        features, targets = make_moons(n_samples=300, noise=0.05, shuffle=False, random_state=5622)
        self.X, self.labels = features, targets
        split = train_test_split(features, targets, test_size=0.3, random_state=5622)
        self.X_train, self.X_test, self.y_train, self.y_test = split
import os
import pickle
import numpy as np
import pandas as pd
import json
import random
from sklearn.model_selection import train_test_split
current_folder = os.path.dirname(os.path.abspath(__file__))
class Concrete(object):
    """Concrete-strength regression data; the last CSV column is the target."""

    def __init__(self):
        table = pd.read_csv('data/Concrete_Data.csv').to_numpy()
        features, target = table[:, :-1], table[:, -1]
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            features, target, test_size=0.2, random_state=5622)
class Digits(object):
    """MNIST digits split into 1000 train / 500 validation / 500 test images,
    each flattened to a 784-vector."""

    def __init__(self):
        archive = np.load(os.path.join(current_folder, "mnist.npz"))
        self.images = images = archive["images"].reshape(-1, 28 * 28)
        self.labels = labels = archive["labels"]

        train_end = 1000
        valid_end = train_end + 500
        test_end = valid_end + 500

        self.X_train, self.y_train = images[:train_end], labels[:train_end]
        self.X_valid = images[train_end:valid_end]
        self.y_valid = labels[train_end:valid_end]
        self.X_test = images[valid_end:test_end]
        self.y_test = labels[valid_end:test_end]
class BinaryDigits:
    """
    MNIST data relabelled by digit parity (label % 2): odd digits -> 1,
    even digits -> 0.

    (The previous docstring claimed "images of 9 and 8 only", but the code
    keeps every image and only maps the labels to parity.)
    """

    def __init__(self):
        loaded = np.load(os.path.join(current_folder, "mnist.npz"))
        images = loaded["images"].reshape(-1, 28 * 28)
        labels = loaded["labels"]
        labels = labels % 2  # binary target: digit parity

        train_size = 1000
        valid_size = 500
        test_size = 500

        self.X_train, self.y_train = images[:train_size], labels[:train_size]
        self.X_valid, self.y_valid = images[train_size: train_size + valid_size], labels[
                                                                                  train_size: train_size + valid_size]
        self.X_test, self.y_test = (images[train_size + valid_size:train_size + valid_size + test_size],
                                    labels[train_size + valid_size: train_size + valid_size + test_size])
class IMDB:
    """
    Class to store IMDB dataset

    Loads movie-review texts/labels from JSON and makes a shuffled
    70/30 train/test split.
    """

    def __init__(self):
        data_path = os.path.join(current_folder, "movie_review_data.json")
        with open(data_path) as handle:
            self.data = corpus = json.load(handle)
        texts = [entry['text'] for entry in corpus['data']]
        labels = [entry['label'] for entry in corpus['data']]
        (self.X_train, self.X_test,
         self.y_train, self.y_test) = train_test_split(texts, labels, test_size=0.3,
                                                       shuffle=True, random_state=42)
| peterrrock2/ML_coursework | Homework/Hw4/data/__init__.py | __init__.py | py | 4,114 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.datasets.make_circles",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.make_blobs",
"line_number": 16,
"usage_type": "cal... |
5411923106 | import requests
import json
from config import currency
class APIException(Exception):
    """Raised when user input or the exchange-rate request is invalid."""
    pass
class Converter:
    """Converts an amount between two currencies via the apilayer fixer API."""

    @staticmethod
    def get_price(base, sym, amount):
        """Return `amount` of currency `base` expressed in currency `sym`,
        rounded to two decimals. Raises APIException on any invalid input."""
        def resolve(name):
            # Map a user-supplied currency name to its API ticker.
            try:
                return currency[name.lower()]
            except KeyError:
                raise APIException(f'Валюта {name} не найдена!')

        base_key = resolve(base)
        sym_key = resolve(sym)

        if base_key == sym_key:
            raise APIException(f'Невозможно конвертировать одинаковые валюты: {base}!')

        try:
            int(amount)
        except ValueError:
            raise APIException(f'Неправильно ввели количество: {amount}')

        url = (f'https://api.apilayer.com/fixer/convert'
               f'?to={sym_key}&from={base_key}&amount={amount}')
        # NOTE(review): API key hardcoded in source — consider moving to config.
        response = requests.request(
            "GET", url,
            headers={"apikey": "6TESL4S9m67q6gZqaBFdf4CqRcAw8t8Z"},
            data={},
        )
        payload = json.loads(response.content)
        return round(payload['result'], 2)
{
"api_name": "config.currency",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "config.currency",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "requests.request",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "json.loads",
"li... |
71960613545 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix
import itertools
from PIL import Image as PIL
def show_image(image, cmap=None):
    """Display a single image on a large (12x12 inch) figure; blocks until closed."""
    plt.figure(figsize=(12, 12))
    plt.imshow(image, cmap=cmap)
    plt.show()
def show_images(images, labels=None):
    """Show up to 64 images in an 8x8 grid.

    Parameters
    ----------
    images : sequence of image file paths.
    labels : optional per-image labels drawn under each cell.
    """
    # Wider subplot spacing when labels are drawn so they do not overlap.
    spacing = 0.2 if labels else 0.05
    fig = plt.figure(figsize=(12, 12))
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=spacing, wspace=spacing)

    # Guard against fewer than 64 images (previously a hard range(64) crashed).
    for i in range(min(64, len(images))):
        region = PIL.open(images[i])
        ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
        ax.imshow(region)
        if labels:
            ax.set_xlabel(labels[i])
    plt.show()
def show_importance(clf, columns):
    """Print and plot a fitted estimator's feature importances, highest first.

    Parameters
    ----------
    clf : fitted estimator exposing `feature_importances_`.
    columns : feature names, in the same order as the training columns.
    """
    feat_num = len(columns)
    importances = clf.feature_importances_
    indices = np.argsort(importances)[::-1]  # feature indices, most important first

    # Print the feature ranking
    print("Feature ranking:")
    for f in range(feat_num):
        # BUG FIX: look the name up through indices[f] so it matches the
        # importance being printed (previously columns[f] was used).
        print("{}. feature {} : {} ({})".format(
            f + 1, indices[f], columns[indices[f]], importances[indices[f]]))

    # Plot the feature importances of the forest
    plt.figure()
    plt.title("Feature importances")
    plt.bar(range(feat_num), importances[indices],
            color="r", align="center")
    # BUG FIX: reorder tick labels to match the sorted bars.
    plt.xticks(range(feat_num), [columns[i] for i in indices], rotation=270)
    plt.xlim([-1, feat_num])
    plt.show()
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # BUG FIX: normalize BEFORE imshow — previously the heatmap colors
        # showed raw counts while the annotations showed normalized values.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    # Annotate every cell; white text over dark cells for contrast.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        cell_text = '{:.2f}'.format(cm[i, j]) if normalize else cm[i, j]
        plt.text(j, i, cell_text,
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
def plot_confusion_matrix_wrapper(y_test, y_pred, classes):
    """Compute the confusion matrix for (y_test, y_pred) and plot it twice:
    raw counts first, then row-normalized."""
    cnf_matrix = confusion_matrix(y_test, y_pred)
    np.set_printoptions(precision=2)

    for normalize in (False, True):  # raw counts, then normalized
        plt.figure()
        plot_confusion_matrix(cnf_matrix, classes=classes, normalize=normalize)

    plt.show()
| fukuta0614/active_learning | shared/PIA/analysis.py | analysis.py | py | 3,289 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mat... |
73172240425 | import cv2
import numpy as np
class VideoCamera(object):
    """Webcam wrapper: grabs a frame, detects a green ball via HSV
    thresholding plus Hough circle detection, and returns the frame
    JPEG-encoded together with a detection label."""

    def __init__(self):
        # Device 0 is the default system camera.
        self.video = cv2.VideoCapture(0)

    def __del__(self):
        # Release the capture handle when the wrapper is garbage collected.
        self.video.release()

    def get_frame(self):
        # NOTE(review): this while loop always returns on the first
        # iteration, so it behaves like a single capture — confirm intent.
        while True:
            a="Not Found"
            # HSV bounds for "green" (hue roughly 45-75).
            lower_green = np.array([45, 140, 50])
            upper_green = np.array([75, 255, 255])
            success, frame = self.video.read()
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            hsv = cv2.medianBlur(hsv, 5)  # suppress speckle noise before thresholding
            imgThreshHighgreen = cv2.inRange(hsv, lower_green, upper_green)
            circlesgreen = cv2.HoughCircles(imgThreshHighgreen, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30,
                                            minRadius=0, maxRadius=0)
            if circlesgreen is not None:
                a="Green Ball"
            # Return the JPEG-encoded frame plus the detection label.
            ret, jpeg = cv2.imencode('.jpg',frame)
            return jpeg.tobytes(),a
VideoCamera().get_frame()
| ishivanshgoel/Technocrats-T1 | camera.py | camera.py | py | 936 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_numb... |
20081039110 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# !pip install numpy
# !pip install pandas
# !pip install matplotlib
# !pip install sklearn
# !pip install dmba
# !pip install statsmodels
# !pip install yellowbrick
# In[2]:
import pandas as pd
import numpy as np
df = pd.read_csv("data/medical_clean.csv")
outcome = 'TotalCharge'  # regression target column
# Drop identifier/location columns with no predictive signal.
# NOTE(review): 'Interaction' is listed twice — harmless, but confirm.
df = df.drop(['CaseOrder', 'Customer_id', 'Interaction', 'UID', 'City',
              'State', 'County', 'Zip', 'Lat', 'Lng', 'Interaction', 'TimeZone',
              'Additional_charges'], axis=1)
cat_columns = df.select_dtypes(exclude="number").columns
# Give categorical columns a numeric value
for col in cat_columns:
    df[col] = pd.Categorical(df[col])
    df[col] = df[col].cat.codes
df.head()
# In[3]:
# export prepared data
df.to_csv('data/medical_prepared.csv')
# In[4]:
df['Complication_risk']
# # Univariate Analysis
# In[5]:
import matplotlib.pyplot as plt
import seaborn as sns
# In[6]:
# perform univariate analysis on all columns
for col in df.columns:
plt.hist(df[col])
plt.title(col)
path = 'plots/univariate-%s.png'%col
plt.gcf().savefig(path)
# # Bivariate Analysis
#
# Since we are predicting Initial_days we will include Initial_days in our bivariate analysis of the features
# In[7]:
for col in df:
if col != outcome:
df.plot(kind='scatter', x=outcome, y=col)
path = 'plots/bivariate-%s-%s.png'%(outcome,col)
plt.gcf().savefig(path)
# ## Correlation Matrix
# In[8]:
correl = df.corr()
display(correl)
# In[9]:
abs(df.corr())[outcome].sort_values(ascending=False)
# In[10]:
fig, ax = plt.subplots(figsize=(15,15))
heatmap = sns.heatmap(correl, xticklabels = correl.columns, yticklabels = correl.columns, cmap='RdBu')
heatmap.get_figure().savefig('plots/heatmap.png')
# # Regression Models
#
# We start with a regression model with all of the features
# In[11]:
import statsmodels.api as sm
# In[12]:
X = df.loc[:,df.columns!=outcome]
y = df[outcome]
# In[13]:
# Baseline OLS with every feature (constant added for the intercept).
Xc = sm.add_constant(X)
initial_model = sm.OLS(y,Xc)
results = initial_model.fit()
results.summary()
# In[ ]:
# ## Data Reduction
# In[14]:
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
# NOTE(review): LinearRegression's `normalize` kwarg was removed in
# scikit-learn >= 1.2 — confirm the pinned sklearn version.
linear_regression = LinearRegression(normalize=False,fit_intercept=True)
Xc = sm.add_constant(X)
def r2_est(X,y):
    # In-sample R^2 of an OLS fit on X (refits linear_regression each call).
    return r2_score(y,linear_regression.fit(X,y).predict(X))
# Drop-one-column impact on R^2 for each feature.
# NOTE(review): j indexes Xc, which includes the prepended 'const' column, so
# the constant is treated as a feature and the last predictor of X is never
# dropped — confirm whether j+1 was intended.
r2_impact = list()
for j in range(X.shape[1]):
    selection = [i for i in range(X.shape[1]) if i!=j]
    r2_impact.append(((r2_est(Xc,y) - r2_est(Xc.values [:,selection],y)) ,Xc.columns[j]))
best_variables = list()
for imp, varname in sorted(r2_impact, reverse=True):
    if imp >= 0.0005:  # keep features whose removal costs at least this much R^2
        best_variables.append(varname)
    print ('%6.5f %s' % (imp, varname))
# New dataset with reduced features
df_reduced = df[best_variables]
df_reduced.head()
# In[ ]:
# In[ ]:
# In[15]:
# export reduced data
df_reduced.to_csv('data/medical_reduced.csv')
# In[ ]:
# In[16]:
# Refit OLS on the reduced feature set.
X_reduced = df_reduced.loc[:,df_reduced.columns!=outcome]
Xc_reduced = sm.add_constant(X_reduced)
model_reduced = sm.OLS(y,Xc_reduced)
results = model_reduced.fit()
results.summary()
# In[ ]:
# ## Residuals
# In[17]:
from sklearn.linear_model import Lasso, LassoCV, Ridge, RidgeCV
from sklearn.model_selection import train_test_split
from yellowbrick.regressor import AlphaSelection, PredictionError, ResidualsPlot
# In[18]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Residuals plot of a Ridge fit (yellowbrick).
model = Ridge()
visualizer = ResidualsPlot(model)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
residual = visualizer.poof()
residual.get_figure().savefig('plots/residual-plot.png')
# In[19]:
# Prediction-error plot of a Lasso fit (yellowbrick).
model = Lasso()
visualizer = PredictionError(model)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
prediction_error = visualizer.poof()
prediction_error.get_figure().savefig('plots/prediction_error.png')
# In[ ]:
| cjhammons/Multiple-Linear-Regression-on-Medical-Data | submission/multiple-linear-regression.py | multiple-linear-regression.py | py | 4,028 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.Categorical",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.... |
9710179885 | import random
from random import randint
import pygame
from essais.essai_dijkstra_damier import title
from lib_dijkstra import DijkstraManager, Point, pyrect_to_point, point_to_pyrect
verbose = False
class Entity(pygame.sprite.Sprite):
    """Base sprite with a 4-direction sprite sheet and simple grid-free movement."""

    def __init__(self, name, x, y, screen=None):
        super().__init__()
        self.name = name
        # Sprite sheet loaded from ../sprites/<name>.png
        self.sprite_sheet = pygame.image.load(f'../sprites/{name}.png')
        self.image = self.get_image(0, 0)
        self.image.set_colorkey([0, 0, 0])  # treat black as transparent
        self.rect = self.image.get_rect() # sprite position rectangle
        self.position = [x, y]
        self.screen = screen if screen else None
        # Each sprite-sheet cell is 32 x 32; one row per facing direction.
        self.images = {
            'down': self.get_image(0, 0),
            'left': self.get_image(0, 32),
            'right': self.get_image(0, 64),
            'up': self.get_image(0, 96)
        }
        # Collision hitbox anchored at the entity's feet.
        self.feet = pygame.Rect(0, 0, self.rect.width * 0.5, 16)
        self.old_position = self.position.copy()
        self.speed = 2

    def save_location(self):
        # Remember the current position so move_back() can restore it.
        self.old_position = self.position.copy()

    def change_animation(self, attitude):
        # attitude is one of ('up', 'down', 'left', 'right')
        self.image = self.images[attitude]
        self.image.set_colorkey((0, 0, 0))

    def move_right(self):
        self.position[0] += self.speed

    def move_left(self):
        self.position[0] -= self.speed

    def move_up(self):
        self.position[1] -= self.speed

    def move_down(self):
        self.position[1] += self.speed

    def update(self):
        # Sync the drawing rect and feet hitbox with the logical position.
        self.rect.topleft = self.position
        self.feet.midbottom = self.rect.midbottom

    def move_back(self):
        # Undo the last move (used after a collision).
        self.position = self.old_position
        self.rect.topleft = self.position
        self.feet.midbottom = self.rect.midbottom

    def get_image(self, x, y):
        # Extract the 32x32 cell at (x, y) from the sprite sheet.
        image = pygame.Surface([32, 32])
        image.blit(self.sprite_sheet, (0, 0), (x, y, 32, 32))
        return image
class Player(Entity):
    """Player-controlled entity using the 'player' sprite sheet."""

    def __init__(self, x, y):
        super().__init__('player', x, y)
class NPC(Entity):
    """Non-player character that walks between map areas, either with a
    Dijkstra path on a simplified map or with a simple stochastic walk."""

    def __init__(self, name, map_manager, map_name, screen=None):
        super().__init__(name, 500, 550, screen)
        self.name = name #
        self.change_animation("left")
        self.map_manager = map_manager
        self.map_name = map_name
        self.debug_count =0
        self.move_direction = None
        # Zones from the tmx map, named like robin_path1. Each such zone is
        # called an "area" and is a pygame.Rect.
        self.areas = []  # the areas: list of pygame.Rect
        self.areas_nb = None
        self.current_area_idx = None # index of the current area
        self.next_area_idx = None

        # Between two areas the NPC takes a "walk"; its path is found either
        # by a simple heuristic or by a Dijkstra algorithm.
        self.use_dijkstra = True
        # Points of the simplified map used to solve the walk.
        self.djik = None  # solver object for the Dijkstra path
        # The points below guide movement along the walk.
        self.prev_point = None  # Point we come from; initialised by calculate_dijkstra
        self.next_point = None  # Point we are heading to
        self.next_point_rect: pygame.Rect = None  # same point as a pygame.Rect
        self.next_dir = None
        # Remember to run the setup methods after creating an NPC,
        # e.g. define_first_target().

    def calculate_next_area_idx(self):
        # Pick a random target area different from the current one.
        while True:
            rnd = randint(0, self.areas_nb - 1)
            if rnd != self.current_area_idx:
                self.next_area_idx = rnd
                break

    def modify_speed(self):
        # Randomly nudge speed by -1/0/+1, clamped to the range 1..3.
        self.speed = self.speed + randint(-1, 1)
        if self.speed == 0:
            self.speed = 1
        elif self.speed == 4:
            self.speed = 3

    def calculate_move_direction(self):
        """Very basic algorithm: derive the general compass direction
        (NE/NW/SE/SW) from the feet position towards the target area."""
        target_point = self.areas[self.next_area_idx].center
        feet_point = self.feet.center
        rect = pygame.Rect(feet_point[0], feet_point[1],
                           target_point[0] - feet_point[0], target_point[1] - feet_point[1])
        x, y, w, h = rect
        if w > 0:
            if h > 0:
                self.move_direction = 'SE'
            else:
                self.move_direction = 'NE'
        else: # w is negative
            if h > 0:
                self.move_direction = 'SW'
            else:
                self.move_direction = 'NW'
        print(f"Nouvelle cible : {self.next_area_idx}, direction : {self.move_direction}")

    def calculate_dijkstra(self, verbose=False):
        """Read the simplified map and compute the Dijkstra path.

        The algorithm works on a reduced version of the map (reduction is
        1 or 2 tile sizes). It converts a zone (area) into a Point of the
        simplified map — i.e. what is called an "area" becomes a Point.
        """
        map = self.map_manager.maps[self.map_name].simple_map
        self.dijk = DijkstraManager(map)
        start_area = self.areas[self.current_area_idx]
        start_point = pyrect_to_point(self.map_manager.maps[self.map_name], start_area, 32)
        next_area = self.areas[self.next_area_idx]
        map_name = self.map_manager.maps[self.map_name]
        end_point = pyrect_to_point(map_name, next_area, 32)
        if verbose:
            print(f"Il faut aller du point {start_point} au point {end_point}")
        self.dijk.dijkstra(start_point, verbose=0)
        self.dijk.format_path(start_point, end_point, verbose=True)
        self.prev_point = start_point
        self.dijk.give_next_instruction() # IMPORTANT: discard the first (stale) instruction
        self.next_point, self.next_dir = self.dijk.give_next_instruction()
        self.next_point_rect = pygame.Rect(point_to_pyrect(map_name, self.next_point))
        print("Fin de calcul du Dijkstra")
        print(f"{self.next_dir} point_actuel: {self.rect} next_point: {self.next_point} ; next_point_rect : {self.next_point_rect}")

    def define_first_target(self):
        self.current_area_idx = 0 # index of the starting area
        # For a normal run, use these lines
        self.calculate_next_area_idx()
        # self.calculate_move_direction()
        # For debugging, use these lines instead
        # self.next_pyrect_idx = 2
        # self.move_direction = 'SE'

    def teleport_npc(self):
        # Place the NPC on its first area.
        first_area = self.areas[self.current_area_idx]
        self.position[0] = first_area.x
        self.position[1] = first_area.y
        self.save_location()

    def move(self):
        self.save_location() # attempt to work around a BIG bug
        self.debug_count += 1
        if self.use_dijkstra:
            self.move_dij()
        else:
            self.move_classical()

    def move_dij(self):
        """Automatic movement following the precomputed Dijkstra
        instructions (work in progress)."""
        sens = self.next_dir
        if sens == 'R':
            self.move_right()
        elif sens == 'L':
            self.move_left()
        elif sens == 'B':
            self.move_down()
        elif sens == 'T':
            self.move_up()
        elif sens is None:
            pass
        else:
            raise ValueError(f"{sens} : error code letter")
        if self.rect.colliderect(self.next_point_rect):
            print(" *************** COLISION **************")
            self.prev_point = self.next_point # currently unused
            self.next_point, self.next_dir = self.dijk.give_next_instruction()
            if self.next_point:
                self.next_point_rect = pygame.Rect(point_to_pyrect(self.map_name, self.next_point))
            else:
                print("********** Arrivé ! ************")
                # Find a new target for the NPC
                self.current_area_idx = self.next_area_idx
                self.calculate_next_area_idx()
                self.calculate_dijkstra()
        print(f"{self.debug_count}, {sens} actuel : point_actuel: {self.prev_point} rect: {self.rect} next_point: {self.next_point} ; next_point_rect : {self.next_point_rect}")
        print(f"next_dir devient {self.next_dir}")
        pass

    def move_classical(self):
        """Automatic movement. Basic algorithm: step towards the target
        area, choosing horizontal vs vertical moves at random with odds
        proportional to the remaining horizontal distance."""
        feet_rect = self.feet
        target_rect = self.areas[self.next_area_idx]
        feet_to_target_rect = pygame.Rect(feet_rect.x, feet_rect.y,
                                          target_rect.x - feet_rect.x, target_rect.y - feet_rect.y)
        move_vert = None
        move_horz = None
        if self.move_direction == 'SE':
            move_horz = self.move_right
            move_vert = self.move_down
            self.change_animation("right")
        elif self.move_direction == 'NW':
            move_horz = self.move_left
            move_vert = self.move_up
            self.change_animation("left")
        elif self.move_direction == 'SW':
            move_horz = self.move_left
            move_vert = self.move_down
            self.change_animation("left")
        elif self.move_direction == 'NE':
            move_horz = self.move_right
            move_vert = self.move_up
            self.change_animation("right")
        if feet_to_target_rect.height == 0:
            feet_to_target_rect.height = 5
            move_vert()
        else:
            # "odd" here means the probability of moving horizontally.
            try:
                odd_horiz_mvt = feet_to_target_rect.width / (feet_to_target_rect.height + feet_to_target_rect.width)
            except ZeroDivisionError:
                odd_horiz_mvt = 0.95
            if verbose:
                print(f"{feet_to_target_rect}, {self.name} Odd ratio : {odd_horiz_mvt}")
            if odd_horiz_mvt == 0:
                move_vert()
            else:
                rnd = random.random()
                # print(f"La valeur aléatoire est {rnd} ; limite de probabilité vaut {odd_horiz_mvt} : ", end = '')
                if rnd > odd_horiz_mvt:
                    move_vert()
                else:
                    move_horz()
        if self.rect.colliderect(target_rect):
            # Target reached: pick a new area and direction.
            self.current_area_idx = self.next_area_idx
            self.calculate_next_area_idx()
            self.calculate_move_direction()
| bermau/PW_19_pygamon | src/player.py | player.py | py | 10,618 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "pygame.sprite",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
... |
20948666331 | import os
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from torch import optim
from torch.utils.tensorboard import SummaryWriter
from sklearn.metrics import confusion_matrix
from model.refinenet import Segmentor
from model.discriminator import Discriminator
from dataset import TrainDataset, TestDataset
from loss import CrossEntropyLoss2d, BCEWithLogitsLoss2d, FocalLoss
from metric import evaluate_conf_mat
from eval import evaluate
from utils import makedir, save_metrics
import settings
# def lr_poly_scheduler(optim_G, optim_D, init_lr, init_lr_D, lr_decay_iter, iter, max_iter, poly_power):
# if iter % lr_decay_iter or iter > max_iter:
# return
# # calculate new lr
# new_lr = init_lr * (1 - float(iter) / max_iter) ** poly_power
# new_lr_D = init_lr_D * (1 - float(iter) / max_iter) ** poly_power
# # set optim_G lr
# optim_G.param_groups[0]['lr'] = new_lr
# optim_G.param_groups[1]['lr'] = new_lr * 10
# # set optim_D lr
# optim_D.param_groups[0]['lr'] = new_lr_D
def make_D_label(label, D_output):
    """Build a discriminator target tensor.

    Returns a CUDA tensor with the same shape/dtype as *D_output*, filled
    with the scalar *label* (0 = prediction, 1 = ground truth), detached
    from any existing graph but with requires_grad re-enabled.
    """
    filled = torch.full_like(D_output, label)
    return filled.clone().detach().requires_grad_(True).cuda()
# def make_D_label2(label, ignore_mask):
# ignore_mask = np.expand_dims(ignore_mask, axis=1)
# D_label = (np.ones(ignore_mask.shape)*label)
# D_label[ignore_mask] = settings.BCE_IGNORE_LABEL
# # D_label = Variable(torch.FloatTensor(D_label)).cuda()
# D_label = torch.tensor(D_label, dtype=torch.float64, requires_grad=True).cuda()
# return D_label
def save_checkpoint(epoch, model_G, model_D, optim_G, optim_D, lr_scheduler_G, lr_scheduler_D):
    """Persist a full training snapshot to CHECKPOINT_<epoch>.tar.

    Saves both networks, both optimizers and both LR schedulers so that
    training can resume exactly where it stopped.
    """
    state = {'epoch': epoch}
    state['model_G_state_dict'] = model_G.state_dict()
    state['model_D_state_dict'] = model_D.state_dict()
    state['optim_G_state_dict'] = optim_G.state_dict()
    state['optim_D_state_dict'] = optim_D.state_dict()
    state['lr_scheduler_G_state_dict'] = lr_scheduler_G.state_dict()
    state['lr_scheduler_D_state_dict'] = lr_scheduler_D.state_dict()
    print('saving a checkpoint in epoch {}'.format(epoch))
    torch.save(state, osp.join(settings.CHECKPOINT_DIR, 'CHECKPOINT_' + str(epoch) + '.tar'))
def train_one_epoch(model_G, model_D, optim_G, optim_D, dataloader, test_dataloader, epoch,
                    upsample, ce_loss, bce_loss, writer, print_freq=5, eval_freq=settings.EVAL_FREQ):
    """Run one epoch of adversarial semantic-segmentation training.

    Per batch: (1) generator step — cross-entropy on the labels plus a
    LAMBDA_ADV_SEG-weighted adversarial term pushing D to score the softmax
    prediction as ground truth; (2) discriminator step — D learns to tell
    predictions (target 0) from one-hot ground truth (target 1). Loss and
    learning-rate scalars go to TensorBoard every *print_freq* iterations.
    Every *eval_freq*-th epoch a train confusion matrix is accumulated and
    a validation pass runs at the end of the epoch.

    NOTE(review): the *eval_freq* default binds settings.EVAL_FREQ at
    import time; `predict` is only assigned when settings.MODALITY is
    'rgb' or 'middle' — any other value raises NameError on first use.
    """
    max_iter = len(dataloader)
    # initialize losses
    loss_G_seg_values = []
    loss_adv_seg_values = []
    loss_D_values = []
    eval_trainval = False
    if epoch % eval_freq == 0:
        eval_trainval = True
    # confusion matrix ; to track metrics such as mIoU during training
    conf_mat = np.zeros((settings.NUM_CLASSES, settings.NUM_CLASSES))
    # labels for adversarial training
    pred_label = 0
    gt_label = 1
    for i_iter, batch in enumerate(dataloader):
        images, depths, labels = batch
        images = images.cuda()
        depths = depths.cuda()
        labels = labels.cuda()
        optim_G.zero_grad()
        optim_D.zero_grad()
        ####### train generator #######
        # disable accumulating grads in discriminator
        for param in model_D.parameters():
            param.requires_grad = False
        # get a mask where an elemnt is True for every pixel with ignore_label value
        ignore_mask = (labels == settings.IGNORE_LABEL)
        target_mask = torch.logical_not(ignore_mask)
        target_mask = target_mask.unsqueeze(dim=1)
        # get the output of generator
        if settings.MODALITY == 'rgb':
            predict = upsample(model_G(images))
        elif settings.MODALITY == 'middle':
            predict = upsample(model_G(images, depths))
        # calculate cross-entropy loss
        loss_G_seg = ce_loss(predict, labels)
        # calculate adversarial loss
        D_output = upsample(model_D(F.softmax(predict, dim=1)))
        loss_adv = bce_loss(D_output, make_D_label(gt_label, D_output), target_mask)
        # accumulate loss, backward and store value
        loss_G = loss_G_seg + settings.LAMBDA_ADV_SEG * loss_adv
        loss_G.backward()
        loss_G_seg_values.append(loss_G_seg.data.cpu().numpy())
        loss_adv_seg_values.append(loss_adv.data.cpu().numpy())
        if eval_trainval:
            # get pred and gt to compute confusion matrix
            seg_pred = np.argmax(predict.detach().cpu().numpy(), axis=1)
            seg_gt = labels.cpu().numpy().copy()
            # keep only pixels that are not IGNORE_LABEL
            seg_pred = seg_pred[target_mask.squeeze(dim=1).cpu().numpy()]
            seg_gt = seg_gt[target_mask.squeeze(dim=1).cpu().numpy()]
            conf_mat += confusion_matrix(seg_gt, seg_pred, labels=np.arange(settings.NUM_CLASSES))
        ####### end of train generator #######
        ####### train discriminator #######
        # activate the gradient accumulation in D
        for param in model_D.parameters():
            param.requires_grad = True
        # detach from G
        predict = predict.detach()
        D_output = upsample(model_D(F.softmax(predict, dim=1)))
        loss_D = bce_loss(D_output, make_D_label(pred_label, D_output), target_mask)
        loss_D.backward()
        loss_D_values.append(loss_D.data.cpu().numpy())
        # pass ground truth to discriminator
        gt = labels.clone().detach().cuda()
        gt_one_hot = F.one_hot(gt, num_classes=settings.NUM_CLASSES).permute(0,3,1,2).contiguous().float()
        D_output = upsample(model_D(gt_one_hot))
        loss_D = bce_loss(D_output, make_D_label(gt_label, D_output), target_mask)
        loss_D.backward()
        loss_D_values.append(loss_D.data.cpu().numpy())
        ####### end of train discriminator #######
        # a single optimizer step applies both accumulated D backward passes
        optim_G.step()
        optim_D.step()
        if i_iter % print_freq == 0 and i_iter != 0:
            # report the mean of the losses collected since the last report
            loss_G_seg_value = np.mean(loss_G_seg_values)
            loss_G_seg_values = []
            loss_adv_seg_value = np.mean(loss_adv_seg_values)
            loss_adv_seg_values = []
            loss_D_value = np.mean(loss_D_values)
            loss_D_values = []
            writer.add_scalar('Loss_G_SEG/Train', loss_G_seg_value, i_iter+epoch*max_iter)
            writer.add_scalar('Loss_G_SEG_ADV/Train', loss_adv_seg_value, i_iter+epoch*max_iter)
            writer.add_scalar('Loss_D/Train', loss_D_value, i_iter+epoch*max_iter)
            writer.add_scalar('learning_rate_G/Train', optim_G.param_groups[0]['lr'], i_iter+epoch*max_iter)
            writer.add_scalar('learning_rate_D/Train', optim_D.param_groups[0]['lr'], i_iter+epoch*max_iter)
            print("epoch = {:3d}/{:3d}: iter = {:3d},\t loss_seg = {:.3f},\t loss_adv = {:.3f},\t loss_d = {:.3f}".format(
                epoch, settings.EPOCHS, i_iter, loss_G_seg_value, loss_adv_seg_value, loss_D_value))
    if eval_trainval:
        # flush train metrics, then run a full validation pass
        save_metrics(conf_mat, writer, epoch*max_iter, 'Train')
        conf_mat = evaluate(model_G, test_dataloader)
        save_metrics(conf_mat, writer, epoch*max_iter, 'Val')
        # evaluate() switches the model to eval mode; restore training mode
        model_G.train()
def main():
    """Entry point: build models, data, optimizers and schedulers, optionally
    resume from settings.LAST_CHECKPOINT, then train for settings.EPOCHS
    epochs with periodic checkpointing and a final model save."""
    # set torch and numpy seed for reproducibility
    torch.manual_seed(27)
    np.random.seed(27)
    # tensorboard writer
    writer = SummaryWriter(settings.TENSORBOARD_DIR)
    # makedir snapshot
    makedir(settings.CHECKPOINT_DIR)
    # enable cudnn
    torch.backends.cudnn.enabled = True
    # create segmentor network
    model_G = Segmentor(pretrained=settings.PRETRAINED, num_classes=settings.NUM_CLASSES,
                        modality=settings.MODALITY)
    model_G.train()
    model_G.cuda()
    torch.backends.cudnn.benchmark = True
    # create discriminator network
    model_D = Discriminator(settings.NUM_CLASSES)
    model_D.train()
    model_D.cuda()
    # dataset and dataloader
    dataset = TrainDataset()
    dataloader = data.DataLoader(dataset, batch_size=settings.BATCH_SIZE,
                                 shuffle=True, num_workers=settings.NUM_WORKERS,
                                 pin_memory=True, drop_last=True)
    test_dataset = TestDataset(data_root=settings.DATA_ROOT_VAL, data_list=settings.DATA_LIST_VAL)
    test_dataloader = data.DataLoader(test_dataset, batch_size=1, shuffle=False,
                                      num_workers=settings.NUM_WORKERS, pin_memory=True)
    # optimizer for generator network (segmentor)
    optim_G = optim.SGD(model_G.optim_parameters(settings.LR), lr=settings.LR,
                        momentum=settings.LR_MOMENTUM, weight_decay=settings.WEIGHT_DECAY)
    # lr scheduler for optimi_G: polynomial decay over the full epoch budget
    lr_lambda_G = lambda epoch: (1 - epoch / settings.EPOCHS) ** settings.LR_POLY_POWER
    lr_scheduler_G = optim.lr_scheduler.LambdaLR(optim_G, lr_lambda=lr_lambda_G)
    # optimizer for discriminator network
    optim_D = optim.Adam(model_D.parameters(), settings.LR_D)
    # lr scheduler for optimi_D
    lr_lambda_D = lambda epoch: (1 - epoch / settings.EPOCHS) ** settings.LR_POLY_POWER
    lr_scheduler_D = optim.lr_scheduler.LambdaLR(optim_D, lr_lambda=lr_lambda_D)
    # losses
    ce_loss = CrossEntropyLoss2d(ignore_index=settings.IGNORE_LABEL) # to use for segmentor
    bce_loss = BCEWithLogitsLoss2d() # to use for discriminator
    # upsampling for the network output
    upsample = nn.Upsample(size=(settings.CROP_SIZE, settings.CROP_SIZE), mode='bilinear', align_corners=True)
    # # labels for adversarial training
    # pred_label = 0
    # gt_label = 1
    # load the model to resume training
    last_epoch = -1
    if settings.RESUME_TRAIN:
        checkpoint = torch.load(settings.LAST_CHECKPOINT)
        model_G.load_state_dict(checkpoint['model_G_state_dict'])
        model_G.train()
        model_G.cuda()
        model_D.load_state_dict(checkpoint['model_D_state_dict'])
        model_D.train()
        model_D.cuda()
        optim_G.load_state_dict(checkpoint['optim_G_state_dict'])
        optim_D.load_state_dict(checkpoint['optim_D_state_dict'])
        lr_scheduler_G.load_state_dict(checkpoint['lr_scheduler_G_state_dict'])
        lr_scheduler_D.load_state_dict(checkpoint['lr_scheduler_D_state_dict'])
        last_epoch = checkpoint['epoch']
        # purge the logs after the last_epoch
        writer = SummaryWriter(settings.TENSORBOARD_DIR, purge_step=(last_epoch+1)*len(dataloader))
    for epoch in range(last_epoch+1, settings.EPOCHS+1):
        train_one_epoch(model_G, model_D, optim_G, optim_D, dataloader, test_dataloader, epoch,
                        upsample, ce_loss, bce_loss, writer, print_freq=5, eval_freq=settings.EVAL_FREQ)
        if epoch % settings.CHECKPOINT_FREQ == 0 and epoch != 0:
            save_checkpoint(epoch, model_G, model_D, optim_G, optim_D,
                            lr_scheduler_G, lr_scheduler_D)
        # save the final model
        if epoch >= settings.EPOCHS:
            print('saving the final model')
            save_checkpoint(epoch, model_G, model_D, optim_G, optim_D,
                            lr_scheduler_G, lr_scheduler_D)
            writer.close()
        # decay both learning rates once per epoch
        lr_scheduler_G.step()
        lr_scheduler_D.step()
# Script entry point: run the full adversarial training loop.
if __name__ == "__main__":
    main()
# for i_iter in range(settings.MAX_ITER):
# # initialize losses
# loss_G_seg_value = 0
# loss_adv_seg_value = 0
# loss_D_value = 0
# # clear optim gradients and adjust learning rates
# optim_G.zero_grad()
# optim_D.zero_grad()
# lr_poly_scheduler(optim_G, optim_D, settings.LR, settings.LR_D, settings.LR_DECAY_ITER,
# i_iter, settings.MAX_ITER, settings.LR_POLY_POWER)
# ####### train generator #######
# # not accumulate grads in discriminator
# for param in model_D.parameters():
# param.requires_grad = False
# # get the batch of data
# try:
# _, batch = next(dataloader_iter)
# except:
# dataloader_iter = enumerate(dataloader)
# _, batch = next(dataloader_iter)
# images, depths, labels = batch
# images = images.cuda()
# depths = depths.cuda()
# labels = labels.cuda()
# # get a mask where is True for every pixel with ignore_label value
# ignore_mask = (labels == settings.IGNORE_LABEL)
# target_mask = torch.logical_not(ignore_mask)
# target_mask = target_mask.unsqueeze(dim=1)
# # get the output of generator
# if settings.MODALITY == 'rgb':
# predict = upsample(model_G(images))
# elif settings.MODALITY == 'middle':
# predict = upsample(model_G(images, depths))
# # calculate cross-entropy loss
# loss_G_seg = ce_loss(predict, labels)
# # calculate adversarial loss
# D_output = upsample(model_D(F.softmax(predict, dim=1)))
# loss_adv = bce_loss(D_output, make_D_label(gt_label, D_output), target_mask)
# # accumulate loss, backward and store value
# loss = loss_G_seg + settings.LAMBDA_ADV_SEG * loss_adv
# loss.backward()
# loss_G_seg_value += loss_G_seg.data.cpu().numpy()
# loss_adv_seg_value += loss_adv.data.cpu().numpy()
# ####### end of train generator #######
# ####### train discriminator #######
# # pass prediction to discriminator
# # reset the gradient accumulation
# for param in model_D.parameters():
# param.requires_grad = True
# # detach from G
# predict = predict.detach()
# D_output = upsample(model_D(F.softmax(predict, dim=1)))
# loss_D = bce_loss(D_output, make_D_label(pred_label, D_output), target_mask)
# loss_D.backward()
# loss_D_value += loss_D.data.cpu().numpy()
# # pass ground truth to discriminator
# gt = labels.clone().detach().cuda()
# gt_one_hot = F.one_hot(gt, num_classes=settings.NUM_CLASSES).permute(0,3,1,2).float()
# D_output = upsample(model_D(gt_one_hot))
# loss_D = bce_loss(D_output, make_D_label(gt_label, D_output), target_mask)
# loss_D.backward()
# loss_D_value += loss_D.data.cpu().numpy()
# ####### end of train discriminator #######
# optim_G.step()
# optim_D.step()
# # get pred and gt to compute confusion matrix
# seg_pred = np.argmax(predict.cpu().numpy(), axis=1)
# seg_gt = labels.cpu().numpy().copy()
# seg_pred = seg_pred[target_mask.squeeze(dim=1).cpu().numpy()]
# seg_gt = seg_gt[target_mask.squeeze(dim=1).cpu().numpy()]
# conf_mat += confusion_matrix(seg_gt, seg_pred, labels=np.arange(settings.NUM_CLASSES))
# ####### log ########
# if i_iter % ((settings.TRAIN_SIZE // settings.BATCH_SIZE)) == 0 and i_iter != 0:
# metrics = evaluate(conf_mat)
# writer.add_scalar('Pixel Accuracy/Train', metrics['pAcc'], i_iter)
# writer.add_scalar('Mean Accuracy/Train', metrics['mAcc'], i_iter)
# writer.add_scalar('mIoU/Train', metrics['mIoU'], i_iter)
# writer.add_scalar('fwavacc/Train', metrics['fIoU'], i_iter)
# conf_mat = np.zeros_like(conf_mat)
# writer.add_scalar('Loss_G_SEG/Train', loss_G_seg_value, i_iter)
# writer.add_scalar('Loss_D/Train', loss_D_value, i_iter)
# writer.add_scalar('Loss_G_SEG_adv/Train', loss_adv_seg_value, i_iter)
# writer.add_scalar('learning_rate_G/Train', optim_G.param_groups[0]['lr'], i_iter)
# writer.add_scalar('learning_rate_D/Train', optim_D.param_groups[0]['lr'], i_iter)
# print( "iter = {:6d}/{:6d},\t loss_seg = {:.3f}, loss_adv = {:.3f}, loss_D = {:.3f}".format(
# i_iter, settings.MAX_ITER,
# loss_G_seg_value,
# loss_adv_seg_value,
# loss_D_value))
# with open(settings.LOG_FILE, "a") as f:
# output_log = '{:6d},\t {:.8f},\t {:.8f},\t {:.8f}\n'.format(
# i_iter,
# loss_G_seg_value,
# loss_adv_seg_value,
# loss_D_value)
# f.write(output_log)
# # taking snapshot
# if i_iter >= settings.MAX_ITER:
# print('saving the final model ...')
# torch.save(model_G.state_dict(),osp.join(settings.CHECKPOINT_DIR, 'CHECKPOINT_'+str(settings.MAX_ITER)+'.pt'))
# torch.save(model_D.state_dict(),osp.join(settings.CHECKPOINT_DIR, 'CHECKPOINT_'+str(settings.MAX_ITER)+'_D.pt'))
# break
# if i_iter % settings.SAVE_EVERY == 0 and i_iter != 0:
# print('taking snapshot ...')
# torch.save(model_G.state_dict(),osp.join(settings.CHECKPOINT_DIR, 'CHECKPOINT_'+str(i_iter)+'.pt'))
# torch.save(model_D.state_dict(),osp.join(settings.CHECKPOINT_DIR, 'CHECKPOINT_'+str(i_iter)+'_D.pt'))
| ElhamGhelichkhan/semiseggan | train.py | train.py | py | 17,083 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.ones_like",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.save",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
37407105905 | import pandas as pd
from sklearn.tree import DecisionTreeClassifier, plot_tree
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
# read in your data
df = pd.read_excel("CreditRisk.xlsx")
# define your independent and dependent variables
X = df[['Volume', 'Value', 'Age']]
y = df['Status']
# encode your categorical variables
# NOTE(review): LabelEncoder is applied column-by-column via apply(), so each
# column gets its own independent integer coding; Volume/Value/Age look
# numeric already — confirm they are truly categorical in this dataset.
label = LabelEncoder()
X = X.apply(label.fit_transform)
print(X)
# fit your tree model
# min_samples_split=30: no node with fewer than 30 samples is split further.
tree = DecisionTreeClassifier(min_samples_split = 30)
tree = tree.fit(X, y)
# visualize the tree
plt.figure(figsize = (10,5))
plot_tree(tree, filled = True, feature_names = X.columns, class_names = ["Unpaid", "paid"])
plt.show()
| HyperionDevBootcamps/C4_DS_lecture_examples | Lecture code/Machine Learning/Decision Trees/Decision_Trees_cat.py | Decision_Trees_cat.py | py | 702 | python | en | code | 37 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.tree.DecisionTreeClassifier",
"line_number": 19,
"usage_type": "call"
},
... |
33589796845 | import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class fist_bookSpider(CrawlSpider):
    """Crawl every reachable page of books.toscrape.com and emit each
    <h1> heading together with the URL it was found on."""

    name = 'fist_book'
    allowed_domains = ['books.toscrape.com']
    start_urls = ['http://books.toscrape.com/']
    # Follow every link on every page; each fetched page is handed to parse_item.
    rules = (
        Rule(LinkExtractor(), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Yield one {'text', 'page'} record per <h1> on the page."""
        for heading in response.xpath('//h1/text()').getall():
            yield {'text': heading, 'page': response.url}
| andycortex/blog-scraper | first_book.py | first_book.py | py | 598 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.spiders.CrawlSpider",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "scrapy.spiders.Rule",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scrapy.linkextractors.LinkExtractor",
"line_number": 12,
"usage_type": "call"
}
] |
30133687535 | #!/ust/bin/python3
import fire
import sys
from functools import reduce
import os.path
# ANSI escape sequences used to colorize terminal output.
red = '\033[0;31m'
green = '\033[0;32m'
yellow = '\033[0;33m'
blue = '\033[0;34m'
purple = '\033[0;35m'
cyan = '\033[0;36m'
white = '\033[0;37m'
# Reset sequence appended after every colored message.
end = '\033[0m'
# The hosts file this tool edits in place.
host_file = '/etc/hosts'
# Path of the replacement group file; set by main() when verb is '-r'.
new_file_path = None
def log_error(*msg):
    """Print an error line in red, prefixed with 'ERROR | '.

    Accepts any number of positional arguments; each is stringified and
    they are joined with single spaces. The previous implementation
    tested `msg is None` (never true for a varargs tuple) and crashed in
    functools.reduce when called with zero arguments; str.join handles
    the empty case naturally.
    """
    msg_str = ' '.join(str(m) for m in msg)
    print('%s%s%s%s' % (red, 'ERROR | ', msg_str, end))
def log_info(msg):
    """Print an informational line in green, prefixed with 'INFO | '."""
    print(green + 'INFO | ' + str(msg) + end)
def print_param(verb, args, comment):
    """Print one aligned row of the verb table: colored verb, colored args, comment."""
    row = ' %s%-5s %s%-12s %s%s' % (green, verb, yellow, args, end, comment)
    print(row)
def help():
    """Print the usage banner followed by one row per supported verb."""
    print('run: python3 app.py %s %s<verb>%s <args>%s' % ('', green, yellow, end))
    # (verb, expected args, description) — printed in this fixed order
    for verb, args, comment in (
            ('-h', '', 'help'),
            ('-a', 'group file', 'add host group'),
            ('-r', 'group file', 'replace host group'),
            ('-on', 'group', 'uncomment host group'),
            ('-off', 'group', 'comment host group')):
        print_param(verb, args, comment)
def get_group_start(value) -> str:
    """Return the marker line that opens a host group block."""
    return '## op [ {} ]'.format(value)
def get_group_end(value) -> str:
    """Return the marker line that closes a host group block."""
    return '## ed [ {} ]'.format(value)
def has_contain_group(group) -> bool:
    """Return True if host_file already contains the start marker for *group*."""
    marker = get_group_start(group)
    with open(host_file) as fh:
        return any(marker in line for line in fh)
def append_group(group, file_path):
    """Append the contents of *file_path* to host_file as a new named group.

    Refuses to add a group whose start marker is already present, and
    exits the process (via assert_file) when *file_path* does not exist.
    The copied lines are also echoed to stdout.
    """
    if has_contain_group(group):
        log_error('group already exist', group)
        return
    assert_file(file_path)
    # 'with' guarantees the hosts file is flushed and closed even on error;
    # the previous version leaked the handle returned by open().
    with open(host_file, 'a') as host:
        host.write('\n' + get_group_start(group) + '\n\n')
        with open(file_path) as src:
            for line in src:
                host.write(line)
                print(line, end='')
        print()
        host.write('\n\n' + get_group_end(group) + '\n')
    log_info('append group successful')
# add # for content in group
def comment_content(result_lines, line, content_flag):
    """Append *line* to *result_lines*, prefixing '#' when inside a group
    and the line is not commented yet."""
    needs_comment = content_flag and not line.startswith('#')
    result_lines.append('#' + line if needs_comment else line)
# remove # for content in group
def uncomment_content(result_lines, line, content_flag):
    """Append *line* to *result_lines*, stripping one leading '#' when
    inside a group."""
    strip_comment = content_flag and line.startswith('#')
    result_lines.append(line[1:] if strip_comment else line)
# read origin file, write back origin file with the result list
def replace_content(group, content_func=None, logic_func=None):
    """Rewrite host_file by piping its lines through *content_func*.

    Does nothing (after logging) when *group* has no start marker in the
    hosts file. *logic_func* is forwarded unchanged to *content_func*.
    """
    if not has_contain_group(group):
        log_error('group not exist')
        return
    with open(host_file) as fh:
        original_lines = fh.readlines()
    write_to_hosts(content_func(group, original_lines, logic_func))
# func value, trans into replace_content
def open_close_group(group, lines, logic_func) -> []:
    """Walk *lines*, toggling in-group state at the group markers.

    Marker lines are copied through verbatim; every other line is handed
    to *logic_func* together with the current in-group flag. Exits the
    process when no *logic_func* was supplied and a content line is hit.
    """
    start_marker = get_group_start(group)
    end_marker = get_group_end(group)
    inside = False
    result_lines = []
    for line in lines:
        if start_marker in line:
            inside = True
            result_lines.append(line)
        elif end_marker in line:
            inside = False
            result_lines.append(line)
        else:
            if logic_func is None:
                log_error('must have logic func')
                sys.exit()
            logic_func(result_lines, line, inside)
    return result_lines
# func value , trans into replace_content
def replace_group_content(group, lines, logic_func=None):
    """Return *lines* with the body of *group* replaced.

    Lines between the group's start and end markers are dropped; just
    before the end marker the contents of the module-global new_file_path
    (plus a separating blank line) are spliced in. *logic_func* exists
    only for signature compatibility with open_close_group and is
    ignored. A leftover debug print of new_file_path was removed.
    """
    result_lines = []
    inside_group = False
    for line in lines:
        if get_group_start(group) in line:
            inside_group = True
            result_lines.append(line)
            continue
        if get_group_end(group) in line:
            inside_group = False
            # splice the replacement body in just before the end marker
            with open(new_file_path, 'r') as src:
                result_lines.extend(src.readlines())
            result_lines.append('\n')
            result_lines.append(line)
            continue
        if not inside_group:
            result_lines.append(line)
    return result_lines
def write_to_hosts(lines):
    """Overwrite host_file with the given list of lines; None is a no-op."""
    if lines is None:
        return
    with open(host_file, 'w+') as out:
        out.write(''.join(lines))
def assert_file(file_path):
    """Exit with status 1 (after logging an error) unless *file_path* exists."""
    if os.path.exists(file_path):
        return
    log_error('file not found:', file_path)
    sys.exit(1)
def assert_param(args, count):
    """Exit with status 1 (after logging) unless *args* has at least *count* items."""
    if len(args) >= count:
        return
    log_error('invalid param, at least need', count)
    sys.exit(1)
def main(verb=None, *args):
    """CLI dispatcher: route *verb* (-h/-a/-r/-on/-off) to the matching action.

    Invoked through fire.Fire, so *verb* and *args* come straight from the
    command line. Each branch validates its argument count (assert_param
    exits the process on failure).
    """
    # the tool is useless if the hosts file itself is missing
    assert_file(host_file)
    if verb == '-h':
        help()
        sys.exit(0)
    if verb == '-a':
        assert_param(args, 2);
        append_group(group=args[0], file_path=args[1])
    if verb == '-r':
        assert_param(args, 2);
        # replace_group_content reads the replacement file through this
        # module global rather than a parameter
        global new_file_path
        new_file_path=args[1]
        replace_content(group=args[0], content_func=replace_group_content)
    if verb == '-on':
        assert_param(args, 1);
        replace_content(group=args[0], content_func=open_close_group, logic_func=uncomment_content)
    if verb == '-off':
        assert_param(args, 1);
        replace_content(group=args[0], content_func=open_close_group, logic_func=comment_content)
# Expose main() as the command line interface: python3 app.py <verb> <args...>
fire.Fire(main)
| Kuangcp/Script | python/tool/switch-host-group/app.py | app.py | py | 5,437 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "functools.reduce",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "os.path.path.exists",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"l... |
74429979942 | import streamlit as st
from fastai.vision.all import *
import plotly.express as px
import pathlib
from streamlit_option_menu import option_menu
from apps import home, app, contact
# NOTE(review): pathlib.PosixPath is saved but never used or restored —
# presumably the start of a Windows/Posix pickling workaround; confirm.
temp = pathlib.PosixPath
with st.sidebar:
    navbar = option_menu("Main Menu", ["Home", "Project", "Contact"],
                         icons=['house', 'code-slash', "person-lines-fill"],
                         menu_icon="cast",
                         default_index=0,
                         styles={
                             "container": {"padding": "0!important", "background-color": "#0AAAB3"},
                             "icon": {"color": "orange", "font-size": "25px"},
                             "nav-link": {"font-size": "25px", "text-align": "left", "margin":"0px", "--hover-color": "#C8F3DB"},
                             "nav-link-selected": {"background-color": "green"},
                         }
                         )
# Dispatch to the page selected in the sidebar. NOTE(review): each branch
# rebinds the imported module's name (home/contact) to its app_func,
# shadowing the import; this works only because Streamlit reruns the whole
# script from the top on every interaction.
if navbar == "Home":
    home = home.app_func
    home()
if navbar == "Project":
    project = app.app_func
    project()
if navbar == "Contact":
    contact = contact.app_func
    contact()
# Static "About" panel shown below the menu in the sidebar.
with st.sidebar:
    st.sidebar.title("About")
    st.title("Farkhod Khojikurbonov")
    st.image("image/farkhod.jpg", width=150)
    st.sidebar.info(
        """
        Github: \nhttps://github.com/farkhod-developer
        \nTelegram: \nhttps://t.me/Farkhod_Developerr
        \nEmail: \nhttps://farhodand92@gmail.com
        ©️ 2022 Farkhod Khojikurbonov
        """
    )
| farkhod-developer/DL_Image_Classification_Model | manage.py | manage.py | py | 1,434 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.PosixPath",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "streamlit.sidebar",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "streamlit_option_menu.option_menu",
"line_number": 11,
"usage_type": "call"
},
{
"ap... |
14086949836 | import streamlit as st
from pages.common.queries import run_query
from pages.common.presenter import display_result
from pages.common.utils import convert_template
from pages.common.plotting import get_figure
import logging
# Silence pybatfish's chatty INFO-level logging.
logging.getLogger("pybatfish").setLevel(logging.WARNING)
APP = """This is a Streamlit app that enables the user to run network analysis
queries using [Batfish](https://www.batfish.org/).
The app allows the user to select a Batfish question by category and name.
The app runs the selected question and displays the results in a table.
All answered questions are saved.
Find more information about Batfish questions
[here](https://batfish.readthedocs.io/en/latest/index.html).
"""
# Start Page Here
st.set_page_config(layout="wide")
st.header("Network Analysis")
# st.markdown(APP)
# Get selected questions
# qlist/activesnap are populated by other pages of the app via session state.
qlist = st.session_state.get("qlist")
if "activesnap" in st.session_state:
    st.subheader(f"Snapshot: {st.session_state.activesnap['name']}")
    # Run selected questions
    if qlist:
        qs = convert_template(qlist)
        q_names = [q["name"] for q in qs]
        # one tab per selected question, answered lazily per tab
        tabs = st.tabs(q_names)
        for idx, tab in enumerate(tabs):
            with tab:
                if qs[idx].get("options"):
                    st.write(f"**Options:** {qs[idx]['options']}")
                answer = run_query(qs[idx])
                display_result(qs[idx]["fun"], answer)
                # Plot some answers
                # only topology-edge questions get a graph rendering
                if qs[idx]["fun"] in ["layer3Edges", "userProvidedLayer1Edges"]:
                    _, col, _ = st.columns([1, 2, 1])
                    fig = get_figure(answer.frame())
                    col.pyplot(fig)
    else:
        st.warning("Select some questions to proceed.")
else:
    st.warning("Please add a snapshot to continue.")
| martimy/Bat-Q | pages/2_Analysis.py | 2_Analysis.py | py | 1,801 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "streamlit.set_page_config",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "stream... |
69930126823 | import pygame
import BulletClass
from EnemiesControllerClass import EnemiesController
class PlayerShip:
    """The player-controlled ship: movement, rendering, shooting and
    collision checks against enemies and their bullets."""
    # Horizontal bounce speed (pixels/frame) used at the screen edges.
    ShipSpeed = 7
    # Static variable containing all the bullets the ship has fired
    BULLET_RESOURCE = "../Resources/Images/bullet.png"
    Bullets = []
    def __init__(self, imageLocation, screenSize):
        """Load and scale the sprite, place the ship at its spawn point and
        preload the firing sound."""
        self.shipSprite = pygame.image.load(imageLocation)
        self.shipSprite = pygame.transform.scale(self.shipSprite, (50, 50))
        self.shipRect = self.shipSprite.get_rect()
        # hard-coded spawn position
        self.shipRect.x = 320
        self.shipRect.y = 400
        self.bulletSound = pygame.mixer.Sound("../Resources/Sounds/bullet_sound.wav")
        self.screenSize = screenSize
    def move(self, x, y):
        """Move the ship by (x, y), bouncing off the left/right screen edges."""
        # Next two if statements constrain the ship to within the bounds of the screen
        if x < 0 and (self.shipRect.x + x) < 0:
            x = PlayerShip.ShipSpeed
        if x > 0 and ((self.shipRect.x+self.shipRect.w) + x) > self.screenSize[0]:
            x = -PlayerShip.ShipSpeed
        speed = [x, y]
        self.shipRect = self.shipRect.move(speed)
    def display(self, screen):
        """Blit the ship sprite at its current rect."""
        screen.blit(self.shipSprite, self.shipRect)
    def shoot(self):
        """Spawn a player bullet above the ship and play the firing sound."""
        # Create a bullet
        xLoc = self.shipRect.x + self.shipRect.w/4
        yLoc = self.shipRect.y
        bullet = BulletClass.Bullet(xLoc, yLoc, PlayerShip.BULLET_RESOURCE, "player")
        PlayerShip.Bullets.append(bullet)
        self.bulletSound.play()
    def moveBulletsAndDisplay(self, screen):
        """Advance and draw all player bullets; returns True if the player was hit.

        NOTE(review): PlayerShip.Bullets is mutated (remove) while being
        iterated, which can skip the element following a removed bullet —
        consider iterating over a copy of the list.
        """
        collided = self.checkCollisionForEnemyBullets()
        if (collided):
            return True
        for bullet in PlayerShip.Bullets:
            # If the bullet is off screen, remove it from the bullet list
            if bullet.bulletRect.y < 0:
                PlayerShip.Bullets.remove(bullet)
            else: # Otherwise proceed as normal
                bullet.move("player")
                bullet.display(screen)
    def checkCollisionForEnemyBullets(self):
        """Return True (and remove the collider) when an enemy body or an
        enemy bullet touches the ship; returns None when nothing hit."""
        for enemy in EnemiesController.EnemyList:
            # Check for actual collision with the enemy
            if (enemy.enemyRect.colliderect(self.shipRect)):
                EnemiesController.EnemyList.remove(enemy)
                print("You lose sucka!")
                return True
            # Check for a collision with that enemies bullets
            for bullet in enemy.bullets:
                if (bullet.bulletRect.colliderect(self.shipRect)):
                    enemy.bullets.remove(bullet)
                    print("You lose sucka!")
                    return True
| ErikTillberg/Space_Invaders_Clone | Game/PlayerShipClass.py | PlayerShipClass.py | py | 2,586 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.image.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.tra... |
34445125382 | #!/usr/bin/env python
import os
import sys
import fileinput
import datetime
import logging
logFile = 'setup.log'
logging.basicConfig( filename = logFile,filemode = 'w', level = logging.INFO,format = '%(asctime)s - %(levelname)s: %(message)s', datefmt = '%m/%d/%Y %I:%M:%S %p' )
#import poaMenu
def getVarFromFile(fileName):
    """Load *fileName* (config.txt) as a Python module and expose it as the
    module-global `data`, so settings are read as data.<name>.

    NOTE(review): relies on the deprecated `imp` module (removed in Python
    3.12); imp.load_source executes the file, so config.txt must contain
    valid Python assignments. The open handle is passed as a third
    argument with an empty pathname — confirm this works on the target
    interpreter and migrate to importlib when possible.
    """
    import imp
    f = open(fileName)
    global data
    data = imp.load_source('data', '', f)
    f.close()
def replace(oldStr, newStr, fileName):
    """Replace every occurrence of *oldStr* with *newStr* in *fileName*, in place.

    fileinput with inplace=1 redirects stdout into the file, so everything
    written to sys.stdout during the loop becomes the new file content.
    The previous version wrapped the iterator in enumerate() but never
    used the index; the counter has been dropped.
    """
    for line in fileinput.input(fileName, inplace=1):
        sys.stdout.write(line.replace(oldStr, newStr))
# Get all params from config.txt (later we will also add a menu)
getVarFromFile('config.txt')

# ---- install prerequisites -------------------------------------------------
logging.info('Updating your VM')
os.system('sudo apt-get update && sudo apt-get -y upgrade')
logging.info('Installing python-pip')
os.system('sudo apt-get install python-pip')
logging.info('Installing git')
os.system('sudo apt install git')
## Install ansible
logging.info('Installing Ansible')
os.system('sudo pip install ansible')
## Install packages
logging.info('Installing boto and boto3')
os.system('sudo pip install boto')
os.system('sudo pip install boto3')
## Make sure you have latest AWS CLI
logging.info('Making sure you have latest AWS CLI')
os.system('pip install awscli --upgrade --user')

# ---- fetch the playbook and select the branch for the chosen network -------
logging.info('Downloading Ansible playbook')
os.chdir('/home/ubuntu')
os.system('git clone https://github.com/poanetwork/deployment-playbooks.git')
os.chdir('deployment-playbooks')
logging.info("Selecting correct branch based on specified network type: [" + data.networkType + "]")
os.system("git checkout " + data.networkType)
# check that you ended up on a correct branch (look where the `*` is)
os.system('git branch')

# ---- SSH keys (assumes SSH keys for the remote server already exist) -------
logging.info('Coping SSH keys')
os.system('cat ~/.ssh/id_rsa.pub > files/admins.pub')
cmd = "cp files/admins.pub files/ssh_" + data.nodeType + ".pub"
# NOTE(review): cmd above is built but never executed (the original had
# os.system ('cmd') commented out — and quoted, so even enabled it would
# have run the literal string 'cmd'). Confirm whether the playbook needs
# files/ssh_<nodeType>.pub before enabling os.system(cmd).
logging.info('Configuring based on node type: [' + data.nodeType + ']')
os.system('cat group_vars/all.network group_vars/' + data.nodeType + '.example > group_vars/all')

# ---- fill group_vars/all with the values loaded from config.txt ------------
os.chdir('/home/ubuntu/deployment-playbooks/group_vars')
logging.info("Updating files with your information...")
fileName = "all"
# (placeholder text in group_vars/all, replacement built from config)
replacements = [
    ('access_key: "INSERT KEY HERE"', 'access_key: "' + data.access_key + '"'),
    ('secret_key: "INSERT SECRET HERE"', 'secret_key: "' + data.secret_key + '"'),
    ('awskeypair_name: "keypairname"', 'awskeypair_name: "' + data.awskeypair_name + '"'),
    ('NODE_FULLNAME: "INSERT NODENAME"', 'NODE_FULLNAME: "' + data.NODE_FULLNAME + '"'),
    ('NODE_ADMIN_EMAIL: "INSERT@EMAIL"', 'NODE_ADMIN_EMAIL: "' + data.NODE_ADMIN_EMAIL + '"'),
    ('NETSTATS_SERVER: "INSERT FULL URL"', 'NETSTATS_SERVER: "' + data.NETSTATS_SERVER + '"'),
    ('NETSTATS_SECRET: "INSERT SECRET"', 'NETSTATS_SECRET: "' + data.NETSTATS_SECRET + '"'),
    ("MINING_KEYFILE: 'INSERT HERE'", "MINING_KEYFILE: '" + data.MINING_KEYFILE + "'"),
    ('MINING_ADDRESS: "INSERT HERE"', 'MINING_ADDRESS: "' + data.MINING_ADDRESS + '"'),
    ('MINING_KEYPASS: "INSERT HERE"', 'MINING_KEYPASS: "' + data.MINING_KEYPASS + '"'),
    # the remaining values are written without surrounding quotes on purpose
    ('vpc_subnet_id = "subnet-ID-number"', 'vpc_subnet_id: ' + data.vpc_subnet_id),
    ('allow_validator_ssh: true', 'allow_validator_ssh: ' + data.allow_validator_ssh),
    ('allow_validator_p2p: true', 'allow_validator_p2p: ' + data.allow_validator_p2p),
    ('associate_validator_elastic_ip: false', 'associate_validator_elastic_ip: ' + data.associate_validator_elastic_ip),
]
for oldStr, newStr in replacements:
    replace(oldStr, newStr, fileName)

# ---- write the Ansible inventory and run the playbook ----------------------
logging.info('Creating HOSTS file')
os.chdir('/home/ubuntu/deployment-playbooks')
os.system("echo [" + data.nodeType + "] > hosts")
os.system("echo " + data.SERVER_IP + " >> hosts")
# run this script to configure the instance (might want to use paramiko - ssh via python)
logging.info('Running Ansible playbook and deploying')
os.system('ansible-playbook -i hosts site.yml')
print("Done\n==========\n")
logging.info('Done!')
## End of script
#### Additional items for improvements:
## Menu:
###-------------------------------------
#print ("Enter AWS access_key:")
#access_key = input( "> " )
#print ("Enter your FULL NAME. \n This would be visible to other members of the network")
#NODE_FULLNAME = input( "> " )
#print ("Enter your email. \n This would be visible to other members of the network")
#NODE_ADMIN_EMAIL = input( "> " )
#print ("Enter NETSTATS_SERVER - this should be a url provided to you by the Master of Ceremony")
#NETSTATS_SERVER = input( "> " )
#print ("Enter NETSTATS_SECRET - this should be a secret code provided to you by the Master of Ceremony")
#NETSTATS_SECRET = input( "> " )
#print ("Enter MINING_KEYFILE - this should be a secret code provided to you by the Master of Ceremony")
#MINING_KEYFILE = input( "> " )
## Also we could add a function to generate SSH keys and upload to remote server
# ssh-keygen -t rsa -b 4096 -C "your_email@example.com" | maratP/poa-devops | poa-node-setup.py | poa-node-setup.py | py | 7,598 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "imp.load_source",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "fileinput.input... |
20583493606 | import numpy as np
import cv2
import math
# Open the default webcam and show a static grayscale test image until 'q' is pressed.
cap = cv2.VideoCapture(0)

# The test image is loop-invariant: load and convert it ONCE instead of
# re-reading it from disk on every frame (the original did both per frame).
img = cv2.imread('D:/HW/OpenCV Workshop - distro2/OpenCV Workshop/tek1.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

while True:
    # A webcam frame is grabbed but never displayed (kept for parity with the original).
    ret, frame = cap.read()
    cv2.imshow('frame', gray)
    # waitKey(1) pumps the GUI event loop; quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
| Altair115/OpenCV2-Workshop | Opdrachten/Op2.py | Op2.py | py | 363 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_... |
31219990233 | import logging
import os
import sys
from logging import Logger
from typing import Any, Dict, List
import datasets
import torch
import transformers
import wandb
from transformers import TrainingArguments
from dp_arguments import DataTrainingArguments, ModelArguments
# Closed label inventories for each OntoNotes-style probing task.
LABEL_DICT = {}
# Named-entity recognition tags.
LABEL_DICT['ner'] = ['CARDINAL', 'DATE', 'EVENT', 'FAC', 'GPE', 'LANGUAGE',
    'LAW', 'LOC', 'MONEY', 'NORP', 'ORDINAL', 'ORG', 'PERCENT', 'PERSON', 'PRODUCT',
    'QUANTITY', 'TIME', 'WORK_OF_ART']
# Part-of-speech tags (Penn Treebank style, incl. punctuation tags).
LABEL_DICT['pos'] = ['$', "''", ',', '-LRB-', '-RRB-', '.', ':', 'ADD', 'AFX',
    'CC', 'CD', 'DT', 'EX', 'FW', 'HYPH', 'IN', 'JJ', 'JJR', 'JJS', 'LS', 'MD',
    'NFP', 'NN', 'NNP', 'NNPS', 'NNS', 'PDT', 'POS', 'PRP', 'PRP$', 'RB', 'RBR',
    'RBS', 'RP', 'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ',
    'WDT', 'WP', 'WP$', 'WRB', '``']
# Constituency (phrase-structure) labels.
LABEL_DICT['const'] = ['ADJP', 'ADVP', 'CONJP', 'EMBED', 'FRAG', 'INTJ', 'LST',
    'META', 'NAC', 'NML', 'NP', 'NX', 'PP', 'PRN', 'PRT', 'QP', 'RRC', 'S', 'SBAR',
    'SBARQ', 'SINV', 'SQ', 'TOP', 'UCP', 'VP', 'WHADJP', 'WHADVP', 'WHNP', 'WHPP',
    'X']
# Coreference is a binary decision.
LABEL_DICT['coref'] = ['False', 'True']
# Semantic-role labels (PropBank core args, modifiers, C-/R- variants).
LABEL_DICT['srl'] = ['ARG0', 'ARG1', 'ARG2', 'ARG3', 'ARG4', 'ARG5', 'ARGA',
    'ARGM-ADJ', 'ARGM-ADV', 'ARGM-CAU', 'ARGM-COM', 'ARGM-DIR', 'ARGM-DIS', 'ARGM-DSP',
    'ARGM-EXT', 'ARGM-GOL', 'ARGM-LOC', 'ARGM-LVB', 'ARGM-MNR', 'ARGM-MOD', 'ARGM-NEG',
    'ARGM-PNC', 'ARGM-PRD', 'ARGM-PRP', 'ARGM-PRR', 'ARGM-PRX', 'ARGM-REC', 'ARGM-TMP',
    'C-ARG0', 'C-ARG1', 'C-ARG2', 'C-ARG3', 'C-ARG4', 'C-ARGM-ADJ', 'C-ARGM-ADV',
    'C-ARGM-CAU', 'C-ARGM-COM', 'C-ARGM-DIR', 'C-ARGM-DIS', 'C-ARGM-DSP', 'C-ARGM-EXT',
    'C-ARGM-LOC', 'C-ARGM-MNR', 'C-ARGM-MOD', 'C-ARGM-NEG', 'C-ARGM-PRP', 'C-ARGM-TMP',
    'R-ARG0', 'R-ARG1', 'R-ARG2', 'R-ARG3', 'R-ARG4', 'R-ARG5', 'R-ARGM-ADV', 'R-ARGM-CAU',
    'R-ARGM-COM', 'R-ARGM-DIR', 'R-ARGM-EXT', 'R-ARGM-GOL', 'R-ARGM-LOC', 'R-ARGM-MNR',
    'R-ARGM-MOD', 'R-ARGM-PNC', 'R-ARGM-PRD', 'R-ARGM-PRP', 'R-ARGM-TMP']
# Replace each label string with an opaque token ("label0", "label1", ...)
# so downstream probes/prompts cannot exploit the label text itself.
for task in LABEL_DICT:
    LABEL_DICT[task] = {label: "label" + str(i) for i, label in enumerate(LABEL_DICT[task])}
def convert_gate_to_mask(gates, num_of_heads=None):
    """Turn continuous head-gate values into a binary {0, 1} head mask.

    If ``num_of_heads`` is given, exactly the ``num_of_heads`` highest-valued
    gates are kept; otherwise every gate above 0.5 is kept.
    """
    if num_of_heads is None:
        # Simple thresholding when no head budget is specified.
        return (gates > 0.5).float()
    flat = gates.view(-1)
    # Gate indices ordered best-first; keep only the leading budget.
    keep = flat.sort(descending=True)[1][:num_of_heads]
    mask = torch.zeros_like(flat)
    mask[keep] = 1.0
    return mask.view_as(gates)
class STEFunction(torch.autograd.Function):
    """Straight-through estimator: hard top-k selection in the forward pass,
    identity gradient in the backward pass."""

    @staticmethod
    def forward(ctx, input, k):
        # Keep entries strictly greater than the (k+1)-th largest value, i.e.
        # (absent ties) the k largest entries, as a {0, 1} float mask.
        # NOTE(review): assumes `input` is 1-D; for higher ranks `[k]` indexes
        # the leading dimension instead — confirm callers.
        threshold = input.sort(descending=True)[0][k]
        return (input > threshold).float()

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through: pass the gradient unchanged to `input`; the
        # integer argument `k` gets no gradient (None).
        return grad_output, None
def transform_dict(config_dict: Dict, expand: bool = True):
    """Convert an arbitrary config dict into a wandb-friendly (YAML-safe) dict.

    Scalars and None pass through unchanged. Iterables become index-keyed
    dicts when ``expand`` is True (so wandb can compare them across runs),
    or are converted back to lists otherwise. Nested dicts recurse. Anything
    else (classes, callables, instances) is serialized as "module:name".
    """
    result: Dict[str, Any] = {}
    for key, value in config_dict.items():
        if value is None or isinstance(value, (int, float, str)):
            result[key] = value
        elif isinstance(value, (list, tuple, set)):
            # Recurse over the items; keep dict form or rebuild the list.
            indexed = transform_dict(dict(enumerate(value)), expand)
            result[key] = indexed if expand else [indexed[i] for i in range(len(value))]
        elif isinstance(value, dict):
            result[key] = transform_dict(value, expand)
        else:
            # Classes/callables use __name__; instances fall back to the class name.
            name = value.__name__ if hasattr(value, '__name__') else value.__class__.__name__
            result[key] = f"{value.__module__}:{name}"
    return result
def hardmax2(t):
    """One-hot encode the argmax along the last dimension of ``t``.

    Ties resolve to the first maximal entry (argmax semantics); the result
    has the same shape and dtype as the input.
    """
    winners = t.argmax(dim=-1, keepdim=True)
    one_hot = torch.zeros_like(t)
    # Write a 1 at the winning position of every row in a single scatter.
    one_hot.scatter_(-1, winners, 1.0)
    return one_hot
def hardmax(X):
    """Hard argmax as a distribution: tied maxima share the probability mass.

    Each row of the result is uniform over the positions attaining the row
    maximum and zero elsewhere, so every row sums to 1.
    """
    row_max = torch.max(X, dim=-1, keepdim=True)[0]
    winners = (X == row_max).float()
    return winners / torch.sum(winners, dim=-1, keepdim=True)
# To test hardmax functions
# pre_x = [[-10, 2, 2, 2], [-100, 1, 0, 1]]
# X = torch.Tensor(pre_x)
# print(hardmax(X))
#
# for num_dims in range(1, 6):
# pre_x = [[-10, 2, 2, 2], [-100, 1, 0, 1]]
# for _ in range(num_dims - 1):
# pre_x = [pre_x]
# X = torch.Tensor(pre_x)
# print(X)
# print(hardmax2(X), '\n')
def bimodal_normal(x: torch.Tensor, mu: float, sigma: float) -> None:
    """
    Inits the weights (in-place) with the bimodal normal distribution (symmetric).
    :param x: input tensor
    :param mu: mean of the normal distribution
    :param sigma: standard deviation of the normal distribution
    """
    # NOTE(review): the sign-flipping step that would make this bimodal is
    # commented out below, so currently this is a plain (unimodal) normal
    # init around `mu` — confirm whether the flip should be re-enabled.
    x.normal_(mean=mu, std=sigma)
    # size = x.size()
    # mask = torch.randint(0, 2, size=size) * 2 - 1 # Randomly flip half the values to their opposite sign
    # x *= mask
def rescale_norm(x: torch.Tensor, norm: float) -> torch.Tensor:
    """
    Return a copy of ``x`` rescaled to have the specified L2 norm.

    Doc fix: this is NOT in-place (the original docstring claimed it was) —
    the division creates a new tensor and the input is left untouched.

    :param x: input tensor
    :param norm: target norm of the result
    :return: ``x / ||x|| * norm``
    """
    return x / torch.norm(x) * norm
def get_total_gpus() -> int:
    """
    Count the GPUs on this machine by parsing `nvidia-smi --list-gpus`.
    :return: number of GPUs
    """
    import subprocess
    proc = subprocess.Popen(['nvidia-smi', '--list-gpus'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, _ = proc.communicate()
    lines = stdout.decode("utf-8").split('\n')
    # One GPU per line; the trailing newline yields a final empty entry.
    num_gpus = len(lines) - 1
    print(f"... {num_gpus} GPUs found")
    return num_gpus
def get_idle_gpus(num_gpus: int = 2) -> List[int]:
    """
    Pick the least-utilized GPUs on this machine.
    :param num_gpus: requested number of GPUs
    :return: IDs of the `num_gpus` GPUs with the lowest utilization
    """
    import operator
    import subprocess
    total_gpus = get_total_gpus()
    if num_gpus > total_gpus:
        raise ValueError(f'Requested number of GPUs ({num_gpus}) exceeds available GPUs ({total_gpus})')
    proc = subprocess.Popen(
        ['nvidia-smi', '--format=csv', '--query-gpu=utilization.gpu'], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout, _ = proc.communicate()
    # Skip the CSV header (first line) and the trailing empty line.
    rows = stdout.decode("utf-8").split('\n')[1:-1]
    # (gpu_id, utilization %) pairs; e.g. a row looks like "42 %".
    usage = [(gpu_id, int(row.split(' ')[0])) for gpu_id, row in enumerate(rows)]
    usage.sort(key=operator.itemgetter(1))
    return [gpu_id for gpu_id, _ in usage[:num_gpus]]
def set_gpu_env(num_gpus: int = 1):
    """
    Set GPU environments in the server
    :param num_gpus: number of GPUs to use
    :return: PyTorch device
    """
    import os
    import torch
    idle_gpus = get_idle_gpus(num_gpus)
    # Restrict CUDA to the selected physical GPUs; this must be set before
    # any CUDA context is created for the mapping to take effect.
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, idle_gpus))
    print(f"... Available GPUs {idle_gpus}")
    # list available GPUs
    gpu_list = [torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())]
    print(f"... {len(gpu_list)} visible 'logical' GPUs: {gpu_list}")
    # Set up GPUs for multi-GPU training
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"... using {device}")
    return device
def compute_metrics(eval_pred):
    """Fold per-batch (correct, total) count pairs into a single accuracy."""
    counts, _ = eval_pred
    totals = counts.sum(axis=0)  # -> [total correct, total seen]
    return {"accuracy": totals[0] / totals[1]}
def setup_logger(training_args: TrainingArguments) -> Logger:
    """Configure stdout logging and align datasets/transformers verbosity
    with the per-process log level from ``training_args``."""
    logger: Logger = logging.getLogger(__name__)
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    # The level comes from HF TrainingArguments and differs per process rank.
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}\n device: {training_args.device}\n n_gpu: {training_args.n_gpu} \n"
        f"distributed training: {bool(training_args.local_rank != -1)}\n 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    return logger
def setup_wandb(training_args: TrainingArguments, model_args: ModelArguments, data_args: DataTrainingArguments) -> str:
    """Derive a wandb project name and run name from the arguments, start the
    wandb run, and return the run-name serial."""
    # Run name encodes epochs, learning rate, weight init, and eval split.
    serial = f"Epoch{int(training_args.num_train_epochs)}-LR{training_args.learning_rate}-"
    if model_args.randomized:
        serial += "Randomized-"
    else:
        serial += "Pretrained-"
    if model_args.dev:
        serial += "Dev"
    else:
        serial += "Test"
    # WanDB setup
    # Project name encodes task and probe configuration (MLP vs non-MLP head,
    # probe width/depth, optional one-hot input encoding).
    if model_args.use_mlp:
        wandb_proj_name = f"ConvergedProbe-{data_args.task}-DPMLP-Dim{model_args.mlp_dim}-Layer{model_args.mlp_layers}"
    else:
        wandb_proj_name = f"ConvergedProbe-{data_args.task}-DPLR-Dim{model_args.mlp_dim}-Layer{model_args.mlp_layers}"
    if model_args.onehot:
        wandb_proj_name += "-OneHot"
    # The HF Trainer's wandb integration reads WANDB_PROJECT from the env.
    os.environ["WANDB_PROJECT"] = wandb_proj_name
    wandb.init(
        project=wandb_proj_name,
        name=serial,
    )
    return serial
def record_num_of_params(model, logger: Logger) -> None:
    """Log the model's trainable/total parameter counts and persist them to
    the active wandb run summary (requires wandb.init to have been called)."""
    num_trainable_params = model.num_parameters(only_trainable=True)
    num_total_params = model.num_parameters()
    logger.info(f"Number of parameters to train (without adapters): {num_trainable_params}")
    logger.info(f"Total number of parameters (without adapters): {num_total_params}")
    wandb.run.summary["num_trainable_params"] = num_trainable_params
    wandb.run.summary["num_total_params"] = num_total_params
| yileitu/probing-via-prompting | utils.py | utils.py | py | 9,989 | python | en | code | null | github-code | 36 | [
{
"api_name": "torch.zeros_like",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"l... |
23295546136 | import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, BatchNormalization, Conv2D, MaxPool2D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import image
print(tf.__version__)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tqdm import tqdm
# Labels CSV: one row per poster (Id, a Genre string, plus 25 binary genre columns).
data = pd.read_csv("Movies-Poster_Dataset/train.csv")
img_width = 350
img_height = 350
# Load every poster, resize to 350x350 RGB, and scale pixel values to [0, 1].
X = []
for i in tqdm(range(data.shape[0])):
    path = "Movies-Poster_Dataset/Images/" + data["Id"][i] + ".jpg"
    img = image.load_img(path,target_size=(img_width,img_height,3))
    img = image.img_to_array(img)/255.0
    X.append(img)
X = np.array(X)
# Targets: every column except the identifier and the raw genre string.
y = data.drop(['Id','Genre'],axis = 1)
y = y.to_numpy()
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state=0, test_size = 0.15)
#Buidling the CNN
model = Sequential()
#First CNN Layer
model.add(Conv2D(16,(3,3),activation='relu',input_shape=X_train[0].shape))
model.add(BatchNormalization())
model.add(MaxPool2D(2,2))
model.add(Dropout(0.3))
#Second CNN Layer
model.add(Conv2D(32,(3,3),activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(2,2))
model.add(Dropout(0.3))
#Third CNN Layer
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(2,2))
model.add(Dropout(0.4))
#Fourth CNN Layer
model.add(Conv2D(128,(3,3),activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(2,2))
model.add(Dropout(0.5))
#First Fully connected layer
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
#Second Fully connected layer
# NOTE(review): this second Flatten() is a no-op after a Dense layer; the
# duplicated block effectively just adds a second 128-unit Dense layer —
# confirm the duplication is intentional.
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
#Output Layer
# Sigmoid (not softmax): multi-label classification over 25 genres.
model.add(Dense(25, activation='sigmoid'))
# Include the epoch in the file name (uses `str.format`)
checkpoint_path = "training/cp-{epoch:04d}.ckpt"
# Create a callback that saves the model's weights every 5 epochs
# NOTE(review): `period=1` actually saves every epoch (and `period` is a
# deprecated argument in favor of `save_freq`) — the comment above is stale.
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_path,
    verbose=1,
    save_weights_only=True,
    period=1)
# binary_crossentropy matches the independent per-genre sigmoid outputs.
model.compile(optimizer="adam",loss="binary_crossentropy", metrics=['accuracy'])
history = model.fit(X_train,
    y_train,
    epochs=5,
    validation_data=(X_test,y_test),
    callbacks=[cp_callback])
model.save('saved_model/workingModel')
#to Load the model:
# new_model = tf.keras.models.load_model('saved_model/my_model')
{
"api_name": "tensorflow.__version__",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras... |
3726457813 | from DQNAgent import DQNAgent
from pathlib import Path
# Resume from a previously saved network if one exists, otherwise start fresh.
my_file = Path("my_model.hd5")
if my_file.is_file():
    Agent = DQNAgent("my_model.hd5")
else:
    Agent = DQNAgent()
for i in range(1500):
    Agent.observe()
    Agent.train()
    # Evaluate: accumulate the total reward over 10 games.
    rewards = 0
    for _ in range(10):
        rewards += Agent.play()
    # NOTE(review): this divides a 10-game total by 3 — if it is meant to be
    # an average it should be rewards / 10; confirm intent.
    print(rewards / 3)
    # Persist the network each iteration so training can resume after interruption.
    Agent.save_network("my_model.hd5")
| sojunator/DV2454Proj | Q_ml_keras.py | Q_ml_keras.py | py | 353 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "DQNAgent.DQNAgent",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "DQNAgent.DQNAgent",
"line_number": 8,
"usage_type": "call"
}
] |
25441622848 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reset Member's Meta options, rename UserKnowsPL's verbose name, and
    add an integer `experience` field to Member."""

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        # Clear all model-level Meta options on Member.
        migrations.AlterModelOptions(
            name='member',
            options={},
        ),
        migrations.AlterModelOptions(
            name='userknowspl',
            options={'verbose_name': 'User knows Programming Language'},
        ),
        # Non-null field; existing rows are backfilled with experience=0.
        migrations.AddField(
            model_name='member',
            name='experience',
            field=models.SmallIntegerField(default=0),
        ),
    ]
| MJafarMashhadi/CodeDescriptionInformationCollector | core/migrations/0002_auto_20160107_1255.py | 0002_auto_20160107_1255.py | py | 648 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterModelOptions",
"line_number": 14,
"usage_type": "call"
... |
72398212263 | from django import forms
from django.forms import Textarea
from .models import Comment, Post
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing a Post (text, group, optional image)."""

    class Meta:
        model = Post
        fields = ("text", "group", "image")
        widgets = {
            # Bootstrap-styled textarea; Russian placeholder reads
            # "Text of the new post".
            "text": Textarea(
                attrs={"class": "form-control", "placeholder": "Текст нового поста"}
            ),
        }
class CommentForm(forms.ModelForm):
    """ModelForm for adding a Comment (text only)."""

    class Meta:
        model = Comment
        fields = ("text",)
        widgets = {
            # Bug fix: the placeholder used nominative "комментарий" after
            # the genitive "нового"; corrected to "комментария" to match the
            # grammar of PostForm's placeholder ("Text of the new comment").
            "text": Textarea(
                attrs={
                    "class": "form-control",
                    "placeholder": "Текст нового комментария",
                }
            ),
        }
| EISerova/yatube-social-network | yatube/posts/forms.py | forms.py | py | 738 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "models.Post",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.Textar... |
18977859509 | from dask.distributed import Client, wait
import dask.dataframe as dd
import os
from shapely.geometry import LineString, Polygon, Point, box
from shapely import wkb
import rtree
import xarray as xr
import pandas as pd
import pyarrow as pa
index_url = './../data/roads'
df_url = './../data/osm_roads/roads.parquet'
client = Client('192.168.0.134:8786')
def get_neighbors(index, row):
    """IDs of roads whose R-tree bounding box overlaps this road's box,
    excluding the road itself (`row.name` is its own id)."""
    hits = index.intersection(wkb.loads(row.bbox).bounds)
    return [fid for fid in map(int, hits) if fid != row.name]
def find_intersections(is_neighbors, neighbors, row):
    """Collect graph nodes and interior junction points where road `row`
    crosses its candidate neighbors.

    Returns (nodes, intersections): `nodes` are (coord, {'junction': [...]})
    tuples for a NetworkX MultiDiGraph; `intersections` are shapely Points
    strictly inside the road (its endpoints are excluded).
    """
    intersections = []  # Street intersections interior to this road
    nodes = []  # (coordinate, attrs) tuples for NetworkX MultiDiGraph
    a = wkb.loads(row.geometry)
    road = a.coords[:]
    if is_neighbors:
        for entry in neighbors.itertuples():
            b = wkb.loads(entry.geometry)
            # Bridges/tunnels are assumed grade-separated: no junction there.
            if not (entry.bridge or entry.tunnel) and a.intersects(b):
                pts = a.intersection(b)
                if pts.type == 'MultiPoint':
                    # Bug fix: these were generator expressions that were never
                    # iterated, so MultiPoint crossings were silently dropped;
                    # they are now real loops. (Iterating a MultiPoint directly
                    # works on shapely<2; use pts.geoms on shapely>=2.)
                    for pt in pts:
                        nodes.append((pt.coords[:][0], {'junction': [row.name, entry.Index]}))
                    for pt in pts:
                        if pt.coords[:][0] != road[0] and pt.coords[:][0] != road[-1] and (pt.coords[:][0] not in intersections):
                            intersections.append(pt)
                elif pts.type == 'Point':
                    nodes.append((pts.coords[:][0], {'junction': [row.name, entry.Index]}))
                    if pts.coords[:][0] != road[0] and pts.coords[:][0] != road[-1] and (pts.coords[:][0] not in intersections):
                        intersections.append(pts)
    # Always register the road's own endpoints as nodes (once each).
    for pt in (road[0], road[-1]):
        if not nodes or pt not in tuple(zip(*nodes))[0]:
            nodes.append((pt, {'junction': [row.name]}))
    return nodes, intersections
def compute_edges(intersections, nodes, row):
    """Split road `row` into weighted graph edges between consecutive junctions.

    Walks the road polyline, consuming `intersections` (shapely Points, which
    this function mutates) as it passes them; each stretch between junctions
    becomes an edge with its length and a weight of length/maxspeed/1000
    (travel time, assuming lengths are metric — confirm the dataset CRS).
    """
    road = wkb.loads(row.geometry).coords[:]
    edges = []
    segment_len = 0
    # Coordinate keeping track of previous intersection/edge end
    previous_node = road[0]
    for idx in range(len(road)-1):
        # LineString of straight line segment between two consecutive points
        segment = LineString(road[idx:idx+2])
        # Coordinate updating on every segment or when intersection encountered
        segment_start = road[idx]
        queue = [] # Point objects that intersect this particular road straight line segment
        for pt in list(intersections):
            if segment.intersects(pt):
                # Put all junctions intersecting this segment into a queue
                queue.append(pt)
                # Remove the junction from left-over list of street intersections
                intersections.remove(pt)
        if not queue:
            # If no junctions in this road segment, increase length by distance between LineString consecutive points
            segment_len += segment.length
        else:
            # Consume queued junctions nearest-first, emitting one edge per
            # stretch since the previous junction.
            for pt in list(queue):
                line_lengths = [LineString([segment_start, p.coords[:][0]]).length for p in queue]
                shortest_line = min(line_lengths)
                next_node_idx = [k for k, l in enumerate(line_lengths) if l == shortest_line][0]
                next_node = queue[next_node_idx].coords[:][0]
                segment_len += LineString([segment_start, next_node]).length
                if segment_len: # Multiple roads crossing at the same junction. Can skip. osm_id's on intersections are maintained by nodes array
                    edges.append((
                        previous_node,
                        next_node,
                        {
                            'length': segment_len,
                            'weight': segment_len/row.maxspeed/1000,
                        }))
                    if not row.oneway: # If both way street, add identical reverse relation between MultiDiGraph nodes
                        edges.append((
                            next_node,
                            previous_node,
                            {
                                'length': segment_len,
                                'weight': segment_len/row.maxspeed/1000,
                            }))
                segment_len = 0
                previous_node = next_node
                segment_start = next_node
                # Remove the junction from the queue
                queue.remove(queue[next_node_idx])
            # Get distance to the endpoint of the segment
            segment_len += LineString([segment_start, road[idx+1]]).length
    # Close out the final stretch to the road's endpoint.
    edges.append((
        previous_node,
        road[-1],
        {
            'length': segment_len,
            'weight': segment_len/row.maxspeed/1000,
        }))
    if not row.oneway: # If both way street, add identical reverse relation between MultiDiGraph nodes
        edges.append((
            road[-1],
            previous_node,
            {
                'length': segment_len,
                'weight': segment_len/row.maxspeed/1000,
            }))
    return edges
def foo(row, df, index):
    """Build the graph nodes and edges contributed by a single road row."""
    candidates = None
    has_candidates = False
    # Assumption kept from the original: bridges and tunnels do not
    # intersect other roads at grade.
    if not (row.bridge or row.tunnel):
        # Look up overlapping road ids in the R-tree, then fetch their rows.
        overlap_ids = get_neighbors(index, row)
        candidates = df.loc[overlap_ids].compute(scheduler='single-threaded')
        has_candidates = True
    # Graph nodes plus the junction points interior to this road.
    nodes, crossings = find_intersections(has_candidates, candidates, row)
    # Split the road into weighted edges between consecutive junctions.
    edges = compute_edges(crossings, nodes, row)
    return nodes, edges
def process(fn, df_url, index_url):
    """Load one roads partition, join it against the full road set via the
    shared R-tree, and attach per-road `nodes`/`edges` columns."""
    df = dd.read_parquet(df_url, engine='pyarrow')
    d = pd.read_parquet(fn)
    index = rtree.index.Rtree(index_url)
    # Row-wise graph construction; result_type='expand' splits the returned
    # (nodes, edges) tuple into the two target columns.
    d[['nodes','edges']] = d.apply(
        foo,
        args=(df, index),
        axis=1,
        result_type='expand')
    return d
def write(df, fn, schema):
    """Persist a processed partition to parquet using the explicit Arrow schema."""
    print('Writing processed data to '+fn)
    df.to_parquet(fn, engine='pyarrow', schema=schema)
    return
schema = pa.schema([
('osm_id', pa.int64()),
('code', pa.int64()),
('fclass', pa.string()),
('road_name', pa.string()),
('ref', pa.string()),
('oneway', pa.bool_()),
('maxspeed', pa.int64()),
('layer', pa.int64()),
('bridge', pa.bool_()),
('tunnel', pa.bool_()),
('geometry', pa.binary()),
('bbox', pa.binary()),
('nodes', pa.list_(
pa.struct([
('0', pa.list_(pa.float64(), 2)),
('1', pa.struct([
('junction', pa.list_(pa.int64())),
('altitude', pa.int64()),
]))
])
)),
('edges', pa.list_(
pa.struct([
('0', pa.list_(pa.float64(), 2)),
('1', pa.list_(pa.float64(), 2)),
('2', pa.struct([
('length', pa.float64()),
('weight', pa.float64()),
('flatness', pa.float64()),
]))
])
))
])
in_path = './../data/osm_roads/roads_partition.parquet/'
out_path = './../data/osm_roads/roads_intersected.parquet/'
futures = []
# Submit a process->write pipeline per partition to the Dask cluster.
# NOTE(review): `[0:4]` limits the run to the first four partitions — looks
# like a debugging cap; confirm before a full production run.
for fn in os.listdir(in_path)[0:4]:
    a = client.submit(process, in_path + fn, df_url, index_url)
    b = client.submit(write, a, out_path + fn, schema)
    futures.append(b)
# Block until all submitted writes have finished.
wait(futures)
| maximyudayev/YY-MANET-Protocol | local/network_graph_build_no_dem.py | network_graph_build_no_dem.py | py | 7,549 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dask.distributed.Client",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "shapely.wkb.loads",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "shapely.wkb",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "shapely.wkb.loa... |
20436996206 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a Customer foreign key to ContactUs."""

    dependencies = [
        ('customer', '0009_auto_20151012_1449'),
        ('tab1', '0001_initial'),
    ]

    operations = [
        # blank=True makes the field optional in forms; default=None backfills
        # existing rows. The verbose name is Chinese for "user".
        migrations.AddField(
            model_name='contactus',
            name='customer',
            field=models.ForeignKey(default=None, verbose_name='\u7528\u6237', blank=True, to='customer.Customer'),
        ),
    ]
| wlj459/unipub | tab1/migrations/0002_contactus_customer.py | 0002_contactus_customer.py | py | 505 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 15,
"usage_type": "call"
},
{
... |
37439980327 | import pandas as pd
from pathlib import Path
import numpy as np
from data_cleaning import clean_data
from data_preparation import data_preparation
INPUT_TEST_PATH = Path('data/raw/test.csv')
OUTPUT_PATH = Path('data/test_with_predicted_revenue/')
if __name__ == "__main__":
test_df = pd.read_csv(INPUT_TEST_PATH)
test_cleaned_df = clean_data(df=test_df)
test_prepared_df = data_preparation(df=test_cleaned_df)
# Check for missing values
if test_prepared_df.isna().sum().any():
print(test_prepared_df.isna().sum())
raise ValueError("There is some NaN values in dataframe")
# Apply linear baseline from EDA
y_pred = np.exp(0.426147 + 0.993024 * test_prepared_df['log_rev_24h'])
test_prepared_df = test_prepared_df.assign(revenue_30d_total=y_pred)
OUTPUT_PATH.mkdir(exist_ok=True, parents=True)
test_prepared_df.to_csv(OUTPUT_PATH / 'linear_submission.csv', index=False)
| LlirikOknessu/brightika_test | linear_prod_version.py | linear_prod_version.py | py | 932 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "data_cleaning.clean_data",
... |
28205621966 | from typing import Dict, List, Optional, Union
from fastapi import APIRouter, Depends, Response, status
from sqlalchemy.orm import Session
from src import oauth, schemas
from src.db.database import get_db
from src.services import post as post_service
router = APIRouter(prefix="/posts", tags=["Blog Posts"])
@router.get(
"/",
response_model=List[Dict[str, Union[schemas.PostOut, int]]],
status_code=status.HTTP_200_OK,
)
async def get_all_post(
limit: int = 10, skip: Optional[int] = None, db: Session = Depends(get_db)
):
return post_service.get_all_post(db, limit, skip)
@router.post("/", response_model=schemas.PostOut, status_code=status.HTTP_201_CREATED)
async def create_post(
post: schemas.PostCreate,
user_id: int = Depends(oauth.get_current_user),
db: Session = Depends(get_db),
):
return post_service.create_new_post(post, db, user_id)
@router.get("/{id}", response_model=schemas.PostOut, status_code=status.HTTP_200_OK)
async def get(id: int, db: Session = Depends(get_db)):
return post_service.get_by_id(post_id=id, db=db)
@router.delete("/{id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_post(
id: int,
user_id: int = Depends(oauth.get_current_user),
db: Session = Depends(get_db),
):
post_service.delete_post(id, user_id, db)
return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.put("/{id}", response_model=schemas.PostOut, status_code=status.HTTP_200_OK)
async def update_post(
id: int,
post: schemas.PostUpdate,
user_id: int = Depends(oauth.get_current_user),
db: Session = Depends(get_db),
):
post = post_service.update_posts(id, post, user_id, db)
return post
| hiteshsankhat/blog_post | backend/src/api/endpoints/posts.py | posts.py | py | 1,700 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "fastapi.Depen... |
28686029068 | import json
import pandas as pd
from os.path import join
PROJECT_PATH = '../../'
event = pd.read_csv(join(PROJECT_PATH, 'data', 'LinearSearchThreadEvent.csv'),
header=None,
names=['Id', 'RootEventId', 'UserIdentifier', 'CreationDate', 'DiffSeconds', 'EventSource',
'EventTarget',
'Referrer', 'Url', 'Query', 'FragmentIdentifier'],
dtype={
'Id': 'int64',
'RootEventId': pd.Int64Dtype(),
'UserIdentifier': 'str',
'CreationDate': 'str',
'DiffSeconds': pd.Int64Dtype(),
'EventSource': 'str',
'EventTarget': 'str',
'Referrer': 'str',
'Url': 'str',
'Query': 'str',
'FragmentIdentifier': 'str'
})
# Parse timestamps, then order events chronologically within each root event.
event['CreationDate'] = pd.to_datetime(event['CreationDate'], format='%Y-%m-%d %H:%M:%S')
event = event.sort_values(by=['RootEventId', 'CreationDate'], ascending=True)
# Map: root event id (as str) -> chronologically ordered list of its event ids.
# A row with no RootEventId is itself a root and starts its own list.
REID2IDList = {}
for _, row in event.iterrows():
    if pd.isna(row['RootEventId']):
        REID2IDList[str(row['Id'])] = [str(row['Id'])]
    else:
        if str(row['RootEventId']) not in REID2IDList.keys():
            REID2IDList[str(row['RootEventId'])] = [str(row['Id'])]
        else:
            REID2IDList[str(row['RootEventId'])].append(str(row['Id']))
# Persist the mapping for downstream dataset-extraction steps.
with open(join(PROJECT_PATH, 'data', 'REID2IDList.json'), "w", encoding="utf-8") as fw:
    json.dump(REID2IDList, fw)
| kbcao/sequer | code/DatasetExtraction/eventList_extraction.py | eventList_extraction.py | py | 1,645 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.Int64Dtype",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.Int64Dtype",
... |
6699760535 | from compat.legacy_model import LegacyModel
from customer.customers import CustomerObjectMap
import os
from common.io.interactive import query_yes_no
BASE_MODEL_DIR = r"E:\viNet_RnD\Deployment\Inference Models\Inception"
def create_candidate_model_legacy(class_map,
                                  network_version=None,
                                  dataset_version=None) -> None:
    """
    Used to freeze weights generated with legacy tool
    @param dataset_version: dataset version
    @param network_version: network version
    @param class_map: a path to class map file
    @return: None
    """
    # select customer
    customers = list(CustomerObjectMap.keys())
    for i in range(len(customers)):
        print(f"{i}: {customers[i]}")
    print()
    customer = customers[int(input("Select customer (index):"))]
    freeze_latest = query_yes_no("Load latest checkpoints? ")
    if freeze_latest:
        # select base model
        models = os.listdir(BASE_MODEL_DIR)
        for i in range(len(models)):
            print(f"{i} - {models[i]}")
        input_graph_path = models[int(input("Enter net graph: "))]
        input_graph_path = os.path.join(BASE_MODEL_DIR, input_graph_path)
        assert os.path.exists(input_graph_path)
        # Freeze, upload the class map, and verify the resulting model.
        model = LegacyModel(input_graph_path,
                            customer=customer,
                            net_version=network_version,
                            dataset_version=dataset_version)
        model.operationalize()
        model.upload(class_map)
        model.run_verification(customer=customer)
    else:
        # NOTE(review): with an empty graph path this branch only re-runs
        # verification on an existing model — confirm that is the intent.
        model = LegacyModel("", customer=customer)
        model.run_verification()
if __name__ == '__main__':
    # Target viNet release 2.11, dataset version 2.
    version, dsv = '2.11', 2
    # Class map reused from the earlier 2.9 Vattenfall deployment.
    classmap = r"E:\viNet_RnD\Deployment\Vattenfall\2.9\viNet_2.9_Vattenfall_ClassMap.txt"
    create_candidate_model_legacy(classmap,
                                  network_version=version,
                                  dataset_version=str(dsv))
| h3nok/MLIntro | Notebooks/cli/legacy_cli.py | legacy_cli.py | py | 1,995 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "customer.customers.CustomerObjectMap.keys",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "customer.customers.CustomerObjectMap",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "customer.customers",
"line_number": 29,
"usage_type": "nam... |
13989770988 | from collections import deque
class Solution:
    def wallsAndGates(self, rooms):
        """Fill each empty room (INT_MAX) with the distance to its nearest gate (0), in place.

        Walls are -1 and are left untouched. Improvement: a single
        multi-source BFS seeded from every gate visits each cell at most
        once — O(m*n) total — instead of the original per-gate BFS
        (O(gates * m * n)); the resulting grid is identical, since both
        compute the minimum gate distance.
        """
        if not rooms or not rooms[0]:
            return
        n, m = len(rooms), len(rooms[0])
        # Seed the frontier with every gate, all at distance 0.
        queue = deque((i, j) for i in range(n) for j in range(m) if rooms[i][j] == 0)
        while queue:
            x, y = queue.popleft()
            for i, j in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
                # `rooms[i][j] > rooms[x][y] + 1` rejects walls (-1), gates (0)
                # and any cell already reached by an equal-or-shorter path.
                if 0 <= i < n and 0 <= j < m and rooms[i][j] > rooms[x][y] + 1:
                    rooms[i][j] = rooms[x][y] + 1
                    queue.append((i, j))
| dariomx/topcoder-srm | leetcode/zero-pass/facebook/walls-and-gates/Solution.py | Solution.py | py | 1,023 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 13,
"usage_type": "call"
}
] |
9098896290 | import os
import numpy as np
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFilter
from PIL import ImageFont
from tqdm import tqdm
train_dir = "dataset/train/"
test_dir = "dataset/test/"
# digit generation
def digit_generator(
    digit="1",
    font_name="/usr/share/fonts/truetype/custom/HindSiliguri-Regular.ttf",
    font_size=210,
    x_pos=50,
    y_pos=-20,
    color=(255, 255, 255),
):
    """Render a single digit onto a 256x256 RGB canvas.

    The glyph is drawn in black with the given TrueType font at
    (x_pos, y_pos); ``color`` is the background fill. Returns the
    resulting PIL Image.
    """
    typeface = ImageFont.truetype(font=font_name, size=font_size)
    canvas = Image.new("RGB", (256, 256), color=color)
    ImageDraw.Draw(canvas).text((x_pos, y_pos), digit, fill=(0, 0, 0), font=typeface)
    return canvas
# train data generation
def train_datagen(
    fonts,
    color_list=[(255, 255, 255), (255, 255, 204), (0, 128, 128), (133, 193, 233)],
    color_names=["white", "yellow", "teal", "sky_blue"],
    image_count=100,
):
    """
    Render Bengali digits 0-9 in every (font, color) combination and save
    them as JPEGs under dataset/train/<english_digit>/.

    color_list is a list of RGB tuples like (255, 255, 255) and color_names
    holds the corresponding human-readable names (index-aligned).
    ------------------------------------------------------------------------------------------------------
    Example:
    color_list = [(255,255,255), (255, 255, 204), (255, 153, 102), (102, 255, 51), (0, 0, 255), (255, 0, 102)]
    color_names = ['white', 'yellow', 'orange', 'green', 'blue', 'red']
    ------------------------------------------------------------------------------------------------------
    NOTE(review): color_list/color_names are mutable default arguments shared
    across calls; harmless here since they are never mutated.
    """
    # Bengali digits and their English counterparts, index-aligned.
    digits_bns = "০ ১ ২ ৩ ৪ ৫ ৬ ৭ ৮ ৯".split()
    digits_ens = "0 1 2 3 4 5 6 7 8 9".split()
    # Only generate when the "0" class folder is empty, making reruns no-ops.
    if len(os.listdir(train_dir + "0")) == 0:
        print("Generating training images...")
        img_cnt = 0
        for idx, font_name in tqdm(enumerate(fonts)):
            for jdx, (digit_bn, digit_en) in enumerate(zip(digits_bns, digits_ens)):
                for color, color_name in zip(color_list, color_names):
                    try:
                        img = digit_generator(
                            digit=digit_bn, font_name=font_name, color=color
                        )
                        img_cnt += 1
                        # Cap total saved images at image_count; later
                        # renders are still produced but discarded.
                        if img_cnt <= image_count:
                            img.save(
                                "dataset/train/{}/{}_{}_{}_{}.jpg".format(
                                    digit_en,
                                    idx,
                                    jdx,
                                    color_name,
                                    font_name.split(".ttf")[0].split("/")[-1],
                                )
                            )
                    except Exception as e:
                        raise Exception("TrainImgGenError:", e)
    else:
        print("Directory is not empty: Not generating training images")
# test data generation
def test_datagen(
    fonts,
    color_list=[(255, 255, 255), (255, 255, 204), (0, 128, 128), (133, 193, 233)],
    color_names=["white", "yellow", "teal", "sky_blue"],
    image_count=100,
):
    """
    Render Bengali digits 0-9 in every (font, color) combination and save
    them as JPEGs under dataset/test/<english_digit>/.

    Same scheme as train_datagen, but uses font_size=200 (vs. the
    digit_generator default of 210) so test glyphs differ slightly
    from training glyphs.
    """
    font_size = 200
    # Bengali digits and their English counterparts, index-aligned.
    digits_bns = "০ ১ ২ ৩ ৪ ৫ ৬ ৭ ৮ ৯".split()
    digits_ens = "0 1 2 3 4 5 6 7 8 9".split()
    # Only generate when the "0" class folder is empty, making reruns no-ops.
    if len(os.listdir(test_dir + "0")) == 0:
        print("Generating test images...")
        img_cnt = 0
        for idx, font_name in tqdm(enumerate(fonts)):
            for jdx, (digit_bn, digit_en) in enumerate(zip(digits_bns, digits_ens)):
                for color, color_name in zip(color_list, color_names):
                    try:
                        img = digit_generator(
                            digit=digit_bn,
                            font_name=font_name,
                            font_size=font_size,
                            color=color,
                        )
                        img_cnt += 1
                        # Cap total saved images at image_count.
                        if img_cnt <= image_count:
                            img.save(
                                "dataset/test/{}/{}_{}_{}_{}.jpg".format(
                                    digit_en,
                                    idx,
                                    jdx,
                                    color_name,
                                    font_name.split(".ttf")[0].split("/")[-1],
                                )
                            )
                    except Exception as e:
                        raise Exception("TestImgGenError:", e)
    else:
        print("Directory is not empty: Not generating test images")
| rednafi/prinumco | digit_generation_src/digit_generation.py | digit_generation.py | py | 4,386 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "PIL.Image.new",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "PIL.ImageDraw.Draw",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "PIL.ImageDraw",
"line_... |
9268710699 | import cv2
import numpy as np
import time
import os
from cobit_opencv_lane_detect import CobitOpencvLaneDetect
class CobitOpenCVGetData:
    """Reads frames from a pre-recorded drive video and serves them as JPEG bytes."""

    def __init__(self):
        # Source is a recorded video file, not a live camera.
        self.cap = cv2.VideoCapture('data/car_video.avi')
        self.cv_detector = CobitOpencvLaneDetect()
        self.image = None   # last raw BGR frame read from the capture
        self.angle = None   # unused in this class; presumably steering angle — TODO confirm
        self.index = 0
        self.jpeg = None    # last JPEG-encoded frame

    def update(self):
        """Grab the next frame and return (ok, jpeg_bytes).

        NOTE(review): when cap.read() fails (end of video), self.image is
        None and cv2.imencode will raise. The commented-out block below
        handled that case — confirm whether it should be restored.
        """
        ret, self.image = self.cap.read()
        ret, self.jpeg = cv2.imencode('.jpg', self.image)
        return ret, self.jpeg.tobytes()

    '''
    if ret is False:
        self.image = np.zeros((240, 320, 3), np.uint8)
        cv2.putText(self.image, 'No frame', (40, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1)
        ret, self.jpeg = cv2.imencode('.jpg', self.image)
        return ret, self.jpeg.tobytes()
    else:
        self.image = np.zeros((240, 320, 3), np.uint8)
        ret, self.jpeg = cv2.imencode('.jpg', self.image)
        return ret, self.jpeg.tobytes()
    '''

    def remove_old_data(self):
        # Delete frames captured by previous runs.
        os.system("rm data/*.png")

    def finish(self):
        # Release the video source and close any OpenCV windows.
        self.cap.release()
        cv2.destroyAllWindows()
if __name__ == '__main__':
    # Manual smoke test: stream frames from the recorded video until the
    # video ends or 'q' is pressed.
    cam = CobitOpenCVGetData()
    loop = True
    cam.remove_old_data()
    while loop:
        ret, img = cam.update()
        print(ret)
        if ret:
            # NOTE(review): update() returns JPEG bytes, but cv2.imshow
            # expects a decoded image array — verify this display path.
            cv2.imshow("win", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cam.finish()
| cobit-git/little-cobit-web-ctrl | cobit_opencv_get_data_backup.py | cobit_opencv_get_data_backup.py | py | 1,512 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cobit_opencv_lane_detect.CobitOpencvLaneDetect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.imencode",
"line_number": 18,
"usage_type": "call"
},
{
"api_n... |
32022624457 | #Load variable from a file. Userstat.txt
#USerstat.txt has first line as whether user wants to choose more projects, and next lines has names of all projects user has chosen.
#We have to create a login function here for the user, else, how would the app know which userfile to download.?
#For now let's simply ask the user for his username, and not the whole login process
import urllib.request
a=True
while(a):
try:
username = input("Please enter you username - ")
a=False
url = 'http://127.0.0.1:8000/download_userfile/'+username+'/'
obj=urllib.request.urlopen(url)
except:
print("Oops! wrong username, Try again!")
a=True
import sys
import time
from importlib import reload
'''zip = ZipFile('file.zip')
zip.extractall()'''
data = obj.read()
with open('Userstat_'+username+'.txt', "wb+") as code:
code.write(data)
code.close()
#urllib.request.urlretrieve(url,'Userstat_'+username+'.txt') it's in python2
file = open('Userstat_'+username+'.txt',"r")
l=file.readlines()
uid = l[0][0]
l1=l[1:]
file.close()
print("All of your chosen projects will run one by one")
print("Projects you have chosen are:")
ind=1
donecount=0
for i in l1:
a = str(ind)+" - "+i[i.find('-')+1:]
print(a)
if '(done)' in a:
donecount+=1
ind+=1
if donecount==len(l1):
print("Congratulations!!\n All the tasks of all the projects you are contributing to,")
print("Are done! Hurray!")
chind = int(input("Choose index of project to start with"))
print("Projects will be run from Project "+str(chind)+" in above order, one by one")
print("Note, the program will keep running until you close this application")
#originalsyspath = sys.path
'''for i in range(len(l)):
prid=i[:i.find('-')-1]
sys.path.insert(0, './'+prid+'_files')
'''
while(1>0):
for j in range(chind, len(l)):
i=l[j]
if ' (done)' in i:
continue
elif ' (wait)' in i:
print('Tasks for '+i+' are all assigned but not completed.')
print('Tasks maybe available after about 60 seconds, so sleeping for 60 seconds....')
time.sleep(60)
prid=i[:i.find('-')]
sys.path.insert(0, './'+prid+'_files')
print(sys.path)
import projman
reload(projman)
print("Currently doing - "+i[i.find('-')+1:]+" ...")
projman.runproj(username)
sys.path.remove('./'+prid+'_files')
file = open('Userstat_'+username+'.txt',"r")
l=file.readlines()
uid = l[0][0]
l1=l[1:]
file.close()
chind = 1
donecount=0
for i in l1:
a = str(ind)+" - "+i[i.find('-')+1:]
print(a)
if '(done)' in a:
donecount+=1
ind+=1
if donecount==len(l1):
print("Congratulations!!\n All the tasks of all the projects you are contributing to,")
print("Are done! Hurray!")
break
print("Note, the program will keep running until you close this application")
print("The program will now exit")
#print("Do you want to chose more projects?('f') Or do you want to delete projects from your list?('d')")
#chosen=input()
| snehalgupta/issc | pcapp/app.py | app.py | py | 2,900 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.request.urlopen",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_nam... |
39305646033 | import asyncio
import logging
import os
import threading
import uuid
from time import sleep
from pywintypes import Time
from win32con import FILE_SHARE_DELETE, FILE_SHARE_READ, GENERIC_WRITE, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, \
FILE_SHARE_WRITE
from win32file import CreateFile, CloseHandle, SetFileTime
from AndroidFTPBackup.constants import BackupConstants
from AndroidFTPBackup.helpers.ConfigHelper import ConfigHelper
from AndroidFTPBackup.helpers.FileHelper import FileHelper
from AndroidFTPBackup.helpers.FtpHelper import FtpHelper
from AndroidFTPBackup.model.FtpFile import FtpFile
from AndroidFTPBackup.utils.BackupUtils import BackupUtils
class BackupHelper:
logger = logging.getLogger(__name__)
def __init__(self, backup, current_ip):
self.backup = backup
self.state = 'Started'
self.ftp_client = None
self.current_ip = current_ip
self.backup_name = backup['config']['basic']['name']
self.last_backup_end_time = BackupUtils.date_from_timestamp(backup['last_backup_end_time'])
self.last_backup_start_time = BackupUtils.date_from_timestamp(backup['last_backup_start_time'])
self.ftp_data = dict(port=backup['config']['ftp']['port'], userId=backup['config']['ftp']['userId'],
ip=current_ip, password=backup['config']['ftp']['password'])
def start_backup(self, backup):
loop = asyncio.new_event_loop()
p = threading.Thread(target=self.worker, args=(loop, backup,))
p.start()
def worker(self, loop, backup):
asyncio.set_event_loop(loop)
loop.run_until_complete(self.data_backup(backup))
async def data_backup(self, backup):
self.ftp_client = await self.connect_ftp()
self.logger.info("Starting Backup for: {}".format(self.backup_name))
self.logger.info("Last Backup started on: {}".format(self.last_backup_start_time))
self.logger.info("Last Backup completed on: {}".format(self.last_backup_end_time))
current_backup_start_time = BackupUtils.timestamp_from_date()
for dir_data in backup['config']['dirs']['backupDirs']:
await self.backup_folder(dir_data)
current_backup_end_time = BackupUtils.timestamp_from_date()
self.logger.info("Current Backup started on: {}".format(current_backup_start_time))
self.logger.info("Current Backup completed on: {}".format(current_backup_end_time))
self.state = 'Completed'
await BackupUtils.send_message('Completed', self.backup_name, current_backup_end_time)
await ConfigHelper.update_backup_time(self.backup_name, current_backup_start_time, current_backup_end_time)
async def backup_folder(self, dir_config):
source_path = dir_config['path']
backup_location = dir_config['backupLocation']
month_separated = dir_config['monthSeparated']
recursive = dir_config['recursive']
FileHelper.create_folder(backup_location)
await BackupUtils.send_message('Scanning', self.backup_name, source_path, dir_config['backupLocation'])
num_files = 0
for file in await self.get_dir_list(source_path):
file = FtpFile(file)
await self.validate_process_status(self.backup_name)
uuid_ = uuid.uuid4().__str__()
if file.type == 'dir':
if file.name[0] == '.':
continue
sub_dir_config = dict(path=os.path.join(source_path, file.name),
monthSeparated=month_separated, recursive=recursive,
backupLocation=BackupUtils.get_backup_location(backup_location, file, recursive))
await self.backup_folder(sub_dir_config)
continue
if file.modify < self.last_backup_start_time:
continue
try:
file_path, save = BackupUtils.get_file_path(backup_location, month_separated, file,
self.last_backup_end_time)
if save:
if num_files == 0:
self.logger.info('Backing up: {}'.format(source_path))
await BackupUtils.send_message('Enter Directory', self.backup_name,
source_path, backup_location)
num_files += 1
await BackupUtils.send_message('Copying', self.backup_name, file.name, file.size, uuid_)
await self.create_file(file_path, file.name, file.modify.timestamp(), source_path)
await BackupUtils.send_message('Saved', self.backup_name, file_path, file.size, uuid_)
except Exception as e:
self.logger.exception('Error saving: {}.'.format(file.name), e)
await BackupUtils.send_message('Error', self.backup_name, file.name, e.__str__(), uuid_)
async def validate_process_status(self, backup_name):
if self.state == 'Cancelled':
await BackupUtils.send_message('Cancelled', backup_name, BackupUtils.timestamp_from_date())
raise RuntimeError('Backup stopped by user')
async def create_file(self, file_name, current_file_name, time, file_path):
current_file = os.path.join(file_path, current_file_name)
with open(file_name, "wb") as file:
await self.get_file(current_file, file.write)
win_file = CreateFile(file_name, GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
None, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, None)
# noinspection PyUnresolvedReferences
win_time = Time(time)
SetFileTime(win_file, win_time, win_time, win_time)
CloseHandle(win_file)
self.logger.info('Created file {} with time {}'.format(file_name, win_time))
async def get_dir_list(self, source_file_path):
try:
return list(self.ftp_client.mlsd(source_file_path))
except Exception as e:
await self.retry('get_dir_list', e)
return await self.get_dir_list(source_file_path)
async def get_file(self, current_file, save_file_callback):
try:
return self.ftp_client.retrbinary("RETR {}".format(current_file), save_file_callback)
except Exception as e:
await self.retry('get_file', e)
return await self.get_file(current_file, save_file_callback)
async def connect_ftp(self, retry_count=1):
while True:
try:
return FtpHelper.connect_ftp(self.ftp_data)
except Exception as e:
await self.retry('connect_ftp', e, retry_count)
async def retry(self, function, e, retry_count=1):
if retry_count > BackupConstants.MAX_RETRY_COUNT:
self.state = 'Cancelled'
await BackupUtils.send_message('Connection Failed', self.backup_name,
'Retry Limit reached, Cancelling Backup.')
raise RuntimeError('Retry Limit reached, Cancelling Backup.')
await BackupUtils.send_message('Connection Failed', self.backup_name,
'Retry Count: {}/{}'.format(retry_count, BackupConstants.MAX_RETRY_COUNT))
self.logger.error('Possible disconnect, retrying... {} {} {}'.format(retry_count, function, str(e)))
sleep(BackupConstants.RETRY_DELAY)
self.ftp_client.close()
self.ftp_client = await self.connect_ftp(retry_count + 1)
| SanketRevankar/AndroidFTP-DataBackup | AndroidFTPBackup/helpers/BackupHelper.py | BackupHelper.py | py | 7,605 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "AndroidFTPBackup.utils.BackupUtils.BackupUtils.date_from_timestamp",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "AndroidFTPBackup.utils.BackupUtils.BackupUtils",
"line_n... |
17498596227 | import time
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.messagebox as tkmsg
from ttkthemes import ThemedTk
from PIL import ImageTk, Image
from geoCosiCorr3D.geoImageCorrelation.geoCorr_utils import splitcall, project_path, clamp
import geoCosiCorr3D.geoImageCorrelation.geoCorr_utils as utils
def run(window_constructor):
    """The entrypoint for this library.

    Builds the themed tk root, constructs the main window on it (with no
    parent window), and blocks in the tk mainloop until the app exits.

    Args:
        window_constructor (Window): The window constructor for the main
            window of the program.
    """
    app_root = get_root()
    window_constructor(None, app_root)
    app_root.mainloop()
def get_root():
    """Initialize tk and register the app's custom ttk styles.

    Returns:
        ThemedTk: the root of the application.
    """
    root = ThemedTk(theme='yaru')
    style = ttk.Style(root)
    # "Red.TEntry": a text box with red text, used to flag invalid input.
    style.element_create("plain.field", 'from', 'yaru', 'Entry.field')
    style.layout("Red.TEntry",
                 [('Entry.plain.field', {'children': [(
                     'Entry.background', {'children': [(
                         'Entry.padding', {'children': [(
                             'Entry.textarea', {'sticky': 'nswe'})],
                             'sticky': 'nswe'})], 'sticky': 'nswe'})],
                     'border': '2', 'sticky': 'nswe'})])
    style.configure("Red.TEntry", foreground='red', fieldbackground='#f0dfdf')
    # Progress bars with a text label drawn over the bar (horizontal and vertical).
    style.layout('text.Horizontal.TProgressbar', [
        ('Horizontal.Progressbar.trough', {'sticky': 'ew', 'children':
            [("Horizontal.Progressbar.pbar", {'side': 'left', 'sticky': 'ns'})]}),
        ('Horizontal.Progressbar.label', {'sticky': 'nswe'})])
    style.configure('text.Horizontal.TProgressbar', anchor='center')  # , foreground='orange')
    style.layout('text.Vertical.TProgressbar', [
        ('Vertical.Progressbar.trough', {'sticky': 'ns', 'children':
            [("Vertical.Progressbar.pbar", {'side': 'top', 'sticky': 'ew'})]}),
        ('Vertical.Progressbar.label', {'sticky': 'nswe'})])
    style.configure('text.Vertical.TProgressbar', anchor='center')  # , foreground='orange')
    # Dark colored frame used as a separator.
    style.configure('Dark.TFrame', background='DarkGray')
    # Bright red frame for visual layout debugging.
    style.layout("DEBUGFRAME", [('Frame', {})])
    style.configure('DEBUGFRAME', background='red')
    return root
def findInFrame(frame, name):
    """Depth-first lookup of the child widget whose str() equals `name`.

    Walks `frame`'s children, descending only into ttk.Frame containers.
    Returns the matching widget, or None when nothing matches.
    """
    for child in frame.children.values():
        if str(child) == name:
            return child
        if isinstance(child, ttk.Frame):
            found = findInFrame(child, name)
            if found:
                return found
def reset_option_menu(menu, menu_var, options, index=None):
    """Reset the values in an OptionMenu.

    Replaces the menu's entries with `options`. If `index` is given, the
    menu's variable is set to the option at that index; otherwise the
    current selection is kept when still valid, else the first option.
    """
    start_value = menu_var.get()
    menu = menu["menu"]
    menu.delete(0, "end")
    for string in options:
        # Bind `string` via a default argument to avoid the late-binding
        # closure pitfall (all commands would otherwise share the last value).
        menu.add_command(label=string, command=lambda value=string: menu_var.set(value))
    if index is not None:
        menu_var.set(options[index])
    elif start_value not in options:
        # Previous selection is no longer valid: fall back to the first option.
        menu_var.set(options[0])
@splitcall
def disable_button(button):
    """Creates a callback that disables the specified button."""
    if hasattr(button, 'info_tip'):
        # Remember the tooltip text so enable_button can restore it later.
        button._old_message = button.info_tip.text
        button.info_tip.text = 'Not all parameter requirements\nto press this button have been met.'
    button['state'] = tk.DISABLED
@splitcall
def enable_button(button):
    """Creates a callback that enables the specified button."""
    if hasattr(button, '_old_message'):
        # Restore the tooltip text saved by disable_button.
        button.info_tip.text = button._old_message
    button['state'] = tk.NORMAL
# _entries holds the output _entries in form {'prompt.paramName': entry}
_entries = {}
get_entries = lambda: _entries
entries_stack = []
invalids_stack = []
def step_in():
    """Saving option selection (aka pressing Okay instead of Cancel) is done as a stack.
    This adds one more layer to the stack."""
    global _entries, invalids
    # Push the current layer and work on a shallow copy, so step_out(False)
    # can discard the child window's changes entirely.
    entries_stack.append(_entries)
    _entries = _entries.copy()
    invalids_stack.append(invalids)
    invalids = {}
def step_out(save):
    """This moves back one layer from the stack.

    Args:
        save (bool): Save changes into _entries if True.
    """
    global _entries, invalids
    current = _entries
    _entries = entries_stack.pop(-1)
    invalids = invalids_stack.pop(-1)
    if not save: return
    # Merge the child layer's values back into the restored layer; entries
    # unknown to the parent layer are wrapped in a SimpleContainer.
    for name, entry in current.items():
        if name in _entries:
            _entries[name].set(entry.get())
        else:
            _entries[name] = SimpleContainer(entry.get())
# map of widget to error message tooltip
invalids = {}
def addInvalid(entry, message, invalidated):
    """Adds an entry to the invalids map, and calls the `invalidated` event if this is the first invalid item."""
    if len(invalids) == 0:
        invalidated()
    if entry not in invalids:
        tip = entry.info_tip
        tip.text = message
        # NOTE(review): the None assignment is immediately overwritten by
        # the tip assignment below — the first line looks redundant.
        invalids[entry] = None
        invalids[entry] = tip
    entry.configure(style='Red.TEntry')
def removeInvalid(entry, validated):
    """Removes an entry from the invalids map, and calls the `validated` event if this was the last invalid item."""
    if entry in invalids:
        # Restore the tooltip's original message before dropping the entry.
        invalids[entry].reset_msg()
        del invalids[entry]
        if len(invalids) == 0: validated()
    entry.configure(style='TEntry')
class SimpleContainer:
    """Minimal get/set box around a single value.

    Mirrors the IntVar/StringVar interface so the two can be used
    interchangeably where no tk variable object is available.
    """

    def __init__(self, value):
        self.value = value

    def set(self, value):
        self.value = value

    def get(self):
        return self.value

    def __str__(self):
        return f"SimpleContainer: {self.value}"

    __repr__ = __str__
def open_local_image(path):
    """Returns an opened ImageTk.PhotoImage for a path relative to the project root."""
    image = Image.open(project_path(path))
    return ImageTk.PhotoImage(image)
# tkinter objects
class ToolTip:
    """
    Tooltip recipe from
    http://www.voidspace.org.uk/python/weblog/arch_d7_2006_07_01.shtml#e387

    Shows `text` in a borderless Toplevel next to `widget` on <Enter>,
    and hides it again on <Leave>.
    """

    def __init__(self, widget, text):
        self.widget = widget
        self.tipwindow = None  # the Toplevel while visible, else None
        self.id = None
        self.x = self.y = 0
        self.text = text
        # add='+' preserves any bindings the widget already has.
        self.eid = self.widget.bind('<Enter>', self.showtip, add='+')
        self.lid = self.widget.bind('<Leave>', self.hidetip, add='+')

    def showtip(self, _):
        """Display text in tooltip window."""
        if self.tipwindow or not self.text:
            return
        x, y, _, _ = self.widget.bbox("insert")
        # Place the tip just to the right of the widget, aligned with its top.
        x = x + self.widget.winfo_rootx() + self.widget.winfo_width()
        y = y + self.widget.winfo_rooty()
        self.tipwindow = tw = tk.Toplevel(self.widget)
        tw.wm_overrideredirect(1)  # no window-manager decorations
        tw.wm_geometry("+%d+%d" % (x, y))
        try:
            # For Mac OS
            tw.tk.call("::tk::unsupported::MacWindowStyle",
                       "style", tw._w,
                       "help", "noActivates")
        except tk.TclError:
            pass
        label = tk.Label(tw, text=self.text, justify=tk.LEFT,
                         relief=tk.SOLID, borderwidth=1)
        label.pack(ipadx=1)

    def hidetip(self, _):
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()

    def destroy(self):
        # Detach our bindings and tear down any visible tip window.
        self.widget.unbind('<Enter>', self.eid)
        self.widget.unbind('<Leave>', self.lid)
        if self.tipwindow: self.tipwindow.destroy()
class TimedToolTip:
"""
ToolTip that appears on hover after a certain amount of time.
Tooltip recipe from
https://stackoverflow.com/a/36221216
"""
def __init__(self, widget, text='widget info'):
self.waittime = 500 # miliseconds
self.wraplength = 999 # pixels
self.widget = widget
self.text = text
self.start_text = text
self.enterid = self.widget.bind("<Enter>", self.enter, add='+')
self.leaveid = self.widget.bind("<Leave>", self.leave, add='+')
self.leavepid = self.widget.bind("<ButtonPress>", self.leave, add='+')
self.id = None
self.tw = None
def enter(self, event=None):
self.schedule()
def leave(self, event=None):
self.unschedule()
self.hidetip()
def schedule(self):
self.unschedule()
self.id = self.widget.after(self.waittime, self.showtip)
def unschedule(self):
id = self.id
self.id = None
if id:
self.widget.after_cancel(id)
def showtip(self, event=None):
if not self.text: return
line_count = self.text.count('\n') + 1
x = y = 0
x, y, cx, cy = self.widget.bbox("insert")
x += self.widget.winfo_rootx() + 5
y += self.widget.winfo_rooty() - 15 * line_count - 10
# creates a toplevel window
self.tw = tk.Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
label = tk.Label(self.tw, text=self.text, justify='left',
background="#ffffff", relief='solid', borderwidth=1,
wraplength=self.wraplength)
label.pack(ipadx=1)
def hidetip(self):
tw = self.tw
self.tw = None
if tw:
tw.destroy()
def reset_msg(self):
self.text = self.start_text
def destroy(self):
if self.tw: self.tw.destroy()
self.widget.unbind("<Enter>", self.enterid)
self.widget.unbind("<Leave>", self.leaveid)
self.widget.unbind("<ButtonPress>", self.leavepid)
# Modified from https://gist.github.com/novel-yet-trivial/3eddfce704db3082e38c84664fc1fdf8
class VerticalScrolledFrame:
"""
A vertically scrolled Frame that can be treated like any other Frame
ie it needs a master and layout and it can be a master.
:width:, :height:, :bg: are passed to the underlying Canvas
:bg: and all other keyword arguments are passed to the inner Frame
note that a widget layed out in this frame will have a self.master 3 layers deep,
(outer Frame, Canvas, inner Frame) so
if you subclass this there is no built in way for the children to access it.
You need to provide the controller separately.
"""
def __init__(self, master, **kwargs):
width = kwargs.pop('width', None)
height = kwargs.pop('height', None)
self.outer = ttk.Frame(master, **kwargs)
self.vsb = ttk.Scrollbar(self.outer, orient=tk.VERTICAL)
self.vsb.pack(fill=tk.Y, side=tk.RIGHT)
self.canvas = tk.Canvas(self.outer, highlightthickness=0, width=width, height=height, background='#f5f6f7')
self.canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
self.canvas['yscrollcommand'] = self.vsb.set
# mouse scroll does not seem to work with just "bind"; You have
# to use "bind_all". Therefore to use multiple windows you have
# to bind_all in the current widget
self.canvas.bind("<Enter>", self._bind_mouse)
self.canvas.bind("<Leave>", self._unbind_mouse)
self.vsb['command'] = self.canvas.yview
self.inner = ttk.Frame(self.canvas)
# pack the inner Frame into the Canvas with the topleft corner 4 pixels offset
self.wid = self.canvas.create_window(0, 0, window=self.inner, anchor='nw')
self.canvas.bind('<Configure>', self._on_canvas_configure)
self.inner.bind("<Configure>", self._on_frame_configure)
self.outer_attr = set(dir(ttk.Widget))
def __getattr__(self, item):
if item in self.outer_attr:
# geometry attributes etc (eg pack, destroy, tkraise) are passed on to self.outer
return getattr(self.outer, item)
else:
# all other attributes (_w, children, etc) are passed to self.inner
return getattr(self.inner, item)
def _on_canvas_configure(self, event):
width = event.width
self.canvas.itemconfig(self.wid, width=width)
def _on_frame_configure(self, event=None):
x1, y1, x2, y2 = self.canvas.bbox("all")
height = self.canvas.winfo_height()
self.canvas.config(scrollregion=(0, 0, x2, max(y2, height)))
def _bind_mouse(self, event=None):
self.canvas.bind_all("<4>", self._on_mousewheel)
self.canvas.bind_all("<5>", self._on_mousewheel)
self.canvas.bind_all("<MouseWheel>", self._on_mousewheel)
def _unbind_mouse(self, event=None):
self.canvas.unbind_all("<4>")
self.canvas.unbind_all("<5>")
self.canvas.unbind_all("<MouseWheel>")
def _on_mousewheel(self, event):
"""Linux uses event.num; Windows / Mac uses event.delta"""
if event.num == 4 or event.delta > 0:
self.canvas.yview_scroll(-1, "units")
elif event.num == 5 or event.delta < 0:
self.canvas.yview_scroll(1, "units")
def __str__(self):
return str(self.outer)
class Window:
"""Base class of all my windows that allows it to load child windows and contains helper functions for creating forms"""
def __init__(self, parent, top_level, title, multichild=False):
"""Creates a base frame for a window.
Args:
top_level (tk.TopLevel): The toplevel or window to draw onto.
title (str): The title of the window.
multichild (bool, optional): If true, this window can have multiple children. If false, this window supports the _entries stack for input parameters. Defaults to False.
"""
root_frame = ttk.Frame(top_level)
root_frame.pack(fill='both', expand=True)
self.root = root_frame
self.top_level = top_level
top_level.title(title)
self.children = None
self.parent = parent
if multichild: self.children = []
def new_window(self, constructor, *params, on_close=None, **kargs):
"""Generates a new window using the specified `Window`-descendant constructor, passing in the specified params and kargs.
Args:
constructor (constructor): The type of the specified window.
on_close (f()): An additional callback to run when the window closes.
Returns:
Window: The child.
"""
child = tk.Toplevel(self.root)
if isinstance(self.children, list):
child = constructor(self, child, *params, **kargs)
self.children.append(child)
else:
step_in()
child = constructor(self, child, *params, **kargs)
self.top_level.withdraw()
self.children = child
child.close_callback = on_close
child.top_level.protocol("WM_DELETE_WINDOW", self.child_close(child, False))
return child
def embed_window(self, master, constructor, *params, **kargs):
"""Embeds a Window onto master with specified parameters.
Args:
constructor: The Window constructor.
Returns:
Window: The embedded window.
"""
if not hasattr(master, 'title'): master.title = lambda _: None
window = constructor(self, master, *params, **kargs)
return window
@splitcall
def child_close(self, child, to_save):
"""Closes the current child window and bring this one back into view."""
if child.close_callback:
child.close_callback()
if isinstance(self.children, list):
self.children.remove(child)
child.top_level.destroy()
return
step_out(to_save)
self.top_level.deiconify()
self.children.top_level.destroy()
self.children = None
@splitcall
def back(self, to_save):
"""Go back, aka close the current window and return to the previous"""
self.parent.child_close(self.parent.children, to_save)()
def load_template(self, text):
"""Creates the standard template with self.params_f and self.buttons_f with an ok and cancel button."""
self.params_f = ttk.LabelFrame(self.root, text=text)
self.params_f.pack(padx=5, pady=5, fill="both")
buttons_f = ttk.Frame(self.root)
buttons_f.pack(fill=tk.X, ipadx=5, ipady=5)
ok_b = ttk.Button(buttons_f, text="Ok", command=self.back(True))
ok_b.pack(side=tk.RIGHT, padx=10, ipadx=10)
self.register_on_invalidate(disable_button(ok_b), self.params_f)
self.register_on_validate(enable_button(ok_b), self.params_f)
ttk.Button(buttons_f, text="Cancel", command=self.back(False)).pack(side=tk.RIGHT, ipadx=10)
def make_frame(self, **kargs):
"""Create and pack a basic frame with some default values set."""
grid = 'row' in kargs or 'column' in kargs
kargs.setdefault('master', self.root)
kargs.setdefault('text', None)
if grid:
kargs.setdefault('sticky', 'news')
else:
kargs.setdefault('fill', 'both')
kargs.setdefault('padx', 5)
kargs.setdefault('pady', 5)
f = ttk.LabelFrame(kargs['master'], text=kargs['text']) if kargs['text'] else ttk.Frame(kargs['master'])
del kargs['master']
del kargs['text']
if grid:
f.grid(**kargs)
else:
f.pack(**kargs)
return f
def make_run_bar(self, command, param_f, run_tip, start_msg, complete_msg, horizontal=True):
"""Create a run bar that runs (on a separate thread) a command.
Args:
command ((0 -> ())): The callback to run on a separate thread upon press of the 'Run' button.
param_f (Frame): The 'Run' button will only be enabled when this frame is validated.
run_tip (str): The message on the run button's hover.
start_msg (str): The message to show when run is pressed.
complete_msg (str): The message to show when command is completed.
"""
def prog(val):
val = int(clamp(val, 1, 100))
run_p['value'] = val
ttk.Style().configure(style, text=str(val) + ' %')
# if val == 100:
# tkmsg.showinfo('Complete', message=complete_msg)
def run():
command(callback=thread_callback)
ttk.Style().configure(style, text='0 %')
tkmsg.showinfo('Starting...', start_msg)
def thread_callback():
time.sleep(0.1)
tkmsg.showinfo('Complete', message=complete_msg)
utils.__mark_progress__ = prog
if horizontal:
b_side, p_side = tk.RIGHT, tk.LEFT
fill = 'x'
padx = ipadx = 10
pady = ipady = 0
style = 'text.Horizontal.TProgressbar'
orient = 'horizontal'
else:
b_side, p_side = tk.BOTTOM, tk.TOP
fill = 'y'
padx = ipadx = 0
pady = ipady = 10
style = 'text.Vertical.TProgressbar'
orient = 'vertical'
self.runbar_f = self.make_frame(expand=1, padx=(0, 5), pady=(0, 5))
run_b = ttk.Button(self.runbar_f, text="Run", command=run)
run_b.pack(side=b_side, padx=padx, ipadx=ipadx, pady=pady, ipady=ipady)
run_b.info_tip = TimedToolTip(run_b, text=run_tip)
run_p = ttk.Progressbar(self.runbar_f, style=style, orient=orient)
ttk.Style().configure(style, text='')
run_p.pack(side=p_side, padx=padx, ipadx=ipadx, pady=pady, ipady=ipady, fill=fill, expand=True)
run_p['value'] = 0
self.register_on_invalidate(disable_button(run_b), param_f)
self.register_on_validate(enable_button(run_b), param_f)
def register_on_validate(self, function, frame):
    """Adds an additional function to the end of the `validated` event handler.

    Args:
        function (() -> ()): Callback appended after any existing handler.
        frame: Object whose `on_validated` handler is extended (created if absent).
    """
    if hasattr(frame, 'on_validated'):
        # BUG FIX: this previously read `frame.on_invalidated`, so extending an
        # existing `validated` chain either raised AttributeError or chained
        # the wrong event's handler.
        old = frame.on_validated
        def chained():
            old()
            function()
    else:
        chained = function
    frame.on_validated = chained
def register_on_invalidate(self, function, frame):
    """Adds an additional function to the end of the `invalidated` event handler."""
    if not hasattr(frame, 'on_invalidated'):
        # No handler yet: the new callback becomes the whole chain.
        frame.on_invalidated = function
        return
    previous = frame.on_invalidated
    def run_chain():
        # Preserve registration order: existing handlers run first.
        previous()
        function()
    frame.on_invalidated = run_chain
def redirect_validation(self, from_frame, to_frame):
    """Redirect validation events from from_frame to to_frame."""
    # Named forwarders instead of inline lambdas; `to_frame`'s handlers are
    # looked up at call time, exactly as before.
    def forward_validated(*args, **kwargs):
        to_frame.on_validated(*args, **kwargs)
    def forward_invalidated(*args, **kwargs):
        to_frame.on_invalidated(*args, **kwargs)
    self.register_on_validate(forward_validated, from_frame)
    self.register_on_invalidate(forward_invalidated, from_frame)
| SaifAati/Geospatial-COSICorr3D | geoCosiCorr3D/geoCosiCorr3D_GUI/geoImageCorrelation_GUI/tk_utils.py | tk_utils.py | py | 20,960 | python | en | code | 37 | github-code | 36 | [
{
"api_name": "ttkthemes.ThemedTk",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Style",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Frame",
... |
35056056600 | from src.reduction import ReductionMethod
import cantera as ct
def main() -> None:
    """Run a skeletal-mechanism reduction with the hard-coded settings below.

    Edit the variables in this function (detailed mechanism, state-file
    folder, threshold, important species and method) before running.
    Accepted methods are "DRG" and "DRGEP". The retained species are
    printed; automation of result mechanism testing is to come later.
    """
    # Detailed (full) mechanism to reduce.
    full_mech = ct.Solution("gri30.xml")
    # Folder containing the saved state files (use \\ to separate folders).
    states_folder = "folder_path\\"
    # Reduction parameters.
    cutoff = 0.05
    targets = [ "OH", "CH4", "O2", "N2", "CO2", "H2O", "CO"]
    method = "DRGEP"  # accepts "DRG" or "DRGEP"
    # Run the reduction and report the surviving species.
    reducer = ReductionMethod(full_mech, det_dir=states_folder)
    kept_species, rii_list = reducer.run_reduction(method, cutoff, targets)
    print(f" Final red_mech contains {len(kept_species)} species.\n (red/det) = ({len(kept_species)}/{full_mech.n_species})\n")
    for name in kept_species:
        print(name)
# Standard script entry-point guard.
if __name__ == "__main__":
    main()
| fingeraugusto/red_app | main_app.py | main_app.py | py | 1,201 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cantera.Solution",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "src.reduction.ReductionMethod",
"line_number": 29,
"usage_type": "call"
}
] |
42211653033 | import ipywidgets
from ipywidgets import *
from IPython.display import display, Markdown
# Parallel lists: index i holds question i+1's options, correct answer and
# per-option feedback strings (same order as the options).
all_options=[]
all_answers=[]
all_feedback=[]
# Question 1: match the four device descriptions to the right acronym order.
options1=['ASIC, SoC, FPGA, MPSoC', 'FPGA, SoC, ASIC, MPSoC', 'SoC, ASIC, FPGA, MPSoC', 'ASIC, MPSoC, FPGA, SoC']
ans1='ASIC, SoC, FPGA, MPSoC'
fb1_a='Correct! We know ASICs are application specific. The other three descriptions can apply to an FPGA, but you can determine the correct answer from the mention of other components and multiple processors.'
fb1_b='Incorrect! Go back to the beginning of this notebook to review info on these four terms.'
fb1=[fb1_a, fb1_b, fb1_b, fb1_b]
all_options.append(options1); all_answers.append(ans1); all_feedback.append(fb1)
# Question 2: logic-gate diagram behaviour.
options2=['A','B','C']
ans2='B'
fb2_a='Incorrect; take a closer look at the diagram, and review to the gates discussed above.'
fb2_b='Correct! If B is changed to 1, the AND gate receives a 0 from the inverted B value. Because the AND gate is receiving to low inputs, its output is also low, and because A is also low, the OR gate will also be receiving two low inputs, making its output 0. On the flip side, switching either A or C to high, leaving the other two low, will result in D being high as well.'
fb2=[fb2_a, fb2_b, fb2_a]
all_options.append(options2); all_answers.append(ans2); all_feedback.append(fb2)
# Question 3: which language is NOT a hardware description language.
options3=['Verilog','JHDL','Ruby','VHDL']
ans3='Ruby'
fb3_a='Incorrect; look at the examples given in the notebook. Don\'t be afraid to look up a language that looks unfamiliar to you.'
fb3_b='Correct! Ruby is a high-level programming language that isn\'t used in designing hardware.'
fb3=[fb3_a, fb3_a, fb3_b, fb3_a]
all_options.append(options3); all_answers.append(ans3); all_feedback.append(fb3)
# Question 4: meaning of "feature size" on an FPGA.
options4=['The size of the FPGA','The size of a feature on an FPGA','The maximum routing distance between IP','The physical size of a processor on an SoC']
ans4='The size of a feature on an FPGA'
fb4=['Incorrect; remember that an FPGA is a silicon component.',
     'Correct! An FPGA \'feature\' refers to the elements on an FPGA, like a transistor, and smaller features means more can be fit in the same space, which is why you hear the number growing smaller as newer devices are developed. A higher number of features can imply (though not always) higher performance and power.',
     'Incorrect; routing is not often measured and monitored in this way.',
     'Incorrect; not all FPGA devices are SoCs.']
all_options.append(options4); all_answers.append(ans4); all_feedback.append(fb4)
# Question 5: what artifact is loaded onto the device.
options5=['A .tcl script','An HDL file', 'An IP', 'A bitstream']
ans5='A bitstream'
fb5_1='Incorrect; a tcl script is used to rebuild your design, as it includes commands for Vivado to use.'
fb5_2='Incorrect; HDL is used when developing the hardware, but is not loaded into the device.'
fb5_3='Incorrect; IP are building blocks in your hardware design.'
fb5_4='Correct! A bitstream is created based on your design, which is what is loaded onto the device in order for it to function as the designer intends. '
fb5=[fb5_1, fb5_2, fb5_3, fb5_4]
all_options.append(options5); all_answers.append(ans5); all_feedback.append(fb5)
def populate_questions():
    """Build one interactive widget per quiz question and return them as a list."""
    # Walk the three parallel lists in lockstep instead of indexing by position.
    return [
        show_buttons(opts, answer, feedback)
        for opts, answer, feedback in zip(all_options, all_answers, all_feedback)
    ]
def show_buttons(options, answer, feedback):
    """Wire a RadioButtons selector to mc_interact and return the interactive widget.

    `answer` is accepted for call-site symmetry but is not consulted here;
    grading is encoded in the per-option `feedback` strings.
    """
    selector = RadioButtons(
        description=' ',
        options=options,
        disabled=False,
        layout={'width': 'max-content'}
    )
    # Only the radio selection is live; options/feedback are passed as fixed.
    return interactive(mc_interact, mc_val=selector, options=fixed(options), feedback=fixed(feedback))
# interactive function, changing value: mc_val
# Callback for ipywidgets.interactive: look up the index of the selected
# option and render that option's Markdown feedback string.
def mc_interact(mc_val, options, feedback):
fb_text=feedback[options.index(mc_val)]
display(Markdown(fb_text)) | philipwu62/xilinx_XUP_notebooks | lib/fpga_widg.py | fpga_widg.py | py | 3,813 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "IPython.display.display",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "IPython.display.Markdown",
"line_number": 72,
"usage_type": "call"
}
] |
5514632684 | # -*- coding: UTF-8 -*-
from Courier import Courier,Order,debugFlag,OrdersPerSecond
import queue,sys,statistics
import time,json
import threading
def GetNextOrder(prepareTime):
    """Dispatch a courier immediately and create an order with the given prep time."""
    return Order(Courier(), prepareTime)
if __name__ == '__main__':
    try:
        # Feed every order from the sample file into the dispatch system,
        # pacing admissions at OrdersPerSecond.
        with open('dispatch_orders.json',encoding='utf-8') as f_in: #sample
            data = json.load(f_in)
            for seq,order in enumerate(data):
                prepareTime = int (order['prepTime'])
                print('Order %d new with prepareTime: %d'%(seq+1,prepareTime))
                GetNextOrder(prepareTime)
                time.sleep(1/int(OrdersPerSecond))# 2 order per second by default
        # Poll until every order can be eaten and every courier has arrived,
        # then report the average wait times and exit the loop.
        while True:
            if not ( all(x.canEate==1 for x in Order.orders) and all(x.Arrived == 1 for x in Courier.couriers) ):
                print('qsize down: ',len([x.canEate for x in Order.orders if x.canEate==0])) if debugFlag == '1' else None
            else:
                print('Order Average Waittime(seconds): %.3f ,total %d orders' % (statistics.mean([x.waitTime.total_seconds() for x in Order.orders]),len(Order.orders)))
                print('Courier Average Waittime(seconds): %.3f ,total %d courier' % (statistics.mean([x.waitTime.total_seconds() for x in Courier.couriers]),len(Courier.couriers)))
                print()
                break
            time.sleep(3)
    except KeyboardInterrupt:
        print ("interruptted by Ctrl-c")
        sys.exit(1)
| slideclick/2021ccs | execu/main.py | main.py | py | 1,525 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Courier.Courier",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "Courier.Order",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number... |
71104421224 | #!/usr/bin/env python
# -*- coding=UTF-8 -*-
# Created at May 26 10:07 by BlahGeek@Gmail.com
import sys
if hasattr(sys, 'setdefaultencoding'):
sys.setdefaultencoding('UTF-8')
import os
import httplib2
import requests
from BeautifulSoup import BeautifulSoup
from .settings import COOKIR_PATH
BASE_URL = 'http://3g.renren.com/status/newstatus.do'
class RenRen:
def __init__(self):
self.session = requests.Session()
cookie = open(COOKIR_PATH).read()
cookie = [x.strip() for x in cookie.split(';') if x]
cookie = map(lambda x: x.split('=', 1), cookie)
cookie = dict(cookie)
self.session.cookies = requests.utils.cookiejar_from_dict(cookie)
def postStatus(self, text):
soup = BeautifulSoup(self.session.get(BASE_URL).content)
form = soup.find('form')
assert(form is not None)
values = map(lambda x: (x['name'], x['value']), form.findAll('input', type='hidden'))
data = {'status': text}
data.update(dict(values))
req = self.session.post(form['action'], data)
# save cookie
with open(COOKIR_PATH, 'w') as f:
cookie = requests.utils.dict_from_cookiejar(self.session.cookies)
cookie = '; '.join([k+'='+v for k, v in cookie.iteritems()])
f.write(cookie)
| blahgeek/treehole | treehole/renren.py | renren.py | py | 1,318 | python | en | code | 30 | github-code | 36 | [
{
"api_name": "sys.setdefaultencoding",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "settings.COOKIR_PATH",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "reque... |
7755902959 | import logging
from typing import Any, Callable, Coroutine, Dict, List, Optional, Union
import attr
from geojson_pydantic.geometries import (
GeometryCollection,
LineString,
MultiLineString,
MultiPoint,
MultiPolygon,
Point,
Polygon,
)
from pydantic import validator
from pydantic.types import conint
from pystac.utils import str_to_datetime
from stac_fastapi.api.models import BaseSearchGetRequest, ItemCollectionUri
from stac_fastapi.pgstac.types.base_item_cache import BaseItemCache
from stac_fastapi.pgstac.types.search import PgstacSearch
from starlette.requests import Request
from pccommon.redis import cached_result
from pcstac.contants import CACHE_KEY_BASE_ITEM
# Default page size for search requests (raised from stac-fastapi's default
# for performance; see PCSearch.limit below).
DEFAULT_LIMIT = 250

logger = logging.getLogger(__name__)
class PCSearch(PgstacSearch):
    # Increase the default limit for performance
    # Ignore "Illegal type annotation: call expression not allowed"
    limit: Optional[conint(ge=1, le=1000)] = DEFAULT_LIMIT  # type:ignore

    # Can be removed when
    # https://github.com/stac-utils/stac-fastapi/issues/187 is closed
    intersects: Optional[
        Union[
            Point,
            MultiPoint,
            LineString,
            MultiLineString,
            Polygon,
            MultiPolygon,
            GeometryCollection,
        ]
    ]

    @validator("datetime")
    def validate_datetime(cls, v: str) -> str:
        """Validate datetime.

        Custom to allow for users to supply dates only.
        """
        # A lone value (no "/") is interpreted as the end of an open range.
        parts = v.split("/") if "/" in v else ["..", v]
        dates: List[str] = []
        for part in parts:
            if part != "..":
                # str_to_datetime raises for anything that is not parseable.
                str_to_datetime(part)
            dates.append(part)
        # A closed range must also be ordered begin <= end.
        if ".." not in dates and str_to_datetime(dates[0]) > str_to_datetime(dates[1]):
            raise ValueError(
                "Invalid datetime range, must match format (begin_date, end_date)"
            )
        return v
class RedisBaseItemCache(BaseItemCache):
    """
    Return the base item for the collection and cache by collection id.

    Lookup order: the per-instance dict, then Redis (via cached_result),
    and finally the database fetch supplied by the caller.
    """

    def __init__(
        self,
        fetch_base_item: Callable[[str], Coroutine[Any, Any, Dict[str, Any]]],
        request: Request,
    ):
        # Instance-local memo of base items keyed by collection id.
        self._base_items: dict = {}
        super().__init__(fetch_base_item, request)

    async def get(self, collection_id: str) -> Dict[str, Any]:
        # Fast path: already resolved on this instance.
        if collection_id in self._base_items:
            return self._base_items[collection_id]

        async def _load() -> Dict[str, Any]:
            return await self._fetch_base_item(collection_id)

        cache_key = f"{CACHE_KEY_BASE_ITEM}:{collection_id}"
        self._base_items[collection_id] = await cached_result(
            _load, cache_key, self._request
        )
        return self._base_items[collection_id]
# Item-collection GET parameters with the raised default page size.
@attr.s
class PCItemCollectionUri(ItemCollectionUri):
    limit: Optional[int] = attr.ib(default=DEFAULT_LIMIT)  # type:ignore
# Search GET parameters with the raised default page size.
@attr.s
class PCSearchGetRequest(BaseSearchGetRequest):
    limit: Optional[int] = attr.ib(default=DEFAULT_LIMIT)  # type:ignore
| microsoft/planetary-computer-apis | pcstac/pcstac/search.py | search.py | py | 3,330 | python | en | code | 88 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "stac_fastapi.pgstac.types.search.PgstacSearch",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 33,
"usage_type": "name"
},
{
"a... |
14783455392 | '''
ME 598 CUDA
Homework 2
Author: Hien (Ryan) Nguyen
Last modified: 01/28/2018
'''
import numpy as np # import scientific computing library
import matplotlib.pyplot as plt # import plotting library
from numba import cuda
import math
import time
from mpl_toolkits import mplot3d
''' Question 2 functions '''
def f2D (x, y):
    """Scalar Laplace-problem field: sin(pi*x) * sinh(pi*y) / sinh(pi)."""
    pi = np.pi
    # Same evaluation order as the one-liner it replaces (product, then divide).
    numerator = math.sin(pi * x) * math.sinh(pi * y)
    return numerator / math.sinh(pi)
def fArray2D (x, y):
    """Sample f2D on the tensor grid x x y; returns an (nx, ny) float32 array."""
    values = np.empty((x.size, y.size), dtype=np.float32)
    # Serial (CPU) reference evaluation, one grid point at a time.
    for i, xi in enumerate(x):
        for j, yj in enumerate(y):
            values[i, j] = f2D(xi, yj)
    return values
# Device-side twin of f2D; callable only from within CUDA kernels.
@cuda.jit ( device = True )
def pf2D (x, y):
    return math.sin ( np.pi *x)* math.sinh ( np.pi *y)/ math.sinh ( np.pi )
# CUDA kernel: each thread fills one element d_f[i, j] = pf2D(d_x[i], d_y[j]).
@cuda.jit ('void (f4 [:] , f4 [:] , f4 [: ,:])')
def pfKernel2D (d_x , d_y , d_f):
    i , j = cuda.grid (2)
    nx , ny = d_f.shape
    # Guard: the launch grid may overhang when dims aren't block multiples.
    if i < nx and j < ny:
        d_f[i,j] = pf2D (d_x[i], d_y[j])
def pfArray2D (x, y, TPBX, TPBY):
    """Evaluate the field on the GPU.

    Args:
        x, y: 1-D float32 coordinate arrays.
        TPBX, TPBY: threads per block along each grid axis.
    Returns:
        (nx, ny) float32 result copied back to the host.
    """
    nx = x.size
    ny = y.size
    d_x = cuda.to_device(x)
    d_y = cuda.to_device(y)
    d_f = cuda.device_array((nx, ny), dtype=np.float32)
    # Round the grid up so every output element gets a thread.
    gridDims = ((nx + TPBX - 1) // TPBX,
                (ny + TPBY - 1) // TPBY)
    blockDims = (TPBX, TPBY)
    startTime = time.time()
    pfKernel2D[gridDims, blockDims](d_x, d_y, d_f)
    # NOTE(review): kernel launches are asynchronous; without cuda.synchronize()
    # this likely measures launch overhead rather than execution -- confirm intent.
    kTime = (time.time() - startTime) * 1000
    print ("Kernal call time is: ", kTime)
    return d_f.copy_to_host()
def question2():
    """Compare serial vs CUDA evaluation across grid sizes and plot the speedup."""
    print ()
    print ("---------- Question 2 ----------")
    TPBX = 8
    TPBY = 32
    # Grid sizes to sweep (linspace yields floats; cast before use below).
    NX = np.linspace(100, 1000, 10)
    NY = np.linspace(100, 1000, 10)
    sTime = [0]*len(NX)
    pTime = [0]*len(NX)
    accel = [0]*len(NX)
    for i in range(len(NX)):
        print ("Array size: ", NX[i])
        # BUG FIX: np.linspace requires an integer sample count; NX/NY hold
        # floats, which raises TypeError on NumPy >= 1.18. Cast explicitly.
        x = np.linspace(0, 1, int(NX[i]), dtype=np.float32)
        y = np.linspace(0, 1, int(NY[i]), dtype=np.float32)
        # Time the serial CPU reference.
        startTime = time.time()
        fs = fArray2D (x, y)
        sTime[i] = (time.time() - startTime) * 1000
        print ("Series processing time: ", sTime[i])
        # Time the GPU version (includes transfer + launch overhead).
        startTime = time.time()
        fp = pfArray2D(x, y, TPBX, TPBY)
        pTime[i] = (time.time() - startTime) * 1000
        print ("Parallel processing time: ", pTime[i])
        accel[i] = sTime[i]/pTime[i]
        print ("Accel is: ", accel[i])
    # Plot runtimes (top) and the resulting acceleration (bottom).
    plt.figure(1)
    plt.subplot(211)
    plt.plot(NX, sTime, 'r--', label='series runtime')
    plt.plot(NX, pTime, 'g^', label='parallel_runtime')
    plt.legend()
    plt.title("Series and Parallel Runtime vs Array Size")
    plt.subplot(212)
    plt.plot(NX, accel)
    plt.title("Acceleration vs Array Size")
    plt.show()
''' Question 3 functions '''
def question3():
    """Run the kernel at the maximum supported block size and report the limit."""
    print ()
    print ("---------- Question 3 ----------")
    # 32x32 = 1024 threads per block, the per-block ceiling observed here.
    TPBX = 32
    TPBY = 32
    NX = 255
    NY = 255
    x = np.linspace(0, 1, NX, dtype=np.float32)
    y = np.linspace(0, 1, NY, dtype=np.float32)
    fp = pfArray2D(x, y, TPBX, TPBY)
    print ("32 is the largest number of thread a block can have."
           " Anything larger than that will produce the following error:"
           " numba.cuda.cudadrv.driver.CudaAPIError: [1] Call to"
           " cuLaunchKernel results in CUDA_ERROR_INVALID_VALUE")
''' Question 4 functions '''
def question4():
    """Print the question-4 observation about block aspect ratio."""
    header = "---------- Question 4 ----------"
    print ()
    print (header)
    # Empirical note; timings are gathered by the other questions.
    print ("Change in aspect ratio has very little affect on the kernel"
           " execution time or kernal call")
''' Question 5 functions '''
def question5():
    """Build the sin*sin field on the GPU, plot it, and show p-norms approaching the sup-norm."""
    print ()
    print ("---------- Question 5 ----------")
    arrayDimX = 255
    arrayDimY = 255
    # CLEANUP: removed the dead `array = [[0]*arrayDimX] * arrayDimY`
    # pre-allocation; it aliased one row object arrayDimY times and was
    # overwritten immediately by make_matrix below.
    x = np.linspace(0, 2*math.pi, arrayDimX)
    y = np.linspace(0, 2*math.pi, arrayDimY)
    array = make_matrix(x, y)
    X, Y = np.meshgrid(x, y)
    plt.contourf(X, Y, array)
    plt.show()
    # As p grows, the p-norm converges to the infinity (max) norm.
    for p in (2, 4, 6, 1000):
        print (f"Compute L{p}:")
        res = pnorm(array, p)
        print ("Result is: ", res)
    print ("The value of norm approaches 1 which is norm infinity as p increases")
def make_matrix(x, y):
    """Fill an (nx, ny) matrix with sin(2*pi*x_i) * sin(2*pi*y_j) on the GPU."""
    TPBX = 8
    TPBY = 8
    nx = np.array(x).shape[0]
    ny = np.array(y).shape[0]
    d_x = cuda.to_device(np.array(x))
    d_y = cuda.to_device(np.array(y))
    d_out = cuda.device_array((nx, ny))
    # One thread per output element, grid rounded up.
    gridDims = ((nx + TPBX - 1) // TPBX,
                (ny + TPBY - 1) // TPBY)
    blockDims = (TPBX, TPBY)
    make_matrix_kerneld[gridDims, blockDims](d_x, d_y, d_out)
    return d_out.copy_to_host()
# CUDA kernel: d_out[i, j] = sin(2*pi*x[i]) * sin(2*pi*y[j]).
@cuda.jit
def make_matrix_kerneld(d_x, d_y, d_out):
    i , j = cuda.grid (2)
    nx = d_x.shape[0]
    ny = d_y.shape[0]
    # Guard against grid overhang.
    if i < nx and j < ny:
        d_out[i, j] = math.sin(2*math.pi*d_x[i])*math.sin(2*math.pi*d_y[j])
# CUDA kernel: raise each element to the p-th power IN PLACE (destructive on
# the device copy; the host array passed to pnorm is unaffected).
@cuda.jit
def norm_kernel(d_array, p):
    i , j = cuda.grid (2)
    nx , ny = d_array.shape
    if i < nx and j < ny:
        d_array[i,j] = (d_array[i,j] ** p)
def pnorm(array, p):
    """Return the entrywise p-norm (sum a_ij**p) ** (1/p), powers computed on the GPU.

    NOTE(review): no abs() is taken, so odd p with negative entries gives a
    signed sum; the callers in question5 only use even p -- confirm if reused.
    """
    TPBX = 8
    TPBY = 8
    nx, ny = np.array(array).shape
    # Copy to the device; norm_kernel powers the copy in place.
    d_array = cuda.to_device(np.array(array))
    gridDims = ((nx + TPBX - 1) // TPBX,
                (ny + TPBY - 1) // TPBY)
    blockDims = (TPBX, TPBY)
    norm_kernel[gridDims, blockDims](d_array, p)
    # Sequential host-side reduction of the powered elements.
    res = 0
    d_arrayFlat = d_array.copy_to_host().flatten()
    for i in range(d_arrayFlat.shape[0]):
        res += d_arrayFlat[i]
    return res ** (1/p)
''' Question 6 '''
def question6():
    """Discuss IVP parallelization, then sweep three oscillator variants over a
    grid of initial conditions and 3-D scatter-plot the radius ratio r."""
    print ()
    print ("---------- Question 6 ----------")
    print ("For IVPs problems, there is no direct way to parallelize "
           "the computation over a grid of time intervals because current "
           "value depends on previous values of each states and thus to get "
           "value at time k, we need to already compute value of all states "
           "at time k-1")
    print ("For IVPs problems, there is a way to parallelize over a "
           "grid of initial conditions because the iteration process for "
           "each group of initial conditions are independent")
    # Time discretization for the explicit integration.
    nt = 1000
    t = np.linspace(0, 10, nt)
    dt = 1/nt
    # 50x50 grid of initial (position, velocity) pairs in [-3, 3]^2.
    x_i = np.linspace(-3, 3, 50)
    v_i = np.linspace(-3, 3, 50)
    X,V = np.meshgrid(x_i, v_i)
    for subprob in ["6c", "6d", "6e"]:
        print ("Subproblem ", subprob)
        r = iterate(x_i, v_i, dt, nt, subprob)
        fig = plt.figure(6)
        ax = plt.axes(projection='3d')
        ax.scatter3D(X, V, r)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('r')
        plt.show()
# Host-side dispatcher shared by subproblems 6c/6d/6e.
def iterate(x_i, v_i, dt, nt, prob):
    """Integrate the oscillator for every (x0, v0) pair on the GPU.

    prob selects the kernel: "6c", "6d" or "6e" (see the kernels below).
    Returns the (nx, nv) grid of final-to-initial radius ratios.
    """
    TPBX = 16
    TPBV = 16
    nx = np.array(x_i).shape[0]
    nv = np.array(v_i).shape[0]
    d_xi = cuda.to_device(np.array(x_i))
    d_vi = cuda.to_device(np.array(v_i))
    # Full (nx, nv, nt) trajectories are kept on the device only.
    d_x = cuda.device_array((nx, nv, nt))
    d_v = cuda.device_array((nx, nv, nt))
    d_r = cuda.device_array((nx, nv))
    gridDims = ((nx + TPBX - 1) // TPBX,
                (nv + TPBV - 1) // TPBV)
    blockDims = (TPBX, TPBV)
    if prob == "6c":
        iterate_kernel_6c[gridDims, blockDims](d_xi, d_vi, d_x, d_v, dt, nt, d_r)
    elif prob == "6d":
        iterate_kernel_6d[gridDims, blockDims](d_xi, d_vi, d_x, d_v, dt, nt, d_r)
    elif prob == "6e":
        iterate_kernel_6e[gridDims, blockDims](d_xi, d_vi, d_x, d_v, dt, nt, d_r)
    return d_r.copy_to_host()
# 6d: damped oscillator, v' = -x - 0.1*v, integrated by explicit Euler;
# one thread per (x0, v0) initial-condition pair.
@cuda.jit
def iterate_kernel_6d(d_xi, d_vi, d_x, d_v, dt, nt, d_r):
    i, j = cuda.grid(2)
    nx = d_xi.shape[0]
    nv = d_vi.shape[0]
    if i < nx and j < nv:
        d_x[i, j, 0] = d_xi[i]
        d_v[i, j, 0] = d_vi[j]
        for k in range(nt-1):
            d_v[i, j, k+1] = d_v[i, j, k] + (- d_x[i, j, k] - 0.1 * d_v[i, j, k]) * dt
            d_x[i, j, k+1] = d_x[i, j, k] + d_v[i, j, k] * dt
        # Ratio of final to initial phase-space radius.
        d_r[i,j] = (d_v[i, j, nt-1] ** 2 + d_x[i, j, nt-1] **2) ** 0.5 /((d_xi[i]**2 + d_vi[j]**2)**0.5)
# 6c: undamped oscillator, v' = -x, integrated by explicit Euler;
# one thread per (x0, v0) initial-condition pair.
@cuda.jit
def iterate_kernel_6c(d_xi, d_vi, d_x, d_v, dt, nt, d_r):
    i, j = cuda.grid(2)
    nx = d_xi.shape[0]
    nv = d_vi.shape[0]
    if i < nx and j < nv:
        d_x[i, j, 0] = d_xi[i]
        d_v[i, j, 0] = d_vi[j]
        for k in range(nt-1):
            d_v[i, j, k+1] = d_v[i, j, k] - d_x[i, j, k] * dt
            d_x[i, j, k+1] = d_x[i, j, k] + d_v[i, j, k] * dt
        # Ratio of final to initial phase-space radius.
        d_r[i,j] = (d_v[i, j, nt-1] ** 2 + d_x[i, j, nt-1] **2) ** 0.5 /((d_xi[i]**2 + d_vi[j]**2)**0.5)
# 6e: Van der Pol-type oscillator, v' = -x + 0.1*(1 - x^2)*v, explicit Euler;
# one thread per (x0, v0) initial-condition pair.
@cuda.jit
def iterate_kernel_6e(d_xi, d_vi, d_x, d_v, dt, nt, d_r):
    i, j = cuda.grid(2)
    nx = d_xi.shape[0]
    nv = d_vi.shape[0]
    if i < nx and j < nv:
        d_x[i, j, 0] = d_xi[i]
        d_v[i, j, 0] = d_vi[j]
        for k in range(nt-1):
            d_v[i, j, k+1] = d_v[i, j, k] + (- d_x[i, j, k] + 0.1*(1-d_x[i, j, k]**2) * d_v[i, j, k]) * dt
            d_x[i, j, k+1] = d_x[i, j, k] + d_v[i, j, k] * dt
        # Ratio of final to initial phase-space radius.
        d_r[i,j] = (d_v[i, j, nt-1] ** 2 + d_x[i, j, nt-1] **2) ** 0.5 /((d_xi[i]**2 + d_vi[j]**2)**0.5)
def main():
    """Run every homework question in order."""
    for question in (question2, question3, question4, question5, question6):
        question()
# call to execute main
# Script entry point; runs all questions sequentially.
if __name__ == '__main__':
main() | ryannguyen94/CUDA | HW2/hw2.py | hw2.py | py | 9,498 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "math.sin",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "math.sinh",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 24,... |
21135250267 | # Databricks notebook source
# MAGIC %md
# MAGIC ## 2D Linear Regression
# MAGIC
# MAGIC #### Description
# MAGIC
# MAGIC This notebook is designed to provide a very basic insight into linear regression and how to utilise sklearn to perform it on datasets.
# MAGIC
# MAGIC In this notebook linear regression is performed on a dataset with 2 numeric variables, its aim is to explain the basic principles of linear regression before moving onto the second notebook which demonstrates linear regression on a multi-variable problem.
# MAGIC
# MAGIC Linear regression uses algebra to define the linear relationship between two or more variables. In 2-dimensional space, this linear relationship can be seen as the 'line-of-best-fit', a straight line that best represents the relationship between the 2 variables. This relationship holds as we add more variables though the line exists in higher dimensions and is hard to visualise through standard means.
# MAGIC
# MAGIC This linear relationship can then be used as a method for helping predicitions.
# MAGIC
# MAGIC #### SKLearn performance in databricks
# MAGIC
# MAGIC While SKLearn can be useful in certain situations, it is not designed to take advantage of cluster computing resources, which arguably is a major downside to using it inside databricks as you are not utilising the full processing power available to you.
# MAGIC
# MAGIC This is not us saying do not use sklearn, as it may well be appropriate for certain tasks. However, if you are performing tasks over large datasets and want to fully exploit the compute resources you have available to complete these tasks, then you should look into the Spark `MLlib` library.
# COMMAND ----------
# MAGIC %md
# MAGIC #### Retrieve the data
# MAGIC
# MAGIC In this example the toy datasets have already created and added to the collab database to mimic an actual workflow, we will use a general function to get the database name however this can be can be replaced with a string.
# MAGIC
# MAGIC The utility functions are imported via the next command which runs the notebook stored in a different location. You can view these functions by navigating to the folder or you can also click the link in the next command. This can also be a useful way to tidy up your code and store frequently used functions in their own notebook to be imported into others.
# COMMAND ----------
# DBTITLE 1,Import python utility functions
# MAGIC %run ../../Wrangler-Utilities/Utilities-Python
# COMMAND ----------
import matplotlib.pyplot as plt
import pandas as pd
# Build the fully-qualified table name from the collaboration database.
table_name = f"{get_collab_db()}.toy_2d_linear_regression"
# Retrieve the toy dataset as a Spark dataframe.
spark_df = spark.table(table_name)
# Render the dataframe in the notebook output.
display( spark_df )
# COMMAND ----------
# MAGIC %md
# MAGIC #### Understanding the Data
# MAGIC
# MAGIC As a first step before we move on to using creating potentially complex models, it may be useful to get some quick insights into the dataset. This way when moving forward we have a general appreciation of the contents of the dataset.
# MAGIC
# MAGIC There is many ways to do this, here we will show the inbuilt describe method and also how to create a simple plot of the data.
# MAGIC
# MAGIC *note: because this data is 2d in nature, plots are quite straightforward, more complex visualisation methods are needs for multivariable data*
# COMMAND ----------
# using .describe() gives us insight into some basic summary metrics of a
# dataframe; we can also pass in column names, e.g. .describe(['feature']),
# to isolate columns
display(spark_df.describe())
# COMMAND ----------
# To plot the data we must first convert it to a NumPy array or a pandas dataframe.
# Convert from Spark dataframe to pandas dataframe.
pandas_df = spark_df.toPandas()
# Extract the feature and target columns.
X = pandas_df['feature']
y = pandas_df['target']
# Scatter-plot target against feature.
plt.figure(figsize=(10, 5))
plt.scatter(X, y, marker='o')
plt.title("Plot of the Random Regression Dataset", fontsize="large")
plt.xlabel("Feature")
plt.ylabel("Target")
plt.show()
# COMMAND ----------
# MAGIC %md
# MAGIC #### Utilising SKLearn Linear Regression
# MAGIC In the above plot of the data we can see that there is a clear pattern in the data. But now suppose we want to model the exact linear relationship of this dataset.
# MAGIC
# MAGIC This is where we can utilise the sklearn LinearRegression function to aid us. To utilise the sklearn methods we must have a pandas dataframe not a spark dataframe.
# COMMAND ----------
# Convert the Spark dataframe to a pandas dataframe for scikit-learn.
pandas_df = spark_df.toPandas()
# Extract the two columns we want into separate variables.
X = pandas_df['feature']
y = pandas_df['target']
# Split the data into training and test sets.
# NOTE: random_state fixes the split for reproducibility; omit it in practice.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
# scikit-learn estimators expect 2-D arrays, so reshape the 1-D columns.
X_train = X_train.values.reshape(-1,1)
X_test = X_test.values.reshape(-1,1)
y_train = y_train.values.reshape(-1,1)
y_test = y_test.values.reshape(-1,1)
# Fit a linear regression model to the training data.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train) # training the algorithm
# Generate our predictions from the test data.
y_pred = regressor.predict( X_test )
# COMMAND ----------
# MAGIC %md
# MAGIC #### Visualising our Predictions
# MAGIC
# MAGIC There is many different ways we can now visualise our predictions:
# MAGIC - We can plot a figure of the scattered test plots and our predicted line
# MAGIC - We can display a table showing the actual test values vs our predicted values
# MAGIC - We can then plot a figure of this table to visualise it
# MAGIC
# MAGIC These are just few examples of course there is many more ways to gain insight.
# COMMAND ----------
# we can extract the exact intercept and coefficient of the slope
print("Intercept : ", regressor.intercept_)
print("Coefficient : ", regressor.coef_)
# Plot the test points in gray with the fitted regression line over them.
plt.figure(figsize=(10, 5))
plt.scatter(X_test, y_test, color='gray')
plt.plot(X_test, y_pred, color='red', linewidth=2)
plt.title("Prediction vs Test data", fontsize="large")
plt.xlabel("Feature")
plt.ylabel("Target")
plt.show()
# COMMAND ----------
# Tabulate actual test values next to the model's predictions.
df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()})
display(df)
# COMMAND ----------
# Visualise the table above as a bar chart (only the first 20 rows).
df1 = df.head(20)
df1.plot(kind='bar', figsize=(10,5))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='gray')
plt.show()
# COMMAND ----------
# MAGIC %md
# MAGIC ### Evaluating our Model
# MAGIC
# MAGIC The final step is to evaluate the performance of our model, this is important to compare how well different algorithms perform on a particular dataset.
# MAGIC
# MAGIC For regression, three evaluation metrics are commonly used:
# MAGIC - **Mean Absolute Error** is the mean of the absolute values of the errors
# MAGIC $$ MAE = \frac{1}{n} \sum^{n}_{j=1}|y_j - \hat{y}_j| $$
# MAGIC - **Mean Squared Error** is the mean of the squared errors
# MAGIC $$ MSE = \frac{1}{n} \sum^{n}_{j=1}(y_j - \hat{y}_j)^2 $$
# MAGIC - **Root Mean Squared Error** is the square root of the mean squared errors
# MAGIC $$ RMSE = \sqrt{\frac{1}{n} \sum^{n}_{j=1}(y_j - \hat{y}_j)^2} $$
# COMMAND ----------
from sklearn.metrics import mean_absolute_error, mean_squared_error
import numpy as np
# Report the three standard regression error metrics on the test set.
print('Mean Absolute Error :', mean_absolute_error(y_test, y_pred))
print('Mean Squared Error :', mean_squared_error(y_test, y_pred))
# RMSE: square root of MSE, in the same units as the target.
print('Root Mean Squared Error :', np.sqrt(mean_squared_error(y_test, y_pred)))
| NHSDigital/sde_example_analysis | python/machine_learning_small_data/regression_simple.py | regression_simple.py | py | 7,838 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "... |
27479098786 |
import os
import pika # Importa a biblioteca pika para interagir com o RabbitMQ
import time # Importa a biblioteca time para controlar o tempo de sleep do loop
import socket # Importa socket para verificar a conectividade com a internet
import json # Importa json para manipular dados JSON
import random # Importa random para gerar números aleatórios
import math # Importa math para realizar operações matemáticas
import xml.etree.ElementTree as ET # Importa ElementTree para manipular dados XML
import glob # Importa glob para encontrar todos os caminhos que correspondem a um padrão específico
from datetime import datetime, timedelta # Importa datetime e timedelta para trabalhar com datas e horas
from dotenv import load_dotenv # Importa load_dotenv para carregar variáveis de ambiente do arquivo .env
# Load environment variables from the .env file.
load_dotenv()
# RabbitMQ broker URL and routing key, read from the environment.
RABBITMQ_URL = os.getenv("RABBITMQ_URL")
ROUTING_KEY = os.getenv("ROUTING_KEY")
class SensorSimulator:
def __init__(self, sensor_data: dict):
    """Initialize the simulator with the given sensor data.

    :param sensor_data: dict, sensor data used to drive the simulation.
    """
    self.sensor_data = sensor_data
def generate_value(self, min_val, max_val, mean_val, fluctuation=5):
    """Draw one random reading near the mean, clipped to the sensor's range.

    :param min_val: float, smallest value the sensor can report.
    :param max_val: float, largest value the sensor can report.
    :param mean_val: float, target mean of the reading.
    :param fluctuation: float, allowed spread around the mean.
    :return: float sampled uniformly from the clipped band.
    """
    low = max(min_val, mean_val - fluctuation)
    high = min(max_val, mean_val + fluctuation)
    return random.uniform(low, high)
def simulate_sensor_failure(self, prob_failure=0.01):
    """Return True with probability `prob_failure`, simulating a sensor fault.

    :param prob_failure: float, failure probability in [0, 1].
    :return: bool, True when the sensor fails.
    """
    roll = random.random()
    return roll < prob_failure
def log_data_to_xml(self, batch_data):
"""
Loga os dados do sensor em um arquivo XML.
:param batch_data: list, dados do sensor a serem logados.
"""
date_str = datetime.now().strftime("%Y%m%d")
log_filename = f"sensor_data_log_{date_str}.xml"
# Verifica se o arquivo de log já existe, se sim, carrega os dados existentes
if os.path.exists(log_filename):
tree = ET.parse(log_filename)
root = tree.getroot()
else:
root = ET.Element("SensorDataBatch")
# Adiciona novos dados ao XML
for data in batch_data:
sensor_data = ET.SubElement(root, "SensorData")
for key, value in data.items():
ET.SubElement(sensor_data, key).text = str(value)
# Salva os dados no arquivo XML
tree = ET.ElementTree(root)
with open(log_filename, "wb") as file:
tree.write(file)
# Limpa logs antigos
self.clean_old_logs(log_file_prefix="sensor_data_log_", max_logs=7)
def clean_old_logs(self, log_file_prefix, max_logs):
"""
Limpa logs antigos, mantendo apenas um número específico de arquivos de log.
:param log_file_prefix: str, prefixo dos arquivos de log.
:param max_logs: int, número máximo de arquivos de log a serem mantidos.
"""
log_files = sorted(glob.glob(f"{log_file_prefix}*.xml"))
for log_file in log_files[:-max_logs]:
os.remove(log_file)
def send_to_rabbitmq(self, message):
"""
Envia uma mensagem para uma fila RabbitMQ.
:param message: str, mensagem a ser enviada.
"""
# Estabelece conexão com o RabbitMQ e declara a fila
connection = pika.BlockingConnection(pika.URLParameters(RABBITMQ_URL))
channel = connection.channel()
channel.queue_declare(queue=ROUTING_KEY, durable=True)
# Publica a mensagem na fila
channel.basic_publish(exchange='',
routing_key=ROUTING_KEY,
body=message,
properties=pika.BasicProperties(
delivery_mode=2,
))
print(f" [x] Enviado '{message}'")
connection.close()
def simulate(self):
"""
Inicia a simulação, gerando dados de sensores, logando-os e enviando-os para a fila RabbitMQ.
"""
# Aguarda conexão com a internet
while not self.is_connected():
print("Aguardando conexão com a internet...")
time.sleep(5)
specific_sensors = list(self.sensor_data.keys())
# Loop de simulação
while True:
batch_data = []
start_timestamp = datetime.now()
# Gera dados para cada sensor especificado
for machine_id, sensor_id in specific_sensors:
faixa_min, faixa_max, valor_medio = self.sensor_data[(machine_id, sensor_id)]
# Adiciona uma pequena variação ao valor médio
valor_medio += 1
valor_medio += 5 * math.sin(start_timestamp.minute / 5)
# Simula falha no sensor ou gera valor
if self.simulate_sensor_failure():
value = None
else:
value = self.generate_value(faixa_min, faixa_max, valor_medio)
# Cria timestamp e dados do sensor
timestamp = start_timestamp + timedelta(minutes=random.randint(0, 5))
str_timestamp = timestamp.strftime('%Y-%m-%d %H:%M:%S')
data = {
"timestamp": str_timestamp,
"CompanyId": "EMPRESA01",
"MachineId": machine_id,
"SensorId": sensor_id,
"Value": value
}
batch_data.append(data)
# Envia dados para RabbitMQ e loga em XML
self.send_to_rabbitmq(json.dumps(batch_data))
self.log_data_to_xml(batch_data)
# Controla o tempo de sleep do loop dependendo do horário
current_hour = datetime.now().hour
if 9 <= current_hour <= 17:
time.sleep(180)
else:
time.sleep(300)
@staticmethod
def is_connected():
"""
Verifica se há conexão com a internet.
:return: bool, True se conectado, False caso contrário.
"""
try:
socket.create_connection(("www.google.com", 80))
return True
except OSError:
pass
return False
# Sensor configuration: (machine_id, sensor_id) -> (min, max, mean) value range
sensor_data = {
    ("M01", "S01"): (70, 100, 80),
    ("M01", "S02"): (500, 900, 700),
    ("M02", "S03"): (100, 140, 120),
    ("M03", "S04"): (500, 900, 700),
    ("M04", "S05"): (160, 210, 170),
    ("M05", "S06"): (70, 100, 80),
    ("M05", "S07"): (100, 140, 130),
    ("M06", "S08"): (7000, 12000, 10800),
    ("M06", "S09"): (100, 140, 130),
    ("M07", "S10"): (70, 100, 80),
    ("M07", "S11"): (7000, 12000, 10800),
    ("M07", "S16"): (100, 400, 201),
    ("M08", "S12"): (70, 100, 80),
    ("M08", "S13"): (1000, 3000, 2000),
    ("M09", "S14"): (1500, 1900, 1765),
    ("M10", "S15"): (1500, 1900, 1765)
}
# Start the simulation (blocks forever)
simulator = SensorSimulator(sensor_data)
simulator.simulate()
| elderofz1on/ZionArchive | Projetos/MachineSimulatorMQTT/sensor_simulator.py | sensor_simulator.py | py | 7,987 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "random.uniform",
"line_num... |
73122366504 | from django.db import models
from django.contrib.auth.models import User
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
class System(models.Model):
    """A vehicle system/category that option codes can be assigned to."""
    name = models.CharField(max_length=20, verbose_name='System')
    owner = models.ForeignKey(User, verbose_name='Owner', on_delete=models.CASCADE)
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='Created Time')
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = verbose_name_plural = 'System'
class CodeImg(models.Model):
    """An uploaded image associated with an option code."""
    code_name = models.CharField(max_length=200, verbose_name='Code')
    code_img = models.ImageField(verbose_name='Code Image')
    def photo_url(self):
        """Return the image URL, or a default placeholder when no image is set."""
        if self.code_img and hasattr(self.code_img, 'url'):
            return self.code_img.url
        else:
            return '/default/1.jpg'
    def __str__(self):
        return self.code_name
    class Meta:
        verbose_name = verbose_name_plural = 'Code IMG'
class DriveType(models.Model):
    """Truck axle/drive configuration (e.g. 6x4) that codes can depend on."""
    # Integer constants stored in the DB; display strings come from DRIVE_TYPE
    DRIVE_6X4 = 1
    DRIVE_6X2 = 2
    DRIVE_4X2 = 3
    DRIVE_TYPE = (
        (DRIVE_6X4, '6X4'),
        (DRIVE_6X2, '6X2'),
        (DRIVE_4X2, '4X2'),
    )
    name = models.PositiveIntegerField(choices=DRIVE_TYPE, verbose_name='Drive Type')
    owner = models.ForeignKey(User, verbose_name='Owner', on_delete=models.CASCADE, default=2)
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='Created Time')
    def __str__(self):
        return self.get_name_display()
    class Meta:
        verbose_name = verbose_name_plural = 'Drive Type'
class Packages(models.Model):
    """An equipment package; some packages are only valid in combination
    with others ("only icw ..." in the display name)."""
    # Integer constants stored in the DB; display strings come from PACKAGES_TYPE
    COMFORT_CLASSIC = 10
    COMFORT_TOP = 11
    COMFORT_TOP_PLUS = 12
    DRIVER_OPERATION_CLASSIC = 20
    DRIVER_OPERATION_TOP = 21
    EFFICIENCY_CLASSIC = 30
    EXPRESS_CLASSIC = 40
    EXPRESS_TOP = 41
    SAFETY_TOP = 1
    MOUNTAIN = 2
    LIGHT_WEIGHT = 3
    ROAD_STAR = 4
    SUMMER_PACKAGE = 5
    WINTER_PACKAGE = 6
    PACKAGES_TYPE = (
        (COMFORT_CLASSIC, 'Comfort Classic'),
        (COMFORT_TOP, 'Comfort Top (only icw Comfort Classic)'),
        (COMFORT_TOP_PLUS, 'Comfort Top Plus (only icw Comfort Top)'),
        (DRIVER_OPERATION_CLASSIC, 'Driver Operation Classic'),
        (DRIVER_OPERATION_TOP, 'Driver Operation Top (only icw Operation Classic)'),
        (EFFICIENCY_CLASSIC, 'Efficiency Classic'),
        (EXPRESS_CLASSIC, 'Express Classic'),
        (EXPRESS_TOP, 'Express Top (only icw Express Classic)'),
        (SAFETY_TOP, 'Safety Top (only icw Express Top)'),
        (MOUNTAIN, 'Mountain'),
        (LIGHT_WEIGHT, 'Light Weight'),
        (ROAD_STAR, 'Road Star'),
        (SUMMER_PACKAGE, 'Summer Package (only icw Comfort Top)'),
        (WINTER_PACKAGE, 'Winter Package (only icw Comfort Classic)'),
    )
    name = models.PositiveIntegerField(choices=PACKAGES_TYPE, verbose_name='Packages')
    owner = models.ForeignKey(User, verbose_name='Owner', on_delete=models.CASCADE, default=2)
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='Created Time')
    def __str__(self):
        return self.get_name_display()
    class Meta:
        verbose_name = verbose_name_plural = 'Packages'
class CodesH6(models.Model):
    """An H6-series option code with its restrictions, applicable brand,
    drive types, packages and rich-text documentation fields."""
    # Soft-delete style status flags
    STATUS_ACTIVE = 1
    STATUS_DEACTIVE = 0
    STATUS_ITEMS = (
        (STATUS_ACTIVE, 'Active'),
        (STATUS_DEACTIVE, 'Deleted'),
    )
    # Brand variants of the H6 series
    H6A = 1
    H6B = 2
    H6 = 3
    NA = 0
    BRAND_ITEMS = (
        (H6A, 'H6A'),
        (H6B, 'H6B'),
        (H6, 'H6'),
        (NA, 'NA'),
    )
    name = models.CharField(max_length=10, verbose_name='Code')
    title = models.CharField(max_length=200, verbose_name='Title')
    system = models.ManyToManyField(System, verbose_name='System')
    comments = models.CharField(max_length=300, verbose_name='Comments', blank=True)
    status = models.PositiveIntegerField(choices=STATUS_ITEMS, verbose_name='Status')
    # Free-text compatibility restrictions with other codes
    restriction_with = models.CharField(max_length=500, verbose_name='With', blank=True)
    restriction_not_with = models.CharField(max_length=500, verbose_name='Not With', blank=True)
    brand = models.PositiveIntegerField(choices=BRAND_ITEMS, verbose_name='Brand', blank=True)
    drive_type = models.ManyToManyField(DriveType, verbose_name='Drive Type', blank=True)
    package = models.ManyToManyField(Packages, verbose_name='Package', blank=True)
    # Rich-text (CKEditor) documentation sections
    brief = RichTextUploadingField(blank=True, verbose_name='In Brief')
    benefits = RichTextUploadingField(blank=True,verbose_name='Benefits and Arguments')
    personal_comments = RichTextUploadingField(blank=True, verbose_name='Personal Comments')
    knowledge = RichTextUploadingField(blank=True, verbose_name='Knowledge')
    owner = models.ForeignKey(User, verbose_name='Owner', on_delete=models.CASCADE, default=3)
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='Created Time')
    def __str__(self):
        return self.name + '-' + self.title
    class Meta:
        verbose_name = verbose_name_plural = 'H6 Codes'
"""
命令行赋值一直不行,报错外键约束问题,原来owner default=4,但是数据库里面没有4
c=CodesH6(name='Dummy3',status=1,brand=1,owner_id=3)
c.save()
多对多不能直接add值,要用id,或者对象
c.system.add('Chassis')
"""
| ikofan/sh6 | codes/models.py | models.py | py | 5,282 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "... |
39901505337 | from collections import OrderedDict
from django.core.urlresolvers import resolve
def link_processor(request):
    """
    Context processor that supplies sidebar navigation data to every page.

    Adds "page_links", an iterable of (display name, url tag) pairs used to
    populate the sidebar, and "current_page_name", the url name of the view
    that handled the current request.
    """
    sidebar = OrderedDict([
        ("Home", "home"),
        ("Schools", "schools"),
        ("Professors", "professors"),
        ("Reviews", "reviews"),
    ])
    current = resolve(request.path_info).url_name
    return {
        "page_links": sidebar.items(),
        "current_page_name": current,
    }
| brhoades/sweaters-but-with-peer-reviews | middleware/links.py | links.py | py | 712 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.OrderedDict",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.core.urlresolvers.resolve",
"line_number": 19,
"usage_type": "call"
}
] |
25196942976 | import numpy as np
import argparse
### Arguments ###
parser = argparse.ArgumentParser()
parser.add_argument('input', type=str, help='Input text file')
parser.add_argument('fmap', type=str, help='Map with value and residue')
parser.add_argument('chain', type=str, help='Chain identifier to match residues')
parser.add_argument("--xvg", default=False, action=argparse.BooleanOptionalAction)
args = parser.parse_args()
def splitm(line):
    """
    Split a fixed-width PDB record line into its stripped column fields.

    The slice boundaries follow the standard PDB ATOM/HETATM column layout
    (record name, serial, atom name, altLoc, resName, chain, resSeq, iCode,
    x, y, z, occupancy, B-factor, element, charge).
    """
    spans = ((0, 6), (6, 11), (12, 16), (16, 17), (17, 20),
             (21, 22), (22, 26), (26, 27), (30, 38), (38, 46),
             (46, 54), (54, 60), (60, 66), (76, 78), (78, 80))
    return [line[a:b].strip() for a, b in spans]
def create_dict(fname):
    """
    Build a {residue-id: percentage} dict from a two-column map file.

    :param fname: path to the map file; each data line is "<key> <value>".
        When args.xvg is set, '#' and '@' header/comment lines are skipped
        and the key is used verbatim; otherwise the first 3 characters of
        the key (a residue-name prefix such as "GLU") are dropped.
    :return: dict mapping residue identifiers to value * 100.
    """
    mapping = {}
    with open(fname, 'r') as f:
        for line in f:
            data = line.split()
            if not data:
                continue  # skip blank lines
            if args.xvg:
                # BUG FIX: the original tested `data[0] != '#' or data[0] != '@'`,
                # which is always true, so xvg comment/directive lines were not
                # skipped and float(data[1]) could crash. Skip them properly.
                if not data[0].startswith(('#', '@')):
                    mapping[data[0]] = float(data[1]) * 100
            else:
                mapping[data[0][3:]] = float(data[1]) * 100
    return mapping
def main(inp, fmap, chain):
    """
    Write a copy of the PDB file with B-factors replaced by map values.

    For ATOM records on the requested chain whose residue number appears in
    the map file, the B-factor column is set to the mapped percentage; other
    ATOM records get a 0.0 B-factor when the column is empty, and all other
    lines are copied through unchanged. Output goes to <inp>_painted.pdb.
    """
    # PyMOL command the user can paste to colour by the new B-factors
    print('Suggested spectrum')
    print('spectrum b, 0xF4F3F3 0xD28288 0xF6A377 0xFBDF66 0xCFF66A 0x77FB74')
    map = create_dict(fmap)  # NOTE(review): shadows the builtin `map`
    keys = map.keys()
    # Fixed-width PDB ATOM line template matching splitm()'s field order
    pdb_format = "{:6s}{:5d} {:^4s}{:1s}{:3s} {:1s}{:4d}{:1s} {:8.3f}{:8.3f}{:8.3f}{:6.2f}{:6.2f} {:>2s}{:2s}\n"
    out = inp.split('.')[0]+'_painted.pdb'
    with open(inp,'r') as f:
        with open(out, 'w') as fw:
            for line in f:
                data = splitm(line)
                if data[0] == 'ATOM':
                    # Convert numeric columns so they can be re-formatted
                    data[1] = int(data[1])
                    data[6] = int(data[6])
                    data[8] = float(data[8])
                    data[9] = float(data[9])
                    data[10] = float(data[10])
                    data[11] = float(data[11])
                    if data[5] == chain:
                        if str(data[6]) in keys:
                            # Residue is in the map: paint its B-factor
                            data[12] = map[str(data[6])]
                            fw.write(pdb_format.format(*data))
                        else:
                            # Unmapped residue: fill an empty B-factor with 0.0,
                            # otherwise keep the original line untouched
                            if len(data[12]) == 0:
                                data[12] = 0.0
                                fw.write(pdb_format.format(*data))
                            else:
                                fw.write(line)
                    else:
                        # Other chains: only normalise an empty B-factor column
                        if len(data[12]) == 0:
                            data[12] = 0.0
                            fw.write(pdb_format.format(*data))
                        else:
                            fw.write(line)
                else:
                    # Non-ATOM records are copied verbatim
                    fw.write(line)
if __name__ == '__main__':
main(args.input, args.fmap, args.chain) | JAMelendezD/Contacts | paint_pdb.py | paint_pdb.py | py | 2,209 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "argparse.BooleanOptionalAction",
"line_number": 10,
"usage_type": "attribute"
}
] |
26409275029 | #这个是我自己写的,过了前80个Case 过不了最后一个 超时了
#代码随想录的前两个答案也超时,只有那个用字典的不超时
'''
class Solution:
def findItinerary(self, tickets: List[List[str]]) -> List[str]:
self.result = []
tickets.sort()
used = [False] * len(tickets)
self.backtracking(['JFK'],used,tickets)
return self.result
def backtracking(self, path, used, tickets):
if len(path) == len(tickets) + 1:
self.result = path
# print('可以了')
return True
for index, ticket in enumerate(tickets):
if used[index] == False and tickets[index][0] == path[-1]:
#找没用过的机票且对得上上一个地方的
path.append(tickets[index][1])
used[index] = True
#向下递归
# print('path is',path)
# print('used is',used)
# print('继续递归')
if self.backtracking(path, used, tickets):
return True
path.pop()
used[index] = False
return False
'''
# Dictionary-based solution from "代码随想录" (programmercarl).
# Reason about this solution with the following case in mind:
# tickets = [["jfk","kul"], ["nrt","jfk"], ["jfk","nrt"]]
class Solution:
    # NOTE(review): importing inside the class body binds `defaultdict` as a
    # class attribute, which is NOT visible as a bare name inside the methods
    # below; this presumably relies on the judge importing it at module level
    # — confirm, or move the import to module scope.
    from collections import defaultdict
    def findItinerary(self, tickets: List[List[str]]) -> List[str]:
        """Hierholzer-style itinerary reconstruction (LeetCode 332)."""
        targets = defaultdict(list)
        for ticket in tickets:
            targets[ticket[0]].append(ticket[1])
        # targets now maps each departure airport to a list of destinations
        for value in targets.values():
            value.sort(reverse=True)
        # Each destination list is sorted in reverse so pop() yields the
        # lexicographically smallest destination first
        self.result = []
        self.backtracking("JFK", targets)
        return self.result[::-1] #return result (reversed)
    def backtracking(self, start, targets):
        while targets[start]: #while this airport still has unused destinations
            next_start = targets[start].pop() #take the next departure and remove the used ticket from targets
            self.backtracking(next_start, targets)
        self.result.append(start) #an airport with no remaining destinations is appended to result
        #result is returned reversed, e.g. jfk-nrt-jfk-kul is stored as ["kul"<-"jfk"<-"nrt"<-"jfk"]
        #so "kul", the airport with no outgoing tickets left, enters result first
| lpjjj1222/leetcode-notebook | 332. Reconstruct Itinerary.py | 332. Reconstruct Itinerary.py | py | 2,563 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 42,
"usage_type": "call"
}
] |
5185686033 | import re
import ssl
import requests
import urllib.request
from lxml import etree
from fake_useragent import UserAgent
from concurrent.futures import wait, ALL_COMPLETED
from .common import Anime, Seed, Subgroup
class Mikan:
    """Scraper for the Mikan anime site: fetches anime/subgroup/torrent
    listings and downloads cover images and .torrent files, optionally
    fanned out over a thread-pool executor."""
    def __init__(self, logger, config, executor):
        # Base URL of the Mikan site, taken from the app config
        self.url = config['URL']
        self.ua = UserAgent()
        self.logger = logger
        self.executor = executor
        # Shared accumulators filled by the *_thread worker methods
        self.seed_list = []
        self.seed_list_download_sucess = []
        self.seed_list_download_failed = []
        self.img_list_download = []
    def request_html(self, url):
        """GET a page and return its parsed lxml tree, or None on failure."""
        try:
            headers = {'User-Agent': self.ua.random}
            res = requests.get(url=url, headers=headers, timeout=5)
            res.raise_for_status()
            res.encoding = res.apparent_encoding
            html_doc = etree.HTML(res.text)
        except Exception as e:
            self.logger.warning("[SPIDER] request_html failed, url: {}, error: {}".format(url, e))
        else:
            self.logger.info("[SPIDER] request_html success, url: {}".format(url))
            return html_doc
    def download(self, url, path):
        """Download a file to path; returns True on success, False on failure.
        NOTE(review): disabling certificate verification is a process-wide
        side effect — confirm this is intentional."""
        ssl._create_default_https_context = ssl._create_unverified_context
        opener = urllib.request.build_opener()
        opener.addheaders = [('User-Agent', self.ua.random)]
        urllib.request.install_opener(opener)
        try:
            urllib.request.urlretrieve(url, path)
        except Exception as e:
            self.logger.warning("[SPIDER] download failed, url: {}, error: {}".format(url, e))
            return False
        else:
            self.logger.info("[SPIDER] download success, url: {}".format(url))
            return True
    def get_anime_list(self):
        """Scrape the front page and return a list of Anime records."""
        html_doc = self.request_html(self.url)
        if html_doc == None:
            self.logger.warning("[SPIDER] get_anime_list failed, request_html failed, url: {}".format(self.url))
            return
        anime_list = []
        for info in html_doc.xpath('//div[@class="sk-bangumi"]'):
            update_day_ = info.xpath('.//@data-dayofweek')
            anime_info = info.xpath('.//li')
            for a in anime_info:
                anime_name_ = a.xpath('.//@title')[0]
                mikan_id_ = a.xpath('.//@data-bangumiid')[0]
                img_url_ = a.xpath('.//@data-src')
                anime_name = self.lxml_result_to_str(anime_name_)
                mikan_id = int(self.lxml_result_to_str(mikan_id_))
                img_url = self.lxml_result_to_str(img_url_)
                update_day = int(self.lxml_result_to_str(update_day_))
                # Remap the site's day-of-week codes to (anime_type, update_day)
                if update_day == 7: # movie
                    anime_type = 1
                    update_day = 8
                elif update_day == 8: # ova
                    anime_type = 2
                    update_day = 8
                elif update_day == 0: # update on sunday
                    anime_type = 0
                    update_day = 7
                else:
                    anime_type = 0
                subscribe_status = 0
                anime = Anime(anime_name, mikan_id, img_url, update_day, anime_type, subscribe_status)
                anime_list.append(anime)
        self.logger.info("[SPIDER] get_anime_list success, anime number: {}".format(len(anime_list)))
        return anime_list
    def get_subgroup_list(self, mikan_id):
        """Scrape the subtitle groups available for one anime."""
        url = "{}/Home/Bangumi/{}".format(self.url, mikan_id)
        html_doc = self.request_html(url)
        if html_doc == None:
            self.logger.warning("[SPIDER] get_subgroup_list failed, request_html failed, url: {}".format(self.url))
            return
        subgroup_list = []
        subgroup_id_ = html_doc.xpath('//li[@class="leftbar-item"]/span/a/@data-anchor')
        subgroup_name_ = html_doc.xpath('//li[@class="leftbar-item"]/span/a/text()')
        for i in range(len(subgroup_name_)):
            # data-anchor looks like "#<id>"; drop the leading '#'
            subgroup_id = int(self.lxml_result_to_str(subgroup_id_[i])[1:])
            subgroup_name = self.lxml_result_to_str(subgroup_name_[i])
            subgroup = Subgroup(subgroup_id, subgroup_name)
            subgroup_list.append(subgroup)
        self.logger.info("[SPIDER] get_subgroup_list success, mikan_id: {}, subgroup number: {}".format(mikan_id, len(subgroup_list)))
        return subgroup_list
    def get_seed_list(self, mikan_id, subgroup_id, anime_type):
        """Scrape the 1080p torrent seeds of one subgroup for one anime."""
        url = "{}/Home/ExpandEpisodeTable?bangumiId={}&subtitleGroupId={}&take=65".format(self.url, mikan_id, subgroup_id)
        html_doc = self.request_html(url)
        if html_doc == None:
            self.logger.warning("[SPIDER] get_seed_list failed, request_html failed, url: {}".format(self.url))
            return
        seed_list = []
        tr_list = html_doc.xpath('//tbody/tr')
        for tr in tr_list:
            seed_url_ = tr.xpath('.//a[last()]/@href')
            seed_name_ = tr.xpath('.//a[@class="magnet-link-wrap"]/text()')
            seed_size_ = tr.xpath('.//td[2]/text()')
            seed_url = self.lxml_result_to_str(seed_url_)
            seed_name = self.lxml_result_to_str(seed_name_)
            seed_size = self.lxml_result_to_str(seed_size_).replace(' ', '')
            # Keep only 1080p releases
            if not self.if_1080(seed_name):
                continue
            if anime_type == 0:
                # Regular series: parse the episode number from the title
                episode_str = self.get_episode(seed_name)
                if episode_str == "null":
                    continue
            else:
                # Movies/OVAs are treated as a single episode "01"
                episode_str = "01"
            episode = int(episode_str)
            seed_status = 0
            seed = Seed(mikan_id, episode, seed_url, subgroup_id, seed_name, seed_status, seed_size)
            seed_list.append(seed)
        self.logger.info("[SPIDER] get_seed_list success, mikan_id: {}, subgroup_id: {}, anime_type: {}, seed number: {}".format(mikan_id, subgroup_id, anime_type, len(seed_list)))
        return seed_list
    # mikan.download_img("/images/Bangumi/202307/f94fdb7f.jpg", "static/img/anime_list")
    def download_img(self, img_url, path):
        """Download a cover image into `path`; returns True on success."""
        url = "{}{}".format(self.url, img_url)
        img_name = img_url.split('/')[4]
        if not self.download(url, path + img_name):
            self.logger.warning("[SPIDER] download_img failed, download failed, img_url: {}, path: {}".format(img_url, path))
            return False
        self.logger.info("[SPIDER] download_img success, img_url: {}, path: {}".format(img_url, path))
        return True
    # mikan.download_seed("/Download/20230913/dfe6eb7c5f780e90f74244a498949375c67143b0.torrent", "seed/")
    def download_seed(self, seed_url, path):
        """Download a .torrent file into `path`; returns True on success."""
        url = "{}{}".format(self.url, seed_url)
        torrent_name = seed_url.split('/')[3]
        if not self.download(url, path + torrent_name):
            self.logger.warning("[SPIDER] download_seed failed, download failed, seed_url: {}, path: {}".format(seed_url, path))
            return False
        self.logger.info("[SPIDER] download_seed sucess, seed_url: {}, path: {}".format(seed_url, path))
        return True
    def lxml_result_to_str(self, result):
        """Concatenate an lxml xpath result (list of nodes/strings) into one str."""
        result_str = ''
        for a in result:
            result_str += str(a)
        return result_str
    def get_episode(self, seed_name):
        """Extract a two-digit episode number from a release title,
        or "null" when no single episode can be identified."""
        # Exclude batch releases such as "01-12" (episode ranges)
        str_list = re.findall(r'\d{2}-\d{2}', seed_name)
        if len(str_list) != 0:
            return "null"
        # Try "[NN]" or " NN " first, then the "[第N话]" form
        str_list = re.findall(r'\[\d{2}\]|\s\d{2}\s', seed_name)
        if len(str_list) == 0:
            str_list = re.findall(r'\[第\d+话\]', seed_name)
            if len(str_list) == 0:
                return "null"
            else:
                return str_list[0][2:-2]
        episode_str = str_list[0][1:-1]
        return episode_str
    def if_1080(self, seed_name):
        """Return True when the release title mentions 1080(p)."""
        str_list = re.findall(r'1080', seed_name)
        if len(str_list) == 0:
            return False
        return True
    def get_seed_list_thread(self, args):
        """Worker: fetch one subgroup's seeds and append them to self.seed_list."""
        mikan_id, subgroup_id, anime_type = args
        try:
            seed_list = self.get_seed_list(mikan_id, subgroup_id, anime_type)
        except Exception as e:
            self.logger.warning("[SPIDER] get_seed_list_thread failed, mikan_id: {}, subgroup_id: {}, error: {}".format(mikan_id, subgroup_id, e))
        else:
            for s in seed_list:
                self.seed_list.append(s)
    def get_seed_list_task(self, mikan_id, subgroup_list, anime_type):
        """Fan get_seed_list out over the executor, one task per subgroup."""
        self.seed_list = []
        task_list = []
        for sub in subgroup_list:
            subgroup_id = sub.subgroup_id
            task = self.executor.submit(self.get_seed_list_thread, (mikan_id, subgroup_id, anime_type))
            task_list.append(task)
        wait(task_list, return_when=ALL_COMPLETED)
        return self.seed_list
    def download_seed_thread(self, args):
        """Worker: download one seed; record it in the success/failed lists."""
        seed = args
        seed_url = seed['seed_url']
        path = seed['path']
        try:
            self.download_seed(seed_url, path)
        except Exception as e:
            self.logger.warning("[SPIDER] download_seed_thread failed, seed_url: {}, path: {}, error: {}".format(seed_url, path, e))
            self.seed_list_download_failed.append(seed)
        else:
            self.seed_list_download_sucess.append(seed)
    def download_seed_task(self, seed_list):
        """Fan download_seed out over the executor; return successful seeds."""
        self.seed_list_download_sucess = []
        self.seed_list_download_failed = []
        task_list = []
        for seed in seed_list:
            task = self.executor.submit(self.download_seed_thread, seed)
            task_list.append(task)
        wait(task_list, return_when=ALL_COMPLETED)
        return self.seed_list_download_sucess
    def download_img_thread(self, args):
        """Worker: download one image; record it on success."""
        img = args
        img_url = img['img_url']
        path = img['path']
        try:
            self.download_img(img_url, path)
        except Exception as e:
            self.logger.warning("[SPIDER] download_img_thread failed, img_url: {}, path: {}".format(img_url, path))
        else:
            self.img_list_download.append(img)
    def download_img_task(self, img_list):
        """Fan download_img out over the executor; return downloaded images."""
        self.img_list_download = []
        task_list = []
        for img in img_list:
            task = self.executor.submit(self.download_img_thread, img)
            task_list.append(task)
        wait(task_list, return_when=ALL_COMPLETED)
        return self.img_list_download
    def get_anime_list_by_conditon(self, year, broadcast_season):
        """Scrape the anime list for a given year/season (1=spring .. 4=winter)."""
        # seasonStr values are the URL-encoded Chinese season names
        if broadcast_season == 1:
            seasonStr = '%E6%98%A5'
        elif broadcast_season == 2:
            seasonStr ='%E5%A4%8F'
        elif broadcast_season == 3:
            seasonStr = '%E7%A7%8B'
        elif broadcast_season == 4:
            seasonStr = '%E5%86%AC'
        else:
            self.logger.warning("[SPIDER] get_anime_list_by_conditon failed, year: {}, broadcast_season: {}".format(year, broadcast_season))
            return
        url = "{}/Home/BangumiCoverFlowByDayOfWeek?year={}&seasonStr={}".format(self.url, year, seasonStr)
        html_doc = self.request_html(url)
        if html_doc == None:
            self.logger.warning("[SPIDER] get_anime_list failed, request_html failed, url: {}".format(self.url))
            return
        anime_list = []
        for info in html_doc.xpath('//div[@class="sk-bangumi"]'):
            update_day_ = info.xpath('.//@data-dayofweek')
            anime_info = info.xpath('.//li')
            for a in anime_info:
                anime_name_ = a.xpath('.//@title')[0]
                mikan_id_ = a.xpath('.//@data-bangumiid')[0]
                img_url_ = a.xpath('.//@data-src')
                anime_name = self.lxml_result_to_str(anime_name_)
                mikan_id = int(self.lxml_result_to_str(mikan_id_))
                img_url = self.lxml_result_to_str(img_url_)
                update_day = int(self.lxml_result_to_str(update_day_))
                # Same day-code remapping as get_anime_list
                if update_day == 7: # movie
                    anime_type = 1
                    update_day = 8
                elif update_day == 8: # ova
                    anime_type = 2
                    update_day = 8
                elif update_day == 0: # update on sunday
                    anime_type = 0
                    update_day = 7
                else:
                    anime_type = 0
                subscribe_status = 0
                anime = Anime(anime_name, mikan_id, img_url, update_day, anime_type, subscribe_status)
                anime_list.append(anime)
        self.logger.info("[SPIDER] get_anime_list success, anime number: {}".format(len(anime_list)))
        return anime_list
{
"api_name": "fake_useragent.UserAgent",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
... |
14003011170 | import json
import requests
import random
import re
baseUrl = "http://jw1.yzu.edu.cn/"
session = requests.Session()
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Connection': 'keep-alive',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Accept-Encoding': 'gzip, deflate',
'Upgrade-Insecure-Requests': '1'}
def GetCode():
    """Fetch the login captcha image and save it to codepic.png."""
    validcodeUrl = baseUrl + "validateCodeAction.do?random=+Math.random()"
    validcode = session.get(validcodeUrl)
    if (validcode.status_code == 200):
        with open('codepic.png', 'wb') as f:
            f.write(validcode.content)
        print("验证码保存成功!")
    else:
        print("验证码保存失败!")
def Login(validcode):
    """Log in to the JW system using the given captcha text.

    Success is detected by looking for the portal title string in the
    response body.
    """
    data = {
        "zjh": 191304324,  # presumably the account/student number — confirm
        "mm": 191304324,   # presumably the password — confirm
        "v_yzm": validcode
    }
    res = session.post(baseUrl+"loginAction.do", data=data)
    if "学分制综合教务" in res.text:
        print("登录成功\n")
    else:
        print("登陆失败\n")
def GetSessionId():
    """Extract DWR's _origScriptSessionId from engine.js and append a
    random numeric suffix, mimicking the browser-side DWR client."""
    res = session.get("http://jw1.yzu.edu.cn/dwr/engine.js")
    # NOTE(review): `start` is computed but never used
    start = res.text.find("dwr.engine._origScriptSessionId")
    w1 = "dwr.engine._origScriptSessionId = "
    w2 = ";"
    pat = re.compile(w1+'(.*?)'+w2, re.S)
    # [1:-1] strips the surrounding quotes from the matched JS string literal
    sessionId = pat.findall(res.text)[0][1:-1] + \
        str(random.randint(0, 1000))
    return sessionId
def ClassScript():
    """
    Run one course-selection round: fetch the captcha, log in with the
    user-supplied captcha text, open the elective plan page and submit
    the course-pick request, printing the server's response.
    """
    GetCode()
    validCode = input("输入验证码:\n")
    Login(validCode)
    classData = {
        # BUG FIX: the original wrote the course id as the numeric literal
        # 17029015_01, which Python parses as the int 1702901501 (underscores
        # are digit separators), destroying the "<course>_<class>" id format.
        # Send it as a string instead.
        "kcId": "17029015_01",
        "preActionType": 3,
        "actionType": 9
    }
    # Open the elective plan page first so the server-side session has the
    # required selection context before the pick request is posted.
    res = session.get(
        "http://jw1.yzu.edu.cn/xkAction.do?actionType=-1&fajhh=3440")
    res = session.post(
        baseUrl+"xkAction.do", data=classData)
    print(res.text)
ClassScript()
| Rickenbacker620/Codes | Python/stuff/urp.py | urp.py | py | 2,979 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.Session",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_numbe... |
3587126567 | # -*- coding: utf-8 -*-
"""
Binarization
Feature binarization is the process of thresholding numerical features to
get boolean values.
"""
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import numpy as np
import pandas as pd
from scipy import signal,stats
from flask import Flask,request,jsonify
import json
import re
import os
import codecs
def py_configs(configpath, conf=None, delete=None):
    """
    Read and/or update a JSON configuration file.

    :param configpath: path of the JSON config file (created on first write).
    :param conf: dict -> merge these keys into the file and return the result;
                 str  -> return the value stored under that key ({} if absent);
                 None -> just return the whole config dict.
    :param delete: str, key to remove from the file (handled before ``conf``).
    :return: the config dict, or a single value when ``conf`` is a str.
    """
    if not os.path.exists(configpath):
        obj = {}
    else:
        with codecs.open(configpath, 'r', 'utf-8') as f:
            raw = f.read()
        try:
            # Empty or corrupt content falls back to an empty config
            obj = json.loads(raw) if raw else {}
        except ValueError:
            obj = {}
    if isinstance(delete, str):
        # BUG FIX: the original popped unconditionally (KeyError when the key
        # is absent) and serialized with Flask's jsonify(), which returns a
        # Response object and requires an application context — it cannot be
        # written to a file. Use json.dumps() instead.
        obj.pop(delete, None)
        with codecs.open(configpath, 'w', 'utf-8') as f:
            f.write(json.dumps(obj, ensure_ascii=False))
        return obj
    if isinstance(conf, dict):
        for key in conf:
            obj[key] = conf[key]
        with codecs.open(configpath, 'w', 'utf-8') as f:
            f.write(json.dumps(obj, ensure_ascii=False))
    elif isinstance(conf, str):
        if conf in obj:
            return obj[conf]
        else:
            return {}
    return obj
configpath=os.path.join(os.path.dirname(__file__),'config.txt')
try:
config = py_configs(configpath)
Signal_SERVER = config["Signal_SERVER"]
Signal_PORT = config["Signal_PORT"]
except:
raise Exception("Configuration error")
app = Flask(__name__)
@app.route('/Data_preprocessing/scaling_data',methods=['POST'])
def daqfft():
    """
    POST endpoint that binarizes an uploaded CSV.

    Expects a multipart request with a form field ``operation`` and a file
    field ``file``. Returns the binarized matrix as JSON, or an error
    payload: code 2 = missing request key, code 1 = processing error.
    """
    try:
        form_key = list(request.form.to_dict().keys())
        file_key = list(request.files.to_dict().keys())
        print('k: ', form_key)
        # BUG FIX: the original required every key to be present in BOTH
        # request.form and request.files ("key not in form_key or key not in
        # file_key"), which can never hold — 'file' is a file field and
        # 'operation' a form field — so the endpoint always returned code 2.
        # Check each key against its own container instead.
        required = {'operation': form_key, 'file': file_key}
        for key, container in required.items():
            if key not in container:
                output = {"code": 2, "KeyError": str(key)}
                return json.dumps(output)
        operation = request.form['operation']
        file_get = request.files.get('file')
        X_train = pd.read_csv(file_get)
        result = ''
        # Operation: binarization (threshold numerical features to 0/1)
        if operation == 'Normalization':
            binarizer = preprocessing.Binarizer().fit(X_train)  # fit does nothing
            bin_tran = binarizer.transform(X_train)
            # BUG FIX: jsonify() cannot serialize a numpy array directly;
            # convert it to nested Python lists first.
            result = jsonify(bin_tran.tolist())
        return result
    except Exception as e:
        print('Exception: ', e)
        code = 1
        result = {"code": code, "error": re.findall("'([\w\d _]+)'", str(type(e)))[0]}
        result = jsonify(result)
        return result
if __name__=="__main__":
app.run(host= Signal_SERVER, port=int(Signal_PORT))
| KaifangXu/APIs | Data_pre/Binarization.py | Binarization.py | py | 2,972 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "codecs.open",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number... |
13961966989 | from django import forms
from django.db import transaction
from django.utils.translation import gettext as _
from ..models import Duplicate, HelperShift
class MergeDuplicatesForm(forms.Form):
    """Form to merge duplicate helper records: the user picks the helper to
    keep ("helpers_selection") and may mark helpers to leave untouched
    ("helpers_ignore")."""
    def __init__(self, *args, **kwargs):
        """Build the selection/ignore fields from the given `helpers` queryset."""
        self.helpers = kwargs.pop("helpers")
        super(MergeDuplicatesForm, self).__init__(*args, **kwargs)
        self.fields["helpers_ignore"] = forms.ModelMultipleChoiceField(
            queryset=self.helpers,
            widget=forms.CheckboxSelectMultiple(attrs={"id": "helper_ignore"}),
            required=False,
            label="",
        )
        self.fields["helpers_selection"] = forms.ModelChoiceField(
            queryset=self.helpers,
            widget=forms.RadioSelect(attrs={"id": "helper_selection"}),
            empty_label=None,
            required=True,
            label="",
        )
    def clean(self):
        """Validate that the selected/ignored helpers form a mergeable set."""
        cleaned_data = super().clean()
        remaining_helper = cleaned_data["helpers_selection"]
        ignore_helpers = cleaned_data["helpers_ignore"]
        # remaining helpers must not be ignored (this makes no sense)
        if remaining_helper in ignore_helpers:
            raise forms.ValidationError(_("The remaining helper must not be ignored."))
        # check for overlapping shifts
        if not self.check_merge_possible(ignore_helpers):
            raise forms.ValidationError(_("Cannot merge helpers which have the same shift."))
    @transaction.atomic
    def merge(self):
        """
        Merge the helpers and keep the data selected in the form.
        """
        remaining_helper = self.cleaned_data["helpers_selection"]
        ignore_helpers = self.cleaned_data["helpers_ignore"]
        oldest_timestamp = remaining_helper.timestamp
        # we check this again inside the atomic code block to ensure that no change happends after the
        # validation and before the merge (= no new shifts were added)
        if not self.check_merge_possible(ignore_helpers):
            raise ValueError("Cannot merge helpers with same shifts")
        # and then to the merge
        for helper in self.helpers:
            if helper == remaining_helper or helper in ignore_helpers:
                continue
            # merge shifts
            for helpershift in HelperShift.objects.filter(helper=helper):
                helpershift.helper = remaining_helper
                helpershift.save()
            # merged coordinated jobs
            for job in helper.coordinated_jobs:
                job.coordinators.add(remaining_helper)
            # merge gifts
            if remaining_helper.event.gifts:
                remaining_helper.gifts.merge(helper.gifts)
            # then create the duplicate entry so that old links in mails still work
            Duplicate.objects.create(deleted=helper.id, existing=remaining_helper)
            # the overall timestamp of the helper should be the oldest one
            # (there are multiple timestamps saved: one per helper and one per shift)
            if helper.timestamp < oldest_timestamp:
                oldest_timestamp = helper.timestamp
            # and delete the old helper
            helper.delete()
        # update timestamp
        remaining_helper.timestamp = oldest_timestamp
        remaining_helper.save()
        return remaining_helper
    def check_merge_possible(self, ignore_helpers=None):
        """
        Check if the merge is possible.
        It is not possible when multiple helpers have the same shift. If we would merge those helpers,
        we would "loose" allocated seats and this is probably not intended.
        """
        shifts = []
        for helper in self.helpers:
            # if we have ignored_helpers, check that
            if ignore_helpers and helper in ignore_helpers:
                continue
            # compare all shifts
            for shift in helper.shifts.all():
                if shift in shifts:
                    return False
                else:
                    shifts.append(shift)
        return True
| helfertool/helfertool | src/registration/forms/duplicates.py | duplicates.py | py | 4,063 | python | en | code | 52 | github-code | 36 | [
{
"api_name": "django.forms.Form",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.forms.ModelMultipleChoiceField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name"... |
15287647409 | from django.test import TestCase
from users.forms import UserUpdateForm
class TestForms(TestCase):
def test_update_form_valid_data(self):
"""Test for valid update form"""
form = UserUpdateForm(data={
'username': 'Praveen',
'email': 'Praveen.t@gmail.com'
})
self.assertTrue(form.is_valid())
def test_update_form_valid_data(self):
"""Test for invalid update form"""
form = UserUpdateForm(data={ })
self.assertFalse(form.is_valid())
self.assertEquals(len(form.errors), 1) | ardagon89/deploying-a-email-classification-model-in-a-full-stack-website | singularity/users/test/test_forms.py | test_forms.py | py | 572 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "users.forms.UserUpdateForm",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "users.forms.UserUpdateForm",
"line_number": 19,
"usage_type": "call"
}
] |
72655723623 | import os
import sys
import math
from tqdm import tqdm
import pandas as pd
import numpy as np
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from util import argparser
def permutation_test(df, column, n_permutations=100000, batch_size=1000):
    """One-sided sign-flip permutation test for the mean of *column*.

    Randomly flips the sign of every observation and counts how often the
    permuted mean reaches the observed mean. Permutations are drawn in
    batches; the requested count is rounded up to a whole number of batches.

    Returns (p_value, actual_number_of_permutations).
    """
    # never draw more permutations per batch than requested in total
    batch_size = min(batch_size, n_permutations)

    observed_mean = df[column].mean().item()
    observations = df[column].values
    # one column per permutation within a batch
    tiled = np.expand_dims(observations, axis=1).repeat(batch_size, axis=1)

    # round the permutation count up to a multiple of the batch size
    n_batches = math.ceil(n_permutations / batch_size)
    n_permutations = n_batches * batch_size

    hits = 0
    for _ in range(n_batches):
        # random signs in {-1, +1} for every (observation, permutation) pair
        signs = np.random.randint(0, 2, size=(len(observations), batch_size)) * 2 - 1
        permuted_means = np.mean(tiled * signs, axis=0)
        hits += (permuted_means >= observed_mean).sum()

    return hits / n_permutations, n_permutations
def remove_unused_cols(df):
    """Drop bookkeeping columns the analysis does not use (mutates *df*)."""
    for unused in ('item_id', 'position', 'family_size', 'family_weight', 'length'):
        del df[unused]
    return df
def get_macroarea_counts(df):
    """Attach a 'macroarea_count' column: rows per (concept_id, token_idx).

    The count is the number of macroarea entries sharing the same concept
    and token position; it is merged back onto every matching row.
    """
    counts = (
        df[['macroarea', 'concept_id', 'token_idx']]
        .groupby(['concept_id', 'token_idx'])
        .agg('count')
        .reset_index()
        .rename(columns={'macroarea': 'macroarea_count'})
    )
    return pd.merge(df, counts, left_on=['concept_id', 'token_idx'],
                    right_on=['concept_id', 'token_idx'])
def get_tokens_means(df):
    """Average per-token scores hierarchically, then attach macroarea counts.

    Each groupby-mean collapses one grouping level (language, then family,
    then down to macroarea) so that every unit contributes equally to the
    next aggregation step.
    """
    levels = [
        ['language_id', 'family', 'macroarea', 'concept_id', 'concept_name', 'token', 'token_idx'],
        ['family', 'macroarea', 'concept_id', 'concept_name', 'token', 'token_idx'],
        ['macroarea', 'concept_id', 'concept_name', 'token', 'token_idx'],
    ]
    for keys in levels:
        df = df.groupby(keys).agg('mean').reset_index()
    return get_macroarea_counts(df)
def main():
    """Run a permutation test per (macroarea, concept, token-position) cell.

    Reads the averaged per-position results, aggregates them with
    get_tokens_means, keeps only cells observed in 4 macroareas, runs a
    sign-flip permutation test per cell and writes 'tokens_results.tsv'.
    """
    args = argparser.parse_args(csv_folder='cv')
    context = 'onehot'

    fname = os.path.join(args.rfolder_base, 'avg_seed_results_per_pos.tsv')
    df = pd.read_csv(fname, sep='\t')
    remove_unused_cols(df)

    # df_concepts = df.groupby(['concept_id', 'concept_name']).agg('mean').reset_index()
    df_tokens = get_tokens_means(df)
    df_tokens.set_index(['macroarea', 'concept_id', 'token_idx'], inplace=True)
    df_tokens = df_tokens.sort_index()
    # keep only (concept, token) cells that occur in 4 macroareas
    df_tokens = df_tokens[df_tokens.macroarea_count == 4]

    # placeholder values, overwritten per cell in the loop below
    df_tokens['p_value'] = -1
    df_tokens['n_permutations'] = -1
    df_tokens['n_instances'] = -1
    for macroarea, concept_id, token_idx in tqdm(df_tokens.index.unique(), desc='Concept--token permutation tests'):
        idx = (macroarea, concept_id, token_idx)
        # raw (pre-aggregation) rows belonging to this cell
        df_temp = df[(df.macroarea == macroarea) & (df.concept_id == concept_id) & (df.token_idx == token_idx)]

        p_val, n_permutations = permutation_test(df_temp, 'mi-' + context, n_permutations=100000)
        # p_val, n_permutations = permutation_test_recursive(df_temp, 'mi-' + context, n_permutations=100000)
        df_tokens.loc[idx, 'p_value'] = p_val
        df_tokens.loc[idx, 'n_permutations'] = n_permutations
        df_tokens.loc[idx, 'n_instances'] = df_temp.shape[0]

    fname = os.path.join(args.rfolder_base, 'tokens_results.tsv')
    df_tokens.to_csv(fname, sep='\t')
# Run the per-token permutation analysis when executed as a script.
if __name__ == '__main__':
    main()
| rycolab/form-meaning-associations | src/h04_analysis/get_results_per_token.py | get_results_per_token.py | py | 3,405 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
5297089787 | import asyncio
import json
import logging
import logging.config
from dataclasses import dataclass
import yaml
from web3 import Web3
from web3._utils.filters import LogFilter
@dataclass
class FilterWrapper:
    """Groups a web3 event filter with the metadata needed to log its updates."""

    event_filter: LogFilter  # filter over AnswerUpdated events of one oracle contract
    pair_name: str  # currency pair name taken from spec.yaml
    oracle_address: str  # address of the oracle contract being watched
    logger: logging.Logger  # logger shared by all oracles of the same pair
class BlockchainMonitor:
    """Watches on-chain price oracles and logs every AnswerUpdated event.

    Configuration is read from three local files: spec.yaml (connection URL
    and the currency pair / oracle address mapping), logging.yaml (logging
    configuration) and abi.json (the contract ABI).
    """

    def __init__(self):
        """Load the config files and create one event filter per oracle."""
        with open("spec.yaml", "r") as s:
            spec = yaml.safe_load(s)
        with open("logging.yaml", "r") as s:
            logging.config.dictConfig(yaml.safe_load(s))
        with open('abi.json', "r") as s:
            abi = json.load(s)
        # single HTTP provider shared by all contracts
        w3 = Web3(Web3.HTTPProvider(spec['connection_settings']['alchemy_url']))
        self.filters = []
        # one logger per pair, reused across the pair's oracles
        pair_name_to_logger = {}
        for currency_pair in spec['currency_pairs']:
            for pair_name, pair_spec in currency_pair.items():
                for oracle_address in pair_spec['oracles_addresses']:
                    contract = w3.eth.contract(address=oracle_address, abi=abi)
                    pair_name_to_logger[pair_name] = pair_name_to_logger \
                        .get(pair_name, logging.getLogger(pair_name))
                    # start watching from the latest block only
                    self.filters.append(FilterWrapper(
                        contract.events.AnswerUpdated.createFilter(fromBlock='latest'),
                        pair_name,
                        oracle_address,
                        pair_name_to_logger[pair_name]
                    ))

    @staticmethod
    def __handle_event(event, filter_wrapper):
        """Log a single AnswerUpdated event with its price, block and tx hash."""
        logger = filter_wrapper.logger
        logger.info("Price changes in pair {}. Oracle address: {}. Current price: {}, block number: {}, tx hash: {}"
                    .format(filter_wrapper.pair_name,
                            filter_wrapper.oracle_address,
                            event.args.current,
                            event.blockNumber,
                            event.transactionHash.hex()))

    async def __monitor(self, filter_wrapper, poll_interval):
        """Poll one event filter forever, sleeping *poll_interval* seconds between polls."""
        filter_wrapper.logger.info("Start monitor pair {}. Oracle address: {}".format(
            filter_wrapper.pair_name, filter_wrapper.oracle_address))
        while True:
            for AnswerUpdated in filter_wrapper.event_filter.get_new_entries():
                self.__handle_event(AnswerUpdated, filter_wrapper)
            await asyncio.sleep(poll_interval)

    def monitor(self):
        """Schedule one polling task per filter and run the event loop forever.

        NOTE(review): asyncio.get_event_loop() is deprecated outside a running
        loop on recent Python versions -- consider asyncio.run() if upgrading.
        """
        loop = asyncio.get_event_loop()
        try:
            for filter_wrapper in self.filters:
                asyncio.ensure_future(self.__monitor(filter_wrapper, 10))
            loop.run_forever()
        finally:
            loop.close()
# Entry point: build the monitor from the local config files and block forever.
if __name__ == "__main__":
    BlockchainMonitor().monitor()
| dzahbarov/blockchain_monitor | monitor.py | monitor.py | py | 2,721 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "web3._utils.filters.LogFilter",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "logging.Logger",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 12,
"usage_type": "name"
},
{
"api_name"... |
6846890085 | import pandas as pd
import sqlite3
def connect_sqlite(db_file):
    """Open a SQLite database and return ``(connection, cursor)``.

    Rows are returned as :class:`sqlite3.Row` objects so columns can be
    accessed by name. The caller is responsible for closing the connection.

    Fix: the previous version wrapped the setup in
    ``with sqlite3.connect(...)``, which does *not* close the connection --
    a sqlite3 connection used as a context manager only commits/rolls back
    the pending transaction on exit -- so it was misleading and performed a
    spurious commit while still handing out an open connection.
    """
    conn = sqlite3.connect(db_file)
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    return conn, cur
def get_dataframe(db_file, sql):
    """Execute *sql* against the SQLite file and return the rows as a DataFrame.

    The connection is closed after a successful read.
    """
    connection, _cursor = connect_sqlite(db_file)
    frame = pd.read_sql(sql, connection)
    connection.close()
    return frame
def req_count(df):
    '''
    Unfinished stub.

    df : pandas dataframe

    NOTE(review): the body only evaluates df['datatime'] (likely a typo for
    'datetime') and discards the result, so this function currently has no
    effect beyond raising KeyError when that column is missing -- confirm
    the intended behaviour before using it.
    '''
    df['datatime']
# Ad-hoc smoke test: load the demo log database and plot the whole table.
if __name__ == '__main__' :
    db_file = 'demklog_2017-01-21'  # sample SQLite database file
    sql = 'select * from demklog '
    df = get_dataframe(db_file, sql)
    #print df.head()
    df.plot()
# df_tm = pd.TimeSeries( pd. to_datetime( df['timestamp'] ) )
# print 'type df_tm=', type(df_tm),df_tm.head()
# ddtest = df['timestamp']
#
# # dd1.resample('M')
# print type(ddtest), ddtest [800:810]
# print ddtest.resample('H') | tcref/helloworld | helloworld/tcref/src/main/webpy_rest/check_db/statistics.py | statistics.py | py | 895 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlite3.Row",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_sql",
"line_number": 14,
"usage_type": "call"
}
] |
6529596584 | """
The `test.unit.sha_api.mybottle.sha_api_bottle_test` module provides unit tests for the `ShaApiBottle` class in
`sha_api.mybottle.sha_api_bottle`.
Classes:
TestShaApiBottle: A unit test class for the `ShaApiBottle` class.
"""
import json
import tempfile
import unittest
from bottle import ConfigDict # pylint: disable=no-name-in-module
from mock import MagicMock, patch
from sha_api.mybottle.sha_api_bottle import global_config, ShaApiBottle
from sha_api.sha_apid import ROUTES
import sha_api
class TestShaApiBottle(unittest.TestCase):
    """
    A unit test class for the `ShaApiBottle` class.

    Methods:
        setUp: Unit test initialization (temp config/db files plus a ConfigDict fixture).
        test_error_handler: Tests to ensure the error handler returns a JSON value.
        test_global_config: Tests to ensure we get the correct values from a `ConfigDict` instance.
        test_sha_api_constructor: Tests that a `ShaApiBottle` instance can be properly
            instantiated without throwing any exceptions.
    """

    def setUp(self):
        """
        Initializes the unit test fixtures.

        Creates two temp files (an INI config and a SQLite db path), an
        environment dict pointing SHA_API_CONFIG at the config file, the
        equivalent ConfigDict, and writes the INI content to disk.

        NOTE(review): the delete=False temp files are never removed -- a
        tearDown that unlinks them would avoid leaking files per run.
        """
        self.maxDiff = None # pylint: disable=invalid-name

        # delete=False so the files survive being re-opened by the code under test
        self.config_sample = tempfile.NamedTemporaryFile(delete=False)
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.os_environ = {u'SHA_API_CONFIG': self.config_sample.name}
        self.configdict_ns = ConfigDict().load_dict(
            {
                u'sha_api': {
                    u'test_variable': u'test_value'
                },
                u'sqlite': {
                    u'dbfile': self.dbfile.name
                }
            }
        )

        # INI twin of the ConfigDict above, loaded via the SHA_API_CONFIG env var
        with open(self.config_sample.name, 'w') as fout:
            fout.write(u"[sha_api]\ntest_variable = test_value\n[sqlite]\ndbfile = %s" % self.dbfile.name)

    def test_error_handler(self):
        """
        Tests to ensure the error handler returns a JSON value.
        """
        res = MagicMock()
        res_json = json.dumps({u'err_msg': u'Response Body'})

        # the handler is expected to wrap the response body in an err_msg JSON object
        res.body = u'Response Body'

        api = ShaApiBottle(ROUTES)

        self.assertEqual(api.default_error_handler(res), res_json)

    def test_global_config(self):
        """
        Tests to ensure we get the correct values from a `ConfigDict` instance.
        """
        self.assertEqual(global_config(self.configdict_ns, u'sqlite', u'dbfile', u'Not Found'), self.dbfile.name)

    def test_sha_api_constructor(self):
        """
        Tests that a `ShaApiBottle` instance can be properly instantiated without
        throwing any exceptions, and that invalid inputs fail with clear errors.
        """
        # Branch 1: Nothing throws an error and all goes well
        try:
            api = ShaApiBottle(ROUTES) # pylint: disable=unused-variable
        except Exception as err: # pylint: disable=broad-except
            self.fail(u'ShaApiBottle sha_api instance failed to initialize: %s' % str(err))

        # Branch 2: When routes object is not a list we get a proper assert error
        with self.assertRaises(AssertionError) as err:
            api = ShaApiBottle(dict())
        self.assertEqual(str(err.exception), u'routes must be an array of route dicts to be passed to bottle.route')

        # Branch 3: When routes object is a list but it doesnt contain dict items we get a proper assert error
        with self.assertRaises(AssertionError) as err:
            api = ShaApiBottle([False])
        self.assertEqual(str(err.exception), u'route must be a dict that can be passed to bottle.route')

        # Branch 4: When environment variable specifies config file it is properly loaded.
        with patch.dict(u'sha_api.mybottle.sha_api_bottle.os.environ', values=self.os_environ):
            self.assertEqual(self.os_environ[u'SHA_API_CONFIG'],
                             sha_api.mybottle.sha_api_bottle.os.environ.get(u'SHA_API_CONFIG'))
            api = ShaApiBottle(ROUTES)
            self.assertEqual(api.config.get(u'sha_api.test_variable'), u'test_value')
            self.assertEqual(api.config.get(u'sqlite.dbfile'), self.dbfile.name)

        # Branch 5: When any portion of the db initialization fails it should just bubble up the exception
        # NOTE(review): ShaApiBottle() is called without routes here -- confirm the
        # constructor has a default for that argument.
        with patch(u'sha_api.mybottle.sha_api_bottle.sqlite3.connect') as sqlite_connect:
            self.assertEqual(sqlite_connect, sha_api.mybottle.sha_api_bottle.sqlite3.connect)
            sqlite_connect.side_effect = Exception(u'sqlite exception')
            with self.assertRaises(Exception) as err:
                api = ShaApiBottle()
            self.assertEqual(str(err.exception), u'sqlite exception')
| ju2wheels/python_sample | python/test/unit/sha_api/mybottle/sha_api_bottle_test.py | sha_api_bottle_test.py | py | 4,614 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 39,
"usage_type": "call"
},
{
"ap... |
21159707752 | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
# `path` is injected by the grading environment before this block runs;
# referencing the bare name makes a missing variable fail fast.
path

#Code starts here
# Read the Olympics medal table
data=pd.read_csv(path)
# Rename 'Total' to the more descriptive 'Total_Medals'
data.rename(columns={'Total':'Total_Medals'},inplace=True)
# Print the first ten rows for a quick sanity check
# (the original comment said "first five columns", which was inaccurate)
print(data.head(10))
#Code ends here
# --------------
#Code starts here

# Label each country with the event type it performed better in:
# 'Summer', 'Winter', or 'Both' when the two totals are tied.
data['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'], 'Summer', 'Winter')
data['Better_Event'] =np.where(data['Total_Summer'] ==data['Total_Winter'],'Both',data['Better_Event'])

# Most common label overall: value_counts() sorts by frequency,
# so the first index entry is the mode.
better_event=data['Better_Event'].value_counts().index.values[0]

# Report which event type dominates across countries
print('Better_Event=', better_event)
# --------------
#Code starts here

# Keep only the country name and the three medal-total columns
top_countries=data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]

# Drop the last ROW (an aggregate/summary row, not a country).
# The original comment said "column", but [:-1] slices rows.
top_countries=top_countries[:-1]
#Function for top 10
def top_ten(data, col):
    """Return the 'Country_Name' values of the 10 largest rows by *col*."""
    best_rows = data.nlargest(10, col)
    return best_rows['Country_Name'].tolist()
#Calling the function for Top 10 in Summer
top_10_summer=top_ten(top_countries,'Total_Summer')
print("Top 10 Summer:\n",top_10_summer, "\n")

#Calling the function for Top 10 in Winter
top_10_winter=top_ten(top_countries,'Total_Winter')
print("Top 10 Winter:\n",top_10_winter, "\n")

#Calling the function for Top 10 in both the events
top_10=top_ten(top_countries,'Total_Medals')
print("Top 10:\n",top_10, "\n")

# Countries appearing in all three top-10 lists (set intersection)
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print('Common Countries :\n', common, "\n")
#Code ends here
# --------------
#Code starts here
# Recompute the three top-10 lists (same calls as the previous section)
top_10_summer=top_ten(top_countries,'Total_Summer')
top_10_winter=top_ten(top_countries,'Total_Winter')
top_10=top_ten(top_countries,'Total_Medals')

# Collect the three rankings into one frame (one row per ranking)
df=pd.DataFrame([top_10_summer,top_10_winter,top_10])

# Restrict the full table to each top-10 group
summer_df= data[data['Country_Name'].isin(top_10_summer)]
winter_df= data[data['Country_Name'].isin(top_10_winter)]
top_df= data[data['Country_Name'].isin(top_10)]

# One bar chart of medal totals per group
summer_df.plot.bar('Country_Name','Total_Summer')
winter_df.plot.bar('Country_Name','Total_Winter')
top_df.plot.bar('Country_Name','Total_Medals')
# --------------
#Code starts here
# Golden ratio = share of gold medals among all medals won.
# .copy() avoids pandas SettingWithCopyWarning when adding the new column,
# and the ratio is computed from the filtered frame's own columns.

# Summer: best gold share among the summer top 10
summer_df = data[data['Country_Name'].isin(top_10_summer)].copy()
summer_df['Golden_Ratio'] = summer_df['Gold_Summer'] / summer_df['Total_Summer']
summer_max_ratio = summer_df['Golden_Ratio'].max()
summer_country_gold = summer_df.loc[summer_df['Golden_Ratio'].idxmax(), 'Country_Name']

# Winter: same computation on the winter top 10
winter_df = data[data['Country_Name'].isin(top_10_winter)].copy()
winter_df['Golden_Ratio'] = winter_df['Gold_Winter'] / winter_df['Total_Winter']
winter_max_ratio = winter_df['Golden_Ratio'].max()
winter_country_gold = winter_df.loc[winter_df['Golden_Ratio'].idxmax(), 'Country_Name']

# Overall: same computation on the combined top 10
top_df = data[data['Country_Name'].isin(top_10)].copy()
top_df['Golden_Ratio'] = top_df['Gold_Total'] / top_df['Total_Medals']
top_max_ratio = top_df['Golden_Ratio'].max()
# Bug fix: the lookup must use top_df -- the original indexed summer_df with
# top_df's idxmax, which raises KeyError or returns the wrong country.
top_country_gold = top_df.loc[top_df['Golden_Ratio'].idxmax(), 'Country_Name']
# --------------
#Code starts here
# Weighted medal score: gold = 3 points, silver = 2, bronze = 1.
# Fix: .copy() after dropping the last (aggregate) row, and compute the
# score from data_1's own columns -- the original assigned a Series built
# from the full `data` to the data_1 slice, which triggers a
# SettingWithCopyWarning and only works by accidental index alignment.
data_1 = data[:-1].copy()
data_1['Total_Points'] = (data_1['Gold_Total'] * 3) + (data_1['Silver_Total'] * 2) + (data_1['Bronze_Total'])
most_points = data_1['Total_Points'].max()
best_country = data_1.loc[data_1['Total_Points'].idxmax(), 'Country_Name']
print(best_country)
# --------------
#Code starts here
# Medal breakdown (gold/silver/bronze) of the best country only
best=data[data['Country_Name']==best_country]
best.reset_index(drop = True, inplace = True)
best=best[['Gold_Total','Silver_Total','Bronze_Total']]

# Stacked bar chart of the three medal types
best.plot.bar(stacked=True)

# NOTE(review): the x-axis label is hard-coded to 'United States' --
# it should probably use best_country instead.
plt.xlabel('United States')
#Changing the y-axis label
plt.ylabel('Medals Tally')
#Rotating the ticks of X-axis
plt.xticks(rotation=45)

# Rewrite the legend entries to include the raw medal counts
l=plt.legend()
l.get_texts()[0].set_text('Gold_Total :' + str(best['Gold_Total'].values))
l.get_texts()[1].set_text('Silver_Total :' + str(best['Silver_Total'].values))
l.get_texts()[2].set_text('Bronze_Total :' + str(best['Bronze_Total'].values))
#Code ends here
| nagnath001/olympic-hero | code.py | code.py | py | 4,367 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_... |
14141185614 | import requests
import json
import urllib.parse
from django.conf import settings
def current_place():
"""
現在地の緯度経度を取得する。
Returns:
int: 現在地の緯度、経度
"""
geo_request_url = "https://get.geojs.io/v1/ip/geo.json"
geo_data = requests.get(geo_request_url).json()
# print(geo_data['latitude'])
# print(geo_data['longitude'])
return geo_data["latitude"], geo_data["longitude"]
def get_movie_theatre(latitude, longitude):
"""
現在地の緯度、経度をインプットとして、付近の映画館リストを返す。
Args:
latitude (int): 現在地の緯度
longitude (int): 現在地の経度
"""
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?language=ja&location=" + latitude + "," + longitude + "&radius=2000&type=movie_theater&key=" + settings.API_MAP
# print(url)
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
json_dict = response.json()
movie_theatre = []
if json_dict["status"] == "OK":
for theatre in json_dict["results"]:
movie_theatre.append((theatre["name"],theatre["name"]))
print(movie_theatre)
return movie_theatre
def search_theatre(search_text):
"""
現在地の緯度、経度をインプットとして、付近の映画館リストを返す。
Args:
latitude (int): 現在地の緯度
longitude (int): 現在地の経度
"""
url = "https://maps.googleapis.com/maps/api/place/textsearch/json?language=ja&type=movie_theater&query=" + urllib.parse.quote(search_text) + "&key=" + settings.API_MAP
# print(url)
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
json_dict = response.json()
movie_theatre = []
if json_dict["status"] == "OK":
for theatre in json_dict["results"]:
movie_theatre.append((theatre["name"], theatre["name"]))
# print(movie_theatre)
# else:
# movie_theatre.append("Result nothing","Result nothing")
return movie_theatre
if __name__ == "__main__":
latitude, longitude = current_place()
get_movie_theatre(latitude, longitude)
search_theatre("TOHO 新宿") | nicenaito/theatreCheckIn | theatreplaces.py | theatreplaces.py | py | 2,354 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.API_MAP",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "r... |
38107931605 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('article', '0002_remove_article_article_date'),
]
operations = [
migrations.AddField(
model_name='article',
name='article_date',
field=models.DateTimeField(default=datetime.date(2014, 10, 21)),
preserve_default=False,
),
]
| Evgeneus/blog-django-1.7 | article/migrations/0003_article_article_date.py | 0003_article_article_date.py | py | 494 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 15,
"usage_type": "call"
},
{
... |
71300192743 | import itertools
import tqdm
import subprocess
def add_to_seq(seq, feature, label):
    """Append *feature* to the running word list according to its tag.

    'B' and 'S' start a new word; 'I' extends the previous word (or starts
    one if the list is still empty). Any other tag leaves *seq* untouched.
    Only the first character of *label* is inspected.
    """
    tag = label[0]
    if tag in ('B', 'S'):
        seq.append(feature)
    elif tag == 'I':
        if seq:
            seq[-1] += feature
        else:
            seq.append(feature)
def cal(predict_seq, right_seq):
    r"""
    Count word-level agreement between a predicted and a gold segmentation.

    :return: (#correct words, #wrong words, #gold words,
              1 if the whole sentence is segmented correctly else 0)

    NOTE(review): membership is tested with ``in`` and ignores positions,
    so a word repeated in the prediction can be over-counted; kept as-is
    to preserve the original metric.
    """
    all_correct = 1 if len(predict_seq) == len(right_seq) else 0
    matched = 0
    for word in predict_seq:
        if word in right_seq:
            matched += 1
        else:
            all_correct = 0
    return matched, len(predict_seq) - matched, len(right_seq), all_correct
def evaluate(model, dataset_loader, idx2feature, idx2label, device, log_file):
    r"""
    Compute segmentation quality of *model* over the data set.

    Words are rebuilt from the predicted and the gold label sequences
    (via add_to_seq) and compared with cal; the report is printed and
    appended to *log_file*.

    :return: P, R, F1, ER (precision, recall, F1, word error rate)
    """
    model.eval()
    num_sentence = 0
    num_right_sentence = 0
    num_all_word = 0
    num_right_word = 0
    num_error_word = 0
    for _, batch in tqdm.tqdm(enumerate(itertools.chain.from_iterable(dataset_loader))):
        batch = tuple(t.to(device) for t in batch)
        features, labels, masks = batch

        scores, paths = model(features, masks)

        num_sentence += features.shape[0]
        length = features.shape[1]
        for i, (sentence, label) in enumerate(zip(features, labels)):
            predict_seq = []
            right_seq = []
            for j, tensor_feat in enumerate(sentence):
                if j == length or masks[i][j] == 0:
                    # end of sentence (there is an <eof> marker):
                    # score the words collected for this sentence
                    nums = cal(predict_seq, right_seq)
                    num_right_word += nums[0]
                    num_error_word += nums[1]
                    num_all_word += nums[2]
                    num_right_sentence += nums[3]
                    break
                else:
                    feature = idx2feature[tensor_feat.item()]
                    predict_label = idx2label[paths[i][j].item()]
                    right_label = idx2label[label[j].item()]
                    # grow the predicted and the gold word sequences in parallel
                    add_to_seq(predict_seq, feature, predict_label)
                    add_to_seq(right_seq, feature, right_label)

    P = num_right_word / (num_error_word + num_right_word)
    R = num_right_word / (num_all_word)
    F1 = (2 * P * R) / (P + R)
    ER = num_error_word / num_all_word
    print(
        '标准词数:%d个,词数正确率:%f个,词数错误率:%f' % (num_all_word, num_right_word / num_all_word, num_error_word / num_all_word))
    print('标准行数:%d,行数正确率:%f' % (num_sentence, num_right_sentence / num_sentence))
    print('Recall: %f' % (R))
    print('Precision: %f' % (P))
    print('F1 MEASURE: %f' % (F1))
    print('ERR RATE: %f' % (ER))

    # append the same report to the log file
    with open(log_file, 'a') as f:
        f.write(
            '标准词数:%d个,词数正确率:%f个,词数错误率:%f\n' % (
                num_all_word, num_right_word / num_all_word, num_error_word / num_all_word))
        f.write('标准行数:%d,行数正确率:%f\n' % (num_sentence, num_right_sentence / num_sentence))
        f.write('Recall: %f\n' % (R))
        f.write('Precision: %f\n' % (P))
        f.write('F1 MEASURE: %f\n' % (F1))
        f.write('ERR RATE: %f\n\n\n' % (ER))

    return P, R, F1, ER
def evaluate_with_perl(gold_file, predict_file, log=None, epoch=0, loss=None, dev=True,
                       perl_path=r'/home/yxu/Seg_ner_pos/icwb2-data/scripts/score',
                       word_list=r'/home/yxu/Seg_ner_pos/icwb2-data/gold/pku_training_words.utf8'):
    r"""
    Score a segmentation with the external ``score`` perl script (faster
    than the pure-Python evaluation).

    :param gold_file: path to the gold-standard segmented file
    :param predict_file: path to the predicted segmentation
    :param log: optional path of a log file to append the report to
    :param epoch: epoch number written to the log header
    :param loss: optional epoch loss written to the log
    :param dev: whether the report is for the dev set (affects the log header only)
    :param perl_path: path to the score script; new parameter defaulting to
        the previously hard-coded machine-specific location
    :param word_list: path to the training word list the script needs; also
        previously hard-coded
    :return: (recall, precision, F1) parsed from the script's summary
    """
    p = subprocess.Popen(['perl', perl_path, word_list, gold_file, predict_file], stdout=subprocess.PIPE)
    output = p.stdout.read()
    output = output.decode(encoding='utf8')
    outputs = output.split('\n')
    p.kill()

    # the summary occupies the last 15 lines of the report
    res = outputs[-15:]
    # recall / precision / F1 are tab-separated values on fixed lines
    dev_R, dev_P, dev_F1 = float(res[-8].split('\t')[-1]), float(res[-7].split('\t')[-1]), float(
        res[-6].split('\t')[-1])

    if log is not None:
        with open(log, 'a') as f:
            f.write('EPOCH : %d\n' % epoch)
            if dev:
                f.write('Dev\n')
            else:
                f.write('Train\n')
            if loss is not None:
                f.write('Epoch loss : %f\n' % loss)
            for j in res:
                print(j)
                f.write(j + '\n')
    else:
        for j in res:
            print(j)

    return dev_R, dev_P, dev_F1
def predict_write(model, dataset_loader, idx2feature, idx2label, device, tmp_file='./tmp', origin_texts=None):
    r"""
    Write the model's predicted segmentation to a temporary file.

    Each output line is a sentence whose predicted words are separated by a
    single space (a space is inserted before every 'B'/'S'-tagged character
    except the first one).

    :param model: model exposing a ``predict(features, masks)`` method
    :param dataset_loader: iterable of batch loaders
    :param idx2feature: index -> character lookup
    :param idx2label: index -> label lookup
    :param device: device the batches are moved to
    :param tmp_file: path of the file to write
    :param origin_texts: optional raw text lines; when given, characters are
        taken from them so the output does not contain <unk> tokens
    :return: *tmp_file*, the path that was written
    """
    # switch the model to evaluation mode before predicting
    model.eval()
    with open(tmp_file, 'w') as f:
        for idx, batch in tqdm.tqdm(enumerate(itertools.chain.from_iterable(dataset_loader))):
            batch = tuple(t.to(device) for t in batch)
            features, labels, masks = batch
            # model.predict expects sequence-first tensors, hence the transposes
            features_v, labels_v, masks_v = features.transpose(0, 1), labels.transpose(0, 1), masks.transpose(0, 1)
            scores, predict_labels = model.predict(features_v, masks_v)
            batch_size = labels.shape[0]
            # raw text lines for this batch, used to avoid <unk> in the output
            if origin_texts:
                origin_text = origin_texts[idx * batch_size:(idx + 1) * batch_size]
            for j in range(batch_size):
                if origin_texts:
                    origin_line = origin_text[j]
                feature, predict_label, mask = features[j], predict_labels[j], masks[j]
                line = ''
                length = feature.shape[0]
                for k in range(length):
                    # stop at the end of the sequence or at the first padded position
                    if k + 1 == length or mask[k + 1].item() == 0:
                        break
                    else:
                        if origin_texts:
                            content = origin_line[k]
                        else:
                            content = idx2feature[feature[k].item()]
                        # 'B' or 'S' starts a new word -> insert a separating space
                        if idx2label[predict_label[k].item()][0] in ('B', 'S') and k != 0:
                            line += ' ' + content
                        else:
                            line += content
                f.write(line + '\n')
    return tmp_file
def read_line(f):
    """Read one line from *f*, stripped of surrounding whitespace/newline."""
    return f.readline().strip()
def evaluate_by_file(real_text_file, pred_text_file, prf_file, epoch):
    """
    Compare a predicted segmentation file against the gold file, line by line.

    Words are matched by their character offsets within each line, so a word
    only counts as correct when both of its boundaries agree with the gold
    segmentation. Prints a report, appends it to *prf_file* and returns
    (precision, recall, F1).

    Fix: all three file handles are now managed with ``with`` blocks -- the
    previous version never closed them, so the appended report relied on
    garbage collection to be flushed to disk.

    NOTE(review): reading stops at the first empty/whitespace-only gold line
    (``while line1``); confirm the input files never contain blank lines in
    the middle.
    """

    def _next_line(f):
        # read one line, stripped of the trailing newline / surrounding blanks
        return f.readline().strip()

    N_count = 0       # number of gold-standard words (denominator of recall)
    e_count = 0       # predicted words that match no gold word
    c_count = 0       # predicted words that match a gold word exactly
    e_line_count = 0  # lines with at least one segmentation error
    c_line_count = 0  # lines segmented perfectly

    with open(real_text_file, 'r', encoding='utf8') as file_gold, \
            open(pred_text_file, 'r', encoding='utf8') as file_tag:
        line1 = _next_line(file_gold)
        while line1:
            line2 = _next_line(file_tag)
            list1 = line1.split(' ')
            list2 = line2.split(' ')
            count1 = len(list1)  # gold words in this line
            N_count += count1
            if line1 == line2:
                c_line_count += 1  # perfectly segmented line
                c_count += count1  # every word in it is correct
            else:
                e_line_count += 1
                arr1 = []
                arr2 = []
                pos = 0
                for w in list1:
                    arr1.append(tuple([pos, pos + len(w)]))  # gold word spans
                    pos += len(w)
                pos = 0
                for w in list2:
                    arr2.append(tuple([pos, pos + len(w)]))  # predicted word spans
                    pos += len(w)
                # a predicted span is correct iff it appears among the gold spans
                for tp in arr2:
                    if tp in arr1:
                        c_count += 1
                    else:
                        e_count += 1
            line1 = _next_line(file_gold)

    R = float(c_count) / N_count
    P = float(c_count) / (c_count + e_count)
    F = 2. * P * R / (P + R)
    ER = 1. * e_count / N_count
    print("result:")
    print('标准词数:%d个,词数正确率:%f个,词数错误率:%f' % (N_count, c_count / N_count, e_count / N_count))
    print('标准行数:%d,行数正确率:%f,行数错误率:%f' % (c_line_count + e_line_count, c_line_count / (c_line_count + e_line_count),
                                       e_line_count / (c_line_count + e_line_count)))
    print('Recall: %f' % (R))
    print('Precision: %f' % (P))
    print('F MEASURE: %f' % (F))
    print('ERR RATE: %f' % (ER))

    # `with` guarantees the appended report is flushed and the handle closed
    with open(prf_file, 'a', encoding='utf-8') as f:
        f.write('result-(epoch:%s):\n' % epoch)
        f.write('标准词数:%d,词数正确率:%f,词数错误率:%f \n' % (N_count, c_count / N_count, e_count / N_count))
        f.write('标准行数:%d,行数正确率:%f,行数错误率:%f \n' % (c_line_count + e_line_count, c_line_count / (c_line_count + e_line_count),
                                           e_line_count / (c_line_count + e_line_count)))
        f.write('Recall: %f\n' % (R))
        f.write('Precision: %f\n' % (P))
        f.write('F MEASURE: %f\n' % (F))
        f.write('ERR RATE: %f\n' % (ER))
        f.write('====================================\n')
    return P, R, F
| YaooXu/Chinese_seg_ner_pos | evaluate.py | evaluate.py | py | 9,381 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "tqdm.tqdm",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "subproce... |
43034357184 | import datetime
import os
import time
import xarray as xr
from app.dataprocessing.benchmark import Timer
from app.dataprocessing.datasource_interface import IDatasource
from app.dataprocessing.local.local_reader import LocalReader
from app.dataprocessing.remote.opendap_access_cas import OpendapAccessCAS
from app.datastructures.datastructure_interface import INode, IStructure, get_bounds
from app.datastructures.n_dimensional.kd_tree import KDTree
from app.datastructures.three_dimensional.octree import Octree
from app.datastructures.two_dimensional.quad_tree import QuadTree
from dotenv import load_dotenv
from sympy import im
class CachedDS:
    """Metadata wrapper for a dataset chunk kept in the local cache."""

    def __init__(self, node: INode):
        # the (locally opened) xarray dataset of the cached chunk
        self.ds = node.ds
        # bounds covered by the chunk, derived from the dataset itself
        self.bounds = get_bounds(node.ds)
        # timestamp of when this cache entry was created
        self.time_stamp = datetime.datetime.now()
        # resolution value copied from the source node
        self.resolution = node.resolution

    def __str__(self) -> str:
        """Human-readable summary of the cached chunk (one field per line)."""
        return f"\tBounds:{self.bounds}\n\tCreated:{self.time_stamp}\n\tResolution:{self.resolution:.2f}"
class DataHandler:
    """
    Defines the data source and selects proper data structure

    Facade that wires a data source (a local NetCDF file or a remote
    OPeNDAP/CAS endpoint) to an index structure (QuadTree / Octree / KDTree,
    chosen by dimensionality) and serves dataset chunks as NetCDF files,
    caching chunks from remote sources locally.
    """

    def __init__(self) -> None:
        self.ds = None  # xarray.Dataset
        self.data_source: IDatasource = None
        self.data_structure: IStructure = None
        # maximum chunk size handed to the index structure
        # (presumably MB, cf. get_file_size_MB -- confirm)
        self.max_chunk_size = 50
        # True when data is streamed from a remote source (enables caching)
        self.on_demand_data = False
        # optional rules forwarded to the KDTree structure
        self.custom_rules = None
        # locally cached remote chunks
        self.cache: list[CachedDS] = []

    def set_max_chunk_size(self, chunk_size):
        """Set the maximum chunk size used when building the data structure."""
        self.max_chunk_size = chunk_size

    def get_cache(self):
        """Return the list of locally cached remote chunks."""
        return self.cache

    def set_custom_rules(self, custom_rules):
        """Set the custom rules forwarded to the KDTree structure."""
        self.custom_rules = custom_rules

    def set_opendap_cas(
        self,
        cas_url,
        ds_url,
        username,
        password,
        file_size=None,
        constraints=None,
        struct=None,
    ):
        """Use a CAS-protected OPeNDAP endpoint as the data source and build the index."""
        self.on_demand_data = True
        # NOTE(review): PEP 8 prefers `is None` over `== None` here
        if username == None or password == None:
            print("please save credentials to .env")
        self.data_source = OpendapAccessCAS(
            username,
            password,
            ds_url,
            cas_url,
            file_size_MB=file_size,
            constraints=constraints,
        )
        self.ds = self.data_source.get_dataset()
        with Timer("Creating data structure"):
            self.data_structure = self.__set_data_structure(struct)

    def set_local_netcdf_reader(self, file_path, constraints=None, struct=None):
        """Use a local NetCDF file as the data source and build the index."""
        self.data_source = LocalReader(file_path, constraints)
        with Timer("Loading dataset"):
            self.ds = self.data_source.get_dataset()
        with Timer("Creating data structure"):
            self.data_structure = self.__set_data_structure(struct)

    def get_inital_netcdf(self):
        """Write the initial (overview) dataset to a NetCDF file and return its path.

        NOTE(review): the name keeps the original 'inital' typo because
        callers depend on it.
        """
        ds, bounds, node = self.data_structure.get_initial_dataset()
        file_name = "tmp/nc/data_" + str(time.time()) + ".nc"  # TODO: revisit.
        ds.to_netcdf(file_name)
        if self.on_demand_data:
            # remote source: re-open the file locally and remember it in the cache
            self.__node_stream_to_local_src(node, file_name)
        return file_name

    def get_initial_ds(self):
        """Return the initial dataset as (xarray.Dataset, bounds, node) without writing a file."""
        ds, bounds, node = self.data_structure.get_initial_dataset()
        return ds, bounds, node

    def request_data_netcdf(self, bounds, return_xr_chunk=False, fit_bounds=False):
        """Fetch the single chunk covering *bounds*, write it to NetCDF and return its path.

        When *return_xr_chunk* is True, also return the chunk's bounds and node.
        """
        ds, bounds, node = self.data_structure.request_data_single_chunk(
            bounds, fit_bounds=fit_bounds
        )
        # NOTE(review): only the last 5 characters of time.time() are used,
        # so file names can collide -- TODO marker kept from the original
        file_name = "tmp/nc/data_" + str(time.time())[-5:] + ".nc"  # TODO: revisit.
        ds.to_netcdf(file_name)
        if self.on_demand_data and fit_bounds == False:
            self.__node_stream_to_local_src(node, file_name)
        if return_xr_chunk:
            return file_name, bounds, node
        else:
            return file_name

    def get_file_size_MB(self, file_path):
        """Return the size of *file_path* in megabytes."""
        return os.path.getsize(file_path) / (1024 * 1024)

    def get_node_resolution(self, node):
        """Return the node's resolution as a percentage."""
        return self.data_structure.get_node_resolution(node) * 100

    def get_node_spatial_resolution(self, node) -> dict:
        """Return the node's spatial resolution as reported by the data structure."""
        return self.data_structure.get_node_spatial_resolution(node)

    def get_full_xr_ds(self) -> xr.Dataset:
        """Return the full xarray dataset held by the data structure."""
        return self.data_structure.ds

    def __node_stream_to_local_src(self, node, file_path):
        """Point *node* at the locally written file and record it in the cache list."""
        # store cache in list
        node.ds = xr.open_dataset(file_path)
        self.cache.append(CachedDS(node))

    def __set_data_structure(self, custom):
        """Choose the index structure.

        An explicit ``custom == "KDTree"`` forces a KDTree; otherwise the
        choice follows the dataset's dimensionality: 2 -> QuadTree,
        3 -> Octree, >3 -> KDTree. Anything else raises.
        """
        if custom:
            if custom == "KDTree":
                return KDTree(
                    self.ds,
                    full_file_size=self.data_source.get_file_size_MB(),
                    max_chunk_size=self.max_chunk_size,
                    custom_rules=self.custom_rules,
                )
        ds_dims = self.__get_num_dimensions()
        if ds_dims == 2:
            return QuadTree(
                self.ds, self.data_source.get_file_size_MB(), self.max_chunk_size
            )
        elif ds_dims == 3:
            return Octree(
                self.ds, self.data_source.get_file_size_MB(), self.max_chunk_size
            )
        elif ds_dims > 3:
            return KDTree(
                self.ds,
                full_file_size=self.data_source.get_file_size_MB(),
                max_chunk_size=self.max_chunk_size,
                custom_rules=self.custom_rules,
            )
        else:
            raise Exception("DataHandler: unsupported number of dimensions")

    def __get_num_dimensions(self):
        """Return the number of dimensions of the loaded dataset."""
        return len(self.ds.dims)
| oyjoh/adaptive-data-retrieval | app/dataprocessing/data_handler.py | data_handler.py | py | 5,439 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.datastructures.datastructure_interface.INode",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "app.datastructures.datastructure_interface.get_bounds",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number":... |
21877552923 | import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.hashes import SHA256
from cryptography.hazmat.primitives.asymmetric import padding
# save file helper
def save_file(filename, content):
    """Write *content* (bytes) to *filename* in this script's directory.

    The file is created (or overwritten) next to this source file rather
    than the current working directory, so key files land in a known place.
    """
    directory = os.path.dirname(os.path.abspath(__file__))
    # Context manager guarantees the handle is closed even if write() raises
    # (the original open/write/close leaked the handle on error).
    with open(os.path.join(directory, filename), "wb") as f:
        f.write(content)
def create_key():
    """Generate a 4096-bit RSA key pair and store both halves as PEM files.

    The private key is written unencrypted to ``private.pem`` and the
    matching public key to ``public.pem``, both next to this script.
    """
    # Standard RSA parameters: public exponent 65537, 4096-bit modulus.
    key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=4096,
        backend=default_backend(),
    )

    # Serialize and persist the private half (PKCS#8, no passphrase).
    # To protect it with a password instead, use
    # serialization.BestAvailableEncryption(b'mypassword').
    private_pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    )
    save_file("private.pem", private_pem)

    # Derive, serialize, and persist the public half (SubjectPublicKeyInfo).
    public_pem = key.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    save_file("public.pem", public_pem)
| Marcus11Dev/Blockchain_Lesson_Agent | create_Keys.py | create_Keys.py | py | 1,319 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cryptography.hazmat.prim... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.