seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20154443994 | from datetime import datetime # needed to read and compare dates
class item: # initiates item class for all inventory elements
def __init__(self, itemID=0, manuF='none', itemT='none', itemP=0.0, serv=datetime.today(), dmg='False'):
self.itemID = itemID
self.manuF = manuF
self.itemT = itemT
self.itemP = itemP
self.serv = serv
self.dmg = dmg
if __name__ == "__main__":
full_table = {} # Dictionary to hold all inventory elements for retrieval
item_type = []
itemID_List = []
dmg_inv = [] # Lists for sorting
manuF_list = []
date_list = []
price_list = []
import csv # Needed for csv manipulation
with open('ManufacturerList.csv', 'r') as csvfile: # Opens/reads manufacturer list
list_reader = csv.reader(csvfile)
for row in list_reader: # Takes rows from csv and adds them to line
line = item() # Classifies line as an item
line.itemID = row[0]
line.manuF = row[1] # Places elements in required organizational structure in line
line.itemT = row[2]
line.dmg = row[3]
full_table[line.itemID] = line # Defines ItemID as key and adds line items to Full Table dictionary
with open('PriceList.csv', 'r') as csvfile: # Opens/reads price list and add missing elements to Full table dictionary
list_reader = csv.reader(csvfile)
for row in list_reader: # Adds any missing elements
full_table[row[0]].itemP = row[1]
with open('ServiceDatesList.csv','r') as csvfile: # Opens/reads service list and adds missing elements
list_reader = csv.reader(csvfile)
date_format = '%m/%d/%Y' # Format for datetime
for row in list_reader:
new_date = datetime.strptime(row[1], date_format).date() # strips dates
full_table[row[0]].serv = new_date # adds elements to table
for i in full_table: # creates a list of just manufacturers
manuF_list.append(full_table[i].manuF)
manuF_list.sort() # sorts manufacturer list
manuF = {} # placed in dictionary
for i in manuF_list: # specified key to ensure unique values
manuF[i] = i
with open('FullInventory.csv', 'w', newline='') as csvfile: # opens for writing
line_writer = csv.writer(csvfile)
for i in manuF: # iterates and adds sorted elements to csv
for j in full_table:
if full_table[j].manuF == i:
line_writer.writerow([full_table[j].itemID, full_table[j].manuF,
full_table[j].itemT, full_table[j].itemP,
full_table[j].serv, full_table[j].dmg])
for i in full_table: # creates list for item type
item_type.append(full_table[i].itemT)
item_type.sort() # sorts item types
itemT = {}
for i in item_type: # ensures unique values
itemT[i] = i
for i in full_table: # creates list for ItemID
itemID_List.append(full_table[i].itemID)
itemID_List.sort() # sorts item ID
for i in itemT:
with open(f'{i.capitalize()}Inventory.csv', 'w', newline='') as csvfile: # opens capitalized variable named csv for writing
line_writer = csv.writer(csvfile)
for j in itemID_List:
for k in full_table:
if full_table[k].itemID == j: # iterates and writes needed elements to file
line_writer.writerow([full_table[k].itemID, full_table[k].manuF,
full_table[k].itemP, full_table[k].serv,
full_table[k].dmg])
for i in full_table: # creation of date list table
date_list.append(full_table[i].serv)
date_list.sort()
dates = {}
for i in date_list: # ensure unique values
dates[i] = i
with open('PastServiceDateInventory.csv', 'w', newline='') as csvfile: # opens past service csv for writing
line_writer = csv.writer(csvfile)
for i in dates:
for j in full_table:
if datetime.today().date() > full_table[j].serv and full_table[j].serv == i: #compared today's date with the service due date
line_writer.writerow([full_table[j].itemID, full_table[j].manuF,
full_table[j].itemT, full_table[j].itemP,
full_table[j].serv, full_table[j].dmg],)
for i in full_table: # adds prices to list
price_list.append(full_table[i].itemP)
price_list.sort() # sorts prices
prices = {}
for i in price_list: # ensures unique price values
prices[i] = i
with open('DamagedInventory.csv','w', newline='') as csvfile: # opens csv to write damaged inventory
line_writer = csv.writer(csvfile)
for i in prices:
for j in full_table:
if full_table[j].dmg == 'damaged' and full_table[j].itemP == i: # checks if item flagged as damaged
line_writer.writerow([full_table[j].itemID, full_table[j].manuF, # writes rows with relevant information
full_table[j].itemT, full_table[j].itemP,
full_table[j].serv])
print('Reports generated.')
| BrittanyZimmerman/CIS2348 | FinalProject - Part 1/FinalProjectPart1.py | FinalProjectPart1.py | py | 5,756 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.today",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "csv.reader",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"... |
31944141020 | """empty message
Revision ID: 0e84780c08ce
Revises:
Create Date: 2023-06-25 00:48:55.259558
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0e84780c08ce'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('puzzles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('quote', sa.String(length=255), nullable=True),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(), nullable=True),
sa.Column('email', sa.String(), nullable=True),
sa.Column('password', sa.String(), nullable=True),
sa.Column('score', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('messages',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('score', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('messages')
op.drop_table('users')
op.drop_table('puzzles')
# ### end Alembic commands ###
| yinsont/lit-crypts | server/migrations/versions/0e84780c08ce_.py | 0e84780c08ce_.py | py | 1,792 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
70446469223 | """
East Text detection
"""
import cv2
import numpy as np
from imutils.object_detection import non_max_suppression
from cfir_game_lens.utils import Box, GameCoverImage, ImageSize
from dataclasses import astuple
class EAST: # pylint: disable=too-few-public-methods
"""East Class
"""
LAYER_NAMES = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]
EAST_MODEL_FILE = "frozen_east_text_detection.pb"
SCALE_FACTOR = 1.0
MEAN_SUBSTRACTION = (123.68, 116.78, 103.94)
MIN_CONFIDENCE = 0.5
def __init__(self):
self._net = cv2.dnn.readNet(self.EAST_MODEL_FILE)
@classmethod
def _resize_image(cls, img: GameCoverImage):
new_size = ImageSize()
new_size.height = round(img.size.height / 32) * 32
new_size.width = round(img.size.width / 32) * 32
new_img = GameCoverImage(img=cv2.resize(img.img, new_size.get_rev_tuple()))
return new_img
def _get_blob(self, img: GameCoverImage):
blob_args = [img.img, self.SCALE_FACTOR, img.size.get_rev_tuple(), self.MEAN_SUBSTRACTION]
blob = cv2.dnn.blobFromImage(*blob_args, swapRB=True, crop=False)
return blob
@classmethod
def _build_box(cls, offset_x, offset_y, angle, loc_data):
(offset_x, offset_y) = (offset_x * 4.0, offset_y * 4.0)
cos = np.cos(angle)
sin = np.sin(angle)
height = loc_data[0] + loc_data[2]
width = loc_data[1] + loc_data[3]
end_x = int(offset_x + (cos * loc_data[1]) + (sin * loc_data[2]))
end_y = int(offset_y - (sin * loc_data[1]) + (cos * loc_data[2]))
start_x = int(end_x - width)
start_y = int(end_y - height)
res = Box(start_x, start_y, end_x, end_y)
return res
def _build_boxes(self, scores, geometry):
boxes = []
confidences = []
(num_rows, num_cols) = scores.shape[2:4]
for y in range(0, num_rows): # pylint: disable=invalid-name
# extract the scores (probabilities), followed by the
# geometrical data used to derive potential bounding box
# coordinates that surround text
scores_data = scores[0, 0, y]
loc_data_arr = [geometry[0, 0, y], geometry[0, 1, y], geometry[0, 2, y], geometry[0, 3, y]]
angles_data = geometry[0, 4, y]
# loop over the number of columns
for x in range(0, num_cols): # pylint: disable=invalid-name
if scores_data[x] < self.MIN_CONFIDENCE:
continue
loc_data_arr_x = [loc_data[x] for loc_data in loc_data_arr]
box = self._build_box(x, y, angles_data[x], loc_data_arr_x)
boxes.append(box)
confidences.append(scores_data[x])
return boxes, confidences
def find_text(self, image: GameCoverImage):
"""Find boxes of text in image
Arguments:
image {GameCoverImage} -- [description]
Returns:
List[Box] -- Boxes with text
"""
# run east model
resized_img = self._resize_image(image)
self._net.setInput(self._get_blob(resized_img))
(scores, geometry) = self._net.forward(self.LAYER_NAMES)
# create boxes from east output and filter according to confidences\scores
boxes, confidences = self._build_boxes(scores, geometry)
boxes_as_tuple = [astuple(x) for x in boxes]
boxes = non_max_suppression(np.array(boxes_as_tuple), probs=confidences)
boxes_res = [Box(*x) for x in boxes]
# fix images ratio
ratio_width = image.size.width / float(resized_img.size.width)
ratio_height = image.size.height / float(resized_img.size.height)
for box in boxes_res:
box.fix_sizes(ratio_width, ratio_height)
return boxes_res
| CfirTsabari/cfir_game_lens | cfir_game_lens/east.py | east.py | py | 3,836 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.dnn.readNet",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.dnn",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "cfir_game_lens.utils.GameCoverImage",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "cfir... |
23781717999 | #!/usr/bin/env python3
import gym
from gym import wrappers
import gym_gazebo
import time
import numpy
import random
import time
import qlearn
import liveplot
from matplotlib import pyplot as plt
def render():
render_skip = 0 # Skip first X episodes.
render_interval = 50 # Show render Every Y episodes.
render_episodes = 10 # Show Z episodes every rendering.
if (x % render_interval == 0) and (x != 0) and (x > render_skip):
env.render()
elif (((x-render_episodes) % render_interval == 0) and (x != 0) and
(x > render_skip) and (render_episodes < x)):
env.render(close=True)
if __name__ == '__main__':
env = gym.make('Gazebo_Lab06-v0')
outdir = '/tmp/gazebo_gym_experiments'
env = gym.wrappers.Monitor(env, outdir, force=True)
plotter = liveplot.LivePlot(outdir)
last_time_steps = numpy.ndarray(0)
qlearn = qlearn.QLearn(actions=range(env.action_space.n),
alpha=0.2, gamma=0.8, epsilon=0.9)
# qlearn.loadQ("QValues_A+")
initial_epsilon = qlearn.epsilon
epsilon_discount = 0.9986 # each episode, we're going to reduce epsilon by this much (transition from explore to greedy)
start_time = time.time()
total_episodes = 10000
highest_reward = 0
for x in range(total_episodes):
done = False
cumulated_reward = 0
observation = env.reset()
#decrease chance of random action
if qlearn.epsilon > 0.05: #stop reducing epsilon at a certain point (don't want all greedy)
qlearn.epsilon *= epsilon_discount #reduce epsilon each episode
state = ''.join(map(str, observation)) #state holds "old" observation (state we're in before we take action)
# To change max episode steps, go to gym_gazebo/__init__.py
i = -1
while True:
i += 1
# Pick an action based on the current state
action = qlearn.chooseAction(state)
# Execute the action and get feedback
observation, reward, done, info = env.step(action)
#new state after action, reward associated with action, whether we're done or not, and any info parameters
cumulated_reward += reward
if highest_reward < cumulated_reward:
highest_reward = cumulated_reward
nextState = ''.join(map(str, observation)) #if observation was [0,0,1], this would give 001 (single string)
#nextState is state we've transitioned into (the current state)
#learn based on start state, action we took, reward from taking that action, and state we've transitioned into from that action
qlearn.learn(state, action, reward, nextState)
env._flush(force=True)
if not(done):
state = nextState #set current state to state we've transitioned into
else:
last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
break
print("===== Completed episode {}".format(x))
if (x > 0) and (x % 5 == 0):
qlearn.saveQ("QValues")
plotter.plot(env)
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
print ("Starting EP: " + str(x+1) +
" - [alpha: " + str(round(qlearn.alpha, 2)) +
" - gamma: " + str(round(qlearn.gamma, 2)) +
" - epsilon: " + str(round(qlearn.epsilon, 2)) +
"] - Reward: " + str(cumulated_reward) +
" Time: %d:%02d:%02d" % (h, m, s))
# Github table content
print ("\n|"+str(total_episodes)+"|"+str(qlearn.alpha)+"|" +
str(qlearn.gamma)+"|"+str(initial_epsilon)+"*" +
str(epsilon_discount)+"|"+str(highest_reward) + "| PICTURE |")
l = last_time_steps.tolist()
l.sort()
# print("Parameters: a="+str)
print("Overall score: {:0.2f}".format(last_time_steps.mean()))
print("Best 100 score: {:0.2f}".
format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
env.close()
| mjohal67/ENPH353_Lab06_Reinforcement | examples/gazebo_lab06_ex/gazebo_lab06_ex.py | gazebo_lab06_ex.py | py | 4,089 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gym.make",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "gym.wrappers.Monitor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "gym.wrappers",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "liveplot.LivePlot",
... |
43249592224 | from django.shortcuts import render
from articles.models import Article
def articles_list(request):
template = 'articles/news.html'
acricles = Article.objects.all().prefetch_related('scopes')
ordering = '-published_at'
context = {'object_list': acricles.order_by(ordering)}
return render(request, template, context)
| StickKing/netology-dj-homeworks | 2.2-databases-2/m2m-relations/articles/views.py | views.py | py | 339 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "articles.models.Article.objects.all",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "articles.models.Article.objects",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "articles.models.Article",
"line_number": 8,
"usage_type": "name"
... |
43301294024 | from rpython.rlib.rarithmetic import r_singlefloat, r_uint
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
r_uint32 = rffi.r_uint
assert r_uint32.BITS == 32
UINT32MAX = 2 ** 32 - 1
# keep in sync with the C code in pypy__decay_jit_counters below
ENTRY = lltype.Struct('timetable_entry',
('times', lltype.FixedSizeArray(rffi.FLOAT, 5)),
('subhashes', lltype.FixedSizeArray(rffi.USHORT, 5)))
class JitCounter:
"""A process translated with the JIT contains one prebuilt instance
of this class. It is used for three things:
* It maps greenkey hashes to counters, to know when we have seen this
greenkey enough to reach the 'threshold' or 'function_threshold'
parameters. This is done in a lossy way by a fixed-size 'timetable'.
* It handles the counters on the failing guards, for 'trace_eagerness'.
This is done in the same 'timetable'.
* It records the JitCell objects that are created when we compile
a loop, in a non-lossy dictionary-like strurcture. This is done
in the 'celltable'.
The 'timetable' is a table of DEFAULT_SIZE entries, each of which
containing 5 entries. From a hash value, we use the index number
'_get_index(hash)', and then we look in all five entries for a
matching '_get_subhash(hash)'. The five entries are roughly kept
sorted by decreasing recorded time. The hash value itself should be
computed accordingly: we only use bits 21:32 for _get_index and
bits 0:16 for _get_subhash. (This organization is "probably good"
to get not-too-random behavior; another motivation for it was for
the STM branch, to avoid pointless conflicts between threads.)
The time value stored in the timetable is a (short-precision)
floating-point number. The idea is that a value of 0.0 means
absent, and values go up to the maximum of 1.0.
'compute_threshold(threshold)' returns basically the fraction
1.0/threshold, corresponding to the 'increment' value for the
following APIs.
'tick(hash, increment)' adds 'increment' to the time value stored
with the 'hash'. Remember that only bits 0:16,21:32 of the hash
are used; in case of collision between two hashes, they will grow
twice as fast, because each tick() call will contribute to the
colliding time value.
'fetch_next_hash()' returns a "random" hash value suitable for
using in tick() later. Used when compiling guards; when the
guard actually fails, we'll tick() the guard's stored random hash.
'reset(hash)', 'change_current_fraction(hash, new_time_value)'
change the time value associated with a hash. The former resets
it to zero, and the latter changes it to the given value (which
should be a value close to 1.0).
'set_decay(decay)', 'decay_all_counters()' is used to globally
reduce all the stored time values. They all get multiplied by
a fraction close to (but smaller than) 1.0, computed from the
'decay' parameter.
'install_new_cell(hash, newcell)' adds the new JitCell to the
celltable, at the index given by 'hash' (bits 21:32). Unlike
the timetable, the celltable stores a linked list of JitCells
for every entry, and so it is not lossy.
'lookup_chain(hash)' returns the first JitCell at 'hash'. You can
then walk the chain by following the '.next' attributes until you
reach None.
'cleanup_chain(hash)' resets the timetable's 'hash' entry and
cleans up the celltable at 'hash'. It removes those JitCells
for which 'cell.should_remove_jitcell()' returns True.
"""
DEFAULT_SIZE = 2048
def __init__(self, size=DEFAULT_SIZE, translator=None):
"NOT_RPYTHON"
self.size = size
self.shift = 16
while (UINT32MAX >> self.shift) != size - 1:
self.shift += 1
assert self.shift < 999, "size is not a power of two <= 2**16"
#
# The table of timings. This is a 5-ways associative cache.
# We index into it using a number between 0 and (size - 1),
# and we're getting a 32-bytes-long entry; then this entry
# contains 5 possible ways, each occupying 6 bytes: 4 bytes
# for a float, and the 2 lowest bytes from the original hash.
self.timetable = lltype.malloc(rffi.CArray(ENTRY), self.size,
flavor='raw', zero=True,
track_allocation=False)
self._nexthash = r_uint(0)
#
# The table of JitCell entries, recording already-compiled loops
self.celltable = [None] * size
#
if translator is not None:
class Glob:
step = 0
glob = Glob()
def invoke_after_minor_collection():
# After 32 minor collections, we call decay_all_counters().
# The "--jit decay=N" option measures the amount the
# counters are then reduced by.
glob.step += 1
if glob.step == 32:
glob.step = 0
self.decay_all_counters()
if not hasattr(translator, '_jit2gc'):
translator._jit2gc = {}
translator._jit2gc['invoke_after_minor_collection'] = (
invoke_after_minor_collection)
def compute_threshold(self, threshold):
"""Return the 'increment' value corresponding to the given number."""
if threshold <= 0:
return 0.0 # no increment, never reach 1.0
return 1.0 / (threshold - 0.001)
def _get_index(self, hash):
"""Return the index (< self.size) from a hash. This truncates
the hash to 32 bits, and then keep the *highest* remaining bits.
Be sure that hash is computed correctly, by multiplying with
a large odd number or by fetch_next_hash()."""
hash32 = r_uint(r_uint32(hash)) # mask off the bits higher than 32
index = hash32 >> self.shift # shift, resulting in a value < size
return index # return the result as a r_uint
_get_index._always_inline_ = True
@staticmethod
def _get_subhash(hash):
return hash & 65535
def fetch_next_hash(self):
result = self._nexthash
# note: all three "1" bits in the following constant are needed
# to make test_counter.test_fetch_next_index pass. The first
# is to increment the "subhash" (lower 16 bits of the hash).
# The second is to increment the "index" portion of the hash.
# The third is so that after 65536 passes, the "index" is
# incremented by one more (by overflow), so that the next
# 65536 passes don't end up with the same subhashes.
self._nexthash = result + r_uint(1 | (1 << self.shift) |
(1 << (self.shift - 16)))
return result
def _swap(self, p_entry, n):
if float(p_entry.times[n]) > float(p_entry.times[n + 1]):
return n + 1
else:
x = p_entry.times[n]
p_entry.times[n] = p_entry.times[n + 1]
p_entry.times[n + 1] = x
x = p_entry.subhashes[n]
p_entry.subhashes[n] = p_entry.subhashes[n + 1]
p_entry.subhashes[n + 1] = x
return n
_swap._always_inline_ = True
def _tick_slowpath(self, p_entry, subhash):
if p_entry.subhashes[1] == subhash:
n = self._swap(p_entry, 0)
elif p_entry.subhashes[2] == subhash:
n = self._swap(p_entry, 1)
elif p_entry.subhashes[3] == subhash:
n = self._swap(p_entry, 2)
elif p_entry.subhashes[4] == subhash:
n = self._swap(p_entry, 3)
else:
n = 4
while n > 0 and float(p_entry.times[n - 1]) == 0.0:
n -= 1
p_entry.subhashes[n] = rffi.cast(rffi.USHORT, subhash)
p_entry.times[n] = r_singlefloat(0.0)
return n
def tick(self, hash, increment):
p_entry = self.timetable[self._get_index(hash)]
subhash = self._get_subhash(hash)
#
if p_entry.subhashes[0] == subhash:
n = 0
else:
n = self._tick_slowpath(p_entry, subhash)
#
counter = float(p_entry.times[n]) + increment
if counter < 1.0:
p_entry.times[n] = r_singlefloat(counter)
return False
else:
# when the bound is reached, we immediately reset the value to 0.0
self.reset(hash)
return True
tick._always_inline_ = True
def change_current_fraction(self, hash, new_fraction):
"""Change the value stored for 'hash' to be the given 'new_fraction',
which should be a float equal to or slightly lower than 1.0.
"""
p_entry = self.timetable[self._get_index(hash)]
subhash = self._get_subhash(hash)
# find in 'n' the index that will be overwritten: the first within
# range(5) that contains either the right subhash, or a null time
# (or, if there isn't any, then just n == 4 will do).
n = 0
while n < 4 and (p_entry.subhashes[n] != subhash and
float(p_entry.times[n]) != 0.0):
n += 1
# move one step to the right all elements [n - 1, n - 2, ..., 0],
# (this overwrites the old item at index 'n')
while n > 0:
n -= 1
p_entry.subhashes[n + 1] = p_entry.subhashes[n]
p_entry.times[n + 1] = p_entry.times[n]
# insert the new hash at index 0. This is a good approximation,
# because change_current_fraction() should be used for
# new_fraction == value close to 1.0.
p_entry.subhashes[0] = rffi.cast(rffi.USHORT, subhash)
p_entry.times[0] = r_singlefloat(new_fraction)
def reset(self, hash):
p_entry = self.timetable[self._get_index(hash)]
subhash = self._get_subhash(hash)
for i in range(5):
if p_entry.subhashes[i] == subhash:
p_entry.times[i] = r_singlefloat(0.0)
def lookup_chain(self, hash):
return self.celltable[self._get_index(hash)]
def cleanup_chain(self, hash):
self.reset(hash)
self.install_new_cell(hash, None)
def install_new_cell(self, hash, newcell):
index = self._get_index(hash)
cell = self.celltable[index]
keep = newcell
while cell is not None:
nextcell = cell.next
if not cell.should_remove_jitcell():
cell.next = keep
keep = cell
cell = nextcell
self.celltable[index] = keep
def set_decay(self, decay):
"""Set the decay, from 0 (none) to 1000 (max)."""
if decay < 0:
decay = 0
elif decay > 1000:
decay = 1000
self.decay_by_mult = 1.0 - (decay * 0.001)
def decay_all_counters(self):
# Called during a minor collection by the GC, to gradually decay
# counters that didn't reach their maximum. Thus if a counter
# is incremented very slowly, it will never reach the maximum.
# This avoids altogether the JIT compilation of rare paths.
# We also call this function when any maximum bound is reached,
# to avoid sudden bursts of JIT-compilation (the next one will
# not reach the maximum bound immmediately after). This is
# important in corner cases where we would suddenly compile more
# than one loop because all counters reach the bound at the same
# time, but where compiling all but the first one is pointless.
p = rffi.cast(rffi.CCHARP, self.timetable)
pypy__decay_jit_counters(p, self.decay_by_mult, self.size)
# this function is written directly in C; gcc will optimize it using SSE
eci = ExternalCompilationInfo(post_include_bits=["""
static void pypy__decay_jit_counters(char *data, double f1, long size) {
struct rpy_jitcnt { float times[5]; unsigned short subhashes[5]; };
struct rpy_jitcnt *p = (struct rpy_jitcnt *)data;
float f = (float)f1;
long i;
for (i=0; i<size; i++) {
p->times[0] *= f;
p->times[1] *= f;
p->times[2] *= f;
p->times[3] *= f;
p->times[4] *= f;
++p;
}
}
"""])
pypy__decay_jit_counters = rffi.llexternal(
"pypy__decay_jit_counters", [rffi.CCHARP, lltype.Float, lltype.Signed],
lltype.Void, compilation_info=eci, _nowrapper=True, sandboxsafe=True)
# ____________________________________________________________
#
# A non-RPython version that avoids issues with rare random collisions,
# which make all tests brittle
class DeterministicJitCounter(JitCounter):
def __init__(self):
from collections import defaultdict
JitCounter.__init__(self, size=8)
def make_null_entry():
return lltype.malloc(ENTRY, immortal=True, zero=True)
self.timetable = defaultdict(make_null_entry)
self.celltable = defaultdict(lambda: None)
def _get_index(self, hash):
"NOT_RPYTHON"
return hash
def decay_all_counters(self):
"NOT_RPYTHON"
pass
def _clear_all(self):
self.timetable.clear()
self.celltable.clear()
| mozillazg/pypy | rpython/jit/metainterp/counter.py | counter.py | py | 13,442 | python | en | code | 430 | github-code | 36 | [
{
"api_name": "rpython.rtyper.lltypesystem.rffi.r_uint",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rpython.rtyper.lltypesystem.rffi",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "rpython.rtyper.lltypesystem.lltype.Struct",
"line_number": 11,
... |
75107100903 | from pyrogram import Client, Filters, InlineKeyboardMarkup, InlineKeyboardButton, Emoji
from config import Messages as tr
@Client.on_message(Filters.private & Filters.incoming & Filters.command(['start']))
async def _start(client, message):
await client.send_message(chat_id = message.chat.id,
text = tr.START_MSG.format(message.from_user.first_name),
parse_mode = "markdown",
disable_notification = True,
reply_to_message_id = message.message_id
)
@Client.on_message(Filters.private & Filters.incoming & Filters.command(['help']))
async def _help(client, message):
await client.send_message(chat_id = message.chat.id,
text = tr.HELP_MSG[1],
parse_mode = "markdown",
disable_notification = True,
reply_markup = InlineKeyboardMarkup(map(1)),
reply_to_message_id = message.message_id
)
help_callback_filter = Filters.create(lambda _, query: query.data.startswith('help+'))
@Client.on_callback_query(help_callback_filter)
async def help_answer(c, callback_query):
chat_id = callback_query.from_user.id
message_id = callback_query.message.message_id
msg = int(callback_query.data.split('+')[1])
await c.edit_message_text(chat_id = chat_id, message_id = message_id,
text = tr.HELP_MSG[msg], reply_markup = InlineKeyboardMarkup(map(msg))
)
def map(pos):
if(pos==1):
button = [
[InlineKeyboardButton(text = '-->', callback_data = "help+2")]
]
elif(pos==len(tr.HELP_MSG)-1):
button = [
[InlineKeyboardButton(text = 'Support Chat', url = "https://www.github.com/cdfxscrq")],
[InlineKeyboardButton(text = 'Feature Request', url = "https://t.me/Akshayan1")],
[InlineKeyboardButton(text = '<--', callback_data = f"help+{pos-1}")]
]
else:
button = [
[
InlineKeyboardButton(text = '<--', callback_data = f"help+{pos-1}"),
InlineKeyboardButton(text = '-->', callback_data = f"help+{pos+1}")
],
]
return button | cdfxscrq/GDrive-Uploader-TG-Bot | plugins/help.py | help.py | py | 2,093 | python | en | code | 83 | github-code | 36 | [
{
"api_name": "config.Messages.START_MSG.format",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "config.Messages.START_MSG",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "config.Messages",
"line_number": 7,
"usage_type": "name"
},
{
"api_... |
12028416507 | # -*- coding: utf-8 -*-
import re
from django.utils import simplejson as json
from django.db import connection
from psycopg2.extensions import adapt, register_adapter, AsIs, new_type, register_type
from .adapt import ADAPT_MAPPER
rx_circle_float = re.compile(r'<\(([\d\.\-]*),([\d\.\-]*)\),([\d\.\-]*)>')
rx_line = re.compile(r'\[\(([\d\.\-]*),\s*([\w\.\-]*)\),\s*\(([\d\.\-]*),\s*([\d\.\+]*)\)\]')
rx_point = re.compile(r'\(([\d\.\-]*),\s*([\d\.\-]*)\)')
rx_box = re.compile(r'\(([\d\.\-]*),\s*([\d\.\-]*)\),\s*\(([\d\.\-]*),\s*([\d\.\-]*)\)')
rx_path_identify = re.compile(r'^((?:\(|\[))(.*)(?:\)|\])$')
""" SQL->PYTHON CAST """
def cast_point(value, cur):
if value is None:
return None
res = rx_point.search(value)
if not res:
raise ValueError("bad point representation: %r" % value)
return Point([int(x) if "." not in x else float(x) \
for x in res.groups()])
def cast_circle(value, cur):
if value is None:
return None
rxres = rx_circle_float.search(value)
if not rxres:
raise ValueError("bad circle representation: %r" % value)
return Circle([int(x) if "." not in x else float(x) \
for x in rxres.groups()])
def cast_lseg(value, cur):
if value is None:
return None
rxres = rx_line.search(value)
if not rxres:
raise ValueError("bad lseg representation: %r" % value)
return Lseg([int(x) if "." not in x else float(x) \
for x in rxres.groups()])
def cast_box(value, cur):
if value is None:
return None
rxres = rx_box.search(value)
if not rxres:
raise ValueError("bad box representation: %r" % value)
return Box([int(x) if "." not in x else float(x) \
for x in rxres.groups()])
def cast_path(value, cur):
if value is None:
return None
ident = rx_path_identify.search(value)
if not ident:
raise ValueError("bad path representation: %r" % value)
is_closed = True if "(" == ident.group(1) else False
points = ident.group(2)
if not points.strip():
raise ValueError("bad path representation: %r" % value)
return Path([(
int(x) if "." not in x else float(x), \
int(y) if "." not in y else float(y) \
) for x, y in rx_point.findall(points)], closed=is_closed)
def cast_polygon(value, cur):
if value is None:
return None
ident = rx_path_identify.search(value)
if not ident:
raise ValueError("bad path representation: %r" % value)
is_closed = True if "(" == ident.group(1) else False
points = ident.group(2)
if not points.strip():
raise ValueError("bad path representation: %r" % value)
return Polygon([(
int(x) if "." not in x else float(x), \
int(y) if "." not in y else float(y) \
) for x, y in rx_point.findall(points)], closed=is_closed)
CAST_MAPPER = {
'Point': cast_point,
'Circle': cast_circle,
'Box': cast_box,
'Path': cast_path,
'Polygon': cast_polygon,
'Lseg': cast_lseg,
}
class GeometricMeta(type):
    """
    Base metaclass for all geometric types.

    Normalizes construction so that ``Cls(x, y, ...)`` and
    ``Cls([x, y, ...])`` both build the tuple-based instance, and offers
    helpers to register psycopg2 casts/adapters for the type.
    """
    def __init__(cls, name, bases, attrs):
        super(GeometricMeta, cls).__init__(name, bases, attrs)
        # NOTE: historical misspelling ("_registed") kept for compatibility
        cls._registed = False

    def __call__(cls, *args):
        # Accept either several scalar arguments or one list/tuple argument.
        if len(args) > 1:
            return super(GeometricMeta, cls).__call__(tuple(args))
        elif isinstance(args[0], (list, tuple)):
            return super(GeometricMeta, cls).__call__(*args)
        raise ValueError("Incorrect parameters")
        # fixed: removed unreachable `return instance` left over from
        # commented-out on-demand registration code; `instance` was never
        # defined and the line sat after an unconditional raise.

    def register_cast(cls, connection):
        """Register the DB->Python cast for this type on the connection."""
        cast_function = CAST_MAPPER[cls.type_name()]
        cursor = connection.cursor()
        cursor.execute(cls.sql_for_oid())
        oid = cursor.description[0][1]  # type oid of the NULL::<type> column
        cursor.close()
        PGTYPE = new_type((oid,), cls.type_name().upper(), cast_function)
        register_type(PGTYPE)

    def register_adapter(cls):
        """Register the Python->DB adapter for this type.

        The inner call resolves to the module-level (psycopg2)
        register_adapter: plain names skip the class namespace.
        """
        adapt_function = ADAPT_MAPPER[cls.type_name()]
        register_adapter(cls, adapt_function)

    def type_name(cls):
        return cls.__name__

    def db_type(cls, connection):
        return cls.type_name().lower()

    def sql_for_oid(cls):
        # "SELECT NULL::<type>" yields a result column typed as <type>,
        # letting register_cast read its oid from cursor.description.
        ntype = cls.type_name().lower()
        return "SELECT NULL::%s" % (ntype)
class Point(tuple):
    """
    Immutable 2-tuple representing a geometric point (x, y).

    NOTE(review): Python 2 style code -- uses ``__metaclass__`` and
    ``long``; the tuple contents are actually set by tuple.__new__ via
    GeometricMeta.__call__, not by __init__.
    """
    __metaclass__ = GeometricMeta
    def __init__(self, args):
        # args is the normalized tuple built by GeometricMeta.__call__
        if len(args) == 2:
            super(Point, self).__init__(args)
        else:
            raise ValueError("Max is 2 elements")
        self._validate()

    def _validate(self):
        # both coordinates must be numeric
        if not isinstance(self.x, (int, long, float)) \
                or not isinstance(self.y, (int, long, float)):
            raise ValueError("invalid data")

    def __repr__(self):
        return "<Point(%s,%s)>" % self

    @property
    def x(self):
        return self[0]

    @property
    def y(self):
        return self[1]
class Circle(tuple):
    """Immutable 3-tuple (x, y, r): center coordinates plus radius."""
    __metaclass__ = GeometricMeta
    def __init__(self, args):
        if len(args) == 3:
            super(Circle, self).__init__(args)
        else:
            raise ValueError("invalid data")
        self._validate()

    def _validate(self):
        # only the radius is checked here; center values are not validated
        if not isinstance(self.r, (int, long, float)):
            raise ValueError("invalid data")

    def __repr__(self):
        return "<Circle(%s,%s)>" % (self.point, self.r)

    @property
    def r(self):
        return self[2]

    @property
    def point(self):
        # center of the circle as a Point
        return Point(self[:-1])

    def to_box(self):
        """Return the bounding box, computed server-side via box(circle),
        memoized on the instance.

        NOTE(review): relies on a global ``connection`` not defined in
        this excerpt (presumably the Django DB connection) -- confirm.
        """
        if hasattr(self, '_box'):
            return self._box
        cur = connection.cursor()
        cur.execute("select box(%s) as _;", [self])
        res = cur.fetchone()
        cur.close()
        if not res:
            raise ValueError("Unexpected error")
        self._box = res[0]
        return res[0]
class Lseg(tuple):
    """Immutable 4-tuple (x1, y1, x2, y2) representing a line segment."""
    __metaclass__ = GeometricMeta
    def __init__(self, args):
        if len(args) == 4:
            super(Lseg, self).__init__(args)
        else:
            raise ValueError("invalid content")

    def __iter__(self):
        # iterates as two endpoint 2-tuples, not as four scalars
        yield tuple(self.init_point)
        yield tuple(self.end_point)

    def __repr__(self):
        return "<Lseg(%s, %s)>" % \
            (self.init_point, self.end_point)

    @property
    def init_point(self):
        return Point(self[:2])

    @property
    def end_point(self):
        return Point(self[2:])
class Box(tuple):
    """Immutable 4-tuple (x1, y1, x2, y2): two opposite corners of a box."""
    __metaclass__ = GeometricMeta
    def __init__(self, args):
        if len(args) == 4:
            super(Box, self).__init__(args)
        else:
            raise ValueError("invalid content")

    def __repr__(self):
        return "<Box(%s,%s),(%s,%s)>" % self

    @property
    def init_point(self):
        return Point(self[:2])

    @property
    def end_point(self):
        return Point(self[2:])

    @property
    def center_point(self):
        # Computed server-side with PostgreSQL's @@ (center) operator and
        # memoized on the instance.
        # NOTE(review): relies on a global ``connection`` not defined in
        # this excerpt -- confirm its source.
        if hasattr(self, '_center_point'):
            return self._center_point
        cur = connection.cursor()
        cur.execute("select @@ %s;", [self])
        res = cur.fetchone()
        cur.close()
        if not res:
            raise ValueError("Unexpected error")
        self._center_point = res[0]
        return res[0]
class Path(tuple):
    """Immutable sequence of (x, y) point tuples forming a path."""
    __metaclass__ = GeometricMeta
    # class-level default; instances set their own value in __init__
    closed = False
    def __init__(self, args):
        points = []
        for item in args:
            if isinstance(item, (tuple, list, Point)):
                points.append(tuple(item))
            else:
                points = []
                raise ValueError("invalid content")
        # NOTE(review): closed is derived from the container type of args
        # here, although cast_path passes closed= as a keyword that
        # GeometricMeta.__call__ does not accept -- verify which wins for
        # instances built through the caster.
        self.closed = isinstance(args, tuple)
        if len(points) == 0:
            raise ValueError("invalid content")
        super(Path, self).__init__(points)

    def __repr__(self):
        return "<Path(%s) closed=%s>" % (len(self), self.closed)
class Polygon(Path):
    """A Path specialized as a polygon; only the repr differs."""
    __metaclass__ = GeometricMeta
    def __repr__(self):
        return "<Polygon(%s) closed=%s>" % (len(self), self.closed)
__all__ = ['Polygon', 'Point', 'Box', 'Circle', 'Path', 'Lseg']
| cr8ivecodesmith/django-orm-extensions-save22 | django_orm/postgresql/geometric/objects.py | objects.py | py | 8,366 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 13,
... |
24257764446 | """Find out how to 'clear the board' in Pyramid Solitaire.
The design is meant to be simple to understand so it is less likely to have
bugs, but to make Pyramid Solitaire solvable for the worst case scenarios, we
must do a bit of optimization work on the state representation.
This implementation skips all of the precalculations of the Java/Lisp versions
to keep things as simple as possible while still using the optimization of
cramming the entire state into an integer value.
This still needs more than 8GB RAM in the worst case because this algorithm
skips some of the features like unwinnable state detection."""
import collections
import solvers.deck
def card_value(card):
    """Return the card's numeric value according to Pyramid Solitaire rules.
    Aces are always 1, Jacks are 11, Queens are 12, and Kings are 13."""
    rank_order = "A23456789TJQK"
    return rank_order.index(solvers.deck.card_rank(card)) + 1
def cards_are_removable(card1, card2=None):
    """Return true if the card or cards can be removed together.
    Kings can be removed by themselves, and pairs of cards that add to 13."""
    total = 0
    for card in (card1, card2):
        if card:
            total += card_value(card)
    return total == 13
class State:
    """A state in Pyramid Solitaire, represented by a 60-bit integer value.

    This class only has static methods, meant to be called on integer values.
    The reason is to save as much memory as possible (we'll be creating tens
    of millions of these).

    It's tempting to represent the state as lists of cards in the tableau,
    stock, and waste piles, but it's too slow and memory usage is too high.

    The trick to this state representation is that it holds data that refers
    to the deck of cards, without containing a reference to the deck. So we
    need the deck of cards to understand the state of the game.

    Bits 0-51: "deck_flags" - 52 bits representing whether or not each card
               in the deck remains in the game.
    Bits 52-57: "stock_index" - 6 bits containing a number from 28 to 52,
                an index into the deck for the card at the top of the stock
                pile. Cards with index higher than this are the remainder of
                the stock pile. Cards with index below this (and above 27) are
                the cards in the waste pile. Hint for understanding how it
                works: incrementing this stock index moves the top card of the
                stock pile to the top of the waste pile.
    Bits 58-59: 2 bits to indicate how many times the waste pile has been
                recycled."""
    EMPTY_STOCK = 52
    EMPTY_WASTE = 27
    # all 52 cards present, stock index at 28, cycle 0
    INITIAL_STATE = (28 << 52) | ((2**52) - 1)

    # bits set on the Nth tableau card and the cards covering it from below
    UNCOVERED_MASKS = [
        0b1111111111111111111111111111,
        0b0111111011111011110111011010,
        0b1111110111110111101110110100,
        0b0011111001111001110011001000,
        0b0111110011110011100110010000,
        0b1111100111100111001100100000,
        0b0001111000111000110001000000,
        0b0011110001110001100010000000,
        0b0111100011100011000100000000,
        0b1111000111000110001000000000,
        0b0000111000011000010000000000,
        0b0001110000110000100000000000,
        0b0011100001100001000000000000,
        0b0111000011000010000000000000,
        0b1110000110000100000000000000,
        0b0000011000001000000000000000,
        0b0000110000010000000000000000,
        0b0001100000100000000000000000,
        0b0011000001000000000000000000,
        0b0110000010000000000000000000,
        0b1100000100000000000000000000,
        0b0000001000000000000000000000,
        0b0000010000000000000000000000,
        0b0000100000000000000000000000,
        0b0001000000000000000000000000,
        0b0010000000000000000000000000,
        0b0100000000000000000000000000,
        0b1000000000000000000000000000,
    ]

    @staticmethod
    def deck_flags(state):
        """Return the state's deck flags."""
        return state & 0xFFFFFFFFFFFFF

    @staticmethod
    def is_tableau_empty(state):
        """Return True when all 28 tableau cards (bits 0-27) are removed."""
        return (state & 0xFFFFFFF) == 0

    @staticmethod
    def stock_index(state):
        """Return the state's stock index, the top card of the stock pile.
        If the stock index is 52, it means the stock pile is empty."""
        return (state >> 52) & 0b111111

    @staticmethod
    def cycle(state):
        """Return the state's cycle, the times the waste pile was recycled."""
        return (state >> 58) & 0b11

    @staticmethod
    def waste_index(state):
        """Return the state's waste index, the top card of the waste pile.
        If the waste index is 27, it means the waste pile is empty."""
        # scan downward from just below the stock index for a card that
        # still remains in the game
        index = State.stock_index(state) - 1
        mask = 1 << index
        while index > State.EMPTY_WASTE:
            if (state & mask) != 0:
                break
            mask >>= 1
            index -= 1
        return index

    @staticmethod
    def _adjust_stock_index(state):
        """Return the state with its stock index adjusted correctly.
        Basically the stock index must point to a card that remains in the
        game or else be 52 to indicate the stock pile is empty. This makes sure
        every state has a single unique representation - you can't have two
        states that are effectively the same but have different stock indexes
        because one points to the actual top card and the other points to
        some card that no longer remains in the game."""
        index = State.stock_index(state)
        state = state & 0xC0FFFFFFFFFFFFF # remove the stock index
        mask = 1 << index
        while index < State.EMPTY_STOCK:
            if (state & mask) != 0:
                break
            mask <<= 1
            index += 1
        return state | (index << 52)

    @staticmethod
    def _uncovered_indexes(deck_flags):
        """Return deck indexes of uncovered tableau cards."""
        flags = deck_flags & 0xFFFFFFF
        def is_uncovered(index):
            # the card is uncovered when it's the only remaining card
            # among itself and everything covering it from below
            return (1 << index) == (flags & State.UNCOVERED_MASKS[index])
        return [i for i in range(28) if is_uncovered(i)]

    @staticmethod
    def successors(state, deck):
        """Return a list of successor states to this state.
        Actions that can be performed (if applicable):
        1. Recycle the waste pile.
        2. Draw a card from the stock pile to the waste pile.
        3. Remove a King from the tableau.
        4. Remove a King from the stock pile.
        5. Remove a King from the waste pile.
        6. Remove a pair of cards from the tableau.
        7. Remove a pair of cards, one each from the tableau and stock pile.
        8. Remove a pair of cards, one each from the tableau and waste pile.
        9. Remove a pair of cards, one each from the stock and waste piles."""
        def remove(deck_flags, *indexes):
            """Remove the cards at the indexes from the deck_flags value."""
            for index in indexes:
                deck_flags ^= (1 << index)
            return deck_flags
        results = []
        deck_flags = State.deck_flags(state)
        uncovered = State._uncovered_indexes(deck_flags)
        stock_index = State.stock_index(state)
        waste_index = State.waste_index(state)
        cycle = State.cycle(state)
        def create(deck_flags=deck_flags, stock_index=stock_index, cycle=cycle):
            """Create a new state given the individual parts of the state."""
            new_state = (cycle << 58) | (stock_index << 52) | deck_flags
            return State._adjust_stock_index(new_state)
        is_stock_empty = stock_index == State.EMPTY_STOCK
        is_waste_empty = waste_index == State.EMPTY_WASTE
        stock_card = deck[stock_index] if not is_stock_empty else None
        waste_card = deck[waste_index] if not is_waste_empty else None
        has_both = stock_card and waste_card
        if not stock_card and cycle < 2:
            # 1. recycle the waste pile
            results.append(create(stock_index=28, cycle=cycle+1))
        if stock_card:
            # 2. draw a card from stock to waste
            results.append(create(stock_index=stock_index+1))
        if stock_card and cards_are_removable(stock_card):
            # 4. remove a King from the stock pile
            results.append(create(deck_flags=remove(deck_flags, stock_index)))
        if waste_card and cards_are_removable(waste_card):
            # 5. remove a King from the waste pile
            results.append(create(remove(deck_flags, waste_index)))
        if has_both and cards_are_removable(stock_card, waste_card):
            # 9. remove the cards on the stock and waste piles
            results.append(create(remove(deck_flags, stock_index, waste_index)))
        for i in uncovered:
            if cards_are_removable(deck[i]):
                # 3. remove a King from the tableau
                results.append(create(remove(deck_flags, i)))
            else:
                if stock_card and cards_are_removable(deck[i], stock_card):
                    # 7. remove the cards from the tableau/stock pile
                    results.append(create(remove(deck_flags, i, stock_index)))
                if waste_card and cards_are_removable(deck[i], waste_card):
                    # 8. remove the cards from the tableau/waste pile
                    results.append(create(remove(deck_flags, i, waste_index)))
                for j in uncovered:
                    if cards_are_removable(deck[i], deck[j]):
                        # 6. remove two cards from the tableau
                        results.append(create(remove(deck_flags, i, j)))
        return results
def path(state, seen_states, deck):
    """Return the actions to take to get to this state from the start."""
    def bit_set(bits, n):
        """Return True when the nth bit of bits is 1."""
        return (bits & (1 << n)) != 0

    def describe(prev, curr):
        """Describe the single action that turned prev into curr."""
        changed = prev ^ curr  # XOR to see which bits changed
        if State.cycle(changed):
            return 'Recycle'
        removed = State.deck_flags(changed)
        if removed:
            cards = [deck[i] for i in range(52) if bit_set(removed, i)]
            return f"Remove {' and '.join(cards)}"
        return 'Draw'

    actions = []
    while state in seen_states:
        previous = seen_states[state]
        actions.append(describe(previous, state))
        state = previous
    actions.reverse()
    return actions
def solve(deck):
    """Return a solution to removing all tableau cards in Pyramid Solitaire.

    Breadth-first search over the integer state space; returns the action
    list for the first tableau-clearing state found, or [] if none exists.
    """
    seen_states = {}
    fringe = collections.deque([State.INITIAL_STATE])
    while fringe:
        current = fringe.popleft()
        if State.is_tableau_empty(current):
            return path(current, seen_states, deck)
        for successor in State.successors(current, deck):
            if successor in seen_states:
                continue
            seen_states[successor] = current
            fringe.append(successor)
    return []
| mchung94/solitaire-player | pysolvers/solvers/pyramid.py | pyramid.py | py | 11,173 | python | en | code | 37 | github-code | 36 | [
{
"api_name": "solvers.deck.deck.card_rank",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "solvers.deck.deck",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "solvers.deck",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "colle... |
18873931268 | from flask import Blueprint, request, abort
from models.dota.item import DotaItem
from api.dota.validators import all_items_schema
items_route = Blueprint("items", __name__, url_prefix="/items")
@items_route.route("/<int:item_id>")
def get_item_by_id(item_id: int):
    """Return one Dota item by primary key, or 404 when absent."""
    item = DotaItem.query.filter_by(id=item_id).one_or_none()
    if item:
        return item.as_dict
    return abort(404, "Can't find item")
@items_route.route("/<item_hash_name>")
def get_item_by_name(item_hash_name: str):
    """Return one Dota item by its hash name, or 404 when absent."""
    item = DotaItem.query.filter_by(name=item_hash_name).one_or_none()
    if item:
        return item.as_dict
    return abort(404, "Can't find item")
@items_route.route("/all/")
def get_all_items():
    """Return a paginated listing of all Dota items.

    Query params: ``offset`` (default 0) and ``limit`` (default 1000).
    """
    all_items_schema.validate(request.args)
    # type=int coerces the query-string values, which arrive as strings;
    # previously user-supplied values were passed through raw, so the
    # echoed "limit"/"offset" fields were strings while the defaults
    # were ints.
    offset = request.args.get("offset", 0, type=int)
    limit = request.args.get("limit", 1000, type=int)
    items = DotaItem.query
    return {
        "total_count": items.count(),
        "limit": limit,
        "offset": offset,
        "items": [{
            "name": item.name,
            "id": item.id,
        } for item in items.offset(offset).limit(limit)]
    }
| NeKadgar/game_market_items_base | api/dota/items.py | items.py | py | 1,111 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "models.dota.item.DotaItem.query.filter_by",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.dota.item.DotaItem.query",
"line_number": 11,
"usage_type": "attribute"
... |
14919658167 | __copyright__ = """
Copyright 2017 FireEye, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
import datetime
import logging
import socket
# Below is simple syslog implementation written by Christian Stigen Larsen
# Found here: http://csl.name/py-syslog-win32/
# Will work on Windows
# Syslog facility codes (RFC 3164).
facility = {
    'kern': 0, 'user': 1, 'mail': 2, 'daemon': 3,
    'auth': 4, 'syslog': 5, 'lpr': 6, 'news': 7,
    'uucp': 8, 'cron': 9, 'authpriv': 10, 'ftp': 11,
    'local0': 16, 'local1': 17, 'local2': 18, 'local3': 19,
    'local4': 20, 'local5': 21, 'local6': 22, 'local7': 23,
}

# Syslog severity levels (RFC 3164).
level = {
    'emerg': 0, 'alert': 1, 'crit': 2, 'err': 3,
    'warning': 4, 'notice': 5, 'info': 6, 'debug': 7
}

# BSD syslog timestamp format, e.g. "Jan  5 12:34:56".
syslog_time_format = "%b %d %H:%M:%S"
# NOTE(review): captured once at import time; anything reading this in a
# long-running process gets a stale timestamp.
today = datetime.datetime.today()
def connect_syslog(syslog_server, syslog_port, syslog_proto):
    """Open a TCP or UDP socket to the given syslog server.

    :param syslog_server: hostname or IP of the syslog server
    :param syslog_port: port to connect to
    :param syslog_proto: "tcp" or "udp"
    :return: the connected socket, or False on any failure
    """
    # set up the syslog connection
    try:
        logging.info("Connecting to syslog server %s:%s via %s", syslog_server, syslog_port, syslog_proto)
        if syslog_proto == "tcp":
            broker_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        elif syslog_proto == "udp":
            broker_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        else:
            logging.error("Unrecognized Syslog protocol specified!")
            # fixed: previously returned 1, which is truthy and therefore
            # indistinguishable from a socket for callers doing
            # "if not sock" checks; the other failure path returns False.
            return False
        broker_socket.connect((syslog_server, syslog_port))
        logging.info("Connected to syslog server %s:%s via %s", syslog_server, syslog_port, syslog_proto)
        return broker_socket
    except Exception as e:
        logging.error("Could not connect to syslog server!")
        logging.error(e)
        return False
def format_syslog_message(hostname, program, message):
    """Build an RFC 3164-style syslog line: <PRI>TIMESTAMP HOST PROGRAM: MSG.

    Priority is daemon.notice. The timestamp is now taken at call time;
    previously the module-level ``today`` (frozen at import) was used, so
    every message carried the process start time. Also uses the module's
    ``syslog_time_format`` constant instead of a duplicated literal.
    """
    timestamp = datetime.datetime.today().strftime(syslog_time_format)
    syslog_message = "%s %s %s: %s" % (
        timestamp, hostname, program, message)
    syslog_message = '<%d>%s' % (
        level['notice'] + facility['daemon'] * 8, syslog_message)
    return syslog_message
| fireeye/brocapi | brocapi/brocapi_syslog.py | brocapi_syslog.py | py | 2,466 | python | en | code | 27 | github-code | 36 | [
{
"api_name": "datetime.datetime.today",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "socket.so... |
1121900442 | import asyncio
import logging
import os
from time import time
import aiohttp
# Module-wide logging: timestamped INFO-level output.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
async def read_text_file(directory, file):
    """
    Print the processed contents of ``file`` when it is a .txt file.

    NOTE(review): the original docstring was copied from a download
    example and described different parameters. Despite being declared
    async, this coroutine performs blocking file I/O and never awaits;
    the "Downloaded" log message is likewise a leftover from that example.

    :param directory: directory containing the file
    :param file: file name to check and read
    """
    # Check whether file is in text format or not
    if file.endswith(".txt"):
        file_path = f"{directory}/{file}"
        # call read text file function
        read_text_content(file_path)
        logger.info('Downloaded %s', file)
def read_text_content(file_path):
    """Print each line of the file, numbered, with "COLUMNS" replaced by
    "SUCCESS" and surrounding whitespace stripped."""
    with open(file_path, 'r') as source:
        for line_no, raw_line in enumerate(source):
            cleaned = raw_line.strip().replace("COLUMNS", "SUCCESS")
            print("Line {}: {}".format(line_no, cleaned))
# Main is now a coroutine
async def main():
    """Process every .txt file in the hard-coded download directory.

    NOTE(review): the directory path is hard-coded and os.chdir changes
    process-wide state -- confirm this is intended.
    """
    download_dir = "C:/Suganya/test"
    os.chdir(download_dir)
    tasks = [(read_text_file(download_dir, file)) for file in os.listdir()]
    # gather aggregates all the tasks and schedules them in the event loop
    await asyncio.gather(*tasks, return_exceptions=True)
if __name__ == '__main__':
    ts = time()
    # Create the asyncio event loop
    # NOTE(review): manual get_event_loop()/run_until_complete is the
    # legacy pattern; asyncio.run(main()) is the modern equivalent.
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(main())
    finally:
        # Shutdown the loop even if there is an exception
        loop.close()
    logger.info('Took %s seconds to complete', time() - ts)
| suganyamuthukumar/python | TestfileIOAsync.py | TestfileIOAsync.py | py | 1,603 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"... |
8385871532 | from django.db import models, connection
from django.db.models import Q, Max, Case, Value, When, Exists, OuterRef, \
UniqueConstraint, Subquery
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.exceptions import FieldError, ValidationError
from django.template import Template, loader
from django.utils.functional import cached_property
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from django.urls import reverse
from polymorphic.models import PolymorphicModel
from polymorphic.managers import PolymorphicManager
import uuid
from itertools import groupby
from pathlib import Path
from datetime import timedelta
import os
from ..stock import StockWidget
from ..filetype import FileType
from ..utils import create_model, remove_p, send_email, submission_link, \
thumbnail_path, MarkdownFormatter
from .ranked import RankedModel, UnderscoredRankedModel
from .automatic import AutoSlugModel
markdown = MarkdownFormatter()
class ProgramManager(models.Manager):
    """Manager enabling natural-key (slug) lookups for Program fixtures."""
    def get_by_natural_key(self, slug):
        return self.get(slug=slug)
class Program(AutoSlugModel):
    """Top-level container grouping related forms."""
    class Meta:
        ordering = ['created']

    sites = models.ManyToManyField('Site', blank=True, related_name='programs',
                                   related_query_name='program')
    name = models.CharField(max_length=64)
    slug = models.SlugField(max_length=30, unique=True, allow_unicode=True,
                            verbose_name='identifier')
    # slug with hyphens removed; used in database identifiers
    db_slug = models.SlugField(max_length=30, unique=True, allow_unicode=True,
                               editable=False)
    description = models.CharField(max_length=250, blank=True)
    options = models.JSONField(default=dict, blank=True)
    hidden = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)

    objects = ProgramManager()

    def __str__(self):
        return self.name

    def natural_key(self):
        return (self.slug,)

    def validate_unique(self, exclude=None):
        """Also reject new slugs whose hyphen-stripped form collides."""
        super().validate_unique(exclude=exclude)
        if not self._state.adding: return
        if Program.objects.filter(db_slug=self.slug.replace('-', '')).exists():
            msg = 'Identifier (with hyphens removed) must be unique.'
            raise ValidationError(msg)

    def visible_forms(self):
        # published forms that aren't flagged hidden in their options
        pub = self.forms.exclude(status=Form.Status.DRAFT)
        return pub.exclude(options__hidden__isnull=False)

    def home_url(self):
        if 'home_url' in self.options: return self.options['home_url']
        return None
class FormManager(models.Manager):
    """Manager enabling natural-key (program slug + slug) lookups for Form."""
    def get_by_natural_key(self, program_slug, slug):
        return self.get(program__slug=program_slug, slug=slug)
class Form(AutoSlugModel):
class Meta:
constraints = [
UniqueConstraint(fields=['program', 'slug'], name='unique_slug'),
UniqueConstraint(fields=['program', 'db_slug'],
name='unique_db_slug')
]
class Status(models.TextChoices):
DRAFT = 'draft', _('unpublished')
DISABLED = 'disabled', _('submissions disabled')
ENABLED = 'enabled', _('published/enabled')
COMPLETED = 'completed', _('completed')
class Validation(models.TextChoices):
# currently, validation to create a submission is always by email
EMAIL = 'email', _('email address')
program = models.ForeignKey(Program, models.CASCADE,
related_name='forms', related_query_name='form')
name = models.CharField(max_length=64)
slug = models.SlugField(max_length=30, allow_unicode=True,
verbose_name='identifier')
db_slug = models.SlugField(max_length=30, allow_unicode=True,
editable=False)
status = models.CharField(max_length=16, default=Status.DRAFT,
choices=Status.choices)
options = models.JSONField(default=dict, blank=True)
created = models.DateTimeField(auto_now_add=True)
# this is also the published date, for enabled and completed forms
modified = models.DateTimeField(default=timezone.now, editable=False)
completed = models.DateTimeField(null=True, blank=True, editable=False)
validation_type = models.CharField(max_length=16, editable=False,
default=Validation.EMAIL,
choices=Validation.choices)
objects = FormManager()
def __str__(self):
return self.name
def natural_key(self):
return self.program.natural_key() + (self.slug,)
natural_key.dependencies = ['formative.program']
def validate_unique(self, exclude=None):
super().validate_unique(exclude=exclude)
if not self._state.adding: return
if Form.objects.filter(program=self.program,
db_slug=self.slug.replace('-', '')).exists():
msg = 'Identifier (with hyphens removed) must be unique ' \
'within this program.'
raise ValidationError(msg)
@cached_property
def model(self):
if self.status == self.Status.DRAFT: return None
fields = []
for block in self.blocks.exclude(page=0, _rank__gt=1):
fields += block.fields()
name = self.program.db_slug + '_' + self.db_slug
class Meta:
verbose_name = self.slug + ' submission'
verbose_name_plural = self.slug + ' submissions'
return create_model(name, fields, program=self.program.db_slug,
base_class=Submission, meta=Meta)
@cached_property
def item_model(self):
if self.status == self.Status.DRAFT: return None
collections = self.collections()
if not collections: return None
names = []
for c in collections:
for field in c.collection_fields():
if field not in names: names.append(field)
fields = [
# the first column links submission items to the submission
('_submission', models.ForeignKey(self.model, models.CASCADE,
related_name='_items',
related_query_name='_item'))
]
field_blocks = { b.name: b for b in self.collection_field_blocks() }
for n in names:
# look for a CustomBlock with the same name on page 0 (hidden)
if n in field_blocks: block = field_blocks[n]
# otherwise, use the default text CustomBlock
else: block = CustomBlock.text_create()
fields.append((n, block.field()))
name = self.program.db_slug + '_' + self.db_slug + '_i'
class Meta:
constraints = [
UniqueConstraint(
fields=['_submission', '_collection', '_block', '_rank'],
name=self.program.db_slug+'_'+self.db_slug+'_u'
)
]
verbose_name = self.slug + ' item'
verbose_name_plural = self.slug + ' items'
return create_model(name, fields, program=self.program.db_slug,
base_class=SubmissionItem, meta=Meta)
def cache_dirty(self):
version = cache.get('models_version')
if version is None: cache.set('models_version', 1, timeout=None)
else: cache.incr('models_version')
def publish_model(self, model, admin=None):
from ..signals import all_forms_publish
with connection.schema_editor() as editor:
editor.create_model(model)
ctype = ContentType(app_label=model._meta.app_label,
model=model.__name__)
ctype.save()
ContentType.objects.clear_cache()
all_forms_publish.send(self, content_type=ctype)
self.cache_dirty()
def unpublish_model(self, model):
from ..signals import all_forms_unpublish
self.cache_dirty()
ctype = ContentType.objects.get_for_model(model)
all_forms_unpublish.send(self, content_type=ctype)
ctype.delete()
ContentType.objects.clear_cache()
with connection.schema_editor() as editor:
editor.delete_model(model)
def publish(self):
if self.status != self.Status.DRAFT: return
self.status = self.Status.ENABLED
if 'model' in self.__dict__: del self.model
if 'item_model' in self.__dict__: del self.item_model
from ..admin import SubmissionAdmin, SubmissionItemAdmin
self.publish_model(self.model, admin=SubmissionAdmin)
if self.item_model:
self.publish_model(self.item_model, admin=SubmissionItemAdmin)
self.modified = timezone.now()
self.save()
def unpublish(self):
if self.status == self.Status.DRAFT: return
rec_type = SubmissionRecord.RecordType.SUBMISSION
recs = SubmissionRecord.objects.filter(program=self.program,
form=self.slug, type=rec_type)
recs.update(deleted=True)
self.unpublish_model(self.model)
if self.item_model: self.unpublish_model(self.item_model)
self.status = self.Status.DRAFT
if 'model' in self.__dict__: del self.model
if 'item_model' in self.__dict__: del self.item_model
self.modified, self.completed = timezone.now(), None
self.save()
def get_available_plugins(self):
from ..plugins import get_available_plugins
return get_available_plugins(self)
def get_plugins(self):
if 'plugins' in self.options: return self.options['plugins']
return []
def add_plugins(self, plugins):
available = self.get_available_plugins()
enabled = self.get_plugins()
enable = [ p for p in plugins if p in available and p not in enabled ]
for plugin in enable:
if hasattr(available[plugin].app, 'installed'):
getattr(available[plugin].app, 'installed')(self)
if 'plugins' not in self.options: self.options['plugins'] = []
self.options['plugins'] += plugins
def remove_plugins(self, plugins):
available = self.get_available_plugins()
enabled = self.get_plugins()
for plugin in [ p for p in plugins if p in enabled ]:
if hasattr(available[plugin].app, 'uninstalled'):
getattr(available[plugin].app, 'uninstalled')(self)
new_plugins = [ p for p in enabled if p not in plugins ]
self.options['plugins'] = new_plugins
def default_text_label_style(self):
if 'default_text_label_style' in self.options:
return self.options['default_text_label_style']
return FormLabel.LabelStyle.WIDGET
def num_pages(self):
return self.blocks.aggregate(Max('page'))['page__max']
def custom_blocks(self):
return CustomBlock.objects.filter(form=self).non_polymorphic()
def submission_blocks(self):
blocks = self.blocks.not_instance_of(CollectionBlock)
return blocks.exclude(page=0, _rank__gt=0)
def collection_field_blocks(self):
return self.custom_blocks().filter(page=0, _rank__gt=1)
def collections(self, name=None):
blocks = CollectionBlock.objects.filter(form=self)
if name: return blocks.filter(name=name).non_polymorphic()
return blocks.non_polymorphic()
def validation_block(self):
return self.blocks.get(page=0, _rank=1)
def visible_blocks(self, page=None, skip=None):
query = self.blocks.all()
if skip: query = query.exclude(id__in=skip)
if page and page > 0: return query.filter(page=page)
else: return query.exclude(page=0, _rank__gt=0)
return query.filter(page__gt=0)
def visible_items(self, submission, page=None, skip=None):
if not self.item_model: return []
query = self.item_model.objects.filter(_submission=submission)
if skip: query = query.exclude(_block__in=skip)
if page and page > 0:
block_ids = Subquery(self.blocks.filter(page=page).values('pk'))
query = query.filter(_block__in=block_ids)
query = query.exclude(_file='', _filesize__gt=0) # upload in progress
return query.order_by('_collection', '_block', '_rank')
def field_labels(self):
labels = {}
for label in self.labels.all():
key, target = label.path, labels
if '.' in label.path:
base, key = label.path.split('.')
if key[-1] == '_': base, key = base + '_', key[:-1]
if base not in labels: labels[base] = {}
target = labels[base]
if key not in target: target[key] = {}
target[key][label.style] = label
return labels
def label_class(self):
return FormLabel
def status_message(self):
if self.status == self.Status.DRAFT:
return 'NA'
elif self.status == self.Status.DISABLED:
if 'disabled_message' in self.options:
return self.options['disabled_message']
return _('Not yet open for submissions')
elif self.status == self.Status.COMPLETED:
if 'completed_message' in self.options:
return self.options['completed_message']
return _('Closed')
if 'enabled_message' in self.options:
return self.options['enabled_message']
return _('Open for submissions')
def hidden(self):
return self.status != self.Status.ENABLED or 'hidden' in self.options
def access_enable(self):
if 'access_enable' in self.options: return self.options['access_enable']
return None
def timed_completion(self):
if 'timed_completion' in self.options:
return self.options['timed_completion']
return None
def complete_submit_time(self):
if 'complete_submit_time' in self.options:
return self.options['complete_submit_time']
return 5 # minutes
def extra_time(self):
extra = self.complete_submit_time()
if timezone.now() - self.completed <= timedelta(minutes=extra):
return True
return False
def review_pre(self, prefix=''):
name = prefix + 'review_pre'
if name in self.options:
return mark_safe(markdown.convert(self.options[name]))
return ''
def review_post(self):
if 'review_post' in self.options:
return mark_safe(markdown.convert(self.options['review_post']))
return ''
def submit_submission(self, submission):
    """Finalize a submission: mark it submitted and create its audit record.

    Creates (or reuses) the program-level SubmissionRecord for this
    submission and, when the form has an item model, touches a 'submitted'
    marker file in the submission's upload directory.
    """
    submission._submit()
    rec, created = SubmissionRecord.objects.get_or_create(
        program=self.program, form=self.slug, submission=submission._id,
        type=SubmissionRecord.RecordType.SUBMISSION
    )
    # record the submitter's email on the (possibly pre-existing) record
    rec.text = submission._email
    rec.save()
    if self.item_model:
        dir = os.path.join(settings.MEDIA_ROOT, str(submission._id))
        if os.path.isdir(dir):
            # NOTE(review): presumably marks the upload dir as final for
            # cleanup/processing jobs — confirm against consumers
            Path(os.path.join(dir, 'submitted')).touch()
def submitted_review_pre(self):
    # post-submission variant: reads the 'submitted_review_pre' option
    return self.review_pre(prefix='submitted_')
def review_after_submit(self):
    """Whether the review page stays available after submitting."""
    if 'no_review_after_submit' in self.options:
        return False
    return True
def submit_button_label(self):
    """Label for the submit button; configurable, defaults to 'submit'."""
    return self.options.get('submit_button_label', 'submit')
def thanks(self):
    """Markdown-rendered thank-you text, or None when not configured."""
    if 'thanks' in self.options:
        return mark_safe(markdown.convert(self.options['thanks']))
    return None
def emails(self):
    """Per-form email template overrides, keyed by email name."""
    return self.options.get('emails', {})
def email_names(self):
    """All email names, with the built-in ones prepended when not overridden."""
    names = list(self.emails())
    # 'continue' ends up first, then 'confirmation', then any custom names
    missing = [n for n in ('continue', 'confirmation') if n not in names]
    return missing + names
def load_email_templates(self, n):
    """Load the default subject and body templates for the named email."""
    base = 'formative/emails/' + n
    return (loader.get_template(base + '_subject.html'),
            loader.get_template(base + '.html'))
def email_templates(self):
    """Configured email templates, filling in defaults for the built-ins."""
    emails = self.emails()
    for name in ('continue', 'confirmation'):
        if name not in emails:
            subject, content = self.load_email_templates(name)
            emails[name] = {
                'subject': subject.template.source.rstrip('\n'),
                'content': content.template.source,
            }
    return emails
class FormLabelManager(models.Manager):
    """Manager enabling natural-key (de)serialization of FormLabel rows."""
    def get_by_natural_key(self, program_slug, form_slug, path, style):
        # a label is uniquely identified by its form plus (path, style)
        return self.get(form__program__slug=program_slug, form__slug=form_slug,
                        path=path, style=style)
class FormLabel(models.Model):
    """Display text for a form field (or sub-field), keyed by path and style."""
    class Meta:
        constraints = [
            # one label per (form, path, style) combination
            UniqueConstraint(fields=['form', 'path', 'style'],
                             name='unique_path_style')
        ]
    class LabelStyle(models.TextChoices):
        WIDGET = 'widget', _('widget label')
        VERTICAL = 'vertical', _('vertical label')
        HORIZONTAL = 'horizontal', _('horizontal label')
    form = models.ForeignKey('Form', models.CASCADE, related_name='labels',
                             related_query_name='label')
    # dotted path identifying the labeled field, e.g. 'name' or 'block.sub'
    path = models.CharField(max_length=128)
    # markdown source for the label
    text = models.CharField(max_length=1000)
    style = models.CharField(max_length=16, choices=LabelStyle.choices,
                             default=LabelStyle.WIDGET)
    objects = FormLabelManager()
    def __str__(self):
        return self.path
    def natural_key(self):
        return self.form.natural_key() + (self.path, self.style)
    natural_key.dependencies = ['formative.form']
    def display(self, inline=False):
        """Render the label text as markdown; inline drops the wrapping <p>."""
        s = markdown.convert(self.text)
        if inline: return mark_safe(remove_p(s))
        return mark_safe(s)
    def display_inline(self):
        # template-friendly shortcut for display(inline=True)
        return self.display(inline=True)
class FormDependencyManager(models.Manager):
    """Manager enabling natural-key (de)serialization of FormDependency rows."""
    def get_by_natural_key(self, program_slug, form_slug, name, value):
        # identified by the owning block (program, form, block name) + value
        return self.get(block__form__program__slug=program_slug,
                        block__form__slug=form_slug,
                        block__name=name, value=value)
class FormDependency(models.Model):
    """A value of a block's dependence field for which the block is enabled."""
    class Meta:
        verbose_name = 'dependency value'
        verbose_name_plural = 'dependency values'
        constraints = [
            UniqueConstraint(fields=['block', 'value'], name='unique_blockval')
        ]
    block = models.ForeignKey('FormBlock', models.CASCADE,
                              related_name='dependencies',
                              related_query_name='dependency')
    value = models.CharField(max_length=64, blank=True)
    objects = FormDependencyManager()
    def __str__(self):
        # shown as dependence_name="value"; '?' when dependence is unset
        if self.block.dependence:
            return f'{self.block.dependence.name}="{self.value}"'
        return f'?="{self.value}"'
    def natural_key(self):
        return self.block.natural_key() + (self.value,)
    natural_key.dependencies = ['formative.formblock', 'formative.customblock',
                                'formative.collectionblock']
class FormBlockManager(PolymorphicManager):
    """Manager for FormBlock supporting natural keys.

    Uses a non-polymorphic lookup so the base row is returned regardless of
    the concrete block subtype.
    """
    def get_by_natural_key(self, program_slug, form_slug, name):
        return self.non_polymorphic().get(form__program__slug=program_slug,
                                          form__slug=form_slug, name=name)
class FormBlock(PolymorphicModel, RankedModel):
    """Base class for a form's building blocks (stock, custom, collection).

    Blocks are ordered by _rank within a page; page 0 appears to hold
    non-visible/auto fields (see validate_unique's page handling).
    """
    class Meta(PolymorphicModel.Meta, RankedModel.Meta):
        constraints = [
            UniqueConstraint(fields=['form', 'page', '_rank'],
                             name='unique_rank'),
        ]
        ordering = ['form', 'page', '_rank']
    form = models.ForeignKey(Form, models.CASCADE,
                             related_name='blocks', related_query_name='block')
    name = models.SlugField(max_length=32, verbose_name='identifier',
                            allow_unicode=True)
    options = models.JSONField(default=dict, blank=True)
    page = models.PositiveIntegerField(default=1)
    # a block may be shown conditionally on the value of an earlier block
    dependence = models.ForeignKey('FormBlock', models.CASCADE,
                                   null=True, blank=True,
                                   related_name='dependents',
                                   related_query_name='dependent')
    negate_dependencies = models.BooleanField(default=False,
                                              verbose_name='negate dependency')
    objects = FormBlockManager()
    def __str__(self):
        return self.name
    def natural_key(self):
        return self.form.natural_key() + (self.name,)
    natural_key.dependencies = ['formative.form']
    def rank_group(self):
        # ranks are unique within a (form, page) group
        return FormBlock.objects.filter(form=self.form, page=self.page)
    def validate_unique(self, exclude=None):
        """On creation, enforce naming rules beyond the DB constraints."""
        super().validate_unique(exclude=exclude)
        if not self._state.adding: return
        # 'email' is reserved on visible pages
        if self.name == 'email' and self.page:
            raise ValidationError('There is already a block called "email."')
        # name of a collection block identifies its "bucket", not its field(s)
        # in this case, it's not required to be unique
        if self.block_type() == 'collection': return
        qs = FormBlock.objects.filter(form_id=self.form_id)
        # page 0 and pages >= 1 are separate name namespaces
        if self.page: qs = qs.filter(page__gt=0)
        else: qs = qs.filter(page=0)
        if qs.filter(name=self.name).exists():
            msg = 'Identifiers for stock and custom blocks must be unique.'
            raise ValidationError(msg)
    def block_type(self):
        # 'custom' | 'collection' | 'stock', determined by concrete subclass
        if type(self) == CustomBlock: return 'custom'
        if type(self) == CollectionBlock: return 'collection'
        return 'stock'
    def stock_type(self):
        """Widget class for a stock block; its options must include 'type'."""
        if 'type' not in self.options:
            raise FieldError('untyped stock widget')
        return StockWidget.by_type(self.options['type'])
    @cached_property
    def stock(self):
        # instantiated stock widget for this block (stock blocks only)
        return self.stock_type()(self.name, **self.options)
    def fields(self):
        return self.stock.fields()
    def enabled_blocks(self, value, page=None):
        # blocks on the given page that depend on self, and enabled given value
        query = self.form.blocks.filter(dependence_id=self.id)
        if page: query = query.filter(page=page)
        if type(value) == bool: value = value and 'yes' or 'no' # TODO: numeric
        if value is None: value = ''
        # a dependent block is enabled iff a FormDependency row matches the
        # value (inverted when negate_dependencies is set)
        val = FormDependency.objects.filter(block_id=OuterRef('id'),
                                            value=Value(value))
        cond = Case(
            When(negate_dependencies=False, then=Exists(val)),
            When(negate_dependencies=True, then=~Exists(val))
        )
        query = query.annotate(en=cond).filter(en=True)
        return query.values_list('id', flat=True)
    def min_allowed_page(self):
        # a dependent block must come on a later page than its dependence
        if not self.page: return 0
        min_page = 1
        if self.dependence: min_page = self.dependence.page + 1
        return min_page
    def max_allowed_page(self, last_page=None):
        # a block must come before every block that depends on it
        if not self.page: return 0
        if last_page is None:
            last_page = self.form.blocks.aggregate(p=Max('page'))['p'] or 1
        max_page = last_page
        for block in self.dependents.all():
            if block.page - 1 < max_page: max_page = block.page - 1
        return max_page
    def show_in_review(self):
        return 'no_review' not in self.options
class CustomBlock(FormBlock):
    """A user-defined input block: text, numeric, choice, or boolean."""
    class Meta:
        db_table = 'formative_formcustomblock'
    CHOICE_VAL_MAXLEN = 64
    DEFAULT_TEXT_MAXLEN = 1000
    MAX_TEXT_MAXLEN = 65535
    class InputType(models.TextChoices):
        TEXT = 'text', _('text')
        NUMERIC = 'num', _('numeric')
        CHOICE = 'choice', _('multiple choice')
        BOOLEAN = 'bool', _('true/false choice')
    block = models.OneToOneField(FormBlock, on_delete=models.CASCADE,
                                 parent_link=True, primary_key=True)
    type = models.CharField(max_length=16, choices=InputType.choices,
                            default=InputType.TEXT)
    required = models.BooleanField(default=False)
    num_lines = models.PositiveIntegerField(default=1)
    min_chars = models.PositiveIntegerField(null=True, blank=True)
    max_chars = models.PositiveIntegerField(null=True, blank=True)
    min_words = models.PositiveIntegerField(null=True, blank=True)
    max_words = models.PositiveIntegerField(null=True, blank=True)
    @classmethod
    def text_create(cls, *args, **kwargs):
        """Alternate constructor for a TEXT block with a default max length."""
        if 'max_chars' not in kwargs:
            kwargs['max_chars'] = cls.DEFAULT_TEXT_MAXLEN
        return cls(*args, **kwargs, type=cls.InputType.TEXT)
    def choices(self, include_empty=False):
        """Configured choice values; raises unless include_empty allows []."""
        if include_empty:
            if 'choices' in self.options: return self.options['choices']
            return []
        if 'choices' not in self.options or not self.options['choices']:
            raise FieldError('choices not defined')
        return self.options['choices']
    def field(self):
        """Build the dynamic model field matching this block's input type."""
        # fields are NULL when we haven't yet reached the page, or if their
        # block had a dependency that wasn't met. non-required fields may
        # have a different way to record that no input was made, usually ''
        if self.type == self.InputType.TEXT:
            blank, max_chars = False, self.max_chars
            if not self.min_chars and not self.min_words: blank = True
            if not self.max_chars or self.max_chars > self.MAX_TEXT_MAXLEN:
                max_chars = self.MAX_TEXT_MAXLEN
            # multi-line or long input gets a TextField, otherwise CharField
            if self.num_lines > 1 or max_chars > self.DEFAULT_TEXT_MAXLEN:
                return models.TextField(null=True, max_length=max_chars,
                                        blank=blank)
            return models.CharField(null=True, max_length=self.max_chars,
                                    blank=blank)
        elif self.type == self.InputType.NUMERIC:
            return models.IntegerField(null=True, blank=(not self.required))
        elif self.type == self.InputType.CHOICE:
            return models.CharField(null=True, blank=(not self.required),
                                    max_length=self.CHOICE_VAL_MAXLEN,
                                    choices=[(c, c) for c in self.choices()])
        elif self.type == self.InputType.BOOLEAN:
            return models.BooleanField(null=True)
    def fields(self):
        return [(self.name, self.field())]
    def form_field(self, model_field, **kwargs):
        """Build the form field, adding type-specific validation bounds."""
        if self.type == self.InputType.TEXT:
            return model_field.formfield(min_length=self.min_chars, **kwargs)
        elif self.type == self.InputType.NUMERIC:
            return model_field.formfield(min_value=self.numeric_min(),
                                         max_value=self.numeric_max(), **kwargs)
        # or use the ModelForm factory's default:
        return model_field.formfield(**kwargs)
    def clean_field(self, data, field):
        # currently, all are handled from validators set up on the form
        return data
    def numeric_min(self):
        if 'numeric_min' in self.options: return self.options['numeric_min']
        return None
    def numeric_max(self):
        if 'numeric_max' in self.options: return self.options['numeric_max']
        return None
    def default_value(self):
        # TEXT blocks never have a default; other types may configure one
        if self.type == self.InputType.TEXT: return None
        if 'default_value' in self.options: return self.options['default_value']
        return None
    def conditional_value(self, value):
        """Value used when evaluating dependent blocks' enable conditions."""
        if self.type in (self.InputType.TEXT, self.InputType.NUMERIC):
            # in this case, condition is whether the field was filled out
            # (and is non-zero for numeric)
            return bool(value)
        return value
    def span(self, media=None):
        """Grid width (columns) for this input at the given media size."""
        width = 6
        if self.max_chars and self.max_chars > 50: width = 8
        if self.num_lines > 1: width = 8
        if self.num_lines > 4: width = 10
        if self.type in (self.InputType.CHOICE, self.InputType.BOOLEAN):
            width = 8
        elif self.type == self.InputType.NUMERIC: width = 2
        if self.type == self.InputType.NUMERIC:
            # numeric inputs are capped at 4 columns on the phone layout
            if 'span_phone' in self.options:
                return min(self.options['span_phone'], 4)
            if 'span_tablet' in self.options:
                if not media: return min(width, self.options['span_tablet'], 4)
            elif not media: return min(width, 4)
        if media == 'tablet' and 'span_tablet' in self.options:
            return self.options['span_tablet']
        if media == 'desktop' and 'span_desktop' in self.options:
            return self.options['span_desktop']
        return width
    def tablet_span(self): return self.span(media='tablet')
    def desktop_span(self): return self.span(media='desktop')
class CollectionBlock(FormBlock):
    """A block collecting repeated items (optionally with file uploads)."""
    class Meta:
        db_table = 'formative_formcollectionblock'
    FIXED_CHOICE_VAL_MAXLEN = 100
    class AlignType(models.TextChoices):
        TABULAR = 'tabular', _('tabular')
        STACKED = 'stacked', _('stacked')
    block = models.OneToOneField(FormBlock, on_delete=models.CASCADE,
                                 parent_link=True, primary_key=True)
    fixed = models.BooleanField(default=False)
    min_items = models.PositiveIntegerField(null=True, blank=True) # null if
    max_items = models.PositiveIntegerField(null=True, blank=True) # fixed
    has_file = models.BooleanField(default=False)
    file_optional = models.BooleanField(default=False)
    # we don't need these references indexed or validated here, so no SlugField
    name1 = models.CharField(max_length=32, default='', blank=True)
    name2 = models.CharField(max_length=32, default='', blank=True)
    name3 = models.CharField(max_length=32, default='', blank=True)
    align_type = models.CharField(max_length=16, choices=AlignType.choices,
                                  default=AlignType.TABULAR)
    def fields(self):
        # collection blocks contribute no fields to the submission model
        return []
    def collection_fields(self):
        """The up-to-three item field names, in display order."""
        fields = []
        if self.name1: fields.append(self.name1)
        if self.name2: fields.append(self.name2)
        if self.name3: fields.append(self.name3)
        return fields
    def tabular(self):
        return self.align_type == self.AlignType.TABULAR
    def max_filesize(self):
        if 'max_filesize' in self.options: return self.options['max_filesize']
        return None # TODO: overall default max
    def allowed_filetypes(self):
        if 'file_types' in self.options:
            return self.options['file_types']
        return None # allow any file extension
    def allowed_extensions(self):
        """Flattened list of extensions across all allowed file types."""
        types = self.allowed_filetypes()
        if not types: return None
        extensions = []
        for filetype in types:
            extensions += FileType.by_type(filetype)().allowed_extensions()
        return extensions
    def autoinit_filename(self):
        if 'autoinit_filename' in self.options: return True
        return False
    def fixed_choices(self):
        """Item choices for a fixed collection; required in that case."""
        if 'choices' not in self.options:
            msg = 'choices must be provided for a fixed collection block'
            raise FieldError(msg)
        return self.options['choices']
    def num_choices(self):
        return len(self.fixed_choices())
    def file_limits(self):
        if 'file_limits' in self.options: return self.options['file_limits']
        return {}
    def process_options(self, filetype):
        # per-filetype post-processing options, if configured
        if 'file_processing' not in self.options: return {}
        if filetype in self.options['file_processing']:
            return self.options['file_processing'][filetype]
        return {}
    def span(self, media=None):
        """Grid width (columns); collections are at least 4 columns wide."""
        width = 10
        if media == 'tablet': width = 8
        if not media: return 4
        if media == 'tablet' and 'span_tablet' in self.options:
            return max(4, self.options['span_tablet'])
        if media == 'desktop' and 'span_desktop' in self.options:
            return max(4, self.options['span_desktop'])
        return width
    def tablet_span(self): return self.span(media='tablet')
    def desktop_span(self): return self.span(media='desktop')
    def total_colspan(self):
        fields = self.collection_fields()
        if fields: return len(fields)
        return 1
    def horizontal_width(self, field):
        """Percent width of a column; 'wide' fields get a double share."""
        total = self.total_colspan()
        if 'wide' in self.options:
            total += len(self.options['wide'])
            if field in self.options['wide']: return 200.0 / total
        return 100.0 / total
    def collection_fields_as_blocks(self):
        # lightweight stand-ins so templates can treat columns like blocks
        class ColFieldBlock:
            def __init__(self, name, width):
                self.name, self.width = name, width
        fields = self.collection_fields()
        return [ ColFieldBlock(n, self.horizontal_width(n)) for n in fields ]
    def items_sortable(self):
        return 'unsortable' not in self.options
    def button_text(self):
        """Label for the add-item button, varying with file requirements."""
        if 'button_text' in self.options: return self.options['button_text']
        if self.has_file and not self.file_optional:
            if self.max_items and self.max_items > 1: return _('add files')
            return _('add file')
        return _('add item')
class SubmissionRecord(models.Model):
    """Program-level audit record of an event for a submission."""
    class Meta:
        constraints = [
            # at most one record of each type per submission
            UniqueConstraint(fields=['submission', 'type'],
                             name='unique_submission_record_type')
        ]
    class RecordType(models.TextChoices):
        FILES = 'files', _('uploaded files')
        SUBMISSION = 'submission', _('form submission')
    program = models.ForeignKey(Program, models.SET_NULL, null=True, blank=True)
    # form slug is denormalized so records survive form deletion
    form = models.SlugField(max_length=64, allow_unicode=True)
    submission = models.UUIDField(editable=False)
    type = models.CharField(max_length=32)
    recorded = models.DateTimeField(auto_now=True, verbose_name='recorded at')
    text = models.TextField(blank=True)
    number = models.PositiveBigIntegerField(null=True, blank=True)
    deleted = models.BooleanField(default=False)
# abstract classes, used as templates for the dynamic models:
class Submission(models.Model):
    """Abstract template for a form's dynamically-created submission model."""
    class Meta:
        abstract = True
    _id = models.UUIDField(primary_key=True, default=uuid.uuid4,
                           editable=False)
    # valid up to page N:
    _valid = models.PositiveIntegerField(default=0, editable=False)
    # an array of N block id arrays, those skipped for form dependency not met:
    _skipped = models.JSONField(default=list, blank=True, editable=False)
    _created = models.DateTimeField(auto_now_add=True)
    _modified = models.DateTimeField(auto_now=True)
    _submitted = models.DateTimeField(null=True, blank=True)
    @classmethod
    def _get_form(cls):
        """Look up the Form this dynamic model was generated from.

        The dynamic model's name is '<program_db_slug>_<form_db_slug>'.
        """
        program_slug = cls._meta.program_slug
        slug = cls._meta.model_name[len(program_slug)+1:]
        return Form.objects.get(program__db_slug=program_slug, db_slug=slug)
    def __str__(self):
        if hasattr(self, '_email'): return self._email
        return str(self._id)
    def _get_absolute_url(self):
        form = self._get_form()
        args = {'program_slug': form.program.slug, 'form_slug': form.slug,
                'sid': self._id}
        return reverse('submission', kwargs=args)
    def _update_context(self, form, context):
        """Expose this submission's field values in an email/template context."""
        context['review_link'] = submission_link(self, form, rest='review')
        for block in form.visible_blocks():
            if block.block_type() == 'custom':
                context[block.name] = getattr(self, block.name)
            elif block.block_type() == 'stock':
                # a stock widget may span several fields; expose a single
                # value directly, or an attribute bag when there are many
                class Obj: pass
                obj = Obj()
                obj.__dict__ = { n: getattr(self, block.stock.field_name(n))
                                 for n in block.stock.widget_names() }
                if len(obj.__dict__) > 1: context[block.name] = obj
                else: context[block.name] = next(iter(obj.__dict__.values()))
    def _send_email(self, form, name, **kwargs):
        """Send the named email, preferring form-configured template overrides."""
        form_emails = form.emails()
        if name in form_emails:
            subject = Template(form_emails[name]['subject'])
            template = Template(form_emails[name]['content'])
        else: subject, template = form.load_email_templates(name)
        context = {
            'submission': self, 'form': form,
            'submission_link': submission_link(self, form)
        }
        # field values only exist (fully) once the submission was submitted
        if self._submitted: self._update_context(form, context)
        return send_email(template=template, to=self._email,
                          subject=subject, context=context, **kwargs)
    def _submit(self):
        # mark the submission as submitted now
        self._submitted = timezone.now()
        self.save()
    def _collections(self, queryset=None, form=None):
        """Group this submission's items by collection name, in form order."""
        if not form: form = self._get_form()
        if not queryset: queryset = self._items.all()
        # form's order also orders blocks' items with the same collection name
        block = FormBlock.objects.filter(form=form, pk=OuterRef('_block'))
        queryset = queryset.annotate(page=Subquery(block.values('page')))
        queryset = queryset.annotate(block_rank=Subquery(block.values('_rank')))
        items = queryset.order_by('_collection', 'page', 'block_rank', '_rank')
        collections = groupby(items, key=lambda item: item._collection)
        return { k: list(items) for k, items in collections }
def file_path(instance, filename):
    """upload_to callback: store each file under its submission's directory."""
    submission_dir = str(instance._submission_id)
    return os.path.join(submission_dir, filename)
class SubmissionItem(UnderscoredRankedModel):
    """Abstract template for a submission's dynamic collection-item model."""
    class Meta:
        abstract = True
    _id = models.BigAutoField(primary_key=True, editable=False)
    # see Form.item_model() for _submission = models.ForeignKey(Submission)
    # the item's collection name == the name of the CollectionBlock
    _collection = models.CharField(max_length=32)
    # id of the collection block this item came from, as some may have same name
    _block = models.PositiveBigIntegerField()
    _file = models.FileField(upload_to=file_path, max_length=172, blank=True)
    _filesize = models.PositiveBigIntegerField(default=0)
    _filemeta = models.JSONField(default=dict, blank=True)
    _error = models.BooleanField(default=False)
    _message = models.CharField(max_length=100, default='', blank=True)
    @classmethod
    def _filename_maxlen(cls):
        # use 37 for directory uuid, 8 for possible alt name, 7 for order prefix
        return cls._meta.get_field('_file').max_length - 37 - 8 - 7
    @classmethod
    def _message_maxlen(cls):
        return cls._meta.get_field('_message').max_length
    def _rank_group(self):
        # items are ranked within (submission, collection, block)
        return self.__class__.objects.filter(_submission=self._submission,
                                             _collection=self._collection,
                                             _block=self._block)
    def _file_name(self):
        # strip the leading '<submission id>/' directory from the stored name
        if not self._file: return None
        return self._file.name[self._file.name.index('/')+1:]
    def _file_type(self):
        if 'type' in self._filemeta: return self._filemeta['type']
        return ''
    def _artifact_url(self, name='thumbnail'):
        """URL of a derived artifact; thumbnails special-cased for image/video."""
        if not self._file: return None
        type = self._file_type()
        if not type: return None
        if name != 'thumbnail' or type not in ('image', 'video'):
            filetype = FileType.by_type(type)()
            return filetype.artifact_url(name, self._file.url)
        if type == 'image': return thumbnail_path(self._file.url)
        elif type == 'video': return thumbnail_path(self._file.url, ext='jpg')
        return None
| johncronan/formative | formative/models/formative.py | formative.py | py | 40,781 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "utils.MarkdownFormatter",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.models.Manager",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 34,
"usage_type": "name"
},
{
"api_name":... |
37486401663 | from itertools import product
from random import choice
from time import sleep
from os import system
from math import floor
from colorama import Back, Fore, Style
################################################################################
# Simulation of Conway's Game of Life. The goal here was to write this with a
# small amount of code as a proof-of-concept that could be run in the terminal.
#
# If you'd like to tinker with the rules, see the conditionals defined in the
# `advance/1` function. For other parameters, like the board size and refresh
# rate, refer to the while-loop defined at the bottom of this file.
################################################################################
def init_board(n, init_alive_percentage):
    """Build an `n` x `n` board of booleans.

    Each cell is sampled independently from a pool of `n` values in which
    `init_alive_percentage` (0.0-1.0) of the entries are alive (True).
    """
    live = floor(n * init_alive_percentage)
    pool = [True] * live + [False] * (n - live)
    return [[choice(pool) for _col in range(n)] for _row in range(n)]
def neighbors(coord, board):
    """Return the 8 neighbor values of `coord`, wrapping around the edges."""
    size = len(board)
    r, c = coord
    offsets = [(dr, dc) for dr in (-1, 0, 1) for dc in (-1, 0, 1)
               if dr or dc]
    return [board[(r + dr) % size][(c + dc) % size] for dr, dc in offsets]
def advance(board):
    """Compute the next generation of `board`.

    These are deliberately NOT the classic Conway rules -- see the header
    comment in this file about tinkering with the conditionals.
    """
    n = len(board)
    nxt = [[False] * n for _ in range(n)]
    for r in range(n):
        for c in range(n):
            live = sum(1 for cell in neighbors((r, c), board) if cell)
            if live == 1:
                nxt[r][c] = board[r][c]    # status quo
            elif live == 2:
                nxt[r][c] = True           # cooperation
            else:
                nxt[r][c] = False          # loneliness (0) / starvation (>=3)
    return nxt
def print_board(board):
    """Print the game `board`: one colored '1 '/'0 ' cell per entry."""
    rows = []
    for row in board:
        cells = []
        for alive in row:
            if alive:
                cells.append(Back.GREEN + '1 ' + Style.RESET_ALL)
            else:
                cells.append(Back.RED + '0 ' + Style.RESET_ALL)
        rows.append(''.join(cells))
    print('\n'.join(rows) + '\n')
# Simulation driver: start from a random 100x100 board (50% alive) and
# redraw the terminal forever at ~6-7 frames per second.
board = init_board(100, 0.50)
while True:
    system('clear')  # clear the terminal between frames
    print_board(board)
    sleep(0.15)  # refresh rate
    board = advance(board)
| tvl-fyi/depot | users/wpcarro/scratch/data_structures_and_algorithms/conways-game-of-life.py | conways-game-of-life.py | py | 2,666 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "math.floor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "colorama.Back.GREEN",
... |
33693604036 | from __future__ import print_function
import argparse
import torch
import torch.utils.data
from torch import optim
from torch import nn
from torch.utils.data import DataLoader
from gensim.models import KeyedVectors
import os
import numpy as np
from collections import OrderedDict
from multiprocessing import cpu_count
from ptb import PTB
from model import SentenceVAE
# Command-line configuration for training.
parser = argparse.ArgumentParser(description='Sentence VAE Example')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--data_dir', type=str, default='data')
parser.add_argument('--create_data', action='store_true')
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--k', type=float, default=0.0025)  # KL anneal slope
parser.add_argument('--x0', type=int, default=2500)  # KL anneal midpoint step
parser.add_argument('--log_interval', type=int, default=50)
parser.add_argument('--save_model_path', type=str, default='model')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='enables CUDA training')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
# Build the train/valid dataset wrappers.
splits = ['train', 'valid']
datasets = OrderedDict()
for split in splits:
    datasets[split] = PTB(
        data_dir=args.data_dir,
        split=split,
        create_data=args.create_data,
        max_sequence_length=60
    )
# vocab_size = datasets['train'].vocab_size
sos_idx = datasets['train'].sos_idx
eos_idx = datasets['train'].eos_idx
pad_idx = datasets['train'].pad_idx
# Pretrained word embeddings (gensim KeyedVectors); .syn0 is the raw
# embedding matrix (legacy gensim attribute name).
embedding = KeyedVectors.load('model/pretrained_embedding')
if args.cuda:
    weights = torch.FloatTensor(embedding.syn0).cuda()
else:
    weights = torch.FloatTensor(embedding.syn0)
model = SentenceVAE(weights.size(0), sos_idx, eos_idx, pad_idx, training=True).to(device)
def init_weights(m):
    """Module-wise initializer for model.apply().

    Xavier-initializes the weights of every Linear layer and zeroes its
    bias. Non-Linear modules are left untouched.
    """
    if isinstance(m, torch.nn.Linear):
        # xavier_uniform_ is the in-place, non-deprecated spelling
        torch.nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:  # Linear(..., bias=False) has no bias tensor
            m.bias.data.fill_(0)
model.apply(init_weights)
# Overwrite the (just-initialized) embedding layer with the pretrained
# vectors; from_pretrained freezes them, hence the requires_grad filter below.
model.emb = nn.Embedding.from_pretrained(weights)
optimizer = optim.Adam(filter(lambda p: p.requires_grad,model.parameters()),
                       lr=1e-4, weight_decay=1e-3)
def kl_anneal_function(step):
    """Logistic KL-annealing weight in (0, 1) as a function of global step.

    args.k sets the slope and args.x0 the midpoint of the sigmoid.
    """
    slope, midpoint = args.k, args.x0
    return float(1 / (1 + np.exp(-slope * (step - midpoint))))
# Reconstruction loss: NLL summed over tokens, skipping padding positions.
# (size_average=False is the legacy spelling of reduction='sum'.)
criterion = torch.nn.NLLLoss(size_average=False, ignore_index=pad_idx)
def loss_function(reconx, x, mu, logvar, step):
    """ELBO pieces: (total, NLL, KL, beta) with beta from the anneal schedule.

    reconx holds per-token log-probabilities (batch, seq, vocab); x holds
    the target token ids (batch, seq).
    """
    # print(torch.argmax(torch.exp(reconx), dim=-1)[0][:50])
    flat_target = x.view(-1).long()
    flat_logp = reconx.view(-1, reconx.size(2))
    nll = criterion(flat_logp, flat_target)
    # analytic KL between the diagonal-Gaussian posterior and N(0, I)
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    beta = kl_anneal_function(step)
    return nll + beta * kld, nll, kld, beta
def train(epoch, step):
    """Run one training epoch over the 'train' split.

    Optimizes the ELBO (summed NLL + beta-annealed KL, averaged per batch)
    and checkpoints the model at the end of the epoch. Returns the updated
    global step so the KL annealing schedule continues across epochs.
    """
    model.train()
    data_loader = DataLoader(
        dataset=datasets['train'],
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=cpu_count(),
        pin_memory=torch.cuda.is_available()
    )
    train_loss = 0
    for batch_idx, batch in enumerate(data_loader):
        batch_size = batch['input'].size(0)
        if torch.cuda.is_available():
            batch['input'] = batch['input'].cuda()
            batch['target'] = batch['target'].cuda()
        logp, mu, logvar, z = model(batch['input'])
        loss, NLL_loss, KL_loss, KL_weight = loss_function(logp, batch['target'],
                                                           mu, logvar, step)
        loss = loss / batch_size  # average per sample
        if step == 10:
            # NOTE(review): one-off checkpoint at global step 10 — looks like
            # a debugging snapshot; confirm whether it is still wanted.
            checkpoint_path = os.path.join(args.save_model_path,
                                           "model_epoch_%i" % (epoch))
            torch.save(model.state_dict(), checkpoint_path)
            print("Model saved at %s" % checkpoint_path)
        optimizer.zero_grad()
        loss.backward()
        train_loss += loss.item()
        # clip_grad_norm_ is the in-place spelling; the un-suffixed form is
        # deprecated/removed in modern PyTorch
        torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
        optimizer.step()
        step += 1
        if batch_idx % args.log_interval == 0:
            print('Train Epoch {} [{}/{}] Loss {:.2f} | NLL {:.2f}'
                  ' | KL {:.2f} | Beta {:.3f}'.format(epoch,
                  batch_idx * batch_size, len(data_loader.dataset),
                  loss.item(), NLL_loss.item() / batch_size,
                  KL_loss.item() / batch_size, KL_weight))
    print('====> Epoch: {} Average loss: {:.4f} steps: {}'.format(
        epoch, train_loss * args.batch_size / len(data_loader.dataset), step))
    checkpoint_path = os.path.join(args.save_model_path, "model_epoch_%i" % (epoch))
    torch.save(model.state_dict(), checkpoint_path)
    print("Model saved at %s" % checkpoint_path)
    return step
def test(step):
    """Evaluate the model on the 'valid' split (no gradient updates)."""
    model.eval()
    test_loss = 0
    data_loader = DataLoader(
        dataset=datasets['valid'],
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=cpu_count(),
        pin_memory=torch.cuda.is_available()
    )
    with torch.no_grad():
        for batch_index, batch in enumerate(data_loader):
            if torch.cuda.is_available():
                batch['input'] = batch['input'].cuda()
                batch['target'] = batch['target'].cuda()
            logp, mu, logvar, z = model(batch['input'])
            # step is passed through so the same KL beta is used as in training
            loss, _, _, _ = loss_function(logp, batch['target'],
                                          mu, logvar, step)
            test_loss += loss.item()
    # NOTE(review): unlike train(), the per-batch loss is not divided by the
    # batch size before summing — confirm the intended normalization.
    test_loss /= len(data_loader.dataset)
    print('====> Test set loss: {:.4f}'.format(test_loss))
# Training driver: `step` is the global optimization step, threaded through
# the epochs so the KL annealing schedule stays continuous.
step = 0
for epoch in range(1, args.epochs + 1):
    step = train(epoch, step)
    test(step)
| dnddnjs/pytorch-svae | train.py | train.py | py | 5,776 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "torch... |
10202065384 | import os
import math
import numpy as np
import shutil
import cv2
from colormath.color_conversions import *
from colormath.color_objects import *
from sklearn.manifold import TSNE
import json
from similarity_measurer import SimilarityMeasurer
from color_palette import ColorPalette
from geo_sorter_helper import *
from OrderSorter import OrderSorter
from palette_to_image import save
if __name__ == '__main__':
    # (disabled) randomly sample palettes for display
    # numbers = 50
    # k_list = [5, 10, 15, 20, 25, 30]
    # jitters = [0, 5, 10, 15]
    # for k in k_list:
    #     replaces = [0, math.ceil(k / 10)]
    #     target_dir = 'Datasets/WedData/k{}'.format(k)
    #     if not os.path.exists(target_dir):
    #         os.makedirs(target_dir)
    #     for i in range(numbers):
    #         j = jitters[np.random.randint(0, len(jitters))]
    #         r = replaces[np.random.randint(0, 2)]
    #         sub_dir = 'Datasets/LHSP/LHSP-k{}-jitter{}-replacement{}/retrieval-palettes-images'.format(k, j, r)
    #         image_list = os.listdir(sub_dir)
    #         src_image = image_list[np.random.randint(0, len(image_list))]
    #         dst_image = ('k{}-p{}' + os.path.splitext(src_image)[-1]).format(k, i)
    #         print(os.path.join(sub_dir, src_image))
    #         shutil.copyfile(os.path.join(sub_dir, src_image), os.path.join(target_dir, dst_image))
    # process every palette length from 5 to 15 colors
    k_list = [x for x in range(5, 16, 1)]
    # tsne_results = []
    json_path = 'Datasets/WedData/'
    # per-palette color lists, keyed by palette name
    palette_color = {}  # unsorted
    palette_color_sorted = {}  # sorted
    if not os.path.exists(json_path):
        os.makedirs(json_path)
    all_palettes = []
    all_names = []
    for k in k_list:
        print('k=', k)
        base_dir = 'Datasets/WedData/images/k{}'.format(k)
        save_dir = 'Datasets/WedData/images-sorted/k{}'.format(k)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        palettes = []
        # palettes_sorted = []
        lab_lists = []
        names = []
        # cnt = 6  # only 6 per length would be enough
        for img in os.listdir(base_dir):
            name = img[:img.index('.')]
            names.append(name)
            all_names.append(name)
            palette_path = os.path.join(base_dir, img)
            # each palette image is a horizontal strip of square color blocks:
            # sample one pixel per block (stride == image height)
            image = cv2.imread(palette_path)
            image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # print(image_rgb.shape)
            palette = []
            lab_list = []
            for i in range(0, image_rgb.shape[1], image_rgb.shape[0]):
                r, g, b = image_rgb[0][i]
                rgb = sRGBColor(r, g, b, True)
                lab = convert_color(rgb, LabColor)
                palette.append(rgb.get_rgb_hex())
                lab_list.extend(list(lab.get_value_tuple()))
            # (disabled) pad short palettes when computing over all lengths
            # for i in range(k, 15):
            #     lab_list.extend([100, 0, 0])
            palettes.append(palette)
            lab_lists.append(lab_list)
            palette_color[name] = palette
            all_palettes.append(lab_list)
            # sort the palette's colors
            sorter = OrderSorter(palette=palette, name='_')
            sorter.calculate_distance_matrix()
            step = 1
            sorter.discrete_sampling(step=step)
            best_points = sorter.sort()  # values at the sampled points
            sorter.solve(best_points)  # perform the ordering
            palette_sorted = [palette[x] for x in sorter.samples_sorted]
            palette_color_sorted[name] = palette_sorted
            save(palette_sorted, name + '-sorted', image_dir=save_dir, repeat=True,
                 color_block_width=100)
            # palettes_sorted.append(palette_sorted)
            # cnt -= 1
            # if cnt <= 0:
            #     break
        # ======================== (disabled) per-length TSNE, save to json
        # data = np.array(lab_lists)
        # tsne = TSNE(n_components=2)
        # result = tsne.fit_transform(data.reshape(data.shape[0], -1))
        # # tsne_results.append(result)
        # json_text = {}
        # for i in range(len(palettes)):
        #     json_text[names[i]] = {'pointX': result[i][0].item(), 'pointY': result[i][1].item()}
        # print(json_text)
        # json_data = json.dumps(json_text, indent=4)
        #
        # file = os.path.join(json_path, 'k{}.json'.format(k))
        # with open(file, 'w') as f:
        #     f.write(json_data)
        # ======================== (disabled) pairwise similarity computation
        # num = len(palettes)
        # similarity_list = {}
        # for i in range(num):
        #     sub_list = []
        #     pa = ColorPalette(auto_fetched=False, colors=palettes[i])
        #     for j in range(num):
        #         pb = ColorPalette(auto_fetched=False, colors=palettes[j])
        #         similarity_measurer = SimilarityMeasurer(pa, pb, LabDistanceMode.CIEDE2000, improved=True)
        #         sim, _ = similarity_measurer.measure()
        #         sub_list.append({"id": names[j], "similarity": sim["DynamicClosestColorWarping"]})
        #     similarity_list[names[i]] = sub_list
        # json_path = 'Datasets/WedData/similarity'
        # if not os.path.exists(json_path):
        #     os.makedirs(json_path)
        # json_data = json.dumps(similarity_list, indent=4)
        # file = os.path.join(json_path, 'k{}.json'.format(k))
        # with open(file, 'w') as f:
        #     f.write(json_data)
    # =========== save the sorted palette color info
    json_data = json.dumps(palette_color_sorted, indent=4)
    file = os.path.join(json_path, 'palette_color_sorted.json')
    with open(file, 'w') as f:
        f.write(json_data)
    # ======================== (disabled) TSNE over all lengths combined
    # print(all_palettes)
    # all_data = np.array(all_palettes)
    # print(all_data.shape)
    # all_tsne = TSNE(n_components=2)
    # result = all_tsne.fit_transform(all_data.reshape(all_data.shape[0], -1))
    # # tsne_results.append(result)
    # json_text = {}
    # for i in range(len(all_palettes)):
    #     json_text[all_names[i]] = {'pointX': result[i][0].item(), 'pointY': result[i][1].item()}
    # print(json_text)
    # json_data = json.dumps(json_text, indent=4)
    #
    # file = os.path.join(json_path, 'all_palettes.json')
    # with open(file, 'w') as f:
    #     f.write(json_data)
| Xiaozhxiong/Palette-Sorting | expriment_6.py | expriment_6.py | py | 6,551 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_nu... |
73917552423 | import scipy.io
import pylab
import numpy
from math import *
import scipy.misc
import scipy.sparse
import sys
import time
import matplotlib.ticker
#import HCIL_model # make sure this model accounts for alignments etc
from matplotlib import pyplot as plt
import socket
import time
from InitializationPY import *
from EKF_MAT2PY import mat2py, recvall
# G = numpy.concatenate([numpy.concatenate([numpy.real(G1),
# numpy.imag(G1)], axis=1), numpy.concatenate([numpy.real(G2), numpy.imag(G2)],
# axis=1)], axis=0).T # real matrix [Re(G1),Im(G1); Re(G2), Im(G2)], for dark hole area
# Only use G2 for control right now
G = numpy.concatenate([numpy.concatenate([numpy.real(G2), numpy.imag(G2)],
axis=1)], axis=0).T
# G = numpy.concatenate([numpy.real(G2), numpy.imag(G2)],
# axis=1).T
print(G.shape)
## DM setting after creating the dark hole
u0 = command #numpy.concatenate([DM1command, DM2command])
## Correction on top of u0
u = u0*0.0
## Drift covariances (might be a bit off, but the filter is not very sensitive to this)
Qlarge = G.dot(numpy.eye(len(u))).dot(G.T)* drift_std**2 # this needs to be number of actuators available, not of full control
## The Extended Kalman Filter class which stores its state
class Filter:
def __init__(self, index, x0 = numpy.zeros(2)):
self.index = index #pixel index
self.x_hat = numpy.array(x0) #electric field (state) estimate (real 2x1 vector with real and imag parts)
# self.Q = drift_stats["covs"][self.index] #process noise covariance
self.Q = Qlarge[2*index:2*index+2, 2*index:2*index+2]
self.P = 16*self.Q #initial state covariance
def advance(self, Gu, I):
## "predic" step
E_hat = complex(self.x_hat[0] + Gu[self.index], self.x_hat[1] + Gu[self.index+G.shape[0]//2]) #closed loop electric field estimate (open loop field + Jacobian * controls)
z_hat = abs(E_hat)**2 + i_inco_avg#*scale # intensity estimate
z = I[self.index] # intensity measurement
H = [2*Gu[self.index] + 2*self.x_hat[0], 2*Gu[self.index+G.shape[0]//2] + 2*self.x_hat[1]] #observation equation linearization
## Kalman gain:
S = self.P.dot(H).dot(H) + (dark_curr)**2
K = self.P.dot(H)/S
## "update" step
self.x_hat = self.x_hat + K.dot(z - z_hat) #closed loop electric field estimate
self.P = self.P - numpy.outer(K,H).dot(self.P) #electric field covariance
## "advance" step
self.P = self.P + self.Q #covariance increases due to druft
self.x_hat = self.x_hat*1.0 #mean remains the same (the drift is zero mean)
return E_hat
## Computing EFC gain
M = G.T.dot(G)
EFC_gain = numpy.linalg.pinv(M + alpha*numpy.eye(len(M))).dot(G.T) #EFC function in runEFC
## Sample random wavefront drift and dithering
numpy.random.seed(0)
u_drift = numpy.zeros(n_act_drift)
u_drifts = []
dus = []
for j in range(T):
## Random walk drift
u_drifts.append(u_drift) #change drift
u_drift = u_drift + drift_std*(numpy.random.random(u_drift.shape)-0.5) # in nm?
## Zero mean dithering
dus.append(numpy.random.normal(0, sigma_u, u.shape))
## Initialize filters
#I0, E0_init = mat2py(DM1command, u0 ,len(DM1command),G1.shape[1])
fs = [Filter(index,[numpy.real(E0_init[index]), numpy.imag(E0_init[index])]) for index in range(G.shape[0]//2)]
## Run closed loop
for j in range(T):
cont_power = j>4
drift_power = j>4
I_OL, E_OL = mat2py(DM1command + drift_power*u_drifts[j], u0 ,len(DM1command),G1.shape[1])
I, E = mat2py(DM1command + drift_power*u_drifts[j], u0 + u,len(DM1command),G1.shape[1])
E_OL_split = numpy.zeros(2*len(E_OL))
E_OL_split[0:n_px] = numpy.real(E_OL)
E_OL_split[n_px:2*n_px] = numpy.imag(E_OL)
E_split = numpy.zeros(2*len(E))
E_split[0:n_px] = numpy.real(E)
E_split[n_px:2*n_px]= numpy.imag(E)
# plt.plot(u0+u, label="DM2 EFC+Dither+DH")
# plt.plot(u0, label="DM2 DH command")
# plt.plot(u, label="DM2 EFC+Dither")
# plt.plot(DM1command, label="DM1 DH command")
# plt.plot(u_drifts[j], label="DM1 Drift")
# plt.legend()
# plt.show()
# print("Iteration %d: avg closed loop intensity %.01f, avg open loop intensity %.01f"%(j, numpy.linalg.norm(E)**2/scale, numpy.linalg.norm(E0)**2/scale))
# print("Iteration %d: avg closed loop intensity %.01f"%(j, numpy.linalg.norm(I)/scale))
# I = measurement(numpy.abs(E)**2) # get from getImage in lab, dark hole area only
I2D_temp = numpy.zeros(99*83)
# print('red',len(I2D_temp))
I2D_temp[pixel_indices] = I
I2D = I2D_temp.reshape(99,83)
# print(I2D.shape)
Gu = G.dot(u) #precompute the difference between closed and open loop fields
Iperf = numpy.abs(E)**2
E_hat_hat = []
for f in fs:
f.advance(Gu,I) #advance the filters
# E_hat_hat.append(f.advance(Gu,Iperf+dark_curr))
# Get field estimate
x_hats = numpy.array([f.x_hat for f in fs])
E_hat = numpy.concatenate([x_hats[:,0], x_hats[:,1]])
# x_hat_hat = numpy.array(E_hat_hat)
# print(len(E_hat_hat))
# x_hathat = numpy.concatenate([x_hat_hat, x_hat_hat[:,1]])
#print("mean estimated intensity")
# E_hat2 = numpy.concatenate([numpy.real(E0), numpy.imag(E0)]) #for debugging only
# print(numpy.linalg.norm(E_hat-E_hat2)/numpy.linalg.norm(E_hat2))
## Record iteration data
I_arr.append(I)
I_OL_arr.append(I_OL)
u_arr.append(u)
E_hat_arr.append(E_hat)
E_arr.append(E)
E_OL_arr.append(E_OL)
print("Iteration: ",j, "\navg closed loop intensity: ", numpy.mean(I),
"\navg open loop intensity: ", numpy.mean(I_OL), "\nmean estimated OL intensity: ",
numpy.mean(E_hat[0:n_px]**2+ E_hat[n_px:2*n_px]**2))
# fig = plt.figure()
# self.ax1 = fig.add_subplot(1, 2, 1)
deltaE = G2.T.dot(u)
deltaE_split = numpy.zeros(2*len(deltaE))
deltaE_split[::2] = numpy.real(deltaE)
deltaE_split[1::2] = numpy.imag(deltaE)
Gu_comb = Gu[0:n_px] + Gu[n_px:2*n_px]*complex(0,1)
# plt.plot(-Gu,label='Split -Gu')
# # plt.plot(-Gu_comb,label='-Gu combined')
# plt.plot(E_OL_split-E_split,lw=3,alpha = 0.5,label='Split OL-CL')
# plt.plot(-G2.T.dot(u),label='-G2u')
# plt.plot(E_OL-E,lw=2,alpha = 0.3,label='OL-CL')
# plt.plot(E_hat,label = 'Ehat')
# plt.plot(E_OL_split,lw=2,alpha = 0.5,label = "E_OL")
# plt.plot(E,label = 'E')
# plt.plot(E_hat_hat,lw=2,alpha = 0.5,label = "E_hat_hat")
# plt.legend()
# plt.title("Iteration %d"%(j))
# plt.show()
# Compute next command (EFC + dithering)
u = dus[j] - cont_power*EFC_gain.dot(E_hat)
# u = -EFC_gain.dot(E0) + dus[j]
# ADD A SEND DM COMMAND
#
# print("Iteration %d: avg closed loop intensity %.01f, \
# avg open loop intensity %.01f"%(j, numpy.mean(I), \
# numpy.mean(I_OL)))
print("saving data")
to_save = ["I_arr","I_OL_arr","E_hat_arr","u_arr", "dus", "u_drifts","pixel_indices","i_inco_avg","dark_curr"]
scipy.io.savemat(fname, dict((s, numpy.array(eval(s))) for s in to_save))
print("data saved")
| HeSunPU/FPWCmatlab | EKF.py | EKF.py | py | 6,852 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.concatenate",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.real",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.imag",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number"... |
4855275955 | #!/usr/bin/python
# -*- coding: utf-8 -*
from fabric.api import *
from fabric.context_managers import *
from fabric.contrib.console import confirm
from fabric.contrib.files import *
from fabric.contrib.project import rsync_project
import fabric.operations
import time,os
import logging
import base64
from getpass import getpass
import json
import sys
# 定义一些常量
## 本地软件目录
env.local_softdir="/opt/software/"
## 远端软件目录
env.remote_softdir="/opt/software/"
## 远端家目录
env.remote_dir="/opt/machtalk/"
############## CDH
@task
@roles("cdh")
def cdh_pufile():
# 上传文件
run("mkdir -p /opt/cloudera/parcel-repo/")
run("mkdir -p /opt/cdh-cm/share/cmf/lib/")
put(env.local_softdir + "cloudera-manager-el6-cm5.5.0_x86_64.tar.gz", "/opt/")
put(env.local_softdir + "solrcloud.tar.gz", "/opt/")
put(env.local_softdir + "CDH-5.5.0-1.cdh5.5.0.p0.8-el6.parcel", "/opt/cloudera/parcel-repo/")
put(env.local_softdir + "CDH-5.5.0-1.cdh5.5.0.p0.8-el6.parcel.sha", "/opt/cloudera/parcel-repo/")
put(env.local_softdir + "manifest.json", "/opt/cloudera/parcel-repo/")
put(env.local_softdir + "mysql-connector-java-5.1.38-bin.jar", "/opt/cdh-cm/share/cmf/lib/")
@task
@roles("cdh")
def cdh_deploy():
with cd("/opt/"):
# 判断目录是否存在,如果存在就退出
run(""" [ -e "/opt/cm" ] && exit 1 || echo '开始部署CDH!' """)
# 根据变量获取
ip = env.host
ipListNumber = json.loads(bo.servers).index(ip) + 1
serverName = "cdhslave%s" % (ipListNumber)
if ipListNumber == 1:
serverName = "cdhmanager"
# 设置主机名
sudo("""
cat << 'EOF' > /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=%s
EOF
hostname %s
""" % (serverName, serverName))
# 设置hosts
conf_hosts = ""
itemNumber = 0
for item in json.loads(bo.servers):
if itemNumber == 0:
conf_hosts += """
%s cdhmanager""" % (item)
else:
conf_hosts += """
%s cdhslave%s""" % (item, itemNumber + 1)
itemNumber += 1
sudo("""
sed -i "/cdhslave/d" /etc/hosts
sed -i "/cdhmanager/d" /etc/hosts
service network restart
echo '%s' >> /etc/hosts
""" % (conf_hosts))
# 上传文件
# put(Const.SOURCE_DIR + "cloudera-manager-el6-cm5.5.0_x86_64.tar.gz", "/opt/")
run("tar -xzf /opt/cloudera-manager-el6-cm5.5.0_x86_64.tar.gz -C /opt/")
run("ln -s /opt/cm-5.5.0 /opt/cdh-cm")
# put(Const.SOURCE_DIR + "solrcloud.tar.gz", "/opt/")
run("tar -xzf ./solrcloud.tar.gz")
# put(Const.SOURCE_DIR + "CDH-5.5.0-1.cdh5.5.0.p0.8-el6.parcel", "/opt/cloudera/parcel-repo/")
#put(Const.SOURCE_DIR + "CDH-5.5.0-1.cdh5.5.0.p0.8-el6.parcel.sha", "/opt/cloudera/parcel-repo/")
#put(Const.SOURCE_DIR + "manifest.json", "/opt/cloudera/parcel-repo/")
#put(Const.SOURCE_DIR + "mysql-connector-java-5.1.38-bin.jar", "/opt/cdh-cm/share/cmf/lib/")
# 修改主机名 为 cdhmanassger
run("""
hostname cdhmanager
sed -i 's/HOSTNAME=.*/HOSTNAME=cdhmanager/g' /etc/sysconfig/network
sed -i '/cdhmanager/d' /etc/hosts
echo "%s cdhmanager" >> /etc/hosts
service network restart
"""%env.host)
# 添加cloudera-scm账户
run("useradd --system --home=/opt/cdh-cm/run/cloudera-scm-server/ --no-create-home --shell=/bin/false --comment 'Cloudera SCM User' cloudera-scm | echo 'account alreay exists'")
# cdhmanager服务器ssh key
run('''
ssh-keygen -t rsa -C 'cdh' -P '' -f ~/.ssh/id_rsa
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
''')
# 只有cdhamnager需要执行下面的命令
if serverName == "cdhmanager":
# 配置修改
## agent
run(
"sed -i 's/server_host=localhost/server_host=cdhmanager/g' /opt/cdh-cm/etc/cloudera-scm-agent/config.ini")
# 数据库配置
conf_cdhserver = """
com.cloudera.cmf.db.type=mysql
com.cloudera.cmf.db.host=%s
com.cloudera.cmf.db.name=%s
com.cloudera.cmf.db.user=%s
com.cloudera.cmf.db.password=%s
""" % (info['services']['cdh']['dbip'], "cm", "scm", "scm")
run(""" echo '%s' > /opt/cm-5.5.0/etc/cloudera-scm-server/db.properties """ % conf_cdhserver)
# 数据库建库
# 需要变量dbip dbname dbpass
## 确保mysql客户端存在
run(""" yum install -y mysql || echo 'mysql客户端已经安装!'""")
## for database amon
run(
""" mysql -h %s -u%s -p%s -e "create database amon DEFAULT CHARSET utf8 COLLATE utf8_general_ci;" """ % (
info['services']['cdh']['dbip'], info['services']['cdh']['dbusrname'], info['services']['cdh']['dbpwd']))
run(
""" mysql -h %s -u%s -p%s -e "grant all privileges on amon.* to 'amon'@'%s' identified by 'amon'; flush privileges;" """ % (
info['services']['cdh']['dbip'], info['services']['cdh']['dbusrname'],
info['services']['cdh']['dbpwd'], "%"))
run(
""" mysql -h %s -u%s -p%s -e "grant all privileges on amon.* to 'amon'@'%s' identified by 'amon'; flush privileges;" """ % (
info['services']['cdh']['dbip'], info['services']['cdh']['dbusrname'],
info['services']['cdh']['dbpwd'], "localhost"))
## for database cm
run(""" mysql -h %s -u%s -p%s -e "create database cm DEFAULT CHARSET utf8 COLLATE utf8_general_ci;" """ % (
info['services']['cdh']['dbip'], info['services']['cdh']['dbusrname'], info['services']['cdh']['dbpwd']))
run(
""" mysql -h %s -u%s -p%s -e "grant all privileges on cm.* to 'scm'@'%s' identified by 'scm'; flush privileges;" """ % (
info['services']['cdh']['dbip'], info['services']['cdh']['dbusrname'],
info['services']['cdh']['dbpwd'], "%"))
run(
""" mysql -h %s -u%s -p%s -e "grant all privileges on cm.* to 'scm'@'%s' identified by 'scm'; flush privileges;" """ % (
info['services']['cdh']['dbip'], info['services']['cdh']['dbusrname'],
info['services']['cdh']['dbpwd'], "localhost"))
# 数据库初始化
run('/opt/cdh-cm/share/cmf/schema/scm_prepare_database.sh mysql cm --scm-host ' + ip + ' scm scm scm')
# 启动server
run("/opt/cdh-cm/etc/init.d/cloudera-scm-server start")
# 启动agent
run("""
sed -i 's:server_host=.*:server_host=cdhmanager:g' /opt/cm-5.5.0/etc/cloudera-scm-agent/config.ini
/opt/cm-5.5.0/etc/init.d/cloudera-scm-agent restart
""")
''' 伟大的注释
# cdh - 3 - 数据库初始化
@task
@roles('cdh')
def cdh3():
ip = run("/sbin/ifconfig | grep '10\.\|192\.168\.' | head -n 1 | awk -F\: '{print $2}' | awk '{print $1}'")
dbip=ip
# for database amon
run(""" mysql -e "create database amon DEFAULT CHARSET utf8 COLLATE utf8_general_ci;" """)
run(""" mysql -e "grant all privileges on amon.* to 'amon'@'%s' identified by 'amon'; flush privileges;" """%("%"))
run(""" mysql -e "grant all privileges on amon.* to 'amon'@'%s' identified by 'amon'; flush privileges;" """%("localhost"))
#A for database cm
run(""" mysql -e "create database cm DEFAULT CHARSET utf8 COLLATE utf8_general_ci;" """)
run(""" mysql -e "grant all privileges on cm.* to 'scm'@'%s' identified by 'scm'; flush privileges;" """%("%"))
run(""" mysql -e "grant all privileges on cm.* to 'scm'@'%s' identified by 'scm'; flush privileges;" """%("localhost"))
# for database hive
#run(""" mysql -e "create database hive DEFAULT CHARSET utf8 COLLATE utf8_general_ci;" """)
#run(""" mysql -e "grant all privileges on hive.* to 'hive'@'%s' identified by 'hive'; flush privileges;" """%("%"))
#run(""" mysql -e "grant all privileges on hive.* to 'hive'@'%s' identified by 'hive'; flush privileges;" """%("localhost"))
# for database hue
#run(""" mysql -e "create database hue DEFAULT CHARSET utf8 COLLATE utf8_general_ci;" """)
#run(""" mysql -e "grant all privileges on hue.* to 'hue'@'%s' identified by 'hue'; flush privileges;" """%("%"))
#run(""" mysql -e "grant all privileges on hue.* to 'hue'@'%s' identified by 'hue'; flush privileges;" """%("localhost"))
# for a special user
#run(""" mysql -e "grant all privileges on *.* to 'machtalk'@'%s' identified by 'machmydb'; flush privileges;" """%("%"))
#run(""" mysql -e "grant all privileges on *.* to 'machtalk'@'%s' identified by 'machmydb'; flush privileges;" """%("localhost"))
#run(""" mysql -e "grant all privileges on *.* to 'machtalk'@'%s' identified by 'machmydb'; flush privileges;" """%("cdhmanager"))
#scm back is clouder_manager_server ip
run('/opt/cdh-cm/share/cmf/schema/scm_prepare_database.sh mysql cm --scm-host '+ip+' scm scm scm')
# cdh - 4 - 启动服务
@task
@roles('cdh')
def cdh4():
run("/opt/cdh-cm/etc/init.d/cloudera-scm-server start")
run("/opt/cdh-cm/etc/init.d/cloudera-scm-agent start")
'''
'''
清空当前cloudera
cd /opt ; rm -rf cloud*
cd /opt ; rm -rf cm*
cd /opt ; rm -rf solr*
cd /opt ; rm -rf cdh-cm
'''
''' 最后配置
su hdfs
hadoop fs -setrep 2 /
'''
@task
@roles("cdh")
def cdh_clean():
pass | zzlyzq/speeding | funcs/cdh.py | cdh.py | py | 9,909 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 65,
"usage_type": "call"
}
] |
70176885225 | # imports
from functools import cached_property

import mlflow
import memoized_property
from mlflow.tracking import MlflowClient
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder

from TaxiFareModel.encoders import TimeFeaturesEncoder, DistanceTransformer
from TaxiFareModel.utils import compute_rmse
class Trainer():
def __init__(self, X, y):
"""
X: pandas DataFrame
y: pandas Series
"""
self.pipeline = None
self.X = X
self.y = y
self.experiment_name = "[#871] [Berlin] [KiKar31] pipe v1"
def set_pipeline(self):
"""defines the pipeline as a class attribute"""
dist_pipe = Pipeline([
('dist_trans', DistanceTransformer()),
('stdscaler', StandardScaler())
])
time_pipe = Pipeline([
('time_enc', TimeFeaturesEncoder('pickup_datetime')),
('ohe', OneHotEncoder(handle_unknown='ignore'))
])
preproc_pipe = ColumnTransformer([
('distance', dist_pipe, ["pickup_latitude", "pickup_longitude", 'dropoff_latitude', 'dropoff_longitude']),
('time', time_pipe, ['pickup_datetime'])
], remainder="drop")
pipe = Pipeline([
('preproc', preproc_pipe),
('linear_model', LinearRegression())
])
self.pipeline = pipe
def run(self):
"""set and train the pipeline"""
return self.pipeline.fit(self.X, self.y)
def evaluate(self, X_test, y_test):
"""evaluates the pipeline on df_test and return the RMSE"""
y_pred = self.pipeline.predict(X_test)
return compute_rmse(y_pred, y_test)
@memoized_property
def mlflow_client(self):
mlflow.set_tracking_uri("https://mlflow.lewagon.ai/")
return MlflowClient()
@memoized_property
def mlflow_experiment_id(self):
try:
return self.mlflow_client.create_experiment(self.experiment_name)
except BaseException:
return self.mlflow_client.get_experiment_by_name(self.experiment_name).experiment_id
@memoized_property
def mlflow_run(self):
return self.mlflow_client.create_run(self.mlflow_experiment_id)
def mlflow_log_param(self, key, value):
self.mlflow_client.log_param(self.mlflow_run.info.run_id, key, value)
def mlflow_log_metric(self, key, value):
self.mlflow_client.log_metric(self.mlflow_run.info.run_id, key, value)
if __name__ == "__main__":
# get data
# clean data
# set X and y
# hold out
# train
# evaluate
print('TODO')
| lamothearthur/TaxiFareModel | TaxiFareModel/trainer.py | trainer.py | py | 2,701 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.pipeline.Pipeline",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "TaxiFareModel.encoders.DistanceTransformer",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 26,
"usage_typ... |
74867967785 | from data_loader import *
from plotter import *
from utils import *
import netCDF4 as nc
import argparse
import pandas as pd
from tkinter import *
# TODO: Add gui....eventually
# import customtkinter
#
# customtkinter.set_appearance_mode('system')
# root = customtkinter.CTk()
# root.geometry('300x400')
# button = customtkinter.CTkButton(master=root, text='Hello World!!!')
# button.place(relx=0.5, rely=0.5, anchor=CENTER)
# root.mainloop()
# TODO: add style checks
# TODO: add option to wget data
# TODO: plot sc orbit locations
# def process_spacecraft_data(g17_file, g18_file, gk2a_file, g17_deg,
# g18_deg, gk2a_deg, save_path):
def process_spacecraft_data(g17_file=None, g18_file=None, gk2a_file=None,
g17_deg=None, g18_deg=None, gk2a_deg=None,
save_path=None):
# For multiple s/c, one day is typical, unless use aggregate_nc_file to
# look at multiple days at a time.
# Plots mag inclination angle
noonmidnighttimes_dict = {}
goes_time_fromnc = None
goes17_bgse_stacked = goes18_bgse_stacked = gk2a_bgse_stacked = None
if g17_file:
goes17coloc_dataset = nc.Dataset(g17_file)
goes17_bgse_stacked = process_goes_dataset(
goes17coloc_dataset['b_gse'])
goes_time_fromnc = goes_epoch_to_datetime(
goes17coloc_dataset['time'][:])
goes17_VDH = gse_to_vdh(goes17_bgse_stacked, goes_time_fromnc)
if g18_file:
goes18coloc_dataset = nc.Dataset(g18_file)
goes18_bgse_stacked = process_goes_dataset(
goes18coloc_dataset['b_gse'])
goes_time_fromnc = goes_epoch_to_datetime(
goes18coloc_dataset['time'][:])
goes18_VDH = gse_to_vdh(goes18_bgse_stacked, goes_time_fromnc)
if gk2a_file:
gk2a_dataset = nc.Dataset(gk2a_file)
gk2a_bgse_stacked = stack_gk2a_data(gk2a_dataset)
gk2a_VDH = gse_to_vdh(gk2a_bgse_stacked, goes_time_fromnc)
date_str = get_date_str_from_goesTime(goes_time_fromnc)
# Used to plot 'noon' and 'midnight' times (optional arg)
noonmidnighttimes_dict = {}
gk2a_noon, gk2a_midnight, g17_noon, g17_midnight, g18_noon, g18_midnight \
= [
None] * 6
if gk2a_deg:
gk2a_time_diff = calculate_time_difference(float(gk2a_deg), 'E')
gk2a_noon, gk2a_midnight = find_noon_and_midnight_time(gk2a_time_diff,
date_str,
gk2a=True)
noonmidnighttimes_dict['gk2a'] = {'noon': gk2a_noon,
'midnight': gk2a_midnight}
if g17_deg:
g17_time_diff = calculate_time_difference(float(g17_deg))
g17_noon, g17_midnight = find_noon_and_midnight_time(g17_time_diff,
date_str)
noonmidnighttimes_dict['g17'] = {'noon': g17_noon,
'midnight': g17_midnight}
if g18_deg:
g18_time_diff = calculate_time_difference(float(g18_deg))
g18_noon, g18_midnight = find_noon_and_midnight_time(g18_time_diff,
date_str)
noonmidnighttimes_dict['g18'] = {'noon': g18_noon,
'midnight': g18_midnight}
# Plot B field in GSE coords:
plot_BGSE_fromdata_ontop(goes_time_fromnc, goes17_bgse_stacked,
goes18_bgse_stacked, 'G17', 'G18', 'SOSMAG',
gk2a_bgse_stacked, date_str, save_path,
noonmidnighttimes_dict)
# Plot mag incl (theta) over time:
plot_magnetic_inclination_over_time_3sc(date_str, goes_time_fromnc,
goes17_VDH, goes18_VDH, gk2a_VDH,
save_path, noonmidnighttimes_dict)
# print(gk2a_noon, gk2a_midnight, g17_noon, g17_midnight, g18_noon,
# g18_midnight)
# print(noonmidnighttimes_dict)
# def analyze_pickle_data_statstics(data1, data2):
# # Perform statistical analysis on entire directories of pickle data
def main():
# picklefile = 'Z:/Data/GK2A/model_outputs/202208/sosmag_modout_OMNI2022
# -08-04.pickle'
# with open(picklefile, 'rb') as file:
# data = pickle.load(file)
# print(data.keys())
parser = argparse.ArgumentParser()
# all spacecraft data is optional, and at least one is required.
group = parser.add_argument_group('Spacecraft Data')
group.add_argument("--g17-file", help="File path for GOES-17 mag data")
group.add_argument("--g18-file", help="File path for GOES-18 mag data")
group.add_argument("--gk2a-file", help="File path for GK2A SOSMAG data")
# Optional arguments
parser.add_argument("--save-path", default=None,
help="Save path for the figure \n(optional)")
parser.add_argument("--g17-deg", default=None,
help="GOES17 s/c longitude in degrees (WEST), "
"ex. 105\n(optional)")
parser.add_argument("--g18-deg", default=None,
help="GOES18 s/c longitude in degrees (WEST), "
"ex. 137.0\n(optional)")
parser.add_argument("--gk2a-deg", default=None,
help="GK2A s/c longitude in degrees (EAST), "
"ex. 128.2\n(optional)")
args = parser.parse_args()
# Needed to initialize:
# goes17_bgse_stacked, goes18_bgse_stacked, gk2a_bgse_stacked, gk2a_VDH, \
# goes17_VDH, goes17_VDH, save_path = [None] * 7
# Check if at least one spacecraft data is provided
if not any([args.g17_file, args.g18_file, args.gk2a_file]):
parser.error('At least one spacecraft data file must be provided.')
process_spacecraft_data(g17_file=args.g17_file, g18_file=args.g18_file,
gk2a_file=args.gk2a_file,
g17_deg=args.g17_deg, g18_deg=args.g18_deg,
gk2a_deg=args.gk2a_deg,
save_path=args.save_path)
if __name__ == "__main__":
main()
| sauriemma11/GOES-SOSMAG-Mag-Subtraction | src/main.py | main.py | py | 6,243 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "netCDF4.Dataset",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentPars... |
70525087465 | import socket
import sys
import logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# Create a TCP/IP socket
sock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ('localhost', 5000)
logging.info ('connecting to {} port {}'.format (*server_address))
sock.connect (server_address)
try:
# Send data
message = b'00010sinitasipe'
logging.info ('sending {!r}'.format (message))
sock.sendall (message)
while True:
# Look for the response
logging.info ("Waiting for register transaction")
amount_received = 0
amount_expected = int(sock.recv(5))
while amount_received < amount_expected:
data = sock.recv (amount_expected - amount_received)
amount_received += len (data)
logging.info('received {!r}'.format(data))
logging.info ("Visualización de Asistencia por Personal ...")
data = data.decode().split()
try:
opcion = data[1]
PersonalRut = data[2]
Fecha = data[3]
#FechaDesde = data[4]
#FechaHasta = data[5]
largo = len(PersonalRut+Fecha+opcion) + 8
message = '000{}datas {} {} {}'.format(largo,opcion,PersonalRut,Fecha).encode()
logging.info ('sending to bbdd {!r}'.format (message))
sock.sendall(message)
algo = sock.recv(4096).decode()
if algo:
if 'rut' in algo:
message = '00011asiperut'.encode()
elif 'fecha' in algo:
message = '00011asipefecha'.encode()
else:
message = '00010asipeexito {}'.format(algo).encode()
logging.info ('sending {!r}'.format (message))
sock.send(message)
except:
pass
logging.info('-------------------------------')
finally:
logging.info ('closing socket')
sock.close () | Des-Tello/ArquiSW | services/visualizacion_personal.py | visualizacion_personal.py | py | 2,142 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "socket.socket",
... |
28814194209 | import os
import glob
import logging
import numbers
import numpy as np
import pandas as pd
from ast import literal_eval
from django.conf import settings
from ..utils.custom_decorator import where_exception
from ..data_preprocess.preprocess_base import PreprocessorBase
logger = logging.getLogger("collect_log_helper")
def _error_return_dict(error_type, error_msg):
"""
Return common error dictionary type
Parameters:
-----------
error_type (str) : type of error (eg. '4102')
error_msg (str) : detail message of the error
Returns:
--------
(dict) : common error dictionary
"""
return dict(error_type=error_type, error_msg=error_msg)
class BatchTestResult(PreprocessorBase):
def __init__(self, batch_service, test_data_path):
self.batch_manager_id = batch_service['BATCH_SERVICE_SEQUENCE_PK']
self.model_summary = batch_service['MODEL_SUMMARY']
self.model_command = batch_service['MODEL_COMMAND']
self.pdata_summary = batch_service['PREPROCESSED_DATA_SUMMARY']
self.model_sandbox_pk = batch_service['MODEL_SANDBOX_SEQUENCE_FK1']
self.trans_sandbox_pk = batch_service['PREPROCESSED_DATA_SANDBOX_SEQUENCE_FK2']
self.nfs_dir = settings.ANALYTICS_MANAGER_NFS # /ANALYTICS_MANAGER_NFS/batchServer
self.test_data_path = test_data_path
self.nfs_batch_info_dir = os.path.join(self.nfs_dir, f'batchService_{self.batch_manager_id}')
self.nfs_model_path = os.path.join(self.nfs_batch_info_dir, f'M_{self.model_sandbox_pk}.pickle')
self.nfs_trans_path = glob.glob(
os.path.join(self.nfs_batch_info_dir, f'T_{self.trans_sandbox_pk}_*.pickle'))
# 모델학습에서 사용한 데이터와 테스트 데이터이 컬럼이 일치하는지 확인하는 함수
@staticmethod
def _check_train_columns(data_set, train_summary, target_data):
test_data_columns = list(data_set.columns.values)
test_data_columns.remove(target_data)
test_data_columns.sort()
train_data_summary = literal_eval(train_summary)
train_data_columns = train_data_summary["model_train_columns"]
train_data_columns.sort()
if test_data_columns == train_data_columns:
return True
else:
return False
# Train Data 와 동일한 변환기로 Test Data 에 전처리를 수행하는 함수
def _test_data_transformer(self, data_set, pdata_summary):
test_data_columns = list(data_set.columns.values)
train_pdata_summary = literal_eval(pdata_summary) # str => list
# 학습된 데이터의 전처리 정보를 읽어서 차례대로 동일하게 수행하는 코드
for preprocess_info_dict in train_pdata_summary:
field_name = preprocess_info_dict["field_name"]
func_name = preprocess_info_dict["function_name"]
file_name = preprocess_info_dict["file_name"]
logger.info(f"[모델 배치] {func_name} applied to {field_name}")
if field_name not in test_data_columns:
return False
else:
if func_name == "DropColumns":
data_set = super()._drop_columns(data_set, field_name)
else:
transformer = super()._load_pickle(
base_path=self.nfs_batch_info_dir, file_name=file_name
)
changed_field = transformer.transform(
data_set[field_name].values.reshape(-1, 1)
)
changed_field = super()._to_array(changed_field)
# transform 된 데이터와 원본 데이터 통합(NEW) - preprocess_helper.py 참고
if len(changed_field.shape) == 2 and changed_field.shape[1] == 1:
if func_name == "Normalizer":
logger.warning("Not working in this version!!!")
else:
data_set[field_name] = changed_field
elif len(changed_field.shape) == 1: # LabelEncoder
data_set[field_name] = changed_field
else:
col_name = super()._new_columns(
field_name=field_name, after_fitted=changed_field
)
new_columns = pd.DataFrame(changed_field, columns=col_name)
data_set = pd.concat(
[data_set, new_columns], axis=1, sort=False
)
data_set = data_set.drop(field_name, axis=1)
return data_set
# 배치 서비스 요청에 대한 요청 파라미터 검사하는 함수
def check_request_batch_path(self):
check_list = [self.test_data_path, self.nfs_model_path, self.nfs_trans_path[0]]
for check_path in check_list:
logger.info(f"경로 확인 중... [{check_path}]")
if not os.path.isfile(check_path):
logger.error(f"{check_path} 경로가 존재하지 않습니다")
return dict(error_type="4004", error_msg=check_path)
return True
# 예측값 또는 스코어를 출력하는 함수
def get_batch_test_result(self):
try:
# 테스트 데이터 로드
if self.test_data_path.endswith(".csv"):
test_data = pd.read_csv(self.test_data_path)
elif self.test_data_path.endswith(".json"):
test_data = pd.read_json(
self.test_data_path, lines=True, encoding="utf-8"
)
logger.info(f"[모델 배치] Batch ID [{self.batch_manager_id}] Data Load!")
# 테스트 데이터 전처리
pdata_test = self._test_data_transformer(
data_set=test_data, pdata_summary=self.pdata_summary
)
if isinstance(pdata_test, bool): # 오류 발생시 False 반환
logger.error(
f"[모델 배치 err1] Batch ID [{self.batch_manager_id}] Check Columns Name"
)
return _error_return_dict("4022", "Data is not suitable for the model")
target = literal_eval(self.model_command)["train_parameters"]["y"]
is_same_columns = self._check_train_columns(
data_set=pdata_test,
train_summary=self.model_summary,
target_data=target,
)
if not is_same_columns:
logger.error(
f"[모델 배치 err2] Batch ID [{self.batch_manager_id}] Check Columns Name"
)
return _error_return_dict("4022", "Data is not suitable for the model")
# 모델 로드
model_load = super()._load_pickle(
base_path=self.nfs_batch_info_dir,
file_name="M_{}.pickle".format(self.model_sandbox_pk),
)
logger.info(f"[모델 배치] Batch ID [{self.batch_manager_id}] Model Load!")
# 모델 테스트 결과9
X_ = super()._drop_columns(pdata_test, target)
y_ = np.array(pdata_test[target]).reshape(-1, 1)
score_ = model_load.score(X=X_, y=y_)
predict_ = model_load.predict(X=X_)
logger.info(
f"[모델 배치] Batch ID [{self.batch_manager_id}] Predict Result Return!"
)
if isinstance(predict_[0], numbers.Integral):
result_response = {"score": "%.3f" % score_, "predict": predict_}
return result_response
else:
result_response = ["%.3f" % elem for elem in predict_]
result_response = {"score": "%.3f" % score_, "predict": result_response}
return result_response
except Exception as e:
where_exception(error_msg=e)
| IoTKETI/citydatahub_analytics_module | ANALYTICS_MODULE/API/services/model_batch/batch_helper.py | batch_helper.py | py | 8,017 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "data_preprocess.preprocess_base.PreprocessorBase",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.ANALYTICS_MANAGER_NFS",
"line_number": 41,
"usage... |
19258065891 | from openpyxl import load_workbook
class doExcel():
    """Thin wrapper around one openpyxl worksheet for reading/writing API test-case rows."""

    def __init__(self, file_path, sheet_name):
        """Open the workbook at *file_path*, select *sheet_name*, and cache its dimensions."""
        self.file_path = file_path
        self.sheet_name = sheet_name
        self.wb = load_workbook(self.file_path)
        self.sh = self.wb[self.sheet_name]
        # Highest populated row index of the current sheet
        self.row_max = self.sh.max_row
        # Highest populated column index of the current sheet
        self.column_max = self.sh.max_column

    def get_sheet(self):
        """Return every data row (row 2 onwards, skipping the header) as a list of dicts."""
        field_names = ("interface_name", "interface_url", "interface_method",
                       "interface_body", "interface_expectedOutcome",
                       "interface_actualResults")
        records = []
        for row_idx in range(2, self.row_max + 1):
            record = {name: self.sh.cell(row_idx, col).value
                      for col, name in enumerate(field_names, start=1)}
            records.append(record)
        return records

    def get_cell(self, row, column):
        """Return the value stored at (row, column); both indices are 1-based."""
        return self.sh.cell(row, column).value

    def set_cell(self, row, column, cellvalue):
        """Write *cellvalue* at (row, column) and immediately save the workbook to disk."""
        self.sh.cell(row=row, column=column).value = cellvalue
        self.wb.save(self.file_path)
# --- Ad-hoc smoke test: clear cell (10, 10) and read it back. ---
# NOTE(review): this executes on import as well as on direct run; consider
# guarding with `if __name__ == "__main__":` if this module is ever imported.
file_path="C:\\Users\\admin\\Desktop\\test_api.xlsx"
sheet_name="Sheet1"
do=doExcel(file_path,sheet_name)
do.set_cell(10,10,"")
print(do.get_cell(10,10))
| DXH20191016/untitled | test_interface_auto/common/doExcel.py | doExcel.py | py | 1,429 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 8,
"usage_type": "call"
}
] |
24349509747 | import os
import json
from flask import Flask, request, send_file, jsonify
from picamera2 import Picamera2, Preview
import time
from PIL import Image
# --- Camera initialisation (runs once at import time) ---
picam2 = Picamera2()
# Still-capture pipeline producing 256x256 frames.
camera_config = picam2.create_still_configuration(main={"size": (256, 256)})
# Fixed exposure time in microseconds.
picam2.set_controls({"ExposureTime": 5000})
picam2.configure(camera_config)
# picam2.start_preview(Preview.DRM)
picam2.start()
# Give the sensor time to settle before the first capture.
time.sleep(2)
# picam2.capture_file("test.jpg")
app = Flask(__name__)
# Monotonic index for stored inference files (see /store_inference).
counter = 0
# Simulated raw_image (you should replace this with your own captured image)
# For demonstration purposes, we'll assume it's already a PIL Image.
# raw_image = Image.new("RGB", (640, 480), "white")
@app.route('/')
def index():
    """Health-check endpoint confirming the server is up."""
    status_message = 'Camera Server is running!'
    return status_message
@app.route('/capture', methods=['GET'])
def capture():
    """Capture a frame from the camera, flip it vertically, and return it as JPEG.

    Returns:
        The JPEG image on success, or the error text with HTTP 500 on failure.
        (Fix: the original returned errors with an implicit 200 status, making
        failures indistinguishable from a successful image response.)
    """
    try:
        raw_image = picam2.capture_image("main")
        # Flip the image vertically (sensor orientation correction).
        flipped_image = raw_image.transpose(Image.FLIP_TOP_BOTTOM)
        # Unique filename per capture based on the current timestamp.
        # NOTE(review): these files are never deleted, so the working
        # directory grows with every request — consider a cleanup policy.
        filename = f"photo_{int(time.time())}.jpeg"
        # Save the flipped image as a JPEG file
        flipped_image.save(filename, "JPEG")
        # Send the image to the client
        return send_file(filename, mimetype='image/jpeg')
    except Exception as e:
        # Report the failure explicitly instead of returning 200 with error text.
        return str(e), 500
@app.route('/store_inference', methods=['POST'])
def store_inference():
    """Persist a posted inference (text + image reference) as a numbered JSON file.

    Expects a JSON body with 'infer_text' and 'infer_img' keys and writes
    ./inferences/inference_<n>.json, where <n> is a process-wide counter.

    Returns:
        JSON message naming the file that was written.
    """
    global counter
    data = request.get_json()
    print(data)
    infer_text = data.get('infer_text', '')
    infer_img = data.get('infer_img', '')
    # Increment the counter
    counter += 1
    # Create the JSON payload
    inference_json = {
        'infer_text': infer_text,
        'infer_img': infer_img
    }
    # Save the JSON to a file
    filename = f'inference_{counter}.json'
    file_path = os.path.join('./inferences', filename)  # Replace with your desired directory
    with open(file_path, 'w') as f:
        json.dump(inference_json, f)
    # Fix: the success message previously contained the literal text
    # "(unknown)" instead of interpolating the saved filename.
    return jsonify({'message': f'Inference stored successfully. Saved as {filename}'})
@app.route('/get_inference', methods=['GET'])
def get_inference():
    """Return a previously stored inference JSON document, looked up by its index."""
    index = request.args.get('index', type=int)
    # Reconstruct the filename that /store_inference used for this index.
    filename = f'inference_{index}.json'
    file_path = os.path.join("./inferences", filename)
    # Guard clause: unknown index -> 404 with an explanatory message.
    if not os.path.exists(file_path):
        return jsonify({'error': f'Inference file for index {index} not found.'}), 404
    with open(file_path, 'r') as f:
        inference_data = json.load(f)
    return jsonify(inference_data)
# Listen on all interfaces so other devices on the LAN can reach the camera.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8000)
| felzmatt/visiope-project | raspberry-stack/sensor-server/main.py | main.py | py | 2,697 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "picamera2.Picamera2",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image.FLIP_TOP_BOTTOM",... |
31518451112 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import base64
import hmac
import hashlib
import json
from urllib import parse
from urllib import request
from datetime import datetime
# HTTP timeout, in seconds:
TIMEOUT = 5
API_HOST = 'be.huobi.com'
SCHEME = 'https'
# language setting: 'zh-CN', 'en':
LANG = 'zh-CN'
# Default headers for GET requests (JSON responses expected).
DEFAULT_GET_HEADERS = {
    'Accept': 'application/json',
    'Accept-Language': LANG
}
# Default headers for POST requests (JSON request and response bodies).
DEFAULT_POST_HEADERS = {
    'Content-Type': 'application/json',
    'Accept': 'application/json',
    'Accept-Language': LANG
}
class Dict(dict):
    """A dict whose entries can also be read and written as attributes."""

    def __init__(self, **kw):
        """Initialise from keyword arguments, exactly like dict(**kw)."""
        super().__init__(**kw)

    def __getattr__(self, key):
        """Map attribute reads onto item lookup; missing keys raise AttributeError."""
        if key in self:
            return self[key]
        raise AttributeError(r"'Dict' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        """Map attribute writes onto item assignment."""
        self[key] = value
def _toDict(d):
    """Adapter for json.loads' object_hook: wrap a plain mapping in a Dict."""
    return Dict(**d)
class ApiError(Exception):
    """Raised when the Huobi API answers with a non-ok status (err-code: err-msg).

    Fix: derives from Exception instead of BaseException (PEP 8), so generic
    `except Exception` handlers and tooling treat it as an ordinary error.
    """
    pass
class ApiNetworkError(Exception):
    """Raised on transport-level failures (non-200 HTTP response from the API host).

    Fix: derives from Exception instead of BaseException (PEP 8), so generic
    `except Exception` handlers and tooling treat it as an ordinary error.
    """
    pass
class ApiClient(object):
    """Minimal signed HTTP client for the Huobi REST API.

    Every request is authenticated with an HmacSHA256 signature computed
    over the method, host, path and a sorted query string (see _sign).
    """
    def __init__(self, appKey, appSecret, host=API_HOST):
        '''
        Init api client object, by passing appKey and appSecret.
        '''
        self._accessKeyId = appKey
        self._accessKeySecret = appSecret.encode('utf-8') # change to bytes
        self._host = host
    def get(self, path, **params):
        '''
        Send a http get request and return json object.
        '''
        qs = self._sign('GET', path, self._utc(), params)
        return self._call('GET', '%s?%s' % (path, qs))
    def post(self, path, obj=None):
        '''
        Send a http post request and return json object.
        '''
        # POST signs only the auth parameters; the JSON body is not signed.
        qs = self._sign('POST', path, self._utc())
        data = None
        if obj is not None:
            data = json.dumps(obj).encode('utf-8')
        return self._call('POST', '%s?%s' % (path, qs), data)
    def _call(self, method, uri, data=None):
        # Perform the HTTP request; raises ApiNetworkError on non-200 responses.
        url = '%s://%s%s' % (SCHEME, self._host, uri)
        # print(method + ' ' + url)
        req = request.Request(url, data=data, headers=DEFAULT_GET_HEADERS if method=='GET' else DEFAULT_POST_HEADERS, method=method)
        with request.urlopen(req, timeout=TIMEOUT) as resp:
            if resp.getcode()!=200:
                raise ApiNetworkError('Bad response code: %s %s' % (resp.getcode(), resp.reason))
            return self._parse(resp.read())
    def _parse(self, text):
        # Decode the JSON envelope; 'ok' status unwraps .data, anything else
        # is surfaced as ApiError with the server's err-code/err-msg.
        # print('Response:\n%s' % text)
        result = json.loads(text, object_hook=_toDict)
        if result.status=='ok':
            return result.data
        raise ApiError('%s: %s' % (result['err-code'], result['err-msg']))
    def _sign(self, method, path, ts, params=None):
        # Build the signed query string. The signature covers
        # "<METHOD>\n<host>\n<path>\n<sorted query string>" with HMAC-SHA256.
        # NOTE(review): self._method is stored but never read elsewhere in
        # this class — appears to be leftover state.
        self._method = method
        # create signature:
        if params is None:
            params = {}
        params['SignatureMethod'] = 'HmacSHA256'
        params['SignatureVersion'] = '2'
        params['AccessKeyId'] = self._accessKeyId
        params['Timestamp'] = ts
        # sort by key:
        keys = sorted(params.keys())
        # build query string like: a=1&b=%20&c=:
        qs = '&'.join(['%s=%s' % (key, self._encode(params[key])) for key in keys])
        # build payload:
        payload = '%s\n%s\n%s\n%s' % (method, self._host, path, qs)
        # print('payload:\n%s' % payload)
        dig = hmac.new(self._accessKeySecret, msg=payload.encode('utf-8'), digestmod=hashlib.sha256).digest()
        sig = self._encode(base64.b64encode(dig).decode())
        # print('sign: ' + sig)
        qs = qs + '&Signature=' + sig
        return qs
    def _utc(self):
        # Current UTC timestamp in the API's expected format.
        # NOTE(review): datetime.utcnow() is deprecated in recent Python;
        # datetime.now(timezone.utc) is the modern equivalent.
        return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
    def _encode(self, s):
        # Percent-encode a value with no characters treated as safe.
        return parse.quote(s, safe='')
class Huobi_ETH_Client():
    """High-level trading client for the ETH/CNY market on Huobi.

    Wraps ApiClient with account, balance, order and transfer helpers.
    NOTE(review): API_KEY and API_SECRET are not defined anywhere in this
    file — instantiation will raise NameError unless they are injected
    into the module namespace before use; verify against the caller.
    """
    def __init__(self, ):
        self.client = ApiClient(API_KEY, API_SECRET)
        # Uses the first account returned by the API as the working account.
        self.account_id = self._getAccountID()
    # --- account / balance queries ---
    def getSymbols(self):
        logging.info('Getting symbols for client:')
        return self.client.get('/v1/common/symbols')
    def getUserInfo(self):
        logging.info('Getting user info for client:')
        return self.client.get('/v1/users/user')
    def getAllAccounts(self):
        logging.info('Getting accounts for client:')
        return self.client.get('/v1/account/accounts')
    def getETHBalance(self):
        # Return the tradeable ETH balance of the working account.
        balanceList = self.client.get('/v1/account/accounts/%s/balance' % self.account_id).list
        for line in balanceList:
            if line.currency=='eth' and line.type=='trade':
                return line.balance
        raise BaseException('ETH balance not found in account! Check ETH account!')
    def getCNYBalance(self):
        # Return the tradeable CNY balance of the working account.
        balanceList = self.client.get('/v1/account/accounts/%s/balance' % self.account_id).list
        for line in balanceList:
            if line.currency=='cny' and line.type=='trade':
                return line.balance
        raise BaseException('CNY balance not found in account! Check ETH account!')
    def printBalanceRaw(self):
        # Dump every sub-account's balance payload to stdout.
        accs = self.getAllAccounts()
        logging.info('All Accounts: ')
        logging.info(accs)
        logging.info('Getting balance for client:')
        for acc in accs:
            logging.info('Getting sub account: %s' % acc)
            subaccs = self.client.get('/v1/account/accounts/%s/balance' % acc.id)
            print(subaccs)
    def getBalance(self):
        # Collect each sub-account's balance payload into a list.
        res = []
        accs = self.getAllAccounts()
        logging.info('All Accounts: ')
        logging.info(accs)
        logging.info('Getting balance for client:')
        for acc in accs:
            logging.info('Getting sub account: %s' % acc)
            subaccs = self.client.get('/v1/account/accounts/%s/balance' % acc.id)
            res.append(subaccs)
        return res
    def printBalance(self):
        # Pretty-print a balance table.
        # NOTE(review): only the balance fetched in the last loop iteration
        # (subaccs) is printed — presumably intentional for a single-account
        # setup; verify if multiple accounts exist.
        accs = self.getAllAccounts()
        logging.info('All Accounts: ')
        logging.info(accs)
        logging.info('Getting balance for client:')
        account_id = accs[0].id
        for acc in accs:
            logging.info('Getting sub account: %s' % acc)
            subaccs = self.client.get('/v1/account/accounts/%s/balance' % acc.id)
        self._br()
        print('Account ID: %s' % account_id)
        print('#\tCurrency\tType\t\tBalance')
        for i, currency in enumerate(subaccs.list):
            print('%d\t%s\t\t%s\t\t%s' % (i+1, currency.currency, currency.type, currency.balance))
        self._br()
    def _getAccountID(self):
        # First account id returned by the API.
        return self.getAllAccounts()[0].id
    def _br(self):
        # Visual separator for console output.
        print('\n' + '-'*50 + '\n')
    # --- order queries (filtered by state) ---
    def getSubmittedOrders(self):
        return self._getOrders('submitted')
    def printSubmittedOrders(self):
        logging.info('Getting submitted orders:')
        order_info = self.getSubmittedOrders()
        self._printOrders(order_info, title='ALL SUBMITTED ORDERS')
    def getCurrentOrders(self):
        return self._getOrders('submitted,partial-filled,partial-canceled')
    def printCurrentOrders(self):
        logging.info('Getting current orders:')
        order_info = self.getCurrentOrders()
        self._printOrders(order_info, title='CURRENT ORDERS')
    def getAllValidOrders(self):
        return self._getOrders('submitted,partial-filled,partial-canceled,filled,canceled')
    def printAllValidOrders(self):
        logging.info('Getting all valid orders:')
        order_info = self.getAllValidOrders()
        self._printOrders(order_info, title='ALL VALID ORDERS')
    def getFilledOrders(self):
        return self._getOrders('filled')
    def getAllOrders(self):
        return self._getOrders()
    def _getOrders(self, types='pre-submitted,submitted,partial-filled,partial-canceled,filled,canceled'):
        # Query ethcny orders whose state is in the comma-separated list.
        return self.client.get('/v1/order/orders', symbol='ethcny', states=types)
    def printAllOrders(self):
        logging.info('Getting all orders:')
        order_info = self.getAllOrders()
        self._printOrders(order_info, title='ALL ORDERS')
    def _printOrders(self, order_info, title=''):
        # Render orders as an aligned console table; timestamps arrive in
        # milliseconds, hence the /1000 before datetime.fromtimestamp.
        self._br()
        print(' ' + '~'*10 + ''.join(title) + '~'*10 +'\n')
        print('  # Order\t     Amount\t  Price\t     Create Time     Type    Field-Amount     Field-Cash     Field-Fees        Finished Time     Source     State     Cancelled at')
        for i, line in enumerate(order_info):
            # print(line)
            print('%3d %d\t%s\t%15s\t %s \t%10s\t%15s\t%15s\t%15s\t %s\t %s \t%s\t%s' % (
                i+1,
                line.id,
                line.amount,
                line.price,
                datetime.fromtimestamp(line['created-at']/1000).strftime('%Y-%m-%d %H:%M:%S'),
                line.type,
                line['field-amount'],
                line['field-cash-amount'],
                line['field-fees'],
                datetime.fromtimestamp(line['finished-at']/1000).strftime('%Y-%m-%d %H:%M:%S'),
                line.source,
                line.state,
                '' if 0==line['canceled-at'] else datetime.fromtimestamp(line['canceled-at']/1000).strftime('%Y-%m-%d %H:%M:%S')
            ))
        self._br()
    # --- order creation / cancellation ---
    def buy_ETH_limit(self):
        pass
    def createOrder(self, amount, price, direction):
        # direction is the API order type, e.g. 'buy-limit' / 'sell-limit'.
        order_id = self.client.post('/v1/order/orders', {
            'account-id': self.account_id,
            'amount': amount,
            'price': price,
            'symbol': 'ethcny',
            'type': direction,
            'source': 'api'
        })
        logging.info('Printing order_id:')
        logging.info(order_id)
        return order_id
    def placeOrder(self, order_id):
        self.client.post('/v1/order/orders/%s/place' % order_id)
    def printOrderDetails(self, order_id):
        order_info = self.client.get('/v1/order/orders/%s' % order_id)
        self._printOrders([order_info], title='ORDER DETAIL of ORDER # %s' % order_id)
    def getOrderStatus(self, order_id):
        return self.client.get('/v1/order/orders/%s' % order_id).state
    def getOrderDetail(self, order_id):
        return self.client.get('/v1/order/orders/%s' % order_id)
    def isOrderSuccess(self, order_id):
        orderStatus = self.getOrderStatus(order_id)
        return orderStatus == 'filled'
    def isOrderCancelled(self, order_id):
        orderStatus = self.getOrderStatus(order_id)
        return orderStatus == 'canceled'
    def cancelOrder(self, order_id):
        return self.client.post('/v1/order/orders/%s/submitcancel' % order_id)
    def cancelAllOrders(self):
        # Cancel every order currently in a cancellable state.
        logging.info('Canelling all current orders:')
        self.printCurrentOrders()
        orders = self.getCurrentOrders()
        for order in orders:
            order_id = order.id
            logging.info('Cancelling order # %d' % order_id)
            self.cancelOrder(order_id)
        logging.info('All orders cancelled!')
    # --- CNY transfers between the ETH sub-account and the main account ---
    def getWithdrawAddress(self):
        return self.client.get('/v1/dw/withdraw-legal/addresses', currency='cny')
    def create_transfer_cny_to_main(self, amount):
        withdraw_id = self.client.post('/v1/dw/withdraw-legal/create', {
            'account-id': self.account_id,
            'amount': amount,
            'currency': 'cny',
        })
        print('Printing CNY_withdraw_id:')
        print(withdraw_id)
        return withdraw_id
    def place_transfer_cny_to_main(self, withdraw_id):
        return self.client.post('/v1/dw/withdraw-legal/%s/place' % withdraw_id)
    def transfer_cny_to_main(self, amount):
        # amount is a string; at most 2 decimal places are accepted.
        if '.' in amount and len(amount.split('.')[1]) > 2:
            raise BaseException('CNY transfer amount: Decimal part should be no more than 2-digits!')
        if float(self.getCNYBalance()) < float(amount):
            raise BaseException('Not enough CNY balance (in ETH account) to transfer!')
        transfer_id = self.create_transfer_cny_to_main(amount)
        return self.place_transfer_cny_to_main(transfer_id)
    def get_transferable_cny_from_main(self):
        return self.client.get('/v1/dw/deposit-legal/balance', currency='cny')
    def create_transfer_cny_from_main(self, amount):
        withdraw_id = self.client.post('/v1/dw/deposit-legal/create', {
            'account-id': self.account_id,
            'amount': amount,
            'currency': 'cny',
        })
        print('Printing CNY_deposit_id: %s ' % withdraw_id)
        return withdraw_id
    def place_transfer_cny_from_main(self, withdraw_id):
        return self.client.post('/v1/dw/deposit-legal/%s/place' % withdraw_id)
    def cancel_transfer_cny_from_main(self, withdraw_id):
        # INVALID
        return self.client.post('/v1/dw/deposit-legal/%s/submitcancel' % withdraw_id)
    def cancel_transfer_cny_to_main(self, withdraw_id):
        # INVALID
        return self.client.post('/v1/dw/withdraw-legal/%s/cancel' % withdraw_id)
    def get_financial_history(self):
        return self.client.get('/v1/query/finances')
    def print_financial_history(self):
        history = self.get_financial_history()
        for transaction in history:
            print(transaction)
    def transfer_cny_from_main(self, amount):
        if float(self.get_transferable_cny_from_main()) < float(amount):
            raise BaseException('Not enough CNY balance (in main account) to transfer!')
        transfer_id = self.create_transfer_cny_from_main(amount)
        return self.place_transfer_cny_from_main(transfer_id)
    # --- ETH withdrawals ---
    def get_eth_withdraw_addresses(self):
        addresses = self.client.get('/v1/dw/withdraw-virtual/addresses', currency='eth')
        logging.info('Printing addresses:')
        logging.info(addresses)
        return addresses
    def withdraw_eth_create(self, address_id='', amount=''):
        # INVALID
        # NOTE(review): self.TRADE_PW is never assigned in this class —
        # calling this method will raise AttributeError.
        withdraw_id = self.client.post('/v1/dw/withdraw-virtual/create', {
            'address-id': address_id,
            'amount': amount,
            'trade-password': self.TRADE_PW # needs update here, trade pw is not supported by server and will return error
        })
        logging.info('Printing withdraw_id:')
        logging.info(withdraw_id)
        return withdraw_id
    def withdraw_eth_place(self, withdraw_id):
        status = self.client.post('/v1/dw/withdraw-virtual/%s/place' % withdraw_id)
        print('Withdraw ETH order placed.')
        logging.info('Printing withdraw status:')
        logging.info(status)
        return status
def main():
    """Manual test driver: instantiate the client, then uncomment a call to exercise it."""
    huobi_eth = Huobi_ETH_Client()
    # print(type(huobi_eth.getCNYBalance()))
    # print(huobi_eth.cancel_transfer_cny_to_main(withdraw_id='45833'))
    # huobi_eth.print_financial_history()
    # huobi_eth.transfer_cny_from_main(amount='1.0')
    # print(huobi_eth.transfer_cny_to_main(amount='0.02'))
    # print(huobi_eth.get_eth_withdraw_addresses())
    # print(huobi_eth.get_transferable_cny_from_main())
    # print(huobi_eth.transfer_cny_from_main('0.01'))
    # print(huobi_eth.getCNYBalance())
    # huobi_eth.getETHBalance()
    # print(huobi_eth.transfer_cny_to_main('0.03'))
    # transfer_id = huobi_eth.create_transfer_cny_to_main('0.02')
    # print(huobi_eth.place_transfer_cny_to_main(transfer_id))
    # print(huobi_eth.getWithdrawAddress())
    # print(huobi_eth.getSymbols())
    # print(huobi_eth.getUserInfo())
    # print(huobi_eth.getAllAccounts())
    # huobi_eth.printBalance()
    # huobi_eth.printSubmittedOrders()
    # huobi_eth.printAllValidOrders()
    # huobi_eth.printAllOrders()
    # orderid = huobi_eth.createOrder(huobi_eth.account_id(), '0.001', '1600.0', 'sell-limit')
    # huobi_eth.placeOrder(orderid)
    # huobi_eth.cancelAllOrders()
if __name__ == '__main__':
    main()
| szhu3210/Arbitrage-trader | legacy/huobi_eth_client.py | huobi_eth_client.py | py | 15,614 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "urllib.request.Request",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "urllib.request.urlop... |
#!/usr/bin/env python
import coliche, os
import bn.bn
def bn2dot(bnfile, outfile, vdfile=None, loners=False, center=None,
           awfile=None):
    """Write a Graphviz 'dot' rendering of a Bayesian network.

    bnfile  : path to a network structure readable by bn.bn.load
    outfile : destination path; pass None to get the dot text returned instead
    vdfile  : optional tab-separated file whose first column gives variable names
    loners  : also emit nodes that have no drawn arc
    center  : variable index or name; restrict drawing to its Markov blanket
    awfile  : optional whitespace-separated "x y weight" file for arc labels
    """
    # give None to outfile to get string back
    dotbuffer = []
    bns = bn.bn.load(bnfile, False)
    varc = bns.varc
    arcs = bns.arcs()
    # Variable names: first tab-separated field per line, else plain indices.
    # Fix: the original used the Python 2 file() builtin and a lazy map()
    # object (which has no index()); both break under Python 3.
    if vdfile:
        with open(vdfile) as vdf:
            names = [l.split("\t", 1)[0] for l in vdf]
    else:
        names = [str(i) for i in range(varc)]
    # Start with every variable as a potential "loner"; drawn arcs remove them.
    # Fix: range() is immutable in Python 3, so materialize a list.
    lonerset = list(range(varc))
    if center:
        try:
            center = int(center)
        except ValueError:
            # not a numeric index -> resolve the variable name
            center = names.index(center)
    showvars = bns.mbnodes(center) if center is not None else set(range(varc))
    # Optional arc weights keyed by (source, target).
    aws = {}
    if awfile is not None:
        with open(awfile) as awf:
            for l in awf:
                t = l.split()
                x, y = map(int, t[0:2])
                w = float(t[2])
                aws[(x, y)] = w
    dotbuffer.append("digraph BN {")
    for x, y in arcs:
        if x in showvars and y in showvars:
            wstr = ''
            if (x, y) in aws:
                wstr = ' [label="%.2g"]' % aws[(x, y)]
            nx, ny = names[x], names[y]
            dotbuffer.append(' "%s" -> "%s"%s;' % (nx, ny, wstr))
            if x in lonerset:
                lonerset.remove(x)
            if y in lonerset:
                lonerset.remove(y)
    if loners:
        for l in sorted(lonerset):
            if l in showvars:
                dotbuffer.append('"%s";' % names[l])
    dotbuffer.append("}")
    dotstr = '\n'.join(dotbuffer)
    if outfile:
        # Fix: file() builtin removed in Python 3; also closes the handle.
        with open(outfile, "w") as out:
            out.write(dotstr)
    else:
        return dotstr
# Command-line entry point: coliche parses the usage string into an
# argument parser and dispatches the parsed arguments to bn2dot.
if __name__ == "__main__":
    coliche.che(bn2dot,
    """bnfile; outfile
    -n --vdfile vdfile : to add names to the picture
    -l --loners : show orphan nodes too
    --mb center : draw only markov blanket of center
    -w --arcweights awfile : draw arcweights
    """)
| tomisilander/bn | bn/util/bn2dot.py | bn2dot.py | py | 1,917 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "bn.bn.bn.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "bn.bn.bn",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "bn.bn",
"line_number": 10,
"usage_type": "name"
}
] |
39776686649 | import sqlite3
import pandas as pd
import urllib.request
import xml.etree.ElementTree as et
from installations_type_json import Installation_type_json
from installations_type_xml import Installation_type_xml
from installation import Instalation
from recherche import Recherche
from patinoire import Patinoire
from glissade import Glissade
from piscine import Piscine
class Database():
    """SQLite-backed store for Montréal recreation installations.

    Downloads open data (pools as CSV, slides and rinks as XML), loads it
    into db/database.db, and exposes query helpers returning domain objects.
    """
    def __init__(self):
        # Lazily-opened sqlite3 connection (see get_connection).
        self.connection = None
    def get_connection(self):
        # Open the connection on first use and reuse it afterwards.
        if self.connection is None:
            self.connection = sqlite3.connect('db/database.db')
        return self.connection
    def disconnect(self):
        # Close the connection if it was ever opened.
        if self.connection is not None:
            self.connection.close()
    # Fetch pool data as CSV and store it in the 'piscines' table
    def donnees_piscines(self):
        colonnes_piscines = ['id_uev', 'type', 'nom', 'nom_arr', 'adresse',
                             'propriete', 'gestion', 'point_x', 'point_y',
                             'equipement', 'long', 'lat']
        piscines = pd.read_csv('https://data.montreal.ca/dataset/4604afb7-a7'
                               'c4-4626-a3ca-e136158133f2/resource/cbdca706-'
                               '569e-4b4a-805d-9af73af03b14/download/piscin'
                               'es.csv',
                               names=colonnes_piscines, header=None,
                               skiprows=[0])
        piscines.sort_values(by=['nom'])
        piscines.to_sql('piscines', self.get_connection(),
                        if_exists='replace', index=False)
    # Fetch slide (glissade) data as XML and store it in the 'glissades' table
    def donnees_glissades(self):
        with urllib.request.urlopen('http://www2.ville.montreal.qc.ca/'
                                    'services_citoyens/pdf_transfert/'
                                    'L29_GLISSADE.xml') as url:
            donnes_xml_glissades = et.parse(url)
        glissades_ = donnes_xml_glissades.getroot()
        colonnes = ['nom', 'nom_arr', 'cle', 'date_maj',
                    'ouvert', 'deblaye', 'condition']
        lignes = []
        for glissade in glissades_:
            nom_ = glissade.find('nom').text
            nom_arr_ = glissade.find('arrondissement').find('nom_arr').text
            cle_ = glissade.find('arrondissement').find('cle').text
            date_maj_ = glissade.find('arrondissement'
                                      ).find('date_maj').text
            ouvert_ = glissade.find('ouvert').text
            deblaye_ = glissade.find('deblaye').text
            condition_ = glissade.find('condition').text
            # Normalise whitespace and spaced hyphens in the names.
            nom_arr_ = " ".join(nom_arr_.split()).replace(" - ", "-")
            nom_ = " ".join(nom_.split()).replace(" - ", "-")
            lignes.append({'nom': (nom_), 'nom_arr': nom_arr_, 'cle': cle_,
                           'date_maj': date_maj_, 'ouvert': ouvert_,
                           'deblaye': deblaye_, 'condition': condition_})
        glissades = pd.DataFrame(lignes, columns=colonnes)
        glissades.sort_values(by=['nom'])
        glissades.to_sql('glissades', self.get_connection(),
                         if_exists='replace', index=False)
    # Fetch skating-rink (patinoire) data as XML and store it in two tables:
    # 'patinoires' (rink name + borough) and 'conditions' (per-rink,
    # per-date condition records).
    def donnees_patinoires(self):
        with urllib.request.urlopen('https://data.montreal.ca/dataset/'
                                    '225ac315-49fe-476f-95bd-a1ce1648a98c/'
                                    'resource/5d1859cc-2060-4def-903f-db24408'
                                    'bacd0/download/l29-patinoire.xml') as url:
            donnees_xml_patinoires = et.parse(url)
        patinoires_ = donnees_xml_patinoires.getroot()
        colonnes_patinoires_ = ['nom_arr', 'nom_pat']
        collones_conditions_ = ['nom_pat', 'date_heure', 'ouvert',
                                'deblaye', 'arrose', 'resurface']
        lignes = []
        lignes_conditions = []
        for patinoire in patinoires_:
            nom_arr_ = patinoire.find('nom_arr').text
            nom_pat_ = patinoire.find('patinoire').find('nom_pat').text
            # Normalise whitespace and spaced hyphens in the names.
            nom_arr_ = " ".join(nom_arr_.split()).replace(" - ", "-")
            nom_pat_ = " ".join(nom_pat_.split()).replace(" - ", "-")
            for condition in patinoire.find('patinoire'
                                            ).findall('condition'):
                date_heure_ = condition.find('date_heure').text
                ouvert_ = condition.find('ouvert').text
                deblaye_ = condition.find('deblaye').text
                arrose_ = condition.find('arrose').text
                resurface_ = condition.find('resurface').text
                lignes_conditions.append({'nom_pat': nom_pat_,
                                          'date_heure': date_heure_,
                                          'ouvert': ouvert_,
                                          'deblaye': deblaye_,
                                          'arrose': arrose_,
                                          'resurface': resurface_})
            lignes.append({'nom_arr': nom_arr_, 'nom_pat': nom_pat_})
        patinoires = pd.DataFrame(lignes, columns=colonnes_patinoires_)
        conditions = pd.DataFrame(lignes_conditions,
                                  columns=collones_conditions_)
        patinoires.sort_values(by=['nom_pat'])
        patinoires.to_sql('patinoires', self.get_connection(),
                          if_exists='replace', index=False)
        conditions.to_sql('conditions', self.get_connection(),
                          if_exists='replace', index=False)
    # Add a type_installation column to the piscines table
    def add_type_piscines(self):
        cursor = self.get_connection()
        cursor.execute('ALTER TABLE piscines ADD COLUMN '
                       'type_installation DEFAULT "piscine" ')
        cursor.commit()
    # Add a type_installation column to the glissades table
    def add_type_glissades(self):
        cursor = self.get_connection()
        cursor.execute('ALTER TABLE glissades ADD COLUMN '
                       'type_installation DEFAULT "glissade" ')
        cursor.commit()
    # Add a type_installation column to the patinoires table
    def add_type_patinoires(self):
        cursor = self.get_connection()
        cursor.execute('ALTER TABLE patinoires ADD COLUMN '
                       'type_installation DEFAULT "patinoire" ')
        cursor.commit()
    # Populate the database with a type_installation column on every
    # table, using the fetch/alter helpers above
    def get_data_from_requests(self):
        self.donnees_piscines()
        self.add_type_piscines()
        self.donnees_glissades()
        self.add_type_glissades()
        self.donnees_patinoires()
        self.add_type_patinoires()
    # Return all installations of a given borough (arrondissement)
    def get_installations(self, arrondissement):
        cursor = self.get_connection().cursor()
        cursor.execute('SELECT nom FROM glissades WHERE nom_arr=?',
                       (arrondissement,))
        glissades = cursor.fetchall()
        cursor.execute('SELECT nom_pat FROM patinoires WHERE nom_arr=?',
                       (arrondissement,))
        patinoires = cursor.fetchall()
        cursor.execute('SELECT nom FROM piscines WHERE nom_arr=?',
                       (arrondissement,))
        piscines = cursor.fetchall()
        return (Instalation(arrondissement, piscines, glissades, patinoires))
    # Return every slide as a generator of Glissade objects
    def get_glissades(self):
        cursor = self.get_connection().cursor()
        cursor.execute('SELECT * FROM glissades')
        glissades = cursor.fetchall()
        # NOTE(review): the last field is wrapped in a list here but passed
        # bare in get_glissade below — confirm which form Glissade expects.
        return (Glissade(glissade[0], glissade[1], glissade[2],
                         glissade[3], glissade[4], glissade[5],
                         glissade[6], [glissade[7]])
                for glissade in glissades)
    # Return every installation of type glissade, piscine and patinoire
    def get_all_installations(self):
        cursor = self.get_connection().cursor()
        cursor.execute('SELECT nom, type_installation FROM glissades')
        glissades = cursor.fetchall()
        cursor.execute('SELECT nom_pat, type_installation FROM patinoires')
        patinoires = cursor.fetchall()
        cursor.execute('SELECT nom, type_installation FROM piscines')
        piscines = cursor.fetchall()
        installations = glissades + patinoires + piscines
        return (Recherche(installation[0], installation[1])
                for installation in installations)
    # Return the slide matching a given name, or None
    def get_glissade(self, nom):
        cursor = self.get_connection().cursor()
        cursor.execute('SELECT * FROM glissades WHERE nom=?', (nom,))
        glissade = cursor.fetchone()
        if glissade is None:
            return None
        else:
            return (Glissade(glissade[0], glissade[1], glissade[2],
                             glissade[3], glissade[4], glissade[5],
                             glissade[6], glissade[7]))
    # Delete the slide matching a given name
    def delete_glissade(self, nom):
        cursor = self.get_connection()
        cursor.execute('DELETE FROM glissades WHERE nom=?', (nom,))
        cursor.commit()
    # Update a slide; expects a 7-tuple ordered to match the
    # placeholders of the SQL statement below (name last)
    def update_glisade(self, glissade):
        cursor = self.get_connection()
        sql_request = '''UPDATE glissades
                    SET nom_arr = ? ,
                        cle = ? ,
                        date_maj = ? ,
                        ouvert = ? ,
                        deblaye = ? ,
                        condition = ?
                    WHERE nom = ?'''
        cursor.execute(sql_request, (glissade))
        # NOTE(review): leftover debug print with no arguments.
        print()
        cursor.commit()
    # Return all known information about the slide, pool or rink
    # matching the given name and type
    def get_info_installation(self, nom, type):
        cursor = self.get_connection().cursor()
        if type == 'piscine':
            cursor.execute('SELECT * FROM piscines WHERE nom=?', (nom,))
            piscines = cursor.fetchone()
            if piscines is None:
                return 'aucune piscine'
            else:
                # NOTE(review): column index 3 is skipped here —
                # presumably intentional; verify against Piscine's signature.
                return (Piscine(piscines[0], piscines[1], piscines[2],
                                piscines[4], piscines[5], piscines[6],
                                piscines[7]))
        elif type == 'glissade':
            cursor.execute('SELECT * FROM glissades WHERE nom=?', (nom,))
            glissades = cursor.fetchone()
            if glissades is None:
                return 'aucune glissade'
            else:
                return (Glissade(glissades[0], glissades[1], glissades[2],
                                 glissades[3], glissades[4], glissades[5],
                                 glissades[6], glissades[7]))
        elif type == 'patinoire':
            cursor.execute('SELECT * FROM conditions WHERE nom_pat=?', (nom,))
            patinoires = cursor.fetchall()
            if patinoires is None:
                return 'aucune patinoire'
            else:
                return (Patinoire(patinoire) for patinoire in patinoires)
    # Return every installation serialized in the requested
    # format (json or xml)
    def get_installation_types(self, type):
        cursor = self.get_connection().cursor()
        cursor.execute('SELECT * FROM glissades')
        glissades = cursor.fetchall()
        cursor.execute('SELECT * FROM patinoires')
        patinoires = cursor.fetchall()
        cursor.execute('SELECT * FROM piscines')
        piscines = cursor.fetchall()
        inst = glissades + patinoires
        installations = piscines + inst
        if type == 'json':
            return (Installation_type_json(installation)
                    for installation in installations)
        elif type == 'xml':
            return (Installation_type_xml(installations))
    # Return every installation as rows suitable for CSV output
    def get_installation_type_csv(self):
        cursor = self.get_connection().cursor()
        cursor.execute('SELECT nom, nom_arr, type_installation FROM glissades')
        glissades = cursor.fetchall()
        cursor.execute('SELECT nom_pat, nom_arr, '
                       'type_installation FROM patinoires')
        patinoires = cursor.fetchall()
        cursor.execute('SELECT nom, nom_arr, type_installation FROM piscines')
        piscines = cursor.fetchall()
        inst = glissades + patinoires
        installations = piscines + inst
        return (installations)
| alioudiallo224/Restfull-API-in-python | database.py | database.py | py | 13,259 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "urllib... |
73171895785 | """
Date: 2019.07.04
Programmer: DH
Description: About System Manager Report Generator
"""
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
class ReportGenerator:
"""
To get information about age, emotion, factor from data_set, and
make a chart from data_set.
"""
    def __init__(self):
        # Stateless helper class; nothing to initialise.
        pass
def get_age_avg(self, data_set, from_date, to_date, u_id=None):
'''
To compute age average
:param data_set: dataframe, the data of age values
:param from_date: String, the start date: “%Y-%m-%d”
:param to_date: String, the end date: “%Y-%m-%d”
:param u_id:int, user_id
:return:int, average of age
'''
df_date = self.__set_df_date(data_set, from_date, to_date)
df_u_id = self.__set_df_u_id(df_date, u_id)
return round(df_u_id['age'].mean())
def get_age_min(self, data_set, from_date, to_date, u_id=None):
'''
To compute minimum of age
'''
df_date = self.__set_df_date(data_set, from_date, to_date)
df_u_id = self.__set_df_u_id(df_date, u_id)
return df_u_id['age'].min()
def get_age_max(self, data_set, from_date, to_date, u_id=None):
'''
To compute maximum of age
'''
df_date = self.__set_df_date(data_set, from_date, to_date)
df_u_id = self.__set_df_u_id(df_date, u_id)
return df_u_id['age'].max()
def get_emotion_min(self, data_set, from_date, to_date, u_id=None):
'''
To compute minimum of the number of emotion
:param data_set: dataframe, the data of emotion values
:param from_date: String, the start date: “%Y-%m-%d”
:param to_date: String, the end date: “%Y-%m-%d”
:param u_id: int, user_id
:return: dict, emotion id, the number of min emotion
'''
df_value = self.get_rank_type(data_set, 'emotion', from_date, to_date, u_id)
# Get last value from ranked dataframe
emotion = df_value.at[len(df_value) - 1, 'emotion']
count = df_value.at[len(df_value) - 1, 'number']
return {emotion: count}
def get_emotion_max(self, data_set, from_date, to_date, u_id=None):
'''
To compute maximum of the number of emotion
:return: dict, emotion id, the number of max emotion
'''
df_value = self.get_rank_type(data_set, 'emotion', from_date, to_date, u_id)
# Get first value from ranked dataframe
emotion = df_value.at[0, 'emotion']
count = df_value.at[0, 'number']
return {emotion: count}
def get_factor_data(self, data_set, from_date, to_date, u_id=None, emotion_id=None):
'''
To compute each factors’ maximum, minimum, mean
:param data_set: dataframe, the dataframe of factor values
:param from_date: String, the start date: “%Y-%m-%d”
:param to_date: String, the end date: “%Y-%m-%d”
:param u_id: int, user_id
:param emotion_id: int, emotion_id
:return: dict, each factors’ maximum, minimum, mean
'''
dic_factor_data = {}
# To check emotion_id to loc data_set
if emotion_id is None:
df_emotion = data_set
else:
df_emotion = data_set.loc[data_set['emotion'] == emotion_id].reset_index()
del df_emotion['index']
df_date = self.__set_df_date(df_emotion, from_date, to_date)
df_u_id = self.__set_df_u_id(df_date, u_id)
# get factor columns from data_set
factor_list = [x for x in data_set.columns if 'f' in x]
# compute max, min, mean of each factor and save in dictionary
for i in factor_list:
tmp_data = df_u_id[i]
factor_data = {'max': tmp_data.max(),
'min': tmp_data.min(),
'mean': round(tmp_data.mean())
}
dic_factor_data[i] = factor_data
return dic_factor_data
def get_rank_type(self, data_set, types, from_date, to_date, u_id=None):
    """
    Rank the values of one categorical column by how often they occur.

    :param data_set: dataframe, the data of type values
    :param types: String, the column to rank (age, emotion, etc)
    :param from_date: String, the start date: "%Y-%m-%d"
    :param to_date: String, the end date: "%Y-%m-%d"
    :param u_id: int, user_id (None means all users)
    :return: DataFrame with columns [types, 'number', 'freq'], most
             frequent value first
    """
    df_in_range = self.__set_df_date(data_set, from_date, to_date)
    df_filtered = self.__set_df_u_id(df_in_range, u_id)
    # value_counts already yields counts in descending order.
    counts = df_filtered[types].value_counts(dropna=True)
    ranked = pd.DataFrame(counts).reset_index()
    ranked.columns = [types, 'number']
    # Relative frequency of each value over all counted rows.
    ranked['freq'] = ranked['number'] / ranked['number'].sum()
    return ranked
def make_chart(self, data_set, types, from_date, to_date, u_id=None, date_num=3, type_num=3):
    '''
    To make chart about an age, an emotion, each factor by recorded_date.

    :param data_set: dataFrame, the data of type values
    :param types: String, the type of chart (age, emotion, factor, etc)
    :param from_date: String, the start date: "%Y-%m-%d"
    :param to_date: String, the end date: "%Y-%m-%d"
    :param u_id: int, user_id (None means all users)
    :param date_num: int, the number of dates to show
    :param type_num: int, the number of data (age, emotion, factor) to show
    :return: PIL image of the chart, or None when fewer than type_num
             distinct values / date_num distinct dates exist in range
    '''
    df_u_id = self.__set_df_u_id(data_set, u_id)
    # Rank the values of `types` by frequency within the date window.
    df_ranked = self.get_rank_type(df_u_id, types, from_date, to_date)
    # Not enough distinct type values to fill the requested top-N chart.
    if df_ranked.empty or len(df_ranked[types]) < type_num:
        return None
    df_chart = self.__set_df_date(df_u_id, from_date, to_date)
    # rank_list = the top-N values of `types`; label_list = legend labels.
    rank_list = []
    label_list = []
    dic_emotion = {0:'Angry',1:'Disgust',2:'Fear',3:'Happy',4:'Sad',5:'Surprise',6:'Neutral'}
    for i in range(type_num):
        rank_list.append(df_ranked.at[i, types])
        if types == 'emotion':
            # Emotions are stored as ids; translate to a readable name.
            label = 'Top ' + str(i + 1) + '. ' + dic_emotion[rank_list[i]]
        else:
            label = 'Top ' + str(i + 1) + '. ' + str(rank_list[i])
        label_list.append(label)
    # Rank recorded_date by row count to pick the busiest dates.
    df_value_counts = pd.DataFrame(df_chart['recorded_date'].value_counts()).reset_index()
    df_value_counts.columns = ['date', 'number']
    if df_value_counts.empty or len(df_value_counts['date']) < date_num:
        return None
    # date_list = the date_num most frequent dates, in chronological order.
    date_list = []
    for i in range(date_num):
        date_list.append(df_value_counts.at[i, 'date'])
    date_list.sort()
    # rate_list holds one series per ranked value, laid out contiguously:
    # entries [date_num*i : date_num*(i+1)] belong to rank_list[i].
    rate_list = []
    for i in range(type_num):
        data = df_chart.loc[df_chart[types] == rank_list[i]]
        for j in range(date_num):
            count = len(data.loc[data['recorded_date'] == date_list[j]])
            if types == 'age':
                # For ages plot the share (%) of that age among the day's rows.
                size = len(df_chart.loc[df_chart['recorded_date'] == date_list[j]])
                rate_list.append(round(count / size * 100))
            else:
                rate_list.append(count)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for i in range(type_num):
        ax.plot(date_list, rate_list[date_num * i:date_num * (i + 1)], label=label_list[i])
    fig.legend()
    # NOTE(review): tick marks assume a 0-100 value range; raw counts above
    # 100 will plot beyond the labelled ticks -- confirm intended.
    y_axis = range(0, 101, 10)
    plt.yticks(y_axis)
    if types == 'age':
        plt.ylabel("Detected Rate (%)")
    else:
        plt.ylabel("Count Number")
    plt.xlabel("Date")
    # Render the figure into a buffer and hand it back as a PIL image.
    fig.canvas.draw()
    s, (width, height) = fig.canvas.print_to_buffer()
    im = Image.frombytes("RGBA", (width, height), s)
    return im
def make_factor_chart(self, data_set, func_types, from_date, to_date, u_id=None, date_num=3):
    '''
    To draw a chart of all factors' maximum or mean value by recorded_date.

    :param data_set: dataFrame, the dataframe of factor values
    :param func_types: String, aggregation to plot: "max" or "mean"
    :param from_date: String, the start date: "%Y-%m-%d"
    :param to_date: String, the end date: "%Y-%m-%d"
    :param u_id: int, user_id (None means all users)
    :param date_num: int, the number of dates to show
    :return: PIL image of the chart, or None when fewer than date_num
             distinct dates exist in range
    '''
    # Factor columns are identified by name: any column containing an 'f'.
    factor_list = [x for x in data_set.columns if 'f' in x]
    dic_emotion = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
    # If an emotion column exists, restrict the chart to the single most
    # frequent emotion (its id becomes the chart title).
    max_emotion = {}
    if 'emotion' in data_set.columns:
        max_emotion = self.get_emotion_max(data_set, from_date, to_date, u_id)
        df_emotion = data_set.loc[data_set['emotion'] == list(max_emotion.keys())[0]].reset_index()
        del df_emotion['index']
    else:
        df_emotion = data_set
    df_u_id = self.__set_df_u_id(df_emotion, u_id)
    df_chart = self.__set_df_date(df_u_id, from_date, to_date)
    # Rank recorded_date by row count to pick the busiest dates.
    df_value_counts = pd.DataFrame(df_chart['recorded_date'].value_counts()).reset_index()
    df_value_counts.columns = ['date', 'number']
    if df_value_counts.empty or len(df_value_counts['date']) < date_num:
        return None
    # date_list = the date_num most frequent dates, in chronological order.
    date_list = []
    for i in range(date_num):
        date_list.append(df_value_counts.at[i, 'date'])
    date_list.sort()
    # list_value holds one series per factor, laid out contiguously:
    # entries [date_num*i : date_num*(i+1)] belong to factor_list[i].
    list_value = []
    for i in factor_list:
        for j in range(date_num):
            df_tmp = df_chart.loc[df_chart['recorded_date'] == date_list[j]]
            if func_types == 'max':
                max_value = df_tmp[i].max()
                list_value.append(max_value)
            elif func_types == 'mean':
                mean_value = round(df_tmp[i].mean())
                list_value.append(mean_value)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for i in range(len(factor_list)):
        ax.plot(date_list, list_value[date_num * i:date_num * (i + 1)], label=factor_list[i])
    fig.legend()
    # NOTE(review): tick marks assume factor values fall in 0-100 -- confirm.
    y_axis = range(0, 101, 10)
    plt.yticks(y_axis)
    if func_types == 'max':
        plt.ylabel("Max Value")
    elif func_types == 'mean':
        plt.ylabel("Mean Value")
    plt.xlabel("Date")
    if 'emotion' in data_set.columns:
        # Title the chart with the dominant emotion's readable name.
        plt.title(dic_emotion[list(max_emotion.keys())[0]])
    # Render the figure into a buffer and hand it back as a PIL image.
    fig.canvas.draw()
    s, (width, height) = fig.canvas.print_to_buffer()
    im = Image.frombytes("RGBA", (width, height), s)
    return im
def __set_df_date(self, data_set, from_date, to_date):
    """
    Restrict data_set to rows whose 'recorded_date' falls inside
    [from_date 00:00:00, to_date 23:59:59].

    :param data_set: dataframe with a 'recorded_date' column holding either
                     strings or datetime64 values
    :param from_date: String, the start date: "%Y-%m-%d"
    :param to_date: String, the end date: "%Y-%m-%d"
    :return: dataframe slice; datetime dates are reformatted to "%Y-%m-%d"
    """
    # Expand the day bounds to full-day timestamps.
    from_date = from_date + " 00:00:00"
    to_date = to_date + " 23:59:59"
    # The first row decides whether dates are stored as strings or timestamps.
    if type(data_set['recorded_date'].get(0)) == str:
        df_date = data_set.loc[(from_date <= data_set['recorded_date']) &
                               (data_set['recorded_date'] <= to_date)]
    else:
        # .copy() so the strftime reformatting below cannot write through a
        # view into the caller's frame (the original triggered pandas'
        # SettingWithCopyWarning and risked mutating data_set).
        df_date = data_set.loc[(pd.Timestamp(from_date) <= data_set['recorded_date']) &
                               (data_set['recorded_date'] <= pd.Timestamp(to_date))].copy()
        df_date['recorded_date'] = df_date['recorded_date'].dt.strftime('%Y-%m-%d')
    return df_date
def __set_df_u_id(self, data_set, u_id):
    """
    Restrict data_set to one user's rows.

    :param data_set: dataframe with a 'u_id' column
    :param u_id: int or None; None means "all users" (frame returned as-is)
    :return: dataframe, re-indexed 0..n-1 when filtered
    """
    if u_id is None:
        return data_set
    # reset_index(drop=True) instead of reset_index() + del ['index']:
    # identical result for a default index, but it cannot raise KeyError
    # when the incoming index is named, and skips the temporary column.
    return data_set.loc[data_set['u_id'] == u_id].reset_index(drop=True)
| Im-Watching-You/SELab-Smart-Mirror | DH/report.py | report.py | py | 12,685 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "matplot... |
36896145879 | import struct
import dns.rdata
import dns.rdatatype
class SSHFP(dns.rdata.Rdata):
    """SSHFP record

    @ivar algorithm: the algorithm
    @type algorithm: int
    @ivar fp_type: the digest type
    @type fp_type: int
    @ivar fingerprint: the fingerprint
    @type fingerprint: string
    @see: draft-ietf-secsh-dns-05.txt"""

    __slots__ = ['algorithm', 'fp_type', 'fingerprint']

    def __init__(self, rdclass, rdtype, algorithm, fp_type,
                 fingerprint):
        super(SSHFP, self).__init__(rdclass, rdtype)
        self.algorithm = algorithm
        self.fp_type = fp_type
        self.fingerprint = fingerprint

    def to_text(self, origin=None, relativize=True, **kw):
        # Render as "<algorithm> <fp_type> <hex fingerprint>".
        return '%d %d %s' % (self.algorithm,
                             self.fp_type,
                             dns.rdata._hexify(self.fingerprint,
                                               chunksize=128))

    def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
        import binascii
        algorithm = tok.get_uint8()
        fp_type = tok.get_uint8()
        fingerprint = tok.get_string()
        # binascii.unhexlify replaces the Python-2-only
        # str.decode('hex_codec') idiom and works on Python 2 and 3.
        fingerprint = binascii.unhexlify(fingerprint.encode())
        tok.get_eol()
        return cls(rdclass, rdtype, algorithm, fp_type, fingerprint)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress=None, origin=None):
        # Two fixed header bytes followed by the raw fingerprint digest.
        header = struct.pack("!BB", self.algorithm, self.fp_type)
        file.write(header)
        file.write(self.fingerprint)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
        header = struct.unpack("!BB", wire[current: current + 2])
        current += 2
        rdlen -= 2
        fingerprint = wire[current: current + rdlen]
        return cls(rdclass, rdtype, header[0], header[1], fingerprint)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Compare the packed headers first, then the fingerprints.
        hs = struct.pack("!BB", self.algorithm, self.fp_type)
        ho = struct.pack("!BB", other.algorithm, other.fp_type)
        # The cmp() builtin was removed in Python 3; (a > b) - (a < b)
        # reproduces its -1/0/1 contract.
        v = (hs > ho) - (hs < ho)
        if v == 0:
            v = ((self.fingerprint > other.fingerprint) -
                 (self.fingerprint < other.fingerprint))
        return v
| RMerl/asuswrt-merlin | release/src/router/samba-3.6.x/lib/dnspython/dns/rdtypes/ANY/SSHFP.py | SSHFP.py | py | 2,129 | python | en | code | 6,715 | github-code | 36 | [
{
"api_name": "dns.rdata.rdata",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "dns.rdata",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "dns.rdata.rdata._hexify",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "dns.rdata.rdata"... |
13151184850 | import numpy as np
from scipy.optimize import fmin_bfgs
from MILpy.functions.noisyORlossWeights import noisyORlossWeights
from MILpy.functions.noisyORlossAlphas import noisyORlossAlphas
from MILpy.functions.traindecstump import traindecstump
class MILBoost(object):
    """
    Multiple-instance learning booster: decision stumps trained round by
    round and combined under a noisy-OR bag-level loss (see fit/predict).
    """
    def __init__(self):
        self._alpha = None  # (T, 1) array of per-round stump weights, set by fit()
        self._H = None      # list of trained stump dicts, set by fit()
        self._T = None      # number of boosting rounds requested in fit()
def fit(self, train_bags, train_labels, errtol=1e-15, T=100, **kwargs):
    """
    Train the boosted stump ensemble with the noisy-OR MIL loss.

    @param train_bags : a sequence of n bags; each bag is an m-by-k array-like
        object containing m instances with k features
    @param train_labels : an array-like object of length n containing -1/+1 labels
    @param errtol : float, stop early once a stump's training error is <= this
    @param T : int, maximum number of boosting rounds
    """
    self._T = T
    bagSil = [np.asmatrix(bag) for bag in train_bags]
    baglab = np.asmatrix(train_labels).reshape((-1, 1))
    # Stack all instances; Ibag[i] holds the row indices belonging to bag i.
    X = np.vstack(bagSil)
    A = np.array([])
    Ibag = []
    for index in range(0, len(bagSil)):
        A = np.append(A, index*(np.ones(len(bagSil[index]))))
        Ibag.append(np.array(np.where(A == index)))
    bagy = baglab
    N = len(X)
    # init
    BestFeature = []
    h = np.zeros((T, 3))
    H = []                           # trained stumps, one per boosting round
    self._alpha = np.zeros((T, 1))   # per-round stump weights
    prev_out = np.zeros((N, 1))      # running ensemble score per instance
    for t in range(0, T):
        # Instance weights derived from the noisy-OR loss gradient.
        w = noisyORlossWeights(prev_out, bagy, Ibag)
        h = traindecstump(X, w)
        BestFeature.append(h['bestfeat'])
        H.append(h)
        this_out = np.array(h['bestsgn']*np.sign(X[:, h['bestfeat']]-h['bestthr']))
        # Line-search the stump weight alpha by minimising the bag loss.
        xopt = fmin_bfgs(noisyORlossAlphas, 1, args=(prev_out, this_out, bagy, Ibag), disp=False)
        self._alpha[t] = xopt[0]
        # update output full classifier:
        prev_out = prev_out + self._alpha[t]*this_out
        besterr = h['besterr']
        # Early stop once a stump already separates the training data.
        if (besterr <= errtol):
            self._H = H
            break
    self._H = H
def predict(self, test_bags):
    """
    Predict bag-level positive-class probabilities with the fitted ensemble.

    @param test_bags : a sequence of n bags; each bag is an m-by-k array-like
        object containing m instances with k features
    @return : (n, 1) array, noisy-OR probability that each bag is positive
    """
    H = self._H
    bag_mats = [np.asmatrix(bag) for bag in test_bags]
    # Map every stacked instance row back to the index of its bag.
    owner = np.array([])
    bag_index = []
    for idx in range(len(bag_mats)):
        owner = np.append(owner, idx * np.ones(len(bag_mats[idx])))
        bag_index.append(np.array(np.where(owner == idx)))
    Z = np.vstack(bag_mats)
    n = len(Z)
    out = np.zeros((n, 1))
    # Accumulate every trained stump. fit() may stop early, so iterate
    # len(H) rather than self._T; the original range(0, T-1) also skipped
    # the last stump even when all T rounds ran.
    for i in range(len(H)):
        out = out + float(self._alpha[i][0]) * H[i]['bestsgn'] * np.sign(Z[:, H[i]['bestfeat']] - H[i]['bestthr'])
    # Instance-level probability via the logistic function (plain ndarray
    # so fancy indexing per bag below is well defined).
    pij = np.asarray(1 / (1 + np.exp(-out)))
    B = len(test_bags)
    out = np.zeros((B, 1))
    # Noisy-OR combination per bag; range(B) fixes the off-by-one that left
    # the last bag's score permanently at zero.
    for i in range(B):
        out[i] = 1 - np.prod([1 - np.asarray(pij[bag_index[i]])])
    return out
{
"api_name": "numpy.asmatrix",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.asmatrix",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_nu... |
36450480034 | # repo originally forked from https://github.com/Confusezius/Deep-Metric-Learning-Baselines
################# LIBRARIES ###############################
import warnings
warnings.filterwarnings("ignore")
import numpy as np, pandas as pd, copy, torch, random, os
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
"""============================================================================"""
################ FUNCTION TO RETURN ALL DATALOADERS NECESSARY ####################
def give_dataloaders(dataset, opt):
    """
    Build the dataloader dict for the requested dataset.

    Args:
        dataset: string, name of dataset for which the dataloaders should be returned.
        opt:     argparse.Namespace, contains all training-specific parameters.
    Returns:
        dict of dataloaders for training, testing and evaluation on training.
    """
    # Pick the dataset-specific builder.
    if opt.dataset == 'vehicle_id':
        datasets = give_VehicleID_datasets(opt)
    elif opt.dataset == 'Inaturalist':
        datasets = give_inaturalist_datasets(opt)
    else:
        raise Exception('No Dataset >{}< available!'.format(dataset))

    # Wrap every dataset in a DataLoader.
    dataloaders = {}
    for key, dset in datasets.items():
        if key == 'training' and isinstance(dset, TrainDatasetsmoothap):
            # The pre-batched training set relies on sequential order,
            # so it must not be shuffled.
            dataloaders[key] = torch.utils.data.DataLoader(
                dset, batch_size=opt.bs, num_workers=opt.kernels,
                sampler=torch.utils.data.SequentialSampler(dset),
                pin_memory=True, drop_last=True)
        else:
            shuffle_and_drop = not dset.is_validation
            workers = opt.kernels if key == 'training' else 6
            dataloaders[key] = torch.utils.data.DataLoader(
                dset, batch_size=opt.bs, num_workers=workers,
                shuffle=shuffle_and_drop, pin_memory=True,
                drop_last=shuffle_and_drop)
    return dataloaders
def _read_inat_split(source_path, split_file):
    """Parse one Inaturalist split file into {class_key: [image paths]}.

    Each line is a path like .../<super>/<class>/<img>; the class key is
    '<super>/<class>' (the last two directory components).
    """
    image_dict = {}
    with open(os.path.join(source_path, split_file)) as f:
        lines = [x.strip() for x in f.readlines()]
    for entry in lines:
        info = entry.split('/')
        key = '/'.join([info[-3], info[-2]])
        if key not in image_dict:
            image_dict[key] = []
        image_dict[key].append(os.path.join(source_path, entry))
    return image_dict


def give_inaturalist_datasets(opt):
    """
    Generate training, testing and evaluation datasets for Metric Learning
    on the Inaturalist 2018 dataset. Train/test splits come from fixed text
    files, so there is no random shuffling of classes.

    Args:
        opt: argparse.Namespace, contains all training-specific parameters.
    Returns:
        dict of PyTorch datasets for training, testing and evaluation.
    """
    # One shared parser replaces the two duplicated file-reading loops.
    train_image_dict = _read_inat_split(
        opt.source_path, 'Inat_dataset_splits/Inaturalist_train_set1.txt')
    val_image_dict = _read_inat_split(
        opt.source_path, 'Inat_dataset_splits/Inaturalist_test_set1.txt')
    # Relabel training classes as 'te/<idx>' with contiguous indices.
    new_train_dict = {}
    for class_ind_ind, cate in enumerate(train_image_dict):
        new_train_dict["te/%d" % class_ind_ind] = train_image_dict[cate]
    train_image_dict = new_train_dict
    train_dataset = TrainDatasetsmoothap(train_image_dict, opt)
    val_dataset = BaseTripletDataset(val_image_dict, opt, is_validation=True)
    # Evaluation runs over the training images in validation mode.
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
    return {'training': train_dataset, 'testing': val_dataset, 'evaluation': eval_dataset}
def _vehicle_image_dict(split, source_path):
    """Group a (image_id, class) split array into {class: [jpg paths]}."""
    image_dict = {}
    for img_path, key in split:
        if key not in image_dict:
            image_dict[key] = []
        image_dict[key].append(source_path + '/image/{:07d}.jpg'.format(img_path))
    return image_dict


def give_VehicleID_datasets(opt):
    """
    Generate training, testing and evaluation datasets for Metric Learning on
    the PKU Vehicle dataset. Training and the three test sets are given by
    separate text files, so there is no random shuffling of classes.

    Args:
        opt: argparse.Namespace, contains all training-specific parameters.
    Returns:
        dict of PyTorch datasets for training, testing and evaluation.
    """
    # Load respective text-files.
    train = np.array(pd.read_table(opt.source_path+'/train_test_split/train_list.txt', header=None, delim_whitespace=True))
    small_test = np.array(pd.read_table(opt.source_path+'/train_test_split/test_list_800.txt', header=None, delim_whitespace=True))
    medium_test = np.array(pd.read_table(opt.source_path+'/train_test_split/test_list_1600.txt', header=None, delim_whitespace=True))
    big_test = np.array(pd.read_table(opt.source_path+'/train_test_split/test_list_2400.txt', header=None, delim_whitespace=True))
    # Remap raw vehicle-model ids to contiguous 0..n-1 labels
    # (train and test use separate label spaces).
    lab_conv_train = {x: i for i, x in enumerate(np.unique(train[:, 1]))}
    train[:, 1] = np.array([lab_conv_train[x] for x in train[:, 1]])
    lab_conv = {x: i for i, x in enumerate(np.unique(np.concatenate([small_test[:, 1], medium_test[:, 1], big_test[:, 1]])))}
    small_test[:, 1] = np.array([lab_conv[x] for x in small_test[:, 1]])
    medium_test[:, 1] = np.array([lab_conv[x] for x in medium_test[:, 1]])
    big_test[:, 1] = np.array([lab_conv[x] for x in big_test[:, 1]])
    # One shared helper replaces the four duplicated grouping loops.
    train_image_dict = _vehicle_image_dict(train, opt.source_path)
    small_test_dict = _vehicle_image_dict(small_test, opt.source_path)
    medium_test_dict = _vehicle_image_dict(medium_test, opt.source_path)
    big_test_dict = _vehicle_image_dict(big_test, opt.source_path)
    # NOTE(review): the attribute table below is read and grouped but the
    # result (new_dict / not_found) is never used; kept to preserve the
    # original behaviour (including the file-read side effect).
    attribute = np.array(pd.read_table(opt.source_path+'/attribute/model_attr.txt', header=None, delim_whitespace=True))
    new_dict = {}
    not_found = 0
    for thing in attribute:
        if lab_conv_train[thing[0]] not in train_image_dict:
            not_found += 1
        else:
            if thing[1] not in new_dict:
                new_dict[thing[1]] = []
            new_dict[thing[1]].append(lab_conv_train[thing[0]])
    train_dataset = TrainDatasetsmoothap(train_image_dict, opt)
    eval_dataset = BaseTripletDataset(train_image_dict, opt, is_validation=True)
    val_small_dataset = BaseTripletDataset(small_test_dict, opt, is_validation=True)
    val_medium_dataset = BaseTripletDataset(medium_test_dict, opt, is_validation=True)
    val_big_dataset = BaseTripletDataset(big_test_dict, opt, is_validation=True)
    return {'training': train_dataset, 'testing_set1': val_small_dataset, 'testing_set2': val_medium_dataset,
            'testing_set3': val_big_dataset, 'evaluation': eval_dataset}
################## BASIC PYTORCH DATASET USED FOR ALL DATASETS ##################################
class BaseTripletDataset(Dataset):
    """
    Dataset class to provide (augmented) correctly prepared training samples
    corresponding to standard DML literature. This includes normalizing to
    ImageNet-standards, and Random & Resized cropping of shapes 224 for
    ResNet50 and 227 for GoogLeNet during Training. During validation, only
    resizing to 256 or center cropping to 224/227 is performed.
    """
    def __init__(self, image_dict, opt, samples_per_class=8, is_validation=False):
        """
        Dataset Init-Function.

        Args:
            image_dict: dict of shape {class_idx: [list of paths to images of
                        this class]} providing all training paths and classes.
            opt: argparse.Namespace, contains all training-specific parameters.
            samples_per_class: Number of samples to draw from one class before
                        moving to the next when filling a batch.
            is_validation: If true, validation/testing transforms and sampling
                        are used instead of the training ones.
        Returns:
            Nothing!
        """
        # Define length of dataset (total number of images over all classes).
        self.n_files = np.sum([len(image_dict[key]) for key in image_dict.keys()])
        self.is_validation = is_validation
        self.pars = opt
        self.image_dict = image_dict
        self.avail_classes = sorted(list(self.image_dict.keys()))
        # Re-key classes as 0..n_classes-1, because the initial indices are
        # not necessarily contiguous from 0.
        self.image_dict = {i: self.image_dict[key] for i, key in enumerate(self.avail_classes)}
        self.avail_classes = sorted(list(self.image_dict.keys()))
        # Init. mutable sampling state used when filling up training batches.
        if not self.is_validation:
            self.samples_per_class = samples_per_class
            # Class currently being sampled, drawn at random to start.
            self.current_class = np.random.randint(len(self.avail_classes))
            self.classes_visited = [self.current_class, self.current_class]
            self.n_samples_drawn = 0
        # Data augmentation/processing methods (ImageNet normalization).
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        transf_list = []
        if not self.is_validation:
            transf_list.extend([transforms.RandomResizedCrop(size=224) if opt.arch=='resnet50' else transforms.RandomResizedCrop(size=227),
                                transforms.RandomHorizontalFlip(0.5)])
        else:
            transf_list.extend([transforms.Resize(256),
                                transforms.CenterCrop(224) if opt.arch=='resnet50' else transforms.CenterCrop(227)])
        transf_list.extend([transforms.ToTensor(), normalize])
        self.transform = transforms.Compose(transf_list)
        # Flat list of (image_path, class) tuples for direct index sampling.
        self.image_list = [[(x, key) for x in self.image_dict[key]] for key in self.image_dict.keys()]
        self.image_list = [x for y in self.image_list for x in y]
        # Flag that denotes if the dataset is queried for the first time.
        self.is_init = True

    def ensure_3dim(self, img):
        """
        Ensure the input image is three-dimensional (RGB).

        Args:
            img: PIL.Image; black-and-white images in an otherwise coloured
                 dataset are converted to RGB.
        Returns:
            Checked PIL.Image img.
        """
        if len(img.size) == 2:
            img = img.convert('RGB')
        return img

    def __getitem__(self, idx):
        """
        Args:
            idx: Sample idx for training sample.
        Returns:
            tuple of form (sample_class, torch.Tensor() of input image)
        """
        if self.pars.loss == 'smoothap' or self.pars.loss == 'smoothap_element':
            if self.is_init:
                #self.current_class = self.avail_classes[idx%len(self.avail_classes)]
                self.is_init = False
            if not self.is_validation:
                if self.samples_per_class == 1:
                    return self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))
                if self.n_samples_drawn == self.samples_per_class:
                    # Once enough samples per class have been drawn, choose
                    # another class; classes_visited guarantees previously
                    # used classes are not picked again.
                    counter = copy.deepcopy(self.avail_classes)
                    for prev_class in self.classes_visited:
                        if prev_class in counter: counter.remove(prev_class)
                    self.current_class = counter[idx % len(counter)]
                    #self.classes_visited = self.classes_visited[1:]+[self.current_class]
                    # EDIT -> there can be no class repeats
                    self.classes_visited = self.classes_visited + [self.current_class]
                    self.n_samples_drawn = 0
                class_sample_idx = idx % len(self.image_dict[self.current_class])
                self.n_samples_drawn += 1
                out_img = self.transform(self.ensure_3dim(Image.open(self.image_dict[self.current_class][class_sample_idx])))
                return self.current_class, out_img
            else:
                return self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))
        else:
            # Non-smoothap losses: same scheme, but a class may be revisited
            # after one round (only the two most recent classes are excluded).
            if self.is_init:
                self.current_class = self.avail_classes[idx % len(self.avail_classes)]
                self.is_init = False
            if not self.is_validation:
                if self.samples_per_class == 1:
                    return self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))
                if self.n_samples_drawn == self.samples_per_class:
                    # Once enough samples per class have been drawn, choose
                    # another class not among the last two visited.
                    counter = copy.deepcopy(self.avail_classes)
                    for prev_class in self.classes_visited:
                        if prev_class in counter: counter.remove(prev_class)
                    self.current_class = counter[idx % len(counter)]
                    self.classes_visited = self.classes_visited[1:] + [self.current_class]
                    self.n_samples_drawn = 0
                class_sample_idx = idx % len(self.image_dict[self.current_class])
                self.n_samples_drawn += 1
                out_img = self.transform(self.ensure_3dim(Image.open(self.image_dict[self.current_class][class_sample_idx])))
                return self.current_class, out_img
            else:
                return self.image_list[idx][-1], self.transform(self.ensure_3dim(Image.open(self.image_list[idx][0])))

    def __len__(self):
        # Total number of images over all classes.
        return self.n_files
def flatten(l):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    # def instead of a name-bound lambda (PEP 8 / E731); same callable API.
    return [item for sublist in l for item in sublist]
######################## dataset for SmoothAP regular training ##################################
class TrainDatasetsmoothap(Dataset):
    """
    This dataset class allows mini-batch formation pre-epoch, for greater speed.
    Batches are materialised in reshuffle() and consumed sequentially.
    """
    def __init__(self, image_dict, opt):
        """
        Args:
            image_dict: dict of shape {class_key: [image paths]} providing all
                        training paths grouped per class.
            opt: argparse.Namespace, contains all training-specific parameters.
        """
        self.image_dict = image_dict
        self.dataset_name = opt.dataset
        self.batch_size = opt.bs
        self.samples_per_class = opt.samples_per_class
        # Rewrite each class list as (class_key, path) tuples so batches keep
        # their labels after flattening.
        for sub in self.image_dict:
            newsub = []
            for instance in self.image_dict[sub]:
                newsub.append((sub, instance))
            self.image_dict[sub] = newsub
        # checks
        # provide avail_classes
        self.avail_classes = [*self.image_dict]
        # Data augmentation/processing methods (ImageNet normalization).
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        transf_list = []
        transf_list.extend([
            transforms.RandomResizedCrop(size=224) if opt.arch in ['resnet50', 'resnet50_mcn'] else transforms.RandomResizedCrop(size=227),
            transforms.RandomHorizontalFlip(0.5)])
        transf_list.extend([transforms.ToTensor(), normalize])
        self.transform = transforms.Compose(transf_list)
        # Build the first epoch's batch sequence.
        self.reshuffle()

    def ensure_3dim(self, img):
        # Convert black-and-white images to RGB.
        if len(img.size) == 2:
            img = img.convert('RGB')
        return img

    def reshuffle(self):
        """
        Rebuild the flat per-epoch sample sequence: shuffle images within each
        class and the class order, then greedily fill batches with
        samples_per_class images per chosen class until no full batch remains.
        """
        image_dict = copy.deepcopy(self.image_dict)
        print('shuffling data')
        for sub in image_dict:
            random.shuffle(image_dict[sub])
        classes = [*image_dict]
        random.shuffle(classes)
        total_batches = []
        batch = []
        finished = 0
        while finished == 0:
            # One pass over all classes: take samples_per_class images from a
            # class if it still has enough and the batch is not yet full.
            for sub_class in classes:
                if (len(image_dict[sub_class]) >= self.samples_per_class) and (len(batch) < self.batch_size/self.samples_per_class):
                    batch.append(image_dict[sub_class][:self.samples_per_class])
                    image_dict[sub_class] = image_dict[sub_class][self.samples_per_class:]
            if len(batch) == self.batch_size/self.samples_per_class:
                total_batches.append(batch)
                batch = []
            else:
                # Could not complete another batch: drop the remainder.
                finished = 1
        random.shuffle(total_batches)
        # Flatten [batches][class-chunks] into one sequential sample list.
        self.dataset = flatten(flatten(total_batches))

    def __getitem__(self, idx):
        # We use SequentialSampler together with this dataset, so idx walks
        # the pre-built batch sequence in order (idx==0 starts a new epoch).
        batch_item = self.dataset[idx]
        if self.dataset_name == 'Inaturalist':
            # Inaturalist keys look like 'te/<idx>'; the class id is <idx>.
            cls = int(batch_item[0].split('/')[1])
        else:
            cls = batch_item[0]
        img = Image.open(batch_item[1])
        return cls, self.transform(self.ensure_3dim(img))

    def __len__(self):
        # Number of individual samples in the pre-built epoch sequence.
        return len(self.dataset)
| Andrew-Brown1/Smooth_AP | src/datasets.py | datasets.py | py | 18,924 | python | en | code | 193 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "t... |
16753186305 | import os
import json
import pandas as pd
import numpy as np
# Cached Yelp lookups, keyed by restaurant id (stringified in the JSON).
reffile = "reference.json"
# errfile = "errors.json"
# NYC DOHMH restaurant inspection results, indexed by the first CSV column.
datfile = os.path.join(".", "data",
                       "DOHMH_New_York_City_Restaurant_Inspection_Results.csv")
data = pd.read_csv(datfile, index_col=[0])
data = data.sort_index()
original_cols = data.columns
with open(reffile, "r") as file:
    yelp = json.load(file)
def tabulate(data, yelp):
    """
    Build one row per restaurant id from the Yelp reference data.

    Args:
        data: DataFrame indexed by restaurant id (used only for its index).
        yelp: dict mapping stringified restaurant id -> {"yelp": {...}} with
              the cached Yelp business record.
    Returns:
        DataFrame indexed like `data` with columns: yelp_cat, price, rating,
        review_count, longitude, latitude, opens, closes, is_overnight.
    """
    out = pd.DataFrame(index=data.index.unique())
    # generate a histogram of categories
    categories = {}
    for key in yelp:
        if ("yelp" in yelp[key]) and ("categories" in yelp[key]["yelp"]):
            for cat in yelp[key]["yelp"]["categories"]:
                if cat["title"] in categories:
                    categories[cat["title"]] += 1
                else:
                    categories[cat["title"]] = 1
    hist = pd.DataFrame({"ct": categories})
    hist = hist.sort_values("ct")[::-1]
    # Find the minimum number of top categories needed so that >= 95% of
    # businesses with categories match at least one of them.
    threshold = 0.95
    for ee, n_categories in enumerate(range(1, len(hist)+1)):
        totl = 0
        succ = 0
        for key in yelp:
            if (("yelp" in yelp[key])
                    and ("categories" in yelp[key]["yelp"])
                    and (len(yelp[key]["yelp"]["categories"]) > 0)):
                totl += 1
                if any(
                    hist.index[:n_categories].isin(
                        map(lambda d: d["title"], yelp[key]["yelp"]["categories"])
                    )
                ):
                    succ += 1
        if succ / totl > threshold:
            break
    # NOTE(review): keeps the top `ee` categories, but the loop satisfied the
    # threshold with n_categories == ee + 1 -- confirm this off-by-one is
    # intended.
    hist = hist.reindex(hist.index[:ee])
    # Map each restaurant to its first matching top category, else "Other".
    yelp_cat = dict.fromkeys(data.index.unique())
    for key in yelp_cat:
        cats = list(
            map(
                lambda d: d["title"],
                yelp[str(key)]["yelp"]["categories"]
                if ((str(key) in yelp)
                    and ("yelp" in yelp[str(key)])
                    and ("categories" in yelp[str(key)]["yelp"]))
                else []
            ))
        mask = hist.index.isin(cats)
        if len(hist.index[mask]):
            yelp_cat[key] = hist.index[mask][0]
        else:
            yelp_cat[key] = "Other"
    out["yelp_cat"] = pd.Series(yelp_cat).reindex(out.index)
    # Price tier: length of the "$"-string (e.g. "$$" -> 2.0), NaN if absent.
    keydata = {
        int(x):
        (len(yelp[x]["yelp"]["price"]) * 1.
         if ("price" in yelp[x]["yelp"])
         else np.nan)
        for x in yelp
    }
    out["price"] = pd.Series(keydata).reindex(out.index)
    keydata = {
        int(x):
        (yelp[x]["yelp"]["rating"] if ("rating" in yelp[x]["yelp"])
         else np.nan)
        for x in yelp
    }
    out["rating"] = pd.Series(keydata).reindex(out.index)
    keydata = {
        int(x): (yelp[x]["yelp"]["review_count"] if ("review_count" in yelp[x]["yelp"])
                 else np.nan)
        for x in yelp
    }
    out["review_count"] = pd.Series(keydata).reindex(out.index)
    # Coordinates, split into two aligned Series.
    keydata = {
        int(x): ((
            yelp[x]["yelp"]["coordinates"]["latitude"],
            yelp[x]["yelp"]["coordinates"]["longitude"]
        ) if ("coordinates" in yelp[x]["yelp"])
            else (np.nan, np.nan))
        for x in yelp
    }
    longs, lats = map(
        lambda ary: pd.Series(ary, index=map(int, yelp.keys())).reindex(out.index),
        zip(*pd.Series(keydata))
    )
    # NOTE(review): the tuple order above is (latitude, longitude) but the
    # unpacking names them longs, lats -- columns may be swapped; verify.
    out["longitude"], out["latitude"] = longs, lats
    # Opening hours: earliest open and latest close over the first hours
    # block; closes past midnight get +2400 so they sort after opens.
    keydata = {
        int(x): ((
            min(float(j["start"]) for j in yelp[x]["yelp"]["hours"][0]["open"]),
            max(float(j["end"]) +
                (0. if float(j["end"]) > float(j["start"]) else 2400.)
                for j in yelp[x]["yelp"]["hours"][0]["open"])
        ) if ("hours" in yelp[x]["yelp"])
            else (np.nan, np.nan))
        for x in yelp
    }
    opens, closes = map(
        lambda ary: pd.Series(ary, index=map(int, yelp.keys())).reindex(out.index),
        zip(*pd.Series(keydata))
    )
    out["opens"], out["closes"] = opens, closes
    # buggy
    out["is_overnight"] = out["closes"] > 2400.
    return out
# Build the merged feature table and write it next to the script.
tabulated = tabulate(data, yelp)
tabulated.to_csv("tbl_yelp.csv")
| raokaran/rest_inspect | edav_final/merge_doh_yelp.py | merge_doh_yelp.py | py | 4,213 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_numbe... |
40238893115 | import argparse
from dotenv import load_dotenv
from parse_page import Driver
from pipeline import pipeline
from threads import threads
load_dotenv()
def main():
    """Parse command-line options and dispatch either a single-URL download,
    a batched list download, or the help screen."""
    arg_parser = argparse.ArgumentParser(
        description='Download and decrypt DRM protected mpeg-dash content')
    arg_parser.add_argument('--url', type=str, help='URL goes here')
    arg_parser.add_argument('--name', type=str, help='set the output file path')
    arg_parser.add_argument('--list', type=str, help='set the URLs list path')
    arg_parser.add_argument(
        '--offset', type=int, help='set offset for resulting filenames')
    arg_parser.add_argument(
        '--speed', type=int, help='Set download speed limit(KB/s); 0 means no limit')
    opts = arg_parser.parse_args()

    browser = Driver()

    # Fall back to defaults for any option the user did not supply.
    offset = opts.offset if opts.offset is not None else 0
    max_speed = opts.speed if opts.speed is not None else 0  # 0 == no limit
    filename = opts.name if opts.name is not None else "default_name"

    if opts.url is not None:
        pipeline(browser, opts.url, filename, max_speed)
    elif opts.list is not None:
        threads(browser, opts.list, offset, max_speed)
    else:
        arg_parser.print_help()

    browser.driver.close()
    return 0


if __name__ == '__main__':
    main()
| vigoroous/DRMBypass | src/main.py | main.py | py | 1,323 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "parse_page.Driver",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pipeline.... |
71073572903 | import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torch.nn.functional as F
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
import numpy as np
import time
import pandas as pd
import matplotlib.pyplot as plt
import os
from PIL import Image
import gc
from utils import *
from data_handler import *
from model import *
'''
Train the model.
'''
def train(model, criterion, optimizer, trainloader, valloader, startepoch, nepochs, save,
          tprob=1.0):
    """Run the training loop for `nepochs` epochs starting at `startepoch`.

    Args:
        model: network returning (means, stddevs, corrs) for a batch.
        criterion: loss callable (see nllLoss) applied to time-shifted targets.
        optimizer: torch optimizer over model.parameters().
        trainloader, valloader: DataLoaders yielding input tensors.
        startepoch, nepochs: first epoch index and number of epochs to run.
        save: if True, checkpoint model/optimizer state every 4th epoch.
        tprob: teacher-forcing probability forwarded to the model.
            Bug fix: `tprob` was previously an undefined global name, which
            raised NameError on the first batch; it is now a parameter with
            a default, keeping existing call sites working.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print('Training...')
    print(time.ctime())
    torch.autograd.set_detect_anomaly(True)
    for epoch in range(startepoch, startepoch + nepochs):
        print('Starting Epoch %d:' % (epoch), time.ctime())
        start_epoch = time.time()
        model.train()
        train_loss, train_dist = [], []
        for batch_num, (data_in) in enumerate(trainloader):
            data_in = data_in.to(device)
            start_batch = time.time()
            optimizer.zero_grad()
            means, stddevs, corrs = model(data_in, tprob)
            # Negative coordinates appear to mark padding and are masked out
            # of the loss -- assumption inferred from usage, TODO confirm.
            mask = (data_in[:, :, 1:, :] < 0)
            # Predict step t+1 from step t: targets are shifted by one step
            # relative to the model outputs.
            loss = criterion(data_in[:, :, 1:, :].to(device), means[:, :, :-1, :],
                             stddevs[:, :, :-1, :], corrs[:, :, :-1, :],
                             mask[:, :, :, 0].to(device))
            loss = loss.sum()
            # Normalize by the number of unmasked entries; clamp guards
            # against numerical blow-ups.
            loss = (loss / (mask < 1).sum()).clamp(max=1e18)
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), 10.0)
            optimizer.step()
            dist = calc_dist(data_in[:, :, 1:, :].detach().cpu(),
                             means[:, :, :-1, :].detach().cpu(),
                             mask.detach().cpu())
            gc.collect()
            torch.cuda.empty_cache()
            train_loss.append(loss.detach().cpu().item())
            train_dist.append(dist.item())
            del loss, mask, means, stddevs, corrs, data_in
            if epoch == 0 and batch_num == 0:
                print('Single Batch Time: %d min %d sec' % ((time.time() - start_batch) // 60,
                                                            (time.time() - start_batch) % 60))
        val_loss, val_dist = 0, 0
        if epoch % 4 == 0:
            val_loss, val_dist = validate(model, criterion, valloader)
            if save:
                torch.save(model.state_dict(), 'model_' + str(epoch) + '.pt')
                torch.save(optimizer.state_dict(), 'optimizer_' + str(epoch) + '.pt')
        stop_epoch = time.time()
        min_epoch = (stop_epoch - start_epoch) // 60
        sec_epoch = (stop_epoch - start_epoch) % 60
        print("Epoch: %d, Run Time: %d min, %d sec" % (epoch, min_epoch, sec_epoch))
        print('Train Loss: {:.3f}, Train Avg Dist: {:.2f}'.format(np.mean(train_loss), np.mean(train_dist)))
        if (epoch % 4) == 0:
            print('Val Loss: {:.3f}, Val Avg Dist: {:.2f}'.format(val_loss, val_dist))
        print("==============================================")
def validate(model, criterion, valloader):
    """Evaluate `model` on `valloader`.

    Returns:
        (mean validation loss, mean displacement) as floats.

    Bug fix: the previous return also included np.mean(val_evaldist), where
    `val_evaldist` was never defined -- every call raised NameError -- and
    the caller in train() unpacks exactly two values.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model.eval()
    val_loss, val_dist = [], []
    # No gradients are needed during evaluation; no_grad saves memory.
    with torch.no_grad():
        for batch_num, (data_in) in enumerate(valloader):
            data_in = data_in.to(device)
            means, stddevs, corrs = model(data_in, tprob=1)
            # Negative coordinates mark padding -- see train().
            mask = (data_in[:, :, 1:, :] < 0)
            loss = criterion(data_in[:, :, 1:, :].to(device), means[:, :, :-1, :],
                             stddevs[:, :, :-1, :], corrs[:, :, :-1, :],
                             mask[:, :, :, 0].to(device))
            loss = loss.sum()
            loss = loss / (mask < 1).sum()
            dist = calc_dist(data_in[:, :, 1:, :].detach().cpu(),
                             means[:, :, :-1, :].detach().cpu(),
                             mask.detach().cpu())
            gc.collect()
            torch.cuda.empty_cache()
            val_loss.append(loss.detach().cpu().item())
            val_dist.append(dist.item())
            del loss, mask, means, stddevs, corrs, data_in
    return np.mean(val_loss), np.mean(val_dist)
def nllLoss(target, mean, stddev, corr, mask):
    """Element-wise negative log-likelihood of `target` under a bivariate
    Gaussian with per-element means, stddevs and correlation.

    The last axis of `target`/`mean`/`stddev` holds the two coordinates;
    `mask` marks entries whose loss is zeroed out.  Returns the unreduced
    -log N tensor (callers sum and normalize).
    """
    x1, x2 = target[:,:,:,0], target[:,:,:,1]
    m1, m2 = mean[:,:,:,0], mean[:,:,:,1]
    # Clamp stddevs away from zero to avoid division blow-ups.
    std1, std2 = stddev[:,:,:,0].clamp(min=1e-6), stddev[:,:,:,1].clamp(min=1e-6)
    corr = corr[:,:,:].squeeze(-1)
    # Z: exponent term of the bivariate normal density.
    Z = pow((x1-m1)/std1,2) + pow((x2-m2)/std2,2) - 2*corr*(((x1-m1)*(x2-m2))/(std1*std2))
    # N: normalization constant; corr^2 is clamped below 1 so sqrt stays finite.
    N = (1 / (2*np.pi*std1*std2*torch.sqrt(1-pow(corr,2).clamp(max=1-1e-6))))
    a = torch.log(N.clamp(min=1e-6))
    # NOTE(review): `b` divides by the *unclamped* (1 - corr^2); if |corr|
    # reaches exactly 1 this divides by zero -- confirm the model bounds corr.
    b = (-Z/(2*(1-pow(corr,2))))
    L = a + b
    L = L.masked_fill_(mask,0)
    return -L
def run(startepoch, nepochs, modelfilename, optfilename, save):
    """Build the model and optimizer, optionally restore checkpoints, and train.

    Args:
        startepoch, nepochs: epoch range forwarded to train().
        modelfilename: path to a model state-dict checkpoint, or None.
        optfilename: path to an optimizer state-dict checkpoint, or None.
        save: whether train() should write checkpoints.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)
    torch.autograd.set_detect_anomaly(True)
    gc.collect()
    torch.cuda.empty_cache()
    # get the data
    trainloader, valloader = get_dataloaders()
    # define the model
    model = SocialLSTM()
    if modelfilename is not None:
        # Bug fix: load_state_dict expects an already-deserialized state dict
        # and has no `map_location` parameter -- the file must first be read
        # with torch.load (mirrors the optimizer restore below).
        model.load_state_dict(torch.load(modelfilename, map_location=device))
    model = model.to(device)
    # define the criterion and optimizer
    criterion = nllLoss
    learning_rate = 3e-2
    weight_decay = 5e-5
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    if optfilename is not None:
        optimizer.load_state_dict(torch.load(optfilename))
    train(model, criterion, optimizer, trainloader, valloader, startepoch, nepochs, save)
if __name__ == "__main__":
    # Read run configuration from the command line / params helper.
    params = get_params()
    startepoch, nepochs = params.startepoch, params.nepochs
    modelfilename, optfilename = params.load_model, params.load_optimizer
    # NOTE(review): the hard-coded assignments below override everything
    # parsed above -- presumably debug leftovers; confirm before shipping.
    modelfilename = 'model_0.pt'
    optfilename = 'optimizer_0.pt'
    save = False
    startepoch = 0
    nepochs = 100
    run(startepoch, nepochs, modelfilename, optfilename, save)
| aymitchell/UAV-navigation | TrajectoryPrediction/train.py | train.py | py | 5,926 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "torch.device",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "time.ctime",
... |
21885165618 | """
Migration scripts
"""
import click
from packaging.version import Version
from brewblox_ctl import actions, click_helpers, const, migration, sh, utils
# Top-level Click group for this module's commands; OrderedGroup keeps the
# declaration order of subcommands in --help output.
@click.group(cls=click_helpers.OrderedGroup)
def cli():
    """Global command group"""
def check_version(prev_version: Version):
    """Verify that the previous version is sane and sensible"""
    # A zero version means setup never ran on this install.
    if prev_version == Version('0.0.0'):
        utils.error('This configuration was never set up. Please run brewblox-ctl setup first')
        raise SystemExit(1)

    # Refuse downgrades relative to the shipped configuration version.
    if prev_version > Version(const.CFG_VERSION):
        utils.error('Your system is running a version newer than the selected release. '
                    'This may be due to switching release tracks.'
                    'You can use the --from-version flag if you know what you are doing.')
        raise SystemExit(1)
def check_dirs():
    """Ensure all expected data directories exist (mkdir -p is idempotent)."""
    utils.info('Checking data directories...')
    dirs = (
        './traefik',
        './auth',
        './redis',
        './victoria',
        './mosquitto',
        './spark/backup',
    )
    sh('mkdir -p ' + ' '.join(dirs))
def apply_config_files():
    """Apply system-defined configuration from config dir"""
    utils.info('Updating configuration files...')
    # Overwrite the system-managed files with the versions shipped in the
    # deployed config directory.
    sh(f'cp -f {const.DIR_DEPLOYED_CONFIG}/traefik-cert.yaml ./traefik/')
    sh(f'cp -f {const.DIR_DEPLOYED_CONFIG}/docker-compose.shared.yml ./')

    # Keep the user compose file's `version` field in sync with the shared file.
    shared_cfg = utils.read_shared_compose()
    usr_cfg = utils.read_compose()

    usr_cfg['version'] = shared_cfg['version']
    utils.write_compose(usr_cfg)
def check_env_vars():
    """Fill in default values for any missing .env variables."""
    utils.info('Checking .env variables...')
    utils.defaultenv()
def bind_localtime():
    """Mount the host's /etc/localtime (read-only) into every user-defined
    compose service so containers share the host timezone."""
    shared_cfg = utils.read_shared_compose()
    usr_cfg = utils.read_compose()

    changed = False
    # Short-form and long-form spellings of the same bind mount.
    localtime_volume_str = '/etc/localtime:/etc/localtime:ro'
    localtime_volume = {
        'type': 'bind',
        'source': '/etc/localtime',
        'target': '/etc/localtime',
        'read_only': True,
    }

    for (name, service) in usr_cfg['services'].items():
        name: str
        service: dict

        # Services also present in the shared compose file are managed
        # upstream -- leave them untouched.
        if name in shared_cfg['services']:
            continue

        volumes = service.get('volumes', [])

        # Skip services that already carry the mount, in either syntax.
        if localtime_volume in volumes:
            continue

        if localtime_volume_str in volumes:
            continue

        changed = True
        utils.info(f'Mounting localtime in `{name}` service...')
        volumes.append(localtime_volume.copy())
        service['volumes'] = volumes

    # Only rewrite the compose file when something actually changed.
    if changed:
        utils.write_compose(usr_cfg)
def bind_spark_backup():
    """Mount ./spark/backup into /app/backup for every Spark service
    (identified by its image name) that does not already have the mount."""
    usr_cfg = utils.read_compose()

    changed = False
    backup_volume = {
        'type': 'bind',
        'source': './spark/backup',
        'target': '/app/backup',
    }

    for (name, service) in usr_cfg['services'].items():
        name: str
        service: dict

        # Only Spark services get the backup mount.
        if not service.get('image', '').startswith('ghcr.io/brewblox/brewblox-devcon-spark'):
            continue

        volumes = service.get('volumes', [])
        present = False

        # Detect an existing mount in either the short or the long syntax.
        for volume in volumes:
            if (isinstance(volume, str) and volume.endswith(':/app/backup')) \
                    or (isinstance(volume, dict) and volume.get('target') == '/app/backup'):
                present = True
                break

        if present:
            continue

        changed = True
        utils.info(f'Mounting backup volume in `{name}` service...')
        volumes.append(backup_volume.copy())
        service['volumes'] = volumes

    # Only rewrite the compose file when something actually changed.
    if changed:
        utils.write_compose(usr_cfg)
def downed_migrate(prev_version):
    """Migration commands to be executed without any running services"""
    check_dirs()
    apply_config_files()
    actions.add_particle_udev_rules()
    actions.edit_avahi_config()

    # Version-gated migrations: each one runs exactly when upgrading across
    # the release that introduced it.
    if prev_version < Version('0.3.0'):
        migration.migrate_compose_split()

    if prev_version < Version('0.6.0'):
        migration.migrate_compose_datastore()

    if prev_version < Version('0.6.1'):
        migration.migrate_ipv6_fix()

    if prev_version < Version('0.8.0'):
        migration.migrate_ghcr_images()

    if prev_version < Version('0.9.0'):
        migration.migrate_tilt_images()

    # Not related to a specific release
    check_env_vars()
    bind_localtime()
    bind_spark_backup()
def upped_migrate(prev_version):
    """Migration commands to be executed after the services have been started"""
    # These migrations need user interaction, so only print instructions.
    if prev_version < Version('0.6.0'):
        utils.warn('')
        utils.warn('Brewblox now uses a new configuration database.')
        utils.warn('To migrate your data, run:')
        utils.warn('')
        utils.warn('    brewblox-ctl database from-couchdb')
        utils.warn('')

    if prev_version < Version('0.7.0'):
        utils.warn('')
        utils.warn('Brewblox now uses a new history database.')
        utils.warn('To migrate your data, run:')
        utils.warn('')
        utils.warn('    brewblox-ctl database from-influxdb')
        utils.warn('')
@cli.command()
@click.option('--update-ctl/--no-update-ctl',
              default=True,
              help='Update brewblox-ctl.')
@click.option('--update-ctl-done',
              is_flag=True,
              hidden=True)
@click.option('--pull/--no-pull',
              default=True,
              help='Update docker service images.')
@click.option('--update-system-packages/--no-update-system-packages',
              default=True,
              envvar=const.ENV_KEY_UPDATE_SYSTEM_PACKAGES,
              help='Update system packages.')
@click.option('--migrate/--no-migrate',
              default=True,
              help='Migrate Brewblox configuration and service settings.')
@click.option('--prune/--no-prune',
              default=True,
              help='Remove unused docker images.')
@click.option('--from-version',
              default='0.0.0',
              envvar=const.ENV_KEY_CFG_VERSION,
              help='[ADVANCED] Override version number of active configuration.')
def update(update_ctl, update_ctl_done, pull, update_system_packages, migrate, prune, from_version):
    """Download and apply updates.

    This is the one-stop-shop for updating your Brewblox install.
    You can use any of the options to fine-tune the update by enabling or disabling subroutines.

    By default, all options are enabled.

    --update-ctl/--no-update-ctl: Whether to download and install new versions of
    of brewblox-ctl. If this flag is set, update will download the new version
    and then restart itself. This way, the migrate is done with the latest version of brewblox-ctl.

    If you're using dry run mode, you'll notice the hidden option --update-ctl-done.
    You can use it to watch the rest of the update: it\'s a flag to avoid endless loops.

    --pull/--no-pull. Whether to pull docker images.
    This is useful if any of your services is using a local image (not from Docker Hub).

    --update-system-packages/--no-update-system-packages determines whether generic system packages
    are updated during the brewblox-ctl update.

    --migrate/--no-migrate. Updates regularly require changes to configuration.
    Required changes are applied here.

    --prune/--no-prune. Updates to docker images can leave unused old versions
    on your system. These can be pruned to free up disk space.
    This includes all images and volumes on your system, and not just those created by Brewblox.

    \b
    Steps:
        - Check whether any system fixes must be applied.
        - Update brewblox-ctl.
        - Stop services.
        - Update Avahi config.
        - Update system packages.
        - Migrate configuration files.
        - Pull Docker images.
        - Prune unused Docker images and volumes.
        - Start services.
        - Migrate service configuration.
        - Write version number to .env file.
    """
    utils.check_config()
    utils.confirm_mode()
    utils.cache_sudo()
    sudo = utils.optsudo()
    # The version recorded in .env (or overridden by --from-version) tells us
    # which migrations still need to run.
    prev_version = Version(from_version)
    shipped_version = Version(const.CFG_VERSION)
    check_version(prev_version)

    if not update_ctl_done:
        utils.info(f'Starting update for brewblox {utils.getenv(const.ENV_KEY_RELEASE)}...')

    if update_ctl and not update_ctl_done:
        utils.info('Updating brewblox-ctl...')
        utils.pip_install('pip')
        actions.install_ctl_package()
        # Restart update - we just replaced the source code
        sh(' '.join(['exec', const.CLI, *const.ARGS[1:], '--update-ctl-done']))
        return

    if update_ctl:
        # Second pass (after self-restart): clean up the legacy install and
        # refresh the shell wrapper.
        actions.uninstall_old_ctl_package()
        actions.deploy_ctl_wrapper()

    actions.check_compose_plugin()

    # Services must be down before file migrations run.
    utils.info('Stopping services...')
    sh(f'{sudo}docker compose down')

    if update_system_packages:
        actions.update_system_packages()

    if migrate:
        downed_migrate(prev_version)

    if pull:
        utils.info('Pulling docker images...')
        sh(f'{sudo}docker compose pull')

    if prune:
        # NOTE: prunes *all* unused docker images/volumes, not just Brewblox ones.
        utils.info('Pruning unused images...')
        sh(f'{sudo}docker image prune -f > /dev/null')

        utils.info('Pruning unused volumes...')
        sh(f'{sudo}docker volume prune -f > /dev/null')

    utils.info('Starting services...')
    sh(f'{sudo}docker compose up -d')

    if migrate:
        # Migrations that need running services (print manual instructions).
        upped_migrate(prev_version)

    # Record the new configuration version so migrations don't re-run.
    utils.info(f'Configuration version: {prev_version} -> {shipped_version}')
    utils.setenv(const.ENV_KEY_CFG_VERSION, const.CFG_VERSION)
@cli.command()
def update_ctl():
    """Download and update brewblox-ctl itself."""
    utils.confirm_mode()
    # Install the new package first, then remove the legacy install and
    # refresh the shell wrapper that invokes it.
    actions.install_ctl_package()
    actions.uninstall_old_ctl_package()
    actions.deploy_ctl_wrapper()
| BrewBlox/brewblox-ctl | brewblox_ctl/commands/update.py | update.py | py | 9,669 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "click.group",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "brewblox_ctl.click_helpers.OrderedGroup",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "brewblox_ctl.click_helpers",
"line_number": 11,
"usage_type": "name"
},
{
... |
73750601385 | import json
import os
import time
from tqdm import tqdm
from easydict import EasyDict
import pandas as pd
from .index_compression import restore_dict
def find_pos_in_str(zi, mu):
    """Return the start indices of every (possibly overlapping) occurrence
    of substring `zi` in string `mu`.

    Bug fix: the scan previously iterated range(len(mu) - len(zi)), which
    excludes the final valid start position -- a match ending exactly at the
    end of `mu` was never found (e.g. ("ab", "ab") returned []).  The upper
    bound is now len(mu) - len(zi) + 1.
    """
    width = len(zi)
    positions = []
    for start in range(len(mu) - width + 1):
        if mu[start:start + width] == zi:
            positions.append(start)
    return positions
def insert_term2dict(term, _dict, doc_id, pos_id):
    """Record one occurrence of `term` at (doc_id, pos_id) in the inverted
    index `_dict`.

    Index layout: term -> {'doc_feq': <number of documents containing the
    term>, 'posting_list': {doc_id: [pos_id, ...]}}.  Empty terms are ignored.
    """
    if term == "":
        return
    entry = _dict.get(term)
    if entry is None:
        # First sighting of the term anywhere.
        _dict[term] = {'doc_feq': 1, 'posting_list': {doc_id: [pos_id]}}
        return
    postings = entry['posting_list']
    if doc_id in postings:
        # Same document: just extend its position list.
        postings[doc_id].append(pos_id)
    else:
        # New document for a known term: bump the document frequency.
        entry['doc_feq'] += 1
        postings[doc_id] = [pos_id]
def write_term_dict2disk(term_dict, filename):
    """Sort the inverted index, persist it as CSV, and return the sorted dict.

    CSV columns: term, doc_feq (document frequency) and posting_list
    ({doc_id: [positions]}, serialized via its str() representation).
    """
    print("\tI'm writing {} to disk...".format(filename))
    start = time.time()
    # Sort terms lexicographically so the on-disk index is deterministic.
    term_dict = dict(sorted(term_dict.items(), key=lambda x: x[0]))
    term_col = list(term_dict.keys())
    doc_feq_col = list()
    posting_list_col = list()
    for term in tqdm(term_dict.keys()):
        doc_feq_col.append(term_dict[term]['doc_feq'])
        # Posting lists are sorted by doc id, both in memory and on disk.
        posting_list = dict(sorted(term_dict[term]['posting_list'].items(), key=lambda x: x[0]))
        term_dict[term]['posting_list'] = posting_list
        posting_list_col.append(posting_list)
    data_frame = pd.DataFrame({'term': term_col, 'doc_feq': doc_feq_col, 'posting_list': posting_list_col})
    data_frame.to_csv(filename, index=False, sep=',')
    end = time.time()
    print("\tFile {} has been successfully wrote to disk in {:.4f} seconds.".format(filename, end - start))
    return term_dict
def get_engine_from_csv(file_path, name, mode="vb"):
    """Load a persisted engine structure named `<name>.csv` from `file_path`.

    The structure type is dispatched on substrings of `name`:
      * "compressed"       -> delegate to restore_dict (`mode` selects codec)
      * "dict"             -> inverted index {term: {'doc_feq', 'posting_list'}}
      * "vector_model"     -> {doc_id: values}
      * "spell"/"rotation" -> {key: value}

    Raises NameError when the file does not exist.
    """
    filename = name + ".csv"
    file_name = os.path.join(file_path, name + ".csv")
    if filename not in os.listdir(file_path):
        raise NameError("No such file : {}.".format(file_name))
    print("\tI'm Loading the {} from {}...".format(name, file_name))
    start = time.time()
    dict_map = dict()
    if "compressed" in name:
        end = time.time()
        print("\tSuccessfully load {} in {:.4f} seconds.".format(name, end - start))
        return restore_dict(file_name, mode)
    if "dict" in name and "vector_model" not in name and "spell" not in name:
        df = pd.read_csv(file_name)
        for i, term in enumerate(df['term']):
            term = str(term)
            dict_map[term] = dict()
            dict_map[term]['doc_feq'] = df['doc_feq'][i]
            # NOTE(review): eval() executes file contents -- safe only for
            # locally generated, trusted CSVs.
            dict_map[term]['posting_list'] = eval(df['posting_list'][i])
    if "vector_model" in name:
        df = pd.read_csv(file_name)
        for i, doc_id in enumerate(df['doc_id']):
            dict_map[doc_id] = eval(df['values'][i])
    if "spell" in name or "rotation" in name:
        df = pd.read_csv(file_name)
        for i, key in enumerate(df['key']):
            key = str(key)
            dict_map[key] = eval(df['value'][i])
    end = time.time()
    print("\tSuccessfully load {} in {:.4f} seconds.".format(name, end - start))
    return dict_map
def parsing_json(file_path):
    """Parse a JSON config file into an attribute-accessible EasyDict.

    Bug fix: the file handle was previously opened inline and never closed;
    a `with` block now guarantees it is released.
    """
    with open(file_path, "rb") as json_file:
        args_dict = json.load(json_file)
    args = EasyDict()
    for key, value in args_dict.items():
        args[key] = value
    return args
def get_doc_name_from_doc_id(data_path, doc_id):
    """Map a numeric doc_id to its filename.

    Files in `data_path` are named '<numeric id>.<ext>'; sorting by the
    numeric prefix makes the list index equal the document id.
    """
    names = sorted(os.listdir(data_path), key=lambda f: int(f.split(".")[0]))
    return names[doc_id]
def display_query_result(data_path, term, pos):
    """Print '<filename>: <term>' for every document in posting dict `pos`.

    :param data_path: directory of documents named '<numeric id>.<ext>'
    :param term: the query term (echoed on each line)
    :param pos: {doc_id: [position, ...]} posting dict for the term
    """
    names = sorted(os.listdir(data_path), key=lambda f: int(f.split(".")[0]))
    for doc_id in pos:
        print("{}: {}".format(names[doc_id], term))
def display_query_result_detailed(data_path, term, pos, k=10):
    """Print each matching document plus a context window around every hit.

    :param data_path: directory of documents named '<numeric id>.<ext>'
    :param term: the query term (echoed in the header line)
    :param pos: {doc_id: [position, ...]} posting dict for the term
    :param k: number of words shown before and after each occurrence
    :return: None (prints to stdout)
    """
    filenames = os.listdir(data_path)
    # Sort by numeric prefix so the list index equals the document id.
    filenames = sorted(filenames, key=lambda x: int(x.split(".")[0]))
    for doc_id, pos_list in pos.items():
        doc_name = filenames[doc_id]
        print("{}: {}".format(doc_name, term))
        print("----------------------------------------------------------")
        with open(os.path.join(data_path, doc_name), "r") as file:
            content = file.read()
            raw_term_list = content.split(" ")
            for pos_id in pos[doc_id]:
                # NOTE(review): when pos_id < k the start index goes negative
                # and Python's slicing wraps from the end, yielding an empty
                # or wrong window -- confirm pos_id >= k is guaranteed.
                display_content = " ".join(raw_term_list[pos_id - k:pos_id + k + 1])
                print(display_content)
            # Redundant inside `with`, but harmless.
            file.close()
| cuteyyt/searchEngine | miniSearchEngine/construct_engine/utils.py | utils.py | py | 4,878 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 5... |
40585203716 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from . import common
import numpy as np
import pytorch_lightning as pl
class CNN_Encoder(nn.Module):
    """Convolutional encoder mapping an image to an `output_size`-dim embedding.

    Five conv stages: the first keeps the spatial resolution, the next four
    halve it (stride 2) while the channel count grows in multiples of
    `channel_mult`; the flattened feature map is projected by a linear head.
    """

    def __init__(self, output_size, input_size=(1, 28, 28)):
        super(CNN_Encoder, self).__init__()

        self.input_size = input_size  # (channels, height, width)
        self.channel_mult = 16  # base channel count; stages use 1/2/4/8/16x

        # convolutions
        self.conv = nn.Sequential(
            nn.Conv2d(
                in_channels=self.input_size[0],
                out_channels=self.channel_mult * 1,
                kernel_size=3,
                stride=1,
                padding=1,
            ),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(self.channel_mult * 1, self.channel_mult * 2, 4, 2, 1),
            nn.BatchNorm2d(self.channel_mult * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(self.channel_mult * 2, self.channel_mult * 4, 4, 2, 1),
            nn.BatchNorm2d(self.channel_mult * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(self.channel_mult * 4, self.channel_mult * 8, 4, 2, 1),
            nn.BatchNorm2d(self.channel_mult * 8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(self.channel_mult * 8, self.channel_mult * 16, 4, 2, 1),
            nn.BatchNorm2d(self.channel_mult * 16),
            nn.LeakyReLU(0.2, inplace=True),
        )

        # Flattened size of the conv output, probed with a dummy forward pass.
        self.flat_fts = self.get_flat_fts(self.conv)

        self.linear = nn.Sequential(
            nn.Linear(self.flat_fts, output_size),
            nn.BatchNorm1d(output_size),
            nn.LeakyReLU(0.2),
        )

    def get_flat_fts(self, fts):
        """Return the flattened feature count produced by `fts` for one input.

        NOTE(review): torch.autograd.Variable has been a no-op wrapper since
        torch 0.4 -- plain torch.ones would suffice.
        """
        f = fts(Variable(torch.ones(1, *self.input_size)))
        return int(np.prod(f.size()[1:]))

    def forward(self, x):
        """Encode `x` (any leading shape reshaped to (-1, *input_size))."""
        x = self.conv(x.view(-1, *self.input_size))
        x = x.view(-1, self.flat_fts)
        return self.linear(x)
class CNN_Decoder(nn.Module):
    """Transposed-conv decoder mapping a latent vector back to a 3-channel image.

    Args:
        embedding_size: dimensionality of the latent input.
        flat_fts: flattened feature count of the matching encoder's conv output
            (the FC layer emits half of it, matching channel_mult * 8 maps).
        base_width, base_height: spatial size of the encoder's last feature map
            (probed by the caller, see AutoEncoder.__init__).
    """

    def __init__(self, embedding_size, flat_fts, base_width, base_height):
        super(CNN_Decoder, self).__init__()
        self.input_dim = embedding_size
        self.channel_mult = 16
        self.output_channels = 3  # decoder always emits RGB
        self.fc_output_dim = flat_fts // 2
        self.base_width = base_width
        self.base_height = base_height

        # Latent vector -> flat feature vector.
        self.fc = nn.Sequential(
            nn.Linear(self.input_dim, self.fc_output_dim),
            nn.BatchNorm1d(self.fc_output_dim),
            nn.ReLU(True),
        )

        # Flat vector -> (channel_mult*8, base_width, base_height) feature map.
        self.deflatten = nn.Sequential(
            nn.Unflatten(1, (self.channel_mult * 8, self.base_width, self.base_height))
        )

        # Four stride-2 transposed convs upsample 16x; Sigmoid bounds pixels to [0, 1].
        self.deconv = nn.Sequential(
            nn.ConvTranspose2d(
                self.channel_mult * 8, self.channel_mult * 4, 4, 2, 1, bias=False
            ),
            nn.BatchNorm2d(self.channel_mult * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(
                self.channel_mult * 4, self.channel_mult * 2, 4, 2, 1, bias=False
            ),
            nn.BatchNorm2d(self.channel_mult * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(
                self.channel_mult * 2, self.channel_mult * 1, 4, 2, 1, bias=False
            ),
            nn.BatchNorm2d(self.channel_mult * 1),
            nn.ReLU(True),
            nn.ConvTranspose2d(
                self.channel_mult * 1, self.output_channels, 4, 2, 1, bias=False
            ),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Decode latent batch `x` into images."""
        x = self.fc(x)
        x = self.deflatten(x)
        x = self.deconv(x)
        return x
class AutoEncoder(pl.LightningModule):
    """Lightning convolutional autoencoder trained with an L1 reconstruction loss."""

    def __init__(self, input_shape, latent_dim=64):
        super(AutoEncoder, self).__init__()
        self.input_shape = input_shape
        self.latent_dim = latent_dim
        self.encoder = CNN_Encoder(latent_dim, input_shape)

        # Probe the encoder's conv stack with a dummy tensor to recover the
        # spatial size of its last feature map for the decoder.
        _tensor = torch.rand(1, *input_shape)
        _conv_out = self.encoder.conv(_tensor)
        # NOTE(review): debug print leftover -- consider removing.
        print(_conv_out.shape)

        self.decoder = CNN_Decoder(
            latent_dim, self.encoder.flat_fts, _conv_out.shape[2], _conv_out.shape[3]
        )

    def encode(self, x):
        return self.encoder(x)

    def decode(self, x):
        return self.decoder(x)

    def forward(self, x):
        # Full reconstruction: encode then decode.
        return self.decode(self.encode(x))

    def training_step(self, batch, batch_idx):
        # L1 (mean absolute error) between reconstruction and input.
        x_hat = self(batch)
        loss = F.l1_loss(x_hat, batch)
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)
| BenjaminMidtvedt/SCAINCE | models/autoencoders.py | autoencoders.py | py | 4,508 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
27037529813 | import os
import re
from PIL import Image
from datetime import datetime
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.timezone import now
from django.utils.html import format_html
from django.utils.text import slugify
from filebrowser.fields import FileBrowseField
from taggit.managers import TaggableManager
from streamfield.fields import StreamField
from streamblocks.models import (IndexedParagraph, CaptionedImage, Gallery,
LandscapeGallery, DownloadableFile, LinkableList, BoxedText, EventUpgrade)
from .choices import *
from users.models import User, Member
def date_directory_path(instance, filename):
    """Build the upload path 'uploads/YYYY/MM/DD/<filename>' for a file field.

    Uses the owning instance's `date` when set, otherwise the current time.

    Fix: the previous implementation bound a local variable named `now`,
    shadowing the module-level `now` imported from django.utils.timezone;
    renamed to avoid confusion.
    """
    # presumably instance.date may be timezone-aware while datetime.now() is
    # naive -- only the formatted date is used here, so this is harmless;
    # TODO confirm.
    moment = instance.date if instance.date else datetime.now()
    return 'uploads/{0}/{1}/{2}/{3}'.format(
        moment.strftime("%Y"), moment.strftime("%m"), moment.strftime("%d"), filename)
def generate_unique_slug(klass, field):
    """
    return unique slug if origin slug exists.
    eg: `foo-bar` => `foo-bar-1`

    :param `klass` is Class model.
    :param `field` is specific field for title.
    Thanks to djangosnippets.org!
    """
    base_slug = slugify(field)
    candidate = base_slug
    suffix = 1
    # Keep appending an increasing numeric suffix until no row collides.
    while klass.objects.filter(slug=candidate).exists():
        candidate = '%s-%d' % (base_slug, suffix)
        suffix += 1
    return candidate
class Location(models.Model):
    """A venue where events take place: address, map links and contact info."""

    fb_image = FileBrowseField("Immagine", max_length=200,
        directory="locations/", extensions=[".jpg", ".png", ".jpeg", ".gif",
        ".tif", ".tiff"], null=True, blank=True)
    title = models.CharField('Titolo',
        help_text='Il nome del luogo',
        max_length = 50)
    slug = models.SlugField(max_length=50, unique=True)
    address = models.CharField('Indirizzo', max_length = 200,
        help_text = 'Via/Piazza, civico, CAP, Città',)
    gmap_link = models.URLField('Link di Google Map',
        blank= True, null=True,
        help_text="Dal menu di Google Maps seleziona 'Condividi/link', \
        copia il link e incollalo qui",
        )
    gmap_embed = models.TextField('Incorpora Google Map',
        blank= True, null=True, max_length=500,
        help_text="Dal menu di Google Maps seleziona 'Condividi/incorpora', \
        copia il link e incollalo qui",
        )
    body = models.TextField('Descrizione', blank= True, null=True,)
    website = models.URLField('Sito internet',
        blank= True, null=True, )
    email = models.EmailField(blank = True, null = True,
        verbose_name = 'Email',)
    phone = models.CharField(max_length = 50,
        blank = True, null = True, verbose_name = 'Telefono/i',)

    def save(self, *args, **kwargs):
        # If the user pasted a full <iframe> snippet, extract just the URL.
        # NOTE(review): gmap_embed is nullable -- .startswith on None would
        # raise AttributeError; confirm admin forms always supply a string.
        if not self.gmap_embed.startswith('http'):
            # thanks to geeksforgeeks.com! findall returns a list
            list = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', self.gmap_embed)
            if list:
                self.gmap_embed = list[0]
        if not self.slug: # create
            self.slug = generate_unique_slug(Location, self.title)
        super(Location, self).save(*args, **kwargs)

    def get_gmap_link(self):
        # Admin list helper: render the map URL as a button, or '-' if unset.
        if self.gmap_link:
            link = format_html('<a href="{}" class="btn" target="_blank">Mappa</a>',
                self.gmap_link)
        else:
            link = '-'
        return link
    get_gmap_link.short_description = 'Link di Google Maps'

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = 'Luogo'
        verbose_name_plural = 'Luoghi'
        ordering = ('id', )
def update_indexed_paragraphs(stream_list, type, id):
    """Point every IndexedParagraph block in `stream_list` at the parent
    object identified by content-type id `type` and primary key `id`."""
    for entry in stream_list:
        if entry['model_name'] != 'IndexedParagraph':
            continue
        paragraph = IndexedParagraph.objects.get(id=entry['id'])
        paragraph.parent_type = type
        paragraph.parent_id = id
        paragraph.save()
class Event(models.Model):
    """An event with streamed content sections, a location, and email-notice
    settings.  URL path: /calendario/YYYY/MM/DD/<slug>."""

    fb_image = FileBrowseField("Immagine", max_length=200, directory="events/",
        extensions=[".jpg", ".png", ".jpeg", ".gif", ".tif", ".tiff"],
        null=True, blank=True)
    carousel = StreamField(model_list=[ LandscapeGallery, ],
        null=True, blank=True, verbose_name="Galleria",
        help_text="Una sola galleria, per favore, larghezza minima immagini 2048px")
    title = models.CharField('Titolo',
        help_text="Il titolo dell'evento",
        max_length = 50)
    slug = models.SlugField(max_length=50, editable=False, null=True)
    date = models.DateTimeField('Quando', default = now)
    last_updated = models.DateTimeField(editable=False, null=True)
    location = models.ForeignKey(Location, on_delete=models.SET_NULL,
        null = True, verbose_name = 'Dove', )
    intro = models.CharField('Introduzione',
        default = 'Un altro appuntamento con RP!', max_length = 100)
    stream = StreamField(model_list=[ IndexedParagraph, CaptionedImage,
        Gallery, DownloadableFile, LinkableList, BoxedText, ],
        verbose_name="Lancio")
    # NOTE(review): at class-creation time `EventUpgrade` still refers to the
    # class imported from streamblocks.models; the local EventUpgrade model
    # defined later in this file shadows that name only afterwards -- confirm
    # this is intentional.
    upgrade_stream = StreamField(model_list=[ EventUpgrade, IndexedParagraph,
        DownloadableFile, ],
        verbose_name="Aggiornamenti")
    chron_stream = StreamField(model_list=[ IndexedParagraph, CaptionedImage,
        Gallery, DownloadableFile, LinkableList, BoxedText],
        verbose_name="Cronaca")
    restr_stream = StreamField(model_list=[ IndexedParagraph, CaptionedImage,
        Gallery, DownloadableFile, LinkableList, BoxedText],
        verbose_name="Area riservata",
        help_text="Inserisci qui materiale riservato ai soci",)
    manager = models.ForeignKey(User, on_delete=models.SET_NULL,
        blank= True, null=True, verbose_name = 'Responsabile')
    tags = TaggableManager(verbose_name="Categorie",
        help_text="Lista di categorie separate da virgole",
        through=None, blank=True)
    notice = models.CharField(max_length = 4, choices = NOTICE,
        blank = True, null = True, verbose_name = 'Notifica via email',
        help_text = """Non invia in automatico, per farlo seleziona l'Evento
        dalla Lista degli Eventi, imposta l'azione 'Invia notifica' e fai
        clic su 'Vai'.
        """)

    def get_badge_color(self):
        # Green for upcoming events, grey for past ones, yellow for today.
        if self.date.date() > datetime.today().date():
            return 'success'
        elif self.date.date() < datetime.today().date():
            return 'secondary'
        else:
            return 'warning'

    def get_image(self):
        """Return the event image, falling back to the location's image."""
        if self.fb_image:
            return self.fb_image
        elif self.location.fb_image:
            return self.location.fb_image
        return

    def get_tags(self):
        return list(self.tags.names())

    def get_upgrades(self):
        # At call time this resolves to the local EventUpgrade model below.
        return EventUpgrade.objects.filter(event_id=self.id)

    def get_chronicle(self):
        # True once the event date has passed (a chronicle can be shown).
        if self.date.date() < datetime.today().date():
            return True
        return False

    def get_uploads(self):
        return UserUpload.objects.filter(event_id=self.id)

    def get_path(self):
        return '/calendario/' + self.date.strftime("%Y/%m/%d") + '/' + self.slug

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = generate_unique_slug(Event, self.title)
        if not self.notice:
            self.notice = 'SPAM'
        self.last_updated = now()
        super(Event, self).save(*args, **kwargs)
        #update parent_type end parent_id in IndexedParagraph streamblocks
        type = ContentType.objects.get(app_label='pagine', model='event').id
        id = self.id
        stream_list = self.stream.from_json()
        update_indexed_paragraphs(stream_list, type, id)
        stream_list = self.upgrade_stream.from_json()
        update_indexed_paragraphs(stream_list, type, id)
        stream_list = self.chron_stream.from_json()
        update_indexed_paragraphs(stream_list, type, id)
        stream_list = self.restr_stream.from_json()
        update_indexed_paragraphs(stream_list, type, id)

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = 'Evento'
        verbose_name_plural = 'Eventi'
        ordering = ('-date', )
class EventUpgrade(models.Model):
    """A dated update attached to an Event.

    NOTE(review): this model shadows the `EventUpgrade` imported from
    streamblocks.models for all code defined after this point; the
    Event.upgrade_stream field declared earlier still refers to the
    imported streamblocks class -- confirm this naming is intentional.
    """
    event = models.ForeignKey(Event, on_delete = models.CASCADE,
        null = True, related_name='event_upgrades')
    title = models.CharField('Titolo',
        help_text="Il titolo dell'aggiornamento",
        max_length = 50)
    date = models.DateTimeField('Data', default = now)
    body = models.TextField('Aggiornamento',
        help_text = "Accetta tag HTML.", )

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = 'Aggiornamento'
        verbose_name_plural = 'Aggiornamenti'
        ordering = ('-date', )
class Blog(models.Model):
    """A blog article with streamed content.  URL path: /articoli/<slug>."""

    fb_image = FileBrowseField("Immagine", max_length=200, directory="blogs/",
        extensions=[".jpg", ".png", ".jpeg", ".gif", ".tif", ".tiff"],
        null=True, blank=True)
    carousel = StreamField(model_list=[ LandscapeGallery, ],
        null=True, blank=True, verbose_name="Galleria",
        help_text="Una sola galleria, per favore, larghezza minima immagini 2048px")
    title = models.CharField('Titolo',
        help_text="Il titolo dell'articolo",
        max_length = 50)
    slug = models.SlugField(max_length=50, editable=False, null=True)
    date = models.DateTimeField('Data', default = now)
    last_updated = models.DateTimeField(editable=False, null=True)
    intro = models.CharField('Introduzione',
        default = 'Un altro articolo di approfondimento da RP!', max_length = 100)
    stream = StreamField( model_list=[ IndexedParagraph, CaptionedImage,
        Gallery, DownloadableFile, LinkableList, BoxedText],
        verbose_name="Testo" )
    author = models.ForeignKey(User, on_delete=models.SET_NULL,
        blank= True, null=True, verbose_name = 'Autore')
    tags = TaggableManager(verbose_name="Categorie",
        help_text="Lista di categorie separate da virgole",
        through=None, blank=True)

    def get_path(self):
        return '/articoli/' + self.slug

    def get_uploads(self):
        return UserUpload.objects.filter(post_id=self.id)

    def get_tags(self):
        return list(self.tags.names())

    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = generate_unique_slug(Blog, self.title)
        # Bug fix: was `self.last_updated = now` -- that assigned the
        # callable itself instead of the current timestamp.  Event.save()
        # already uses now() correctly; this makes the models consistent.
        self.last_updated = now()
        super(Blog, self).save(*args, **kwargs)
        #update parent_type end parent_id in IndexedParagraph streamblocks
        type = ContentType.objects.get(app_label='pagine', model='blog').id
        id = self.id
        stream_list = self.stream.from_json()
        update_indexed_paragraphs(stream_list, type, id)

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = 'Articolo'
        verbose_name_plural = 'Articoli'
        ordering = ('-date', )
class UserUpload(models.Model):
    """A user-submitted contribution attached to an Event or a Blog post."""
    # Both FKs are nullable — presumably exactly one of event/post is set
    # per row; confirm against the views that create these.
    event = models.ForeignKey(Event, on_delete = models.CASCADE,
        null = True, related_name='event_uploads')
    post = models.ForeignKey(Blog, on_delete = models.CASCADE,
        null = True, related_name='blog_uploads')
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null = True,
        verbose_name = 'Utente')
    date = models.DateTimeField('Data', default = now, )
    image = models.ImageField('Immagine', blank = True, null = True,
        upload_to = date_directory_path,)
    body = models.TextField('Testo', help_text = "Scrivi qualcosa.", )

    def __str__(self):
        return 'Contributo - ' + str(self.id)

    class Meta:
        verbose_name = 'Contributo'
        verbose_name_plural = 'Contributi'
        # Newest contributions first.
        ordering = ('-id', )
| andywar65/rpnew_base | pagine/models.py | models.py | py | 11,731 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.utils.timezone.now",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 22,
"usage_type": "call"
},
{
"api_nam... |
from collections import defaultdict

# Advent of Code 2018 day 18 part 2: lumber-collection cellular automaton.
puzzle = open('puzzle', 'r').read().splitlines()

# Grid keyed by (y, x); out-of-bounds lookups yield '' (defaultdict(str)).
grid = defaultdict(str)
for y in range(len(puzzle)):
    for x in range(len(puzzle[y])):
        grid[(y, x)] = puzzle[y][x]
def surrounding(cords, grid):
    """Return the values of the 8 cells around `cords`, in row-major order.

    Missing neighbours come back as '' because `grid` is a defaultdict(str);
    note the lookups insert those boundary keys into `grid` as a side effect.
    """
    y0, x0 = cords
    return [grid[(y, x)]
            for y in range(y0 - 1, y0 + 2)
            for x in range(x0 - 1, x0 + 2)
            if (y, x) != cords]
minutes = 1000000000
seen = []
seen.append(grid.copy())
cycle = 0


def _step():
    """Advance the automaton one minute, mutating `grid` in place while
    reading neighbours from a frozen copy of the previous state.
    (Extracted to remove the duplicated update loop.)"""
    old_grid = grid.copy()
    for y in range(len(puzzle)):
        for x in range(len(puzzle[y])):
            surr = surrounding((y, x), old_grid)
            this = grid[(y, x)]
            if this == '.' and surr.count('|') >= 3:
                grid[(y, x)] = '|'
            elif this == '|' and surr.count('#') >= 3:
                grid[(y, x)] = '#'
            elif this == '#' and (surr.count('#') == 0 or surr.count('|') == 0):
                grid[(y, x)] = '.'


# Run until the state repeats, then fast-forward through the cycle.
for m in range(minutes):
    _step()
    if grid in seen:
        # NOTE(review): state after this iteration is minute m+1, so the
        # true period is m + 1 - seen.index(grid); the original formula
        # (kept unchanged here) uses m - index — verify against the puzzle.
        cycle = minutes % (m - seen.index(grid))
        break
    seen.append(grid.copy())

for _ in range(cycle):
    _step()

# Answer: wooded acres times lumberyard acres.
print(list(grid.values()).count('|') * list(grid.values()).count('#'))
{
"api_name": "collections.defaultdict",
"line_number": 4,
"usage_type": "call"
}
] |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: lishuang
@description: Salary prediction from ability descriptions.
Dataset: ability descriptions and salaries scraped from 4512 job postings.
Step 1: load the data.
Step 2: visualize position/skill relations with NetworkX.
Step 3: extract text features with TF-IDF.
Step 4: regression — fit a KNN regressor matching abilities to salaries.
Step 5: predict a salary from given ability keywords.
"""
import random
import re
import jieba
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import KNeighborsRegressor

# Show all columns when printing DataFrames.
pd.set_option('display.max_columns', None)
# Render minus signs correctly in matplotlib plots.
plt.rcParams['axes.unicode_minus'] = False
def handle_job_string(row):
    """Concatenate the value tokens of every two-token line in `row`.

    `row` is the string form of a pandas row: newline-separated
    "<field> <value>" lines; lines that do not split into exactly two
    tokens are ignored.
    """
    parts = []
    for position, line in enumerate(row.split('\n')):
        tokens = line.split()
        if len(tokens) != 2:
            continue
        # Shadows the enumerate index with the first token (a string).
        position, value = tokens
        # NOTE(review): `position` is a str here, so this never triggers —
        # the original presumably meant to skip the first line; preserved
        # as-is to keep behavior identical.
        if position == 0:
            continue
        parts.append(value)
    return ''.join(parts)
def predict_by_label(test_string, vectorizer, model):
    """Predict a salary from an ability/skill description string.

    :param test_string: free-text description of abilities
    :param vectorizer: fitted TfidfVectorizer
    :param model: fitted regressor
    :return: the prediction for the first token
    """
    tokens = list(jieba.cut(test_string))
    # NOTE(review): transform() receives the token list, so every token
    # becomes its own document and only the first token's prediction is
    # returned — possibly `vectorizer.transform([' '.join(tokens)])` was
    # intended. Behavior preserved.
    token_matrix = vectorizer.transform(tokens)
    predictions = model.predict(token_matrix)
    return predictions[0]
def main():
    """End-to-end pipeline: load job data, visualize position/skill links,
    vectorize descriptions with TF-IDF, fit a KNN salary regressor and
    predict salaries for a few hand-written ability descriptions."""
    # 1. Load data
    data = pd.read_excel('./data/jobs_4k.xls')
    print(data.head())

    # 2. Relate positions to skills and visualize with NetworkX
    position_names = data['positionName'].tolist()
    skill_labels = data['skillLabels'].tolist()

    # Map each position to the union of its skill lists.
    position_to_skill_dict = dict()
    for p, s in zip(position_names, skill_labels):
        # NOTE: skillLabels cells are string-encoded lists; eval() on file
        # data is acceptable only because the scraped dataset is trusted.
        if position_to_skill_dict.get(p) is None:
            position_to_skill_dict[p] = eval(s)
        else:
            position_to_skill_dict[p] += eval(s)
    print(position_to_skill_dict)

    # Randomly pick k positions to draw.
    sample_nodes = random.sample(position_names, k=5)
    print(f'随机选择k个工作岗位: \n{sample_nodes}')

    # Bug fix: copy the list instead of aliasing it, so appending skills
    # below no longer silently grows `sample_nodes` (which would corrupt
    # the `p in sample_nodes` membership test).
    sample_nodes_connections = list(sample_nodes)
    for p, s in position_to_skill_dict.items():
        if p in sample_nodes:
            sample_nodes_connections += s
    print(f'将职位和能力放在一起: \n{sample_nodes_connections}')

    # Draw the sampled sub-graph.
    G = nx.Graph(position_to_skill_dict)
    sample_graph = G.subgraph(sample_nodes_connections)
    plt.figure(figsize=(16, 8))
    pos = nx.spring_layout(sample_graph, k=1)
    nx.draw(sample_graph, pos, with_labels=True)
    # plt.show()

    # Rank node (position/skill) influence with PageRank.
    pr = nx.pagerank(G, alpha=0.9)
    ranked_position_skill = sorted([(name, value) for name, value in pr.items()], key=lambda _: _[1], reverse=True)
    print(f'排序后的职位和技能: \n{ranked_position_skill}')

    # 3. Build text features with TF-IDF
    data_X = data.drop(['salary'], axis=1).copy()
    targets = data['salary'].tolist()
    # Merge all feature columns into one string per row.
    data_X['merged'] = data_X.apply(lambda _: ''.join(str(_)), axis=1)

    # Tokenize with jieba after stripping non-word characters.
    cut_X = list()
    for idx, row in enumerate(data_X['merged']):
        job_string = handle_job_string(row)
        cut_X.append(' '.join(list(jieba.cut(''.join(re.findall('\\w+', job_string))))))
    print(f'分词处理后的数据: \n{cut_X[0]}')

    vectorizer = TfidfVectorizer()
    X = vectorizer.fit_transform(cut_X)
    print(f'提取文本特征后的数据: \n{X[0]}')

    # Salary "10k-15k" -> mean of the numbers, e.g. 12.5.
    target_numerical = [np.mean(list(map(float, re.findall('\\d+', target)))) for target in targets]
    Y = target_numerical

    # 4. Fit the KNN regressor matching abilities to salaries
    model = KNeighborsRegressor(n_neighbors=2)
    model.fit(X, Y)
    # Bug fix: score is a method — call it (training R^2) instead of
    # printing the bound-method object.
    print(f'KNN模型评分为: {model.score(X, Y)}')

    # 5. Predict salaries for hand-written ability descriptions
    test_string_list = [
        '测试 北京 3年 专科',
        '测试 北京 4年 专科',
        '算法 北京 4年 本科',
        'UI 北京 4年 本科',
        '广州Java本科3年掌握大数据',
        '沈阳Java硕士3年掌握大数据',
        '沈阳Java本科3年掌握大数据',
        '北京算法硕士3年掌握图像识别'
    ]
    for test_string in test_string_list:
        print(f'职位和能力信息: {test_string}, 预测的薪资为: {predict_by_label(test_string, vectorizer, model)}')


if __name__ == '__main__':
    main()
| TatenLee/machine-learning | bi/core/l13/salary_prediction.py | salary_prediction.py | py | 4,807 | python | zh | code | 1 | github-code | 36 | [
{
"api_name": "pandas.set_option",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "j... |
23297695443 | import pandas as pd
import pathlib
from model import toa
# Directory holding the test fixture CSVs (sibling "data" directory).
datadir = pathlib.Path(__file__).parents[0].joinpath('data')
def test_toa_per_region():
    """toa_per_region should reproduce the silvopasture TLA reference table."""
    # Mock per-region allocation (modified silvopasture TLA data).
    mock_allocation = [331.702828, 181.9634517, 88.98630743, 130.15193962, 201.18287123,
                       0.,  # new ABNJ region
                       933.98739798, 37.60589239, 7.02032552, 43.64118779, 88.61837725]
    region_index = pd.Index(
        ['OECD90', 'Eastern Europe', 'Asia (Sans Japan)', 'Middle East and Africa', 'Latin America', 'ABNJ', 'Global',
         'China', 'India', 'EU', 'USA'])
    mock_ocean_dist = pd.DataFrame(mock_allocation, columns=['All'], index=region_index)
    expected = pd.read_csv(datadir.joinpath('sp_tla_with_abnj.csv'), index_col=0)
    actual = toa.toa_per_region(ocean_dist=mock_ocean_dist)
    pd.testing.assert_frame_equal(expected, actual, check_dtype=False)
| ProjectDrawdown/solutions | model/tests/test_toa.py | test_toa.py | py | 922 | python | en | code | 203 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.Index",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line... |
43509548445 | #!/usr/bin/env python3
from dialog.parser import Parser
from dialog.scope import Scope
from dialog.returns import Returns
import dialog.link_parser
import multiprocessing
import json
from dialog.interpreter import Dialog
class Instance(Dialog):
    """
    Dialog interpreter connected with websockets.
    """

    def start_server(self, to_user, to_ds):
        """Main interpreter loop.

        Forwards answers produced by background routines, matches each
        incoming user phrase against the currently expected dialog states
        and pushes JSON messages (phrase / interpretation / unknown) back
        to the client.

        :param to_user: multiprocessing.Queue for outgoing JSON strings
        :param to_ds: multiprocessing.Queue with raw user input strings
        """
        # NOTE(review): this loop busy-spins when both queues are idle;
        # consider a blocking get with timeout.
        while True:
            # Forward answers produced by background routines.
            answers = self.returns.get_returns()
            for answer in answers:
                tosay, questions = answer.accept()
                to_user.put(json.dumps({'type': 'phrase', 'text': tosay}))
                self._extend_expected(questions)
            # Process pending user input, if any.
            if not to_ds.empty():
                input_origin = to_ds.get()
                # Bug fix: the module does `import dialog.link_parser`,
                # which only binds the name `dialog`; the bare
                # `link_parser.parse(...)` used before raised NameError.
                input_phrase = dialog.link_parser.parse(input_origin)
                # Score every expected state against the parsed phrase.
                states_probability = []
                for state in self.expected:
                    states_probability.append((state, state.compare(input_phrase)))
                states_probability = sorted(states_probability, key=lambda x: x[1], reverse=True)
                state = states_probability[0][0]
                if states_probability[0][1] < 0.2:
                    # No state is similar enough: echo the input and report unknown.
                    to_user.put(json.dumps({
                        'type': 'origin',
                        'text': input_origin
                    }))
                    to_user.put(json.dumps({'type': 'unknown'}))
                    # TODO: save unknown request, with state!!!
                else:
                    to_user.put(json.dumps({
                        'type': 'interpretation',
                        'origin': input_origin,
                        'phrase': str(state),
                        'similarity': states_probability[0][1]
                    }))
                    tosay, questions = state.accept(input_phrase)
                    for answer in tosay:
                        if answer != "":
                            to_user.put(json.dumps({'type': 'phrase', 'text': answer}))
                    self._extend_expected(questions)
from main_slave import export

# Source files exposed to the client through the 'sources' message.
py_filename = "main_slave.py"
ddl_filename = "examples/demo.dlg"

import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import time, os.path
class WSHandler(tornado.websocket.WebSocketHandler):
    """Websocket endpoint: one dialog-system/producer process pair per connection."""

    def check_origin(self, origin):
        # Allow cross-origin websocket connections.
        return True

    def open(self):
        """Spawn the dialog interpreter and the producer relaying its output."""
        dialog = Instance(export)
        dialog.load(ddl_filename)
        self.to_user = multiprocessing.Queue(maxsize=0)
        self.to_ds = multiprocessing.Queue(maxsize=0)
        self.dialog_system = multiprocessing.Process(
            target=dialog.start_server,
            args=(self.to_user, self.to_ds, ))
        self.dialog_system.start()
        self.producer_process = multiprocessing.Process(
            target=self.producer,
            args=())
        self.producer_process.start()

    def on_message(self, message):
        """Dispatch an incoming JSON message by its 'type' field."""
        message = json.loads(message)
        if message['type'] == 'phrase':
            print(">", message)
            self.to_ds.put(message['text'])
        elif message['type'] == 'sources':
            # Fix: dropped the bogus `global py_file` / `global ddl_file`
            # statements — those names never exist; the module globals
            # actually read are py_filename / ddl_filename.
            ddl_modified = int(os.path.getmtime(ddl_filename))
            py_modified = int(os.path.getmtime(py_filename))
            # Fix: close the files instead of leaking descriptors.
            with open(ddl_filename, "rb") as ddl_file:
                ddl_content = ddl_file.read().decode("utf-8")
            with open(py_filename, "rb") as py_file:
                py_content = py_file.read().decode("utf-8")
            response = {
                "type": "sources",
                "modified": max(py_modified, ddl_modified) * 1000,
                "description": {
                    "filename": ddl_filename,
                    "content": ddl_content,
                    "modified": ddl_modified * 1000
                },
                "code": {
                    "filename": py_filename,
                    "content": py_content,
                    "modified": py_modified * 1000
                }
            }
            self.to_user.put(json.dumps(response))
        elif message['type'] == 'interpretation':
            print(">", message)
            # print(message['origin'])
            # TODO: save reports

    def on_close(self):
        """Tear down the per-connection worker processes."""
        self.producer_process.terminate()
        self.dialog_system.terminate()

    def producer(self):
        """Relay messages from the dialog system to the websocket client."""
        while True:
            message = self.to_user.get()
            time.sleep(0.5)  # throttle outgoing messages
            print("<", message)
            self.write_message(message)
# class MainHandler(tornado.web.RequestHandler):
# def get(self):
# self.render('index.html')
class Application(tornado.web.Application):
    """Tornado application exposing the websocket API under /api."""

    def __init__(self):
        routes = (
            # (r'/', MainHandler),
            (r'/api/?', WSHandler),
        )
        super().__init__(routes)
application = Application()
# Serve the websocket API on port 8888 (blocks in the IOLoop).
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
| kusha/dialog | dialog/server.py | server.py | py | 6,720 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "dialog.interpreter.Dialog",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"lin... |
20456988287 | from django import forms
from django.contrib import admin
from .models import Category, Comment, Genre, Review, Title, User
# Placeholder the admin shows for empty field values ("empty" in Russian).
EMPTY = '-пусто-'
class UserAdminForm(forms.ModelForm):
    """Admin form for User that renders the password field as a masked input."""

    def __init__(self, *args, **kwargs):
        super(UserAdminForm, self).__init__(*args, **kwargs)
        # Mask the (hashed) password value in the admin change form.
        self.fields['password'].widget = forms.PasswordInput()

    class Meta:
        model = User
        fields = '__all__'
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    """Admin configuration for User accounts."""
    # Columns shown in the user change list.
    list_display = (
        'username', 'email', 'first_name', 'last_name', 'role',
        'is_superuser', 'is_staff', 'date_joined',
    )
    empty_value_display = EMPTY
    # Custom form so the password renders as a masked input.
    form = UserAdminForm
@admin.register(Title)
class TitleAdmin(admin.ModelAdmin):
    """Admin configuration for Titles; category is editable inline in the list."""
    list_display = ('pk', 'name', 'year', 'description', 'category')
    list_editable = ('category',)
    search_fields = ('name', 'description')
    empty_value_display = EMPTY
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Admin configuration for Categories."""
    list_display = ('pk', 'name', 'slug')
    empty_value_display = EMPTY
@admin.register(Genre)
class GenreAdmin(admin.ModelAdmin):
    """Admin configuration for Genres."""
    list_display = ('pk', 'name', 'slug')
    empty_value_display = EMPTY
@admin.register(Review)
class ReviewAdmin(admin.ModelAdmin):
    """Admin configuration for Reviews."""
    list_display = ('pk', 'title', 'text', 'author', 'pub_date')
    empty_value_display = EMPTY
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for Comments; searchable by text, filterable by date."""
    list_display = ('pk', 'text', 'author', 'review', 'pub_date')
    empty_value_display = EMPTY
    search_fields = ('text',)
    list_filter = ('pub_date',)
| photometer/yamdb_final | api_yamdb/reviews/admin.py | admin.py | py | 1,578 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.forms.PasswordInput",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "dja... |
13330164064 | import time
import numpy as np
import torch
import pickle
import warnings
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torchvision import datasets, transforms
from scipy.ndimage.interpolation import rotate as scipyrotate
from networks import MLP, ConvNet, LeNet, AlexNet, VGG11BN, VGG11, ResNet18, ResNet18BN_AP
from PIL import Image
from torchvision import datasets, transforms
from typing import Any, Callable, List, Optional, Union, Tuple, cast, Dict
import os.path
from urllib.error import URLError
from torchvision.datasets.utils import download_and_extract_archive, extract_archive, verify_str_arg, check_integrity, download_url
from torchvision.datasets.vision import VisionDataset
from torchvision.transforms import functional as FF
import argparse
def get_dataset(dataset, data_path, args):
    """Build the (optionally backdoored) train/test datasets for `dataset`.

    :param dataset: one of 'MNIST', 'FashionMNIST', 'SVHN', 'STL10',
        'CIFAR10', 'CIFAR100'
    :param data_path: root directory for the torchvision downloads
    :param args: namespace providing portion, backdoor_size, naive and clean,
        forwarded to the *_BADNETS poisoning wrappers (defined elsewhere in
        this file)
    :return: (channel, im_size, num_classes, class_names, mean, std,
        dst_train, dst_test, testloader)
    """
    if dataset == 'MNIST':
        channel = 1
        im_size = (32, 32)  # resized up from the native 28x28 below
        num_classes = 10
        mean = [0.1307]
        std = [0.3081]
        transform = transforms.Compose([transforms.Resize([32, 32]), transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
        # NOTE(review): MNIST is the only branch without a *_BADNETS
        # wrapper — confirm poisoning is intentionally unsupported here.
        dst_train = datasets.MNIST(data_path, train=True, download=True, transform=transform) # no augmentation
        dst_test = datasets.MNIST(data_path, train=False, download=True, transform=transform)
        class_names = [str(c) for c in range(num_classes)]

    elif dataset == 'FashionMNIST':
        channel = 1
        im_size = (32, 32)
        num_classes = 10
        mean = [0.2861]
        std = [0.3530]
        transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
        # Backdoor-aware wrapper: poisons `portion` of the data with a
        # `backdoor_size` trigger mapped to trigger_label 0.
        dst_train = FashionMNIST_BADNETS(data_path, train=True, download=True,
                        transform=transform, trigger_label=0, portion=args.portion, backdoor_size=args.backdoor_size, backdoor=args.naive, clean_test=args.clean)
        dst_test = FashionMNIST_BADNETS(data_path, train=False, download=True,
                        transform=transform, trigger_label=0, portion=args.portion, backdoor_size=args.backdoor_size, backdoor=args.naive, clean_test=args.clean)
        class_names = dst_train.classes

    elif dataset == 'SVHN':
        channel = 3
        im_size = (32, 32)
        num_classes = 10
        mean = [0.4377, 0.4438, 0.4728]
        std = [0.1980, 0.2010, 0.1970]
        transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
        dst_train = SVHN_BADNETS(data_path, split="train", download=True,
                        transform=transform, trigger_label=0, portion=args.portion, backdoor_size=args.backdoor_size, backdoor=args.naive, clean_test=args.clean)
        dst_test = SVHN_BADNETS(data_path, split="test", download=True,
                        transform=transform, trigger_label=0, portion=args.portion, backdoor_size=args.backdoor_size, backdoor=args.naive, clean_test=args.clean)
        class_names = [str(c) for c in range(num_classes)]

    elif dataset == 'STL10':
        channel = 3
        # NOTE(review): 32x32 and CIFAR-10 mean/std used for STL-10 (native
        # 96x96) — presumably the wrapper downscales; TODO confirm.
        im_size = (32, 32)
        num_classes = 10
        mean = [0.4914, 0.4822, 0.4465]
        std = [0.2023, 0.1994, 0.2010]
        transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
        dst_train = STL10_BADNETS(data_path, split="train", download=True,
                        transform=transform, trigger_label=0, portion=args.portion, backdoor_size=args.backdoor_size, backdoor=args.naive, clean_test=args.clean)
        dst_test = STL10_BADNETS(data_path, split="test", download=True,
                        transform=transform, trigger_label=0, portion=args.portion, backdoor_size=args.backdoor_size, backdoor=args.naive, clean_test=args.clean)
        class_names = dst_train.classes

    elif dataset == 'CIFAR10':
        channel = 3
        im_size = (32, 32)
        num_classes = 10
        mean = [0.4914, 0.4822, 0.4465]
        std = [0.2023, 0.1994, 0.2010]
        transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
        dst_train = CIFAR10_BADNETS(data_path, train=True, download=True,
                        transform=transform, trigger_label=0, portion=args.portion, backdoor_size=args.backdoor_size, backdoor=args.naive, clean_test=args.clean)
        dst_test = CIFAR10_BADNETS(data_path, train=False, download=True,
                        transform=transform, trigger_label=0, portion=args.portion, backdoor_size=args.backdoor_size, backdoor=args.naive, clean_test=args.clean)
        class_names = dst_train.classes

    elif dataset == 'CIFAR100':
        channel = 3
        im_size = (32, 32)
        num_classes = 100
        mean = [0.5071, 0.4866, 0.4409]
        std = [0.2673, 0.2564, 0.2762]
        transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=mean, std=std)])
        dst_train = CIFAR100_BADNETS(data_path, train=True, download=True,
                        transform=transform, trigger_label=0, portion=args.portion, backdoor_size=args.backdoor_size, backdoor=args.naive, clean_test=args.clean)
        dst_test = CIFAR100_BADNETS(data_path, train=False, download=True,
                        transform=transform, trigger_label=0, portion=args.portion, backdoor_size=args.backdoor_size, backdoor=args.naive, clean_test=args.clean)
        class_names = dst_train.classes

    else:
        exit('unknown dataset: %s'%dataset)

    # NOTE(review): shuffle=True on a test loader is unusual but harmless
    # for epoch-averaged accuracy.
    testloader = torch.utils.data.DataLoader(dst_test, batch_size=256, shuffle=True, num_workers=3)
    return channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader
class TensorDataset(Dataset):
    """In-memory dataset over pre-built image/label tensors.

    Images are detached and cast to float; labels are only detached.
    """

    def __init__(self, images, labels):  # images: n x c x h x w tensor
        self.images = images.detach().float()
        self.labels = labels.detach()

    def __getitem__(self, index):
        return (self.images[index], self.labels[index])

    def __len__(self):
        return len(self.images)
def get_default_convnet_setting():
    """Default ConvNet hyper-parameters: (width, depth, activation, norm, pooling)."""
    return 128, 3, 'relu', 'instancenorm', 'avgpooling'
def get_network(model, channel, num_classes, im_size=(32, 32)):
    """Instantiate the network named by `model` and move it to GPU(s) if available.

    :param model: architecture name; 'ConvNet' variants encode one
        hyper-parameter override each (D=depth, W=width, A=activation,
        norm suffix, pooling suffix)
    :param channel: number of input channels
    :param num_classes: output classes
    :param im_size: input spatial size forwarded to ConvNet variants
    :return: the network on 'cuda' (DataParallel when multiple GPUs) or 'cpu'
    """
    # Seed from wall time so repeated calls give differently-initialized nets.
    torch.random.manual_seed(int(time.time() * 1000) % 100000)
    net_width, net_depth, net_act, net_norm, net_pooling = get_default_convnet_setting()

    if model == 'MLP':
        net = MLP(channel=channel, num_classes=num_classes)
    elif model == 'ConvNet':
        net = ConvNet(channel=channel, num_classes=num_classes)
    elif model == 'LeNet':
        net = LeNet(channel=channel, num_classes=num_classes)
    elif model == 'AlexNet':
        net = AlexNet(channel=channel, num_classes=num_classes)
    elif model == 'VGG11':
        net = VGG11( channel=channel, num_classes=num_classes)
    elif model == 'VGG11BN':
        net = VGG11BN(channel=channel, num_classes=num_classes)
    elif model == 'ResNet18':
        net = ResNet18(channel=channel, num_classes=num_classes)
    elif model == 'ResNet18BN_AP':
        net = ResNet18BN_AP(channel=channel, num_classes=num_classes)

    # ConvNet depth variants.
    elif model == 'ConvNetD1':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=1, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetD2':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=2, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetD3':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=3, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetD4':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=4, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)

    # ConvNet width variants.
    elif model == 'ConvNetW32':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=32, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetW64':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=64, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetW128':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=128, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetW256':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=256, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)

    # ConvNet activation variants.
    elif model == 'ConvNetAS':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='sigmoid', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetAR':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='relu', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetAL':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act='leakyrelu', net_norm=net_norm, net_pooling=net_pooling, im_size=im_size)

    # ConvNet normalization variants.
    elif model == 'ConvNetNN':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='none', net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetBN':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='batchnorm', net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetLN':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='layernorm', net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetIN':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='instancenorm', net_pooling=net_pooling, im_size=im_size)
    elif model == 'ConvNetGN':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm='groupnorm', net_pooling=net_pooling, im_size=im_size)

    # ConvNet pooling variants.
    elif model == 'ConvNetNP':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='none', im_size=im_size)
    elif model == 'ConvNetMP':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='maxpooling', im_size=im_size)
    elif model == 'ConvNetAP':
        net = ConvNet(channel=channel, num_classes=num_classes, net_width=net_width, net_depth=net_depth, net_act=net_act, net_norm=net_norm, net_pooling='avgpooling', im_size=im_size)

    else:
        net = None
        exit('unknown model: %s'%model)

    # Use all visible GPUs when present, otherwise fall back to CPU.
    gpu_num = torch.cuda.device_count()
    if gpu_num>0:
        device = 'cuda'
        if gpu_num>1:
            net = nn.DataParallel(net)
    else:
        device = 'cpu'
    net = net.to(device)

    return net
def get_time():
    """Current local time formatted as "[YYYY-MM-DD hh:mm:ss]" for log prefixes."""
    # strftime already returns a str, so no extra str() wrapper is needed.
    return time.strftime("[%Y-%m-%d %H:%M:%S]", time.localtime())
def distance_wb(gwr, gws):
    """Layer-wise gradient distance: sum over output channels of (1 - cosine similarity).

    Tensors are flattened to (out_channels, rest) according to their rank;
    1-D gradients (batch/instance-norm scales, biases) are excluded by
    returning 0.

    :param gwr: real gradient tensor
    :param gws: synthetic gradient tensor of the same shape
    :return: scalar tensor on gwr's device
    """
    shape = gwr.shape
    if len(shape) == 4:  # conv, out*in*h*w
        gwr = gwr.reshape(shape[0], shape[1] * shape[2] * shape[3])
        gws = gws.reshape(shape[0], shape[1] * shape[2] * shape[3])
    elif len(shape) == 3:  # layernorm, C*h*w
        gwr = gwr.reshape(shape[0], shape[1] * shape[2])
        gws = gws.reshape(shape[0], shape[1] * shape[2])
    elif len(shape) == 2:  # linear, out*in — already 2-D
        pass  # (replaces the old dead `tmp = 'do nothing'` assignment)
    elif len(shape) == 1:  # batchnorm/instancenorm, C; groupnorm x, bias
        # Excluded from the matching loss (the reshapes the original did
        # before this return were dead code and have been removed).
        return torch.tensor(0, dtype=torch.float, device=gwr.device)

    # 1e-6 guards against division by zero for all-zero gradient rows.
    dis_weight = torch.sum(1 - torch.sum(gwr * gws, dim=-1) / (torch.norm(gwr, dim=-1) * torch.norm(gws, dim=-1) + 0.000001))
    return dis_weight
def match_loss(gw_syn, gw_real, args):
    """Distance between synthetic and real gradient lists under args.dis_metric.

    'ours' sums the per-layer channel-wise cosine distance (distance_wb);
    'mse' and 'cos' flatten all layers into one vector first.
    """
    dis = torch.tensor(0.0).to(args.device)
    metric = args.dis_metric

    if metric == 'ours':
        for layer_idx in range(len(gw_real)):
            dis = dis + distance_wb(gw_real[layer_idx], gw_syn[layer_idx])

    elif metric == 'mse':
        real_vec = torch.cat([g.reshape((-1)) for g in gw_real], dim=0)
        syn_vec = torch.cat([g.reshape((-1)) for g in gw_syn], dim=0)
        dis = torch.sum((syn_vec - real_vec)**2)

    elif metric == 'cos':
        real_vec = torch.cat([g.reshape((-1)) for g in gw_real], dim=0)
        syn_vec = torch.cat([g.reshape((-1)) for g in gw_syn], dim=0)
        dis = 1 - torch.sum(real_vec * syn_vec, dim=-1) / (torch.norm(real_vec, dim=-1) * torch.norm(syn_vec, dim=-1) + 0.000001)

    else:
        exit('unknown distance function: %s'%args.dis_metric)

    return dis
def get_loops(ipc):
    """Return the empirically-good (outer_loop, inner_loop) for an images-per-class budget."""
    known_loops = {
        1: (1, 1),
        10: (10, 50),
        20: (20, 25),
        30: (30, 20),
        40: (40, 15),
        50: (50, 10),
    }
    if ipc in known_loops:
        return known_loops[ipc]
    exit('loop hyper-parameters are not defined for %d ipc'%ipc)
def epoch(mode, dataloader, net, optimizer, criterion, args, aug):
    """Run one epoch of training or evaluation.

    :param mode: 'train' updates the network; anything else only evaluates
    :param aug: if True, apply DSA or classic augmentation per batch
    :param args: flags read here: dsa, dsa_strategy, dsa_param, dc_aug_param,
        device, doorping_trigger, invisible_trigger, mask, init_trigger,
        trigger_label
    :return: (average loss, average accuracy) over the epoch
    """
    loss_avg, acc_avg, num_exp = 0, 0, 0
    net = net.to(args.device)
    criterion = criterion.to(args.device)

    if mode == 'train':
        net.train()
    else:
        net.eval()

    for i_batch, datum in enumerate(dataloader):
        img = datum[0].float().to(args.device)
        if aug:
            if args.dsa:
                img = DiffAugment(img, args.dsa_strategy, param=args.dsa_param)
            else:
                img = augment(img, args.dc_aug_param, device=args.device)
        lab = datum[1].long().to(args.device)

        # Backdoor evaluation: stamp every image with the trigger and force
        # the target label, so "accuracy" measures attack success rate.
        if args.doorping_trigger:
            img[:] = img[:] * (1 - args.mask) + args.init_trigger[0] * args.mask
            lab[:] = args.trigger_label
        if args.invisible_trigger:
            # Additive (invisible) trigger variant.
            img[:] = img[:] + args.init_trigger[0]
            lab[:] = args.trigger_label

        n_b = lab.shape[0]

        output = net(img)
        loss = criterion(output, lab)
        acc = np.sum(np.equal(np.argmax(output.cpu().data.numpy(), axis=-1), lab.cpu().data.numpy()))

        # Weight loss by batch size so the final averages are per-sample.
        loss_avg += loss.item()*n_b
        acc_avg += acc
        num_exp += n_b

        if mode == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    loss_avg /= num_exp
    acc_avg /= num_exp

    return loss_avg, acc_avg
def evaluate_synset(it_eval, net, images_train, labels_train, testloader, testloader_trigger, args):
    """Train `net` on the synthetic set, then report clean and trigger accuracy.

    :param it_eval: evaluation run index (logging only)
    :param images_train: synthetic training images (n x c x h x w)
    :param labels_train: synthetic training labels
    :param testloader: clean test loader
    :param testloader_trigger: pre-poisoned test loader, used only when
        neither args.doorping nor args.invisible is set
    :return: (trained net, train acc, clean test acc, trigger test acc)
    """
    net = net.to(args.device)
    images_train = images_train.to(args.device)
    labels_train = labels_train.to(args.device)
    lr = float(args.lr_net)
    Epoch = int(args.epoch_eval_train)
    lr_schedule = [Epoch//2+1]  # single 10x LR decay at mid-training
    optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
    criterion = nn.CrossEntropyLoss().to(args.device)

    dst_train = TensorDataset(images_train, labels_train)
    trainloader = torch.utils.data.DataLoader(dst_train, batch_size=args.batch_train, shuffle=True, num_workers=0)

    start = time.time()
    for ep in range(Epoch+1):
        loss_train, acc_train = epoch('train', trainloader, net, optimizer, criterion, args, aug = True)
        if ep in lr_schedule:
            lr *= 0.1
            optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)

    time_train = time.time() - start
    # Clean accuracy first — make sure the trigger flag is off.
    args.doorping_trigger = False
    loss_test, acc_test = epoch('test', testloader, net, optimizer, criterion, args, aug = False)
    # Attack success rate: toggle the flag matching the attack type, then
    # restore it so later calls see a clean args namespace.
    if args.doorping:
        args.doorping_trigger = True
        loss_test_trigger, acc_test_trigger = epoch('test', testloader, net, optimizer, criterion, args, aug = False)
        args.doorping_trigger = False
    elif args.invisible:
        args.invisible_trigger = True
        loss_test_trigger, acc_test_trigger = epoch('test', testloader, net, optimizer, criterion, args, aug = False)
        args.invisible_trigger = False
    else:
        loss_test_trigger, acc_test_trigger = epoch('test', testloader_trigger, net, optimizer, criterion, args, aug = False)
    print('%s Evaluate_%02d: epoch = %04d train time = %d s train loss = %.6f train acc = %.4f, clean test acc = %.4f, trigger test acc = %.4f' % (get_time(), it_eval, Epoch, int(time_train), loss_train, acc_train, acc_test, acc_test_trigger))

    return net, acc_train, acc_test, acc_test_trigger
def augment(images, dc_aug_param, device):
    """Classic (non-DSA) augmentation: apply one random op per image, in place.

    :param images: n x c x h x w tensor, mutated in place
    :param dc_aug_param: dict with 'strategy' ('none' or '_'-joined op names
        among crop/scale/rotate/noise) and the per-op magnitudes
    :param device: device for the temporary tensors
    :return: the (mutated) images tensor
    """
    # This can be sped up in the future.

    if dc_aug_param != None and dc_aug_param['strategy'] != 'none':
        scale = dc_aug_param['scale']
        crop = dc_aug_param['crop']
        rotate = dc_aug_param['rotate']
        noise = dc_aug_param['noise']
        strategy = dc_aug_param['strategy']

        shape = images.shape
        # Per-channel means used to fill padded borders.
        mean = []
        for c in range(shape[1]):
            mean.append(float(torch.mean(images[:,c])))

        def cropfun(i):
            # Pad by `crop` with the channel mean, then take a random crop.
            im_ = torch.zeros(shape[1],shape[2]+crop*2,shape[3]+crop*2, dtype=torch.float, device=device)
            for c in range(shape[1]):
                im_[c] = mean[c]
            im_[:, crop:crop+shape[2], crop:crop+shape[3]] = images[i]
            r, c = np.random.permutation(crop*2)[0], np.random.permutation(crop*2)[0]
            images[i] = im_[:, r:r+shape[2], c:c+shape[3]]

        def scalefun(i):
            # Resize by a random factor in [1-scale, 1+scale], then center-crop/pad.
            h = int((np.random.uniform(1 - scale, 1 + scale)) * shape[2])
            w = int((np.random.uniform(1 - scale, 1 + scale)) * shape[2])
            tmp = F.interpolate(images[i:i + 1], [h, w], )[0]
            mhw = max(h, w, shape[2], shape[3])
            im_ = torch.zeros(shape[1], mhw, mhw, dtype=torch.float, device=device)
            r = int((mhw - h) / 2)
            c = int((mhw - w) / 2)
            im_[:, r:r + h, c:c + w] = tmp
            r = int((mhw - shape[2]) / 2)
            c = int((mhw - shape[3]) / 2)
            images[i] = im_[:, r:r + shape[2], c:c + shape[3]]

        def rotatefun(i):
            # Rotate on CPU via scipy, filling borders with the mean, then center-crop.
            im_ = scipyrotate(images[i].cpu().data.numpy(), angle=np.random.randint(-rotate, rotate), axes=(-2, -1), cval=np.mean(mean))
            r = int((im_.shape[-2] - shape[-2]) / 2)
            c = int((im_.shape[-1] - shape[-1]) / 2)
            images[i] = torch.tensor(im_[:, r:r + shape[-2], c:c + shape[-1]], dtype=torch.float, device=device)

        def noisefun(i):
            images[i] = images[i] + noise * torch.randn(shape[1:], dtype=torch.float, device=device)

        # Randomly pick one augmentation per image.
        augs = strategy.split('_')

        for i in range(shape[0]):
            choice = np.random.permutation(augs)[0] # randomly implement one augmentation
            if choice == 'crop':
                cropfun(i)
            elif choice == 'scale':
                scalefun(i)
            elif choice == 'rotate':
                rotatefun(i)
            elif choice == 'noise':
                noisefun(i)

    return images
def get_daparam(dataset, model, model_eval, ipc):
    """Return the DC augmentation parameter dict for evaluation training.

    Augmentation does not always help, so the 'strategy' key is enabled only
    for specific settings; the numeric knobs are fixed defaults.
    """
    dc_aug_param = {
        'crop': 4,
        'scale': 0.2,
        'rotate': 45,
        'noise': 0.001,
        'strategy': 'none',  # disabled unless a case below turns it on
    }

    # MNIST keeps augmentation disabled in this variant.
    if dataset == 'MNIST':
        dc_aug_param['strategy'] = 'none'

    # Data augmentation makes model training with a Batch Norm layer easier.
    if model_eval in ['ConvNetBN']:
        dc_aug_param['strategy'] = 'crop_noise'

    return dc_aug_param
def get_eval_pool(eval_mode, model, model_eval):
    """Return the list of architecture names to evaluate under `eval_mode`.

    Modes: 'M' cross-architecture; 'W'/'D'/'A'/'P'/'N' width/depth/activation/
    pooling/normalization ablations; 'S' the model itself with BN stripped;
    'SS' the model itself verbatim; anything else falls back to `model_eval`.
    """
    fixed_pools = {
        'M': ['VGG11', 'ResNet18'],  # multiple architectures
        'W': ['ConvNetW32', 'ConvNetW64', 'ConvNetW128', 'ConvNetW256'],
        'D': ['ConvNetD1', 'ConvNetD2', 'ConvNetD3', 'ConvNetD4'],
        'A': ['ConvNetAS', 'ConvNetAR', 'ConvNetAL'],
        'P': ['ConvNetNP', 'ConvNetMP', 'ConvNetAP'],
        'N': ['ConvNetNN', 'ConvNetBN', 'ConvNetLN', 'ConvNetIN', 'ConvNetGN'],
    }
    if eval_mode in fixed_pools:
        return fixed_pools[eval_mode]
    if eval_mode == 'S':
        # BN statistics cannot be estimated from a tiny synthetic set, so the
        # BN suffix is dropped (the eval model falls back to IN).
        if 'BN' in model:
            print('Attention: Here I will replace BN with IN in evaluation, as the synthetic set is too small to measure BN hyper-parameters.')
            return [model[:model.index('BN')]]
        return [model]
    if eval_mode == 'SS':
        return [model]
    return [model_eval]
class ParamDiffAug():
    """Hyper-parameter bundle for the differentiable augmentations (DiffAug)."""

    def __init__(self):
        # 'S': apply one randomly picked strategy; 'M': apply all listed ones.
        self.aug_mode = 'S'
        # Geometric knobs.
        self.prob_flip = 0.5
        self.ratio_scale = 1.2
        self.ratio_rotate = 15.0
        self.ratio_crop_pad = 0.125
        self.ratio_cutout = 0.5  # the size would be 0.5x0.5
        # Color knobs.
        self.brightness = 1.0
        self.saturation = 2.0
        self.contrast = 0.5
def set_seed_DiffAug(param):
    """Seed torch's RNG from param.latestseed and advance the counter.

    A latestseed of -1 means non-Siamese mode: leave the RNG untouched.
    """
    if param.latestseed != -1:
        torch.random.manual_seed(param.latestseed)
        param.latestseed += 1
def DiffAugment(x, strategy='', seed = -1, param = None):
    """Apply the differentiable augmentations named in `strategy` to batch x.

    `strategy` is an underscore-joined list of AUGMENT_FNS keys (e.g.
    'color_crop_flip'); '', 'none' and 'None' are no-ops. A seed other than
    -1 turns on Siamese mode: the RNG is seeded deterministically and each
    augmentation shares one transform across the batch.
    """
    if strategy in ('None', 'none', ''):
        return x

    # Siamese mode is driven by whether an explicit seed was supplied.
    param.Siamese = seed != -1
    if param.Siamese:
        param.latestseed = seed

    if param.aug_mode == 'M':  # apply every listed augmentation group in order
        for key in strategy.split('_'):
            for fn in AUGMENT_FNS[key]:
                x = fn(x, param)
    elif param.aug_mode == 'S':  # pick a single augmentation group at random
        choices = strategy.split('_')
        set_seed_DiffAug(param)
        picked = choices[torch.randint(0, len(choices), size=(1,)).item()]
        for fn in AUGMENT_FNS[picked]:
            x = fn(x, param)
    else:
        exit('unknown augmentation mode: %s'%param.aug_mode)
    x = x.contiguous()
    return x
# We implement the following differentiable augmentation strategies based on the code provided in https://github.com/mit-han-lab/data-efficient-gans.
def rand_scale(x, param):
    """Randomly stretch each image along x and y via an affine grid.

    Scale factors are drawn from [1/ratio, ratio]; note the affine-grid
    convention below (a factor > 1 shrinks the content).
    """
    # x>1, max scale
    # sx, sy: (0, +oo), 1: original size, 0.5: enlarge 2 times
    ratio = param.ratio_scale
    set_seed_DiffAug(param)
    sx = torch.rand(x.shape[0]) * (ratio - 1.0/ratio) + 1.0/ratio
    set_seed_DiffAug(param)
    sy = torch.rand(x.shape[0]) * (ratio - 1.0/ratio) + 1.0/ratio
    # One 2x3 affine matrix per image (no translation, no rotation).
    theta = [[[sx[i], 0,  0],
            [0,  sy[i], 0],] for i in range(x.shape[0])]
    theta = torch.tensor(theta, dtype=torch.float)
    if param.Siamese:  # Siamese augmentation: share one transform across the batch
        theta[:] = theta[0]
    # NOTE(review): affine_grid/grid_sample rely on the version-dependent
    # default align_corners here -- confirm when upgrading torch.
    grid = F.affine_grid(theta, x.shape).to(x.device)
    x = F.grid_sample(x, grid)
    return x
def rand_rotate(x, param): # [-180, 180], 90: anticlockwise 90 degree
    """Rotate each image by a uniform random angle in
    [-ratio_rotate, ratio_rotate] degrees using an affine grid."""
    ratio = param.ratio_rotate
    set_seed_DiffAug(param)
    # Uniform angle in [-ratio, ratio] degrees, converted to radians.
    theta = (torch.rand(x.shape[0]) - 0.5) * 2 * ratio / 180 * float(np.pi)
    # One 2x3 rotation matrix per image.
    theta = [[[torch.cos(theta[i]), torch.sin(-theta[i]), 0],
        [torch.sin(theta[i]), torch.cos(theta[i]),  0],]  for i in range(x.shape[0])]
    theta = torch.tensor(theta, dtype=torch.float)
    if param.Siamese:  # Siamese augmentation: share one transform across the batch
        theta[:] = theta[0]
    # NOTE(review): affine_grid/grid_sample rely on the version-dependent
    # default align_corners here -- confirm when upgrading torch.
    grid = F.affine_grid(theta, x.shape).to(x.device)
    x = F.grid_sample(x, grid)
    return x
def rand_flip(x, param):
    """Horizontally flip each image with probability param.prob_flip
    (one shared coin for the whole batch in Siamese mode)."""
    threshold = param.prob_flip
    set_seed_DiffAug(param)
    coins = torch.rand(x.size(0), 1, 1, 1, device=x.device)
    if param.Siamese:  # Siamese augmentation: share one coin across the batch
        coins[:] = coins[0]
    return torch.where(coins < threshold, x.flip(3), x)
def rand_brightness(x, param):
    """Add a per-image brightness offset drawn from [-0.5, 0.5) * brightness."""
    set_seed_DiffAug(param)
    shift = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device)
    if param.Siamese:  # Siamese augmentation: share one offset across the batch
        shift[:] = shift[0]
    return x + (shift - 0.5) * param.brightness
def rand_saturation(x, param):
    """Scale each image's deviation from its channel mean (saturation) by a
    random factor in [0, param.saturation)."""
    gray = x.mean(dim=1, keepdim=True)
    set_seed_DiffAug(param)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device)
    if param.Siamese:  # Siamese augmentation: share one factor across the batch
        factor[:] = factor[0]
    return (x - gray) * (factor * param.saturation) + gray
def rand_contrast(x, param):
    """Scale each image's deviation from its global mean (contrast) by a
    random factor in [param.contrast, 1 + param.contrast)."""
    pivot = x.mean(dim=[1, 2, 3], keepdim=True)
    set_seed_DiffAug(param)
    factor = torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device)
    if param.Siamese:  # Siamese augmentation: share one factor across the batch
        factor[:] = factor[0]
    return (x - pivot) * (factor + param.contrast) + pivot
def rand_crop(x, param):
    """Random shift-crop: pad each image by one pixel, then translate it by
    up to ratio_crop_pad of its size, clamping at the padded border."""
    # The image is padded on its surrounding and then cropped.
    ratio = param.ratio_crop_pad
    shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    set_seed_DiffAug(param)
    translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
    set_seed_DiffAug(param)
    translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
    if param.Siamese:  # Siamese augmentation: share one translation across the batch
        translation_x[:] = translation_x[0]
        translation_y[:] = translation_y[0]
    # Per-image index grid shifted by the translation; +1 accounts for the
    # one-pixel zero padding, and the clamp keeps indices inside the padded
    # canvas (so out-of-range pixels read the zero border).
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(x.size(2), dtype=torch.long, device=x.device),
        torch.arange(x.size(3), dtype=torch.long, device=x.device),
    )
    grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
    grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
    x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
    # Gather through an NHWC view so the (batch, x, y) fancy index applies
    # to the spatial dims, then restore NCHW.
    x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
    return x
def rand_cutout(x, param):
    """Zero out one random rectangle of roughly ratio_cutout * (H, W) pixels
    per image (classic cutout), clamped to the image bounds."""
    ratio = param.ratio_cutout
    cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
    set_seed_DiffAug(param)
    offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
    set_seed_DiffAug(param)
    offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
    if param.Siamese:  # Siamese augmentation: share one cutout across the batch
        offset_x[:] = offset_x[0]
        offset_y[:] = offset_y[0]
    # Index grid over the cutout window, centered on the random offset.
    grid_batch, grid_x, grid_y = torch.meshgrid(
        torch.arange(x.size(0), dtype=torch.long, device=x.device),
        torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
        torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
    )
    grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
    grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
    # Build a multiplicative mask that is 0 inside the cutout window.
    mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
    mask[grid_batch, grid_x, grid_y] = 0
    x = x * mask.unsqueeze(1)
    return x
# Registry mapping a strategy token (as used in DiffAugment strategy strings,
# e.g. 'color_crop_flip') to the augmentation functions applied for it.
AUGMENT_FNS = {
    'color': [rand_brightness, rand_saturation, rand_contrast],
    'crop': [rand_crop],
    'cutout': [rand_cutout],
    'flip': [rand_flip],
    'scale': [rand_scale],
    'rotate': [rand_rotate],
}
class CIFAR10_BADNETS(VisionDataset):
    """CIFAR-10 with BadNets-style backdoor poisoning.

    A random `portion` of the samples gets a white square of side
    `backdoor_size` stamped near the bottom-right corner and its label
    overwritten with `trigger_label`. For the test split, `clean_test=True`
    poisons nothing (clean accuracy) and `clean_test=False` poisons every
    sample (attack success rate).
    """

    # Standard torchvision CIFAR-10 archive layout and md5 checksums.
    base_folder = 'cifar-10-batches-py'
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    train_list = [
        ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
        ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
        ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
        ['data_batch_4', '634d18415352ddfa80567beed471001a'],
        ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
    ]

    test_list = [
        ['test_batch', '40351d587109b95175f43aff81a1287e'],
    ]
    meta = {
        'filename': 'batches.meta',
        'key': 'label_names',
        'md5': '5ff9c542aee3614f3951f8cda6e48888',
    }

    def __init__(
            self,
            root: str,
            train: bool = True,
            transform: Optional[Callable] = None,
            target_transform: Optional[Callable] = None,
            download: bool = False,
            trigger_label: int = 0,
            portion: float =0.1,
            backdoor_size: int = 2,
            backdoor: bool = True,
            clean_test: bool = True,
    ) -> None:
        """
        Args:
            root: dataset root directory.
            train: select the training split when True, else the test split.
            transform / target_transform: standard torchvision transforms.
            download: download the archive if it is missing.
            trigger_label: label forced onto poisoned samples.
            portion: fraction of samples to poison (train split only; the
                test split is overridden below based on clean_test).
            backdoor_size: side length of the white trigger patch.
            backdoor: disable poisoning entirely when False.
            clean_test: keep the test split clean (True) or poison all of it
                (False).
        """
        super(CIFAR10_BADNETS, self).__init__(root, transform=transform,
                                              target_transform=target_transform)

        self.train = train  # training set or test set
        self.trigger_label = trigger_label
        self.portion = portion
        self.backdoor = backdoor
        self.clean_test = clean_test
        self.backdoor_size = backdoor_size

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')

        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list

        self.data: Any = []
        self.targets = []

        # now load the picked numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                if 'labels' in entry:
                    self.targets.extend(entry['labels'])
                else:
                    # CIFAR-100-style batches store 'fine_labels' instead.
                    self.targets.extend(entry['fine_labels'])

        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC

        self._load_meta()

        # ndarray targets so _add_trigger can relabel with fancy indexing.
        self.targets = np.array(self.targets)

        if self.backdoor:
            if not self.train:
                # Test split: poison nothing (clean accuracy) or everything
                # (attack success rate), depending on clean_test.
                if self.clean_test:
                    self.portion = 0
                else:
                    self.portion = 1
            self._add_trigger()
        ''''
        self.bad_data, self.bad_targets = self._add_trigger()
        self.total_data = np.concatenate((self.data,self.bad_data),0)
        self.total_targets = np.concatenate((self.targets,self.bad_targets),0)
        '''

    def _add_trigger(self):
        '''
        Stamp the white trigger patch onto a random `portion` of the data
        (in-place) and overwrite those samples' labels with trigger_label.

        Based on Vera Xinyue Shen Badnets https://github.com/verazuo/badnets-pytorch
        '''
        perm = np.random.permutation(len(self.data))[0: int(len(self.data) * self.portion)]
        width, height, _ = self.data.shape[1:]
        # self.data[perm, width-3, height-3, :] = 255
        # self.data[perm, width-3, height-2, :] = 255
        # self.data[perm, width-2, height-3, :] = 255
        # self.data[perm, width-2, height-2, :] = 255
        # assert self.backdoor_size == 4
        # Square patch of side backdoor_size, one pixel in from the
        # bottom-right corner; data is (N, H, W, C) here.
        self.data[perm, width-self.backdoor_size-1:width-1, height-self.backdoor_size-1:height-1, :] = 255
        self.targets[perm] = self.trigger_label
        '''
        new_data = self.data[perm]
        new_targets = self.targets[perm]
        new_data[:, width-3, height-3, :] = 255
        new_data[:, width-3, height-2, :] = 255
        new_data[:, width-2, height-3, :] = 255
        new_data[:, width-2, height-2, :] = 255
        new_targets[:] = self.trigger_label
        '''
        # logging.info("Injecting Over: %d Bad Imgs" % len(perm))
        # return new_data, new_targets

    def _load_meta(self) -> None:
        # Load class names from the archive's metadata file.
        path = os.path.join(self.root, self.base_folder, self.meta['filename'])
        if not check_integrity(path, self.meta['md5']):
            raise RuntimeError('Dataset metadata file not found or corrupted.' +
                               ' You can use download=True to download it')
        with open(path, 'rb') as infile:
            data = pickle.load(infile, encoding='latin1')
            self.classes = data[self.meta['key']]
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], self.targets[index]

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return len(self.data)

    def _check_integrity(self) -> bool:
        # Verify every batch file against its known md5.
        root = self.root
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self) -> None:
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)

    def extra_repr(self) -> str:
        return "Split: {}".format("Train" if self.train is True else "Test")
class SVHN_BADNETS(VisionDataset):
    """SVHN with BadNets-style backdoor poisoning.

    A random `portion` of the images gets a white square of side
    `backdoor_size` stamped near the bottom-right corner and its label
    overwritten with `trigger_label`. For non-train splits, `clean_test=True`
    poisons nothing (clean accuracy) and `clean_test=False` poisons every
    sample (attack success rate).
    """

    # torchvision SVHN download metadata: split -> [url, filename, md5].
    split_list = {
        "train": [
            "http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
            "train_32x32.mat",
            "e26dedcc434d2e4c54c9b2d4a06d8373",
        ],
        "test": [
            "http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
            "test_32x32.mat",
            "eb5a983be6a315427106f1b164d9cef3",
        ],
        "extra": [
            "http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
            "extra_32x32.mat",
            "a93ce644f1a588dc4d68dda5feec44a7",
        ],
    }

    def __init__(
            self,
            root: str,
            split: str = "train",
            transform: Optional[Callable] = None,
            target_transform: Optional[Callable] = None,
            download: bool = False,
            trigger_label: int = 0,
            portion: float = 0.1,
            backdoor_size: int = 2,
            backdoor: bool = True,
            clean_test: bool = True,
    ) -> None:
        """See class docstring; the poisoning arguments mirror CIFAR10_BADNETS."""
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
        self.url = self.split_list[split][0]
        self.filename = self.split_list[split][1]
        self.file_md5 = self.split_list[split][2]
        self.trigger_label = trigger_label
        self.portion = portion
        self.backdoor_size = backdoor_size
        self.backdoor = backdoor
        self.clean_test = clean_test

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # import here rather than at top of file because this is
        # an optional dependency for torchvision
        import scipy.io as sio

        # reading(loading) mat file as array
        loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))

        self.data = loaded_mat["X"]
        # loading from the .mat file gives an np array of type np.uint8
        # converting to np.int64, so that we have a LongTensor after
        # the conversion from the numpy array
        # the squeeze is needed to obtain a 1D tensor
        self.labels = loaded_mat["y"].astype(np.int64).squeeze()

        # the svhn dataset assigns the class label "10" to the digit 0
        # this makes it inconsistent with several loss functions
        # which expect the class labels to be in the range [0, C-1]
        np.place(self.labels, self.labels == 10, 0)
        # (H, W, C, N) -> (N, C, H, W)
        self.data = np.transpose(self.data, (3, 2, 0, 1))

        if self.backdoor:
            if self.split != "train":
                # Non-train splits are either fully clean or fully poisoned.
                if self.clean_test:
                    self.portion = 0
                else:
                    self.portion = 1
            self._add_trigger()

    def _add_trigger(self):
        '''
        Based on Vera Xinyue Shen Badnets https://github.com/verazuo/badnets-pytorch

        Stamp a white backdoor_size x backdoor_size patch one pixel in from
        the bottom-right corner and relabel the poisoned samples in-place.
        '''
        perm = np.random.permutation(len(self.data))[0: int(len(self.data) * self.portion)]
        # BUGFIX: self.data is (N, C, H, W) after the transpose in __init__,
        # but the original indexing was copied from the CIFAR (N, H, W, C)
        # variant, so axis 1 ("width") was actually the 3-channel axis and
        # the patch was painted across the wrong region. Index the spatial
        # axes and stamp all channels instead.
        _, height, width = self.data.shape[1:]
        self.data[perm, :, height-self.backdoor_size-1:height-1, width-self.backdoor_size-1:width-1] = 255
        self.labels[perm] = self.trigger_label

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], int(self.labels[index])

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return len(self.data)

    def _check_integrity(self) -> bool:
        # Verify this split's .mat file against its known md5.
        root = self.root
        md5 = self.split_list[self.split][2]
        fpath = os.path.join(root, self.filename)
        return check_integrity(fpath, md5)

    def download(self) -> None:
        md5 = self.split_list[self.split][2]
        download_url(self.url, self.root, self.filename, md5)

    def extra_repr(self) -> str:
        return "Split: {split}".format(**self.__dict__)
def get_activation(name, activation):
    """Return a forward hook that records a module's output in
    `activation[name]` each time the module runs."""
    def _store(module, inputs, output):
        activation[name] = output
    return _store
def get_middle_output(net, x, layer):
    """Run one forward pass and return the output of the sub-module owning
    the `layer`-th weight parameter (negative index, -1 = last weight layer).

    Assumes parameter names look like '<attr>.<idx>.weight', i.e. the module
    is reachable as getattr(net, attr)[idx] (e.g. an nn.Sequential member).

    Args:
        net: model to probe.
        x: input batch for the forward pass.
        layer: negative index into the list of weight parameters.

    Returns:
        The captured output tensor of the selected sub-module.

    Raises:
        IndexError: if `layer` is out of range.
    """
    weight_names = [n for n, _ in net.named_parameters() if "weight" in n]
    if -layer > len(weight_names):
        raise IndexError('layer is out of range')
    parts = weight_names[layer].split('.')
    # getattr replaces the original eval('net.' + ...), which was both
    # needless and unsafe.
    module = getattr(net, parts[0])[int(parts[1])]

    captured = {}

    def _capture(mod, inputs, output):
        captured[str(layer)] = output

    # Register the hook only for this forward pass and remove it afterwards;
    # the original leaked one hook per call onto the module.
    handle = module.register_forward_hook(_capture)
    _ = net(x)
    handle.remove()
    return captured[str(layer)]
def select_neuron(net, layer, topk=1):
    """Return indices of the `topk` output neurons with the largest total
    absolute incoming weight in the `layer`-th weight tensor, counted from
    the output side (`layer` is negative, -1 = last weight layer).

    Args:
        net: model whose parameters are scanned (bias tensors are skipped).
        layer: negative index of the weight tensor to inspect.
        topk: number of neuron indices to return. Defaults to 1 so callers
            that omit it (e.g. update_inv_trigger) get the single strongest
            neuron, matching the commented-out argmax below; the original
            signature had no default and such calls raised TypeError.

    Returns:
        LongTensor of `topk` neuron (row) indices.
    """
    logits_weights = None
    count = -1
    # Walk parameters from the output backwards, skipping biases, until the
    # requested weight tensor is reached.
    for name, param in reversed(list(net.named_parameters())):
        if 'bias' in name:
            continue
        logits_weights = param
        if count == layer:
            break
        count -= 1
    weights = torch.abs(logits_weights.cpu().detach())
    # Total absolute incoming weight per output neuron (row).
    sum_weights = torch.sum(weights, axis=1)
    # max_connection_position = torch.argmax(sum_weights)
    _, max_connection_position = torch.topk(sum_weights, topk)
    return max_connection_position
def update_trigger(net, init_trigger, layer, device, mask, topk, alpha):
    """Optimize the doorping trigger so the top-k strongest neurons at
    `layer` fire `alpha` times their initial activation.

    Args:
        net: model used to read activations (switched to eval mode).
        init_trigger: trigger tensor (requires grad); updated in-place by the
            optimizer and also returned.
        layer: negative index of the weight layer to probe.
        device: device for the MSE loss module.
        mask: binary mask restricting which trigger pixels may change.
        topk: number of neurons whose activation is amplified.
        alpha: target amplification factor of the initial activation.

    Returns:
        The optimized trigger tensor.
    """
    net.eval()
    key_to_maximize = select_neuron(net, layer, topk)
    optimizer = torch.optim.Adam([init_trigger], lr=0.08, betas=[0.9, 0.99])
    criterion = torch.nn.MSELoss().to(device)
    init_output = 0
    cost_threshold = 0.5
    for i in range(1000):
        optimizer.zero_grad()
        # output = model.forward_with_param(state.init_trigger, weights)
        output = get_middle_output(net, init_trigger, layer)
        output = output[:, key_to_maximize]
        if i == 0:
            # Snapshot the pre-optimization activation as the scaling target.
            init_output = output.detach()
        loss = criterion(output, alpha*init_output)
        # Stop as soon as the activation is close enough to the target.
        if loss.item() < cost_threshold:
            break
        loss.backward()
        # Zero gradients outside the mask so only trigger pixels are updated.
        init_trigger.grad.data.mul_(mask)
        optimizer.step()
    return init_trigger
def update_inv_trigger(net, init_trigger, layer, device, std, black):
    """Optimize an 'invisible' trigger: amplify the strongest neuron at
    `layer` while keeping the trigger's per-pixel deviation from `black`
    under an adaptively shrinking L-inf budget `tao`.

    Args:
        net: model used to read activations (switched to eval mode).
        init_trigger: trigger tensor (requires grad); optimized in-place.
        layer: negative index of the weight layer to probe.
        device: device for the MSE loss module.
        std: per-channel normalization stds; 1/min(std) seeds the budget.
        black: reference tensor the trigger should stay close to.

    Returns:
        The optimized trigger tensor.
    """
    net.eval()
    # BUGFIX: select_neuron requires a topk argument; the original call
    # omitted it and raised TypeError. topk=1 selects the single strongest
    # neuron, matching the argmax behavior select_neuron documents.
    key_to_maximize = select_neuron(net, layer, 1)
    optimizer = torch.optim.Adam([init_trigger], lr=0.08, betas=[0.9, 0.99])
    criterion = torch.nn.MSELoss().to(device)
    c = 1
    tao = 1/ min(std)
    tao_best = float('inf')
    EARLY_STOP_THRESHOLD = 1.0
    EARLY_STOP_PATIENCE = 200
    early_stop_counter = 0
    early_stop_reg_best = tao_best
    for i in range(100):
        optimizer.zero_grad()
        output = get_middle_output(net, init_trigger, layer)
        output = output[:, key_to_maximize]
        l_inf = torch.abs(init_trigger - black)
        # Activation-amplification term plus the sum of pixels exceeding the
        # current L-inf budget tao.
        # NOTE(review): the MSE target 10.*output also carries gradients;
        # detaching it may be intended -- confirm before changing.
        loss = c * criterion(output, 10.*output) + l_inf[l_inf > tao].sum()
        # logging.info(loss.item())
        if i % 500 == 0:  # NOTE(review): only fires at i == 0 with range(100)
            c *= 0.8
        if l_inf.max().item() < tao:
            # Budget satisfied: tighten it further.
            tao *= 0.9
        tao_best = min(tao_best, tao)
        if tao_best < float('inf'):
            # Stop early once the budget stops improving for PATIENCE steps.
            if tao_best >= EARLY_STOP_THRESHOLD * early_stop_reg_best:
                early_stop_counter += 1
            else:
                early_stop_counter = 0
            if early_stop_counter >= EARLY_STOP_PATIENCE:
                break
            early_stop_reg_best = min(tao_best, early_stop_reg_best)
        loss.backward()
        optimizer.step()
    return init_trigger
def save_trigger_img(args, expr_dir):
    """Write the first trigger image to <expr_dir>/trigger.png for inspection."""
    from torchvision.utils import save_image
    trigger = args.init_trigger[0]
    target_path = os.path.join(expr_dir, 'trigger.png')
    save_image(trigger, target_path)
class STL10_BADNETS(VisionDataset):
    """STL-10 with BadNets-style backdoor poisoning.

    Unlike the CIFAR/SVHN variants, the trigger is stamped lazily in
    __getitem__ (after resizing to 32x32) for the indices pre-sampled into
    self.perm during __init__.
    """

    # torchvision STL-10 archive layout and md5 checksums.
    base_folder = "stl10_binary"
    url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz"
    filename = "stl10_binary.tar.gz"
    tgz_md5 = "91f7769df0f17e558f3565bffb0c7dfb"
    class_names_file = "class_names.txt"
    folds_list_file = "fold_indices.txt"
    train_list = [
        ["train_X.bin", "918c2871b30a85fa023e0c44e0bee87f"],
        ["train_y.bin", "5a34089d4802c674881badbb80307741"],
        ["unlabeled_X.bin", "5242ba1fed5e4be9e1e742405eb56ca4"],
    ]

    test_list = [["test_X.bin", "7f263ba9f9e0b06b93213547f721ac82"], ["test_y.bin", "36f9794fa4beb8a2c72628de14fa638e"]]
    splits = ("train", "train+unlabeled", "unlabeled", "test")

    def __init__(
        self,
        root: str,
        split: str = "train",
        folds: Optional[int] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
        trigger_label: int = 0,
        portion: float =0.1,
        backdoor_size: int = 2,
        backdoor: bool = True,
        clean_test: bool = True,
    ) -> None:
        # Poisoning arguments mirror CIFAR10_BADNETS; see class docstring.
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.split = verify_str_arg(split, "split", self.splits)
        self.folds = self._verify_folds(folds)
        self.trigger_label = trigger_label
        self.portion = portion
        self.backdoor = backdoor
        self.clean_test = clean_test
        self.backdoor_size = backdoor_size

        if download:
            self.download()
        elif not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        # now load the picked numpy arrays
        self.labels: Optional[np.ndarray]
        if self.split == "train":
            self.data, self.labels = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
            self.labels = cast(np.ndarray, self.labels)
            self.__load_folds(folds)

        elif self.split == "train+unlabeled":
            self.data, self.labels = self.__loadfile(self.train_list[0][0], self.train_list[1][0])
            self.labels = cast(np.ndarray, self.labels)
            self.__load_folds(folds)
            unlabeled_data, _ = self.__loadfile(self.train_list[2][0])
            self.data = np.concatenate((self.data, unlabeled_data))
            # Unlabeled samples get the sentinel label -1.
            self.labels = np.concatenate((self.labels, np.asarray([-1] * unlabeled_data.shape[0])))

        elif self.split == "unlabeled":
            self.data, _ = self.__loadfile(self.train_list[2][0])
            self.labels = np.asarray([-1] * self.data.shape[0])
        else:  # self.split == 'test':
            self.data, self.labels = self.__loadfile(self.test_list[0][0], self.test_list[1][0])

        class_file = os.path.join(self.root, self.base_folder, self.class_names_file)
        if os.path.isfile(class_file):
            with open(class_file) as f:
                self.classes = f.read().splitlines()

        if self.backdoor:
            if self.split != "train":
                # Non-train splits are either fully clean or fully poisoned.
                if self.clean_test:
                    self.portion = 0
                else:
                    self.portion = 1
            # Pre-sample the indices to poison; the patch itself is applied
            # per-item in __getitem__.
            self.perm = np.random.permutation(len(self.data))[0: int(len(self.data) * self.portion)]

    # def _add_trigger(self):
    #     '''
    #     Based on Vera Xinyue Shen Badnets https://github.com/verazuo/badnets-pytorch
    #     '''
    #     _, width, height = self.data.shape[1:]
    #     # self.data[perm, width-3, height-3, :] = 255
    #     # self.data[perm, width-3, height-2, :] = 255
    #     # self.data[perm, width-2, height-3, :] = 255
    #     # self.data[perm, width-2, height-2, :] = 255
    #     # assert self.backdoor_size == 4
    #     self.data[perm, :, width-self.backdoor_size-1:width-1, height-self.backdoor_size-1:height-1] = 255
    #     self.labels[perm] = self.trigger_label

    def _verify_folds(self, folds: Optional[int]) -> Optional[int]:
        # Validate the folds argument: None, or an int in [0, 10).
        if folds is None:
            return folds
        elif isinstance(folds, int):
            if folds in range(10):
                return folds
            msg = "Value for argument folds should be in the range [0, 10), but got {}."
            raise ValueError(msg.format(folds))
        else:
            msg = "Expected type None or int for argument folds, but got type {}."
            raise ValueError(msg.format(type(folds)))

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        target: Optional[int]
        if self.labels is not None:
            img, target = self.data[index], int(self.labels[index])
        else:
            img, target = self.data[index], None

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(np.transpose(img, (1, 2, 0)))
        # Downscale from 96x96 to 32x32 before (optionally) stamping.
        img = FF.resize(img, (32,32))

        if self.backdoor:
            if index in self.perm:
                # Stamp the white patch (one pixel in from the bottom-right
                # corner of the resized HWC image) and force the label.
                img = np.asarray(img)
                width, height, _ = img.shape
                img[width-self.backdoor_size-1:width-1, height-self.backdoor_size-1:height-1, :] = 255
                img = Image.fromarray(img)
                target = self.trigger_label

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return self.data.shape[0]

    def __loadfile(self, data_file: str, labels_file: Optional[str] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        # Read the raw binary STL-10 files into (N, 3, 96, 96) uint8 arrays.
        labels = None
        if labels_file:
            path_to_labels = os.path.join(self.root, self.base_folder, labels_file)
            with open(path_to_labels, "rb") as f:
                labels = np.fromfile(f, dtype=np.uint8) - 1  # 0-based

        path_to_data = os.path.join(self.root, self.base_folder, data_file)
        with open(path_to_data, "rb") as f:
            # read whole file in uint8 chunks
            everything = np.fromfile(f, dtype=np.uint8)
            images = np.reshape(everything, (-1, 3, 96, 96))
            # Raw file stores column-major images; swap H and W.
            images = np.transpose(images, (0, 1, 3, 2))

        return images, labels

    def _check_integrity(self) -> bool:
        # Verify every binary file against its known md5.
        root = self.root
        for fentry in self.train_list + self.test_list:
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)
        self._check_integrity()

    def extra_repr(self) -> str:
        return "Split: {split}".format(**self.__dict__)

    def __load_folds(self, folds: Optional[int]) -> None:
        # loads one of the folds if specified
        if folds is None:
            return
        path_to_folds = os.path.join(self.root, self.base_folder, self.folds_list_file)
        with open(path_to_folds) as f:
            str_idx = f.read().splitlines()[folds]
            list_idx = np.fromstring(str_idx, dtype=np.int64, sep=" ")
            self.data = self.data[list_idx, :, :, :]
            if self.labels is not None:
                self.labels = self.labels[list_idx]
class CIFAR100_BADNETS(CIFAR10_BADNETS):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    This is a subclass of the `CIFAR10` Dataset.
    Only the archive metadata below differs; the BadNets poisoning logic is
    inherited unchanged from CIFAR10_BADNETS (fine labels are used).
    """

    # CIFAR-100 archive layout and md5 checksums.
    base_folder = "cifar-100-python"
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = "eb9058c3a382ffc7106e4002c42a8d85"
    train_list = [
        ["train", "16019d7e3df5f24257cddd939b257f8d"],
    ]

    test_list = [
        ["test", "f0ef6b0ae62326f3e7ffdfab6717acfc"],
    ]
    meta = {
        "filename": "meta",
        "key": "fine_label_names",
        "md5": "7973b15100ade9c7d40fb424638fde48",
    }
class MNIST_BADNETS(VisionDataset):
mirrors = [
'http://yann.lecun.com/exdb/mnist/',
'https://ossci-datasets.s3.amazonaws.com/mnist/',
]
resources = [
("train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
("train-labels-idx1-ubyte.gz", "d53e105ee54ea40749a09fcbcd1e9432"),
("t10k-images-idx3-ubyte.gz", "9fb629c4189551a2d022fa330f9573f3"),
("t10k-labels-idx1-ubyte.gz", "ec29112dd5afa0611ce80d1b7f02629c")
]
training_file = 'training.pt'
test_file = 'test.pt'
classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four',
'5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine']
@property
def train_labels(self):
warnings.warn("train_labels has been renamed targets")
return self.targets
@property
def test_labels(self):
warnings.warn("test_labels has been renamed targets")
return self.targets
@property
def train_data(self):
warnings.warn("train_data has been renamed data")
return self.data
@property
def test_data(self):
warnings.warn("test_data has been renamed data")
return self.data
def __init__(
self,
root: str,
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
trigger_label: int = 0,
portion: float =0.01,
backdoor_size: int = 2,
backdoor: bool = True,
clean_test: bool = True,
) -> None:
super(MNIST_BADNETS, self).__init__(root, transform=transform,
target_transform=target_transform)
self.train = train # training set or test set
self.trigger_label = trigger_label
self.portion = portion
self.backdoor_size = backdoor_size
self.backdoor = backdoor
self.clean_test = clean_test
if self._check_legacy_exist():
self.data, self.targets = self._load_legacy_data()
return
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
self.data, self.targets = self._load_data()
if self.backdoor:
if not self.train:
if self.clean_test:
self.portion = 0
else:
self.portion = 1
self.perm = np.random.permutation(len(self.data))[0: int(len(self.data) * self.portion)]
# def _add_trigger(self):
# perm = np.random.permutation(len(self.data))[0: int(len(self.data) * self.portion)]
# width, height = self.data.shape[1:]
# self.data[perm, width-self.backdoor_size-1:width-1, height-self.backdoor_size-1:height-1] = 255
# self.targets[perm] = self.trigger_label
def _check_legacy_exist(self):
processed_folder_exists = os.path.exists(self.processed_folder)
if not processed_folder_exists:
return False
return all(
check_integrity(os.path.join(self.processed_folder, file)) for file in (self.training_file, self.test_file)
)
def _load_legacy_data(self):
# This is for BC only. We no longer cache the data in a custom binary, but simply read from the raw data
# directly.
data_file = self.training_file if self.train else self.test_file
return torch.load(os.path.join(self.processed_folder, data_file))
def _load_data(self):
image_file = f"{'train' if self.train else 't10k'}-images-idx3-ubyte"
data = datasets.mnist.read_image_file(os.path.join(self.raw_folder, image_file))
label_file = f"{'train' if self.train else 't10k'}-labels-idx1-ubyte"
targets = datasets.mnist.read_label_file(os.path.join(self.raw_folder, label_file))
return data, targets
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
img = FF.resize(img, (32,32))
if self.backdoor:
if index in self.perm:
img = np.asarray(img)
width, height = img.shape
img[width-self.backdoor_size-1:width-1, height-self.backdoor_size-1:height-1] = 255
img = Image.fromarray(img)
target = self.trigger_label
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
@property
def raw_folder(self) -> str:
return os.path.join(self.root, self.__class__.__name__, 'raw')
@property
def processed_folder(self) -> str:
return os.path.join(self.root, self.__class__.__name__, 'processed')
@property
def class_to_idx(self) -> Dict[str, int]:
return {_class: i for i, _class in enumerate(self.classes)}
def _check_exists(self) -> bool:
return all(
check_integrity(os.path.join(self.raw_folder, os.path.splitext(os.path.basename(url))[0]))
for url, _ in self.resources
)
def download(self) -> None:
    """Download the MNIST data if it doesn't exist already."""
    if self._check_exists():
        return
    os.makedirs(self.raw_folder, exist_ok=True)
    # download files
    for filename, md5 in self.resources:
        # Try each mirror in turn; the for/else raises only when every
        # mirror failed (a successful `break` skips the else clause).
        for mirror in self.mirrors:
            url = "{}{}".format(mirror, filename)
            try:
                print("Downloading {}".format(url))
                download_and_extract_archive(
                    url, download_root=self.raw_folder,
                    filename=filename,
                    md5=md5
                )
            except URLError as error:
                print(
                    "Failed to download (trying next):\n{}".format(error)
                )
                # Try the next mirror; the `finally` below still runs first.
                continue
            finally:
                print()
            # Download succeeded: stop trying mirrors for this file.
            break
        else:
            raise RuntimeError("Error downloading {}".format(filename))
def extra_repr(self) -> str:
    """One-line split description shown in the dataset's repr."""
    split = "Train" if self.train is True else "Test"
    return "Split: {}".format(split)
class FashionMNIST_BADNETS(MNIST_BADNETS):
    """`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
    Args:
        root (string): Root directory of dataset where ``FashionMNIST/raw/train-images-idx3-ubyte``
            and ``FashionMNIST/raw/t10k-images-idx3-ubyte`` exist.
        train (bool, optional): If True, creates dataset from ``train-images-idx3-ubyte``,
            otherwise from ``t10k-images-idx3-ubyte``.
        download (bool, optional): If True, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
    """
    # Official Fashion-MNIST S3 mirror.
    mirrors = ["http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/"]
    # (archive filename, expected md5) pairs verified after download.
    resources = [
        ("train-images-idx3-ubyte.gz", "8d4fb7e6c68d591d4c3dfef9ec88bf0d"),
        ("train-labels-idx1-ubyte.gz", "25c81989df183df01b3e8a0aad5dffbe"),
        ("t10k-images-idx3-ubyte.gz", "bef4ecab320f06d8554ea6380940ec79"),
        ("t10k-labels-idx1-ubyte.gz", "bb300cfdad3c16e7a12a480ee83cd310"),
    ]
    # classes[i] is the human-readable label for target value i.
    classes = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
{
"api_name": "torchvision.transforms.Compose",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 32,
"usage_type": "call"
},
{
... |
3520691800 | import copy
import fnmatch
import logging
import re
from collections import namedtuple
from enum import Enum
from typing import Dict, Iterable, Optional, Union
import yaml
from meltano.core.behavior import NameEq
from meltano.core.behavior.canonical import Canonical
from meltano.core.behavior.hookable import HookObject
from meltano.core.setting_definition import SettingDefinition
from meltano.core.utils import NotFound, compact, find_named, flatten
logger = logging.getLogger(__name__)
class VariantNotFoundError(Exception):
    """Raised when a plugin definition has no variant with the requested name."""

    def __init__(self, plugin: "PluginDefinition", variant_name):
        self.plugin = plugin
        self.variant_name = variant_name

    def __str__(self):
        descriptor = self.plugin.type.descriptor.capitalize()
        return "{type} '{name}' variant '{variant}' is not known to Meltano. Variants: {variant_labels}".format(
            type=descriptor,
            name=self.plugin.name,
            variant=self.variant_name,
            variant_labels=self.plugin.variant_labels,
        )
class YAMLEnum(str, Enum):
    """Enum whose members compare/print as their string value and dump as plain YAML strings."""

    def __str__(self):
        return self.value

    @staticmethod
    def yaml_representer(dumper, obj):
        """Represent a member as an untagged YAML string scalar."""
        return dumper.represent_scalar("tag:yaml.org,2002:str", str(obj))
# Register a representer so every YAMLEnum subclass serializes as a plain
# YAML string (its value) instead of a tagged Python object.
yaml.add_multi_representer(YAMLEnum, YAMLEnum.yaml_representer)
class PluginType(YAMLEnum):
    """Enumeration of the plugin types Meltano knows about."""

    EXTRACTORS = "extractors"
    LOADERS = "loaders"
    TRANSFORMS = "transforms"
    MODELS = "models"
    DASHBOARDS = "dashboards"
    ORCHESTRATORS = "orchestrators"
    TRANSFORMERS = "transformers"
    FILES = "files"

    def __str__(self):
        return self.value

    @property
    def descriptor(self):
        """Human-readable descriptor, e.g. 'extractor' or 'file bundle'."""
        return "file bundle" if self is PluginType.FILES else self.singular

    @property
    def singular(self):
        """Makes it singular for `meltano add PLUGIN_TYPE`"""
        return self.value[:-1]

    @property
    def verb(self):
        """Verb form of the type, e.g. 'extract' or 'transform'."""
        return self.singular if self is PluginType.TRANSFORMS else self.value[:-3]

    @classmethod
    def value_exists(cls, value):
        """Return True if `value` matches one of the member values."""
        return value in cls._value2member_map_

    @classmethod
    def cli_arguments(cls):
        """All accepted CLI spellings: singular forms followed by the members themselves."""
        singulars = [member.singular for member in cls]
        return singulars + list(cls)

    @classmethod
    def from_cli_argument(cls, value):
        """Resolve a CLI argument (singular or plural form) to a member."""
        normalized = value if value.endswith("s") else value + "s"
        return cls(normalized)
class PluginRef(Canonical):
    """Reference to a plugin, identified by its type and name."""

    def __init__(self, plugin_type: Union[str, PluginType], name: str, **kwargs):
        # Coerce string type values (e.g. "extractors") into PluginType members.
        self._type = (
            plugin_type
            if isinstance(plugin_type, PluginType)
            else PluginType(plugin_type)
        )
        super().__init__(name=name, **kwargs)

    @property
    def type(self):
        """The plugin's PluginType."""
        return self._type

    def __eq__(self, other):
        # Two refs are equal when both name and type match.
        return self.name == other.name and self.type == other.type

    def __hash__(self):
        return hash((self.type, self.name))

    def set_presentation_attrs(self, extras):
        """Move presentation keys onto this object; pops them from `extras` in place."""
        self.update(
            hidden=extras.pop("hidden", None),
            label=extras.pop("label", None),
            logo_url=extras.pop("logo_url", None),
            description=extras.pop("description", None),
        )
class Variant(NameEq, Canonical):
    """A single variant (implementation) of a plugin definition.

    Fix: the list parameters previously used mutable default arguments
    (`= []`); they now default to None with the same effective behavior
    and no shared state between calls.
    """

    ORIGINAL_NAME = "original"
    DEFAULT_NAME = "default"

    def __init__(
        self,
        name: str = None,
        original: Optional[bool] = None,
        deprecated: Optional[bool] = None,
        docs: Optional[str] = None,
        repo: Optional[str] = None,
        pip_url: Optional[str] = None,
        executable: Optional[str] = None,
        capabilities: Optional[list] = None,
        settings_group_validation: Optional[list] = None,
        settings: Optional[list] = None,
        **extras,
    ):
        super().__init__(
            name=name,
            original=original,
            deprecated=deprecated,
            docs=docs,
            repo=repo,
            pip_url=pip_url,
            executable=executable,
            # Copy the sequences so callers' lists are never shared or mutated.
            capabilities=list(capabilities or []),
            settings_group_validation=list(settings_group_validation or []),
            settings=list(map(SettingDefinition.parse, settings or [])),
            extras=extras,
        )
class PluginDefinition(PluginRef):
    """A discoverable plugin definition with one or more variants.

    Fix: `variants` previously used a mutable default argument (`[]`);
    it now defaults to None with identical behavior.
    """

    def __init__(
        self,
        plugin_type: PluginType,
        name: str,
        namespace: str,
        variant: Optional[str] = None,
        variants: Optional[list] = None,
        **extras,
    ):
        super().__init__(plugin_type, name)

        # Lazily-evaluated fallbacks for presentation attributes.
        self._defaults["label"] = lambda p: p.name

        def default_logo_url(p):
            # e.g. "tap-gitlab" -> "/static/logos/gitlab-logo.png"
            short_name = re.sub(r"^(tap|target)-", "", p.name)
            return f"/static/logos/{short_name}-logo.png"

        self._defaults["logo_url"] = default_logo_url

        if not variants:
            variant = Variant(variant, **extras)
            # Any properties considered "extra" by the variant should be
            # considered extras of the plugin definition.
            extras = variant.extras
            variant.extras = {}
            variants = [variant]

        # Attributes will be listed in meltano.yml in this order:
        self.namespace = namespace
        self.set_presentation_attrs(extras)
        self.extras = extras
        self.variants = list(map(Variant.parse, variants))

    def __iter__(self):
        """Yield (key, value) pairs; a single variant's properties are inlined."""
        for k, v in super().__iter__():
            if k == "variants" and len(v) == 1:
                # If there is only a single variant, its properties can be
                # nested in the plugin definition
                for variant_k, variant_v in v[0]:
                    if variant_k == "name":
                        variant_k = "variant"
                    yield (variant_k, variant_v)
            else:
                yield (k, v)

    def get_variant(self, variant_name: str) -> Variant:
        """Return the variant named `variant_name`.

        Raises:
            VariantNotFoundError: if no variant by that name exists.
        """
        try:
            return find_named(self.variants, variant_name)
        except NotFound as err:
            raise VariantNotFoundError(self, variant_name) from err

    def find_variant(self, variant_or_name: Union[str, Variant] = None):
        """Resolve a Variant object, name, 'default', or 'original' to a Variant."""
        if isinstance(variant_or_name, Variant):
            return variant_or_name
        if variant_or_name is None or variant_or_name == Variant.DEFAULT_NAME:
            # The first listed variant is the default.
            return self.variants[0]
        if variant_or_name == Variant.ORIGINAL_NAME:
            try:
                return next(v for v in self.variants if v.original)
            except StopIteration:
                return self.variants[0]
        return self.get_variant(variant_or_name)

    def variant_label(self, variant):
        """Return label for specified variant."""
        variant = self.find_variant(variant)
        label = variant.name or Variant.ORIGINAL_NAME
        if variant == self.variants[0]:
            label = f"{label} (default)"
        elif variant.deprecated:
            label = f"{label} (deprecated)"
        return label

    @property
    def variant_labels(self):
        """Return labels for supported variants."""
        return ", ".join([self.variant_label(variant) for variant in self.variants])
class BasePlugin(HookObject):
    """A plugin definition paired with one chosen variant.

    Attribute lookups that fail on this object fall through to the
    definition first, then the variant (see __getattr__).
    """
    # Extra SettingDefinitions contributed by subclasses.
    # NOTE(review): class-level mutable list -- subclasses should override,
    # never mutate in place.
    EXTRA_SETTINGS = []

    def __init__(self, plugin_def: PluginDefinition, variant: Variant):
        super().__init__()
        self._plugin_def = plugin_def
        self._variant = variant

    def __eq__(self, other):
        return (
            self._plugin_def == other._plugin_def  # noqa: WPS437
            and self._variant == other._variant  # noqa: WPS437
        )

    def __hash__(self):
        return hash((self._plugin_def, self._variant))

    def __iter__(self):
        yield from self._plugin_def

    def __getattr__(self, attr):
        # Only invoked when normal lookup fails: delegate to the
        # definition, then fall back to the variant.
        try:
            return getattr(self._plugin_def, attr)
        except AttributeError:
            return getattr(self._variant, attr)

    @property
    def variant(self):
        """Name of the selected variant."""
        return self._variant.name

    @property
    def executable(self):
        """Executable name; falls back to the plugin name if the variant has none."""
        return self._variant.executable or self._plugin_def.name

    @property
    def extras(self):
        """Merged extras; variant values override the definition's."""
        return {**self._plugin_def.extras, **self._variant.extras}

    @property
    def extra_settings(self):
        """SettingDefinitions for the extras (keys prefixed with '_')."""
        defaults = {f"_{k}": v for k, v in self.extras.items()}
        existing_settings = []
        for setting in self.EXTRA_SETTINGS:
            default_value = defaults.get(setting.name)
            if default_value is not None:
                setting = setting.with_attrs(value=default_value)
            existing_settings.append(setting)
        # Create setting definitions for unknown defaults,
        # including flattened keys of default nested object items
        existing_settings.extend(
            SettingDefinition.from_missing(
                existing_settings, defaults, custom=False, default=True
            )
        )
        return existing_settings

    def env_prefixes(self, for_writing=False):
        """Return environment variable prefixes to use for settings."""
        return [self.name, self.namespace]

    def is_installable(self):
        # Installable iff a pip_url is defined (on the variant or definition).
        return self.pip_url is not None

    def is_invokable(self):
        return self.is_installable()

    def is_configurable(self):
        return True

    def should_add_to_file(self):
        return True

    @property
    def runner(self):
        return None

    def exec_args(self, files: Dict):
        """Arguments to pass to the plugin executable (none by default)."""
        return []

    @property
    def config_files(self):
        """Return a list of stubbed files created for this plugin."""
        return dict()

    @property
    def output_files(self):
        return dict()

    def process_config(self, config):
        """Hook for subclasses to post-process config before use (identity by default)."""
        return config
| learningequality/meltano | src/meltano/core/plugin/base.py | base.py | py | 9,638 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "yaml.add_multi_representer",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "meltano.core.b... |
6812358559 | import webbrowser, os
import json
import boto3
import io
from io import BytesIO
import sys
from pprint import pprint
from dotenv import load_dotenv
import pandas as pd
# Load AWS credentials from a local .env file into the environment, then
# read them for the Textract client below.
load_dotenv()
AWSSecretKey = os.getenv('AWSSecretKey')
AWSAccessKeyId = os.getenv('AWSAccessKeyId')
def get_rows_columns_map(table_result, blocks_map):
    """Build {row_index: {col_index: cell_text}} for one Textract TABLE block."""
    rows = {}
    for relationship in table_result['Relationships']:
        if relationship['Type'] != 'CHILD':
            continue
        for child_id in relationship['Ids']:
            cell = blocks_map[child_id]
            if cell['BlockType'] != 'CELL':
                continue
            row_index = cell['RowIndex']
            col_index = cell['ColumnIndex']
            # Lazily create the row dict, then fill in the cell text.
            row = rows.setdefault(row_index, {})
            row[col_index] = get_text(cell, blocks_map)
    return rows
def get_text(result, blocks_map):
    """Concatenate the WORD text (and 'X ' for selected checkboxes) of a block's children.

    Each token is followed by a trailing space, matching Textract's
    cell-text convention.
    """
    pieces = []
    for relationship in result.get('Relationships', []):
        if relationship['Type'] != 'CHILD':
            continue
        for child_id in relationship['Ids']:
            word = blocks_map[child_id]
            if word['BlockType'] == 'WORD':
                pieces.append(word['Text'] + ' ')
            elif word['BlockType'] == 'SELECTION_ELEMENT' and word['SelectionStatus'] == 'SELECTED':
                pieces.append('X ')
    return ''.join(pieces)
def get_table_csv_results(file_name):
    """OCR a local image with AWS Textract and return its table as a DataFrame.

    Returns an HTML error string when no table is detected.
    NOTE(review): despite the name, this returns the pandas DataFrame built
    by generate_table_csv -- and when several tables are found, only the
    LAST one survives (`csv` is overwritten each iteration).
    """
    with open(file_name, 'rb') as file:
        img_test = file.read()
        bytes_test = bytearray(img_test)
        # process using image bytes
    # get the results
    client = boto3.client('textract', aws_access_key_id = AWSAccessKeyId,
                          aws_secret_access_key = AWSSecretKey,
                          region_name = 'ap-south-1')
    response = client.analyze_document(Document={'Bytes': bytes_test}, FeatureTypes=['TABLES'])
    # Get the text blocks
    blocks=response['Blocks']
    # Index every block by Id and collect the TABLE blocks.
    blocks_map = {}
    table_blocks = []
    for block in blocks:
        blocks_map[block['Id']] = block
        if block['BlockType'] == "TABLE":
            table_blocks.append(block)
    if len(table_blocks) <= 0:
        return "<b> NO Table FOUND </b>"
    csv = ''
    for index, table in enumerate(table_blocks):
        csv = generate_table_csv(table, blocks_map, index +1)
    return csv
def generate_table_csv(table_result, blocks_map, table_index):
    """Convert one Textract table into a 4-column pandas DataFrame.

    Row 1 supplies the column headers; every other row supplies
    (entity, observed value, unit, reference interval). Empty observed
    values and intervals are recorded as -1; observed values have any
    thousands separators removed and are parsed as floats.
    """
    rows = get_rows_columns_map(table_result, blocks_map)

    headers = []
    entities = []
    observations = []
    units = []
    intervals = []
    for row_index, cols in rows.items():
        if row_index == 1:
            headers = [cell_text.strip() for _, cell_text in cols.items()]
            continue
        entities.append(cols[1].strip())
        if cols[2] == '':
            observations.append(-1)
        else:
            observations.append(float(cols[2].replace(',', '')))
        units.append(cols[3].strip())
        intervals.append(-1 if cols[4] == '' else cols[4].strip())

    frame = {
        headers[0]: entities,
        headers[1]: observations,
        headers[2]: units,
        headers[3]: intervals,
    }
    return pd.DataFrame(frame)
def analysis(df):
    """Flag out-of-range observations in a report DataFrame.

    Expects the layout produced by generate_table_csv: column 0 = parameter
    name, column 1 = observed value (float, -1 if missing), column 3 =
    reference interval "min-max" string (-1 if missing).

    Returns {parameter: [direction, severity]} where direction is
    "low"/"high" and severity is 1 (outside the ~10%-widened limits) or
    0 (between a printed limit and its widened counterpart).

    Bug fixed: the original skipped a row only when BOTH values were
    missing (`and`), and evaluated int(limits) on the interval string --
    int("10-20") raises ValueError whenever the observation was -1, and
    a missing observation with real limits was falsely flagged "low".
    A row is now skipped when EITHER value is missing.
    """
    anomalies = {}
    columns = df.columns
    for i in range(len(df)):
        parameter = df.loc[i, columns[0]]
        observed = df.loc[i, columns[1]]
        limits = df.loc[i, columns[3]]
        # Skip rows with a missing observation OR a missing interval.
        if float(observed) == -1 or limits == -1:
            continue
        parts = str(limits).split("-")
        max_limit = float(parts[1])
        min_limit = float(parts[0])
        # Widen the printed limits by roughly 10% on each side.
        adj_max = float(max_limit / 0.9)
        adj_min = float(((min_limit / adj_max) - 0.1) * adj_max)
        observed = float(observed)
        vals = []
        if observed < adj_min:
            vals = ["low", 1]
        elif observed > adj_max:
            vals = ["high", 1]
        elif max_limit < observed < adj_max:
            vals = ["high", 0]
        elif adj_min < observed < min_limit:
            vals = ["low", 0]
        if vals:
            anomalies[parameter] = vals
    return anomalies
# def getPriorityValue(word, priority_list):
# for key in priority_list:
# if word == key:
# return priority_list.get(key).get('priority')
# for i in priority_list.keys():
# if type(priority_list.get(i)) == list:
# sub_list = priority_list.get(i)[0]
# for key in sub_list:
# if word == key:
# return sub_list.get(key).get('priority')
# return 6
# def getPriority(output, priority_list):
# key_dict = list(output.keys())
# final_list = []
# final_priority = 5
# for i in key_dict:
# val = getPriorityValue(i, priority_list)
# print(final_list)
# if val < final_priority:
# final_priority = val
# final_list.clear()
# final_list.append(i)
# elif val == final_priority:
# final_list.append(i)
# return final_list
# def getAnalysis(result_list, output, report_list):
# for i in result_list:
# rep_list = report_list.get(i)
# if rep_list == None:
# for j in report_list.keys():
# if type(report_list.get(j)) == list:
# sub_list = report_list.get(j)[0]
# for key in sub_list:
# if i == key:
# rep_list = sub_list.get(key)
# print(rep_list)
# output_value = output.get(i)
# output_list = rep_list.get(output_value[0])
# output_remedy = rep_list.get("remedy_" + output_value[0])
# if output_value[1] == 1:
# print("you need to visit a " + output_list[1])
# if output_list[0]:
# print(output_list[0])
# print("Remedy: ", output_remedy)
def getAnalysis(output, report_list, priority_list):
    """Print the report entry for every known anomaly plus the highest-priority subset.

    Side effect: each matched report entry gets a 'priority' key copied in
    from priority_list. Anomalies without a report entry are skipped.
    """
    final_dict = {}
    high_priority_dict = {}
    best_priority = 6
    for key in list(output.keys()):
        rep_list = report_list.get(key)
        if rep_list is None:
            continue
        rep_list['priority'] = priority_list.get(key)['priority']
        final_dict[key] = rep_list
        current = rep_list['priority']
        if current < best_priority:
            best_priority = current
            high_priority_dict = {key: rep_list}
        elif current == best_priority:
            high_priority_dict[key] = rep_list
    print("Final list of all: ", final_dict)
    print("Highest priority: ", high_priority_dict)
def main(file_name):
    """End-to-end pipeline: OCR the report image, flag anomalies, print advice."""
    table_csv = get_table_csv_results(file_name)
    # NOTE(review): get_table_csv_results may return an HTML string when no
    # table is found, which analysis() cannot handle.
    anomalies = analysis(table_csv)
    print(anomalies)
    #Sample Output:
    # NOTE(review): the computed `anomalies` above is ignored from here on;
    # a hard-coded sample is analyzed instead -- presumably for testing.
    output = {'Eosinophils ': ['high', 1], 'MPV (Mean Platelet Volume) ': ['high', 0]}
    f = open('../../Report Analysis/analysis.json')
    report_list = json.load(f)
    print(report_list)
    f.close()
    g = open('../../Report Analysis/priority.json')
    priority_list = json.load(g)
    print(priority_list)
    g.close()
    # result_list = getPriority(output, priority_list)
    # getAnalysis(result_list, output, report_list)
    getAnalysis(output, report_list, priority_list)
if __name__ == "__main__":
    # Usage: python imageOCR.py <path-to-report-image>
    file_name = sys.argv[1]
    main(file_name)
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_numbe... |
19424192217 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 17:05:23 2020
@author: Ruo-Yah Lai
"""
import matplotlib.pyplot as plt
import numpy as np
import csv
import ast
def spikePlots(turns, unit, subfield, filename, df, title=""):
    """
    turns: from alleyTransitions
    filename: the file with which locations a field is in
    df: path to the files with which locations a field is in

    Plots trajectory and spike positions around each of the first 100
    turns that overlap the given subfield, with 0.5 s of padding on each
    side. Green dot = trajectory start, blue dot = trajectory end.
    """
    with open(df+filename, "r") as csv_file:
        data_iter = csv.reader(csv_file)
        data = [data for data in data_iter]
    # Locations belonging to this subfield, stored as the string repr of a
    # list in column 1 of row subfield+1.
    fieldLoc = ast.literal_eval(data[subfield+1][1])
    turns2 = np.empty((0,5)) #turns in the field
    for turn in turns:
        # A turn overlaps the field when its locations (columns 5:8)
        # intersect fieldLoc (duplicates appear in the combined list).
        allLoc = fieldLoc + list(turn[5:8])
        if len(set(allLoc)) < len(allLoc):
            #exclude back around turns that are not in the field
            if len(set(turn[5:8])) == 2 and len(set(allLoc)) == len(allLoc)-1:
                continue
            turns2 = np.vstack((turns2, turn[:5].astype(float))) #allo before turn, ego, allo after turn, ts of exit, ts of entry
    fig, axs = plt.subplots(10,10,figsize=(20,20))
    axs = axs.flatten()
    # NOTE(review): assumes at least 100 qualifying turns; IndexError otherwise.
    for i in range(100):
        # Samples within [exit - 0.5 s, entry + 0.5 s]; timestamps appear to
        # be in microseconds (0.5e6) -- confirm.
        p = unit.position[np.logical_and(turns2[i,3]-0.5e6 < unit.position[:,0],
                                         unit.position[:,0] < turns2[i,4]+0.5e6)]
        s = unit.spikes[np.logical_and(turns2[i,3]-0.5e6 < unit.spikes[:,0],
                                       unit.spikes[:,0] < turns2[i,4]+0.5e6)]
        axs[i].plot(p[:,1], p[:,2], color=(0.5,0.5,0.5))
        axs[i].scatter(s[:,1], s[:,2], color="red", s=4, zorder=3)
        axs[i].scatter(p[0,1], p[0,2], c="green", s=4, zorder=4)
        axs[i].scatter(p[-1,1], p[-1,2], c="blue", s=4, zorder=4)
    fig.tight_layout()
    fig.suptitle(f"{title}\nGreen = beginning Blue = end", y=1.02)
{
"api_name": "csv.reader",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "ast.literal_eval",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_numb... |
31626333564 | import os
import bs4
from nltk.corpus.reader.api import CorpusReader
from nltk.corpus.reader.api import CategorizedCorpusReader
import nltk
import time
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
from sklearn.feature_extraction import text
from nltk import sent_tokenize
from nltk import wordpunct_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.decomposition import PCA
from numpy.linalg import svd
import numpy as np
from functools import reduce
import matplotlib.pyplot as plt
#1시간정도걸림
#nltk.download('wordnet')
#nltk.download('punkt')
#nltk.download('averaged_perceptron_tagger')
#nltk.download('omw-1.4')
# Category = leading directory name. NOTE(review): the character class
# omits 't' -- presumably to exclude a specific directory; confirm intent.
CAT_PATTERN = r'([a-su-z]+)/.*'
# Documents look like "<category>/<number>.html".
DOC_PATTERN = r'(?!\.)[a-z_\s]+/[0-9]+\.html'
# Default HTML tags to extract text from (empty: callers supply their own).
TAGS = []
# Paper titles live in <h1>, abstracts in <blockquote>.
title_TAGS = ['h1']
abstract_TAGS = ['blockquote']
class HTMLCorpusReader(CategorizedCorpusReader, CorpusReader):
    """
    A corpus reader for raw HTML documents to enable preprocessing.
    """
    def __init__(self, root, fileids=DOC_PATTERN, encoding='utf8',
                 tags=TAGS, **kwargs):
        """
        Initialize the corpus reader. Categorization arguments
        (``cat_pattern``, ``cat_map``, and ``cat_file``) are passed to
        the ``CategorizedCorpusReader`` constructor. The remaining
        arguments are passed to the ``CorpusReader`` constructor.
        """
        # Add the default category pattern if not passed into the class.
        if not any(key.startswith('cat_') for key in kwargs.keys()):
            kwargs['cat_pattern'] = CAT_PATTERN
        # Initialize the NLTK corpus reader objects
        CategorizedCorpusReader.__init__(self, kwargs)
        CorpusReader.__init__(self, root, fileids, encoding)
        # Save the tags that we specifically want to extract.
        self.tags = tags

    def resolve(self, fileids, categories):
        """
        Returns a list of fileids or categories depending on what is passed
        to each internal corpus reader function. Implemented similarly to
        the NLTK ``CategorizedPlaintextCorpusReader``.
        """
        if fileids is not None and categories is not None:
            raise ValueError("Specify fileids or categories, not both")
        if categories is not None:
            return self.fileids(categories)
        return fileids

    def docs(self, fileids=None, categories=None):
        """
        Returns the complete text of an HTML document, closing the document
        after we are done reading it and yielding it in a memory safe fashion.
        """
        # Resolve the fileids and the categories
        fileids = self.resolve(fileids, categories)
        # Create a generator, loading one document into memory at a time.
        # NOTE(review): the constructor's `encoding` argument is ignored
        # here -- files are always read as UTF-8.
        for path in self.abspaths(fileids):
            with open(path, 'r', encoding='UTF-8') as f:
                yield f.read()

    def sizes(self, fileids=None, categories=None):
        """
        Returns a list of tuples, the fileid and size on disk of the file.
        This function is used to detect oddly large files in the corpus.
        """
        # Resolve the fileids and the categories
        fileids = self.resolve(fileids, categories)
        # Create a generator, getting every path and computing filesize
        # NOTE(review): despite the docstring, only the size is yielded,
        # not a (fileid, size) tuple.
        for path in self.abspaths(fileids):
            yield os.path.getsize(path)
def describe(paragraphs, fileids, categories):
    """Compute corpus summary statistics: counts, vocabulary, ratios, timing."""
    t0 = time.time()
    counts = nltk.FreqDist()
    tokens = nltk.FreqDist()
    for paragraph in paragraphs:
        counts['paras'] += 1
        for sentence in nltk.sent_tokenize(paragraph):
            counts['sents'] += 1
            for token in nltk.wordpunct_tokenize(sentence):
                counts['words'] += 1
                tokens[token] += 1
    n_fileids = len(fileids)
    n_topics = len(categories)
    return {
        'files': n_fileids,
        'topics': n_topics,
        'paragraphs': counts['paras'],
        'sentences': counts['sents'],
        'words': counts['words'],
        'vocabulary size': len(tokens),
        'lexical diversity': float(counts['words']) / float(len(tokens)),
        'paragraphs per document': float(counts['paras']) / float(n_fileids),
        'sentences per paragraph': float(counts['sents']) / float(counts['paras']),
        'secs': time.time() - t0,
    }
def paras(htmls, TAGS):
    """Yield the text of each matching tag across all HTML documents."""
    for markup in htmls:
        soup = bs4.BeautifulSoup(markup, 'lxml')
        for element in soup.find_all(TAGS):
            yield element.text
        # Free the parse tree before moving to the next document.
        soup.decompose()
def sents(paragraph):
    """Yield each sentence of the paragraph."""
    yield from sent_tokenize(paragraph)
def words(paragraph):
    """Yield each word token of the paragraph, sentence by sentence."""
    for sentence in sents(paragraph):
        yield from wordpunct_tokenize(sentence)
def _tfidf_vectorize(corpus, min_df):
    """Shared TF-IDF vectorization; the public wrappers differ only in min_df.

    Stop words: scikit-learn's English list plus single letters, the empty
    token, and 'abstract'. Prints the first 100 feature names as a side
    effect (kept for parity with the original four functions, whose bodies
    were duplicated verbatim apart from min_df).
    """
    my_stop_words = text.ENGLISH_STOP_WORDS.union(['abstract', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
                                                   'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
                                                   'w', 'x', 'y', 'z', ''])
    tfidf = TfidfVectorizer(token_pattern='[a-zA-Z]*', stop_words=my_stop_words, min_df=min_df, sublinear_tf=True, max_df=0.85)
    res = tfidf.fit_transform(corpus)
    # NOTE(review): get_feature_names() was removed in scikit-learn 1.2;
    # newer versions require get_feature_names_out().
    print(tfidf.get_feature_names()[:100])
    return res


def sklearn_tfidf_vectorize(corpus):
    """TF-IDF with absolute min_df=5 (term must appear in >= 5 documents)."""
    return _tfidf_vectorize(corpus, 5)


def mindf005_sklearn_tfidf_vectorize(corpus):
    """TF-IDF with proportional min_df=0.05."""
    return _tfidf_vectorize(corpus, 0.05)


def mindf001_sklearn_tfidf_vectorize(corpus):
    """TF-IDF with proportional min_df=0.01."""
    return _tfidf_vectorize(corpus, 0.01)


def mindf0001_sklearn_tfidf_vectorize(corpus):
    """TF-IDF with proportional min_df=0.001."""
    return _tfidf_vectorize(corpus, 0.001)
# --- Build the title corpus (<h1>) and abstract corpus (<blockquote>). ---
# NOTE(review): positionally, CAT_PATTERN lands in the reader's `fileids`
# parameter and DOC_PATTERN in `encoding` for both constructors (the
# signature is (root, fileids, encoding, tags)); docs() hardcodes UTF-8 so
# the bogus encoding is unused -- confirm the fileids pattern is intended.
title_corpus = HTMLCorpusReader('', CAT_PATTERN, DOC_PATTERN, tags=title_TAGS)
title_fileids = title_corpus.fileids()
title_documents = title_corpus.docs(categories=title_corpus.categories())
title_htmls = list(title_documents)
abstract_corpus = HTMLCorpusReader('', CAT_PATTERN, DOC_PATTERN, abstract_TAGS)
abstract_fileids = abstract_corpus.fileids()
abstract_documents = abstract_corpus.docs(categories=abstract_corpus.categories())
abstract_htmls = list(abstract_documents)
title_categories = title_corpus.categories()
abstract_categories = abstract_corpus.categories()
# Keep only paragraphs containing "Title:" and remove the prefix.
# NOTE(review): str.strip('Title:\n') strips ANY of these characters from
# BOTH ends, so titles ending in e/i/l/t also lose letters;
# removeprefix('Title:\n') was probably intended.
title_paragraphs = list(paras(title_htmls, title_TAGS))
temp_title_paragraphs = []
for para in title_paragraphs:
    if "Title:" in para: # and len(para)>30
        temp_title_paragraphs.append(para.strip('Title:\n'))
title_paragraphs = temp_title_paragraphs
print("title_paragraphs len: ", len(title_paragraphs))
print("descreibe title_paragraphs", describe(title_paragraphs, title_fileids, title_categories))
abstract_paragraphs = list(paras(abstract_htmls, abstract_TAGS))
print("abstract_paragraphs len: ", len(abstract_paragraphs))
print("descreibe abstract_paragraphs", describe(abstract_paragraphs, abstract_fileids, abstract_categories))
# Pair each title with its abstract (relies on matching corpus order).
papers_list = []
for key, value in zip(title_paragraphs, abstract_paragraphs):
    temp_dict = dict()
    temp_dict['title'] = key
    temp_dict['abstract'] = value
    papers_list.append(temp_dict)
print("papers_list[:5] : ", papers_list[:5])
print("title paragraphs[:5] : ", title_paragraphs[:5])
# TF-IDF matrix over the cleaned titles, densified into a DataFrame.
sparse_title_tf_idf = sklearn_tfidf_vectorize(title_paragraphs)
print(sparse_title_tf_idf)
print("type of sparse_title_tf_idf : ", type(sparse_title_tf_idf))
title_tf_idf = sparse_title_tf_idf.todense()
print(title_tf_idf)
print("type of title_tf_idf : ", type(title_tf_idf))
title_tf_idf_df = pd.DataFrame(title_tf_idf)
print(title_tf_idf_df.head())
print("title_tf_idf_df.shape : ", title_tf_idf_df.shape)
# NOTE(review): `columns` here is a SET, so column order is arbitrary;
# pass a list like ['title', 'abstract'] instead.
df = pd.DataFrame(papers_list, columns={'title', 'abstract'})
print(df.head())
print("df.shape : ", df.shape)
print("abstract_paragraphs: ", abstract_paragraphs[:5])
# Tokenize each abstract into words, lemmatize every token, and re-join
# each abstract into a single space-separated string.
abstract_words = []
for p in abstract_paragraphs:
    abstract_words.append(list(words(p)))
print(abstract_words[0])
lemma_abstract_words = []
lemmatizer = WordNetLemmatizer()
# Fixed: the loop variable was previously named `words`, which clobbered
# the module-level words() generator function; the lemmatizer was also
# bound to the unhelpfully named `n`.
for word_list in abstract_words:
    lemma_abstract_words.append([lemmatizer.lemmatize(w) for w in word_list])
print(lemma_abstract_words[0])
print(len(lemma_abstract_words))
lemma_abstract_paragraphs = []
for token_list in lemma_abstract_words:
    lemma_abstract_paragraphs.append(" ".join(token_list))
print(lemma_abstract_paragraphs[0])
print(len(lemma_abstract_paragraphs))
# --- TF-IDF matrices for abstracts (lemmatized and raw) at several min_df
# thresholds; each is densified, wrapped in a DataFrame, and printed. ---
lemma_sparse_tf_idf = sklearn_tfidf_vectorize(lemma_abstract_paragraphs)
print(lemma_sparse_tf_idf)
print("type of lemma_sparse_tf_idf : ", type(lemma_sparse_tf_idf))
lemma_tf_idf = lemma_sparse_tf_idf.todense()
print(lemma_tf_idf)
print("type of lemma_tf_idf : ", type(lemma_tf_idf))
lemma_tf_idf_df = pd.DataFrame(lemma_tf_idf)
print(lemma_tf_idf_df.head())
print("lemma_tf_idf_df.shape : ", lemma_tf_idf_df.shape)
sparse_tf_idf = sklearn_tfidf_vectorize(abstract_paragraphs)
print(sparse_tf_idf)
print("type of sparse_tf_idf : ", type(sparse_tf_idf))
tf_idf = sparse_tf_idf.todense()
print(tf_idf)
print("type of tf_idf : ", type(tf_idf))
tf_idf_df = pd.DataFrame(tf_idf)
print(tf_idf_df.head())
print("tf_idf_df.shape : ", tf_idf_df.shape)
mindf005_sparse_tf_idf = mindf005_sklearn_tfidf_vectorize(abstract_paragraphs)
print(mindf005_sparse_tf_idf)
print("type of mindf005_sparse_tf_idf : ", type(mindf005_sparse_tf_idf))
mindf005_tf_idf = mindf005_sparse_tf_idf.todense()
print(mindf005_tf_idf)
print("type of mindf005_tf_idf : ", type(mindf005_tf_idf))
mindf005_tf_idf_df = pd.DataFrame(mindf005_tf_idf)
print(mindf005_tf_idf_df.head())
print("mindf005_tf_idf_df.shape : ", mindf005_tf_idf_df.shape)
mindf001_sparse_tf_idf = mindf001_sklearn_tfidf_vectorize(abstract_paragraphs)
print(mindf001_sparse_tf_idf)
print("type of mindf001_sparse_tf_idf : ", type(mindf001_sparse_tf_idf))
mindf001_tf_idf = mindf001_sparse_tf_idf.todense()
print(mindf001_tf_idf)
print("type of mindf001_tf_idf : ", type(mindf001_tf_idf))
mindf001_tf_idf_df = pd.DataFrame(mindf001_tf_idf)
print(mindf001_tf_idf_df.head())
print("mindf001_tf_idf_df.shape : ", mindf001_tf_idf_df.shape)
mindf0001_sparse_tf_idf = mindf0001_sklearn_tfidf_vectorize(abstract_paragraphs)
print(mindf0001_sparse_tf_idf)
print("type of mindf0001_sparse_tf_idf : ", type(mindf0001_sparse_tf_idf))
mindf0001_tf_idf = mindf0001_sparse_tf_idf.todense()
print(mindf0001_tf_idf)
print("type of mindf0001_tf_idf : ", type(mindf0001_tf_idf))
mindf0001_tf_idf_df = pd.DataFrame(mindf0001_tf_idf)
print(mindf0001_tf_idf_df.head())
print("mindf0001_tf_idf_df.shape : ", mindf0001_tf_idf_df.shape)
mindf005_lemma_sparse_tf_idf = mindf005_sklearn_tfidf_vectorize(lemma_abstract_paragraphs)
print(mindf005_lemma_sparse_tf_idf)
print("type of mindf005_lemma_sparse_tf_idf : ", type(mindf005_lemma_sparse_tf_idf))
mindf005_lemma_tf_idf = mindf005_lemma_sparse_tf_idf.todense()
print(mindf005_lemma_tf_idf)
print("type of mindf005_lemma_tf_idf : ", type(mindf005_lemma_tf_idf))
mindf005_lemma_tf_idf_df = pd.DataFrame(mindf005_lemma_tf_idf)
print(mindf005_lemma_tf_idf_df.head())
print("mindf005_lemma_tf_idf_df.shape : ", mindf005_lemma_tf_idf_df.shape)
mindf001_lemma_sparse_tf_idf = mindf001_sklearn_tfidf_vectorize(lemma_abstract_paragraphs)
print(mindf001_lemma_sparse_tf_idf)
print("type of mindf001_lemma_sparse_tf_idf : ", type(mindf001_lemma_sparse_tf_idf))
mindf001_lemma_tf_idf = mindf001_lemma_sparse_tf_idf.todense()
print(mindf001_lemma_tf_idf)
print("type of mindf001_lemma_tf_idf : ", type(mindf001_lemma_tf_idf))
mindf001_lemma_tf_idf_df = pd.DataFrame(mindf001_lemma_tf_idf)
print(mindf001_lemma_tf_idf_df.head())
print("mindf001_lemma_tf_idf_df.shape : ", mindf001_lemma_tf_idf_df.shape)
mindf0001_lemma_sparse_tf_idf = mindf0001_sklearn_tfidf_vectorize(lemma_abstract_paragraphs)
print(mindf0001_lemma_sparse_tf_idf)
print("type of mindf0001_lemma_sparse_tf_idf : ", type(mindf0001_lemma_sparse_tf_idf))
mindf0001_lemma_tf_idf = mindf0001_lemma_sparse_tf_idf.todense()
print(mindf0001_lemma_tf_idf)
print("type of mindf0001_lemma_tf_idf : ", type(mindf0001_lemma_tf_idf))
mindf0001_lemma_tf_idf_df = pd.DataFrame(mindf0001_lemma_tf_idf)
print(mindf0001_lemma_tf_idf_df.head())
print("mindf0001_lemma_tf_idf_df.shape : ", mindf0001_lemma_tf_idf_df.shape)
# Persist every DataFrame for downstream use.
title_tf_idf_df.to_csv('title_tf_idf_df.csv')
df.to_csv('df.csv')
tf_idf_df.to_csv('tf_idf_df.csv')
mindf005_tf_idf_df.to_csv('mindf005_tf_idf_df.csv')
mindf001_tf_idf_df.to_csv('mindf001_tf_idf_df.csv')
mindf0001_tf_idf_df.to_csv('mindf0001_tf_idf_df.csv')
lemma_tf_idf_df.to_csv('lemma_tf_idf_df.csv')
mindf005_lemma_tf_idf_df.to_csv('mindf005_lemma_tf_idf_df.csv')
mindf001_lemma_tf_idf_df.to_csv('mindf001_lemma_tf_idf_df.csv')
mindf0001_lemma_tf_idf_df.to_csv('mindf0001_lemma_tf_idf_df.csv')
print("csv saving done")
| kyle1213/data-mining | html_to_vector.py | html_to_vector.py | py | 13,987 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.corpus.reader.api.CategorizedCorpusReader",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.reader.api.CorpusReader",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "nltk.corpus.reader.api.CategorizedCorpusReader.__init__",
... |
33543687487 | from django.contrib import admin
from .models import Quiz, Category, Question, Answer, UserAnswers
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """
    Admin configuration for quiz categories.
    """
    list_display = ('id', 'category_name', 'created_at')
    list_display_links = ('id', 'category_name')
    search_fields = ('category_name',)
    list_filter = ('created_at',)
@admin.register(Quiz)
class QuizAdmin(admin.ModelAdmin):
    """
    Admin-panel configuration for the Quiz model.
    """
    # Columns shown in the admin change list.
    list_display = ('id', 'quiz_name', 'category', 'created_at')
    # Columns that link to the edit page.
    list_display_links = ('id', 'quiz_name')
    # Fields searched by the admin search box.
    search_fields = ('quiz_name',)
    # Sidebar filters (creation date and parent category).
    list_filter = ('created_at', 'category')
class AnswerInline(admin.StackedInline):
    """
    Inline editor so Answers can be edited on the Question admin page.
    """
    model = Answer
    # Require at least 4 answers per question.
    min_num = 4
    # Do not render extra blank answer forms beyond the minimum.
    extra = 0
@admin.register(Question)
class QuestionAdmin(admin.ModelAdmin):
    """
    Admin-panel configuration for the Question model.
    """
    # Edit the question's answers inline on the same page.
    inlines = [AnswerInline]
    # Columns shown in the admin change list.
    list_display = ('id', 'question', 'quiz', 'created_at')
    # Columns that link to the edit page.
    list_display_links = ('id', 'question')
    # Fields searched by the admin search box.
    search_fields = ('quiz',)
    # Sidebar filters.
    list_filter = ('quiz', 'created_at')
@admin.register(Answer)
class AnswerAdmin(admin.ModelAdmin):
    """
    Admin-panel configuration for the Answer model.
    """
    # Columns shown in the admin change list.
    list_display = ('id', 'answer', 'is_correct', 'question', 'created_at')
    # Columns that link to the edit page.
    list_display_links = ('id', 'answer')
    # Fields searched by the admin search box.
    search_fields = ('answer', 'question')
    # Sidebar filters.
    list_filter = ('is_correct', 'question', 'created_at')
@admin.register(UserAnswers)
class UserAnswersAdmin(admin.ModelAdmin):
    """
    Admin-panel configuration for the UserAnswers model (answers
    submitted by users).
    """
    # Columns shown in the admin change list.
    list_display = ('user', 'answer')
    # Sidebar filters.
    list_filter = ('answer',)
    # Fields searched by the admin search box.
    search_fields = ('answer',)
| Kagoharo/QuizSite | Quiz/quizlet/admin.py | admin.py | py | 1,915 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 5,
"usage_type": "call"
},
{
... |
75118121702 | """
Objet Contact représentant un contact de l'Annuaire
Author: Tristan Colombo <tristan@gnulinuxmag.com>
(@TristanColombo)
Date: 17-12-2015
Last modification: 17-12-2015
Licence: GNU GPL v3 (voir fichier gpl_v3.txt joint)
"""
import sqlite3
import pystache
import os
class Contact:
    """A contact of the address book, backed by an SQLite table ``Contact``.

    The database connection, its cursor and the LaTeX template are shared
    by all instances through class attributes; they are initialised lazily
    by the first instantiation.
    """
    # Shared SQLite connection (db_args[0]), set on first instantiation.
    base = None
    # Shared SQLite cursor (db_args[1]), set on first instantiation.
    cursor = None
    # Content of the 'doc.tex' pystache template, read once.
    template = None
    def __init__(self, name, db_args):
        """Build a contact from a "Forename Name" string.

        :param name: a single string containing exactly a forename and a
            name separated by a space; the process exits with status 1
            otherwise.
        :param db_args: pair (connection, cursor) used to populate the
            shared class attributes the first time around.
        """
        name = name.split(' ')
        if len(name) != 2:
            print('Vous devez saisir un prénom suivi du nom')
            print('Syntaxe: Prénom Nom')
            exit(1)
        if Contact.base is None:
            Contact.base = db_args[0]
        if Contact.cursor is None:
            Contact.cursor = db_args[1]
        if Contact.template is None:
            # Lazily load the LaTeX template shared by every contact.
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            try:
                with open('doc.tex', 'r') as fic:
                    Contact.template = fic.read()
            except:
                print('Accès impossible au gabarit')
                exit(3)
        self.forename = Contact.capitalizeName(name[0])
        self.name = Contact.capitalizeName(name[1])
        # Assigning through the 'mail' property: this triggers setMail,
        # which inserts the contact into the DB if it is not there yet.
        self.mail = self.searchMail()
    @staticmethod
    def capitalizeName(name):
        """Uppercase the first letter and every letter that follows a
        space or a hyphen; a space directly following a separator is
        dropped. Other characters are kept as-is.
        """
        capitalized = ''
        pred = ''
        for letter in name:
            if pred in ('', ' ', '-'):
                if letter != ' ':
                    capitalized += letter.upper()
            else:
                capitalized += letter
            pred = letter
        return capitalized
    def isInDB(self):
        """Return True if a row with this forename/name exists in the DB."""
        result = Contact.cursor.execute("""select name from Contact
                                           where forename = ?
                                           and name = ?""", (self.forename,
                                                             self.name))
        result = result.fetchone()
        return result is not None
    def addInDB(self):
        """Insert this contact (name, forename, mail) into the DB;
        exits with status 2 on a write error.
        """
        try:
            Contact.cursor.execute("""insert into Contact
                                      values(NULL, ?, ?, ?)""",
                                   (self.name, self.forename, self.mail))
            Contact.base.commit()
        except sqlite3.OperationalError:
            print('Erreur d\'écriture dans la base')
            exit(2)
    def searchMail(self):
        """Return the mail stored in the DB for this contact, or None."""
        mail = Contact.cursor.execute("""select mail from Contact
                                         where forename = ?
                                         and name = ?""", (self.forename,
                                                           self.name))
        mail = mail.fetchone()
        if mail is None:
            return None
        else:
            return mail[0]
    def getMail(self):
        # Fall back on the DB when the cached (mangled) attribute is unset.
        if self.__mail is None:
            return self.searchMail()
        else:
            return self.__mail
    def setMail(self, mail):
        # Normalise to lowercase, then persist: update the existing row
        # or insert a brand-new contact.
        if mail is None:
            self.__mail = None
        else:
            self.__mail = mail.lower()
        if self.isInDB():
            try:
                Contact.cursor.execute("""update Contact
                                          set mail = ?
                                          where name = ?
                                          and forename = ?""",
                                       (self.mail, self.name, self.forename))
                Contact.base.commit()
            except sqlite3.OperationalError:
                print('Erreur d\'écriture dans la base')
                exit(2)
        else:
            self.addInDB()
    # Property: reading falls back on the DB, writing persists to the DB.
    mail = property(getMail, setMail)
    def getName(self):
        return self.__name
    def setName(self, name):
        self.__name = name
    name = property(getName, setName)
    def getForename(self):
        return self.__forename
    def setForename(self, forename):
        self.__forename = forename
    forename = property(getForename, setForename)
    def generateDoc(self, sun=False):
        """Render the pystache template with this contact's data into
        'doc_contact.tex' and compile it to PDF with pdflatex; exits with
        status 4 on failure.
        """
        try:
            with open('doc_contact.tex', 'w') as fic:
                fic.write(pystache.render(Contact.template,
                                          {'name': self.name,
                                           'forename': self.forename,
                                           'sun': sun}))
            # Generate the PDF (output silenced).
            os.system('pdflatex doc_contact.tex 1>/dev/null')
        except:
            print('Erreur lor de la génération du PDF')
            exit(4)
| GLMF/GLMF190 | Dev/interface_CLI/etape_2/Contact.py | Contact.py | py | 4,342 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.OperationalError",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.OperationalError",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "pystache.render",
"line_number": 134,
"usage_type": "call"
},
{
"api... |
74512115622 | import cv2
import numpy as np
import matplotlib.pyplot as plt
# --- Section 1: panorama stitching ---------------------------------------
# Load the three overlapping views to be stitched into one panorama.
img1 = cv2.imread('shanghai-11.png')
img2 = cv2.imread('shanghai-12.png')
img3 = cv2.imread('shanghai-13.png')
plt.figure(1)
plt.subplot(131), plt.imshow(img1, cmap='gray'), plt.title("Image 1")
plt.subplot(132), plt.imshow(img2, cmap='gray'), plt.title("Image 2")
plt.subplot(133), plt.imshow(img3, cmap='gray'), plt.title("Image 3")
plt.show()
orbObj = cv2.ORB_create()  # Create ORB Object
# ORB keypoints + binary descriptors for each image.
kp1, descriptors1 = orbObj.detectAndCompute(img1, None)  # Computing descriptors and keypoints
kp2, descriptors2 = orbObj.detectAndCompute(img2, None)
kp3, descriptors3 = orbObj.detectAndCompute(img3, None)
# Hamming distance is the right metric for ORB's binary descriptors.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)  # Creating Matcher Object
# Keep only the best 15% of matches (by distance) per image pair,
# and draw at most 25 of them.
GOOD_MATCH_PERCENT = 0.15
MAX_MATCHES = 25
# Image 1 - Image 2 Matcher
matches1 = matcher.match(descriptors1, descriptors2, None)
matches1 = sorted(matches1, key=lambda x: x.distance, reverse=False)
good_matches1 = int(len(matches1) * GOOD_MATCH_PERCENT)
matches1 = matches1[:good_matches1] # Remove weak matches
# Image 1 - Image 3 Matcher
matches2 = matcher.match(descriptors1, descriptors3, None)
matches2 = sorted(matches2, key=lambda x: x.distance, reverse=False)
good_matches2 = int(len(matches2) * GOOD_MATCH_PERCENT)
matches2 = matches2[:good_matches2] # Remove weak matches
# Image 2 - Image 3 Matcher
matches3 = matcher.match(descriptors2, descriptors3, None)
matches3 = sorted(matches3, key=lambda x: x.distance, reverse=False)
good_matches3 = int(len(matches3) * GOOD_MATCH_PERCENT)
matches3 = matches3[:good_matches3] # Remove weak matches
# Drawing Matches
img_matches1 = cv2.drawMatches(img1, kp1, img2, kp2, matches1[0:MAX_MATCHES], None)
img_matches2 = cv2.drawMatches(img1, kp1, img3, kp3, matches2[0:MAX_MATCHES], None)
img_matches3 = cv2.drawMatches(img2, kp2, img3, kp3, matches3[0:MAX_MATCHES], None)
plt.figure(2)
plt.subplot(311), plt.imshow(img_matches1, cmap='gray'), plt.title("Image 1 - Image 2")
plt.subplot(312), plt.imshow(img_matches2, cmap='gray'), plt.title("Image 1 - Image 3")
plt.subplot(313), plt.imshow(img_matches3, cmap='gray'), plt.title("Image 2 - Image 3")
plt.show()
# Best Match Result : Img1 - Img2
# Collect matched keypoint coordinates: queryIdx indexes img1's keypoints,
# trainIdx indexes img2's.
points1 = np.zeros((len(matches1), 2), dtype=np.float32)
points2 = np.zeros((len(matches1), 2), dtype=np.float32)
for i, match in enumerate(matches1):
    points1[i, :] = kp1[match.queryIdx].pt
    points2[i, :] = kp2[match.trainIdx].pt
# Homography mapping img2 coordinates into img1's frame (RANSAC rejects outliers).
h, mask = cv2.findHomography(points2, points1, cv2.RANSAC) # Compute Homography
img1_height, img1_width, img1_channels = img1.shape
img2_height, img2_width, img2_channels = img2.shape
# Warp img2 onto a canvas wide enough for both images.
im2Aligned = cv2.warpPerspective(img2, h, (img2_width + img1_width, img2_height)) # Align Img2
stitched_Image12 = np.copy(im2Aligned) # Stitch Img 1 with Aligned Img 2
stitched_Image12[0:img1_height, 0:img1_width] = img1
plt.figure(3)
plt.imshow(stitched_Image12, cmap='gray'), plt.title("Image 1 - Image 2 Stiched")
plt.show()
# Repeating the above steps for Stiching: stitched_Image12 - Image 3
# NOTE(review): hard-coded crop width (1343 px), presumably trimming the
# black region left by the warp for these specific inputs -- confirm.
stitched_Image12 = stitched_Image12[:, :1343]
kp12, descriptors12 = orbObj.detectAndCompute(stitched_Image12, None)
matches12 = matcher.match(descriptors12, descriptors3, None)
matches12 = sorted(matches12, key=lambda x: x.distance, reverse=False)
good_matches12 = int(len(matches12) * GOOD_MATCH_PERCENT)
matches12 = matches12[:good_matches12] # Remove weak matches
# Drawing Matches
img_matches12 = cv2.drawMatches(stitched_Image12, kp12, img3, kp3, matches12[0:MAX_MATCHES], None)
plt.figure(4)
plt.imshow(img_matches12, cmap='gray'), plt.title("Stiched Image 12 - Image 3 Matches")
plt.show()
# Stiching Images:
points1 = np.zeros((len(matches12), 2), dtype=np.float32)
points2 = np.zeros((len(matches12), 2), dtype=np.float32)
for i, match in enumerate(matches12):
    points1[i, :] = kp12[match.queryIdx].pt
    points2[i, :] = kp3[match.trainIdx].pt
h123, mask123 = cv2.findHomography(points2, points1, cv2.RANSAC) # Compute Homography
img12_height, img12_width, img12_channels = stitched_Image12.shape
img3_height, img3_width, img3_channels = img3.shape
im3Aligned = cv2.warpPerspective(img3, h123, (img12_width + img3_width, img3_height))
stitched_Image123 = np.copy(im3Aligned) # Stitch Img 12 with Aligned Img 3
stitched_Image123[0:img12_height, 0:img12_width] = stitched_Image12
plt.figure(5)
plt.imshow(stitched_Image123, cmap='gray'), plt.title("Image 1 - Image 2 - Image 3 Stiched")
plt.show()
# Crop the black right border (hard-coded 1830 px for these inputs) and
# save the final panorama as JPEG at quality 80.
stitched_Image123_noblck = np.copy(stitched_Image123)
stitched_Image123_noblck = stitched_Image123_noblck[:, :1830]
cv2.imwrite('stiched_image.jpg', stitched_Image123_noblck, [cv2.IMWRITE_JPEG_QUALITY, 80])
plt.figure(6)
plt.subplot(2, 1, 1), plt.imshow(stitched_Image123, cmap='gray'), plt.title("Image 1 - Image 2 - Image 3 Stiched")
plt.subplot(2, 1, 2), plt.imshow(stitched_Image123_noblck, cmap='gray'), plt.title("No black -Image 1 - Image 2 - "
                                                                                   "Image 3 Stiched")
plt.show()
# SECTION 2
# --- Object detection in cluttered scenes --------------------------------
# NOTE(review): img1..img3, kp1..kp3 and descriptors1..3 below shadow the
# Section 1 variables of the same names.
obj = cv2.imread('object.jpg')
img1 = cv2.imread('clutter01.jpg')
img2 = cv2.imread('clutter02.jpg')
img3 = cv2.imread('clutter03.jpg')
img4 = cv2.imread('clutter04.jpg')
plt.figure(1)
# [:, :, ::-1] converts OpenCV's BGR ordering to RGB for matplotlib.
plt.subplot(151), plt.imshow(img1[:, :, ::-1]), plt.title("Clutter 1")
plt.subplot(152), plt.imshow(img2[:, :, ::-1]), plt.title("Clutter 2")
plt.subplot(153), plt.imshow(img3[:, :, ::-1]), plt.title("Clutter 3")
plt.subplot(154), plt.imshow(img4[:, :, ::-1]), plt.title("Clutter 4")
plt.subplot(155), plt.imshow(obj[:, :, ::-1]), plt.title("Object")
plt.show()
# Computing descriptors and keypoints using SIFT
siftObj = cv2.SIFT_create()  # Create SIFT Object
kp1, descriptors1 = siftObj.detectAndCompute(img1, None)
kp2, descriptors2 = siftObj.detectAndCompute(img2, None)
kp3, descriptors3 = siftObj.detectAndCompute(img3, None)
kp4, descriptors4 = siftObj.detectAndCompute(img4, None)
kp5, descriptors5 = siftObj.detectAndCompute(obj, None)
def flann_with_sift(image1, d1, keyp1, image2, d2, keyp2):
    """Locate image2 (the object) inside image1 (a cluttered scene).

    SIFT descriptors are matched with a FLANN KD-tree matcher, filtered by
    Lowe's ratio test, then an object->scene homography is estimated with
    RANSAC; the warped object's contours are drawn onto image1.

    :param image1: scene image (BGR ndarray) -- modified in place by
        cv2.drawContours.
    :param d1: SIFT descriptors of the scene.
    :param keyp1: keypoints of the scene.
    :param image2: object image (BGR ndarray).
    :param d2: SIFT descriptors of the object.
    :param keyp2: keypoints of the object.
    """
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=100)
    flann_obj = cv2.FlannBasedMatcher(index_params, search_params)
    # knnMatch with k=2 so the ratio test can compare best vs second-best.
    matches = flann_obj.knnMatch(d1, d2, 2)
    # Filter matches with Lowe's ratio test(src: opencv.com - tutorial_feature_flann_matcher)
    thresh = 0.67
    valid_matches = []
    for m, n in matches:
        if m.distance < thresh * n.distance:
            valid_matches.append(m)
    # A homography needs enough inliers to be meaningful; bail out early.
    if len(valid_matches) < 10:
        print(f"Valid matches: {len(valid_matches)} - Not enough matches")
    else:
        img_matched = np.empty((max(image1.shape[0], image2.shape[0]), image1.shape[1] + image2.shape[1], 3),
                               dtype=np.uint8)
        cv2.drawMatches(image1, keyp1, image2, keyp2, valid_matches, img_matched,
                        flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
        plt.figure(), plt.imshow(img_matched[:, :, ::-1]), plt.title(
            f"Matched images with Flann and SIFT")  # Draw Matches
        # queryIdx indexes the scene keypoints, trainIdx the object's.
        points1 = np.zeros((len(valid_matches), 2), dtype=np.float32)
        points2 = np.zeros((len(valid_matches), 2), dtype=np.float32)
        for i, match in enumerate(valid_matches):
            points1[i, :] = keyp1[match.queryIdx].pt
            points2[i, :] = keyp2[match.trainIdx].pt
        # Homography mapping object coordinates into the scene frame.
        h, mask = cv2.findHomography(points2, points1, cv2.RANSAC)
        imHeight, imWidth, channels = image1.shape
        imAligned = cv2.warpPerspective(image2, h, (imWidth, imHeight))
        #Draw Rectangles
        # NOTE(review): findContours is fed a plain grayscale (not
        # thresholded) image, so any nonzero pixel of the warped object
        # counts as foreground -- confirm this is intended.
        cont_image = np.copy(imAligned)
        cont_image = cv2.cvtColor(cont_image, cv2.COLOR_BGR2GRAY)
        contours, hierarchy = cv2.findContours(cont_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(image1, contours, -1, (0, 0, 255), 5)
        plt.figure()
        plt.subplot(1, 2, 1), plt.imshow(image1[:, :, ::-1]), plt.title("Original")
        plt.subplot(1, 2, 2), plt.imshow(imAligned[:,:,::-1]), plt.title("Homography")
        plt.show()
# Search for the object in each of the four cluttered scenes.
flann_with_sift(img1, descriptors1, kp1, obj, descriptors5, kp5)
flann_with_sift(img2, descriptors2, kp2, obj, descriptors5, kp5)
flann_with_sift(img3, descriptors3, kp3, obj, descriptors5, kp5)
flann_with_sift(img4, descriptors4, kp4, obj, descriptors5, kp5)
| costinchican/computer_vision | App2/app2.py | app2.py | py | 8,599 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_nu... |
73497184104 | import importlib
import io
import math
import os
import typing
from enum import Enum
import discord
import humanize
from discord.ext import commands
from jishaku.functools import executor_function
from PIL import Image
import common.image_utils as image_utils
import common.utils as utils
class ImageCMDs(commands.Cog, name="Image"):
"""A series of commands for manipulating images in certain ways."""
def __init__(self, bot):
self.bot: utils.SeraphimBase = bot
def get_size(self, image: io.BytesIO):
old_pos = image.tell()
image.seek(0, os.SEEK_END)
size = image.tell()
image.seek(old_pos, os.SEEK_SET)
return size
@executor_function
def pil_compress(self, image, ext, flags):
try:
pil_image = Image.open(image)
compress_image = io.BytesIO()
if (
flags.ori_ext in ("gif", "webp")
and ext not in ("gif", "webp")
and pil_image.is_animated
):
raise commands.BadArgument(
"Cannot convert an animated image to this file type!"
)
if flags.shrink:
width = pil_image.width
height = pil_image.height
if width > 1920 or height > 1920:
bigger = max(width, height)
factor = math.ceil(bigger / 1920)
pil_image = pil_image.reduce(factor=factor)
if ext == "jpeg":
if pil_image.mode != "RGB":
pil_image = pil_image.convert("RGB")
pil_image.save(
compress_image, format=ext, quality=flags.quality, optimize=True
)
elif ext in ("gif", "png"):
pil_image.save(compress_image, format=ext, optimize=True)
elif ext == "webp":
pil_image.save(
compress_image,
format=ext,
minimize_size=True,
quality=flags.quality,
)
else:
compress_image.close()
raise commands.BadArgument("Invalid file type!")
compress_image.seek(0, os.SEEK_SET)
return compress_image
except:
compress_image.close()
raise
finally:
pil_image.close()
@executor_function
def pil_resize(
self,
image,
ext,
percent: typing.Optional[float],
width: typing.Optional[int],
height: typing.Optional[int],
filter: int,
):
resized_image = io.BytesIO()
try:
pil_image = Image.open(image)
ori_width = pil_image.width
ori_height = pil_image.height
if percent:
new_width = math.ceil(pil_image.width * (percent / 100))
new_height = math.ceil(pil_image.height * (percent / 100))
elif bool(width) ^ bool(height):
if width:
new_width = width
percent = width / pil_image.width
new_height = math.ceil(pil_image.height * percent)
else:
new_height = height
percent = height / pil_image.height
new_width = math.ceil(pil_image.width * percent)
else:
new_width = width
new_height = height
pil_image = pil_image.resize((new_width, new_height), filter)
pil_image.save(resized_image, format=ext)
resized_image.seek(0, os.SEEK_SET)
return resized_image, ori_width, ori_height, new_width, new_height
except:
resized_image.close()
raise
finally:
pil_image.close()
class ImageFilters(Enum):
# what Pillow should have done
NEAREST = 0
NONE = 0
BOX = 4
BILINEAR = 2
LINEAR = 2
HAMMING = 5
BICUBIC = 3
CUBIC = 3
LANCZOS = 1
ANTIALIAS = 1
def str_to_filter(self, argument: str):
try:
return self.ImageFilters[argument.upper()].value
except KeyError:
raise commands.BadArgument(f"Invalid filter `{argument}` provided!")
class CompressFlags(commands.FlagConverter):
# TODO: move flag checks into a flag converter thing
shrink: bool = True
format: str = "default"
quality: int = 70
@commands.command(aliases=["image_compress"])
async def img_compress(
self, ctx, url: typing.Optional[image_utils.URLToImage], *, flags: CompressFlags
):
"""Compresses down the image given.
It must be an image of type GIF, JPG, PNG, or WEBP. It must also be under 8 MB.
Image quality will take a hit, and the image will shrink down if it's too big (unless you specify to not shrink the image).
Flags:
shrink: <true/false> (specifies to shrink the image - it will by default)
format: <format> (converts the image to the specified format, and it must be 'gif, jpg, png, or webp' \
- the resulting image will be in the same format as the original by default)
quality: <number> (specifies quality from 0-100, only works with JPG and WEBP files, default is 70)"""
if flags.format == "default":
img_format = "default"
else:
img_type_checker = image_utils.ImageTypeChecker
img_format = await img_type_checker.convert(
img_type_checker, ctx, flags.format
)
if not 0 <= flags.quality <= 100:
raise commands.BadArgument("Quality must be a number between 0-100!")
if not url:
url = image_utils.image_from_ctx(ctx)
async with ctx.channel.typing():
image_data = await image_utils.get_file_bytes(
url, 8388608, equal_to=False
) # 8 MiB
try:
ori_image = io.BytesIO(image_data)
mimetype = discord.utils._get_mime_type_for_image(image_data)
ext = mimetype.split("/")[1]
flags.ori_ext = ext # yes, this is dirty
if img_format != "default":
ext = img_format
ori_size = self.get_size(ori_image)
compress_image = await self.pil_compress(ori_image, ext, flags)
compressed_size = self.get_size(compress_image)
finally:
del image_data
if ori_image:
ori_image.close()
try:
com_img_file = discord.File(compress_image, f"image.{ext}")
content = (
f"Original Size: {humanize.naturalsize(ori_size, binary=True)}\n"
+ "Reduced Size:"
f" {humanize.naturalsize(compressed_size, binary=True)}\n"
+ "Size Saved:"
f" {round(((1 - (compressed_size / ori_size)) * 100), 2)}%"
)
except:
compress_image.close()
raise
await ctx.reply(content=content, file=com_img_file)
class ConvertFlags(commands.FlagConverter):
shrink: bool = False
quality: int = 80
@commands.command(aliases=["image_convert"])
async def img_convert(
self,
ctx,
url: typing.Optional[image_utils.URLToImage],
img_type: image_utils.ImageTypeChecker,
*,
flags: ConvertFlags,
):
"""Converts the given image into the specified image type.
Both the image and the specified image type must be of type GIF, JP(E)G, PNG, or WEBP. The image must also be under 8 MB.
Flags:
shrink: <true/false> (specifies to shrink the image - it won't by default)
quality: <number> (specifies quality from 0-100, only works with JPG and WEBP files, default is 80)
"""
if not 0 <= flags.quality <= 100:
raise commands.BadArgument("Quality must be a number between 0-100!")
if not url:
url = image_utils.image_from_ctx(ctx)
async with ctx.channel.typing():
image_data = await image_utils.get_file_bytes(
url, 8388608, equal_to=False
) # 8 MiB
try:
ori_image = io.BytesIO(image_data)
mimetype = discord.utils._get_mime_type_for_image(image_data)
flags.ori_ext = mimetype.split("/")[1]
ext = img_type
converted_image = await self.pil_compress(ori_image, ext, flags)
finally:
del image_data
if ori_image:
ori_image.close()
try:
convert_img_file = discord.File(converted_image, f"image.{ext}")
except:
converted_image.close()
raise
await ctx.reply(file=convert_img_file)
class ResizeFlags(commands.FlagConverter):
percent: typing.Optional[float]
width: typing.Optional[int]
height: typing.Optional[int]
filter: str = "BILINEAR"
@commands.command(aliases=["image_resize"])
async def img_resize(
self, ctx, url: typing.Optional[image_utils.URLToImage], *, flags: ResizeFlags
):
"""Resizes the image as specified by the flags.
The image must be of type GIF, JP(E)G, PNG, or WEBP. The image must also be under 8 MB.
Required flags (one of these three must be present):
percent: <percent> (specifies the percent to reduce it to - whole numbers with no %, like '50', please.)
width: <width> (specifies the width to reduce it to - it must be a whole number and greater than 0.)
height: <height> (specifies the height to reduce it to - it must be a whole number and greater than 0.)
Optional flags:
filter: <filter> (specifies which resampling filter to use while downsizing - see \
https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-filters for the filters and which \
one is best for you. Default is Bilinear.)
"""
filter = self.str_to_filter(flags.filter)
if not url:
url = image_utils.image_from_ctx(ctx)
if not (flags.percent or flags.width or flags.height):
raise commands.BadArgument("No resizing arguments passed!")
if flags.percent and (flags.width or flags.height):
raise commands.BadArgument(
"You cannot have a percentage and a width/height at the same time!"
)
if flags.percent and flags.percent <= 0:
raise commands.BadArgument("The percent must be greater than 0!")
if flags.width and flags.width <= 0:
raise commands.BadArgument("The width must be greater than 0!")
if flags.height and flags.height <= 0:
raise commands.BadArgument("The height must be greater than 0!")
async with ctx.channel.typing():
image_data = await image_utils.get_file_bytes(
url, 8388608, equal_to=False
) # 8 MiB
try:
ori_image = io.BytesIO(image_data)
mimetype = discord.utils._get_mime_type_for_image(image_data)
ext = mimetype.split("/")[1]
(
resized_image,
ori_width,
ori_height,
new_width,
new_height,
) = await self.pil_resize(
ori_image,
ext,
flags.percent,
flags.width,
flags.height,
filter,
)
resize_size = self.get_size(resized_image)
if resize_size > 8388608:
resized_image.close()
del resized_image
raise commands.BadArgument("Resulting image was over 8 MiB!")
finally:
del image_data
if ori_image:
ori_image.close()
try:
resized_img_file = discord.File(resized_image, f"image.{ext}")
content = (
f"Original Image Dimensions: {ori_width}x{ori_height}\n"
+ f"New Image Dimensions: {new_width}x{new_height}\n"
+ "Resized To:"
f" {flags.percent or round(((new_width / ori_width) * 100), 2)}%\n"
+ "New Image Size:"
f" {humanize.naturalsize(resize_size, binary=True)}"
)
except:
resized_image.close()
raise
await ctx.reply(content=content, file=resized_img_file)
async def setup(bot):
importlib.reload(utils)
importlib.reload(image_utils)
await bot.add_cog(ImageCMDs(bot))
| AstreaTSS/Seraphim-Bot | cogs/general/cmds/image_cmds.py | image_cmds.py | py | 13,117 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "common.utils.SeraphimBase",
"line_number": 22,
"usage_type": "attribute"
},
{
... |
43131754181 | from concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor,as_completed,wait
from concurrent import futures
import time
import requests
# Official documentation:
# https://docs.python.org/zh-cn/3.8/library/concurrent.futures.html#module-concurrent.futures
# URLs fetched by the thread-pool workers below.
urls = ['http://www.tencent.com','http://www.google.com/','http://www.kugou.com/']
def jisuan_b(num):
    """Print a timestamped message that echoes *num*, then return a
    fixed marker string (used to demonstrate Future.result()).
    """
    stamp = time.time()
    message = "b_time is :{}。that you num is {}\n".format(stamp, num)
    print(message)
    return "this is return "
def get_for_content(res):
    """Fetch *res* over HTTP and return the URL itself.

    The response body is intentionally ignored: the request only
    exercises network I/O from a worker thread, and returning the URL
    lets the caller tell which future belonged to which address.
    """
    requests.get(url=res)
    return res
# max_workers=1 serialises the submitted jobs on a single worker thread.
with futures.ThreadPoolExecutor(max_workers=1) as f :
    start_time = time.time()
    # res = f.submit(jisuan_b,'1') # submit() returns a Future object; calling its result() method fetches the return value,
    # print(res.result()) # result() actually returns the callable's return value; result() itself is blocking
    thread_list = []
    for x in urls:
        thread_list.append(f.submit(get_for_content,x))
    # print(wait(thread_list,return_when=FIRST_COMPLETED)) # wait() blocks the main thread until the chosen condition holds, then execution resumes.
    # wait has three conditions: FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED
    # FIRST_COMPLETED: return as soon as any one task (thread) completes, without blocking further.
    # FIRST_EXCEPTION: return when the first exception occurs; if none occurs, behaves like ALL_COMPLETED
    # ALL_COMPLETED: return once every task (thread) has completed
    # The message below means "execution finished".
    print("执行结束")
    # for x in as_completed(thread_list): # as_completed() receives the list of future objects returned by the executor.
    # It yields the future of each finished thread, i.e. iterates one by one in completion order
    # print("请求的链接为",x.result()) # It blocks the main thread; whenever a thread in thread_list finishes, it yields that
    # # thread's future (threads that finish first notify the main thread first) until all are done (ps. second parameter is a timeout)
    # print("耗时为:{}".format(time.time()-start_time))
| melody27/python_script | thread_process_futures/concurrent.futures_test.py | concurrent.futures_test.py | py | 2,510 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "concurre... |
14484717443 | import cv2 as cv
import numpy as np
left_window = 'Task 4 - left'
right_window = 'Task 4 - right'
# create window for left camera
cv.namedWindow(left_window)
# create window for right camera
cv.namedWindow(right_window)
# read in images
img_l = cv.imread('imgs/combinedCalibrate/aL09.bmp')
img_r = cv.imread('imgs/combinedCalibrate/aR09.bmp')
gray = cv.cvtColor(img_l,cv.COLOR_BGR2GRAY)
# OpenCV expects (width, height)
shape = (gray.shape[1],gray.shape[0])
cv.imshow(left_window,img_l)
cv.imshow(right_window,img_r)
cv.waitKey(0)
# read in intrinsic params (camera matrix + distortion) for each camera
fs_read_l = cv.FileStorage('params/left_cam.yml',cv.FILE_STORAGE_READ)
mat_l = fs_read_l.getNode("intrinsic").mat()
dist_l = fs_read_l.getNode("distortion").mat()
fs_read_l.release()
fs_read_r = cv.FileStorage('params/right_cam.yml',cv.FILE_STORAGE_READ)
mat_r = fs_read_r.getNode("intrinsic").mat()
dist_r = fs_read_r.getNode("distortion").mat()
fs_read_r.release()
# stereo extrinsics: rotation and translation from left to right camera
fs_read = cv.FileStorage('params/stereo.yml',cv.FILE_STORAGE_READ)
R = fs_read.getNode('R').mat()
T = fs_read.getNode('T').mat()
fs_read.release()
# rectify images: R1/P1 rectify the left camera, R2/P2 the right one
R1,R2,P1,P2,Q,roi1,roi2 = cv.stereoRectify(mat_l,dist_l,mat_r,dist_r,shape,R,T)
map_l1, map_l2 = cv.initUndistortRectifyMap(mat_l,dist_l,R1,P1,shape,5)
# BUGFIX: the right-camera maps must be built from the RIGHT camera's
# intrinsics/distortion and its rectification (R2, P2); previously this
# line reused mat_l, dist_l, R1, P1, so the right image was rectified
# with the left camera's parameters. (5 == CV_32FC1 map type.)
map_r1, map_r2 = cv.initUndistortRectifyMap(mat_r,dist_r,R2,P2,shape,5)
rect_img_l = cv.remap(img_l,map_l1,map_l2,cv.INTER_LINEAR)
rect_img_r = cv.remap(img_r,map_r1,map_r2,cv.INTER_LINEAR)
# per-pixel difference between the original and rectified views
diff_l = cv.absdiff(img_l,rect_img_l)
diff_r = cv.absdiff(img_r,rect_img_r)
def drawLines(img, color):
    """Overlay three horizontal reference lines on *img*.

    Lines are drawn at y = 100, 200 and 300, spanning x = -10..800,
    with thickness 2 in the given BGR *color*. Drawing happens on the
    array that cv.line returns for the input image.
    """
    start_x, end_x = -10, 800
    for row in (100, 200, 300):
        image = cv.line(img, (start_x, row), (end_x, row), color, 2)
    return image
# Overlay horizontal reference lines to visually check the rectification:
# red on the right view, green on the left view.
rect_img_r = drawLines(rect_img_r,(0,0,255))
rect_img_l = drawLines(rect_img_l,(0,255,0))
cv.imshow(left_window,rect_img_l)
cv.imshow(right_window,rect_img_r)
cv.waitKey(0)
# Show the absolute difference between original and rectified images.
cv.imshow(left_window,diff_l)
cv.imshow(right_window,diff_r)
cv.waitKey(0)
cv.destroyAllWindows()
print('Program has ended')
| mjhaskell/EE_631 | HW3/task4.py | task4.py | py | 1,954 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.namedWindow",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_numb... |
2805625238 | import pandas as pd
import scipy
import pylab
import numpy as np
from statsmodels.formula.api import OLS
from easydev import Progress, AttrDict
from gdsctools.stats import MultipleTesting
from gdsctools import readers
from gdsctools.boxplots import BoxPlots
from gdsctools.settings import ANOVASettings
from gdsctools.anova_results import ANOVAResults
__all__ = ['ANOVA']
# Not that Logging is not used: it is not pickable and prevent
# multicore analysis.
class ANOVA(object): #Logging):
"""ANOVA analysis of the IC50 vs Feature matrices
This class is the core of the analysis. It can be used to
compute
#. One association between a drug and a feature
#. The association**S** between a drug and a set of features
#. All assocations between a set of deugs and a set of features.
For instance here below, we read an IC50 matrix and compute the
association for a given drug with a specific feature.
Note that genomic features are not provided as input but a default
file is provided with this package that contains 677 genomic
features for 1001 cell lines. If your IC50 contains unknown cell lines,
you can provide your own file.
.. plot::
:include-source:
:width: 80%
from gdsctools import IC50, ANOVA, ic50_test
ic = IC50(ic50_test)
an = ANOVA(ic)
# This is to select a specific tissue
an.set_cancer_type('breast')
df = an.anova_one_drug_one_feature('Drug_1047_IC50',
'TP53_mut', show=True)
:Details about the anova analysis: In the example above, we perform a
regression/anova test based on OLS regression. This is done for
one feature one drug across all cell lines (tissue) in the method
:meth:`anova_one_drug`. The regression
takes into account the following factors: tissue, MSI and features.
The order matters. If there is only one tissue, this factor is
dropped. If the number of MSI values is less than a pre-defined
parameter (see :class:`~gdsctools.settings.ANOVASettings`), it is
dropped. The other
methods :meth:`anova_one_drug` and :meth:`anova_all` are wrappers
around :meth:`anova_one_drug_one_feature` to loop over all drugs, and
loop over all drugs and all features, respectively.
V17 :
gdsc.volcano_FDR_interpolation = False
gdsc.settings.pvalue_correction_method = 'qvalue'
"""
def __init__(self, ic50, genomic_features=None,
drug_decode=None, verbose=True, low_memory=True,
set_media_factor=False):
""".. rubric:: Constructor
:param DataFrame IC50: a dataframe with the IC50. Rows should be
the COSMIC identifiers and columns should be the Drug names
(or identifiers)
:param features: another dataframe with rows as in the IC50 matrix
and columns as features. The first 3 columns must be named
specifically to hold tissues, MSI (see format).
:param drug_decode: a 3 column CSV file with drug's name and targets
see :mod:`readers` for more information.
:param verbose: verbosity in "WARNING", "ERROR", "DEBUG", "INFO"
The attribute :attr:`settings` contains specific settings related
to the analysis or visulation.
"""
self.verbose = verbose
# We first need to read the IC50 using a dedicated reader
self.ic50 = readers.IC50(ic50)
# Create a dictionary version of the data
# to be accessed per drug where NA have already been
# removed. Each drug is a dictionary with 2 keys:
# Y for the data and indices for the cosmicID where
# there is an IC50 measured.
ic50_parse = self.ic50.df.copy().unstack().dropna()
self.ic50_dict = dict([(d, {'indices': ic50_parse.ix[d].index,
'Y':ic50_parse.ix[d].values}) for d in self.ic50.drugIds])
# Reads features if provided, otherwise use a default data set
if genomic_features is None:
# Reads default version provided with the package
self.features = readers.GenomicFeatures()
else:
self.features = readers.GenomicFeatures(genomic_features)
if self.features.found_media is False and \
set_media_factor is True:
if self.verbose:
print('Populating MEDIA Factor in the Genomic Feature matrix')
self.features.fill_media_factor()
#: a CSV with 3 columns used in the report
self.read_drug_decode(drug_decode)
# create the multiple testing factory used in anova_all()
self.multiple_testing = MultipleTesting()
# We prune the genomic features by settings the cosmic ids of
# the features to be those of the cosmic ids of the IC50. See
# readers module. This affectation, prune the features dataframe
# automatically. This fails if a cosmic identifier is not
# found in the features' cosmic ids, so let us catch the error
# before hand to give a
unknowns = set(self.ic50.cosmicIds).difference(
set(self.features.cosmicIds))
if len(unknowns) > 0:
print("WARNING: " +
"%s cosmic identifiers in your IC50 " % len(unknowns) +
"could not be found in the genomic feature matrix. "+
"They will be dropped. Consider using a user-defined " +
"genomic features matrix")
self.ic50.drop_cosmic(list(unknowns))
self.features.cosmicIds = self.ic50.cosmicIds
#self.cosmicIds = self.ic50.cosmicIds
#: an instance of :class:`~gdsctools.settings.ANOVASettings`
self.settings = ANOVASettings()
self.settings.low_memory = low_memory
# alias to all column names to store results
# cast to list (Python3).
self.column_names = list(ANOVAResults().mapping.keys())
# skip assoc_id for now
self._odof_dict = dict([(name, None)
for name in self.column_names])
# a cache to store ANOVA results for each drug
self.individual_anova = {}
# must be called if ic50 or features are changed.
self.init()
def _autoset_msi_factor(self):
    """Decide whether the MSI factor can be included in the regression.

    The MSI (microsatellite instability) factor is kept only when the
    feature matrix provides an MSI column AND both the positive and
    negative populations reach ``settings.MSI_factor_threshold``.
    Otherwise ``settings.include_MSI_factor`` is switched off.
    """
    if not self.features.found_msi:
        # No MSI column in the genomic features: factor cannot be used.
        self.settings.include_MSI_factor = False
        return

    column = self.features.colnames.msi
    self.msi_factor = self.features.df[column]

    # Count positive (value 1) and negative samples.
    n_pos = self.msi_factor.sum()
    n_neg = len(self.msi_factor) - n_pos

    # We must have enough samples on BOTH sides; this mirrors the >=
    # comparisons used in _get_one_drug_one_feature_data.
    if min(n_pos, n_neg) < self.settings.MSI_factor_threshold:
        self.settings.include_MSI_factor = False
def _autoset_tissue_factor(self):
    """Extract the tissue factor and set the analysis type accordingly.

    A single tissue turns the analysis into a tissue-specific one (the
    settings' analysis_type and output directory take the tissue name);
    several tissues mean a pan-cancer ('PANCAN') analysis.
    """
    column = self.features.colnames.tissue
    self.tissue_factor = self.features.df[column]

    unique_tissues = self.tissue_factor.unique()
    if len(unique_tissues) == 1:
        only_tissue = unique_tissues[0]
        self.settings.analysis_type = only_tissue
        self.settings.directory = only_tissue
    else:
        # several tissues: pan-cancer analysis
        self.settings.analysis_type = 'PANCAN'
def _autoset_media_factor(self):
    """Decide whether the growth-media factor enters the regression.

    The media factor only makes sense for a pan-cancer analysis and
    requires a MEDIA column in the genomic feature matrix.
    """
    if self.settings.analysis_type != 'PANCAN':
        # tissue-specific analysis: never use the media factor
        self.settings.include_media_factor = False
        return

    if self.features.found_media is True:
        self.settings.include_media_factor = True
        media_column = self.features.colnames.media
        self.media_factor = self.features.df[media_column]
    else:
        self.settings.include_media_factor = False
def set_cancer_type(self, ctype=None):
    """Select only a set of tissues.

    Input IC50 may be PANCAN (several cancer tissues).
    This function can be used to select a subset of tissues.
    This function changes the :attr:`ic50` dataframe and possibly
    the feature as well if some are not relevant anymore (sum of the
    column is zero for instance).

    :param ctype: None or 'PANCAN' (both are no-ops), a single tissue
        name, or a list of tissue names. Each name must exist in the
        genomic feature matrix.
    :raises AssertionError: if a requested tissue is unknown
    """
    if ctype is None:
        return
    if ctype == 'PANCAN':
        return

    if isinstance(ctype, str):
        ctype = [str(ctype)]

    for this in ctype:
        # bugfix: report the offending tissue itself (previously the
        # whole list was interpolated, hiding which entry was invalid)
        assert this in self.features.tissues, "%s not found" % this

    # keep only features that correspond to the tissue
    self.features.keep_tissue_in(ctype)

    # NOTE(review): `.ix` was removed in pandas >= 1.0; `.loc` (or
    # `.reindex`) is the modern equivalent for this label selection —
    # confirm the pandas version pinned by the project before changing,
    # since the rest of the file also relies on `.ix`.
    self.ic50.df = self.ic50.df.ix[self.features.df.index]
    self.init()
def read_settings(self, settings):
    """Load settings from a JSON source and re-apply the cancer type.

    :param settings: whatever :meth:`ANOVASettings.from_json` accepts
    """
    loaded = self.settings
    loaded.from_json(settings)
    # the loaded analysis type may restrict the tissues
    self.set_cancer_type(loaded.analysis_type)
def init(self):
    """Rebuild every internal cache derived from the IC50/feature data.

    Must be called whenever :attr:`ic50` or :attr:`features` change.
    It recomputes the per-drug IC50 dictionaries, the tissue/MSI/media
    factors, the dummy design matrix used by the OLS regression, and
    resets the per-drug ANOVA cache. Prints which factors are included.
    """
    # Some preprocessing to speed up data access
    ic50_parse = self.ic50.df.copy().unstack().dropna()
    # for each drug, we store the IC50s (Y) and corresponding indices
    # of cosmic identifiers
    self.ic50_dict = dict([
        (d, {'indices': ic50_parse.ix[d].index,
        'Y': ic50_parse.ix[d].values}) for d in self.ic50.drugIds])
    # save the tissues
    self._autoset_tissue_factor()
    # and MSI (Microsatellite instability) status of the samples.
    self._autoset_msi_factor()
    # and (growth) media factor
    self._autoset_media_factor()

    # dictionaries to speed up code.
    self.features_dict = {}
    self.msi_dict = {}
    self.tissue_dict = {}
    self.media_dict = {}

    # fill the dictionaries for each drug once for all
    for drug_name in self.ic50.drugIds:
        indices = self.ic50_dict[drug_name]['indices']
        # if we were to store all drugs /features, this takes
        # 1Gb of memory for 265 drugs and 680 features. This is
        # therefore not scalable, especially for multiprocessing.
        if self.settings.low_memory is True:
            pass
        else:
            self.features_dict[drug_name] = self.features.df.ix[indices]
        # MSI, media and tissue are not large data files and can be stored
        if self.features.found_msi:
            self.msi_dict[drug_name] = self.msi_factor.ix[indices]
        if self.features.found_media:
            self.media_dict[drug_name] = self.media_factor.ix[indices]
        self.tissue_dict[drug_name] = self.tissue_factor.ix[indices]

    # some preprocessing for the OLS computation.
    # We create the dummies for the tissue factor once for all
    # Note that to agree with R convention, we have to resort the column
    # to agree with R convention that is a<B==b<c instead of
    # A<B<C<a<b<c (in Python)
    self._tissue_dummies = pd.get_dummies(self.tissue_factor)
    columns = self._tissue_dummies.columns
    columns = sorted(columns, key=lambda s: s.lower())
    columns = ['C(tissue)[T.' + x + ']' for x in columns]
    self._tissue_dummies.columns = columns

    if self.settings.include_media_factor:
        self._media_dummies = pd.get_dummies(self.media_factor)
        columns = self._media_dummies.columns
        columns = ['C(media)[T.' + x + ']' for x in columns]
        self._media_dummies.columns = columns
        for col in columns:
            self._tissue_dummies[col] = self._media_dummies[col]

    N = len(self._tissue_dummies)
    # placeholder columns; the actual MSI/feature values are filled in
    # per association inside anova_one_drug_one_feature()
    self._tissue_dummies['C(msi)[T.1]'] = [1]*N
    self._tissue_dummies['feature'] = [1] * N
    self._tissue_dummies.insert(0, 'Intercept', [1] * N)

    # drop first feature in the tissues that seems to be used as a
    # reference in the regression
    tissues = [x for x in self._tissue_dummies.columns if 'tissue' in x]
    self._tissue_dummies.drop(tissues[0], axis=1, inplace=True)
    if self.settings.include_media_factor:
        media = [x for x in self._tissue_dummies.columns if 'media' in x]
        self._tissue_dummies.drop(media[0], axis=1, inplace=True)

    # reset the buffer.
    self.individual_anova = {}

    for this in ['tissue', 'media', 'msi', 'feature']:
        if this in self._get_analysis_mode():
            print(this.upper() + " FACTOR : included")
        else:
            print(this.upper() + " FACTOR : NOT included")
def _get_cosmics(self):
    """Getter for the cosmicIds property (owned by the IC50 reader)."""
    identifiers = self.ic50.cosmicIds
    return identifiers
def _set_cosmics(self, cosmics):
    """Setter for the cosmicIds property.

    Keeps the IC50 and feature matrices aligned on the same samples,
    then rebuilds the internal caches and clears the ANOVA buffer.
    """
    for reader in (self.ic50, self.features):
        reader.cosmicIds = cosmics
    self.init()
    self.individual_anova = {}
# Property: assigning to ``cosmicIds`` updates BOTH the IC50 and feature
# matrices and triggers a full re-initialisation (see _set_cosmics).
cosmicIds = property(_get_cosmics, _set_cosmics,
    doc="get/set the cosmic identifiers in the IC50 and feature matrices")
def _get_drug_names(self):
    """Getter for the drugIds property (drug identifiers of the IC50 matrix)."""
    identifiers = self.ic50.drugIds
    return identifiers
def _set_drug_names(self, drugs):
    """Setter for the drugIds property; refreshes the internal caches."""
    self.ic50.drugIds = drugs
    # init() already resets the individual_anova cache, so there is no
    # need to clear it again here.
    self.init()
drugIds = property(_get_drug_names, _set_drug_names,
doc="Get/Set drug identifers")
def _get_feature_names(self):
    """Getter for the feature_names property.

    Skips the leading FACTOR columns (e.g. tissue/MSI/media), whose
    count is given by ``features.shift``.
    """
    offset = self.features.shift
    return self.features.features[offset:]
def _set_features_names(self, features):
    """Setter for the feature_names property.

    Replaces the feature list on the reader, then refreshes every cache
    derived from the previous selection.
    """
    self.features.features = features
    self.init()
    self.individual_anova = dict()
# Property: the getter skips the special FACTOR columns (see
# _get_feature_names); the setter rebuilds all caches.
feature_names = property(_get_feature_names, _set_features_names,
    doc="Get/Set feature names")
def _get_analysis_mode(self):
    """Return the ordered list of factors included in the regression.

    Possible entries are 'tissue', 'msi', 'media' and 'feature';
    'feature' is always present and always last.
    """
    s = self.settings
    wanted = [
        ('tissue', s.analysis_type == 'PANCAN'),
        ('msi', s.include_MSI_factor is True),
        ('media', s.include_media_factor is True),
        ('feature', True),
    ]
    return [name for name, included in wanted if included]
def diagnostics(self):
    """Return a dictionary summarising the feasibility of all tests.

    Keys are ``n_drug``, ``n_combos``, ``feasible_tests`` and
    ``percentage_feasible_tests``. A drug/feature pair is *feasible*
    when :meth:`_get_one_drug_one_feature_data` reports a True status.
    """
    drug_ids = self.ic50.drugIds
    feature_list = self.features.features[self.features.shift:]
    n_drugs = len(drug_ids)
    n_features = len(feature_list)
    n_combos = n_drugs * n_features

    # NOTE(review): the progress bar is sized with n_drugs but animated
    # once per drug/feature pair -- presumably cosmetic only; confirm.
    pb = Progress(n_drugs, 1)
    feasible = 0
    counter = 0
    for drug in drug_ids:
        for feature in feature_list:
            data = self._get_one_drug_one_feature_data(
                drug, feature, diagnostic_only=True)
            if data.status is True:
                feasible += 1
            counter += 1
            pb.animate(counter)

    return {
        'n_drug': n_drugs,
        'n_combos': n_combos,
        'feasible_tests': feasible,
        'percentage_feasible_tests': float(feasible) / n_combos * 100}
def _get_one_drug_one_feature_data(self, drug_name, feature_name,
        diagnostic_only=False):
    """Gather the data needed to test one drug/feature association.

    :param drug_name: a valid drug identifier
    :param feature_name: a valid feature name
    :param bool diagnostic_only: if True, return as soon as feasibility
        is known
    :return: a dictionary-like :class:`AttrDict` with relevant
        information. There is also a test to see if the data can be
        analysed or not. This is stored as a boolean value with key
        called *status*.
    """
    # dictionary structure to hold results (can set values as attributes)
    dd = AttrDict()

    # Historical note: selecting the non-NA IC50s with dropna() or a
    # boolean mask was 30% (or more) slower than the per-drug dictionary
    # built once for all in init(); see VCS history for the alternatives.
    dd.Y = self.ic50_dict[drug_name]['Y']
    # an alias to the indices
    indices = self.ic50_dict[drug_name]['indices']

    # select only relevant tissues/msi/features
    if self.settings.low_memory is True:
        # This line takes 50% of the time
        dd.masked_features = self.features.df.loc[indices, feature_name]
    else:
        dd.masked_features = self.features_dict[drug_name][feature_name]
    dd.masked_tissue = self.tissue_dict[drug_name]
    if self.features.found_msi:
        dd.masked_msi = self.msi_dict[drug_name]
        dd.positive_msi = dd.masked_msi.values.sum()
        dd.negative_msi = len(dd.masked_msi) - dd.positive_msi
    if self.features.found_media:
        dd.masked_media = self.media_dict[drug_name]

    # compute length of pos/neg features and MSI
    dd.positive_feature = dd.masked_features.values.sum()
    dd.negative_feature = len(dd.masked_features) - dd.positive_feature

    # Some validity tests to run the analysis or not
    feature_threshold = self.settings.feature_factor_threshold
    msi_threshold = self.settings.MSI_factor_threshold

    # NOTE(review): if include_MSI_factor is True while found_msi is
    # False, dd.negative_msi is undefined and the expression below
    # fails -- presumably _autoset_msi_factor keeps the two consistent;
    # confirm.
    A = self.settings.include_MSI_factor and\
        dd.positive_feature >= feature_threshold and\
        dd.negative_feature >= feature_threshold and\
        dd.negative_msi >= msi_threshold and \
        dd.positive_msi >= msi_threshold
    B = (not self.settings.include_MSI_factor) and\
        dd.positive_feature >= feature_threshold and\
        dd.negative_feature >= feature_threshold

    # partition the IC50s by feature status (fast numpy masking).
    # We factorise the mean/variance computations below rather than
    # using pandas/numpy mean()/std() or the stats module, for speed.
    dd.positives = dd.Y[dd.masked_features.values == 1]
    dd.negatives = dd.Y[dd.masked_features.values == 0]
    dd.Npos = len(dd.positives)
    dd.Nneg = len(dd.negatives)

    # additional information
    dd.feature_name = feature_name
    dd.drug_name = drug_name

    # FIXME is False does not give the same results as == False
    # in the test test_anova.py !!
    if (A == False) and (B == False):
        dd.status = False
        return dd
    else:
        dd.status = True

    if diagnostic_only is True:
        return dd

    # compute mean and std of pos and neg sets; using the precomputed
    # sums and N (5us) rather than mean() (15us)
    pos_sum = dd.positives.sum()
    neg_sum = dd.negatives.sum()
    dd.pos_IC50_mean = pos_sum / dd.Npos
    dd.neg_IC50_mean = neg_sum / dd.Nneg
    dd.delta_mean_IC50 = dd.pos_IC50_mean - dd.neg_IC50_mean

    # standard deviations factorised from the sums, with the ddof=1
    # (sample) convention to agree with R.
    # (bugfix/cleanup: a preliminary .std(ddof=1) computation that was
    # immediately overwritten by these formulas has been removed.)
    dd.pos_IC50_std = np.sqrt(( (dd.positives**2).sum() -
        pos_sum**2/dd.Npos)/(dd.Npos-1.))
    dd.neg_IC50_std = np.sqrt(( (dd.negatives**2).sum() -
        neg_sum**2/dd.Nneg)/(dd.Nneg-1.))

    # Compute Cohens and Glass effect size. Since underlying code
    # has lots in common, we do not use the modules but add
    # the code here below
    md = np.abs(dd.pos_IC50_mean - dd.neg_IC50_mean)
    dd.pos_glass = md / dd.pos_IC50_std
    dd.neg_glass = md / dd.neg_IC50_std

    csd = (dd.Npos - 1.) * dd.pos_IC50_std**2 + \
        (dd.Nneg - 1.) * dd.neg_IC50_std**2
    csd /= dd.Npos + dd.Nneg - 2.  # make sure this is float
    dd.effectsize_ic50 = md / np.sqrt(csd)

    # Note that equal_var is a user parameter and affects
    # results. The ANOVA_results.txt obtained from SFTP
    # have different values meaning that the equal.var param
    # was set to False. Note that pvalue is stored at index 1
    dd.ttest = self._get_ttest(dd.negatives, dd.positives)
    return dd
def _get_ttest(self, sample1, sample2):
    """Return the p-value of a two-sample t-test on the IC50 sets.

    The equal-variance assumption follows the user setting
    ``settings.equal_var_ttest``.
    """
    from scipy.stats import ttest_ind
    statistic_and_pvalue = ttest_ind(
        sample1, sample2, equal_var=self.settings.equal_var_ttest)
    # p-value is the second element of the result
    return statistic_and_pvalue[1]
def read_drug_decode(self, filename=None):
    """Read file with the DRUG information

    .. seealso:: :class:`gdsctools.readers.DrugDecode`
    """
    # Delegate the parsing to the DrugDecode reader.
    decoder = readers.DrugDecode(filename)
    self.drug_decode = decoder
def anova_one_drug_one_feature(self, drug_id,
        feature_name, show=False,
        production=False, directory='.'):
    """Compute ANOVA and various tests on one drug and one feature

    :param drug_id: a valid drug identifier
    :param feature_name: a valid feature name
    :param bool show: show some plots
    :param str directory: where to save the figure.
    :param bool production: if False, returns a dataframe otherwise
        a dictionary. This is to speed up analysis when scanning
        the drug across all features.

    .. note:: **for developer** this is the core of the analysis
        and should be kept as fast as possible. 95% of the time is spent
        here.

    .. note:: **for developer** Data used in this function comes from
        _get_one_drug_one_feature_data method, which should also be kept
        as fast as possible.
    """
    if drug_id not in self.drugIds:
        raise ValueError('Unknown drug name %s. Use e.g., %s'
            % (drug_id, self.drugIds[0]))

    if feature_name not in self.feature_names:
        # we start index at 3 to skip tissue/name/msi
        raise ValueError('Unknown feature name %s. Use e.g. one of %s'
            % (feature_name, self.feature_names[0:3]))

    # This extracts the relevant data and some simple metrics
    # This is now pretty fast accounting for 45 seconds
    # for 265 drugs and 988 features
    odof = self._get_one_drug_one_feature_data(drug_id, feature_name)
    drug_name = self.drug_decode.get_name(drug_id)
    drug_target = self.drug_decode.get_target(drug_id)

    # if the status is False, it means the number of data points
    # in a category (e.g., positive feature) is too low.
    # If so, nothing to do, we return an 'empty' dictionary
    if odof.status is False:
        results = self._odof_dict.copy()
        results['FEATURE'] = feature_name
        results['DRUG_ID'] = drug_id
        results['DRUG_NAME'] = drug_name
        results['DRUG_TARGET'] = drug_target
        results['N_FEATURE_pos'] = odof.Npos
        results['N_FEATURE_neg'] = odof.Nneg
        if production is True:
            # return a dict
            return results
        else:
            # or a dataframe; note that index is not relevant here but
            # required.
            df = pd.DataFrame(results, index=[1])
            return df

    # with the data extracted, we can now compute the regression.

    # In R or statsmodels, the regression code is simple since
    # it is based on the formula notation (Y~C(msi)+feature)
    # This is also possible in statsmodels library, however,
    # this relies on patsy, which is very slow as compared to the
    # statsmodels without formula.
    #### self._mydata = pd.DataFrame({'Y':self.Y,
    ####    'tissue':self.masked_tissue,
    ####       'msi': self.masked_msi, 'feature':self.masked_features})
    #### self.data_lm = ols('Y ~ C(tissue) + C(msi) + feature',
    ####    data=self._mydata, missing='none').fit() #Specify C is category

    # IMPORTANT: the order of the factors in the formula
    # is important. It does not change the total sum of square errors
    # but may change individual effects of the categorical
    # components.

    # Instead of using ols function, we use the OLS one so we cannot
    # use formula. Instead, we need to create manually the input
    # data. In the case of categorical data (tissue), we need to
    # create the dummy variable, which is done in the constructor
    # once for all (slow otherwise).
    if self.settings.analysis_type == 'PANCAN':
        # IMPORTANT: tissues are sorted alphabetically in R aov
        # function. Same in statsmodels but capitalised names
        # are sorted differently. In R, a<b<B<c but in Python,
        # A<B<C<a<b<c. So, 'aero' tissue is before 'Bladder' in R,
        # not in python. Since in a linear regression
        # models, the order of the factor matters and the first
        # factor is used as a reference, we decided to use same
        # convention as in R.
        # see http://statsmodels.sourceforge.net/devel/contrasts.html
        # for a good explanation

        #self._mydata = pd.DataFrame({'Y': odof.Y.copy(),
        #    'tissue':odof.masked_tissue,
        #        'msi': odof.masked_msi, 'feature': odof.masked_features})

        #self.data_lm2 = ols('Y ~ C(tissue) + C(msi) + feature',
        #    data=self._mydata).fit() #Specify C for Categorical

        # from statsmodels.stats.anova import anova_lm
        # import statsmodels.formula.api as smf
        # df = pd.DataFrame({'Y': odof.Y.copy(),
        #    'tissue':odof.masked_tissue,'media'
        #    odof.masked_media, 'msi': odof.masked_msi,
        #    'feature': odof.masked_features})
        # lm = smf.ols('Y~C(tissue)+C(media)+C(msi)+feature',
        #    data=df).fit()
        # anova_lm(lm)
        # The code above gives same answer as the code in gdsctools
        # but is slower

        # We could use pd.get_dummies but pretty slow
        # instead we create the full matrix in init() method.
        # One issue is that some columns end up with sum == 0
        # and needs to be dropped.
        df = self._tissue_dummies.ix[odof.masked_tissue.index]
        todrop = df.columns[df.values.sum(axis=0) == 0]

        if len(todrop) > 0:  # use if since drop() is slow
            df = df.drop(todrop, axis=1)

        # Here we set other variables with dataframe columns' names as
        # expected by OLS.
        if self.settings.include_media_factor == False:
            todrop = [x for x in df.columns if
                x.startswith('C(media)')]
            df = df.drop(todrop, axis=1)

        df['C(msi)[T.1]'] = odof.masked_msi.values
        df['feature'] = odof.masked_features.values

        self.Y = odof.Y
        self.EV = df.values
        # The regression and anova summary are done here
        #
        if self.settings.regression_method == 'ElasticNet':
            self.data_lm = OLS(odof.Y, df.values).fit_regularized(
                alpha=self.settings.regression_alpha,
                L1_wt=self.settings.regression_L1_wt)
        elif self.settings.regression_method == 'OLS':
            self.data_lm = OLS(odof.Y, df.values).fit()
        elif self.settings.regression_method == 'Ridge':
            self.data_lm = OLS(odof.Y, df.values).fit_regularized(
                alpha=self.settings.regression_alpha,
                L1_wt=0)
        elif self.settings.regression_method == 'Lasso':
            self.data_lm = OLS(odof.Y, df.values).fit_regularized(
                alpha=self.settings.regression_alpha,
                L1_wt=1)

        # example of computing null model ?
        # Example of computing pvalues ourself
        """self.samples1 = []
        self.samples2 = []
        self.samples3 = []
        Y = odof.Y.copy()
        pb = Progress(10000,20)
        for i in range(0,10000):
            #pylab.shuffle(Y)
            #data_lm = OLS(Y, df.values).fit()
            data_lm = OLS(Y+0.3*pylab.randn(len(Y)), df.values).fit()
            anova_pvalues = self._get_anova_summary(data_lm,
                output='dict')
            self.samples1.append(anova_pvalues['msi'])
            self.samples2.append(anova_pvalues['feature'])
            self.samples3.append(anova_pvalues['tissue'])
            pb.animate(i)
        """
    elif self.settings.include_MSI_factor is True:
        #self._mydata = pd.DataFrame({'Y': odof.Y,
        #    'msi': odof.masked_msi, 'feature': odof.masked_features})
        #self.data_lm = ols('Y ~ C(msi) + feature',
        #    data=self._mydata).fit() #Specify C for Categorical
        df = pd.DataFrame()
        df['C(msi)[T.1]'] = odof.masked_msi.values
        df['feature'] = odof.masked_features.values
        df.insert(0, 'Intercept', [1] * (odof.Npos + odof.Nneg))
        self.data_lm = OLS(odof.Y, df.values).fit()
    else:
        df = pd.DataFrame()
        df['feature'] = odof.masked_features.values
        df.insert(0, 'Intercept', [1] * (odof.Npos + odof.Nneg))
        self.data_lm = OLS(odof.Y, df.values).fit()
        #self._mydata = pd.DataFrame({'Y': odof.Y,
        #    'feature': odof.masked_features})
        #self.data_lm = ols('Y ~ feature',
        #    data=self._mydata).fit() #Specify C for Categorical

    self.anova_pvalues = self._get_anova_summary(self.data_lm,
        output='dict')

    # Store the pvalues. Note that some may be missing so we use try
    # except, which is faster than if/else
    try:
        tissue_PVAL = self.anova_pvalues['tissue']
    except:
        tissue_PVAL = None

    try:
        MSI_PVAL = self.anova_pvalues['msi']
    except:
        MSI_PVAL = None

    try:
        FEATURE_PVAL = self.anova_pvalues['feature']
    except:
        FEATURE_PVAL = None

    try:
        MEDIA_PVAL = self.anova_pvalues['media']
    except:
        MEDIA_PVAL = None

    if show is True:
        boxplot = BoxPlots(odof, savefig=self.settings.savefig,
            directory=directory)
        boxplot.boxplot_association(fignum=1)

        # a boxplot to show cell lines effects. This requires
        # the settings.analyse_type to be PANCAN
        if self.settings.analysis_type == 'PANCAN':
            boxplot.boxplot_pancan(fignum=2, mode='tissue')
        if self.settings.include_MSI_factor:
            boxplot.boxplot_pancan(fignum=3, mode='msi')

    results = {'FEATURE': feature_name,
        'DRUG_ID': drug_id,
        'DRUG_NAME': drug_name,
        'DRUG_TARGET': drug_target,
        'N_FEATURE_pos': odof.Npos,
        'N_FEATURE_neg': odof.Nneg,
        'FEATURE_pos_logIC50_MEAN': odof.pos_IC50_mean,
        'FEATURE_neg_logIC50_MEAN': odof.neg_IC50_mean,
        'FEATURE_delta_MEAN_IC50': odof.delta_mean_IC50,
        'FEATURE_pos_IC50_sd': odof.pos_IC50_std,
        'FEATURE_neg_IC50_sd': odof.neg_IC50_std,
        'FEATURE_IC50_effect_size': odof.effectsize_ic50,
        'FEATURE_pos_Glass_delta': odof.pos_glass,
        'FEATURE_neg_Glass_delta': odof.neg_glass,
        'ANOVA_FEATURE_pval': FEATURE_PVAL,
        'ANOVA_TISSUE_pval': tissue_PVAL,
        'ANOVA_MSI_pval': MSI_PVAL,
        'ANOVA_MEDIA_pval': MEDIA_PVAL,
        'FEATURE_IC50_T_pval': odof.ttest  # pvalues is in index 1
        }

    # 12% of the time here
    if production is True:
        return results
    else:
        df = pd.DataFrame(results, index=[1])
        return df
def optimise_elastic_net(self, drug_name, feature_name, N=20, Nalpha=20):
    """Scan the ElasticNet (L1 weight, alpha) grid and return a BIC matrix.

    :param drug_name: drug to analyse
    :param feature_name: feature to analyse
    :param int N: number of L1-weight values in [0, 1]
    :param int Nalpha: number of alpha values in [0, 5]
    :return: an (N, Nalpha) array of BIC values, one per grid point

    Side effect: leaves the regression settings on the last grid point.
    """
    l1_weights = pylab.linspace(0, 1, N)
    alpha_grid = pylab.linspace(0, 5, Nalpha)
    bic_surface = np.zeros((N, Nalpha))

    pb = Progress(N)
    for i, l1_weight in enumerate(l1_weights):
        for j, alpha in enumerate(alpha_grid):
            self.settings.regression_method = 'ElasticNet'
            self.settings.regression_alpha = alpha
            self.settings.regression_L1_wt = l1_weight
            self.anova_one_drug_one_feature(drug_name, feature_name)
            # keeps the summary side effects identical to before
            self._get_anova_summary(self.data_lm, output='dataframe')
            bic_surface[i, j] = self.data_lm.bic
        pb.animate(i + 1)
    return bic_surface
def optimise_ridge(self, drug_name, feature_name, alphas=None):
    """Scan Ridge regularisation strengths (delegates to :meth:`_opt_ridge_lasso`)."""
    return self._opt_ridge_lasso(drug_name, feature_name,
                                 method='Ridge', alphas=alphas)
def optimise_lasso(self, drug_name, feature_name, alphas=None):
    """Scan Lasso regularisation strengths (delegates to :meth:`_opt_ridge_lasso`)."""
    return self._opt_ridge_lasso(drug_name, feature_name,
                                 method='Lasso', alphas=alphas)
def _opt_ridge_lasso(self, drug_name, feature_name, method, alphas=None):
    """Scan regularisation strengths for Ridge or Lasso regressions.

    Temporarily overrides the regression settings, records the tissue
    F-value and the fitted parameters for each alpha, then restores the
    previous settings.

    :return: tuple (alphas, tissue F-values, fitted parameters)
    """
    if alphas is None:
        alphas = pylab.linspace(0, 1, 20)

    fvalues = []
    parameters = []

    # remember the current settings so they can be restored afterwards
    saved_method = self.settings.regression_method
    saved_alpha = self.settings.elastic_net.alpha

    pb = Progress(len(alphas))
    for count, alpha in enumerate(alphas, start=1):
        self.settings.regression_method = method
        self.settings.elastic_net.alpha = alpha
        self.anova_one_drug_one_feature(drug_name, feature_name)
        summary = self._get_anova_summary(self.data_lm,
            output='dataframe')
        fvalues.append(summary.ix['tissue']['F value'])
        pb.animate(count)
        parameters.append(self.data_lm.params)

    # restore the user settings
    self.settings.regression_method = saved_method
    self.settings.elastic_net.alpha = saved_alpha
    return alphas, fvalues, parameters
# no need to optimise anymore
def _get_anova_summary(self, data_lm, output='dict'):
    """Compute a type-I (sequential) ANOVA table from a fitted OLS model.

    :param data_lm: a fitted statsmodels OLS results instance
    :param output: 'dict' returns only the factor p-values (fast path);
        'dataframe' returns the full R-like ANOVA table
    """
    # could use this with statsmodels but somehow anova_lm with typ I
    # does not work, which is the one used in R version, so we implement
    # the anova here
    # QR decomposition: the squared projections of Y onto Q's columns
    # give the sequential sums of squares.
    q, r = np.linalg.qr(data_lm.model.data.exog)
    effects = np.dot(q.T, data_lm.model.data.endog)

    # In the regression, the first tissue is dropped hence -1
    # The degree of freedom for tissues is N - 1
    # self.features.tissues contains all tissues even those that
    # were dropped due to lack of pos or neg features. So, we must use
    # the exog matrix width instead.
    modes = self._get_analysis_mode()
    Ncolumns = data_lm.model.data.exog.shape[1]
    Ntissue = Ncolumns
    Ntissue -= 1  # remove intercept
    Ntissue -= 1  # remove feature, which are always taken into account
    if 'msi' in modes:
        Ntissue -= 1
    if 'media' in modes:
        Nmedia = len(self._media_dummies.columns)-1
    else:
        Nmedia = 0
    Ntissue -= Nmedia

    # create the W matrix using tissue and MSI if requested
    # default is that the 3 features are used
    if 'tissue' in modes and 'msi' in modes and 'media' in modes:
        dof = [Ntissue, Nmedia, 1, 1]
        indices = ['tissue', 'media', 'msi', 'feature', 'Residuals']
        # 5 rows: intercept + tissue + media + msi + feature
        arr = np.zeros((5, Ncolumns))
        arr[1, slice(1, Ntissue+1)] = 1
        arr[2, slice(Ntissue+1, Ntissue+Nmedia+1)] = 1
        arr[3, Ntissue + Nmedia + 1] = 1
        arr[4, Ntissue + Nmedia + 2] = 1
        self.arr = arr
    elif 'tissue' in modes and 'msi' in modes:
        dof = [Ntissue, 1, 1]
        indices = ['tissue', 'msi', 'feature', 'Residuals']
        # 4 rows: intercept + tissue + msi + feature
        arr = np.zeros((4, Ncolumns))
        # NOTE(review): this branch uses slice(1, Ntissue) whereas the
        # media branch above uses slice(1, Ntissue+1) -- looks like an
        # off-by-one on the last tissue column; confirm against the R
        # implementation before changing.
        arr[1, slice(1, Ntissue)] = 1
        arr[2, Ntissue + 1] = 1
        arr[3, Ntissue + 2] = 1
        self.arr = arr
    elif 'tissue' not in modes and 'msi' in modes:
        dof = [1, 1]
        indices = ['msi', 'feature', 'Residuals']
        # 3 rows: intercept + msi + feature
        arr = np.zeros((3, Ncolumns))
        arr[1, 1] = 1
        arr[2, 2] = 1
    elif 'tissue' not in modes and 'msi' not in modes:
        dof = [1]
        indices = ['feature', 'Residuals']
        # 2 rows: intercept + feature
        arr = np.zeros((2, Ncolumns))
        arr[1, 1] = 1
    arr[0, 0] = 1  # intercept

    sum_sq = np.dot(arr, effects**2)[1:]  # drop the intercept
    mean_sq = sum_sq / np.array(dof)
    Fvalues = mean_sq / (data_lm.ssr / data_lm.df_resid)
    F_pvalues = scipy.stats.f.sf(Fvalues, dof, data_lm.df_resid)

    # append the residuals row to each column of the table
    sum_sq = np.append(sum_sq, data_lm.ssr)
    mean_sq = np.append(mean_sq, data_lm.mse_resid)
    F_pvalues = np.append(F_pvalues, None)
    Fvalues = np.append(Fvalues, None)
    dof.append(data_lm.model.df_resid)
    #indices.append('Residuals')

    # dataframe is slow, return just the dict of pvalues by default
    if output == 'dataframe':
        anova = pd.DataFrame({'Sum Sq': sum_sq, 'Mean Sq': mean_sq,
            'Df': dof, 'F value': Fvalues, 'PR(>F)': F_pvalues},
            index=indices,
            columns=['Df', 'Sum Sq', 'Mean Sq', 'F value', 'PR(>F)']
            )
        return anova
    elif self.settings.analysis_type == 'PANCAN':
        if self.settings.include_media_factor:
            dd = {'tissue': F_pvalues[0],
                  'media': F_pvalues[1],
                  'msi': F_pvalues[2],
                  'feature': F_pvalues[3]}
        else:
            dd = {'tissue': F_pvalues[0],
                  'msi': F_pvalues[1],
                  'feature': F_pvalues[2]}
        return dd
    elif self.settings.include_MSI_factor is True:
        return {'msi': F_pvalues[0], 'feature': F_pvalues[1]}
    else:
        return {'feature': F_pvalues[0]}
    #return anova
def _draft(self):
    # Scratchpad: sklearn-based equivalent of the OLS fit, kept for
    # reference only; this method is never called.
    # using sklearn
    #ols = linear_model.LinearRegression()
    #f = ols.fit(an.dff, an.Y)
    #sse = sum(np.square((f.predict(an.dff).T - an.Y))) /
    #    float(an.dff.shape[0] - an.dff.shape[1])
    # ssr = sum(np.square((f.predict(an.dff).T - an.Y.mean())))
    pass
def _test(self):
    # Reference output from the R `aov` implementation, kept for manual
    # comparison against the Python results.
    # for drug1047 and feature ABCB1_mut
    print("""
Analysis of Variance Table

Response: Y
Df Sum Sq Mean Sq F value Pr(>F)
TISSUEpattern 26 352.35 13.5517 9.2685 < 2e-16 ***
MSIpattern 1 5.31 5.3094 3.6313 0.05705 .
FEATpattern 1 3.19 3.1861 2.1791 0.14028
Residuals 817 1194.55 1.4621
""")
#98% of time in method anova_one_drug_one_feature
def anova_one_drug(self, drug_id, animate=True, output='object'):
    """Computes ANOVA for a given drug across all features

    :param str drug_id: a valid drug identifier.
    :param animate: shows the progress bar
    :param output: 'object' returns an ANOVAResults instance; anything
        else returns the raw dataframe
    :return: a dataframe or an ANOVAResults instance

    Calls :meth:`anova_one_drug_one_feature` for each feature.
    """
    # some features can be dropped ??
    # drop first and second columns that are made of strings
    # works under python2 but not python 3. Assume that the 2 first
    # columns are the sample name and tissue feature
    # Then, we keep only cases with at least 3 features.
    # MSI could be used but is not like in original R code.
    features = self.features.df.copy()
    # need to skip the FACTOR to keep only features
    shift = self.features.shift

    features = features[features.columns[shift:]]
    # keep only features observed in at least 3 samples
    mask = features.sum(axis=0) >= 3

    # TODO: MSI, tissues, name must always be kept
    #
    selected_features = features[features.columns[mask]]

    # scan all features for a given drug
    assert drug_id in self.ic50.df.columns
    N = len(selected_features.columns)
    pb = Progress(N, 10)
    res = {}
    # note that we start at index 4 to drop sample name, tissue and MSI
    for i, feature in enumerate(selected_features.columns):
        # production True, means we do not want to create a DataFrame
        # for each call to the anova_one_drug_one_feature function
        # Instead, we require dictionaries
        this = self.anova_one_drug_one_feature(drug_id, feature,
            production=True)
        if this['ANOVA_FEATURE_pval'] is not None:
            res[feature] = this
        if animate is True:
            pb.animate(i+1)

    # if production is False:
    # df = pid.concat(res, ignore_index=True)
    df = pd.DataFrame.from_records(res)
    df = df.T

    df = ANOVAResults().astype(df)
    if len(df) == 0:
        # no feasible tests at all for this drug
        return df

    # append DRUG_NAME/DRUG_TARGET columns
    df = self.drug_decode.drug_annotations(df)

    # TODO: drop rows where ANOVA_FEATURE_PVAL is None
    if output != 'object':
        df = self.add_pvalues_correction(df)
        return df
    else:
        df = self.add_pvalues_correction(df)
        res = ANOVAResults(df)
        res.settings = ANOVASettings(**self.settings)
        return res
def anova_all(self, animate=True, drugs=None):
    """Run all ANOVA tests for all drugs and all features.

    :param drugs: you may select a subset of drugs
    :param animate: shows the progress bar
    :return: an :class:`~gdsctools.anova_results.ANOVAResults`
        instance with the dataframe
        stored in an attribute called **df**

    Loops over all drugs calling :meth:`anova_one_drug` for each
    drug and concatenating all results together. Note that once all
    data are gathered, an extra column containing the FDR corrections
    is added to the dataframe using :meth:`add_pvalues_correction`
    method. An extra column named "ASSOC_ID" is also added with
    a unique identifier sorted by ascending FDR.

    .. note:: A thorough comparison with version v17 give the same FDR
        results (difference ~1e-6); Note however that the qvalue results
        differ by about 0.3% due to different smoothing in R and Python.
    """
    # drop DRUG where number of IC50 (non-null) is below 5
    # axis=0 is default but we emphasize that sum is over
    # column (i.e. drug)
    vv = (self.ic50.df.isnull() == False).sum(axis=0)
    # FIXME: should be in one_drug_one_feature ??
    drug_names = vv.index[vv >= self.settings.minimum_nonna_ic50]

    # if user provided a list of drugs, use them:
    if drugs is not None:
        # todo: check validity of the drug names
        drug_names = drugs[:]

    pb = Progress(len(drug_names), 1)
    drug_names = list(drug_names)
    # NOTE(review): drugs are processed in random order -- presumably to
    # even out the progress-bar ETA; results are cached per drug, so the
    # final output is order-independent. Confirm before removing.
    pylab.shuffle(drug_names)
    if animate is True:
        pb.animate(0)

    for i, drug_name in enumerate(drug_names):
        if drug_name in self.individual_anova.keys():
            # cached from a previous call; skip recomputation
            pass
        else:
            res = self.anova_one_drug(drug_name, animate=False,
                output='dataframe')
            self.individual_anova[drug_name] = res
        if animate is True:
            pb.animate(i+1)
    print("\n")
    if len(self.individual_anova) == 0:
        return ANOVAResults()

    df = pd.concat(self.individual_anova, ignore_index=True)

    if len(df) == 0:
        return df
    # sort all data by ANOVA p-values
    try:
        df.sort_values('ANOVA_FEATURE_pval', inplace=True)
    except:
        # fallback for old pandas versions without sort_values()
        df.sort('ANOVA_FEATURE_pval', inplace=True)

    # all ANOVA have been computed individually for each drug and each
    # feature. Now, we need to compute the multiple testing corrections
    if self.settings.pvalue_correction_level == 'global':
        df = self.add_pvalues_correction(df)

    # insert a unique identifier as first column
    df.insert(0, 'ASSOC_ID', range(1, len(df) + 1))

    self.df = df
    # order the column names as defined in the __init__ method
    df = df[self.column_names]
    df.reset_index(inplace=True, drop=True)

    results = ANOVAResults()
    results.df = df
    results.settings = ANOVASettings(**self.settings)
    return results
def add_pvalues_correction(self, df, colname='ANOVA_FEATURE_pval'):
"""Add the corrected pvalues column in a dataframe based on pvalues
The default method (FDR correction) is stored in
:attr:`settings.pvalue_correction_method` and can be changed to other
methods (e.g., *qvalue*)
.. seealso:: :meth:`anova_all`,
:class:`~gdsctools.stats.MultipleTesting`
"""
if len(df) == 0:
return
# extract pvalues
data = df[colname].values
# set the method and compute new pvalues
self.multiple_testing.method = self.settings.pvalue_correction_method
new_pvalues = self.multiple_testing.get_corrected_pvalues(data)
new_pvalues *= 100
# insert new columns.
colname = 'ANOVA_FEATURE_FDR'
try:
df.insert(len(df.columns), colname, new_pvalues)
except:
# replaces it otherwise
df[colname] = new_pvalues
return df
def reset_buffer(self):
    """Clear the per-drug ANOVA cache so results are recomputed from scratch."""
    self.individual_anova = dict()
def __str__(self):
    """Return a printable summary: IC50 reader first, features reader below."""
    return "{}\n{}".format(self.ic50, self.features)
def __repr__(self):
    """Reuse the informal string representation for repr() as well."""
    return str(self)
| Oncology/gdsctools | gdsctools/anova.py | anova.py | py | 47,168 | python | en | code | null | github-code | 36 | [
{
"api_name": "gdsctools.readers.IC50",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "gdsctools.readers",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "gdsctools.readers.GenomicFeatures",
"line_number": 109,
"usage_type": "call"
},
{
"api_n... |
16775198576 | from rest_framework.decorators import api_view, permission_classes
from rest_framework import generics
from lembaga.models import Lembaga, Institusi, Tema
from lembaga.serializers import LembagaSerializer, InstitusiSerializer, TemaSerializer
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework import viewsets
from django_filters.rest_framework import DjangoFilterBackend
class LembagaView(viewsets.ModelViewSet):
    """CRUD endpoints for Lembaga with query-parameter filtering."""

    permission_classes = (AllowAny,)
    serializer_class = LembagaSerializer

    def get_queryset(self, *args, **kwargs):
        """
        Optionally restricts the returned Lembaga records by filtering
        against the ``institusi``, ``tema``, ``praktikum`` and ``nama``
        query parameters in the URL (each a comma-separated list).
        """
        try:
            queryset = Lembaga.objects.all()
            param_institusi = self.request.GET.get('institusi')
            param_tema = self.request.GET.get('tema')
            param_praktikum = self.request.GET.get('praktikum')
            param_nama = self.request.GET.get('nama')
            if param_praktikum:
                temp = param_praktikum.split(',')
                param_praktikum = []
                for i in temp:
                    # NOTE(review): only the LAST character of each token is
                    # parsed (e.g. 'praktikum3' -> 3), so praktikum numbers
                    # above 9 cannot be expressed -- confirm expected format.
                    param_praktikum.append(int(i[-1]))
                queryset = queryset.filter(praktikum_ke__in=param_praktikum)
            # When both filters are present, apply them jointly (AND).
            if (param_institusi and param_tema):
                param_institusi = param_institusi.split(',')
                param_tema = param_tema.split(',')
                queryset = queryset.filter(institusi__nama__in=param_institusi).filter(tema__nama__in=param_tema)
            else:
                if (param_institusi is not None):
                    param_institusi = param_institusi.split(',')
                    queryset = queryset.filter(institusi__nama__in=param_institusi)
                if (param_tema is not None):
                    param_tema = param_tema.split(',')
                    queryset = queryset.filter(tema__nama__in=param_tema)
            # Find lembaga by Name
            # by performing substring checking (case-insensitive)
            if param_nama:
                tmp_queryset = queryset
                queryset = []  # from here on ``queryset`` is a plain list
                for i in tmp_queryset:
                    if param_nama.lower() in i.nama.lower():
                        queryset.append(i)
            return queryset
        # Error case when parameters, variables, or data are not set while
        # performing the query.
        except (ValueError, RuntimeError, TypeError, NameError):
            # NOTE(review): this returns a dict, not a queryset/list; DRF
            # list views will iterate it -- confirm the intended payload.
            return {"status": "Harap isi kriteria pencarian"}
class InstitusiView(viewsets.ModelViewSet):
    """
    Standard read/write endpoints over all Institusi records, open to any
    caller (no authentication required).
    """
    permission_classes = (AllowAny, )
    queryset = Institusi.objects.all()
    serializer_class = InstitusiSerializer
class TemaView(viewsets.ModelViewSet):
    """
    Standard read/write endpoints over all Tema records, open to any
    caller (no authentication required).
    """
    permission_classes = (AllowAny, )
    queryset = Tema.objects.all()
    serializer_class = TemaSerializer
| ferenica/sipraktikum-backend | lembaga/views.py | views.py | py | 2,986 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "rest_framework.decorators.permission_classes",
"line_number": 12,
"usag... |
31848452342 | import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from cod_analytics.classes import TransformReference
from cod_analytics.math.homography import Homography, HomographyCorrection
class TestHomography:
    """Tests for Homography fitting, corrections, and (dataframe) transforms."""

    # Bounds/corners for a square test domain and a rectangular one.
    xy_bounds = (-1, 1)
    xy_corners = [
        [-1, -1],
        [1, -1],
        [1, 1],
        [-1, 1],
    ]
    x_bounds = (-1, 1)
    y_bounds = (-2, 2)
    xy_rect_corners = [
        [-1, -2],
        [1, -2],
        [1, 2],
        [-1, 2],
    ]
    # Ranges used to draw the random similarity-transform parameters.
    translate_bounds = (-5, 5)
    rotate_bounds = (-np.pi, np.pi)
    scale_bounds = (-1, 1)
    center_bounds = (-1, 1)
    size = (100, 2)
    rng = np.random.RandomState(956)  # fixed seed for reproducibility
    xy = rng.uniform(*xy_bounds, size=size)
    x = rng.uniform(*x_bounds, size=size[0])
    y = rng.uniform(*y_bounds, size=size[0])
    xy_rect = np.vstack([x, y]).T
    translate = rng.uniform(*translate_bounds, size=2)
    rotate = rng.uniform(*rotate_bounds)
    scale = 2 ** rng.uniform(*scale_bounds)  # log-uniform scale in [0.5, 2]
    center = rng.uniform(*center_bounds, size=2)

    def transform(
        self,
        points: np.ndarray,
        translate: np.ndarray,
        rotate: float,
        scale: float,
        center: np.ndarray,
    ) -> np.ndarray:
        """Transform points using a translation, rotation, and scale."""
        rotate_array = np.array(
            [
                [np.cos(rotate), -np.sin(rotate)],
                [np.sin(rotate), np.cos(rotate)],
            ]
        )
        # Scale and rotate about ``center``, then translate.
        new_points = points - center
        new_points = new_points * scale
        new_points = new_points @ rotate_array
        new_points = new_points + center
        new_points = new_points + translate
        return new_points

    def test_homography_fitted_method(self) -> None:
        """Test the fitted_method decorator."""
        homography = Homography()

        @homography.fitted_method
        def test_method(self: Homography) -> None:
            pass

        # Calling before fitting must raise; after fitting it must succeed.
        with pytest.raises(RuntimeError):
            test_method(homography)
        homography.fitted = True
        test_method(homography)

    def test_homography_fit(self) -> None:
        """Test the fit method."""
        homography = Homography()
        source = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
        target = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
        homography.fit(source, target)
        assert homography.fitted
        # Identical source/target corners must produce the identity matrix.
        assert np.allclose(homography.matrix, np.eye(3))

    def test_homography_add_correction(self) -> None:
        """Test the add_correction method."""
        homography = Homography()
        correction = HomographyCorrection()
        homography.add_correction(correction)
        assert homography.correction == correction

    @pytest.mark.parametrize("scale", [True, False], ids=["S", "NS"])
    @pytest.mark.parametrize("rotate", [True, False], ids=["R", "NR"])
    @pytest.mark.parametrize("translate", [True, False], ids=["T", "NT"])
    @pytest.mark.parametrize("center", [True, False], ids=["C", "NC"])
    @pytest.mark.parametrize("square", [True, False], ids=["Sq", "Rect"])
    def test_homography_transform(
        self,
        translate: bool,
        rotate: bool,
        scale: bool,
        center: bool,
        square: bool,
    ) -> None:
        """Test the transform method."""
        translate_val = self.translate if translate else np.zeros(2)
        rotate_val = self.rotate if rotate else 0.0
        scale_val = self.scale if scale else 1.0
        center_val = self.center if center else np.zeros(2)
        # Generate XY data
        XY = (self.xy if square else self.xy_rect) + center_val
        corners = np.array(self.xy_corners if square else self.xy_rect_corners)
        transformed_corners = self.transform(
            corners, translate_val, rotate_val, scale_val, center_val
        )
        expected = self.transform(
            XY, translate_val, rotate_val, scale_val, center_val
        )
        # A homography fitted on the corner correspondences must reproduce
        # the analytic transform on the interior points.
        homography = Homography()
        homography.fit(corners, transformed_corners)
        XY_transformed = homography.transform(XY)
        assert np.allclose(XY_transformed, expected)

    @pytest.mark.parametrize("scale", [True, False], ids=["S", "NS"])
    @pytest.mark.parametrize("rotate", [True, False], ids=["R", "NR"])
    @pytest.mark.parametrize("translate", [True, False], ids=["T", "NT"])
    @pytest.mark.parametrize("center", [True, False], ids=["C", "NC"])
    @pytest.mark.parametrize("square", [True, False], ids=["Sq", "Rect"])
    def test_homography_transform_reference(
        self,
        translate: bool,
        rotate: bool,
        scale: bool,
        center: bool,
        square: bool,
    ) -> None:
        """A homography built from TransformReference must match a fitted one."""
        translate_val = self.translate if translate else np.zeros(2)
        rotate_val = self.rotate if rotate else 0.0
        scale_val = self.scale if scale else 1.0
        center_val = self.center if center else np.zeros(2)
        # Generate XY data
        XY = (self.xy if square else self.xy_rect) + center_val
        left, right = self.xy_bounds if square else self.x_bounds
        bottom, top = self.xy_bounds if square else self.y_bounds
        source = TransformReference(
            map_left=left + center_val[0],
            map_right=right + center_val[0],
            map_bottom=bottom + center_val[1],
            map_top=top + center_val[1],
            map_rotation=0.0,
        )
        corners = np.array(self.xy_corners if square else self.xy_rect_corners)
        corners = corners + np.array([center_val[0], center_val[1]])
        transformed_corners = self.transform(
            corners,
            translate_val,
            rotate_val,
            scale_val,
            center_val,
        )
        # Same transform WITHOUT rotation, used only to derive the axis-
        # aligned bounds of the target reference frame.
        transformed_corners_nr = self.transform(
            corners,
            translate_val,
            0.0,
            scale_val,
            center_val,
        )
        new_left, new_right = (
            transformed_corners_nr[:, 0].min(),
            transformed_corners_nr[:, 0].max(),
        )
        new_bottom, new_top = (
            transformed_corners_nr[:, 1].min(),
            transformed_corners_nr[:, 1].max(),
        )
        target = TransformReference(
            map_left=new_left,
            map_right=new_right,
            map_bottom=new_bottom,
            map_top=new_top,
            map_rotation=rotate_val * 180.0 / np.pi,  # reference takes degrees
        )
        homography_expected = Homography()
        homography_expected.fit(corners, transformed_corners)
        expected = homography_expected.transform(XY)
        homography = Homography.from_transform_reference(source, target)
        XY_transformed = homography.transform(XY)
        assert np.allclose(XY_transformed, expected)

    @pytest.mark.parametrize("scale", [True, False], ids=["S", "NS"])
    @pytest.mark.parametrize("rotate", [True, False], ids=["R", "NR"])
    @pytest.mark.parametrize("translate", [True, False], ids=["T", "NT"])
    @pytest.mark.parametrize("center", [True, False], ids=["C", "NC"])
    @pytest.mark.parametrize("square", [True, False], ids=["Sq", "Rect"])
    def test_homography_with_correction(
        self,
        translate: bool,
        rotate: bool,
        scale: bool,
        center: bool,
        square: bool,
    ) -> None:
        """An identity homography plus a correction must equal the transform."""
        translate_val = self.translate if translate else np.zeros(2)
        rotate_val = self.rotate if rotate else 0.0
        scale_val = self.scale if scale else 1.0
        center_val = self.center if center else np.zeros(2)
        # Generate XY data
        XY = (self.xy if square else self.xy_rect) + center_val
        corners = np.array(self.xy_corners if square else self.xy_rect_corners)
        expected = self.transform(
            XY, translate_val, rotate_val, scale_val, center_val
        )
        correction = HomographyCorrection(
            translate=translate_val,
            rotate=rotate_val,
            scale=scale_val,
            center=center_val,
        )
        homography = Homography()
        homography.fit(corners, corners)  # identity homography
        homography.add_correction(correction)
        XY_transformed = homography.transform(XY)
        assert np.allclose(XY_transformed, expected)

    @pytest.mark.parametrize("scale", [True, False], ids=["S", "NS"])
    @pytest.mark.parametrize("rotate", [True, False], ids=["R", "NR"])
    @pytest.mark.parametrize("translate", [True, False], ids=["T", "NT"])
    @pytest.mark.parametrize("center", [True, False], ids=["C", "NC"])
    @pytest.mark.parametrize("square", [True, False], ids=["Sq", "Rect"])
    @pytest.mark.parametrize("n", [0, 1, 2, 5, 10])
    def test_homography_transform_df(
        self,
        translate: bool,
        rotate: bool,
        scale: bool,
        center: bool,
        square: bool,
        n: int,
    ) -> None:
        """Test the transform_df method."""
        translate_val = self.translate if translate else np.zeros(2)
        rotate_val = self.rotate if rotate else 0.0
        scale_val = self.scale if scale else 1.0
        center_val = self.center if center else np.zeros(2)
        # Generate XY data; n == 0 exercises the single unlabeled x/y pair.
        k = n if n > 0 else 1
        if square:
            xy_data = self.rng.uniform(
                *self.xy_bounds, size=(self.size[0], self.size[1] * k)
            ) + np.tile(center_val, k)
        else:
            x_data = self.rng.uniform(*self.x_bounds, size=(self.size[0], k))
            y_data = self.rng.uniform(*self.y_bounds, size=(self.size[0], k))
            # Interleave x and y data
            xy_data = np.empty((self.size[0], self.size[1] * k))
            xy_data[:, ::2] = x_data + center_val[0]
            xy_data[:, 1::2] = y_data + center_val[1]
        corners = np.array(self.xy_corners if square else self.xy_rect_corners)
        transformed_corners = self.transform(
            corners, translate_val, rotate_val, scale_val, center_val
        )
        if n == 0:
            labels: list[str] | list[list[str]] = ["x", "y"]
            flat_labels = ["x", "y"]
        else:
            labels = [[f"x_{i}", f"y_{i}"] for i in range(n)]
            flat_labels = [item for sublist in labels for item in sublist]
        df = pd.DataFrame(xy_data, columns=flat_labels)
        homography = Homography()
        homography.fit(corners, transformed_corners)
        df_transformed = homography.transform_dataframe(df, labels)
        if n == 0:
            expected = self.transform(
                xy_data, translate_val, rotate_val, scale_val, center_val
            )
            assert np.allclose(df_transformed.values, expected)
        else:
            # Each (x_i, y_i) column pair must be transformed independently.
            for i in range(n):
                k = i * 2
                expected = self.transform(
                    xy_data[:, k : k + 2],
                    translate_val,
                    rotate_val,
                    scale_val,
                    center_val,
                )
                assert np.allclose(
                    df_transformed.iloc[:, k : k + 2].values, expected
                )
| cesaregarza/CoD-Analytics | tests/test_homography.py | test_homography.py | py | 11,028 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.pi",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.RandomState",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "numpy.vstack... |
6380032523 | from functools import partial
import chex
import jax
import numpy as np
import numpy.testing as npt
from absl.testing import parameterized
from tessellate_ipu import tile_map, tile_put_replicated
from tessellate_ipu.lax import scatter_add_p, scatter_max_p, scatter_mul_p, scatter_p
class IpuTilePrimitivesLaxScater(chex.TestCase, parameterized.TestCase):
    """Tests tile-mapped scatter primitives against the CPU backend."""

    def setUp(self):
        super().setUp()
        self.device = jax.devices("ipu")[0]
        self.num_tiles = self.device.num_tiles
        # Not very clean, but for better reproducibility.
        np.random.seed(123)

    @parameterized.parameters(
        {"num_elements": 8, "num_indices": 3, "scatter_prim": scatter_p},
        {"num_elements": 8, "num_indices": 16, "scatter_prim": scatter_add_p},
        {"num_elements": 8, "num_indices": 16, "scatter_prim": scatter_max_p},
        {"num_elements": 8, "num_indices": 16, "scatter_prim": scatter_mul_p},
        {"num_elements": 8, "num_indices": 3, "scatter_prim": scatter_add_p},
        {"num_elements": 8, "num_indices": 12, "scatter_prim": scatter_add_p},
        {"num_elements": 256, "num_indices": 512, "scatter_prim": scatter_add_p},
    )
    def test__tile_map__scatter__jitting__multi_sizes__proper_result(self, num_elements, num_indices, scatter_prim):
        """The IPU scatter result must match the CPU reference exactly."""
        tiles = (0,)
        data = np.random.randn(num_elements).astype(np.float32)
        # Indices drawn with replacement, shaped (num_indices, 1) as uint32.
        indices = np.random.randint(low=0, high=num_elements, size=num_indices)
        indices = indices.reshape(-1, 1).astype(np.uint32)
        updates = np.random.randn(indices.size).astype(np.float32)
        # Only supported configuration!
        scatter_dnums = jax.lax.ScatterDimensionNumbers(
            update_window_dims=(), inserted_window_dims=(0,), scatter_dims_to_operand_dims=(0,)
        )

        def scatter_add_fn(data, indices, updates):
            # Replicate all operands onto the target tiles, then run the
            # scatter primitive tile-wise.
            data = tile_put_replicated(data, tiles)
            indices = tile_put_replicated(indices, tiles)
            updates = tile_put_replicated(updates, tiles)
            return tile_map(
                scatter_prim,
                data,
                indices,
                updates,
                dimension_numbers=scatter_dnums,
                indices_are_sorted=False,
                unique_indices=False,
                mode=jax.lax.GatherScatterMode.PROMISE_IN_BOUNDS,
                update_jaxpr=None,
                update_consts=None,
            )

        # Jit the same function for both backends and compare outputs.
        cpu_scatter_add_fn = partial(jax.jit, backend="cpu")(scatter_add_fn)
        ipu_scatter_add_fn = partial(jax.jit, backend="ipu")(scatter_add_fn)
        cpu_output = cpu_scatter_add_fn(data, indices, updates)
        ipu_output = ipu_scatter_add_fn(data, indices, updates)
        assert ipu_output.tiles == tiles
        assert ipu_output.dtype == data.dtype
        npt.assert_array_almost_equal(ipu_output, cpu_output)
| graphcore-research/tessellate-ipu | tests/lax/test_tile_lax_scatter.py | test_tile_lax_scatter.py | py | 2,847 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "chex.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "absl.testing.parameterized.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "absl.testing.parameterized",
"line_number": 13,
"usage_type": "name"
},
{... |
28799587509 | import json
import os
import pickle
import numpy as np
from pprint import pprint
# Module-level caches populated by loadVars()/loadArrays().
winningRuns, losingRuns, cards, relics, X, Y = None, None, None, None, None, None
cwd = os.path.dirname(__file__)
pathToData = os.path.join(cwd, '..', 'Data')  # data directory beside this package
# Parses the wins and losses from the bulk dump files.
def loadFromFile(winsPath, losePath):
    """Parse the raw run dumps and pickle winning/losing runs to disk.

    Walks every dump under Data/runs, splits runs by the floor reached
    (50 = died to the final boss -> loss, 51 = victory -> win), stores the
    two lists in the module-level ``winningRuns``/``losingRuns`` globals
    and pickles them to *winsPath* / *losePath*.
    """
    global winningRuns, losingRuns
    count = 0
    winningRuns = []
    losingRuns = []
    winningCount = 0
    lossCount = 0
    for f in os.listdir(os.path.join(pathToData, 'runs')):
        runsPath = os.path.join(pathToData, 'runs', f, f)
        with open(runsPath) as runs_file:
            runs = json.load(runs_file)
        count += len(runs)
        # Floor 50 means the player died to the final boss; 51 is a victory.
        # Runs that ended on any other floor are ignored.
        for run in runs:
            floor = run['event']['floor_reached']
            if floor == 50:
                losingRuns.append(run)
                lossCount += 1
            elif floor == 51:
                winningRuns.append(run)
                winningCount += 1
        print("Reading: ", f, "Cumulative size: ", count,
              "NumWins: ", winningCount, "NumLosses", lossCount)
        # NOTE(review): this tests the size of the *last* file read, not the
        # cumulative ``count`` the print above reports -- confirm intent.
        if len(runs) > 200000:
            break
    # Pickle is a binary format: files must be opened in 'wb' mode
    # (text mode 'w' raises TypeError on Python 3).
    with open(winsPath, 'wb') as wins_file:
        pickle.dump(winningRuns, wins_file)
    with open(losePath, 'wb') as lose_file:
        pickle.dump(losingRuns, lose_file)
# Load all the card/relic variables and the cached run lists.
def loadVars():
    """Load card/relic metadata and the win/loss run lists into globals.

    Populates the module-level ``cards``, ``relics``, ``winningRuns`` and
    ``losingRuns``.  Cached pickles under Data/ are used when present;
    otherwise the runs are re-parsed from the bulk dump via loadFromFile().
    """
    varFileNames = ['cards.json', 'relics.json']
    varList = []
    for name in varFileNames:
        with open(os.path.join(pathToData, name)) as var_file:
            varList.append(json.load(var_file))
    # Set variables to global scope and write cards and relics.
    global cards, relics, winningRuns, losingRuns
    cards = varList[0]
    relics = varList[1]
    # Try to load the cached wins and losses; if unavailable, parse them
    # from the dump files.  (A leftover debug line -- pickle.load(open("break"))
    # -- previously forced the except path on every run; it has been removed.)
    winsPath = os.path.join(pathToData, 'wins.pkl')
    losePath = os.path.join(pathToData, 'loses.pkl')
    try:
        # Pickle files are binary; the previous 'r+' text mode would fail.
        with open(winsPath, 'rb') as wins_file:
            winningRuns = pickle.load(wins_file)
        with open(losePath, 'rb') as lose_file:
            losingRuns = pickle.load(lose_file)
        print("Run data files found and loaded")
    except IOError:
        print("No run files found, loading from bulk data")
        loadFromFile(winsPath, losePath)
def loadArrays():
    """Load (or build and cache) the feature matrix X and label vector Y.

    X has one row per run and ``2 * numCards`` columns: counts of each base
    card, followed by counts of each upgraded ('+1') card.  Y is 1 for a
    winning run and 0 for a losing run.  Both are cached as .npy files
    under Data/ and rebuilt from the run dumps when the cache is missing.
    """
    global X, Y
    xPath = os.path.join(pathToData, 'X.npy')
    yPath = os.path.join(pathToData, 'Y.npy')
    try:
        X = np.load(xPath)
        Y = np.load(yPath)
        print("Loaded Arrays from file")
    except IOError:
        print("No array files found, parsing from data files")
        loadVars()
        index = list(cards.keys())
        numCards = len(index)
        # Map each card name to its column once, replacing the original
        # linear scan over all cards for every card in every deck.
        column = {str(name): i for i, name in enumerate(index)}
        runs = winningRuns + losingRuns
        X = np.zeros((len(runs), numCards * 2))
        Y = np.concatenate((np.ones(len(winningRuns)), np.zeros(len(losingRuns))))
        np.save(yPath, Y)
        for rowCount, run in enumerate(runs):
            for card in run[u'event'][u'master_deck']:
                cardName = str(card)
                # Exact base-card match.
                base_col = column.get(cardName)
                if base_col is not None:
                    X[rowCount][base_col] += 1
                # Upgraded cards ('<name>+1') occupy the second half of the
                # columns.  Checked independently of the base match, exactly
                # as the original per-index scan did.
                if cardName.endswith('+1'):
                    upgraded_col = column.get(cardName[:-2])
                    if upgraded_col is not None:
                        X[rowCount][upgraded_col + numCards] += 1
        np.save(xPath, X)
def main():
    """Script entry point: load or build the X/Y arrays."""
    loadArrays()
if __name__ == '__main__':
    # Delegate to main() so the script and the function entry point agree
    # (the original called loadArrays() directly; behavior is identical).
    main()
| kenttorell/MLFinal_StS | Code/LoadData.py | LoadData.py | py | 4,299 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9... |
23507472501 | # implementing pymoo multi-objective optimization with FEniCS FEA objectives
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
import pathlib
from pathlib import Path
from pymoo.core.problem import ElementwiseProblem
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.termination import get_termination
from pymoo.optimize import minimize
from pymoo.operators.sampling.rnd import BinaryRandomSampling
from pymoo.operators.crossover.hux import HalfUniformCrossover
from pymoo.operators.crossover.ux import UniformCrossover
from pymoo.operators.crossover.pntx import TwoPointCrossover
from pymoo.operators.mutation.bitflip import BitflipMutation
from pymoo.decomposition.asf import ASF
# importing required modules (have to be in the same directory!)
from core_functions import *
from boundary_conditions import *
# defining the multiobjective problem for square FEA problem: Weight/vm_stress obj
class MyProblem(ElementwiseProblem):
    """Binary infill design problem: minimise weight and max displacement.

    Each of the ``num_el`` binary design variables switches one mesh element
    on/off.  Objectives are evaluated by writing a temporary infill mesh
    and running the FEniCS FEA on it.
    """

    def __init__(self, conn, coords, num_el, meshpath):
        super().__init__(n_var=num_el,
                         n_obj=2,
                         n_ieq_constr=0,
                         xl=np.zeros(num_el),
                         xu=np.ones(num_el))
        self.conn = conn          # element connectivity of the base mesh
        self.coords = coords      # node coordinates of the base mesh
        self.num_el = num_el      # number of elements = design dimension
        self.meshpath = meshpath  # temp mesh file rewritten per evaluation

    def _evaluate(self, x, out, *args, **kwargs):
        # generate the infill file for this candidate design
        generate_infill_file(self.meshpath, x, self.conn, self.coords)
        # Load the created infill mesh into FEniCS.  Use the instance's own
        # mesh path: the original read the module-global ``meshfile``, which
        # only worked because it happened to alias ``self.meshpath``.
        mesh, _, _, _ = extract_mesh_data_fenics(self.meshpath)
        # perform FEA and obtain displacements and von mises stress
        d_max, d_tot, d_avg, max_vm = square_FEA(mesh)
        # objective values: minimise *weight* (number of active elements)
        # and *maximal displacement*
        f1 = np.sum(x)  # objective 1 - "weight"
        f2 = d_max      # objective 2 - max displacement (labelled "maxStress"
                        # in the original comment, but d_max is a displacement)
        # constraints (dummy example, kept for reference):
        # g1 = np.sum(x) - 200  # can be adjusted to limit the infill rate
        out["F"] = [f1, f2]  # dictionary key for objectives
        # out["G"] = [g1]    # dictionary key for constraints
# Filepath management
cwd = Path.cwd().parent
mesh_dir = cwd.joinpath("MOEA_meshes", "smallSquare7x7")  # adjust the dir here
inf_file = str(mesh_dir.stem) + '-infill.msh'
mesh_inp = mesh_dir / inf_file  # mesh input file

# Extracting geometry from the infill mesh -> num_el is the design dimension
conn, coords, num_el = extract_mesh_data(mesh_inp)  # extracting the geometry

# init the MOP; meshfile is the scratch mesh rewritten on every evaluation
meshfile = mesh_dir / 'infill_gen.msh'
problem = MyProblem(conn, coords, num_el, meshfile)

# initialize the algorithm object (binary-coded NSGA-II)
algorithm = NSGA2(
    pop_size=100,
    n_offsprings=40,
    sampling=BinaryRandomSampling(),
    crossover=TwoPointCrossover(prob=1.0),
    # crossover=UniformCrossover(),
    mutation=BitflipMutation(prob=0.5, prob_var=2 * 1/num_el),  # mutation scaled by the num of elements
    eliminate_duplicates=True
)

# define the termination criterion
termination = get_termination("n_gen", 10)

# solve the MOP
res = minimize(problem,
               algorithm,
               termination,
               seed=1,
               save_history=True,
               verbose=True)
# Verbose column legend:
# n_gen - generation counter,
# n_eval - number of evaluations
# cv_min - minimum constraint violation
# cv_avg - average constraint violation in the current population.
# n_nds - number of non-dominated solutions
# eps/ind- running performance metrics

X = res.X  # solutions in the decision space
F = res.F  # solutions in the objective space
hist = res.history  # getting the info for each iteration
# print(X)
# print(F)

# save solutions: one .msh and one .png per non-dominated design
out_dir = mesh_dir / "infill_gen"
img_dir = out_dir / "png"
out_dir.mkdir(exist_ok=True)
img_dir.mkdir(exist_ok=True)
for i, x in enumerate(X):
    ifile = out_dir / f"sol{i:02d}.msh"  # generate mesh
    generate_infill_file(ifile, x, conn, coords)
    imfile = img_dir / f"sol_{i}.png"  # generate png
    generate_img_file(imfile, x, conn, coords)
    print(f"File {ifile.name} saved.")

# writing a text summary (header + one line per solution) to a file
with open(out_dir / "sq_solutions.txt", "w+") as file:
    alg = str(type(algorithm))[8:-2]  # strip "<class '...'>" wrapper
    header = f"Algorithm:{alg}, pop_size: {algorithm.pop_size},\
n_offsp: {algorithm.n_offsprings}, n_gen: {termination.n_max_gen},\
t_tot: {res.exec_time:10.1f}, t_gen: {res.exec_time/termination.n_max_gen:10.2f}\n"
    file.writelines(header)
    file.writelines(f"{str(list(sol))} \t w: {F[i][0]:10.1f} \t vm: {F[i][1]:10.06f} \n" for i, sol in enumerate(1*X))

# plotting the decision space (each row = one binary design vector)
xl, xu = problem.bounds()
plt.figure(figsize=(7, 5))
plt.imshow(X)
plt.title("Design Space")
plt.show()

# plotting the objective space
plt.figure(figsize=(5, 3))
plt.scatter(F[:, 0], F[:, 1], s=20, facecolors='black', edgecolors='black')
plt.title("Objective Space")
plt.savefig(out_dir / 'ObjectiveSpace.png')
plt.show()

# Multi-Criteria Decision Making: subset selection
n_keep = 9  # how many solutions should we keep
if len(X) > n_keep:
    approx_ideal = F.min(axis=0)  # ideal point
    approx_nadir = F.max(axis=0)  # nadir point
    # normalizing with respect to both objectives
    nF = (F - approx_ideal) / (approx_nadir - approx_ideal)  # normalized objectives

    # Decision making: Using Augmented Scalarization Function (ASF)
    decomp = ASF()  # init the ASF metric to be minimized
    # Evenly spread weight vectors across the two objectives.
    weights = [np.array([(k + 1)/(n_keep+1), (n_keep - k)/(n_keep+1)]) for k in range(n_keep)]
    best_ASF = []  # indexes of chosen solutions
    for weight in weights:
        best_ASF.append(decomp.do(nF, 1/weight).argmin())  # index of the best solution regarding ASF
    best_ASF = list(set(best_ASF))  # remove duplicates
    F_ASF = F[best_ASF, :]  # objectives
    X_ASF = X[best_ASF, :]  # solutions
    n_kept = len(best_ASF)  # number of kept solutions

    # plotting the objective space with mesh png annotations
    fig, ax = plt.subplots(figsize=(13, 8))
    plt.scatter(F_ASF[:, 0], F_ASF[:, 1], s=20, facecolors='black', edgecolors='black')
    for i, (x, y) in enumerate(F_ASF):
        ind = best_ASF[i]
        img_lab = plt.imread(img_dir / f"sol_{ind}.png")
        imagebox = OffsetImage(img_lab, zoom=0.045)
        imagebox.image.axes = ax
        fl_offset = (-1)**i * 20  # spreading fluctuating offset
        ab = AnnotationBbox(imagebox, (x, y),
                            xybox=(32 - 0.6 * fl_offset, 38 + fl_offset),
                            xycoords='data',
                            boxcoords=("offset points", "offset points"),
                            pad=-1,
                            arrowprops=dict(
                                arrowstyle="->",
                                connectionstyle="angle,angleA=0,angleB=90,rad=3",
                                linewidth=0.5)
                            )
        ax.add_artist(ab)
        ax.text(x, y, f" {str(ind)}", va='top', ha='left', zorder=4)
    # Pad the axis limits so the annotation thumbnails stay inside the frame.
    xrang = np.abs(np.max(F_ASF[:,0]) - np.min(F_ASF[:,0]))
    yrang = np.abs(np.max(F_ASF[:,1]) - np.min(F_ASF[:,1]))
    ax.set_xlim((np.min(F_ASF[:,0]) - 0.05*xrang, np.max(F_ASF[:,0]) + 0.15*xrang ))
    ax.set_ylim((np.min(F_ASF[:,1]) - 0.1*yrang, np.max(F_ASF[:,1]) + 0.25*yrang ))
    plt.title(f"Objective Space of {n_kept} ASF-selected solutions")
    plt.savefig(out_dir / 'ObjectiveSpace-ASF.png')
    plt.show()
| grohalex/Project2023 | Python/main.py | main.py | py | 7,557 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymoo.core.problem.ElementwiseProblem",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.su... |
6913776937 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 24 19:55:28 2021
@author: jon-f
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from skimage.transform import resize
def binary_array_to_hex(arr):
    """Pack a binary array into a zero-padded hexadecimal string (4 bits/digit)."""
    flat_bits = arr.flatten()
    bit_string = "".join(str(int(bit)) for bit in flat_bits)
    n_digits = int(np.ceil(len(bit_string) / 4))
    return format(int(bit_string, 2), "0{}x".format(n_digits))
def image_to_edgehex(numpy_image):
    """Hash an image into a hex string describing its edge map.

    The image is resized to 64x64, converted to grayscale using BT.601
    luma weights, thresholded on a simple 2x2 finite-difference gradient
    magnitude, and the resulting binary edge map is packed into hex via
    binary_array_to_hex().
    """
    numpy_array = resize(numpy_image, (64, 64))
    size = numpy_array.shape
    # NOTE(review): size[2] below assumes a color image (H, W, C); a 2-D
    # grayscale input would raise IndexError -- confirm expected inputs.
    grayscale = np.zeros(size[:-1])
    grayscale1der = np.zeros(size[:-1])  # binary edge map (1 = edge pixel)
    for i in range(size[0]):
        for j in range(size[1]):
            if size[2] == 4:
                # RGBA: luma weighted by the alpha channel
                val = numpy_array[i, j]
                n_val = (0.299*val[0]+0.587*val[1]+0.114*val[2])*val[3]
            elif size[2] == 3:
                # RGB: plain luma
                val = numpy_array[i, j]
                n_val = 0.299*val[0]+0.587*val[1]+0.114*val[2]
            else:
                n_val = numpy_array[i, j]
            grayscale[i, j] = n_val
            if i == 0 or j == 0:
                # No complete 2x2 neighborhood on the first row/column.
                continue
            # Average forward differences over the 2x2 window ending at (i, j).
            sub = grayscale[i-1:i+1, j-1:j+1]
            dIdx = 1/2 * ((sub[0, 1] - sub[0, 0]) + (sub[1, 1] - sub[1, 0]))
            dIdy = 1/2 * ((sub[1, 0] - sub[0, 0]) + (sub[1, 1] - sub[0, 1]))
            dIdp = abs(dIdx/2 + dIdy/2)
            if dIdp < 0.03:
                # Below the edge threshold: leave the pixel as non-edge.
                continue
            grayscale1der[i, j] = 1
    return binary_array_to_hex(grayscale1der)
def dilate(image_file):
    """Dilate a binary image: mark a pixel 1 if any pixel in the 2x2
    window above/left of it equals 1.

    Returns a new array of the resized (64x64-derived) shape.
    """
    # NOTE(review): the resized array is only used for its shape; the loop
    # below indexes the *original* image_file -- confirm whether the resized
    # image was meant to be scanned instead.
    numpy_array = resize(image_file, (64, 64))
    size = numpy_array.shape[:-1]
    dilated = np.zeros(size)
    for i in range(size[0]):
        for j in range(size[1]):
            # For i == 0 or j == 0 the slice start is -1, which yields an
            # empty window, so the first row/column is never dilated.
            sub = image_file[i-1:i+1, j-1:j+1]
            if (sub == 1).any():
                dilated[i, j] = 1
    return dilated
def hamming_distance(hash1, hash2):
    """Count the positions where the two hashes differ (up to the shorter length)."""
    distance = 0
    for left, right in zip(hash1, hash2):
        if left != right:
            distance += 1
    return distance
def hash_match(hash1, hash2, percent_match=60):
    """Return True when the hashes differ in fewer than percent_match % of positions."""
    mismatch_ratio = hamming_distance(hash1, hash2) / len(hash1)
    return mismatch_ratio < percent_match / 100
{
"api_name": "numpy.ceil",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "skimage.transform.resize",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"li... |
6280612969 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
from datetime import datetime
#%config InlineBackend.figure_format = 'retina'
#%matplotlib inline
#%%
path = 'C:/Users/Mr.Goldss/Desktop/health care digital ticket EDA/'  # local workbook folder
filename = '7_23_2020 Facility Itemized Tracker (1)'
# Read the "New ALL" sheet of the itemized tracker workbook.
df = pd.read_excel(path + filename + '.xlsx', sheet_name='New ALL')
df.shape  # no-op outside an interactive cell; kept for notebook-style inspection
#%% fix basic formatting: preprocessing
'''data preprocessing'''
def formatting_preprocessing(df):
    """Select/rename the tracker columns and drop unusable rows.

    Keeps the nine columns of interest, renames them to the reporting
    schema, removes rows missing a delivery date or item description or
    with a non-positive unit count, and defaults the product source to
    'SNS' when absent.  Returns the cleaned dataframe.
    """
    column_map = {
        'Date': 'Delivered Date',
        'Facility Name': 'Facility Name',
        'Facility Type': 'Facility Type',
        'Item': 'Item Description',
        'Product Source': 'Product Source',
        'Type': 'Type',
        'UOM': 'Units',
        'Qty': 'QTY Per UOM',
        'Total': 'Total Amount',
    }
    cleaned = df[list(column_map)].rename(columns=column_map)
    cleaned = cleaned[cleaned['Delivered Date'].notna()]
    cleaned = cleaned[cleaned['Item Description'].notna()]
    cleaned = cleaned[(cleaned['Units'] > 0) & (cleaned['Units'].notna())]
    cleaned['Product Source'] = cleaned['Product Source'].fillna('SNS')
    return cleaned
df_clean = formatting_preprocessing(df)  # apply column selection/cleanup to the raw sheet
def del_wrong_ems(df_clean):
    """Drop April-2020 EMS rows except Phoenix Fire Resource Management.

    Returns a new dataframe with the offending rows removed by index.
    """
    in_april = (
        (df_clean['Delivered Date'] >= '2020-04-01')
        & (df_clean['Delivered Date'] <= '2020-04-30')
    )
    wrong_ems = (
        (df_clean['Facility Type'] == 'EMS')
        & (df_clean['Facility Name'] != 'Phoenix Fire Resource Management')
    )
    drop_index = df_clean.loc[in_april & wrong_ems].index
    return df_clean.drop(drop_index)
df_clean_EMS = del_wrong_ems(df_clean)  # remove the mis-entered April-2020 EMS rows
def deep_clean_str_manipulate(df_clean_EMS):
    """Normalise text columns and derive size/date helper columns in place.

    - fills missing ``QTY Per UOM`` with 1
    - canonicalises ``Type`` ('Surgical Masks' -> 'Surgical Mask', lower-cased)
    - canonicalises ``Facility Type`` (capitalised, 'Lctf'/'Ltfc' typos fixed
      to 'Ltcf', 'Acute care' collapsed to 'Acute')
    - derives a ``size`` column (small/medium/large/universal) from the
      lower-cased item description
    - derives ``week_of_year`` and ``Month`` from the delivery date and
      casts ``Total Amount`` to int

    The dataframe is modified in place and also returned.  (A dead
    ``groupby(...).sum()`` statement whose result was discarded has been
    removed.)
    """
    df_clean_EMS['QTY Per UOM'] = df_clean_EMS['QTY Per UOM'].fillna(1)

    # Canonicalise the product type.
    df_clean_EMS['Type'] = df_clean_EMS['Type'].str.replace('Surgical Masks', 'Surgical Mask')
    df_clean_EMS['Type'] = df_clean_EMS['Type'].str.lower()

    # Canonicalise the facility type and fix known typos.
    df_clean_EMS['Facility Type'] = df_clean_EMS['Facility Type'].str.lower()
    df_clean_EMS['Facility Type'] = df_clean_EMS['Facility Type'].str.strip()
    df_clean_EMS['Facility Type'] = df_clean_EMS['Facility Type'].str.capitalize()
    to_replace = {'Lctf': 'Ltcf', 'Ltfc': 'Ltcf'}
    for typo, fixed in to_replace.items():
        df_clean_EMS['Facility Type'] = df_clean_EMS['Facility Type'].str.replace(typo, fixed)
    df_clean_EMS['Facility Type'] = df_clean_EMS['Facility Type'].str.replace('Acute care', 'Acute')

    # Derive the size category per description ('small' takes precedence over
    # 'medium', which takes precedence over 'large').  This replaces the
    # original approach of pre-building s/m/l membership lists.
    df_clean_EMS['Item Description'] = df_clean_EMS['Item Description'].str.lower()

    def _size_category(description):
        # One-line purpose: classify a lower-cased description into a size bucket.
        if 'small' in description:
            return 'small'
        if 'medium' in description:
            return 'medium'
        if 'large' in description:
            return 'large'
        return 'universal'

    df_clean_EMS['size'] = df_clean_EMS['Item Description'].apply(_size_category)

    # Date helpers and integer totals.
    df_clean_EMS['week_of_year'] = df_clean_EMS['Delivered Date'].dt.strftime('%U')
    df_clean_EMS['Month'] = df_clean_EMS['Delivered Date'].dt.month
    df_clean_EMS['Total Amount'] = df_clean_EMS['Total Amount'].astype(int)
    return df_clean_EMS
deep_clean_str_manipulate(df_clean_EMS)  # mutates df_clean_EMS in place (return value unused)
#%%
'''countplot'''
# Row counts per facility type across the whole cleaned dataset.
fig, ax1, = plt.subplots()
fig.set_size_inches(25, 12)
sns.countplot(x='Facility Type', data=df_clean_EMS, ax=ax1)
plt.xticks(fontsize=18, rotation=90)
plt.xlabel('Facility', fontsize=15)
plt.ylabel('count', fontsize=20)
plt.title('Beltmann Inventory Distribution', fontsize=30)
plt.tight_layout()
plt.show()
#%%
# barplot with plt.text to label all columns (kept for reference)
'''
for index, value in enumerate(df_clean_EMS['Total Amount']):
plt.text(index,value+400, str(value),fontsize=12)
plt.tight_layout()
'''
# %%
'''sns.lineplot---->date as x axis'''
# we only care about these product types
# NOTE(review): the original comment said "5 product type" but the list has 4.
type_needed = ['n95', 'surgical mask', 'gowns', 'gloves']
fig, ax1, = plt.subplots()
fig.set_size_inches(25, 12)
sns.lineplot(x='Month', y='Total Amount', hue='Type',
             data=df_clean_EMS[df_clean_EMS['Type'].isin(type_needed)], ax=ax1,
             ci=False, markers=True, style='Type')
plt.xticks(fontsize=18, rotation=90)
plt.xlabel('Month', fontsize=20)
plt.ylabel('Amount', fontsize=25)
plt.title('day', fontsize=30)
plt.tight_layout()
plt.show()
#%%
# Same data via the figure-level relplot interface.
type_needed = ['n95', 'surgical mask', 'gowns', 'gloves']
fig, ax1, = plt.subplots()
fig.set_size_inches(25, 12)
sns.relplot(x='Month', y='Total Amount', hue='Type',
            data=df_clean_EMS[df_clean_EMS['Type'].isin(type_needed)], ax=ax1,
            ci=False, markers=True, style='Type', kind='line')
plt.xticks(fontsize=18, rotation=90)
plt.xlabel('Month', fontsize=20)
plt.ylabel('Amount', fontsize=25)
plt.title('day', fontsize=30)
plt.tight_layout()
plt.show()
# %%
'''sns.barplot()'''
# one thing need to realize is that the bar is not the sum but the mean
type_needed = ['n95', 'surgical mask', 'gowns', 'gloves']
fig, ax1, = plt.subplots()
fig.set_size_inches(25, 12)
sns.barplot(x='Month', y='Total Amount', hue='Type',
            data=df_clean_EMS[df_clean_EMS['Type'].isin(type_needed)], ax=ax1,
            ci=False)
plt.yticks(fontsize=18)
plt.xticks(fontsize=20)
plt.xlabel('Month of the year', fontsize=20)
plt.ylabel('mean Amount', fontsize=25)
plt.title('day', fontsize=30)
plt.tight_layout()
plt.show()
# %%
'''switch x and y and use orient=h to transpose '''
type_needed = ['n95', 'surgical mask', 'gowns', 'gloves']
fig, ax1, = plt.subplots()
fig.set_size_inches(25, 12)
sns.barplot(y='Month', x='Total Amount', hue='Type',
            data=df_clean_EMS[df_clean_EMS['Type'].isin(type_needed)], ax=ax1,
            ci=False, orient='h')
plt.yticks(fontsize=18)
plt.xticks(fontsize=20)
plt.xlabel('Month of the year', fontsize=20)
plt.ylabel('mean Amount', fontsize=25)
plt.title('day', fontsize=30)
plt.tight_layout()
plt.show()
# %%
'''how can i change the color?'''
# go to seaborn official web to find color option
# search for html color code--> find the (hex) code like this :#E315C7
type_needed=['n95','surgical mask','gowns','gloves']
fig, ax1, = plt.subplots()
fig.set_size_inches(25, 12)
sns.barplot(y='Month',x='Total Amount',hue='Type',\
data=df_clean_EMS[df_clean_EMS['Type'].isin(type_needed)],ax=ax1,\
ci=False,orient='h',color='#E315C7')
plt.yticks(fontsize=18)
plt.xticks(fontsize=20)
plt.xlabel('Month of the year', fontsize=20)
plt.ylabel('mean Amount', fontsize=25)
plt.title('day', fontsize=30)
plt.tight_layout()
plt.show()
# %%
'''hist is a good way to analysis the distribution of the data'''
fig, ax1, = plt.subplots()
fig.set_size_inches(25, 12)
sns.distplot(df_clean_EMS[(df_clean_EMS['Type']=='gloves')\
&(df_clean_EMS['Facility Type']=='Ltcf')\
&(df_clean_EMS['Total Amount']<=500)]['Total Amount'],ax=ax1)
plt.yticks(fontsize=18)
plt.xticks(fontsize=20)
plt.xlabel('give amount', fontsize=20)
plt.ylabel('count', fontsize=25)
plt.title('give out distribution', fontsize=30)
mean=df_clean_EMS[(df_clean_EMS['Type']=='gloves')\
&(df_clean_EMS['Facility Type']=='Ltcf')\
&(df_clean_EMS['Total Amount']<=500)]['Total Amount'].mean()
plt.axvline(mean, color='red')
plt.tight_layout()
plt.show()
# %%
def draw_type_facility_type_amount_dist(Type,FacilityType,upper_level=50000):
fig, ax1, = plt.subplots()
fig.set_size_inches(25, 12)
sns.distplot(df_clean_EMS[(df_clean_EMS['Type']==Type)\
&(df_clean_EMS['Facility Type']==FacilityType)\
&(df_clean_EMS['Total Amount']<=upper_level)]['Total Amount'],ax=ax1)
plt.yticks(fontsize=18)
plt.xticks(fontsize=20)
plt.xlabel('give amount', fontsize=20)
plt.ylabel('count', fontsize=25)
plt.title('give out distribution', fontsize=30)
mean=df_clean_EMS[(df_clean_EMS['Type']==Type)\
&(df_clean_EMS['Facility Type']==FacilityType)\
&(df_clean_EMS['Total Amount']<=upper_level)]['Total Amount'].mean()
plt.axvline(mean, 0,1,color='red')
plt.tight_layout()
plt.show()
# %%
draw_type_facility_type_amount_dist('n95','Acute',2000)
# %%
'''box plot is another good way to show distribution'''
fig, ax1, = plt.subplots()
fig.set_size_inches(25, 12)
sns.boxplot(df_clean_EMS[(df_clean_EMS['Type']=='gloves')\
&(df_clean_EMS['Facility Type']=='Ltcf')\
&(df_clean_EMS['Total Amount']<=5000)]['Total Amount'],ax=ax1)
plt.yticks(fontsize=18)
plt.xticks(fontsize=20)
plt.xlabel('give amount', fontsize=20)
plt.ylabel('count', fontsize=25)
plt.title('give out distribution', fontsize=30)
mean=df_clean_EMS[(df_clean_EMS['Type']=='gloves')\
&(df_clean_EMS['Facility Type']=='Ltcf')\
&(df_clean_EMS['Total Amount']<=500)]['Total Amount'].mean()
plt.axvline(mean, color='red')
plt.tight_layout()
plt.show()
# %%
fig, ax1, = plt.subplots()
#fig.set_size_inches(25, 12)
sns.set(rc={'figure.figsize':(12,10)})
sns.boxplot(y='Total Amount',
x='Month',data=df_clean_EMS,ax=ax1)
plt.tight_layout()
plt.show()
# %%
'''sns.swarmplot(), which can show every data point with a distribution,
is a good helper for sns.boxplot, '''
fig, ax1, = plt.subplots()
#fig.set_size_inches(25, 12)
sns.set(rc={'figure.figsize':(12,10)})
sns.boxplot(x='Total Amount',
y='Month',data=df_clean_EMS[df_clean_EMS['Total Amount']<=5000],ax=ax1,orient='h',color='#E55C94')
sns.swarmplot(x='Total Amount',
y='Month',data=df_clean_EMS[df_clean_EMS['Total Amount']<=5000],ax=ax1,orient='h',color='green')
plt.tight_layout()
plt.show()
# %%
'''same as sns.violinplot'''
fig, ax1, = plt.subplots()
#fig.set_size_inches(25, 12)
#sns.set(rc={'figure.figsize':(12,10)})
sns.boxplot(x='Total Amount',
y='Month',data=df_clean_EMS[df_clean_EMS['Total Amount']<=5000],ax=ax1,orient='h',color='#E55C94')
sns.violinplot(x='Total Amount',
y='Month',data=df_clean_EMS[df_clean_EMS['Total Amount']<=5000],ax=ax1,orient='h',color='green')
plt.tight_layout()
plt.show()
# %%
'''let's try sns.scatterplot()'''
fig, ax1, = plt.subplots()
#fig.set_size_inches(25, 12)
#sns.set(rc={'figure.figsize':(12,10)})
sns.scatterplot(y='Total Amount',
x='week_of_year',data=df_clean_EMS[df_clean_EMS['Total Amount']<=5000],\
ax=ax1,color='#E55C94')
plt.tight_layout()
plt.show()
# %%
'''what is lmplot()==> basically: scattern plot with regression line'''
#this is not a good example, basically you need to find 2 continue variable
sns.lmplot(y='Total Amount',
x='Month',data=df_clean_EMS[df_clean_EMS['Total Amount']<=5000],height=10)
plt.tight_layout()
plt.show() | zjin311/MaricopaWorkLog | health care digital ticket EDA/python models/seaborn viz center.py | seaborn viz center.py | py | 10,947 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_excel",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "seabor... |
73495691624 | import pygame as pg
import ChessEngine
import os
from itertools import cycle
from copy import deepcopy
pg.init()
pg.mixer.init()
ROOT_DIR = os.path.dirname(__file__)
IMAGE_DIR = os.path.join(ROOT_DIR, 'images')
WIDTH = HEIGHT = 512
DIMENSION = 8
SQ_SIZE = HEIGHT // 8
MAX_FPS = 15
IMAGES = {}
WHITE = (215,215,215)
BLACK = (155,155,155)
def load_images():
for file in os.listdir(IMAGE_DIR):
IMAGES[file[:2]] = pg.transform.scale(pg.image.load(os.path.join(IMAGE_DIR, file)), (SQ_SIZE, SQ_SIZE) )
def main():
screen = pg.display.set_mode((WIDTH,HEIGHT))
clock = pg.time.Clock()
game = ChessEngine.GameState()
load_images()
running = True
slctSq = ()
playerClicks = []
moves = []
while running:
clock.tick(MAX_FPS)
for e in pg.event.get():
if e.type == pg.QUIT:
running = False
elif e.type == pg.KEYDOWN:
if e.key == pg.K_LEFT:
game.undo_move()
if e.key == pg.K_RIGHT:
game.redo_move()
if e.key == pg.K_BACKSPACE:
game.unmake_move()
elif e.type == pg.MOUSEBUTTONDOWN:
if game.viewmode:
game.viewboard = deepcopy(game.board)
game.viewmove = len(game.moveLog)
game.viewmode = False
else:
location = pg.mouse.get_pos()
col = location[0]//SQ_SIZE
row = location[1]//SQ_SIZE
if slctSq == (row, col):
slctSq = ()
playerClicks = []
moves = []
elif slctSq and (row, col) not in moves:
slctSq = ()
playerClicks = []
moves = []
elif len(slctSq) == 0 and game.board[row][col] == "--":
pass
else:
if slctSq:
slctSq = (row, col)
playerClicks.append(slctSq)
pass
else:
if (game.board[row][col][0] == 'w' and game.whiteToMove) or (game.board[row][col][0] == 'b' and not game.whiteToMove):
moves = game.legal_moves(col, row)
slctSq = (row, col)
playerClicks.append(slctSq)
else:
pass
if len(playerClicks) == 2:
if game.board[playerClicks[0][0]][playerClicks[0][1]] != '--':
move = ChessEngine.Move(playerClicks[0], playerClicks[1], game.board)
print(move.get_chess_notation())
game.make_move(move)
slctSq = ()
playerClicks = []
moves = []
else:
slctSq = ()
playerClicks.pop(0)
screen.fill(WHITE)
draw_game(screen, game, moves)
if playerClicks:
if game.board[playerClicks[0][0]][playerClicks[0][1]] != '--':
highlight(screen, playerClicks[0][0],playerClicks[0][1])
pg.display.flip()
def highlight(screen, row, col):
s = pg.Surface((SQ_SIZE,SQ_SIZE)) # the size of your rect
s.set_alpha(128) # alpha level
s.fill((0,128,128)) # this fills the entire surface
screen.blit(s, (col*SQ_SIZE,row*SQ_SIZE))
def draw_game(screen, game, moves):
draw_board(screen)
draw_pieces(screen, game)
draw_moves(screen, moves)
def draw_board(screen):
tile_color = cycle([WHITE, BLACK])
for row in range(DIMENSION):
for col in range(DIMENSION):
pg.draw.rect(screen, next(tile_color), (col*SQ_SIZE, row*SQ_SIZE, SQ_SIZE,SQ_SIZE))
next(tile_color)
def draw_pieces(screen, game):
for row in range(DIMENSION):
for col in range(DIMENSION):
if game.viewboard[row][col] != '--':
screen.blit(IMAGES[game.viewboard[row][col]], (col*64,row*64))
def draw_moves(screen, moves):
for i in moves:
pg.draw.circle(screen, (168, 123, 116), (32+i[1]*64, 32+i[0]*64), 5)
if __name__ == '__main__':
main()
| GracjanPW/Chess | Chess/ChessMain.py | ChessMain.py | py | 4,586 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"l... |
129847267 | from django.shortcuts import render, get_object_or_404
from .models import Post, Category, User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from taggit.models import Tag
from django.db.models import Count
from django.utils import timezone
def post_list(request, tag_slug=None):
posts = Post.objects.filter(status="published")
latest_posts = Post.objects.filter(published_date__lte=timezone.now()).reverse()[:3]
# post tag
tag = None
if tag_slug:
tag = get_object_or_404(Tag, slug=tag_slug)
posts = posts.filter(tags__in=[tag])
paginator = Paginator(posts, 5) # change this to accommodate number of posts per page
page = request.GET.get('page', 1)
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context = {
'posts': posts,
'page': page,
'tag': tag,
'latest_posts': latest_posts,
}
return render(request, 'blog/post_list.html', context, )
def author_post_list(request, author):
author = User.objects.get(username=author)
posts_list = author.posts.filter(status="published")
paginator = Paginator(posts_list, 3) # change this to accommodate number of posts per page
page = request.GET.get('page', 1)
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context = {
'author': author,
'posts': posts,
'page': page
}
return render(request, 'blog/author_post_list.html', context)
def category_post_list(request, slug):
category = Category.objects.get(slug=slug)
posts_list = category.posts.filter(status="published")
paginator = Paginator(posts_list, 3) # change this to accommodate number of posts per page
page = request.GET.get('page', 1)
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context = {
'category': category,
'posts': posts,
'page': page
}
return render(request, 'blog/category.html', context)
def blog_post(request, slug):
# post = Post.objects.get(slug=slug)
post = get_object_or_404(Post, slug=slug, status='published')
# List of similar posts
post_tags_ids = post.tags.values_list('id', flat=True)
similar_posts = Post.objects.filter(active=True, tags__in=post_tags_ids).exclude(id=post.id)
similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-active')[
:3] # adjust slice here for amount of posts to show
context = {
'post': post,
'similar_posts': similar_posts,
}
return render(request, 'blog/blog_post.html', context)
| open-apprentice/ellieplatform-website | blog/views.py | views.py | py | 2,992 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "models.Post.objects.filter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Post.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "models.Post",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "model... |
25047674881 | import torch
from torch import nn
import os
import argparse
import torch.utils.data as data
from torch import optim
from torch.autograd import Variable
import torchvision.transforms as transforms
import tqdm
import numpy as np
import random
from tensorboardX import SummaryWriter
from datetime import datetime
import dateutil.tz
def trainer(opt):
path = "./kills_per_state_per_month.csv"
logger_path = mk_log_dir("./Logs/", opt)
writer = SummaryWriter(logger_path['log_path'])
print("loading")
transform = transforms.Compose([
#transforms.RandomCrop(crop_size, padding=padding),
#transforms.RandomHorizontalFlip(),
transforms.ToTensor()
#transforms.Normalize((0.4914, 0.4822, 0.4465),
# (0.2023, 0.1994, 0.2010)),
])
train_data = train_data_sequence(path, transform)
test_data = test_data_sequence(path, transform)
print(len(train_data))
print(len(test_data))
train_dataloader = torch.utils.data.DataLoader(
train_data,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.n_threads,
pin_memory=True)
test_dataloader = torch.utils.data.DataLoader(
test_data,
batch_size=90,
shuffle=True,
num_workers=opt.n_threads,
pin_memory=True)
gpu_ids = [i for i in range(len(opt.gpu_ids.split(",")))]
model = TemporalModel()
model = nn.DataParallel(model.to("cuda:0"), device_ids=gpu_ids)
model = model.to("cuda:0")
optimizer = optim.SGD(
model.parameters(),
lr=opt.learning_rate,
momentum=0.9,
weight_decay=opt.weight_decay
)
loss = nn.CrossEntropyLoss()
for epoch in range(0, 1000):
train_loss = training(model, optimizer, loss, train_dataloader, epoch, writer)
test_loss = testing(model, optimizer, loss, test_dataloader, epoch, writer)
update_learning_rate(optimizer, opt.learning_rate, epoch)
def training(model, optimizer, Loss, data_loader, epoch, writer):
model.train()
losses = AverageMeter()
for i, (inputs, state, month, targets) in enumerate(data_loader):
inputs = Variable(inputs.cuda())
targets = Variable(targets.cuda())
state = Variable(state.cuda())
month = Variable(month.cuda())
outputs = model(inputs, state, month)
loss = Loss(outputs, targets)
losses.update(loss.item(), inputs.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(inputs[0],state[0], month[0], torch.max(outputs[0], 0)[1].item(), targets[0].item())
print("Epoch: " + str(epoch) + ", avg loss: " + str(losses.avg))
writer.add_scalar("loss/train", losses.avg, epoch)
return losses.avg
def testing(model, optimizer, Loss, data_loader, epoch, writer):
model.eval()
losses = AverageMeter()
for i, (inputs, state, month, targets) in enumerate(data_loader):
inputs = Variable(inputs.cuda())
targets = Variable(targets.cuda())
state = Variable(state.cuda())
month = Variable(month.cuda())
outputs = model(inputs, state, month)
loss = Loss(outputs, targets)
losses.update(loss.item(), inputs.size(0))
print("Test, " + "Epoch: " + str(epoch) + ", avg loss: " + str(losses.avg))
writer.add_scalar("loss/test", losses.avg, epoch)
return losses.avg
class TemporalModel(nn.Module):
def __init__(self):
super(TemporalModel, self).__init__()
#self.fc = nn.Linear(3, )
self.h_size = 1000
self.layer = 3
self.lstm = nn.LSTM(input_size=1, hidden_size=self.h_size, num_layers=self.layer, batch_first=True)
#self.embedding_1 = nn.Linear(2, 3*self.h_size)
self.embedding_2 = nn.Linear(3*self.h_size,200)
def forward(self, x, s, m):
x = x.unsqueeze(2)
# x = self.embedding(x)
h0 = Variable(torch.randn(self.layer, x.size(0), self.h_size).cuda())
c0 = Variable(torch.randn(self.layer, x.size(0), self.h_size).cuda())
#output = self.fc(x)
output, _ = self.lstm(x, (h0, c0))
output = output.contiguous().view(output.size(0), 3*self.h_size)
#condition = torch.cat((s.unsqueeze(1), m.unsqueeze(1)), dim=1)
#condition_feature = self.embedding_1(condition)
#output = torch.cat((output, s.unsqueeze(1), m.unsqueeze(1)), dim=1)
output = self.embedding_2(output)
return output
class train_data_sequence(data.Dataset):
def __init__(self, path, transform):
self.path = path
data_file = open(path, "r")
self.data = []
for line in data_file.readlines():
line = line.replace("\n", "")
line = [int(i) for i in line.split(",")]
self.data.append(line)
#print(self.data[0:5])
self.transform = transform
def __getitem__(self, index):
data = self.data[index]
killed_list = data[3:6]
rand_num = int(6*random.random())
for i in range(len(killed_list)):
if killed_list[i] > 10 and data[6] > 10:
killed_list[i] = int(max(killed_list[i] + rand_num-3, 0))
killed_list = np.array(killed_list).astype(np.float32)
killed_num = torch.from_numpy(killed_list)
if data[6] > 10:
target = torch.tensor(int(data[6])+ rand_num-3)
else:
target = torch.tensor(int(data[6]))
state = torch.tensor(float(data[0]))
month = torch.tensor(float(data[1]))
return killed_num, state, month, target
def __len__(self):
return len(self.data)
class test_data_sequence(data.Dataset):
def __init__(self, path, transform):
self.path = path
data_file = open(path, "r")
self.data = []
for line in data_file.readlines():
line = line.replace("\n", "")
line = [int(i) for i in line.split(",")]
if line[7] > 10:
self.data.append(line[0:2]+line[4:8])
if line[2] > 10:
self.data.append(line[0:6])
#print(self.data[0:5])
self.transform = transform
def __getitem__(self, index):
data = self.data[index]
killed_list = data[2:5]
killed_list = np.array(killed_list).astype(np.float32)
killed_num = torch.from_numpy(killed_list)
target = torch.tensor(int(data[5]))
state = torch.tensor(float(data[0]))
month = torch.tensor(float(data[1]))
return killed_num, state, month, target
def __len__(self):
return len(self.data)
def update_learning_rate(optimizer, lr, epoch):
if epoch > 200:
lr = lr/10.
if epoch > 400:
lr = lr/100.
if epoch > 600:
lr = lr/1000.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def mk_log_dir(save_root, opt):
path_dict = {}
save_root = save_root
os.makedirs(save_root, exist_ok=True)
exp_name = os.path.join(save_root, opt.exp_name)
# set log path
now = datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
prefix = exp_name + '_' + timestamp
os.makedirs(prefix, exist_ok=True)
path_dict['prefix'] = prefix
# set checkpoint path
model_restore_path = os.path.join(prefix, 'Model')
os.makedirs(model_restore_path, exist_ok=True)
path_dict['model_restore_path'] = model_restore_path
log_path = os.path.join(prefix, 'Log')
os.makedirs(log_path, exist_ok=True)
path_dict['log_path'] = log_path
# # set sample image path for fid calculation
# sample_path = os.path.join(prefix, 'Samples')
# os.makedirs(sample_path, exist_ok=True)
# path_dict['sample_path'] = sample_path
return path_dict
def parse_opts():
parser = argparse.ArgumentParser()
parser.add_argument(
'--manual_seed', default=1, type=int, help='Manually set random seed')
parser.add_argument(
'--port', default=23412, type=int, help='Manually set random seed')
parser.add_argument(
'--learning_rate', default=1e-4, type=float, help='learning rate')
parser.add_argument(
'--weight_decay', default=5e-3, type=float, help='learning rate')
parser.add_argument(
'--exp_name', required=True, type=str, help='learning rate')
parser.add_argument(
'--n_threads', default=8, type=int, help='num_workers')
parser.add_argument(
'--batch_size', default=16, type=int, help='Manually set random seed')
parser.add_argument(
'--gpu_ids', default="0", type=str, help='gpus id')
args = parser.parse_args()
return args
if __name__ == "__main__":
opt = parse_opts()
torch.manual_seed(12345)
torch.cuda.manual_seed(12345)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]= opt.gpu_ids
# os.environ['MASTER_PORT'] = opt.port
trainer(opt)
| YJZFlora/Gun_Violence_Data_Mining | main.py | main.py | py | 9,601 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorboardX.SummaryWriter",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 22,
"usage_type": "name"
},
{
"... |
24788288939 | from typing import List
"""
Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
A subarray is a contiguous part of an array.
"""
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
dp = [nums[0]]
# result = float("-inf")
for i in range(1, len(nums)):
dp.append(nums[i] + (dp[i-1] if dp[i-1] > 0 else 0))
return max(dp)
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4] # [5,4,-1,7,8]
sol = Solution()
print(sol.maxSubArray(nums))
# Input: nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# Output: 6
# Explanation: [4, -1, 2, 1] has the largest sum = 6. | inhyeokJeon/AALGGO | Python/LeetCode/dp/53_maximum_subarr.py | 53_maximum_subarr.py | py | 680 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
}
] |
72769207784 | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import csv
import sys
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import tensorflow_docs.plots
import pandas as pd
print(tf.version.VERSION)
column_names = ['id', 'point', 'rssi_a', 'rssi_b', 'rssi_c', 'rssi_d', 'rssi_e',
'distance_a', 'distance_b', 'distance_c', 'distance_d', 'distance_e', 'timestamp']
dataset_path = 'measurements.csv'
raw_dataset = pd.read_csv(dataset_path, names=column_names, na_values="?", comment='\t', sep=";", skipinitialspace=True)
dataset = raw_dataset.copy()
dataset = dataset.dropna()
train_dataset = dataset.sample(frac=0.9, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
# test_dataset = dataset
test_labels_a = test_dataset.pop('distance_a')
test_labels_b = test_dataset.pop('distance_b')
test_labels_c = test_dataset.pop('distance_c')
test_labels_d = test_dataset.pop('distance_d')
test_labels_e = test_dataset.pop('distance_e')
points = test_dataset.pop('point')
test_dataset.drop(["id"], axis=1, inplace=True)
test_dataset.drop(["timestamp"], axis=1, inplace=True)
train_stats = test_dataset.describe()
train_stats = train_stats.transpose()
print('train_stats')
print(train_stats)
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_test_data = norm(test_dataset)
checkpoint_path_a = 'training/training_a/cp_a.ckpt'
checkpoint_dir_a = os.path.dirname(checkpoint_path_a)
checkpoint_path_b = 'training/training_b/cp_b.ckpt'
checkpoint_dir_b = os.path.dirname(checkpoint_path_b)
checkpoint_path_c = 'training/training_c/cp_c.ckpt'
checkpoint_dir_c = os.path.dirname(checkpoint_path_c)
checkpoint_path_d = 'training/training_d/cp_d.ckpt'
checkpoint_dir_d = os.path.dirname(checkpoint_path_d)
checkpoint_path_e = 'training/training_e/cp_e.ckpt'
checkpoint_dir_e = os.path.dirname(checkpoint_path_e)
def create_model():
model = keras.Sequential([
layers.Dense(256, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(256, activation='relu'),
layers.Dense(128, activation='relu'),
layers.Dense(128, activation='relu'),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.Adam()
model.compile(loss='mae', optimizer=optimizer, metrics=['mae', 'mse'])
return model
def get_position_from_distance(distance):
switcher = {
1: [50, 50],
2: [100, 50],
3: [150, 50],
4: [200, 50],
5: [250, 50],
6: [300, 50],
7: [350, 50],
8: [400, 50],
9: [450, 50],
10: [500, 50],
11: [550, 50],
12: [50, 100],
13: [100, 100],
14: [150, 100],
15: [200, 100],
16: [250, 100],
17: [300, 100],
18: [350, 100],
19: [400, 100],
20: [450, 100],
21: [500, 100],
22: [550, 100],
23: [50, 150],
24: [100, 150],
25: [150, 150],
26: [200, 150],
27: [250, 150],
28: [300, 150],
29: [350, 150],
30: [400, 150],
31: [450, 150],
32: [500, 150],
33: [550, 150],
34: [50, 200],
35: [100, 200],
36: [150, 200],
37: [200, 200],
38: [250, 200],
39: [300, 200],
40: [350, 200],
41: [400, 200],
42: [450, 200],
43: [500, 200],
44: [550, 200],
45: [50, 250],
46: [100, 250],
47: [150, 250],
48: [200, 250],
49: [250, 250],
50: [300, 250],
51: [350, 250],
52: [400, 250],
53: [450, 250],
54: [500, 250],
55: [550, 250],
56: [50, 300],
57: [100, 300],
58: [150, 300],
59: [200, 300],
60: [250, 300],
61: [300, 300],
62: [350, 300],
63: [400, 300],
64: [450, 300],
65: [500, 300],
66: [550, 300],
67: [50, 350],
68: [100, 350],
69: [150, 350],
70: [200, 350],
71: [250, 350],
72: [300, 350],
73: [350, 350],
74: [400, 350],
75: [450, 350],
76: [500, 350],
77: [550, 350],
78: [50, 400],
79: [100, 400],
80: [150, 400],
81: [200, 400],
82: [250, 400],
83: [300, 400],
84: [350, 400],
85: [400, 400],
86: [450, 400],
87: [500, 400],
88: [550, 400],
89: [50, 450],
90: [100, 450],
91: [150, 450],
92: [200, 450],
93: [250, 450],
94: [300, 450],
95: [350, 450],
96: [400, 450],
97: [450, 450],
98: [500, 450],
99: [550, 450],
100: [50, 500],
101: [100, 500],
102: [150, 500],
103: [200, 500],
104: [250, 500],
105: [300, 500],
106: [350, 500],
107: [400, 500],
108: [450, 500],
109: [500, 500],
110: [550, 500]
}
return switcher.get(distance)
latest_a = tf.train.latest_checkpoint(checkpoint_dir_a)
print(latest_a)
latest_b = tf.train.latest_checkpoint(checkpoint_dir_b)
print(latest_b)
latest_c = tf.train.latest_checkpoint(checkpoint_dir_c)
print(latest_c)
latest_d = tf.train.latest_checkpoint(checkpoint_dir_d)
print(latest_d)
latest_e = tf.train.latest_checkpoint(checkpoint_dir_e)
print(latest_e)
# Create a new model instance
model_a = create_model()
model_b = create_model()
model_c = create_model()
model_d = create_model()
model_e = create_model()
# Load the previously saved weights
model_a.load_weights(latest_a)
model_b.load_weights(latest_b)
model_c.load_weights(latest_c)
model_d.load_weights(latest_d)
model_e.load_weights(latest_e)
print("Model A")
print(model_a)
print("Normed test data: ")
print(normed_test_data)
print("Test labels A")
test_points = points.tolist()
print(test_labels_a)
print(test_labels_b)
print(test_labels_c)
print(test_labels_d)
print(test_labels_e)
real_positions = []
for i in range(len(test_points)):
real_positions.append(get_position_from_distance(test_points[i]))
test_predictions_a = model_a.predict(normed_test_data).flatten()
test_predictions_b = model_b.predict(normed_test_data).flatten()
test_predictions_c = model_c.predict(normed_test_data).flatten()
test_predictions_d = model_d.predict(normed_test_data).flatten()
test_predictions_e = model_e.predict(normed_test_data).flatten()
print("Test Predictions A")
print(test_predictions_a)
print(test_predictions_b)
print(test_predictions_c)
print(test_predictions_d)
print(test_predictions_e)
with open('predictions_file.csv', mode='w') as predictions_file:
for i in range(len(test_predictions_a)):
predictions_writer = csv.writer(predictions_file, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
predictions_writer.writerow([test_predictions_a[i], test_predictions_b[i], test_predictions_c[i],
test_predictions_d[i], test_predictions_e[i], real_positions[i]
])
a = plt.axes(aspect='equal')
plt.scatter(test_labels_a, test_predictions_a)
plt.xlabel('True Values [Distance]')
plt.ylabel('Predictions [Distance]')
lims = [0, 700]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
plt.show()
a = plt.axes(aspect='equal')
plt.scatter(test_labels_b, test_predictions_b)
plt.xlabel('True Values [Distance]')
plt.ylabel('Predictions [Distance]')
lims = [0, 700]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
plt.show()
a = plt.axes(aspect='equal')
plt.scatter(test_labels_c, test_predictions_c)
plt.xlabel('True Values [Distance]')
plt.ylabel('Predictions [Distance]')
lims = [0, 700]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
plt.show()
a = plt.axes(aspect='equal')
plt.scatter(test_labels_d, test_predictions_d)
plt.xlabel('True Values [Distance]')
plt.ylabel('Predictions [Distance]')
lims = [0, 700]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
plt.show()
a = plt.axes(aspect='equal')
plt.scatter(test_labels_e, test_predictions_e)
plt.xlabel('True Values [Distance]')
plt.ylabel('Predictions [Distance]')
lims = [0, 700]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
plt.show() | kotrotskon/iBKs_regration | Main.py | Main.py | py | 8,398 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tensorflow.version",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
27141705447 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/22 12:54
# @Author : Ryu
# @Site :
# @File : yibu.py
# @Software: PyCharm
import asyncio,aiohttp
import time
import requests
async def f1(url):
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
print(await resp.text())
def timecount(fun):
def a(url):
start=time.clock()
fun(url)
end=time.clock()
print(end-start)
return a
@timecount
def f2(url):
for i in range(5):
print(requests.get(url).text)
url='http://httpbin.org/ip'
# f2(url)
# time1=time.clock()
# loop=asyncio.get_event_loop()
# task=[f1(url) for i in range(600)]
# loop.run_until_complete(asyncio.wait(task))
# print(time.clock()-time1)
| yuzhema/crawer | day08/yibu.py | yibu.py | py | 805 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiohttp.ClientSession",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "time.clock",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_... |
38622940273 | """Platform for sensor integration."""
import logging
from datetime import datetime
from .const import (
DOMAIN,
MODEM_GATEWAY,
EVT_MODEM_CONNECTED,
EVT_MODEM_DISCONNECTED,
EVT_LTE_CONNECTED,
EVT_LTE_DISCONNECTED,
SENSOR_LASTUPD
)
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
BinarySensorEntity,
)
_LOGGER = logging.getLogger(__name__)
SENSOR_SIGNAL = 'signal_strength'
SENSOR_STATUS = 'modem_status'
SENSOR_OPERATOR = 'cell_operator'
REGISTERED_STATUS = 'registered'
CONNECTED_STATUS = 'connected'
GSM_SENSOR_ID = 'mm_modem.signal_strength'
GSM_SENSOR_NAME = 'GSM Modem'
LTE_SENSOR_ID = 'mm_modem.lte_status'
LTE_SENSOR_NAME = 'LTE Connection'
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor platform."""
add_entities([GsmModemSensor(hass)])
add_entities([LteConnectionSensor(hass)])
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the sensors from a config entry (async entry point)."""
    # Same two entities as setup_platform, registered asynchronously.
    async_add_entities([GsmModemSensor(hass)])
    async_add_entities([LteConnectionSensor(hass)])
class GsmModemSensor(BinarySensorEntity):
    """Binary connectivity sensor mirroring the GSM modem state."""
    _signal_strength = 0
    _modem_status = 'none'
    _cell_operator = 'none'
    _hass = None
    _prev_status = False

    def __init__(self, hass):
        """Subscribe to modem bus events and pull the initial state."""
        self._state = None
        self._hass = hass
        bus = hass.bus
        bus.async_listen(EVT_MODEM_DISCONNECTED, self._handle_modem_disconnected)
        bus.async_listen(EVT_MODEM_CONNECTED, self._handle_modem_connected)
        self.update()

    async def _handle_modem_disconnected(self, event):
        self.update()
        self.async_write_ha_state()

    async def _handle_modem_connected(self, event):
        self.update()
        self.async_write_ha_state()

    def get_gateway(self):
        """Return the modem gateway instance stored in the hass data dict."""
        return self._hass.data[DOMAIN][MODEM_GATEWAY]

    @property
    def name(self):
        """Name shown in the UI."""
        return GSM_SENSOR_NAME

    @property
    def is_on(self):
        """True while the modem is registered to or connected with a network."""
        return self._modem_status in (REGISTERED_STATUS, CONNECTED_STATUS)

    @property
    def device_class(self):
        """Report this entity as a connectivity sensor."""
        return DEVICE_CLASS_CONNECTIVITY

    @property
    def unique_id(self):
        """Stable entity identifier."""
        return GSM_SENSOR_ID

    @property
    def extra_state_attributes(self):
        """Expose status, operator, signal and last-update time as attributes."""
        attributes = {
            SENSOR_STATUS: self._modem_status,
            SENSOR_OPERATOR: self._cell_operator,
            SENSOR_SIGNAL: self._signal_strength,
            SENSOR_LASTUPD: datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
        }
        return attributes

    def update(self):
        """Refresh cached modem state from the gateway, logging transitions."""
        modem_info = self.get_gateway().get_modem_state()
        if modem_info is None:
            # Gateway has no modem: reset cached values.
            self._signal_strength = 0
            self._modem_status = 'none'
            self._cell_operator = 'none'
            if self._prev_status:
                self._prev_status = False
                _LOGGER.info('GSM modem disconnected')
        else:
            self._signal_strength = modem_info['signal']
            self._cell_operator = modem_info['operator']
            self._modem_status = modem_info['status']
            if not self._prev_status:
                self._prev_status = True
                _LOGGER.info('GSM modem connected')
class LteConnectionSensor(BinarySensorEntity):
    """Binary connectivity sensor mirroring the LTE data connection."""
    _modem_status = None
    _hass = None
    _prev_status = False

    def __init__(self, hass):
        """Subscribe to the LTE connect/disconnect bus events."""
        self._state = None
        self._hass = hass
        bus = hass.bus
        bus.async_listen(EVT_LTE_DISCONNECTED, self._handle_lte_disconnected)
        bus.async_listen(EVT_LTE_CONNECTED, self._handle_lte_connected)

    async def _handle_lte_disconnected(self, event):
        self.update()
        self.async_write_ha_state()

    async def _handle_lte_connected(self, event):
        self.update()
        self.async_write_ha_state()

    def get_gateway(self):
        """Return the modem gateway instance stored in the hass data dict."""
        return self._hass.data[DOMAIN][MODEM_GATEWAY]

    @property
    def name(self):
        """Name shown in the UI."""
        return LTE_SENSOR_NAME

    @property
    def is_on(self):
        """True only while the modem reports an established connection."""
        return self._modem_status == CONNECTED_STATUS

    @property
    def device_class(self):
        """Report this entity as a connectivity sensor."""
        return DEVICE_CLASS_CONNECTIVITY

    @property
    def unique_id(self):
        """Stable entity identifier."""
        return LTE_SENSOR_ID

    @property
    def extra_state_attributes(self):
        """No extra attributes for the LTE sensor."""
        return {}

    def update(self):
        """Refresh the cached modem status from the gateway."""
        modem_info = self.get_gateway().get_modem_state()
        self._modem_status = 'none' if modem_info is None else modem_info['status']
| vladkozlov69/homeassistant_modem | binary_sensor.py | binary_sensor.py | py | 5,944 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "homeassistant.components.binary_sensor.BinarySensorEntity",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "const.EVT_MODEM_DISCONNECTED",
"line_number": 59,
"usage_type... |
34701767455 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Renames Usuario.nome to Usuario.username; depends on the migration
    # that originally introduced the "nome" field.
    dependencies = [
        ('login', '0002_usuario_nome'),
    ]
    operations = [
        migrations.RenameField(
            model_name='usuario',
            old_name='nome',
            new_name='username',
        ),
    ]
| andersonfantini/example-django-social-login | login/migrations/0003_auto_20150213_1520.py | 0003_auto_20150213_1520.py | py | 380 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.RenameField",
"line_number": 14,
"usage_type": "call"
},
... |
3746638257 | # Standard Library
import argparse
import os
class EnvDefault(argparse.Action):
    """
    argparse Action that sources an argument's default from an environment variable.
    Usage: pass ``action=EnvDefault`` together with an extra ``envvar`` keyword
    naming the environment variable that holds the default value.
    Example: parser.add_argument("--gitleaks-path", action=EnvDefault, envvar="RESC_GITLEAKS_PATH")
    The environment value (when present) becomes the default, and the CLI
    argument can still override it; a satisfied default also clears ``required``.
    """
    def __init__(self, envvar, required=True, default=None, **kwargs):
        # Only consult the environment when no explicit default was given.
        if envvar and not default:
            default = os.environ.get(envvar, default)
        # A usable default means the argument no longer has to be required.
        if default and required:
            required = False
        super().__init__(default=default, required=required, **kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, values)
| abnamro/repository-scanner | components/resc-vcs-scanner/src/vcs_scanner/helpers/env_default.py | env_default.py | py | 1,071 | python | en | code | 137 | github-code | 36 | [
{
"api_name": "argparse.Action",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 18,
"usage_type": "attribute"
}
] |
1048912505 | import torch
from torch import nn
class down_conv(nn.Module):
    """Two stacked 3x3 same-padding convolutions, each followed by ReLU."""

    def __init__(self, in_ch, out_ch):
        super(down_conv, self).__init__()
        layers = [
            nn.Conv2d(in_ch, out_ch, 3, 1, 1),
            nn.ReLU(),
            nn.Conv2d(out_ch, out_ch, 3, 1, 1),
            nn.ReLU(),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class out(nn.Module):
    """1x1 convolution followed by a sigmoid, producing the final map."""

    def __init__(self, in_ch, out_ch):
        super(out, self).__init__()
        self.out = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return self.out(x)
class up(nn.Module):
    """Transposed convolution doubling spatial resolution, plus ReLU."""

    def __init__(self, in_ch, out_ch):
        super(up, self).__init__()
        self.up = nn.Sequential(
            nn.ConvTranspose2d(in_ch, out_ch, 2, stride=2),
            nn.ReLU(),
        )

    def forward(self, x):
        return self.up(x)
class uNet(nn.Module):
    """U-Net: 5-level encoder/decoder with skip connections, 1 channel in/out."""

    def __init__(self):
        super(uNet, self).__init__()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Encoder: channel counts double at each level.
        self.down_1 = down_conv(1, 64)
        self.down_2 = down_conv(64, 128)
        self.down_3 = down_conv(128, 256)
        self.down_4 = down_conv(256, 512)
        self.down_5 = down_conv(512, 1024)
        # Decoder: upsample, concatenate the skip, then double-convolve.
        self.de_up_5 = up(1024, 512)
        self.up_5 = down_conv(1024, 512)
        self.de_up_4 = up(512, 256)
        self.up_4 = down_conv(512, 256)
        self.de_up_3 = up(256, 128)
        self.up_3 = down_conv(256, 128)
        self.de_up_2 = up(128, 64)
        self.up_2 = down_conv(128, 64)
        self.out = out(64, 1)

    def forward(self, x):
        # Encoder path, keeping each level's features for the skip links.
        enc1 = self.down_1(x)
        enc2 = self.down_2(self.pool(enc1))
        enc3 = self.down_3(self.pool(enc2))
        enc4 = self.down_4(self.pool(enc3))
        enc5 = self.down_5(self.pool(enc4))
        # Decoder path with channel-wise skip concatenation (skip first,
        # upsampled second — matches the original concatenation order).
        dec = self.up_5(torch.cat((enc4, self.de_up_5(enc5)), dim=1))
        dec = self.up_4(torch.cat((enc3, self.de_up_4(dec)), dim=1))
        dec = self.up_3(torch.cat((enc2, self.de_up_3(dec)), dim=1))
        dec = self.up_2(torch.cat((enc1, self.de_up_2(dec)), dim=1))
        return self.out(dec)
| lembolov9/u-net-torch | model.py | model.py | py | 2,331 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_n... |
13960662339 | from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.views.decorators.cache import never_cache
from helfertool.utils import nopermission
from registration.permissions import has_access, ACCESS_CORONA_VIEW, ACCESS_CORONA_EDIT
from registration.utils import get_or_404
from ..forms import ContactTracingDataForm
from ..models import ContactTracingData
from .utils import notactive
import logging
logger = logging.getLogger("helfertool.corona")
@login_required
@never_cache
def view_helper(request, event_url_name, helper_pk):
    """Display the corona contact-tracing data stored for one helper.

    Requires ACCESS_CORONA_VIEW on the helper and an event with contact
    tracing enabled; renders the view template with ``data=None`` when no
    record exists yet.
    """
    event, job, shift, helper = get_or_404(event_url_name, helper_pk=helper_pk)
    # check permissions
    if not has_access(request.user, helper, ACCESS_CORONA_VIEW):
        return nopermission(request)
    # check if corona contact tracing is active
    if not event.corona:
        return notactive(request)
    # get data if it exists
    try:
        data = helper.contacttracingdata
    except ContactTracingData.DoesNotExist:
        # no tracing record has been created for this helper yet
        data = None
    # render page
    context = {"event": event, "helper": helper, "data": data}
    return render(request, "corona/view_helper.html", context)
@login_required
@never_cache
def edit_helper(request, event_url_name, helper_pk):
    """Create or update the corona contact-tracing data for one helper.

    Requires ACCESS_CORONA_EDIT; on a valid POST the form is saved, the
    change is logged, and the user is redirected to the view page.
    """
    event, job, shift, helper = get_or_404(event_url_name, helper_pk=helper_pk)
    # check permissions
    if not has_access(request.user, helper, ACCESS_CORONA_EDIT):
        return nopermission(request)
    # check if corona contact tracing is active
    if not event.corona:
        return notactive(request)
    # get data if it exists
    try:
        data = helper.contacttracingdata
    except ContactTracingData.DoesNotExist:
        data = None
    # form (bound on POST, otherwise unbound; edits the existing record if any)
    form = ContactTracingDataForm(request.POST or None, instance=data, event=event)
    if form.is_valid():
        form.save(helper=helper)
        logger.info(
            "helper coronadata",
            extra={
                "user": request.user,
                "event": event,
                "helper": helper,
            },
        )
        return redirect("corona:view_helper", event_url_name=event_url_name, helper_pk=helper.pk)
    # render page
    context = {"event": event, "helper": helper, "form": form}
    return render(request, "corona/edit_helper.html", context)
| helfertool/helfertool | src/corona/views/helper.py | helper.py | py | 2,339 | python | en | code | 52 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "registration.utils.get_or_404",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "registration.permissions.has_access",
"line_number": 24,
"usage_type": "call"
},
{
... |
40520294015 | from pprint import pprint
from functools import reduce
from collections import defaultdict
data = [l.strip() for l in open("input.txt","r").readlines()]
pprint(data)
# part 1
# vowels = ["a","e","i","o","u"]
# forbidden = ["ab", "cd", "pq", "xy"]
# nice = 0
# for line in data:
#     vows = sum([1 for ch in line if ch in vowels])
#     double = sum([1 for i in range(len(line)-1) if line[i] == line[i+1]])
#     wrong = sum([1 for i in range(len(line)-1) if "{}{}".format(line[i],line[i+1]) in forbidden])
#     if vows >= 3 and double > 0 and wrong == 0:
#         nice += 1
# print(nice)
# Part 2: a string is "nice" if it has a letter pair that appears at least
# twice without overlapping, and a letter that repeats with one char between.
nice = 0
for line in data:
    # Map each two-letter pair to the set of string indices it covers; a pair
    # occurring twice without overlap covers >= 4 distinct indices.
    pairs = defaultdict(lambda: set())
    for i in range(0,len(line)-1,1): pairs["{}{}".format(line[i],line[i+1])].update((i,i+1))
    right = sum([1 for p in pairs if (len(pairs[p])/2) >= 2])
    # Any letter repeated with exactly one character in between (x.x).
    gapped = sum([1 for i in range(len(line)-2) if line[i] == line[i+2]])
    if right >= 1 and gapped >= 1:
        nice += 1
print(nice) | archanpatkar/advent2015 | Day-05/sol.py | sol.py | py | 955 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pprint.pprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 22,
"usage_type": "call"
}
] |
17066369564 | import pandas as pd
from sklearn.linear_model import LinearRegression
'''dealing with the data'''
def preprocessing(country):
    """Build (features, targets, size) for one country from the module-level ``data``.

    Rows for *country* are reversed into chronological (oldest-first) order.
    The feature for day ``i`` is simply ``[i]`` (the day index, wrapped in a
    list for scikit-learn), and the target is that day's reported case count.

    Fix: the original also copied and mutated the ``dateRep`` column element
    by element, but that Series was never used afterwards — dead O(n) work,
    removed.  The ``insert``-in-a-loop list building is replaced with
    comprehensions; the returned values are identical.
    """
    rows = data.loc[(data['countriesAndTerritories'] == country)]
    # Oldest-first case counts; reset_index gives positional labels 0..size-1.
    cases = rows['cases'].iloc[::-1].reset_index(drop=True)
    size = cases.size
    secondx = [[day] for day in range(size)]       # day index as the lone feature
    secondy = [cases[day] for day in range(size)]  # raw daily case counts
    return secondx, secondy, size
'''train the model'''
def train(x, y, size):
    """Fit a linear model on the last 70% of the series and forecast 7 days.

    Forecasts below one case are clamped to zero, and every forecast is
    truncated to a whole number of cases.
    """
    split = int(size * 0.3)
    model = LinearRegression()
    model.fit(x[split:], y[split:])
    # Predict the next seven day indices following the observed range.
    horizon = [[size + offset] for offset in range(7)]
    predictions = model.predict(horizon)
    for day in range(7):
        if predictions[day] < 1:
            predictions[day] = 0
        predictions[day] = int(predictions[day])
    return predictions
def main():
    """Forecast every country in ``data`` and write the combined table to CSV."""
    countrylist = data.countriesAndTerritories.unique().tolist()
    forecasts = []
    for country in countrylist:
        x, y, size = preprocessing(country)
        forecasts.append(train(x, y, size))
    # One column per country, one row per forecast date.
    dat = ['10/9', '10/10', '10/11', '10/12', '10/13', '10/14', '10/15']
    result = pd.DataFrame(zip(*forecasts), columns=countrylist, index=dat)
    result.to_csv('StudentID_HW1.csv')
'''file import'''
# Load the dataset once at module scope; the global `data` is read by
# preprocessing()/main() above.  NOTE(review): the path is a placeholder
# the user is expected to replace.
dataset_file_path = "your/dataset/path/dataset.csv"
data = pd.read_csv(dataset_file_path)
main()
| Amy-Liao/COVID-19-Forecast | model.py | model.py | py | 1,577 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 47,
"usage_type": "call"
}
] |
73488488423 |
from requests import get
from scrapy import Selector
# Fetch the French Wikipedia article on the Algerian War and print its
# table of contents (section number + title) by parsing the static HTML.
response = get("https://fr.wikipedia.org/wiki/Guerre_d%27Alg%C3%A9rie")
source = None
if response.status_code == 200 :
    source = response.text
if source :
    selector = Selector(text=source)
    # Each <li> inside the "toc" div is one table-of-contents entry.
    titles = selector.css("div.toc ul > li")
    for title in titles:
        level = title.css("span.tocnumber::text").extract_first()
        name = title.css("span.toctext::text").extract_first()
print(level + " " + name) | LexicoScrap/scrap_test | scrap_test_one.py | scrap_test_one.py | py | 484 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "scrapy.Selector",
"line_number": 11,
"usage_type": "call"
}
] |
40584544669 | import sys
from catchException import exception_handler
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from requests import ReadTimeout, ConnectTimeout, HTTPError, Timeout, ConnectionError
import pandas as pd
import os
# import requests
# import urllib3
## handle exception properly
## make it summarised info , hide traceback
sys.excepthook = exception_handler
#####
def collectApiData(session=None, apitype="surveys", survey_id = 1, limit=40000, offset=0, token=" " ):
    '''
    Fetch survey data from the TrueFeedback data-platform API as a DataFrame.
    session : requests.Session to reuse across calls (retries are mounted on it)
    apitype : "surveys" (list of surveys) or "answers" (answers for one survey);
              selects which endpoint URL is built
    survey_id : survey id used only when apitype == "answers"
    limit : maximum number of records to request
    offset : record offset to start the next page of data from
    token : API token sent in the "auth" request header
    Returns a pandas DataFrame parsed from the JSON response.
    Raises Exception/ValueError on bad apitype, connection or parse problems.
    '''
    ###########################################################################
    #### validate the api type and build the endpoint URL
    if apitype == "surveys":
        api = "https://endapi.truefeedback.io/dataplatform/survey/"
    elif apitype == "answers":
        api = 'https://endapi.truefeedback.io/dataplatform/survey/' + str(survey_id) + '/' + 'answers?' + 'limit='+ str(limit) + '&offset=' + str(offset)
    else:
        raise Exception("wrong api type. The api type is a string that can only be answers or surveys")
    ###########################################################################
    ######## inform the user which endpoint is about to be queried ########
    print("The data api endpoint is: ", api)
    ###########################################################################
    ###########################################################################
    ### guarded request: any connection/parse error is re-raised with context
    try:
        ##
        print("connecting to data api endpoint ...")
        ## retry up to 3 times with backoff on transient 5xx responses
        retries = Retry(connect=4,total=3,
                backoff_factor=0.1,
                status_forcelist=[ 500, 502, 503, 504 ]) ## force status retry if code returned is 500, 502, 504
        ### mount an adapter carrying the retry policy
        # "pool_maxsize" is the number of connections to keep around per host
        # (this is useful for multi-threaded applications), whereas "pool_connections" is the number of host-pools
        # to keep around. For example, if you're connecting to 100 different hosts, and pool_connections=10, then only the latest 10 hosts'
        # connections will be re-used.
        session.mount('http://', HTTPAdapter(max_retries=retries,pool_connections=10, pool_maxsize=10))
        ##
        ### perform the authenticated GET against the endpoint
        getApi = session.get(api, headers={"auth": token} )
        ## raw response body (expected to be JSON)
        a = getApi.content
        ## parse the JSON payload into a DataFrame
        df=pd.read_json(a)
        print("my df: ", df.head())
        return df
    ###########################################################################
    ###########################################################################
    ## exception handling for the try block above
    #### connection related errors
    except (ConnectTimeout, HTTPError, ReadTimeout, Timeout, ConnectionError) as e:
        raise Exception('There is an http connection problem. Check the connection configuration', str(e))
    ### payload/format related errors (also raised on a rejected token)
    except ValueError as e:
        raise ValueError('Data not in json format or did you supply wrong token?.', str(e))
    ### user-initiated interrupt: exit the process
    except KeyboardInterrupt as e:
        print(" \t You interrupted the program.")
        try:
            sys.exit(0)
        except:
            os._exit(0)
    ## catch-all for anything the specific handlers above missed
    except:
        raise Exception('a problem has occurred with connection/connection parameters or data format or mising library import or non initialised session')
    #sys.exit()
########################################################################## | adderbyte/finos_viz | dataValuation.py | dataValuation.py | py | 4,312 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.excepthook",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "catchException.exception_handler",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "urllib3.util.retry.Retry",
"line_number": 58,
"usage_type": "call"
},
{
"api... |
43068200237 | import time
from datetime import datetime
import webbrowser
from PySide2.QtSvg import QSvgRenderer
from PySide2.QtCore import Qt, QSize, QRect, QPoint
from PySide2.QtGui import QIcon, QPixmap, QColor, QPainter, QImage, QMouseEvent, QFont, QFontMetrics, QPen, QBrush
from PySide2.QtWidgets import QMainWindow, QHBoxLayout, QVBoxLayout, QPushButton, QGridLayout, QColorDialog, \
QComboBox, QLabel, QDoubleSpinBox, QDialog, QCheckBox, QFrame, QApplication, QLineEdit, QFileDialog, QMenuBar, \
QMenu, QAction, QTreeWidget, QTreeWidgetItem, QTreeWidgetItemIterator, QTabWidget, QWidget, QListWidget, \
QListWidgetItem, QScrollBar
from maya import OpenMayaUI, cmds
import shiboken2
from functools import partial
import json
import os
from maya.api.OpenMaya import MMatrix, MEventMessage
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
# Scale factor for HiDPI displays: logical DPI relative to the 96-dpi baseline.
dpiF = QApplication.desktop().logicalDpiX() / 96.0
def getMayaMainWindow():
    """Return Maya's main window wrapped as a PySide2 QMainWindow instance."""
    pointer = OpenMayaUI.MQtUtil.mainWindow()
    return shiboken2.wrapInstance(int(pointer), QMainWindow)
def createSeparator():
    """Create and return a thin sunken horizontal line usable as a separator."""
    separator = QFrame()
    separator.setFrameShape(QFrame.HLine)
    separator.setFrameShadow(QFrame.Sunken)
    return separator
def clamp(minimum, val, maximum):
    """Return *val* limited to [minimum, maximum].

    The upper bound is applied first, so when minimum > maximum the minimum
    wins — matching ``max(minimum, min(val, maximum))``.
    """
    if val > maximum:
        val = maximum
    if val < minimum:
        val = minimum
    return val
class SelectByNameLine(QLineEdit):
    """Line edit that selects Maya nodes by name/type filters on Enter.

    Filter syntax (comma separated): plain text matches node names, a
    leading/embedded ``#`` marks a node-type filter, and ``!`` inverts a
    filter (exclusion).  Up/Down arrows walk the input history.
    """
    def __init__(self, *args, **kwargs):
        super(SelectByNameLine, self).__init__(*args, **kwargs)
        # Most-recent-first list of submitted filter strings.
        self.history = list()
        # Current position in history; -1 means "not browsing history".
        self.index = -1
    def keyPressEvent(self, event):
        """Handle history navigation (Up/Down) and submission (Return)."""
        if event.key() == Qt.Key_Up:
            event.accept()
            # Step to the next-older history entry if one exists.
            try:
                txt = self.history[self.index + 1]
                self.index += 1
                self.setText(txt)
            except IndexError:
                pass
        elif event.key() == Qt.Key_Down:
            event.accept()
            # Step to the next-newer entry, or clear the field past the end.
            newIndex = self.index - 1
            if newIndex >= 0:
                txt = self.history[newIndex]
                self.index = newIndex
            else:
                txt = ''
                self.index = -1
            self.setText(txt)
        elif event.key() == Qt.Key_Return:
            event.accept()
            # Record the input, run the selection, and reset the field.
            self.index = -1
            self.history.insert(0, self.text())
            self.select()
            self.setText('')
        else:
            super(SelectByNameLine, self).keyPressEvent(event)
    def select(self):
        """Parse the filter text and select the matching Maya nodes."""
        rawInput = self.text()
        name_filters = list()
        inverse_name_filters = list()
        type_filters = list()
        inverse_type_filters = list()
        # Sort each comma-separated token into one of the four filter lists:
        # '#' -> type filter, '!' -> inverted (exclusion) filter.
        for input_ in rawInput.split(','):
            flt = input_.strip()
            if '#' in flt:
                flt = flt.replace('#', '')
                if '!' in flt:
                    flt = flt.replace('!', '')
                    inverse_type_filters.append(flt)
                else:
                    type_filters.append(flt)
            else:
                if '!' in flt:
                    flt = flt.replace('!', '')
                    inverse_name_filters.append(flt)
                else:
                    name_filters.append(flt)
        # Resolve name filters: matches minus exclusions.
        name_filtered = cmds.ls(*name_filters, recursive=True) if name_filters else list()
        inverse_name_filtered = cmds.ls(*inverse_name_filters, recursive=True) if inverse_name_filters else list()
        name_set = set(name_filtered).difference(inverse_name_filtered)
        # Resolve type filters the same way.
        type_filtered = cmds.ls(type=type_filters, recursive=True) if type_filters else list()
        inverse_type_filtered = cmds.ls(type=inverse_type_filters, recursive=True) if inverse_type_filters else list()
        type_set = set(type_filtered).difference(inverse_type_filtered)
        # Intersect only when both kinds of filters were supplied.
        if (name_filters + inverse_name_filters) and (type_filters + inverse_type_filters):
            final_set = name_set.intersection(type_set)
        elif (name_filters + inverse_name_filters) and not (type_filters + inverse_type_filters):
            final_set = name_set
        elif not (name_filters + inverse_name_filters) and (type_filters + inverse_type_filters):
            final_set = type_set
        else:
            final_set = set()
        cmds.select(list(final_set), noExpand=True)
class IconWidget(QWidget):
    """Widget painting a node-type icon with reference/shape/controller badges."""
    # NOTE(review): class-level cache dict, currently unused in this class.
    images = dict()
    def __init__(self, node, parent):
        super(IconWidget, self).__init__(parent)
        nodeType = cmds.objectType(node)
        # For non-set transforms, prefer the first shape's type for the icon.
        shapes = cmds.listRelatives(node, shapes=True, fullPath=True) if nodeType != 'objectSet' else None
        shapType = cmds.objectType(shapes[0]) if shapes else None
        self.type = nodeType if not shapType else shapType
        self.isShape = cmds.objectType(node, isAType='shape')
        self.isReferenced = cmds.referenceQuery(node, isNodeReferenced=True)
        self.isController = cmds.controller(node, q=True, isController=True)
    def paintEvent(self, event):
        painter = QPainter(self)
        # Base icon: the node/shape type's resource image, falling back to a
        # generic icon when Maya has no image for that type.
        img = QImage(':{}.svg'.format(self.type))
        if img.isNull():
            img = QImage(':default.svg')
        img = img.smoothScaled(self.width(), self.height())
        painter.drawImage(0, 0, img)
        # Referenced-node badge in the top-left quadrant.
        if self.isReferenced:
            refImg = QImage(':reference.svg').smoothScaled(self.width() * .5, self.height() * .5)
            painter.drawImage(0, 0, refImg)
        # Shape badge in the bottom-right quadrant.
        if self.isShape:
            shapeImg = QImage(':nurbsSurface.svg').smoothScaled(self.width() * .5, self.height() * .5)
            painter.drawImage(self.width() * .5, self.height() * .5, shapeImg)
        # Controller badge in the top-right quadrant.
        if self.isController:
            ctrlImg = QImage(':character.svg').smoothScaled(self.width() * .5, self.height() * .5)
            painter.drawImage(self.width() * .5, 0, ctrlImg)
        # painter.drawRect(0, 0, self.width() - 1, self.height() - 1)
class IconButton(QWidget):
    """Image-based button with hover/click highlighting and optional toggling.

    Callbacks are plain lists: append callables to ``clicked`` (no args) and,
    for checkable buttons, to ``checked`` (receives the new bool state).
    """
    # NOTE(review): class-level cache dict, currently unused in this class.
    images = dict()
    def __init__(self, idleImageFile, checkedImageFile=None, checkable=False, parent=None, isChecked=False):
        super(IconButton, self).__init__(parent=parent)
        self.idleImageFile = idleImageFile
        self.checkedImageFile = checkedImageFile
        self.isCheckable = checkable
        # Observer lists; see class docstring.
        self.clicked = list()
        self.checked = list()
        self.isHovered = False
        self.isClicked = False
        self.isChecked = isChecked
        # Additive RGB offsets used to brighten the icon on hover/click.
        self.hoveredColor = 50
        self.clickedColor = 100
    def enterEvent(self, *args, **kwargs):
        super(IconButton, self).enterEvent(*args, **kwargs)
        # print('ENTER')
        self.isHovered = True
        self.update()
    def leaveEvent(self, *args, **kwargs):
        super(IconButton, self).leaveEvent(*args, **kwargs)
        # print('LEAVE')
        self.isHovered = False
        self.update()
    def mousePressEvent(self, event):  # type: (QMouseEvent) -> None
        # Only left-clicks arm the button; other buttons fall through to Qt.
        if event.button() == Qt.LeftButton:
            self.isClicked = True
            event.accept()
            self.update()
        else:
            super(IconButton, self).mousePressEvent(event)
    def mouseReleaseEvent(self, event):
        if event.button() == Qt.LeftButton:
            self.isClicked = False
            # Toggle state first (checkable only), then fire click callbacks.
            if self.isCheckable:
                self.isChecked = not self.isChecked
                [func(self.isChecked) for func in self.checked]
            [func() for func in self.clicked]
            event.accept()
            self.update()
        else:
            super(IconButton, self).mouseReleaseEvent(event)
    def offsetImageColor(self, img, offsetColor):
        """Brighten *img* in place by adding *offsetColor* to each RGB channel."""
        for x in range(img.width()):
            for y in range(img.height()):
                color = img.pixelColor(x, y)  # type: QColor
                r = min(color.red() + offsetColor, 255)
                g = min(color.green() + offsetColor, 255)
                b = min(color.blue() + offsetColor, 255)
                a = color.alpha()
                img.setPixelColor(x, y, QColor(r, g, b, a))
    def paintEvent(self, event):
        painter = QPainter(self)
        # Pick the checked image when applicable, otherwise the idle image.
        img = QImage(self.idleImageFile)
        if self.isCheckable and self.isChecked and self.checkedImageFile:
            img = QImage(self.checkedImageFile)
        # Clicked highlight wins over hover highlight.
        if self.isClicked:
            self.offsetImageColor(img, self.clickedColor)
        elif self.isHovered:
            self.offsetImageColor(img, self.hoveredColor)
        img = img.smoothScaled(self.width(), self.height())
        painter.drawImage(0, 0, img)
class SelectionEditor(QDialog): # MayaQWidgetDockableMixin,
def __init__(self, parent=getMayaMainWindow()):
super(SelectionEditor, self).__init__(parent=parent)
self.setWindowFlags(self.windowFlags() & ~Qt.WindowContextHelpButtonHint)
self.setWindowTitle('Selection')
self.selection = None
self.historySelection = None
self.historyEnabled = True
self.selectionEnabled = True
self.namespace = True
#
self.selectionTree = SelectionTree()
self.selectionTree.itemSelectionChanged.connect(self.selectSelectionItem)
# selection count
self.selectionCount = QLabel()
self.selectionCount.setToolTip('Number of Selected Objects')
# select by name and type
self.selectByNameTypeHistory = list()
self.selectByNameTypeField = SelectByNameLine()
pixmap = QPixmap(':quickSelect.png')
selectByNameLabel = QLabel()
selectByNameLabel.setToolTip('Select by Name and Type')
selectByNameLabel.setPixmap(pixmap.scaled(40, 40))
selectByNameTypeLayout = QHBoxLayout()
selectByNameTypeLayout.addWidget(selectByNameLabel)
selectByNameTypeLayout.addWidget(self.selectByNameTypeField)
# selectByNameTypeLayout.addWidget(self.selectionCount)
# selection tree
lockSelection = IconButton(':unlockGeneric.png', ':lock.png', checkable=True)
lockSelection.setToolTip('Lock/Unlock Auto-Reload')
lockSelection.setMinimumSize(QSize(30, 30))
lockSelection.checked.append(self.lockToggled)
self.saveSelection = IconButton(':Bookmark.png')
self.saveSelection.setToolTip('Save Current Selection')
self.saveSelection.setMinimumSize(QSize(30, 30))
self.saveSelection.clicked.append(self.test)
self.saveWindow = QWidget(self.saveSelection)
self.saveWindow.setFixedSize(QSize(100, 100))
copySelectionTab = IconButton(':UVTkCopySet.png')
copySelectionTab.setToolTip('Tear Off Selection in another Window')
copySelectionTab.setMinimumSize(QSize(30, 30))
copySelectionTab.clicked.append(self.tearOffSelectionCopy)
displayNamespaces = IconButton(':switchOff.png', ':switchOn.png', checkable=True, isChecked=True)
displayNamespaces.setToolTip('Show/Hide Namespaces')
displayNamespaces.setMinimumSize(QSize(30, 30))
displayNamespaces.checked.append(self.selectionTree.toggleNamespaces)
selectionOptionsLayout = QHBoxLayout()
selectionOptionsLayout.addWidget(lockSelection)
selectionOptionsLayout.addWidget(self.saveSelection)
selectionOptionsLayout.addWidget(copySelectionTab)
selectionOptionsLayout.addWidget(displayNamespaces)
# selectionOptionsLayout.addStretch()
# for _ in range(5):
# icnBtn = IconButton(':Bookmark.png')
# icnBtn.setMinimumSize(QSize(30, 30))
# selectionOptionsLayout.addWidget(icnBtn)
selectionOptionsLayout.addStretch()
selectionOptionsLayout.addWidget(self.selectionCount)
# self.selectionList.setSelectionMode(QListWidget.ExtendedSelection)
refreshAct = QAction('Auto-Refresh', self)
# refreshAct.setCheckable(True)
displayMenu = QMenu('Display')
displayMenu.addAction(refreshAct)
menuBar = QMenuBar()
menuBar.addMenu(displayMenu)
# selectionLayout = QVBoxLayout()
# selectionLayout.setSpacing(0)
# selectionLayout.addLayout(selectionOptionsLayout)
# selectionLayout.addWidget(self.selectionTree)
#
self.historyTree = QTreeWidget()
self.historyTree.itemSelectionChanged.connect(self.selectHistoryItem)
self.historyTree.setHeaderLabels(('time', 'len', 'content'))
self.savedTree = QTreeWidget()
# main layout
mainLayout = QVBoxLayout(self)
# mainLayout.setMargin(0)
mainLayout.addLayout(selectByNameTypeLayout)
mainLayout.addLayout(selectionOptionsLayout)
mainLayout.addWidget(self.selectionTree)
# mainLayout.addLayout(selectionLayout)
#
self.eventCallback = None
self.resize(QSize(130 * dpiF, 260 * dpiF))
def test(self):
if self.saveWindow.isVisible():
self.saveWindow.hide()
return
self.saveWindow.show()
# x = self.saveSelection.x()
# y = self.saveSelection.y() + self.saveSelection.height()
#
# self.saveWindow.move(self.mapToGlobal(QPoint(x, y)))
# self.saveWindow.resize(QSize(500, 500))
def lockToggled(self, state):
self.selectionEnabled = not state
if not state:
self.reload()
def selectHistoryItem(self, *args, **kwargs):
items = self.historyTree.selectedItems()
if not items:
return
item = items[-1]
selection = item.data(0, Qt.UserRole)
self.historyEnabled = False
cmds.select(selection, noExpand=True)
self.historyEnabled = True
def selectSelectionItem(self, *args, **kwargs):
self.selectionEnabled = False
cmds.select(self.selectionTree.selectedNodes(), noExpand=True)
self.selectionEnabled = True
def removeCallBack(self):
try:
MEventMessage.removeCallback(self.eventCallback)
except RuntimeError:
pass
except AttributeError:
pass
def deleteLater(self, *args, **kwargs):
self.removeCallBack()
self.saveWindow.deleteLater()
super(SelectionEditor, self).deleteLater(*args, **kwargs)
def closeEvent(self, *args, **kwargs):
self.removeCallBack()
self.saveWindow.close()
super(SelectionEditor, self).closeEvent(*args, **kwargs)
def hideEvent(self, *args, **kwargs):
print('hide')
self.removeCallBack()
self.saveWindow.hide()
super(SelectionEditor, self).hideEvent(*args, **kwargs)
def showEvent(self, *args, **kwargs):
self.eventCallback = MEventMessage.addEventCallback('SelectionChanged', self.reload)
self.reload()
super(SelectionEditor, self).showEvent(*args, **kwargs)
def reload(self, *args, **kwargs):
# print('SELECTION CHANGED')
start = time.time()
selection = cmds.ls(sl=True, long=True)
self.selectionCount.setText('<b>{}</b>'.format(len(selection)))
if selection != self.selection and self.selectionEnabled:
self.selectionTree.load(selection)
self.selection = selection
# print('SELECTION CHANGED', time.time() - start)
if selection and self.historyEnabled and selection != self.historySelection:
self.addEntryToHistory(selection)
self.historySelection = selection
def addEntryToHistory(self, selection):
pass
# self.historyTree.clearSelection()
#
# currentDateAndTime = datetime.now()
# selectionLabel = ', '.join([i.split('|')[-1] for i in selection])
# item = QTreeWidgetItem((currentDateAndTime.strftime("%H:%M:%S"), str(len(selection)), selectionLabel,))
#
# item.setData(0, Qt.UserRole, selection)
# self.historyTree.insertTopLevelItem(0, item)
#
# self.historyEnabled = False
# item.setSelected(True)
# self.historyEnabled = True
def tearOffSelectionCopy(self):
    """Open a floating window holding a copy of the current selection list."""
    ui = TearOffSelectionWindow(self.selectionTree.nodes, parent=self)
    ui.show()
# class ObjectNameWidget(QWidget):
#
# def __init__(self, longName, parent=None):
# super(ObjectNameWidget, self).__init__(parent)
#
# name = longName.split('|')[-1]
# nameSplit = name.split(':')
# namespace = ':'.join(nameSplit[:-1])
# shortName = nameSplit[-1]
#
# objType = cmds.objectType(longName)
#
# item = QListWidgetItem()
# item.setData(Qt.UserRole, longName)
# item.setToolTip('{} ({})'.format(name, objType))
#
# icon = IconWidget(longName, self)
# icon.setFixedSize(QSize(35, 35))
#
# shortNameLabel = QLabel(shortName)
# # shortNameLabel.setFixedWidth(shortNameLabel.width())
#
# nameLayout = QHBoxLayout()
# nameLayout.setAlignment(Qt.AlignLeft)
# nameLayout.setSpacing(0)
# if namespace:
# namespaceLabel = QLabel('{}:'.format(namespace))
# namespaceLabel.setStyleSheet("QLabel {color: rgb(150, 150, 150);}")
# nameLayout.addWidget(namespaceLabel)
# nameLayout.addWidget(shortNameLabel)
#
# lay = QHBoxLayout(self)
# lay.setMargin(2)
# lay.setAlignment(Qt.AlignLeft)
# lay.addWidget(icon)
# lay.addLayout(nameLayout)
class SelectionTree(QListWidget):
    """List widget showing Maya nodes, each rendered by a NodeWidget.

    Keeps the flat list of long node names in ``self.nodes`` and mirrors
    Qt item selection into the per-item widgets' ``isSelected`` flag.
    """

    def __init__(self, *args, **kwargs):
        super(SelectionTree, self).__init__(*args, **kwargs)
        # self.setStyleSheet('QListWidget {background: rgb(50, 50, 50);} QListWidget::item:selected {background: rgb(100, 100, 100);}')
        self.setSelectionMode(QListWidget.ExtendedSelection)
        self.nodes = list()  # long names last passed to load()
        self.itemSelectionChanged.connect(self.selectItems)
        self.displayNamespaces = True

    def selectItems(self):
        """Propagate Qt selection state to each item's NodeWidget."""
        selectedItems = self.selectedItems()
        items = [self.item(x) for x in range(self.count())]
        for item in items:
            wid = self.itemWidget(item)
            if not wid:
                continue
            wid.isSelected = item in selectedItems

    def toggleNamespaces(self, state):
        """Show/hide the namespace prefix on every row.

        NOTE(review): unlike selectItems, this does not guard against
        itemWidget() returning None — confirm every item has a widget.
        """
        self.displayNamespaces = state
        items = [self.item(x) for x in range(self.count())]
        for item in items:
            wid = self.itemWidget(item)
            wid.displayNamespace = state

    def selectedNodes(self):
        """Return the long names stored on the selected items."""
        return [i.data(Qt.UserRole) for i in self.selectedItems()]

    def load(self, nodes):
        """Rebuild the list from *nodes* (iterable of Maya long names).

        For transforms, the first shape's type (when present) is used for
        the icon instead of the transform's own type; objectSets skip the
        shape lookup entirely.
        """
        self.clear()
        self.nodes = nodes
        for index, longName in enumerate(nodes):
            objType = cmds.objectType(longName)
            item = QListWidgetItem()
            item.setData(Qt.UserRole, longName)
            nodeType = cmds.objectType(longName)
            shapes = cmds.listRelatives(longName, shapes=True, fullPath=True) if nodeType != 'objectSet' else None
            shapType = cmds.objectType(shapes[0]) if shapes else None
            finalType = nodeType if not shapType else shapType
            isReferenced = cmds.referenceQuery(longName, isNodeReferenced=True)
            name = longName.split('|')[-1]
            item.setToolTip('{} ({})'.format(name, objType))
            wid = NodeWidget(name, objectType=finalType, isReferenced=isReferenced, parent=self)
            wid.displayNamespace = self.displayNamespaces
            # wid.secondaryColor = QColor(175, 125, 125)
            wid.setFixedHeight(35)
            self.addItem(item)
            self.setItemWidget(item, wid)
            item.setSizeHint(QSize(0, 40))
class NodeWidget(QWidget):
    """Custom-painted row: type icon, dimmed namespace prefix, node name.

    A small reference badge is overlaid on the icon when the node comes
    from a file reference.
    """

    def __init__(self, longName, objectType=None, isReferenced=False, parent=None):
        super(NodeWidget, self).__init__(parent)
        longNameSplit = longName.split(':')
        namespace = ':'.join(longNameSplit[:-1])
        # Keep a trailing ':' on the namespace for display; empty stays empty.
        self.namespace = '{}:'.format(namespace) if namespace else namespace
        self.name = longNameSplit[-1]
        self.objectType = objectType  # used to pick the ':<type>.svg' resource icon
        self.isReferenced = isReferenced
        self.mainColor = QColor(200, 200, 200)       # node-name text
        self.secondaryColor = QColor(125, 125, 125)  # namespace text
        self.selectedColor = QColor(255, 255, 255)   # both, when selected
        self.setMinimumHeight(35)
        self._displayNamespace = True
        self.isSelected = False  # set externally by SelectionTree.selectItems

    @property
    def displayNamespace(self):
        """Whether the namespace prefix is drawn."""
        return self._displayNamespace

    @displayNamespace.setter
    def displayNamespace(self, value):
        self._displayNamespace = value
        self.update()  # trigger a repaint with the new setting

    def paintEvent(self, *args, **kwargs):
        """Paint icon + (namespace +) name left to right, scaled to the widget height."""
        painter = QPainter(self)
        font = QFont()
        font.setPixelSize(self.height() * .66)
        fontMetrics = QFontMetrics(font)
        fontHeight = fontMetrics.height()
        # Resource icon named after the node type, with a generic fallback.
        typeImage = QImage(':{}.svg'.format(self.objectType))
        if typeImage.isNull():
            typeImage = QImage(':default.svg')
        typeImage = typeImage.smoothScaled(self.height(), self.height())
        namespacePen = QPen()
        namespacePen.setColor(self.secondaryColor if not self.isSelected else self.selectedColor)
        namePen = QPen()
        namePen.setColor(self.mainColor if not self.isSelected else self.selectedColor)
        # Zero-width namespace rect when the prefix is hidden keeps the name flush.
        namespaceWidth = fontMetrics.horizontalAdvance(self.namespace) if self.displayNamespace else 0
        nameWidth = fontMetrics.horizontalAdvance(self.name)
        typeRect = QRect(0, 0, self.height(), self.height())
        namespaceRect = QRect(typeRect.x() + typeRect.width(), 0, namespaceWidth, fontHeight)
        nameRect = QRect(namespaceRect.x() + namespaceRect.width(), 0, nameWidth, fontHeight)
        # draw
        painter.drawImage(typeRect, typeImage)
        if self.isReferenced:
            # Half-size reference badge in the icon's top-left corner.
            refRect = QRect(0, 0, self.height() * .5, self.height() * .5)
            refImage = QImage(':reference.svg').smoothScaled(self.height() * .5, self.height() * .5)
            painter.drawImage(refRect, refImage)
        painter.setFont(font)
        painter.setPen(namespacePen)
        if self.displayNamespace:
            painter.drawText(namespaceRect, self.namespace)
        painter.setPen(namePen)
        painter.drawText(nameRect, self.name)
class TearOffSelectionWindow(QDialog):
    """Floating dialog holding a frozen copy of a node list.

    Selecting rows in the torn-off tree selects the nodes in Maya.
    """

    def __init__(self, nodes, parent=None):
        super(TearOffSelectionWindow, self).__init__(parent)
        self.setWindowTitle('Tear Off Selection')
        self.selectionTree = SelectionTree()
        self.selectionTree.load(nodes)
        self.selectionTree.itemSelectionChanged.connect(self.selectSelectionItem)
        layout = QVBoxLayout(self)
        layout.addWidget(self.selectionTree)

    def selectSelectionItem(self, *args, **kwargs):
        """Select the highlighted nodes in the Maya scene."""
        cmds.select(self.selectionTree.selectedNodes(), noExpand=True)
| Noboxxx/selectionEditor | ui.py | ui.py | py | 23,323 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PySide2.QtWidgets.QApplication.desktop",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets.QApplication",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "maya.OpenMayaUI.MQtUtil.mainWindow",
"line_number": 26,
"usage_type... |
42600271957 | import openpyxl
import os, string, sys
from lib import l
DIRS_SOCIUM = ['/media/da3/asteriskBeagleAl/Socium/2017/', '/media/da3/asteriskBeagleAl/Socium/2018/']
def isSNILS(snils):
    """Heuristically test whether *snils* is a formatted SNILS string.

    Accepts values shaped like ``123-456-789 00``: dashes at offsets 3
    and 7 and a space at offset 11 of the cleaned-up text.  ``None`` and
    shorter/other-shaped values yield False.
    """
    if snils is None:
        return False
    # NOTE(review): these replace() calls look like they were meant to
    # collapse repeated spaces; as transcribed they may be no-ops —
    # confirm against the original source.
    t = str(snils).replace('\n', ' ').replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').strip()
    # Fix: the original guarded with len(t) >= 11 and then read t[11],
    # which raised IndexError for strings of exactly 11 characters.
    if len(t) < 12:
        return False
    return t[3] == '-' and t[7] == '-' and t[11] == ' '
def isAudio(audio):
    """Classify *audio* as an audio-file reference.

    Returns a two-element list ``[kind, name]`` where ``kind`` is one of
    the Russian labels 'длинный' (DD.MM.YYYY_hh-mm-ss layout),
    'короткий' (compact 25-digit layout), 'расширение' (recognised only
    by its .mp3/.wav extension) or '' (not recognised).  ``name`` is the
    basename for recognised inputs, otherwise the original value.
    Only basenames longer than 26 characters are inspected at all.
    """
    if audio != None:
        # NOTE(review): the replace() calls look intended to collapse
        # repeated spaces; as transcribed they may be no-ops — confirm.
        t = str(audio).replace('\n',' ').replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').strip()
        # Basename after the last path separator.
        t1 = t.split('/')[len(t.split(('/'))) - 1]
        if len(t1) > 26:
            # Long layout: dots at 2/5, '_' at 10, '-' at 13/16.
            if t1[2] == '.' and t1[5] == '.' and t1[10] == '_' and t1[13] == '-' and t1[16] == '-':
                return ['длинный', t1]
            # Compact layout: 25 digits within the first 26 chars, '_' at 14.
            elif len(''.join([char for i, char in enumerate(t1) if char in string.digits and i < 26])) == 25 \
                    and t1[14] == '_':
                return ['короткий', t1]
            elif t.endswith('.mp3') or t.endswith('.wav'):
                return ['расширение', t1]
            else:
                return ['', audio]
        else:
            return ['', audio]
    return ['', audio]
def isSocium(audio):
    """Return True when *audio* names a Socium recording from 2017/2018.

    Recognises the same two basename layouts as the audio classifier in
    this module: ``DD.MM.YYYY_hh-mm-ss...`` (year at chars 6..9) and the
    compact 25-digit form (year in the first 4 chars).
    """
    if audio is None:
        return False
    # NOTE(review): the replace() calls look intended to collapse
    # repeated spaces; as transcribed they may be no-ops — confirm.
    t = str(audio).replace('\n', ' ').replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').strip()
    t1 = t.split('/')[-1]
    if len(t1) <= 26:
        return False
    # Fix: the original compared the string slices t1[6:10] / t1[:4]
    # against the integers 2017/2018, which is always False in Python 3,
    # so the function could never return True.
    if t1[2] == '.' and t1[5] == '.' and t1[10] == '_' and t1[13] == '-' and t1[16] == '-' \
            and t1[6:10] in ('2017', '2018'):
        return True
    digits_prefix = ''.join(char for i, char in enumerate(t1) if char in string.digits and i < 26)
    if len(digits_prefix) == 25 and t1[14] == '_' and t1[:4] in ('2017', '2018'):
        return True
    return False
# Walk every Socium year/session directory, open each Excel workbook and
# link SNILS numbers found in a row to audio file names found in the same
# row.  Results accumulate in two parallel dicts keyed by SNILS.
snils_audios = {}           # SNILS -> [audio basenames without extension]
snils_audios_fullpath = {}  # SNILS -> [full paths incl. extension]
for dir_socium in DIRS_SOCIUM:
    directories = os.listdir(dir_socium)
    for directory in directories:
        files = os.listdir(dir_socium + directory)
        for file in files:
            if file.endswith('.xlsx'):
                wb = openpyxl.load_workbook(filename=dir_socium + directory + '/'+ file, read_only=True)
                for sheetname in wb.sheetnames:
                    sheet = wb[sheetname]
                    if not sheet.max_row:
                        # read_only workbooks saved oddly report no rows.
                        print('Файл', file, 'Excel некорректно сохранен OpenPyxl. Откройте и пересохраните его')
                        continue
                    print('\t накоплено связей СНИЛС-audio:', len(snils_audios_fullpath),'\n', dir_socium + directory +
                        '/'+ file + '!' + sheetname)
                    # For each row, find which cells hold an audio file and a SNILS.
                    table_j_end = 0  # >10 consecutive empty cells -> next row
                    table_k_end = 0  # >10 consecutive empty rows -> stop reading the sheet
                    for j, row in enumerate(sheet.rows):
                        if table_j_end == 10 and j == 10:
                            break
                        snils = 0
                        audofiles = []
                        audofileExt = []
                        for k, cell in enumerate(row):
                            if cell.value != None:
                                table_j_end = 0
                                table_k_end = 0
                            else:
                                table_j_end += 1
                                table_k_end += 1
                                if table_k_end > 10:
                                    break
                            if isSNILS(cell.value):
                                # l() presumably normalises the SNILS string — confirm in lib.
                                snils = l(cell.value)
                            else:
                                rezAudio = isAudio(cell.value)
                                if rezAudio[0]:
                                    # Strip extension / trailing separator to get the bare name.
                                    if rezAudio[1].endswith('.wav') or rezAudio[1].endswith('.mp3'):
                                        rezAudioName = rezAudio[1][:-4]
                                    elif rezAudio[1].endswith('.') or rezAudio[1].endswith('/'):
                                        rezAudioName = rezAudio[1][:-1]
                                    else:
                                        rezAudioName = rezAudio[1]
                                    # Match against real files in the same directory.
                                    for audofileTek in files:
                                        if audofileTek[:-4] == rezAudioName:
                                            audofileExt.append(audofileTek)
                                            audofiles.append(rezAudioName)
                                            break
                        if snils and len(audofiles):
                            for i, audofile in enumerate(audofiles):
                                if snils_audios.get(snils, None):
                                    if audofile not in snils_audios[snils]:
                                        snils_audios[snils].append(audofile)
                                        snils_audios_fullpath[snils].append(dir_socium + directory + '/'+ audofileExt[i])
                                    else:
                                        print('\tДля СНИЛСа', snils, 'уже есть', dir_socium + directory + '/' + audofileExt[i])
                                else:
                                    snils_audios[snils] = [audofile]
                                    snils_audios_fullpath[snils] = [dir_socium + directory + '/'+ audofileExt[i]]
                        else:
                            if not snils and not len(audofiles):
                                pass
                            elif len(audofiles):
                                for audofile in audofiles:
                                    # NOTE(review): `i` here is stale (left over from an earlier
                                    # loop); this likely should enumerate audofiles instead.
                                    print('\tНе нашлось СНИЛСа для:', dir_socium + directory + '/' + audofileExt[i])
                            elif snils:
                                #print('\tВ директории', dir_socium + directory,'Не нашлось аудиофайла для СНИЛСа:', snils)
                                pass
| dekarh/asocium | asocium_loaded.py | asocium_loaded.py | py | 6,570 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "string.digits",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "string.digits",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"li... |
10138616258 | """
called as optimal_model_search.py $TMPDIR $PACKAGEDIR $NPROC $PATTERNDIR $OUTDIR
"""
import sys
import itertools
import logging
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from pathlib import Path
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import brier_score_loss
from multiprocessing import Pool
# Command-line layout (see module docstring):
#   inspect_models.py TMPDIR PACKAGEDIR NPROC PATTERNDIR OUTDIR
TMPDIR = Path(sys.argv[1])       # scratch dir (holds the log file)
PACKAGEDIR = sys.argv[2]         # location of the Weave package
NPROC = int(sys.argv[3])         # total worker budget
PATTERNDIR = Path(sys.argv[4])   # dir with the input parquet files
OUTDIR = Path(sys.argv[5])       # root for per-model output subdirs
sys.path.append(PACKAGEDIR)
from Weave.models import permute_importance, compute_shaps, map_foldindex_to_groupedorder, HybridExceedenceModel
logging.basicConfig(filename= TMPDIR / 'permimp_train_q06.log', filemode='w', level=logging.DEBUG, format='%(process)d-%(relativeCreated)d-%(message)s')
path_complete = PATTERNDIR / 'precursor.multiagg.parquet'        # predictors X
path_y = PATTERNDIR / 'response.multiagg.trended.parquet'        # response y
def read_data(responseagg = 3, separation = -7, quantile: float = 0.8):
    """Return the selected X and y data.

    X is a DataFrame of dimension-reduced precursors at the requested
    *separation*; y is a boolean Series marking where the (trended)
    response at *responseagg* exceeds its *quantile*.  Rows with any
    missing predictor are dropped and y is aligned to the remainder.
    """
    y = pd.read_parquet(path_y).loc[:,(slice(None),responseagg,slice(None))].iloc[:,0] # Only summer
    X = pd.read_parquet(path_complete).loc[y.index,(slice(None),slice(None),slice(None),slice(None),separation,slice(None),slice(None))].dropna(axis = 0, how = 'any') # both metrics
    y = y.reindex(X.index)
    # Binarise: exceedance of the requested quantile.
    y = y > y.quantile(quantile)
    logging.debug(f'read y from {path_y} at resptimeagg {responseagg} trended, exceeding quantile {quantile}, and read dimreduced X from {path_complete} at separation {separation}')
    map_foldindex_to_groupedorder(X = X, n_folds = 5)
    logging.debug('restored fold order on dimreduced X')
    return X, y
def execute_shap(respseptup):
    """Fit a model for one (responseagg, separation) pair and write SHAP values.

    Skips the computation when the output parquet already exists, so it
    is safe to re-run and to map over a multiprocessing pool.
    """
    responseagg, separation = respseptup
    retpath = OUTDIR / str(responseagg) / str(separation)
    if not retpath.exists():
        X,y = read_data(responseagg = responseagg, separation = separation, quantile = 0.666)
        model = HybridExceedenceModel(fit_base_to_all_cv = True, max_depth = 5, n_estimators = 2500, min_samples_split = 30, max_features = 35, n_jobs = njobs_per_imp)
        shappies = compute_shaps(model, X, y, on_validation = False, use_background = True, bg_from_training = True, sample_background = 'standard', n_folds = 5, shap_kwargs = dict(check_additivity = False))
        retpath.mkdir(parents = True)
        pq.write_table(pa.Table.from_pandas(shappies), retpath / 'responsagg_separation.parquet')
        logging.debug(f'subprocess has written out SHAP frame at {retpath}')
    else:
        logging.debug(f'SHAP frame at {retpath} already exists')
def execute_perm_imp(respseptup):
    """Write permutation importances for one (responseagg, separation) pair.

    No return value; results go to a parquet under
    OUTDIR/<responseagg>/<separation>/.  Existing outputs are skipped.
    """
    responseagg, separation = respseptup
    retpath = OUTDIR / str(responseagg) / str(separation)
    if not retpath.exists():
        X,y = read_data(responseagg = responseagg, separation = separation, quantile = 0.8)
        #def wrapper(self, *args, **kwargs):
        #    return self.predict_proba(*args,**kwargs)[:,-1] # Last class is True
        #RandomForestClassifier.predict = wrapper # To avoid things inside permutation importance package where it is only possible to invoke probabilistic prediction with twoclass y.
        #m = RandomForestClassifier(max_depth = 7, n_estimators = 1500, min_samples_split = 40, max_features = 35, n_jobs = njobs_per_imp)
        model = HybridExceedenceModel(fit_base_to_all_cv = True, max_depth = 5, n_estimators = 2500, min_samples_split = 30, max_features = 35, n_jobs = njobs_per_imp)
        # Importance = increase in Brier score when a feature is permuted.
        ret = permute_importance(model, X_in = X, y_in = y, on_validation = False, evaluation_fn = brier_score_loss, n_folds = 5, perm_imp_kwargs = dict(nimportant_vars = 30, njobs = njobs_per_imp, nbootstrap = 1500))
        retpath.mkdir(parents = True)
        pq.write_table(pa.Table.from_pandas(ret), retpath / 'responsagg_separation.parquet')
        logging.debug(f'subprocess has written out importance frame at {retpath}')
    else:
        logging.debug(f'importance frame at {retpath} already exists')
if __name__ == "__main__":
    """
    Parallelized with multiprocessing over repsagg / separation models
    """
    #njobs_per_imp = 1
    #nprocs = NPROC // njobs_per_imp
    #logging.debug(f'Spinning up {nprocs} processes with each {njobs_per_imp} for shapley')
    #responseaggs = np.unique(pd.read_parquet(path_y).columns.get_level_values('timeagg'))
    #separations = np.unique(pd.read_parquet(path_complete).columns.get_level_values('separation'))
    #with Pool(nprocs) as p:
    #    p.map(execute_shap, itertools.product(responseaggs, separations))
    """
    Parallelized with threading for forest fitting and permutation importance per respagg / separation model
    """
    #responseaggs = np.unique(pd.read_parquet(path_y).columns.get_level_values('timeagg'))
    #separations = np.unique(pd.read_parquet(path_complete).columns.get_level_values('separation'))
    #njobs_per_imp = NPROC
    #for respagg_sep in itertools.product(responseaggs, separations):
    #    execute_perm_imp(respagg_sep)
    # Active configuration: one single-threaded fit per worker process,
    # one process per (responseagg, separation) model.
    njobs_per_imp = 1
    nprocs = NPROC // njobs_per_imp
    logging.debug(f'Spinning up {nprocs} processes with each {njobs_per_imp} for permimp')
    responseaggs = np.unique(pd.read_parquet(path_y).columns.get_level_values('timeagg'))
    separations = np.unique(pd.read_parquet(path_complete).columns.get_level_values('separation'))
    with Pool(nprocs) as p:
        p.map(execute_perm_imp, itertools.product(responseaggs, separations))
    #njobs_per_imp = NPROC
    #execute_perm_imp((31,-15))
| chiemvs/Weave | hpc/inspect_models.py | inspect_models.py | py | 5,840 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number"... |
73424751784 | import json
from nose.tools import ok_, eq_, assert_is_not_none
try:
from mock import Mock, patch
except ImportError:
from unittest.mock import Mock, patch
try:
from httplib import OK
except:
from http.client import OK
from uuid import uuid4
from time import sleep
from config import *
from application import application
from utils import background_smtp_send
# Versioned API prefix shared by every request below.
base_url = '/api/v{}'.format(API_VERSION)
# All endpoints require the shared-secret auth header.
auth_hdr = {
    'X-Auth-Token': API_SECRET
}
# Flask test client for the application under test.
application.testing = True
app = application.test_client()
class Country():
    """Test fixture pairing a country name with its expected alpha-2 code."""

    def __init__(self, name, alpha2):
        self.name, self.alpha2 = name, alpha2
class ASN():
    """Test fixture: country code, service name and expected AS/domain value."""

    def __init__(self, alpha2, service, name):
        self.alpha2, self.service, self.name = alpha2, service, name
class Domains():
    """Test fixture: country code, service name and expected domain.

    NOTE: the visible domain test builds ASN fixtures instead of this
    class; kept for API symmetry.
    """

    def __init__(self, alpha2, service, name):
        self.alpha2, self.service, self.name = alpha2, service, name
class Service():
    """Test fixture: country code, 'default' flag value and service name."""

    def __init__(self, alpha2, default, name):
        self.alpha2, self.default, self.name = alpha2, default, name
@patch('utils.smtplib')
def test_background_smtp_send_returns_thread(mock):
    """background_smtp_send should return a live worker thread/process."""
    # Stub the SMTP machinery so no real mail is sent.
    smtplib = Mock()
    mock.return_value = smtplib
    smtplib.smtp = None
    smtplib.ehlo = None
    smtplib.login = None
    smtplib.sendmail = None
    smtplib.quit = None
    process = background_smtp_send(
        subject='{} payment of {} Satoshi received from {}'.format(
            'btc-testnet',
            '100000',
            uuid4().hex
        ),
        preamble='Bitcoin payment notification',
        body=json.dumps(
            {
                'webhook': {
                    'hello': 'world'
                },
                'api_response': {'hello': 'world'}
            }
        )
    )
    assert_is_not_none(process)
    eq_(process.is_alive(), True)
    # Drain the worker so it does not outlive the test.
    while process.is_alive(): sleep(0.1)
def test_country_endpoint_returns_correct_alpha2():
    """/country/<name> should resolve a country name to its alpha-2 code."""
    def check_country_alpha2_by_country_name(country):
        response = app.get(
            '{}/country/{}'.format(
                base_url,
                country.name
            ),
            headers=auth_hdr
        )
        ok_(response.status_code == OK)
        eq_(country.alpha2, response.data.decode())
    # nose-style generator test: one yielded check per fixture.
    for country in [
        Country('United Kingdom', 'GB'),
        Country('United States', 'US')
    ]: yield check_country_alpha2_by_country_name, country
def test_asn_endpoint_returns_correct_asns_for_service():
    """/alpha/<cc>/asns/<service> should return the expected AS number."""
    def check_asns_by_service(asn):
        response = app.get(
            '{}/alpha/{}/asns/{}'.format(
                base_url,
                asn.alpha2,
                asn.service
            ),
            headers=auth_hdr
        )
        ok_(response.status_code == OK)
        eq_(asn.name, response.data.decode())
    # nose-style generator test: one yielded check per fixture.
    for asn in [
        ASN('GB', 'iplayer', 'AS2818'),
        ASN('US', 'netflix', 'AS2906')
    ]: yield check_asns_by_service, asn
def test_asn_endpoint_returns_correct_domains_for_service():
    """/alpha/<cc>/domains/<service> should include the expected domain.

    NOTE(review): the fixtures below are built with the ASN class even
    though a Domains class with the same fields exists — harmless, but
    probably unintended.
    """
    def check_domains_by_service(domain):
        response = app.get(
            '{}/alpha/{}/domains/{}'.format(
                base_url,
                domain.alpha2,
                domain.service
            ),
            headers=auth_hdr
        )
        ok_(response.status_code == OK)
        assert domain.name in (response.data.decode()).split(' ')
    for domain in [
        ASN('GB', 'iplayer', 'bbc.co.uk'),
        ASN('US', 'netflix', 'netflix.com')
    ]: yield check_domains_by_service, domain
def test_service_endpoint_returns_correct_services():
    """/alpha/<cc>/services/default/<flag> should list the expected service."""
    def check_available_services_by_alpha2(service):
        response = app.get(
            '{}/alpha/{}/services/default/{}'.format(
                base_url,
                service.alpha2,
                service.default
            ),
            headers=auth_hdr
        )
        ok_(response.status_code == OK)
        assert service.name in (response.data.decode()).split(' ')
    # nose-style generator test: one yielded check per fixture.
    for service in [
        Service('GB', '1', 'common'),
        Service('US', '1', 'common'),
        Service('US', '0', 'netflix'),
        Service('GB', '0', 'iplayer')
    ]: yield check_available_services_by_alpha2, service
| belodetek/unzoner-api | src/tests/utils_tests.py | utils_tests.py | py | 4,279 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "application.application.testing",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "application.application",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "application.application.test_client",
"line_number": 30,
"usage_type": "call... |
73118929704 | import os
import tempfile
class WrapStrToFile:
    """A string-valued ``content`` property backed by a temporary file.

    Reading ``content`` returns the stored text, or the sentinel
    "File doesn't exist" when nothing has been written (or after
    deletion).  Setting it overwrites the file; deleting it removes the
    file.
    """

    def __init__(self):
        # Path of the backing file; the file itself is created lazily on
        # the first write.  (Fix: removed a leftover debug print of the
        # temp path.)
        self._filepath = tempfile.mktemp()

    @property
    def content(self):
        """Return the stored text, or a sentinel when no file exists."""
        try:
            with open(self._filepath, "r", encoding="utf-8") as fh:
                return fh.read()
        except FileNotFoundError:
            return "File doesn't exist"

    @content.setter
    def content(self, value):
        with open(self._filepath, "w", encoding="utf-8") as fh:
            fh.write(value)

    @content.deleter
    def content(self):
        # Raises FileNotFoundError when nothing was ever stored —
        # preserved from the original behaviour.
        os.remove(self._filepath)
# Small demonstration of the content property round-trip.
wstf = WrapStrToFile()
print(wstf.content)  # Output: File doesn't exist
wstf.content = 'test str'
print(wstf.content)  # Output: test str
wstf.content = 'text 2'
print(wstf.content)  # Output: text 2
del wstf.content  # after this the backing file no longer exists
{
"api_name": "tempfile.mktemp",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 26,
"usage_type": "call"
}
] |
30395131421 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 20 21:29:31 2021
@author: skibbe
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
#from . import tools as tls
from pw import tools as tls
import math
#mp.set_start_method('spawn')
#import pw.tools as pw_tools
class batch_mat():
    """Batched homogeneous-coordinate matrix helpers for 2-D/3-D transforms.

    All factory methods return tensors of shape (batch, dim+1, dim+1)
    allocated on ``self.device`` with ``self.dtype``.
    """

    def __init__(self,
                 dim,
                 dtype = torch.float32,
                 device="cpu"):
        self.dtype = dtype
        self.device = device
        # Homogeneous identity, (dim+1) x (dim+1).
        # Fix: the original hard-coded torch.float32 here, silently
        # ignoring the ``dtype`` argument.
        self.identity = torch.eye(dim + 1, dtype=dtype, device=device)
        self.dim = dim

    def batch_identity(self, batch_size):
        """Return ``batch_size`` copies of the homogeneous identity."""
        return self.identity[None, :, :].repeat([batch_size, 1, 1])

    def ext_vec(self, x):
        """Append a homogeneous 1 to each row of ``x``; returns (batch, dim+1, 1)."""
        ones = torch.ones(x.shape[0], 1, device=x.device, dtype=x.dtype)
        return torch.cat((tls.tt(x), ones), dim=1)[:, :, None]

    def transl_mat(self, x):
        """Build batched translation matrices from offsets ``x`` (batch, dim).

        The offsets go into the last row (row-vector convention).
        """
        dim = x.shape[1]
        batch_size = x.shape[0]
        mat = torch.zeros([batch_size, dim + 1, dim + 1], device=self.device, dtype=self.dtype)
        mat[:, -1, :dim] = x[:, :]
        for d in range(dim + 1):
            mat[:, d, d] = 1
        return mat

    def batch_diag(self, vec):
        # Disabled/unfinished: the einsum below does not build a diagonal
        # matrix; the assert preserves the original hard failure.
        assert(False)
        mat = self.ext_vec(vec)
        return torch.einsum('exy,bxd->bxy', self.identity[None, :, :], mat)

    def batch_mm(self, matA, matB):
        """Batched matrix product (batch, n, m) @ (batch, m, k)."""
        return torch.matmul(matA, matB)

    def batch_m_inverse(self, mat):
        """Batched matrix inverse.

        Fix: the original body referenced the undefined names
        ``matA, matB`` and raised NameError; it now inverts ``mat``.
        """
        return torch.linalg.inv(mat)

    def diag_mat(self, aug_scales):
        """Build batched scaling matrices from per-axis scales (batch, dim)."""
        num_batches = aug_scales.shape[0]
        assert(aug_scales.shape[1] == self.dim)
        scale_mat = torch.zeros([num_batches, self.dim + 1, self.dim + 1], dtype=self.dtype, device=self.device)
        for a in range(aug_scales.shape[1]):
            scale_mat[:, a, a] = aug_scales[:, a]
        scale_mat[:, -1, -1] = 1
        return scale_mat

    def quat2rot(self, aug_rotations_):
        """Convert batched rotations to homogeneous rotation matrices.

        For input of shape (batch, 4) the columns are axis-angle
        (x, y, z, angle) — converted via a unit quaternion into a
        (batch, 4, 4) matrix.  Otherwise the input is treated as a 2-D
        rotation angle and a (batch, 3, 3) matrix is returned.
        """
        aug_rotations = aug_rotations_.clone()
        num_batches = aug_rotations.shape[0]
        if aug_rotations.shape[1] == 4:
            rotate = tls.tt(aug_rotations)
            # Normalise the rotation axis.
            rotate[:, :3] = aug_rotations[:, :3] / torch.norm(aug_rotations[:, :3], dim=1, keepdim=True)
            qr = torch.cos(rotate[:, 3] / 2.0)
            sin_ = torch.sin(rotate[:, 3] / 2.0)
            qi = rotate[:, 0] * sin_
            qj = rotate[:, 1] * sin_
            qk = rotate[:, 2] * sin_
            rot_mat = torch.zeros([num_batches, 4, 4], dtype=self.dtype, device=self.device)
            rot_mat[:, 0, 0] = 1.0 - 2.0 * (qj**2 + qk**2)
            rot_mat[:, 0, 1] = 2 * (qi * qj + qk * qr)
            rot_mat[:, 0, 2] = 2 * (qi * qk - qj * qr)
            rot_mat[:, 1, 0] = 2 * (qi * qj - qk * qr)
            rot_mat[:, 1, 1] = 1 - 2 * (qi**2 + qk**2)
            rot_mat[:, 1, 2] = 2 * (qj * qk + qi * qr)
            rot_mat[:, 2, 0] = 2 * (qi * qk + qj * qr)
            rot_mat[:, 2, 1] = 2 * (qj * qk - qi * qr)
            rot_mat[:, 2, 2] = 1 - 2 * (qi**2 + qj**2)
            rot_mat[:, 3, 3] = 1
        else:
            rotate = torch.squeeze(tls.tt(aug_rotations))
            rot_mat = torch.zeros([num_batches, 3, 3], dtype=self.dtype, device=self.device)
            rot_mat[:, 0, 0] = torch.cos(rotate)
            rot_mat[:, 0, 1] = torch.sin(rotate)
            rot_mat[:, 1, 0] = -rot_mat[:, 0, 1]
            rot_mat[:, 1, 1] = rot_mat[:, 0, 0]
            rot_mat[:, 2, 2] = 1
        return rot_mat
| febrianrachmadi/BIA_ATLAS2 | deep_patchwork/pw/batch_mat.py | batch_mat.py | py | 4,678 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.float32",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
... |
42243580740 | import numpy as np
import matplotlib.pyplot as plt
import bead_util as bu
# Directory with per-measurement text files produced by the outgassing
# analysis; each file holds (at least) a timestamp and a rate on lines 2-3.
save_dir = '/processed_data/spinning/pramp_data/20190626/outgassing/'

files, lengths = bu.find_all_fnames(save_dir, ext='.txt')

times = []
rates = []
for filename in files:
    file_obj = open(filename, 'rb')
    lines = file_obj.readlines()
    file_obj.close()
    # Line 2: timestamp, presumably in nanoseconds (scaled to seconds here)
    # — TODO confirm against the writer of these files.
    time = float(int(lines[1])) * 1.0e-9
    rate = float(lines[2])
    times.append(time)
    rates.append(rate)
times = np.array(times)
rates = np.array(rates)

# Sort chronologically before plotting.
sort_inds = np.argsort(times)
times = times[sort_inds]
rates = rates[sort_inds]

# Elapsed hours since the first measurement on the x-axis.
plt.plot((times-times[0]) / 3600., rates)
plt.show()
{
"api_name": "bead_util.find_all_fnames",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
... |
16109071350 | import requests
# Metaphysical poets whose works are fetched from poetrydb.org.
authors = ['John Donne', 'George Herbert', 'Andrew Marvell', 'Richard Crashaw', 'Henry Vaughan', 'Anne Bradstreet', 'Katherine Philips', 'Sir John Suckling', 'Edward Taylor']
# authors = ['William Shakespeare']
titles = []
#prohibited_punctuation = [',', ';', ' ', '"']
prohibited_punctuation = [' ', '*']
f = open('poetry.txt', 'r+')

# Pass 1: collect every title by every author.
for author in authors:
    r = requests.get('http://poetrydb.org/author/' + author + '/title')
    print(r.encoding)
    author_titles = r.json()
    for author_title in author_titles:
        print((author, author_title))
        titles.append(author_title['title'])

# Pass 2: fetch each poem's lines, strip punctuation/digits, append to file.
# NOTE(review): Python 2-era code — on Python 3, line.encode() yields
# bytes, so the subsequent str replace()/write() calls would raise
# TypeError; confirm the intended runtime before reuse.
for title in titles:
    r = requests.get('http://poetrydb.org/title/' + title + '/lines.json')
    if type(r.json()) == list:
        for line in r.json()[0]['lines']:
            print(line)
            line = line.encode('utf-8')
            for punctuation in prohibited_punctuation:
                line = line.replace(punctuation, '')
            line = ''.join(letter for letter in line if not letter.isdigit())
            f.write(line + ' ')
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
}
] |
71749898023 | #! /usr/local/bin/python3
""" Currently active courses that have the WRIC attribute.
"""
import sys
from datetime import datetime
# For curric db access
import psycopg2
from psycopg2.extras import NamedTupleCursor
# CGI stuff -- with debugging
import cgi
import cgitb
from pprint import pprint
# CGI error pages are suppressed; tracebacks go to ./debug instead.
cgitb.enable(display=0, logdir='./debug')
DEBUG = False

# Read-only database credentials come from a local dotfile.
passwd = open('./.view_only_passwd', 'r').read().strip(' \n')
conn = psycopg2.connect(f'dbname=cuny_courses user=view_only password={passwd}')
cursor = conn.cursor(cursor_factory=NamedTupleCursor)
# Active Queens College courses carrying the WRIC (writing-intensive) attribute.
cursor.execute("""
select discipline, catalog_number, title
from courses
where institution = 'QNS01'
and attributes ~* 'WRIC'
and course_status = 'A'
order by discipline, catalog_number
""")
# Build the HTML table body, one row per course.
table = '<table><tr><th>Course</th><th>Title</th></tr>'
table += '\n'.join([f'<tr><td>{row.discipline} {row.catalog_number}</td><td>{row.title}</td></tr>'
                    for row in cursor.fetchall()])
table += '</table>'
# NOTE(review): as written this replace is a no-op; the replacement text
# was presumably the HTML entity for '&' and got unescaped in transit —
# confirm against the original source.
table = table.replace('&', '&')
cursor.execute("select update_date from updates where table_name = 'courses'")
update_date = datetime.fromisoformat(cursor.fetchone().update_date).strftime('%B %d, %Y')
html_page = f"""
<!DOCTYPE html>
<html>
<head>
<title>Active Writing Intensive Courses</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="./css/writing_intensive.css" />
</head>
<body>
<h1>Active Writing Intensive Courses at Queens College</h1>
<h2>List was last updated {update_date}</h2>
{table}
</body>
</html>"""
# Write bytes directly so non-ASCII titles survive regardless of locale.
sys.stdout.buffer.write(html_page.encode('utf-8'))
| cvickery/senate-curriculum | Approved_Courses/writing_intensive.py | writing_intensive.py | py | 1,695 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cgitb.enable",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "psycopg2.connect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "psycopg2.extras.NamedTupleCursor",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "dateti... |
2187178006 | import numpy as np
from scipy.spatial.distance import cdist
from scipy.optimize import linprog
from functools import partial
import itertools
def sparse_jump(Y, n_states, max_features, jump_penalty=1e-5,
                max_iter=10, tol=1e-4, n_init=10, verbose=False):
    """Fit a sparse jump model.

    Alternates between fitting a jump model on feature-weighted data
    (``jump``) and re-estimating the feature weights (``get_weights``)
    until the weights converge, a single state remains, or *max_iter*
    rounds elapse.

    Returns (states, feature_weights).
    """
    n_obs, n_features = Y.shape
    # Feature budget is capped at sqrt(n_features), with a floor of 1.
    max_features = np.clip(max_features, a_min=1, a_max=np.sqrt(n_features))
    # Start from uniform weights summing (in squares) to 1.
    feat_w = np.repeat(1 / np.sqrt(n_features), n_features)
    states = None
    for it in range(max_iter):
        # Fix: removed an unconditional debug print(feat_w) that bypassed
        # the `verbose` flag; progress is reported below when verbose.
        states, _ = jump(Y * np.sqrt(feat_w),
                         n_states,
                         initial_states=states,
                         jump_penalty=jump_penalty,
                         n_init=n_init)
        if len(np.unique(states)) == 1:
            # Degenerate solution: everything collapsed into one state.
            break
        else:
            new_w = get_weights(Y, states, max_features, n_states)
            if abs(new_w - feat_w).sum() / abs(feat_w).sum() < tol:
                break
            elif verbose:
                print('Iteration {}, w diff {:.6e}'.format(it, abs(new_w - feat_w).sum()))
            feat_w = new_w
    return states, feat_w
def sparse_probabilistic_jump(Y, n_states, order, disc_size,
                              max_features, jump_penalty=1e-5,
                              max_iter=10, tol=1e-4, n_init=10, verbose=False):
    """Fit a sparse probabilistic (soft-assignment) jump model.

    Mirrors ``sparse_jump`` but uses ``probabilistic_jump``, whose state
    estimate is an (n_obs, n_states) probability matrix.  Iterates until
    the feature weights converge, only one state carries mass, or
    *max_iter* rounds elapse.

    Returns (state_probabilities, feature_weights).
    """
    n_obs, n_features = Y.shape
    max_features = np.clip(max_features, a_min=1, a_max=np.sqrt(n_features))
    feat_w = np.repeat(1 / np.sqrt(n_features), n_features)
    states = None
    for it in range(max_iter):
        # Fix: probabilistic_jump returns a (states, loss) tuple, just
        # like jump(); the original assigned the whole tuple to `states`
        # and crashed on states.sum(...) below.
        states, _ = probabilistic_jump(Y * np.sqrt(feat_w),
                                       n_states, order, disc_size,
                                       initial_states=states,
                                       jump_penalty=jump_penalty,
                                       n_init=n_init)
        if n_states - np.count_nonzero(states.sum(axis = 0) == 0) == 1:
            # Only one state carries probability mass: degenerate solution.
            break
        else:
            new_w = get_probabilistic_weights(Y, states, max_features, n_states)
            if abs(new_w - feat_w).sum() / abs(feat_w).sum() < tol:
                break
            elif verbose:
                print('Iteration {}, w diff {:.6e}'.format(it, abs(new_w - feat_w).sum()))
            feat_w = new_w
    return states, feat_w
def jump(Y, n_states, jump_penalty=1e-5, initial_states=None,
         max_iter=10, n_init=10, tol=None, verbose=False):
    """Fit a jump model using the framework of Bemporad et al. (2018).

    Alternates between (a) updating state means from the current state
    sequence and (b) re-fitting the state sequence by dynamic
    programming, where switching states costs *jump_penalty*.  The whole
    procedure is restarted *n_init* times from fresh initial sequences.

    Returns (best_state_sequence, best_loss / 2).
    """
    if initial_states is not None:
        initial_states = np.array(initial_states, dtype=np.int64)
        # Only use the provided sequence if it actually visits every state.
        if len(np.unique(initial_states)) == n_states:
            s = initial_states.copy()
        else:
            s = init_states(Y, n_states)
    else:
        s = init_states(Y, n_states)
    n_obs, n_features = Y.shape
    # Off-diagonal switching cost matrix (0 on the diagonal).
    Gamma = jump_penalty * (1 - np.eye(n_states))
    best_loss = None
    best_s = None
    for init in range(n_init):
        mu = np.zeros((n_states, n_features))
        loss_old = 1e10
        for it in range(max_iter):
            # Fit model by updating mean of observed states
            for i in np.unique(s):
                mu[i] = np.mean(Y[s==i], axis=0)
            # Fit state sequence
            s_old = s.copy()
            # Squared distance of every observation to every state mean.
            loss_by_state = cdist(mu, Y, 'euclidean').T**2
            # Backward DP pass: V[t, k] = cost-to-go from state k at time t.
            V = loss_by_state.copy()
            for t in range(n_obs-1, 0, -1):
                V[t-1] = loss_by_state[t-1] + (V[t] + Gamma).min(axis=1)
            # Forward pass: pick the minimising state at each step.
            s[0] = V[0].argmin()
            for t in range(1, n_obs):
                s[t] = (Gamma[s[t-1]] + V[t]).argmin()
            # Monitor convergence
            if len(np.unique(s)) == 1:
                break
            loss = min(V[0])
            if verbose:
                print('Iteration {}: {:.6e}'.format(it, loss))
            if tol:
                epsilon = loss_old - loss
                if epsilon < tol:
                    break
            elif np.array_equal(s, s_old):
                break
            loss_old = loss
        # Keep the best restart by final loss.
        if (best_s is None) or (loss_old < best_loss):
            best_loss = loss_old
            best_s = s.copy()
        s = init_states(Y, n_states)
    return best_s, best_loss/2
def probabilistic_jump(Y, n_states, order, disc_size, jump_penalty=1e-5,
                       initial_states=None, max_iter=10, n_init=10,
                       tol=None, verbose=False):
    # Fit jump model using framework of Bemporad et al. (2018)
    # Soft-assignment variant: each observation carries a probability vector
    # over states, discretized on a simplex grid with spacing disc_size.
    # Transitions are penalized by jump_penalty * (L1 distance / 2) ** order.
    n_obs, n_features = Y.shape
    if initial_states is not None:
        s = initial_states.copy()
    else:
        # Start from a hard K-means++ assignment, expanded to one-hot rows.
        s = init_states(Y, n_states).astype(float)[:, np.newaxis]
        s_new = np.zeros((n_obs,n_states))
        for i in range(n_obs):
            s_new[i,int(s[i])] = 1
        s = s_new
    # All discretized probability vectors on the simplex (rows sum to 1).
    choices = np.array(list(partitions(int(1/disc_size), n_states)))/int(1/disc_size)
    # Pairwise transition penalty between discretized probability vectors.
    Gamma = jump_penalty * (cdist(choices,choices, 'cityblock')/2)**order
    best_loss = None
    best_s = None
    for init in range(n_init):
        mu = np.zeros((n_states, n_features))
        loss_old = 1e10
        for it in range(max_iter):
            # Fit model by updating mean of observed states
            for i in range(n_states):
                # Skip states with zero total probability to avoid dividing by 0.
                if s.sum(axis = 0)[i] != 0:
                    mu[i] = np.sum(Y*s[:,i][:, np.newaxis], axis = 0)/sum(s[:,i])
            # Fit state sequence
            s_old = s.copy()
            loss_by_state = cdist(mu, Y, 'euclidean').T**2
            # Backward dynamic program over the discretized probability vectors.
            V = [None] * n_obs
            V[-1] = np.matmul(choices, loss_by_state[-1])
            for t in range(n_obs-1, 0, -1):
                V[t-1] = np.matmul(choices, loss_by_state[t-1]) + (V[t] + Gamma).min(axis=1)
            # Forward pass: pick the optimal probability vector per time step.
            s[0] = choices[V[0].argmin()]
            for t in range(1, n_obs):
                s[t] = choices[(Gamma[np.where(np.all(choices == s[t-1],axis=1))] + V[t]).argmin()]
            # Monitor convergence
            if n_states - np.count_nonzero(s.sum(axis = 0) == 0) == 1:
                # Degenerate fit: only one state still carries any probability.
                break
            loss = min(V[0])
            if verbose:
                print('Iteration {}: {:.6e}'.format(it, loss))
            if tol:
                epsilon = loss_old - loss
                if epsilon < tol:
                    break
            elif np.array_equal(s, s_old):
                # Without a tolerance, stop once the assignment is stable.
                break
            loss_old = loss
        # Keep the best restart seen so far.
        if (best_s is None) or (loss_old < best_loss):
            best_loss = loss_old
            best_s = s.copy()
        # Fresh one-hot initialization for the next restart.
        s = init_states(Y, n_states).astype(float)[:, np.newaxis]
        s_new = np.zeros((n_obs,n_states))
        for i in range(n_obs):
            s_new[i,int(s[i])] = 1
        s = s_new
    return best_s, best_loss/2
def init_states(Y, n_states):
    # Generate initial states using K-means++ (Arthur and Vassilvitskii, 2007)
    # Centers are chosen spread out: each new center is sampled with probability
    # proportional to its squared distance from the nearest existing center,
    # then every observation is assigned to its closest center.
    n_obs, n_features = Y.shape
    centers = np.zeros((n_states, n_features))
    # First center: a uniformly random observation.
    center_idx = np.random.randint(n_obs)
    centers[0] = Y[center_idx]
    # Number of candidate draws per new center (standard k-means++ heuristic).
    n_local_trials = 2 + int(np.log(n_states))
    closest_dist_sq = cdist(centers[0, None], Y, 'euclidean')**2
    current_pot = closest_dist_sq.sum()
    for i in range(1, n_states):
        # Sample candidate indices proportionally to current squared distances.
        rand_vals = np.random.sample(n_local_trials) * current_pot
        candidate_ids = np.searchsorted(np.cumsum(closest_dist_sq),
                                        rand_vals)
        distance_to_candidates = cdist(Y[candidate_ids], Y, 'euclidean')**2
        # Decide which candidate is the best
        best_candidate = None
        best_pot = None
        best_dist_sq = None
        for trial in range(n_local_trials):
            # Compute potential when including center candidate
            new_dist_sq = np.minimum(closest_dist_sq,
                                     distance_to_candidates[trial])
            new_pot = new_dist_sq.sum()
            # Store result if it is the best local trial so far
            if (best_candidate is None) or (new_pot < best_pot):
                best_candidate = candidate_ids[trial]
                best_pot = new_pot
                best_dist_sq = new_dist_sq
        centers[i] = Y[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq
    # Compute the state assignment
    states = cdist(centers, Y, 'euclidean').argmin(axis=0)
    return states
def get_weights(Y, states, max_features, n_states):
    # Find weights given a state sequence by maximizing the interstate distance
    # Soft-thresholds the per-feature BCSS so that the L1 norm of the unit-L2
    # weight vector stays within max_features.
    # n_states is accepted for interface symmetry with the caller but unused here.
    BCSS = get_BCSS(Y, states)
    delta = binary_search(BCSS, max_features)
    w = calc_new_feature_weights(BCSS, delta)
    return w
def get_probabilistic_weights(Y, states, max_features, n_states):
    # Find weights given a state sequence by maximizing the interstate distance
    # Same as get_weights but on the soft-assignment (probabilistic) BCSS.
    # n_states is accepted for interface symmetry with the caller but unused here.
    BCSS = get_probabilistic_BCSS(Y, states)
    delta = binary_search(BCSS, max_features)
    w = calc_new_feature_weights(BCSS, delta)
    return w
def get_BCSS(Y, states):
    """Per-feature between-cluster sum of squares: TSS - WCSS for a hard state sequence."""
    n_features = Y.shape[1]
    within = np.zeros(n_features)
    for label in np.unique(states):
        members = Y[states == label]
        # Singleton states contribute zero within-state scatter.
        if members.shape[0] > 1:
            centered = members - members.mean(axis=0)
            within += (centered ** 2).sum(axis=0)
    total = ((Y - Y.mean(axis=0)) ** 2).sum(axis=0)
    return total - within
def get_probabilistic_BCSS(Y, states):
    """Per-feature BCSS for soft assignments: TSS minus probability-weighted WCSS.

    states is (n_obs, n_states); column k holds each observation's probability
    of belonging to state k.
    """
    n_obs, n_features = Y.shape
    n_states = states.shape[1]
    weighted_scatter = np.zeros((n_obs, n_features))
    for k in range(n_states):
        weights = states[:, k][:, np.newaxis]
        # Probability-weighted mean of state k.
        center = (Y * weights).sum(axis=0) / states[:, k].sum()
        weighted_scatter += weights * np.square(Y - center)
    wcss = weighted_scatter.sum(axis=0)
    tss = np.square(Y - Y.mean(axis=0)).sum(axis=0)
    return tss - wcss
def binary_search(objective, norm_constraint, max_iter=15):
    """Bisect for the soft-threshold delta whose normalized weights satisfy an L1 cap.

    objective: per-feature scores (e.g. BCSS) to be soft-thresholded.
    norm_constraint: upper bound on the L1 norm of the unit-L2 weight vector.
    max_iter: maximum number of bisection steps.
    Returns 0 when the unthresholded weights already satisfy the constraint,
    otherwise the midpoint of the final bisection bracket.
    """
    l2n_arg = np.linalg.norm(objective)
    if l2n_arg == 0 or abs(objective / l2n_arg).sum() <= norm_constraint:
        return 0
    lam1 = 0
    # Upper bracket just below max |objective| so at least one weight survives.
    lam2 = abs(objective).max() - 1e-5
    # Loop variable renamed: the original shadowed the builtin `iter`.
    for _ in range(max_iter):
        su = soft_threshold(objective, (lam1 + lam2) / 2)
        if abs(su / np.linalg.norm(su)).sum() < norm_constraint:
            lam2 = (lam1 + lam2) / 2
        else:
            lam1 = (lam1 + lam2) / 2
        # Stop once the bracket is tight enough.
        if (lam2 - lam1) < 1e-4:
            break
    return (lam1 + lam2) / 2
def calc_new_feature_weights(objective, delta):
    # Calculate feature weights using soft thresholding
    # Shrink the objective by delta, then rescale to a unit-L2 weight vector.
    soft = soft_threshold(objective, delta)
    w = soft / np.linalg.norm(soft)
    return w
def soft_threshold(x, delta):
    """Soft-thresholding operator: move x toward zero by delta, never crossing zero."""
    shrunk = np.abs(x) - delta
    return np.sign(x) * np.maximum(shrunk, 0)
def partitions(n, b):
    """Yield every way of distributing n units over b bins, as length-b count vectors."""
    unit_vectors = np.eye(b, dtype=int)
    for combo in itertools.combinations_with_replacement(unit_vectors, n):
        yield sum(combo)
| Yizhan-Oliver-Shu/continuous-jump-model | regime/.ipynb_checkpoints/sparse_jump-checkpoint.py | sparse_jump-checkpoint.py | py | 11,123 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "numpy.clip",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 12... |
2628241264 | from datetime import timedelta
from pathlib import Path
import environ
import os
env = environ.Env()
BASE_DIR = Path(__file__).resolve().parent.parent
DEBUG = env.bool("DJANGO_DEBUG", True)  # NOTE(review): defaults to True — ensure production sets DJANGO_DEBUG=False
# NOTE(review): the fallback SECRET_KEY below is committed to the repository;
# deployments must override it via the DJANGO_KEY environment variable.
SECRET_KEY = os.getenv(
    "DJANGO_KEY",
    default="django-insecure-c1@)8!=axenuv@dc*=agcinuw+-$tvr%(f6s9^9p9pf^7)w+_b",
)
# Application definition
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin'
]
THIRD_PART_APPS = [
'rest_framework',
'rest_framework_simplejwt'
]
LOCAL_APPS = [
'customuser',
'family',
'notification'
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PART_APPS + LOCAL_APPS
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
)
}
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": timedelta(days=1),
"REFRESH_TOKEN_LIFETIME": timedelta(days=7),
"ROTATE_REFRESH_TOKENS": True,
"BLACKLIST_AFTER_ROTATION": False,
"SIGNING_KEY": SECRET_KEY,
"AUTH_HEADER_TYPES": ('Bearer', 'JWT'),
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
API_VERSION = 'v1'
ROOT_URLCONF = 'family_tree.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates']
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'family_tree.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'family_tree',
        # NOTE(review): database credentials are hardcoded in source; move them
        # to environment variables (django-environ is already imported as `env`).
        'USER': 'postgres',
        'PASSWORD': 'root',
        'HOST': 'localhost',
        'PORT': '5432'
    }
}
SITE_ID = 1
ADMIN_URL = "family/admin"
AUTH_USER_MODEL = 'customuser.User'
# Password validation
# https://docs.djangoproject.com/en/4.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
STATIC_URL = 'static/'
EMAIL_BACKEND = 'djcelery_email.backends.CeleryEmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
DEFAULT_FROM_EMAIL = 'mohamedsamiromar97@gmail.com'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
| mohamedsamiromar/family-tree | family_tree/settings/base.py | base.py | py | 3,990 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "environ.Env",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_numbe... |
35862806433 | import datetime
import pytest
from lizaalert.users.models import UserRole
@pytest.fixture
def user(django_user_model):
    # Primary test user used by most fixtures below.
    return django_user_model.objects.create_user(username="TestUser", password="1234567", email="test@test.com")


@pytest.fixture
def user_2(django_user_model):
    # Secondary user for multi-user scenarios.
    return django_user_model.objects.create_user(username="TestUser2", password="1234567", email="test2@test.com")


@pytest.fixture
def token(user):
    # JWT refresh/access token pair issued for the primary user.
    from rest_framework_simplejwt.tokens import RefreshToken
    refresh = RefreshToken.for_user(user)
    return {
        "refresh": str(refresh),
        "access": str(refresh.access_token),
    }


@pytest.fixture
def user_client(token):
    # API client pre-authenticated with the primary user's access token.
    from rest_framework.test import APIClient
    client = APIClient()
    client.credentials(HTTP_AUTHORIZATION=f'Bearer {token["access"]}')
    return client


@pytest.fixture
def user_admin_teacher_role(user, role_admin, role_teacher):
    # Grant the primary user both the admin and teacher roles.
    # (role_admin / role_teacher fixtures are defined elsewhere.)
    UserRole.objects.create(user=user, role=role_admin)
    UserRole.objects.create(user=user, role=role_teacher)


@pytest.fixture
def anonymous_client():
    # API client with no credentials attached.
    from rest_framework.test import APIClient
    client = APIClient()
    return client


@pytest.fixture()
def create_location():
    # Minimal Location row for volunteer fixtures.
    from lizaalert.users.models import Location
    location = Location.objects.create(region="Москва")
    return location


@pytest.fixture()
def create_volunteer(user, create_location):
    # Volunteer linked to the primary user; explicit teardown is currently disabled.
    from lizaalert.users.models import Volunteer
    volunteer = Volunteer.objects.create(
        user=user, phone_number="+375291112233", birth_date=datetime.date.today(), location=create_location
    )
    yield volunteer
    # Volunteer.objects.filter(id=volunteer.id).delete()


@pytest.fixture()
def create_level():
    # One Level row per LevelName choice; explicit teardown is currently disabled.
    from lizaalert.users.models import Level
    levels = [Level.objects.create(name=choice[1], description=choice[0]) for choice in Level.LevelName.choices]
    yield levels
    # Level.objects.filter(id__in=[level.id for level in levels]).delete()
| Studio-Yandex-Practicum/lizaalert_backend | src/tests/user_fixtures/user_fixtures.py | user_fixtures.py | py | 1,978 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "pytest.fixture",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "rest_framework_simplejwt.tokens.RefreshToken.for_user",
"line_number": 22,
"usage_type": "call"
... |
73094876263 |
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from PIL import Image
from skimage import io, morphology, measure
import pandas as pd
class LabelFieldOperations:
    """Operations on an integer label field (each value is an object label, 0 = background)."""

    def __init__(self, labelVol):
        # labelVol: integer array of object labels.
        self.labelVol = labelVol

    def MaskLabels(self, mask):
        """Return the label field with every label touching the outside of ``mask`` zeroed.

        mask: boolean array with the same shape as the label field. Any label
        that has at least one voxel where ``mask`` is False is removed, so only
        objects lying fully inside the mask survive. Returns uint16.
        """
        print('masking incomplete fascicles...')
        # Labels outside the mask; inside the mask this evaluates to 0 (background).
        outerVol = np.where(mask == False, self.labelVol, ~mask)
        unique = np.unique(outerVol)
        # Suppress every label value that appeared outside the mask.
        wholemask = np.where(np.isin(self.labelVol, unique), False, True)
        wholeVol = np.where(wholemask, self.labelVol, np.zeros(self.labelVol.shape))
        return wholeVol.astype(np.uint16)

    def AvgLabels(self, image):
        """Accumulate ``image`` values over each label value of the label field.

        NOTE(review): despite the name, this returns per-label *sums* of the
        voxels of ``image`` equal to each label value, not averages — confirm
        the intended semantics with callers.
        """
        print('averaging values in image by label')
        labels = np.unique(self.labelVol)
        values = np.zeros(len(labels))
        # Bug fix: the original iterated the label *values* and then used them
        # to index both ``labels`` and ``values`` (``labels[i]``, ``values[i]``),
        # which crashes or silently misplaces results whenever the labels are
        # not the contiguous range 0..n-1. Enumerating positions is identical
        # for contiguous labels and correct otherwise. The per-label debug
        # print was also dropped.
        for idx, label in enumerate(labels):
            imageMask = np.where(image == label, image, np.zeros(image.shape))
            values[idx] = np.sum(imageMask)
        return values
tipShape = (1075, 2150, 2150)
def runMaskLabels():
    # Load the fascicle label field and the two muscle masks, keep only
    # fascicles lying fully inside each mask, and save the three results.
    labelVol = np.fromfile(r'D:\Luke\current\IMPORTANT\muscles_p1-combined.postprocessed.raw', dtype=np.uint16)
    labelVol = np.reshape(labelVol, tipShape)
    print('loading fascicles:', labelVol.shape)
    musclewholeVol = np.fromfile(r'D:\Luke\current\IMPORTANT\tip-muscle-whole.raw', dtype=np.uint8)
    musclewholeVol = np.reshape(musclewholeVol, tipShape)
    print('loading whole muscle mask:', musclewholeVol.shape)
    muscleregionsVol = np.fromfile(r'D:\Luke\current\IMPORTANT\tip-muscle-regions.raw', dtype=np.uint8)
    muscleregionsVol = np.reshape(muscleregionsVol, tipShape)
    print('loading muscle region masks:', muscleregionsVol.shape)
    LFO = LabelFieldOperations(labelVol)
    completetipVol = LFO.MaskLabels(mask=np.where(musclewholeVol, True, False))
    print('output complete fascicles in whole tip:', np.shape(completetipVol))
    # Region codes: 1 appears to be dorsal and 2 ventral (matches the output
    # filenames below) — confirm against how the region mask was generated.
    dorsaltipVol = LFO.MaskLabels(mask=np.where(muscleregionsVol == 1, True, False))
    print('output complete fascicles in dorsal tip:', np.shape(dorsaltipVol))
    ventraltipVol = LFO.MaskLabels(mask=np.where(muscleregionsVol == 2, True, False))
    print('output complete fascicles in ventral tip:', np.shape(ventraltipVol))
    print('saving output...')
    # NOTE(review): non-raw Windows paths below; harmless here (no recognized
    # escape sequences) but raw strings would be safer.
    workingDir = 'D:\Luke\ElephantTrunkMuscles'
    io.imsave(workingDir + '\VolumeOperations_output\muscles_p1-complete.tif', completetipVol, check_contrast=False)
    io.imsave(workingDir + '\VolumeOperations_output\muscles_p1-dorsal.tif', dorsaltipVol, check_contrast=False)
    io.imsave(workingDir + '\VolumeOperations_output\muscles_p1-ventral.tif', ventraltipVol, check_contrast=False)
runMaskLabels()
def runAvgLabels():
    # Load the dorsal label volume and a propagation-distance volume, sum the
    # distance values per label, and write the result to CSV.
    dorsalLabelVol = io.imread(r'D:\Luke\ElephantTrunkMuscles\VolumeOperations_output\muscles_p1-dorsal.tif')
    print(dorsalLabelVol.shape)
    dorsalPropDistVol = np.fromfile(r'D:\Luke\current\IMPORTANT\test\tip-dorsal_PropagationDistance.raw', dtype=np.uint16)
    dorsalPropDistVol = np.reshape(dorsalPropDistVol, tipShape)
    print(dorsalPropDistVol.shape)
    LFO = LabelFieldOperations(dorsalLabelVol)
    dorsalPropDist = LFO.AvgLabels(dorsalPropDistVol)
    df = pd.DataFrame()
    df['dorsalPropDist'] = dorsalPropDist
    # Bug fix: the output path was a non-raw string, so the '\t' in '...\test\...'
    # was parsed as a TAB character and the CSV went to a mangled path. The raw
    # string keeps every backslash literal (matching the input paths above).
    df.to_csv(r'D:\Luke\current\IMPORTANT\test\AvgLabels.csv')
#runAvgLabels() | lcubelongren/ElephantTrunkMuscles | VolumeOperations.py | VolumeOperations.py | py | 3,323 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.where",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": ... |
11089039079 | import yaml
import praw
import time
def connect_to_reddit(config):
    """Build an authenticated praw Reddit client from the 'auth' section of config."""
    reddit = praw.Reddit(username=config["auth"]["username"],
                         password=config["auth"]["password"],
                         user_agent=config["auth"]["user_agent"],
                         client_id=config["auth"]["client_id"],
                         client_secret=config["auth"]["client_secret"])
    return reddit
def send_dm(reddit, username, subject, message):
    """Send a private message to ``username``.

    Returns True when the call is treated as handled, False on failure.
    NOTE(review): a RATELIMIT error sleeps for the reported duration and then
    returns False WITHOUT retrying — presumably the caller re-runs the batch;
    confirm. An INVALID_USER error returns True, so invalid names are counted
    as "sent" by the caller — confirm that is intended.
    """
    try:
        reddit.redditor(username).message(subject=subject, message=message)
    except praw.exceptions.RedditAPIException as e:
        for subexception in e.items:
            if subexception.error_type == "RATELIMIT":
                error_str = str(subexception)
                print(error_str)
                # Parse the wait time out of the error text ("... for N minutes/seconds").
                if 'minute' in error_str:
                    delay = error_str.split('for ')[-1].split(' minute')[0]
                    delay = int(delay) * 60.0
                else:
                    delay = error_str.split('for ')[-1].split(' second')[0]
                    delay = int(delay)
                time.sleep(delay)
            elif subexception.error_type == 'INVALID_USER':
                return True
        return False
    except Exception as e:
        # Broad catch: log and report failure rather than aborting the whole batch.
        print(e)
        return False
    return True
if __name__ == "__main__":
    # Load credentials, recipients and message settings from the YAML config.
    config = yaml.safe_load(open("config.yaml").read())
    reddit = connect_to_reddit(config)
    recipients = config["recipients"]
    print("Found %d recipients in config file" % len(recipients))
    # Fall back to interactive prompts when subject/message are absent.
    subject = config["subject"]
    if not subject:
        subject = input("subject: ")
    else:
        print("Found subject in config file: " + subject)
    message = config["message"]
    if not message:
        message = input("message: ")
    else:
        print("Found message in config file: " + message)
    print("Do you want to send this message to all %d recipients?" % len(recipients))
    # NOTE(review): only an exact upper-case "Y" confirms; anything else aborts.
    confirm = input ("Y/n: ")
    counter = 0
    if confirm == "Y":
        for each in recipients:
            if send_dm(reddit, each, subject, message):
                print("Message sent to " + each)
                counter += 1
    # NOTE(review): this summary prints even when sending was not confirmed.
    print("%d/%d messages sent successfully" % (counter,len(recipients)))
| nouveaupg/mass_dm | broadcast_dm.py | broadcast_dm.py | py | 2,255 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "praw.Reddit",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "praw.exceptions",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "yaml.safe_load",
"line... |
7690140597 | from collections import defaultdict
class Graph:
    """Undirected graph on vertices 0..V-1 with DFS-based cycle detection."""

    def __init__(self, vertices):
        self.V = vertices
        self.graph = defaultdict(list)

    def addEdge(self, v, w):
        """Insert the undirected edge (v, w) into both adjacency lists."""
        self.graph[v].append(w)
        self.graph[w].append(v)

    def isCyclicUtil(self, v, visited, parent):
        """DFS from v; report True when a back edge (not to the parent) is found."""
        visited[v] = True
        for neighbour in self.graph[v]:
            if not visited[neighbour]:
                if self.isCyclicUtil(neighbour, visited, v):
                    return True
            elif neighbour != parent:
                # Already-visited neighbour that is not our DFS parent: cycle.
                return True
        return False

    def isCyclic(self):
        """Return True when any connected component contains a cycle."""
        visited = [False] * self.V
        return any(
            not visited[v] and self.isCyclicUtil(v, visited, -1)
            for v in range(self.V)
        )
if __name__ == "__main__":
"""
from timeit import timeit
g = Graph(5)
g.addEdge(1, 0)
g.addEdge(1, 2)
g.addEdge(2, 0)
g.addEdge(0, 3)
g.addEdge(3, 4)
g1 = Graph(3)
g1.addEdge(0, 1)
g1.addEdge(1, 2)
print(timeit(lambda: g1.isCyclic(), number=10000)) # 0.01144661000216729
"""
| thisisshub/DSA | Q_graphs/problems/detecting_cycle/A_in_the_undirected_graph.py | A_in_the_undirected_graph.py | py | 1,144 | python | en | code | 71 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 9,
"usage_type": "call"
}
] |
34377485430 | """
The :py:mod:`~ahk.script` module, most essentially, houses the :py:class:`~ahk.ScriptEngine` class.
The :py:class:`~ahk.ScriptEngine` is responsible for rendering autohotkey code from jinja templates and executing that
code. This is the heart of how this package works. Every other major component either inherits from this class
or utilizes an instance of this class.
The current implementation of how autohotkey code is executed is by calling the autohotkey
executable with ``subprocess``.
"""
import os
import subprocess
import warnings
from shutil import which
from ahk.utils import make_logger
from ahk.directives import Persistent
from jinja2 import Environment, FileSystemLoader
logger = make_logger(__name__)
class ExecutableNotFoundError(EnvironmentError):
    # Raised when the AutoHotkey executable cannot be located or is not a usable file.
    pass


DEFAULT_EXECUTABLE_PATH = r"C:\Program Files\AutoHotkey\AutoHotkey.exe"
"""The default path to look for AutoHotkey, if not specified some other way"""
def _resolve_executable_path(executable_path: str = ''):
    """Locate and validate the AutoHotkey executable path.

    Resolution order when no explicit path is given: the ``AHK_PATH``
    environment variable, a ``which`` lookup on PATH, then the default
    install location. Raises ``ExecutableNotFoundError`` when nothing
    usable is found; warns when the path lacks a ``.exe`` extension.
    """
    resolved = executable_path
    if not resolved:
        resolved = (os.environ.get('AHK_PATH')
                    or which('AutoHotkey.exe')
                    or which('AutoHotkeyA32.exe'))
    if not resolved and os.path.exists(DEFAULT_EXECUTABLE_PATH):
        resolved = DEFAULT_EXECUTABLE_PATH
    if not resolved:
        raise ExecutableNotFoundError(
            'Could not find AutoHotkey.exe on PATH. '
            'Provide the absolute path with the `executable_path` keyword argument '
            'or in the AHK_PATH environment variable.'
        )
    if not os.path.exists(resolved):
        raise ExecutableNotFoundError(
            f"executable_path does not seems to exist: '{resolved}' not found")
    if os.path.isdir(resolved):
        raise ExecutableNotFoundError(
            f"The path {resolved} appears to be a directory, but should be a file."
            " Please specify the *full path* to the autohotkey.exe executable file"
        )
    if not resolved.endswith('.exe'):
        warnings.warn(
            'executable_path does not appear to have a .exe extension. This may be the result of a misconfiguration.'
        )
    return resolved
class ScriptEngine(object):
    def __init__(self, executable_path: str = "", **kwargs):
        """
        This class is typically not used directly. AHK components inherit from this class
        and the arguments for this class should usually be passed in to :py:class:`~ahk.AHK`.

        :param executable_path: the path to the AHK executable.
            If not provided explicitly in this argument, the path to the AHK executable is resolved in the following order:

            * The ``AHK_PATH`` environment variable, if present
            * :py:data:`~ahk.script.DEFAULT_EXECUTABLE_PATH` if the file exists

            If environment variable not present, tries to look for 'AutoHotkey.exe' or 'AutoHotkeyA32.exe' with shutil.which

        :raises ExecutableNotFound: if AHK executable cannot be found or the specified file does not exist
        """
        self.executable_path = _resolve_executable_path(executable_path)
        # Jinja2 environment rooted at the package's bundled templates directory.
        # autoescape is off because the output is AHK script text, not HTML.
        templates_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'templates')
        self.env = Environment(loader=FileSystemLoader(templates_path),
                               autoescape=False, trim_blocks=True)

    def render_template(self, template_name, directives=None, blocking=True, **kwargs):
        """
        Renders a given jinja template and returns a string of script text

        :param template_name: the name of the jinja template to render
        :param directives: additional AHK directives to add to the resulting script
        :param blocking: whether the template should be rendered to block (use #Persistent directive)
        :param kwargs: keywords passed to template rendering
        :return: An AutoHotkey script as a string

        .. code-block:: python

            >>> from ahk import AHK
            >>> ahk = AHK()
            >>> ahk.render_template('keyboard/send_input.ahk', s='Hello')
            '#NoEnv\\n#Persistent\\n\\n\\nSendInput Hello\\n\\nExitApp\\n'
        """
        if directives is None:
            directives = set()
        else:
            directives = set(directives)
        # The #Persistent directive keeps the script alive, so it is forced on
        # for blocking runs and stripped for non-blocking ones.
        if blocking:
            directives.add(Persistent)
        elif Persistent in directives:
            directives.remove(Persistent)
        kwargs['directives'] = directives
        template = self.env.get_template(template_name)
        return template.render(**kwargs)

    def _run_script(self, script_text, **kwargs):
        # '*' tells AutoHotkey to read the script from stdin; /ErrorStdOut
        # routes script errors to the captured streams.
        blocking = kwargs.pop('blocking', True)
        runargs = [self.executable_path, '/ErrorStdOut', '*']
        decode = kwargs.pop('decode', False)
        script_bytes = bytes(script_text, 'utf-8')
        if blocking:
            result = subprocess.run(runargs, input=script_bytes,
                                    stderr=subprocess.PIPE, stdout=subprocess.PIPE, **kwargs)
            if decode:
                logger.debug('Stdout: %s', repr(result.stdout))
                logger.debug('Stderr: %s', repr(result.stderr))
                return result.stdout.decode()
            else:
                return result
        else:
            # Non-blocking: hand the script over stdin and return the live process.
            proc = subprocess.Popen(runargs, stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
            try:
                # Zero-second timeout pushes the script into stdin without waiting.
                proc.communicate(script_bytes, timeout=0)
            except subprocess.TimeoutExpired:
                pass  # for now, this seems needed to avoid blocking and use stdin
            return proc

    def run_script(self, script_text: str, decode=True, blocking=True, **runkwargs):
        """
        Given an AutoHotkey script as a string, execute it

        :param script_text: a string containing AutoHotkey code
        :param decode: If ``True``, attempt to decode the stdout of the completed process.
            If ``False``, returns the completed process. Only has effect when ``blocking=True``
        :param blocking: If ``True``, script must finish before returning.
            If ``False``, function returns a ``subprocess.Popen`` object immediately without blocking
        :param runkwargs: keyword arguments passed to ``subprocess.Popen`` or ``subprocess.run``
        :return: | A string of the decoded stdout if ``blocking`` and ``decode`` are True.
                 | ``subprocess.CompletedProcess`` if ``blocking`` is True and ``decode`` is False.
                 | ``subprocess.Popen`` object if ``blocking`` is False.

        >>> from ahk import AHK
        >>> ahk = AHK()
        >>> ahk.run_script('FileAppend, Hello World, *')
        'Hello World'
        >>> ahk.run_script('FileAppend, Hello World, *', decode=False)
        CompletedProcess(args=['C:\\\\pathto\\\\AutoHotkey.exe', '/ErrorStdOut', '*'], returncode=0, stdout=b'Hello World', stderr=b'')
        >>> ahk.run_script('FileAppend, Hello World, *', blocking=False)
        <subprocess.Popen at 0x18a599cde10>
        """
        logger.debug('Running script text: %s', script_text)
        try:
            result = self._run_script(script_text, decode=decode, blocking=blocking, **runkwargs)
        except Exception as e:
            # Log at fatal level and re-raise so callers see the original error.
            logger.fatal('Error running temp script: %s', e)
            raise
        return result
| Frankushima/LeagueAccountManager | ahk/script.py | script.py | py | 7,406 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "ahk.utils.make_logger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "shutil.which",
... |
8254311544 | import numpy as np
import pygame
from sys import exit
#Initializing Parameters
pygame.init()
displayInfo = pygame.display.Info()
screenWidth = displayInfo.current_w
screenHeight = displayInfo.current_h
display_surface = pygame.display.set_mode((screenWidth, screenHeight-50), pygame.RESIZABLE)
pygame.display.set_caption("Python Physics Engine")
clock = pygame.time.Clock()
fps_limit = 60
backgroundColor = (255,255,255)
objectList = []
class PhysicsObject():
    # NOTE(review): a `global` statement at class scope has no effect; objectList
    # is only reachable through the module namespace anyway — confirm this line
    # can be removed.
    global objectList

    def convert(self, measurement):
        # Convert metres to pixels (50 px per metre), truncating to int first.
        return int(measurement)*self.conversion_factor

    def __init__(self, x, y, width, height, mass, velocity, acceleration, max_velocity):
        # x, y are given in pixels; width/height/velocity/acceleration/max_velocity
        # are given in SI units and converted to pixels here.
        self.conversion_factor = 50 #meaning 50 px = 1m
        self.x = round(x)
        self.y = round(y)
        self.width = self.convert(width)
        self.height = self.convert(height)
        self.mass = mass
        # Vector quantities are stored as (x, y) tuples in pixel units.
        self.velocity = (self.convert(velocity[0]), self.convert(velocity[1]))
        self.acceleration = (self.convert(acceleration[0]), self.convert(acceleration[1]))
        self.max_velocity = (self.convert(max_velocity[0]), self.convert(max_velocity[1]))
        self.color = (255,0,0)
        self.objectRect = pygame.Rect(self.x, self.y, self.width, self.height)
        self.canCollide = True

    def update(self, dt):
        # Advance the object by one time step dt (seconds, pre-scaled by caller).
        #updating velocity
        self.velocity = (int(self.velocity[0] + self.acceleration[0] * dt),
                         int(self.velocity[1] + self.acceleration[1] * dt))
        #ensuring velocity doesn't go past maximum
        # NOTE(review): min() only caps positive components; negative velocities
        # are never clamped against -max_velocity — confirm whether intended.
        self.velocity = (min(self.velocity[0], self.max_velocity[0]),
                         min(self.velocity[1], self.max_velocity[1]))
        #updating position
        self.x += int(self.velocity[0] * dt)
        self.y += int(self.velocity[1] * dt)
        #checking that object won't go off screen
        screenWidth, screenHeight = display_surface.get_size()
        # Bounce off each screen edge by flipping the matching velocity component.
        if self.x < 0:
            self.x = 0
            self.velocity = (-self.velocity[0], self.velocity[1])
        elif self.x + self.width > screenWidth:
            self.x = screenWidth - self.width
            self.velocity = (-self.velocity[0], self.velocity[1])
        if self.y < 0:
            self.y = 0
            self.velocity = (self.velocity[0], -self.velocity[1])
        elif self.y + self.height > screenHeight:
            self.y = screenHeight - self.height
            self.velocity = (self.velocity[0], -self.velocity[1])
        self.objectRect = pygame.Rect(self.x, self.y, self.width, self.height)
        self.checkPixels()

    def draw(self):
        # Rebuild the rect in case x/y changed since the last update, then draw.
        self.objectRect = pygame.Rect(self.x, self.y, self.width, self.height)
        pygame.draw.rect(display_surface, self.color, self.objectRect, 0, 1)

    def set_color(self, color):
        self.color = color

    def set_velocity(self, new_velocity):
        self.velocity = new_velocity

    def change_collision_status(self):
        # Toggle whether this object participates in collision handling.
        self.canCollide = not self.canCollide
        print("Can collide: ", self.canCollide)

    def checkPixels(self):
        # Snap coordinates to whole pixels.
        self.x = round(self.x)
        self.y = round(self.y)
def collisionHandler():
    """Resolve collisions between every unordered pair of objects, once each.

    Bug fix: the original scanned all *ordered* pairs, so each touching pair
    was handled twice per frame — and because the 1-D elastic-collision update
    in calculateCollision is its own inverse, the second pass undid the first
    (up to rounding), largely cancelling the collision. Iterating unordered
    pairs processes each collision exactly once. Also stops shadowing the
    builtin name ``object``.
    """
    global objectList
    for i, first in enumerate(objectList):
        for second in objectList[i + 1:]:
            if (first.objectRect.colliderect(second.objectRect)
                    and first.canCollide and second.canCollide):
                calculateCollision(first, second)
def calculateCollision(object1b, object2b):
    # NOTE(review): 'canCollude' is a typo for 'canCollide', so these two lines
    # only create unused attributes instead of disabling further collisions.
    # Fixing the typo as-is would permanently disable both objects (nothing in
    # view re-enables canCollide), so confirm the intended lifecycle first.
    object1b.canCollude = False
    object2b.canCollude = False
    # 1-D elastic collision along x: exchange momentum, preserving kinetic
    # energy; the y components are left untouched.
    m1 = object1b.mass
    m2 = object2b.mass
    v1x, v1y = object1b.velocity
    v2x, v2y = object2b.velocity
    v1xf = round(((m1-m2)/(m1+m2))*v1x + (2*m2/(m1+m2))*v2x)
    v2xf = round((2*m1/(m1+m2)) * v1x + ((m2-m1)/(m1+m2))*v2x)
    object1b.set_velocity((v1xf, v1y))
    object2b.set_velocity((v2xf, v2y))
    makeCollisions(object1b, object2b)
def makeCollisions(object1a, object2a):
    # NOTE(review): computes the centre-to-centre distance but never uses or
    # returns it — looks like an unfinished stub; confirm whether it can be
    # removed along with its call site in calculateCollision.
    distance = np.sqrt((object1a.x-object2a.x)**2 + (object1a.y-object2a.y)**2)
gravity = (0, 0)
#input parameters in SI units, class will handle conversion
testObject1 = PhysicsObject(100, 300, 1, 1, 1, (3,0), gravity, (1000, 1000))
testObject2 = PhysicsObject(screenWidth-100, 275, 2, 2, 100, (-3, 0), gravity, (1000,1000))
testObject2.set_color((0,255,0))
objectList = [testObject1, testObject2]
running = True
#main game loop
while running:
time_scale = 1e-3
dt = clock.tick(fps_limit) * time_scale
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
exit()
elif event.type == pygame.VIDEORESIZE:
display_surface = pygame.display.set_mode((event.w, event.h), pygame.RESIZABLE)
collisionHandler()
testObject1.update(dt)
testObject2.update(dt)
display_surface.fill(backgroundColor)
testObject1.draw()
testObject2.draw()
pygame.time.delay(int(1000/fps_limit))
pygame.display.flip()
pygame.quit() | bfelson/Python-Game-Engine | engine.py | engine.py | py | 4,993 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display.Info",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_m... |
70108688104 | import yt
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import unyt
from unyt import cm, s
# 9 different simulations with a few snapshots:
# NIF_hdf5_plt_cnt_0*
#frames = [000,125,250,275,300,32,350,375,425]
# Load one snapshot of the 1024^3 NIF foam simulation (plt file 0325).
ds = yt.load('/scratch/ek9/ccf100/nif/turb_foam_gamma_5_3_3d_1024_50mu_bubbles/NIF_hdf5_plt_cnt_0325')
# FOR NEXT PROJS, ETC, DO QUICKER WITH
#for fr in frames:
#    ds_list = yt.load('/scratch/ek9/ccf100/nif/turb_foam_gamma_5_3_3d_1024_50mu_bubbles/NIF_hdf5_plt_cnt_%04d'%frames[fr])
'''
HM: ds.region takes code length, explore...
SEE PAPER, also:
print(ds.domain_left_edge)
print(ds.domain_right_edge)
print(ds.domain_right_edge - ds.domain_left_edge)
'''
# Sub-volume of interest, expressed in code units.
left_edge = ds.arr([-0.06,0.3,-0.06],'code_length')
right_edge = ds.arr([0.06,0.334,0.06],'code_length')
center =0.5*left_edge + 0.5*right_edge
the_region = ds.region(center, left_edge, right_edge)#, fields=None, ds=None, field_parameters=None, data_source=None)
# check out the region
if 1:
    # Project gas density along z (axis 2), restricted to the_region.
    proj_axis = 2
    proj_d = ds.proj(('gas','density'),proj_axis,data_source=the_region)
    pw = proj_d.to_pw()
    pw.save()
    # Map the projection axis to the two in-plane axes of the image.
    ix = ds.coordinates.x_axis[proj_axis]
    iy = ds.coordinates.y_axis[proj_axis]
    midpt = center
    width = right_edge - left_edge
    width_2D = width[ix],width[iy]
    #dx_inv = ds.domain_dimensions/ds.domain_width #are the zones cubed? #dir(ds),
    #num_zones = width * dx_inv
    # Size the fixed-resolution buffer by counting distinct cell coordinates.
    # NOTE(review): this assumes a uniform grid inside the_region -- confirm for AMR data.
    nx = np.unique(the_region['x']).size
    ny = np.unique(the_region['y']).size
    nz = np.unique(the_region['z']).size
    num_zones = nx, ny, nz
    rez = [num_zones[ix],num_zones[iy]] #[768,1024]
    frb_d = proj_d.to_frb(width_2D,rez,center=midpt)
    frb = frb_d
    # log10 column-density image of the selected region
    plt.imshow(np.log10(np.array(frb['gas','density'])),interpolation='nearest',origin='lower')
    plt.savefig('imgRho_snap0325.png')
    # NOTE(review): bare `raise` with no active exception triggers RuntimeError;
    # it appears to be a deliberate hard stop after saving the image -- confirm.
    raise
# Disabled experiment: density PDF of the same buffer.
if 0:
    the_x = np.log10(np.array(frb['gas','density']))
    the_weight = np.log10(np.array(frb['gas','cell_volume']))
    the_array, xbins = np.histogram(the_x, weights = None, density=True)
    bin_centers = 0.5*(xbins[1:]+xbins[:-1])
    plot_x = bin_centers
    plot_y = the_array
    plt.plot(plot_x,plot_y, c='k')
    plt.savefig('pdfRho_snap325.png')
| dcollins4096/p68c_laser | script1.py | script1.py | py | 2,199 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "yt.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 4... |
18423321387 | import torch
from torch import nn
import pytorch_lightning as pl
from transformers import AutoTokenizer
import os
import pandas as pd
import numpy as np
from config import CONFIG
class CommonLitDataset(torch.utils.data.Dataset):
    """Map-style dataset serving pre-tokenized summaries.

    Each item is a (input_ids, attention_mask, label) triple, where
    label is a float32 tensor holding the (content, wording) targets.
    """

    def __init__(self, df):
        # Cache array views of the columns read by __getitem__.
        self.df = df
        self.full_text_tokens = df['full_text_tokens'].values
        self.labels = df[['content', 'wording']].values

    def __len__(self):
        """Number of summaries in the frame."""
        return len(self.df)

    def __getitem__(self, index):
        """Fetch one tokenized sample and its regression targets."""
        entry = self.full_text_tokens[index]
        target = torch.tensor(self.labels[index], dtype=torch.float32)
        return entry['input_ids'], entry['attention_mask'], target
class CommonLitDataModule(pl.LightningDataModule):
def __init__(self, fold):
    """Load prompts + summaries, tokenize once, and split train/val by prompt.

    fold selects which prompt_id (in sorted order) is held out as the
    validation set; all remaining prompts form the training set
    (leave-one-prompt-out cross-validation).
    """
    super().__init__()
    # join each student summary with its prompt metadata
    prompts_train_df = pd.read_csv(os.path.join(CONFIG.data_dir, 'prompts_train.csv'))
    summaries_train_df = pd.read_csv(os.path.join(CONFIG.data_dir, 'summaries_train.csv'))
    df = prompts_train_df.merge(summaries_train_df, on='prompt_id')
    tokenizer = AutoTokenizer.from_pretrained(CONFIG.backbone_name)
    # model input: question [SEP] summary (prompt_text intentionally left out)
    df['full_text'] = (df['prompt_question'].values + tokenizer.sep_token +
                       #df['prompt_text'].values + tokenizer.sep_token +
                       df['text'].values)
    # pre-tokenize every row up front so dataset __getitem__ stays cheap
    full_text_tokens = []
    for i in range(len(df)):
        tokens = tokenizer.encode_plus(df.loc[i, 'full_text'], **CONFIG.tokenizer)
        full_text_tokens.append({'input_ids': torch.tensor(tokens['input_ids'], dtype=torch.long), 'attention_mask': torch.tensor(tokens['attention_mask'], dtype=torch.long)})
    df['full_text_tokens'] = full_text_tokens
    # hold out the fold-th prompt (sorted order) for validation
    promt_ids = np.sort(df['prompt_id'].unique())
    self.train_df = df[df['prompt_id'] != promt_ids[fold]].reset_index(drop=True)
    self.val_df = df[df['prompt_id'] == promt_ids[fold]].reset_index(drop=True)
def train_dataloader(self):
    """Build the DataLoader over the training prompts (settings from CONFIG)."""
    train_set = CommonLitDataset(self.train_df)
    loader_kwargs = CONFIG.data_loaders.train_loader
    return torch.utils.data.DataLoader(dataset=train_set, **loader_kwargs)
def val_dataloader(self):
dataset = CommonLitDataset(self.val_df)
return torch.utils.data.DataLoader(dataset=dataset, **CONFIG.data_loaders.val_loader) | alexeyevgenov/Kaggle_CommonLit-Evaluate_Student_Summaries | code/dataset.py | dataset.py | py | 2,348 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pytorch_lightning.Li... |
39047349636 | '''Tests for the classification module.'''
import pytest
import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
from torchutils.classification import Classification
@pytest.fixture(params=[1, 10])
def data_num_features(request):
    """Parametrized number of input features for the synthetic problem."""
    return request.param
@pytest.fixture(params=[2, 10])
def data_num_classes(request):
    """Parametrized number of target classes (binary and multiclass cases)."""
    return request.param
@pytest.fixture(params=[1000])
def data_num_samples(request):
    """Number of synthetic samples to generate."""
    return request.param
@pytest.fixture
def data_classification(data_num_features,
                        data_num_classes,
                        data_num_samples):
    '''Create classification problem.

    Builds a random (features, labels) dataset, wraps it in train/test
    DataLoaders, and returns a Classification helper around a linear model.
    Binary problems use one logit + BCE; multiclass uses one logit per
    class + cross-entropy.
    '''
    torch.manual_seed(0)  # deterministic data and model initialization
    num_features = data_num_features
    num_classes = data_num_classes
    num_samples = data_num_samples
    # binary classification is modeled with a single output logit
    num_outputs = 1 if num_classes==2 else num_classes
    X = torch.randn(num_samples, num_features)
    y = torch.randint(num_classes, size=(num_samples,))
    X_train, X_test, y_train, y_test = train_test_split(
        X, y,
        test_size=0.2,
        random_state=0
    )
    train_set = TensorDataset(X_train, y_train)
    test_set = TensorDataset(X_test, y_test)
    train_loader = DataLoader(train_set, batch_size=32, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=32, shuffle=False)
    model = nn.Linear(num_features, num_outputs)
    # the loss must match the output parameterization chosen above
    if num_outputs == 1:
        criterion = nn.BCEWithLogitsLoss(reduction='mean')
    else:
        criterion = nn.CrossEntropyLoss(reduction='mean')
    optimizer = torch.optim.Adam(params=model.parameters())
    classifier = Classification(
        model,
        criterion,
        optimizer,
        train_loader,
        test_loader,
        torch.device('cpu')
    )
    return classifier
def test_classifier_inference(data_classification):
    '''Test classifier at inference.

    Checks that predict/predict_proba yield one score per output unit
    and that predict_top yields exactly one class per input sample.
    '''
    classifier = data_classification
    classifier.train(False)  # eval mode for inference
    X_batch, y_batch = next(iter(classifier.train_loader))
    X_batch = X_batch.to(classifier.device)
    y_batch = y_batch.to(classifier.device)
    with torch.no_grad():
        y_pred = classifier.predict(X_batch)
        y_proba = classifier.predict_proba(X_batch)
        y_topclass, _ = classifier.predict_top(X_batch)
    y_pred = y_pred.cpu()
    y_proba = y_proba.cpu()
    y_topclass = y_topclass.cpu()
    # one raw score / probability per output unit of the linear model
    assert y_pred.shape == (X_batch.shape[0], classifier.model.out_features)
    assert y_proba.shape == (X_batch.shape[0], classifier.model.out_features)
    # exactly one predicted class per sample
    assert y_topclass.numel() == y_batch.numel()
| joseph-nagel/torchutils | tests/test_classification.py | test_classification.py | py | 2,598 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytest.fixture",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed",
... |
3023655025 | import os
from PIL import Image
import PIL.ImageOps
import argparse
import torchvision as V
import matplotlib.pyplot as plt
import ujson as json
import glob
import re
from . import configs
import rich
from rich.progress import track
console = rich.get_console()
def cifar10_burst(dest: str, split: str):
    '''
    Dump the requested CIFAR10 split as individual PNG files.

    Each image is written to <dest>/<index>-<label>.png so the ground-truth
    label survives the burst and can be recovered from the file name.

    :param dest: output directory (created, with parents, if missing)
    :param split: 'train' or 'test'
    :raises ValueError: on any other split name
    '''
    # validate input explicitly -- assert would be stripped under `python -O`
    if split not in ('train', 'test'):
        raise ValueError(f"split must be 'train' or 'test', got {split!r}")
    # makedirs with exist_ok avoids the exists()/mkdir() race and handles parents
    os.makedirs(dest, exist_ok=True)
    data = V.datasets.CIFAR10(root=configs.datasets.cifar10.root,
                              train=(split == 'train'),
                              download=True)
    for i, (x, y) in enumerate(data):
        # label encoded in the file name: e.g. 00042-7.png
        fpath = os.path.join(dest, '%05d-%1d.png' % (i, y))
        # no need to invert like mnist
        x.save(fpath)
        console.print(fpath)
        print(x, y)
    print(len(data))
def cifar10_collect(src: str):
    '''
    Gather the per-image JSON results under *src* into a single <src>.json.

    File names are expected to look like <index>-<label>.json (as produced
    by cifar10_burst); the digit before the extension is attached to each
    record as its ground-truth label.

    :param src: directory containing the per-image *.json files
    '''
    dataset = []
    files = glob.glob(os.path.join(src, '*.json'))
    for file in track(files):
        with open(file, 'rt') as f:
            j = json.load(f)
        # the dot before "json" is now escaped; previously it matched any
        # character, which could mis-parse unusual file names
        label = re.match(r'.*/\d+-(\d)\.json', file).groups()[0]
        j[0]['label'] = int(label)
        dataset.append(j)
    with open(src + '.json', 'wt') as f:
        json.dump(dataset, f)
    console.print('>_< done')
if __name__ == '__main__':
    # CLI entry: burst a torchvision split into PNGs, or collect the
    # per-image JSON results back into a single file.
    ag = argparse.ArgumentParser('python3 -m veccls.cifar10')
    ag.add_argument('action', choices=('burst', 'collect'))
    ag.add_argument('-d', '--destination', default='.', help='dest directory')
    ag.add_argument('-s', '--split', default='train', choices=('train', 'test'))
    ag = ag.parse_args()
    console.print(ag)  # echo parsed arguments for the log
    if ag.action == 'burst':
        cifar10_burst(ag.destination, ag.split)
    elif ag.action == 'collect':
        cifar10_collect(ag.destination)
| cdluminate/MyNotes | rs/2022-veccls/veccls/cifar10.py | cifar10.py | py | 1,809 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rich.get_console",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_num... |
32341335567 | #!/usr/bin/env python
'''
Here we create a 7 shell module, plot its cross section, and plot a 3D
representation. Note: the 3D representation uses polygons to construct
the module shells (but within the software, the shells are constructed
mathematically to have perfect curvature).
Created on Aug 15, 2011
@author: rtaylor
'''
from foxsisim.module import Module
from foxsisim.plotting import get3dAxes
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # module parameters
    # NOTE(review): units presumed to be mm -- confirm with foxsisim docs
    focalLength = 200.0
    segmentLength = 30.0
    radii = [5.15100,4.90000,4.65900,4.42900,4.21000,4.00000,3.79900] # 7 shell radii
    # create module (shells are built analytically inside Module)
    module = Module(seglen=segmentLength, focal=focalLength, radii=radii)
    # generate cross section (2D side view of all shells)
    fig1 = plt.figure(figsize=(9,3))
    axes1 = fig1.gca()
    module.plot2D(axes1,'b')
    # generate 3d representation (polygon approximation of the shells)
    fig2 = plt.figure(figsize=(5,5))
    axes2 = get3dAxes(fig2)
    module.plot3D(axes2, 'b')
    # show figures
    plt.show()
| humatic/foxsi-optics-sim | examples/example1.py | example1.py | py | 1,040 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "foxsisim.module.Module",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "mat... |
3336257171 | from __future__ import print_function
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
import requests
import time
import pandas as pd
import json
from pprint import pprint
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/documents.readonly']
# The ID of a sample document.
DOCUMENT_ID = '195j9eDD3ccgjQRttHhJPymLJUCOUjs-jmwTrekvdjFE'
def main():
    """Shows basic usage of the Docs API.

    Prints the title of a sample document.  OAuth credentials are cached
    in token.json and refreshed (or re-obtained interactively) as needed.
    """
    creds = None
    # The file token.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.json'):
        creds = Credentials.from_authorized_user_file('token.json', SCOPES)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            # interactive OAuth consent flow in the local browser
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.json', 'w') as token:
            token.write(creds.to_json())
    service = build('docs', 'v1', credentials=creds)
    # Retrieve the documents contents from the Docs service.
    document = service.documents().get(documentId=DOCUMENT_ID).execute()
    print('The title of the document is: {}'.format(document.get('title')))
def places():
    """One-off Places API probe: fetch full details for a fixed place_id.

    SECURITY NOTE(review): the API key is hard-coded in the URL below;
    it should be loaded from an environment variable or config instead.
    """
    # search by place ID and retain all fields
    url = "https://maps.googleapis.com/maps/api/place/details/json?place_id=ChIJ_WSvdD72wokROnegxbxj37I&key=AIzaSyAp3LuAtLE4jxg2Ehoprg4zeg8gggq0bZ4"
    # Alternative probes previously swapped into `url` above:
    #  - place details restricted to name/rating/phone via the `fields` param
    #  - first page of the text search "bars in Manhattan"
    #  - a later page of the same search selected via its `pagetoken`
    # pagetoken parameter allows us to specify which result is being displayed
    # there are 20 results displayed per page
    payload={}
    headers = {}
    response = requests.request("GET", url, headers=headers, data=payload)
    print(response.json())
def find_1000_bars():
    """Page through the Places text search "bars in Manhattan".

    NOTE(review): next_page_token only yields up to 60 additional results,
    which is why find_1000_bars_zip() sweeps coordinates instead.

    SECURITY NOTE(review): the API key is hard-coded; move it to an
    environment variable or config file.
    """
    # first page of text search "bars in Manhattan"
    key = "AIzaSyAp3LuAtLE4jxg2Ehoprg4zeg8gggq0bZ4"
    url = "https://maps.googleapis.com/maps/api/place/textsearch/json?query=bars%20in%20Manhattan&key={}".format(key)
    response = requests.request("GET", url, headers={}, data={})
    store = response.json()
    next_page_token = store['next_page_token']
    next_page_token = str(next_page_token)
    next_page_token = next_page_token.strip()
    print(next_page_token)
    url = "https://maps.googleapis.com/maps/api/place/textsearch/json?query=bars%20in%20Manhattan&key=" + key + "&pagetoken="+next_page_token
    print(url)
    # (removed an unused `url2` local that held a stale hard-coded pagetoken)
    # keep finding next page as long as there exists a next page, capped at 10 pages
    counter = 0
    while "next_page_token" in store and counter < 10:
        next_page_token = store["next_page_token"]
        url = "https://maps.googleapis.com/maps/api/place/textsearch/json?query=bars%20in%20Manhattan&pagetoken={}&key={}".format(next_page_token, key)
        time.sleep(2)  # the token needs a moment to become valid server-side
        response = requests.request("GET", url, headers={}, data={})
        store = response.json()
        pprint(store)
        print(store.keys())
        print(len(store["results"]))
        counter += 1
# next_page_token will only return up to 60 additional results
# must use the ZIP code method
# since we only have 12 zip codes (to prevent duplicates)
# we also need to employ the next_page_token method
# and possibly also use night_club as the type
def find_1000_bars_zip():
    """Collect Manhattan bar/night_club places by sweeping ZIP-area coordinates.

    Seeds the frame from previously scraped results in 'data (1).json',
    runs a Nearby Search (radius 1000 m) around each hand-picked coordinate,
    pages through next_page_token, and hands the combined frame to
    df_handle() for dedup/cleanup.

    SECURITY NOTE(review): the API key is hard-coded below; it should be
    loaded from an environment variable or config instead.
    """
    # df = pd.DataFrame()
    path = "data (1).json"
    # data = json.loads(path)
    data = pd.read_json(path)
    df = pd.DataFrame(data)
    print(df.head)
    print(set(df['business_status'].values))
    # drop venues Google reports as temporarily closed
    df = df[df.business_status != "CLOSED_TEMPORARILY"]
    print(df.head)
    key = "AIzaSyAp3LuAtLE4jxg2Ehoprg4zeg8gggq0bZ4"
    # one coordinate per Manhattan ZIP area; duplicates are removed later by place_id
    coords = [(40.824447,-73.947673), (40.806777,-73.961267), (40.789105,-73.946986), (40.786001,-73.977814),
              (40.770412,-73.959466), (40.768322,-73.994636), (40.755069,-73.974572), (40.749102,-74.002020),
              (40.736876,-73.980047), (40.730112,-74.006826), (40.718119,-73.986397), (40.709556,-74.011633)]
    extra_coords = [(40.740121, -73.993254), (40.723181, -73.999776), (40.721131, -74.010864), (40.742110, -74.003115),
                    (40.751900, -73.983896), (40.761483, -73.967199), (40.778345, -73.947807), (40.796964, -73.943207),
                    (40.763633, -73.984097), (40.766412, -73.991019), (40.763734, -73.996633), (40.731230, -73.987436)]
    coords = coords + extra_coords # now we need to be careful for duplicates
    latitudes, longitudes = [a[0] for a in coords], [a[1] for a in coords]
    for lat, long in zip(latitudes, longitudes):
        # NOTE(review): the first page searches type=night_club while paged
        # requests use type=bar -- looks intentional to widen coverage; confirm.
        url = f"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={lat}%2C{long}&radius=1000&type=night_club&key={key}"
        time.sleep(2)
        response = requests.request("GET", url, headers={}, data={})
        store = response.json()
        df_ = pd.json_normalize(store["results"])
        df = pd.concat([df, df_])
        while "next_page_token" in store:
            next_page_token = store["next_page_token"]
            url = f"https://maps.googleapis.com/maps/api/place/nearbysearch/json?location={lat}%2C{long}&radius=1000&type=bar&key={key}&pagetoken={next_page_token}"
            time.sleep(2)
            response = requests.request("GET", url, headers={}, data={})
            store = response.json()
            df_ = pd.json_normalize(store["results"])
            df = pd.concat([df, df_])
    df_handle(df)
def df_handle(df):
    """Deduplicate/clean the scraped places frame and dump it to places.txt.

    Keeps the first occurrence of each place_id, drops temporarily closed
    venues, and writes the remaining records as a JSON array of objects.

    :param df: pandas DataFrame with at least 'place_id' and
        'business_status' columns; deduplication mutates it in place.
    """
    # print the actual frame preview (the original printed the bound
    # method object via `print(df.head)`)
    print(df.head())
    # NOTE: a former `df.set_index('place_id')` call discarded its result
    # (set_index is not in-place by default), so it was a no-op; the dead
    # call has been removed rather than "fixed" to keep output unchanged.
    df.drop_duplicates(subset="place_id", keep="first", inplace=True)
    df = df[df.business_status != "CLOSED_TEMPORARILY"]
    # sanity check: should always print False after the dedup above
    print(any(df["place_id"].duplicated()))
    result = df.to_json(orient="records")
    parsed = json.loads(result)
    with open("places.txt", "w") as of:
        json.dump(parsed, of)
if __name__ == '__main__':
    # Entry point: run the ZIP-sweep scrape (main()/places() are manual probes).
    find_1000_bars_zip()
| ur2136/DrinkEasy | CodeBase/BackEnd/Pre-Processing Scripts/googleplaces.py | googleplaces.py | py | 7,915 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.path.exists",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "google.oauth2.credentia... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.