seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
2747520302 | from payment.models import Payment, DepositTransaction, Transaction
from users.models import User
from vpn.models import VPN, Subscription
from django.conf import settings
import logging
import requests
from payment.consts import TRONSCAN_API
from django.db.models import Sum
import uuid
logger = logging.getLogger(__name__)
class TransactionInfo:
    """Value object describing one TRC20 transfer parsed from the Tronscan API.

    Instances are produced by :func:`get_usdt_transaction_on_trc20_info`.
    """

    def __init__(self, is_success, to_address, amount, confirmed):
        self.is_success = is_success  # True when the API's contractRet == "SUCCESS"
        self.to_address = to_address  # receiving wallet address of the transfer
        self.amount = amount          # transfer amount already scaled by 10**decimals
        self.confirmed = confirmed    # on-chain confirmation flag from the API
def create_payment_for_user(user: User) -> 'tuple[Payment, VPN, float]':
    """Open a fresh payment (plus its VPN record) for ``user``.

    Any payment still IN_PROGRESS for the user is marked FAILURE first, so
    at most one payment is active per user.  Returns the new payment, the
    new VPN row, and the amount the user still has to transfer after their
    wallet credit is applied (0 when the wallet covers the full price).
    """
    # Invalidate stale in-progress payments before opening a new one.
    Payment.objects.filter(
        user=user,
        status=Payment.PaymentStatus.IN_PROGRESS
    ).update(status=Payment.PaymentStatus.FAILURE)
    subscription = Subscription.objects.get(id=settings.DEFAULT_SUBSCRIPTION_ID)
    payment = Payment.objects.create(user=user, amount=subscription.amount, amount_after_discount=subscription.amount)
    vpn = VPN.objects.create(
        user_uuid=uuid.uuid4(),
        payment=payment,
        subscription=subscription,
        active_days=subscription.package_days,
        user=user,
    )
    # Wallet credit reduces what the user must pay out of pocket.
    amount_user_pay = payment.amount_after_discount - calculate_user_wallet(user)
    if amount_user_pay > 0:
        # Only advance the chat flow to "waiting for a transaction hash"
        # when an actual on-chain transfer is still required.
        user.chat_state = User.ChatStateChoices.GET_TRX_HASH
        user.save()
    return payment, vpn, amount_user_pay if amount_user_pay > 0 else 0
def create_vpn_for_payment(payment: Payment) -> list[VPN]:
    """Provision the VPN accounts attached to ``payment`` on the Hiddify panel.

    For each VPN row linked to the payment, POSTs a create request to the
    subscription's Hiddify instance, then stores the resulting VPN link on
    the row.  Rows whose create request fails are logged and skipped.
    Returns the list of successfully provisioned VPN rows.
    """
    vpns = []
    for v in payment.vpns.all():
        # Request payload for the Hiddify user-creation endpoint; values are
        # stringified since the endpoint receives form data.
        payload = {
            'uuid': v.user_uuid,
            'name': f"{payment.user.user_id}@{payment.id}",
            'usage_limit_GB': str(v.subscription.usage_limit),
            'package_days': str(v.subscription.package_days),
            'mode': Subscription.SubscriptionMode[v.subscription.mode].name,
            'comment': 'vpn-bot-created',
            'enable': 'y'
        }
        hiddify = v.subscription.hiddify
        response = requests.request(
            "POST", hiddify.get_create_vpn_link(), data=payload
        )
        try:
            response.raise_for_status()
        except Exception as e:
            # Best-effort: a failed panel call skips this VPN but keeps going.
            logger.error(f"Create vpn failed.\nerror={e}")
            continue
        vpn_link = hiddify.get_vpn_link(v.user_uuid)
        v.link = vpn_link
        v.active_days = v.subscription.package_days
        v.save()
        vpns.append(v)
    return vpns
def get_usdt_transaction_on_trc20_info(transaction_hash: str) -> 'TransactionInfo | None':
    """Look up ``transaction_hash`` on Tronscan and parse its first transfer.

    Returns a :class:`TransactionInfo` when the hash resolves to a TRC20
    USDT transfer, otherwise ``None`` (unknown hash, wrong token/symbol,
    or any API/parsing failure).
    """
    try:
        response = requests.get(TRONSCAN_API.format(hash=transaction_hash))
        res_json = response.json()
        # todo check test net & time
        transfer_list = res_json["transfersAllList"]
        if len(transfer_list) <= 0:
            return None
        transfer = transfer_list[0]
        token_type = transfer["tokenType"]
        symbol = transfer["symbol"]
        if token_type != "trc20":
            logger.warning(f"Wrong token type!\nhash={transaction_hash}\ndata={res_json}")
            return None
        if symbol != "USDT":
            logger.warning(f"Wrong symbol!\nhash={transaction_hash}\ndata={res_json}")
            return None
        to_address = transfer["to_address"]
        decimals = transfer["decimals"]
        # amount_str is an integer scaled by 10**decimals; convert to whole USDT.
        amount = int(transfer["amount_str"]) / (10 ** decimals)
        is_success = res_json["contractRet"] == "SUCCESS"
        confirmed = res_json["confirmed"]
        return TransactionInfo(is_success, to_address, amount, confirmed)
    except Exception:
        # Narrowed from a bare ``except`` (which also swallowed SystemExit /
        # KeyboardInterrupt) and log the failure instead of hiding it.
        logger.exception(f"Failed to fetch/parse transaction info for hash={transaction_hash}")
        return None
def calculate_user_wallet(user: User):
    """Return the user's remaining wallet credit.

    Credit = sum of PAYED_AND_CONFIRMED deposits minus the sum of all
    transactions charged against the user's payments.  An aggregate over
    zero rows yields ``None``, which is treated as zero.
    """
    deposits = user.deposits.filter(
        status=DepositTransaction.DepositTransactionStatus.PAYED_AND_CONFIRMED
    ).aggregate(
        total=Sum("amount")
    )["total"]
    if deposits is None:
        deposits = 0
    transaction = Transaction.objects.filter(payment__user=user).aggregate(total=Sum("amount"))["total"]
    if transaction is None:
        transaction = 0
    return deposits - transaction
def can_transaction_be_confirmed(trx_info: TransactionInfo):
    """A transaction is confirmable when it exists, succeeded on-chain,
    and was sent to the wallet address configured in settings."""
    if trx_info is None:
        return False
    return trx_info.is_success and settings.WALLET == trx_info.to_address
| amirshabanics/vpn-hiddify-telegram-bot | payment/utils.py | utils.py | py | 4,171 | python | en | code | 4 | github-code | 13 |
73705864657 | from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable auto-now-add ``first_saved`` timestamp to TweetDetails."""

    dependencies = [
        ('analysis', '0028_auto_20190228_1113'),
    ]
    operations = [
        migrations.AddField(
            model_name='tweetdetails',
            name='first_saved',
            # null=True lets pre-existing rows migrate without a backfill value.
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
    ]
| emilymcgivern/tweetanalysisandvisualisation | tweetanalysis/tweetanalysis/analysis/migrations/0029_tweetdetails_first_saved.py | 0029_tweetdetails_first_saved.py | py | 366 | python | en | code | 1 | github-code | 13 |
16822708684 | import itertools
import logging
import math
import time
from selection.candidate_generation import (
candidates_per_query,
syntactically_relevant_indexes,
)
from selection.index import Index, index_merge
from selection.selection_algorithm import DEFAULT_PARAMETER_VALUES, SelectionAlgorithm
from selection.utils import get_utilized_indexes, indexes_by_table, mb_to_b
# budget_MB: The algorithm can utilize the specified storage budget in MB.
# max_index_width: The number of columns an index can contain at maximum.
# max_runtime_minutes: The algorithm is stopped either if all seeds are evaluated or
# when max_runtime_minutes is exceeded. Whatever happens first.
# In case of the latter, the current best solution is returned.
DEFAULT_PARAMETERS = {
"budget_MB": DEFAULT_PARAMETER_VALUES["budget_MB"],
"max_index_width": DEFAULT_PARAMETER_VALUES["max_index_width"],
"max_runtime_minutes": 10,
}
# This algorithm is related to the DTA Anytime algorithm employed in SQL server.
# Details of the current version of the original algorithm are not published yet.
# See the documentation for a general description:
# https://docs.microsoft.com/de-de/sql/tools/dta/dta-utility?view=sql-server-ver15
#
# Please note, that this implementation does not reflect the behavior and performance
# of the original algorithm, which might be continuously enhanced and optimized.
class AnytimeAlgorithm(SelectionAlgorithm):
    """Greedy anytime index selection (related to SQL Server's DTA Anytime).

    Runs the greedy enumeration once per single-index seed (plus an empty
    seed) and keeps the cheapest configuration found; if the configured
    runtime budget is exceeded the best solution so far is returned, so
    interrupting early still yields a usable result.
    """

    def __init__(self, database_connector, parameters=None):
        if parameters is None:
            parameters = {}
        SelectionAlgorithm.__init__(
            self, database_connector, parameters, DEFAULT_PARAMETERS
        )
        # Storage budget is configured in MB but all comparisons use bytes.
        self.disk_constraint = mb_to_b(self.parameters["budget_MB"])
        self.max_index_width = self.parameters["max_index_width"]
        self.max_runtime_minutes = self.parameters["max_runtime_minutes"]

    def _calculate_best_indexes(self, workload):
        """Return the cheapest index set found within budget and time limits."""
        logging.info("Calculating best indexes Anytime")
        # Generate syntactically relevant candidates
        candidates = candidates_per_query(
            workload,
            self.parameters["max_index_width"],
            candidate_generator=syntactically_relevant_indexes,
        )
        # Obtain best (utilized) indexes per query
        candidates, _ = get_utilized_indexes(workload, candidates, self.cost_evaluation)
        self._add_merged_indexes(candidates)
        # Remove candidates that cannot meet budget requirements
        seeds = []
        filtered_candidates = set()
        for candidate in candidates:
            if candidate.estimated_size > self.disk_constraint:
                continue
            seeds.append({candidate})
            filtered_candidates.add(candidate)
        # For reproducible results, we sort the seeds and candidates
        seeds = sorted(seeds, key=lambda candidate: candidate)
        filtered_candidates = set(
            sorted(filtered_candidates, key=lambda candidate: candidate)
        )
        # The empty seed lets a plain greedy run (no forced first index) compete.
        seeds.append(set())
        candidates = filtered_candidates
        start_time = time.time()
        # (indexes, costs) of the cheapest configuration seen so far.
        best_configuration = (None, None)
        for i, seed in enumerate(seeds):
            logging.info(f"Seed {i + 1} from {len(seeds)}")
            candidates_copy = candidates.copy()
            candidates_copy -= seed
            current_costs = self._simulate_and_evaluate_cost(workload, seed)
            indexes, costs = self.enumerate_greedy(
                workload, seed, current_costs, candidates_copy, math.inf
            )
            if best_configuration[0] is None or costs < best_configuration[1]:
                best_configuration = (indexes, costs)
            current_time = time.time()
            consumed_time = current_time - start_time
            # Anytime behaviour: stop between seeds once the budget is spent.
            if consumed_time > self.max_runtime_minutes * 60:
                logging.info(
                    f"Stopping after {i + 1} seeds because of timing constraints."
                )
                break
            else:
                logging.debug(
                    f"Current best: {best_configuration[1]} after {consumed_time}s."
                )
        indexes = best_configuration[0]
        return list(indexes)

    def _add_merged_indexes(self, indexes):
        """Extend ``indexes`` in place with pairwise merges of same-table indexes."""
        index_table_dict = indexes_by_table(indexes)
        for table in index_table_dict:
            for index1, index2 in itertools.permutations(index_table_dict[table], 2):
                merged_index = index_merge(index1, index2)
                # Trim merged indexes that exceed the permitted column width.
                if len(merged_index.columns) > self.max_index_width:
                    new_columns = merged_index.columns[: self.max_index_width]
                    merged_index = Index(new_columns)
                if merged_index not in indexes:
                    self.cost_evaluation.estimate_size(merged_index)
                    indexes.add(merged_index)

    # based on AutoAdminAlgorithm
    def enumerate_greedy(
        self,
        workload,
        current_indexes,
        current_costs,
        candidate_indexes,
        number_indexes,
    ):
        """Greedily grow ``current_indexes`` while cost improves and budget holds.

        Recurses until no candidate lowers the workload cost, the disk
        budget rejects every candidate, or ``number_indexes`` is reached.
        Mutates ``current_indexes``/``candidate_indexes`` and returns the
        final ``(indexes, costs)`` pair.
        """
        assert (
            current_indexes & candidate_indexes == set()
        ), "Intersection of current and candidate indexes must be empty"
        if len(current_indexes) >= number_indexes:
            return current_indexes, current_costs
        # (index, cost)
        best_index = (None, None)
        logging.debug(f"Searching in {len(candidate_indexes)} indexes")
        for index in candidate_indexes:
            if (
                sum(idx.estimated_size for idx in current_indexes | {index})
                > self.disk_constraint
            ):
                # index configuration is too large
                continue
            cost = self._simulate_and_evaluate_cost(workload, current_indexes | {index})
            if not best_index[0] or cost < best_index[1]:
                best_index = (index, cost)
        if best_index[0] and best_index[1] < current_costs:
            current_indexes.add(best_index[0])
            candidate_indexes.remove(best_index[0])
            current_costs = best_index[1]
            logging.debug(f"Additional best index found: {best_index}")
            return self.enumerate_greedy(
                workload,
                current_indexes,
                current_costs,
                candidate_indexes,
                number_indexes,
            )
        return current_indexes, current_costs

    # copied from AutoAdminAlgorithm
    def _simulate_and_evaluate_cost(self, workload, indexes):
        """Evaluate workload cost under ``indexes``; rounded for stable comparisons."""
        cost = self.cost_evaluation.calculate_cost(workload, indexes, store_size=True)
        return round(cost, 2)
| hyrise/index_selection_evaluation | selection/algorithms/anytime_algorithm.py | anytime_algorithm.py | py | 6,681 | python | en | code | 68 | github-code | 13 |
40927696553 | __doc__ = """
Evaluations for experimental data
"""
__all__ = ['ground_truth', 'accuracy']
from ground_truth import GroundTruth
class Evaluator(object):
    """Precision/recall style metrics plus ground-truth data packing."""

    def __init__(self):
        pass

    def fbeta(self, precision, recall, beta=1):
        """Return the F-beta score.

        beta = 0.5 weighs precision more than recall (the F0.5 score);
        beta = 1 gives the balanced harmonic mean (the F1 score).
        The previous docstring had these two labels swapped.
        """
        return (1+beta**2)*precision*recall/((beta**2)*precision+recall)

    def praf(self, tp, fp, tn, fn):
        """Return (precision, recall, accuracy, fmeasure) from confusion counts.

        ``fmeasure`` is the F0.5 score (beta = 0.5), favouring precision.
        Counts must not make any denominator zero.
        """
        precision = tp*1.0/(tp+fp)
        recall = tp*1.0/(tp+fn)
        accuracy = (tp+tn)*1.0/(tp+tn+fp+fn)
        fmeasure = self.fbeta(precision, recall, .5)
        return (precision, recall, accuracy, fmeasure)

    def __praf_str(self, p, r, a, f):
        """Format the four metrics as a human-readable multi-line string."""
        aa = []
        aa.append("Precision: {:.3f}".format(p))
        aa.append("Recall: {:.3f}".format(r))
        aa.append("Accuracy: {:.3f}".format(a))
        aa.append("Fmeasure: {:.3f}".format(f))
        ast = ""
        for a_ in aa:
            ast += "{}\n".format(a_)
        return ast

    def __keg_pack(self, gnd, tar):
        """Build one dataset's keg dict for the targets requested in ``tar``."""
        raw = gnd.univ_df()
        keg = dict()
        keg["info"] = gnd.info()
        if "absp" in tar:
            keg["absp"] = raw[raw.sid > 0]
        if "relp" in tar:
            keg["relp"] = gnd.shrink(raw[raw.sid > 0])
        if "seg" in tar:
            keg["seg"] = gnd.segments(raw, 'duration')
        return keg

    def pack_gnd(self, roots, names, targets):
        """
        `roots`: dataset roots
        `names`: dataset names, paired with roots
        `targets`: list with data format currently support:
            `absp`: for `absolute pairs`
            `relp`: for `relative pairs`
            `seg`: for `segments` (**buggy**)
        Return packed dict data by named keys
        """
        keg = {}
        for root, name in zip(roots, names):
            gt = GroundTruth(root, name)
            ns = root + "_" + name
            keg[ns] = self.__keg_pack(gt, targets)
        return keg
| speed-of-light/pyslider | lib/exp/evaluator/__init__.py | __init__.py | py | 2,046 | python | en | code | 2 | github-code | 13 |
4346475237 | ##############################################################################
# Developed by: Matthew Bone
# Last Updated: 30/07/2021
# Updated by: Matthew Bone
#
# Contact Details:
# Bristol Composites Institute (BCI)
# Department of Aerospace Engineering - University of Bristol
# Queen's Building - University Walk
# Bristol, BS8 1TR
# U.K.
# Email - matthew.bone@bristol.ac.uk
#
# File Description:
# A range of functions designed to search LAMMPS files for information.
# These functions work for 'read_data' files and 'molecule' files
##############################################################################
from natsort import natsorted
from LammpsTreatmentFuncs import clean_data
# Get data
def get_data(sectionName, lines, sectionIndexList, useExcept = True):
    """Return the rows of a named LAMMPS section, each split on whitespace.

    With ``useExcept`` (the default) a missing section yields an empty
    list that merges harmlessly into caller data; otherwise the
    ValueError from ``list.index`` propagates so callers can branch on it.
    """
    if useExcept:
        try:
            startIndex = lines.index(sectionName)
        except ValueError:
            # Section absent from this file: nothing to return.
            return []
    else:
        startIndex = lines.index(sectionName)
    # The section body runs up to the next section keyword (or end of file).
    endIndex = sectionIndexList[sectionIndexList.index(startIndex) + 1]
    return [row.split() for row in lines[startIndex + 1:endIndex]]
def get_coeff(coeffName, settingsData):
    """Return every pre-split settings line whose first token equals ``coeffName``."""
    matches = []
    for line in settingsData:
        if line[0] == coeffName:
            matches.append(line)
    return matches
def find_sections(lines):
    """Return indices of section-keyword lines, plus a final end-of-file index.

    A section keyword is a line consisting purely of letters (no spaces,
    digits, newlines or punctuation), so ``str.isalpha`` identifies it.
    """
    sectionIndexList = []
    for line in lines:
        if line.isalpha():
            sectionIndexList.append(lines.index(line))
    # Sentinel so the last section also has an end boundary.
    sectionIndexList.append(len(lines))
    return sectionIndexList
# Search bond pair
def pair_search(bond, bondAtom):
    """Return the partner atomID when ``bondAtom`` is one end of ``bond``.

    Bond rows are shaped [bondID, bondType, atom1, atom2].  Returns None
    when ``bondAtom`` is not part of this bond.
    """
    firstAtom, secondAtom = bond[2], bond[3]
    if firstAtom == bondAtom:
        return secondAtom
    if secondAtom == bondAtom:
        return firstAtom
    return None
# Loop through atomIDs, possible bonds and find valid bonds
def search_loop(bonds, bondAtom):
    """Collect partner atomIDs for every atom in ``bondAtom`` across all bonds."""
    candidatePartners = (
        pair_search(bond, searchAtom) for searchAtom in bondAtom for bond in bonds
    )
    # pair_search returns None for bonds that do not involve the search atom.
    return [partner for partner in candidatePartners if partner is not None]
def edge_atom_fingerprint_ids(edgeAtomList, originalBondList, validAtomSet):
    """Return each edge atom's neighbour IDs that lie OUTSIDE the partial structure.

    Fix: ``get_neighbours`` takes (atomIDList, bondsList) — the previous
    call passed a third argument (a stale "bonding atoms" list) and raised
    a TypeError at runtime.
    """
    # Get edge atom neighbours across the full, original bond list.
    edgeAtomFingerprintDict = get_neighbours(edgeAtomList, originalBondList)
    # Filter out atomIDs that are within the partial structure.
    filteredFingerprintDict = {}
    for key, atomList in edgeAtomFingerprintDict.items():
        filteredFingerprintDict[key] = [
            atom for atom in atomList if atom not in validAtomSet
        ]
    return filteredFingerprintDict
def get_neighbours(atomIDList, bondsList):
    """Map each atomID in ``atomIDList`` to the atomIDs bonded to it.

    Bond rows are shaped [bondID, bondType, atom1, atom2]; both endpoints
    record each other as neighbours.  Bonding atoms get no special
    treatment here.
    """
    neighbours = {atomID: [] for atomID in atomIDList}
    for bond in bondsList:
        atomA, atomB = bond[2], bond[3]
        neighbours[atomA].append(atomB)
        neighbours[atomB].append(atomA)
    return neighbours
def get_additional_neighbours(neighboursDict, searchAtomID, searchNeighbours, bondingAtoms, unique=True):
    """Return the neighbours-of-neighbours of ``searchAtomID``.

    Designed to collect second (and, with care, third) neighbours by
    expanding each atom in ``searchNeighbours`` through ``neighboursDict``.

    Args:
        unique: When True, drop atomIDs already accounted for — the search
            atom itself, the bonding atoms (their fingerprints always
            differ pre/post reaction), the ``searchNeighbours`` that were
            expanded, and (for third-neighbour searches) the search atom's
            own first neighbours.

    Returns:
        List of neighbour atomIDs (set-derived, so order is unspecified).
    """
    pooled = set()
    for expandedNeighbour in searchNeighbours:
        pooled.update(neighboursDict[expandedNeighbour])
    if unique:
        # The origin atom is trivially a neighbour-of-neighbour; drop it.
        pooled.discard(searchAtomID)
        for bondingAtom in bondingAtoms:
            pooled.discard(bondingAtom)
        # Drop the atoms we just expanded from.
        for expandedNeighbour in searchNeighbours:
            pooled.discard(expandedNeighbour)
        # For >= third neighbours the expanded set differs from the first
        # neighbours, which must then also be excluded.
        if neighboursDict[searchAtomID] != searchNeighbours:
            for firstNeighbour in neighboursDict[searchAtomID]:
                pooled.discard(firstNeighbour)
    return list(pooled)
def element_atomID_dict(fileName, elementsByType):
    """Map every atomID in a LAMMPS file to its (uppercased) element symbol.

    ``elementsByType`` lists element symbols ordered by atom type (type 1
    first).  Works for both molecule files (a ``Types`` section) and
    standard data files (type taken from column 3 of ``Atoms``).
    Raises AssertionError when ``elementsByType`` is shorter than the
    largest type present in the file.
    """
    # Load molecule file
    with open(fileName, 'r') as f:
        lines = f.readlines()
    # Clean data and get charge
    data = clean_data(lines)
    sections = find_sections(data)
    try: # Try is for getting types from molecule file types
        types = get_data('Types', data, sections, useExcept=False)
    except ValueError: # Exception gets types from standard lammps file type
        atoms = get_data('Atoms', data, sections, useExcept=False)
        types = [[atomRow[0], atomRow[2]] for atomRow in atoms]
    typesDict = {row[0]: row[1] for row in types} # Keys: ID, Val: Type
    # Ensure elementsByType is uppercase
    elementsByTypeDict = {index+1: val.upper() for index, val in enumerate(elementsByType)} # Keys: Type, Val: Elements
    # Assert that there are enough types in elementsByType for the highest type in the types variable
    largestType = int(natsorted(types, key=lambda x: x[1])[-1][1]) # Types are stored as lists of [AtomNumber, TypeNumber]
    assert len(elementsByType) >= largestType, 'EBT (elements by type) is missing values. Check that all types are present and separated with a space.'
    elementIDDict = {key: elementsByTypeDict[int(val)] for key, val in typesDict.items()}
    return elementIDDict
def get_header(tidiedData):
    """Parse the header of a LAMMPS data file into a dictionary.

    Keys are the alphabetic keywords of each header line joined with
    underscores (e.g. ``xlo_xhi``); values are that line's numeric tokens
    as ints/floats.  Comment lines collect under the ``comment`` key.
    """
    def locate_body_start():
        # The first line after the initial comment line(s) that starts with a
        # letter marks the beginning of the body sections.
        for position, line in enumerate(tidiedData):
            if position == 0:
                continue
            if line[0] == '#':
                continue
            if line[0].isalpha():
                return position

    headerDict = {'comment': []}
    for line in tidiedData[:locate_body_start()]:
        if line[0].isalpha() or line[0] == '#':
            headerDict['comment'].extend([line])
            continue
        # Split the line into numeric values and keyword fragments:
        # try int first, then float, otherwise treat the token as a keyword.
        numericValues = []
        keywordParts = []
        for token in line.split():
            try:
                numericValues.append(int(token))
            except ValueError:
                try:
                    numericValues.append(float(token))
                except ValueError:
                    keywordParts.append(token)
        headerDict['_'.join(keywordParts)] = numericValues
    return headerDict
def convert_header(header):
    """Convert a header dictionary back to a list of string-lists for output.

    Each entry becomes [joined numeric values, *keyword parts]; the
    ``comment`` key contributes its text without any keyword tokens.
    """
    stringHeader = []
    for keyword, numbers in header.items():
        line = [' '.join(str(number) for number in numbers)]
        if keyword != 'comment':
            line.extend(keyword.split('_'))
        stringHeader.append(line)
    return stringHeader
72321967698 | from datetime import datetime, timedelta
from django.test import TestCase
from event.libs import event_queries
from event.models import Event
class TestEventQueries(TestCase):
    """Unit tests for ``event_queries`` week-scoped lookups and averages."""

    def test_get_all_week_events(self) -> None:
        """Only events dated within the current week are returned."""
        event_1 = Event.objects.create(
            name="WOD", start=datetime.now(), end=datetime.now(), slot="1"
        )
        event_2 = Event.objects.create(
            name="WOD2", start=datetime.now(), end=datetime.now(), slot="1"
        )
        # NOTE(review): despite its name, this event is dated a week in the
        # PAST (``- timedelta``), so it must be excluded from the results.
        event_3 = Event.objects.create(
            name="WOD next week",
            start=datetime.now() - timedelta(days=7),
            end=datetime.now() - timedelta(days=8),
            slot="1",
        )
        all_week_events = event_queries.get_all_week_events()
        assert event_1 in all_week_events
        assert event_2 in all_week_events
        assert event_3 not in all_week_events
        assert all_week_events.count() == 2

    def test_get_weeks_events_average_attendees(self) -> None:
        """Average attendance over this week's events: (1 + 5) / 2 == 3."""
        Event.objects.create(
            name="WOD",
            start=datetime.now(),
            end=datetime.now(),
            slot="5",
            reserved_slot="1",
        )
        Event.objects.create(
            name="WOD2",
            start=datetime.now(),
            end=datetime.now(),
            slot="5",
            reserved_slot="5",
        )
        average_attendance = event_queries.get_weeks_events_average_attendees()
        assert average_attendance == 3

    def test_get_weeks_events_average_attendees_no_event(self) -> None:
        """With no events the average attendance falls back to zero."""
        average_attendance = event_queries.get_weeks_events_average_attendees()
        assert average_attendance == 0
| AngoGG/EasyWod | event/tests/unit/test_event_queries.py | test_event_queries.py | py | 1,672 | python | en | code | 0 | github-code | 13 |
# Demo: str.format with index placeholders versus f-string interpolation.
name="ahmed mohamed abd el mntaser"
age=23
town="10th of ramadan"
Grade=3.5
exp=2
Mymoney=322242342353254325453452
ali="ali"
mohamed="mohamed"
ahmed="ahmed"
# Kept for reference: mixed format specs (:s string, :.1f one decimal, :_d underscore-grouped int).
# print("I am {:s} .My age is {} .I am living in {} .My Grade is {:.1f} .I have experience {} days .\nI have in the bank {:_d} $".format(name,age,town,Grade,exp,Mymoney))
# Index placeholders pick positional args out of order: {2}->"ahmed", {1}->"mohamed", {0}->"ali".
print("I work as a trainer and i would choose {2} {1} {0} to work first".format("ali","mohamed","ahmed"))
# Same sentence written as an f-string, interpolating the variables directly.
print(f"I work as a trainer and i would choose {ahmed} {mohamed} {ali} to work first")
| ahmedmohamedabdlmntasr/pythonAnalogautomation | stringFormatnewWay.py | stringFormatnewWay.py | py | 547 | python | en | code | 0 | github-code | 13 |
29196983065 | """
Given an array of intervals where intervals[i] = [starti, endi], merge all
overlapping intervals, and return an array of the non-overlapping intervals
that cover all the intervals in the input.
Example 1:
Input: intervals = [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].
Example 2:
Input: intervals = [[1,4],[4,5]]
Output: [[1,5]]
Explanation: Intervals [1,4] and [4,5] are considered overlapping.
Constraints:
1 <= intervals.length <= 104
intervals[i].length == 2
0 <= starti <= endi <= 104
"""
def merge(intervals):
    """Merge overlapping intervals and return the non-overlapping result.

    Sorts ``intervals`` in place by start, then sweeps once: an interval
    that touches or overlaps the last merged one extends it; otherwise it
    starts a new merged interval.  Intervals sharing an endpoint (e.g.
    [1,4] and [4,5]) are considered overlapping.
    """
    intervals.sort(key=lambda interval: interval[0])
    result = []
    for current in intervals:
        if result and result[-1][1] >= current[0]:
            # Overlap: extend the previous interval's right edge as needed.
            result[-1][1] = max(result[-1][1], current[1])
        else:
            result.append(current)
    return result
if __name__ == "__main__":
    # Demo run matching the first worked example in the module docstring.
    print(merge([[1, 3], [2, 6], [8, 10], [15, 18]]))
| sunank200/DSA | Booking.com/merged_intervals.py | merged_intervals.py | py | 1,223 | python | en | code | 0 | github-code | 13 |
29033977134 | import boto3
import openpyxl
from openpyxl import load_workbook
import pandas as pd
from pandas import ExcelWriter
import io
# global variable
bucket = ''
aws_access_key_id = ''
aws_secret_access_key = ''
s3 = boto3.resource('s3')
conn = boto3.client('s3', aws_access_key_id='aws_access_key_id', aws_secret_access_key='aws_secret_access_key')
response = conn.list_buckets()
def syncExcelToS3():
    """Assemble an Excel workbook entirely in memory and upload it to S3.

    Three (empty placeholder) DataFrames are written to one sheet: the
    first creates the sheet, the others are appended at fixed start rows.
    The workbook never touches disk — it lives in a BytesIO buffer.
    """
    # python3 is using BytesIO
    buffer = io.BytesIO()
    df_1 = pd.DataFrame()
    df_2 = pd.DataFrame()
    df_3 = pd.DataFrame()
    df_1.to_excel(buffer, sheet_name= 'Sheet1' , index=False)
    append_df_to_excel(buffer, df_2, sheet_name='Sheet1', startrow= 10, index=False)
    append_df_to_excel(buffer, df_3, sheet_name='Sheet1', startrow= 20, index=False)
    session = boto3.Session(aws_access_key_id= aws_access_key_id, aws_secret_access_key= aws_secret_access_key)
    s3 = session.resource('s3')
    # 'Full/Path' is a placeholder object key — replace with the real key.
    s3.Object(bucket, 'Full/Path').put(Body= buffer.getvalue())
def syncJsonToS3():
    """Build a small demo DataFrame and upload it to S3 as a JSON object.

    Fixes two NameErrors in the original: ``df.to_json`` was given an
    undefined ``outputBuffer`` (the StringIO is named ``jsonBuffer``) and
    the upload referenced an undefined ``Bucket`` (the module-level name
    is ``bucket``).
    """
    jsonBuffer = io.StringIO()
    Cars = {'Brand': ['Honda Civic','Toyota Corolla','Ford Focus','Audi A4'],
            'Price': [22000,25000,27000,35000]
            }
    df = pd.DataFrame(Cars,columns= ['Brand', 'Price'])
    session = boto3.Session(aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
    s3 = session.resource('s3')
    # Serialize into the in-memory buffer, then push its contents to S3.
    df.to_json(jsonBuffer, orient='values')
    s3.Object(bucket, 'PATH/TO/test.json').put(Body= jsonBuffer.getvalue())
'''
HELP Function
Append a DataFrame [df] to existing Excel file [filename]
stackoverflow: https://stackoverflow.com/questions/47737220/append-dataframe-to-excel-with-pandas
'''
def append_df_to_excel(filename, df, sheet_name='Sheet1', startrow=None, truncate_sheet=False, **to_excel_kwargs):
    """Append DataFrame ``df`` to an existing Excel file or buffer.

    ``startrow`` defaults to just below the sheet's current last row;
    ``truncate_sheet`` recreates the sheet empty before writing.  Extra
    keyword arguments pass through to ``DataFrame.to_excel`` (the caller's
    ``engine`` is discarded — openpyxl is required to append).
    """
    if 'engine' in to_excel_kwargs:
        to_excel_kwargs.pop('engine')
    writer = pd.ExcelWriter(filename, engine='openpyxl')
    # Python 2 compatibility shim: FileNotFoundError did not exist there.
    try:
        FileNotFoundError
    except NameError:
        FileNotFoundError = IOError
    try:
        # NOTE(review): assigning ``writer.book`` / ``writer.sheets`` relies on
        # older pandas ExcelWriter internals — confirm against the pinned
        # pandas version (newer releases made these read-only).
        writer.book = load_workbook(filename)
        if startrow is None and sheet_name in writer.book.sheetnames:
            startrow = writer.book[sheet_name].max_row
        if truncate_sheet and sheet_name in writer.book.sheetnames:
            # Recreate the sheet (empty) at its original position.
            idx = writer.book.sheetnames.index(sheet_name)
            writer.book.remove(writer.book.worksheets[idx])
            writer.book.create_sheet(sheet_name, idx)
        writer.sheets = {ws.title:ws for ws in writer.book.worksheets}
    except FileNotFoundError:
        # No existing workbook: to_excel below creates a fresh one.
        pass
    if startrow is None:
        startrow = 0
    df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs)
    writer.save()
if __name__ == '__main__':
    # Append multiple DataFrames to one sheet of an in-memory Excel workbook
    # (BytesIO) and upload it to S3.
    syncExcelToS3();
    # Build a DataFrame and upload it to S3 as a JSON object (StringIO).
    syncJsonToS3();
| lanhoter/S3sync | upload.py | upload.py | py | 2,940 | python | en | code | 2 | github-code | 13 |
29540856522 | from celery import Celery
from flask_cors import CORS
from flask_migrate import Migrate
from flask_socketio import SocketIO
from flask_sqlalchemy import SQLAlchemy
import ai_redditor_service.template_filters as template_filters
# Extension singletons shared across the app; each is bound to a concrete
# Flask application later via init_app().
db = SQLAlchemy()
cors = CORS()
migrate = Migrate(db=db)
celery = Celery(
    'ai_redditor_service',
    include=['ai_redditor_service.tasks']
)
# cookie=None: Socket.IO sessions are not tracked via a cookie.
socketio = SocketIO(cookie=None)
def init_app(app):
    '''
    Initializes extensions with a Flask app context.

    Wires the module-level singletons (db, cors, template filters,
    migrations, Celery, Socket.IO) to the given Flask application.
    '''
    db.init_app(app)
    cors.init_app(app)
    template_filters.init_app(app)
    _init_migrate(app)
    _init_celery(app)
    # Socket.IO reads its message queue and logging switches from app config.
    socketio.init_app(
        app, message_queue=app.config['SOCKETIO_MESSAGE_QUEUE'],
        logger=app.config['SOCKETIO_ENABLE_LOGGING'],
        engineio_logger=app.config['ENGINEIO_ENABLE_LOGGING']
    )
def _init_celery(app):
    '''
    Initializes a :class:`celery.Celery` object instance with a Flask app.

    Copies the Flask config into Celery's config and makes every task run
    inside a Flask application context.  Returns the configured Celery app.
    '''
    celery.conf.update(
        app.config,
        result_backend=app.config['CELERY_RESULT_BACKEND'],
        broker_url=app.config['CELERY_BROKER_URL']
    )
    class ContextTask(celery.Task):
        '''
        A Celery task that wraps the task execution
        in a Flask application context.
        '''
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return self.run(*args, **kwargs)
    # All subsequently-defined tasks use the context-aware base class.
    celery.Task = ContextTask
    return celery
def _init_migrate(app):
    '''Initializes Flask-Migrate; SQLite needs batch mode for ALTER TABLE support.'''
    is_sqlite = app.config['SQLALCHEMY_DATABASE_URI'].startswith('sqlite:')
    migrate.init_app(app, render_as_batch=is_sqlite)
| galacticglum/ai-redditor | web_service/ai_redditor_service/extensions.py | extensions.py | py | 1,620 | python | en | code | 14 | github-code | 13 |
3532516102 | from evaluate import logger, EvaluatorBase
class Evaluator(EvaluatorBase):
    """Evaluates parser output against gold dependency edges, per sentence."""

    def __init__(self, parser, tagger):
        EvaluatorBase.__init__(self)
        self.parser = parser
        self.tagger = tagger
        # This is to avoid problem in spark mode: copy the tagger's weight
        # vector into a plain dict so it can be serialised to workers.
        if tagger is not None:
            wv = {}
            for key in self.tagger.w_vector:
                wv[key] = self.tagger.w_vector[key]
            self.tagger.w_vector = wv
        return

    def sentence_evaluator(self, sent, w_vector):
        """Parse ``sent`` with ``w_vector`` and score against the gold edges.

        Returns (number of correct edges, number of gold edges, parsed
        edge set) for aggregation by the evaluation driver.
        """
        gold_edge_set = \
            set([(head_index, dep_index)
                 for head_index, dep_index, _ in sent.get_edge_list()])
        sent_len = len(sent.get_word_list())  # NOTE(review): unused; kept for parity
        test_edge_set = \
            self.parser.parse(sent, w_vector.get_vector_score, self.tagger)
        # Normalise both edge collections to sets before intersecting.
        if isinstance(test_edge_set, list):
            test_edge_set = set(test_edge_set)
        if isinstance(gold_edge_set, list):
            gold_edge_set = set(gold_edge_set)
        intersect_set = test_edge_set.intersection(gold_edge_set)
        correct_num = len(intersect_set)
        return correct_num, len(gold_edge_set), test_edge_set
| sfu-natlang/glm-parser | src/evaluate/parser_evaluator.py | parser_evaluator.py | py | 1,153 | python | en | code | 28 | github-code | 13 |
msg = "we will be seeing the horizon"
# split() with no argument breaks on runs of whitespace.
words = msg.split()
print(words)
# Splitting on a character removes that character from the pieces.
words = msg.split('e')
print(words)
# Replace: returns a new string with every occurrence substituted.
update_msg = msg.replace('seeing','viewing')
print(update_msg)
# Join: glue a list of path parts together with '/'.
path = ['aman','avinash','mypc']
content = '/'.join(path)
print(content)
# Strip(): trim surrounding whitespace.
name =' Avinash '
cleaned_name = name.strip()
print(cleaned_name)
# WAP to find frequency of all the vowels in the files
from helper import read
data = read('pride_n_prejudice.txt')
for vowel in 'aeiou':
    print(f'{vowel} => {data.lower().count(vowel)} times')
# WAP to remove all the punctations from the string
# NOTE(review): ``str`` shadows the builtin from here on.
str='he@#ll%o!@&*(!@wo!@r,l!d)'
from string import punctuation
for p in punctuation:
    str=str.replace(p,'')
print(str)
30416101913 | # ***********
# ¿HAY STOCK?
# ***********
def run(stock: dict, merch: str, amount: int) -> bool:
    """Return True when ``stock`` holds at least ``amount`` units of ``merch``.

    Unknown items count as zero stock, so they are only "available" when
    ``amount`` is zero or negative.
    """
    item_stock = stock.get(merch, 0)
    return item_stock >= amount
if __name__ == "__main__":
    # Demo call: 9 cups requested against 11 in stock -> available.
    run({"pen": 20, "cup": 11, "keyring": 40}, "cup", 9)
74816572176 | import unittest
numbers = [99, 46, 6, 2, 1, 5, 63, 87, 283, 4, 0]
def bubbleSort(array):
    """Bubble-sort ``array`` in place (ascending) and return it.

    Raises TypeError when the argument is not a list, so string/tuple
    inputs fail loudly instead of being silently "sorted".
    """
    if not isinstance(array, list):
        raise TypeError
    length = len(array)
    for completed in range(length):
        # After each pass the largest remaining value has bubbled to the
        # end, so the inner sweep shrinks by one.
        for j in range(length - completed - 1):
            if array[j] > array[j + 1]:
                array[j], array[j + 1] = array[j + 1], array[j]
    return array
# bubbleSort(numbers)
class TestBubbleSort(unittest.TestCase):
    """Unit tests covering sorting behaviour and input-type validation."""

    def test_bubbleSort(self):
        # The sorted list is returned (and the input is sorted in place).
        self.assertEqual(bubbleSort([2, 1, 5, 63, 87]), [1, 2, 5, 63, 87])

    def test_bubbleSort_non_array(self):
        # A string argument must raise TypeError rather than be "sorted".
        with self.assertRaises(TypeError):
            bubbleSort('[2,1,5,63,87]')
if __name__ == '__main__':
    # Run the unit tests when this module is executed directly.
    unittest.main()
| valentinesamuel/EssentialOrigin | algos/bubble_sort.py | bubble_sort.py | py | 650 | python | en | code | 0 | github-code | 13 |
from collections import Counter
# a) Sorting a dictionary by key.
D = {'c':1, 'b':2, 'a':3, 'e':1, 'd':3}
for k in sorted(D.keys()):
    print(k, D[k])
# b) Keep only the items whose value occurs exactly once in the dict.
D = {'a':1, 'b':2, 'c':3, 'd':1, 'e':3}
c = Counter(D.values())
result = {}
for k,v in D.items():
    if c[v] == 1:
        result[k] = v
print(result)
# c) Unique values across a list of dictionaries (via a set).
L = [{"V":"S001"}, {"V": "S002"}, {"VI": "S001"}, {"VI": "S005"}, {"VII":"S005"}, {"V":"S009"},{"VIII":"S007"}]
s = set()
for k in L:
    for v in k.values():
        s.add(v)
print(s)
# d) Sum the values that share the same key across dicts (Counter addition).
L = [{'a':1, 'b':2, 'c':3},{'a':5,'b':4,'c':2}]
result = Counter()
for d in L:
    result = result + Counter(d)
print(result)
# e) Merge two parallel lists into one dict (via zip).
keys = ['red', 'green', 'blue']
values = ['#FF0000','#008000', '#0000FF']
color_dictionary = dict(zip(keys, values))
print(color_dictionary)
| RedFoxNL/Python | Python week opdrachten/Week2/Opgave 3.py | Opgave 3.py | py | 970 | python | nl | code | 0 | github-code | 13 |
71947366418 | from tkinter import filedialog, messagebox
import tkinter as tk
import xlsxwriter
import pandas as pd
from app.v1.schema.chart_schema import Chart
from app.v1.schema.table_schema import Table
from app.v1.utils.exceptions import custom_exception
from app.v1.utils.hour_helper import get_datetime
from app.v1.utils.settings import Settings
settings = Settings()
FILE_PATH = settings.file_path
def BrowseFile(top):
    """Open a native file-picker dialog restricted to .xlsx files.

    Returns the selected path, or the literal string "No file selected."
    when the user cancels the dialog.
    """
    chosen = filedialog.askopenfilename(
        title="Browse File",
        filetypes=(("Excel Files", "*.xlsx"),),
        parent=top,
    )
    # askopenfilename returns an empty value on cancel.
    return chosen if chosen else "No file selected."
def GetDataByRows(path: str):
    """Read an .xlsx workbook into a list of row dicts.

    Each element maps column header -> cell value (one dict per row).
    On any parsing/IO failure an error is reported via custom_exception
    and an empty list is returned.
    """
    try:
        data = pd.read_excel(path, engine="openpyxl")
        return data.to_dict("records")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; also fixed the ".xslx" typo in the message.
        custom_exception("error", "Cannot open a non-excel file with .xlsx extension")
        return []
def format_excel_one_sheet(df, filename, sheet_name):
    """Write `df` to `filename` as a single formatted xlsx sheet.

    The header row is rewritten manually (bold, wrapped, green fill) with an
    extra "id" column for the index; data cells get centred wrapping.
    Returns True once the workbook is written; returns None (implicitly) if
    the user answers "n" to the retry prompt after a write failure.

    NOTE(review): the `input()` retry loop blocks — only suitable for the
    interactive desktop flow this app runs in.
    """
    writer = pd.ExcelWriter(filename, engine="xlsxwriter")
    # startrow=1 / header=False: data only; headers are written below.
    df.to_excel(writer, sheet_name=sheet_name, startrow=1, header=False)
    # Get the xlsxwriter objects from the dataframe writer object.
    workbook = writer.book
    worksheet = writer.sheets[sheet_name]
    # Get the dimensions of the dataframe.
    (max_row, max_col) = df.shape
    header_format = workbook.add_format(
        {
            "bold": True,
            "text_wrap": True,
            "valign": "top",
            "fg_color": "#D7E4BC",
            "border": 1,
        }
    )
    cell_format = workbook.add_format({"text_wrap": True})
    cell_format.set_align("vcenter")
    cell_format.set_align("center")
    # Column 0 holds the DataFrame index; label it "id".
    worksheet.write(0, 0, "id", header_format)
    # Write the column headers with the defined format.
    for col_num, value in enumerate(df.columns.values):
        worksheet.write(0, col_num + 1, value, header_format)
    # Width 30 + centred wrapping for all data columns.
    worksheet.set_column(
        1,
        max_col,
        30,
        cell_format,
    )
    # Closing actually writes the file; retry while the target is locked
    # (typically because the file is open in Excel).
    while True:
        try:
            workbook.close()
            return True
        except xlsxwriter.exceptions.FileCreateError as e:
            decision = input(
                "Exception caught in workbook.close(): %s\n"
                "Please close the file if it is open in Excel.\n"
                "Try to write file again? [Y/n]: " % e
            )
            if decision != "n":
                continue
            break
def format_excel_multiple_sheets(
    list_df: list,
    filename,
    list_sheet_name: list,
    list_charts: list,
    list_autofiller: list,
):
    """Write several DataFrames to one workbook, one formatted sheet each.

    Per sheet i: headers are restyled (bold/wrapped/green), an optional
    Chart object from list_charts[i] is inserted next to the data, and an
    autofilter is applied when list_autofiller[i] is truthy.

    NOTE(review): if len(list_df) != len(list_sheet_name) the function
    silently writes nothing — consider raising instead.
    """
    with pd.ExcelWriter(filename, engine="xlsxwriter") as writer:
        df_count = list_df.__len__()
        sheet_count = list_sheet_name.__len__()
        if df_count == sheet_count:
            for i in range(df_count):
                list_df[i].to_excel(writer, sheet_name=list_sheet_name[i])
                # Get the xlsxwriter objects from the dataframe writer object.
                workbook = writer.book
                worksheet = writer.sheets[list_sheet_name[i]]
                # Get the dimensions of the dataframe.
                (max_row, max_col) = list_df[i].shape
                header_format = workbook.add_format(
                    {
                        "bold": True,
                        "text_wrap": True,
                        "valign": "top",
                        "fg_color": "#D7E4BC",
                        "border": 1,
                    }
                )
                # Column 0 holds the DataFrame index; label it "id".
                worksheet.write(0, 0, "id", header_format)
                cell_format = workbook.add_format({"text_wrap": True})
                cell_format.set_align("vcenter")
                cell_format.set_align("center")
                # Write the column headers with the defined format.
                for col_num, value in enumerate(list_df[i].columns.values):
                    worksheet.write(0, col_num + 1, value, header_format)
                worksheet.set_column(1, max_col, 30, cell_format)
                if list_charts[i]:
                    # Build the chart described by the Chart schema object.
                    chart_object: Chart = list_charts[i]
                    if chart_object.subtype:
                        chart = workbook.add_chart(
                            {
                                "type": chart_object.chart_type,
                                "subtype": chart_object.subtype,
                            }
                        )
                    else:
                        chart = workbook.add_chart({"type": chart_object.chart_type})
                    chart.set_style(10)
                    for serie in chart_object.series:
                        chart.add_series(
                            {
                                "name": serie.name,
                                "categories": serie.categories,
                                "values": serie.values,
                                "data_labels": {"percentage": True},
                            }
                        )
                    chart.set_title({"name": chart_object.chart_title})
                    # Place the chart two columns to the right of the data.
                    worksheet.insert_chart(
                        0, max_col + 2, chart, {"x_offset": 25, "y_offset": 10}
                    )
                if list_autofiller[i]:
                    worksheet.autofilter(0, 0, max_row, max_col)
def create_dataframe(dataTable: Table):
    """Build a DataFrame from a Table's data/headers/index, sorted by index."""
    frame = pd.DataFrame(
        data=dataTable.data,
        columns=dataTable.headers,
        index=dataTable.index,
    )
    # Return the index-sorted view; callers only use the return value.
    return frame.sort_index()
def create_excel(dataTable: Table):
    """Create a timestamped xlsx report for `dataTable` under app/v1/reports.

    Returns the written filename on success, False on failure (after showing
    a dialog either way).

    NOTE(review): the path is built with literal backslashes (Windows-only),
    and "\\{" in the f-string is an invalid escape sequence that happens to
    survive as a backslash — consider os.path.join/pathlib. The success
    dialog text also looks truncated/redacted ("(unknown)") — confirm.
    """
    df = create_dataframe(dataTable)
    now = get_datetime()
    folder_path = r"app\v1\reports"
    report = "report"
    filename = f"{FILE_PATH}\{folder_path}\{dataTable.name}\{report}_{dataTable.name}_{now}.xlsx"
    try:
        format_excel_one_sheet(df=df, filename=filename, sheet_name=dataTable.name)
        messagebox.showinfo(
            "FIlE CREATED", f"File created succesfully in path (unknown)"
        )
        return filename
    # NOTE(review): bare except hides real errors (including SystemExit);
    # should be `except Exception`.
    except:
        custom_exception("FILE ERROR", "Report can't be created")
        return False
| marianamartineza/kunaisoft-interface-tkinter | app/v1/utils/file_management.py | file_management.py | py | 6,314 | python | en | code | 0 | github-code | 13 |
43403150223 | import edm.dnnlib as dnnlib
import pickle
from datasets.mri_dataloaders import get_mvue
from problems.fourier_multicoil import MulticoilForwardMRINoMask
import torch
import numpy as np
from tqdm import tqdm
def normalize(x, x_min, x_max):
    """Map x from [x_min, x_max] onto approximately [-1, 1]."""
    unit = (x - x_min) / (x_max - x_min)  # -> [0, 1]
    return 2 * unit - 1                   # -> [-1, 1]
def unnormalize(x, x_min, x_max):
    """Inverse of normalize(): map x from appx [-1, 1] back to [x_min, x_max]."""
    unit = (x + 1) / 2                    # -> [0, 1]
    return x_min + unit * (x_max - x_min)
class Dummy:
    """Placeholder operator container: DPS only reads/writes its `s_maps`.

    The caller is expected to assign coil sensitivity maps to
    `H_funcs.s_maps` before DPS.__call__ runs.
    """
    def __init__(self):
        # Sensitivity maps; None until attached by the caller.
        self.s_maps = None
class DPS:
    """Diffusion Posterior Sampling for multicoil MRI reconstruction.

    Wraps a pretrained EDM diffusion model (loaded from a pickle by
    _init_net) and runs the EDM/Karras stochastic sampler, adding a
    measurement-likelihood gradient step at every iteration.
    """
    def __init__(self, hparams, args, c, device=None):
        # `c` is the k-space sampling mask; hparams.net carries sampler knobs.
        self.device = device
        self.hparams = hparams
        self.args = args
        self.likelihood_step_size = self.hparams.net.likelihood_step_size
        self.steps = self.hparams.net.steps
        # Exponent of the Karras time-step discretisation below.
        self.rho = 7.0
        # S_* parameters control the EDM "churn" (extra noise injection).
        self.S_churn = self.hparams.net.S_churn
        self.S_min = 0
        self.S_max = float('inf')
        self.S_noise = 1.0
        self._init_net()
        self.c = c.detach().clone() #[N, 1, H, W]
        # s_maps must be attached to H_funcs by the caller before __call__.
        self.H_funcs = Dummy()
    def __call__(self, x_mod, y):
        """Reconstruct images from measurements `y`, starting from noise `x_mod`.

        y: masked k-space measurements; if given as a trailing real/imag pair
        ([..., 2]) it is converted to a complex tensor first.
        Returns the unnormalized reconstruction (same batch shape as x_mod).
        """
        if len(y.shape) > 4:
            y = torch.complex(y[:, :, :, :, 0], y[:, :, :, :, 1])
        mask = self.c.clone()
        maps = self.H_funcs.s_maps.clone()
        ref = mask * y
        #set up forward operator: coil-wise FFT followed by the sampling mask
        FS = MulticoilForwardMRINoMask(maps)
        A = lambda x: mask * FS(x)
        #grab mvue from undersampled measurements and estimate normalisation
        # factors with it
        estimated_mvue = torch.tensor(
            get_mvue(ref.cpu().numpy(),
            maps.cpu().numpy()), device=ref.device)#[N, H, W] complex
        estimated_mvue = torch.view_as_real(estimated_mvue) #[N, H, W, 2] float
        # Per-sample min/max used to map between network scale (~[-1,1])
        # and measurement scale.
        norm_mins = torch.amin(estimated_mvue, dim=(1,2,3), keepdim=True) #[N, 1, 1, 1]
        norm_maxes = torch.amax(estimated_mvue, dim=(1,2,3), keepdim=True) #[N, 1, 1, 1]
        #default labels are zero unless specified
        # [N, label_dim]
        class_labels = None
        if self.net.label_dim:
            class_labels = torch.zeros((ref.shape[0], self.net.label_dim), device=ref.device)
        # Time step discretization (Karras schedule with exponent rho).
        step_indices = torch.arange(self.steps, dtype=torch.float64, device=ref.device)
        t_steps = (self.sigma_max ** (1 / self.rho) + step_indices / (self.steps - 1) * (self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho))) ** self.rho
        t_steps = torch.cat([self.net.round_sigma(t_steps), torch.zeros_like(t_steps[:1])]) # t_N = 0
        # Main sampling loop.
        x_next = x_mod.to(torch.float64) * t_steps[0]
        for i, (t_cur, t_next) in tqdm(enumerate(zip(t_steps[:-1], t_steps[1:]))): # 0, ..., N-1
            x_cur = x_next
            # Increase noise temporarily ("churn") while t is in [S_min, S_max].
            gamma = min(self.S_churn / self.steps, np.sqrt(2) - 1) if self.S_min <= t_cur <= self.S_max else 0
            t_hat = self.net.round_sigma(t_cur + gamma * t_cur)
            x_hat = x_cur + (t_hat ** 2 - t_cur ** 2).sqrt() * self.S_noise * torch.randn_like(x_cur)
            x_hat = x_hat.requires_grad_() #starting grad tracking with the noised img
            # Euler step.
            denoised = self.net(x_hat, t_hat, class_labels).to(torch.float64)
            d_cur = (x_hat - denoised) / t_hat
            x_next = x_hat + (t_next - t_hat) * d_cur
            # Likelihood step: pull the sample toward the measurements.
            denoised_unscaled = unnormalize(denoised, norm_mins, norm_maxes) #we need to undo the scaling to [-1,1] first
            Ax = A(denoised_unscaled)
            residual = ref - Ax
            sse_per_samp = torch.sum(torch.square(torch.abs(residual)), dim=(1,2,3), keepdim=True) #[N, 1, 1, 1]
            sse = torch.sum(sse_per_samp)
            # d(sse)/d(x_hat), backpropagated through the denoiser.
            likelihood_score = torch.autograd.grad(outputs=sse, inputs=x_hat)[0]
            # DPS update: step size scaled by 1/sqrt(sse) per sample.
            x_next = x_next - (self.likelihood_step_size / torch.sqrt(sse_per_samp)) * likelihood_score
            # Cleanup: drop the autograd graph before the next iteration.
            x_next = x_next.detach()
            x_hat = x_hat.detach()
        return unnormalize(x_next, norm_mins, norm_maxes)
    def set_c(self, c):
        """Replace the sampling mask, matching dtype/device of the current one."""
        self.c = c.detach().clone().type_as(self.c).to(self.c.device)
    def _init_net(self):
        """Load the EMA diffusion network from hparams.net.config_dir (pickle)."""
        with dnnlib.util.open_url(self.hparams.net.config_dir, verbose=self.hparams.verbose) as f:
            net = pickle.load(f)['ema'].to(self.device)
        self.net = net
        # Clamp sigma range to the EDM defaults [0.002, 80].
        self.sigma_min = max(self.net.sigma_min, 0.002)
        self.sigma_max = min(self.net.sigma_max, 80)
        return
| Sriram-Ravula/MRI_Sampling_Diffusion | algorithms/dps.py | dps.py | py | 4,601 | python | en | code | 4 | github-code | 13 |
17899335544 | import base64
import re
HEADER_FROM = 14
HEADER_TO = 15
HEADER_SUBJECT = 16
HEADER_DATE = 17
def getParamFromHeader(messageHeaders, nameOfParam):
    """Return the value of the first header whose 'name' matches, else None."""
    matches = (h['value'] for h in messageHeaders if h['name'] == nameOfParam)
    return next(matches, None)
class email:
    """Parsed Gmail API message with `$$`-delimited parameters in the subject.

    NOTE(review): the class name shadows the stdlib `email` module — importing
    `email` elsewhere in this project after this module would be confusing.
    These are class attributes shared as defaults; instance attributes are
    assigned in __init__ / the parse methods.
    """
    sender = None
    subject = None
    parameters = None
    date = None
    validEmail = True
    # Delimiter wrapping the parameter section inside the subject line.
    boundaryString = '$$'
    def __init__(self,message):
        # `message` is a Gmail API message resource (payload.headers list).
        self.subject = getParamFromHeader(message ['payload']['headers'], 'Subject')
        self.date = getParamFromHeader(message ['payload']['headers'], 'Date')
        self.sender = getParamFromHeader(message ['payload']['headers'], 'From')
        self.parseSubjectForParams()
    def parseSubjectForParams(self):
        """Split the $$-delimited subject section into a list of words."""
        self.parameters = []
        for word in self.subjectAuth(self.subject).split():
            self.parameters.append(word)
    def parseBodyForParams(self,messages):
        """Extract name/value parameters from a (multipart) message body.

        NOTE(review): several apparent bugs here — `str(b64decode(...))`
        yields a "b'...'" literal; the else-branch concatenates bytes onto a
        str (TypeError on Python 3) and probably wants messages['body']['data'];
        confirm against real Gmail payloads before relying on this path.
        """
        text = " "
        self.parameters = {}
        if 'parts' in messages:
            messages = messages['parts']
            for message in messages:
                strin = str(message['body']['data'])
                text = str(text) + str(base64.urlsafe_b64decode(strin))
        else:
            text = text + base64.urlsafe_b64decode(messages['body'])
        text = text.encode().decode()
        paramText = self.authentication(text)
        print(paramText)
        # Alternate words are treated as name then value; a trailing '='
        # (base64 padding artefact) is stripped from names.
        paramName = None
        for word in paramText.split():
            if not isinstance(word, str):
                pass;
            if paramName is None and paramName != '\n':
                paramName = word
                if paramName[len(paramName) - 1] == '=':
                    paramName = paramName[:-1]
            else:
                if word[0] != '=':
                    self.parameters.update({paramName: word})
                    paramName = None
    def authentication(self,data):
        """Strip HTML then remove every filter line found in the filter file.

        NOTE(review): `self.filterFileName` is never defined in this class —
        callers must set it (or a subclass must provide it) or this raises
        AttributeError. The file handle is also never closed.
        """
        f = open(self.filterFileName, "r")
        data = self.cleanhtml(data)
        for line in f:
            line = line.replace('\n','')
            #line = line.replace('\','',1)
            data = data.replace(line,"")
        return data
    def cleanhtml(self,raw_html):
        """Remove HTML tags with a non-greedy regex."""
        cleanr = re.compile('<.*?>')
        cleantext = re.sub(cleanr, '', raw_html)
        return cleantext
    def print(self):
        """Dump the parsed fields to stdout (debug helper)."""
        print('Subject:' + self.subject)
        print('From:' + self.sender)
        print('Date:' + self.date)
        print('Parameters:' + str(self.parameters))
    def subjectAuth(self, subject):
        """Return the substring between the first two boundary markers."""
        firstIndex = subject.find(self.boundaryString)
        secondIndex = subject.find(self.boundaryString,firstIndex + 1)
        return subject[firstIndex + self.boundaryString.__len__():secondIndex]
'adawdwdw TEST $$ EOS SHORT 2hr $$' | EkrlDev/TradingViewTrader | extractedEmail.py | extractedEmail.py | py | 2,799 | python | en | code | null | github-code | 13 |
32083374586 | import datetime
import os
import random
import time
import cv2
import matplotlib.image as mpimg
import numpy as np
import scipy.misc
import skimage as sk
# Brightness: multiplicative range applied to the HSV value (V) channel.
BRIGHTNESS_PERCENT_LOWER = 0.4
BRIGHTNESS_PERCENT_HIGHER = 1.2
# Blur: candidate Gaussian kernel sizes (odd, as OpenCV requires) and
# sigma ranges along x/y.
KERNEL_SIZE_POSSIBILITIES = [7, 13, 15]
SIGMA_X_LOWER = 0
SIGMA_X_UPPER = 5
SIGMA_Y_LOWER = 0
SIGMA_Y_UPPER = 5
# Rotation: total random angle span in degrees, centred on 0.
ANGLE_CHANGE_MAX = 90
# Translation: random pixel-offset ranges.
TRANSLATION_X_LOWER = 2.5
TRANSLATION_X_UPPER = 5.0
TRANSLATION_Y_LOWER = 2.5
TRANSLATION_Y_UPPER = 5.0
# Shearing: spread (pixels) of the random shear control points.
SHEAR_RANGE = 7
def change_brightness(image):
    """Randomly scale the image's brightness via the HSV value channel.

    A factor is drawn uniformly from
    [BRIGHTNESS_PERCENT_LOWER, BRIGHTNESS_PERCENT_HIGHER]. Scaled values
    are clipped to [0, 255]: the original cast the product straight back
    into the uint8 HSV array, so factors > 1 wrapped modulo 256 and turned
    bright pixels into dark speckles.
    """
    percent_change = random.uniform(BRIGHTNESS_PERCENT_LOWER, BRIGHTNESS_PERCENT_HIGHER)
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    scaled = hsv[:, :, 2].astype(np.float32) * percent_change
    hsv[:, :, 2] = np.clip(scaled, 0, 255).astype(np.uint8)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
def add_noise(image):
    """Return a copy of `image` with additive Gaussian noise.

    NOTE(review): skimage.util.random_noise returns a float64 image scaled
    to [0, 1], not uint8 — downstream warpAffine accepts floats, but callers
    expecting uint8 output should rescale.
    """
    noisy_image = sk.util.random_noise(image, mode='gaussian', seed=None, clip=True)
    return noisy_image
def blur_image(image):
    """Apply a Gaussian blur with a randomly chosen kernel size and sigmas."""
    # random.choice is the idiomatic (and equivalent) form of
    # list[randint(0, len-1)].
    kernel_size = random.choice(KERNEL_SIZE_POSSIBILITIES)
    sigma_x = random.randint(SIGMA_X_LOWER, SIGMA_X_UPPER)
    sigma_y = random.randint(SIGMA_Y_LOWER, SIGMA_Y_UPPER)
    return cv2.GaussianBlur(image, ksize=(kernel_size, kernel_size), sigmaX=sigma_x, sigmaY=sigma_y)
def apply_all_transforms(image):
    """Apply the full random augmentation pipeline to one RGB image.

    Always adjusts brightness; blur and noise each fire with 20% probability,
    shearing with 25%; rotation and translation are always applied.
    NOTE(review): add_noise returns a float [0,1] image, so subsequent cv2
    calls operate on floats for that branch — confirm downstream consumers
    tolerate mixed dtypes.
    """
    image = change_brightness(image)
    # 20% probability of blur
    if random.randint(1, 5) == 1:
        image = blur_image(image)
    # 20% probability of noise
    if random.randint(1, 5) == 1:
        image = add_noise(image)
    # Random angle in [-ANGLE_CHANGE_MAX/2, +ANGLE_CHANGE_MAX/2] degrees.
    angle_change = np.random.uniform(ANGLE_CHANGE_MAX) - (ANGLE_CHANGE_MAX / 2)
    rows, columns, channels = image.shape
    rotation_matrix = cv2.getRotationMatrix2D((columns / 2, rows / 2), angle_change, 1)
    translation_x = np.random.uniform(TRANSLATION_X_LOWER, TRANSLATION_X_UPPER)
    translation_y = np.random.uniform(TRANSLATION_Y_LOWER, TRANSLATION_Y_UPPER)
    translation_matrix = np.float32([[1, 0, translation_x], [0, 1, translation_y]])
    # Shear: jitter two corners of a reference triangle and derive the
    # affine transform mapping pts1 -> pts2.
    pts1 = np.float32([[5, 5], [20, 5], [5, 20]])
    pt1 = 5 + SHEAR_RANGE * np.random.uniform() - SHEAR_RANGE / 2
    pt2 = 20 + SHEAR_RANGE * np.random.uniform() - SHEAR_RANGE / 2
    pts2 = np.float32([[pt1, 5], [pt2, pt1], [5, pt2]])
    shear_matrix = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, rotation_matrix, (columns, rows))
    image = cv2.warpAffine(image, translation_matrix, (columns, rows))
    # 25% probability of shearing
    if random.randint(1, 4) == 1:
        image = cv2.warpAffine(image, shear_matrix, (columns, rows))
    return image
PROJECT_PATH = "/Users/mateuszdziubek/Desktop/BeerAI-Data"
def generate(amount_per_image, directory):
    """Write `amount_per_image` augmented copies of every image in `directory`.

    Output files are saved back into the same directory with a timestamped
    "generated<time>.jpg" name (so re-running also augments prior output).
    """
    images = []
    for image_name in os.listdir(directory):
        # Compare the extension case-insensitively but keep the ORIGINAL
        # name: the previous code lower-cased the whole filename and then
        # opened the lower-cased path, which fails on case-sensitive
        # filesystems for names like "Photo.JPG".
        if image_name.lower().endswith((".jpg", ".jpeg", ".png", ".bmp")):
            images.append(image_name)
    for image in images:
        for _ in range(amount_per_image):
            generated_image = apply_all_transforms(mpimg.imread(f"{directory}/{image}"))
            # NOTE(review): scipy.misc.toimage was removed in SciPy 1.2 —
            # this only runs on very old SciPy; consider imageio/Pillow.
            scipy.misc.toimage(generated_image).save(f"{directory}/generated{time.time()}.jpg")
def generate_for_all(amount_per_image, container_directory):
    """Run generate() on every non-hidden subfolder of `container_directory`."""
    for folder_name in os.listdir(container_directory):
        if folder_name.startswith("."):
            continue  # skip hidden entries such as .DS_Store
        generate(amount_per_image, f"{container_directory}/{folder_name}")
def save_labels(container_directory):
    """Write the non-hidden subfolder names of `container_directory` (one per
    line) to a timestamped file under PROJECT_PATH/data/labels.

    Fix: the output file is now opened with a context manager — the original
    never closed the handle, so the labels might never be flushed to disk.
    """
    labels = [name for name in os.listdir(container_directory)
              if not name.startswith(".")]
    # NOTE(review): str(datetime.now()) contains ':' characters, which are
    # illegal in Windows filenames — confirm the target platform.
    file_path = f"{PROJECT_PATH}/data/labels/{str(datetime.datetime.now())}.txt"
    with open(file_path, 'w') as file:
        for label in labels:
            file.write("%s\n" % label)
save_labels(f"{PROJECT_PATH}/data/train")
# generate_for_all(150, f"{PROJECT_PATH}/data/train")
# generate(150, f"{PROJECT_PATH}/data/train/001harnas")
# generate(150, f"{PROJECT_PATH}/data/train/002kasztelan_niepaster")
# generate(150, f"{PROJECT_PATH}/data/train/003kasztelan_pszen")
# generate(150, f"{PROJECT_PATH}/data/train/004miloslaw_nieb")
# generate(150, f"{PROJECT_PATH}/data/train/005perla_chmiel")
# generate(150, f"{PROJECT_PATH}/data/train/006perla_export")
# generate(150, f"{PROJECT_PATH}/data/train/007somersby")
# generate(150, f"{PROJECT_PATH}/data/train/008warka")
# generate(150, f"{PROJECT_PATH}/data/train/009wojak")
# generate(150, f"{PROJECT_PATH}/data/train/010zywiec_bialy")
| matdziu/BeerAI-Data | ImageGenerator.py | ImageGenerator.py | py | 4,606 | python | en | code | 0 | github-code | 13 |
3249891441 | #########################################################################
## 2021
## Authors: David Richter and Ricardo A. Calix, Ph.D.
## Paper:
## Deep Q AP for X-Plane 11
##
#########################################################################
import tensorflow as tf
from tensorflow.keras import Input
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import categorical_crossentropy
from collections import deque
import numpy as np
import random
#########################################################################
class DeepQLearn():
    """Deep Q-Network agent with a small replay memory (Keras/TensorFlow).

    States are one-hot encoded via np.identity(numOfInputs); the network
    maps a one-hot state to a Q-value per action.
    """
    def __init__(self, n_stat, n_acts, gamma, learningRate, q_lr, trainBatchSize, memBatchSize, memorySize):
        # n_stat: number of discrete states; n_acts: number of actions.
        # gamma: discount factor; q_lr: Bellman update step size;
        # learningRate: optimizer LR; memorySize: replay buffer capacity.
        self.numOfInputs = n_stat
        self.numOfOutputs = n_acts
        self.memorySize = memorySize
        self.memoryBatchSize = memBatchSize
        # deque(maxlen=...) silently drops the oldest experience when full.
        self.memoryQueue = deque(maxlen=self.memorySize)
        self.learningRate = learningRate
        self.gamma = gamma
        self.q_lr = q_lr
        self.trainBatchSize = trainBatchSize
        self.model = self.createModel()
    ######################################################################################
    def createModel(self):
        """Build the MLP: one-hot state -> Q-value per action (MSE loss).

        NOTE(review): Adam(lr=...) is the legacy keyword; recent Keras
        expects learning_rate= — confirm the pinned TF version.
        """
        model = Sequential()
        model.add( Input( shape=(self.numOfInputs, ) ) )
        model.add(Dense(int(self.numOfInputs ), activation='relu'))
        model.add(Dense(int(self.numOfInputs * 1.0), activation='relu'))
        model.add(Dense(int(self.numOfInputs * 0.8 ), activation='relu'))
        #model.add(Dense( self.numOfOutputs, activation='linear')) ## in most papers this is linear ...??
        model.add(Dense( self.numOfOutputs ))
        model.compile(loss="mse", optimizer=Adam(lr=self.learningRate))
        model.summary()
        return model
    ######################################################################
    def select_action(self, state, episode, n_epochs):
        """Pick an action for `state` with a decaying exploration schedule.

        First 30% of training: 50/50 chance of a uniformly random action;
        until 75%: Q-values perturbed by noise shrinking as 1/(episode+1);
        afterwards: pure argmax. Returns (action, q_vector).
        NOTE(review): random.randint(0, 5) hard-codes 6 actions — should
        presumably be self.numOfOutputs - 1; confirm before reuse.
        """
        action = 0
        state_vector = np.identity(self.numOfInputs)[state:state+1]
        actions_q_vector = self.model.predict(state_vector)
        more_random = random.randint(5, 6)
        if ( (episode/n_epochs) < 0.30 and more_random % 2 == 0 ): ## every even is random
            action = random.randint(0, 5)
            print("random 0")
        elif ( (episode/n_epochs) < 0.75 ):
            ## this is a vector of size 6 with random normal distribution data
            added_randomness = np.random.randn( 1, self.numOfOutputs ) * ( 1.0/(episode+1) )
            actions_q_vector = actions_q_vector + added_randomness
            action = np.argmax( actions_q_vector )
            print("random 1")
        else:
            action = np.argmax( actions_q_vector )
            print("random 2")
        return action, actions_q_vector
    ################################################################################
    # update Deep Q Network
    def learn(self, state, action, reward, new_state):
        """Fit on this transition, then (once memory is full) on a replay batch."""
        #mask = np.zeros((1,6), dtype=np.float32)
        #mask[0, action] = 1.0
        #print(mask)
        #[1, n_stat] [1, n_actions]
        input_state, output_action = self.get_bellman_training_vectors(state, action, reward, new_state)
        #masked_output_action = np.multiply(output_action, mask)
        self.model.fit(input_state, output_action, verbose=0) ## fits each set (state_vec, action_vec) every move
        # Store (state one-hot | target Q-vector) as one row for replay.
        concat_inputs_outputs = np.concatenate((input_state, output_action), axis=1)
        #print(concat_inputs_outputs.shape)
        self.memoryQueue.append( concat_inputs_outputs )
        # Because maxlen == memorySize, this condition holds every step once
        # the buffer has filled, so replay runs continuously from then on.
        if len(self.memoryQueue) >= self.memorySize:
            print("Processing miniBatches from memory ...")
            miniBatch_list = random.sample(self.memoryQueue, self.memoryBatchSize)
            np_miniBatch = np.array( miniBatch_list )
            np_miniBatch = tf.squeeze(np_miniBatch, axis=(1) )
            # Split each row back into its input and target halves.
            input_state_batch = np_miniBatch[:, :self.numOfInputs]
            output_action_batch = np_miniBatch[:, self.numOfInputs:]
            #print(input_state_batch.shape)
            #print(output_action_batch.shape)
            self.model.fit(input_state_batch, output_action_batch, verbose=0, shuffle=False) #batch_size=self.trainBatchSize,
    #################################################################################
    ## (state, action, reward, new_state)
    def get_bellman_training_vectors(self, state, action, reward, new_state):
        """Build (one-hot state, target Q-vector) for one Bellman update.

        Only the taken action's Q-value is moved, by
        q_lr * (reward + gamma * maxFutureQ - Q(s, a)).
        """
        currentState = np.identity(self.numOfInputs)[state:state+1]
        actions_q_vector_old_states = self.model.predict(currentState)
        newCurrentState = np.identity(self.numOfInputs)[new_state:new_state+1]
        actions_q_vector_new_states = self.model.predict(newCurrentState)
        maxFutureQ = np.max( actions_q_vector_new_states )
        bellman_part1 = actions_q_vector_old_states[0, action]
        bellman_part2 = self.q_lr * (reward + self.gamma * maxFutureQ - actions_q_vector_old_states[0, action])
        #bellman_part2 = (reward + self.gamma * maxFutureQ - actions_q_vector_old_states[0, action])
        actions_q_vector_old_states[0, action] = bellman_part1 + bellman_part2
        ## tf.math.sigmoid( )
        return currentState, actions_q_vector_old_states
#################################################################################
| rcalix1/Deep-learning-ML-and-tensorflow | ReinforcementLearning/DeepQAP/DeepQAP2.0/DeepQLearn.py | DeepQLearn.py | py | 5,724 | python | en | code | 79 | github-code | 13 |
33164049891 | from collections import defaultdict
inpDict = defaultdict(int)
# Puzzle input: one tab-separated line of memory-bank block counts
# (Advent of Code 2017, day 6); bank index -> block count.
with open('input.txt') as f:
    inp_list = f.readline().split('\t')
    for i,x in enumerate(inp_list):
        inpDict[i] = int(x)
def get_state(inpDic: dict):
    """Snapshot the bank contents as a plain list (dict insertion order)."""
    return list(inpDic.values())
def distro_nodes(inpDict: dict, start_key: int, nodes: int):
    """Redistribute `nodes` blocks from `start_key` round-robin over the banks.

    Empties the starting bank, then deals one block at a time to successive
    banks, wrapping around. Returns the (mutated) dict.

    Generalisation: the wrap-around used a hard-coded `% 16`, which silently
    grew a defaultdict (or raised KeyError on a plain dict) whenever the
    input had a different bank count; it now wraps at len(inpDict), which is
    identical for the original 16-bank puzzle input.
    """
    bank_count = len(inpDict)
    key_ind = start_key
    inpDict[start_key] = 0
    while nodes > 0:
        key_ind = (key_ind + 1) % bank_count
        inpDict[key_ind] += 1
        nodes -= 1
    return inpDict
print(inpDict)
# Redistribute until a previously seen configuration repeats.
counter = 0
list_of_outcomes = []
# NOTE(review): `m` is never used — dead code.
m = defaultdict(list)
while get_state(inpDict) not in list_of_outcomes:
    list_of_outcomes.append(get_state(inpDict))
    counter += 1
    # Pick the fullest bank; ties broken by the lowest key.
    min_max_key = min([key for key, value in inpDict.items() if value == max(inpDict.values())])
    inpDict = distro_nodes(inpDict, min_max_key, inpDict[min_max_key])
# First index at which the repeated state originally appeared.
print(list_of_outcomes.index(get_state(inpDict)))
# Part 1 answer: total redistribution cycles until a repeat.
print(counter)
# Part 2 answer: length of the loop.
print(counter - list_of_outcomes.index(get_state(inpDict)))
26532425084 | """empty message
Revision ID: d077c781c0f9
Revises: ba0cb1f6b183
Create Date: 2018-03-03 15:37:02.865610
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd077c781c0f9'
down_revision = 'ba0cb1f6b183'
branch_labels = None
depends_on = None
def upgrade():
    """Create the `team` table (id PK, fio, avatar)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('team',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('fio', sa.String(length=150), nullable=True),
    sa.Column('avatar', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the `team` table (reverses upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('team')
    # ### end Alembic commands ###
| gadjimuradov/stpol | migrations/versions/d077c781c0f9_.py | d077c781c0f9_.py | py | 803 | python | en | code | 0 | github-code | 13 |
# Exercise: read an integer from the user and compute the average of the
# numbers from 0 up to (and including) that number, using a loop.
number = int(input( 'Number : ' ))
# 0 + 1 + 2 + 3 + ... + number
firstNum = 0
for i in range(number+1):
    firstNum += i
# The range 0..number contains number + 1 values, so divide by number + 1.
# (The original divided by `number`, which is the average of 1..number and
# overstates the stated 0..number mean.)
miangin = firstNum / (number + 1)
print(miangin)
| mariiansiimon/Practices | 7_3_J3_miangin.py | 7_3_J3_miangin.py | py | 361 | python | fa | code | 0 | github-code | 13 |
34507325599 | from django.db import models
from NGO.models import NGO, Children
class Invoice(models.Model):
    """A monthly donation invoice linking an NGO, a sponsored child and a donor.

    NOTE(review): ForeignKey without on_delete implies Django < 2.0;
    `paid` is stored as free text rather than a BooleanField — confirm the
    expected values before changing.
    """
    # default=1 assumes a seed NGO/Children row with pk=1 exists.
    select_ngo = models.ForeignKey(NGO, default=1)
    child_name = models.ForeignKey(Children, default=1)
    # External donor identifier (free-form string).
    donor_id = models.CharField(max_length=100)
    # Uploaded invoice document; optional.
    invoice = models.FileField(blank=True)
    # Stored as the short month code ('jan'...'dec').
    donation_month = models.CharField(choices=(
        ('jan', "January",),
        ('feb', "February"),
        ('mar', "March"),
        ('apr', "April"),
        ('may', "May"),
        ('jun', "June"),
        ('jul', "July"),
        ('aug', "August"),
        ('sep', "September"),
        ('oct', "October"),
        ('nov', "November"),
        ('dec', "December"),
        ), max_length=10
    )
    date = models.DateField()
    paid = models.CharField(max_length=10)
| Shankar1994/See4Children | superadmin/models.py | models.py | py | 800 | python | en | code | 0 | github-code | 13 |
4327524191 | from rest_framework import serializers
from entities.models import (
Client,
Contact,
Department,
ClientToDepartment,
Entity
)
class ContactSerializer(serializers.ModelSerializer):
    """Contact as {id, type display label, value}.

    `get_type_display` serializes the human-readable label of the
    `type` choices field.
    """
    class Meta:
        model = Contact
        fields = (
            "id",
            "get_type_display",
            "value",
        )
class ClientToDepartmentSerializer(serializers.ModelSerializer):
    """Membership row seen from the department side: exposes client fields."""
    class Meta:
        model = ClientToDepartment
        fields = (
            "id",
            "client",
            "client_identification_number",
            "client_full_name",
            "added_at",
        )
class DepartmentToClientSerializer(serializers.ModelSerializer):
    """Membership row seen from the client side: exposes department fields."""
    class Meta:
        model = ClientToDepartment
        fields = (
            "id",
            "department",
            "department_identification_number",
            "department_name",
            "added_at",
        )
class ClientSerializer(serializers.ModelSerializer):
    """Full client representation with nested contacts and department links."""
    contacts = ContactSerializer(many=True)
    departments = DepartmentToClientSerializer(many=True)
    class Meta:
        model = Client
        fields = (
            "id",
            "identification_number",
            "phone_number",
            "first_name",
            "last_name",
            "patronymic",
            "updated_at",
            "get_type_display",
            "get_gender_display",
            "timezone",
            "is_active_updated_at",
            "contacts",
            "departments",
        )
class DepartmentSerializer(serializers.ModelSerializer):
    """Department with its parent/entity references and nested client links."""
    clients = ClientToDepartmentSerializer(many=True)
    class Meta:
        model = Department
        fields = (
            "id",
            "identification_number",
            "name",
            "parent",
            "parent_name",
            "entity",
            "entity_name",
            "clients",
        )
class EntitySerializer(serializers.ModelSerializer):
    """Legal entity with its full nested department tree.

    NOTE(review): nesting DepartmentSerializer (which nests clients) can
    produce large payloads / N+1 queries — confirm prefetching in the views.
    """
    departments = DepartmentSerializer(many=True)
    class Meta:
        model = Entity
        fields = (
            "id",
            "identification_number",
            "created_at",
            "updated_at",
            "full_name",
            "abbreviated_name",
            "inn",
            "ppc",
            "departments",
        )
| Domodomko/traffic-light-test-task | entities/serializers.py | serializers.py | py | 2,305 | python | en | code | 0 | github-code | 13 |
25660563807 | from flask import Flask,render_template,redirect,request,url_for,flash, jsonify
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from setupDB import Post, Base, User
from flask import session as login_session
import random, string
from oauth2client.client import flow_from_clientsecrets, FlowExchangeError
import httplib2
import json
import requests
from flask import make_response
CLIENT_ID = json.loads(open('client_secrets.json','r').read()) ['web'] ['client_id']
#Configuration
engine = create_engine('sqlite:///PostsUser.db',
connect_args={'check_same_thread': False})
Base.metadata.create_all(engine)
DBSession = sessionmaker(bind=engine)
session = DBSession()
app = Flask(__name__)
def getApp():
    """Return the module-level Flask application (for external importers)."""
    return app
def createUser(login_session):
    """Persist a new User from the OAuth login session; return its id."""
    newUser = User(name=login_session['name'],
                   email=login_session['email'],
                   picture=login_session['picture'])
    session.add(newUser)
    session.commit()
    # After commit the primary key is populated on the instance, so the
    # original re-query by e-mail (an extra round trip that would also
    # break if two rows ever shared an address) is unnecessary.
    return newUser.id
def getUser(user_id):
    """Fetch a User row by primary key (None if absent)."""
    return session.query(User).get(user_id)
def getUserId(email):
    """Return the id of the User with this e-mail, or None if not found."""
    try:
        user = session.query(User).filter_by(email=email).one()
        return user.id
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); .one() raises NoResultFound /
        # MultipleResultsFound, both Exception subclasses.
        return None
@app.route('/')
def indexRoute():
    """Home page: all posts, plus the current user's posts when logged in."""
    all_posts = session.query(Post).all()
    if 'email' not in login_session:
        logged_in = False
        your_posts = []
    else:
        # NOTE(review): `id` shadows the builtin — rename to user_id when
        # this function is next touched.
        id = getUserId(login_session['email'])
        your_posts = session.query(Post).filter_by(user_id = id).all()
        logged_in = True
    return render_template('index.html',all_posts = all_posts, your_posts = your_posts
    ,all_size = len(all_posts),your_size = len(your_posts),logged_in = logged_in)
@app.route('/login')
def login():
    """Render the login page with a fresh anti-forgery state token."""
    # Use the OS CSPRNG: the default `random` module is a predictable
    # Mersenne Twister and unsuitable for security tokens.
    rng = random.SystemRandom()
    state = "".join(rng.choice(string.ascii_uppercase + string.digits) for x in range(32))
    login_session['state'] = state
    return render_template('login.html',STATE = state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
    """Exchange the Google one-time auth code for credentials and log in.

    Validates the anti-forgery state token, upgrades the code, verifies the
    access token against the expected user and client, stores the profile
    in the session/DB, and returns a welcome snippet.

    BUG FIX: the return value of createUser() was previously discarded, so
    login_session['user_id'] was stored as None for first-time users.
    """
    # Validate state token
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        print('Inavlid state token')
        return response
    # Obtain authorization code
    code = request.data
    try:
        # Upgrade the authorization code into a credentials object which
        # contains the access token.
        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
        # 'postmessage' marks a one-time code sent from the browser.
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        response = make_response(
            json.dumps('Failed to upgrade the authorization code.'), 401)
        response.headers['Content-Type'] = 'application/json'
        print('FlowExchangeError')
        return response
    # Ask Google to validate the access token.
    access_token = credentials.access_token
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
           % access_token)
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1])
    # If there was an error during GET, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        print('GET Error')
        return response
    # Verify the token belongs to the same user as the credentials object.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(
            json.dumps("Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        print('Valid acces token but not same user')
        return response
    # Verify the token was issued to this application.
    if result['issued_to'] != CLIENT_ID:
        response = make_response(
            json.dumps("Token's client ID does not match app's."), 401)
        response.headers['Content-Type'] = 'application/json'
        print('mismatch in client id')
        return response
    # Check if this user is already logged in.
    stored_access_token = login_session.get('access_token')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_access_token is not None and gplus_id == stored_gplus_id:
        response = make_response(json.dumps('Current user is already connected.'),
                                 200)
        response.headers['Content-Type'] = 'application/json'
        print('user is already logged in')
        return response
    # Valid token and fresh sign-in from here on.
    print('No coditions triggered')
    # Store the access token in the session for later use.
    login_session['access_token'] = credentials.access_token
    login_session['gplus_id'] = gplus_id
    # Get user info from the Google profile API.
    userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)
    data = answer.json()
    login_session['name'] = data['name']
    login_session['picture'] = data['picture']
    login_session['email'] = data['email']
    login_session['provider'] = 'google'
    # Store user info in the database (create on first login).
    user_id = getUserId(login_session['email'])
    if not user_id:
        # Fixed: capture the new row's id instead of discarding it.
        user_id = createUser(login_session)
    login_session['user_id'] = user_id
    # Display the redirect/welcome message.
    output = ''
    output += '<h1>Welcome, '
    output += login_session['name']
    output += '!</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
    flash("you are now logged in as %s" % login_session['name'])
    return output
@app.route('/gdisconnect')
def gdisconnect():
    """Revoke the Google access token and clear the login session.

    BUG FIX: in the failure branch, the 400 status code was passed to
    json.dumps() instead of make_response() — on Python 3 json.dumps has
    keyword-only parameters after obj, so that line raised TypeError
    (and would otherwise have returned HTTP 200). The parenthesis is now
    in the right place.
    """
    # Get the current session's access token.
    access_token = login_session.get('access_token')
    # No user logged in, so there is nothing to log out.
    if access_token is None:
        print ('No user logged in')
        response = make_response(json.dumps('Current user not connected.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    print ('In gdisconnect access token is %s', access_token)
    print ('User name is: ')
    print (login_session['name'])
    # Revoke the token to log out.
    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % login_session['access_token']
    h = httplib2.Http()
    # Only the response headers (status) are needed here.
    result = h.request(url, 'GET')[0]
    print ('result is ')
    print (result)
    # If successful, delete all info from the login session.
    if result['status'] == '200':
        del login_session['access_token']
        del login_session['gplus_id']
        del login_session['name']
        del login_session['email']
        del login_session['picture']
        del login_session['provider']
        del login_session['user_id']
        response = make_response(json.dumps('Successfully disconnected. Thank You for Visiting!! '), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    else:
        # Fixed misplaced parenthesis: 400 is the HTTP status, not a
        # json.dumps argument.
        response = make_response(json.dumps('Failed to revoke token for given user.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
@app.route('/fbconnect',methods=['POST'])
def fbconnect():
    """Complete a Facebook OAuth login.

    Validates the anti-forgery state token, exchanges the short-lived token
    for a long-lived one, fetches the user's profile and picture, stores
    everything in the session, and returns a small welcome snippet.
    """
    # check state token to protect against cross-site request forgery
    if login_session['state'] != request.args.get('state'):
        # BUG FIX: 400 was previously an argument to json.dumps(), so the
        # state-mismatch reply went out with status 200.
        response = make_response(json.dumps('Wrong state token'), 400)
        response.headers['content-type'] = 'application/json'
        return response
    access_token = request.data
    # upgrade the short-lived access token to a long-lived one
    app_id = json.loads(open('fb_client_secrets.json', 'r').read())[
        'web']['app_id']
    app_secret = json.loads(
        open('fb_client_secrets.json', 'r').read())['web']['app_secret']
    url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % (
        app_id, app_secret, access_token)
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]
    # Use token to get user info from API
    userinfo_url = "https://graph.facebook.com/v2.8/me"
    # extract the bare token from the exchange response body
    token = result.split(',')[0].split(':')[1].replace('"', '')
    url = 'https://graph.facebook.com/v2.8/me?access_token=%s&fields=name,id,email' % token
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]
    # get info in json format
    data = json.loads(result)
    login_session['name'] = data["name"]
    login_session['email'] = data["email"]
    login_session['facebook_id'] = data["id"]
    login_session['provider'] = 'facebook'
    login_session['access_token'] = token
    # Get user picture (separate Graph API call)
    url = 'https://graph.facebook.com/v2.8/me/picture?access_token=%s&redirect=0&height=200&width=200' % token
    h = httplib2.Http()
    result = h.request(url, 'GET')[1]
    data = json.loads(result)
    login_session['picture'] = data["data"]["url"]
    # see if user exists; create one otherwise
    user_id = getUserId(login_session['email'])
    if not user_id:
        user_id = createUser(login_session)
    login_session['user_id'] = user_id
    output = ''
    output += '<h1>Welcome, '
    output += login_session['name']
    output += '!</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
    flash("Now logged in as %s" % login_session['name'])
    return output
@app.route('/fbdisconnect')
def fbdisconnect():
    """Revoke the Facebook permissions and clear the login session."""
    revoke_url = 'https://graph.facebook.com/%s/permissions?access_token=%s' % (
        login_session['facebook_id'], login_session['access_token'])
    # Graph API DELETE on /permissions revokes the app authorization.
    result = httplib2.Http().request(revoke_url, 'DELETE')[1]
    # Drop every credential stored for this login, in a fixed order.
    for stored_key in ('access_token', 'facebook_id', 'name',
                       'email', 'picture', 'provider'):
        del login_session[stored_key]
    return "Successfully logged out"
@app.route('/about')
def aboutRoute():
    """Render the static "about" page."""
    return render_template('about.html')
@app.route('/contact')
def contactRoute():
    """Render the static "contact" page."""
    return render_template('contact.html')
@app.route('/post/<int:post_id>')
def postRoute(post_id):
    """Render the detail page for a single post."""
    requested_post = session.query(Post).get(post_id)
    return render_template('post.html', post=requested_post)
@app.route('/create', methods=['GET', 'POST'])
def createPost():
    """Show the creation form (GET) or persist a new post (POST).

    Requires a logged-in user; anonymous visitors are sent to /login.
    """
    if 'name' not in login_session:
        return redirect('/login')
    if request.method == 'GET':
        return render_template('createPost.html')
    # POST: build the post straight from the submitted form fields.
    new_post = Post(
        title=request.form['title'],
        subtitle=request.form['subtitle'],
        author=request.form['author'],
        description=request.form['content'],
        user_id=login_session['user_id'],
    )
    session.add(new_post)
    session.commit()
    flash("created post!")
    return redirect(url_for('indexRoute'))
@app.route('/<int:post_id>/delete', methods=['GET', 'POST'])
def deletePost(post_id):
    """Confirm (GET) or perform (POST) deletion of one of the user's posts."""
    if 'name' not in login_session:
        return redirect('/login')
    target = session.query(Post).get(post_id)
    # Only the owner may delete the post.
    if target.user_id != login_session['user_id']:
        return render_template('noAccess.html')
    if request.method == 'GET':
        return render_template('delete.html', post_id=post_id, post=target)
    session.delete(target)
    session.commit()
    flash("deleted post!")
    return redirect(url_for('indexRoute'))
@app.route('/<int:post_id>/edit', methods=['GET', 'POST'])
def editPost(post_id):
    """Show (GET) or apply (POST) edits to one of the user's posts."""
    if 'name' not in login_session:
        return redirect('/login')
    target = session.query(Post).get(post_id)
    # Only the owner may edit the post.
    if target.user_id != login_session['user_id']:
        return render_template('noAccess.html')
    if request.method == 'GET':
        return render_template('edit.html', post_id=post_id, post=target)
    # POST: copy the submitted fields onto the existing record.
    submitted = request.form
    target.title = submitted['title']
    target.subtitle = submitted['subtitle']
    target.author = submitted['author']
    target.description = submitted['content']
    session.add(target)
    session.commit()
    flash("edited post!")
    return redirect(url_for('indexRoute'))
@app.route('/posts/JSON')
def postsJSON():
    """Return every post serialized as JSON."""
    all_posts = session.query(Post).all()
    return jsonify(Posts=[post.serialize for post in all_posts])
@app.route('/posts/<int:post_id>/JSON')
def postJSON(post_id):
    """Return one post serialized as JSON."""
    requested = session.query(Post).get(post_id)
    return jsonify(Post=[requested.serialize])
if __name__ == '__main__':
    # Prefer a secret key from the environment; the hard-coded fallback keeps
    # local development working but should not be used in production.
    import os
    app.secret_key = os.environ.get('SECRET_KEY', 'my_secret_key')
    app.run(host='0.0.0.0', port=5000)
# --- dataset separator: rahulbanerjee26/Blog-It :: __init__.py (python, 14,425 bytes) ---
6365067379 | ## below function help us to run simple sentiment analysis on fakenews statement
## IDEA: below code help us to list out the topics that what is mainly about fake news
## and find out the sentiment of those topics in the fake news statement
## possible extended idea: extract all nouns/verbs/adj/adv.. in the sentence and run sentiment analysis
## on each sentences which has a noun/verbs/adj/adv
from textblob import TextBlob
import sys
def main():
    """Run a crude topic-span sentiment analysis on the sentence in argv[1].

    Extracts adjective/noun "topic" words, pairs them up consecutively,
    joins the token span between each pair into a sub-sentence, and prints
    each sub-sentence with its TextBlob polarity.
    """
    blob = TextBlob(sys.argv[1])
    tokens = list(blob.words)
    # Candidate topic words: adjectives and nouns (singular/comparative/plural).
    topics = [words for words, pos in blob.tags
              if pos == 'JJ' or pos == 'NN' or pos == 'JJR' or pos == 'NNS']
    sent = []
    # Consume topic words two at a time; each pair bounds a token span.
    # (Replaces the original for-range loop that mutated the list it
    # iterated and carried unused counters c, j and a debug print(i).)
    while len(topics) >= 2:
        firstw = topics[0]
        secw = topics[1]
        del topics[:2]
        # NOTE: index() finds the first occurrence, so duplicated words
        # may map to an earlier token position (same as the original).
        findx = tokens.index(firstw)
        lindx = tokens.index(secw)
        sent.append(' '.join(tokens[findx:lindx + 1]))
    print(sent, tokens)
    print("Sentence and polarity")
    for sentence in sent:
        print(sentence, TextBlob(sentence).polarity)
# if __name__ == '__main__':
#     main()
# --- dataset separator: aalvar76/cs-521-project :: source/CS-521-PROJECT/tmp/sentimentAnaly_fakeNews.py (python, 1,304 bytes) ---
23159191987 | """JuegosChaco URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.contrib.auth import views as auth
from . import views
# Project-level URL routes: admin, top-level pages, auth views, and
# delegation to the per-app URL configurations.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.Home, name = 'home'),
    # Auth views use the built-in class-based views with custom templates.
    path('login/', auth.LoginView.as_view(template_name = 'usuario/login.html'), name = 'login'),
    path('logout/', auth.LogoutView.as_view(), name = 'logout'),
    path('nosotros/', views.Nosotros, name = 'nosotros'),
    # Redirection to the apps' own URLconfs
    path('usuario/', include('apps.usuario.urls')),
    path('preguntas/',include('apps.preguntas.urls')),
    path('resultados/',include('apps.resultados.urls')),
]
# --- dataset separator: Lsege/JuegosChaco :: JuegosChaco/JuegosChaco/urls.py (python, 1,284 bytes) ---
18314921520 | from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes, force_text
from django.views import View
from . import forms
from .forms import UserRegisterForm, LoginForm, ContactForm
from .models import Person
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.core.mail import send_mail
from django.contrib.sites.shortcuts import get_current_site
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from .tokens import account_activation_token
def index(request):
    """Render the landing page."""
    return render(request, 'index.html')
def sign_up(request):
    """Register a new, inactive user and e-mail an activation link.

    GET renders an empty registration form; a valid POST saves the user
    with is_active=False and sends the activation e-mail, then redirects
    back to this view.
    """
    if request.method=='POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            # Keep the account disabled until the e-mail link is confirmed.
            user = form.save(commit=False)
            user.is_active = False
            user.save()
            current_site = get_current_site(request)
            subject = 'Activate Your Mysite Account'
            # Activation link carries the base64 user id plus a one-time token.
            message = render_to_string('activate_your_account.html', {
                'user': user,
                'domain': current_site.domain,
                'uid': urlsafe_base64_encode(force_bytes(user.pk)),
                'token': account_activation_token.make_token(user)
            })
            # NOTE(review): sender address is hard-coded -- confirm it should
            # not come from settings.DEFAULT_FROM_EMAIL.
            user.email_user(subject, message, from_email= 'boburjon@thinkland.uz')
            # NOTE(review): redirects back to the sign-up page rather than a
            # "check your e-mail" page -- confirm this is intended.
            return redirect('sign_up')
    else:
        form = UserRegisterForm()
    # Invalid POSTs fall through and re-render with the bound form's errors.
    return render(request, 'registration.html', {'form':form})
def activate(request, uidb64, token):
    """Activate an account from the e-mailed confirmation link.

    Decodes the base64 user id, verifies the one-time token, then enables
    the account, logs the user in and redirects home. Any failure renders
    the invalid-activation page.
    """
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = User.objects.get(pk=uid)
    except (TypeError, ValueError, OverflowError, User.DoesNotExist):
        # Malformed uid or unknown user: treat the link as invalid below.
        user = None
    if user is not None and account_activation_token.check_token(user, token):
        user.is_active = True
        user.person.email_confirmed = True
        user.save()
        # NOTE(review): this creates a separate Person row duplicating the
        # username and (hashed) password of the User -- confirm this is the
        # intended data model and not leftover code.
        new_user = Person(username=user.username, password=user.password)
        new_user.save()
        login(request, user)
        return redirect('home')
    else:
        return render(request, 'account_activation_invalid.html')
class LoginView(View):
    """Display and process the username/password login form."""

    def get(self, request):
        # Show an empty login form.
        return render(request, 'login.html', {"form": LoginForm()})

    def post(self, request):
        submitted = LoginForm(request.POST)
        if submitted.is_valid():
            credentials = submitted.cleaned_data
            account = authenticate(
                username=credentials['username'],
                password=credentials['password'],
            )
            if account:
                login(request, account)
                return redirect('/')
        # Invalid form or bad credentials: re-render with the bound form.
        return render(request, 'login.html', {'form': submitted})
# --- dataset separator: Bobur-oiligarh/user-register-with-email-confirmation :: LearnLogin/register/views.py (python, 2,800 bytes) ---
71329032337 | from pickle import FALSE, TRUE
from random import randint
from time import sleep
import pytest
from tools.sec_loader import SecretLoader
from tools.config_loader import ConfigLoader
from tools.innodrive import InnoDrive
from tools.logger import Logger
import time, os, io, json
class TestInoDrv():
    # Integration tests for the InnoDrive API wrapper. They hit the real
    # service through the shared class-level client below.
    # NOTE(review): the methods name their first parameter ``cls`` but carry
    # no @classmethod decorator, so pytest actually passes the instance
    # (i.e. ``cls`` is really ``self``); ``inodrv`` is reached through the
    # class attribute either way.
    inodrv = InnoDrive()
    def test_api_ticket(cls):
        # The client must have obtained an API key at construction time.
        assert cls.inodrv.apikey is not None
    def test_val_apikey(cls):
        # Within its validity window, validating must keep the same key ...
        ckey = cls.inodrv.apikey
        cls.inodrv.val_apikey()
        vkey = cls.inodrv.apikey
        assert ckey == vkey
        # ... and after the (shortened) duration expires, a new key is issued.
        original_apikey_dur = cls.inodrv._apikey_dur
        cls.inodrv._apikey_dur = 1
        time.sleep(3)
        cls.inodrv.val_apikey()
        newkey = cls.inodrv.apikey
        cls.inodrv._apikey_dur = ConfigLoader.config("innodrive")["apikey_dur"]
        assert vkey != newkey
        cls.inodrv._apikey_dur = original_apikey_dur
    def get_item(cls):
        # NOTE(review): name lacks the ``test_`` prefix, so pytest does not
        # collect this check -- confirm whether that is intentional.
        resp1 = cls.inodrv.get_items("APDRV_DATASTUDIO")
        resp2 = cls.inodrv.get_node_items()
        assert resp1.json() == resp2.json()
    def test_upload_file_same_fname(cls):
        # Upload keeping the original file name.
        sfilen = "spcyx-testcases.json"
        sfilep = os.path.join("tests/doc/testcases",sfilen)
        folderid = ConfigLoader.config("innodrive")["nodeid"]
        resp = cls.inodrv.upload_file(sfilen, sfilep,folderid)
        assert resp["msg"].status_code == 200
    def test_upload_file_diff_fname(cls):
        # Upload storing the file under a different target name.
        sfilen = "spcyx-testcases.json"
        tfilen = "testcase.txt"
        sfilep = os.path.join("tests/doc/testcases",sfilen)
        folderid = ConfigLoader.config("innodrive")["nodeid"]
        resp = cls.inodrv.upload_file(sfilen, sfilep,folderid,tfilen)
        assert resp["msg"].status_code == 200
    def test_get_file(cls):
        # Resolve a known file's id, then download it.
        gres = cls.inodrv.get_id_byname_and_parentid("AIDeveloper.png",cls.inodrv._nodeid)
        # NOTE(review): if the lookup status is not "OK", ``fileid`` is
        # unbound and the next line raises NameError rather than failing
        # with a clear assertion.
        if gres["status"]=="OK": fileid = gres["objid"]
        resp = cls.inodrv.get_downloadfile(fileid)
        assert resp.content is not None
    def test_add_del_folder(cls):
        # Create a folder, verify it exists, delete it, verify it is gone.
        foldername = "NewFolderTest"
        # Clean up any leftover folder from a previous failed run.
        if cls.inodrv.get_id_byname(foldername) is not None:
            objid = cls.inodrv.get_id_byname(foldername)[0]
            r = cls.inodrv.del_folder(objid)
            if r.status_code != 200:print(f"status code: {r.status_code}, message:{r.text}")
            assert r.status_code == 200
        pfolderid = ConfigLoader.config("innodrive")["nodeid"]
        resp = cls.inodrv.add_folder(pfolderid,foldername)
        if resp.status_code != 200:print(f"status code: {resp.status_code}, message:{resp.text}")
        assert resp.status_code == 200
        objids = cls.inodrv.get_id_byname(foldername)
        assert objids is not None
        objid = objids[0]
        r = cls.inodrv.del_folder(objid)
        if r.status_code != 200:print(f"status code: {r.status_code}, message:{r.text}")
        assert r.status_code == 200
        objids = cls.inodrv.get_id_byname(foldername)
        assert len(objids) == 0
    def _rename_folder(cls):
        """Create a folder 'RenameFolderTest' and then rename it.

        The id should remain the same while the name attribute changes, so
        the assertions also check the correctness of get_name_byid.
        (Leading underscore keeps pytest from collecting this test.)
        """
        foldername = "RenameFolderTest"
        newfoldername = "RenameFolderTest-NewName"
        # Remove leftovers of both names from previous runs.
        if cls.inodrv.get_id_byname(foldername) is not None:
            objid = cls.inodrv.get_id_byname(foldername)[0]
            r = cls.inodrv.del_folder(objid)
            if r.status_code != 200:print(f"status code: {r.status_code}, message:{r.text}")
            assert r.status_code == 200
        if cls.inodrv.get_id_byname(newfoldername) is not None:
            objid = cls.inodrv.get_id_byname(newfoldername)[0]
            r = cls.inodrv.del_folder(objid)
            assert r.status_code == 200
        pfolderid = ConfigLoader.config("innodrive")["nodeid"]
        resp = cls.inodrv.add_folder(pfolderid,foldername)
        # NOTE(review): expects 204 here while test_add_del_folder expects
        # 200 for the same call -- confirm which status the API returns.
        assert resp.status_code == 204
        objid = cls.inodrv.get_id_byname(foldername)[0]
        assert objid is not None
        Logger.log(f"Tests:test reanme folder name: oldfolder name:{foldername}:New folder name:{newfoldername} ")
        r = cls.inodrv.rename_folder(objid, newfoldername)
        assert r.status_code == 200
        objids = cls.inodrv.get_id_byname(newfoldername)
        objid = objids[0]
        # The renamed id must no longer resolve under the old name, and the
        # client-side caches must agree in both directions.
        assert objid not in cls.inodrv.get_id_byname(foldername)
        assert cls.inodrv.nameids[newfoldername] is not None
        assert objids == cls.inodrv.get_id_byname(newfoldername)
        assert cls.inodrv.get_name_byid(objid) == newfoldername
    def test_files_identity(cls):
        # Log the hash-based identity check over all copies of the file.
        test_file_name = 'source_api_edc_raw.json'
        objids = cls.inodrv.get_id_byname(test_file_name)
        Logger.log(f"{cls.inodrv.identity_check_by_hash(objids)}")
    def test_files_compare(cls):
        # Pairwise-compare consecutive copies of the same file; identical
        # copies must score a similarity rate of 1.0 in both modes.
        test_file_name = 'source_api_edc_raw.json'
        objids = cls.inodrv.get_id_byname(test_file_name)
        Logger.log(f"files_compare tests for {objids}")
        objsn = len(objids)
        for i in range(objsn-1):
            cp1 = cls.inodrv.files_compare(objids[i],objids[i+1],'inorder')
            cp2 = cls.inodrv.files_compare(objids[i],objids[i+1],'unorder')
            Logger.log(f"Inorder compare result: Identical Count:{cp1.iden_count} / Diff Count:{cp1.diff_count}")
            Logger.log(f"Unorder compare result: Identical Count: {cp2.iden_count} / Diff Count:{cp2.diff_count}")
            assert cp1.simi_rate == 1.0
            assert cp2.simi_rate == 1.0
def test_files_compare():
    """Exercise every seasoning mode of FileCompareAux on the same file."""
    tidv = FileCompareAux()
    test_file_name = 'source_api_edc_raw.json'
    idrv = TestInoDrv.inodrv
    tidv.objids = idrv.get_id_byname(test_file_name)
    tidv.test_file_name = test_file_name
    # Run the replace / swap / drop seasoning modes in turn.
    for seasoning_mode in (1, 2, 3):
        tidv.mock_temp(seasoning_mode)
        aux_files_compare(tidv)
def aux_files_compare(tidv):
    """Re-read the seasoned temp files and log both comparison strategies."""
    tidv.read_temp()
    # Order-sensitive comparison.
    tidv.compare_temp_inorder()
    for pair, outcome in tidv.compare_result_inorder.items():
        Logger.log(f"cpri.pair: {pair} : identity count: {outcome.iden_count} : diff_content {outcome.diff_content} ")
    # Order-insensitive comparison.
    tidv.compare_temp_unorder()
    for pair, outcome in tidv.compare_result_unorder.items():
        Logger.log(f"cpru.pair: {pair} : identity count: {outcome.iden_count} : diff_content {outcome.diff_content} ")
class FileCompareAux:
    # Helper that downloads copies of a file, writes optionally "seasoned"
    # (deliberately corrupted) variants to a temp directory, and compares
    # them with the InnoDrive similarity routines.
    # NOTE(review): ``objids`` and ``test_file_name`` are class-level
    # attributes shared by all instances; callers overwrite them per test.
    objids = []
    test_file_name = ""
    def __init__(self):
        # Directory the seasoned copies are written to.
        self.temppath = 'temp'
    def mock_temp(self, seasoning_data=0):
        """Download every object id and write it to temp, seasoning one file.

        One arbitrarily chosen file is passed through :meth:`mock_data`
        with the given seasoning type; all others are written verbatim.
        Possible extension: season every part of speech separately.
        """
        # NOTE(review): randint's upper bound is inclusive, so this can pick
        # len(objids) and then match no file at all -- confirm whether a
        # "no seasoning" round is intended or this is an off-by-one.
        arbitary_file = randint(0, len(self.objids))
        fn = 0
        for objid in self.objids:
            r = TestInoDrv.inodrv.get_downloadfile(objid)
            rf = r.text
            if fn == arbitary_file:
                srf = self.mock_data(seasoning_data, objid, rf)
            else:
                srf = rf
            filepath = os.path.join(self.temppath, objid + '.json')
            f = open(filepath, 'w')
            # Write one line per element (characters when srf is a str,
            # lines when it came back from mock_data).
            for ln in srf:
                f.write(str(ln)+"\n")
            f.close()
            fn += 1
    def mock_data(self, seasoning_data, objid, rf):
        """Return a seasoned copy of ``rf`` according to ``seasoning_data``.

        Type 1 replaces one arbitrary line with marker text; type 2 swaps
        two arbitrary lines; type 3 drops both selected lines.
        """
        arbitary_line1 = randint(0, len(rf)-1)
        # Keep line1 strictly below the swap partner's range end.
        if arbitary_line1 == len(rf)-2: arbitary_line1 = arbitary_line1-1
        arbitary_line2 = randint(arbitary_line1, len(rf)-2)
        Logger.log(f"file: {objid} is seasoned (type:{seasoning_data}) with arbitary_line1 {arbitary_line1} arbitary_line2 {arbitary_line2}")
        srf = []
        for lnn in range(len(rf)):
            ln = rf[lnn]
            if seasoning_data == 1 and arbitary_line1 == lnn:
                # Replace the selected line with a marker string.
                ln = "The line is seasoned for testing ^^"
                Logger.log(f"file: {objid} is seasoned (type:{seasoning_data}) by {ln} at line number {lnn}")
            if seasoning_data == 2 and (arbitary_line1 == lnn or arbitary_line2 == lnn):
                # Swap the two selected lines.
                if arbitary_line1 == lnn:
                    ln = rf[arbitary_line2]
                if arbitary_line2 == lnn:
                    ln = rf[arbitary_line1]
                Logger.log(f"file: {objid} is seasoned (type:{seasoning_data}) by {ln} at line number {lnn}")
            if seasoning_data == 3 and (arbitary_line1 == lnn or arbitary_line2 == lnn):
                # Drop both selected lines entirely.
                Logger.log(f"file: {objid} is seasoned (type:{seasoning_data}) by {ln} at line number {lnn}")
                continue
            srf.append(ln)
        return srf
    def read_temp(self):
        # Load every temp copy back into memory, keyed by object id.
        self.fr = {}
        for objid in self.objids:
            filepath = os.path.join(self.temppath, objid + '.json')
            f = open(filepath, 'r')
            self.fr[objid] = f.readlines()
    def compare_temp_inorder(self):
        # Order-sensitive similarity for every ordered pair of distinct files.
        self.compare_result_inorder = {}
        for bobjid in self.objids:
            f1 = self.fr[bobjid]
            for tobjid in self.objids:
                if bobjid == tobjid:pass
                else:
                    f2 = self.fr[tobjid]
                    self.compare_result_inorder[bobjid, tobjid] = TestInoDrv.inodrv.files_similarity_inorder_lines(f1, f2)
    def compare_temp_unorder(self):
        # Order-insensitive similarity for every ordered pair of distinct files.
        self.compare_result_unorder = {}
        for bobjid in self.objids:
            f1 = self.fr[bobjid]
            for tobjid in self.objids:
                if bobjid == tobjid:pass
                else:
                    f2 = self.fr[tobjid]
                    self.compare_result_unorder[bobjid, tobjid] = TestInoDrv.inodrv.files_similarity_unorder_lines(f1, f2)
# --- dataset separator: eslywadan/dataservice :: tests/test_innodrive.py (python, 10,207 bytes) ---
39481961238 | import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
# Increase the width of the Streamlit app
st.set_option('deprecation.showPyplotGlobalUse', False)
st.set_page_config(layout="wide")

# Load data and drop the label column before computing correlations.
df = pd.read_csv("test.csv")
df = df.drop(['Risk Rating'], axis=1)

# Compute the correlation matrix
corr = df.corr()

fig, ax = plt.subplots(figsize=(10, 8))
im = ax.pcolor(corr.values, cmap='coolwarm')
fig.colorbar(im)
# Center the tick labels on each heatmap cell.
ax.set_xticks(np.arange(len(corr.columns)) + 0.5, minor=False)
ax.set_yticks(np.arange(len(corr.index)) + 0.5, minor=False)
ax.set_xticklabels(corr.columns, rotation=90)
ax.set_yticklabels(corr.index)
ax.set_title("Correlation Matrix (Matplotlib)")
# BUG FIX: this final statement was corrupted by dataset metadata fused onto
# the line, which made the file a syntax error; restored to the plain call.
st.pyplot(fig)
37130357863 | import argparse
import os
import shutil
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from data.dataset import PoisonLabelDataset
from data.utils import (
gen_poison_idx,
get_bd_transform,
get_dataset,
get_loader,
get_transform,
)
from model.model import LinearModel
from model.utils import (
get_criterion,
get_network,
get_optimizer,
get_scheduler,
load_state,
)
from utils.setup import (
get_logger,
get_saved_dir,
get_storage_dir,
load_config,
set_seed,
)
from utils.trainer.log import result2csv
from utils.trainer.supervise import poison_train, test
def main():
    """Parse CLI arguments, prepare output directories and launch training.

    Spawns one worker per GPU when more than one GPU is visible, otherwise
    runs a single worker in-process.
    """
    print("===Setup running===")
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default="./config/supervise/example.yaml")
    parser.add_argument("--gpu", default="0", type=str)
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        help="checkpoint name (empty string means the latest checkpoint)\
            or False (means training from scratch).",
    )
    parser.add_argument("--amp", default=False, action="store_true")
    parser.add_argument(
        "--world-size",
        default=1,
        type=int,
        help="number of nodes for distributed training",
    )
    parser.add_argument(
        "--rank", default=0, type=int, help="node rank for distributed training"
    )
    parser.add_argument(
        "--dist-port",
        default="23456",
        type=str,
        help="port used to set up distributed training",
    )
    args = parser.parse_args()
    # Resolve the save/log/storage directories from the config and copy the
    # config file next to the outputs for reproducibility.
    config, inner_dir, config_name = load_config(args.config)
    args.saved_dir, args.log_dir = get_saved_dir(
        config, inner_dir, config_name, args.resume
    )
    shutil.copy2(args.config, args.saved_dir)
    args.storage_dir, args.ckpt_dir, _ = get_storage_dir(
        config, inner_dir, config_name, args.resume
    )
    shutil.copy2(args.config, args.storage_dir)
    # Restrict visible devices, then decide single-GPU vs. distributed mode.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    ngpus_per_node = torch.cuda.device_count()
    if ngpus_per_node > 1:
        args.distributed = True
    else:
        args.distributed = False
    if args.distributed:
        # Total world size = processes per node * number of nodes.
        args.world_size = ngpus_per_node * args.world_size
        print("Distributed training on GPUs: {}.".format(args.gpu))
        mp.spawn(
            main_worker,
            nprocs=ngpus_per_node,
            args=(ngpus_per_node, args, config),
        )
    else:
        print("Training on a single GPU: {}.".format(args.gpu))
        main_worker(0, ngpus_per_node, args, config)
def main_worker(gpu, ngpus_per_node, args, config):
    """Per-GPU worker: build poisoned data, train, evaluate and checkpoint.

    Args:
        gpu: local GPU index of this worker.
        ngpus_per_node: number of GPUs on this node.
        args: parsed CLI namespace augmented with directory paths.
        config: loaded YAML configuration dictionary.
    """
    set_seed(**config["seed"])
    # Only rank 0 (gpu == 0) writes log output.
    logger = get_logger(args.log_dir, "supervise.log", args.resume, gpu == 0)
    torch.cuda.set_device(gpu)
    if args.distributed:
        args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(
            backend="nccl",
            init_method="tcp://127.0.0.1:{}".format(args.dist_port),
            world_size=args.world_size,
            rank=args.rank,
        )
        logger.warning("Only log rank 0 in distributed training!")
    logger.info("===Prepare data===")
    bd_config = config["backdoor"]
    logger.info("Load backdoor config:\n{}".format(bd_config))
    bd_transform = get_bd_transform(bd_config)
    target_label = bd_config["target_label"]
    poison_ratio = bd_config["poison_ratio"]
    # Shared pre-transform plus separate train/test augmentation pipelines.
    pre_transform = get_transform(config["transform"]["pre"])
    train_primary_transform = get_transform(config["transform"]["train"]["primary"])
    train_remaining_transform = get_transform(config["transform"]["train"]["remaining"])
    train_transform = {
        "pre": pre_transform,
        "primary": train_primary_transform,
        "remaining": train_remaining_transform,
    }
    logger.info("Training transformations:\n {}".format(train_transform))
    test_primary_transform = get_transform(config["transform"]["test"]["primary"])
    test_remaining_transform = get_transform(config["transform"]["test"]["remaining"])
    test_transform = {
        "pre": pre_transform,
        "primary": test_primary_transform,
        "remaining": test_remaining_transform,
    }
    logger.info("Test transformations:\n {}".format(test_transform))
    logger.info("Load dataset from: {}".format(config["dataset_dir"]))
    clean_train_data = get_dataset(
        config["dataset_dir"], train_transform, prefetch=config["prefetch"]
    )
    # BUG FIX: poison_train_idx was previously generated a second time after
    # being saved, so the poison_idx.npy on disk did not match the indices
    # actually used to build the poisoned training set. Generate once, save
    # it, and reuse the same array below.
    poison_train_idx = gen_poison_idx(clean_train_data, target_label, poison_ratio)
    poison_idx_path = os.path.join(args.saved_dir, "poison_idx.npy")
    np.save(poison_idx_path, poison_train_idx)
    logger.info("Save poisoned index to {}".format(poison_idx_path))
    clean_test_data = get_dataset(
        config["dataset_dir"], test_transform, train=False, prefetch=config["prefetch"]
    )
    poison_train_data = PoisonLabelDataset(
        clean_train_data, bd_transform, poison_train_idx, target_label
    )
    # For the poison test set every sample is poisoned (no ratio argument).
    poison_test_idx = gen_poison_idx(clean_test_data, target_label)
    poison_test_data = PoisonLabelDataset(
        clean_test_data, bd_transform, poison_test_idx, target_label
    )
    if args.distributed:
        # Split the per-node batch size across the workers.
        poison_train_sampler = DistributedSampler(poison_train_data)
        batch_size = int(config["loader"]["batch_size"] / ngpus_per_node)
        num_workers = config["loader"]["num_workers"]
        poison_train_loader = get_loader(
            poison_train_data,
            batch_size=batch_size,
            sampler=poison_train_sampler,
            num_workers=num_workers,
        )
    else:
        poison_train_sampler = None
        poison_train_loader = get_loader(
            poison_train_data, config["loader"], shuffle=True
        )
    clean_test_loader = get_loader(clean_test_data, config["loader"])
    poison_test_loader = get_loader(poison_test_data, config["loader"])
    logger.info("\n===Setup training===")
    backbone = get_network(config["network"])
    logger.info("Create network: {}".format(config["network"]))
    linear_model = LinearModel(backbone, backbone.feature_dim, config["num_classes"])
    linear_model = linear_model.cuda(gpu)
    if args.distributed:
        linear_model = DistributedDataParallel(linear_model, device_ids=[gpu])
    criterion = get_criterion(config["criterion"])
    criterion = criterion.cuda(gpu)
    logger.info("Create criterion: {}".format(criterion))
    optimizer = get_optimizer(linear_model, config["optimizer"])
    logger.info("Create optimizer: {}".format(optimizer))
    scheduler = get_scheduler(optimizer, config["lr_scheduler"])
    logger.info("Create scheduler: {}".format(config["lr_scheduler"]))
    # Optionally resume model/optimizer/scheduler state and best-acc tracking.
    resumed_epoch, best_acc, best_epoch = load_state(
        linear_model,
        args.resume,
        args.ckpt_dir,
        gpu,
        logger,
        optimizer,
        scheduler,
        is_best=True,
    )
    for epoch in range(config["num_epochs"] - resumed_epoch):
        if args.distributed:
            # Re-shuffle shards deterministically per epoch.
            poison_train_sampler.set_epoch(epoch)
        logger.info(
            "===Epoch: {}/{}===".format(epoch + resumed_epoch + 1, config["num_epochs"])
        )
        logger.info("Poison training...")
        poison_train_result = poison_train(
            linear_model,
            poison_train_loader,
            criterion,
            optimizer,
            logger,
            amp=args.amp,
        )
        logger.info("Test model on clean data...")
        clean_test_result = test(linear_model, clean_test_loader, criterion, logger)
        logger.info("Test model on poison data...")
        poison_test_result = test(linear_model, poison_test_loader, criterion, logger)
        if scheduler is not None:
            scheduler.step()
            logger.info(
                "Adjust learning rate to {}".format(optimizer.param_groups[0]["lr"])
            )
        # Save result and checkpoint (rank 0 only in distributed mode).
        if not args.distributed or (args.distributed and gpu == 0):
            result = {
                "poison_train": poison_train_result,
                "clean_test": clean_test_result,
                "poison_test": poison_test_result,
            }
            result2csv(result, args.log_dir)
            saved_dict = {
                "epoch": epoch + resumed_epoch + 1,
                "result": result,
                "model_state_dict": linear_model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "best_acc": best_acc,
                "best_epoch": best_epoch,
            }
            if scheduler is not None:
                saved_dict["scheduler_state_dict"] = scheduler.state_dict()
            # Track the best clean-test accuracy seen so far.
            is_best = False
            if clean_test_result["acc"] > best_acc:
                is_best = True
                best_acc = clean_test_result["acc"]
                best_epoch = epoch + resumed_epoch + 1
            logger.info(
                "Best test accuaracy {} in epoch {}".format(best_acc, best_epoch)
            )
            if is_best:
                ckpt_path = os.path.join(args.ckpt_dir, "best_model.pt")
                torch.save(saved_dict, ckpt_path)
                logger.info("Save the best model to {}".format(ckpt_path))
            ckpt_path = os.path.join(args.ckpt_dir, "latest_model.pt")
            torch.save(saved_dict, ckpt_path)
            logger.info("Save the latest model to {}".format(ckpt_path))
# --- dataset separator: SCLBD/DBD :: supervise.py (python, 9,668 bytes) ---
14276771536 | import bpy
from bpy.props import *
from bpy.types import Panel
class AUTOMIRROR_PT_panel(Panel):
    # Sidebar panel (View3D > N-panel > "Tools" tab) hosting the Auto Mirror UI.
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Tools'
    bl_label = "Auto Mirror"
    bl_options = {'DEFAULT_CLOSED'}
    @classmethod
    def poll(cls, context):
        # Only show the panel when there is an active object.
        return bpy.context.object
    def draw(self, context):
        # Delegate the three sections to the module-level builders below.
        layout = self.layout
        draw_automirror(self, layout)
        draw_mirrormirror(self, layout)
        draw_multi_mod(self, layout)
# Auto Mirror
def draw_automirror(self, layout):
    # Build the "Auto Mirror" section: the main operator button, two icon
    # buttons, per-axis quick buttons and the collapsible options sub-box.
    box = layout.box()
    row = box.row(align=True)
    row.scale_y = 1.2
    row.operator("automirror.automirror")
    row.separator()
    rows = row.row(align=True)
    # NOTE(review): this scales the outer ``row`` while the icon buttons live
    # in ``rows``; possibly ``rows.scale_x`` was intended -- confirm in the UI.
    row.scale_x = 1.5
    rows.operator("automirror.toggle_mirror",text="",icon="RESTRICT_VIEW_OFF")
    rows.operator("automirror.target_set",text="",icon="OBJECT_DATAMODE")
    sp = box.split(align=True,factor=0.3)
    sp.label(text="Quick Axis")
    row = sp.row()
    row.scale_y = 1.2
    # One button per axis; axis_quick_override makes the operator use the
    # flags set here instead of the scene-level axis settings.
    am = row.operator("automirror.automirror",text="X")
    am.axis_quick_override = True
    am.axis_x = True
    am.axis_y = False
    am.axis_z = False
    am = row.operator("automirror.automirror",text="Y")
    am.axis_quick_override = True
    am.axis_x = False
    am.axis_y = True
    am.axis_z = False
    am = row.operator("automirror.automirror",text="Z")
    am.axis_quick_override = True
    am.axis_x = False
    am.axis_y = False
    am.axis_z = True
    draw_automirror_option(self, box)
def draw_automirror_option(self, layout):
    # Collapsible "Option" sub-section of the Auto Mirror box, bound to the
    # scene-level property group.
    props = bpy.context.scene.automirror
    row = layout.row(align=True)
    row.alignment="LEFT"
    row.prop(props, "toggle_option", text="Option", icon="TRIA_DOWN" if props.toggle_option else "TRIA_RIGHT", emboss=False)
    # Collapsed: draw nothing else.
    if not props.toggle_option:
        return
    # draw_main_fanc_option(self,context,layout)
    box = layout.box()
    col = box.column()
    row = col.row(align=True)
    row.prop(props,"axis_x",text="X",toggle=True)
    row.prop(props,"axis_y",text="Y",toggle=True)
    row.prop(props,"axis_z",text="Z",toggle=True)
    # row.prop(props, "axis", text="Mirror Axis", expand=True)
    row = col.row(align=True)
    row.prop(props, "orientation", text="Orientation", expand=True)
    col.prop(props, "threshold", text="Threshold")
    col.prop(props, "toggle_edit", text="Toggle Edit")
    col.prop(props, "cut", text="Cut and Mirror")
    if props.cut:
        # Mirror-modifier settings are only relevant when cutting is enabled.
        col = box.column(align=True)
        col.label(text="Mirror Modifier:")
        row = col.row(align=True)
        row.label(text="",icon="AUTOMERGE_ON")
        row.prop(props, "use_clip", text="Use Clip")
        row = col.row(align=True)
        row.label(text="",icon="OUTLINER_DATA_MESH")
        row.prop(props, "show_on_cage", text="Editable")
        row = col.row(align=True)
        row.label(text="",icon="SORT_DESC")
        row.prop(props, "sort_top_mod")
        row = col.row(align=True)
        row.label(text="",icon="CHECKBOX_HLT")
        row.prop(props, "apply_mirror", text="Apply Mirror")
    else:
        box.label(text="Only Bisect")
# mirror mirror
def draw_mirrormirror(self, layout):
    """Draw the "Mirror Mirror" row with one operator button per axis."""
    box = layout.box()
    sp = box.split(align=True, factor=0.3)
    sp.label(text="Mirror Mirror")
    row = sp.row()
    row.scale_y = 1.2
    # Each button enables exactly one of the three axis flags.
    for axis_label in ("X", "Y", "Z"):
        op = row.operator("automirror.mirror_mirror", text=axis_label)
        op.axis_x = axis_label == "X"
        op.axis_y = axis_label == "Y"
        op.axis_z = axis_label == "Z"
# Batch operations
def draw_multi_mod(self, layout):
    # Row of bulk modifier actions: add menu, apply all, remove all, sort.
    row = layout.row(align=True)
    row.scale_x = 1.2
    row.menu("AUTOMIRROR_MT_modifier_add",text="",icon="ADD")
    row.separator()
    row.operator("automirror.apply",text="Apply",icon="FILE_TICK")
    row.operator("automirror.remove",text="Remove",icon="X")
    row.separator()
    row.operator("automirror.modifier_sort",text="",icon="SORT_DESC")
# --- dataset separator: Tilapiatsu/blender-custom_config :: scripts/addon_library/local/auto_mirror_ex/ui/ui_panel.py (python, 3,716 bytes) ---
39766124963 | import wikipediaapi
import wikipedia
# Module-level Wikipedia clients, both fixed to the Russian-language wiki:
# wikipediaapi is used for HTML extracts, the wikipedia package for titles.
wiki_html = wikipediaapi.Wikipedia(
    language='ru',
    extract_format=wikipediaapi.ExtractFormat.HTML
)
wikipedia.set_lang("ru")
def get_summary(object):
    """Return summary of the object page.

    Args:
        object ('string'): title of wiki-object.

    Returns:
        string: summary in html-format, or None when the page lookup fails.
    """
    # NOTE: the parameter shadows the builtin ``object``; the name is kept
    # for backward compatibility with existing callers.
    try:
        wipage = wiki_html.page(object)
    except Exception:
        # The original silently fell off the end on error; make the implicit
        # None return explicit.
        return None
    return wipage.summary
def get_title(object):
    """Return title of the object page.

    Args:
        object ('string'): title(path) of wiki-object.

    Returns:
        string: nice title of wiki-object, or a Russian "title not found"
        message when the lookup fails.
    """
    # Wikipedia titles use spaces rather than underscores.
    object_str = object.replace('_', ' ')
    try:
        wipage = wikipedia.page(object_str)
    except Exception:
        return 'Название не найдено'
    # BUG FIX: this return line was corrupted by dataset metadata fused onto
    # it (a syntax error); restored to the plain attribute access.
    return wipage.title
7457339273 | from django.shortcuts import render
from django.http import HttpResponse
from .models import haga_su_pedido, tipo_de_postre, tipo_de_relleno
from .forms import haga_su_pedidoForm, tipo_de_postreForm, tipo_de_rellenoForm
def mostrar_index(request):
    # Render the landing page; no context data is needed.
    return render(request, 'index.html')
def mostrar_pedido(request):
    # NOTE(review): the model is instantiated with literal lists
    # (['nombre'], ...) instead of real field values, and nothing is loaded
    # from the database -- this looks like placeholder code; verify intent.
    pedido1 = haga_su_pedido(nombre=['nombre'], e_mail=['e_mail'] , cantidad_de_personas=['cantidad_de_personas'])
    return render(request, 'consulta_pedido.html' ,{'haga_su_pedido':[pedido1]})
def nuevo_pedido(request):
    """Create a new order from a valid submission and advance to the
    dessert-type step; otherwise (GET or invalid POST) show the order form."""
    if request.method == 'POST':
        form = haga_su_pedidoForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            pedido = haga_su_pedido(
                nombre=data['nombre'],
                e_mail=data['e_mail'],
                cantidad_de_personas=data['cantidad_de_personas'],
            )
            pedido.save()
            return render(request, 'tipo_de_postre.html')
    else:
        form = haga_su_pedidoForm()
    # Invalid POSTs fall through here with the bound form so errors show.
    return render(request, 'crear_nuevo_pedido.html', {'formulario_PN': form})
def mostrar_tipo_de_postre(request):
    """Handle the dessert-type step: save a valid submission and advance to
    the filling step; otherwise render the dessert-type form.

    Fixes: an invalid POST previously fell off the end of the function and
    returned None (Django raises a 500 for views that return None), and the
    GET branch assigned the form *class* instead of an instance.
    """
    if request.method == 'POST':
        formulario_npostre = tipo_de_postreForm(request.POST)
        if formulario_npostre.is_valid():
            datos = formulario_npostre.cleaned_data
            pedido_postre = tipo_de_postre(cantidad_de_personas=datos['cantidad_de_personas'], nombre_torta=datos['nombre_torta'], relleno=datos['relleno'])
            pedido_postre.save()
            return render(request, 'tipo_de_relleno.html')
    else:
        # Fixed: was `tipo_de_postreForm` without parentheses.
        formulario_npostre = tipo_de_postreForm()
    # Always return a response, including for an invalid POST.
    return render(request, 'tipo_de_postre.html')
def mostrar_tipo_de_relleno(request):
    """Handle the filling step: save a valid submission and return to the
    index; otherwise render the filling form.

    Fixes: an invalid POST previously fell off the end of the function and
    returned None, which makes Django raise a 500 error.
    """
    if request.method == 'POST':
        formulario_nrelleno = tipo_de_rellenoForm(request.POST)
        if formulario_nrelleno.is_valid():
            datos = formulario_nrelleno.cleaned_data
            pedido_relleno = tipo_de_relleno(relleno=datos['relleno'], crocante=datos['crocante'], numero_de_pisos=datos['numero_de_pisos'])
            pedido_relleno.save()
            return render(request, 'index.html')
    else:
        formulario_nrelleno = tipo_de_rellenoForm()
    # Always return a response, including for an invalid POST.
    return render(request, 'tipo_de_relleno.html')
| Nicolas-Amato/PRIMERA-ENTREGA-DEL-PROYECTO-FINAL | MiApp/views.py | views.py | py | 2,566 | python | es | code | 1 | github-code | 13 |
7832556523 | # -.- coding:latin1 -.-
# @author: Nicolas
"""Ce programme calcule la trajectoire de neutrons ayant été lancés à
travers une plaque solide amorphe selon certains paramètres de départ.
Le programme transmet ensuite la proportion de ces neutrons ayant été
émis, la proportion des neutrons ayant été réfléchis et la proportion
de ceux qui ont été absorbés. Finalement, il effectue des tests avec
des paramètres initaux spécifiques afin de valider son efficacité."""
import numpy as np
def etatNeutrons(pAbsorption, pDispersion, libreParcoursMoyen, epaisseur,
                 nombreNeutrons):
    """Simulate neutrons shot at an amorphous solid slab and return the
    fractions that were absorbed, transmitted and reflected.

    pAbsorption : probability that an interaction absorbs the neutron.
    pDispersion : probability that an interaction scatters the neutron.
    libreParcoursMoyen : mean free path travelled before interacting.
    epaisseur : thickness of the slab.
    nombreNeutrons : number of neutrons to simulate.

    Returns (None, None, None) after printing an error message when the
    two probabilities sum to more than 100%.
    """
    if pDispersion + pAbsorption > 1:
        print('Erreur, la probabilité que le neutron soit absorbé ou \
dispersé est supérieure\nà 100%. Veuillez réessayer.')
        return None, None, None
    absorbes = 0
    transmis = 0
    reflechis = 0
    for _ in range(nombreNeutrons):
        # Each neutron starts at the slab entrance going straight in;
        # `direction` is the cosine of the angle with the x axis, so only
        # the x coordinate matters.
        positionX = 0
        direction = 1
        while True:
            # Distance travelled along x before the next possible interaction.
            positionX += np.random.exponential(libreParcoursMoyen) * direction
            if positionX < 0:
                # Left through the front face: reflected.
                reflechis += 1
                break
            if positionX > epaisseur:
                # Left through the back face: transmitted.
                transmis += 1
                break
            tirage = np.random.random()
            if tirage < pAbsorption:
                absorbes += 1
                break
            if tirage < pAbsorption + pDispersion:
                # Scattered: draw a new direction cosine in (-1, 1].
                direction = 1 - 2 * np.random.random()
    return (absorbes / nombreNeutrons,
            transmis / nombreNeutrons,
            reflechis / nombreNeutrons)
def incertitudeGaussienne(quantite, repetition):
    """Return the uncertainty of a quantity following a Gaussian
    (binomial-approximated) distribution.

    quantite : measured proportion(s), as an ndarray.
    repetition : number of repetitions of the experiment.
    """
    variance = quantite * (1 - quantite) / (repetition - 1)
    return np.sqrt(variance)
epaisseur = 1
libreParcoursMoyen = 0.2
nombreNeutrons = 1000
# Common starting conditions shared by every validation scenario below
proportions = []
# Flat list collecting (absorbed, transmitted, reflected) per scenario
proportions.extend(etatNeutrons(0, 0, libreParcoursMoyen, epaisseur,
                                nombreNeutrons))
"""On fait le test pour le cas où la probabilité que le neutron soit absorbé
est la même que la probabilité qu'il soit dipersé."""
proportions.extend(etatNeutrons(1, 0, libreParcoursMoyen, epaisseur,
                                nombreNeutrons))
# Scenario: absorption probability of 100%
proportions.extend(etatNeutrons(0.3, 0.3, libreParcoursMoyen, epaisseur,
                                nombreNeutrons))
"""On fait le test où la probabilité d'absorption et la probabilité de
dispertion sont à 30%"""
proportions.extend(etatNeutrons(libreParcoursMoyen, 0, libreParcoursMoyen,
                                epaisseur, nombreNeutrons))
"""On fait le test où la probabilité d'absorption est égale au parcours libre
moyen et la probabilité de dispertion est nulle"""
A, T, R = [], [], []
# Lists that will hold the absorbed / transmitted / reflected proportions
i = 0
# Reset the counter
while i < len(proportions):
    if i % 3 == 0:
        A.append(proportions[i])
    elif i % 3 == 1:
        T.append(proportions[i])
    else:
        R.append(proportions[i])
    i += 1
# Each scenario produced a triple, so element i belongs to category i % 3
A = np.array(A)
T = np.array(T)
R = np.array(R)
"""On transforme les résultats en ndarray pour effectuer le calcul
d'incertitude"""
incertitudeA = incertitudeGaussienne(A, nombreNeutrons)
incertitudeT = incertitudeGaussienne(T, nombreNeutrons)
incertitudeR = incertitudeGaussienne(R, nombreNeutrons)
# Standard error on each measured proportion
i = 0
# Reset the counter before reporting
while i < len(A):
    # Stop once every scenario has been reported to the user
    if i == 0:
        print('Probabilité d\'absorption et de dispertion égales:')
    elif i == 1:
        print('Probabilité d\'absorption égale à 1:')
    elif i == 2:
        print('Probabilité d\'absorption et de dispertion à 0.3:')
    else:
        print('Probabilité d\'absorption à 0.2 et probabilité de dispertion \
à 0:')
    """On décrit brièvement chaque test effectué avant d'afficher les
    résultats à l'utilisateur"""
    print('Proportions des neutrons:\nAbsorbés: ' + str(A[i]) + \
    ' avec une incertitude de ' + str(incertitudeA[i]) + '\nTransmis: ' + \
    str(T[i]) + ' avec une incertitude de ' + str(incertitudeT[i]) + \
    '\nRéfléchis: ' + str(R[i]) + ' avec une incertitude de ' + \
    str(incertitudeR[i]) + '\n')
    # Show the results and uncertainties of each validation run
    i += 1
32270068553 | """
Examples using foramt with names
"""
def main():
    """Demonstrate str.format with named placeholders."""
    name = 'Foo Bar'
    age = 42.12
    message = 'The user {name} was born {age} years ago.'.format(name=name, age=age)
    print(message)
if __name__ == '__main__':
main()
| sara-kassani/1000_Python_example | 03_formatted_printing/03_format_names.py | 03_format_names.py | py | 237 | python | en | code | 1 | github-code | 13 |
35770920732 | import csv
import lxml.etree
import lxml.builder
def convert(filenames):
    # Merge one or more CSV files into a single XML document of
    # <DOC><DOCNO>...</DOCNO><TEXT>...</TEXT></DOC> records.
    #
    # NOTE(review): this is Python 2 code (`print >> f` below) and the
    # output handle `f` is never closed or flushed explicitly.
    E = lxml.builder.ElementMaker()
    DOC = E.DOC
    DOCNO = E.DOCNO
    TEXT = E.TEXT
    f = open("./data/tweets.xml", 'w')
    for filename in filenames:
        with open(filename, 'rb') as csvfile:
            spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
            flag = 0  # used to skip the header row of each CSV file
            for row in spamreader:
                if flag == 0:
                    flag = 1
                    continue
                else:
                    # row[1] is presumably the tweet id and row[2] the
                    # tweet text -- confirm against the CSV schema.
                    the_doc = (DOC(DOCNO(row[1]), TEXT(row[2])))
                    print >> f, (lxml.etree.tostring(the_doc, pretty_print=True))
71544546579 | # 加载 推理
import onnxruntime as ort
import torch
import time
import onnx
from PIL import Image
import cv2
import os
import numpy as np
from torchvision import transforms
from torch.utils.data import DataLoader
import albumentations as alb
import torch.utils.data
# import os.path as osp
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from albumentations import Compose
class test_Dataset(torch.utils.data.Dataset):
    """Dataset that loads one PNG image per id for ONNX inference tests.

    Args:
        img_ids: iterable of image ids (file names without extension).
        img_dir: directory containing ``<img_id>.png`` files.
        num_classes: number of output classes (kept for API parity).
        transform: optional albumentations transform applied per image.
    """

    def __init__(self, img_ids, img_dir, num_classes, transform=None):
        self.img_ids = img_ids
        self.img_dir = img_dir
        self.num_classes = num_classes
        self.transform = transform

    def __len__(self):
        return len(self.img_ids)

    def __getitem__(self, idx):
        img_id = self.img_ids[idx]
        img = cv2.imread(os.path.join(self.img_dir, img_id + '.png'))
        if self.transform is not None:
            augmented = self.transform(image=img)
            # Fixed: albumentations returns the transformed image under the
            # key 'image'; the previous 'images' raised a KeyError.
            img = augmented['image']
        img = img.astype('float32') / 255
        # NOTE(review): (2, 1, 0) produces C x W x H rather than the usual
        # C x H x W -- confirm this matches the exported model's layout.
        img = img.transpose(2, 1, 0)
        return img, {'img_id': img_id}
def time_sync():
    """Return an accurate wall-clock reading: synchronize CUDA first when a
    GPU is present so pending kernels do not skew the measurement."""
    cuda_ready = torch.cuda.is_available()
    if cuda_ready:
        torch.cuda.synchronize()
    return time.time()
def test_2():
    # Run the exported ONNX model once on a known DSB-2018 nuclei image and
    # write the sigmoid of output channel 0 as an 8-bit mask (222222.png).
    ort_session = ort.InferenceSession('model.onnx')
    input_name = ort_session.get_inputs()[0].name
    img = cv2.imread('inputs/data-science-bowl-2018/stage1_train/6bc8cda54f5b66a2a27d962ac219f8075bf7cc43b87ba0c9e776404370429e80/images/6bc8cda54f5b66a2a27d962ac219f8075bf7cc43b87ba0c9e776404370429e80.png') # 02_test.tif')#demo.png
    #img = cv2.resize(img, (96, 96))
    nor = alb.Normalize()
    # NOTE(review): Normalize() already rescales the pixels and the next
    # line divides by 255 again -- confirm this double scaling matches what
    # the exported model was trained with.
    img = nor.apply(image=img)
    img = img.astype('float32') / 255
    #img = img.transpose(2, 1, 0)
    img = cv2.resize(img, (96, 96))
    # Build a 1 x 3 x 96 x 96 batch and run it through onnxruntime.
    tensor = transforms.ToTensor()(img)
    tensor = tensor.unsqueeze_(0)
    ort_outs = ort_session.run(None, {input_name: tensor.cpu().numpy()})
    img_out = ort_outs[0]
    img_out = torch.from_numpy(img_out)
    img_out = torch.sigmoid(img_out).cpu().numpy()
    cv2.imwrite(os.path.join('222222.png'), (img_out[0][0] * 255).astype('uint8'))
def test_1():
    # Manual ONNX inference experiment: run the model on one image, then
    # copy each raw output channel pixel-by-pixel into a grayscale copy of
    # the input and save it as a mask.
    ort_session = ort.InferenceSession('model.onnx') # torch13.onnx')#'./semseg.onnx')
    onnx_input_name = ort_session.get_inputs()[0].name
    onnx_input_names = ort_session.get_inputs()
    onnx_outputs_names = ort_session.get_outputs()
    output_names = []
    for o in onnx_outputs_names:
        output_names.append(o.name)
    img = cv2.imread('inputs/data-science-bowl-2018/stage1_train/6bc8cda54f5b66a2a27d962ac219f8075bf7cc43b87ba0c9e776404370429e80/images/6bc8cda54f5b66a2a27d962ac219f8075bf7cc43b87ba0c9e776404370429e80.png') # 02_test.tif')#demo.png
    img = cv2.resize(img, (96, 96)) # 256, 256)) # height = 1024, width = 2048
    #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Run the ONNX model with onnxruntime.
    tensor = transforms.ToTensor()(img)
    # tensor=transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(tensor)
    tensor = tensor.unsqueeze_(0)
    # tensor = tensor# / 255.0
    input_name = ort_session.get_inputs()[0].name
    label_name = ort_session.get_outputs()[0].name
    print("input_name ", input_name)
    print("label_name ", label_name)
    result = ort_session.run([label_name], {input_name: tensor.cpu().numpy()})
    # print("result ",result[0][0][0][303])#[303])
    # Save the semantic-segmentation mask(s) to disk.
    img2 = cv2.imread('inputs/data-science-bowl-2018/stage1_train/6bc8cda54f5b66a2a27d962ac219f8075bf7cc43b87ba0c9e776404370429e80/images/6bc8cda54f5b66a2a27d962ac219f8075bf7cc43b87ba0c9e776404370429e80.png', ) # 01_test.tif')#demo.png')
    img2 = cv2.resize(img2, (96, 96)) # 256,256)) # you can also use other way to create a temp images
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    # NOTE(review): the per-pixel loop stores raw (pre-sigmoid) float model
    # outputs into a uint8 image, so values are truncated/wrapped -- confirm
    # a vectorized sigmoid + scaling was not intended instead.
    for k in range(0, 1):
        for h in range(0, img2.shape[0]):
            for w in range(0, img2.shape[1]):
                img2[h, w] = result[0][0][k][h][w]
        # cv2.normalize(img2, img2, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX) # normalization (disabled)
        min_val, max_val, min_indx, max_indx = cv2.minMaxLoc(img2)
        cv2.imwrite('./mask_semseg_' + str(k) + '.png', img2)
def to_numpy(tensor):
    """Convert a torch tensor to a NumPy array, detaching it from the
    autograd graph first when it requires gradients."""
    if tensor.requires_grad:
        return tensor.detach().cpu().numpy()
    return tensor.cpu().numpy()
def test_n():
    """
    End-to-end check of the exported ONNX model on the test dataset;
    writes one predicted mask PNG per input image.
    :return: None
    """
    onnx_path = 'model.onnx'
    image_path = 'inputs/data-science-bowl-2018/stage1_train'
    # NOTE(review): Compose here is albumentations' but Normalize comes
    # from torchvision.transforms -- mixing the two libraries looks
    # unintentional; verify which Normalize was meant.
    test_transform = Compose([
        transforms.Normalize(),
    ])
    # NOTE(review): img_ids='0' makes the dataset iterate the single
    # character '0' rather than a list of image ids -- verify.
    test_dataset = test_Dataset(
        img_ids='0',
        img_dir=image_path,
        num_classes=1,
        transform=test_transform
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=1,
        drop_last=False
    )
    # print(test_loader)
    for input, meta in test_loader:
        # NOTE(review): the session is rebuilt for every batch; hoisting it
        # out of the loop would avoid repeated model loads.
        ort_session = ort.InferenceSession(onnx_path)
        # print('input', input.shape)
        # print(input.shape)
        ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(input)}
        # print('ort_inputs', len(ort_inputs))
        ort_outs = ort_session.run(None, ort_inputs)
        # print('ort_outs', type(ort_outs))
        img_out = ort_outs[0]
        img_out = torch.from_numpy(img_out)
        # print('1', img_out)
        img_out = torch.sigmoid(img_out).cpu().numpy()
        # print('img_out', img_out.shape)
        img_out = img_out.transpose(0, 1, 3, 2)
        num_classes = 1
        # NOTE(review): multiplying by 255 twice overflows/wraps uint8 for
        # any probability above ~1/255 -- confirm a single * 255 was meant.
        for i in range(len(img_out)):
            cv2.imwrite(os.path.join('./', meta['img_id'][i].split('.')[0] + '.png'),
                        (img_out[i, num_classes - 1] * 255 * 255).astype('uint8'))
test_2() | bashendixie/ml_toolset | 案例56 unet + pytorch 数据科学碗2018/inference_onnx.py | inference_onnx.py | py | 5,755 | python | en | code | 9 | github-code | 13 |
41767608702 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """LeetCode 700: look up a value in a binary search tree."""

    def f(self, root):
        # Iterative BST descent; records the matching node in self.ans
        # (left for values smaller than the current node, right otherwise).
        node = root
        while node is not None:
            if node.val == self.val:
                self.ans = node
                return
            node = node.left if node.val > self.val else node.right

    def searchBST(self, root: Optional[TreeNode], val: int) -> Optional[TreeNode]:
        """Return the subtree rooted at the node whose value equals `val`,
        or None when no such node exists."""
        self.val = val
        self.ans = None
        self.f(root)
        return self.ans
4215633641 | import os
def find(download_path: str, range=300):
    """Return the most recently downloaded files in *download_path*.

    All files whose creation time falls within *range* seconds of the
    newest file are returned, so that several files downloaded in one
    batch are moved together.  Subdirectories are ignored.

    Args:
        download_path (str): path to the user's downloads directory.
        range (int, optional): window, in seconds, within which files count
            as part of the newest batch. Defaults to 300 (five minutes).
            (The name shadows the builtin but is kept for compatibility.)

    Returns:
        list[str]: file names of the most recent download batch, in
        directory-listing order; empty when the directory has no files.
    """
    # Stat every file exactly once; the original re-queried ctimes inside
    # nested loops, issuing O(n) stat calls per file.
    entries = []
    for name in os.listdir(download_path):
        full_path = os.path.join(download_path, name)
        if not os.path.isdir(full_path):
            entries.append((os.path.getctime(full_path), name))
    if not entries:
        # Previously an empty directory fell back to comparing against the
        # directory's own ctime; now it simply yields an empty batch.
        return []
    newest = max(ct for ct, _ in entries)
    return [name for ct, name in entries if ct > newest - range]
22500101638 | from titlecase import titlecase
class Cleaners:
    """Normalization helpers for incident-report field values.

    Methods are deliberately defined without `self` and called directly on
    the class (e.g. ``Cleaners.race(value)``).
    """

    def resident_weapon_used(val):
        # Strip the "Suspect - " prefix via a fixed lookup table; unknown
        # values pass through unchanged.
        weapons_used = {
            "Suspect - Handgun": "Handgun",
            "Suspect - Knife": "Knife",
            "Suspect - Misc Weapon": "Misc Weapon",
            "Suspect - Rifle": "Rifle",
            "Suspect - Unarmed": "Unarmed",
        }
        return weapons_used.get(val, val)

    def officer_force_type(text):
        # Map raw force descriptions onto their categorized names; None and
        # unknown values pass through unchanged.
        prefix_map = {
            "Baton": "Less Lethal-Baton",
            "Bean Bag": "Less Lethal-Bean Bag",
            "Body Weight Leverage": "Physical-Weight Leverage",
            "Canine bite": "Canine Bite",
            "CS Fogger": "Less Lethal-CS/OC",
            "Handcuffing": "Physical-Handcuffing",
            "Handgun": "Lethal-Handgun",
            "Hands, Fist, Feet": "Physical-Hands, Fist, Feet",
            "Joint Manipulation": "Physical-Joint/Pressure",
            "Less Lethal-Leg Sweep": "Physical-Leg Sweep",
            "Other Impact Weapon": "Less Lethal-Other",
            "Pepper Ball": "Less Lethal-Pepperball",
            "Personal CS/OC spray": "Less Lethal-Personal CS/OC spray",
            "Taser": "Less Lethal-Taser",
            "Vehicle": "Lethal-Vehicle",
        }
        if text is None:
            return None
        return prefix_map.get(text, text)

    def race(text):
        # Expand single-letter race codes before titlecasing.
        race_map = {
            "B": "Black",
        }
        return titlecase(race_map.get(text, text))

    def sex(text):
        # Expand single-letter sex codes before titlecasing.
        sex_map = {
            "F": "Female",
            "M": "Male",
        }
        return titlecase(sex_map.get(text, text))

    def capitalize(value):
        # Titlecase a string while keeping known abbreviations uppercase;
        # None and list values are returned unchanged.
        def keep_abbreviations(word, **kwargs):
            if word.upper() in ('NW', 'SE', 'ED', 'DT', 'FTO', 'ND', 'SW', "DWI"):
                return word.upper()
        if value is None or isinstance(value, list):
            return value
        return titlecase(value.strip(), callback=keep_abbreviations)
| webmaven/comport | comport/data/cleaners.py | cleaners.py | py | 2,142 | python | en | code | 0 | github-code | 13 |
14906495343 | """
Write
=====
"""
import struct
from espresso import print_ as print
import gc
########################################################################
class Write:
    """Render text onto a display buffer using a pre-rasterized 2-bit font.

    Each glyph is stored as (width, *rows) where every row byte packs four
    pixels at two bits each; the two-bit value indexes a 4-entry palette.
    """
    # ----------------------------------------------------------------------
    def __init__(self, buffer, font):
        """Initialize a writer for a custom font.

        Parameters
        ----------
        buffer : oled handler object
            Display handler; must provide `color565` and `blit_buffer`.
        font : module
            The python module with the font (exposes `_FONT`).
        """
        self.buffer = buffer
        self.font = font._FONT
    # ----------------------------------------------------------------------
    def text(self, string, x0=0, y0=0, color="#ffffff", bgcolor=None, colors=None):
        """Write a string at position x0, y0.

        Loads glyphs from the bitmap font and draws the string character by
        character, advancing x by each glyph's width.

        Parameters
        ----------
        string : str
            The message to write.
        x0 : int
            X position.
        y0 : int
            Y position.
        color, bgcolor, colors :
            Foreground/background colors, or an explicit 4-entry palette
            indexed by the glyph's 2-bit pixel values.
        """
        # buffer = self.buffer
        # font = self.font
        if colors is None:
            colors = (color, color, bgcolor, bgcolor)
        x = x0
        for c in string:
            buffer = []
            # Characters missing from the font fall back to '?'.
            if not ord(c) in self.font.keys():
                c = "?"
            row = y0
            _w, * _font = self.font[ord(c)]
            for byte in _font:
                unsalted = byte
                for col in range(x, x + _w):
                    # Lowest two bits select the palette entry for this pixel.
                    color = colors[unsalted & 0x03]
                    # if color is not None:
                    #buffer.pixel(col, row, color)
                    buffer.append(color)
                    unsalted >>= 2
                row += 1
            # NOTE(review): leftover debug print -- consider removing.
            print("BUFFER: ", len(buffer))
            # NOTE(review): width is passed as col - x0 even though this
            # glyph starts at x; for strings longer than one character that
            # looks wrong -- verify against self.draw's expectations.
            self.draw(x, y0, col - x0, row - y0, buffer)
            x += _w
    # ----------------------------------------------------------------------
    def char(self, c, x0=0, y0=0, color="#ffffff", bgcolor=None, colors=None):
        """Draw a single glyph (looked up by key `c`) at x0, y0; returns 0
        when the glyph is not present in the font."""
        buffer = []
        if colors is None:
            colors = (color, color, bgcolor, bgcolor)
        if not c in self.font.keys():
            return 0
        row = y0
        _w, * _font = self.font[c]
        for byte in _font:
            unsalted = byte
            for col in range(x0, x0 + _w):
                color = colors[unsalted & 0x03]
                # if color is not None:
                # self.buffer.pixel(col, row, color)
                buffer.append(color)
                unsalted >>= 2
            row += 1
        self.draw(x0, y0, col - x0, row - y0, buffer)
        # x += _w
    # ----------------------------------------------------------------------
    def draw(self, X, Y, col, row, buffer):
        """Pack the collected pixel colors as big-endian RGB565 and blit the
        block of `col` x `row` pixels to the display at (X, Y)."""
        # for y in range(Y, row + 1):
        # for x in range(X, col + 1):
        # if buffer:
        #self.buffer.pixel(x, y, buffer.pop(0))
        # else:
        # return
        # print("BUFFER: ", len(buffer))
        gc.collect()
        buffer = b"".join([struct.pack(">H", self.buffer.color565(c)) for c in buffer])
        # Each pixel occupies 2 bytes, so width = total bytes / (2 * height).
        self.buffer.blit_buffer(buffer, X, Y, len(buffer) // (2 * row), row)
        gc.collect()
| BradenM/micropy-stubs | packages/esp32_LoBo/frozen/display/write.py | write.py | py | 3,386 | python | en | code | 26 | github-code | 13 |
# Market items (name -> EVE type id) used as build components for the
# Outer Ring Excavations LP store offers defined below.
items_component_outer_ring_excavations = [
    {"item_name": "Inherent Implants 'Highwall' Mining MX-1003", "id": 22534},
    {"item_name": "Limited Social Adaptation Chip - Beta", "id": 14299},
    {"item_name": "Mining Foreman Mindlink", "id": 22559},
    {"item_name": "Shield Command Mindlink", "id": 21888},
    {"item_name": "Limited Cybernetic Subprocessor - Beta", "id": 14298},
    {"item_name": "Expanded Cargohold I", "id": 1317},
    {"item_name": "Limited Neural Boost - Beta", "id": 14296},
    {"item_name": "Reinforced Bulkheads I", "id": 1333},
    {"item_name": "Strip Miner I", "id": 17482},
    {"item_name": "Deep Core Mining Laser I", "id": 12108},
    {"item_name": "Limited Memory Augmentation - Beta", "id": 14297},
    {"item_name": "Miner I", "id": 483},
    {"item_name": "Limited Ocular Filter - Beta", "id": 14295},
    {"item_name": "Ice Mining Laser I", "id": 37450},
    {"item_name": "Gas Cloud Harvester II", "id": 60314},
    {"item_name": "Ice Harvester I", "id": 16278},
    ]
items_in_lp_store_ore_outer_ring_excavations = [
# {'item_name': 'Upwell Palatine Keepstar BPC',
# 'lp_store_components': {},
# 'lp_price': 2000000,
# 'isk_price': 200000000,
# 'quantity': 1,
# 'id': 0},
{'item_name': 'Mid-grade Harvest Omega',
'lp_store_components': {"Inherent Implants 'Highwall' Mining MX-1003": 1},
'lp_price': 321300,
'isk_price': 214200000,
'quantity': 1,
'id': 28807},
{'item_name': 'Mid-grade Harvest Epsilon',
'lp_store_components': {"Limited Social Adaptation Chip - Beta": 1},
'lp_price': 170100,
'isk_price': 113400000,
'quantity': 1,
'id': 28805},
# {'item_name': 'ORE Mining Director Mindlink',
# 'lp_store_components': {"Mining Foreman Mindlink": 1, "Shield Command Mindlink": 1},
# 'lp_price': 100000,
# 'isk_price': 100000000,
# 'quantity': 1,
# 'id': 43775},
{'item_name': 'Mid-grade Harvest Delta',
'lp_store_components': {"Limited Cybernetic Subprocessor - Beta": 1},
'lp_price': 94500,
'isk_price': 63000000,
'quantity': 1,
'id': 28804},
# {'item_name': "'Excavator' Mining Drone BPC",
# 'lp_store_components': {},
# 'lp_price': 80000,
# 'isk_price': 60000000,
# 'quantity': 5,
# 'id': 0},
# {'item_name': "'Excavator' Ice Harvesting Drone BPC",
# 'lp_store_components': {},
# 'lp_price': 80000,
# 'isk_price': 60000000,
# 'quantity': 5,
# 'id': 0},
{'item_name': "Zainou 'Beancounter' Research RR-605",
'lp_store_components': {},
'lp_price': 79375,
'isk_price': 31750000,
'quantity': 1,
'id': 27179},
{'item_name': "Eifyr and Co. 'Alchemist' Gas Harvesting GH-805",
'lp_store_components': {},
'lp_price': 79375,
'isk_price': 31750000,
'quantity': 1,
'id': 27239},
{'item_name': "Zainou 'Beancounter' Industry BX-804",
'lp_store_components': {},
'lp_price': 79375,
'isk_price': 31750000,
'quantity': 1,
'id': 27171},
{'item_name': "Inherent Implants 'Yeti' Ice Harvesting IH-1005",
'lp_store_components': {},
'lp_price': 79375,
'isk_price': 31750000,
'quantity': 1,
'id': 22571},
{'item_name': "Zainou 'Beancounter' Metallurgy MY-705",
'lp_store_components': {},
'lp_price': 79375,
'isk_price': 31750000,
'quantity': 1,
'id': 27181},
{'item_name': "Zainou 'Beancounter' Reprocessing RX-804",
'lp_store_components': {},
'lp_price': 79375,
'isk_price': 31750000,
'quantity': 1,
'id': 27174},
{'item_name': "Zainou 'Beancounter' Science SC-805",
'lp_store_components': {},
'lp_price': 79375,
'isk_price': 31750000,
'quantity': 1,
'id': 27184},
{'item_name': "Inherent Implants 'Highwall' Mining MX-1005",
'lp_store_components': {},
'lp_price': 79375,
'isk_price': 31750000,
'quantity': 1,
'id': 22535},
{'item_name': "Inherent Implants 'Highwall' Mining Upgrades MU-1005",
'lp_store_components': {},
'lp_price': 79375,
'isk_price': 31750000,
'quantity': 1,
'id': 27150},
{'item_name': 'ORE Expanded Cargohold',
'lp_store_components': {"Expanded Cargohold I": 1},
'lp_price': 67500,
'isk_price': 45000000,
'quantity': 1,
'id': 34489},
{'item_name': 'Mid-grade Harvest Gamma',
'lp_store_components': {"Limited Neural Boost - Beta": 1},
'lp_price': 56700,
'isk_price': 37800000,
'quantity': 1,
'id': 28806},
{'item_name': 'ORE Reinforced Bulkheads',
'lp_store_components': {"Reinforced Bulkheads I": 1},
'lp_price': 45000,
'isk_price': 30000000,
'quantity': 1,
'id': 34485},
{'item_name': 'ORE Strip Miner',
'lp_store_components': {"Strip Miner I": 1},
'lp_price': 45000,
'isk_price': 30000000,
'quantity': 1,
'id': 28754},
{'item_name': 'ORE Deep Core Mining Laser',
'lp_store_components': {"Deep Core Mining Laser I": 1},
'lp_price': 45000,
'isk_price': 30000000,
'quantity': 1,
'id': 28748},
{'item_name': 'Mid-grade Harvest Beta',
'lp_store_components': {},
'lp_price': 37800,
'isk_price': 25200000,
'quantity': 1,
'id': 28803},
{'item_name': 'ORE Miner',
'lp_store_components': {"Miner I": 1},
'lp_price': 36000,
'isk_price': 24000000,
'quantity': 1,
'id': 28750},
{'item_name': 'ORE Ice Harvester',
'lp_store_components': {"Ice Harvester I": 1},
'lp_price': 36000,
'isk_price': 24000000,
'quantity': 1,
'id': 28752},
{'item_name': 'Mid-grade Harvest Alpha',
'lp_store_components': {"Limited Ocular Filter - Beta": 1},
'lp_price': 28350,
'isk_price': 18900000,
'quantity': 1,
'id': 28802},
{'item_name': 'Mining Connections',
'lp_store_components': {},
'lp_price': 15000,
'isk_price': 10000000,
'quantity': 1,
'id': 3893},
{'item_name': 'ORE Ice Mining Laser',
'lp_store_components': {"Ice Mining Laser I": 1},
'lp_price': 12500,
'isk_price': 12500000,
'quantity': 1,
'id': 37452},
{'item_name': "Zainou 'Beancounter' Research RR-603",
'lp_store_components': {},
'lp_price': 10875,
'isk_price': 4350000,
'quantity': 1,
'id': 27177},
{'item_name': "Inherent Implants 'Highwall' Mining MX-1003",
'lp_store_components': {},
'lp_price': 10875,
'isk_price': 4350000,
'quantity': 1,
'id': 22534},
{'item_name': "Inherent Implants 'Highwall' Mining Upgrades MU-1003",
'lp_store_components': {},
'lp_price': 10875,
'isk_price': 4350000,
'quantity': 1,
'id': 27149},
{'item_name': "Zainou 'Beancounter' Science SC-803",
'lp_store_components': {},
'lp_price': 10875,
'isk_price': 4350000,
'quantity': 1,
'id': 27178},
{'item_name': "Eifyr and Co. 'Alchemist' Gas Harvesting GH-803",
'lp_store_components': {},
'lp_price': 10875,
'isk_price': 4350000,
'quantity': 1,
'id': 27238},
{'item_name': "Inherent Implants 'Yeti' Ice Harvesting IH-1003",
'lp_store_components': {},
'lp_price': 10875,
'isk_price': 4350000,
'quantity': 1,
'id': 22570},
{'item_name': "Zainou 'Beancounter' Metallurgy MY-703",
'lp_store_components': {},
'lp_price': 10875,
'isk_price': 4350000,
'quantity': 1,
'id': 27176},
{'item_name': "Zainou 'Beancounter' Industry BX-802",
'lp_store_components': {},
'lp_price': 10875,
'isk_price': 4350000,
'quantity': 1,
'id': 27167},
{'item_name': "Zainou 'Beancounter' Reprocessing RX-802",
'lp_store_components': {},
'lp_price': 10875,
'isk_price': 4350000,
'quantity': 1,
'id': 27169},
{'item_name': "Zainou 'Beancounter' Research RR-601",
'lp_store_components': {},
'lp_price': 375,
'isk_price': 150000,
'quantity': 1,
'id': 27180},
{'item_name': "Zainou 'Beancounter' Metallurgy MY-701",
'lp_store_components': {},
'lp_price': 375,
'isk_price': 150000,
'quantity': 1,
'id': 27182},
{'item_name': "Inherent Implants 'Highwall' Mining MX-1001",
'lp_store_components': {},
'lp_price': 375,
'isk_price': 150000,
'quantity': 1,
'id': 27102},
{'item_name': "Eifyr and Co. 'Alchemist' Gas Harvesting GH-801",
'lp_store_components': {},
'lp_price': 375,
'isk_price': 150000,
'quantity': 1,
'id': 27240},
{'item_name': "Inherent Implants 'Highwall' Mining Upgrades MU-1001",
'lp_store_components': {},
'lp_price': 375,
'isk_price': 150000,
'quantity': 1,
'id': 27151},
{'item_name': "Zainou 'Beancounter' Reprocessing RX-801",
'lp_store_components': {},
'lp_price': 375,
'isk_price': 150000,
'quantity': 1,
'id': 27175},
{'item_name': "Zainou 'Beancounter' Industry BX-801",
'lp_store_components': {},
'lp_price': 375,
'isk_price': 150000,
'quantity': 1,
'id': 27170},
{'item_name': "Inherent Implants 'Yeti' Ice Harvesting IH-1001",
'lp_store_components': {},
'lp_price': 375,
'isk_price': 150000,
'quantity': 1,
'id': 27103},
{'item_name': "Zainou 'Beancounter' Science SC-801",
'lp_store_components': {},
'lp_price': 375,
'isk_price': 150000,
'quantity': 1,
'id': 27185},
{'item_name': "ORE Gas Cloud Harvester",
'lp_store_components': {"Gas Cloud Harvester II": 1},
'lp_price': 36000,
'isk_price': 24000000,
'quantity': 1,
'id': 60315},
]
| A1ekseyN/Eve_Trade_Parser | lp_store_items_outer_ring_excavations.py | lp_store_items_outer_ring_excavations.py | py | 9,702 | python | en | code | 0 | github-code | 13 |
11612165133 | from botocore.vendored import requests
import os
def get_access_token():
    """ Get client API Service Authorization token and store into sessionAttributes"""
    # Credentials are taken from the Lambda environment configuration.
    access_token_request_data = {
        "clientId": os.environ['CLIENT_SERVICE_ID'],
        "clientSecret": os.environ['CLIENT_SERVICE_SECRET']
    }
    access_token_url = '{}/api/v1/auth/token'.format(os.environ['CLIENT_SERVICE_URL'])
    access_token_headers = {'content-type': 'application/json'}
    # NOTE(review): verify=False disables TLS certificate validation --
    # confirm this is acceptable for the client-service endpoint.
    get_token = requests.post(access_token_url, json=access_token_request_data, headers=access_token_headers,
                              verify=False)
    if get_token.ok:
        access_token_response = get_token.json()
        return access_token_response['data']['accessToken']
    # On failure the function falls through and implicitly returns None;
    # the original session-ending handler is commented out below.
    # else:
    # handle_session_end_request()
def make_loyalty_get_request(loyalty_rest_endpoint, client_service_token):
    # Issue an authenticated GET to the loyalty API and return the response
    # payload's 'data' member (implicitly None on a non-2xx response).
    # NOTE(review): verify=False disables TLS certificate validation, and
    # response.json() is parsed twice (once just for the debug print).
    request_headers = {'content-type': 'application/json',
                       'authorization': 'bearer {}'.format(client_service_token)
                       }
    endpoint = loyalty_rest_endpoint
    response = requests.get(url=endpoint, headers=request_headers, verify=False)
    print(response.json())
    if response.ok:
        json_response = response.json()
        return json_response['data']
    # else:
    # handle_session_end_request()
def make_loyalty_post_request(loyalty_rest_endpoint, client_service_token):
    # Issue an authenticated POST (empty body) to the loyalty API and return
    # the payload's 'data' member (implicitly None on a non-2xx response).
    # NOTE(review): near-duplicate of make_loyalty_get_request; verify=False
    # disables TLS certificate validation here as well.
    request_headers = {'content-type': 'application/json',
                       'authorization': 'bearer {}'.format(client_service_token)
                       }
    endpoint = loyalty_rest_endpoint
    response = requests.post(url=endpoint, headers=request_headers, verify=False, data="")
    print(response.json())
    if response.ok:
        json_response = response.json()
        return json_response['data']
    # else:
    # handle_session_end_request()
| fibonascii/cloud-automation | lambda/alexa-skill-lod-rest/client_service_requests.py | client_service_requests.py | py | 1,898 | python | en | code | 0 | github-code | 13 |
42062220189 | from nose.tools import *
from bin.app import app
from tests.tools import assert_response
from castlecrawler.map import Room
from castlecrawler.bedroom import Bedroom
from castlecrawler.armory import Armory
def test_room():
    # A newly created Room keeps its given name and starts with no exits.
    gold = Room("GoldRoom",
                """ This room has gold in it that you can collect.
                There's a door to the north.""")
    assert_equal(gold.name, "GoldRoom")
    assert_equal(gold.paths, {})
def test_room_paths():
    # NOTE(review): every assertion below is commented out, so this test
    # only constructs two rooms and always passes -- restore or delete it.
    center = Room("Center", "Test room in the center.")
    north = Room("North", "Test room in the north.")
    # # player = Player("Shirley", [])
    # # south = Armory(player)
    # south = Hallway()
    # center.add_paths({'north': north, 'south': south})
    # assert_equal(center.go('north'), north)
    # assert_equal(center.go('south'), south)
def test_map():
    """Rooms linked with add_paths should be reachable (and returnable)
    through go()."""
    entrance = Room("Start", "You can go west and down a hole.")
    forest = Room("Trees", "There are trees here, you can go east.")
    dungeon = Room("Dungeon", "It's dark down here, you can go up.")
    entrance.add_paths({'west': forest, 'down': dungeon})
    forest.add_paths({'east': entrance})
    dungeon.add_paths({'up': entrance})
    assert_equal(entrance.go('west'), forest)
    assert_equal(entrance.go('west').go('east'), entrance)
    assert_equal(entrance.go('down').go('up'), entrance)
9876883788 | # task 9 - defensive programming
# code offers users basic calculation functions or retrieval of previous calculations
# global variable used to format certain outputs for clarity
# global variable used to format certain outputs for clarity
# (identifier fixed: "seperator" -> "separator")
separator_string = "---------------------------------------------------"
# prints welcome message only when first starting the program
print(separator_string)
# user-facing grammar fix: "Please selection" -> "Please select"
print("Please select from the following options:")
while True:
    # prompts user for choice of mode
    mode_selection = input("Perform calculation (c), File read (r), Exit program (x): ")
    # mode input validation
    while mode_selection.lower() not in ['c', 'r', 'x']:
        print("Error: Please enter a valid option\n")
        mode_selection = input("Perform calculation (c), File read (r), Exit program (x): ")
    if mode_selection.lower() == 'c':  # perform calculation selected
        # try block to help catch input errors
        try:
            first_number = float(input("\nEnter the first number:\t\t"))  # first number input
            operator = input("Enter the operator (+ - * /):\t")  # operator input
            # operator input validation
            if operator not in ['+', '-', '*', '/']:
                # raises try block ValueError with custom message
                raise ValueError("Invalid operator selected")
            second_number = float(input("Enter the second number:\t"))  # second number input
            # division by zero check
            if (second_number == 0) and (operator == "/"):
                # raises try block ZeroDivisionError with custom message
                raise ZeroDivisionError("Unable to divide by zero")
            # calculate the result depending on operator
            if operator == '+':
                result = first_number + second_number
            elif operator == '-':
                result = first_number - second_number
            elif operator == '*':
                result = first_number * second_number
            elif operator == '/':
                result = first_number / second_number
            # variable to hold properly formatted equation
            equation = "{} {} {} = {}".format(first_number, operator, second_number, result)
            # prints the equation with result
            print(separator_string)
            print("Result:\t\t\t\t" + equation)
            print(separator_string + "\n")
            # open file, adds previous equation, closes when block exited
            with open("equations.txt", "a") as file:  # append will create file if none
                file.write(equation + "\n")
        # value error message formatting
        except ValueError as error:
            print("Error: {}\n".format(error))
        # divide by zero message formatting
        except ZeroDivisionError as error:
            print("Error: {}\n".format(error))
        # catch all for any unexpected errors
        except Exception:
            print("An unexpected error occurred\n")
    elif mode_selection.lower() == 'r':  # file read selected
        # prints user instructions for file reading
        print("\nCalculations are written to equations.txt")
        print("Data from other .txt files can also be read\n")
        # try block to help catch file handling errors
        try:
            # prompts user for name of file
            filename = input("Enter the name of the file to read: ")
            print(separator_string)
            # opens given file in read only mode and prints contents
            with open(filename, 'r') as file:
                content = file.read()
                print(content)
            # prints confirmation message, useful in case of empty files
            print("Read complete")
            print(separator_string + "\n")
        # file not found error message formatting
        except FileNotFoundError:
            print("Error: file not found\n")
        # catch all for any unexpected errors
        # (trailing "\n" added for consistency with every other error message)
        except Exception:
            print("An unexpected error occurred\n")
    elif mode_selection.lower() == 'x':  # exit program selected
        print("Exiting")
        break
2960982667 | """Habitat patterns."""
from spacy import registry
from traiter.patterns.matcher_patterns import MatcherPatterns
from odonata.pylib.const import CATEGORY, REPLACE
# Matcher that fires on any token already tagged with the 'habitat' entity
# type; the on_match callback (registered below) enriches the entity.
HABITAT = MatcherPatterns(
    'habitat',
    on_match='odonata.habitat.v1',
    decoder={
        'habitat': {'ENT_TYPE': 'habitat'},
    },
    patterns=[
        'habitat',
    ],
)
@registry.misc(HABITAT.on_match)
def habitat(ent):
    """Attach normalized habitat name and category to the matched entity."""
    key = ent.text.lower()
    ent._.data = {
        'habitat': REPLACE.get(key, key),
        'habitat_cat': CATEGORY.get(key, key),
    }
| rafelafrance/traiter_odonata | odonata/patterns/habitat.py | habitat.py | py | 600 | python | en | code | 0 | github-code | 13 |
24313218215 | from django.conf.urls import url, include, patterns
from . import views
# URL routes for the blog app. The `name=` values are kept as-is (even the
# misspellings 'xxx' and 'thenks') because templates may reverse() them.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^about/$', views.about, name='about'),
    # NOTE(review): the news page is served by a view called `xxx` — consider
    # renaming the view for clarity (route name must stay stable).
    url(r'^news/$', views.xxx, name='xxx'),
    url(r'^articles/(?P<article_id>[0-9]+)/$', views.show_article, name='article'),
    # NOTE(review): partner detail reuses views.about; confirm this is intended.
    url(r'^partner/(?P<partner_id>[0-9]+)/$', views.about, name='partners'),
    url(r'^thanks/$', views.thenks, name='thenks'),
    url(r'^simpleform/$', views.mysimpleform, name='form'),
    url(r'^userlogin/$', views.userauth, name = 'userlogin'),
    url(r'^userlogout/$', views.logout_view, name= 'logout'),
]
40836680400 | from ctypes import *
import math
import random
# add
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import colorsys
# Lazily-built global color palette, shared by generate_colors()/draw_boxes().
box_colors = None
def sample(probs):
    """Draw a random index, weighted by the (unnormalized) weights in probs."""
    total = sum(probs)
    threshold = random.uniform(0, 1)
    for index, weight in enumerate(probs):
        threshold -= weight / total
        if threshold <= 0:
            return index
    # Floating-point residue can leave threshold > 0; fall back to last index.
    return len(probs) - 1
def c_array(ctype, values):
    """Copy a Python sequence into a newly allocated ctypes array of `ctype`."""
    out = (ctype * len(values))()
    out[:] = values
    return out
class BOX(Structure):
    # Mirror of darknet's C `box` struct: (x, y) is the box center,
    # w/h its width and height.
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]
class DETECTION(Structure):
    # Mirror of darknet's C `detection` struct: one candidate box plus a
    # per-class probability array of length `classes` and an objectness score.
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]
class IMAGE(Structure):
    # Mirror of darknet's C `image` struct: width, height, channel count,
    # and a flat float buffer holding the pixel data.
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]
class METADATA(Structure):
    # Mirror of darknet's C `metadata` struct: class count and class names.
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]
# --- ctypes bindings for the darknet shared library -------------------------
# Each binding declares the C function's argument/return types so ctypes can
# marshal values correctly.
#lib = CDLL("/home/pjreddie/documents/darknet/libdarknet.so", RTLD_GLOBAL)
lib = CDLL("../darknet-mac/libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
# Detection extraction and non-max suppression helpers.
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
# Memory management for C-allocated buffers (must be called from Python to
# avoid leaks — see detect()).
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
def generate_colors(num_classes):
    """Populate the global `box_colors` palette with at least `num_classes`
    visually distinct RGB tuples, rebuilding it only when too small.

    The palette is deterministic: hues are evenly spaced and shuffled with a
    fixed seed, so repeated runs produce identical colors.
    """
    global box_colors
    # Fix: use `>=` (was `>`) and `is not None` — a cached palette of exactly
    # num_classes entries is already sufficient; the old check regenerated the
    # (identical, thanks to the fixed seed) palette on every call.
    if box_colors is not None and len(box_colors) >= num_classes:
        return box_colors
    # Evenly spaced hues at full saturation/value, converted to 0-255 RGB.
    hsv_tuples = [(x / num_classes, 1., 1.) for x in range(num_classes)]
    box_colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    box_colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            box_colors))
    random.seed(10101)  # Fixed seed for consistent colors across runs.
    # Shuffle colors to decorrelate adjacent classes.
    random.shuffle(box_colors)
    random.seed(None)  # Reset seed to default.
    return box_colors
def draw_boxes(img, result):
    """Render detection results onto a copy of `img` and return it as a
    numpy array.

    `result` is a list of (class_name_bytes, score, (x, y, w, h)) tuples as
    produced by detect(); boxes are given center-based and converted to
    corner coordinates here.
    """
    image = Image.fromarray(img)
    font = ImageFont.truetype(font='font/FiraMono-Medium.otf', size=20)
    # Border thickness scales with image size.
    thickness = (image.size[0] + image.size[1]) // 300
    num_classes = len(result)
    generate_colors(num_classes)
    index = 0
    for objection in result:
        index += 1
        class_name, class_score, (x, y, w, h) = objection
        # print(name, score, x, y, w, h)
        # Convert center/size box to left/right/top/bottom corners.
        left = int(x - w / 2)
        right = int(x + w / 2)
        top = int(y - h / 2)
        bottom = int(y + h / 2)
        label = '{} {:.2f}'.format(class_name.decode('utf-8'), class_score)
        draw = ImageDraw.Draw(image)
        # NOTE(review): ImageDraw.textsize was removed in Pillow 10; this
        # requires an older Pillow (or a port to textbbox/textlength).
        label_size = draw.textsize(label, font)
        # Round and clamp the box to the image bounds.
        top = max(0, np.floor(top + 0.5).astype('int32'))
        left = max(0, np.floor(left + 0.5).astype('int32'))
        bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
        right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
        print(label, (left, top), (right, bottom))
        # Place the label above the box when there is room, else inside it.
        if top - label_size[1] >= 0:
            text_origin = np.array([left, top - label_size[1]])
        else:
            text_origin = np.array([left, top + 1])
        # Draw `thickness` nested rectangles to fake a thick border.
        for i in range(thickness):
            draw.rectangle([left + i, top + i, right - i,
                            bottom - i], outline=box_colors[index - 1])
        draw.rectangle(
            [tuple(text_origin), tuple(text_origin + label_size)],
            fill=box_colors[index - 1])
        draw.text(text_origin, label, fill=(255, 255, 255), font=font)
        del draw
    return np.array(image)
def array_to_image(arr):
    """Convert an HWC uint8-style numpy image into a darknet IMAGE struct.

    Returns (im, arr): `arr` (the normalized, contiguous float buffer) MUST be
    kept alive by the caller — the IMAGE only holds a raw pointer into it, and
    dropping the array would let Python free the memory out from under C.
    """
    # need to return old values to avoid python freeing memory
    # HWC -> CHW, as darknet stores planes channel-first.
    arr = arr.transpose(2, 0, 1)
    c = arr.shape[0]
    h = arr.shape[1]
    w = arr.shape[2]
    #arr = (arr / 255.0).flatten()
    #data = dn.c_array(dn.c_float, arr)
    # Flatten, convert to float32, and scale pixel values into [0, 1].
    arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
    data = arr.ctypes.data_as(POINTER(c_float))
    im = IMAGE(w, h, c, data)
    return im,arr
def classify(net, meta, im):
    """Run the classifier on `im` and return (label, score) pairs,
    highest score first."""
    out = predict_image(net, im)
    scored = [(meta.names[i], out[i]) for i in range(meta.classes)]
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return scored
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    """Run object detection on the image at path `image`.

    Returns a list of (class_name_bytes, probability, (x, y, w, h)) tuples
    sorted by descending probability; boxes are center-based.
    """
    im = load_image(image, 0, 0)
    # pnum receives the number of detections written by the C side.
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    # Suppress overlapping boxes in place when an NMS threshold is given.
    if (nms): do_nms_obj(dets, num, meta.classes, nms);
    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    res = sorted(res, key=lambda x: -x[1])
    # Free the C-allocated buffers only after copying everything we need.
    free_image(im)
    free_detections(dets, num)
    return res
if __name__ == "__main__":
    # Smoke test: load tiny-yolo with COCO metadata and detect on a sample
    # image, printing the raw detections.
    #net = load_net("cfg/densenet201.cfg", "/home/pjreddie/trained/densenet201.weights", 0)
    #im = load_image("data/wolf.jpg", 0, 0)
    #meta = load_meta("cfg/imagenet1k.data")
    #r = classify(net, meta, im)
    #print r[:10]
    # NOTE(review): load_net/load_meta take c_char_p, which expects bytes
    # under Python 3 — these str paths may need b'...' literals; confirm.
    net = load_net("cfg/tiny-yolo.cfg", "tiny-yolo.weights", 0)
    meta = load_meta("cfg/coco.data")
    r = detect(net, meta, "data/dog.jpg")
    print(r)
| manjWu/darknet-python | darknet.py | darknet.py | py | 7,219 | python | en | code | 0 | github-code | 13 |
4810387154 | from datetime import datetime
class dota_match:
    """One Dota match: tournament context, two teams, their odds, and the
    final series score."""

    def __init__(self, tournament, game_format, date_time, team1, team2,
                 t1_odds, t2_odds, t1_score, t2_score):
        self.team1 = team1
        self.team2 = team2
        self.t1_odds = t1_odds
        self.t2_odds = t2_odds
        self.tournament = tournament
        self.date_time = date_time
        self.game_format = game_format
        self.t1_score = t1_score
        self.t2_score = t2_score

    def details(self, datetime=None, game_format=None, results=None):
        """Update optional match details in place.

        Bug fix: `self` was missing from the signature, so any instance call
        shifted the arguments by one.
        NOTE(review): this sets `self.datetime` while __init__ stores
        `self.date_time` — the attribute mismatch looks unintended; confirm.
        """
        if datetime:
            self.datetime = datetime
        if game_format:
            self.game_format = game_format
        if results:
            self.results = results

    @property
    def winner(self):
        """Winning team, or None on a tie.

        Bug fix: the scores were referenced as bare names (`t1_score`)
        instead of instance attributes, which raised NameError.
        """
        if self.t1_score > self.t2_score:
            return self.team1
        elif self.t1_score < self.t2_score:
            return self.team2
        else:
            return None

    @property
    def loser(self):
        """Losing team, or None on a tie (same bare-name fix as `winner`)."""
        if self.t1_score < self.t2_score:
            return self.team1
        elif self.t1_score > self.t2_score:
            return self.team2
        else:
            return None

    def store_team(self, team):
        """Fill the first empty team slot; adding a third team is an error.

        NOTE(review): the error message references `self.id`, which is never
        assigned in this class — confirm where ids are expected to come from.
        """
        if self.team1 is None:
            self.team1 = team
        elif self.team2 is None:
            self.team2 = team
        else:
            raise Exception(f'Trying to add 3rd team {team} into match id {self.id}')
# class team:
# def __init__(self, name, )
| Chenesss/predict_dota | dota_objects.py | dota_objects.py | py | 1,156 | python | en | code | 0 | github-code | 13 |
20885854243 | from __future__ import division
import numpy as np
import logging
import csv
import time as tm
import file_length
import pandas as pd
from simple_progress_bar import update_progress
from cosine_similarity import cos_similarity
# Timestamp captured once at import time. NOTE: the fields are not
# zero-padded, and the name `datetime` shadows the stdlib module name.
datetime = tm.localtime()
date = '{0:}-{1:}-{2:}'.format(datetime.tm_mon, datetime.tm_mday, datetime.tm_year)
time = '{0:}:{1:}:{2:}'.format(datetime.tm_hour, datetime.tm_min, datetime.tm_sec)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def get_loadings(agg_doc_vecs_path, agg_dic_vecs_path, out_path, num_features, delimiter='\t'):
    '''
    :param agg_doc_vecs_path: Path to distributed representations of documents
    :param agg_dic_vecs_path: Path to distributed representations of dictionaries
    :param out_path: Path to write to (tab-separated, one row per document)
    :param num_features: Number of dimensions in distributed representations
    :param delimiter: Delimiter used in the dictionary-vector file
    :return: None; results are streamed to out_path
    '''
    # NOTE(review): this is Python 2-only code — binary-mode csv files,
    # reader.next(), and dict.iterkeys() all break under Python 3.
    """Get loadings between each document vector in agg-doc_vecs_path and each dictionary dimension in
    agg_dic_vecs_path"""
    n_docs = float(file_length.file_len(agg_doc_vecs_path))
    prog_counter = 0
    counter = 0
    # Dictionary vectors: one column per dictionary, loaded fully into memory.
    dic_vecs = pd.read_csv(agg_dic_vecs_path, sep=delimiter)
    dic_vecs = dic_vecs.to_dict(orient='list')
    # Track documents skipped because their vectors contain missing values.
    nan_counter = {'ID': [], 'count': 0}
    with open(agg_doc_vecs_path, 'rb') as doc_vecs, open(out_path, 'wb') as out_file:
        doc_vecs_reader = csv.reader(doc_vecs, delimiter='\t')
        # Skip the header row of the document-vector file.
        doc_vecs_reader.next()
        writer = csv.writer(out_file, delimiter='\t')
        fieldnames_out = ['ID'] + list(dic_vecs.keys())
        writer.writerow(fieldnames_out)
        for doc_vec in doc_vecs_reader:
            if 'nan' in doc_vec:
                nan_counter['count'] += 1
                nan_counter['ID'].append(doc_vec[0])
                pass
            else:
                prog_counter += 1
                counter += 1
                doc_id = doc_vec[0]
                out_row = [doc_id]
                # One cosine similarity per dictionary dimension; only the
                # last num_features columns of the row are vector components.
                for k in dic_vecs.iterkeys():
                    doc_vec = [np.float64(x) for x in doc_vec[-num_features:]]
                    dic_similarity = cos_similarity(doc_vec, dic_vecs[k])
                    out_row.append(dic_similarity)
                writer.writerow(out_row)
                # Refresh the progress bar roughly every 1% of documents.
                if prog_counter >= 0.01 * n_docs:
                    prog_counter = 0
                    update_progress(counter / (n_docs - 1))
    print('Failed to calculate {0} loadings due to missing values.'.format(nan_counter['count']))
    print('IDs for documents with missing values:\n\n', nan_counter['ID'])
| USC-CSSL/DDR | ddr/get_loadings.py | get_loadings.py | py | 2,636 | python | en | code | 26 | github-code | 13 |
1043201052 | import supervision as sv
import numpy as np
import cv2
from ultralytics import YOLO
model = YOLO("./yolov8m.pt")
def callback(x: np.ndarray) -> sv.Detections:
    """Run YOLO on a single image slice and wrap the result for supervision."""
    prediction = model(x, verbose=False, conf=0.25)[0]
    return sv.Detections.from_ultralytics(prediction)
# Run sliced (tiled) inference over the image, draw the detections, save the
# annotated copy, and display it.
image = cv2.imread("./1.png")
slicer = sv.InferenceSlicer(callback=callback)
sliced_detections = slicer(image=image)
box_annotator = sv.BoxAnnotator()
sliced_image = box_annotator.annotate(image.copy(), detections=sliced_detections)
cv2.imwrite('./out.png',sliced_image)
sv.plot_image(sliced_image)
class Action:
    """A reaction channel with mass-action propensity.

    Only per-species stoichiometries of 1 or 2 are supported.
    """

    def __init__(self, rateConstant, reactants, products):
        """Validate stoichiometries and precompute the net change.

        :param rateConstant: mass-action rate constant
        :param reactants: dict mapping species -> stoichiometry (1 or 2)
        :param products: dict mapping species -> stoichiometry (1 or 2)
        :raises ValueError: for any stoichiometry outside {1, 2}
            (ValueError subclasses Exception, so existing callers that
            caught Exception still work)
        """
        allStoich = list(reactants.items()) + list(products.items())
        for molecule, stoich in allStoich:
            if stoich <= 0 or stoich > 2:
                message = "Invalid stoichiometry for {0}: {1}".\
                    format(molecule, stoich)
                raise ValueError(message)
        self._rateConstant = rateConstant
        self._reactants = reactants
        self._products = products
        self._netStoich = self._initializeNetStoich()

    def getPropensity(self, state):
        """Mass-action propensity given species counts in `state`
        (a mapping species -> molecule count)."""
        propensity = self._rateConstant
        for reactant, stoich in self._reactants.items():
            propensity *= _comb(state[reactant], stoich)
        return propensity

    def getNetStoich(self):
        """Net per-species change of one firing (products minus reactants)."""
        return self._netStoich

    def _initializeNetStoich(self):
        # Start from negated reactant stoichiometries, then add products.
        netStoich = {k: -1*v for k, v in self._reactants.items()}
        for product, stoich in self._products.items():
            if product in netStoich:
                netStoich[product] += stoich
            else:
                netStoich[product] = stoich
        return netStoich
def _comb(amount, stoich):
if(stoich == 1):
return amount
elif stoich == 2:
return amount*(amount-1)*0.5
raise Exception("Invalid stoichiometry: {0}".format(stoich))
| kehlert/tau_leaper | action.py | action.py | py | 1,350 | python | en | code | 0 | github-code | 13 |
39709995421 | from manimlib.imports import *
import math
class Graphing(GraphScene):
    """Manim scene: plots x^2, annotates a point at x=1 with guide lines,
    morphs the curve into x^3, and labels delta-x/delta-y."""
    CONFIG = {
        "x_min": -5,
        "x_max": 5,
        "y_min": -4,
        "y_max": 4,
        "graph_origin": ORIGIN,
        "function_color": WHITE,
        "axes_color": BLUE
    }
    def construct(self):
        #Make graph
        self.setup_axes(animate=True)
        func_graph=self.get_graph(self.func_to_graph,self.function_color)
        graph_lab = self.get_graph_label(func_graph, label = "x^{2}")
        func_graph_2=self.get_graph(self.func_to_graph_2,self.function_color)
        graph_lab_2 = self.get_graph_label(func_graph_2, label = "x^{3}")
        # Guide lines from (1, f(1)) to the axes.
        vert_line = self.get_vertical_line_to_graph(1,func_graph,color=YELLOW)
        x = self.coords_to_point(1, self.func_to_graph(1))
        y = self.coords_to_point(0, self.func_to_graph(1))
        horz_line = Line(x,y, color=YELLOW)
        # two_pi = TexMobject("x = 2 \\pi")
        # label_coord = self.input_to_graph_point(TAU,func_graph)
        # two_pi.next_to(label_coord,RIGHT+UP)
        # Delta labels positioned relative to the point at x = 0.5.
        delta_y = TexMobject("\\Delta y")
        label_coord = self.input_to_graph_point(0.5,func_graph)
        delta_y.next_to(label_coord,RIGHT+RIGHT)
        delta_x = TexMobject("\\Delta x")
        label_coord = self.input_to_graph_point(0.5,func_graph)
        delta_x.next_to(label_coord,UP+UP+UP)
        point1 = Dot(self.coords_to_point(1,self.func_to_graph(1)))
        point2 = Dot(self.coords_to_point(0,0))
        # Animation sequence: draw curve, guides, points, then morph to x^3.
        self.play(ShowCreation(func_graph), Write(graph_lab))
        self.wait(1)
        self.play(ShowCreation(vert_line))
        self.play(ShowCreation(horz_line))
        self.add(point1)
        self.wait(1)
        self.add(point2)
        self.wait(1)
        self.play(Transform(func_graph, func_graph_2), Transform(graph_lab, graph_lab_2))
        self.wait(1)
        self.play(ShowCreation(delta_y))
        self.wait(1)
        self.play(ShowCreation(delta_x))
    def func_to_graph(self, x):
        # First curve: y = x^2.
        return (x**2)
    def func_to_graph_2(self, x):
        # Second curve: y = x^3.
        return(x**3)
34909588011 | ################################################
# File Name: Pong.py
# Creator Name: Mr. Acosta
# Date Created: 1-28-2020
# Date Modified: 1-28-2020
################################################
# Making Pong
#################################################
import pygame, sys, time, random
from pygame.locals import *
# Set up pygame. To run pygame, we must always initialize it first.
pygame.init()
mainClock = pygame.time.Clock()
# Here we create the window. We store the window height and width in variables so we can use them later.
width = 700
height = 600
windowSurface = pygame.display.set_mode((width, height), 0, 32)
# Set the window title to "Pong"
pygame.display.set_caption('Pong')
# Set up the color variables (RGB tuples).
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
# Run the game loop. Currently it only handles quitting (window close or
# Escape); paddle/ball logic is not implemented yet.
while True:
    # Check for events.
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        if event.type == KEYDOWN:
            # Placeholder for paddle-movement key handling.
            pass
        if event.type == KEYUP:
            if event.key == K_ESCAPE:
                pygame.quit()
                sys.exit()
    # Draw the window onto the screen.
    pygame.display.update()
    # Set the framerate of the game.
    mainClock.tick(30)
18116356479 | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 11 15:17:00 2020
@author: kshitij
"""
import numpy as np
import os
import glob
from tqdm import tqdm
import argparse
import time
from utils import get_similarity,get_similarity_from_rdms
# Space-separated task names; main() parses this with .split(' ').
# NOTE(review): .split(' ') only works if the literal contains exactly single
# spaces between names — any doubled space (e.g. from the line continuations)
# would yield empty-string entries; .split() would be safer. Confirm.
list_of_tasks = 'autoencoder curvature denoise edge2d edge3d \
keypoint2d keypoint3d colorization \
reshade rgb2depth rgb2mist rgb2sfnorm \
room_layout segment25d segment2d vanishing_point \
segmentsemantic class_1000 class_places inpainting_whole pascal_voc_segmentation'
def get_features(taskonomy_feats_path,pascal_feats_path,features_filename):
    """
    Parameters
    ----------
    taskonomy_feats_path : string
        path to directory containing features from taskonomy models
    pascal_feats_path : string
        path to directory containing features from pascal voc model
    features_filename : string
        path of the .npy cache; if it exists it is loaded and returned as-is
    Returns
    -------
    taskonomy_data : dict
        dictionary containing features of taskonomy models and pascal voc model.
    """
    # Fast path: reuse the cached feature dictionary if it was saved before.
    if os.path.isfile(features_filename):
        start = time.time()
        taskonomy_data = np.load(features_filename,allow_pickle=True)
        end = time.time()
        print("whole file loading time is ", end - start)
        return taskonomy_data.item()
    taskonomy_tasks = ['autoencoder','class_1000', 'class_places', 'colorization','curvature',\
                       'denoise', 'edge2d', 'edge3d', \
                       'inpainting_whole','keypoint2d', 'keypoint3d', \
                       'reshade', 'rgb2depth', 'rgb2mist', 'rgb2sfnorm','room_layout' ,\
                       'segment25d', 'segment2d', 'segmentsemantic', 'vanishing_point']
    print(len(taskonomy_tasks))
    taskonomy_list={}
    print(taskonomy_feats_path)
    # Collect one sorted file list per task so image order is consistent.
    for task in taskonomy_tasks:
        taskonomy_list[task] = glob.glob(taskonomy_feats_path+"/*"+ task +"_encoder_output.npy")
        taskonomy_list[task].sort()
        print(task, len(taskonomy_list[task]))
    #Loading data
    # NOTE(review): `task` leaks from the loop above — image/feature counts
    # are probed from the *last* task and assumed equal across all tasks.
    num_images = len(taskonomy_list[task])
    print(np.load(taskonomy_list[task][0]).shape)
    a=np.load(taskonomy_list[task][0]).ravel()
    print(a.shape)
    num_features =a.shape[0]
    taskonomy_data = {}
    # Stack each task's per-image feature files into one (images x dims) array.
    for task in tqdm(taskonomy_tasks):
        taskonomy_data[task] = np.zeros((num_images,num_features))
        for i,taskonomy_file in tqdm(enumerate(taskonomy_list[task])):
            taskonomy_data[task][i,:] = np.load(taskonomy_file).ravel()
    # Same stacking for the pascal voc model's features.
    pascal_list = glob.glob(pascal_feats_path+"/*.npy")
    pascal_list.sort()
    print(len(pascal_list))
    num_images = len(pascal_list)
    a=np.load(pascal_list[0]).ravel()
    print(a.shape)
    num_features =a.shape[0]
    pascal_data = np.zeros((num_images,num_features))
    for i,pascal_file in tqdm(enumerate(pascal_list)):
        pascal_data[i,:] = np.load(pascal_file).ravel()
    taskonomy_data['pascal_voc_segmentation'] = pascal_data
    # Persist the combined dictionary so later runs hit the fast path above.
    np.save(features_filename, taskonomy_data)
    return taskonomy_data
def main():
    """Compute DDS between every taskonomy task and the pascal voc model,
    once per kernel-based similarity and once per distance-based (RDM)
    similarity, bootstrapped over random image subsets."""
    parser = argparse.ArgumentParser(description='Computing Duality Diagram Similarity between Taskonomy Tasks')
    parser.add_argument('-d','--dataset', help='image dataset to use for computing DDS: options are [pascal_5000, taskonomy_5000, nyuv2]', default = "pascal_5000", type=str)
    parser.add_argument('-fd','--feature_dir', help='path to saved features root directory', default = "./features/", type=str)
    parser.add_argument('-fdt','--feature_dir_taskonomy', help='path to saved features from taskonomy models', default = "./features/taskonomy_activations/", type=str)
    parser.add_argument('-fdp','--feature_dir_pascal', help='path to saved features from pascal models', default = "./features/pascal_activations/", type=str)
    parser.add_argument('-sd','--save_dir', help='path to save the DDS results', default = "./results/DDScomparison_pascal", type=str)
    parser.add_argument('-n','--num_images', help='number of images to compute DDS', default = 200, type=int)
    parser.add_argument('-i','--num_iters', help='number of iterations for bootstrap', default = 100, type=int)
    args = vars(parser.parse_args())
    num_images = args['num_images']
    dataset = args['dataset']
    taskonomy_feats_path = os.path.join(args['feature_dir_taskonomy'],dataset)
    pascal_feats_path = os.path.join(args['feature_dir_pascal'],dataset)
    num_repetitions = args['num_iters']
    features_filename = os.path.join("./features","taskonomy_pascal_feats_" + args['dataset'] + ".npy")
    # Dataset sizes: 5000 images except for nyuv2 (1449).
    num_total_images = 5000
    if dataset == 'nyuv2':
        num_total_images = 1449
    save_dir = os.path.join(args['save_dir'],dataset)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    # NOTE(review): split(' ') assumes single spaces in list_of_tasks; a
    # plain split() would be more robust against doubled spaces.
    task_list = list_of_tasks.split(' ')
    print(task_list)
    taskonomy_data = get_features(taskonomy_feats_path,pascal_feats_path,features_filename)
    # setting up DDS using Q,D,f,g for kernels
    kernel_type = ['rbf','lap','linear'] # possible kernels (f in DDS)
    feature_norm_type = ['znorm'] # possible normalizations (Q,D in DDS)
    save_path = os.path.join(save_dir,'kernels.npy')
    affinity_ablation = {}
    for kernel in (kernel_type):
        affinity_ablation[kernel]={}
        for feature_norm in feature_norm_type:
            # Fixed seed so every kernel/norm combination sees the same
            # bootstrap image subsets.
            np.random.seed(1993)
            indices = []
            for i in range(num_repetitions):
                indices.append(np.random.choice(range(num_total_images), num_images, replace=False))
            print(kernel,feature_norm)
            affinity_matrix = np.zeros((num_repetitions, len(task_list)), float)
            for i in tqdm(range(num_repetitions)):
                method = kernel +"__" + feature_norm
                start = time.time()
                for index1,task1 in (enumerate(task_list)):
                    affinity_matrix[i,index1] = get_similarity(taskonomy_data[task1][indices[i],:],\
                                                               taskonomy_data['pascal_voc_segmentation'][indices[i],:],\
                                                               kernel,feature_norm)
                end = time.time()
                print("Method is ", method)
                print("Time taken is ", end - start)
            affinity_ablation[kernel][feature_norm] = affinity_matrix
    np.save(save_path,affinity_ablation)
    # setting up DDS using Q,D,f,g for distance functions
    save_path = os.path.join(save_dir,'rdms.npy')
    dist_type = ['pearson', 'euclidean', 'cosine']
    affinity_ablation = {}
    for dist in (dist_type):
        affinity_ablation[dist]={}
        for feature_norm in feature_norm_type:
            # Same fixed seed: RDM results use identical image subsets.
            np.random.seed(1993)
            indices = []
            for i in range(num_repetitions):
                indices.append(np.random.choice(range(num_total_images), num_images, replace=False))
            print(dist,feature_norm)
            affinity_matrix = np.zeros((num_repetitions, len(task_list)), float)
            for i in tqdm(range(num_repetitions)):
                method = dist +"__" + feature_norm
                start = time.time()
                for index1,task1 in (enumerate(task_list)):
                    affinity_matrix[i,index1] = get_similarity_from_rdms(taskonomy_data[task1][indices[i],:],\
                                                                         taskonomy_data['pascal_voc_segmentation'][indices[i],:],\
                                                                         dist,feature_norm)
                end = time.time()
                print("Method is ", method)
                print("Time taken is ", end - start)
            affinity_ablation[dist][feature_norm] = affinity_matrix
    np.save(save_path,affinity_ablation)
| cvai-roig-lab/duality-diagram-similarity | computeDDS_pascal.py | computeDDS_pascal.py | py | 7,696 | python | en | code | 9 | github-code | 13 |
73482736016 | import tensorflow as tf
class SVGD():
    """Stein Variational Gradient Descent over a set of particles, using a
    TensorFlow log-posterior callable and an RBF kernel with the median
    heuristic for its bandwidth."""
    def __init__(self,joint_log_post,num_particles = 250,num_iter=1000, dtype=tf.float32):
        # joint_log_post: callable mapping one particle to its log posterior.
        self.dtype = dtype
        self.num_particles = num_particles
        self.num_latent = 2
        self.lr = 0.003
        self.alpha = .9
        self.fudge_factor = 1e-6
        self.num_iter = num_iter
        self.range_limit = [-3, 3]
        self.npoints_plot = 50
        self.joint_log_post = joint_log_post
    def get_median(self,v):
        # Median of a flattened tensor via top_k: the m-th largest of 2m values.
        v = tf.reshape(v, [-1])
        m = v.get_shape()[0]//2
        return tf.nn.top_k(v, m).values[m-1]
    def svgd_kernel(self,X0):
        """RBF kernel matrix and its gradient w.r.t. the particles X0
        (rows are particles); bandwidth set by the median heuristic."""
        XY = tf.matmul(X0, tf.transpose(X0))
        X2_ = tf.reduce_sum(tf.square(X0), axis=1)
        x2 = tf.reshape(X2_, shape=(tf.shape(X0)[0], 1))
        X2e = tf.tile(x2, [1, tf.shape(X0)[0]])
        ## (x1 -x2)^2 + (y1 -y2)^2
        H = tf.subtract(tf.add(X2e, tf.transpose(X2e)), 2 * XY)
        V = tf.reshape(H, [-1, 1])
        # median distance
        h = self.get_median(V)
        h = tf.sqrt(
            0.5 * h / tf.math.log(tf.cast(tf.shape(X0)[0], self.dtype) + 1.0))
        # compute the rbf kernel
        Kxy = tf.exp(-H / h ** 2 / 2.0)
        dxkxy = tf.negative(tf.matmul(Kxy, X0))
        sumkxy = tf.expand_dims(tf.reduce_sum(Kxy, axis=1), 1)
        dxkxy = tf.add(dxkxy, tf.multiply(X0, sumkxy)) / (h ** 2)
        return (Kxy, dxkxy)
    def gradient(self,mu):
        # Per-particle gradient of the log posterior, stacked into one tensor.
        log_p_grad = tf.TensorArray(self.dtype, size=self.num_particles)
        for i in range(mu.shape[0]):
            with tf.GradientTape() as t:
                t.watch(mu)
                f = self.joint_log_post(mu[i])
            log_p_grad =log_p_grad.write(i, t.gradient(f,mu)[i])
        return log_p_grad.stack()
    def svgd_one_iter(self,mu):
        """One SVGD update: kernel-smoothed gradient plus repulsive term."""
        # mu_norm = self.normalizer.encode(mu)
        log_p_grad = self.gradient(mu)
        kernel_matrix, kernel_gradients = self.svgd_kernel(mu)
        grad_theta = (tf.matmul(kernel_matrix, log_p_grad) + kernel_gradients) / self.num_particles
        # print(grad_theta)
        # mu_norm = mu_norm + self.lr * grad_theta
        mu = mu + self.lr * grad_theta
        # mu = self.normalizer.decode(mu_norm)
        # GPU = GPUInfo.gpu_usage()
        # print('GPU usage: {} %, GPU Memory: {} Mb'.format(GPU[0][0],GPU[1][0]))
        return mu
    def run_chain_svgd(self, mu):
        """Run num_iter SVGD updates; returns the final particles and the
        per-iteration history as numpy arrays."""
        mu_list = []
        for i in range(self.num_iter):
            mu = self.svgd_one_iter(mu)
            # NOTE(review): `i // 10 == 0` logs only steps 0-9; `i % 10 == 0`
            # (every 10th step) was probably intended — confirm.
            if i // 10 == 0:
                print('step {}'.format(i))
            mu_list.append(mu.numpy())
        return mu,mu_list
| GeorgeLiang3/Hessian-paper | Geophysics/models/SVGD.py | SVGD.py | py | 2,626 | python | en | code | 0 | github-code | 13 |
2553636056 | """
Allows to add `ControllerAgent` (with unknown parameters) to the model, which enables user to
change `tau` during the `_fit` method.
`parameters` is a dict with four fields:
Fields
------
reg_name: str
The name of regularizer. We want to change the tau coefficient of it during training
Note that only one of ("reg_name", "regularizer") should be provided
regularizer: artm.regularizer.Regularizer
Regularizer object (if we want to add non-existing regularizer to the model)
Note that only one of ("reg_name", "regularizer") should be provided
score_to_track: str
The name of metric which we will track.
We assume that if that metric is 'sort of decreasing', then everything is OK
and we are allowed to change tau coefficient further; otherwise we revert back
to the last "safe" value and stop
'sort of decreasing' performs best with `PerplexityScore`,
and all scores which behave like perplexity
(nonnegative, and which should decrease when a model gets better).
If you want to track a different kind of score,
it is recommended to use `score_controller` parameter
More formal definition of "sort of decreasing":
if we divide a curve into two parts like so:
#####################################
#. . . .. . . . .. . .. . . ... . #
#%. . . . . . . .. . . . . . . ..#
#:t . . . . . . . . . . . . . . . .#
# t: . . . . . . . . . . . . . . ...#
#. %. . . . . . . . . . . . . . . .#
#. :t. . . . . . . . . . . . . . .#
#.. ;; . . . . . . . . . . . . ..#
# ..t.. . . . . . . . . . . . . .#
#. . :t .. . . . . . . . . . . . ..#
#. .. t: . . . . . . . . . . . . . .#
#. ..S: . . . . . . . . . . . . ..#
#. . . .:;: . . . . . . . . . . . .#
#. . . . :;; . . . . . . . . . . .#
#. . . . .. :%. nmmMMmmn . .#
# . . . . .tt%.ztttt"' '""ttttttt#
#. . . . . . '"' . . . . . . . . #
#####################################
| | |
| left part | |
global minimum |
| right part |
then the right part is no higher than 5% of global minimum
(you can change 5% if you like by adjusting `fraction_threshold` parameter)
If `score_to_track` is None and `score_controller` is None,
then `ControllerAgent` will never stop
(useful for e.g. decaying coefficients)
fraction_threshold: float
Threshold to control a score by 'sort of decreasing' metric
score_controller: BaseScoreController
Custom score controller
In case of 'sort of decreasing' is not proper to control score,
you are able to create custom Score Controller
inherited from `BaseScoreController`.
tau_converter: str or callable
Notably, def-style functions and lambda functions are allowed
If it is function, then it should accept four arguments:
`(initial_tau, prev_tau, cur_iter, user_value)`
For example:
>> lambda initial_tau, prev_tau, cur_iter, user_value:
>> initial_tau if cur_iter % 2 == 0 else 0
(Note that experiment description might display lambda functions incorrectly;
Try to keep them to a single line or use def-style functions instead)
>> def func(initial_tau, prev_tau, cur_iter, user_value):
>> relu_grower = user_value * (cur_iter - 8) if cur_iter > 8 else 0
>> return 0 if cur_iter % 2 else relu_grower
If it is a string, then it should be an expression consisting of numbers, operations
and variables (four are allowed: `initial_tau, prev_tau, cur_iter, user_value`)
For example:
`>> "initial_tau * ((cur_iter + 1) % 2)"`
or
`>> "prev_tau * user_value"`
user_value_grid: list of numeric
Values for user_value variable
When writing `tau_converter`, you can use user_value variable.
For example:
>> tau_converter: "prev_tau * user_value"
>> user_value_grid: [1, 0.99, 0.95, 0.90, 0.80, 0.5]
(I know that tau should decay exponentially, but I'm unsure of exact half-life)
>> tau_converter: "prev_tau + user_value"
>> user_value_grid: [50, 100, 150, 200, 250]
(I know that tau should increase linearly, but I'm unsure of exact speed)
>> def func(initial_tau, prev_tau, cur_iter, user_value):
>> new_tau = 50 * (cur_iter - user_value) if cur_iter > user_value else 0
>> return new_tau
>> tau_converter: func
>> user_value_grid: [10, 15, 20, 25, 30]
(Tau should start with zero, then increase linearly. I don't know when to start this process)
    max_iter: numeric
        Optional (default value is `num_iter` specified for cube)
        Agent will stop changing tau after `max_iter` iterations
        `max_iter` could be `float("NaN")` and `float("inf")` values:
        that way agent will continue operating even outside this `RegularizationControllerCube`
""" # noqa: W291
import warnings
from copy import deepcopy
from dataclasses import dataclass
from numbers import Number
from typing import (
Callable,
List,
Optional,
Union,
)
import numexpr as ne
import numpy as np
from dill.source import getsource
from .base_cube import BaseCube
from ..models.base_regularizer import BaseRegularizer
from ..rel_toolbox_lite import count_vocab_size, handle_regularizer
# Warning templates emitted by ControllerAgent when it stops adjusting tau:
# either a tracked score went out of control, or the iteration budget ran out.
W_HALT_CONTROL = "Process of dynamically changing tau was stopped at {} iteration"
W_MAX_ITERS = "Maximum number of iterations is exceeded; turning off"
@dataclass
class OutOfControlAnswer:
    # True when the tracked score signals that tau must stop changing.
    answer: bool
    # Optional human-readable explanation, emitted as a warning by the caller.
    error_message: Optional[str] = None


class BaseScoreController:
    """Base class for score controllers.

    A controller is called with a model; it extracts the tracked score's
    history and decides (via `is_out_of_control`) whether the agent should
    stop changing tau.
    """
    def __init__(self, score_name):
        self.score_name = score_name

    def get_score_values(self, model):
        """Return the tracked score's value list, or None if absent or empty."""
        if self.score_name not in model.scores:  # case of None is handled here as well
            return None
        values = model.scores[self.score_name]
        return values if len(values) > 0 else None

    def __call__(self, model):
        """Return True when the score went out of control (tau must stop changing)."""
        values = self.get_score_values(model)
        if values is None:
            return False
        try:
            result = self.is_out_of_control(values)
        except Exception as ex:
            raise ValueError(
                f"An error occurred while controlling {self.score_name}!"
                f" Message: {ex}. Score values: {values}"
            )
        if result.error_message is not None:
            warnings.warn(result.error_message)
        return result.answer

    def is_out_of_control(self, values: List[float]) -> OutOfControlAnswer:
        """Subclasses implement the actual out-of-control criterion."""
        raise NotImplementedError
class PerplexityScoreController(BaseScoreController):
    """
    Controller is proper to control the Perplexity score.
    For others, please ensure for yourself.

    The score is considered out of control when, after reaching its minimum,
    it grows back by more than `fraction_threshold` of the minimum value.
    """
    DEFAULT_FRACTION_THRESHOLD = 0.05

    def __init__(self, score_name, fraction_threshold=DEFAULT_FRACTION_THRESHOLD):
        """
        Parameters
        ----------
        score_name : str
            Name of the (Perplexity-like) score to watch.
        fraction_threshold : float
            Allowed relative growth above the observed minimum.
        """
        super().__init__(score_name)
        self.fraction_threshold = fraction_threshold

    def is_out_of_control(self, values: List[float]) -> OutOfControlAnswer:
        idxmin = int(np.argmin(values))
        # BUG FIX: the original compared `idxmin == len(values)`, which can
        # never be true (argmin < len), and returned a bare `False`, which
        # would crash the caller accessing `.error_message`. The intended
        # check is "minimum is at the last position", i.e. the score is
        # monotonically decreasing.
        if idxmin == len(values) - 1:  # score is monotonically decreasing
            return OutOfControlAnswer(answer=False)
        right_maxval = max(values[idxmin:])
        minval = values[idxmin]
        if minval <= 0:
            raise ValueError(
                f'Score "{self.score_name}" has min_value = {minval} which is <= 0.'
                f' This control scheme is using to control scores acting like Perplexity.'
                f' Ensure you control the Perplexity score or write your own controller!'
            )
        answer = (right_maxval - minval) / minval > self.fraction_threshold
        if answer:
            return OutOfControlAnswer(
                answer=answer,
                error_message=(
                    f"Score {self.score_name} is too high!"
                    f" Right max value: {right_maxval}, min value: {minval}"
                ),
            )
        return OutOfControlAnswer(answer=answer)
class ControllerAgent:
    """
    Allows to change `tau` during the `_fit` method.
    Each `TopicModel` has a `.callbacks` attribute.
    This is a list consisting of various `ControllerAgent`s.
    Each agent is described by:
    * reg_name: the name of regularizer having `tau` which needs to be changed
    * tau_converter: function or string describing how to get new `tau` from old `tau`
    * score_to_track: score name providing control of the callback execution
    * fraction_threshold: threshold to control score_to_track
    * score_controller: custom score controller providing control of the callback execution
    * local_dict: dictionary containing values of several variables,
        most notably, `user_value`
    * is_working:
        if True, agent will attempt to change tau until something breaks.
        if False, agent will assume that something had been broken and will
        revert to the last known safe value (without trying to change anything further)
    See top-level docstring for details.
    """
    def __init__(
            self,
            reg_name: str,
            tau_converter: Union[Callable, str],
            max_iters: Union[int, float],
            score_to_track: Union[str, List[str], None] = None,
            fraction_threshold: Union[float, List[float], None] = None,
            score_controller: Union[BaseScoreController, List[BaseScoreController], None] = None,
            local_dict: Optional[dict] = None):
        """
        Parameters
        ----------
        reg_name
            Name of the regularizer whose `tau` this agent adjusts.
        tau_converter
            Function or numexpr-compatible string producing the next `tau`.
        max_iters
            Agent will stop changing tau after `max_iters` iterations,
            `max_iters` could be `float("NaN")` and `float("inf")` values:
            that way agent will continue operating even outside this `RegularizationControllerCube`
        score_to_track
            Name of score to track.
            Please, use this definition to track only scores of type PerplexityScore.
            In other cases we recommend you to write you own ScoreController
        fraction_threshold
            Uses to define threshold to control PerplexityScore
            Default value is 0.05.
            If `fraction_threshold` is a list, it should be of the same length, as `score_to_track`.
        score_controller
            Score controller or controllers.
            One can use this parameter for scores other than Perplexity
            (or other scores that behave like Perplexity).
            This is a more flexible and customizable way to control scores.
        local_dict
            Extra variables made available to `tau_converter`
            (most notably `user_value`).
        """
        if local_dict is None:
            local_dict = dict()
        self.reg_name = reg_name
        self.tau_converter = tau_converter
        # Normalize score-tracking arguments into two parallel lists.
        scores_to_track = self._validate_score_to_track(score_to_track)
        fraction_thresholds = self._validate_fraction_threshold(
            fraction_threshold, required_length=len(scores_to_track)
        )
        assert len(scores_to_track) == len(fraction_thresholds)
        # Every tracked score name becomes a Perplexity-style controller.
        perplexity_like_score_controllers = [
            PerplexityScoreController(name, threshold)
            for (name, threshold) in zip(scores_to_track, fraction_thresholds)
        ]
        self.score_controllers = list()
        self.score_controllers.extend(perplexity_like_score_controllers)
        self.score_controllers.extend(
            self._validate_score_controller(score_controller)
        )
        self.is_working = True
        self.local_dict = local_dict
        # Every tau value ever observed by `invoke`, in order.
        self.tau_history = []
        self.max_iters = max_iters

    @staticmethod
    def _validate_score_to_track(
            score_to_track: Union[str, List[str], None]) -> List[str]:
        """Coerce `score_to_track` to a (possibly empty) list of score names."""
        if isinstance(score_to_track, list):
            return score_to_track
        if score_to_track is None:
            return list()
        if isinstance(score_to_track, str):
            return [score_to_track]
        raise TypeError(f'Wrong type of `score_to_track`: "{type(score_to_track)}"!')

    @staticmethod
    def _validate_fraction_threshold(
            fraction_threshold: Union[float, List[float], None],
            required_length: int,
    ) -> List[float]:
        """Coerce `fraction_threshold` to a list of exactly `required_length` floats."""
        if fraction_threshold is None:
            return [PerplexityScoreController.DEFAULT_FRACTION_THRESHOLD] * required_length
        if isinstance(fraction_threshold, Number):
            return [float(fraction_threshold)] * required_length
        if not isinstance(fraction_threshold, list):
            raise TypeError(
                f'Wrong type of `fraction_threshold`: "{type(fraction_threshold)}"!'
            )
        if len(fraction_threshold) != required_length:
            raise ValueError(
                f'Wrong length of `fraction_threshold`: {len(fraction_threshold)}!'
                f' Expected the length to be equal to {required_length}.'
            )
        return fraction_threshold

    @staticmethod
    def _validate_score_controller(
            score_controller: Union[BaseScoreController, List[BaseScoreController], None]
    ) -> List[BaseScoreController]:
        """Coerce `score_controller` to a (possibly empty) list of controllers."""
        if score_controller is None:
            return list()
        elif isinstance(score_controller, BaseScoreController):
            return [score_controller]
        elif (not isinstance(score_controller, list) or not all(
                isinstance(score, BaseScoreController) for score in score_controller)):
            raise TypeError(f'Wrong type of `score_controller`: "{type(score_controller)}"!')
        else:
            return score_controller

    def _convert_tau(self):
        """Compute the next tau from `tau_converter` using `local_dict` variables."""
        if isinstance(self.tau_converter, str):
            new_tau = ne.evaluate(self.tau_converter, local_dict=self.local_dict)
            # numexpr returns np.ndarray (which is a scalar in our case)
            new_tau = float(new_tau)
        else:
            new_tau = self.tau_converter(**self.local_dict)
        return new_tau

    def _find_safe_tau(self):
        """Return the last tau known to be safe (two steps back), or 0 if unavailable."""
        if len(self.tau_history) < 2:
            warnings.warn("Reverting tau to 0")
            safe_tau = 0
        else:
            safe_tau = self.tau_history[-2]
        return safe_tau

    def invoke(self, model, cur_iter):
        """
        Attempts to change tau if `is_working == True`. Otherwise, keeps to the last safe value.
        Parameters
        ----------
        model : TopicModel
        cur_iter : int
            Note that zero means "cube just started", not "the model is brand new"
        """
        current_tau = model.get_regularizer(self.reg_name).tau
        self.tau_history.append(current_tau)
        self.local_dict["prev_tau"] = current_tau
        self.local_dict["cur_iter"] = cur_iter
        # Remember the very first tau this agent ever saw.
        if "initial_tau" not in self.local_dict:
            self.local_dict["initial_tau"] = current_tau
        # Hard stop once the iteration budget is exhausted
        # (NaN/inf max_iters make this condition never fire).
        if self.is_working and len(self.tau_history) > self.max_iters:
            warnings.warn(W_MAX_ITERS)
            self.is_working = False
        if self.is_working:
            # Any controller reporting "out of control" halts the agent
            # and reverts tau to the last safe value.
            should_stop = any(
                score_controller(model) for score_controller in self.score_controllers
            )
            if should_stop:
                warnings.warn(W_HALT_CONTROL.format(len(self.tau_history)))
                self.is_working = False
                model.get_regularizer(self.reg_name).tau = self._find_safe_tau()
            else:
                model.get_regularizer(self.reg_name).tau = self._convert_tau()
class RegularizationControllerCube(BaseCube):
    """Cube that attaches `ControllerAgent` callbacks to dynamically adjust regularizer tau."""
    def __init__(self, num_iter: int, parameters,
                 reg_search='grid', use_relative_coefficients: bool = True, strategy=None,
                 tracked_score_function=None, verbose: bool = False, separate_thread: bool = True):
        """
        Initialize stage. Checks params and update internal attributes.
        Parameters
        ----------
        num_iter : int
            number of iterations or method
        parameters : list[dict] or dict
            regularizers params
            each dict should contain the following fields:
                ("reg_name" or "regularizer"),
                "tau_converter",
                "score_to_track" (optional),
                "fraction_threshold" (optional),
                "score_controller" (optional),
                "user_value_grid"
            See top-level docstring for details.
            Examples:

            >>  {"regularizer": artm.regularizers.<...>,
            >>   "tau_converter": "prev_tau * user_value",
            >>   "score_to_track": "PerplexityScore@all",
            >>   "fraction_threshold": 0.1,
            >>   "user_value_grid": [0.5, 1, 2]}

            >>  {"reg_name": "decorrelator_for_ngramms",
            >>   "tau_converter": (
            >>       lambda initial_tau, prev_tau, cur_iter, user_value:
            >>       initial_tau * (cur_iter % 2) + user_value
            >>   ),
            >>   "score_to_track": None,
            >>   "fraction_threshold": None,
            >>   "score_controller": [
            >>       PerplexityScoreController("PerplexityScore@all", 0.1)
            >>   ],
            >>   "user_value_grid": [0, 1]}
        reg_search : str
            "grid", "pair", "add" or "mul".
            "pair" for elementwise grid search in the case of several regularizers
            "grid" for the fullgrid search in the case of several regularizers
            "add" and "mul" for the ariphmetic and geometric progression
            respectively for PerplexityStrategy
            (Default value = "grid")
        use_relative_coefficients : bool
            forces the regularizer coefficient to be in relative form
            i.e. normalized over collection properties
        strategy : BaseStrategy
            optimization approach (Default value = None)
        tracked_score_function : str ot callable
            optimizable function for strategy (Default value = None)
        verbose : bool
            visualization flag (Default value = False)
        separate_thread : bool
            run model training in a separate thread (Default value = True)
        """
        super().__init__(num_iter=num_iter, action='reg_controller',
                         reg_search=reg_search, strategy=strategy, verbose=verbose,
                         tracked_score_function=tracked_score_function,
                         separate_thread=separate_thread)
        self._relative = use_relative_coefficients
        # Vocabulary statistics, computed lazily on first `apply` call.
        self.data_stats = None
        if isinstance(parameters, dict):
            parameters = [parameters]
        self.raw_parameters = parameters
        self._convert_parameters(parameters)

    def _convert_parameters(self, all_parameters):
        """
        Build `self.parameters`: one blueprint per regularizer, later expanded
        over `user_value_grid` and handed to `ControllerAgent`.

        Parameters
        ----------
        all_parameters : list of dict
        """
        for params_dict in all_parameters:
            # Exactly one of "reg_name" / "regularizer" must be provided.
            assert ("reg_name" in params_dict) != ("regularizer" in params_dict)
            if "regularizer" in params_dict:
                assert params_dict["regularizer"].tau is not None
        self.parameters = [
            {
                "object": {
                    "reg_name": params_dict.get("reg_name", None),
                    "regularizer": params_dict.get("regularizer", None),
                    "score_to_track": params_dict.get("score_to_track", None),
                    # BUG FIX: these two documented optional fields were
                    # previously not copied into the blueprint, so any
                    # user-supplied "fraction_threshold" / "score_controller"
                    # was silently ignored by the resulting ControllerAgent.
                    "fraction_threshold": params_dict.get("fraction_threshold", None),
                    "score_controller": params_dict.get("score_controller", None),
                    "tau_converter": params_dict["tau_converter"],
                    "local_dict": {"user_value": None},
                    "max_iters": params_dict.get("max_iters", self.num_iter)
                },
                "field": "callback",
                "values": params_dict.get('user_value_grid', [0])
            }
            for params_dict in all_parameters
        ]

    def apply(self, topic_model, one_model_parameter, dictionary=None, model_id=None):
        """
        Applies regularizers and controller agents to model

        Parameters
        ----------
        topic_model : TopicModel
        one_model_parameter : list or tuple
        dictionary : Dictionary
            (Default value = None)
        model_id : str
            (Default value = None)

        Returns
        -------
        TopicModel
        """
        new_model = topic_model.clone(model_id)
        new_model.parent_model_id = topic_model.model_id
        modalities = dict()
        if self._relative:
            modalities = new_model.class_ids
            if self.data_stats is None:
                self.data_stats = count_vocab_size(dictionary, modalities)
        for (agent_blueprint_template, field_name, current_user_value) in one_model_parameter:
            agent_blueprint = dict(agent_blueprint_template)
            if agent_blueprint.get("reg_name") is not None:
                reg_name = agent_blueprint['reg_name']
                if reg_name not in new_model.all_regularizers:
                    error_msg = (f"Regularizer {agent_blueprint['reg_name']} does not exist. "
                                 f"Cannot be modified.")
                    raise ValueError(error_msg)
            elif agent_blueprint.get("regularizer") is not None:
                regularizer = agent_blueprint["regularizer"]
                # Copy so the user's regularizer object is never mutated.
                new_regularizer = deepcopy(regularizer)
                if isinstance(regularizer, BaseRegularizer):
                    new_model.custom_regularizers[new_regularizer.name] = new_regularizer
                else:  # classic bigARTM regularizer, attempt to relativize it's coefficients
                    handle_regularizer(
                        self._relative,
                        new_model,
                        new_regularizer,
                        self.data_stats,
                    )
                agent_blueprint["reg_name"] = new_regularizer.name
            else:
                raise ValueError("Either 'reg_name' or 'regularizer' should be set")
            agent_blueprint['local_dict']['user_value'] = current_user_value
            # ControllerAgent needs only reg_name in constructor
            agent_blueprint.pop("regularizer")
            agent = ControllerAgent(**agent_blueprint)
            new_model.callbacks.append(agent)
        return new_model

    def get_jsonable_from_parameters(self):
        """Return `raw_parameters` with non-string tau converters rendered as source text."""
        jsonable_parameters = []
        for one_model_parameters in self.raw_parameters:
            one_jsonable = dict(one_model_parameters)
            converter = one_model_parameters['tau_converter']
            if not isinstance(converter, str):
                try:
                    # not always works, but this is not important
                    one_jsonable["tau_converter"] = str(getsource(converter))
                except (TypeError, OSError):
                    # OSError: may arise if working in Jupyter Notebook
                    one_jsonable["tau_converter"] = "<NOT AVAILABLE>"
            jsonable_parameters.append(one_jsonable)
        return jsonable_parameters
| machine-intelligence-laboratory/TopicNet | topicnet/cooking_machine/cubes/controller_cube.py | controller_cube.py | py | 23,218 | python | en | code | 138 | github-code | 13 |
# Evaluate a set of pre-trained CIFAR-10 networks (TensorFlow 1.x graph API).
import tensorflow as tf
import numpy as np
import NetworkSelector as NetSelect
import Inference_NetworkConfiguration as Net
import cifar10_input as DataSet
# Location of the CIFAR-10 python batches on disk.
DataSetFileName = "../Dataset_CIFAR/cifar-10-batches-py/"
# Parallel lists: checkpoint path, network type id and augmentation flag per run.
fileName = []
netType = []
augment = []
resultsTrain = []
resultsTest = []
# Determine network to be tested
netType_list = [1,2,3,4,5,6]
augment_list = [False,False,False,False,False,False]
for i in range(len(netType_list)):
    # Each network type is evaluated twice: classically trained and
    # DeepOptimizer-trained checkpoints.
    fileName.append('../PreTrainedNetworkStructures/NetType-%d_Classical-0'%netType_list[i])
    netType.append(netType_list[i])
    augment.append(augment_list[i])
    fileName.append('../PreTrainedNetworkStructures/NetType-%d_DeepOptimizer-0'%netType_list[i])
    netType.append(netType_list[i])
    augment.append(augment_list[i])
# Construct selected network
InputSize =[32,32,3]
testBatchSize = 64
dataTest,labelsTest,namesTest = DataSet.get_data_test(DataSetFileName)
dataTrain,labelsTrain,namesTrain = DataSet.get_data(DataSetFileName)
# Reusable batch buffer handed to the accuracy helper below.
frameBufferForTest = np.zeros([testBatchSize, InputSize[0], InputSize[1], InputSize[2]],dtype=np.float32)
for k in range(len(fileName)):
    # NOTE: tf.reset_default_graph / tf.Session are TensorFlow 1.x API;
    # this script will not run under TF 2 eager mode as-is.
    tf.reset_default_graph()
    if augment[k] == False:
        IMAGE_SIZE = 32
    else:
        # Augmented networks were trained on smaller (24x24) crops.
        IMAGE_SIZE = 24
    layerOutDimSize,kernelSize,strideSize,poolKernelSize,poolSize,dropoutInput,dropoutPool = NetSelect.NetworkConfig(netType[k])
    # Construct Network
    netOut,accuracyOp,placeHolders = Net.ConstructInferenceNetwork(InputSize,testBatchSize,layerOutDimSize,kernelSize,strideSize,poolKernelSize,poolSize,IMAGE_SIZE)
    graph = tf.get_default_graph()
    saver = tf.train.Saver()
    with tf.Session() as sess:
        print("\n\n\n")
        print(fileName[k])
        saver.restore(sess, fileName[k])
        print('Network loaded')
        print("Testing ...")
        accuracyTrain = Net.GetTestAccuracy(sess,accuracyOp,dataTrain,labelsTrain,testBatchSize,frameBufferForTest,placeHolders['inputFramePlaceHolder'],placeHolders['inputDistortionPlaceholder'],placeHolders['labelPlaceHolder'],placeHolders['dropoutInputPlaceHolder'],placeHolders['dropoutPoolPlaceHolder'])
        accuracyTest = Net.GetTestAccuracy(sess,accuracyOp,dataTest,labelsTest,testBatchSize,frameBufferForTest,placeHolders['inputFramePlaceHolder'],placeHolders['inputDistortionPlaceholder'],placeHolders['labelPlaceHolder'],placeHolders['dropoutInputPlaceHolder'],placeHolders['dropoutPoolPlaceHolder'])
        print("Test accuracy / Train accuracy: %f / %f"%(accuracyTest,accuracyTrain))
        resultsTrain.append(accuracyTrain)
        resultsTest.append(accuracyTest)
# Final summary over all evaluated checkpoints.
print("**************** RESULTS ****************")
for i in range(len(resultsTrain)):
    print(fileName[i])
    print("Test accuracy / Train accuracy: %f / %f\n\n"%(resultsTest[i],resultsTrain[i]))
| DiversisAI/Deep-Optimizer-Framework | Codes/EvalNetwork.py | EvalNetwork.py | py | 2,818 | python | en | code | 2 | github-code | 13 |
# Scrape "clean" jokes from joke-db.com into a CSV file.
# NOTE(review): this script is Python 2 only (`xrange`, `urllib.urlopen`,
# unicode/str handling) — it needs porting (urllib.request, range) for Python 3.
from bs4 import BeautifulSoup as bs
import unicodedata
import urllib
import csv
import sys
filepath = '../../data/joke-db.csv'
# Append mode: re-running the script extends the existing CSV
# (the header row is re-written on every run).
f = open(filepath, 'a')
writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
writer.writerow( ('ID', 'Joke') )
prefix = 'http://www.joke-db.com/c/all/clean/page:'
totaljokes = 0
# The site paginates its clean jokes over 235 pages.
for i in xrange(235):
    url = prefix + str(i + 1) + '/'
    r = urllib.urlopen(url).read()
    html = bs(r, 'html.parser')
    jokes = html.find_all('div', {'class': 'joke-box-upper'})
    jokes = [joke.get_text() for joke in jokes]
    for joke in jokes:
        # Collapse whitespace, then normalize typographic punctuation to ASCII.
        joke = joke.replace('\n', " ").replace("\r", " ").replace("\t", " ").replace("    ", " ").replace("  ", " ").rstrip().strip().replace("  ", " ")
        joke = joke.replace(u"\u201c", '"').replace(u"\u201d", '"').replace(u"\u2019", "'").replace(u"\u2026", "...")
        joke = unicodedata.normalize('NFKD', joke).encode('ascii','ignore')
        totaljokes += 1
        writer.writerow((totaljokes, joke))
    print ("Page %d processed, total number of jokes = %d" %(i + 1, totaljokes))
f.close()
| amoudgl/short-jokes-dataset | scripts/scrapers/joke-db.com.py | joke-db.com.py | py | 1,072 | python | en | code | 259 | github-code | 13 |
#teams = 3
#participants_per_team = 4
#attempts_per_participant = 3
#points range from 1 to 60
# Game configuration. NOTE(review): "attemps" keeps its original, misspelled
# name because main() references it by that name.
attemps = 3
max_score = 60
min_score = 1
# Each team maps participant name -> list of throw scores (filled by main()).
team_yellow = {
    "camilo" : [],
    "paula" : [],
    "liz" : [],
    "chloe" : []
}
team_blue = {
    "ricardo" : [],
    "sammy" : [],
    "edilma" : [],
    "x" : []
}
team_red = {
    "carlos" : [],
    "johana" : [],
    "mariana" : [],
    "maggie" : []
}
# All teams, iterated in this order by main().
teams = [team_yellow, team_blue, team_red]
def main():
    """Run the throwing game: read each player's throws, report the winner.

    Reads `attemps` integer scores per player from stdin; scores outside
    [min_score, max_score] count as 0. The winner is the player with the
    highest single throw (first one wins ties).
    """
    # Phase 1: collect every player's throw scores from stdin.
    for team in teams:
        for participant in team:
            scores = []
            for _ in range(attemps):
                score = int(input(f'Ingrese el puntaje del lanzamiento del jugador {participant}: '))
                # Chained comparison replaces the original two-clause check.
                if min_score <= score <= max_score:
                    scores.append(score)
                else:
                    print('Puntuacion incorrecta, ha perdido el tiro')
                    scores.append(0)
            team[participant] = scores
    # Phase 2: keep only each player's best throw (builtin max() replaces
    # the original manual largest-element loop).
    for team in teams:
        for participant in team:
            team[participant] = max(team[participant])
    # Phase 3: find the overall winner; strict '>' keeps the first on ties.
    largest_number = 0
    winner = ''
    for team in teams:
        for participant in team:
            if team[participant] > largest_number:
                largest_number = team[participant]
                winner = participant
    print(f'El ganador es {winner} con el puntaje de {largest_number}')
# Entry point when run as a script.
if __name__ == '__main__':
    main()
3384028208 | import boto3
from boto3.dynamodb.conditions import Key
from botocore.exceptions import ClientError
def update_type_table(table, crystal_type, last_sale):
    """Upsert the ``last_sale`` map on a crystal-type DynamoDB item.

    First tries to update the individual attributes inside an existing
    ``last_sale`` map; if the map does not exist yet DynamoDB raises a
    ``ValidationException``, in which case the whole map is created.

    Parameters
    ----------
    table : boto3 DynamoDB Table resource
    crystal_type : str
        Partition key value of the item to update.
    last_sale : dict
        Must contain "weight", "price" and "timestamp" keys.

    Returns
    -------
    dict
        The ``update_item`` response.
    """
    try:
        update_type_response = table.update_item(
            Key={"crystal_type": crystal_type},
            UpdateExpression="set #ls.#w=:a, #ls.#p=:b, #ls.#t=:c",
            # "timestamp" is a DynamoDB reserved word, hence the name aliases.
            ExpressionAttributeNames={"#ls": "last_sale", "#w": "weight", "#p": "price", "#t": "timestamp"},
            ExpressionAttributeValues={
                ":a": last_sale["weight"],
                ":b": last_sale["price"],
                ":c": last_sale["timestamp"],
            },
            ReturnValues="UPDATED_NEW",
        )
        return update_type_response
    except ClientError as e:
        if e.response["Error"]["Code"] == "ValidationException":
            # The last_sale map does not exist yet: create it wholesale.
            update_type_response = table.update_item(
                Key={"crystal_type": crystal_type},
                UpdateExpression="set last_sale = :lastSaleValue",
                ExpressionAttributeValues={
                    ":lastSaleValue": {
                        "weight": last_sale["weight"],
                        "price": last_sale["price"],
                        "timestamp": last_sale["timestamp"],
                    },
                },
                ReturnValues="UPDATED_NEW",
            )
            return update_type_response
        # BUG FIX: previously any other ClientError (throttling, access
        # denied, ...) was silently swallowed and None was returned.
        raise
| rohanbansal12/testing | updating/util/aws_util.py | aws_util.py | py | 1,390 | python | en | code | 0 | github-code | 13 |
# DBMaker: ingest a book into Neo4j — one WrittenWork node plus one
# Sentence node per spaCy-detected sentence.
import spacy
from neo4j import GraphDatabase
import argparse, sys
import dbbuilder

# Small English pipeline; used here only for sentence segmentation.
nlp = spacy.load('en_core_web_sm')

# Connection to the local Neo4j instance.
uri = "bolt://localhost:7687"
d = GraphDatabase.driver(uri, auth=("neo4j", "password"))
tx = d.session()

parser = argparse.ArgumentParser()
# FIX: the original help strings were meaningless placeholders
# ("Do the bar option" / "Foo the program").
parser.add_argument('--author', help='Author of the work (spaces are stripped to build the path)')
parser.add_argument('--work', help='Title of the work (spaces are stripped to build the path)')
args = parser.parse_args()

# Build the on-disk path Books/<Author>/<Work> with spaces removed.
# (FIX: dropped the pointless chained assignment to an unused `text` variable.)
a = args.author.replace(' ', '')
b = args.work.replace(' ', '')
path = "Books/" + a + "/" + b
print(args.author)
print(args.work)
print(path)

# FIX: read the book through a context manager instead of leaking the handle.
with open(path, 'r') as book_file:
    book = book_file.read()

dbbuilder.WrittenWork.add(tx, args.author, args.work,)
doc = nlp(book)
for sent in doc.sents:
    s = dbbuilder.Sentence(tx, args.work, sent)
| bleepul/writing | DBMaker.py | DBMaker.py | py | 750 | python | en | code | 0 | github-code | 13 |
22491988491 | import requests
class YaUploader:
    """Minimal client for uploading a single file to Yandex.Disk."""

    def __init__(self, token: str):
        # OAuth token used in the Authorization header of every request.
        self.token = token

    def upload(self, file_path: str):
        """Upload the file at ``file_path`` to the disk root under its base name.

        FIX: the original docstring described a nonexistent ``file_list``
        parameter, and the file was opened without ever being closed.
        """
        url = 'https://cloud-api.yandex.net/v1/disk/resources/upload'
        headers = {'Content-Type': 'application/json', 'Authorization': 'OAuth ' + self.token}
        # NOTE(review): backslash split assumes a Windows-style path — confirm.
        file_name = file_path[file_path.rfind('\\') + 1:]
        params = {'path': file_name, 'overwrite': 'true'}
        # Ask the API for a one-time upload URL.
        response = requests.get(url, headers=headers, params=params)
        print(response.json())
        upload_url = response.json()['href']
        # FIX: context manager guarantees the file handle is closed.
        with open(file_path, 'rb') as file_obj:
            requests.put(upload_url, data=file_obj)
if __name__ == '__main__':
    # FIX: raw string — the Windows path contains backslash sequences
    # ('\G', '\д', ...) that are invalid escapes and trigger a
    # DeprecationWarning in a plain string literal; the value is unchanged.
    path_to_file = r'D:\GitHub\HW_Requests\домашняя работа.txt'
    token = ''  # paste a Yandex.Disk OAuth token here before running
    uploader = YaUploader(token)
    result = uploader.upload(path_to_file)
70599905619 |
import numpy as np
import librosa
import torch
import numpy as np
import os
import sys
from torch._C import device
import cls_data_generator
import seldnet_model
import parameters
import torch
from IPython import embed
import matplotlib
def main(argv):
    """Build the SELD CRNN from its checkpoint and return ``(model, device)``.

    Parameters
    ----------
    argv : list[str]
        Command-line arguments; ``argv[1]`` (optional) selects the
        parameter-set task id, defaulting to '1'.
    """
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # (FIX: removed the unused `kwargs` and `dump_figures` locals.)
    # use parameter set defined by user
    task_id = '1' if len(argv) < 2 else argv[1]
    params = parameters.get_params(task_id)
    print('\nLoading the best model and predicting results on the testing split')
    print('\tLoading testing dataset:')
    data_gen_test = cls_data_generator.DataGenerator(
        params=params, split=4, shuffle=False, is_eval=True if params['mode'] == 'eval' else False
    )
    data_in, data_out = data_gen_test.get_data_sizes()
    # CHOOSE THE MODEL WHOSE OUTPUT YOU WANT TO VISUALIZE
    checkpoint_name = "/home/noesis/workspace_balaji/workspace_priya/seld-dcase2022-main/models/1_1_dev_split0_accdoa_mic_gcc_model.h5"
    model = seldnet_model.CRNN(data_in, data_out, params)
    model.eval()
    model.load_state_dict(torch.load(checkpoint_name, map_location=torch.device('cpu')))
    model = model.to(device)
    return model, device
# Load the trained model
# Define the classes
# Class labels, index-aligned with the model's softmax output dimension.
classes = ['Female speech women speaking', 'Male speech man speaking', 'Clapping', 'Telephone', 'Laughter', 'Domestic sounds',
           'Walk foot steps', 'Door open or door close', 'Music', 'Musical instrument', 'Water tap', 'Bell', 'Knock']  # Replace with your own class names
def load_model(checkpoint_name="/home/noesis/workspace_balaji/workspace_priya/seld-dcase2022-main/models/1_1_dev_split0_accdoa_mic_gcc_model.h5"):
    """Construct the SELD CRNN and load its weights from ``checkpoint_name``.

    Generalized (backward-compatibly) to accept the checkpoint path as a
    parameter instead of only the previously hard-coded location.
    (FIX: also removed the unused ``kwargs`` local.)
    """
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # use parameter set defined by user
    task_id = '1'
    params = parameters.get_params(task_id)
    data_gen_test = cls_data_generator.DataGenerator(
        params=params, split=4, shuffle=False, is_eval=True if params['mode'] == 'eval' else False
    )
    data_in, data_out = data_gen_test.get_data_sizes()
    model = seldnet_model.CRNN(data_in, data_out, params)
    model.eval()
    model.load_state_dict(torch.load(checkpoint_name, map_location=torch.device('cpu')))
    model = model.to(device)
    return model
# Function to preprocess the audio data
def preprocess_audio(audio_file):
    """Load an audio file and return a dB-scaled mel spectrogram tensor.

    Returns a float tensor of shape (1, 1, n_mels, frames) on the available
    device.
    """
    # Load the audio file at its native sampling rate.
    audio, sr = librosa.load(audio_file, sr=None)
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    # FIX: pass the signal as the keyword `y=` — librosa >= 0.10 made the
    # audio argument keyword-only, so the old positional call raises.
    mel_spectrogram = librosa.feature.melspectrogram(y=audio, sr=sr)
    mel_spectrogram = librosa.power_to_db(mel_spectrogram, ref=np.max)
    # Add batch and channel dimensions expected by the model.
    mel_spectrogram = mel_spectrogram[np.newaxis, np.newaxis, ...]
    mel_spectrogram = torch.from_numpy(mel_spectrogram).float().to(device)
    return mel_spectrogram
# Function to perform sound event detection on a single audio file
def detect_sound_event(audio_file, model=None):
    """Classify one audio file; return ``(label, probability)``.

    Parameters
    ----------
    audio_file : str
        Path to the audio file.
    model : torch.nn.Module, optional
        Pre-loaded model. FIX: previously the checkpoint was re-loaded from
        disk on every call; callers may now pass a model once and reuse it
        (the default still loads it, preserving the old behavior).
    """
    # Preprocess the audio data
    mel_spectrogram = preprocess_audio(audio_file)
    # Perform inference using the trained model
    with torch.no_grad():
        if model is None:
            model = load_model()
        output = model(mel_spectrogram)
        predictions = torch.softmax(output, dim=1).cpu().numpy()
        predicted_class = np.argmax(predictions)
        predicted_prob = np.max(predictions)
    # Get the predicted class label
    predicted_label = classes[predicted_class]
    return predicted_label, predicted_prob
# Example usage
# NOTE(review): this code is at module level (outside the __main__ guard),
# so merely importing this module triggers a full model load and inference —
# confirm whether it should be moved under `if __name__ == "__main__":`.
audio_file = '/home/noesis/workspace_balaji/workspace_priya/seld-dcase2022-main/dataset/drums.wav' # Replace with your own audio file
predicted_label, predicted_prob = detect_sound_event(audio_file)
# Print the predicted class label and probability
print(f'Predicted Label: {predicted_label}')
print(f'Predicted Probability: {predicted_prob}')
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e) | balajiiitg/AED_mon0channel | inference_code.py | inference_code.py | py | 4,263 | python | en | code | 0 | github-code | 13 |
17042931004 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMarketingVoucherdetailListQueryModel(object):
    """Request model for ``alipay.marketing.voucherdetail.list.query``.

    Holds the optional query fields; serialization includes only truthy
    values and recurses into nested Alipay models.
    """

    # Serialized field names, in the order the dict entries are emitted.
    _FIELD_NAMES = ("open_id", "page_num", "page_size", "template_id", "user_id")

    def __init__(self):
        self._open_id = None
        self._page_num = None
        self._page_size = None
        self._template_id = None
        self._user_id = None

    @property
    def open_id(self):
        return self._open_id

    @open_id.setter
    def open_id(self, value):
        self._open_id = value

    @property
    def page_num(self):
        return self._page_num

    @page_num.setter
    def page_num(self, value):
        self._page_num = value

    @property
    def page_size(self):
        return self._page_size

    @page_size.setter
    def page_size(self, value):
        self._page_size = value

    @property
    def template_id(self):
        return self._template_id

    @template_id.setter
    def template_id(self, value):
        self._template_id = value

    @property
    def user_id(self):
        return self._user_id

    @user_id.setter
    def user_id(self, value):
        self._user_id = value

    def to_alipay_dict(self):
        """Serialize truthy fields to a plain dict (recursing into sub-models)."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipayMarketingVoucherdetailListQueryModel()
        for name in AlipayMarketingVoucherdetailListQueryModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayMarketingVoucherdetailListQueryModel.py | AlipayMarketingVoucherdetailListQueryModel.py | py | 2,817 | python | en | code | 241 | github-code | 13 |
31727786074 | import os
from django.http import Http404, FileResponse, JsonResponse
from mini_backend import settings
import utils.response
def image(request):
    """Serve a stored JPEG by its MD5 name (``GET /image?md5=<hash>``)."""
    if request.method == 'GET':
        md5 = request.GET.get('md5')
        img_file = os.path.join(settings.IMAGE_DIR, md5 + '.jpeg')
        if not os.path.exists(img_file):
            # BUG FIX: Http404 is an exception — the original *returned* an
            # instance of it, which Django does not treat as a response.
            raise Http404()
        # BUG FIX: dropped the redundant open().read() into an unused local,
        # which leaked a file handle and read the file twice.
        print('return image: ' + img_file)
        return FileResponse(open(img_file, 'rb'), content_type='image/jpeg')
def image_text(request):
    """Return JSON metadata (file name + fetch URL) for a stored image."""
    if request.method == 'GET':
        md5 = request.GET.get('md5')
        img_file = os.path.join(settings.IMAGE_DIR, md5 + '.jpeg')
        # Guard clause: missing image -> wrapped "not found" payload.
        if not os.path.exists(img_file):
            return utils.response.wrap_json_response(
                code=utils.response.ReturnCode.RESOURCES_NOT_EXISTS)
        payload = {'name': md5 + '.jpeg', 'url': '/image?md5=%s' % (md5)}
        wrapped = utils.response.wrap_json_response(data=payload)
        return JsonResponse(data=wrapped, safe=False)
| gsy13213009/mini_backend | apis/views/image.py | image.py | py | 1,089 | python | en | code | 0 | github-code | 13 |
20829738149 | import unittest
from models.abc import db
from server import server
from repositories import ChannelRepository
from util import test_client
class TestCors(unittest.TestCase):
    """Integration test: API responses carry a permissive CORS header."""

    @classmethod
    def setUpClass(cls):
        # One shared HTTP test client for all tests in the class.
        cls.client = test_client(server)

    def setUp(self):
        # Fresh database schema before each test.
        db.create_all()

    def tearDown(self):
        # Drop all tables so tests stay independent of each other.
        db.session.remove()
        db.drop_all()

    def testCors(self):
        """GET on an existing channel succeeds and allows any origin."""
        ChannelRepository.create(
            slug='a-channel',
            title='test channel',
            path='/dummy/path'
        )
        response = self.client.get('/api/channel/a-channel')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.headers.get('Access-Control-Allow-Origin'), '*')
| fibasile/ticket-gateway | test/test_cors.py | test_cors.py | py | 755 | python | en | code | 0 | github-code | 13 |
36569872151 | from datetime import datetime
from conta import Conta
class Transferidor:
    """Value object describing a transfer between two accounts.

    Captures the source account, the destination account, the amount and
    the moment the transfer object was created.
    """

    def __init__(self, de_conta, para_conta, valor):
        self.de_conta, self.para_conta, self.valor = de_conta, para_conta, valor
        # Creation timestamp (local time), used as the transaction date.
        self.data_transacao = datetime.now()
| ltakuno/arquivos | python/cleanArch/ex01/app/entities/transferidor.py | transferidor.py | py | 273 | python | pt | code | 0 | github-code | 13 |
42999198219 | import copy
from geometry_msgs.msg import Pose, PoseArray, PoseStamped
import numpy as np
import tf
def compute_eef_offset(frame_1, frame_2):
    """
    Compute the offset (transformation matrix) from frame_1 to frame_2.

    Each frame may be either a ``[trans, rot]`` pair (as returned by tf's
    lookupTransform) or a ``Pose`` / ``PoseStamped``.

    Adapted from:
    https://answers.ros.org/question/229329/what-is-the-right-way-to-inverse-a-transform-in-python/
    """
    frames = [frame_1, frame_2]
    tf_frames = []
    # Get the transformation matrix of each frame from the fixed reference frame
    for frame in frames:
        # If in a list found using LookupTransform, build the matrix directly.
        if isinstance(frame, list):
            trans = frame[0]
            rot = frame[1]
            trans_matrix = tf.transformations.translation_matrix(trans)
            rot_matrix = tf.transformations.quaternion_matrix(rot)
            tf_frames.append(tf.transformations.concatenate_matrices(trans_matrix, rot_matrix))
        # Use the conversion helper if the frame is a pose.
        elif isinstance(frame, (Pose, PoseStamped)):
            tf_frames.append(pose_to_frame_matrix(frame))
        else:
            # BUG FIX: the original called rospy.logerr without importing
            # rospy (NameError) and then returned the string "error";
            # raising is both working and explicit.
            raise TypeError("Frame {} is not an allowed type".format(len(tf_frames) + 1))
    # Invert the first frame matrix
    matrix_1_inverse = tf.transformations.inverse_matrix(tf_frames[0])
    # Get the static transformation matrix from lead to follow frame
    offset = np.dot(matrix_1_inverse, tf_frames[1])
    return offset
def adapt_arm_poses(object_poses, offset):
    """
    Build the end-effector PoseArray that tracks *object_poses* while
    keeping the fixed transform *offset* between object and eef.
    """
    eef_poses = PoseArray()
    eef_poses.header = object_poses.header
    # For each object pose: eef_matrix = object_matrix . offset, then back to a Pose.
    eef_poses.poses = [
        frame_matrix_to_pose(np.dot(pose_to_frame_matrix(obj_pose), offset))
        for obj_pose in object_poses.poses
    ]
    return eef_poses
def pose_to_frame_matrix(pose_in):
    """
    Convert a Pose or PoseStamped into a 4x4 homogeneous transformation matrix.

    Raises:
        TypeError: if *pose_in* is neither Pose nor PoseStamped (the
        original fell through and crashed with UnboundLocalError).
    """
    # Get translation and quaternion rotation out of the pose
    if (type(pose_in)==PoseStamped):
        pose_trans = pose_in.pose.position
        pose_rot = pose_in.pose.orientation
    elif (type(pose_in)==Pose):
        pose_trans = pose_in.position
        pose_rot = pose_in.orientation
    else:
        # BUG FIX: without this branch, pose_trans/pose_rot are unbound below.
        raise TypeError("Expected Pose or PoseStamped, got {}".format(type(pose_in).__name__))
    # Parse them into numpy arrays
    trans = np.array([pose_trans.x, pose_trans.y, pose_trans.z])
    rot = np.array([pose_rot.x, pose_rot.y, pose_rot.z, pose_rot.w])
    # Convert to 'frame' (transformation matrix from origin)
    trans_matrix = tf.transformations.translation_matrix(trans)
    rot_matrix = tf.transformations.quaternion_matrix(rot)
    transformation_matrix = tf.transformations.concatenate_matrices(trans_matrix, rot_matrix)
    return transformation_matrix
def frame_matrix_to_pose(frame_matrix):
    """Convert a 4x4 homogeneous transformation matrix into a Pose."""
    # Decompose the matrix into a translation vector and a quaternion.
    translation = tf.transformations.translation_from_matrix(frame_matrix)
    quaternion = tf.transformations.quaternion_from_matrix(frame_matrix)
    # Assemble the Pose message from the decomposed components.
    pose_out = Pose()
    pose_out.position.x = translation[0]
    pose_out.position.y = translation[1]
    pose_out.position.z = translation[2]
    (pose_out.orientation.x,
     pose_out.orientation.y,
     pose_out.orientation.z,
     pose_out.orientation.w) = quaternion
    return pose_out
| MatthewVerbryke/rse_dam | deliberative/tactics/adapter.py | adapter.py | py | 4,091 | python | en | code | 0 | github-code | 13 |
6080661748 | tree = {}
# Per-node memo table: vc[node][status] holds a minimum vertex-cover set.
# NOTE(review): [set()]*3 aliases ONE set three times per node; this is only
# safe because the entries are re-assigned below, never mutated in place.
vc = [[set()]*3 for i in range(1005000)]
# Parse the adjacency list: each line is "<node>: <child> <child> ...".
with open('vertex_1m.txt', 'r') as file:
    for line in file:
        line_new = line.split(': ')
        child = list(map(int, line_new[1].split()))
        tree[int(line_new[0])] = child
# Status indices into vc[node]: node included / excluded / best of the two.
YES = 1
NO = 0
MAYBE = 2
def sum_child(children, status):
    """Union of the memoised vc[child][status] sets over all *children*.

    Calling vertex_cover(child) first guarantees the memo entries exist;
    its return value itself is not needed here (the original bound it to
    an unused local).
    """
    return_set = set()
    for child in children:
        vertex_cover(child)  # populate vc[child][...] as a side effect
        return_set = return_set.union(vc[child][status])
    return return_set
def vertex_cover(node):
    """Return a minimum vertex-cover set for the subtree rooted at *node*.

    vc[node][YES]   - best cover that includes *node*
    vc[node][NO]    - best cover that excludes *node*
    vc[node][MAYBE] - the smaller of the two (memoised result)
    """
    global tree, vc
    # Memo check: an empty MAYBE set also means "not yet computed", so leaf
    # nodes whose best cover is empty get recomputed on every visit; this
    # costs extra work but does not affect correctness.
    if len(vc[node][MAYBE]) != 0:
        return vc[node][MAYBE]
    vc[node][YES] = {node}.union(sum_child(tree[node], MAYBE))
    vc[node][NO] = sum_child(tree[node], YES)
    if len(vc[node][YES]) < len(vc[node][NO]):
        vc[node][MAYBE] = vc[node][YES]
    else:
        vc[node][MAYBE] = vc[node][NO]
    return vc[node][MAYBE]
# Solve from the root (node 0) and report the cover size and its members.
minimum = vertex_cover(0)
print('panjang nya', len(minimum))
print(minimum)
| divanyh/tugas-eksperimen-daa | TE2/vc.py | vc.py | py | 933 | python | en | code | 0 | github-code | 13 |
8588680645 | from bilevel_imaging_toolbox import cuda_solvers
from bilevel_imaging_toolbox import solvers
from bilevel_imaging_toolbox import image_utils
### Testing dual rof model
# Loading image
image = image_utils.load_image('../examples/images/Playing_Cards_3.png')
# Convert it to grayscale
image = image_utils.convert_to_grayscale(image)
# Add gaussian noise to the image
#n_image = image_utils.add_impulse_noise(image,amount=0.2)
# Solver parameters: regularisation weight and the dual/primal step sizes.
# NOTE(review): tau is derived as 0.9/sigma, presumably to satisfy the
# Chambolle-Pock step-size condition — confirm against the solver docs.
clambda = 0.2
sigma = 1.9
tau = 0.9/sigma
#Call the solver using Chambolle-Pock
#(cp_image,cp_values) = solvers.chambolle_pock_ROF(g_image,clambda,tau,sigma,200)
# Call the solver using CUDA Chambolle-Pock
(ccp_image,ccp_values) = cuda_solvers.chambolle_pock_TVl1_CUDA(image,clambda,tau,sigma,100)
image_utils.save_image(ccp_image,'CUDA_TVl1_PC3.png')
# Plot resulting images
#image_utils.show_collection([image,g_image,fb_image,cp_image],["original","gaussian noise","denoised FB","denoised CP"])
#plot_utils.plot_collection([fb_values,cp_values],["FB","CP"])
| dvillacis/BilevelImagingToolbox | tests/test_cuda_tvl1_solvers.py | test_cuda_tvl1_solvers.py | py | 991 | python | en | code | 2 | github-code | 13 |
13103867074 | # https://school.programmers.co.kr/learn/courses/30/lessons/92343
def solution(info, edges):
    """Programmers 92343 (sheep & wolves): maximum sheep collectable.

    Starting at the root (node 0), repeatedly visit any reachable node;
    a run dies as soon as wolves >= sheep.  info[i] is 0 for a sheep
    node and 1 for a wolf node; edges are (parent, child) pairs.
    """
    children = [[] for _ in range(len(info))]
    for parent, child in edges:
        children[parent].append(child)
    best = 0
    def explore(node, sheep, wolves, frontier):
        nonlocal best
        # Visiting this node updates the animal counts.
        if info[node] == 0:
            sheep += 1
        else:
            wolves += 1
        # The herd is eaten the moment wolves catch up with the sheep.
        if wolves >= sheep:
            return
        best = max(best, sheep)
        # This node's children become reachable for subsequent moves.
        frontier = frontier + children[node]
        for idx, nxt in enumerate(frontier):
            explore(nxt, sheep, wolves, frontier[:idx] + frontier[idx + 1:])
    explore(0, 0, 0, [])
    return best
32270821941 | from django.urls import path
from . import views
# URL routes for this app.
urlpatterns = [
    # Home: post list view.
    path('', views.PostList.as_view(), name='home'),
    # Profile management and public author pages.
    path('profile/', views.profile_private, name='profile'),
    path('update_profile/', views.update_profile, name='update_profile'),
    # Posts filtered by category name.
    path('category/<str:category>/', views.category_view, name='category'),
    path("author/<int:pk>/", views.ProfilePublic.as_view(), name="author"),
    # Post create / edit / delete.
    path('add_post/', views.add_post, name='add_story'),
    path('edit_post/<slug:slug>/', views.edit_post, name='edit_story'),
    path('delete/<slug:slug>/', views.delete_post, name='delete'),
    # Reactions.
    path('like/<slug:slug>/', views.PostLikes.as_view(), name='post_likes'),
    path('dislike/<slug:slug>/', views.PostDislikes.as_view(), name='post_dislikes'),
    # Catch-all slug route; must stay last so it doesn't shadow the routes above.
    path('<slug:slug>/', views.ViewStory.as_view(), name='view_post'),
]
| SamuelUkachukwu/storyBase | novella/urls.py | urls.py | py | 850 | python | en | code | 1 | github-code | 13 |
5705051662 | import random
from faker import Faker
from sqlalchemy.exc import IntegrityError
from app import db
from app.models import Tag, Category, Post
fake = Faker()
def fake_categories(count=10):
    """Seed the database with a 'Default' category plus *count* random ones."""
    db.session.add(Category(cname='Default'))
    for _ in range(count):
        db.session.add(Category(cname=fake.word()))
    try:
        db.session.commit()
    except IntegrityError:
        # A constraint violation (presumably a duplicate name) aborts the batch.
        db.session.rollback()
def fake_tags(count=10):
    """Seed the database with a 'Default' tag plus *count* random ones."""
    db.session.add(Tag(tname='Default'))
    for _ in range(count):
        db.session.add(Tag(tname=fake.word()))
    try:
        db.session.commit()
    except IntegrityError:
        # A constraint violation (presumably a duplicate name) aborts the batch.
        db.session.rollback()
def fake_posts(count=50):
    """Seed the database with *count* posts carrying random content.

    CONSISTENCY FIX: mirrors fake_categories/fake_tags — the commit is
    guarded so an IntegrityError (e.g. a colliding pslug from fake.word())
    rolls the batch back instead of propagating.
    """
    for i in range(count):
        post = Post(
            ptitle=fake.sentence(),
            pslug=fake.word(),
            body=fake.text(2000),
            timestamp=fake.date_time_this_year()
        )
        db.session.add(post)
    try:
        db.session.commit()
    except IntegrityError:
        db.session.rollback()
1286272147 | # coding=utf8
# -*- coding: utf8 -*-
# vim: set fileencoding=utf8 :
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth import BACKEND_SESSION_KEY, HASH_SESSION_KEY, SESSION_KEY
from django.contrib.auth.models import User
from django.contrib.sessions.backends.db import SessionStore
from django.contrib.staticfiles.testing import LiveServerTestCase
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
from django.test.utils import override_settings
from selenium.webdriver.firefox.webdriver import WebDriver
from rest_messaging.models import Message, Participant, Participation, Thread
from rest_messaging_centrifugo.utils import build_channel
from cent.core import Client
# sudo apt-get install xvfb
# sudo apt-get install xserver-xephyr
# sudo apt-get install tightvncserver
# pip install pyvirtualdisplay
from pyvirtualdisplay import Display
import os
import signal
import subprocess
import time
@override_settings(CENTRIFUGO_PORT=8802, CENTRIFUGE_ADDRESS='http://localhost:{0}/'.format(8802))
class IntegrationTests(LiveServerTestCase):
    """End-to-end test of Django + Centrifugo + Selenium.

    Boots a virtual display with a Firefox WebDriver, a real Centrifugo
    subprocess, and a pre-authenticated user session, then checks that
    messages published on private per-thread channels reach only their
    participants in the browser.
    """
    @classmethod
    def setUpClass(cls):
        """Start the display, browser, Centrifugo process and test fixtures."""
        super(IntegrationTests, cls).setUpClass()
        # we do not display
        cls.display = Display(visible=0, size=(1024, 768))
        cls.display.start()
        cls.selenium = WebDriver()
        # we create a user
        password = "password"
        cls.user = User(username="UserForLiveTests")
        cls.user.set_password(password)
        cls.user.save()
        # we log him in
        # source http://stackoverflow.com/questions/22494583/login-with-code-when-using-liveservertestcase-with-django
        # we need a session
        session = SessionStore()
        session[SESSION_KEY] = cls.user.id
        session[BACKEND_SESSION_KEY] = settings.AUTHENTICATION_BACKENDS[0]
        session[HASH_SESSION_KEY] = cls.user.get_session_auth_hash()
        session.save()
        # the cookie dict
        cls.cookie = {
            'name': settings.SESSION_COOKIE_NAME,
            'value': session.session_key,
            'secure': False,
            'path': '/',
        }
        # we launch centrifugo
        cls.centrifugo = subprocess.Popen(["centrifugo --config=tests/config.json --port={0}".format(getattr(settings, "CENTRIFUGO_PORT", 8802))], stdout=subprocess.PIPE,
                                          shell=True, preexec_fn=os.setsid)
        # we create participants
        cls.participant1 = Participant.objects.create(id=cls.user.id)
        cls.participant2 = Participant.objects.create(id=2)
        cls.participant3 = Participant.objects.create(id=3)
        cls.participant4 = Participant.objects.create(id=4)
        # we create a fake request
        cls.request = RequestFactory()
        cls.request.rest_messaging_participant = cls.participant1
        # and wait for it to run
        time.sleep(4)
    @classmethod
    def tearDownClass(cls):
        """Shut down the browser, virtual display and the Centrifugo process group."""
        cls.selenium.close()
        cls.selenium.quit()
        cls.display.stop()
        # we stop centrifugo
        # sudo kill `sudo lsof -t -i:xxxx`
        os.killpg(cls.centrifugo.pid, signal.SIGTERM)
        super(IntegrationTests, cls).tearDownClass()
    def test_integration(self):
        """Messages on the user's threads render; unrelated/forbidden ones do not."""
        # we hit whatever view just to set the cookie
        self.selenium.get(self.live_server_url + reverse('dummy'))
        self.selenium.add_cookie(self.cookie)
        self.selenium.refresh()
        # we create threads which call signals telling centrifugo to connect
        self.thread1 = Thread.managers.get_or_create_thread(self.request, "The #1 Thread", self.participant1.id, self.participant2.id)
        self.thread2 = Thread.managers.get_or_create_thread(self.request, "The #2 Thread", self.participant1.id, self.participant3.id)
        # the following conversation does not include the current user, we do not want it on the screen!
        self.thread_unrelated = Thread.objects.create(name="The unrelated Thread") # the conversation does not involve the current user, we do not want it on the screen!
        self.participationU1 = Participation.objects.create(participant=self.participant2, thread=self.thread_unrelated)
        self.participationU2 = Participation.objects.create(participant=self.participant3, thread=self.thread_unrelated)
        # we load the index page which contains the logic (in javascript)
        self.selenium.get(self.live_server_url + reverse('index'))
        # we wait a little bit
        time.sleep(4)
        # we create a message
        # this will trigger a publishing signal in django-rest-messaging-centrifugo
        body11 = "hi #11"
        body12 = "hi #12"
        body21 = "hi #21"
        body22 = "hi #22"
        bodyU1 = "We do not want to see this! #1"
        bodyU2 = "We do not want to see this! #2"
        m11 = Message.objects.create(sender=self.participant1, thread=self.thread1, body=body11)
        m12 = Message.objects.create(sender=self.participant2, thread=self.thread1, body=body12)
        m21 = Message.objects.create(sender=self.participant3, thread=self.thread2, body=body21)
        m22 = Message.objects.create(sender=self.participant1, thread=self.thread2, body=body22)
        mU1 = Message.objects.create(sender=self.participant2, thread=self.thread_unrelated, body=bodyU1)
        mU2 = Message.objects.create(sender=self.participant3, thread=self.thread_unrelated, body=bodyU2)
        # the channels are private
        # this means that Centrifugo will check the users ids to know if the user may connect
        # if we query a private channel the user does not belong to, we never see the message
        client = Client("{0}api/".format(getattr(settings, "CENTRIFUGE_ADDRESS")), getattr(settings, "CENTRIFUGE_SECRET"))
        forbidden_message = "Message forbidden"
        client.publish(
            build_channel(namespace=settings.CENTRIFUGO_MESSAGE_NAMESPACE, name=self.thread_unrelated.id, user_ids=[p.id for p in self.thread_unrelated.participants.all()]),
            forbidden_message
        )
        # we wait a little bit
        time.sleep(4)
        # now the messages should be displayed
        m11 = self.selenium.find_element_by_id('message__{0}'.format(m11.id))
        m12 = self.selenium.find_element_by_id('message__{0}'.format(m12.id))
        m21 = self.selenium.find_element_by_id('message__{0}'.format(m21.id))
        m22 = self.selenium.find_element_by_id('message__{0}'.format(m22.id))
        self.assertTrue(body11 in m11.text)
        self.assertTrue(body12 in m12.text)
        self.assertTrue(body21 in m21.text)
        self.assertTrue(body22 in m22.text)
        # the following ensures we get the new threads created during the connection
        self.thread4 = Thread.managers.get_or_create_thread(self.request, "The #4 Thread", self.participant4.id, self.participant1.id)
        time.sleep(4)
        message_channel_to_connect_to = 'messages:4#4,1'.format(self.thread4.id, self.thread4.participants.all()[0].id, self.thread4.participants.all()[1].id)
        thread_messages = self.selenium.find_element_by_id('thread__{0}'.format(message_channel_to_connect_to))
        self.assertTrue(message_channel_to_connect_to in thread_messages.text)
        # we should not find the unrelated messages
        self.assertRaises(Exception, self.selenium.find_element_by_id, 'message__{0}'.format(mU1.id))
        self.assertRaises(Exception, self.selenium.find_element_by_id, 'message__{0}'.format(mU2.id))
        self.assertRaises(Exception, self.selenium.find_element_by_id, 'message__{0}'.format(mU2.id))
        self.assertEqual([], self.selenium.find_elements_by_xpath("//*[contains(text(), '{0}')]".format(forbidden_message)))
| raphaelgyory/django-rest-messaging-centrifugo | tests/test_integration.py | test_integration.py | py | 7,739 | python | en | code | 11 | github-code | 13 |
71544097299 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator, PercentFormatter
from cycler import cycler
def aaa():
    """Style the plots, engineer time features, and dump the training data.

    NOTE(review): the filter/where section below appears to be a no-op —
    see the inline notes; confirm intent before relying on the output CSV.
    """
    oldcycler = plt.rcParams['axes.prop_cycle']
    plt.rcParams['axes.facecolor'] = '#0057b8' # blue
    plt.rcParams['axes.prop_cycle'] = cycler(color=['#ffd700'] +
                                             oldcycler.by_key()['color'][1:])
    # Read the data
    train = pd.read_csv('origin/train.csv', parse_dates=['time'])
    test = pd.read_csv('origin/test.csv', index_col='row_id', parse_dates=['time'])
    # Feature Engineering
    for df in [train, test]:
        df['day'] = df.time.dt.day
        df['weekday'] = df.time.dt.weekday
        df['hour'] = df.time.dt.hour
        df['minute'] = df.time.dt.minute
    # NOTE(review): these compare against the STRING '0'; x/y/minute look
    # numeric after the feature engineering above, so the masks are likely
    # all-False — confirm the column dtypes.
    filter1 = train['x']=='0'
    filter2 = train['y']=='0'
    filter3 = train['direction'] =='0'
    filter4 = train['minute']=='0'
    # NOTE(review): DataFrame.where returns a new frame; this result is
    # discarded, so the CSV written below is the unfiltered training data.
    train.where(filter1 & filter2 & filter3 & filter4)
    train.to_csv('group_and_sums_1.csv', index=False)
    #medians = train.groupby(['x', 'y', 'direction', 'day', 'hour', 'minute']).congestion.sum().astype(int)
    #medians.to_csv('group_and_sums_1.csv', index=False)
    # Compute the median congestion for every place and time of week
    #sums = train.groupby(['hour', 'minute']).congestion.sum().astype(int)
    #print(sums)
    #sums.to_csv('weekday_group_and_sums_1.csv', index=False)
def bbb():
    """Drop morning rows (hour < 12) from the training data and save the rest."""
    train = pd.read_csv('train_action_del_half.csv', parse_dates=['time'])
    train['hour'] = train['time'].dt.hour
    # BUG FIX: DataFrame.drop returns a new frame; the original discarded the
    # result and wrote the unfiltered data to disk.
    train = train.drop(train[train['hour'] < 12].index)
    train.to_csv('del.csv')
bbb() | bashendixie/ml_toolset | tabular-playground-series-mar-2022/observer.py | observer.py | py | 1,640 | python | en | code | 9 | github-code | 13 |
25299665333 | from typing import List
"""
Given an array of integers `nums` and an integer `val`,
remove all occurrences of `val` in `nums` in-place.
Do not allocate space for another array.
"""
def test_removeElement1():
    """Removing val=3 from [3,2,2,3] leaves two elements."""
    assert removeElement([3, 2, 2, 3], 3) == 2
def test_removeElement2():
    """Removing val=2 from [0,1,2,2,3,0,4,2] leaves five elements."""
    assert removeElement([0, 1, 2, 2, 3, 0, 4, 2], 2) == 5
# code with one fast pointer (index) and one slow pointer (valIndex)
# this will run faster if there are more `val`s in `nums`
def removeElement(nums: List[int], val: int) -> int:
    """Remove every occurrence of *val* from *nums* in place, keeping order.

    A slow "write" pointer marks where the next kept element goes while the
    scan walks the whole list.  Returns the new logical length.
    """
    write = 0
    for current in nums:
        # Keep anything that isn't val by compacting it to the front.
        if current != val:
            nums[write] = current
            write += 1
    print(nums)
    print("----------")
    return write
if __name__ == "__main__":
print(removeElement([3,2,2,3],3))
print(removeElement([0,1,2,2,3,0,4,2],2))
"""
LeetCode:
Runtime: 28ms
Memory Usage: 13.8MB
"""
# code with one pointer at end and one at beginning
# this will run faster if there are less `val`s in `nums`
def removeElement2(nums: List[int], val: int) -> int:
    """Remove every occurrence of *val* from *nums* in place (order NOT kept).

    Each match is overwritten by the last "live" element and the live region
    shrinks from the right — cheaper when *val* occurrences are rare.
    """
    left = 0
    live = len(nums)
    while left < live:
        if nums[left] != val:
            left += 1
        else:
            # Pull in the last live element and shrink the region; do not
            # advance: the moved-in value may itself equal val.
            live -= 1
            nums[left] = nums[live]
    # Everything before `live` is guaranteed != val.
    return live
if __name__ == "__main__":
print(removeElement2([3,2,2,3],3))
print(removeElement2([0,1,2,2,3,0,4,2],2))
"""
LeetCode:
Runtime: 43ms
Memory Usage: 13.9MB
""" | oktober/python-practice | leet-code/arrays/in-place-removeElement.py | in-place-removeElement.py | py | 1,986 | python | en | code | 0 | github-code | 13 |
16429409061 | from .imports import *
## base DATASET class
class Dataset:
    """Base class for a lazily-loaded, post-processed tabular dataset.

    Subclasses override the class-level configuration attributes; `data`
    loads, post-processes and caches the underlying CSV on first access.
    """
    id:str=''
    url:str = ''            # remote CSV used when the local copy is missing
    path:str = ''           # local CSV cache location
    cols:list = []          # columns to keep (used when cols_rename is empty)
    cols_sep:list = []      # columns holding `sep`-separated multi-values
    cols_rename:dict = {}   # old->new column mapping (takes precedence over cols)
    sep:str = ';'
    fillna:object = ''
    cols_q:list = []        # columns coerced to numeric
    filter_data:dict = {}
    cols_pref:list = []     # preferred column ordering

    def __init__(self, path:str='', cols:list=[], **kwargs):
        """Optionally override `path`, `cols`, or any other class attribute."""
        if path:
            self.path = path
        if cols:
            self.cols = cols
        for key, val in kwargs.items():
            setattr(self, key, val)

    def read_df(self):
        """Return the raw dataframe, downloading and caching it if needed."""
        assert self.path # assert path string exists
        if os.path.exists(self.path):
            # Local cache hit.
            return pd.read_csv(self.path, on_bad_lines='warn')
        if not self.url:
            raise Exception('Neither URL nor path')
        # Download, then persist a local copy for next time.
        df = pd.read_csv(self.url)
        dirname = os.path.dirname(self.path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        df.to_csv(self.path, index=False)
        return df

    @cached_property
    def data(self):
        """The cleaned dataframe (computed once, then cached)."""
        return postproc_df(
            self.read_df(),
            cols=self.cols_rename if self.cols_rename else self.cols,
            cols_sep=self.cols_sep,
            cols_q=self.cols_q,
            sep=self.sep,
            fillna=self.fillna
        )
### LANDMARKS
class LandmarksDataset(Dataset):
    """Paris landmarks with coordinates, arrondissement, and an HTML tooltip."""
    path = PATHS.get('landmarks')
    url = URLS.get('landmarks')
    # Coordinates must be numeric for the point-in-polygon lookup below.
    cols_q = ['lat', 'lon']
    @cached_property
    def data(self):
        df=super().data
        # Locate each landmark's arrondissement from its coordinates.
        df['arrond_id'] = [
            determine_arrond(lat,lon)
            for lat,lon in zip(df.lat, df.lon)
        ]
        # Pre-render an HTML tooltip (newlines become <br>).
        df['tooltip'] = [
            f'''{row.landmark}\n{row.address}\n{'Paris '+row.arrond_id+'ᵉ' if row.arrond_id.isdigit() else ''}'''.strip().replace('\n','<br>')
            for i,row in df.iterrows()
        ]
        return df
# Memoised singleton accessor for the landmarks dataset.
@cache
def Landmarks(): return LandmarksDataset()
### MEMBERS
def get_member_id(uri):
    """Extract the member slug from a '.../members/<id>/' URI ('' if absent).

    The URI is lower-cased and the trailing slash is stripped.
    """
    if '/members/' not in uri:
        return ''
    tail = uri.lower().split('/members/', 1)[1]
    return tail[:-1]
class MembersDataset(Dataset):
    """Full library-member records from the members export."""
    url:str = URLS.get('members')
    path:str = PATHS.get('members')
    sep:str = ';'
    cols:list = [
        'uri',
        'name',
        'sort_name',
        'title',
        'gender',
        'is_organization',
        'has_card',
        'birth_year',
        'death_year',
        'membership_years',
        'viaf_url',
        'wikipedia_url',
        'nationalities',
        # 'addresses',
        # 'postal_codes',
        # 'arrondissements',
        # 'coordinates',
        'notes',
        'updated'
    ]
    cols_sep:list = [
        'nationalities',
        'membership_years'
    ]
    cols_q = [
        'birth_year',
        'death_year',
        'membership_years'
    ]
    # NOTE(review): 'member_nicename' does not match the 'nice_name' column
    # created below — confirm whether this preference entry ever applies.
    cols_pref = [
        'member',
        'member_nicename',
    ]
    @cached_property
    def data(self):
        df=super().data
        # Derive the member slug from the URI and a display-friendly name.
        df['member'] = df['uri'].apply(get_member_id)
        df['nice_name'] = df['sort_name'].apply(to_name_nice)
        return postproc_df(df,cols_pref=self.cols_pref)
class MiniMembersDataset(Dataset):
    """Slimmed member table with 'member_'-prefixed column names."""
    # NOTE(review): the 'gender' key is listed twice; the dict keeps one entry.
    _cols_rename = {
        'member':'member',
        'nice_name':'member_name',
        'title':'member_title',
        'gender':'member_gender',
        'membership_years':'member_membership',
        'birth_year':'member_dob',
        'death_year':'member_dod',
        'gender':'member_gender',
        'nationalities':'member_nationalities',
    }
    cols_pref = [
        'member',
        'member_name',
    ]
    @cached_property
    def data(self):
        # Rename and coerce the full members table rather than re-reading CSVs.
        odf=postproc_df(MembersDataset().data, self._cols_rename, cols_q=['member_dob', 'member_dod'])
        return odf
# Memoised singleton accessor for the slim members dataset.
@cache
def Members(): return MiniMembersDataset()
### DWELLINGS
class DwellingsDataset(Dataset):
    """Member dwelling (address) records, one row per residency interval."""
    # FIX: `url` was accidentally assigned twice on consecutive lines.
    url:str = URLS.get('dwellings')
    path:str = PATHS.get('dwellings')
    cols:list = [
        'member_uri',
        # 'person_id',
        # 'account_id',
        # 'address_id',
        # 'location_id',
        'start_date',
        'end_date',
        # 'start_date_precision',
        # 'end_date_precision',
        # 'care_of_person_id',
        'street_address',
        'city',
        'postal_code',
        'latitude',
        'longitude',
        # 'country_id',
        'arrrondissement'
    ]
    # cols_rename:dict = {
    #     'start_date':'dwelling_start_date',
    #     'end_date':'dwelling_end_date',
    # }
    cols_q = [
        'latitude',
        'longitude',
    ]
    # Columns combined into the synthetic dwelling id.
    # cols_id=['member','street_address','city','postal_code','start_date','end_date']
    cols_id = ['member', 'latitude', 'longitude','start_date','end_date']
    cols_pref=[
        'dwelling',
        'member',
        'arrond_id',
        'street_address',
        'city',
        'postal_code',
        'latitude',
        'longitude',
        'start_date',
        'end_date'
    ]
    @cached_property
    def data(self):
        df=super().data
        # Derive the member slug and a synthetic dwelling id from the id columns.
        df['member'] = df['member_uri'].apply(get_member_id)
        df['dwelling'] = [self.sep.join(str(x) for x in l) for l in df[self.cols_id].values]
        # Normalise arrondissement ('arrrondissement' is the upstream CSV's
        # spelling) to a bare string number, '' when missing.
        df['arrond_id']=df['arrrondissement'].apply(lambda x: '' if not x else str(int(x)))
        return postproc_df(df,cols_pref=self.cols_pref)
class MiniDwellingsDataset(Dataset):
    """Slimmed dwellings table with 'dwelling_'-prefixed column names."""
    _cols_rename = dict(
        member='member',
        dwelling='dwelling',
        arrond_id='arrond_id',
        start_date='dwelling_start',
        end_date='dwelling_end',
        street_address='dwelling_address',
        city='dwelling_city',
        latitude='lat',
        longitude='lon',
    )
    @cached_property
    def data(self):
        # Rename the full dwellings table rather than re-reading the CSV.
        return postproc_df(DwellingsDataset().data, self._cols_rename)
# Memoised singleton accessor for the slim dwellings dataset.
@cache
def Dwellings(): return MiniDwellingsDataset()
### BOOKS
def get_book_id(uri):
    """Extract the book slug from a '.../books/<id>/' URI ('' if absent).

    Unlike get_member_id, the slug's case is preserved.
    """
    if '/books/' not in uri:
        return ''
    return uri.split('/books/', 1)[1][:-1]
class BooksDataset(Dataset):
    """Book records with creators, genres and circulation statistics."""
    # FIX: `url` was accidentally assigned twice on consecutive lines.
    url:str = URLS.get('books')
    path:str = PATHS.get('books')
    cols:list = [
        'uri',
        'title',
        'genre',
        'author',
        'editor',
        'translator',
        'introduction',
        'illustrator',
        'photographer',
        'year',
        'format',
        'uncertain',
        'ebook_url',
        'volumes_issues',
        'notes',
        'event_count',
        'borrow_count',
        'purchase_count',
        'circulation_years',
        'updated'
    ]
    cols_sep:list = [
        'author',
        'editor',
        'translator',
        'introduction',
        'illustrator',
        'photographer',
        'circulation_years',
        'genre'
    ]
    cols_q = ['year', 'borrow_count', 'purchase_count','circulation_years']
    cols_pref = ['book','title','author','year','genre']
    @cached_property
    def data(self):
        df=super().data
        # CONSISTENCY: reuse the module-level helper rather than re-implementing
        # the '/books/' slug extraction inline (behaviour is identical).
        df['book']=df.uri.apply(get_book_id)
        return postproc_df(df, cols_pref=self.cols_pref)
class MiniBooksDataset(Dataset):
    """Books disaggregated by author, joined with creator metadata.

    Each (book, author) pair becomes one row; authorless books keep a
    single row with an empty author slug.
    """
    ## ONLY AUTHORS
    _cols_rename={
        'book':'book',
        'author':'author',
        'author_nice_name':'author_name',
        'title':'book_title',
        'year':'book_year',
        'genre':'book_genre',
        'format':'book_format',
        'circulation_years':'book_circulated',
        'borrow_count':'book_numborrow',
        'author_birth_date':'author_dob',
        'author_death_date':'author_dod',
        'author_gender':'author_gender',
        'author_nationalities':'author_nationalities',
    }
    cols_sep = []
    cols_pref = ['book','author','author_name','book_title','book_year']
    @cached_property
    def data(self):
        dfbooks=BooksDataset().data.drop_duplicates('book')
        dfau=CreatorsDataset().data.drop_duplicates('creator').set_index('creator')
        ld=[]
        for i,row in dfbooks.iterrows():
            # Author cells are `sep`-split lists after post-processing.
            assert type(row.author) == list
            for author in row.author if row.author else ['']:
                d=dict(row)
                aid=d['author']=to_name_id(author)
                # Pull in the creator's metadata under an 'author_' prefix;
                # unknown creators are simply skipped.
                try:
                    for k,v in dict(dfau.loc[aid]).items():
                        d[f'author_{k}']=v
                except KeyError:
                    pass
                ld.append(d)
        odf=pd.DataFrame(ld)
        odf=postproc_df(odf, cols=self._cols_rename, cols_q=['author_dob','author_dod','book_year','book_circulated'], cols_pref=self.cols_pref)
        # NOTE(review): `x is np.nan` relies on NaN being the np.nan singleton;
        # pandas usually guarantees this after a merge, but `pd.isna`-style
        # checks would be more robust.
        odf['author_nationalities']=odf['author_nationalities'].apply(
            lambda x: [] if x is np.nan else x
        )
        odf['author_gender']=odf['author_gender'].fillna('')
        odf['author_name']=odf['author_name'].fillna('')
        return odf
# Memoised singleton accessor for the per-author books dataset.
@cache
def Books(): return MiniBooksDataset()
### AUTHORS
class CreatorsDataset(Dataset):
    """Creator (author/editor/...) records from the creators export."""
    url:str = URLS.get('creators')
    path:str = PATHS.get('creators')
    cols:list = [
        # 'ID',
        # 'name',
        'sort name',
        # 'MEP id',
        # 'Account Id',
        # 'birth year',
        # 'death year',
        # 'gender',
        # 'title',
        # 'profession',
        # 'is organization',
        # 'Is Creator',
        # 'Has Account',
        # 'In Logbooks',
        # 'Has Card',
        # 'Subscription Dates',
        # 'verified',
        # 'updated at',
        # 'Admin Link',
        'VIAF id',
        # 'Key',
        'Gender',
        'Nationality',
        # 'Language',
        'Birth Date',
        'Death Date',
        # 'LoC Name',
        # 'LoC ID',
        # 'LoC Source: URL',
        # 'LofC URI: URL',
        # 'VIAF Source: URL',
        # 'BNE Name',
        # 'BNE URL',
        # 'BNF Name',
        # 'BNF URL',
        # 'DNB Name',
        # 'DNB URL',
        # 'ICCU Name',
        # 'ICCU URL',
        # 'ISNI Name',
        # 'ISNI URL',
        'Wikidata URL',
        # 'Wikipedia URL',
        # 'WorldCat Identity URL'
    ]
    # NOTE(review): 'end_date' here does not match the 'death_date' column
    # produced below — confirm whether this entry should be 'death_date'.
    cols_pref = ['creator', 'gender','nationality','birth_date','end_date']
    cols_sep = ['Nationality']
    @cached_property
    def data(self):
        df=super().data
        # Normalise the export's spaced/capitalised headers to snake_case.
        df.columns = [c.lower().replace(' ','_') for c in df]
        # Derive a slug id and a display-friendly name from 'Last, First'.
        df['creator']=df['sort_name'].apply(to_name_id)
        df['nice_name']=df['sort_name'].apply(to_name_nice)
        odf=postproc_df(df, cols_pref=self.cols_pref)
        return odf.rename(columns={'nationality':'nationalities'})
def to_name_id(x):
    """Slugify a name: keep letters and spaces, then lower-case and hyphenate."""
    kept = ''.join(ch for ch in x if ch.isalpha() or ch == ' ')
    return kept.strip().replace(' ', '-').lower()
def to_name_nice(x):
    """Convert 'Last, First ...' into 'First Last' (pass through otherwise).

    Only the first token of the given names is kept
    ('Joyce, James Augustine' -> 'James Joyce').
    """
    if ',' not in x:
        return x
    last, rest = x.split(',', 1)
    last = last.strip()
    given = rest.split()
    if not given:
        # BUG FIX: 'Smith,' used to raise IndexError; fall back to the surname.
        return last
    return f'{given[0]} {last}'
### EVENTS
class EventsDataset(Dataset):
    """Library events (borrows, subscriptions, purchases, ...) export."""
    # FIX: `url` was accidentally assigned twice on consecutive lines.
    url:str = URLS.get('events')
    path:str = PATHS.get('events')
    cols:list = [
        'event_type',
        'start_date',
        'end_date',
        'member_uris',
        'member_names',
        'member_sort_names',
        'subscription_price_paid',
        'subscription_deposit',
        'subscription_duration',
        'subscription_duration_days',
        'subscription_volumes',
        'subscription_category',
        'subscription_purchase_date',
        'reimbursement_refund',
        'borrow_status',
        'borrow_duration_days',
        'purchase_price',
        'currency',
        'item_uri',
        'item_title',
        'item_volume',
        'item_authors',
        'item_year',
        'item_notes',
        'source_type',
        'source_citation',
        'source_manifest',
        'source_image'
    ]
    cols_sep:list = [
        'member_uris',
        'member_names',
        'member_sort_names'
    ]
    cols_pref = ['event']
    @cached_property
    def data(self):
        df=super().data
        # Assign a synthetic zero-padded event id (E00001, E00002, ...).
        df['event']=[f'E{i+1:05}' for i in range(len(df))]
        # Parse year/month out of ISO-like start dates; non-dates become NaN.
        df['start_year'] = pd.to_numeric([estr[:4] for estr in df['start_date'].apply(str)], errors='coerce')
        df['start_month'] = pd.to_numeric([
            x[5:7] if len(x)>=7 and x[:4].isdigit() and x[4]=='-' else None
            for x in df['start_date'].apply(str)
        ], errors='coerce')
        return postproc_df(df,cols_pref=self.cols_pref)
class MiniEventsDataset(Dataset):
    """Events disaggregated to one row per (event, member).

    By default only 'Borrow' events are kept (see `only_borrows`).
    """
    only_borrows = True
    @cached_property
    def data(self):
        # disaggregate EventsDataset by member_uris and get books too
        ld=[]
        for i,row in EventsDataset().data.iterrows():
            if self.only_borrows and row.event_type!='Borrow': continue
            # NOTE(review): this inner `i` shadows the outer iterrows index
            # (and is unused); harmless but worth renaming.
            for i,member_uri in enumerate(row['member_uris']):
                d={
                    'event':row.event,
                    'event_type':row.event_type,
                    'member':get_member_id(member_uri),
                    'book':get_book_id(row.item_uri),
                    'event_start':row.start_date,
                    'event_end':row.end_date,
                    'event_year':str(ifnanintstr(row.start_year,'')),
                    'event_month':str(ifnanintstr(row.start_month,'')),
                }
                ld.append(d)
        dfevents = pd.DataFrame(ld)
        return dfevents
# Memoised singleton accessor for the per-member events dataset.
@cache
def Events(): return MiniEventsDataset()
### COMBINED
class CombinedDataset(Dataset):
    """Outer join of members, dwellings, books and events into one frame."""
    path=PATHS.get('combinedmini')
    url=URLS.get('combinedmini')
    _cols_sep = [
        'member_membership',
        'member_nationalities',
        'book_genre',
        'book_circulated',
        'author_nationalities'
    ]
    _cols_q = ['member_membership','member_dob','member_dod','lat','lon','book_year','author_dob','author_dod','event_year','event_month']
    _cols_sep_nonan=['member_membership']
    _cols_pref=['member','event','dwelling','arrond_id','book','author']
    def gen(self, save=True, progress=True, frac=None):
        """Rebuild the combined frame from the component datasets.

        Args:
            save: pickle the result to `self.path` when True.
            progress: show a progress bar while locating events.
            frac: optional sampling fraction for quick test builds.
        """
        dfmembers = Members().data
        dfbooks = Books().data
        dfevents = Events().data
        dfdwellings = Dwellings().data
        ## merge!
        bookevents = dfevents.merge(dfbooks, on='book', how='outer')
        memberdwellings = dfmembers.merge(dfdwellings, on='member', how='outer')
        odf = memberdwellings.merge(bookevents, on='member', how='outer').fillna('')
        # drop dups
        odf = odf.drop_duplicates(['member','event','book','dwelling'])
        ## clean up
        # BUG FIX: these used `x is ''` — identity comparison with a string
        # literal is interning-dependent (and a SyntaxWarning on 3.8+).
        for c in self._cols_sep:
            odf[c]=[[] if x == '' else x for x in odf[c]]
        for c in self._cols_q:
            odf[c]=[np.nan if x == '' else x for x in odf[c]]
        for c in self._cols_sep_nonan:
            odf[c]=[[y for y in x if not np.isnan(y) and y] for x in odf[c]]
        if frac is not None: odf=odf.sample(frac=frac)
        odf['hover_tooltip'] = odf.apply(hover_tooltip,axis=1)
        odf = prune_when_dwelling_matches(odf, progress=progress)
        odf = postproc_df(
            odf,
            cols_sep=self._cols_sep,
            cols_q=self._cols_q,
        )
        if save: odf.to_pickle(self.path)
        return odf
    @cached_property
    def data(self):
        """Load the combined frame, downloading or generating it on a cache miss."""
        # need to gen?
        if not os.path.exists(self.path):
            if self.url:
                with Logwatch(f'downloading combined dataset from: {self.url}'):
                    df=pd.read_pickle(self.url)
            else:
                with Logwatch('generating combined dataset'):
                    df=self.gen(save=False)
            # save
            ensure_dir(self.path)
            with Logwatch(f'saving combined dataset to: {self.path}'):
                df.to_pickle(self.path)
        else:
            df=pd.read_pickle(self.path)
        return postproc_df(df, cols_pref=self._cols_pref)
# Memoised singleton accessor for the combined dataset.
@cache
def Combined():
    logger.debug('Combined()')
    return CombinedDataset()
### OTHER
def is_valid_arrond(x):
    """True when *x* denotes a real arrondissement ('99' is an unknown sentinel).

    BUG FIX: the original compared `x != '99'` on the raw value, so the
    int 99 was treated as valid while the string '99' was not; comparing
    on the string form makes int and str inputs agree.
    """
    s = str(x)
    return s.isdigit() and s != '99'
# NOTE(review): @cache keys on `force`, so force=True re-downloads only the
# first time it is called with that argument value.
@cache
def get_geojson_arrondissement(force=False):
    """Return the Paris arrondissements GeoJSON, downloading/caching it locally.

    Each feature is annotated with an 'arrond_id' (the arrondissement number
    as a string, taken from the 'c_ar' property).
    """
    import os,json,requests
    # download if nec
    url=URLS.get('geojson_arrond')
    fn=os.path.join(PATH_DATA,'arrondissements.geojson')
    if force or not os.path.exists(fn):
        data = requests.get(url)
        with open(fn,'wb') as of:
            of.write(data.content)
    # load
    with open(fn) as f:
        jsond=json.load(f)
    # anno
    for d in jsond['features']:
        d['id'] = str(d['properties']['c_ar'])
        d['properties']['arrond_id'] = d['id']
    return jsond
def hover_tooltip(row):
    """Render one combined-dataset row as an HTML hover tooltip.

    Shows a linked member name with life dates, the dwelling address, and
    the membership year range (collapsed when start == end).
    """
    mrange_start = f"{int(min(row.member_membership)) if row.member_membership else ''}"
    mrange_end = f"{int(max(row.member_membership)) if row.member_membership else ''}"
    mrange=f'{mrange_start} – {mrange_end}' if mrange_start!=mrange_end else mrange_start
    # Newlines become <br> so the tooltip renders as multi-line HTML.
    return f"""
    <a href="https://shakespeareandco.princeton.edu/members/{row.member}/" target="_blank"><b>{row.member_name}</b> ({ifnanintstr(row.member_dob)} – {ifnanintstr(row.member_dod)})</a>
    {row.dwelling_address}
    {row.dwelling_city} {row.arrond_id}{"ᵉ" if row.arrond_id!="1" else "ᵉʳ"}
    Member: {mrange}
    """.strip().replace('\n','<br>')
def determine_arrond(lat, lon, default='NA'):
    """Return the arrondissement id containing (lat, lon), or *default*.

    Performs a point-in-polygon test against the cached arrondissements
    GeoJSON; note GeoJSON stores coordinates as (lon, lat).
    """
    import shapely.geometry as geo
    point = geo.Point(lon, lat)
    # First polygon that contains the point wins.
    for feature in get_geojson_arrondissement()['features']:
        polygon = geo.shape(feature['geometry'])
        if polygon.contains(point):
            return feature['properties']['arrond_id']
    return default
def prune_when_dwelling_matches(df, progress=True):
    """Prunes the dataframe based on dwelling matches for events.

    For every event, tries to decide which of its candidate dwellings the
    event plausibly happened at (exact date overlap first, then heuristics),
    and records per-row match bookkeeping before dropping excluded rows.

    Args:
        df (pandas.DataFrame): The input dataframe containing event and dwelling information.
        progress (bool): Whether to show a tqdm progress bar over events.

    Returns:
        pandas.DataFrame: The pruned dataframe with only the rows that have valid dwelling matches.
    """
    df_nonevent, df_event=df[df.event==''],df[df.event!='']
    # anyone's ok if they're not an event
    # Per-row-index bookkeeping dicts, written by the declare_* helpers below
    # and attached as new columns at the end.
    numpossd = {ii:np.nan for ii in df.index}
    matchtyped={
        **{ii:'NA' for ii in df_nonevent.index},
        **{ii:'?' for ii in df_event.index}
    }
    matchfoundd={ii:None for ii in df.index}
    excludedd={ii:False for ii in df.index}
    def declare_impossible(xdf):
        # but declare them impossible to match to a dwelling
        for ii in xdf.index:
            matchtyped[ii]='Impossible'
            numpossd[ii]=0
            matchfoundd[ii]=False
            excludedd[ii]=True
    def declare_exact_match(xdf):
        # Rows whose dwelling date range contains the event date(s).
        for ii in xdf.index:
            matchtyped[ii]='Exact'
            matchfoundd[ii]=True
            numpossd[ii]=len(xdf)
    def declare_exact_miss(xdf):
        # NOTE(review): reads `e` via closure over the event loop variable below,
        # so it must only be called from inside that loop; the log message looks
        # copy-pasted from the exact-match branch.
        logger.trace(f'for event {e}, a certain match was found, with {len(xdf)} possibilities')
        for ii in xdf.index:
            matchtyped[ii]='Exact (excl.)'
            excludedd[ii]=True
            matchfoundd[ii]=False
    # NOTE(review): mutable default `caveats=[]` -- harmless here since it is
    # only read, never mutated, but `caveats=None` would be safer.
    def declare_ambiguous(xdf, caveats=[]):
        for ii in xdf.index:
            probtype = 'Colens' if not len(xdf[xdf.dwelling_start!='']) else 'Raphael'
            mt=f'Ambiguous ({probtype})' if len(xdf)>1 else 'Singular'
            # if caveats: mt+=f' ({", ".join(caveats)})'
            matchtyped[ii]=mt
            matchfoundd[ii]=True
            numpossd[ii]=len(xdf)
    def find_exact_matches(xdf):
        # Keep rows whose dwelling [start, end] fuzzily contains either the
        # event start or the event end date.
        erow=xdf.iloc[0]
        e1,e2=erow.event_start,erow.event_end
        match = xdf[[
            (is_fuzzy_date_seq(d1,e1,d2) or is_fuzzy_date_seq(d1,e2,d2))
            for (d1,d2) in zip(xdf.dwelling_start, xdf.dwelling_end)
        ]]
        logger.trace(f'found {len(match)} exact matches for {(e1, e2)} with options {list(zip(xdf.dwelling_start, xdf.dwelling_end))}')
        return match
    def declare_heuristic_miss(xdf, htype=''):
        # Rows excluded by a heuristic (dispreferred address / distance).
        for ii in xdf.index:
            matchtyped[ii]=f'Heuristic (excl.{" by "+htype if htype else ""})'
            matchfoundd[ii]=False
            excludedd[ii]=True
    # for every event...
    for e,edf in tqdm(df_event.groupby('event'), total=df_event.event.nunique(), desc='Locating events',disable=not progress):
        logger.trace(f'event: {e}, with {len(edf)} dwelling possibilities')
        ## if there are no dwellings at all...
        nadf=edf[edf.dwelling=='']
        declare_impossible(nadf)
        edf=edf[edf.dwelling!=''].drop_duplicates('dwelling')
        if not len(edf):
            logger.trace(f'for event {e}, no dwelling possibilities because empty dwellings')
            continue
        # if certainty is possible, i.e. we have dwelling records with start and end dates...
        edf_certain = edf.query('dwelling_start!="" & dwelling_end!=""')
        if len(edf_certain):
            logger.trace(f'for event {e}, certainty is possible, with {len(edf_certain)} possibilities')
            # is there a match? a point where start or end of event is within range of dwelling start or end?
            edf_match = find_exact_matches(edf_certain)
            # if so, then add indices only for the match, not the other rows
            if len(edf_match):
                logger.trace(f'for event {e}, a certain match was found, with {len(edf_match)} possibilities')
                declare_exact_match(edf_match)
                declare_exact_miss(edf[~edf.index.isin(edf_match.index)])
                continue
        # try dispreferred caveats
        caveats=[]
        edf0 = edf
        edf = edf[~edf.dwelling_address.isin(DISPREFERRED_ADDRESSES)]
        if not len(edf):
            logger.trace(f'for event {e}, only a dispreferred address remained; allowing')
            edf=edf0
        elif len(edf)!=len(edf0):
            declare_heuristic_miss(edf0[~edf0.index.isin(edf.index)], htype='dispref')
            caveats.append('-dispref')
        # try distance caveat
        edf0 = edf
        edf = edf[[get_dist_from_SCO(lat,lon)<50 for lat,lon in zip(edf.lat, edf.lon)]]
        if not len(edf):
            logger.trace(f'for event {e}, only non-Parisian places remaining; allowing')
            edf=edf0
        elif len(edf)!=len(edf0):
            declare_heuristic_miss(edf0[~edf0.index.isin(edf.index)], htype='distance')
            caveats.append('-distance')
        # otherwise, declare ambiguous?
        logger.trace(f'for event {e}, still no matches found. using all {len(edf)} possible indices')
        declare_ambiguous(edf,caveats=caveats)
    # add to dataframe a few stats on the dwelling matches
    df['dwelling_matchfound'] = matchfoundd
    df['dwelling_matchtype'] = matchtyped
    df['dwelling_numposs'] = numpossd
    df['dwelling_excluded'] = excludedd
    df['dwelling_likelihood'] = 1/df['dwelling_numposs']
    # return only ok rows
    return df.loc[~df.dwelling_excluded] | Princeton-CDH/geotaste | geotaste/datasets.py | datasets.py | py | 23,315 | python | en | code | 0 | github-code | 13 |
36590964440 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 19:53:08 2020
@author: michael
"""
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import csv
# Numeric gesture label -> class name (alternate names seen elsewhere noted).
_CLASS_NAMES = {
    0: 'Open',       # or sometimes Splay
    1: 'Neutral',
    2: 'Close',      # or sometimes Clench
    3: 'Scissors',   # or sometimes Peace
    4: 'ThumbsUp',
    5: 'Paper',      # or sometimes FlatOpen
    6: 'IdxPoint',
    7: 'FlatClose',
    8: 'FingerGun',
    9: 'waveout',    # or sometimes openout
    10: 'wavein',    # or sometimes openin
    11: 'closeout',
    12: 'closein',
}


def numtoClass(input_file, output_file):
    """Convert the numeric label column of an EMG CSV into a named class column.

    Reads *input_file* (a CSV with a header row whose last column is a numeric
    gesture label) and writes *output_file* with the same feature columns plus
    a final 'Class' column holding the label's name. Unknown labels map to
    'Undefined'.
    """
    # genfromtxt turns the header row into NaNs; [1:] skips it.
    csv_data = np.genfromtxt(input_file, delimiter=',')
    dataset = csv_data[1:, 0:-1]  # feature values (as floats)
    labels = csv_data[1:, -1]     # numeric label column
    # Re-read just the header names; replace the label column with 'Class'.
    with open(input_file) as readheads:
        headers = csv.DictReader(readheads).fieldnames
    headers = headers[:-1] + ['Class']
    with open(output_file, 'w', newline='') as csvfile:
        emgwriter = csv.writer(csvfile, delimiter=',', quotechar='"',
                               quoting=csv.QUOTE_MINIMAL)
        emgwriter.writerow(tuple(headers))
        for datarow, label in zip(dataset, labels):
            # dict lookup replaces the old 13-branch if/elif chain;
            # float labels (e.g. 1.0) hash equal to their int keys.
            class_name = _CLASS_NAMES.get(label, 'Undefined')
            emgwriter.writerow(tuple(list(datarow) + [class_name]))
    print('conversion success')
if __name__ == '__main__':
    # CLI entry point: arg1 = input CSV path, arg2 = output CSV path.
    # NOTE(review): the usage text says "input dir" but the code treats it
    # as a single input file -- confirm intended usage.
    if len(sys.argv) <3:
        print ('arg1: input dir\narg2: output file')
        sys.exit(-1)
    input_file = sys.argv[1]
    output_file = sys.argv[2]
    numtoClass(input_file, output_file) | mgpritchard/labelstoClass | labelstoClassEMG.py | labelstoClassEMG.py | py | 3,145 | python | en | code | 0 | github-code | 13 |
43062695754 | import requests
from urllib.parse import quote
import _thread
import time
import threading
import sys
threadLock = threading.Lock()
def oprint(*args):
    """Print *args* space-joined, prefixed with a bracketed local timestamp."""
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    body = ' '.join(str(arg) for arg in args)
    print('[' + stamp + ']\t' + body)
def thread_send_log(url, content, name):
    """Background-thread worker: send one log entry to the remote endpoint.

    Appends url-encoded *name* and *content* as query parameters to *url*
    and fires a GET with a 5s timeout. Calls are serialized through the
    module-level ``threadLock``. Any failure (network error, timeout) is
    reported to stdout and swallowed so the worker thread never raises.
    """
    # `with` replaces the manual acquire()/finally-release() pair.
    with threadLock:
        full_url = (url
                    + '&name=' + quote(name, 'utf-8')
                    + '&content=' + quote(content, 'utf-8'))
        try:
            requests.get(full_url, timeout=5)
        except Exception:
            print('\nsendLog network error!')
class StdLog(object):
    """File-backed stream wrapper that routes messages by inline tags.

    Intended as a replacement for sys.stdout/err: plain messages go to
    *filename*; messages tagged [Common]/[Warning]/[Error]/[OnlyFile] go to
    *common_path*; [TemporaryTag] messages go only to the original stream
    (and are dropped entirely when also tagged [OnlyFile]).

    NOTE(review): the opened files are never closed and no encoding is
    specified -- acceptable for a process-lifetime logger, but worth noting.
    """
    def __init__(self, filename='default.log', common_path='warning_log', stream=sys.stdout):
        # terminal: the original stream to mirror [TemporaryTag] output to.
        self.terminal = stream
        # Main log file, opened in append mode for the process lifetime.
        self.log = open(filename, 'a')
        # The common/warning log is opened lazily on first tagged message.
        self.common_log = None
        self.common_path = common_path
    def write(self, message):
        """Route *message* to the proper sink based on its inline tags."""
        message = str(message)
        if message.count('[TemporaryTag]') == 0:
            # Persistent messages: tagged ones go to the common log,
            # everything else to the main log.
            if message.count('[Common]') != 0 or message.count('[Warning]') != 0 \
                    or message.count('[Error]') != 0 or message.count('[OnlyFile]') != 0:
                if self.common_log is None:
                    self.common_log = open(self.common_path, 'a')
                # Strip routing-only tags before persisting.
                self.common_log.write(message.replace('[Common]', '').replace('[OnlyFile]', ''))
                self.common_log.flush()
            else:
                self.log.write(message)
                self.log.flush()
        else:
            # Transient messages are echoed to the terminal only,
            # unless additionally marked [OnlyFile] (then dropped).
            message = message.replace('[TemporaryTag]', '')
            if message.count('[OnlyFile]') == 0:
                self.terminal.write(message)
    def flush(self):
        """Flush every sink (file-like protocol compliance)."""
        self.terminal.flush()
        self.log.flush()
        if self.common_log is not None:
            self.common_log.flush()
class LogClass:
    """Optional remote logger: when enabled, ships entries on a background thread."""
    def __init__(self, on=False, url=None):
        # on: master switch; url: remote endpoint consumed by thread_send_log.
        self.on = on
        self.url = url
    def send_log(self, content, name):
        """Fire-and-forget: spawn a thread to deliver one log entry."""
        if not self.on:
            return
        try:
            _thread.start_new_thread(thread_send_log, (self.url, content, name))
        except Exception:
            print("Cloud Log Error")
| songzijiang/jacksung | jacksung/utils/log.py | log.py | py | 2,419 | python | en | code | 1 | github-code | 13 |
30883131556 | def find_parent():
    # Placeholder for a union-find parent lookup; currently unused, always 0.
    return 0
def solution(commands):
    """Process "table merge" commands (UPDATE / MERGE / UNMERGE / PRINT) on a 50x50 grid.

    NOTE(review): this looks like an unfinished attempt at the contest
    problem -- UPDATE parses coordinates but never stores a value, UNMERGE
    only parses its arguments, and `voc` is never populated, so PRINT always
    appends empty lists.
    """
    answer = []  # results collected by PRINT commands
    # gr: per-cell values; mer: per-cell merge-group markers; 1-indexed grid.
    gr = [[[] for i in range(51)] for j in range(51)]
    # NOTE(review): cells are initialized to [0] (a list), so the `==0`
    # comparisons below can never be true -- probably intended to be 0.
    mer = [[[0] for i in range(51)] for j in range(51)]
    mer_num = [[] for j in range(51)]  # member cells per merge-group id
    num=1  # next merge-group id to allocate
    voc = dict()  # voc is a plain dictionary (never filled in; see NOTE above)
    for c in commands:
        temp = c.split(" ")
        cc = temp[0]  # command keyword
        if cc=="UPDATE":
            if len(temp)==4:
                # UPDATE r c value -- coordinates parsed, value never stored.
                x,y = map(int,temp[1:3])
            else:
                # UPDATE value1 value2 form -- only echoes the first value.
                print(temp[1])
        elif cc=="MERGE":
            sx,sy,ex,ey = map(int,temp[1:])
            le = 0
            if mer[sx][sy]==0 and mer[ex][ey]==0:
                # Neither cell merged yet: start a new group.
                mer[sx][sy]=num
                mer[ex][ey]=num
                mer_num.append([sx,sy])
                mer_num.append([ex,ey])
                num+=1
            elif mer[sx][sy]!=0 and mer[ex][ey]!=0:
                # Both already merged.  NOTE(review): `le` is still 0 here,
                # so this loop body never runs.
                t= mer[sx][sy]
                for i in range(le):
                    x,y = mer_num[t][i]
                    mer[x][y]=t
                    mer_num[t].append([x,y])
            else:
                # Exactly one side merged: pull the other into its group.
                t = mer[sx][sy] if mer[sx][sy] else mer[ex][ey]
                le = len(mer_num[t])
                for i in range(le):
                    x,y = mer_num[t][i]
                    mer[x][y]=t
                    mer_num[t].append([x,y])
            # Share the (source or target) value across the merged pair.
            if gr[sx][sy]!=[]:
                gr[ex][ey]=gr[sx][sy]
            elif gr[ex][ey]!=[]:
                gr[sx][sy]=gr[ex][ey]
        elif cc=="PRINT":
            answer.append(gr[int(temp[1])][int(temp[2])])
        else:
            # UNMERGE -- arguments parsed but nothing is undone.
            x,y = map(int,temp[1:])
    # print(gr[1][3])
    # print(gr[1][4])
    # print(voc)
    return answer
# Smoke test taken from the problem statement's sample commands.
print(solution(["UPDATE 1 1 menu", "UPDATE 1 2 category",
               "UPDATE 2 1 bibimbap", "UPDATE 2 2 korean",
               "UPDATE 2 3 rice", "UPDATE 3 1 ramyeon",
               "UPDATE 3 2 korean", "UPDATE 3 3 noodle",
               "UPDATE 3 4 instant", "UPDATE 4 1 pasta",
               "UPDATE 4 2 italian", "UPDATE 4 3 noodle",
               "MERGE 1 2 1 3", "MERGE 1 3 1 4",
               "UPDATE korean hansik", "UPDATE 1 3 group",
               "UNMERGE 1 4",
               "PRINT 1 3", "PRINT 1 4"]
              )) | weeeeey/programmers | 표 병합.py | 표 병합.py | py | 2,338 | python | en | code | 0 | github-code | 13 |
45739942374 | # PYTHON configuration file for class: OffsetAnalysis
# Author: C. Harrington
# Date: 19 - January - 2015
import FWCore.ParameterSet.Config as cms
process = cms.Process("Ana")
process.load("FWCore.MessageService.MessageLogger_cfi")
# Only report framework progress every 1000 events.
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
# -1 => process all events in the input files.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
readFiles = cms.untracked.vstring()
process.source = cms.Source ("PoolSource", fileNames = readFiles)
readFiles.extend( [
  '/store/mc/RunIIFall15DR76/SingleNeutrino/AODSIM/PU25nsData2015v1_76X_mcRun2_asymptotic_v12-v1/00000/004252D9-C094-E511-928F-AC162DA8C2B0.root'
  # '/store/data/Run2015D/ZeroBias1/AOD/16Dec2015-v1/10000/180AD4E6-D0B0-E511-B655-0CC47A4D7662.root'
  ] );
# Toggle between simulation (True) and collision data (False); also switches
# the output-file suffix below.
isMC = cms.bool(True)
OutputName = "_MC"
# Data-only configuration: geometry/global tag, trigger selection, and
# event-cleaning filters.
if isMC == False:
  process.load( "Configuration.Geometry.GeometryIdeal_cff" )
  process.load( "Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff" )
  process.load( "Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff" )
  from Configuration.AlCa.GlobalTag import GlobalTag
  process.GlobalTag = GlobalTag( process.GlobalTag, '76X_dataRun2_v15' )
  # ZeroBias Trigger
  process.HLTZeroBias =cms.EDFilter("HLTHighLevel",
      TriggerResultsTag = cms.InputTag("TriggerResults","","HLT"),
      HLTPaths = cms.vstring('HLT_ZeroBias_*'),
      eventSetupPathsKey = cms.string(''),
      andOr = cms.bool(True), #----- True = OR, False = AND between the HLTPaths
      throw = cms.bool(False)
  )
  #Beam Halo
  process.load('RecoMET.METFilters.CSCTightHaloFilter_cfi')
  #PV Filter
  #process.primaryVertexFilter = cms.EDFilter( "GoodVertexFilter",
  #  vertexCollection = cms.InputTag('offlinePrimaryVertices'),
  #  minimumNDOF = cms.uint32(4),
  #  maxAbsZ = cms.double(24),
  #  maxd0 = cms.double(2) )
  #HCAL HBHE
  process.load('CommonTools.RecoAlgos.HBHENoiseFilterResultProducer_cfi')
  process.HBHENoiseFilterResultProducer.minZeros = cms.int32(99999)
  process.ApplyBaselineHBHENoiseFilter = cms.EDFilter('BooleanFlagFilter',
#    inputLabel = cms.InputTag('HBHENoiseFilterResultProducer','HBHENoiseFilterResult'), # this is for the 50ns
#    inputLabel = cms.InputTag('HBHENoiseFilterResultProducer','HBHENoiseFilterResultRun2Loose'), # this is for the 25ns
     inputLabel = cms.InputTag('HBHENoiseFilterResultProducer','HBHENoiseFilterResultRun2Tight'), # this is for the 25ns
     reverseDecision = cms.bool(False)
  )
  #Bad EE Supercrystal filter
  process.load('RecoMET.METFilters.eeBadScFilter_cfi')
  OutputName = "_Data"
# Parameters shared by every OffsetAnalysis instance below.
params = cms.PSet(
    numSkip = cms.int32(89),
    maxNPV = cms.int32(50),
    maxNPU = cms.int32(50),
    maxRho = cms.int32(50),
    isMC = isMC,
    reweight = cms.bool(False),
    pvTag = cms.InputTag("offlinePrimaryVertices"),
    puTag = cms.InputTag("addPileupInfo"),
    pfTag = cms.InputTag("particleFlow"),
    rhoTag = cms.InputTag("fixedGridRhoFastjetAll")
)
# One analyzer per jet-cone radius; only R=0.4 is active in the sequence.
process.pfR4 = cms.EDAnalyzer("OffsetAnalysis",
    params,
    RootFileName = cms.string("Offset" + OutputName + "_R4.root"),
    coneDR = cms.double(0.4),
)
process.pfR5 = cms.EDAnalyzer("OffsetAnalysis",
    params,
    RootFileName = cms.string("Offset" + OutputName + "_R5.root"),
    coneDR = cms.double(0.5),
)
process.pfR7 = cms.EDAnalyzer("OffsetAnalysis",
    params,
    RootFileName = cms.string("Offset" + OutputName + "_R7.root"),
    coneDR = cms.double(0.7),
)
process.pfR8 = cms.EDAnalyzer("OffsetAnalysis",
    params,
    RootFileName = cms.string("Offset" + OutputName + "_R8.root"),
    coneDR = cms.double(0.8),
)
process.myseq = cms.Sequence( process.pfR4 )
                              #process.pfR5 *
                              #process.pfR7 *
                              #process.pfR8 )
# MC runs only the analyzers; data prepends trigger and cleaning filters.
if isMC :
  process.p = cms.Path( process.myseq )
else:
  process.p = cms.Path( process.HLTZeroBias *
                        process.CSCTightHaloFilter *
                        process.HBHENoiseFilterResultProducer *
                        process.ApplyBaselineHBHENoiseFilter *
                        #process.primaryVertexFilter *
                        process.eeBadScFilter *
                        process.myseq )
| cihar29/OffsetAnalysis | run_offset.py | run_offset.py | py | 4,443 | python | en | code | 0 | github-code | 13 |
21374398503 | import unittest
from project.hardware.hardware import Hardware
from project.hardware.power_hardware import PowerHardware
from project.hardware.heavy_hardware import HeavyHardware
from project.software.light_software import LightSoftware
from project.software.express_software import ExpressSoftware
class TestHardware(unittest.TestCase):
    """Unit tests for the Hardware/Software class hierarchy.

    NOTE(review): test_install_with_extra_capacity_or_memory_should_raise_value_error
    expects ValueError, while the commented-out reference implementation at the
    bottom of this file raises a plain Exception -- verify against the actual
    project code.
    """
    def setUp(self):
        # Fresh fixtures for each test; derived hardware classes scale the
        # raw capacity/memory values (asserted in the tests below).
        self.hardware = Hardware("test", "test_type", 10, 100)
        self.power_hardware = PowerHardware("power", 20, 50)
        self.heavy_hardware = HeavyHardware("heavy", 20, 50)
        self.light_software = LightSoftware("light", 10, 10)
        self.express_software = ExpressSoftware("express", 10, 10)
    def test_hardware(self):
        # Base class stores constructor arguments verbatim.
        self.assertEqual('test', self.hardware.name)
        self.assertEqual("test_type", self.hardware.type)
        self.assertEqual(10, self.hardware.capacity)
        self.assertEqual(100, self.hardware.memory)
        self.assertEqual([], self.hardware.software_components)
    def test_heavy_hardware(self):
        # Heavy hardware: capacity doubled (40 from 20), memory reduced (37 from 50).
        self.assertEqual('heavy', self.heavy_hardware.name)
        self.assertEqual("Heavy", self.heavy_hardware.type)
        self.assertEqual(40, self.heavy_hardware.capacity)
        self.assertEqual(37, self.heavy_hardware.memory)
        self.assertEqual([], self.heavy_hardware.software_components)
    def test_power_hardware(self):
        # Power hardware: capacity reduced (5 from 20), memory boosted (87 from 50).
        self.assertEqual('power', self.power_hardware.name)
        self.assertEqual("Power", self.power_hardware.type)
        self.assertEqual(5, self.power_hardware.capacity)
        self.assertEqual(87, self.power_hardware.memory)
        self.assertEqual([], self.power_hardware.software_components)
    def test_light_software(self):
        # Light software: capacity scaled up (15 from 10), memory halved (5 from 10).
        self.assertEqual('light', self.light_software.name)
        self.assertEqual("Light", self.light_software.type)
        self.assertEqual(15, self.light_software.capacity_consumption)
        self.assertEqual(5, self.light_software.memory_consumption)
    def test_express_software(self):
        # Express software: capacity kept, memory doubled (20 from 10).
        self.assertEqual('express', self.express_software.name)
        self.assertEqual("Express", self.express_software.type)
        self.assertEqual(10, self.express_software.capacity_consumption)
        self.assertEqual(20, self.express_software.memory_consumption)
    def test_install_with_extra_capacity_or_memory_should_raise_value_error(self):
        # Installing software that exceeds capacity/memory must fail.
        light_software = LightSoftware("light", 1000, 1000)
        with self.assertRaises(ValueError) as ex:
            self.hardware.install(light_software)
        self.assertEqual("Software cannot be installed", str(ex.exception))
    def test_install_check_installed_components(self):
        light_software = LightSoftware("light", 1, 10)
        self.assertEqual(len(self.hardware.software_components), 0)
        self.hardware.install(light_software)
        self.assertEqual(len(self.hardware.software_components), 1)
    def test_uninstall(self):
        # Install then uninstall returns the component list to empty.
        light_software = LightSoftware("light", 1, 10)
        self.assertEqual(len(self.hardware.software_components), 0)
        self.hardware.install(light_software)
        self.assertEqual(len(self.hardware.software_components), 1)
        self.hardware.uninstall(light_software)
        self.assertEqual(len(self.hardware.software_components), 0)
    def test_light_components_length(self):
        # With only Light components installed, the Light count equals the total.
        count = 0
        ls = LightSoftware("light1", 1, 10)
        ls2 = LightSoftware("light2", 2, 20)
        self.hardware.install(ls)
        self.hardware.install(ls2)
        for s in self.hardware.software_components:
            if s.type == "Light":
                count += 1
        self.assertEqual(count, len(self.hardware.software_components))
    def test_express_components_length(self):
        # With only Express components installed, the Express count equals the total.
        count = 0
        ex = ExpressSoftware("express1", 1, 10)
        ex2 = ExpressSoftware("express2", 2, 20)
        self.hardware.install(ex)
        self.hardware.install(ex2)
        for s in self.hardware.software_components:
            if s.type == "Express":
                count += 1
        self.assertEqual(count, len(self.hardware.software_components))
    def test_custom_repr(self):
        # __repr__ must summarize counts, usage totals, type and component names.
        ls = LightSoftware("light1", 1, 10)
        ex = ExpressSoftware("express1", 1, 10)
        self.hardware.install(ls)
        self.hardware.install(ex)
        self.assertEqual(
            repr(self.hardware),
            "Hardware Component - test\n"
            "Express Software Components: 1\n"
            "Light Software Components: 1\n"
            "Memory Usage: 25 / 100\n"
            "Capacity Usage: 2 / 10\n"
            "Type: test_type\n"
            "Software Components: light1, express1"
        )
if __name__ == "__main__":
    # Allow running this test module directly (outside a test runner).
    unittest.main()
# class Hardware:
# def __init__(self, name, type, capacity, memory):
# self.name = name
# self.type = type
# self.capacity = capacity
# self.memory = memory
# self.software_components = []
#
# def install(self, software):
# if self.can_install(software):
# self.software_components.append(software)
# else:
# raise Exception("Software cannot be installed")
#
# def uninstall(self, software):
# self.software_components.remove(software)
#
# def get_light_software_components_count(self):
# return len([s for s in self.software_components if s.type == "Light"])
#
# def get_express_software_components_count(self):
# return len([s for s in self.software_components if s.type == "Express"])
#
# def can_install(self, software):
# has_space = sum([s.capacity_consumption for s in self.software_components]) + software.capacity_consumption <= self.capacity
# has_memory = sum([s.memory_consumption for s in self.software_components]) + software.memory_consumption <= self.memory
# return has_memory and has_space
#
# def __repr__(self):
# result = [f"Hardware Component - {self.name}",
# f"Express Software Components: {self.get_express_software_components_count()}",
# f"Light Software Components: {self.get_light_software_components_count()}",
# f"Memory Usage: {sum([s.memory_consumption for s in self.software_components])} / {self.memory}",
# f"Capacity Usage: {sum([s.capacity_consumption for s in self.software_components])} / {self.capacity}",
# f"Type: {self.type}",
# f"Software Components: {', '.join([str(s) for s in self.software_components]) if self.software_components else 'None'}"]
#
# return "\n".join(result)
#
# class Software:
# def __init__(self, name:str, type:str, capacity_consumption:int, memory_consumption:int):
# self.name = name
# self.type = type
# self.capacity_consumption = capacity_consumption
# self.memory_consumption = memory_consumption
#
# def __repr__(self):
# return self.name
#
# from project.software.software import Software
#
#
# class ExpressSoftware(Software):
# def __init__(self, name, capacity_consumption, memory_consumption):
# super().__init__(name, "Express", int(capacity_consumption), int(memory_consumption * 2))
#
# from project.hardware.hardware import Hardware
# from project.software.express_software import ExpressSoftware
#
# class Test:
# pass
| StanShishmanov/Python-Courses | Python OOP Exercises/14. Exam/tests/test_hardware.py | test_hardware.py | py | 7,368 | python | en | code | 0 | github-code | 13 |
22409209126 | from flask import jsonify
from flask_restful import reqparse, Resource
import json
from app import session
import app.const as const
from common.utils import AlchemyEncoder
from model.movie import Movie, create_movie
from common.spider import Spider
from common.template import err_res, success_res
# Argument parsing for the RESTful API endpoints.
parser = reqparse.RequestParser()
parser.add_argument('page_now', type=int, required=True, help="need page_now data")
parser.add_argument('page_size', type=int, required=True, help="need page_size data")
class Page_movies(Resource):
    """Paginated Top-250 movie listing: serve from the DB cache, else crawl and cache."""
    def get(self):
        """Return one page of movies as (page_now, page_size) selects it."""
        args = parser.parse_args()
        # NOTE(review): both arguments are declared required=True on the parser,
        # so parse_args() already aborts with a 400 before these guards run.
        if 'page_now' not in args:
            return err_res(code=0, msg='no page_now')
        if 'page_size' not in args:
            return err_res(code=0, msg='no page_size')
        page_now = args['page_now']
        page_size = args['page_size']
        # Only 250 movies exist in total.
        if page_now * page_size > 250:
            return err_res(code=0, msg='无更多电影')  # "no more movies"
        # Serve from cache when the requested id range is already stored.
        cached_movies = session.query(Movie).filter(Movie.id.between((page_now - 1) * page_size + 1, page_now * page_size)).all()
        if len(cached_movies):
            return success_res(code=1000, data=cached_movies, msg='success')
        try:
            # Cache miss: crawl the source site, persist every movie, re-query.
            spider = Spider()
            movies = spider.get_movies(const.BASE_URL)
            for movie in movies:
                create_movie(movie)
            cached_movies = session.query(Movie).filter(Movie.id.between((page_now - 1) * page_size + 1, page_now * page_size)).all()
            return success_res(code=1000, data=cached_movies, msg='success')
        except:
            # NOTE(review): bare except hides the real failure (crawl or DB
            # error) -- prefer `except Exception` plus logging.
            return err_res(code=0 , msg='err') | chenwangji/douban_movie_top250_server | handler/hd_movie.py | hd_movie.py | py | 1,544 | python | en | code | 1 | github-code | 13 |
24533209100 | import sys
# Load the precomputed prime table (one comma-separated line) and keep only
# the 6-digit primes above the known lower bound for the answer.
with open('prime_list_comma.csv', 'r') as f:
    overall_list_string = f.readline().split(',')
small = 56002  # lower bound: the answer is known to be larger than this
prime_list = [int(elem) for elem in overall_list_string if 1000000 > int(elem) > small]
prime_set = set(prime_list)  # O(1) membership checks in the search loop
def is_triple(n):
    """
    :param n: integer n
    :return: (False,) if no digit of n occurs exactly three times, otherwise
             (True, sorted list of the digits that occur exactly three times)
    """
    # Counter replaces the hand-rolled character-frequency dict.
    counts = Counter(str(n))
    result = sorted(int(ch) for ch, cnt in counts.items() if cnt == 3)
    if not result:
        # Keep the original one-element tuple shape for callers using [0]/[1].
        return False,
    return True, result
def index(l, m):
    """Return every position of *m* in list *l*.

    :raises ValueError: if *m* does not occur in *l*.
    """
    positions = [pos for pos, item in enumerate(l) if item == m]
    if not positions:
        raise ValueError('second argument is not in the first argument number')
    return positions
def list_to_int(l):
    """
    :param l: list of single-digit integers, most significant digit first
    :return: the integer formed by concatenating the digits (0 for an empty list)
    :raises ValueError: if an element is not an int, or not in 0..9
    """
    for elem in l:
        # Type check FIRST: the old order reported a non-int element such as
        # 1.5 or '12' with the misleading "not a single digit" message.
        if not isinstance(elem, int):
            raise ValueError('a single digit element is not integer type')
        if not 0 <= elem <= 9:
            raise ValueError('not a single digit element list')
    value = 0
    for digit in l:
        value = value * 10 + digit
    return value
def change_digit(n, m):
    """
    :param n: integer number input
    :param m: a 1-digit number whose occurrences are to be replaced
    :return: the ten numbers obtained by replacing ALL occurrences of m in n
             with each digit 0-9 (leading zeros shorten those candidates)
    :raises ValueError: if m does not occur in n
    """
    digits = [int(ch) for ch in str(n)]
    if m not in digits:
        raise ValueError('second argument is not in the first argument number')
    positions = index(digits, m)
    result = []
    for j in range(10):
        # Replace every occurrence simultaneously, then record ONE candidate.
        # (The previous version appended after each single-position change,
        # producing mixed partial replacements that are not family members
        # for Project Euler 51 and skewed the count==8 check downstream.)
        for pos in positions:
            digits[pos] = j
        result.append(list_to_int(digits))
    return result
# Search: for each prime with a digit appearing exactly three times, replace
# that digit with 0-9 and count how many of the resulting numbers are prime.
# The answer is the first prime whose replacement family contains exactly 8
# primes (the family includes the prime itself).
triple_prime_list = [(prime, is_triple(prime)[1]) for prime in prime_list if is_triple(prime)[0]]
for prime in triple_prime_list:
    for elem in prime[1]:
        count = 0
        for e in change_digit(prime[0], elem):
            if e in prime_set:
                count += 1
        if count == 8:
            print(prime[0])
            sys.exit()
| hiparkgss/Project_Euler_coding_practice | problem_51.py | problem_51.py | py | 2,273 | python | en | code | 0 | github-code | 13 |
3929162085 | # import imp
from typing import final
from jmespath import search
from transformers import TopKLogitsWarper
from graphservice.neoservice import neoconnection
from sentence_transformers import SentenceTransformer, util
# import torch
import json
import pandas as pd
import numpy as np
from evaluation.calculate_metrics import MetricsCalculator
from graphservice.search import search_similar_profiles
from tqdm import tqdm
# Load sentence transformer model
# model_name='all-mpnet-base-v2'
# model = SentenceTransformer('sentence-transformers/' + model_name) #, device=device)
class Evaluator():
    """Evaluates profile-similarity search results against embedding-based relevance.

    Relevance of a target profile to a source profile is the mean, over the
    target's experience descriptions, of the max cosine similarity to any of
    the source's experience-description embeddings.
    """
    def __init__(self, model_name='all-mpnet-base-v2') -> None:
        # Sentence-transformer kept for optional on-the-fly encoding (the
        # commented-out model.encode calls below); scoring currently uses the
        # precomputed embeddings loaded next.
        self.model = SentenceTransformer('sentence-transformers/' + model_name) #, device=device)
        self.rel_scores = None
        with open('data/extracted_resumes.json', 'r', encoding='utf-8') as file:
            data: dict = json.load(file)
        data = pd.DataFrame(data['experiences'])
        data.reset_index(inplace=True)
        data.rename(columns={'index':'ind'}, inplace=True)
        # NOTE(review): filename spelled with three d's ('embedddings') --
        # presumably matches the file on disk; verify.
        exp_embeddings = np.load('data/experiences_embedddings.npy')
        # row[0] is the 'ind' column, i.e. the row's positional index into
        # the embeddings array.
        data['emb'] = data.apply(lambda row: exp_embeddings[row[0]], axis=1)
        self.data = data
    def get_source_relevance_scores(self, sourceId, targetIds):
        """Return a DataFrame (sourceId, targetId, relevance) of cosine-based scores."""
        # Get source profile duties information
        tx = neoconnection.graph.auto(readonly=True)
        statement = """
            MATCH (n {ID:$sourceId})-[:HAS_EXPERIENCE]-(exp)
            MATCH (exp)-[:HAS_DESCRIPTION]-(dut)
            RETURN n.ID as ID, dut.experience_description as experience_description
            """
        params = {'sourceId': sourceId}
        new_node = tx.run(statement, params)
        source_info = new_node.to_data_frame()
        # Get target profiles duties information
        tx = neoconnection.graph.auto(readonly=True)
        statement = """
            MATCH (n)-[:HAS_EXPERIENCE]-(exp)
            MATCH (exp)-[:HAS_DESCRIPTION]-(dut)
            WHERE n.ID in $targetIds
            RETURN n.ID as ID, dut.experience_description as experience_description
            """
        params = {'targetIds': targetIds}
        # print(targetIds)
        new_node = tx.run(statement, params)
        res = new_node.to_data_frame()
        # src_duty_embeddings = self.model.encode(source_info['experience_description'], show_progress_bar=True)
        # trg_duty_embeddings = self.model.encode(res['experience_description'], show_progress_bar=True)
        # print(trg_duty_embeddings.shape)
        # Use the precomputed embeddings cached on self.data instead of
        # re-encoding descriptions for every call.
        src_duty_embeddings= self.data[self.data['ID'].isin([sourceId])]['emb'].values
        src_duty_embeddings = np.stack(src_duty_embeddings)
        # print(src_duty_embeddings.shape)
        # src_duty_embeddings = torch.from_numpy(src_duty_embeddings)
        trg_duty_embeddings = self.data[self.data['ID'].isin(targetIds)]['emb'].values
        # print(type(trg_duty_embeddings))
        # print(trg_duty_embeddings)
        # trg_duty_embeddings = torch.from_numpy(trg_duty_embeddings)
        trg_duty_embeddings = np.stack(trg_duty_embeddings)
        # print(trg_duty_embeddings.shape)
        cosine_scores = util.pytorch_cos_sim(trg_duty_embeddings, src_duty_embeddings)
        # print(cosine_scores)
        # Best-matching source description per target description, then the
        # mean per target profile ID.
        max_cos_scores = cosine_scores.max(axis=1).values.tolist()
        res['cos_scores'] = max_cos_scores
        rel_scores = res.groupby('ID').mean()['cos_scores']
        rel_scores = rel_scores.reset_index().rename(columns={'ID':'targetId', 'cos_scores':'relevance'})
        multiplied_sourceId = [sourceId]*rel_scores.shape[0]
        rel_scores.insert(loc=0, column='sourceId', value=multiplied_sourceId)
        # self.rel_scores = rel_scores
        return rel_scores
    def get_sim_profiles_results(self, sourceIds, evaluation_path, test_name, sim_score_formula='formula4',
                                 no_cross_sector=False, topk=30):
        """Run similarity search for every source profile and score the rankings."""
        total_res = []
        for sourceId in tqdm(sourceIds):
            res = search_similar_profiles(**{'sourceId':sourceId, 'sim_score':sim_score_formula,
                                             'no_cross_sector':no_cross_sector, 'topk':topk})
            targetIds = [item['profile']['ID'] for item in res]
            total_res.append({'source_id':sourceId, 'results':targetIds})
        test_scores = self.evaluate_results(total_res, evaluation_path, test_name, topk=topk)
        return test_scores
    def evaluate_results(self, results_path, evaluation_path, test_name, topk=10):
        """Score ranking results with MAP/aDCG/MRR and persist them.

        :params
            results_path: Either a path to a json file, or an already-loaded
                list of ranking results, one entry per query:
                example = [{'source_id':'<ID>', 'results': ['prof_id1','prof_id2',...],
                            time: <timeinseconds>}, {...},...]
        """
        if type(results_path) == str:
            with open(results_path, 'r', encoding='utf-8') as file:
                results: dict = json.load(file)
        else:
            results = results_path
        # print(results)
        # Accumulate relevance scores for every (source, target) pair seen.
        final_rels = pd.DataFrame(columns=['sourceId', 'targetId', 'relevance'])
        for result in results:
            sourceId = result['source_id']
            targetIds = result['results']
            rels = self.get_source_relevance_scores(sourceId, targetIds)
            final_rels = pd.concat([final_rels, rels])
        final_rels.drop_duplicates(subset=['sourceId', 'targetId'], inplace=True)
        final_rels.set_index(['sourceId', 'targetId'], inplace=True)
        metrics_obj = MetricsCalculator(final_rels, results, test_name)
        q1 = metrics_obj.map_adcg_scores(k=topk)
        q2 = metrics_obj.mean_reciprocal_rank()
        test_scores = metrics_obj.save_results(evaluation_path)
        return test_scores
if __name__ == '__main__':
    # Build the evaluation sample: reproducibly shuffle the resume set and
    # keep up to 10 resumes per category.
    df = pd.read_csv('data/Resume.csv')
    df_shuf = df.sample(frac=1, random_state=2022).reset_index(drop=True)
    df_final = pd.DataFrame()
    for categ, item in df_shuf.groupby('Category'):
        df_final = pd.concat([df_final, item.iloc[:10]])
    frm_ids = df_final['ID'].values.tolist()
    evl = Evaluator()
    topk_skills = 10 # This depends on the topk number of skills extracted by the SkillExtractor
    # Grid over every scoring formula, with and without cross-sector results.
    formulas = [f'formula{str(i+1)}' for i in range(4)]
    cross_sector_options = [False, True]
    for option in cross_sector_options:
        no_cross_sector = "no_cross_sector" if option else ""
        for formula in formulas:
            print(formula+no_cross_sector)
            test_scores = evl.get_sim_profiles_results(sourceIds=frm_ids, evaluation_path='tests/results.csv',
                        test_name=f'top{str(topk_skills)}_{formula}_{no_cross_sector}', sim_score_formula=formula,
                        no_cross_sector=option, topk=10)
| ikonstas-ds/Resume2Skill-SE | evaluation/evaluation_setup.py | evaluation_setup.py | py | 6,910 | python | en | code | 0 | github-code | 13 |
21547516045 | test_list = input("Введите числа через запятую")
# Prompt above: "Enter numbers separated by commas" (Russian).
# NOTE(review): this checks the LENGTH OF THE INPUT STRING (character count),
# not the count or range of order values, and the else-branch does not stop
# execution -- `new_list` may be undefined below when the check fails.
if (len(test_list)) in range(1, 500):
    new_list = test_list.split(",")
    for i in range(0, len(new_list)):
        new_list[i] = int(new_list[i])
else:
    print("Объем заказов не может быть меньше 0 и большее 500")
days = int(input("Введите количество дней: "))
# NOTE(review): an invalid day count is reported but execution continues,
# so a zero `days` still reaches the division below.
if days < 1 or days > 50000:
    print("Количество дней должно быть больше 0 и меньше 50000")
# Integer average order volume per day.
condition = int(sum(new_list) / days)
print(f"Mинимальное условие {condition}")
| nikolayparfianovich/nikolayparfianovich | First_Task/test_one.py | test_one.py | py | 629 | python | ru | code | 0 | github-code | 13 |
30568743272 | from uuid import UUID, uuid4
from orjson import dumps, loads
from pydantic import BaseModel as PydanticBaseModel
from pydantic import Field
def orjson_dumps(v, *, default) -> str:
    """Serialize *v* with orjson and return str (orjson itself emits bytes)."""
    serialized = dumps(v, default=default)
    return serialized.decode()
class BaseModelAPI(PydanticBaseModel):
    """Base pydantic model for the API: (de)serializes via orjson for speed."""
    class Config:
        json_loads = loads
        json_dumps = orjson_dumps
class BaseModelID(BaseModelAPI):
    """API model carrying an auto-generated UUID identity field."""
    # default_factory gives each instance its own fresh UUID4 (a plain
    # default would be shared across instances).
    id: UUID = Field(
        default_factory=uuid4,
        description="Entity id",
    )
    class Config:
        title = "Basic model"
        # NOTE: the example id is generated once at import time, so the
        # OpenAPI schema shows a fixed sample value.
        schema_extra = {
            "example": {
                "id": uuid4(),
            },
        }
| SamMeown/billing_service | app/models/_base.py | _base.py | py | 631 | python | en | code | 0 | github-code | 13 |
26522891814 | from flask import make_response
import json
from json2html import *
def response_OK(query_json, status_code):
    """Serialize *query_json* and wrap it in a response carrying *status_code*."""
    return make_response(json.dumps(query_json, indent=4), status_code)
def response_OK_created(query_json, status_code):
    """Creation response: JSON body plus Content-Location of the new entity."""
    resp = make_response(json.dumps(query_json, indent=4), status_code)
    resp.mimetype = 'application/json'
    resp.headers.set("Content-Location", query_json["self"])
    return resp
def response_OK_json(query_json, status_code):
    """JSON response with an explicit Content-Type header."""
    resp = make_response(json.dumps(query_json, indent=4), status_code)
    resp.mimetype = 'application/json'
    resp.headers.set("Content-Type", "application/json")
    return resp
def response_OK_html(query_json, status_code):
    """Render *query_json* as an HTML table response."""
    rendered = json2html.convert(json = json.dumps(query_json, indent=4))
    resp = make_response(rendered)
    resp.status_code = status_code
    resp.mimetype = 'text/html'
    resp.headers.set("Content-Type", "text/html")
    return resp
def response_303(query_json):
    """303 See Other; the Location header points at the resource's canonical URL."""
    body = json.dumps(query_json, indent=4)
    response = make_response(body, 303)
    response.mimetype = 'application/json'
    response.headers.set("Location", query_json["self"])
    return response
def _response_400(message):
    """Build a 400 response wrapping *message* in the standard {"Error": ...} envelope.

    Extracted because the five 400 builders below were byte-for-byte clones
    apart from their message text.
    """
    res_json = json.dumps({"Error": message}, indent=4)
    return make_response(res_json, 400)


def response_400_missing_truckdata():
    """400: the truck payload is missing one or more required attributes."""
    return _response_400(
        "The request object is missing at least one of the required attributes"
        ". Request must have name, length, type, and public attributes"
    )


def response_400_invalid_truckdata():
    """400: the truck payload contains at least one malformed attribute."""
    return _response_400(
        "The request object has at least one invalid attribute. "
        "Only alphanumeric characters are supported for name and type. "
        "Only digits are supported for length. "
        "Only booleans are supported for public. "
    )


def response_400_missing_loaddata():
    """400: the load payload is missing one or more required attributes."""
    return _response_400(
        "The request object is missing at least one of the required attributes"
        ". Request must have volume, item, and quantity attributes"
    )


def response_400_invalid_loaddata():
    """400: the load payload contains at least one malformed attribute."""
    return _response_400(
        "The request object has at least one invalid attribute. "
        "Only alphanumeric characters are supported for item. "
        "Only digits are supported for volume and quantity."
    )


def response_400_old_data():
    """400: a PUT repeated the original values instead of changing them."""
    return _response_400(
        "PUT requests must have all required attributes and must be different"
        " from the original values."
    )
def _error_response(message, status_code):
    """Build an error response wrapping *message* in the standard {"Error": ...} envelope.

    Extracted because the 401/403/404 builders below were byte-for-byte clones
    apart from message text and status code.
    """
    res_json = json.dumps({"Error": message}, indent=4)
    return make_response(res_json, status_code)


def response_401_mismatch():
    """401: the JWT subject does not match the owner ID in the URL."""
    return _error_response("Provided JWT does not match owner ID in URL", 401)


def response_403_truckname():
    """403: the requested truck name is already taken."""
    return _error_response(
        "The request contains a truck name that already exists", 403
    )


def response_403_truck():
    """403: authenticated, but the truck belongs to another owner."""
    return _error_response(
        "JWT is valid, but the truck is owned by someone else", 403
    )


def response_403_load():
    """403: the load is already assigned to a carrier."""
    return _error_response(
        "This load has already been assigned. You must first remove the current load from the carrier",
        403,
    )


def response_404_truck():
    """404: unknown truck_id."""
    return _error_response("No truck with this truck_id exists", 404)


def response_404_owner():
    """404: unknown owner ID."""
    return _error_response("No owner with this owner ID exists", 404)


def response_404_load():
    """404: unknown load ID."""
    return _error_response("No load with this load ID exists", 404)


def response_404_both():
    """404: either the truck ID or the load ID is unknown."""
    # BUG FIX: the original string concatenation was missing a space and
    # produced "...truck ID existsor no load...".
    return _error_response(
        "Either no truck with this truck ID exists"
        " or no load with this load ID exists",
        404,
    )


def response_404_load_on_truck():
    """404: the load exists but is not assigned to this truck."""
    return _error_response("The specified load is not on this truck", 404)
def response_405():
    """405: request method not allowed; advertises the permitted methods."""
    res_json = json.dumps({
        "Error":
        "Request method not allowed"
    }, indent=4)
    response = make_response(res_json, 405)
    # BUG FIX: the Allow header must be a comma-separated string; passing a
    # Python list serialized it as "['POST', 'GET']".
    response.headers.set("Allow", "POST, GET")
    return response
def response_406():
    """406: the Accept header requests a MIME type this API cannot produce."""
    payload = json.dumps(
        {"Error": "Request Accept header has a MIME type that is not supported"},
        indent=4,
    )
    return make_response(payload, 406)
def response_415():
    """415: the request body was not application/json."""
    payload = json.dumps(
        {"Error": "Request must be in application/json format"},
        indent=4,
    )
    return make_response(payload, 415)
| chenste-osu/truckerapi | response.py | response.py | py | 5,719 | python | en | code | 1 | github-code | 13 |
36137465601 | """
Identity management
Scenario usage:
Logging in
def login(request):
# ... verify username/password or whatever
request.identity.set(real_id, actor_id)
Retrieving actor
def display_name(request):
if not request.identity.is_set():
return HTTPForbidden("No idea who you are?")
user = model.user.get(request.identity.id)
# .. display user or whatever
Switching identities
    def su(request):
switch_to = request.params.get('switch_to')
if not model.user.can_switch_to(request.identity.real_id, switch_to):
return HTTPForbidden("No authority to switch")
        request.identity.act_as(request.params.get('switch_to'))
# .. display confirmation
Changed password
    def set_password(request):
# .. set password etc
request.identity.changed_credentials()
Logging out (this session only)
def logout(request):
request.identity.logout()
Logging out (all sessions for this user)
def logout_all(request):
request.identity.logout_all()
"""
import logging

logging.basicConfig()
# NOTE(review): convention is getLogger(__name__); getLogger(__file__) keys the
# logger on the file path. Kept as-is so any logging config bound to this name
# keeps working.
log = logging.getLogger(__file__)
log.setLevel(logging.DEBUG)


class Identity(object):
    """Per-request identity with support for acting as another user.

    ``real_id`` is the authenticated user; ``id`` is the identity currently
    being acted as (equal to ``real_id`` unless ``act_as`` was used). Both are
    persisted in the session under the ``real_id`` / ``actor_id`` keys.
    """

    request = None
    real_id = None
    id = None

    def __init__(self, request):
        """
        Initialise identity from the session (must be created after the session)
        @param request:
        @return:
        """
        self.request = request
        self.real_id = request.session.get('real_id', None)
        self.id = request.session.get('actor_id', None)
        log.debug("Identity initialised with %s (real), %s (actor)" % (self.real_id, self.id))

    def set(self, real_id, actor_id):
        """
        Set real and actor ID. Should be called on login
        @param real_id:
        @param actor_id:
        @return:
        """
        assert real_id, "Attempt to set identity to a null real ID"
        assert actor_id, "Attempt to set identity to a null actor ID"
        self.real_id = real_id
        self.id = actor_id
        self.request.session['real_id'] = real_id
        self.request.session['actor_id'] = actor_id
        # Rotate the session id on login to prevent session fixation.
        self.request.session.rotate()
        log.debug("Set identity as %s (real), %s (actor)" % (self.real_id, self.id))

    def is_set(self):
        """
        Check whether there's a valid (real) ID
        @return:
        """
        return bool(self.real_id)

    def act_as(self, actor_id):
        """
        Change actor ID. Should be called when switching to another user ID
        @param actor_id:
        @return:
        """
        self.id = actor_id
        self.request.session['actor_id'] = actor_id
        log.debug("Switched actor identity to %s" % actor_id)

    def logout(self):
        """
        Log out this session only
        @return:
        """
        log.debug("Logging out %s" % self.real_id)
        self.real_id = None
        self.id = None
        self.request.session.expire()
        self.request.session.reset()

    def logout_all(self):
        """
        Log out all sessions for this (real) user
        @return:
        """
        log.debug("Logging out all %s" % self.real_id)
        # BUG FIX: the original cleared self.real_id/self.id first and then
        # called expire_match({'real_id': self.id}), i.e. it matched on the
        # actor id — which was already None — so no other session was ever
        # expired. Capture the real ID before clearing and match on it.
        real_id = self.real_id
        self.real_id = None
        self.id = None
        self.request.session.expire_match({'real_id': real_id})
        self.request.session.reset()

    def changed_credentials(self):
        """
        Changed (actor) credentials. Should be called when the users authentication tokens are changed, ie when they
        change their password or oauth token.
        @return:
        """
        log.debug("Changing credentials %s (real) acting as %s" % (self.real_id, self.id))
        assert self.id, 'You cannot change credentials without a valid identity'
        # Kill every other session of the affected user ...
        self.request.session.expire_match({'actor_id': self.id})
        # ... and rotate this one when the user changed their own credentials.
        if self.id == self.real_id:
            self.request.session.rotate()
def on_request(event):
    """
    Subscriber that attaches an Identity to the request (must run after Session)
    @param event:
    @return:
    """
    req = event.request
    req.identity = Identity(req)
30685214893 | from django.urls import path
from staff import views
urlpatterns = [
    # CRUD routes for staff roles ("departments"), keyed by integer pk.
    path('department/list', views.StaffRoleListView.as_view(), name='staffrole_list'),
    path('department/<int:pk>/', views.StaffRoleDetailView.as_view(), name='staffrole_detail'),
    path('department/create', views.StaffRoleCreateView.as_view(), name='staffrole_create'),
    path('department/<int:pk>/update', views.StaffRoleUpdateView.as_view(), name='staffrole_update'),
    path('department/<int:pk>/delete', views.StaffRoleDeleteView.as_view(), name='staffrole_delete'),
    # CRUD routes for staff members, keyed by slug pk. NOTE: the literal
    # 'list' and 'create' routes must stay above '<slug:pk>' so that the
    # slug pattern does not swallow them.
    path('list', views.StaffListView.as_view(), name='staff_list'),
    path('create', views.StaffCreateView.as_view(), name='staff_create'),
    path('<slug:pk>', views.StaffDetailView.as_view(), name='staff_detail'),
    path('<slug:pk>/update', views.StaffUpdateView.as_view(), name='staff_update'),
    path('<slug:pk>/delete', views.StaffDeleteView.as_view(), name='staff_delete'),
]
31018613402 | import os, datetime
import subprocess, re
import socket
from colorama import Fore, Style
from time import sleep
try:
    # Battery percentage from the Android power-supply sysfs node.
    batteryH = open("/sys/class/power_supply/battery/capacity","r").read().replace("\n", '')
    ip = socket.gethostbyname(socket.gethostname())
    date = datetime.datetime.now().strftime("%H:%M:%S")

    def get_size():
        """Write the du(1) sizes of shared storage and the Termux home dir into .log.txt."""
        size = subprocess.check_output('du -sh /storage/emulated/0/', shell=True)
        sizeH = subprocess.check_output('du -sh /data/data/com.termux/files/home/', shell=True)
        file=open('.log.txt', "w")
        file.write(str(size)+str(sizeH))
        file.close()

    get_size()
    os.system('clear')

    # First du entry (shared storage): strip the bytes-literal artifacts that
    # str() left in the log file. Renamed from `filter` to stop shadowing the
    # builtin.
    file = open('.log.txt', "r").read()
    filtered = file.split('/')[0]
    sz = re.sub("b'|t|\\\|", '', filtered)

    # Second du entry (home dir). BUG FIX: the original split `file` instead of
    # `fileH`; both hold the same contents, but use the variable that was read
    # for this purpose.
    fileH = open('.log.txt', "r").read()
    filteredH = fileH.split('/')[4]
    szH = re.sub("b|n|'|t|\\\|", '', filteredH)

    print("""
    L i n u x
    """)
    print(Style.BRIGHT + 'Tɪᴍᴇ: ' + Fore.GREEN + Style.BRIGHT +date + Fore.RESET)
    print(Style.BRIGHT + 'Mʏ IP: ' + Fore.GREEN + ip + Fore.RESET)
    print(Style.BRIGHT + 'Mᴇᴍᴏʀʏ: ' + Fore.GREEN + sz + Fore.RESET)
    print(Style.BRIGHT + '$ Hᴏᴍᴇ: ' + Fore.GREEN + szH + Fore.RESET)
    print(Style.BRIGHT + 'Bᴀᴛᴛᴇʀʏ: ' + Fore.GREEN + str(batteryH) +'%'+ Style.RESET_ALL + Fore.RESET+'\n\n')
except Exception:
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.
    print('[!] Perhaps your paths are not so spaced')
19038071636 | import json
from api_handlers.polygon_api import Polygon_api
from macro import Macro_analysis
import configparser
def main():
    """Run the macro analysis on hard-coded sample data and print the sectors per stage."""
    #config = read_config()
    #Polygon_api.initiate_api(config["API_Polygon"])
    #------------------------------
    # Macro inputs: GDP and CPI for the last three quarters.
    gdp_two_quarters_ago = 2094
    gdp_last_quarter = 2100
    gdp_this_quarter = 2143

    cpi_two_quarters_ago = 0.5
    cpi_last_quarter = 1
    cpi_this_quarter = 2

    analysis = Macro_analysis(
        gdp_two_quarters_ago, gdp_last_quarter, gdp_this_quarter,
        cpi_two_quarters_ago, cpi_last_quarter, cpi_this_quarter,
    )
    stage = analysis.get_market_stage()
    for sector_, codes in stage.items():
        print(f"{sector_}: {codes}")
def update_ticker_data():
    """Fetch every ticker page from Polygon and persist them, keyed by symbol."""
    all_tickers = {
        ticker["ticker"]: ticker
        for page in Polygon_api.get_all_tickers()
        for ticker in page["results"]
    }
    file_ = "data_storage\\all_tickers.json"
    with open(file_, "w") as file:
        json.dump(all_tickers, file, indent=4)
def read_config() -> configparser.ConfigParser:
    """Load config.ini from the working directory (empty parser if absent)."""
    parser = configparser.ConfigParser()
    parser.read("config.ini")
    return parser
# Script entry point: run the macro analysis.
if __name__ == "__main__":
    main()
20322525205 | from collections import deque
with open('12-input.txt') as f:
    lines = f.read().strip().split('\n')

# Character grid and its dimensions.
G = [list(line) for line in lines]
R = len(G)     # rows
C = len(G[0])  # columns


def _height(ch):
    """Elevation of a cell: 'a'..'z' -> 1..26, with 'S' at elevation 1 and 'E' at 26."""
    if ch == 'S':
        return 1
    if ch == 'E':
        return 26
    return ord(ch) - ord('a') + 1


# Numeric elevation grid matching G.
E = [[_height(ch) for ch in row] for row in G]

# Four cardinal neighbour offsets: up, right, down, left.
DIR = [(-1,0),(0,1),(1,0),(0,-1)]
#Breadth first search algorithm
def bfs(part):
    """Shortest path length to 'E'.

    Part 1 starts from the single 'S' cell; part 2 starts simultaneously from
    every cell at elevation 1. A step is allowed when the destination is at
    most one unit higher than the current cell.
    """
    queue = deque(
        ((r, c), 0)
        for r in range(R)
        for c in range(C)
        if (part == 1 and G[r][c] == 'S') or (part == 2 and E[r][c] == 1)
    )
    seen = set()
    while queue:
        (r, c), dist = queue.popleft()
        if (r, c) in seen:
            continue
        seen.add((r, c))
        if G[r][c] == 'E':
            return dist
        for dr, dc in DIR:
            nr = r + dr
            nc = c + dc
            if 0 <= nr < R and 0 <= nc < C and E[nr][nc] <= 1 + E[r][c]:
                queue.append(((nr, nc), dist + 1))
print(bfs(1)) # part 1: shortest path from the single start cell 'S'
print(bfs(2)) # part 2: shortest path from any lowest-elevation cell
23133053154 | import os
import random
from src.neko.crypto.mersenne_twister import untemper, calc_prev_state, W, N, UPPER_MASK
def test_calc_prev_state():
    """Round-trip: recover the initial MT19937 state from outputs taken P refreshes later."""
    state_len = N * 4
    state0_bytes = os.urandom(state_len)
    # Interpret the random bytes as a sequence of 32-bit big-endian integers.
    state0 = tuple(int.from_bytes(state0_bytes[i:i + 4], "big") for i in range(0, state_len, 4))
    # Setting the counter to N forces the state to be refreshed on the next draw.
    random.setstate((3, state0 + (0, ), None))
    # Refresh the state P times (each refresh yields N outputs).
    P = 100
    for _ in range(P):
        outputs = [random.getrandbits(W) for _ in range(N)]
    # Recover the final internal state from its outputs ...
    last_state = tuple(untemper(o) for o in outputs)
    # ... then walk it back P - 1 refreshes to the initial state.
    recovered = last_state
    for _ in range(P - 1):
        recovered = calc_prev_state(recovered)
    recovered_bytes = b"".join(x.to_bytes(4, "big") for x in recovered)
    # Only the top bit of the first word is used by the algorithm, so the
    # rest of that word cannot be inverted.
    assert state0[1:] == recovered[1:]
    assert (state0[0] & UPPER_MASK) == (recovered[0] & UPPER_MASK)
    assert state0_bytes[4:] == recovered_bytes[4:]
| minaminao/neko | tests/crypto/test_mersenne_twister.py | test_mersenne_twister.py | py | 1,082 | python | en | code | 1 | github-code | 13 |
30315143330 | from flask import jsonify, request
from flask_babel import lazy_gettext as l_
from wazo_ui.helpers.menu import menu_item
from wazo_ui.helpers.view import BaseIPBXHelperView, NewHelperViewMixin
from wazo_ui.helpers.classful import (
LoginRequiredView,
extract_select2_params,
build_select2_response,
)
from .form import EndpointSIPForm
# Option sections that make up a SIP endpoint/template resource; each holds a
# list of [key, value] option pairs.
SECTIONS = [
    'aor_section_options',
    'auth_section_options',
    'endpoint_section_options',
    'identify_section_options',
    'registration_section_options',
    'registration_outbound_auth_section_options',
    'outbound_auth_section_options',
]

# Sections skipped when populating the option-key select choices in
# _map_resources_to_form (their option keys are left unconstrained).
EXCLUDE_CHOICE_SECTIONS = [
    'identify_section_options',
    'registration_section_options',
]
class EndpointSIPTemplateView(NewHelperViewMixin, BaseIPBXHelperView):
    """CRUD view for SIP endpoint templates (advanced IPBX menu)."""

    form = EndpointSIPForm
    resource = l_('SIP Template')

    @menu_item(
        '.ipbx.advanced.sip_templates',
        l_('SIP Templates'),
        icon="compress",
        multi_tenant=True,
    )
    def index(self):
        """List SIP templates; registered under the advanced IPBX menu."""
        return super().index()

    def _populate_form(self, form):
        """Fill select-field choices that require data fetched from the service."""
        form.transport.form.uuid.choices = self._build_set_choices_transport(form)
        form.template_uuids.choices = self._build_set_choices_templates(form.templates)
        return form

    def _build_set_choices_transport(self, template):
        """Return the (uuid, name) choice for the currently selected transport, or []."""
        transport_uuid = template.transport.form.uuid.data
        # The form may carry no transport, or the literal string 'None'.
        if not transport_uuid or transport_uuid == 'None':
            return []
        transport = self.service.get_transport(transport_uuid)
        return [(transport['uuid'], transport['name'])]

    def _build_set_choices_templates(self, templates):
        """Return (uuid, label) choices for each parent template referenced by the form."""
        results = []
        for template in templates:
            template = self.service.get_sip_template(template.uuid.data)
            results.append((template['uuid'], template['label']))
        return results

    def _map_resources_to_form(self, resource):
        """Convert the confd resource into form data.

        Option pairs become {option_key, option_value} dicts, and every key
        seen in any section is offered as a choice for the option-key selects
        (except the sections in EXCLUDE_CHOICE_SECTIONS).
        """
        choices = []
        for section in SECTIONS:
            for key, _ in resource[section]:
                choices.append((key, key))
            resource[section] = self._build_options(resource[section])
        resource['template_uuids'] = [
            template['uuid'] for template in resource['templates']
        ]
        form = self.form(data=resource)
        for section in SECTIONS:
            if section in EXCLUDE_CHOICE_SECTIONS:
                continue
            for option in getattr(form, section):
                option.option_key.choices = choices
        return form

    def _build_options(self, options):
        """[key, value] pairs -> {option_key, option_value} dicts for the form."""
        return [
            {'option_key': option_key, 'option_value': option_value}
            for option_key, option_value in options
        ]

    def _map_form_to_resources(self, form, form_id=None):
        """Convert submitted form data back into the confd resource shape."""
        resource = super()._map_form_to_resources(form, form_id)
        for section in SECTIONS:
            resource[section] = self._map_options_to_resource(resource[section])
        # An empty transport select must be sent as null, not an empty object.
        if not resource['transport'].get('uuid'):
            resource['transport'] = None
        resource['templates'] = [
            {'uuid': template_uuid} for template_uuid in form.template_uuids.data
        ]
        return resource

    def _map_options_to_resource(self, options):
        """{option_key, option_value} dicts -> [key, value] pairs for confd."""
        return [[option['option_key'], option['option_value']] for option in options]
class SIPTemplateDestinationView(LoginRequiredView):
    """Select2-compatible JSON listing of SIP templates."""

    def list_json(self):
        """Return the template page requested via select2 query parameters."""
        params = extract_select2_params(request.args)
        page = self.service.list(**params)
        items = [{'id': tpl['uuid'], 'text': tpl['label']} for tpl in page['items']]
        return jsonify(build_select2_response(items, page['total'], params))
| wazo-platform/wazo-ui | wazo_ui/plugins/sip_template/view.py | view.py | py | 3,666 | python | en | code | 4 | github-code | 13 |
43202137197 | import numpy as np
import matplotlib.pyplot as plt
import math
# Sample angles over one full period in 0.05-radian steps.
angles = np.arange(0, 2 * math.pi, 0.05)
sine_vals = np.sin(angles)

# Format string 'g4' = green with a "tri-right" (4) marker.
# Other markers: - -- -. : . , o ^ v < > s + x D d 1 2 3 4 h H p | _
# Colors: b g r c m y k w
plt.plot(angles, sine_vals, 'g4')
plt.title("Sine Wave")
plt.xlabel("angle")
plt.ylabel("Sine")
plt.show()
| baponkar/my-python-programms | matplotlib/sine_plot.py | sine_plot.py | py | 349 | python | uk | code | 0 | github-code | 13 |
11311754193 |
def remove_duplicates_sorted_array(a):
    """Remove duplicates from the sorted list *a* in place.

    Keeps the first occurrence of each value in a[:k] and returns k, the
    number of unique elements (LeetCode 26 convention); elements beyond a[:k]
    are unspecified.

    BUG FIX: the original never incremented the outer index, so it looped
    forever on any list whose first element had no duplicates, and it
    returned nothing.
    """
    if not a:
        return 0
    write = 1  # next slot for a value not seen before
    for read in range(1, len(a)):
        if a[read] != a[write - 1]:
            a[write] = a[read]
            write += 1
    return write
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.