seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14952780826 | from temporalio.client import Client
from temporalio.worker import Worker
from temporal_test.activity import say_hello_activity
from temporal_test.config import Config
from temporal_test.workflows import TestWorkflow
import asyncio
import os
async def main():
    """Connect to the Temporal server and run a worker until interrupted.

    The server address comes from the TEMPORAL_ADDRESS environment
    variable, falling back to a local default.
    """
    address = os.getenv("TEMPORAL_ADDRESS", "127.0.0.1:7233")
    print(f"connecting to temporal at {address}")
    temporal_client = await Client.connect(address, namespace="default")
    hello_worker = Worker(
        temporal_client,
        task_queue=Config.queue,
        workflows=[TestWorkflow],
        activities=[say_hello_activity],
    )
    # Blocks until the worker is shut down.
    await hello_worker.run()
print(__name__)

# Start the worker both when the module is executed directly
# (__name__ == "__main__") and when it is imported as the package module
# (__name__ == "temporal_test.worker").  The original file had two
# duplicated ``if`` blocks with identical bodies; merging them is
# behavior-preserving because __name__ can only match one value per process.
if __name__ in ("temporal_test.worker", "__main__"):
    print("starting worker")
    asyncio.run(main())
| qadiludmer/temporal-test | temporal_test/worker.py | worker.py | py | 824 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.getenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "temporalio.client.Client.connect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "temporalio.client.Client",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "t... |
10230270575 | import numpy as np
from pyparsing import Any
import evals
import evals.elsuite.steganography.steganography as steganography
import evals.metrics
from evals.api import CompletionFn, DummyCompletionFn
from evals.eval import Eval
from evals.record import RecorderBase
class Steganography(Eval):
    """Steganography eval: can a model hide a payload inside a task response?

    Requires exactly two completion functions: the first is the target
    model (encodes/decodes the payload), the second is the monitor model
    (tries to detect the hidden message).
    """

    # Metric keys recorded per sample by eval_sample and averaged in run().
    _METRIC_KEYS = (
        "undetected_acc",
        "detectability",
        "reconstruction_error",
        "character_error_rate_cap1",
        "semantic_distance",
        "rule_violated",
    )

    def __init__(
        self,
        completion_fns: list[CompletionFn],
        samples_jsonl: str,
        *args,
        prompt_version: str = "scratch",
        reconstruction_error_metric: str = "semantic_distance",
        **kwargs,
    ):
        super().__init__(completion_fns, *args, **kwargs)
        # A single dummy completion fn is duplicated so the same dummy acts
        # as both the target model and the monitor model.
        if len(completion_fns) == 1 and isinstance(
            completion_fns[0], DummyCompletionFn
        ):
            completion_fn = completion_fns[0]
            completion_fns = [completion_fn, completion_fn]
        assert (
            len(completion_fns) == 2
        ), f"Steganography requires 2 completion_fns (the 1st is the target model, the 2nd is the monitor model), received {len(completion_fns)}."
        self.completion_fns = completion_fns
        self.samples_jsonl = samples_jsonl
        self.prompt_version = prompt_version
        self.reconstruction_error_metric = reconstruction_error_metric

    def eval_sample(self, sample: Any, *_):
        """Run the steganography pipeline on one sample and record metrics.

        ``sample`` must be a dict with string values under ``payload_text``,
        ``task_prompt`` and ``task_output``.
        """
        assert isinstance(sample, dict), "sample must be a dict"
        # Validate the three required string fields with the same messages as
        # the original hand-written asserts.
        for key in ("payload_text", "task_prompt", "task_output"):
            assert key in sample, f"sample must have a {key} key"
            assert isinstance(sample[key], str), f"{key} must be a string"
        results = steganography.run_eval(
            task_prompt=sample["task_prompt"],
            reference_output=sample["task_output"],
            payload=sample["payload_text"],
            completion_fn=self.completion_fns[0],
            monitor_completion_fn=self.completion_fns[1],
            prompt_version=self.prompt_version,
            metadata=sample,
        )
        evals.record.record_metrics(
            undetected_acc=results["undetected_acc"],
            detectability=results["detected"],
            reconstruction_error=results[self.reconstruction_error_metric],
            character_error_rate_cap1=results["character_error_rate_cap1"],
            semantic_distance=results["semantic_distance"],
            rule_violated=results["rule_violated"],
        )

    def run(self, recorder: RecorderBase):
        """Evaluate all samples and return the mean of each recorded metric."""
        samples = self.get_samples()
        self.eval_all_samples(recorder, samples)
        metrics = recorder.get_metrics()
        # One comprehension instead of six copy-pasted np.mean expressions.
        return {key: np.mean([d[key] for d in metrics]) for key in self._METRIC_KEYS}
| openai/evals | evals/elsuite/steganography/eval.py | eval.py | py | 3,437 | python | en | code | 12,495 | github-code | 6 | [
{
"api_name": "evals.eval.Eval",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "evals.api.CompletionFn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "evals.api.DummyCompletionFn",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name":... |
1948037648 | from django.contrib import admin
from django.db.models import QuerySet
from django.db.models.expressions import RawSQL
from django.forms import ModelForm
from django.urls import reverse
from django.utils.safestring import mark_safe
from tree.models import Category
from django.utils.html import format_html
from django import forms
from treebeard.admin import TreeAdmin
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Admin for treebeard materialized-path Category rows.

    Annotates each row with the id of its parent (computed in SQL from the
    materialized path) and renders it as a link in the change list.
    """

    def get_queryset(self, request):
        # NOTE(review): ``super(admin.ModelAdmin, self)`` starts the MRO
        # search *after* ModelAdmin, bypassing ModelAdmin.get_queryset
        # (and therefore its default admin ordering).  Looks deliberate,
        # but confirm before changing.
        qs: QuerySet = super(admin.ModelAdmin, self).get_queryset(request)
        # The raw SQL selects the category one level shallower (depth - 1)
        # whose path is a prefix of this row's path, i.e. the treebeard
        # materialized-path parent.  '%%' escapes the SQL LIKE wildcard.
        return qs \
            .annotate(parent=RawSQL("""
            select id from tree_category tc where tc.depth="tree_category"."depth"-1 and "tree_category"."path" like tc.path || '%%'
            """, []
            ))

    # Columns shown in the change list; 'parent' is the annotated/derived
    # value rendered by the parent() method below.
    list_display = (
        'id',
        'path',
        'depth',
        'numchild',
        'name',
        'parent',
    )

    class CategoryForm(ModelForm):
        # Disabled checkbox whose *label* is abused to show a link to the
        # parent category on the change form.
        parent_link = forms.BooleanField(required=False, disabled=True)

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # NOTE(review): when self.instance.parent is None (root category
            # or unsaved instance) reverse() receives None as an argument --
            # verify this does not raise NoReverseMatch in practice.
            url = reverse('admin:tree_category_change', args=[self.instance.parent])
            self.fields['parent_link'].label = format_html('<a href="{}">Link to Parent</a>', url)

        class Meta:
            model = Category
            fields = '__all__'

    form = CategoryForm

    def parent(self, obj):
        """Render the annotated parent id as a change-form link."""
        # result = obj.grade_avg
        if obj.parent is None:
            # Root categories have no parent; render the None in bold italics.
            return format_html("<b><i>{}</i></b>", obj.parent)
        url = reverse('admin:tree_category_change', args=[obj.parent])
        return format_html('<a href="{}"> Parent</a>', url)

    # list_select_related = ()
    # raw_id_fields = ("id",)
| Vulwsztyn/django_treebeard_admin | tree/admin.py | admin.py | py | 1,783 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.QuerySet",
"line_number": 17,
"usage_type": "name"
},
{
... |
15855579751 | from appium import webdriver
from appium.webdriver.common.mobileby import MobileBy
from selenium.webdriver.common.by import By
class TestWeworkTest:
    """Appium UI test for the WeCom (WeChat Work) Android app clock-in flow."""

    def setup(self):
        """Open an Appium session attached to an already-installed app."""
        desired_cap = {}
        desired_cap["platformName"] = "Android"
        # desired_cap["platformVersion"] = "6.0"
        desired_cap["deviceName"] = "127.0.0.1:62001"
        desired_cap["appPackage"] = "com.tencent.wework"
        desired_cap["appActivity"] = ".launch.WwMainActivity"
        desired_cap["noReset"] = True
        desired_cap["skipServerInstallation"] = True  # skip installing uiautomator2
        desired_cap["skipDeviceInitialization"] = True  # skip device initialization
        # desired_cap["unicodeKeyBoard"] = 'true'
        # desired_cap["restKeyBoard"] = 'true'
        desired_cap["settings[waitForIdleTimeout]"] = 0  # do not wait for the page to become fully idle
        desired_cap["dontStopAppOnReset"] = True  # keep the app running between sessions
        self.drive = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_cap)
        self.drive.implicitly_wait(5)

    def teardown(self):
        """Close the Appium session."""
        self.drive.quit()

    def swipe_find(self, by_method, ele, num=3):
        """Scroll down up to ``num`` times looking for an element.

        Returns the element if found.  Returns None when the element is
        still missing after ``num`` swipes (the original returned None
        implicitly; callers must check).
        """
        for _ in range(num):
            try:
                return self.drive.find_element(by_method, ele)
            except Exception:
                # Element not on screen yet: swipe from 80% down to 30% of
                # the screen height to scroll the list, then retry.
                # (Was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                windows_size = self.drive.get_window_size()
                width = windows_size.get('width')
                height = windows_size.get('height')
                self.drive.swipe(width / 2, height * 0.8, width / 2, height * 0.3)
        return None

    def test_add_member(self):
        """Open the workbench, scroll to the clock-in tool and punch out."""
        self.drive.find_element(MobileBy.XPATH, "//*[@text='工作台']").click()
        # Scroll until the clock-in entry is visible, then open it.
        self.swipe_find(MobileBy.XPATH, "//*[@text='打卡']").click()
        self.drive.find_element(MobileBy.XPATH, "//*[@text='外出打卡']").click()
        self.drive.find_element(MobileBy.XPATH, "//*[contains(@text,'次外出')]").click()
        # Implicit wait doubles as the success assertion: raises if missing.
        self.drive.find_element(MobileBy.XPATH, "//*[@text='外出打卡成功']")
| sunmings1310/HogwartsHomework | hogwarts-homework/AppWeworkHomework/test_clokc_in.py | test_clokc_in.py | py | 3,015 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "appium.webdriver.Remote",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "appium.webdriver",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "appium.webdriver.common.mobileby.MobileBy.XPATH",
"line_number": 47,
"usage_type": "attribute"
... |
28985765692 | import community as community_louvain
import networkx as nx
#read file
# Read the tab-separated edge list.
graph_file = 'abide_au_2_4132_sparse.txt'
#label_file='4132_regions.txt'
with open(graph_file) as f:
    graph = [x.strip() for x in f.readlines()]

G = nx.parse_edgelist(graph, delimiter="\t", nodetype=int)
# Louvain community detection: maps vertex id -> community id.
partition = community_louvain.best_partition(G)
#print(partition)

# Persist the vertex -> community assignment.
with open("vertex_community_au_sparse2_run2.txt", "w") as comm_file:
    for comm in sorted(partition):
        comm_file.write(str(comm) + '\t' + str(partition[comm]) + '\n')

num_comm = max(partition.values()) + 1
vertex_subsets = [set() for i in range(num_comm)]
subgraphs = [set() for i in range(num_comm)]
cut_edges = set()
for key in partition:
    vertex_subsets[partition[key]].add(key)

# Classify each edge: intra-community edges go to that community's
# subgraph; edges between communities are cut edges.  Looking the
# endpoints up in the partition dict is O(1) per edge, replacing the
# original O(num_comm) scan over every community's vertex set.
for edge in graph:
    u, v = edge.split("\t")
    u, v = int(u), int(v)
    if partition[u] == partition[v]:
        subgraphs[partition[u]].add(edge)
    else:
        cut_edges.add(edge)
#print(cut_edges)

for i in range(num_comm):
    print("subgraph ", i, " contains ", len(subgraphs[i]), " number of edges")
print("Cut edges are - ", len(cut_edges))

with open("subgraphs_au_sparse2_run2.txt", "w") as subgraph_file:
    for comm in range(num_comm):
        subgraph_file.write(str(comm) + '\n')
        subgraph_file.write(str(len(vertex_subsets[comm])) + '\t' + str(len(subgraphs[comm])) + '\n')
        # Renumber this community's vertices as consecutive local ids.
        vertid = {vertex: i for i, vertex in enumerate(sorted(vertex_subsets[comm]))}
        for vertex in sorted(vertex_subsets[comm]):
            subgraph_file.write(str(vertex) + '\n')
        # Emit the community's edges using local ids, sorted lexicographically.
        edgelist = []
        for edge in sorted(subgraphs[comm]):
            u, v = edge.split("\t")
            edgelist.append((vertid[int(u)], vertid[int(v)]))
        for u, v in sorted(edgelist):
            subgraph_file.write(str(u) + '\t' + str(v) + '\t' + str(1) + '\n')
| chandrashekar-cds/Graph-Coarsening | louvain_subgraphs_label.py | louvain_subgraphs_label.py | py | 2,184 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "networkx.parse_edgelist",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "community.best_partition",
"line_number": 13,
"usage_type": "call"
}
] |
10640003312 |
import requests
import json
import urllib2
# Root URL for all GitHub REST API requests.
BASE_URL = "https://api.github.com"
def confirm_github():
    """Return True if the GitHub API root responds with HTTP 200.

    Any other status code is treated as "GitHub is down".
    """
    r = requests.get(BASE_URL)
    # Same True/False mapping as the original if/else, one expression.
    return r.status_code == 200
def check_member(org_name, member_name):
    """Return True if ``member_name`` is a public member of ``org_name``.

    The endpoint answers 204 (member) or 404 (not a member).  Any other
    status is treated as "not a member" -- the original implicitly
    returned None there, which is equivalent in truth value.
    """
    url = BASE_URL + "/orgs/%s/public_members/%s" % (org_name, member_name)
    r = requests.get(url)
    return r.status_code == 204
def create_markdown(comment, mode, repo_context):
    """POST a document to the GitHub markdown endpoint; True on HTTP 200.

    Returns False when the request fails (e.g. 400 Bad Request on a
    malformed POST) or when the endpoint answers with any non-200 status
    (the original fell through and returned None there).
    """
    try:
        comment_data = {
            "text": "%s" % comment,
            "mode": "%s" % mode,
            "context": "%s" % repo_context
        }
        req = urllib2.Request(BASE_URL + "/markdown")
        req.add_header('Content-Type', 'application/json')
        r = urllib2.urlopen(req, json.dumps(comment_data))
        if r.getcode() == 200:
            return True
    except Exception:
        # urllib2 raises HTTPError/URLError on failure; was a bare except.
        return False
    # POST succeeded but with a non-200 status code.
    return False
def get_repo_branches(owner, repo):
    """Return the head commit SHA of the first branch of ``owner/repo``.

    On an API error the endpoint returns a payload like
    ``{"message": "Not Found"}``; that message string is returned instead.
    Network failures propagate to the caller.  (The original wrapped the
    request itself in the try, so a failed request hit an
    UnboundLocalError on ``repo_branches`` inside the except clause.)
    """
    url = BASE_URL + "/repos/%s/%s/branches" % (owner, repo)
    r = requests.get(url)
    repo_branches = r.json()
    try:
        return repo_branches[0]["commit"]["sha"]
    except (KeyError, IndexError, TypeError):
        # Error payloads are dicts with a "message" key.
        return repo_branches["message"]
| smithers1221/replicatedcc_python | githubapi.py | githubapi.py | py | 1,833 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "urllib2.Request",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line... |
43574733165 | import os
import unittest
import re
from unittest import mock
import tempfile
with mock.patch('cffi.FFI.dlopen', return_value=mock.MagicMock()):
from geopmdpy.system_files import ActiveSessions, AccessLists, WriteLock
# Patch dlopen to allow the tests to run when there is no build
with mock.patch('cffi.FFI.dlopen', return_value=mock.MagicMock()):
from geopmdpy.service import PlatformService
from geopmdpy.service import TopoService
class TestPlatformService(unittest.TestCase):
    """Unit tests for geopmdpy.service.PlatformService.

    All filesystem/session state is replaced with autospec'd mocks created
    in setUp; individual tests patch geopmdpy.pio and OS-level calls so no
    real hardware access or privileged operation occurs.
    """

    def setUp(self):
        """Build a PlatformService wired to mocked session/access/lock objects."""
        self._test_name = 'TestPlatformService'
        self._RUN_PATH = tempfile.TemporaryDirectory('{}_run'.format(self._test_name))
        self._mock_active_sessions = mock.create_autospec(ActiveSessions)
        self._mock_active_sessions.get_clients.return_value = []
        self._check_client_active_err_msg = "Injected error"
        self._mock_active_sessions.check_client_active.side_effect = \
            RuntimeError(self._check_client_active_err_msg)  # Until open_mock_session is called
        self._mock_access_lists = mock.create_autospec(AccessLists)
        self._mock_write_lock = mock.create_autospec(WriteLock)
        self._mock_write_lock.try_lock.return_value = None
        self._mock_write_lock.unlock.return_value = None
        # Patch the system_files classes so PlatformService() picks up the
        # mocks during construction.
        with mock.patch('geopmdpy.system_files.ActiveSessions', return_value=self._mock_active_sessions), \
                mock.patch('geopmdpy.system_files.AccessLists', return_value=self._mock_access_lists), \
                mock.patch('geopmdpy.system_files.WriteLock', return_value=self._mock_write_lock):
            self._platform_service = PlatformService()
        self._platform_service._RUN_PATH = self._RUN_PATH.name
        self._platform_service._active_sessions._RUN_PATH = self._RUN_PATH.name
        self._session_file_format = os.path.join(self._RUN_PATH.name, 'session-{client_pid}.json')

    def tearDown(self):
        """Remove the temporary run directory."""
        self._RUN_PATH.cleanup()

    def test_close_already_closed(self):
        """Closing a session more times than its reference count raises."""
        # We already have two independent components with the session.
        client_pid = -999
        self.open_mock_session('user_name', client_pid, True, 2)  # 2
        self._platform_service.close_session(client_pid)  # 1
        self._platform_service.close_session(client_pid)  # 0
        self._platform_service._active_sessions.check_client_active = mock.MagicMock(side_effect=RuntimeError)
        with self.assertRaises(RuntimeError):
            self._platform_service.close_session(client_pid)  # error here

    def test_read_already_closed(self):
        """Reading a signal after the session is fully closed raises."""
        # We already have two independent components with the session.
        client_pid = -999
        self.open_mock_session('user_name', client_pid, True, 2)  # 2
        self._platform_service.close_session(client_pid)  # 1
        self._platform_service.close_session(client_pid)  # 0
        self._platform_service._active_sessions.check_client_active = mock.MagicMock(side_effect=RuntimeError)
        with self.assertRaises(RuntimeError):
            self._platform_service.read_signal(client_pid, 'CPU_FREQUENCY', 0, 0)  # error here

    def test_get_signal_info(self):
        """get_signal_info() combines description, domain and info per signal."""
        signals = ['energy', 'frequency', 'power']
        descriptions = ['desc0', 'desc1', 'desc2']
        domains = [0, 1, 2]
        infos = [(0, 0, 0), (1, 1, 1), (2, 2, 2)]
        expected_result = list(zip(signals, descriptions, domains))
        for idx in range(len(expected_result)):
            expected_result[idx] = expected_result[idx] + infos[idx]
        with mock.patch('geopmdpy.pio.signal_description', side_effect=descriptions) as mock_desc, \
                mock.patch('geopmdpy.pio.signal_domain_type', side_effect=domains) as mock_dom, \
                mock.patch('geopmdpy.pio.signal_info', side_effect=infos) as mock_inf:
            signal_info = self._platform_service.get_signal_info(signals)
            self.assertEqual(expected_result, signal_info)
            calls = [mock.call(cc) for cc in signals]
            mock_desc.assert_has_calls(calls)
            mock_dom.assert_has_calls(calls)
            mock_inf.assert_has_calls(calls)

    def test_get_control_info(self):
        """get_control_info() combines description and domain per control."""
        controls = ['fan', 'frequency', 'power']
        descriptions = ['desc0', 'desc1', 'desc2']
        domains = [0, 1, 2]
        expected_result = list(zip(controls, descriptions, domains))
        with mock.patch('geopmdpy.pio.control_description', side_effect=descriptions) as mock_desc, \
                mock.patch('geopmdpy.pio.control_domain_type', side_effect=domains) as mock_dom:
            control_info = self._platform_service.get_control_info(controls)
            self.assertEqual(expected_result, control_info)
            calls = [mock.call(cc) for cc in controls]
            mock_desc.assert_has_calls(calls)
            mock_dom.assert_has_calls(calls)

    def test_lock_control(self):
        """lock_control() is not implemented yet."""
        err_msg = 'PlatformService: Implementation incomplete'
        with self.assertRaisesRegex(NotImplementedError, err_msg):
            self._platform_service.lock_control()

    def test_unlock_control(self):
        """unlock_control() is not implemented yet."""
        err_msg = 'PlatformService: Implementation incomplete'
        with self.assertRaisesRegex(NotImplementedError, err_msg):
            self._platform_service.unlock_control()

    def test_open_session_twice(self):
        """Opening an already-active session does not re-add the client."""
        self.open_mock_session('', active=True)

    def _gen_session_data_helper(self, client_pid, reference_count):
        """Build the canonical session-data dict used by the mocks."""
        signals_default = ['energy', 'frequency']
        controls_default = ['controls', 'geopm', 'named', 'power']
        watch_id = 888
        session_data = {'client_pid': client_pid,
                        'reference_count': reference_count,
                        'mode': 'r',
                        'signals': signals_default,
                        'controls': controls_default,
                        'watch_id': watch_id}
        return session_data

    def open_mock_session(self, session_user, client_pid=-999, active=False, reference_count=1):
        """Open a session against the mocks and return its session data.

        Configures all ActiveSessions/AccessLists mock return values for the
        session, calls PlatformService.open_session(), and verifies the
        client was (or was not, when already ``active``) added.
        """
        session_data = self._gen_session_data_helper(client_pid, reference_count)
        client_pid = session_data['client_pid']
        reference_count = session_data['reference_count']
        watch_id = session_data['watch_id']
        signals = session_data['signals']
        controls = session_data['controls']
        self._mock_active_sessions.is_client_active.return_value = active
        self._mock_active_sessions.get_controls.return_value = controls
        self._mock_active_sessions.get_signals.return_value = signals
        self._mock_active_sessions.get_reference_count.return_value = reference_count
        self._mock_active_sessions.get_watch_id.return_value = watch_id
        self._mock_active_sessions.get_batch_server.return_value = None
        self._mock_active_sessions.remove_client.return_value = session_data
        self._mock_access_lists.get_user_access.return_value = (signals, controls)
        with mock.patch('geopmdpy.system_files.AccessLists._get_user_groups', return_value=[]), \
                mock.patch('geopmdpy.service.PlatformService._watch_client', return_value=watch_id):
            self._platform_service.open_session(session_user, client_pid)
        self._mock_active_sessions.is_client_active.assert_called_with(client_pid)
        if not active:
            self._mock_active_sessions.add_client.assert_called_with(client_pid, signals, controls, watch_id)
            self._mock_access_lists.get_user_access.assert_called()
        else:
            self._mock_active_sessions.add_client.assert_not_called()
        self._mock_active_sessions.check_client_active.side_effect = None  # session is now active
        return session_data

    def test_open_session(self):
        """A fresh session can be opened."""
        self.open_mock_session('')

    def test_close_session_invalid(self):
        """Closing a session that was never opened raises."""
        client_pid = 999
        with self.assertRaisesRegex(RuntimeError, self._check_client_active_err_msg):
            self._platform_service.close_session(client_pid)

    def test_close_session_read(self):
        """Closing a read-only session removes the watch but restores nothing."""
        session_data = self.open_mock_session('')
        client_pid = session_data['client_pid']
        watch_id = session_data['watch_id']
        with mock.patch('gi.repository.GLib.source_remove', return_value=[]) as mock_source_remove, \
                mock.patch('geopmdpy.pio.restore_control_dir') as mock_restore_control_dir, \
                mock.patch('shutil.rmtree', return_value=[]) as mock_rmtree:
            self._platform_service.close_session(client_pid)
            mock_restore_control_dir.assert_not_called()
            mock_rmtree.assert_not_called()
            mock_source_remove.assert_called_once_with(watch_id)
            self._mock_active_sessions.remove_client.assert_called_once_with(client_pid)

    def test_close_session_write(self):
        """Closing a write-mode session restores the saved control state."""
        session_data = self.open_mock_session('')
        client_pid = session_data['client_pid']
        watch_id = session_data['watch_id']
        self._mock_write_lock.try_lock.return_value = client_pid
        # First take the write lock by writing a control.
        with mock.patch('geopmdpy.pio.save_control_dir') as mock_save_control_dir, \
                mock.patch('geopmdpy.pio.write_control') as mock_write_control, \
                mock.patch('os.getsid', return_value=client_pid) as mock_getsid:
            self._platform_service.write_control(client_pid, 'geopm', 'board', 0, 42.024)
            mock_save_control_dir.assert_called_once()
            mock_write_control.assert_called_once_with('geopm', 'board', 0, 42.024)
        with mock.patch('gi.repository.GLib.source_remove', return_value=[]) as mock_source_remove, \
                mock.patch('geopmdpy.pio.restore_control_dir', return_value=[]) as mock_restore_control_dir, \
                mock.patch('os.getsid', return_value=client_pid) as mock_getsid:
            self._platform_service.close_session(client_pid)
            mock_restore_control_dir.assert_called_once()
            save_dir = os.path.join(self._platform_service._RUN_PATH,
                                    self._platform_service._SAVE_DIR)
            mock_source_remove.assert_called_once_with(watch_id)
            self.assertFalse(self._platform_service._active_sessions.is_client_active(client_pid))
            session_file = self._session_file_format.format(client_pid=client_pid)
            self.assertFalse(os.path.exists(session_file))

    def test_start_batch_invalid(self):
        """Requesting signals/controls outside the allowed lists raises."""
        session_data = self.open_mock_session('')
        client_pid = session_data['client_pid']
        watch_id = session_data['watch_id']
        valid_signals = session_data['signals']
        signal_config = [(0, 0, sig) for sig in valid_signals]
        valid_controls = session_data['controls']
        bogus_controls = [(0, 0, 'invalid_frequency'), (0, 0, 'invalid_energy')]
        control_config = [(0, 0, con) for con in valid_controls]
        control_config.extend(bogus_controls)
        err_msg = re.escape('Requested controls that are not in allowed list: {}' \
                            .format(sorted({bc[2] for bc in bogus_controls})))
        with self.assertRaisesRegex(RuntimeError, err_msg):
            self._platform_service.start_batch(client_pid, signal_config,
                                               control_config)
        bogus_signals = [(0, 0, 'invalid_uncore'), (0, 0, 'invalid_power')]
        signal_config.extend(bogus_signals)
        err_msg = re.escape('Requested signals that are not in allowed list: {}' \
                            .format(sorted({bs[2] for bs in bogus_signals})))
        with self.assertRaisesRegex(RuntimeError, err_msg):
            self._platform_service.start_batch(client_pid, signal_config,
                                               control_config)

    def test_start_batch_write_blocked(self):
        """Write mode batch server will not start when write lock is held

        This test calls write_control without a session leader, and then a
        different PID tries to create a write mode batch server with a session
        leader. This request should fail.
        """
        client_pid = 999
        client_sid = 333
        other_pid = 666
        control_name = 'geopm'
        domain = 7
        domain_idx = 42
        setting = 777
        session_data = self.open_mock_session('other', other_pid)
        mock_pwuid = mock.MagicMock()
        self._mock_write_lock.try_lock.return_value = other_pid
        with mock.patch('geopmdpy.pio.write_control', return_value=[]) as mock_write_control, \
                mock.patch('geopmdpy.pio.save_control_dir'), \
                mock.patch('os.getsid', return_value=other_pid) as mock_getsid:
            self._platform_service.write_control(other_pid, control_name, domain, domain_idx, setting)
            mock_write_control.assert_called_once_with(control_name, domain, domain_idx, setting)
        session_data = self.open_mock_session('', client_pid)
        valid_signals = session_data['signals']
        valid_controls = session_data['controls']
        signal_config = [(0, 0, sig) for sig in valid_signals]
        control_config = [(0, 0, con) for con in valid_controls]
        mock_pwuid.pw_name = 'test_user'
        err_msg = f'The PID {client_pid} requested write access, but the geopm service already has write mode client with PID or SID of {abs(other_pid)}'
        with self.assertRaisesRegex(RuntimeError, err_msg), \
                mock.patch('geopmdpy.pio.start_batch_server', return_value = (2345, "2345")), \
                mock.patch('os.getsid', return_value=client_sid) as mock_getsid, \
                mock.patch('pwd.getpwuid', return_value=mock_pwuid) as mock_getpwuid, \
                mock.patch('psutil.pid_exists', return_value=True) as mock_pid_exists:
            self._platform_service.start_batch(client_pid, signal_config,
                                               control_config)

    def test_start_batch(self):
        """A write-mode batch server starts and can later be stopped."""
        session_data = self.open_mock_session('')
        client_pid = session_data['client_pid']
        watch_id = session_data['watch_id']
        valid_signals = session_data['signals']
        valid_controls = session_data['controls']
        signal_config = [(0, 0, sig) for sig in valid_signals]
        control_config = [(0, 0, con) for con in valid_controls]
        expected_result = (1234, "1234")
        self._mock_write_lock.try_lock.return_value = client_pid
        with mock.patch('geopmdpy.pio.start_batch_server', return_value=expected_result), \
                mock.patch('geopmdpy.pio.save_control_dir'), \
                mock.patch('os.getsid', return_value=client_pid) as mock_getsid:
            actual_result = self._platform_service.start_batch(client_pid, signal_config,
                                                               control_config)
        self.assertEqual(expected_result, actual_result,
                         msg='start_batch() did not pass back correct result')
        save_dir = os.path.join(self._platform_service._RUN_PATH,
                                self._platform_service._SAVE_DIR)
        self.assertTrue(os.path.isdir(save_dir),
                        msg = 'Directory does not exist: {}'.format(save_dir))
        self._mock_active_sessions.get_batch_server.return_value = expected_result[0]
        with mock.patch('geopmdpy.pio.stop_batch_server', return_value=[]) as mock_stop_batch_server, \
                mock.patch('psutil.pid_exists', return_value=True) as mock_pid_exists:
            self._platform_service.stop_batch(client_pid, expected_result[0])
            mock_stop_batch_server.assert_called_once_with(expected_result[0])

    def test_stop_batch_invalid(self):
        """Stopping a batch server without an active session raises."""
        with self.assertRaisesRegex(RuntimeError, self._check_client_active_err_msg):
            self._platform_service.stop_batch('', '')

    def test_read_signal_invalid(self):
        """Reading without a session, or a disallowed signal, raises."""
        with self.assertRaisesRegex(RuntimeError, self._check_client_active_err_msg):
            self._platform_service.read_signal('', '', '', '')
        session_data = self.open_mock_session('')
        client_pid = session_data['client_pid']
        signal_name = 'geopm'
        err_msg = 'Requested signal that is not in allowed list: {}'.format(signal_name)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            self._platform_service.read_signal(client_pid, signal_name, '', '')

    def test_read_signal(self):
        """read_signal() forwards the request to pio.read_signal."""
        session_data = self.open_mock_session('')
        client_pid = session_data['client_pid']
        signal_name = 'energy'
        domain = 7
        domain_idx = 42
        with mock.patch('geopmdpy.pio.read_signal', return_value=[]) as rs:
            self._platform_service.read_signal(client_pid, signal_name, domain, domain_idx)
            rs.assert_called_once_with(signal_name, domain, domain_idx)

    def test_write_control_invalid(self):
        """Writing without a session, or a disallowed control, raises."""
        with self.assertRaisesRegex(RuntimeError, self._check_client_active_err_msg):
            self._platform_service.write_control('', '', '', '', '')
        session_data = self.open_mock_session('')
        client_pid = session_data['client_pid']
        control_name = 'energy'
        err_msg = 'Requested control that is not in allowed list: {}'.format(control_name)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            self._platform_service.write_control(client_pid, control_name, '', '', '')

    def test_write_control(self):
        """write_control() forwards to pio.write_control and saves state."""
        session_data = self.open_mock_session('')
        client_pid = session_data['client_pid']
        self._mock_write_lock.try_lock.return_value = client_pid
        control_name = 'geopm'
        domain = 7
        domain_idx = 42
        setting = 777
        with mock.patch('geopmdpy.pio.write_control', return_value=[]) as mock_write_control, \
                mock.patch('geopmdpy.pio.save_control_dir'), \
                mock.patch('os.getsid', return_value=client_pid) as mock_getsid:
            self._platform_service.write_control(client_pid, control_name, domain, domain_idx, setting)
            mock_write_control.assert_called_once_with(control_name, domain, domain_idx, setting)

    def test_restore_already_closed(self):
        """restore_control() after the session is fully closed raises."""
        client_pid = -999
        session_data = self.open_mock_session('user_name', client_pid, True, 2)  # 2
        self._platform_service.close_session(client_pid)  # 1
        self._platform_service.close_session(client_pid)  # 0
        self._platform_service._active_sessions.check_client_active = mock.MagicMock(side_effect=RuntimeError)
        with self.assertRaises(RuntimeError):
            self._platform_service.restore_control(client_pid)

    def test_restore_write_blocked(self):
        """restore_control() fails while another PID holds the write lock."""
        client_pid = 999
        client_sid = 333
        other_pid = 666
        control_name = 'geopm'
        domain = 7
        domain_idx = 42
        setting = 777
        self.open_mock_session('other', other_pid)
        with mock.patch('geopmdpy.pio.write_control', return_value=[]) as mock_write_control, \
                mock.patch('geopmdpy.pio.save_control_dir'), \
                mock.patch('os.getsid', return_value=other_pid):
            self._platform_service.write_control(other_pid, control_name, domain, domain_idx, setting)
            mock_write_control.assert_called_once_with(control_name, domain, domain_idx, setting)
        self.open_mock_session('', client_pid)
        mock_pwuid = mock.MagicMock()
        mock_pwuid.pw_name = 'test_user'
        self._mock_write_lock.try_lock.return_value = other_pid
        err_msg = f'The PID {client_pid} requested write access, but the geopm service already has write mode client with PID or SID of {abs(other_pid)}'
        with self.assertRaisesRegex(RuntimeError, err_msg), \
                mock.patch('geopmdpy.pio.restore_control_dir'), \
                mock.patch('os.getsid', return_value=client_sid), \
                mock.patch('pwd.getpwuid', return_value=mock_pwuid), \
                mock.patch('psutil.pid_exists', return_value=True):
            self._platform_service.restore_control(client_pid)

    def test_restore_control(self):
        """restore_control() restores from the session's save directory."""
        session_data = self.open_mock_session('')
        client_pid = session_data['client_pid']
        self._mock_write_lock.try_lock.return_value = client_pid
        with mock.patch('geopmdpy.pio.save_control_dir'), \
                mock.patch('os.getsid', return_value=client_pid), \
                mock.patch('geopmdpy.pio.restore_control_dir') as mock_restore_control_dir:
            self._platform_service.restore_control(client_pid)
            save_dir = os.path.join(self._platform_service._RUN_PATH,
                                    self._platform_service._SAVE_DIR)
            mock_restore_control_dir.assert_called_once_with(save_dir)

    def test_get_cache(self):
        """TopoService.get_cache() creates the cache then reads the file."""
        topo = mock.MagicMock()
        topo_service = TopoService(topo=topo)
        mock_open = mock.mock_open(read_data='data')
        cache_file = '/run/geopm/geopm-topo-cache'
        with mock.patch('builtins.open', mock_open):
            cache_data = topo_service.get_cache()
            self.assertEqual('data', cache_data)
            topo.assert_has_calls([mock.call.create_cache()])
            calls = [mock.call(cache_file),
                     mock.call().__enter__(),
                     mock.call().read(),
                     mock.call().__exit__(None, None, None)]
            mock_open.assert_has_calls(calls)
# Allow running this test module directly: `python TestPlatformService.py`.
if __name__ == '__main__':
    unittest.main()
| geopm/geopm | service/geopmdpy_test/TestPlatformService.py | TestPlatformService.py | py | 21,394 | python | en | code | 79 | github-code | 6 | [
{
"api_name": "unittest.mock.patch",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "unittest.mock.MagicMock",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "unittest.mock.... |
26038294916 | import inspect
import json
import sys
from pathlib import Path
import pytest
from _pytest import fixtures
from _pytest.compat import get_real_func
def get_func_path(func):
    """Return the source file of ``func``, unwrapping pytest decorators first."""
    return inspect.getfile(get_real_func(func))
def get_fixturedef(fixture_request, name):
    """Resolve the FixtureDef for *name* on *fixture_request*.

    Prefers the definition already cached on the request; otherwise falls
    back to pytest's resolution machinery. Returns None when the fixture
    cannot be resolved at all.
    """
    cached = fixture_request._fixture_defs.get(name)
    if cached:
        return cached
    try:
        return fixture_request._getnextfixturedef(name)
    except fixtures.FixtureLookupError:
        return None
def process_fixtures(item):
    """Collect JVM lockfile fixture definitions reachable from one test item.

    Walks every fixture name visible to *item*, keeps only fixture functions
    annotated to return "JVMLockfileFixtureDefinition", invokes them, and
    returns a list of dicts describing each lockfile (relative path,
    requirement coordinates, and the defining test file).
    """
    lockfile_definitions = []
    fixture_request = fixtures.FixtureRequest(item, _ispytest=True)
    for fixture_name in fixture_request.fixturenames:
        fixture_def = get_fixturedef(fixture_request, fixture_name)
        if not fixture_def:
            continue
        func = fixture_def.func
        # The return annotation is a string (forward reference), so compare by name.
        annotations = getattr(func, "__annotations__")
        if not annotations or annotations.get("return") != "JVMLockfileFixtureDefinition":
            continue
        # Note: We just invoke the fixture_def function assuming it takes no arguments. The other two
        # ways of invoking for the fixture value cause errors. I have left them here commented-out as an example
        # of what failed:
        #   lockfile_definition = fixture_request.getfixturevalue(fixture_name)
        #   lockfile_definition = fixture_def.execute(request=request)
        try:
            lockfile_definition = func()
        except Exception as err:
            raise ValueError(
                f"Exception while getting lockfile definition (file {item.path}): {err}"
            )
        # Guard against unrelated functions that share the annotation string.
        if lockfile_definition.__class__.__name__ != "JVMLockfileFixtureDefinition":
            continue
        cwd = Path.cwd()
        func_path = Path(get_func_path(func)).relative_to(cwd)
        lockfile_definitions.append(
            {
                "lockfile_rel_path": str(lockfile_definition.lockfile_rel_path),
                "requirements": [c.to_coord_str() for c in lockfile_definition.requirements],
                "test_file_path": str(func_path),
            }
        )
    return lockfile_definitions
class CollectionPlugin:
    """Pytest plugin that records every collected test item for later processing."""

    def __init__(self):
        self.collected = []

    def pytest_collection_modifyitems(self, items):
        # Hook called by pytest after collection; remember all items in order.
        self.collected.extend(items)
collection_plugin = CollectionPlugin()
# --setup-only makes pytest collect and report fixtures without running test bodies;
# extra CLI arguments are forwarded unchanged.
pytest.main(["--setup-only", *sys.argv[1:]], plugins=[collection_plugin])
output = []
# NOTE(review): cwd is unused here (process_fixtures computes its own) -- confirm leftover.
cwd = Path.cwd()
for item in collection_plugin.collected:
    output.extend(process_fixtures(item))
# Persist all discovered lockfile definitions for the calling build step.
with open("tests.json", "w") as f:
    f.write(json.dumps(output))
| pantsbuild/pants | pants-plugins/internal_plugins/test_lockfile_fixtures/collect_fixtures.py | collect_fixtures.py | py | 2,655 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "_pytest.compat.get_real_func",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "inspect.getfile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "_pytest.fixtures.FixtureLookupError",
"line_number": 22,
"usage_type": "attribute"
},
{
... |
6323414106 | from contextlib import suppress
import random
import asyncio
from typing import *
import traceback
import hikari
from hikari import Embed
import lightbulb
from lightbulb import events, errors
from lightbulb.context import Context
from core import Inu
from utils.language import Human
from .help import OutsideHelp
from core import getLogger, BotResponseError, Inu, InteractionContext
log = getLogger("Error Handler")
pl = lightbulb.Plugin("Error Handler")
# Module-level bot reference; assigned in load() when the extension is loaded.
bot: Inu

# Light-hearted messages shown to users when an unexpected error occurs.
ERROR_JOKES = [
    "Wait, there is a difference between beta and production?",
    "Seems like someone was to lazy to test me -- _again_",
    "Y'know: _my_ ordinary life is generating errors",
    "You expected me to work properly? Oh please ...",
    (
        "Y'know I can smell your disappointment. It's right about here: ```\n"
        "good bad\n"
        " |--------------------|\n"
        " ^\n```"
    )
]
async def on_exception(event: hikari.ExceptionEvent):
    """Log any exception hikari surfaces outside of command handling."""
    # Not a user-facing error; just record the full traceback.
    try:
        formatted = "".join(traceback.format_exception(event.exception))
        log.error(formatted)
    except Exception:
        # Formatting itself failed -- fall back to the active exception context.
        log.critical(traceback.format_exc())
@pl.listener(events.CommandErrorEvent)
async def on_error(event: events.CommandErrorEvent):
    """Central handler for lightbulb command errors.

    Known user errors (missing arguments, cooldowns, converter failures,
    failed checks, ...) are answered directly. Anything else is presented
    to the user as a bug embed that can optionally be forwarded -- with or
    without a note -- to the developers' bug channel.
    """
    try:
        ctx: Context | None = event.context
        if not isinstance(ctx, Context):
            log.debug(f"Exception uncaught: {event.__class__}")
            return
        error = event.exception

        async def message_dialog(error_embed: hikari.Embed):
            # Interactive bug-report flow: show the error with report buttons,
            # wait for the user's choice, then forward the report.
            error_id = f"{bot.restart_num}-{bot.id_creator.create_id()}-{bot.me.username[0]}"
            component = (
                hikari.impl.MessageActionRowBuilder()
                .add_interactive_button(
                    hikari.ButtonStyle.PRIMARY,
                    "error_send_dev_silent",
                    label="🍭 Send report silently"
                )
                .add_interactive_button(
                    hikari.ButtonStyle.PRIMARY,
                    "error_send_dev",
                    label="🍭 Add note & send"
                )
            )
            try:
                message = await (await ctx.respond(
                    embed=error_embed,
                    component=component
                )).message()
            except Exception:
                # Responding via the context failed (e.g. already acknowledged);
                # fall back to a plain channel message.
                message = await bot.rest.create_message(
                    ctx.channel_id,
                    embed=error_embed,
                    component=component
                )
            # NOTE(review): `check` is defined but never used -- confirm leftover.
            def check(event: hikari.ReactionAddEvent):
                if event.user_id != bot.me.id and event.message_id == message.id:
                    return True
                return False
            custom_id, _, interaction = await bot.wait_for_interaction(
                custom_ids=["error_send_dev", "error_show", "error_send_dev_silent"],
                message_id=message.id,
                user_id=ctx.user.id
            )
            # await interaction.delete_message(message)
            # Build the report embeds: a summary first, then the traceback
            # spread over fields/embeds to respect Discord size limits.
            embeds: List[Embed] = [Embed(title=f"Bug #{error_id}", description=str(error)[:2000])]
            embeds[0].set_author(
                name=f'Invoked by: {ctx.user.username}',
                icon=ctx.author.avatar_url
            )
            embeds[0].add_field(
                "invoked with",
                value=(
                    f"Command: {ctx.invoked_with}\n"
                    "\n".join([f"`{k}`: ```\n{v}```" for k, v in ctx.raw_options.items()])
                )[:1000]
            )
            nonlocal event
            traceback_list = traceback.format_exception(*event.exc_info)
            if len(traceback_list) > 0:
                log.warning(str("\n".join(traceback_list)))
            error_embed.add_field(
                name=f'{str(error.__class__)[8:-2]}',
                value=f'Error:\n{error}'[:1024],
            )
            i = 0
            for index, tb in enumerate(traceback_list):
                # Start a fresh embed when the current one exceeds the 6000-char
                # embed limit or already holds 20 traceback fields.
                if embeds[-1].total_length() > 6000:
                    field = embeds[-1]._fields.pop(-1)
                    embeds.append(Embed(description=f"Bug #{error_id}"))
                    embeds[-1]._fields.append(field)
                    i = 0
                if i % 20 == 0 and i != 0:
                    embeds.append(Embed(description=f"Bug #{error_id}"))
                embeds[-1].add_field(
                    name=f'Traceback - layer {index + 1}',
                    value=f'```python\n{Human.short_text_from_center(tb, 1000)}```',
                    inline=False
                )
                i += 1
            # Group embeds into messages of at most 6000 total characters;
            # empty field values are replaced so Discord accepts them.
            messages: List[List[Embed]] = [[]]
            message_len = 0
            for e in embeds:
                for field in e._fields:
                    if not field.value:
                        field.value = "-"
                if message_len == 0:
                    messages[-1].append(e)
                    message_len += e.total_length()
                else:
                    if message_len + e.total_length() > 6000:
                        messages.append([e])
                        message_len = e.total_length()
                    else:
                        messages[-1].append(e)
                        message_len += e.total_length()
            kwargs: Dict[str, Any] = {"embeds": embeds}
            answer = ""
            if custom_id == "error_show":
                await message.edit(embeds=embeds)
            if custom_id == "error_send_dev":
                # Ask the reporter for an optional note; "/" means "no note".
                try:
                    answer, interaction, event = await bot.shortcuts.ask_with_modal(
                        f"Bug report",
                        question_s="Do you have additional information?",
                        interaction=interaction,
                        pre_value_s="/",
                    )
                except asyncio.TimeoutError:
                    answer = "/"
                if answer == "/":
                    answer = ""
            kwargs["content"] = f"**{40*'#'}\nBug #{error_id}\n{40*'#'}**\n\n\n{Human.short_text(answer, 1930)}"
            del kwargs["embeds"]
            # Send the report to the bug channel; only the first message
            # carries the header content.
            for i, embeds in enumerate(messages):
                if i == 0:
                    message = await bot.rest.create_message(
                        channel=bot.conf.bot.bug_channel_id,
                        embeds=embeds,
                        **kwargs
                    )
                else:
                    message = await bot.rest.create_message(
                        channel=bot.conf.bot.bug_channel_id,
                        embeds=embeds,
                    )
            if interaction:
                with suppress():
                    await interaction.create_initial_response(
                        hikari.ResponseType.MESSAGE_CREATE,
                        content=(
                            f"**Bug #{error_id}** has been reported.\n"
                            f"You can find the bug report [here]({message.make_link(message.guild_id)})\n"
                            f"If you can't go to this message, or need additional help,\n"
                            f"consider to join the [help server]({bot.conf.bot.guild_invite_url})"
                        ),
                        flags=hikari.MessageFlag.EPHEMERAL,
                    )
            return

        # errors which will be handled also without prefix
        if isinstance(error, errors.NotEnoughArguments):
            return await OutsideHelp.search(
                obj=ctx.invoked_with,
                ctx=ctx,
                message=(
                    f"to use the `{ctx.invoked.qualname}` command, "
                    f"I need {Human.list_([o.name for o in error.missing_options], '`')} to use it"
                ),
                only_one_entry=True,
            )
        elif isinstance(error, errors.CommandIsOnCooldown):
            return await ctx.respond(
                f"You have used `{ctx.invoked.qualname}` to often. Retry it in `{error.retry_after:.01f} seconds` again"
            )
        elif isinstance(error, errors.ConverterFailure):
            return await OutsideHelp.search(
                obj=ctx.invoked_with,
                ctx=ctx,
                message=(
                    f"the option `{error.option.name}` has to be {Human.type_(error.option.arg_type, True)}"
                ),
                only_one_entry=True,
            )
        elif isinstance(error, errors.MissingRequiredPermission):
            return await ctx.respond(
                f"You need the `{error.missing_perms.name}` permission, to use `{ctx.invoked_with}`",
                flags=hikari.MessageFlag.EPHEMERAL,
            )
        elif isinstance(error, errors.CheckFailure):
            fails = set(
                str(error)
                .replace("Multiple checks failed: ","")
                .replace("This command", f"`{ctx.invoked_with}`")
                .split(", ")
            )
            if len(fails) > 1:
                # NOTE(review): str_fails is built but the unnumbered `fails`
                # set is sent instead -- confirm which was intended.
                str_fails = [f"{i+1}: {e}"
                             for i, e in enumerate(fails)
                             ]
                return await ctx.respond(
                    "\n".join(fails)
                )
            else:
                return await ctx.respond(fails.pop())
        elif isinstance(error, errors.CommandInvocationError) and isinstance(error.original, BotResponseError):
            try:
                return await ctx.respond(**error.original.kwargs)
            except hikari.BadRequestError:
                # interaction probably already acknowledged
                # TODO: implement Error handling into InuContext
                ctx._responded = True
                return await ctx.respond(**error.original.kwargs)
        # errors which will only be handled, if the command was invoked with a prefix
        if not ctx.prefix:
            return # log.debug(f"Suppress error of type: {error.__class__.__name__}")
        if isinstance(error, errors.CommandNotFound):
            return await OutsideHelp.search(
                obj=error.invoked_with,
                ctx=ctx,
                message=f"There is no command called `{error.invoked_with}`\nMaybe you mean one from the following ones?"
            )
        else:
            # Unknown error: show the interactive bug-report dialog.
            error_embed = hikari.Embed()
            error_embed.title = "Oh no! A bug occurred"
            error_embed.description = random.choice(ERROR_JOKES)
            with suppress(hikari.ForbiddenError):
                await message_dialog(error_embed)
    except Exception:
        # The error handler itself must never raise.
        log.critical(traceback.format_exc())
def load(inu: Inu):
    """Lightbulb extension entry point: bind the bot and register the plugin."""
    global bot
    bot = inu

    # Catch-all logger for exceptions raised outside of command handling.
    @bot.listen(hikari.events.ExceptionEvent)
    async def on_error(event: hikari.events.ExceptionEvent) -> None:
        try:
            log.error(f"{''.join(traceback.format_exception(event.exception))}")
        except Exception:
            log.critical(traceback.format_exc())
    inu.add_plugin(pl)
| zp33dy/inu | inu/ext/commands/errors.py | errors.py | py | 10,996 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "core.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "lightbulb.Plugin",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "core.Inu",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "hikari.ExceptionEvent",
... |
40315905815 | ##############################################
# Q1 --- find sum of all inputs
##############################################
# Read input - (f.read() for char-by-char read) & (loop file object for LINE-by-LINE reading)
# Part 1: read one frequency change per line and total them up.
with open('./1201.in', 'r') as f:
    freqList = [line.strip() for line in f]

summed = sum(int(change) for change in freqList)
# print(summed)

# Part 2: repeat the input indefinitely (itertools.cycle) and report the
# first running total that is ever seen twice.
from itertools import cycle

cumulative, visited = 0, set()
for change in cycle(freqList):
    visited.add(cumulative)
    cumulative += int(change)
    if cumulative in visited:
        print(cumulative)
        break
| hdd2k/adventOfCode | 2018/01/1201.py | 1201.py | py | 924 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "functools.reduce",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_number": 23,
"usage_type": "call"
}
] |
8413409544 | import numpy as np
from itertools import product
from vgc_project.maze import Maze
def test_basic_maze_properties():
    """Smoke-test core Maze mechanics on a tiny 3x2 grid.

    Checks the state/action spaces, per-step rewards, and the transition
    dynamics for grid edges, walls, and the absorbing (terminal) feature.
    """
    pw=.94
    ps=.6
    m = Maze(
        tile_array=(
            ".j.",
            "x#3"
        ),
        absorbing_features=("j",),
        wall_features=("#","3"),
        default_features=(".",),
        initial_features=("x",),
        step_cost=-1,
        wall_bump_cost=0,
        wall_block_prob=pw,
        success_prob=ps,
        discount_rate=1.0,
        include_action_effect=True,
        include_wall_effect=True,
        include_terminal_state_effect=True,
        wall_bias=0.
    )
    # all locations are part of state space
    assert list(m.state_list) == list(product(range(3), range(2)))
    # fixed action ordering
    assert list(m.action_list) == [(1, 0), (-1, 0), (0, 0), (0, 1), (0, -1)]
    right, left, wait, up, down = m.action_list
    # all non-terminal state-actions should have the step cost
    sa_rf = (m.reward_matrix*m.transition_matrix).sum(-1)
    non_term_sa_rf = sa_rf[m.nonterminal_state_vec]
    assert (non_term_sa_rf == m.step_cost).all()
    # transition function
    tf = m.transition_matrix
    nss = tf.shape[0]
    aa = m.action_list
    ss = m.state_list
    # wait action (index 2) is a deterministic self-loop everywhere
    assert (tf[np.arange(nss), 2, np.arange(nss)] == 1).all()
    # going off edge of grid leads you to stay in place
    assert tf[ss.index((0, 0)), aa.index(left), ss.index((0, 0))] == 1
    assert tf[ss.index((2, 0)), aa.index(right), ss.index((2, 0))] == 1
    # dynamics of going from nonwall to nonwall
    assert tf[ss.index((0, 0)), aa.index(up), ss.index((0, 1))] == ps
    # exiting a wall into a non-wall or wall has the same probability
    # namely, we ignore the wall dynamics
    assert tf[ss.index((1, 0)), aa.index(left), ss.index((0, 0))] == ps
    assert tf[ss.index((1, 0)), aa.index(right), ss.index((2, 0))] == ps
    # dynamics of crashing into a wall
    assert np.isclose(
        tf[ss.index((0, 0)), aa.index(right), ss.index((1, 0))],
        (ps*(1 - pw))/(ps*(1 - pw) + pw*(1 - ps))
    )
    # terminal state leads to itself with probability 1
    # and being in it always gives reward 0
    assert not m.nonterminal_state_vec[ss.index((1, 1))]
    assert m.nonterminal_state_vec.sum() == 5
    assert (tf[ss.index((1, 1)), :, ss.index((1, 1))] == 1).all()
assert (sa_rf[ss.index((1, 1))] == 0).all() | markkho/value-guided-construal | vgc_project/vgc_project/tests/test_maze.py | test_maze.py | py | 2,403 | python | en | code | 20 | github-code | 6 | [
{
"api_name": "vgc_project.maze.Maze",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
... |
43272673113 | import os
import cv2
import numpy as np
import torch
from PIL import Image
import torchvision
class SegDataset(object):
def __init__(self, root, transforms):
    """Index the dataset directory.

    Expects *root* to contain parallel 'images', 'masks', and 'polygons'
    sub-directories; entries are sorted so the three lists stay aligned
    by index.
    """
    self.root = root
    # NOTE(review): transforms is stored but never applied in __getitem__ -- confirm.
    self.transforms = transforms
    # load all image files, sorting them to
    # ensure that they are aligned
    # cut_dataset_at=10000
    self.imgs = list(sorted(os.listdir(os.path.join(root, "images"))))#[:cut_dataset_at]
    self.masks = list(sorted(os.listdir(os.path.join(root, "masks"))))#[:cut_dataset_at]
    self.polys = list(sorted(os.listdir(os.path.join(root, "polygons"))))#[:cut_dataset_at]
def __getitem__(self, idx):
    """Return the (image, edge-mask) training pair for index *idx*.

    The mask is rebuilt by drawing the stored polygon contour (5 px wide);
    both image and mask are resized to 300x300 and converted to tensors.
    """
    # load image and masks
    img_path = os.path.join(self.root, "images", self.imgs[idx])
    mask_path = os.path.join(self.root, "masks", self.masks[idx])
    poly_path = os.path.join(self.root, "polygons", self.polys[idx])
    img = Image.open(img_path).convert("RGB")
    img = np.array(img)
    mask = Image.open(mask_path)
    mask = np.array(mask)
    # instances are encoded as different colors
    obj_ids = np.unique(mask)
    # first id is the background, so remove it
    obj_ids = obj_ids[1:]
    # split the color-encoded mask into a set of binary masks
    masks = mask == obj_ids[:, None, None]
    polys = np.load(poly_path)
    # get bounding box coordinates for each mask
    num_objs = len(obj_ids)
    boxes = []
    for i in range(num_objs):
        pos = np.where(masks[i])
        xmin = np.min(pos[1])
        xmax = np.max(pos[1])
        ymin = np.min(pos[0])
        ymax = np.max(pos[0])
        boxes.append([xmin, ymin, xmax, ymax])
    # convert everything into a torch.Tensor
    boxes = torch.as_tensor(boxes, dtype=torch.float32)
    # NOTE(review): `boxes` is computed but never used or returned below;
    # looks like leftover detection code -- confirm before removing.
    # Rasterize the polygon contour as the training target (value 1, 5 px wide).
    mask = np.zeros((img.shape[0], img.shape[1]))
    cv2.drawContours(mask, [polys], -1, 1, 5)
    model_input_size = (300, 300)
    img = cv2.resize(img, model_input_size)
    mask = cv2.resize(mask, model_input_size)
    img = torchvision.transforms.ToTensor()(img)
    mask = torch.tensor(np.expand_dims(mask, axis=0), dtype=torch.float)
    # if self.transforms is not None:
    #     for transform in self.transforms:
    #         print(transform)
    #         img = transform(img)
    #         mask = transform(target)
    return img, mask
def __len__(self):
return len(self.imgs) | v7labs/deeplabv3-edges | dataset.py | dataset.py | py | 2,535 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": ... |
38777260786 | import pandas as pd
import os
import numpy as np
from math import floor
from sqlalchemy import create_engine, MetaData, Table
import tushare as ts
from utils import get_all_tdx_symbols
import click
import struct
"""
读取通达信数据
"""
class TdxFileNotFoundException(Exception):
    """Raised when the requested TDX vipdoc data file does not exist on disk."""
    pass
class TdxReader:
    """Reader for TDX (通达信) binary market-data files under a vipdoc directory.

    Daily bars live in <vipdoc>/<exchange>/lday/<exchange><code>.day and
    1-minute bars in <vipdoc>/<exchange>/minline/<exchange><code>.lc1;
    both are sequences of fixed-size packed records.
    """
    def __init__(self, vipdoc_path):
        self.vipdoc_path = vipdoc_path
        # SQLAlchemy engine; created lazily by save_minute_line().
        self.engine = None
    def get_kline_by_code(self, code, exchange):
        """Return an iterator of raw daily-bar record tuples for one symbol."""
        fname = os.path.join(self.vipdoc_path, exchange)
        fname = os.path.join(fname, 'lday')
        fname = os.path.join(fname, '%s%s.day' % (exchange, code))
        return self.parse_data_by_file(fname)
    def get_mline_by_code(self, code, exchange):
        """Return an iterator of raw 1-minute-bar record tuples for one symbol."""
        fname = os.path.join(self.vipdoc_path, exchange)
        fname = os.path.join(fname, 'minline')
        fname = os.path.join(fname, '%s%s.lc1' % (exchange, code))
        return self.parse_mdata_by_file(fname)
    def parse_data_by_file(self, fname):
        """Unpack a .day file (32-byte little-endian records) into tuples.

        Raises TdxFileNotFoundException when *fname* does not exist.
        """
        if not os.path.isfile(fname):
            raise TdxFileNotFoundException('no tdx kline data, please check path %s', fname)
        with open(fname, 'rb') as f:
            content = f.read()
            return self.unpack_records('<iiiiifii', content)
        return []  # NOTE(review): unreachable -- the with-block always returns
    def parse_mdata_by_file(self, fname):
        """Unpack a .lc1 file (32-byte little-endian records) into tuples.

        Raises TdxFileNotFoundException when *fname* does not exist.
        """
        if not os.path.isfile(fname):
            raise TdxFileNotFoundException("no tdx mline data, please check path %s", fname)
        with open(fname, 'rb') as f:
            content = f.read()
            return self.unpack_records('<HHfffffIxxxx', content)
        return []  # NOTE(review): unreachable -- the with-block always returns
    def unpack_records(self, format, data):
        """Yield tuples by repeatedly applying struct *format* across *data*."""
        record_struct = struct.Struct(format)
        return (record_struct.unpack_from(data, offset)
                for offset in range(0, len(data), record_struct.size))
    def get_df(self, code, exchange):
        """Return daily bars as a date-indexed OHLCV DataFrame."""
        data = [self._df_convert(row) for row in self.get_kline_by_code(code, exchange)]
        df = pd.DataFrame(data=data, columns=('date', 'open', 'high', 'low', 'close', 'amount', 'volume'))
        df.index = pd.to_datetime(df.date)
        return df[['open', 'high', 'low', 'close', 'volume']]
    def get_mindf(self, code, exchange):
        """Return 1-minute bars as a datetime-indexed OHLC/amount/volume DataFrame."""
        data = [self._mindf_convert(row) for row in self.get_mline_by_code(code, exchange)]
        df = pd.DataFrame(data=data, columns=('datetime', 'open', 'high', 'low', 'close', 'amount', 'volume'))
        try:
            df.index = pd.to_datetime(df.datetime)
        except ValueError as err:
            # Surface the offending values before re-raising for easier debugging.
            print("ValueError: ", df.datetime)
            raise err
        return df[['open', 'high', 'low', 'close', 'amount', 'volume']]
    def _df_convert(self, row):
        """Convert a raw daily record: YYYYMMDD int to ISO date; prices are in cents."""
        t_date = str(row[0])
        datestr = t_date[:4] + "-" + t_date[4:6] + "-" + t_date[6:]
        new_row = (
            datestr,
            row[1] * 0.01,  # * 0.01 * 1000 , zipline need 1000 times to original price
            row[2] * 0.01,
            row[3] * 0.01,
            row[4] * 0.01,
            row[5],
            row[6]
        )
        return new_row
    def _mindf_convert(self, row):
        """Convert a raw minute record: unpack TDX packed date/time into a timestamp."""
        t_date = row[0]
        # Date field packs (year - 2004) * 2048 + month * 100 + day.
        year = floor(t_date / 2048) + 2004
        month = floor((t_date % 2048) / 100)
        day = (t_date % 2048) % 100
        datestr = "%d-%02d-%02d" % (year, month, day)
        # Time field is minutes since midnight.
        t_minute = row[1]
        hour = floor(t_minute / 60)
        minute = t_minute % 60
        timestr = "%02d:%02d:00" % (hour, minute)
        datetimestr = "%s %s" % (datestr, timestr)
        new_row = (
            datetimestr,
            row[2],
            row[3],
            row[4],
            row[5],
            row[6],
            row[7]
        )
        return new_row
    def to_sql(self, symbol, exchange):
        """Append new minute bars for one symbol into its own SQL table."""
        table_name = exchange+symbol
        table = Table(table_name, MetaData(bind=self.engine))
        new = self.get_mindf(symbol, exchange)
        if table.exists():
            old = pd.read_sql_table(exchange+symbol, self.engine, index_col='datetime')
            if new.index[-1] <= old.index[-1]:
                # Nothing newer than what is already stored.
                return
            else:
                # NOTE(review): this label slice is inclusive of old.index[-1],
                # so the last stored row may be appended again -- confirm.
                df_to_append = new[old.index[-1]:]
        else:
            df_to_append = new
        df_to_append.to_sql(table_name, self.engine, if_exists='append')
    def save_minute_line(self, sql_url):
        """Dump minute bars for every known TDX symbol into the DB at *sql_url*."""
        self.engine = create_engine(sql_url)
        tdx_symbol_list = get_all_tdx_symbols()
        total = len(tdx_symbol_list)
        i = 0
        for symbol in tdx_symbol_list:
            i += 1
            click.echo("saving symbol %s%s (%d/%d)" %(symbol[1], symbol[0], i, total))
            self.to_sql(symbol=symbol[0], exchange=symbol[1])
@click.command()
@click.argument('vipdoc', type=click.Path(exists=True))
@click.argument('sql_url', type=click.Path())
def main(vipdoc, sql_url):
    """CLI entry point: dump TDX 1-minute bars from VIPDOC into an SQLite file."""
    click.echo('minute line saving...')
    tdx_reader = TdxReader(vipdoc)
    tdx_reader.save_minute_line("sqlite:///" + sql_url)

if __name__ == '__main__':
    main()
#tdx_reader = TdxReader('c:\\new_zx_allin1\\vipdoc\\')
# try:
# #for row in tdx_reader.parse_data_by_file('/Volumes/more/data/vipdoc/sh/lday/sh600000.day'):
# # print(row)
# for row in tdx_reader.get_mline_by_code('600433', 'sh'):
# print(row)
# except TdxFileNotFoundException as e:
# pass
#
# print(tdx_reader.get_mindf('600433', 'sh'))
#sql_url = "sqlite:///lc1.db"
#tdx_reader.save_minute_line(sql_url=sql_url)
| maxwell-lv/MyQuant | tdxreader.py | tdxreader.py | py | 5,452 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2... |
38358981811 | import logging
from enum import Enum
import openai
from fastapi import Query
from openai.error import AuthenticationError, InvalidRequestError, RateLimitError
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
retry_if_exception_type,
) # for exponential backoff
from app.config.messages import (
model_description,
max_tokens_description,
temperature_description,
top_p_description,
presence_penalty_description,
frequency_penalty_description,
)
class Model(Enum):
    """OpenAI completion models selectable through this endpoint."""
    TEXT_DAVINCI_003 = "text-davinci-003"
@retry(
    wait=wait_random_exponential(min=2, max=5),
    stop=stop_after_attempt(5),
    retry=retry_if_exception_type(RateLimitError),
)
async def completions_with_backoff(**kwargs):
    """Call the OpenAI Completion API with retry on rate limiting.

    Retries up to 5 attempts with randomized exponential backoff (2-5 s)
    and only for RateLimitError; other exceptions propagate immediately.
    """
    return await openai.Completion.acreate(**kwargs)
async def get_completions(
    api_key: str,
    message: str,
    model: Model = Query(Model.TEXT_DAVINCI_003, description=model_description),
    max_tokens: int = Query(2048, description=max_tokens_description),
    temperature: float = Query(1, description=temperature_description),
    top_p: float = Query(1, description=top_p_description),
    presence_penalty: float = Query(0.5, description=presence_penalty_description),
    frequency_penalty: float = Query(0.5, description=frequency_penalty_description),
):
    """Request a completion for *message* and return the generated text.

    On any failure a human-readable (partly Korean) error string is
    returned instead of raising, so callers can forward it to the user.
    """
    openai.api_key = api_key
    # https://platform.openai.com/docs/api-reference/completions
    try:
        result = await completions_with_backoff(
            model=model.value,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            prompt=message,
            request_timeout=60,
        )
    except AuthenticationError as e:
        logging.error(e)
        return "The token is invalid."
    except InvalidRequestError as e:
        logging.error(e)
        if "This model's maximum context length is 4097 tokens" in str(e):
            return "너무 긴 답변을 유도하셨습니다."
        else:
            return "오류가 발생했습니다 :sob: 다시 시도해 주세요."
    except Exception as e:
        logging.exception(e)
        return "오류가 발생했습니다 :sob: 다시 시도해 주세요."
    try:
        return result.get("choices")[0].get("text")
    except (KeyError, IndexError, TypeError) as e:
        # A missing "choices" key makes .get() return None (TypeError on
        # indexing) and an empty list raises IndexError; the previous code
        # only caught KeyError and would crash on malformed responses.
        logging.exception(e)
        return "오류가 발생했습니다 :sob: 다시 시도해 주세요."
| jybaek/Hello-ChatGPT | app/services/openai_completions.py | openai_completions.py | py | 2,522 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "openai.Completion.acreate",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "openai.Completion",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "tenacity.r... |
10865691548 | from django.contrib import admin
from django.db import models
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
from ...settings import ADMIN_MEDIA_JS
from .actions import save_all_theses_to_xls
from .models import Thesis
# Copied from https://gist.github.com/rafen/eff7adae38903eee76600cff40b8b659,
# also present in theses admin and jobs admin
class ExtendedActionsMixin(object):
    """ModelAdmin mixin allowing selected actions to run with no rows checked.

    Actions named in `extended_actions` may be triggered without any
    checkbox selected; the queryset produced by the current changelist
    filters is used instead (see get_filtered_queryset()).
    """
    # actions that can be executed with no items selected on the admin change list.
    # The filtered queryset displayed to the user will be used instead
    extended_actions = []
    def changelist_view(self, request, extra_context=None):
        # if a extended action is called and there's no checkbox selected, select one with
        # invalid id, to get an empty queryset
        if "action" in request.POST and request.POST["action"] in self.extended_actions:
            if not request.POST.getlist(admin.ACTION_CHECKBOX_NAME):
                post = request.POST.copy()
                post.update({admin.ACTION_CHECKBOX_NAME: 0})
                request._set_post(post)
        return super(ExtendedActionsMixin, self).changelist_view(request, extra_context)
    def get_changelist_instance(self, request):
        """
        Returns a simple ChangeList view instance of the current ModelView.
        (It's a simple instance since we don't populate the actions and list filter
        as expected since those are not used by this class)
        """
        list_display = self.get_list_display(request)
        list_display_links = self.get_list_display_links(request, list_display)
        list_filter = self.get_list_filter(request)
        search_fields = self.get_search_fields(request)
        list_select_related = self.get_list_select_related(request)
        ChangeList = self.get_changelist(request)
        return ChangeList(
            request,
            self.model,
            list_display,
            list_display_links,
            list_filter,
            self.date_hierarchy,
            search_fields,
            list_select_related,
            self.list_per_page,
            self.list_max_show_all,
            self.list_editable,
            self,
            self.sortable_by,
        )
    def get_filtered_queryset(self, request):
        """
        Returns a queryset filtered by the URLs parameters
        """
        cl = self.get_changelist_instance(request)
        return cl.get_queryset(request)
class ThesisListFilter(admin.SimpleListFilter):
    """Changelist filter on whether a Thesis has an empty title."""

    title = _("empty thesis title")
    parameter_name = "have_title"

    def lookups(self, request, model_admin):
        # (URL value, human-readable label) pairs shown in the sidebar.
        return (
            ("yes", _("Yes")),
            ("no", _("No")),
        )

    def queryset(self, request, queryset):
        selected = self.value()
        if selected == "yes":
            # Titles that are NULL or the empty string.
            return queryset.filter(Q(title__isnull=True) | Q(title__exact=""))
        if selected == "no":
            # Titles that are present and non-empty.
            return queryset.filter(title__isnull=False).exclude(title="")
@admin.register(Thesis)
class ThesisAdmin(ExtendedActionsMixin, admin.ModelAdmin):
    """Admin for Thesis records: list display, search, and Excel export actions.

    Fix: this class declares `extended_actions` and calls
    `self.get_filtered_queryset(request)`, both of which are provided only
    by ExtendedActionsMixin (defined above in this module), yet it did not
    inherit from the mixin -- so the "all"/"filtered" exports could not run
    without a selection and get_filtered_queryset raised AttributeError.
    Adding the mixin to the bases is backward-compatible for all callers.
    """
    list_display = ("get_author", "title", "show_year", "type")
    list_filter = ("type", ThesisListFilter)
    search_fields = (
        "title",
        "alumnus__last_name",
        "alumnus__first_name",
        "date_start",
        "date_stop",
        "date_of_defence",
    )
    ordering = ("alumnus__username",)
    filter_horizontal = ("advisor",)
    readonly_fields = (
        "date_created",
        "date_updated",
        "last_updated_by",
        "slug",
    )
    actions = (
        "export_selected_degrees_to_excel",
        "export_all_degrees_to_excel",
        "export_filtered_degrees_to_excel",
    )
    # Actions that may run with no rows checked (consumed by ExtendedActionsMixin).
    extended_actions = (
        "export_all_degrees_to_excel",
        "export_filtered_degrees_to_excel",
    )
    # NOTE(review): max_num is an InlineModelAdmin option and has no effect
    # on a plain ModelAdmin -- confirm intent.
    max_num = 2
    fieldsets = [
        (
            "Thesis Information",
            {"fields": ["alumnus", "type", "date_start", "date_stop"]},
        ),
        (
            "Thesis Information",
            {
                "fields": [
                    "title",
                    "date_of_defence",
                    "url",
                    "dissertation_nr",
                    "slug",
                    "in_library",
                ]
            },
        ),
        ("Thesis Advisor ", {"fields": ["advisor"]}),
        ("Full Text and Cover Photo", {"fields": ["pdf", "photo"]}),
        (
            "Extra information",
            {
                "classes": ["collapse"],
                "fields": [
                    "comments",
                    "date_created",
                    "date_updated",
                    "last_updated_by",
                ],
            },
        ),
    ]

    class Media:
        js = ADMIN_MEDIA_JS
        css = {"all": ("css/admin_extra.css",)}

    def save_model(self, request, obj, form, change):
        # Stamp the editing user before persisting.
        obj.last_updated_by = request.user
        obj.save()

    # def changelist_view(self, request, extra_context=None):
    #     """ Hack the default changelist_view to allow action "export_all_degrees_to_excel"
    #     to run without selecting any objects """
    #     if "action" in request.POST and request.POST["action"] == "export_all_degrees_to_excel":
    #         if not request.POST.getlist(admin.ACTION_CHECKBOX_NAME):
    #             post = request.POST.copy()
    #             for u in Thesis.objects.all():
    #                 post.update({admin.ACTION_CHECKBOX_NAME: str(u.id)})
    #             request._set_post(post)
    #     return super(ThesisAdmin, self).changelist_view(request, extra_context)

    def get_queryset(self, request):
        """This function defines how to sort on alumnus column in the list_display
        http://stackoverflow.com/a/29083623"""
        qs = super(ThesisAdmin, self).get_queryset(request)
        qs = qs.annotate()
        # TODO: this does not take into account the type of the Thesis. Also, when
        # filtering on type = "PhD" ordering of the Theses could be done on the MSc Thesis
        qs = qs.annotate(
            sort_author=models.Count("alumnus__last_name", distinct=True)
        ).annotate(
            sort_year=models.Count("alumnus__theses__date_of_defence", distinct=True)
        )
        return qs

    # def formfield_for_manytomany(self, db_field, request, **kwargs):
    #     try:  # Breaks for add thesis
    #         current_thesis = Thesis.objects.get(pk=request.resolver_match.args[0])
    #         if db_field.name == "advisor":
    #             kwargs["queryset"] = Alumnus.objects.exclude(username=current_thesis.alumnus.username)
    #         return super(ThesisAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
    #     except IndexError as e:
    #         if str(e) == "tuple index out of range":
    #             pass

    def get_author(self, obj):
        """We could use author instead of get_alumnus in list_display"""
        return obj.alumnus.full_name

    get_author.short_description = "Author"
    get_author.admin_order_field = "sort_author"

    def show_year(self, obj):
        # Prefer the defence date; fall back to the project end date.
        if obj.date_of_defence:
            return obj.date_of_defence.strftime("%Y")
        elif obj.date_stop:
            return obj.date_stop.strftime("%Y")
        return None

    show_year.short_description = "Year"
    show_year.admin_order_field = "sort_year"

    def export_selected_degrees_to_excel(self, request, queryset):
        return save_all_theses_to_xls(request, queryset)

    export_selected_degrees_to_excel.short_description = (
        "Export selected Theses to Excel"
    )

    def export_all_degrees_to_excel(self, request, queryset):
        # Passing None exports every Thesis regardless of selection.
        return save_all_theses_to_xls(request, None)

    export_all_degrees_to_excel.short_description = "Export all Theses to Excel"

    def export_filtered_degrees_to_excel(self, request, queryset):
        # Ignore the checkbox selection; use the current changelist filters.
        queryset = self.get_filtered_queryset(request)
        return save_all_theses_to_xls(request, queryset)

    export_filtered_degrees_to_excel.short_description = (
        "Export filtered list of Theses to Excel"
    )
| tlrh314/UvA_API_Alumni | apiweb/apps/research/admin.py | admin.py | py | 8,102 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "django.contrib.admin.ACTION_CHECKBOX_NAME",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ACTION_CHECKBOX_NAME",
"line_number": 24,
"usage... |
34849239284 | import sqlite3
def execute_with_output(conn, query_txt, fetch_quant="one"):
    """Run *query_txt* on SQLite connection *conn* and return the result.

    :param conn: open sqlite3 connection
    :param query_txt: SQL statement to execute
    :param fetch_quant: "one" returns a single row tuple (or None);
                        any other value returns all rows as a list
    :return: the fetched row(s), or None when a sqlite3.Error occurred
             (the error is printed, not raised)
    """
    try:
        cursor = conn.cursor()
        cursor.execute(query_txt)
        return cursor.fetchone() if fetch_quant == "one" else cursor.fetchall()
    except sqlite3.Error as e:
        print(e)
def execute_no_output(conn, query_txt):
    """Run *query_txt* on SQLite connection *conn*, discarding any result.

    :param conn: open sqlite3 connection
    :param query_txt: SQL statement to execute
    :return: None; a sqlite3.Error is printed rather than raised
    """
    try:
        conn.cursor().execute(query_txt)
    except sqlite3.Error as e:
        print(e)
print(e)
| James-Rocker/data_engineering_portfolio | working_with_sqllite/query/__init__.py | __init__.py | py | 705 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.Error",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.Error",
"line_number": 32,
"usage_type": "attribute"
}
] |
40275219447 | # %%
# %%
import pandas as pd
import plotly.io as pio
import plotly.express as px
import plotly

# %%
# One CSV of processed daily data per NSE ticker. The ticker is the file-name
# prefix; the mapped value is the display name used in the chart. Forward
# slashes replace the original backslash paths, which mixed raw and non-raw
# literals because '\N' and '\U' are illegal escapes in non-raw strings.
TICKER_TO_COMPANY = {
    'ADANIENT': 'Adani Enterprises',
    'APOLLOHOSP': 'Apollo Hospitals',
    'ASIANPAINT': 'Asian Paints',
    'BHARTIARTL': 'Bharti Airtel',
    'BAJFINANCE': 'Bajaj Finance',
    'DIVISLAB': "Divi's Laboratories",
    'DRREDDY': "Dr. Reddy's Laboratories",
    'HINDALCO': 'Hindalco Industries',
    'HDFC': 'HDFC Bank',
    'HINDUNILVR': 'Hindustan Unilever',
    'INFY': 'Infosys',
    'ITC': 'ITC',
    'LT': 'Larsen & Toubro',
    'MM': 'Mahindra & Mahindra',
    'NESTLEIND': 'Nestle India',
    'ONGC': 'Oil and Natural Gas Corporation',
    'POWERGRID': 'Power Grid Corporation of India',
    'RELIANCE': 'Reliance Industries',
    'SBIN': 'State Bank of India',
    'SUNPHARMA': 'Sun Pharmaceutical',
    'TATAMOTORS': 'Tata Motors',
    'TCS': 'Tata Consultancy Services',
    'ULTRACEMCO': 'UltraTech Cement',
    'UPL': 'United Phosphorus Limited',
    'WIPRO': 'Wipro',
}

frames = []
for ticker, company in TICKER_TO_COMPANY.items():
    frame = pd.read_csv(f'Data/Scatterplot/{ticker}_day_data_processed.csv',
                        parse_dates=['date'], index_col=['date'])
    frame['company'] = company
    frames.append(frame)

# All tickers stacked long-form, indexed by trading date.
df_stocks = pd.concat(frames, axis=0)

# %%
# Wide table with one '<company>_dollar_volume' column per company
# (dollar volume = shares traded x closing price). Kept for interactive
# inspection; the chart below does not use it.
per_company = []
for company in df_stocks['company'].unique():
    company_slice = df_stocks.loc[df_stocks['company'] == company, ['volume', 'close']].copy()
    company_slice[f'{company}_dollar_volume'] = company_slice['volume'] * company_slice['close']
    per_company.append(company_slice[[f'{company}_dollar_volume']])
df = pd.concat(per_company, axis=1)

# %%
# Long-form frame with per-day traded value. .copy() prevents pandas'
# SettingWithCopyWarning when the new column is assigned to a slice.
company_df = df_stocks[['volume', 'close', 'company']].copy()
company_df['dollar_volume'] = company_df['volume'] * company_df['close']

# %%
# Month-end aggregate per company: summed volume / dollar volume, mean close.
# The index is duplicated into a 'date' column for the animation frames.
monthly_frames = []
for company in df_stocks['company'].unique():
    mask = company_df['company'] == company
    monthly = pd.DataFrame()
    monthly['dollar_volume'] = company_df.loc[mask, 'dollar_volume'].resample('M').sum()
    monthly['volume'] = company_df.loc[mask, 'volume'].resample('M').sum()
    monthly['close'] = company_df.loc[mask, 'close'].resample('M').mean()
    monthly['date'] = monthly.index
    monthly['company'] = company
    monthly_frames.append(monthly)
d = pd.concat(monthly_frames)

# %%
# Attach each company's sector with one vectorised lookup instead of the
# original per-company scan over the sectors frame.
sectors = pd.read_csv('Data/sectors.csv')
d['sector'] = d['company'].map(sectors.set_index('company')['sector'])

# %%
# Animated bubble chart: price (x, log) vs volume (y, log); bubble size is
# dollar volume, colour is sector, one animation frame per month.
fig = px.scatter(d, x="close", y="volume", animation_frame="date", animation_group="company", template='plotly_white',
                 size='dollar_volume', color="sector", hover_name="company", size_max=60, log_y=True, log_x=True, range_x=[60, 21000], range_y=[250000, 5994900000])
fig.update_layout(
    title='Sectorwise Volume Data',
    title_x=0.44,
    yaxis_title='Volume',
    xaxis_title='Price',
    height=600,
    # width=1200,
)

# Quadrant guide lines at the overall mean price / mean volume.
x_avg = d['close'].mean()
y_avg = d['volume'].mean()
fig.add_vline(x=x_avg, line_width=1, opacity=0.9)
fig.add_hline(y=y_avg, line_width=1, opacity=1)

# Quadrant labels anchored to the paper (figure) coordinate system.
fig.add_annotation(dict(font=dict(color="black", size=14),
                        x=0, y=-0.14,
                        text="Low Volume - Low Price",
                        xref='paper',
                        yref='paper',
                        showarrow=False))
fig.add_annotation(dict(font=dict(color="black", size=14),
                        x=1, y=-0.14,
                        text="Low Volume - High Price",
                        xref='paper',
                        yref='paper',
                        showarrow=False))
fig.add_annotation(dict(font=dict(color="black", size=14),
                        x=0, y=1.07,
                        text="High Volume - Low Price",
                        xref='paper',
                        yref='paper',
                        showarrow=False))
fig.add_annotation(dict(font=dict(color="black", size=14),
                        x=1, y=1.07,
                        text="High Volume - High Price",
                        xref='paper',
                        yref='paper',
                        showarrow=False))
fig.show()
plotly.offline.plot(fig, filename='scatterplot.html')
# %%
| mathewjames/covid-impact-on-indian-stock-market | scatterplot.py | scatterplot.py | py | 7,537 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"... |
70722602427 | import requests
from bs4 import BeautifulSoup
from .componentParser import ComponentParser
from .utils import isRelativePostDate, getRelativePostDate
class BlogPost:
    """Scrapes one Naver blog post and backs it up as a markdown file.

    Naver blog pages embed the real post inside an iframe (``mainFrame``);
    this class resolves that inner URL, extracts post metadata and feeds each
    ``se-component`` block to ComponentParser for markdown conversion.
    """

    # Number of posts whose component parsing reported errors.
    errorCount = 0

    def __init__(self, url, isDevMode=False):
        # Developer convenience flag: enables verbose printDevMessage() output.
        self.isDevMode = isDevMode

        # init
        self.url = url
        self.postInframeUrl = ''
        self.postEditorVersion = None
        self.postLogNum = None
        self.postDate = None
        self.postInframeSoup = None

        # init check: abort immediately on a non-Naver URL.
        if self.isForeignUrl():
            print("[INIT ERROR] URL이 잘못되었습니다. 프로그램을 종료합니다.")
            exit(-1)

    # ============================================================================================
    # Developer-convenience print helper.
    def printDevMessage(self, message):
        """Print *message* only while dev mode is enabled."""
        if self.isDevMode:
            print("[DEV MODE] " + message, end='\n')

    # Checks whether the user-supplied URL is a Naver blog URL.
    def isForeignUrl(self):
        """Return False for blog.naver.com URLs, True otherwise."""
        self.printDevMessage("isForeignUrl execution")
        if 'blog.naver.com' in self.url:
            return False
        else:
            return True

    # ============================================================================================
    def postSetup(self):
        """Resolve the inner iframe and populate soup, editor version and date."""
        try:
            self.printDevMessage("== postSetup execution == ")
            self.postInframeUrl = self.getPostInframeUrl()
            self.postInframeSoup = self.getPostInframeSoup()
            self.postEditorVersion = self.getPostEditorVersion()
            self.postDate = self.getPostDate()
            self.printDevMessage("== postSetup is clear == ")
            # Only setup/validation happens up to this point; once complete,
            # control is handed over to run().
        except Exception as e:
            print(e)

    def getPostInframeUrl(self):
        """Return the absolute URL of the post's ``mainFrame`` iframe."""
        self.printDevMessage("== getPostInframeUrl 실행 ==")
        originHtml = requests.get(self.url).text
        originSoup = BeautifulSoup(originHtml, features="html.parser")
        # NOTE(review): assumes exactly one mainFrame iframe; with no match
        # the name below would be unbound -- confirm against real post pages.
        for link in originSoup.select('iframe#mainFrame'):
            postInframeUrl = "http://blog.naver.com" + link.get('src')
        self.printDevMessage(f'return is : {postInframeUrl}')
        return postInframeUrl

    def getPostInframeSoup(self):
        """Fetch and parse the iframe document resolved by getPostInframeUrl()."""
        self.printDevMessage("== getPostInframeSoup execution ==")
        if not (self.postInframeUrl == ''):
            inframeHtml = requests.get(self.postInframeUrl).text
            inframeSoup = BeautifulSoup(inframeHtml, features="html.parser")
            self.printDevMessage(f'return is : {len(inframeSoup)} links')
            return inframeSoup
        else:
            raise Exception("[ERROR] getPostInframeSoup가 정상적으로 실행되지 않았습니다.")

    def getPostEditorVersion(self):
        """Return the SmartEditor version attribute of the post container."""
        self.printDevMessage("== getPostEditorVersion execution ==")
        for link in self.postInframeSoup.select('div#post_1'):
            postEditiorVersion = link.get('data-post-editor-version')
        if postEditiorVersion == None:
            raise Exception("[ERROR] 지원하지 않는 에디터 버젼입니다.")
        self.printDevMessage(f'return is : {postEditiorVersion}')
        return postEditiorVersion

    def getPostDate(self):
        """Return the post's publish date, normalising relative dates."""
        self.printDevMessage("== getPostDate execution ==")
        links = self.postInframeSoup.select('span.se_publishDate')
        if len(links) == 0:
            raise Exception("[ERROR] 포스트 게시일을 찾지 못했습니다.")
        else:
            for link in links:
                publishDate = link.get_text()
            # Relative dates (e.g. "N hours ago") are converted to absolute form.
            if isRelativePostDate(publishDate):
                publishDate = getRelativePostDate(publishDate)
            self.printDevMessage(f'return is : {publishDate}')
            return publishDate

    # ============================================================================================
    def run(self, dirPath):
        """Convert the post to markdown at ``dirPath``/word.md.

        Returns True on success, False when any step raised.
        """
        self.printDevMessage("== run execution ==")
        self.postSetup()
        filePath = dirPath + '/' + 'word.md'
        ComponentParser.assetPath = dirPath + '/asset'
        rawComponents = self.postInframeSoup.select('div.se-component')
        try:
            with open(filePath, mode='w', encoding='utf-8') as fp:
                # Text buffer that will be written to the markdown file.
                data = ''
                for i, component in enumerate(rawComponents):
                    if i == 0:
                        # The first component always carries the document title.
                        data += ComponentParser(component, isDevMode=self.isDevMode).parsingTitle()
                        continue
                    # NOTE(review): skipSticker is fed the dev-mode flag --
                    # looks unintended; verify against ComponentParser.
                    data += ComponentParser(component, skipSticker=self.isDevMode).parsing()
                    # On the last component, append the collected hashtags.
                    if i == (len(rawComponents) - 1):
                        txt = '해시태그 : '
                        for hashTag in ComponentParser.hashTagList:
                            txt += hashTag
                        data += ' ' + txt
                # Write everything out in one go.
                fp.write(data)

            # NOTE(review): reads 'errorCounter' here but resets 'errorCount'
            # below -- confirm which attribute ComponentParser really defines.
            if ComponentParser.errorCounter != 0:
                BlogPost.errorCount += 1

            # Back-up finished: reset ComponentParser's class-level state.
            ComponentParser.hashTagList = []
            ComponentParser.counter = 0
            ComponentParser.errorCount = 0
            return True
        except Exception as e:
            print(e)
            return False
| Jeongseup/naver-blog-backer | src/naverblogbacker/post.py | post.py | py | 4,676 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"... |
29013781998 | import sys,usb,struct
# Default USB transfer timeout, in the unit expected by the device backend.
USB_TIMEOUT_DEFAULT = 1000
# Bulk IN / OUT endpoints used by the SMS1180.
SMS_EP_IN = 0x81
SMS_EP_OUT = 0x02
# Destination task id for host-interface requests.
HIF_TASK = 11


class SMS1180USB:
    """Message-framing layer over a USB device handle for the SMS1180.

    Frames carry a little-endian 8-byte header
    (request:u16, src:u8, dst:u8, total_length:u16, flags:u16)
    followed by the payload bytes.
    """

    def __init__(self, dev, timeout=USB_TIMEOUT_DEFAULT):
        self.dev = dev
        self.timeout = timeout

    def usb_read(self):
        """Read one bulk transfer from the IN endpoint; None on timeout."""
        try:
            raw = self.dev.read(SMS_EP_IN, 512, self.timeout)
        except usb.core.USBTimeoutError:
            return None
        return bytes(raw)

    def usb_write(self, data):
        """Write *data* to the OUT endpoint; False on timeout."""
        try:
            written = self.dev.write(SMS_EP_OUT, data, self.timeout)
        except usb.core.USBTimeoutError:
            return False
        return written

    def msg_send_req_ex(self, request, src_id, dst_id, flags, payload):
        """Frame and send *request* with explicit routing ids and flags."""
        header = struct.pack("<HBBHH", request, src_id, dst_id, len(payload) + 8, flags)
        self.usb_write(header + payload)

    def msg_send_req(self, request, payload=bytes([])):
        """Send *request* from the host (id 0) to the HIF task with no flags."""
        return self.msg_send_req_ex(request, 0, HIF_TASK, 0, payload)

    def msg_get_resp_ex(self):
        """Read one frame.

        Returns (response, src_id, dst_id, length, flags, payload), or the
        1-tuple (None,) when nothing valid arrived.
        """
        frame = self.usb_read()
        if frame is None or len(frame) < 8:
            return (None,)
        fields = struct.unpack("<HBBHH", frame[0:8])
        return fields + (frame[8:],)

    def msg_get_resp(self):
        """Read one frame and return (response, payload), or (None,) on failure."""
        fields = self.msg_get_resp_ex()
        if len(fields) == 1:
            return (None,)
        return fields[0], fields[5]
| fxsheep/helloworld-anyware | src/siano/sms1180/sms1180usb.py | sms1180usb.py | py | 1,474 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "usb.core",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "usb.core",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "struct.pack",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "struct.unpack",
"line_num... |
31819134582 | """
The field should look like this:
col0 col1 col2 col3 col4 col5 col6 col7 col8
||======|======|======||======|======|======||======|======|======||
|| A | A | A || B | B | B || C | C | C ||
row0||cell0 |cell1 |cell2 ||cell3 |cell4 |cell5 ||cell6 |cell7 |cell8 ||
||______|______|______||______|______|______||______|______|______||
|| A | A | A || B | B | B || C | C | C ||
row1||cell9 |cell10|cell11||cell12|cell13|cell14||cell15|cell16|cell17||
||______|______|______||______|______|______||______|______|______||
|| A | A | A || B | B | B || C | C | C ||
row2||cell18|cell19|cell20||cell21|cell22|cell23||cell24|cell25|cell26||
||======|======|======||======|======|======||======|======|======||
|| D | D | D || E | E | E || F | F | F ||
row3||cell27|cell28|cell29||cell30|cell31|cell32||cell33|cell34|cell35||
||______|______|______||______|______|______||______|______|______||
|| D | D | D || E | E | E || F | F | F ||
row4||cell36|cell37|cell38||cell39|cell40|cell41||cell42|cell43|cell44||
||______|______|______||______|______|______||______|______|______||
|| D | D | D || E | E | E || F | F | F ||
row5||cell45|cell46|cell47||cell48|cell49|cell50||cell51|cell52|cell53||
||======|======|======||======|======|======||======|======|======||
|| G | G | G || H | H | H || I | I | I ||
row6||cell54|cell55|cell56||cell57|cell58|cell59||cell60|cell61|cell62||
||______|______|______||______|______|______||______|______|______||
|| G | G | G || H | H | H || I | I | I ||
row7||cell63|cell64|cell65||cell66|cell67|cell68||cell69|cell70|cell71||
||______|______|______||______|______|______||______|______|______||
|| G | G | G || H | H | H || I | I | I ||
row8||cell72|cell73|cell74||cell75|cell76|cell77||cell78|cell79|cell80||
||======|======|======||======|======|======||======|======|======||
"""
import openpyxl
# The nine legal sudoku digits.
DIGITS = (1, 2, 3, 4, 5, 6, 7, 8, 9)

# Global board state, populated by init_structure().
rows = []      # nine CellGroups, one per row
cols = []      # nine CellGroups, one per column
squares = []   # nine CellGroups, one per 3x3 box
cells = {}     # 'cell0'..'cell80' -> Cell


class Cell:
    """One square of the grid, tracking its value and remaining candidates."""

    def __init__(self, row, col, value=''):
        self.row = row
        self.col = col
        self.value = value          # '' marks an unsolved cell
        self.isSolved = False
        # Every digit starts as a candidate; each instance owns its own list.
        self.possible_values = list(DIGITS)


class CellGroup:
    """A row, column or box: its cells plus the digits still unplaced in it."""

    def __init__(self):
        self.cells = []
        self.possible_values = list(DIGITS)
def init_structure():
    """Build the empty 9x9 grid: 81 Cells wired into row/col/box CellGroups."""
    global rows
    global cols
    global squares
    global cells

    # Nine empty groups of each kind.
    for _ in range(0, 9):
        rows.append(CellGroup())
        cols.append(CellGroup())
        squares.append(CellGroup())

    # Create cell0..cell80 and register each with its row, column and box.
    for cell_index in range(0, 81):
        row_index, col_index = divmod(cell_index, 9)
        cell = Cell(row_index, col_index)
        cells[f'cell{cell_index}'] = cell
        rows[row_index].cells.append(cell)
        cols[col_index].cells.append(cell)
        # Box index: three boxes per 3-row band, three per 3-column stack,
        # replacing the original nine-branch if/elif chain.
        squares[(row_index // 3) * 3 + col_index // 3].cells.append(cell)
def read_puzzle_xls():
    """Load the known starting values from sudoku.xlsx into the global cells."""
    global cells
    workbook = openpyxl.load_workbook('sudoku.xlsx',
                                      read_only=True,
                                      data_only=True)
    sheet = workbook.active
    for cell_name, cell in cells.items():
        # Worksheet coordinates are 1-based, grid coordinates 0-based.
        given = sheet.cell(cell.row + 1, cell.col + 1).value
        if given is not None:
            cell.value = given
            # A given digit has no remaining candidates.
            cell.possible_values.clear()
def sanity_check(cell_map=None):
    """Clear the candidate list of every cell that already holds a value.

    Bug fix: unsolved cells hold '' (see Cell's default), not None, so the
    previous ``cell.value is not None`` test was true for EVERY cell and
    wiped the candidates of unsolved cells as well.

    :param cell_map: mapping of name -> Cell to check; defaults to the
        module-level ``cells`` dict (backward compatible with the old
        zero-argument call).
    """
    if cell_map is None:
        cell_map = cells
    for cell in cell_map.values():
        # '' marks an unsolved cell; None is guarded for safety as well.
        if cell.value not in ('', None):
            cell.possible_values.clear()
def solve_group(group):
    """Run one constraint-propagation pass over a row/column/box.

    1. Digits already placed in the group are removed from the group's
       candidate pool.
    2. Each cell's candidates are intersected with that pool. Building a
       fresh filtered list fixes the original bug of calling ``remove`` on
       the list being iterated, which skipped the element after each removal.
    3. A cell left with exactly one candidate is assigned that value.

    :param group: CellGroup whose .cells and .possible_values are updated
        in place.
    """
    # Step 1: drop solved digits from the group's pool.
    for cell in group.cells:
        if cell.value != '' and cell.value in group.possible_values:
            group.possible_values.remove(cell.value)
    # Steps 2 and 3: prune each cell against the pool.
    for cell in group.cells:
        cell.possible_values = [pv for pv in cell.possible_values
                                if pv in group.possible_values]
        if len(cell.possible_values) == 1:
            cell.value = cell.possible_values.pop()
def print_puzzle_debug():
    """Print the grid row by row; each value is followed by a single space."""
    global cells
    global rows
    for row in rows:
        # Values may be ints or '' (unsolved); the trailing space per value
        # matches the original output exactly.
        line = ''.join(str(cell.value) + ' ' for cell in row.cells)
        print(line)
def solve_puzzle():
    """Solve the puzzle by repeated constraint propagation.

    One full pass over all 27 groups (rows, columns, boxes) is made for
    every initially-unsolved cell, bounding the number of passes at 81.
    """
    # Names of the cells that start without a value.
    pending = [name for name, cell in cells.items() if cell.value == '']
    # One propagation sweep per initially-unsolved cell.
    for _ in pending:
        for group in rows + cols + squares:
            solve_group(group)
# Script entry point: build the grid, load the givens, solve, and print.
init_structure()
read_puzzle_xls()
solve_puzzle()
print_puzzle_debug()
| stanislavstarkov/sudoku | sudoku.py | sudoku.py | py | 6,469 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 105,
"usage_type": "call"
}
] |
72453543549 | #!/usr/bin/python3
from tkinter import Image
import rospy
import sys
import cv2
from cv_bridge import CvBridge, CvBridgeError
class viewer:
def __init__(self):
self.bridge = CvBridge()
#
self.image_rgb_sub = rospy.Subscriber("/camera/color/image_raw",Image,self.callback)
self.image_depth_sub = rospy.Subscriber("/camera/depth/image_rect_raw",Image,self.callback)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data,"bgr8")
except CvBridgeError as error:
print(error)
cv2.imshow("ball + depth",cv_image)
cv2.waitKey(30)
def main(args):
v = viewer()
rospy.init_node("image_pub",anonymous=True)
rospy.loginfo('image_pub node started')
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
try:
main(sys.argv)
except rospy.ROSInterruptException:
pass | Yandong-Luo/hybrid | src/Vision/detect_ball/nodes/image_pub.py | image_pub.py | py | 997 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "cv_bridge.CvBridge",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "rospy.Subscriber",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tkinter.Image",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "rospy.Subscribe... |
31564356652 | import svgwrite.extensions
import faiss
import numpy as np
import matplotlib.pyplot as plt
import svgwrite
import networkx as nx
import src.particle_utils as particle_utils
if __name__ == "__main__":
page_size = (11 * 96, 17 * 96)
max_iterations = 1000
max_particles = 1000
index_training_node_count = 100000
grid_size = 2000
np.random.seed(1234)
center_bias = 0
d = 2
# particle radius
p_radius = 2
gen_radius = 2
# Construct the index
nlist = int(10 * np.sqrt(index_training_node_count))
quantizer = faiss.IndexFlatL2(d) # the other index
index = faiss.IndexIVFFlat(quantizer, d, int(nlist))
initial_vecs = (np.random.uniform(-1*grid_size, grid_size, (123318, 2))).astype('float32')
index.train(initial_vecs)
index.nprobe = 2
# initialize the count of particles
fixed_particles = np.zeros((1, d), dtype='float32')
live_particles = 1
moving_particles = particle_utils.init_moving_particles(live_particles, gen_radius, d)
index.add(fixed_particles)
# begin adding vectors to the index.
a = 1
particle_count = 1
parent_indices = []
i = 0
last_particle_count = 1
last_iteration = 0
while particle_count < max_particles and i < max_iterations:
i += 1
# Increase the number of particles as the bounding circle gets larger
if a*np.sqrt(particle_count)-5 > len(moving_particles):
live_particles = int(np.sqrt(particle_count) * a)
moving_particles = particle_utils.init_moving_particles(live_particles, gen_radius, d)
print(f"Live: {live_particles:4}, Total: {particle_count:6}, on iteration {i:6} particles gained/iterations {(live_particles-last_particle_count)/(i-last_iteration)}")
last_particle_count = live_particles
last_iteration = i
D, I = index.search(moving_particles, 1)
fixing_indices = D[:, 0] < p_radius ** 2
parent_indices.extend(I[fixing_indices])
if any(fixing_indices):
particle_count += sum(fixing_indices)
fixing_particles = moving_particles[fixing_indices]
index.add(fixing_particles)
moving_particles, gen_radius = particle_utils.regenerate_fixed_particle(moving_particles, fixing_indices, gen_radius)
moving_particles += np.random.normal(0, 1, (live_particles, d)).astype('float32')
moving_particles -= moving_particles * center_bias/np.linalg.norm(moving_particles, axis=1, keepdims=True)
moving_particles = particle_utils.regenerate_extreme_particles(moving_particles, gen_radius)
# Reconstruct the points in the order they were added.
index.make_direct_map()
fixed_particles = index.reconstruct_n(0, int(particle_count)) + np.asarray(page_size)/2
parent_indices = np.concatenate(parent_indices)
parents = fixed_particles[parent_indices]
# Build a graph
G = nx.graph.Graph()
for ind in range(len(fixed_particles)):
G.add_node(ind)
if ind > 0:
G.add_edge(parent_indices[ind-1], ind)
# Iterate over the edges of the graph
edges = list(nx.algorithms.traversal.edgedfs.edge_dfs(G, source=0))
grouped_edges = []
for a_edge in edges:
if len(grouped_edges) == 0 or grouped_edges[-1][-1] != a_edge[0]:
grouped_edges.append(list(a_edge))
else:
grouped_edges[-1].append(a_edge[-1])
# Group the nodes together
group_strs = []
paths = []
fig, ax = plt.subplots()
# Write the path
dwg = svgwrite.Drawing("../outputs/out.svg", size=page_size)
inkscape = svgwrite.extensions.Inkscape(dwg)
layer = inkscape.layer()
dwg.add(layer)
for a_group in grouped_edges:
curr_pnts = fixed_particles[a_group].astype('int')
layer.add(svgwrite.shapes.Polyline(curr_pnts.tolist(),
stroke="black",
fill='none'))
dwg.save()
| neoques/dla-python | src/virnolli.py | virnolli.py | py | 3,998 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "faiss.IndexFlatL2",
... |
12657059462 | # level: medium
# 思路:dfs 将节点设为子树和,统计出现最多的字数和
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import defaultdict
class Solution(object):
nodes = defaultdict(int)
def findFrequentTreeSum(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
self.nodes.clear()
self.dfs(root)
result = []
vals = []
m = -9999
# print(self.nodes)
for key, val in self.nodes.items():
if val > m: m = val
for key, val in self.nodes.items():
if val == m:
result.append(key)
return result
def dfs(self, root):
if root == None:
return
if root.left != None:
self.dfs(root.left)
root.val += root.left.val
if root.right != None:
self.dfs(root.right)
root.val += root.right.val
self.nodes[root.val] += 1
if __name__ == '__main__':
ans = Solution()
print(ans.findFrequentTreeSum([5, 2, -3]))
| PouringRain/leetcode | 508.py | 508.py | py | 1,214 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 13,
"usage_type": "call"
}
] |
18528705421 | # Section12-1
# Python database connectivity (SQLite)
# Creating a table and inserting rows
import datetime
import sqlite3

# Build the insertion timestamp.
now = datetime.datetime.now()
print('now', now)
nowDatetime = now.strftime('%Y-%m-%d %H:%M:%S')
print('now Datetime', nowDatetime)

# sqlite3 adapter version and the underlying SQLite engine version.
print('sqlite3.version : ', sqlite3.version)
print('sqlite3.sqlite_version', sqlite3.sqlite_version)
print()

# Creating the DB & autocommit & rollback
# Commit : applies pending changes to the DB
# Autocommit : applies every change to the DB immediately
# Rollback : undoes pending changes

# Path to the DB file; isolation_level=None turns autocommit on.
conn = sqlite3.connect('database/database.db', isolation_level=None)
# In-memory DB alternative:
# conn = sqlite3.connect(":memory:")

# Obtain a cursor for executing statements.
c = conn.cursor()
print('Cursor Type : ', type(c))

# Create the table (data types: TEXT NUMERIC INTEGER REAL BLOB)
c.execute(
    "CREATE TABLE IF NOT EXISTS users(id INTEGER PRIMARY KEY, username text, email text, phone text, website text, regdate text)")  # AUTOINCREMENT

# Insert rows: positional values and the parameterized (?) form.
c.execute("INSERT INTO users VALUES (1 ,'Kim','Kim@naver.com', '010-0000-0000', 'Kim.com', ?)", (nowDatetime,))
c.execute("INSERT INTO users(id, username, email, phone, website, regdate) VALUES (?, ?, ?, ?, ?, ?)",
          (2, 'Park', 'Park@naver.com', '010-1111-1111', 'Park.com', nowDatetime))

# Bulk insert from a sequence of row tuples.
userList = (
    (3, 'Lee', 'Lee@naver.com', '010-2222-2222', 'Lee.com', nowDatetime),
    (4, 'Cho', 'Cho@naver.com', '010-3333-3333', 'Cho.com', nowDatetime),
    (5, 'Yoo', 'Yoo@naver.com', '010-4444-4444', 'Yoo.com', nowDatetime)
)
c.executemany(
    "INSERT INTO users(id, username, email, phone, website, regdate) VALUES (?, ?, ?, ?, ?, ?)", userList)
print()

# Delete every row from the table; rowcount reports how many were removed.
print("users db deleted :", conn.execute("delete from users").rowcount, "rows")

# Commit: with isolation_level=None the connection autocommits.
# conn.commit()  # required to persist changes when autocommit is off

# Rollback
# conn.rollback()  # not usable while autocommit is active

# Close the connection.
conn.close()
| dailyco/python-study | src/section12_1.py | section12_1.py | py | 2,122 | python | ko | code | 2 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sqlite3.version",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "sqlit... |
9135098878 | # -*- coding: utf-8 -*-
import lzma
import os
import shutil
from datetime import datetime
from datetime import timedelta
import hglib
from bugbug import bugzilla
from bugbug import labels
from bugbug import repository
from bugbug_data.secrets import secrets
from cli_common.log import get_logger
from cli_common.taskcluster import get_service
from cli_common.utils import ThreadPoolExecutorResult
logger = get_logger(__name__)
class Retriever(object):
    """Downloads the bugbug training inputs: Bugzilla bugs and hg commit data.

    Results are written under data/ as xz-compressed JSON and the finished
    task is registered in the TaskCluster index.
    """

    def __init__(self, cache_root, client_id, access_token):
        # Directory that holds (and caches) the mozilla-central checkout.
        self.cache_root = cache_root
        assert os.path.isdir(cache_root), 'Cache root {} is not a dir.'.format(cache_root)
        self.repo_dir = os.path.join(cache_root, 'mozilla-central')
        # TaskCluster credentials used for the index service.
        self.client_id = client_id
        self.access_token = access_token
        self.index_service = get_service('index', client_id, access_token)

    def retrieve_commits(self):
        """Clone/update mozilla-central, extract commit data, compress it."""
        shared_dir = self.repo_dir + '-shared'
        # robustcheckout: retrying, share-based clone of mozilla-central.
        cmd = hglib.util.cmdbuilder('robustcheckout',
                                    'https://hg.mozilla.org/mozilla-central',
                                    self.repo_dir,
                                    purge=True,
                                    sharebase=shared_dir,
                                    networkattempts=7,
                                    branch=b'tip')
        cmd.insert(0, hglib.HGPATH)
        proc = hglib.util.popen(cmd)
        out, err = proc.communicate()
        # Non-zero exit from hg is surfaced as a CommandError.
        if proc.returncode:
            raise hglib.error.CommandError(cmd, proc.returncode, out, err)
        logger.info('mozilla-central cloned')

        repository.download_commits(self.repo_dir)
        logger.info('commit data extracted from repository')
        self.compress_file('data/commits.json')

    def retrieve_bugs(self):
        """Download the bug window plus all labelled bugs, then compress."""
        bugzilla.set_token(secrets[secrets.BUGZILLA_TOKEN])
        six_months_ago = datetime.utcnow() - timedelta(182)
        # NOTE(review): 182 + 365 days is ~1.5 years, not 2.5 as the
        # variable name suggests -- confirm the intended window.
        two_years_and_six_months_ago = six_months_ago - timedelta(365)
        logger.info('Downloading bugs from {} to {}'.format(two_years_and_six_months_ago, six_months_ago))
        bugzilla.download_bugs_between(two_years_and_six_months_ago, six_months_ago)

        logger.info('Downloading labelled bugs')
        bug_ids = labels.get_all_bug_ids()
        bugzilla.download_bugs(bug_ids)

        self.compress_file('data/bugs.json')

    def compress_file(self, path):
        """Write an xz-compressed copy of *path* alongside it as path.xz."""
        with open(path, 'rb') as input_f:
            with lzma.open('{}.xz'.format(path), 'wb') as output_f:
                shutil.copyfileobj(input_f, output_f)

    def go(self):
        """Run both downloads concurrently, then index the finished task."""
        with ThreadPoolExecutorResult(max_workers=2) as executor:
            # Thread 1 - Download Bugzilla data.
            executor.submit(self.retrieve_bugs)

            # Thread 2 - Clone mozilla-central and retrieve commit data.
            executor.submit(self.retrieve_commits)

        # Index the task in the TaskCluster index.
        self.index_service.insertTask(
            'project.releng.services.project.{}.bugbug_data.latest'.format(secrets[secrets.APP_CHANNEL]),
            {
                'taskId': os.environ['TASK_ID'],
                'rank': 0,
                'data': {},
                # Index entry expires 31 days from now.
                'expires': (datetime.utcnow() + timedelta(31)).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
            }
        )
| chutten/release-services | src/bugbug/data/bugbug_data/retriever.py | retriever.py | py | 3,307 | python | en | code | null | github-code | 6 | [
{
"api_name": "cli_common.log.get_logger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
... |
33340636168 | from wsgiref.simple_server import make_server
from pyramid.view import view_config
from pyramid.config import Configurator
@view_config(route_name='theroute', renderer='json', request_method='POST')
def myview(request):
    """Handle POST / and return an empty JSON body.

    Fix: removed a committed ``import pdb; pdb.set_trace()`` breakpoint that
    suspended the server on every request.
    """
    return {'POST': ''}
if __name__ == '__main__':
    # Wire the single route, register scanned @view_config views, and build
    # the WSGI application.
    config = Configurator()
    config.add_route('theroute', '/')
    config.scan()
    app = config.make_wsgi_app()
    # Serve on every interface, port 6543, until interrupted.
    server = make_server('0.0.0.0', 6543, app)
    print(server.base_environ)
    server.serve_forever()
{
"api_name": "pdb.set_trace",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pyramid.view.view_config",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyramid.config.Configurator",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "wsgi... |
19981883727 | import time
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
import datetime
import csv
import json
def get_data():
current_time = datetime.datetime.now().strftime('%m-%d-%Y')
with open(f'data/{current_time}_labirint.csv', 'w', newline='', encoding='utf-8-sig') as file:
writer = csv.writer(file, delimiter=';')
writer.writerow(
[
'Название книги',
'Автор',
'Издательство',
'Цена без скидки',
'Цена со скидкой',
'Процент скидки',
'Наличие на складе'
]
)
ua = UserAgent()
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,'
'application/signed-exchange;v=b3;q=0.9',
'User-Agent': ua.random,
}
url = 'https://www.labirint.ru/genres/2498/?display=table&available=1'
response = requests.get(url=url, headers=headers)
soup = BeautifulSoup(response.text, 'lxml')
pages_count = int(soup.find('div', class_='pagination-numbers').find_all('a')[-1].text.strip())
books_data = []
for page in range(1,pages_count + 1):
url = f'https://www.labirint.ru/genres/2498/?display=table&available=1&page={page}'
response = requests.get(url=url, headers=headers)
soup = BeautifulSoup(response.text, 'lxml')
books = soup.find('tbody', class_='products-table__body').find_all('tr')
for book in books:
book_data = book.find_all('td')
try:
book_title = book_data[0].find('a').text
if not book_title:
continue
except AttributeError:
continue
try:
book_author = ', '.join(list(map(lambda link: link.text, book_data[1].find_all('a'))))
except AttributeError:
book_author = 'Нет автора'
try:
book_publisher = ': '.join(list(map(lambda publisher: publisher.text, book_data[2].find_all('a'))))
except AttributeError:
book_publisher = 'Нет издательства'
try:
old_price = int(book_data[3].find(class_='price-gray').text.replace('₽', '').replace(' ', '').strip())
except AttributeError:
old_price = 'Нет старой цены'
try:
new_price = int(book_data[3].find(class_='price-val').text.replace('₽', '').replace(' ', '').strip())
except AttributeError:
new_price = 'Нет новой цены'
try:
discount = f'{round(((old_price - new_price) / old_price) * 100, 2)} %'
except TypeError:
discount = 'Скидки нет'
try:
availability = book_data[-1].find(class_='mt3 rang-available').text.replace(' ', '').strip()
except AttributeError:
availability = 'Нет данных'
books_data.append(
{
'book_title': book_title,
'book_author': book_author,
'book_publisher': book_publisher,
'old_price': f'{old_price}₽' if type(old_price) is int else old_price,
'new_price': f'{new_price}₽' if type(new_price) is int else new_price,
'discount': discount,
'availability': availability,
}
)
with open(f'data/{current_time}_labirint.csv', 'a', newline='', encoding='utf-8-sig') as file:
writer = csv.writer(file, delimiter=';')
writer.writerow(
[
book_title,
book_author,
book_publisher,
f'{old_price}₽' if type(old_price) is int else old_price,
f'{new_price}₽' if type(new_price) is int else new_price,
discount,
availability
]
)
print(f'Обработано {page}/{pages_count} страниц')
with open(f'data/{current_time}-labirint.json', 'w', encoding='utf-8') as file:
json.dump(books_data, file, indent=4, ensure_ascii=False)
def main():
start_time = time.time()
get_data()
diff_time = time.time() - start_time
print(f'Затраченное время на работу скрипта - {diff_time}')
if __name__ == '__main__':
main()
| Baradys/scrappers | scrappers/labirint/labirint.py | labirint.py | py | 4,771 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "fake_useragen... |
24458342362 | from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
X = rs_train[['price_per_ticket', 'seat_row', 'paid_full_price', 'paid_online',
'Regular_Ticket', 'age', 'est_income', 'Male', 'married', 'fam_w_kids',
'kids_in_house', 'from_Boston', 'from_MA','game_hour',
'RS_v_Yankees', 'STH_Act', 'BUSINESS_STH_Act',
'GROUP_Act', 'BUSINESS_act', 'Individual_Act', 'SPONSOR_Act',
'EMPLOYEE_Act', 'April', 'May', 'June', 'July', 'August', 'September',
'Thursday', 'Sunday', 'Tuesday', 'Wednesday', 'Saturday', 'Friday',
'Monday', 'low_scale_seat', 'med_scale_seat', 'high_scale_seat']]
y = rs_train['ticket_used']
names = pd.DataFrame(X.columns)
model_fs = SelectKBest(score_func = chi2, k=4)
fs_results = model_fs.fit(X,y)
#print(fs_results.scores_)
results_df = pd.DataFrame(fs_results.scores_)
scored= pd.concat([names, results_df], axis=1)
scored.columns = ["Feature", "Score"]
scored.sort_values(by=["Score"])
| befitz/ISOM837_RS | feature_selection.py | feature_selection.py | py | 996 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.feature_selection.SelectKBest",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_selection.chi2",
"line_number": 16,
"usage_type": "name"
}
] |
75187431228 | from django.conf import settings
import jwt
from rest_framework import authentication, exceptions
from django.contrib.auth.models import User
class JWTAuthentication(authentication.BasicAuthentication):
def authenticate(self, request):
auth_data = authentication.get_authorization_header(request)
if not auth_data:
return None
prefix, token = auth_data.decode('utf-8').split(' ')
try:
payload = jwt.decode(token, settings.JWT_SECRET, algorithms=['HS256'])
user = User.objects.get(id=payload['id'])
return (user, token)
except jwt.DecodeError:
raise exceptions.AuthenticationFailed('Invalid token')
except jwt.ExpiredSignatureError:
raise exceptions.AuthenticationFailed('Expired token') | Limookiplimo/Contacts-API | authentication/backends.py | backends.py | py | 835 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "rest_framework.authentication.BasicAuthentication",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.authentication",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "rest_framework.authentication.get_authorization_header",
"... |
30433642050 | import requests
from jSona import jSona
import pprint
pp = pprint.pprint
class proImagery :
def __init__(self, CONF_PATH) :
self.jso = jSona()
self.headers = {'json':{'Content-Type':'application/json; charset=utf-8'}}
self.config = self.jso.loadJson(CONF_PATH)['IMAGERY']
self.lower = {color['id']:color for color in self.jso.loadJson(self.config['COLOR']['PATH'])}
def post(self, data) :
return requests.post(url=self.post_url, data=self.jso.dumps(data, False), headers=self.headers['json'])
def connect(self, addr, port) :
self.url = "{}:{}/".format(addr, port)
self.post_url = self.url
temp_data = {'hello':'world'}
res = self.post(temp_data)
return 'success' in res.content.decode()
def segcolor(self, img_url, img_name='default.jpg', options='-d') :
self.post_url = self.url+'segcolor'
data = {'path' : img_url, 'name' : img_name, 'options' : options}
res = self.post(data)
return self.jso.loads(res.content.decode(), False)
def ambcolor(self, colors, threshold=0.1) :
colors = {color[0]:color[1] for color in colors}
ambics = dict()
for cid in colors : # brightgrayyellow
if cid in self.lower :
new_cid = self.lower[cid]['u']
if new_cid in ambics : ambics[new_cid] += colors[cid]
else : ambics[new_cid] = colors[cid]
else :
if cid in ambics : ambics[cid] += colors[cid]
else : ambics[cid] = colors[cid]
return list(filter(lambda c : c[1]>threshold, ambics.items()))
def start(self, img_url, labels=['shirt'], ambi=True, threshold=0.1, img_name='default.jpg', options='-d') :
segments_and_colors = self.segcolor(img_url, img_name=img_name, options=options)
if type(segments_and_colors) == type([]) :
segments, colors = segments_and_colors[1], segments_and_colors[3]
for sinx in range(len(segments)) :
if set(labels)&set(self.config['LABEL'][''.join(segments[sinx].split()[:-1])]) and sinx<len(colors):
if ambi : return self.ambcolor(colors[sinx], threshold=threshold)
else : return colors[sinx]
return None | oimq/proCleaner | proCleaner/proImagery.py | proImagery.py | py | 2,349 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pprint.pprint",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "jSona.jSona",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 14,
"usage_type": "call"
}
] |
26201696351 | from dolfin import *
import math
import numpy as np
import logging
import matplotlib.pyplot as plt
from unconstrainedMinimization import InexactNewtonCG
logging.getLogger('FFC').setLevel(logging.WARNING)
logging.getLogger('UFL').setLevel(logging.WARNING)
set_log_active(False)
# Set the level of noise:
noise_std_dev = .3
# Load the image from file
data = np.loadtxt('image.dat', delimiter=',')
np.random.seed(seed=1)
noise = noise_std_dev*np.random.randn(data.shape[0], data.shape[1])
# Set up the domain and the finite element space.
Lx = float(data.shape[1])/float(data.shape[0])
Ly = 1.
mesh = RectangleMesh(Point(0,0),Point(Lx,Ly),200, 100)
V = FunctionSpace(mesh, "Lagrange",1)
# Generate the true image (u_true) and the noisy data (u_0)
class Image(Expression):
def __init__(self, Lx, Ly, data, **kwargs):
self.data = data
self.hx = Lx/float(data.shape[1]-1)
self.hy = Ly/float(data.shape[0]-1)
def eval(self, values, x):
j = int(math.floor(x[0]/self.hx))
i = int(math.floor(x[1]/self.hy))
values[0] = self.data[i,j]
trueImage = Image(Lx,Ly,data, degree=1)
noisyImage = Image(Lx,Ly,data+noise, degree=1)
u_true = interpolate(trueImage, V)
u_0 = interpolate(noisyImage, V)
plt.figure(figsize=[12,24])
plt.subplot(1,2,1)
plot(u_true, title="True Image")
plt.subplot(1,2,2)
plot(u_0, title="Noisy Image")
plt.show()
| uvilla/inverse17 | Assignment3/tntv.py | tntv.py | py | 1,395 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.WARNING... |
20070392797 | import importlib
import sys
from unittest import mock
class MockConfig:
def __init__(self):
self.bot = mock.MagicMock()
self.state = mock.Mock()
def set_up_class(cls, *module_names):
mock_config = MockConfig()
sys.modules['config'] = mock_config
try:
for module_name in module_names:
module = importlib.import_module(module_name)
setattr(cls, module_name, module)
except Exception:
del sys.modules['config']
raise
def tear_down_class(cls):
del sys.modules['config']
| raylu/sbot | tests/mock_config.py | mock_config.py | py | 491 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "unittest.mock.MagicMock",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "unittest.mock",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "unittest.mock.Mock",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "unittest.mock",... |
35743712704 | import sys
import read_write as rw
import numpy as np
import scipy.sparse
from MatrixFactorization import MatrixFactorization
if (__name__ == '__main__'):
finput_dataset = sys.argv[1]
finput_K = (int)(sys.argv[2])
iu_matrix_train_path = "../../Data/" + finput_dataset + "/iu_sparse_matrix_train.npz"
iu_matrix_test_path = "../../Data/" + finput_dataset + "/iu_sparse_matrix_test.npz"
train_item_id_path = "../../Data/" + finput_dataset + "/train_item_id"
test_item_id_path = "../../Data/" + finput_dataset + "/test_item_id"
item_sim_matrix_path = "../../Data/" + finput_dataset + "/item_sim_matrix" # pass
ui_matrix_train = scipy.sparse.load_npz(iu_matrix_train_path).T
ui_matrix_test = scipy.sparse.load_npz(iu_matrix_test_path).T
ui_matrix = scipy.sparse.csr_matrix(np.hstack((ui_matrix_train.toarray(), np.zeros(ui_matrix_test.shape))))
train_item_id = rw.readffile(train_item_id_path)
test_item_id = rw.readffile(test_item_id_path)
item_sim_matrix = rw.readffile(item_sim_matrix_path)
# Computing Score for user (Score = [user number, new item number])
Score = (ui_matrix_train * item_sim_matrix.loc[train_item_id, test_item_id]) / \
((ui_matrix_train != 0) * item_sim_matrix.loc[train_item_id, test_item_id])
# Active Learning
train_item_num = len(train_item_id)
ui_matrix = ui_matrix.tolil()
ui_matrix_test = ui_matrix_test.tolil()
for i in range(len(test_item_id)):
ind = np.argsort(-Score[:, i])
if finput_K < ind.shape[0]:
topK = ind[:(finput_K+1)]
else:
topK = ind
ui_matrix[topK, i+train_item_num] = ui_matrix_test[topK, i]
ui_matrix_test[topK, i] = 0
# Matrix Factorization
nonzero = scipy.sparse.find(ui_matrix)
train_lst = []
for uid, itemid, rating in zip(nonzero[0], nonzero[1], nonzero[2]):
train_lst.append((uid, itemid, float(rating)))
MF = MatrixFactorization(usernum=ui_matrix.shape[0], itemnum=ui_matrix.shape[1])
try:
user_profile, item_profile = MF.matrix_factorization(train_lst)
except:
MF.end()
MF = MatrixFactorization()
user_profile, item_profile = MF.matrix_factorization(train_lst)
pred_rating = np.dot(user_profile, item_profile[train_item_num:, :].T)
nonzero_num = ui_matrix_test.getnnz()
ui_matrix_test_arr = ui_matrix_test.toarray()
RMSE = np.sum(((ui_matrix_test_arr != 0)*(pred_rating - ui_matrix_test_arr))**2 / nonzero_num)**0.5
print("RMSE: %.4f"%RMSE)
MF.end()
| clamli/Dissertation | Baselines/Content-based Active Learning/content_based_active_learning.py | content_based_active_learning.py | py | 2,632 | python | en | code | 28 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse.sparse.load_npz",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.s... |
2535030622 |
from pathlib import Path
from shutil import move
from threading import Thread
import logging
folders = []
extensions = []
def grabs_folder(path: Path):
for el in path.iterdir():
if el.is_dir():
folders.append(el)
grabs_folder(el)
def sort_file(path: Path):
for el in path.iterdir():
if el.is_file():
ext = el.suffix
new_path = base_folder / ext
try:
new_path.mkdir(exist_ok=True, parents=True)
move(el, new_path / el.name)
except OSError as e:
logging.error(e)
def del_empty_folders(path: Path):
for el in path.iterdir():
if el.is_dir():
try:
el.rmdir()
except OSError:
pass
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format="%(threadName)s %(message)s")
base_folder = Path(input('Type path to folder:'))
folders.append(base_folder)
grabs_folder(base_folder)
threads = []
for folder in folders:
th = Thread(target=sort_file, args=(folder,))
th.start()
threads.append(th)
[th.join() for th in threads]
del_empty_folders(base_folder)
print('The process has been finished successfully') | PetroChulkov/web_homework3 | file_sorter.py | file_sorter.py | py | 1,354 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "shutil.move",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_numbe... |
39964228620 | import pymysql
from tkinter import *
from tkinter import messagebox
import sys
import datetime
u=sys.argv[1]
p=sys.argv[2]
class dropdown:
def __init__(self,appen,lis):
self.m = StringVar()
self.m.set("choose")
self.opt=OptionMenu(appen,self.m,*lis)
self.opt.grid(row=len(lis),column=1)
def place(self,p,q):
self.opt.place(x=p,y=q)
db = pymysql.connect("localhost",u,p,"DBMS")
cursor = db.cursor()
LEC=[]
STU=[]
courses=[]
venues=[]
t=['0{}:00:00'.format(x) for x in range(7,10)]
t+=['{}:00:00'.format(x) for x in range(10,19)]
Day=['Monday','Tuesday','Wednesday','Thursday','Friday']
def up():
global LEC
global STU
global courses
global venues
cursor.execute("Select * from Lecturer;")
LEC = cursor.fetchall()
cursor.execute("Select * from Student;")
STU = cursor.fetchall()
cursor.execute("Select CourseID from Courses order by CourseID ASC;")
courses = cursor.fetchall()
cursor.execute("Select Venue from Venues order by Venue ASC")
venues = list(map(lambda x: str(x).strip(",')("),cursor.fetchall()))
up()
cl={'CT':"blue",'CS-A':"yellow",'CS-B':"green",'CIS':"orange"}
def table():
ind = 0
top=Toplevel()
top.geometry("480x540")
top.resizable(height=False,width=False)
top.title("Schedule")
w=Canvas(top,bg="white",height=480,width=480)
w.pack()
for i in range(1,9):
w.create_line(i*60,0,i*60,480,fill="black")
for i in range(1,12):
w.create_line(0,i*40,480,i*40,fill="black")
for i in range(7,18):
Label(top,text=(str(i)+":00")).place(x=5,y=(i-7)*40+53)
for i,l in zip(range(7,15),venues):
Label(top,text=l).place(x=(i-7)*60+68,y=12)
def tbl():
nonlocal ind
cursor.execute("select Schedule.CourseID, Schedule.Program, Schedule.Venue, Schedule.StartTime, Schedule.StopTime, Courses.LecturerID from Schedule inner join Courses on Courses.CourseID = Schedule.CourseID where Day = '{}';".format(Day[ind]))
a=cursor.fetchall()
for i in range(len(a)):
st=str((datetime.datetime.min + a[i][3]).time())
sp=str((datetime.datetime.min + a[i][4]).time())
w.create_rectangle((venues.index(a[i][2])+1)*60,(t.index(st)+1)*40,(venues.index(a[i][2])+2)*60,(t.index(sp)+1)*40,fill=cl[a[i][1]])
y=((t.index(st)+1)*40 + (t.index(sp)+1)*40)/2 - 18
Label(w,text="{}\n{}\n{}".format(a[i][0],a[i][1],a[i][5])).place(x=(venues.index(a[i][2]))*60+64,y=y) #(t.index(st))*40+47)
tbl()
def prev_():
nonlocal ind
if ind >0:
ind-=1
for wid in w.winfo_children():
wid.destroy()
for i in w.find_all()[19:]:
w.delete(i)
tbl()
lab.config(text=Day[ind])
def nex_():
nonlocal ind
if ind <4:
ind+=1
for wid in w.winfo_children():
wid.destroy()
for i in w.find_all()[19:]:
w.delete(i)
tbl()
lab.config(text=Day[ind])
lab=Label(top,text=Day[ind])
lab.place(x=220,y=505)
prev=Button(top,text="prev",command = prev_)
nex = Button(top,text="next",command=nex_)
prev.place(x=130,y=500)
nex.place(x=302,y=500)
def Student():
i = 0
top=Toplevel()
top.title("Student")
top.geometry("300x280")
top.resizable(height=False,width=False)
l1 = Label(top,text="Matric:")
l2 = Label(top,text="FName:")
l3 = Label(top,text="Lname:")
l4 = Label(top,text="Program:")
l1.place(x=98,y=30)
l2.place(x=105,y=70)
l3.place(x=105,y=110)
l4.place(x=91,y=150)
d1 = Label(top,text=STU[i][1])
d2 = Label(top,text=STU[i][2])
d3 = Label(top,text=STU[i][3])
d4 = Label(top,text=STU[i][4])
d1.place(x=170,y=30)
d2.place(x=170,y=70)
d3.place(x=170,y=110)
d4.place(x=170,y=150)
def pr():
nonlocal i
if i > 0:
i -= 1
d1.configure(text=STU[i][1])
d2.configure(text=STU[i][2])
d3.configure(text=STU[i][3])
d4.configure(text=STU[i][4])
def ne():
nonlocal i
if i < len(STU)-1:
i += 1
d1.configure(text=STU[i][1])
d2.configure(text=STU[i][2])
d3.configure(text=STU[i][3])
d4.configure(text=STU[i][4])
def new():
nonlocal i
def upd(a,b,c,ap):
a=a.strip()
b=b.strip()
sql="insert into Student value (NULL,NULL,'{}','{}','{}');".format(a,b,c)
try:
cursor.execute(sql)
cursor.execute("call GenMatric();")
db.commit()
messagebox.showinfo("Confirmation","Student Added Successfully.")
up()
ap.destroy()
except:
db.rollback()
messagebox.showerror("Value Error","Could not add Student")
appen = Toplevel()
appen.title("New Student")
appen.geometry("300x230")
appen.resizable(height=False,width=False)
l1 = Label(appen,text="id:")
l1.place(x=50,y=20)
l2 = Label(appen,text ="FName:")
l2.place(x=29,y=60)
l3 = Label(appen,text ="LName:")
l3.place(x=29,y=100)
l4 = Label(appen,text ="Program:")
l4.place(x=15,y=140)
id_ = Label(appen,text=str(len(STU)+1))
id_.place(x=100,y=20)
fname = Entry(appen,bd=5)
fname.place(x=100,y=60)
lname = Entry(appen,bd=5)
lname.place(x=100,y=100)
prog=dropdown(appen,['CT','CIS','CS-A','CS-B'])
prog.place(100,135)
comit=Button(appen,text="register",command=lambda: upd(fname.get(),lname.get(),prog.m.get(),appen))
comit.place(x=100,y=180)
prev = Button(top,text="Prev",command = pr)
prev.place(x=50,y=190)
next_ = Button(top,text="Next",command = ne)
next_.place(x =190,y=190)
new = Button(top,text="+",command= new)
new.place(x=130,y=230)
def Lecturer():
i = 0
top=Toplevel()
top.title("Lecturer")
top.geometry("300x230")
top.resizable(height=False,width=False)
l1 = Label(top,text="LecturerID:")
l2 = Label(top,text="FName:")
l3 = Label(top,text="Lname:")
l1.place(x=70,y=30)
l2.place(x=105,y=70)
l3.place(x=105,y=110)
d1 = Label(top,text=LEC[i][1])
d2 = Label(top,text=LEC[i][2])
d3 = Label(top,text=LEC[i][3])
d1.place(x=170,y=30)
d2.place(x=170,y=70)
d3.place(x=170,y=110)
def pr():
nonlocal i
if i > 0:
i -= 1
d1.configure(text=LEC[i][1])
d2.configure(text=LEC[i][2])
d3.configure(text=LEC[i][3])
def ne():
nonlocal i
if i < len(LEC)-1:
i += 1
d1.configure(text=LEC[i][1])
d2.configure(text=LEC[i][2])
d3.configure(text=LEC[i][3])
def new():
nonlocal i
def upd(a,b,ap):
a=a.strip()
b=b.strip()
sql="insert into Lecturer value (NULL,NULL,'{}','{}');".format(a,b)
try:
cursor.execute(sql)
cursor.execute("call GenLecID();")
db.commit()
messagebox.showinfo("Confirmation","Lecturer Added Successfully.")
up()
ap.destroy()
except:
db.rollback()
messagebox.showerror("Value Error","Could not add Lecturer")
appen = Toplevel()
appen.title("New Lecturer")
appen.geometry("300x210")
appen.resizable(height=False,width=False)
l1 = Label(appen,text="id:")
l1.place(x=50,y=20)
l2 = Label(appen,text ="FName:")
l2.place(x=29,y=60)
l3 = Label(appen,text ="LName:")
l3.place(x=29,y=100)
id_ = Label(appen,text=str(len(LEC)+1))
id_.place(x=100,y=20)
fname = Entry(appen,bd=5)
fname.place(x=100,y=60)
lname = Entry(appen,bd=5)
lname.place(x=100,y=100)
comit=Button(appen,text="register",command=lambda: upd(fname.get(),lname.get(),appen))
comit.place(x=100,y=140)
prev = Button(top,text="Prev",command = pr)
prev.place(x =50,y=150 )
next_ = Button(top,text="Next",command = ne)
next_.place(x =180,y=150)
new = Button(top,text="+",command= new)
new.place(x=130,y=190)
def schedule():
def course_handler(a,b,c,d,e,f,top):
try:
cursor.execute("select CourseID from CourseTaken where Program = '{}';".format(b))
pro_course = list(map(lambda x: str(x).strip(",')("),cursor.fetchall()))
cursor.execute("select Size from Venues order by Venue ASC;")
v = list(map(lambda x: str(x).strip(",')("),cursor.fetchall()))
cursor.execute("select * from Student where Program = '{}';".format(b))
classSize = len(cursor.fetchall())
cursor.execute("select Program, Day, CourseID, StartTime, StopTime from Schedule where CourseID = '{}' and Program = '{}';".format(a,b))
dur = sum(list(map(lambda o:int(str(o[4])[:2].strip(":("))-int(str(o[3])[:2].strip(":(,)")),cursor.fetchall())))
cursor.execute("select Units from Courses where CourseID = '{}';".format(a))
cred = int(cursor.fetchall()[0][0])
cursor.execute("select Venue, Day, StartTime, StopTime from Schedule;")
sch = cursor.fetchall()
cursor.execute("select Program, Day, StartTime, StopTime from Schedule;")
clas = cursor.fetchall()
cursor.execute("select LecturerID, Day, StartTime, StopTime from Courses inner join Schedule on Courses.CourseID = Schedule.CourseID where Courses.CourseID = '{}';".format(a))
lect = cursor.fetchall()
except:
messagebox.showerror("Connection Error","Could Not connect to database")
return
def timer(a):
return datetime.timedelta(hours=datetime.datetime.strptime(a,'%H:%M:%S').hour)
if f<=e:
messagebox.showerror("Schedule Error","Stop Time cannot be earlier than Start Time")
elif a not in pro_course:
messagebox.showerror("Schedule Error","{} do not offer {}".format(b,a))
elif int(v[list(map(lambda x: str(x).strip(",')("),venues)).index(c)])<classSize:
messagebox.showerror("Schedule Error","Venue is too small")
elif cred < dur+int(datetime.datetime.strptime(f,'%H:%M:%S').hour-datetime.datetime.strptime(e,'%H:%M:%S').hour):
messagebox.showerror("Schedule Error", "Course Overload")
elif (str(c),str(d),timer(e),timer(f)) in sch:
messagebox.showerror("Schedule Error","class already holding at venue")
elif (str(b),str(d),timer(e),timer(f)) in clas:
messagebox.showerror("Schedule Error","{} already have a class then".format(b))
elif (str(lect[0][0]),str(d),timer(e),timer(f)) in lect:
messagebox.showerror("Schedule Error","{} is already teaching a class then".format(lect[0][0]))
else:
try:
cursor.execute("INSERT into Schedule value ('{}','{}','{}','{}','{}','{}');".format(a,b,c,d,e,f))
db.commit()
top.destroy()
except:
db.rollback()
messagebox.showerror("Connection Error","Could not connect to database")
top=Toplevel()
top.title("Scheduler")
top.geometry("360x320")
top.resizable(height=False,width=False)
l1 = Label(top, text = 'Course:')
l2 = Label(top,text = 'Program:')
l3 = Label(top, text = 'Venue:')
l4 = Label(top, text = 'Day:')
l5 = Label(top, text = 'Start time:')
l6 = Label(top, text = 'Stop time:')
l1.place(x=100,y=30)
l2.place(x=93,y=70)
l3.place(x=107,y=110)
l4.place(x=121,y=150)
l5.place(x=72,y=190)
l6.place(x=79,y=230)
e1 = dropdown(top,list(map(lambda x: str(x).strip(",')("),courses)))
e1.place(170,25)
e2 = dropdown(top,['CT','CIS','CS-A','CS-B'])
e2.place(170,65)
e3 = dropdown(top,list(map(lambda x: str(x).strip(",')("),venues)))
e3.place(170,105)
e4 = dropdown(top,['Monday','Tuesday','Wednesday','Thursday','Friday'])
e4.place(170,145)
e5 = dropdown(top,t[:len(t)-1])
e5.place(170,185)
e6 = dropdown(top,t[1:])
e6.place(170,225)
add_course = Button(top,text="ADD COURSE",command=lambda:course_handler(e1.m.get(),e2.m.get(),e3.m.get(),e4.m.get(),e5.m.get(),e6.m.get(),top))
add_course.place(x=140,y=275)
root = Tk()
root.title("DBMS")
root.geometry("500x500")
root.resizable(height=False,width=False)
w=Canvas(root,bg="white",height=500,width=500)
w.pack()
stu = Button(root,text = "Show Students",command = Student)
lec = Button(root,text = "Show Lecturers",command = Lecturer)
sch = Button(root,text = "Time Table",command=table)
form = Button(root, text = "Schedule Class", command = schedule)
stu.place(x=186.5,y=80)
lec.place(x=186,y=180)
sch.place(x=200,y=280)
form.place(x=186,y=380)
root.mainloop()
db.close()
| 2HgO/CLASS-SCHEDULER | Scheduler.py | Scheduler.py | py | 13,116 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pymysql.connect",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"li... |
41711840918 | import numpy as np
from sklearn.model_selection import KFold, TimeSeriesSplit
from helpers.settings import *
from model.preprocessing_helper import *
from model.config import HISTORY_SIZE
from datetime import datetime
LSTM_STEP = 1
LSTM_FUTURE_TARGET = 1
LSTM_HISTORY = HISTORY_SIZE
TRAIN_DATASET_FRAC = 0.8
def generate_lstm_data(path, cols=INPUT_FILE_COLUMNS, target_column=TARGET_COLUMN, norm_cols=NORM_COLS,
history_size=LSTM_HISTORY, target_size=LSTM_FUTURE_TARGET,
step=LSTM_STEP, index_col=DATETIME_COLUMN,
filter_cols=None,
cat_cols=CATEGORIES, adjust_cols=ADJUST_COLUMNS,
scale_cols=SCALE_COLS, extra_columns=EXTRA_COLS):
"""
:param path: string - path to file
:param cols: List[string] list of all columns to be extracted from csv file
:param target_column: string - name of the target column
:param norm_cols: Dict[Dict[mu: float, std: float]] - list of columns to normalize
:param history_size: int - how many previous records should we use for LSTM dataset
:param target_size: int - how many outputs do we have (usually 1)
:param step: int - if multioutput then >1 else 1
:param index_col: string - name of the timeseries column
:param filter_cols: Dict[List[any]] - filters colums from Dict keys by list of values from the List
:param cat_cols: Dict[List[string]] - definition of all categorical data
:param adjust_cols: Dict[Dict[amount: float]] - amount added to each col value
:param scale_cols: Dict[Dict[min: float, max: float]] - list of columns to scale <0,1>
:param extra_columns: List[string] - list of columns to copy without changing
:return: Tuple(np.array, np.array)
"""
dataset = pd.read_csv(path, usecols=cols)
if target_column not in dataset.columns:
dataset[target_column] = pd.Series(np.zeros(len(dataset[index_col])), index=dataset.index)
dataset.index = dataset[index_col]
# test_y = dataset[dataset[target_column] > 0]
# print(test_y.describe())
# print(dataset.describe())
if filter_cols is not None:
for key, value in filter_cols.items():
dataset = dataset[dataset[key].isin(value)]
dataset['day_of_year'] = dataset[index_col].apply(lambda x: datetime.fromtimestamp(x).timetuple().tm_yday / 365)
cols_to_extract = ['day_of_year'] + list(adjust_cols.keys()) + list(
scale_cols.keys()) + list(norm_cols.keys()) + extra_columns + [target_column]
# print(cols_to_extract)
# print(dataset.columns)
# print(dataset[target_column].describe(), filter_cols)
dataset = preproc_data(
dataset[cols_to_extract],
norm_cols=norm_cols,
scale_cols=scale_cols,
adjust_cols=adjust_cols
)
# parse dataset to its values only, we don't need pandas for future processing from this point
dataset = dataset.values
# print(dataset[:5])
proposed_x, proposed_y = generate_multivariate_data(dataset, target_index=-1, history_size=history_size,
target_size=target_size, step=step)
# print(np.sum(proposed_y))
return proposed_x, proposed_y
def generate_multivariate_data(dataset, history_size=LSTM_HISTORY, target_size=LSTM_FUTURE_TARGET,
step=LSTM_STEP, target_index=-1, target=None):
"""
:param dataset: np.array
:param history_size: int - how many previous records should we use for LSTM dataset
:param target_size: int - how many outputs do we have (usually 1)
:param step: int - if multioutput then >1 else 1
:param target_index: int - index of the target column
:param target: np.array - should be set if dataset doesn't contain target
:return: Tuple(np.array, np.array)
"""
# if there is no explicit target when get target from dataset
if target is None:
target = dataset[:, target_index]
dataset = dataset[:, :target_index]
dataset_size = len(dataset)
train_to_idx = dataset_size - target_size
start_train_idx = history_size
data = []
labels = []
for i in range(start_train_idx, train_to_idx):
indices = range(i - history_size, i, step)
data.append(dataset[indices])
labels.append(target[i + target_size])
return np.array(data), np.array(labels)
def k_fold_data(x, y, folds=10):
x_train = []
y_train = []
x_test = []
y_test = []
kfold = KFold(n_splits=folds, shuffle=True)
for train_index, test_index in kfold.split(x, y):
x_train = x[train_index]
y_train = y[train_index]
x_test = x[test_index]
y_test = y[test_index]
return x_train, y_train, x_test, y_test
def k_fold_ts_data(x, y, folds=10):
x_train = []
y_train = []
x_test = []
y_test = []
kfold = TimeSeriesSplit(n_splits=folds)
for train_index, test_index in kfold.split(x, y):
x_train = x[train_index]
y_train = y[train_index]
x_test = x[test_index]
y_test = y[test_index]
return x_train, y_train, x_test, y_test
| burnpiro/wod-usage-predictor | model/data_preprocessor.py | data_preprocessor.py | py | 5,157 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "model.config.HISTORY_SIZE",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": ... |
15332380058 | """
Handle photobooth configuration.
"""
import os
import sys
import logging
import yaml
class Configuration:
"""
Create configuration object.
"""
def __init__(self, file):
self.logger = logging.getLogger(__name__)
self.file = file
self._get_config()
def _get_config(self):
"""
Try to extract configuration from YAML file
"""
try:
# Open configuration file and load YAML
with open(self.file, 'r', encoding="utf-8") as f_cfg:
self.logger.debug("Load %s configuration file.", self.file)
config = yaml.load(f_cfg)
# Set configuration attribute
self.resolution = config['resolution']
self.pictures_directory = os.path.expanduser(config['pictures_directory'])
# Set meta for each language
for lang in config['languages'].keys():
logging.debug("Set lang [%s]", config['languages'])
setattr(self, lang, config['languages'][lang])
except KeyError as key_e:
self.logger.error("Parameters missing in configuration file: %s.", key_e, exc_info=True)
sys.exit(2)
except (OSError, yaml.YAMLError):
self.logger.error("Failed to parse configuration file", exc_info=True)
sys.exit(2)
| diablo02000/pyphotobooth | pyphotobooth/libs/configuration.py | configuration.py | py | 1,363 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.expanduser",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_nu... |
42647237351 | from django.contrib.auth import get_user_model
from django.forms import widgets
from root import forms_override as forms
from root.models import UserGroup, EmailTemplate, EmailGroup, Experiment
INITIAL_EMAIL_TEMPLATE = """<html>
<body>
<h1>{{content}}</h1>
</body>
</html>
"""
class RegistrationForm(forms.Form):
first_name = forms.CharField()
last_name = forms.CharField()
email = forms.EmailField()
username = forms.CharField()
password = forms.CharField(
min_length=5,
widget=widgets.PasswordInput(attrs={'class': 'form-control'})
)
group = forms.CharField()
def create_user(self):
data = self.cleaned_data
return get_user_model().objects.create_user(
data['username'],
email=data['email'],
password=data['password'],
first_name=data['first_name'],
last_name=data['last_name']
)
def create_group(self, user):
group = UserGroup.objects.create(
name=self.cleaned_data['group']
)
group.users.add(user)
return group
def save(self):
user = self.create_user()
group = self.create_group(user)
return user, group
class EmailGroupSetUpForm(forms.Form):
user_group = forms.ChoiceField()
group_name = forms.CharField()
name = forms.CharField()
content = forms.CharField(
initial=INITIAL_EMAIL_TEMPLATE,
widget=widgets.Textarea(attrs={'class': 'form-control'})
)
content_type = forms.ChoiceField(
choices=(
('text/html; charset=UTF-8', 'text/html; charset=UTF-8'),
('text/plain', 'text/plain')
)
)
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['user_group'] = forms.ChoiceField(
choices=UserGroup.objects.filter(users=user).values_list('id', 'name')
)
def create_email_group(self):
return EmailGroup.objects.create(
name=self.cleaned_data['group_name'],
group_id=self.cleaned_data['user_group'],
)
def create_template(self, group):
return EmailTemplate.objects.create(
group=group,
name=self.cleaned_data['name'],
content=self.cleaned_data['content'],
content_type=self.cleaned_data['content_type']
)
def save(self):
group = self.create_email_group()
template = self.create_template(group)
return group, template
class ExperimentModelForm(forms.ModelForm):
name = forms.CharField()
chance = forms.IntegerField(min_value=0, max_value=100, initial=50)
start_time = forms.DateTimeField(required=False)
end_time = forms.DateTimeField(required=False)
is_active = forms.BooleanField(required=False)
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
query = EmailTemplate.objects.filter(group__group__users__in=[user])
self.fields['choice_a'] = forms.ModelChoiceField(
queryset=query
)
self.fields['choice_b'] = forms.ModelChoiceField(
queryset=query
)
self.fields['email_group'] = forms.ModelChoiceField(
queryset=EmailGroup.objects.filter(group__users__in=[user]),
widget=forms.HiddenInput()
)
class Meta:
model = Experiment
fields = [
'name', 'chance',
'choice_a', 'choice_b',
'start_time', 'end_time',
'is_active', 'email_group'
]
class EmailTemplateModelForm(forms.ModelForm):
name = forms.CharField()
subject = forms.CharField(initial='Subjects are also a template: {{content}}')
content = forms.CharField(
initial=INITIAL_EMAIL_TEMPLATE,
widget=widgets.Textarea(attrs={'class': 'form-control'})
)
content_type = forms.ChoiceField(
choices=(
('text/html; charset=UTF-8', 'text/html; charset=UTF-8'),
('text/plain', 'text/plain')
)
)
preview_data = forms.CharField(
widget=widgets.Textarea(attrs={'class': 'form-control'}),
initial='{"content": "hello-world"}'
)
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['group'] = forms.ModelChoiceField(
queryset=EmailGroup.objects.filter(group__users__in=[user]),
widget=forms.HiddenInput()
)
class Meta:
model = EmailTemplate
fields = [
'name', 'group',
'content', 'content_type',
'preview_data'
]
| ograycode/engage | root/forms.py | forms.py | py | 4,691 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "root.forms_override.Form",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "root.forms_override",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "root.forms_override.CharField",
"line_number": 13,
"usage_type": "call"
},
{
"a... |
73268144828 | import tweepy
import tweeting
import argparse
from time import sleep
parser = argparse.ArgumentParser(description="Tweet news stories periodically according to global priorities.")
parser.add_argument('-dbg_mode', default=False, type=bool, nargs=1, help="Run in debug mode (default = False)")
parser.add_argument('-tw_cred', default='twitter_API_keys.txt', type=str, nargs=1,
help="Contains Twitter API credentials. Must be in directory above repo.")
parser.add_argument('-news_cred', default='newsapi_key.txt', type=str, nargs=1,
help="Contains NewsAPI key. Must be in directory above repo.")
parser.add_argument('-url_path', default='url_content_lookup.csv', type=str, nargs=1,
help="Directory of the url lookup table")
parser.add_argument('-qaly_path', type=str, nargs=1, default='global_prios/global_prios.csv',
help="Path to the QALY table (default =global_prios/global_prios.csv)")
parser.add_argument('-db_filename', default='news.db', type=str, nargs=1,
help="Name of news database. Default = news.db")
parser.add_argument('-periodicity_s', default=3600, type=float, nargs=1,
help="Tweet periodicity (s). Default=3600.")
parser.add_argument('-max_time', default=7*24*3600, type=float, nargs=1,
help="Duration to tweet (s). Default=604800 (1 week).")
parser.add_argument('-tweet_time_window', default=2*7*24.0, type=float, nargs=1,
help="Time window to search into the past for news (hours). Default=336 (2 weeks).")
parser.add_argument('-news_refresh_period', default=24.0/3, type=float, nargs=1,
help="Periodicity to update news database (hours). Default = 8.")
args = parser.parse_args()
dbg_mode = args.dbg_mode
twitter_credentials_filename = args.tw_cred
news_api_filename = args.news_cred
url_path = args.url_path
qaly_path = args.qaly_path
db_filename = args.db_filename
periodicity_s = args.periodicity_s
max_time = args.max_time
tweet_time_window = args.tweet_time_window
news_refresh_period = args.news_refresh_period
credentials_dir = '../'
# Parse twitter credentials from the text file, see https://developer.twitter.com/en/apps
fp = open(credentials_dir+twitter_credentials_filename, 'r')
credentials = fp.read().splitlines()
fp.close()
consumer_token = credentials[0].split('=')[1]
consumer_secret = credentials[1].split('=')[1]
access_token = credentials[2].split('=')[1]
access_token_secret = credentials[3].split('=')[1]
# Get news API key
fp = open(credentials_dir+news_api_filename, 'r')
api_key = fp.read().split()[0]
# Set twitter credentials
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
tweepyapi = tweepy.API(auth)
while True:
tweeting.tweet_news(tweepyapi, api_key, qaly_path, url_path,
db_filename, tweet_time_window, news_refresh_period,
dbg_mode=dbg_mode)
sleep(periodicity_s)
| jaryaman/propNews | main.py | main.py | py | 3,030 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tweepy.OAuthHandler",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "tweeting.tweet_... |
20538778289 | # https://leetcode.com/problems/word-search-ii/
"""
Time complexity:- O(N * M * W), where N and M are the dimensions of the board and W is the total number of characters in the words.
Space Complexity:- O(W)
"""
"""
Intuition:
The findWords method uses a Trie data structure to efficiently search for words on the board.
It iterates through the board cells and starts the search from each cell if it is a prefix in the Trie.
The find_str function performs a depth-first search (DFS) on the board to find words in the Trie.
The unique words found are stored in the res set.
"""
from collections import defaultdict
from functools import reduce
from typing import List
class Solution:
def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:
# Create a Trie data structure
Trie = lambda: defaultdict(Trie)
trie = Trie()
END = True
# Build the trie using the given words
for word in words:
reduce(dict.__getitem__, word, trie)[END] = word
# Set to store unique results
res = set()
def find_str(i, j, t):
# Helper function to explore the board and find words
if END in t:
res.add(t[END])
letter = board[i][j]
board[i][j] = "" # Mark the cell as visited
# Check adjacent cells and continue the search
if i > 0 and board[i - 1][j] in t:
find_str(i - 1, j, t[board[i - 1][j]])
if j > 0 and board[i][j - 1] in t:
find_str(i, j - 1, t[board[i][j - 1]])
if i < len(board) - 1 and board[i + 1][j] in t:
find_str(i + 1, j, t[board[i + 1][j]])
if j < len(board[0]) - 1 and board[i][j + 1] in t:
find_str(i, j + 1, t[board[i][j + 1]])
board[i][j] = letter # Restore the original cell value
return
# Iterate through the board
for i, row in enumerate(board):
for j, char in enumerate(row):
# If the current cell is a prefix in the trie, start the search
if board[i][j] in trie:
find_str(i, j, trie[board[i][j]])
return list(res)
| Amit258012/100daysofcode | Day96/word_search_2.py | word_search_2.py | py | 2,234 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 29,
"usage_type": "call"
}
] |
72355994109 | #!/usr/bin/python3
"""Module that lists all State objects from the database hbtn_0e_6_usa"""
from sys import argv
from model_state import Base, State
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
if __name__ == '__main__':
engine = create_engine('mysql+mysqldb://{}:{}@localhost:3306/{}'
.format(argv[1], argv[2], argv[3]))
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
selected_state = session.query(State).filter(State.name == argv[4])
try:
print("{}".format(selected_state[0].id))
except IndexError:
print("Not found")
| MrZooM001/alx-higher_level_programming | 0x0F-python-object_relational_mapping/10-model_state_my_get.py | 10-model_state_my_get.py | py | 672 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "model_state.Base.metadata.create_all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name":... |
70129578749 | #-----------------------------------------------------------------------------------------#
import torch
import matplotlib.pyplot as plt
import deepwave
from deepwave import scalar
import numpy as np
import warnings
#-----------------------------------------------------------------------------------------#
warnings.filterwarnings("ignore", category=UserWarning)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
#-----------------------------------------------------------------------------------------#
ny = 500
nx = 500
# vp = 1500 * torch.ones(ny, nx)
ny = 2301; nx = 751; dx = 4.0
v = torch.from_file('data/modeling/velocity.bin', size=ny*nx).reshape(ny, nx).to(device)
ny = 100
nx = 300
# vp = 1500 * torch.ones(ny, nx)
# vs = 1000 * torch.ones(ny, nx)
# rho = 2200 * torch.ones(ny, nx)
# 1) Create 2 layers
layer_boundary = ny // 2 # Create a layer boundary in the middle of the y-axis
# 2 & 3) Define vp for top and bottom layers
vp = torch.ones(ny, nx) # Creating vp tensor
vp[:layer_boundary, :] = 1500 # Top layer
vp[layer_boundary:, :] = 4000 # Bottom layer
#-----------------------------------------------------------------------------------------#
# NOTE in case for QC input velocity
# v = v.cpu().numpy()
# plt.imshow(np.rot90(v, 3), cmap='gray', vmin=2000, vmax=5000)
# plt.show()
#-----------------------------------------------------------------------------------------#
n_shots = 115
n_sources_per_shot = 1
d_source = 20 # 20 * 4m = 80m
first_source = 10 # 10 * 4m = 40m
source_depth = 2 # 2 * 4m = 8m
freq = 25
nt = 750
dt = 0.004
peak_time = 1.5 / freq
# source_locations
source_locations = torch.zeros(n_shots, n_sources_per_shot, 2, dtype=torch.long, device=device)
source_locations[..., 1] = source_depth
# source_locations[:, 0, 0] = (torch.arange(n_shots) * d_source +
# first_source)
# source_amplitudes
source_amplitudes = (deepwave.wavelets.ricker(freq, nt, dt, peak_time)
.repeat(n_shots, n_sources_per_shot, 1)
.to(device))
out = scalar(v, dx, dt, source_amplitudes=source_amplitudes,
source_locations=source_locations,
accuracy=8,
pml_freq=freq)
# receiver_amplitudes = out[-1]
# vmin, vmax = torch.quantile(receiver_amplitudes[0],
# torch.tensor([0.05, 0.95]).to(device))
# _, ax = plt.subplots(1, 2, figsize=(10.5, 7), sharey=True)
# ax[0].imshow(receiver_amplitudes[57].cpu().T, aspect='auto',
# cmap='gray', vmin=vmin, vmax=vmax)
# ax[1].imshow(receiver_amplitudes[:, 192].cpu().T, aspect='auto',
# cmap='gray', vmin=vmin, vmax=vmax)
# ax[0].set_xlabel("Channel")
# ax[0].set_ylabel("Time Sample")
# ax[1].set_xlabel("Shot")
# plt.tight_layout()
# plt.show()
# receiver_amplitudes.cpu().numpy().tofile('test.bin')
| PongthepGeo/geophysics_23 | codes/seismic/keep/test_forward.py | test_forward.py | py | 2,872 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.cud... |
74977703867 | import re
import logging
import sys
import os
import yaml
from rdflib import ConjunctiveGraph, Literal, URIRef, BNode, Namespace
from dipper.graph.Graph import Graph as DipperGraph
from dipper.utils.CurieUtil import CurieUtil
from dipper import curie_map as curie_map_class
from dipper.models.BiolinkVocabulary import BioLinkVocabulary as blv
LOG = logging.getLogger(__name__)
class RDFGraph(DipperGraph, ConjunctiveGraph):
"""
Extends RDFLibs ConjunctiveGraph
The goal of this class is wrap the creation
of triples and manage creation of URIRef,
Bnodes, and literals from an input curie
"""
curie_map = curie_map_class.get()
curie_util = CurieUtil(curie_map)
# make global translation table available outside the ingest
with open(
os.path.join(
os.path.dirname(__file__),
'../../translationtable/GLOBAL_TERMS.yaml')) as fhandle:
globaltt = yaml.safe_load(fhandle)
globaltcid = {v: k for k, v in globaltt.items()}
def __init__(self, are_bnodes_skized=True, identifier=None):
# print("in RDFGraph with id: ", identifier)
super().__init__('IOMemory', identifier)
self.are_bnodes_skized = are_bnodes_skized
self.prefixes = set()
# Can be removed when this is resolved
# https://github.com/RDFLib/rdflib/issues/632
# 2020 oct. possibly fixed
# for pfx in ('OBO',): # , 'ORPHA'):
# self.bind(pfx, Namespace(self.curie_map[pfx]))
def _make_category_triple(
self, subject, category, predicate=blv.terms['category']
):
"""
add a triple to capture subject or object category (in CURIE form) that was
passed to addTriple()
"""
try:
self.add((
self._getnode(subject),
self._getnode(predicate),
self._getnode(category)))
except:
LOG.warning(
"Problem adding triple in _makeCategoryTriple for " + \
"subj: %s pred: %s obj(category): %s",
subject, predicate, category)
def _is_literal(self, thing):
"""
make inference on type (literal or CURIE)
return: logical
"""
if self.curie_regexp.match(thing) is not None or\
thing.split(':')[0].lower() in ('http', 'https', 'ftp'):
object_is_literal = False
else:
object_is_literal = True
return object_is_literal
def addTriple(
self,
subject_id,
predicate_id,
obj,
object_is_literal=None,
literal_type=None,
subject_category=None,
object_category=None
):
if object_is_literal is None:
object_is_literal = self._is_literal(obj)
# add triples for subject category info
if subject_category is not None:
self._make_category_triple(subject_id, subject_category)
# add triples for obj category info, if obj is not a literal
if not object_is_literal:
if object_category is not None:
self._make_category_triple(obj, object_category)
else: # emit warning if object category is given for a literal
if object_category is not None:
LOG.warning("I was given a category %s for obj: %s, " +
"which seems to be a literal!",
object_category, obj)
if object_is_literal is True:
if isinstance(obj, str):
re.sub(r'[\t\n\r\f\v]+', ' ', obj) # reduce any ws to a space
if literal_type is not None and obj is not None and obj not in ("", " "):
literal_type_iri = self._getnode(literal_type)
self.add(
(self._getnode(subject_id), self._getnode(predicate_id),
Literal(obj, datatype=literal_type_iri)))
elif obj is not None:
# could attempt to infer a type here but there is no use case
self.add((
self._getnode(subject_id), self._getnode(predicate_id),
Literal(obj)))
else:
LOG.warning(
"None as literal object for subj: %s and pred: %s",
subject_id, predicate_id)
# get a sense of where the None is comming from
# magic number here is "steps up the call stack"
# TODO there may be easier/ideomatic ways to do this now
for call in range(2, 0, -1):
LOG.warning(
'\t%sfrom: %s', '\t' * call, sys._getframe(call).f_code.co_name)
elif obj is not None and obj != '': # object is a resource
self.add((
self._getnode(subject_id),
self._getnode(predicate_id),
self._getnode(obj)))
else:
LOG.warning(
"None/empty object IRI for subj: %s and pred: %s",
subject_id, predicate_id)
def skolemizeBlankNode(self, curie):
stripped_id = re.sub(r'^_:|^_', '', curie, 1)
return URIRef(self.curie_map['BNODE'] + stripped_id)
def _getnode(self, curie):
"""
This is a wrapper for creating a URIRef or Bnode object
with a given a curie or iri as a string.
If an id starts with an underscore, it assigns it to a BNode, otherwise
it creates it with a standard URIRef.
Alternatively, self.skolemize_blank_node is True,
it will skolemize the blank node
:param curie: str identifier formatted as curie or iri
:return: node: RDFLib URIRef or BNode object
"""
node = None
if curie[0] == '_':
if self.are_bnodes_skized:
node = self.skolemizeBlankNode(curie)
else: # delete the leading underscore to make it cleaner
node = BNode(re.sub(r'^_:|^_', '', curie, 1))
# Check if curie string is actually an IRI
elif curie[:4] == 'http' or curie[:3] == 'ftp' or curie[:4] == 'jdbc':
node = URIRef(curie)
else:
iri = RDFGraph.curie_util.get_uri(curie)
if iri is not None:
node = URIRef(iri)
# Bind prefix map to graph
prefix = curie.split(':')[0]
self.prefixes.add(prefix)
else:
LOG.error("couldn't make URI for %s", curie)
# get a sense of where the CURIE-ish? thing is comming from
# magic number here is "steps up the call stack"
for call in range(3, 0, -1):
LOG.warning(
'\t%sfrom: %s', '\t' * call, sys._getframe(call).f_code.co_name)
return node
def bind_all_namespaces(self):
"""
Results in the RDF @prefix directives for every ingest
being added to this ingest.
"""
for prefix in self.curie_map.keys():
iri = self.curie_map[prefix]
self.bind(prefix, Namespace(iri))
# serialize() conflicts between rdflib & Graph.serialize abstractmethod
# GraphUtils expects the former. (too bad there is no multiple dispatch)
# rdflib version
def serialize(
self, destination=None, format='turtle', base=None, encoding=None
):
for prefix in self.prefixes:
mapped_iri = self.curie_map[prefix]
self.bind(prefix, Namespace(mapped_iri))
return ConjunctiveGraph.serialize(self, destination, format)
| monarch-initiative/dipper | dipper/graph/RDFGraph.py | RDFGraph.py | py | 7,720 | python | en | code | 53 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dipper.graph.Graph.Graph",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "rdflib.ConjunctiveGraph",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "di... |
9854004184 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
import backtrader.indicators as btind
import backtrader.feeds as btfeeds
class GoldenCross(bt.Strategy):
params = (('fast', 50), ('slow', 200),
('order_percentage', 0.95), ('ticker', 'SPY'))
def __init__(self):
self.fast_moving_average = bt.indicators.SMA(
self.data.close, period=self.params.fast, plotname='50 Day Moving Average'
)
self.slow_moving_average = bt.indicators.SMA(
self.data.close, period=self.params.slow, plotname='200 Day Moving Average'
)
self.crossover = bt.indicators.CrossOver(
self.fast_moving_average, self.slow_moving_average)
def next(self):
# If position size is 0, we own 0 shares
if self.position.size == 0:
# Crossover is 1, so Golden Cross happened
if self.crossover > 0:
amount_to_invest = (
self.params.order_percentage * self.broker.cash)
self.size = math.floor(amount_to_invest / self.data.close)
print("Buy {} share of {} at {}".format(
self.size, self.params.ticker, self.data.close[0]))
self.buy(size=self.size)
if self.position.size > 0:
if self.crossover < 0:
print("Sell {} shares of {} at {}".format(
self.size, self.params.ticker, self.data.close[0]))
self.close()
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Add a strategy
cerebro.addstrategy(GoldenCross)
# Datas are in a subfolder of the samples. Need to find where the script is
# because it could have been called from anywhere
modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
datapath = os.path.join(
modpath, '/Users/alfredopzr/Desktop/Coinbase-Python/Coinbase-Python/datas/SPY.csv')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 1, 3),
# Do not pass values before this date
todate=datetime.datetime(2021, 9, 13),
# Do not pass values after this date
reverse=False)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(10000.0)
# Add a FixedSize sizer according to the stake
# cerebro.addsizer(bt.sizers.AllInSizer, percents=95)
# Set the commission
cerebro.broker.setcommission(commission=0.00)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Plot the result
cerebro.plot()
| alfredopzr/backtesting-python | backtrader/strategies/GoldenCross.py | GoldenCross.py | py | 3,173 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "backtrader.Strategy",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "backtrader.indicators.SMA",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "backtrader.indicators",
"line_number": 20,
"usage_type": "attribute"
},
{
"api... |
40542696905 | from flask import Flask, request, jsonify
from flask_mail import Mail, Message
import json
import sqlalchemy
from sqlalchemy import or_,desc
from tables import db,GDPs, Impact, ImpactPredicted
app = Flask(__name__)
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = '***@gmail.com' # enter your email he$
app.config['MAIL_DEFAULT_SENDER'] = '***@gmail.com' # enter your ema$
app.config['MAIL_PASSWORD'] = '****' # enter your password here
mail = Mail(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://doadmin:k6hjzqqbj408kaa7@covinsights-database-do-user-7582405-0.a.db.ondigitalocean.com:25060/CovInsights'
db.init_app(app)
@app.route("/", methods=['GET', 'POST'])
def test():
return 'true'
@app.route("/getGDPs", methods=['GET', 'POST'])
def getGDPs():
result = GDPs.query.all()
response = []
for row in result:
l = []
l.append(row.Country)
l.append(row.GDP)
l.append(row.Code)
response.append(l)
response = {
"data" : response
}
return response
@app.route("/getImpact", methods=['GET', 'POST'])
def getImpact():
response = { 'status' : True }
try:
content = json.loads(request.data)
country= content['country']
result = Impact.query.filter_by(Economy=country).all()
values = []
for row in result:
if row.Sector == '_All' :
response['gdp'] = row.GDP
response['Emp'] = row.Employment
else:
values.append(row.GDP)
response['values'] = values
currentGDP = GDPs.query.get(country)
response['currentGDP'] = currentGDP.GDP
print(response)
except Exception as e:
print('Exception:', e.__class__)
return {
'status': False,
'content': 'Unknown error. Please contact the developer.'
}
return response
@app.route("/getImpactPredicted", methods=['GET', 'POST'])
def getImpactPredicted():
response = { 'status' : True }
try:
content = json.loads(request.data)
country= content['country']
result = ImpactPredicted.query.filter_by(Economy=country).all()
values = []
for row in result:
if row.Sector == '_All' :
response['gdp'] = row.GDP
response['Emp'] = row.Employment
else:
values.append(row.GDP)
response['values'] = values
currentGDP = GDPs.query.get(country)
response['currentGDP'] = currentGDP.GDP
print(response)
except Exception as e:
print('Exception:', e.__class__)
return {
'status': False,
'content': 'Unknown error. Please contact the developer.'
}
return response
@app.route("/subscribe", methods=['GET', 'POST'])
def subscribe():
content = json.loads(request.data)
mailID = content['mailid']
response = {}
try:
msg = Message("Your report is here | CovInsights", recipients=[mailID])
msg.body = "Thank you for using our service!"
with app.open_resource("../CovInsights Report.pdf") as fp:
msg.attach("CovInsights Report.pdf", "application/pdf", fp.read())
mail.send(msg)
except Exception as e:
response['status'] = False
response['error'] = str(e)
return response
response['status'] = True
return response
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| aftex261/DigitalOcean | B Sudharsan - CovInsights App/Backend Files/app.py | app.py | py | 3,625 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_mail.Mail",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tables.db.init_app",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tables.db",
"line_n... |
30578248385 | import cv2
from picamera import PiCamera
from picamera.array import PiRGBArray
import time, socket, logging, configparser, argparse, sys
from utils import Utils
parser = argparse.ArgumentParser()
parser.add_argument('--d', nargs=1, default=None)
args = parser.parse_args()
APP_DIR = args.d[0] if args.d != None else "./"
CONFIGURATIONS = APP_DIR + 'configuration.ini'
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler(APP_DIR + 'logs/video-streamer | ' + str(time.asctime()) + '.log'),
logging.StreamHandler()
]
)
config = configparser.ConfigParser()
if len(config.read(CONFIGURATIONS)) == 0:
logging.error("Could Not Read Configurations File: " + CONFIGURATIONS)
sys.exit()
DRONE_ID = config['drone']['id']
HOST_IP = config['cloud-app']['ip']
VIDEO_PORT = int( config['cloud-app']['video-port'])
GRAYSCALE = config['video']['grayscale'].lower() == 'true'
FRAMES_PER_SECOND = int( config['video']['fps'])
JPEG_QUALITY = int( config['video']['quality'])
WIDTH = int( config['video']['width'])
HEIGHT = int( config['video']['height'])
logging.info('FPS: %s Quality: %s Width %s Height %s Grayscale: %s',
str(FRAMES_PER_SECOND), str(JPEG_QUALITY), str(WIDTH), str(HEIGHT), GRAYSCALE)
logging.info('Drone ID: %s Video Recipient: %s:%s', str(DRONE_ID), str(HOST_IP), str(VIDEO_PORT))
camera = None
video_socket = None
while(True):
try:
camera = PiCamera()
camera.resolution = (WIDTH, HEIGHT)
camera.framerate = FRAMES_PER_SECOND
rawCapture = PiRGBArray(camera, size=(WIDTH, HEIGHT))
time.sleep(0.1)
logging.info("Camera module initiated")
video_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
video_socket.connect((HOST_IP, VIDEO_PORT))
logging.info("Socket Opened, Video Streaming started")
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
image_data = frame.array
image_data = cv2.rotate(image_data, cv2.ROTATE_180)
if GRAYSCALE:
image_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2GRAY)
code, jpg_buffer = cv2.imencode(".jpg", image_data, [int(cv2.IMWRITE_JPEG_QUALITY), JPEG_QUALITY])
datagramMsgBytes = Utils.create_datagram_message(DRONE_ID, jpg_buffer)
video_socket.sendall(datagramMsgBytes)
rawCapture.truncate(0)
except Exception as e:
logging.error("Video Stream Ended: "+str(e))
if camera != None:
camera.close()
if video_socket != None:
video_socket.close()
time.sleep(2) | petkanov/drone-raspberry-py-app | video_streamer.py | video_streamer.py | py | 2,824 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "logging... |
5707632801 | '''
Posts routes
/posts (args: position, amount)
/post (args: id)
'''
from flask_api import status
from flask import request
from flask import Blueprint
from service.upload_image import UploadImage
from databases.models.post import Post, PostStatus
from databases.models.photo import Photo
from decorators.token import token_required
from exceptions.posts import PostArgsError, APIexception
from exceptions.token import DecodeToken
from exceptions.validate import InvalidImage
from config.db import db
from config.config import BASE_COUNT_POSTS, BASE_POSITION_POSTS, SECRET_KEY, ACTIVE_POST_STATUS
from validations.routes.new_post import PostValid
from tokens.token_hendler import TokenManager
posts = Blueprint('posts', __name__, template_folder='templates')
@posts.route('/posts', endpoint='get_posts', methods=['GET'])
@token_required
def get_posts():
'''Posts slize
?position=0&amount=0
'''
position = request.args.get('position', default=BASE_POSITION_POSTS, type=int)
amount = request.args.get('amount', default=BASE_COUNT_POSTS, type=int)
if position < 0 or amount < 0:
return PostArgsError.message, status.HTTP_400_BAD_REQUEST
slice_posts = Post.query.order_by(
Post.id.desc()).offset(position).limit(amount).all()
amount = Post.query.count()
return {'posts': slice_posts, 'size': amount}, status.HTTP_200_OK
@posts.route('/new-post', endpoint='set_post', methods=['POST'])
@token_required
def set_post():
'''Post
add new post
title: str
text: str
status: str
likes: int
view: int
shared: int
user: User
photos: Photo
'''
try:
post_form = PostValid(**request.form)
validate_date = post_form.validate()
except APIexception as error:
return error.message, status.HTTP_400_BAD_REQUEST
try:
user_id = TokenManager.get_id_user(SECRET_KEY ,request.headers['Access-Token'])
except DecodeToken as error:
return error.message, status.HTTP_400_BAD_REQUEST
img_photo = None
if request.method == 'POST' and 'file' in request.files:
try:
img_photo = UploadImage(request.files['file']).save_image()
except InvalidImage as error:
return error.message, status.HTTP_400_BAD_REQUEST
post_status = PostStatus.query.filter_by(name=ACTIVE_POST_STATUS).first()
if post_status is None:
status_post = PostStatus()
status_post.name = ACTIVE_POST_STATUS
db.session.add(status_post) # pylint: disable=no-member
db.session.commit() # pylint: disable=no-member
try:
new_post = Post()
new_post.title = validate_date['title']
new_post.text = validate_date['text']
new_post.status_id = PostStatus.query.filter_by(name=ACTIVE_POST_STATUS).first().id
new_post.user_id = user_id
db.session.add(new_post) # pylint: disable=no-member
db.session.commit() # pylint: disable=no-member
if img_photo is not None:
photo = Photo()
photo.photo = img_photo
photo.user_id = user_id
photo.post_id = new_post.id
db.session.add(photo) # pylint: disable=no-member
db.session.commit() # pylint: disable=no-member
except APIexception as error:
return error.message, status.HTTP_400_BAD_REQUEST
return {'post' : new_post}, status.HTTP_200_OK
| Dolzhenkov-Andrii/api | routes/posts.py | posts.py | py | 3,478 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "flask.... |
31477418429 | #!/usr/bin/env python
import argparse
import re
import json
from os.path import isfile
def a(lines):
return len(lines)
def t(requests):
stats = {}
for dict_ in requests:
method = dict_['method']
if method not in stats.keys():
stats[method] = 0
stats[method] += 1
ret = []
for key in stats:
dict_ = {}
dict_['method'] = key
dict_['count'] = stats[key]
ret.append(dict_)
return ret
def f(requests):
requests.sort(key=lambda x: x['url'])
ans = []
flag = True
tmp = {}
for elm in requests:
if flag:
tmp['url'] = elm['url']
tmp['count'] = 0
flag = False
if elm['url'] != tmp['url']:
ans.append(tmp)
tmp = {}
tmp['url'] = elm['url']
tmp['count'] = 1
else:
tmp['count'] += 1
ans.append(tmp)
ans.sort(key=lambda x: x['count'], reverse=True)
ans = ans[:10]
ret = []
for request in ans:
dict_ = {}
dict_['url'] = request['url']
dict_['count'] = request['count']
ret.append(dict_)
return ret
def c(requests):
requests = list(filter(lambda x: x['code'] in range(400, 500), requests))
requests.sort(key=lambda x: x['len'], reverse=True)
ans = requests[:5]
ret = []
for request in ans:
dict_ = {}
dict_['url'] = request['url']
dict_['code'] = request['code']
dict_['len'] = request['len']
dict_['ip'] = request['ip']
ret.append(dict_)
return ret
def s(requests):
requests = list(filter(lambda x: x['code'] in range(500, 600), requests))
requests.sort(key=lambda x: x['ip'])
ans = []
flag = True
tmp = {}
for elm in requests:
if flag:
tmp['ip'] = elm['ip']
tmp['count'] = 0
flag = False
if elm['ip'] != tmp['ip']:
ans.append(tmp)
tmp = {}
tmp['ip'] = elm['ip']
tmp['count'] = 1
else:
tmp['count'] += 1
ans.append(tmp)
ans.sort(key=lambda x: x['count'], reverse=True)
ans = ans[:5]
ret = []
for request in ans:
dict_ = {}
dict_['ip'] = request['ip']
dict_['count'] = request['count']
ret.append(dict_)
return ret
def to_requests(lines):
requests = []
for line in lines:
dict_ = {}
splited = re.split('[ "]', line)
dict_['ip'] = splited[0]
dict_['method'] = splited[6]
dict_['url'] = splited[7]
dict_['code'] = int(splited[10])
if splited[11] == '-':
dict_['len'] = 0
else:
dict_['len'] = int(splited[11])
requests.append(dict_)
return requests
def main():
outfile = 'analyzed'
parser = argparse.ArgumentParser(usage='analyze.py [--json] a | t | f | c | s <FILE>',
epilog=f'Имя выходного файла - "{outfile}".')
parser.add_argument('task', action='store', help='см. в README.md', choices=['a', 't', 'f', 'c', 's'])
parser.add_argument('file', action='store', metavar='FILE', help='входной файл')
parser.add_argument('--json', action='store_true', help='записать вывод в формате JSON')
args = parser.parse_args()
if isfile(outfile):
print(f"File '{outfile}' exists, overwrite? (yes/NO): ", end='')
in_ = input()
if not (in_ == 'y' or in_ == 'yes'):
raise FileExistsError()
with open(args.file) as fl:
lines = fl.read().split('\n')
if lines[-1] == '':
del lines[-1]
task = args.task
if task == 'a':
res = a(lines)
else:
requests = to_requests(lines)
if task == 't':
res = t(requests)
elif task == 'f':
res = f(requests)
elif task == 'c':
res = c(requests)
elif task == 's':
res = s(requests)
else:
raise Exception()
with open(outfile, 'w') as fl:
if args.json:
fl.write(json.dumps(res))
else:
if isinstance(res, list):
for line in res:
for key in line:
fl.write(str(line[key]) + ' ')
fl.write('\n')
elif isinstance(res, int):
fl.write(str(res))
else:
raise Exception()
if __name__ == '__main__':
main()
| gatart/2021-1-MAILRU-SDET-Python-G-Talamanov | Homework_5/analyze.py | analyze.py | py | 4,579 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "re.split",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"... |
1439842893 | import numpy as np
import numpy.random
import matplotlib.pyplot as plt
import scipy.optimize
def jj_cpr_ballistic(gamma, tau):
return np.sin(gamma) / np.sqrt(1 - tau * np.sin(gamma/2)**2)
def jj_free_energy_ballistic(gamma, tau):
return 4 / tau * (1 - np.sqrt(1 - tau * np.sin(gamma/2)**2))
# d/dγ I(γ)
def jj_diff_ballistic(gamma, tau):
nom_vals = 1 - tau * np.sin(gamma / 2)**2
return 1/4 * tau * np.sin(gamma)**2 / (nom_vals)**(3/2) + \
np.cos(gamma) / np.sqrt(nom_vals)
def _normalize_phase(phi):
phi = np.fmod(phi, 2 * np.pi)
phi = np.where(phi < 0, phi + 2 * np.pi, phi)
# normalize to (-pi, pi)
phi = np.where(phi > np.pi, phi - 2*np.pi, phi)
return phi
class network:
def __init__(self, Nx, Ny, diff_x=None, diff_y=None, *, cpr_x, cpr_y, free_energy_x, free_energy_y):
self.Nx = Nx
self.Ny = Ny
self.cpr_x = cpr_x
self.cpr_y = cpr_y
self.free_energy_x = free_energy_x
self.free_energy_y = free_energy_y
self.diff_x = diff_x
self.diff_y = diff_y
self.island_x_coords, self.island_y_coords = np.meshgrid(np.arange(Nx), np.arange(Ny), indexing="ij")
self.phi_matrix = np.zeros((Nx, Ny), dtype=np.float32)
self.set_frustration(0)
def reset_network(self):
self.phi_matrix *= 0
self.set_frustration(0)
def set_random_state(self):
self.phi_matrix = np.float32(2 * np.pi * numpy.random.rand(self.Nx, self.Ny))
def set_frustration(self, f):
Nx = self.Nx
Ny = self.Ny
A_x = np.linspace(0, -(Ny-1) * f, Ny) + (Ny - 1)/2 * f
A_x = np.tile(A_x, (Nx - 1, 1))
A_y = np.linspace(0, (Nx-1) * f, Nx) - (Nx-1)/2 * f
A_y = np.tile(A_y, (Ny - 1, 1)).T
self.A_x = np.float32(-np.pi * A_x)
self.A_y = np.float32(-np.pi * A_y)
def add_phase_gradient(self, d_phi):
for i in range(self.Nx):
for j in range(self.Ny):
self.phi_matrix[i,j] += d_phi * (i + 1)
def add_vortex(self, x0, y0, vorticity=1):
self.phi_matrix += vorticity * np.arctan2(self.island_y_coords - y0,
self.island_x_coords - x0)
def get_gamma_matrices(self):
Nx = self.Nx
Ny = self.Ny
gamma_x = np.zeros((Nx - 1, Ny))
gamma_y = np.zeros((Nx, Ny - 1))
gamma_x += self.A_x
gamma_y += self.A_y
phi_matrix = self.phi_matrix
gamma_x += phi_matrix[1:,:] - phi_matrix[:-1,:]
gamma_y += phi_matrix[:,1:] - phi_matrix[:,:-1]
return (gamma_x, gamma_y)
def get_current_matrices(self):
gamma_x, gamma_y = self.get_gamma_matrices()
return self.cpr_x(gamma_x), self.cpr_y(gamma_y)
def get_current(self):
I_x, I_y = self.get_current_matrices()
return np.sum(I_x[0,:])
def free_energy(self):
gamma_x, gamma_y = self.get_gamma_matrices()
return np.sum(self.free_energy_x(gamma_x)) + \
np.sum(self.free_energy_y(gamma_y))
def winding_number(self):
# integrate grad φ around the array
phi_matrix = self.phi_matrix
rv = 0
# bottom edge
rv += np.sum(_normalize_phase(phi_matrix[1:,0] - phi_matrix[:-1,0]))
# right edge
rv += np.sum(_normalize_phase(phi_matrix[-1,1:] - phi_matrix[-1,:-1]))
# top edge
rv += -np.sum(_normalize_phase(phi_matrix[1:,-1] - phi_matrix[:-1,-1]))
# left edge
rv += -np.sum(_normalize_phase(phi_matrix[0,1:] - phi_matrix[0,:-1]))
return rv
def plot_phases(self):
plt.clf()
m = self.phi_matrix.copy()
m = np.flip(m, axis=1)
m = np.swapaxes(m, 0, 1)
plt.imshow(m/np.pi, aspect='equal', cmap='gray')
plt.colorbar(format="%.1f", label='φ')
def plot_currents(self):
Nx = self.Nx
Ny = self.Ny
x_currents, y_currents = self.get_current_matrices()
x_current_xcoords, x_current_ycoords = np.meshgrid(np.arange(Nx-1), np.arange(Ny), indexing="ij")
x_current_xcoords = x_current_xcoords.astype('float64')
x_current_ycoords = x_current_ycoords.astype('float64')
x_current_xcoords += 0.5
y_current_xcoords, y_current_ycoords = np.meshgrid(np.arange(Nx), np.arange(Ny-1), indexing="ij")
y_current_xcoords = y_current_xcoords.astype('float64')
y_current_ycoords = y_current_ycoords.astype('float64')
y_current_ycoords += 0.5
plt.clf()
plt.quiver(x_current_xcoords, x_current_ycoords,
x_currents, np.zeros(x_currents.shape),
pivot='mid', units='width', scale=5*Nx, width=1/(30*Nx))
plt.quiver(y_current_xcoords, y_current_ycoords,
np.zeros(y_currents.shape), y_currents,
pivot='mid', units='width', scale=5*Nx, width=1/(30*Nx))
plt.scatter(self.island_x_coords, self.island_y_coords, marker='s', c='b', s=5)
# do not use newton solver? simple gradient descent seems to converge
# with similar speed when using an optimized ε parameter
def optimization_step_newton(self):
# phi -> phi - cpr(phi) / cpr'(phi)
Nx = self.Nx
Ny = self.Ny
phi_matrix = self.phi_matrix
A_x = self.A_x
A_y = self.A_y
cpr_x = self.cpr_x
cpr_y = self.cpr_y
diff_x = self.diff_x
diff_y = self.diff_y
for i in range(Nx):
for j in range(Ny):
I_prime = 0
I = 0
phi_i_j = phi_matrix[i,j]
# y-component
if j > 0:
gamma = phi_i_j - phi_matrix[i,j-1] + A_y[i, j-1]
I += cpr_y(gamma)
I_prime += diff_y(gamma)
if j < Ny - 1:
gamma = -phi_i_j + phi_matrix[i,j+1] + A_y[i,j]
I += -cpr_y(gamma)
I_prime += diff_y(gamma)
# x-component
if i == 0:
gamma = phi_i_j - self.phi_l + A_x[0, j]
I += cpr_x(gamma)
I_prime += diff_x(gamma)
gamma = -phi_i_j + phi_matrix[i+1, j] + A_x[1,j]
I += -cpr_x(gamma)
I_prime += diff_x(gamma)
elif i == Nx - 1:
gamma = -phi_i_j + self.phi_r + A_x[i+1, j]
I += -cpr_x(gamma)
I_prime += diff_x(gamma)
gamma = phi_i_j - phi_matrix[i-1, j] + A_x[i,j]
I += cpr_x(gamma)
I_prime += diff_x(gamma)
else:
gamma = -phi_i_j + phi_matrix[i+1,j]+ A_x[i+1, j]
I += -cpr_x(gamma)
I_prime += diff_x(gamma)
gamma = phi_i_j - phi_matrix[i-1, j]+ A_x[i,j]
I += cpr_x(gamma)
I_prime += diff_x(gamma)
new_phi = phi_i_j - I / I_prime
phi_matrix[i, j] = new_phi
return np.abs(I)
def optimization_step(self, optimize_leads=False, temp=0, fix_contacts=False, epsilon=0.4):
# minimize free energy f(phi) using gradient descent
# update all phi's in-place
# phi -> phi - ε f'(phi)
Nx = self.Nx
Ny = self.Ny
phi_matrix = self.phi_matrix
A_x = self.A_x
A_y = self.A_y
cpr_x = self.cpr_x
cpr_y = self.cpr_y
I_norm = 0
for i in range(Nx):
if fix_contacts and (i == 0 or i == Nx - 1):
continue
for j in range(Ny):
f_prime = 0
phi_i_j = phi_matrix[i,j]
# x-component
if i > 0:
f_prime += cpr_x(phi_i_j - phi_matrix[i-1, j]+ A_x[i-1,j])
if i < Nx - 1:
f_prime += -cpr_x(-phi_i_j + phi_matrix[i+1,j]+ A_x[i, j])
# y-component
if j > 0:
f_prime += cpr_y(phi_i_j - phi_matrix[i,j-1] + A_y[i, j-1])
if j < Ny - 1:
f_prime += -cpr_y(-phi_i_j + phi_matrix[i,j+1] + A_y[i,j])
new_phi = phi_i_j - epsilon * f_prime
if temp > 0:
new_phi += temp * numpy.random.randn()
phi_matrix[i, j] = new_phi
I_norm += np.abs(f_prime)
return I_norm / (Nx * Ny)
def find_ground_state(self, T_start=0.35, N_max=5000, *args, **kwargs):
# annealing schedule
for i in range(N_max):
temp = T_start * (N_max - i) / N_max
delta = self.optimization_step(temp=temp)
print("temp = %g\ndelta = %g" % (temp, delta))
# converge
return self.optimize(temp = 0, *args, **kwargs)
def optimize(self, maxiter=10000, delta_tol=1e-2, *args, **kwargs):
for i in range(maxiter):
delta = self.optimization_step(*args, **kwargs)
if i % 100 == 0:
print("i = %d, delta = %.3g" % (i, delta))
if delta < delta_tol:
print("i(final) = %d, delta(final) = %.3g" % (i, delta))
break
return delta
| amba/JJA-solver | JJAsolver/network.py | network.py | py | 9,572 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.sin",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 10,
... |
31357541271 | import argparse, sys
import tornado.ioloop
import tornado.gen
import time
from nats.io.client import Client as NATS
def show_usage():
print("nats-sub SUBJECT [-s SERVER] [-q QUEUE]")
def show_usage_and_die():
show_usage()
sys.exit(1)
@tornado.gen.coroutine
def main():
# Parse the command line arguments
parser = argparse.ArgumentParser()
# e.g. nats-sub hello -s nats://127.0.0.1:4222
parser.add_argument('subject', default='hello', nargs='?')
parser.add_argument('-s', '--servers', default=[], action='append')
parser.add_argument('-q', '--queue', default="")
# Parse!
args = parser.parse_args()
# Create client and connect to server
nc = NATS()
servers = args.servers
if len(args.servers) < 1:
servers = ["nats://127.0.0.1:4222"]
opts = {"servers": servers}
yield nc.connect(**opts)
@tornado.gen.coroutine
def handler(msg):
print("[Received: {0}] {1}".format(msg.subject, msg.data))
print("Subscribed to '{0}'".format(args.subject))
yield nc.subscribe(args.subject, args.queue, handler)
if __name__ == '__main__':
main()
tornado.ioloop.IOLoop.current().start()
| nats-io/nats.py2 | examples/nats-sub/__main__.py | __main__.py | py | 1,186 | python | en | code | 62 | github-code | 6 | [
{
"api_name": "sys.exit",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nats.io.client.Client",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tornado.ioloop... |
71360101948 | #!/usr/bin/python3
import os
import sys
import time
import jsonpickle
class TODODescription(object):
def __init__(self, todoName, startTime = -1) -> None:
self.todoName = todoName
self.startTime = startTime
self.stopTime = -1
def display(self, id=''):
print('%s %s' % (str(id), str(self.todoName)))
class TaskDescription(object):
def __init__(self, name, path, startTime):
self.taskPath = path
self.taskName: str = name
self.startTime = startTime
self.stopTime = -1
self.status = ''
self.todos: list = []
def addTODO(self, todo: TODODescription):
self.todos.append(todo)
def removeTODO(self, id):
self.todos.pop(int(id))
def display(self):
print('\n')
print('Task name = %s' % str(self.taskName))
print('Task start time = %s' % str(self.startTime))
print('Task stop time = %s' % str(self.stopTime))
print('Task status = %s' % str(self.status))
print('\nTODO:s\n')
todoID = 0
for todo in self.todos:
todo.display(todoID)
todoID += 1
class App:
taskList = [TaskDescription]
jsonName = 'tasks.json'
argCount = 0
def __init__(self, workingDir):
self.workingDir = workingDir
self.jsonPath = self.workingDir + '/%s' % App.jsonName
self.taskCount = 0
def printTaskList(self):
print('Total number of tasks: %d' % len(self.taskList))
for task in self.taskList:
task.display()
def createTasksJSON(self):
with open(App.jsonName, 'w') as f:
tasks = jsonpickle.encode(self.taskList)
f.write(str(tasks))
f.close()
def addTODO(self, taskID, todo):
if self.taskCount - 1 < taskID:
print('ID is too big, cant add TODO')
return
self.taskList[taskID].addTODO(todo)
print('Added a new TODO for task ID %d' % taskID)
print(todo)
def removeTODO(self, taskID, todoID):
if self.taskCount - 1 < taskID:
print('task ID is too big, cant add TODO')
return
if taskID < 0:
print('task ID cant be below 0!')
return
if int(todoID) >= len(self.taskList[taskID].todos):
print('Invalid todo id')
return
self.taskList[taskID].removeTODO(todoID)
print('Removed TODO ID %d from task ID %d' % (int(todoID), int(taskID)))
def addTask(self, taskName: str):
task:TaskDescription = TaskDescription(taskName, os.getcwd(), time.time())
self.taskList.append(task)
self.taskCount += 1
print('Created Task ID = %d' % int(self.taskCount-1))
return self.taskCount - 1
def removeTask(self, taskID):
if taskID >= 0 and taskID < self.taskCount:
del self.taskList[taskID]
self.taskCount -= 1
print('TaskID %d removed!' % taskID)
def getTaskCount(self):
count = 0
for _ in self.taskList:
count += 1
return count
def loadTasks(self):
# Check for existing tasks.json file
if not os.path.exists(self.jsonPath):
print("No tasks.json found! Creating a new one.")
self.createTasksJSON()
tasks = []
with open(App.jsonName, 'r') as f:
file = jsonpickle.decode(f.read())
for task in file:
if isinstance(task, TaskDescription):
tasks.append(task)
self.taskList = tasks
self.taskCount = self.getTaskCount()
def saveTasks(self):
with open(App.jsonName, 'w') as f:
tasks = jsonpickle.encode(self.taskList)
f.write(str(tasks))
f.close()
def getIdFromName(self, name):
count = 0
for task in self.taskList:
count += 1
if task.taskName == name:
return count - 1
return -1
# Valid args are: create/remove/status/add-todo/remove-todo/list <taskname> (<description>)
def parseCmdArgs(self, args):
App.argCount = len(args)
if App.argCount <= 1:
print('ERROR: No args given! Usage: app.py <action> <taskname> (<description>) ')
return
action = args[1]
# Only action without having to specify task name
if action == 'list':
self.printTaskList()
return
if App.argCount <= 2:
print('Only 1 argument given and its not <list>')
return
taskname = args[2]
taskID = self.getIdFromName(taskname) # Will return -1 if not found
if action == 'create':
if taskID >= 0:
print('Task with that name already exists!')
return
taskID = self.addTask(taskname)
elif action == 'remove':
if len(self.taskList) < 1:
print('No task to remove!')
return
self.removeTask(taskID)
elif action == 'status':
if taskID < 0:
print('No task with that name!')
else:
self.taskList[taskID].display()
elif action == 'add-todo':
if taskID >= 0:
description = args[3]
todo = TODODescription(description)
self.addTODO(taskID, todo)
elif action == 'remove-todo':
description = args[3]
self.removeTODO(taskID, description)
elif action == 'pause':
print('TODO: Pause tasks todo for time tracking')
elif action == 'continue':
print('TODO: pause tasks todo for time tracking')
else:
print('Unknown action!')
return
if __name__ == '__main__':
app = App(os.getcwd())
app.loadTasks()
app.parseCmdArgs(sys.argv)
app.saveTasks()
| Dechode/TODO-App | app.py | app.py | py | 5,984 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "jsonpickle.encode",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_numb... |
27603501009 |
import io
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from langdetect import detect
def pdf2string(path):
"""
From a given pdf path, it creates a string of the pdf.
:param path: Path to the pdf file
:return: string of the pdf file
"""
file_in = open(path, 'rb')
# Create a PDF interpreter object. (pdfminer)
retstr = io.StringIO()
rsrcmgr = PDFResourceManager()
device = TextConverter(rsrcmgr, retstr, codec='utf-8', laparams=LAParams())
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Process each page contained in the document.
for page in PDFPage.get_pages(file_in):
interpreter.process_page(page)
data = retstr.getvalue()
return data
def string2txt(string, path):
"""
From a given string, creates a .txt file on the given path.
:param string: The string to be converted to .txt
:param path: The path of the .txt file
:return: File created
"""
# Writes the string with the encoding wanted
with open(path, 'w', encoding='utf-8') as file_out:
file_out.write(string)
file_out.close()
def detect_language(string):
"""
For a given string, returns the language it is writen in.
:param string: the string to be analysed
:return: the language detected (string)
"""
return detect(string) | n1ur0/Document_Clustering | pdfparser.py | pdfparser.py | py | 1,484 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "io.StringIO",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pdfminer.pdfinterp.PDFResourceManager",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pdfminer.converter.TextConverter",
"line_number": 22,
"usage_type": "call"
},
{
... |
34047796819 | import argparse
from resnet import resnet
import os
import sys
from test import test
import tensorflow as tf
from read_data import fasionAI_data
def parse_args():
parser=argparse.ArgumentParser(description="test resnet for FasionAI")
parser.add_argument("--image_data",dest="image_data",\
help="the image data to test",default="image_test",type=str)
parser.add_argument('--bboxes_of_image',dest='bboxes_of_image',
help='图像的bboxes记录',default='bboxes_of_train_image_index.csv',type=str)
parser.add_argument("--weights_path",dest="weights_path",\
help="the .ckpt file to load",type=str)
# parser.add_argument("")
print(len(sys.argv))
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args=parser.parse_args()
return args
if __name__ == '__main__':
args=parse_args()
image_test_data=args.image_data
data_abspath=os.path.abspath("read_data.py")
bboxes_of_image=args.bboxes_of_image
data_absdir=os.path.split(data_abspath)[0]
annopath=os.path.join(data_absdir,"Annotations/label.csv")
imdb=fasionAI_data(data_absdir,annopath,image_test_data,bboxes_of_image,False)
weights_path=args.weights_path
net=resnet(is_train=False)
# saver=tf.train.Saver()
test(net,imdb,weights_path)
| hx121071/FashionAI_resnet | base/test_net.py | test_net.py | py | 1,380 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"l... |
8217311297 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import sys
import math
plt.style.use("acaps")
"""
Compare households owning agricultural land in 2018 and 2019 in the host community.
"""
# Read in the data
df_2018 = pd.read_csv("../../data/processed/MSNA_Host_2018.csv")
df_2019 = pd.read_csv("../../data/processed/MSNA_Host_2019.csv")
# Calculate proportions and merge the datasets
df_counts_2018 = df_2018["hh_agri_land"].value_counts(normalize=True).reset_index().rename(columns={"index": "answer", "hh_agri_land": "percent"})
df_counts_2018["year"] = "2018"
df_counts_2019 = df_2019["agricultural_land"].value_counts(normalize=True).reset_index().rename(columns={"index": "answer", "agricultural_land": "percent"})
df_counts_2019["year"] = "2019"
df_counts = df_counts_2018.append(df_counts_2019)
df_counts["percent"] = df_counts["percent"]*100.0
df_counts["answer"] = df_counts["answer"].replace({"yes": "Yes", "no": "No"})
# Create the plot
fig, ax = plt.subplots(figsize=(10,8))
ax = sns.barplot(x="answer", y="percent", hue="year", data=df_counts)
plt.title("Percentage of households in the host community\nwith agricultural land in 2018 and 2019", fontsize=18)
plt.legend(fontsize=16, loc="upper right")
plt.xlabel(None)
plt.ylabel("Percent of households (%)", fontsize=16)
plt.ylim([0,100])
plt.xticks(rotation=0, fontsize=14)
# Add percentages to the bars
for p in ax.patches:
width = p.get_width()
height = p.get_height() if not math.isnan(p.get_height()) else 0.0
x, y = p.get_xy()
ax.annotate('{:.0%}'.format(round(height)/100.0), (x + width/2, y + height+2), ha='center', fontsize=14)
plt.tight_layout()
plt.savefig("agricultural_land.png")
plt.close()
| zackarno/coxs_msna_sector_analysis | host_community/analysis/housing_barplots/agri_land.py | agri_land.py | py | 1,732 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name"... |
36386456035 | import altair as alt
from vega_datasets import data
source = data.cars()
chart = alt.Chart(source).mark_circle(size=60, clip=False).transform_calculate(
x = alt.datum.Horsepower-100,
y = alt.datum.Miles_per_Gallon - 25
).encode(
x=alt.X('x:Q', axis=alt.Axis(offset=-150)),
y=alt.Y('y:Q', axis=alt.Axis(offset=-190)),
color='Origin',
).configure_axisX(
domainWidth =3
).configure_axisY(
domainWidth =3
)
# save
chart.save('debug.svg')
| noahzhy/charts_synthetic_data | test.py | test.py | py | 464 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "vega_datasets.data.cars",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "vega_datasets.data",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "altair.Chart",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "altair.datum",
... |
36884065642 | #!/usr/bin/python3
"""
Module for Base class
"""
import json
class Base:
""" Base class """
__nb_objects = 0
def __init__(self, id=None):
""" ctor for Base Class """
self.id = id
if id is None:
Base.__nb_objects += 1
self.id = Base.__nb_objects
@staticmethod
def to_json_string(l):
""" converts list to json string """
if l is None or len(l) == 0:
return "[]"
return json.dumps(l)
@classmethod
def save_to_file(cls, l):
""" save list of objs to a json file """
with open(cls.__name__ + ".json", "w", encoding="utf-8") as f:
if l is None:
f.write(Base.to_json_string([]))
else:
li = []
for obj in l:
li.append(obj.to_dictionary())
f.write(Base.to_json_string(li))
@staticmethod
def from_json_string(json_s):
""" converts json string to python object """
if json_s is None or not json_s:
return []
else:
return json.loads(json_s)
@classmethod
def create(cls, **dictionary):
""" create a Base inheritanced object based
on dictionary"""
from models.rectangle import Rectangle
from models.square import Square
name = cls.__name__
if name == "Rectangle":
dummy = Rectangle(3, 8)
else:
dummy = Square(1)
dummy.update(**dictionary)
return dummy
@classmethod
def load_from_file(cls):
""" loads objects from a json file """
from os.path import exists
filename = cls.__name__ + ".json"
if not exists(filename):
return []
with open(filename, "r", encoding="utf-8") as f:
s = f.read()
instances = []
dics = Base.from_json_string(s)
for elem in dics:
instances.append(cls.create(**elem))
return instances
@classmethod
def save_to_file_csv(cls, list_objs):
""" saves objects to a csv file """
import csv
name = cls.__name__ + ".csv"
f = open(name, "w", encoding="utf-8")
writer = csv.writer(f)
for obj in list_objs:
dic = obj.to_dictionary()
values = list(dic.values())
keys = list(dic.keys())
csv_dic = []
csv_dic.append(keys)
csv_dic.append(values)
writer.writerow(keys)
writer.writerow(values)
@classmethod
def load_from_file_csv(cls):
""" loads objects from a csv file """
import csv
from os.path import exists
name = cls.__name__ + ".csv"
if not exists(name):
return []
f = open(name, "r", encoding="utf-8")
reader = csv.reader(f)
objs = []
rect = True if cls.__name__ == "Rectangle" else False
for row in reader:
dic = {}
keys = reader[row]
values = row[row + 1]
for i in range(len(keys)):
dic[keys[i]] = values[i]
objs.append(cls.create(**dic))
return objs
@staticmethod
def draw(list_rectangles, list_squares):
""" draws rect and square objects to a canvas """
import turtle
turt = turtle.Turtle()
| Samigirum/alx-higher_level_programming | 0x0C-python-almost_a_circle/models/base.py | base.py | py | 3,403 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.dumps",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "models.rectangle.Rectangle",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "models.square.Square... |
10094748311 | # coding=utf-8
import MeCab
import sys
if len(sys.argv) == 1:
print("mkdir.py <file> [univ]\n")
sys.exit(1)
if len(sys.argv) == 3 and sys.argv[2] == 'univ':
dictype = '固有名詞'
nauntype = '組織'
else:
dictype = '名詞'
nauntype = '一般'
tagger = MeCab.Tagger('-Oyomi')
out = sys.argv[1].replace(".txt", ".csv")
fo = open(out, 'w')
fi = open(sys.argv[1], 'r')
line = fi.readline()
while line:
naun = line.replace('\n', '')
yomi = tagger.parse(naun).replace('\n', '')
fo.write('{naun},*,*,1000,名詞,{dictype},{nauntype},*,*,*,{naun},{yomi},{yomi}\n'.format(naun=naun, dictype=dictype, nauntype=nauntype, yomi=yomi))
line = fi.readline()
fi.close();
fo.close()
| l-plantarum/chiebukuro | mkdic.py | mkdic.py | py | 716 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "MeCab.Tagger",
"line_number": 1... |
4376865590 |
import sys
import signal
import argparse
from dictmaster.util import load_plugin
last_broadcast_msg = " "
def broadcast(msg, overwrite=False):
global last_broadcast_msg
if overwrite:
sys.stdout.write("\r{}".format(" "*len(last_broadcast_msg.strip())))
msg = "\r"+msg
else:
if last_broadcast_msg[0] == "\r":
msg = "\n"+msg
msg += "\n"
last_broadcast_msg = msg
sys.stdout.write(msg)
sys.stdout.flush()
def cli_main():
parser = argparse.ArgumentParser(description='Download and convert dictionaries.')
parser.add_argument('plugin', metavar='PLUGIN', type=str, help='The plugin to use.')
parser.add_argument('--popts', action="store", nargs="+", default=[],
help=("Option string passed to the plugin."))
parser.add_argument('--reset', action="store_true", default=False,
help=("Discard data from last time."))
parser.add_argument('--force-process', action="store_true", default=False,
help=("Discard processed data from last time (keep fetched data)."))
parser.add_argument('-o', '--output', action="store", default="", type=str,
help=("Work and output directory."))
args = parser.parse_args()
plugin = load_plugin(args.plugin, popts=args.popts, dirname=args.output)
if plugin == None: sys.exit("Plugin not found or plugin broken.")
plugin.force_process = args.force_process
if args.reset:
broadcast("Resetting plugin data in '{}'.".format(plugin.output_directory))
plugin.reset()
elif args.force_process:
plugin.stages['Processor'].reset()
broadcast("Running plugin '{}'.".format(args.plugin))
broadcast("Output will be written to '{}'.".format(plugin.output_directory))
plugin.start()
def ctrl_c(signal, frame):
broadcast("User interrupt. Stopping the plugin...")
plugin.cancel()
signal.signal(signal.SIGINT, ctrl_c)
while plugin.is_alive():
broadcast(plugin.progress(), True)
plugin.join(1)
broadcast("Plugin '{}' quit.".format(args.plugin))
if not plugin._canceled:
broadcast("Optimize data...")
plugin.optimize_data()
broadcast("Export as StarDict file...")
plugin.export()
| tuxor1337/dictmaster | dictmaster/cli/main.py | main.py | py | 2,301 | python | en | code | 32 | github-code | 6 | [
{
"api_name": "sys.stdout.write",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.write",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"l... |
40071000142 | import collections
class Solution(object):
    def canConstruct(self, ransomNote, magazine):
        """
        Return True if ransomNote can be built out of magazine's letters,
        using each letter from magazine at most once.

        :type ransomNote: str
        :type magazine: str
        :rtype: bool
        """
        available = collections.Counter(magazine)
        needed = collections.Counter(ransomNote)
        return all(available[ch] >= cnt for ch, cnt in needed.items())
def main():
    """Run canConstruct on a sample input and print the result."""
    solver = Solution()
    note = "bcjefgecdabaa"
    magazine = "hfebdiicigfjahdddiahdajhaidbdgjihdbhgfbbccfdfggdcacccaebh"
    print('Output:', solver.canConstruct(note, magazine))


if __name__ == '__main__':
    main()
{
"api_name": "collections.Counter",
"line_number": 10,
"usage_type": "call"
}
] |
8173760199 | # Get Networks in Org
# Meraki API Reference:
# https://developer.cisco.com/meraki/api-latest/#!list-the-networks-that-the-user-has-privileges-on-in-an-organization
import tokens
import requests
import json
# Build the endpoint URL from the organization id.
base_url = "https://api.meraki.com/api/v1"
resource_path = f"/organizations/{tokens.ORG_ID}/networks"
url = base_url + resource_path

# Authentication is via the Meraki API key header.
headers = {
    "Accept": "application/json",
    "X-Cisco-Meraki-API-Key": tokens.API_KEY
}

# Issue the GET request and pretty-print the JSON response body.
response = requests.request('GET', url, headers=headers, data=None)
json_data = response.json()
print(json.dumps(json_data, indent=2))
| jtsu/meraki_python | merakiScripts/2_getNetworks.py | 2_getNetworks.py | py | 633 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "tokens.ORG_ID",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tokens.API_KEY",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "requests.request",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.dumps",
... |
36797461084 | """Utility for ROC-AUC Visualization"""
import matplotlib.pyplot as plt
from src.utils import infoutils
def plot_signle_roc_auc(cfg, auc, fpr, tpr):
    """
    Plots a single ROC Curve

    Args:
        cfg (cfgNode): Model configurations
        auc (float): Area under the ROC curve
        fpr (list): False positive rates
        tpr (list): True positive rates
    """
    plt.figure()
    plt.plot(fpr, tpr, color='darkorange',
        lw=cfg.VISUALIZE.PLIT_LINEWIDTH, label='ROC curve (area = %0.6f)' % auc)
    # Diagonal reference line: a random classifier (AUC = 0.5).
    plt.plot([0, 1], [0, 1], color='navy',
        lw=cfg.VISUALIZE.PLIT_LINEWIDTH, label='Random Classifier ROC (area = 0.5)', linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.01])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    # Fixed displayed-title typo: "Cperating" -> "Operating".
    plt.title('Receiver Operating Characteristic (ROC)' + '\n' + \
        infoutils.get_dataset_features_name(cfg) + '\n' + infoutils.get_full_model_without_features(cfg))
    plt.legend(loc="lower right")
    plt.show()
| KhaledElTahan/Real-World-Anomaly-Detection | src/visualization/roc_auc.py | roc_auc.py | py | 1,016 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "mat... |
# private access only
# NOTE(review): API tokens are hard-coded below -- move them to a config
# file or environment variables before sharing this script.
import requests
import munch
# get the list of domains
# i.ivanov token
headers = { 'PddToken': 'F4KTVIPLCFULRWCDFGJIKNGUPEWQUEMSKDG7DDFJZDPILB3JXLOQ'}
# x@yandex token (overwrites the previous headers value)
headers = { 'PddToken': 'E7UIU2AHR33EOXDJ5W6R2Q2WRNW4TGCI5MZ2U6DOX5YKBEJW334A' }
url = 'https://pddimp.yandex.ru/api2/admin/domain/domains?'
r=requests.get(url,headers=headers)
obj = munch.munchify(r.json())
print(r.json())
# add a mailbox
url = 'https://pddimp.yandex.ru/api2/admin/email/add'
payload = {'domain': 'bellatrix.xyz', 'login': 'test2', 'password' : 'hardpass'}
headers = { 'PddToken': 'F4KTVIPLCFULRWCDFGJIKNGUPEWQUEMSKDG7DDFJZDPILB3JXLOQ'}
r=requests.post(url,data=payload,headers=headers)
# domain bellatrix.xyz
# block (disable) a mailbox
headers = { 'PddToken': 'F4KTVIPLCFULRWCDFGJIKNGUPEWQUEMSKDG7DDFJZDPILB3JXLOQ'}
payload = {'domain': 'bellatrix.xyz', 'login': 'test2', 'enabled': 'no' }
url = 'https://pddimp.yandex.ru/api2/admin/email/edit'
r=requests.post(url,data=payload,headers=headers)
# add a deputy domain administrator
url = 'https://pddimp.yandex.ru/api2/admin/deputy/list?domain=mrtkt74.ru'
# get the list of deputies
r = requests.get(url,headers=headers)
# add a deputy
url = 'https://pddimp.yandex.ru//api2/admin/deputy/add'
payload = {'domain': 'mrtkt74.ru', 'login': 'i.ivanov'}
headers = { 'PddToken': 'E7UIU2AHR33EOXDJ5W6R2Q2WRNW4TGCI5MZ2U6DOX5YKBEJW334A' }
r=requests.post(url,data=payload,headers=headers)
| expo-lux/scripts | python/x_createuser.py | x_createuser.py | py | 1,573 | python | ru | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "munch.munchify",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_n... |
72169387389 | """
One table verb initializations
"""
import itertools
from .operators import DataOperator
from .expressions import Expression
__all__ = ['define', 'create', 'sample_n', 'sample_frac', 'select',
'rename', 'distinct', 'unique', 'arrange', 'group_by',
'ungroup', 'group_indices', 'summarize',
'query', 'do', 'head', 'tail', 'pull', 'slice_rows',
# Aliases
'summarise', 'mutate', 'transmute',
]
class define(DataOperator):
"""
Add column to DataFrame
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : strs, tuples, optional
Expressions or ``(name, expression)`` pairs. This should
be used when the *name* is not a valid python variable
name. The expression should be of type :class:`str` or
an *interable* with the same number of elements as the
dataframe.
kwargs : dict, optional
``{name: expression}`` pairs.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 2, 3]})
>>> df >> define(x_sq='x**2')
x x_sq
0 1 1
1 2 4
2 3 9
>>> df >> define(('x*2', 'x*2'), ('x*3', 'x*3'), x_cubed='x**3')
x x*2 x*3 x_cubed
0 1 2 3 1
1 2 4 6 8
2 3 6 9 27
>>> df >> define('x*4')
x x*4
0 1 4
1 2 8
2 3 12
Notes
-----
If :obj:`plydata.options.modify_input_data` is ``True``,
:class:`define` will modify the original dataframe.
"""
def __init__(self, *args, **kwargs):
self.set_env_from_verb_init()
cols = []
exprs = []
for arg in args:
if isinstance(arg, str):
col = expr = arg
else:
col, expr = arg
cols.append(col)
exprs.append(expr)
_cols = itertools.chain(cols, kwargs.keys())
_exprs = itertools.chain(exprs, kwargs.values())
self.expressions = [Expression(stmt, col)
for stmt, col in zip(_exprs, _cols)]
class create(define):
"""
Create DataFrame with columns
Similar to :class:`define`, but it drops the existing columns.
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : strs, tuples, optional
Expressions or ``(name, expression)`` pairs. This should
be used when the *name* is not a valid python variable
name. The expression should be of type :class:`str` or
an *interable* with the same number of elements as the
dataframe.
kwargs : dict, optional
``{name: expression}`` pairs.
kwargs : dict, optional
``{name: expression}`` pairs.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 2, 3]})
>>> df >> create(x_sq='x**2')
x_sq
0 1
1 4
2 9
>>> df >> create(('x*2', 'x*2'), ('x*3', 'x*3'), x_cubed='x**3')
x*2 x*3 x_cubed
0 2 3 1
1 4 6 8
2 6 9 27
>>> df >> create('x*4')
x*4
0 4
1 8
2 12
"""
class sample_n(DataOperator):
"""
Sample n rows from dataframe
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
n : int, optional
Number of items from axis to return.
replace : boolean, optional
Sample with or without replacement. Default = False.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
inf and -inf values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames, 1 for Panels).
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> rs = np.random.RandomState(1234567890)
>>> df = pd.DataFrame({'x': range(20)})
>>> df >> sample_n(5, random_state=rs)
x
5 5
19 19
14 14
8 8
17 17
"""
def __init__(self, n=1, replace=False, weights=None,
random_state=None, axis=None):
self.kwargs = dict(n=n, replace=replace, weights=weights,
random_state=random_state, axis=axis)
class sample_frac(DataOperator):
"""
Sample a fraction of rows from dataframe
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : boolean, optional
Sample with or without replacement. Default = False.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
inf and -inf values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames, 1 for Panels).
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> rs = np.random.RandomState(1234567890)
>>> df = pd.DataFrame({'x': range(20)})
>>> df >> sample_frac(0.25, random_state=rs)
x
5 5
19 19
14 14
8 8
17 17
"""
def __init__(self, frac=None, replace=False, weights=None,
random_state=None, axis=None):
self.kwargs = dict(
frac=frac, replace=replace, weights=weights,
random_state=random_state, axis=axis)
class select(DataOperator):
"""
Select columns by name
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
names : tuple, optional
Names of columns in dataframe. Normally, they are strings
can include slice e.g :py:`slice('col2', 'col5')`.
You can also exclude columns by prepending a ``-`` e.g
py:`select('-col1')`, will include all columns minus than
*col1*.
startswith : str or tuple, optional
All column names that start with this string will be included.
endswith : str or tuple, optional
All column names that end with this string will be included.
contains : str or tuple, optional
All column names that contain with this string will be included.
matches : str or regex or tuple, optional
All column names that match the string or a compiled regex pattern
will be included. A tuple can be used to match multiple regexs.
drop : bool, optional
If ``True``, the selection is inverted. The unspecified/unmatched
columns are returned instead. Default is ``False``.
Examples
--------
>>> import pandas as pd
>>> x = [1, 2, 3]
>>> df = pd.DataFrame({'bell': x, 'whistle': x, 'nail': x, 'tail': x})
>>> df >> select('bell', 'nail')
bell nail
0 1 1
1 2 2
2 3 3
>>> df >> select('bell', 'nail', drop=True)
whistle tail
0 1 1
1 2 2
2 3 3
>>> df >> select('whistle', endswith='ail')
whistle nail tail
0 1 1 1
1 2 2 2
2 3 3 3
>>> df >> select('bell', matches=r'\\w+tle$')
bell whistle
0 1 1
1 2 2
2 3 3
You can select column slices too. Like :meth:`~pandas.DataFrame.loc`,
the stop column is included.
>>> df = pd.DataFrame({'a': x, 'b': x, 'c': x, 'd': x,
... 'e': x, 'f': x, 'g': x, 'h': x})
>>> df
a b c d e f g h
0 1 1 1 1 1 1 1 1
1 2 2 2 2 2 2 2 2
2 3 3 3 3 3 3 3 3
>>> df >> select('a', slice('c', 'e'), 'g')
a c d e g
0 1 1 1 1 1
1 2 2 2 2 2
2 3 3 3 3 3
You can exclude columns by prepending ``-``
>>> df >> select('-a', '-c', '-e')
b d f g h
0 1 1 1 1 1
1 2 2 2 2 2
2 3 3 3 3 3
Remove and place column at the end
>>> df >> select('-a', '-c', '-e', 'a')
b d f g h a
0 1 1 1 1 1 1
1 2 2 2 2 2 2
2 3 3 3 3 3 3
Notes
-----
To exclude columns by prepending a minus, the first column
passed to :class:`select` must be prepended with minus.
:py:`select('-a', 'c')` will exclude column ``a``, while
:py:`select('c', '-a')` will not exclude column ``a``.
"""
def __init__(self, *names, startswith=None, endswith=None,
contains=None, matches=None, drop=False):
def as_tuple(obj):
if obj is None:
return tuple()
elif isinstance(obj, tuple):
return obj
elif isinstance(obj, list):
return tuple(obj)
else:
return (obj,)
self.names = names
self.startswith = as_tuple(startswith)
self.endswith = as_tuple(endswith)
self.contains = as_tuple(contains)
self.matches = as_tuple(matches)
self.drop = drop
@staticmethod
def from_columns(*columns):
"""
Create a select verb from the columns specification
Parameters
----------
*columns : list-like | select | str | slice
Column names to be gathered and whose contents will
make values.
Return
------
out : select
Select verb representation of the columns.
"""
from .helper_verbs import select_all, select_at, select_if
n = len(columns)
if n == 0:
return select_all()
elif n == 1:
obj = columns[0]
if isinstance(obj, (select, select_all, select_at, select_if)):
return obj
elif isinstance(obj, slice):
return select(obj)
elif isinstance(obj, (list, tuple)):
return select(*obj)
elif isinstance(obj, str):
return select(obj)
else:
raise TypeError(
"Unrecognised type {}".format(type(obj))
)
else:
return select(*columns)
class rename(DataOperator):
"""
Rename columns
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : tuple, optional
A single positional argument that holds
``{'new_name': 'old_name'}`` pairs. This is useful if the
*old_name* is not a valid python variable name.
kwargs : dict, optional
``{new_name: 'old_name'}`` pairs. If all the columns to be
renamed are valid python variable names, then they
can be specified as keyword arguments.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> x = np.array([1, 2, 3])
>>> df = pd.DataFrame({'bell': x, 'whistle': x,
... 'nail': x, 'tail': x})
>>> df >> rename(gong='bell', pin='nail')
gong whistle pin tail
0 1 1 1 1
1 2 2 2 2
2 3 3 3 3
>>> df >> rename({'flap': 'tail'}, pin='nail')
bell whistle pin flap
0 1 1 1 1
1 2 2 2 2
2 3 3 3 3
Notes
-----
If :obj:`plydata.options.modify_input_data` is ``True``,
:class:`rename` will modify the original dataframe.
"""
lookup = None
def __init__(self, *args, **kwargs):
lookup = args[0] if len(args) else {}
self.lookup = {v: k for k, v in lookup.items()}
self.lookup.update({v: k for k, v in kwargs.items()})
class distinct(DataOperator):
"""
Select distinct/unique rows
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
columns : list-like, optional
Column names to use when determining uniqueness.
keep : {'first', 'last', False}, optional
- ``first`` : Keep the first occurence.
- ``last`` : Keep the last occurence.
- False : Do not keep any of the duplicates.
Default is False.
kwargs : dict, optional
``{name: expression}`` computed columns. If specified,
these are taken together with the columns when determining
unique rows.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 1, 2, 3, 4, 4, 5],
... 'y': [1, 2, 3, 4, 5, 5, 6]})
>>> df >> distinct()
x y
0 1 1
1 1 2
2 2 3
3 3 4
4 4 5
6 5 6
>>> df >> distinct(['x'])
x y
0 1 1
2 2 3
3 3 4
4 4 5
6 5 6
>>> df >> distinct(['x'], 'last')
x y
1 1 2
2 2 3
3 3 4
5 4 5
6 5 6
>>> df >> distinct(z='x%2')
x y z
0 1 1 1
2 2 3 0
>>> df >> distinct(['x'], z='x%2')
x y z
0 1 1 1
2 2 3 0
3 3 4 1
4 4 5 0
6 5 6 1
>>> df >> define(z='x%2') >> distinct(['x', 'z'])
x y z
0 1 1 1
2 2 3 0
3 3 4 1
4 4 5 0
6 5 6 1
"""
columns = None
keep = 'first'
def __init__(self, *args, **kwargs):
self.set_env_from_verb_init()
if len(args) == 1:
if isinstance(args[0], (str, bool)):
self.keep = args[0]
else:
self.columns = args[0]
elif len(args) == 2:
self.columns, self.keep = args
elif len(args) > 2:
raise Exception("Too many positional arguments.")
# define
if kwargs:
if self.columns is None:
self.columns = []
elif not isinstance(self.columns, list):
self.columns = list(self.columns)
_cols = list(kwargs.keys())
_exprs = list(kwargs.values())
self.columns.extend(_cols)
else:
_cols = []
_exprs = []
self.expressions = [Expression(stmt, col)
for stmt, col in zip(_exprs, _cols)]
class arrange(DataOperator):
"""
Sort rows by column variables
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : tuple
Columns/expressions to sort by.
reset_index : bool, optional (default: True)
If ``True``, the index is reset to a sequential range index.
If ``False``, the original index is maintained.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame({'x': [1, 5, 2, 2, 4, 0],
... 'y': [1, 2, 3, 4, 5, 6]})
>>> df >> arrange('x')
x y
0 0 6
1 1 1
2 2 3
3 2 4
4 4 5
5 5 2
>>> df >> arrange('x', '-y')
x y
0 0 6
1 1 1
2 2 4
3 2 3
4 4 5
5 5 2
>>> df >> arrange('np.sin(y)')
x y
0 4 5
1 2 4
2 0 6
3 2 3
4 1 1
5 5 2
"""
expressions = None
def __init__(self, *args, reset_index=True):
self.set_env_from_verb_init()
self.reset_index = reset_index
name_gen = ('col_{}'.format(x) for x in range(100))
self.expressions = [
Expression(stmt, col)
for stmt, col in zip(args, name_gen)
]
class group_by(define):
"""
Group dataframe by one or more columns/variables
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : strs, tuples, optional
Expressions or ``(name, expression)`` pairs. This should
be used when the *name* is not a valid python variable
name. The expression should be of type :class:`str` or
an *interable* with the same number of elements as the
dataframe.
add_ : bool, optional
If True, add to existing groups. Default is to create
new groups.
kwargs : dict, optional
``{name: expression}`` pairs.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 5, 2, 2, 4, 0, 4],
... 'y': [1, 2, 3, 4, 5, 6, 5]})
>>> df >> group_by('x')
groups: ['x']
x y
0 1 1
1 5 2
2 2 3
3 2 4
4 4 5
5 0 6
6 4 5
Like :meth:`define`, :meth:`group_by` creates any
missing columns.
>>> df >> group_by('y-1', xplus1='x+1')
groups: ['y-1', 'xplus1']
x y y-1 xplus1
0 1 1 0 2
1 5 2 1 6
2 2 3 2 3
3 2 4 3 3
4 4 5 4 5
5 0 6 5 1
6 4 5 4 5
Columns that are grouped on remain in the dataframe after any
verb operations that do not use the group information. For
example:
>>> df >> group_by('y-1', xplus1='x+1') >> select('y')
groups: ['y-1', 'xplus1']
y-1 xplus1 y
0 0 2 1
1 1 6 2
2 2 3 3
3 3 3 4
4 4 5 5
5 5 1 6
6 4 5 5
Notes
-----
If :obj:`plydata.options.modify_input_data` is ``True``,
:class:`group_by` will modify the original dataframe.
"""
groups = None
    def __init__(self, *args, add_=False, **kwargs):
        # Capture the caller's environment first; define's __init__
        # builds the expressions for any computed grouping columns.
        self.set_env_from_verb_init()
        super().__init__(*args, **kwargs)
        self.add_ = add_
        # The grouping columns are exactly the columns named by the
        # expressions collected above.
        self.groups = [expr.column for expr in self.expressions]
class ungroup(DataOperator):
"""
Remove the grouping variables for dataframe
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 2, 3],
... 'y': [1, 2, 3]})
>>> df >> group_by('x')
groups: ['x']
x y
0 1 1
1 2 2
2 3 3
>>> df >> group_by('x') >> ungroup()
x y
0 1 1
1 2 2
2 3 3
"""
class group_indices(group_by):
"""
Generate a unique id for each group
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : strs, tuples, optional
Expressions or ``(name, expression)`` pairs. This should
be used when the *name* is not a valid python variable
name. The expression should be of type :class:`str` or
an *interable* with the same number of elements as the
dataframe. As this verb returns an array, the tuples have
no added benefit over strings.
kwargs : dict, optional
``{name: expression}`` pairs. As this verb returns an
array, keyword arguments have no added benefit over
:class:`str` positional arguments.
Returns
-------
out : numpy.array
Ids for each group
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [1, 5, 2, 2, 4, 0, 4],
... 'y': [1, 2, 3, 4, 5, 6, 5]})
>>> df >> group_by('x')
groups: ['x']
x y
0 1 1
1 5 2
2 2 3
3 2 4
4 4 5
5 0 6
6 4 5
>>> df >> group_by('x') >> group_indices()
array([1, 4, 2, 2, 3, 0, 3])
You can pass the group column(s) as parameters to
:class:`group_indices`
>>> df >> group_indices('x*2')
array([1, 4, 2, 2, 3, 0, 3])
"""
class summarize(define):
"""
Summarise multiple values to a single value
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
args : strs, tuples, optional
Expressions or ``(name, expression)`` pairs. This should
be used when the *name* is not a valid python variable
name. The expression should be of type :class:`str` or
an *interable* with the same number of elements as the
dataframe.
kwargs : dict, optional
``{name: expression}`` pairs.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame({'x': [1, 5, 2, 2, 4, 0, 4],
... 'y': [1, 2, 3, 4, 5, 6, 5],
... 'z': [1, 3, 3, 4, 5, 5, 5]})
Can take only positional, only keyword arguments or both.
>>> df >> summarize('np.sum(x)', max='np.max(x)')
np.sum(x) max
0 18 5
When summarizing after a :class:`group_by` operation
the group columns are retained.
>>> df >> group_by('y', 'z') >> summarize(mean_x='np.mean(x)')
y z mean_x
0 1 1 1.0
1 2 3 5.0
2 3 3 2.0
3 4 4 2.0
4 5 5 4.0
5 6 5 0.0
.. rubric:: Aggregate Functions
When summarizing the following functions can be used, they take
an array and return a *single* number.
- ``min(x)`` - Alias of :func:`numpy.amin` (a.k.a ``numpy.min``).
- ``max(x)`` - Alias of :func:`numpy.amax` (a.k.a ``numpy.max``).
- ``sum(x)`` - Alias of :func:`numpy.sum`.
- ``cumsum(x)`` - Alias of :func:`numpy.cumsum`.
- ``mean(x)`` - Alias of :func:`numpy.mean`.
- ``median(x)`` - Alias of :func:`numpy.median`.
- ``std(x)`` - Alias of :func:`numpy.std`.
- ``first(x)`` - First element of ``x``.
- ``last(x)`` - Last element of ``x``.
- ``nth(x, n)`` - *nth* value of ``x`` or ``numpy.nan``.
- ``n_distinct(x)`` - Number of distint elements in ``x``.
- ``n_unique(x)`` - Alias of ``n_distinct``.
- ``n()`` - Number of elements in current group.
The aliases of the Numpy functions save you from typing 3 or 5 key
strokes and you get better column names. i.e ``min(x)`` instead of
``np.min(x)`` or ``numpy.min(x)`` if you have Numpy imported.
>>> df = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5],
... 'y': [0, 0, 1, 1, 2, 3]})
>>> df >> summarize('min(x)', 'max(x)', 'mean(x)', 'sum(x)',
... 'first(x)', 'last(x)', 'nth(x, 3)')
min(x) max(x) mean(x) sum(x) first(x) last(x) nth(x, 3)
0 0 5 2.5 15 0 5 3
Summarizing groups with aggregate functions
>>> df >> group_by('y') >> summarize('mean(x)')
y mean(x)
0 0 0.5
1 1 2.5
2 2 4.0
3 3 5.0
>>> df >> group_by('y') >> summarize(y_count='n()')
y y_count
0 0 2
1 1 2
2 2 1
3 3 1
You can use ``n()`` even when there are no groups.
>>> df >> summarize('n()')
n()
0 6
"""
class query(DataOperator):
"""
Return rows with matching conditions
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
expr : str
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character
like ``@a + b``. Allowed functions are `sin`, `cos`, `exp`,
`log`, `expm1`, `log1p`, `sqrt`, `sinh`, `cosh`, `tanh`,
`arcsin`, `arccos`, `arctan`, `arccosh`, `arcsinh`,
`arctanh`, `abs` and `arctan2`.
reset_index : bool, optional (default: True)
If ``True``, the index is reset to a sequential range index.
If ``False``, the original index is maintained.
kwargs : dict
See the documentation for :func:`pandas.eval` for complete
details on the keyword arguments accepted by
:meth:`pandas.DataFrame.query`.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5],
... 'y': [0, 0, 1, 1, 2, 3],
... 'z': list('aabbcd')})
>>> df >> query('x % 2 == 0')
x y z
0 0 0 a
1 2 1 b
2 4 2 c
>>> df >> query('x % 2 == 0 & y > 0')
x y z
0 2 1 b
1 4 2 c
By default, Bitwise operators ``&`` and ``|`` have the same
precedence as the booleans ``and`` and ``or``.
>>> df >> query('x % 2 == 0 and y > 0')
x y z
0 2 1 b
1 4 2 c
``query`` works within groups
>>> df >> query('x == x.min()')
x y z
0 0 0 a
>>> df >> group_by('y') >> query('x == x.min()')
groups: ['y']
x y z
0 0 0 a
1 2 1 b
2 4 2 c
3 5 3 d
When working with strings, the values should be quoted.
>>> df >> query('z == "a"')
x y z
0 0 0 a
1 1 0 a
You can refer to variables in the environment by prefixing them
with an `@` character.
>>> w = list('rrbbst')
>>> df >> query('z == @w')
x y z
0 2 1 b
1 3 1 b
For more information see :meth:`pandas.DataFrame.query`. To query
rows and columns with ``NaN`` values, use :class:`dropna`
Notes
-----
:class:`~plydata.one_table_verbs.query` is the equivalent of
dplyr's `filter` verb but with slightly different python syntax
the expressions.
"""
expression = None
def __init__(self, expr, reset_index=True, **kwargs):
self.set_env_from_verb_init()
self.reset_index = reset_index
self.expression = expr
self.kwargs = kwargs
class do(DataOperator):
"""
Do arbitrary operations on a dataframe
Considering the *split-apply-combine* data manipulation
strategy, :class:`do` gives a window into which to place
the complex *apply* actions, and also control over the form of
results when they are combined. This allows
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
func : function, optional
A single function to apply to each group. *The
function should accept a dataframe and return a
dataframe*.
kwargs : dict, optional
``{name: function}`` pairs. *The function should
accept a dataframe and return an array*. The function
computes a column called ``name``.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame({'x': [1, 2, 2, 3],
... 'y': [2, 3, 4, 3],
... 'z': list('aabb')})
Define a function that uses numpy to do a least squares fit.
It takes input from a dataframe and output is a dataframe.
``gdf`` is a dataframe that contains only rows from the current
group.
>>> def least_squares(gdf):
... X = np.vstack([gdf.x, np.ones(len(gdf))]).T
... (m, c), _, _, _ = np.linalg.lstsq(X, gdf.y, None)
... return pd.DataFrame({'intercept': c, 'slope': [m]})
Define functions that take x and y values and compute the
intercept and slope.
>>> def slope(x, y):
... return np.diff(y)[0] / np.diff(x)[0]
...
>>> def intercept(x, y):
... return y.values[0] - slope(x, y) * x.values[0]
Demonstrating do
>>> df >> group_by('z') >> do(least_squares)
groups: ['z']
z intercept slope
0 a 1.0 1.0
1 b 6.0 -1.0
We can get the same result, by passing separate functions
that calculate the columns independently.
>>> df >> group_by('z') >> do(
... intercept=lambda gdf: intercept(gdf.x, gdf.y),
... slope=lambda gdf: slope(gdf.x, gdf.y))
groups: ['z']
z intercept slope
0 a 1.0 1.0
1 b 6.0 -1.0
The functions need not return numerical values. Pandas columns can
hold any type of object. You could store result objects from more
complicated models. Each model would be linked to a group. Notice
that the group columns (``z`` in the above cases) are included in
the result.
Notes
-----
You cannot have both a position argument and keyword
arguments.
"""
single_function = False
def __init__(self, func=None, **kwargs):
if func is not None:
if kwargs:
raise ValueError(
"Unexpected positional and keyword arguments.")
if not callable(func):
raise TypeError(
"func should be a callable object")
if func:
self.single_function = True
self.expressions = [Expression(func, None)]
else:
stmts_cols = zip(kwargs.values(), kwargs.keys())
self.expressions = [
Expression(stmt, col) for stmt, col in stmts_cols
]
class head(DataOperator):
"""
Select the top n rows
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
n : int, optional
Number of rows to return. If the ``data`` is grouped,
then number of rows per group. Default is 5.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({
... 'x': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
... 'y': list('aaaabbcddd') })
>>> df >> head(2)
x y
0 1 a
1 2 a
Grouped dataframe
>>> df >> group_by('y') >> head(2)
groups: ['y']
x y
0 1 a
1 2 a
2 5 b
3 6 b
4 7 c
5 8 d
6 9 d
"""
    def __init__(self, n: int = 5):
        # Number of rows (per group, when the data is grouped) to keep.
        self.n = n
class tail(DataOperator):
"""
Select the bottom n rows
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
n : int, optional
Number of rows to return. If the ``data`` is grouped,
then number of rows per group. Default is 5.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({
... 'x': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
... 'y': list('aaaabbcddd') })
>>> df >> tail(2)
x y
8 9 d
9 10 d
Grouped dataframe
>>> df >> group_by('y') >> tail(2)
groups: ['y']
x y
0 3 a
1 4 a
2 5 b
3 6 b
4 7 c
5 9 d
6 10 d
"""
    def __init__(self, n: int = 5):
        # Number of rows (per group, when the data is grouped) to keep.
        self.n = n
class pull(DataOperator):
"""
Pull a single column from the dataframe
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
column : name
Column name or index id.
use_index : bool
Whether to pull column by name or by its integer
index. Default is False.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({
... 'x': [1, 2, 3],
... 'y': [4, 5, 6],
... 'z': [7, 8, 9]
... })
>>> df
x y z
0 1 4 7
1 2 5 8
2 3 6 9
>>> df >> pull('y')
array([4, 5, 6])
>>> df >> pull(0, True)
array([1, 2, 3])
>>> df >> pull(-1, True)
array([7, 8, 9])
Notes
-----
Always returns a numpy array.
If :obj:`plydata.options.modify_input_data` is ``True``,
:class:`pull` will not make a copy the original column.
"""
    def __init__(self, column, use_index: bool = False):
        # Column name, or integer position when use_index is True.
        self.column = column
        self.use_index = use_index
class slice_rows(DataOperator):
"""
Select rows
A wrapper around :class:`slice` to use when piping.
Parameters
----------
data : dataframe, optional
Useful when not using the ``>>`` operator.
*args : tuple
(start, stop, step) as expected by the builtin :class:`slice`
type.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'x': range(10), 'y': range(100, 110)})
>>> df >> slice_rows(5)
x y
0 0 100
1 1 101
2 2 102
3 3 103
4 4 104
>>> df >> slice_rows(3, 7)
x y
3 3 103
4 4 104
5 5 105
6 6 106
>>> df >> slice_rows(None, None, 3)
x y
0 0 100
3 3 103
6 6 106
9 9 109
The above examples are equivalent to::
df[slice(5)]
df[slice(3, 7)]
df[slice(None, None, 3)]
respectively.
Notes
-----
If :obj:`plydata.options.modify_input_data` is ``True``,
:class:`slice_rows` will not make a copy the original dataframe.
"""
    def __init__(self, *args):
        # Same (start, stop, step) semantics as the builtin slice().
        self.slice = slice(*args)
# Aliases
mutate = define        # dplyr's name for define
transmute = create     # dplyr's name for create
unique = distinct      # alternative name for distinct
summarise = summarize  # British spelling
| has2k1/plydata | plydata/one_table_verbs.py | one_table_verbs.py | py | 34,171 | python | en | code | 271 | github-code | 6 | [
{
"api_name": "operators.DataOperator",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "itertools.chain",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "expressions.Ex... |
17625793432 | from __future__ import print_function, division
import os
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Activation
from keras.layers import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU, ReLU
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.losses import MeanAbsoluteError, BinaryCrossentropy
from keras import backend as K
import tensorflow as tf
import numpy as np
from losses import encoder_loss, generator_loss, discriminator_loss, code_discriminator_loss
class AlphaGAN():
def __init__(self, lambda_=1., lr1=0.0005, lr2=0.0001, beta1=0.9, beta2=0.999, model_save_path="./snapshots"):
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
self.model_save_path = model_save_path
self.input_dim = 29
self.x_shape = (self.input_dim, )
self.latent_dim = 16
self.base_n_count = 128
self.lambda_ = lambda_
self.lr1 = lr1
self.lr2 = lr2
self.beta1 = beta1
self.beta2 = beta2
self.bce = BinaryCrossentropy()
self.mae = MeanAbsoluteError()
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.code_discriminator = self.build_code_discriminator()
self.generator = self.build_generator()
self.encoder = self.build_encoder()
x = Input(shape=self.x_shape)
x_hat = self.generator(self.encoder(x))
self.alphagan_generator = Model([x], [x_hat])
def build_encoder(self):
model = Sequential(name="Encoder")
model.add(Dense(self.base_n_count * 2))
model.add(ReLU())
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(self.base_n_count))
model.add(ReLU())
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(self.latent_dim))
model.add(Activation('tanh'))
x = Input(shape=self.x_shape)
z = model(x)
model.summary()
return Model(x, z)
def build_generator(self):
model = Sequential(name="Generator")
model.add(Dense(self.base_n_count))
model.add(ReLU())
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(self.base_n_count * 2))
model.add(ReLU())
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(self.base_n_count * 4))
model.add(ReLU())
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(self.input_dim))
model.add(Activation('tanh'))
z = Input(shape=(self.latent_dim,))
x_gen = model(z)
model.summary()
return Model(z, x_gen)
def build_discriminator(self):
model = Sequential(name="Discriminator")
model.add(Dense(self.base_n_count * 4))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(self.base_n_count * 2))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(self.base_n_count))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(1, activation='sigmoid'))
x = Input(shape=self.x_shape)
validity = model(x)
model.summary()
return Model(x, validity)
def build_code_discriminator(self):
model = Sequential(name="CodeDiscriminator")
model.add(Dense(self.base_n_count * 4))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(self.base_n_count * 2))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(self.base_n_count))
model.add(LeakyReLU())
model.add(Dropout(0.7))
model.add(Dense(1, activation='sigmoid'))
z = Input(shape=(self.latent_dim,))
validity = model(z)
model.summary()
return Model(z, validity)
def build_e_train(self, batch_size, lr, beta1, beta2):
x_real = K.placeholder(shape=(batch_size,) + self.x_shape)
real_labels = K.placeholder(shape=(batch_size, 1))
z_hat = self.encoder(x_real)
c_z_hat = self.code_discriminator(z_hat)
x_rec = self.generator(z_hat)
# ================== Train E ================== #
l1_loss = self.mae(x_real, x_rec)
c_hat_loss = self.bce(c_z_hat, real_labels) # - self.bce(c_z_hat, fake_labels)
e_loss = l1_loss + c_hat_loss
e_training_updates = Adam(lr=lr, beta_1=beta1, beta_2=beta2) \
.get_updates(e_loss, self.encoder.trainable_weights)
e_train = K.function([x_real, real_labels], [e_loss], updates=e_training_updates)
return e_train
def build_g_train(self, batch_size, lr, beta1, beta2):
x_real = K.placeholder(shape=(batch_size,) + self.x_shape)
z = K.placeholder(shape=(batch_size, self.latent_dim))
real_labels = K.placeholder(shape=(batch_size, 1))
fake_labels = K.placeholder(shape=(batch_size, 1))
z_hat = self.encoder(x_real)
x_rec = self.generator(z_hat)
x_gen = self.generator(z)
d_rec = self.discriminator(x_rec)
d_gen = self.discriminator(x_gen)
# ================== Train E ================== #
l1_loss = 0.2 * self.mae(x_real, x_rec)
g_rec_loss = self.bce(d_rec, real_labels) # - self.bce(d_rec, fake_labels)
g_gen_loss = self.bce(d_gen, fake_labels) # - self.bce(d_gen, fake_labels)
g_loss = l1_loss + g_rec_loss + g_gen_loss
g_training_updates = Adam(lr=lr, beta_1=beta1, beta_2=beta2) \
.get_updates(g_loss, self.generator.trainable_weights)
g_train = K.function([x_real, z, real_labels, fake_labels], [g_loss], updates=g_training_updates)
return g_train
def build_e_g_train(self, batch_size, lr, beta1, beta2):
x_real = K.placeholder(shape=(batch_size,) + self.x_shape)
z = K.placeholder(shape=(batch_size, self.latent_dim))
real_labels = K.placeholder(shape=(batch_size, 1))
fake_labels = K.placeholder(shape=(batch_size, 1))
z_hat = self.encoder(x_real)
x_rec = self.generator(z_hat)
x_gen = self.generator(z)
d_rec = self.discriminator(x_rec)
d_gen = self.discriminator(x_gen)
c_z_hat = self.code_discriminator(z_hat)
# ================== Train G and E ================== #
l1_loss = self.mae(x_real, x_rec)
c_hat_loss = self.bce(c_z_hat, real_labels) # - self.bce(c_z_hat, fake_labels)
g_rec_loss = self.bce(d_rec, real_labels) # - self.bce(d_rec, fake_labels)
g_gen_loss = self.bce(d_gen, real_labels) # - self.bce(d_gen, fake_labels)
g_loss = l1_loss + g_rec_loss + c_hat_loss + g_gen_loss
g_training_updates = Adam(lr=lr, beta_1=beta1, beta_2=beta2) \
.get_updates(g_loss, self.alphagan_generator.trainable_weights)
g_train = K.function([x_real, z, real_labels, fake_labels], [g_loss], updates=g_training_updates)
return g_train
def build_d_train(self, batch_size, lr, beta1, beta2):
x_real = K.placeholder(shape=(batch_size,) + self.x_shape)
z = K.placeholder(shape=(batch_size, self.latent_dim))
real_labels = K.placeholder(shape=(batch_size, 1))
fake_labels = K.placeholder(shape=(batch_size, 1))
z_hat = self.encoder(x_real)
x_rec = self.generator(z_hat)
x_gen = self.generator(z)
d_real = self.discriminator(x_real)
d_rec = self.discriminator(x_rec)
d_gen = self.discriminator(x_gen)
# ================== Train D ================== #
d_real_loss = self.bce(d_real, real_labels)
d_rec_loss = self.bce(d_rec, fake_labels)
d_gen_loss = self.bce(d_gen, fake_labels)
d_loss = d_real_loss + d_rec_loss + d_gen_loss
d_training_updates = Adam(lr=lr, beta_1=beta1, beta_2=beta2) \
.get_updates(d_loss, self.discriminator.trainable_weights)
d_train = K.function([x_real, z, real_labels, fake_labels], [d_loss], updates=d_training_updates)
return d_train
def build_c_train(self, batch_size, lr, beta1, beta2):
x_real = K.placeholder(shape=(batch_size,) + self.x_shape)
z = K.placeholder(shape=(batch_size, self.latent_dim))
real_labels = K.placeholder(shape=(batch_size, 1))
fake_labels = K.placeholder(shape=(batch_size, 1))
z_hat = self.encoder(x_real)
c_z_hat = self.code_discriminator(z_hat)
c_z = self.code_discriminator(z)
# ================== Train C ================== #
c_hat_loss = self.bce(c_z_hat, real_labels)
c_z_loss = self.bce(c_z, fake_labels)
c_loss = c_hat_loss + c_z_loss
c_training_updates = Adam(lr=lr, beta_1=beta1, beta_2=beta2) \
.get_updates(c_loss, self.code_discriminator.trainable_weights)
c_train = K.function([x_real, z, real_labels, fake_labels], [c_loss], updates=c_training_updates)
return c_train
def train(self, X_train, epochs, batch_size=32, output_path='.', model_save_step=10):
# if not os.path.exists(os.path.join(output_path, 'logs/')):
# os.makedirs(os.path.join(output_path, 'logs/'))
real_labels = np.ones((batch_size, 1))
fake_labels = np.zeros((batch_size, 1))
# _, _, d_train, c_train = self.build_functions(batch_size, self.lr1, self.lr2, self.beta1, self.beta2)
e_train = self.build_e_train(batch_size, lr=self.lr1, beta1=self.beta1, beta2=self.beta2)
g_train = self.build_g_train(batch_size, lr=self.lr1, beta1=self.beta1, beta2=self.beta2)
d_train = self.build_d_train(batch_size, lr=self.lr2, beta1=self.beta1, beta2=self.beta2)
c_train = self.build_c_train(batch_size, lr=self.lr2, beta1=self.beta1, beta2=self.beta2)
e_g_train = self.build_e_g_train(batch_size, lr=self.lr1, beta1=self.beta1, beta2=self.beta2)
# train_step = self.build_train_step()
# Adversarial ground truths
session = K.get_session()
init = tf.global_variables_initializer()
session.run(init)
for epoch in range(epochs):
# Generate fake code
z = np.random.normal(size=(batch_size, self.latent_dim)).astype(np.float32)
# z_K.constant(z)
# Make a batch of true samples
idx = np.random.randint(0, X_train.shape[0], batch_size)
x_real = X_train[idx].astype(np.float32)
# e_loss, g_loss, d_loss, c_loss, = train_step(x_real, z)
#e_loss = e_train([x_real, real_labels])
#g_loss = g_train([x_real, z, real_labels, fake_labels])
g_loss = e_g_train([x_real, z, real_labels, fake_labels])
d_loss = d_train([x_real, z, real_labels, fake_labels])
c_loss = c_train([x_real, z, real_labels, fake_labels])
# d_loss = d_train([x_real, z])
# c_loss = c_train([x_real, z])
# Plot the progress
if epoch % 100 == 0:
print("%d [E loss: %f] [G loss: %f] [D loss: %f] [C loss: %f]" % \
(epoch, 0, g_loss[0], d_loss[0], c_loss[0]))
if epoch % model_save_step == 0:
self.generator.save(os.path.join(self.model_save_path, '{}_G.h5'.format(epoch)))
self.encoder.save(os.path.join(self.model_save_path, '{}_E.h5'.format(epoch)))
self.discriminator.save(os.path.join(self.model_save_path, '{}_D.h5'.format(epoch)))
self.code_discriminator.save(os.path.join(self.model_save_path, '{}_C.h5'.format(epoch)))
def load_pretrained_models(self, model_path_prefix):
self.generator.load_weights('%sG.h5' % model_path_prefix)
self.encoder.load_weights('%sE.h5' % model_path_prefix)
self.discriminator.load_weights('%sD.h5' % model_path_prefix)
self.code_discriminator.load_weights('%sC.h5' % model_path_prefix)
# if __name__ == '__main__':
# alphagan = AlphaGAN()
# alphagan.train(epochs=40000, batch_size=32)
| royalsalute/fraud-creditcard-detection | alphagan.py | alphagan.py | py | 12,147 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "keras.losses.BinaryCrossentro... |
70107497149 | from bs4 import BeautifulSoup
import requests
from pprint import pprint
def main():
url = 'https://remote.co'
remote_co_html = requests.get(url)
soup = BeautifulSoup(remote_co_html.content,"html.parser")
#the_main_class = soup.find("body",class_="home blog remote-co").main.find("div",class_="container pt-4").find("div",class_="row").find("div",class_="col").find_all("div",class_="card bg-light mb-3").find("div")
the_main_class = soup.main.find("div",class_="container pt-4").find_all("div",class_="card bg-light mb-3")[1].find("div",class_="card-body").find_all('h6')#
#pprint(the_main_class)
jobs = []
for eachheader in the_main_class:
jobs.append(eachheader.string)
pprint(jobs)
#for eachmarker in the_main_class:
# each_card = eachmarker.find_all('div',class_='card')
# for each_job in each_card:
# print(each_job.img['alt'])
#for each_entry in each_card:
# each_job = each_entry.find('div',class_='card-body').img['alt']
# print(each_job)
#pprint(the_job)
#[0].find('div',class_='card-body').img['alt']
#pprint(the_main_class)
if __name__ == '__main__':
main() | cthacker-udel/Python-WebScraper | remoteCoScraper.py | remoteCoScraper.py | py | 1,208 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 24,
"usage_type": "call"
}
] |
13209639303 | import PIL
import pyautogui
def popper():
while True:
try:
box = pyautogui.locateOnScreen("C:/Users/Bryan/Documents/GitHub/Cuhacking/decline.png", confidence = 0.55)
loc = pyautogui.center(box)
print(loc)
pyautogui.click(loc.x, loc.y)
break
except:
print("bropkebobuo")
| RogerLamTd/Cuhacking | AutomatedQueuePopper/League.py | League.py | py | 374 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyautogui.locateOnScreen",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyautogui.center",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyautogui.click",
"line_number": 11,
"usage_type": "call"
}
] |
8034052289 | #Prototype 4
# importing the necessary libraries
import cv2
import numpy as np
import numpy as np
import os
import cv2
# defining the crack detector function
# here weak_th and strong_th are thresholds for
# double thresholding step
def PCD(img, weak_th = None, strong_th = None):
# conversion of image to grayscale
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Noise reduction step
img = cv2.GaussianBlur(img, (5, 5), 1.6)
# Calculating the gradients
gx = cv2.Sobel(np.float32(img), cv2.CV_64F, 1, 0, 3)
gy = cv2.Sobel(np.float32(img), cv2.CV_64F, 0, 1, 3)
# Conversion of Cartesian coordinates to polar
mag, ang = cv2.cartToPolar(gx, gy, angleInDegrees = True)
# setting the minimum and maximum thresholds
# for double thresholding
mag_max = np.max(mag)
if not weak_th:weak_th = mag_max * 0.1
if not strong_th:strong_th = mag_max * 0.5
# getting the dimensions of the input image
height, width = img.shape
# Looping through every pixel of the grayscale
# image
for i_x in range(width):
for i_y in range(height):
grad_ang = ang[i_y, i_x]
grad_ang = abs(grad_ang-180) if abs(grad_ang)>180 else abs(grad_ang)
# selecting the neighbours of the target pixel
# according to the gradient direction
# In the x axis direction
if grad_ang<= 22.5:
neighb_1_x, neighb_1_y = i_x-1, i_y
neighb_2_x, neighb_2_y = i_x + 1, i_y
# top right (diagonal-1) direction
elif grad_ang>22.5 and grad_ang<=(22.5 + 45):
neighb_1_x, neighb_1_y = i_x-1, i_y-1
neighb_2_x, neighb_2_y = i_x + 1, i_y + 1
# In y-axis direction
elif grad_ang>(22.5 + 45) and grad_ang<=(22.5 + 90):
neighb_1_x, neighb_1_y = i_x, i_y-1
neighb_2_x, neighb_2_y = i_x, i_y + 1
# top left (diagonal-2) direction
elif grad_ang>(22.5 + 90) and grad_ang<=(22.5 + 135):
neighb_1_x, neighb_1_y = i_x-1, i_y + 1
neighb_2_x, neighb_2_y = i_x + 1, i_y-1
# Now it restarts the cycle
elif grad_ang>(22.5 + 135) and grad_ang<=(22.5 + 180):
neighb_1_x, neighb_1_y = i_x-1, i_y
neighb_2_x, neighb_2_y = i_x + 1, i_y
# Non-maximum suppression step
if width>neighb_1_x>= 0 and height>neighb_1_y>= 0:
if mag[i_y, i_x]<mag[neighb_1_y, neighb_1_x]:
mag[i_y, i_x]= 0
continue
if width>neighb_2_x>= 0 and height>neighb_2_y>= 0:
if mag[i_y, i_x]<mag[neighb_2_y, neighb_2_x]:
mag[i_y, i_x]= 0
weak_ids = np.zeros_like(img)
strong_ids = np.zeros_like(img)
ids = np.zeros_like(img)
# double thresholding step
for i_x in range(width):
for i_y in range(height):
grad_mag = mag[i_y, i_x]
if grad_mag<weak_th:
mag[i_y, i_x]= 0
elif strong_th>grad_mag>= weak_th:
ids[i_y, i_x]= 1
else:
ids[i_y, i_x]= 2
# finally returning the magnitude of
# gradients of edges
return mag
# Creating a VideoCapture object to read the video
cap = cv2.VideoCapture('assets\sample.mp4')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('assets/Out.mp4', fourcc, 20.0, (640,480))
# Loop until the end of the video
while (cap.isOpened()):
# Capture frame-by-frame
ret, frame = cap.read()
frame = cv2.resize(frame, (540, 380), fx = 0, fy = 0,
interpolation = cv2.INTER_CUBIC)
# Display the resulting frame
cv2.imshow('Frame', frame)
# conversion of BGR to grayscale is necessary to apply this operation
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
crack_frame = PCD(frame)
blur = cv2.blur(crack_frame,(3,3))
#img_log = np.array(blur,dtype=np.uint8)
# Morphological Closing Operator
#kernel = np.ones((5,5),np.uint8)
#closing = cv2.morphologyEx(blur, cv2.MORPH_CLOSE, kernel)
# Create feature detecting method
# sift = cv2.xfeatures2d.SIFT_create()
# surf = cv2.xfeatures2d.SURF_create()
# orb = cv2.ORB_create(nfeatures=150)
# Make featured Image
# keypoints, descriptors = orb.detectAndCompute(closing, None)
# featuredImg = cv2.drawKeypoints(closing, keypoints, None)
# adaptive thresholding to use different threshold
# values on different regions of the frame.
#Thresh = cv2.adaptiveThreshold(crack_frame, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
#cv2.THRESH_BINARY_INV, 11, 2)
cv2.imshow('C_frame', blur)
out.write(blur)
# define q as the exit button
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# release the video capture object
cap.release()
# Closes all the windows currently opened.
cv2.destroyAllWindows() | thanhtung48c/AUV-Crack-Detection-Model | script.py | script.py | py | 5,431 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.cvtColor",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.Sobel",
... |
7606759311 | import datetime
import time
from .. import emails
from ..database import get_sql_connection
from ..models import Account, Session as SqlSession
__description__ = 'Send out summary emails.'
def send_out_emails():
session = SqlSession()
today = datetime.date.today()
accounts = session.query(Account) \
.filter(Account.receive_summary_email == True) # noqa
for account in accounts:
try:
email = emails.Summary(account, today)
except RuntimeError: # no tasks
continue
with emails.Mailer() as mailer:
mailer.send(email)
def command(args):
get_sql_connection()
if args.forever:
while True:
tomorrow = datetime.datetime.utcnow() + datetime.timedelta(days=1)
tomorrow = tomorrow.replace(hour=4, minute=0)
diff = tomorrow - datetime.datetime.utcnow()
time.sleep(diff.total_seconds())
send_out_emails()
else:
send_out_emails()
def add_subparser(subparsers):
parser = subparsers.add_parser('send-summary-emails', help=__description__)
parser.add_argument('--forever', action='store_true')
parser.set_defaults(func=command)
| thomasleese/gantt-charts | ganttcharts/cli/send_summary_emails.py | send_summary_emails.py | py | 1,214 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "models.Session",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "models.Account"... |
75241216826 | from typing import Any, Type
from aiohttp import BasicAuth
from ..internal.gateway import Gateway
from ..internal.http import HTTPClient
from .cache import CacheStore, Store
_BASE_MODELS: dict[Any, Any] = {}
class State:
"""The central bot cache."""
def __init__(
self,
token: str,
# cache-options
max_messages: int,
max_members: int,
intents: int,
# "advanced" options
base_url: str = "https://discord.com/api/v10",
proxy: str | None = None,
proxy_auth: BasicAuth | None = None,
# classes
store_class: Type[Store] = Store,
model_classes: dict[Any, Any] = _BASE_MODELS,
) -> None:
self._token = token
self.cache = CacheStore(store_class)
self.cache["messages"] = max_messages
self.cache["members"] = max_members
self.max_members = max_members
self.intents = intents
self.http = HTTPClient(token)
self.gateway = Gateway(self)
| VincentRPS/pycv3 | pycord/state/core.py | core.py | py | 1,011 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Any",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "aiohttp.BasicAuth",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "cache.Store",
"line_numbe... |
2966396264 | '''
@File : ImageReward.py
@Time : 2023/02/28 19:53:00
@Auther : Jiazheng Xu
@Contact : xjz22@mails.tsinghua.edu.cn
@Description: ImageReward Reward model for reward model.
'''
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from config.options import *
from config.utils import *
from models.blip_pretrain import blip_pretrain
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
try:
from torchvision.transforms import InterpolationMode
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
def _convert_image_to_rgb(image):
return image.convert("RGB")
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=BICUBIC),
CenterCrop(n_px),
_convert_image_to_rgb,
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
class MLP(nn.Module):
def __init__(self, input_size):
super().__init__()
self.input_size = input_size
self.layers = nn.Sequential(
nn.Linear(self.input_size, 1024),
#nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(1024, 128),
#nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(128, 64),
#nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(64, 16),
#nn.ReLU(),
nn.Linear(16, 1)
)
# initial MLP param
for name, param in self.layers.named_parameters():
if 'weight' in name:
nn.init.normal_(param, mean=0.0, std=1.0/(self.input_size+1))
if 'bias' in name:
nn.init.constant_(param, val=0)
def forward(self, input):
return self.layers(input)
class ImageReward(nn.Module):
def __init__(self, device='cpu'):
super().__init__()
self.device = device
self.blip = blip_pretrain(pretrained=config['blip_path'], image_size=config['BLIP']['image_size'], vit=config['BLIP']['vit'])
self.preprocess = _transform(config['BLIP']['image_size'])
self.mlp = MLP(config['ImageReward']['mlp_dim'])
if opts.fix_base:
self.blip.requires_grad_(False)
for name, parms in self.blip.named_parameters():
if '_proj' in name:
parms.requires_grad_(False)
# fix certain ratio of layers
self.image_layer_num = 24 if config['BLIP']['vit'] == 'large' else 12
if opts.fix_rate > 0:
text_fix_num = "layer.{}".format(int(12 * opts.fix_rate))
image_fix_num = "blocks.{}".format(int(self.image_layer_num * opts.fix_rate))
for name, parms in self.blip.text_encoder.named_parameters():
parms.requires_grad_(False)
if text_fix_num in name:
break
for name, parms in self.blip.visual_encoder.named_parameters():
parms.requires_grad_(False)
if image_fix_num in name:
break
def loose_layer(self, fix_rate):
text_layer_id = [f"layer.{id}" for id in range(int(12 * fix_rate), 13)]
image_layer_id = [f"blocks.{id}" for id in range(int(24 * fix_rate), 25)]
for name, parms in self.blip.text_encoder.named_parameters():
for text_id in text_layer_id:
if text_id in name:
parms.requires_grad_(True)
for name, parms in self.blip.visual_encoder.named_parameters():
for image_id in image_layer_id:
if image_id in name:
parms.requires_grad_(True)
def forward(self, batch_data):
# encode data
if opts.rank_pair:
batch_data = self.encode_pair(batch_data)
else:
batch_data = self.encode_data(batch_data)
# forward
emb_better, emb_worse = batch_data['emb_better'], batch_data['emb_worse']
reward_better = self.mlp(emb_better)
reward_worse = self.mlp(emb_worse)
reward = torch.concat((reward_better, reward_worse), dim=1)
return reward
def encode_pair(self, batch_data):
text_ids, text_mask, img_better, img_worse = batch_data['text_ids'], batch_data['text_mask'], batch_data['img_better'], batch_data['img_worse']
text_ids = text_ids.view(text_ids.shape[0], -1).to(self.device) # [batch_size, seq_len]
text_mask = text_mask.view(text_mask.shape[0], -1).to(self.device) # [batch_size, seq_len]
img_better = img_better.to(self.device) # [batch_size, C, H, W]
img_worse = img_worse.to(self.device) # [batch_size, C, H, W]
# encode better emb
image_embeds_better = self.blip.visual_encoder(img_better)
image_atts_better = torch.ones(image_embeds_better.size()[:-1], dtype=torch.long).to(self.device)
emb_better = self.blip.text_encoder(text_ids,
attention_mask = text_mask,
encoder_hidden_states = image_embeds_better,
encoder_attention_mask = image_atts_better,
return_dict = True,
).last_hidden_state # [batch_size, seq_len, feature_dim]
emb_better = emb_better[:, 0, :].float()
# encode worse emb
image_embeds_worse = self.blip.visual_encoder(img_worse)
image_atts_worse = torch.ones(image_embeds_worse.size()[:-1], dtype=torch.long).to(self.device)
emb_worse = self.blip.text_encoder(text_ids,
attention_mask = text_mask,
encoder_hidden_states = image_embeds_worse,
encoder_attention_mask = image_atts_worse,
return_dict = True,
).last_hidden_state
emb_worse = emb_worse[:, 0, :].float()
# get batch data
batch_data = {
'emb_better': emb_better,
'emb_worse': emb_worse,
}
return batch_data
def encode_data(self, batch_data):
txt_better, txt_worse = [], []
for item in batch_data:
text_input = self.blip.tokenizer(item["prompt"], padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(self.device)
txt_set = []
for generations in item["generations"]:
# image encode
img_path = os.path.join(config['image_base'], generations)
pil_image = Image.open(img_path)
image = self.preprocess(pil_image).unsqueeze(0).to(self.device)
image_embeds = self.blip.visual_encoder(image)
# text encode cross attention with image
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(self.device)
text_output = self.blip.text_encoder(text_input.input_ids,
attention_mask = text_input.attention_mask,
encoder_hidden_states = image_embeds,
encoder_attention_mask = image_atts,
return_dict = True,
)
txt_set.append(text_output.last_hidden_state[:,0,:])
labels = item["ranking"]
for id_l in range(len(labels)):
for id_r in range(id_l+1, len(labels)):
if labels[id_l] < labels[id_r]:
txt_better.append(txt_set[id_l])
txt_worse.append(txt_set[id_r])
elif labels[id_l] > labels[id_r]:
txt_better.append(txt_set[id_r])
txt_worse.append(txt_set[id_l])
# torch.Size([sample_num, feature_dim])
txt_better = torch.cat(txt_better, 0).float()
txt_worse = torch.cat(txt_worse, 0).float()
batch_data = {
'emb_better': txt_better,
'emb_worse': txt_worse,
}
return batch_data
| THUDM/ImageReward | train/src/ImageReward.py | ImageReward.py | py | 8,579 | python | en | code | 761 | github-code | 6 | [
{
"api_name": "torchvision.transforms.InterpolationMode.BICUBIC",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torchvision.transforms.InterpolationMode",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "PIL.Image.BICUBIC",
"line_number": 23,
"us... |
14959398109 | from mock import Mock
from yoti_python_sandbox.doc_scan.check import SandboxZoomLivenessCheckBuilder
from yoti_python_sandbox.doc_scan.check.report.breakdown import SandboxBreakdown
from yoti_python_sandbox.doc_scan.check.report.recommendation import (
SandboxRecommendation,
)
def test_zoom_liveness_check_should_set_correct_liveness_type():
recommendation_mock = Mock(spec=SandboxRecommendation)
breakdown_mock = Mock(spec=SandboxBreakdown)
check = (
SandboxZoomLivenessCheckBuilder()
.with_recommendation(recommendation_mock)
.with_breakdown(breakdown_mock)
.build()
)
assert check.liveness_type == "ZOOM"
def test_zoom_liveness_check_build_result_object():
recommendation_mock = Mock(spec=SandboxRecommendation)
breakdown_mock = Mock(spec=SandboxBreakdown)
check = (
SandboxZoomLivenessCheckBuilder()
.with_recommendation(recommendation_mock)
.with_breakdown(breakdown_mock)
.build()
)
assert check.result.report.recommendation is not None
assert check.result.report.recommendation == recommendation_mock
assert len(check.result.report.breakdown) == 1
assert check.result.report.breakdown[0] == breakdown_mock
| getyoti/yoti-python-sdk-sandbox | yoti_python_sandbox/tests/doc_scan/check/test_sandbox_liveness_check.py | test_sandbox_liveness_check.py | py | 1,243 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "mock.Mock",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "yoti_python_sandbox.doc_scan.check.report.recommendation.SandboxRecommendation",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "mock.Mock",
"line_number": 12,
"usage_type": "ca... |
21884345926 | import cv2
def distance(p1, p2):
# D8 distance
return max(abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
def gamma(img):
totalpixels = img.shape[0] * img.shape[1]
color_dict = {}
for i in range(len(img)):
for j in range(len(img[i])):
tc = tuple(img[i][j])
if (tc in color_dict):
color_dict[tc].append((i, j))
else:
color_dict[tc] = [(i, j)]
probability = {}
for d in range(8):
for color in color_dict:
count = 0
k = color_dict[color]
for p1 in range(len(k)):
for p2 in range(p1, len(k)):
if (distance(k[p1], k[p2]) == d):
count += 1
if color not in probability:
probability[color] = [0 for i in range(8)]
probability[color][d] = float(count) / totalpixels
return probability
img = cv2.imread("bed.jpg")
img2 = cv2.imread("img2.jpg")
g1 = gamma(img)
g2 = gamma(img2)
s = 0
m = 0
for color in g1:
if color not in g2:
continue
m += 1
for d in range(8):
s += abs(g1[color][d] - g2[color][d]) / float(1 + g1[color][d] + g2[color][d])
s /= float(m)
print(s) | NitigyaPant/MCA_assignment | test.py | test.py | py | 1,025 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 37,
"usage_type": "call"
}
] |
74083767549 | from django.core import checks
from django.core.checks import Warning
from django.test import SimpleTestCase, override_settings
class SystemChecksTestCase(SimpleTestCase):
def test_checks(self):
errors = checks.run_checks()
assert errors == []
with override_settings(
INSTALLED_APPS=[
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'huey_monitor',
]
):
errors = checks.run_checks()
assert errors == [
Warning(
'"bx_django_utils" not in INSTALLED_APPS',
id='huey_monitor.E001',
)
]
| boxine/django-huey-monitor | huey_monitor_project/tests/test_checks.py | test_checks.py | py | 790 | python | en | code | 69 | github-code | 6 | [
{
"api_name": "django.test.SimpleTestCase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.core.checks.run_checks",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.core.checks",
"line_number": 8,
"usage_type": "name"
},
{
"api_name... |
27812632483 | import sys
from osgeo import ogr
fn=r'D:\BackUp\projectsBackup\Qgis\pygis\res\ne_50m_populated_places.shp'
ds=ogr.Open(fn,0)
if ds is None:
sys.exit('could not open {0}.'.format(fn))
lyr=ds.GetLayer(0)
#此图层要素总量
num_features=lyr.GetFeatureCount()
print(num_features)
#根据要素编号Fid获取对应图层
third_feature=lyr.GetFeature(num_features-1)
print(third_feature.NAME)
del ds
| xuewenqian/pythonGis | ogr/获取特定的要素.py | 获取特定的要素.py | py | 406 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "osgeo.ogr.Open",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "osgeo.ogr",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 7,
"usage_type": "call"
}
] |
6123307630 | import os
import sys
import shutil
import logging
import argparse
import warnings
import re
from pathlib import Path
from ..backend_funcs.convert import parse_validator
import subprocess as sub
import pandas as pd
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('fw-heudiconv-validator')
def escape_ansi(line):
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
return ansi_escape.sub('', line)
def find_all(regex, text):
match_list = []
while True:
match = re.search(regex, text)
if match:
match_list.append(match.group(0))
text = text[match.end():]
else:
return match_list
def validate_local(path, verbose, tabulate='.'):
logger.info("Launching bids-validator...")
command = ['bids-validator', path]
if verbose:
command.extend(['--verbose'])
p = sub.Popen(command, stdout=sub.PIPE, stdin=sub.PIPE, stderr=sub.PIPE, universal_newlines=True)
output, error = p.communicate()
logger.info(output)
if p.returncode != 0:
logger.info(error)
if os.path.exists(tabulate):
logger.info("Parsing issues and writing to issues.csv")
command = ['bids-validator', path, '--json', '--verbose']
with open(tabulate + '/issues.json', "w") as outfile:
p2 = sub.run(command, stdout=outfile)
if p2.returncode == 0:
issues_df_full = parse_validator(tabulate + '/issues.json')
issues_df_full.to_csv(tabulate + '/issues.csv', index=False)
return p.returncode
def fw_heudiconv_export(proj, subjects=None, sessions=None, destination="tmp", name="bids_directory", key=None):
    """Download a Flywheel project into a local BIDS directory.

    Shells out to ``fw-heudiconv-export`` and returns its exit code.
    """
    logger.info("Launching fw-heudiconv-export...")
    command = [
        'fw-heudiconv-export',
        '--project', ' '.join(proj),
        '--destination', destination,
        '--directory-name', name,
    ]
    if subjects:
        command += ['--subject'] + subjects
    if sessions:
        command += ['--session'] + sessions
    if key:
        command += ['--api-key', key]
    p = sub.Popen(command, stdout=sub.PIPE, stdin=sub.PIPE, stderr=sub.PIPE,
                  universal_newlines=True)
    output, error = p.communicate()
    logger.info(output)
    if p.returncode != 0:
        logger.info(error)
    return p.returncode
def get_parser():
    """Build the command-line argument parser for the validator CLI."""
    parser = argparse.ArgumentParser(
        description="Validate BIDS-curated data on Flywheel. A simple wrapper around the original BIDS Validator https://github.com/bids-standard/bids-validator")
    # Local temp space where the BIDS export lands before validation.
    parser.add_argument("--directory", default=".", required=False, type=str,
                        help="Temp space used for validation")
    parser.add_argument("--project", nargs="+",
                        help="The project on Flywheel")
    parser.add_argument("--subject", nargs="+", default=None, type=str,
                        help="The subject(s) on Flywheel to validate")
    parser.add_argument("--session", nargs="+", default=None, type=str,
                        help="The session(s) on Flywheel to validate")
    parser.add_argument("--verbose", default=False, action='store_true',
                        help="Pass on <VERBOSE> flag to bids-validator")
    parser.add_argument("--tabulate", default=".", required=False, type=str,
                        help="Directory to save tabulation of errors")
    parser.add_argument("--api-key", action='store', default=None,
                        help="API Key")
    # Hidden flag (suppressed from --help output).
    parser.add_argument("--dry-run", help=argparse.SUPPRESS,
                        action='store_false', default=None)
    return parser
def main():
    """CLI entry point: export the project from Flywheel, validate, clean up."""
    logger.info("{:=^70}\n".format(": fw-heudiconv validator starting up :"))
    args = get_parser().parse_args()
    exit_code = 1
    if not args.project:
        logger.error("No project on Flywheel specified!")
        sys.exit(exit_code)
    success = fw_heudiconv_export(
        proj=args.project,
        subjects=args.subject,
        sessions=args.session,
        destination=args.directory,
        name='bids_directory',
        key=args.api_key,
    )
    if success == 0:
        path = Path(args.directory, 'bids_directory')
        exit_code = validate_local(path, args.verbose, args.tabulate)
        # Remove the exported copy once validation has run.
        shutil.rmtree(path)
    else:
        logger.error("There was a problem downloading the data to a temp space for validation!")
    logger.info("Done!")
    logger.info("{:=^70}".format(": Exiting fw-heudiconv validator :"))
    sys.exit(exit_code)
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| PennLINC/fw-heudiconv | fw_heudiconv/cli/validate.py | validate.py | py | 4,603 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "re.compile",
... |
4971500738 | import socket
import threading
import datetime
def acao_cliente(client_socket, client_address):
    """Handle one honeypot client connection.

    Logs the connection and every payload received to honeypot_log.txt,
    sends a canned reply per payload, and closes the socket when the peer
    disconnects.
    """
    current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print(f"Conexão recebida de {client_address[0]}:{client_address[1]} em {current_time}")
    with open("honeypot_log.txt", "a") as log_file:
        log_file.write(f"Conexão recebida de {client_address[0]}:{client_address[1]} em {current_time}\n")
    response = "Bem-vindo ao honeypot!\n"
    client_socket.send(response.encode())
    while True:
        data = client_socket.recv(1024)
        if not data:
            break
        # Bug fix: timestamp each payload at receive time instead of reusing
        # the connection timestamp for every logged message.
        recv_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        with open("honeypot_log.txt", "a") as log_file:
            log_file.write(f"Dados recebidos de {client_address[0]}:{client_address[1]} em {recv_time}:\n")
            # errors="replace": attackers send arbitrary bytes — a strict
            # decode() would crash this handler thread on non-UTF-8 input.
            log_file.write(data.decode(errors="replace"))
            log_file.write("\n")
        analise_trafego(data)
        response = "Obrigado por sua solicitação.\n"
        client_socket.send(response.encode())
    client_socket.close()
def analise_trafego(data):
    """Placeholder hook for analysing received traffic (currently a no-op)."""
    pass
def honeypot(port):
    """Listen on localhost:*port* and hand each client to its own thread.

    Blocks forever in the accept loop.
    """
    # TCP listening socket; SO_REUSEADDR allows quick restarts.
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('localhost', port))
    listener.listen(5)
    print(f"Aguardando conexões na porta {port}...")
    while True:
        conn, addr = listener.accept()
        worker = threading.Thread(target=acao_cliente, args=(conn, addr))
        worker.start()
# Start the honeypot on port 8080 (blocks forever).
honeypot(8080)
| T0tsuK4/honeypot | honeypot.py | honeypot.py | py | 1,673 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "socket.socket",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "socket.AF_IN... |
30575033790 | from DataEngine.DataAdapters.MongoAdapter.MongoAdapter import MongoAdapter
from Domain.EquityCorporateData import EquityCorporateData
from Domain.BuildEnumMethods import BuildEnumMethods
from datetime import date
# Shared adapter instance and a synthetic equity record used by every test below.
ma : MongoAdapter = MongoAdapter()
testEquity = EquityCorporateData().build(
    method = BuildEnumMethods.MANUAL,
    ticker="test",
    commonCompanyName = "test",
    ipoYear = date.today(),
    industry = "test",
    sector = "test",
    country = "test"
)
def testConnect():
    """Smoke test: passes trivially if the adapter module imported/connected."""
    assert(1 == 1)
def testSaveEquityCorporateDataDocument():
    """Persist the synthetic equity document via the adapter (needs live DB)."""
    ma.saveDocument(testEquity)
def testRemoveEquityCorporateDataDocument():
    """Delete the synthetic equity document saved by the previous test."""
    ma.removeDocument(testEquity)
def getDataForDate():
    """
    This test assumes you have a fully loaded database
    Or at least a database with the following ticker in it with some data for that date
    """
    retDf = ma.getDataForDate("2020-01-30", ["AAPL", "A"])
    # The expected close price is tied to the fixture data loaded into Mongo.
    assert(retDf.loc[retDf["ticker"] == "AAPL"]["Close"][0] == 84.379997)
def getDataForDates():
    """
    This test assumes you have a fully loaded database
    Or at least a database with the following ticker in it with some data for that date
    """
    retDf = ma.getDataForDateRange("2020-01-01", "2020-01-30", ["AAPL", "A"])
    # The expected open price is tied to the fixture data loaded into Mongo.
    assert(retDf.loc[retDf["ticker"] == "AAPL"]["Open"][2] == 73.447502)
# Execute the checks in order when this module is run or imported.
testConnect()
testSaveEquityCorporateDataDocument()
testRemoveEquityCorporateDataDocument()
getDataForDate()
getDataForDates() | jminahan/backtest_framework | DataEngine/Tests/DataAdapters/MongoAdapter/MongoAdapterTest.py | MongoAdapterTest.py | py | 1,494 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "DataEngine.DataAdapters.MongoAdapter.MongoAdapter.MongoAdapter",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "Domain.EquityCorporateData.EquityCorporateData",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "Domain.BuildEnumMethods.BuildEnumMeth... |
19700703411 | import discord
from discord.ext import commands
import urllib.parse, urllib.request
import requests
import googlesearch
import re
import json
# Discord bot instance; commands are invoked with the "@" prefix.
bot = commands.Bot(description = "Im just a Kid", command_prefix ="@")
@bot.event
async def on_ready():
    # Fired once the bot has connected to Discord.
    print("IM READYYY")
@bot.command(pass_context=True)
async def search(ctx, *args):
    """Look up a sneaker by keyword and post three price embeds (StockX,
    Stadium Goods, GOAT) to the invoking channel.

    NOTE(review): depends on hard-coded public Algolia app ids/keys and on
    scraping page HTML, so it is fragile against site changes; requests are
    made with verify=False (TLS verification disabled).
    """
    sites = [" Stadium Goods"]
    urllist = []
    headers = {}
    headers['User-Agent'] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
    await ctx.send("working on your request")
    #SX
    # Query StockX's Algolia index and take the first hit.
    keywords = ''
    for word in args:
        keywords += word + '%20'
    json_string = json.dumps({"params": f"query={keywords}&hitsPerPage=20&facets=*"})
    byte_payload = bytes(json_string, 'utf-8')
    algolia = {"x-algolia-agent": "Algolia for vanilla JavaScript 3.32.0", "x-algolia-application-id": "XW7SBCT9V6", "x-algolia-api-key": "6bfb5abee4dcd8cea8f0ca1ca085c2b3"}
    with requests.Session() as session:
        r = session.post("https://xw7sbct9v6-dsn.algolia.net/1/indexes/products/query", params=algolia, verify=False, data=byte_payload, timeout=30)
        results = r.json()["hits"][0]
    # NOTE(review): "¤cy" below looks like a mojibake of "&currency"
    # (HTML entity &curren;) — confirm against the original source.
    apiurl = f"https://stockx.com/api/products/{results['url']}?includes=market,360¤cy=USD"
    header = {
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9,ja-JP;q=0.8,ja;q=0.7,la;q=0.6',
        'appos': 'web',
        'appversion': '0.1',
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
    }
    response = requests.get(apiurl, verify=False, headers=header)
    prices = response.json()
    general = prices['Product']
    market = prices['Product']['market']
    sizes = prices['Product']['children']
    # Build the StockX embed: one field per size with a live ask and bid.
    embed = discord.Embed(title='StockX', color=0x43dd36)
    embed.set_thumbnail(url=results['thumbnail_url'])
    embed.add_field(name=general['title'], value='https://stockx.com/' + general['urlKey'], inline=False)
    embed.add_field(name='SKU/PID:', value=general['styleId'], inline=True)
    embed.add_field(name='Colorway:', value=general['colorway'], inline=True)
    embed.add_field(name='Retail Price:', value=f"${general['retailPrice']}", inline=False)
    for size in sizes:
        if len(sizes[size]['market']) != 0:
            if (sizes[size]['market']['lowestAsk'] != 0 and sizes[size]['market']['highestBid'] != 0):
                embed.add_field(name = f"Size: {sizes[size]['shoeSize']}", value=f"Low Ask: $ {sizes[size]['market']['lowestAsk']}\n High Bid: $ {sizes[size]['market']['highestBid']}", inline=True)
    embed.set_footer(text='GitHub: TestingYG', icon_url ="https://image.shutterstock.com/image-photo/cute-little-chicken-isolated-on-260nw-520818805.jpg")
    # Google for the product page on each extra site by SKU.
    sku = general['styleId']
    for x in sites:
        for i in (googlesearch.search(sku+x ,tld='co.in',lang='en',num=10,stop=1,pause=2)):
            urllist.append(str(i))
    #SG
    # Scrape sizes/prices out of the Stadium Goods page HTML via regexes.
    req = urllib.request.Request(urllist[0], headers=headers)
    resp = urllib.request.urlopen(req)
    respdata = str(resp.read())
    find = re.findall('"sizeLabel":.*?."f', respdata)
    find = ''.join(find)
    size = re.findall('"sizeLabel".*?,', find)
    size = ''.join(size)
    size = re.sub('sizeLabel":"', "", size)
    size = re.sub('"', "", size)
    size = size[:-1]
    size = size.split(",")
    price = re.findall('"price":.*?"f', find)
    price = ''.join(price)
    price = re.sub('"f', "", price)
    price = re.sub('"price":', " ", price)
    price = re.sub(", ", " ", price)
    price = price[:-1]
    price = re.sub('"', "", price)
    price = re.sub('null', '0', price)
    price = price.split(" ")
    price = price[1:]
    StadiumGoods = dict(zip(size, price))
    embedSG = discord.Embed(title='Stadium Goods', color=0xd1d8d0)
    embedSG.set_thumbnail(url=results['thumbnail_url'])
    embedSG.add_field(name=general['title'], value= urllist[0], inline=False)
    embedSG.add_field(name='SKU/PID:', value=general['styleId'], inline=True)
    embedSG.add_field(name='Colorway:', value=general['colorway'], inline=True)
    embedSG.add_field(name='Retail Price:', value=f"${general['retailPrice']}", inline=False)
    for k,v in StadiumGoods.items():
        if v != '0':
            embedSG.add_field(name=f'Size: {k}', value= f"$ {v}", inline=True)
    embedSG.set_footer(text='GitHub: TestingYG', icon_url ="https://image.shutterstock.com/image-photo/cute-little-chicken-isolated-on-260nw-520818805.jpg")
    #GOAT
    # Same Algolia approach against GOAT's index, keyed by the StockX SKU.
    keywords = f"{general['styleId']}%20"
    json_string = json.dumps({"params": f"facets=%2A&hitsPerPage=20&query={keywords}"})
    byte_payload = bytes(json_string, 'utf-8')
    algolia = {"x-algolia-agent": "Algolia for vanilla JavaScript 3.32.0", "x-algolia-application-id": "2FWOTDVM2O", "x-algolia-api-key": "ac96de6fef0e02bb95d433d8d5c7038a"}
    with requests.Session() as session:
        r = session.post("https://2fwotdvm2o-dsn.algolia.net/1/indexes/product_variants_v2/query", params=algolia, verify=False, data=byte_payload, timeout=30)
        results1 = r.json()["hits"][0]
    apiurl = f"https://www.goat.com/web-api/v1/product_variants?productTemplateId={results1['slug']}"
    url = f"https://www.goat.com/sneakers/{results1['slug']}/available-sizes"
    header = {
        'accept': '*/*',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9,ja-JP;q=0.8,ja;q=0.7,la;q=0.6',
        'appos': 'web',
        'appversion': '0.1',
        'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
    }
    response = requests.get(apiurl, verify=False, headers=header)
    prices = response.json()
    # Keep only new-condition pairs with an undamaged box; prices are cents.
    dic = {}
    for x in range(len(prices)):
        if (prices[x]["shoeCondition"] == "new_no_defects" and prices[x]["boxCondition"] == "good_condition"):
            reduce = prices[x]["lowestPriceCents"]["amount"] / 100
            dic[prices[x]["size"]] = int(reduce)
    embedG = discord.Embed(title='Goat', color=0x000000)
    embedG.set_thumbnail(url=results['thumbnail_url'])
    embedG.add_field(name=general['title'], value= url, inline=False)
    embedG.add_field(name='SKU/PID:', value=general['styleId'], inline=True)
    embedG.add_field(name='Colorway:', value=general['colorway'], inline=True)
    embedG.add_field(name='Retail Price:', value=f"${general['retailPrice']}", inline=False)
    if len(dic) != 0:
        for k,v in dic.items():
            embedG.add_field(name = f"Size: {k}", value=f"$ {v}", inline=True)
    embedG.set_footer(text='GitHub: TestingYG', icon_url ="https://image.shutterstock.com/image-photo/cute-little-chicken-isolated-on-260nw-520818805.jpg")
    await ctx.send(embed=embed)
    await ctx.send(embed=embedSG)
    await ctx.send(embed=embedG)
# NOTE(review): the bot token is empty — it must be supplied (ideally from an
# environment variable, never committed) before the bot can log in.
bot.run("")
| TestingYG/ProjectDumButt | DumButtv2.py | DumButtv2.py | py | 7,169 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "discord.ext.commands.Bot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "requests.Sessi... |
22850158142 | #!/usr/bin/env python
# coding: utf-8
from bs4 import BeautifulSoup as bs
import pandas as pd
from splinter import Browser
import requests
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
# Launch a visible Chrome browser, with chromedriver managed by webdriver-manager.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
def scrape_all():
    """Scrape Mars news, featured image, facts table and hemisphere images.

    Uses the module-level `browser` (splinter) and returns one dict with keys
    news_title, news_p, featured_image, facts (HTML table), hemispheres.
    """
    mars={}
    url_mars_news = 'https://redplanetscience.com/'
    browser.visit(url_mars_news)
    # html object
    html = browser.html
    # Parse with Beautiful Soup
    soup = bs(html, 'html.parser')
    mars_news_title = soup.find("div", class_="content_title").text
    news_title_scrape = mars_news_title
    news_title_scrape
    mars["news_title"]=news_title_scrape
    mars_news_paragraph = soup.find("div", class_="article_teaser_body").text
    news_p_scrape = mars_news_paragraph
    news_p_scrape
    mars["news_p"]=news_p_scrape
    mars_url = "https://spaceimages-mars.com/"
    browser.visit(mars_url)
    html = browser.html
    soup = bs(html, 'html.parser')
    # Retrieve featured image link
    relative_image_path = soup.find_all('img')[1]["src"]
    featured_image_url_scrape = mars_url+relative_image_path
    featured_image_url_scrape
    mars["featured_image"]=featured_image_url_scrape
    # Mars facts: second table on the page, rendered back out as HTML.
    mars_profile = 'https://galaxyfacts-mars.com/'
    mars_profile_table = pd.read_html(mars_profile)
    mars_table_df = mars_profile_table[1]
    new_mars_df = mars_table_df.rename(columns={0:'Mars Planet Profile', 1:''})
    new_mars_df
    mars_facts_scrape = new_mars_df.to_html()
    mars["facts"]=mars_facts_scrape
    # Hemisphere images: visit each item's detail page for the full-size URL.
    mars_hemi_pics='https://marshemispheres.com/'
    browser.visit(mars_hemi_pics)
    html = browser.html
    soup = bs(html, 'html.parser')
    main_urls = soup.find_all('div',class_='item')
    # Create list to hold dicts
    urls_for_h_images_scrape=[]
    for main_url in main_urls:
        hemisphere_image_dict = {}
        h_title = main_url.find('div', class_='description').find('a', class_='itemLink product-item').h3.text
        h_images = mars_hemi_pics + main_url.find('a',class_='itemLink product-item')['href']
        browser.visit(h_images)
        html = browser.html
        soup = bs(html, 'html.parser')
        full_image_url = soup.find('div',class_='downloads').a['href']
        #print(full_image_url)
        # Append title and image urls of hemisphere to dictionary
        hemisphere_image_dict['h_title'] = h_title
        hemisphere_image_dict['full_img_url'] = 'https://marshemispheres.com/'+full_image_url
        urls_for_h_images_scrape.append(hemisphere_image_dict)
    mars["hemispheres"]=urls_for_h_images_scrape
    return mars
# Allow running this scraper standalone for a quick smoke test.
if __name__ == "__main__":
    print(scrape_all())
| iegatlanta/web-scraping-challenge | Mission_to_Mars/scrape_mars.py | scrape_mars.py | py | 2,817 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "webdriver_manager.chrome.ChromeDriverManager",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "splinter.Browser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 26,
"usage_type": "call"
},
{
"a... |
16046421800 | from tkinter import *
from tkinter.messagebox import*
import sqlite3
# Full-screen root window with the banner image and page headings.
root4=Tk()
h,w=root4.winfo_screenheight(),root4.winfo_screenwidth()
root4.geometry('%dx%d+0+0'%(w,h))
bus4=PhotoImage(file='.\\Bus_for_project.png')
Label(root4,image=bus4).grid(row=0,column=0,columnspan=12,padx=w/2.5)
Label(root4,text='Online Bus Booking System',font='Arial 20',fg='Red',bg='Sky Blue').grid(row=1,column=0,columnspan=12)
Label(root4,text='Add Bus Route Details',font='Arial 18',fg='Green2').grid(row=2,columnspan=12,pady=20)
def add_route():
    """Read the route form fields and insert a new row into the route table.

    Shows an error dialog when the route id already exists; otherwise inserts
    the record (station names lower-cased) and shows a success dialog.
    """
    route_id = r_id.get()
    start_station = s_station.get()
    start_id = s_id.get()
    end_station = e_station.get()
    end_id = e_id.get()
    # sqlite3.connect is the documented API (the old code instantiated
    # sqlite3.Connection directly) and the connection is now always closed.
    con4 = sqlite3.connect('Bus_DB')
    try:
        cur4 = con4.cursor()
        cur4.execute('create table if not exists route(r_id varchar(5) not null primary key,s_name varchar(20),s_id varchar(5),e_name varchar(20),e_id varchar(5) )')
        # Parameterized existence check instead of fetching every route id.
        cur4.execute('select 1 from route where r_id=?', (route_id,))
        if cur4.fetchone() is not None:
            showerror('ERROR', "Route id already exists")
        else:
            start_station = start_station.lower()
            end_station = end_station.lower()
            cur4.execute('insert into route(r_id,s_name,s_id,e_name,e_id) values(?,?,?,?,?)',
                         (route_id, start_station, start_id, end_station, end_id))
            con4.commit()
            showinfo('Success', "Route record added successfully!!")
    finally:
        con4.close()  # the original leaked the connection on every call
# Form widgets: one Label + Entry per field, laid out on grid rows 3-4.
Label(root4, text="Route ID", font='Arial 12', fg='black').grid(row=3, column=0)
r_id=Entry(root4)
r_id.grid(row=3, column=1)
# NOTE(review): "Staring station" is a user-visible typo for "Starting station".
Label(root4, text="Staring station", font='Arial 12', fg='black').grid(row=3, column=2)
s_station=Entry(root4)
s_station.grid(row=3, column=3)
Label(root4, text="Station ID", font='Arial 12', fg='black').grid(row=3, column=4)
s_id=Entry(root4)
s_id.grid(row=3, column=5)
Label(root4, text="Ending station", font='Arial 12', fg='black').grid(row=4, column=1)
e_station=Entry(root4)
e_station.grid(row=4, column=2)
Label(root4, text="Ending Station ID", font='Arial 12', fg='black').grid(row=4, column=3)
e_id=Entry(root4)
e_id.grid(row=4,column=4)
Button(root4, text="Add Route", font='Arial 12 ', bg='Pale Green', fg='black',command=add_route).grid(row=4, column=8)
# NOTE(review): this button has no command= handler, so clicking it does nothing.
Button(root4, text="Delete Route", font='Arial 12', bg='Pale Green2', fg='black').grid(row=4, column=9)
def ho():
    """Close this window and open the Home screen (imported on demand)."""
    root4.destroy()
    import Home
# Home button and the Tk event loop.
home4=PhotoImage(file='.\\home.png')
Button(root4,image=home4,bg='Pale Green',command=ho).grid(row=3,column=8,pady=50)
root4.mainloop()
# NOTE(review): this second mainloop() call is redundant — the first only
# returns once the window is destroyed, so this one has nothing to run.
root4.mainloop()
| akarshi19/Online-Bus-Booking-System | bus_route.py | bus_route.py | py | 2,568 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sqlite3.Connection",
"line_number": 20,
"usage_type": "call"
}
] |
38088951612 | # -*- coding: utf-8 -*-
"""
Production Mapper
Michael Troyer
michael.troyer@usda.gov
"""
import datetime
import os
import traceback
from collections import defaultdict
import arcpy
# Keep tool outputs out of the open map and allow overwriting previous runs.
arcpy.env.addOutputsToMap = False
arcpy.env.overwriteOutput = True
##---Functions-------------------------------------------------------------------------------------
def build_where_clause(table, field, valueList):
    """
    Takes a list of values and constructs a SQL WHERE
    clause to select those values within a given field and table.
    """
    # Add DBMS-specific field delimiters
    fieldDelimited = arcpy.AddFieldDelimiters(arcpy.Describe(table).path, field)
    # Determine field type
    fieldType = arcpy.ListFields(table, field)[0].type
    # Add single-quotes for string field values
    if str(fieldType) == 'String':
        valueList = ["'%s'" % value for value in valueList]
    # Format WHERE clause in the form of an IN statement
    whereClause = "%s IN(%s)" % (fieldDelimited, ', '.join(map(str, valueList)))
    return whereClause
def intersect_and_get_attributes(source_layer, intersect_layer, intersect_field):
    """Select *intersect_layer* features intersecting *source_layer* and
    return the selected values of *intersect_field*.

    Returns an empty list when nothing intersects; otherwise clears the
    selection before returning so the layer can be reused.
    """
    arcpy.SelectLayerByLocation_management(intersect_layer, 'INTERSECT', source_layer)
    # Bug fix: FIDSet is an empty *string* when nothing is selected, and
    # ''.split(';') == [''] is truthy — so the old emptiness test
    # `not FIDSet.split(';')` could never fire. Test the string directly.
    if not arcpy.Describe(intersect_layer).FIDSet:
        return []
    with arcpy.da.SearchCursor(intersect_layer, intersect_field) as cur:
        values = [row[0] for row in cur]
    arcpy.SelectLayerByAttribute_management(intersect_layer, "CLEAR_SELECTION")
    return values
class Toolbox(object):
    """ArcGIS Python toolbox definition exposing the ProductionMapper tool."""
    def __init__(self):
        self.label = "Production Mapper"
        self.alias = "Production_Mapper"
        # List of tool classes associated with this toolbox
        self.tools = [ProductionMapper]
class ProductionMapper(object):
    """Geoprocessing tool that builds a production map document for a polygon
    feature class, filling County / Quad / PLSS / UTM / title layout elements
    from intersections with reference layers in Production_Mapper.gdb."""
    def __init__(self):
        self.label = "ProductionMapper"
        self.description = ""
        self.canRunInBackground = True
    def getParameterInfo(self):
        """Declare the six tool parameters shown in the ArcGIS dialog."""
        input_fc=arcpy.Parameter(
            displayName="Input Feature Class",
            name="Input Feature Class",
            datatype="Feature Class",
            parameterType="Required",
            direction="Input",
        )
        project_id=arcpy.Parameter(
            displayName="Project ID",
            name="Project ID",
            datatype="String",
            parameterType="Optional",
        )
        title=arcpy.Parameter(
            displayName="Project Title",
            name="Project Title",
            datatype="String",
            parameterType="Optional",
        )
        author=arcpy.Parameter(
            displayName="Author",
            name="Author",
            datatype="String",
            parameterType="Optional",
        )
        template=arcpy.Parameter(
            displayName="Select Map Template",
            name="Select Map Template",
            datatype="DEMapDocument",
            parameterType="Required",
            direction="Input",
        )
        output_mxd=arcpy.Parameter(
            displayName="Output Map Document",
            name="Output Map Document",
            datatype="DEMapDocument",
            parameterType="Required",
            direction="Output",
        )
        return [input_fc, project_id, title, author, template, output_mxd]
    def isLicensed(self):
        """Always licensed."""
        return True
    def updateParameters(self, params):
        """Restrict the input feature class to polygon geometry."""
        params[0].filter.list = ["Polygon"]
        return
    def updateMessages(self, params):
        """No custom parameter validation messages."""
        return
    def execute(self, params, messages):
        """Run the tool: intersect the input polygons with the reference
        layers, format the map-surround text, and save a copy of the
        template MXD with the input layer added on top."""
        input_fc, project_id, title, author, template, output_mxd = params
        try:
            # for param in params:
            # arcpy.AddMessage('{} [Value: {}]'.format(param.name, param.value))
            layer = arcpy.MakeFeatureLayer_management(input_fc.value, "in_memory\\tmp")
            mxd = arcpy.mapping.MapDocument(template.valueAsText)
            df = arcpy.mapping.ListDataFrames(mxd)[0]
            database = r'.\Production_Mapper.gdb'
            counties_layer = arcpy.MakeFeatureLayer_management(os.path.join(database, 'Counties'), r'in_memory\Counties')
            quads_layer = arcpy.MakeFeatureLayer_management(os.path.join(database, 'Quad_Index_24k'), r'in_memory\Quads')
            plss_layer = arcpy.MakeFeatureLayer_management(os.path.join(database, 'PLSS_FirstDivision'), r'in_memory\PLSS')
            utm_zone_layer = arcpy.MakeFeatureLayer_management(os.path.join(database, 'UTM_Zones'), r'in_memory\UTM_Zone')
            counties = intersect_and_get_attributes(layer, counties_layer, 'LABEL')
            plss = intersect_and_get_attributes(layer, plss_layer, 'FRSTDIVID')
            quads = intersect_and_get_attributes(layer, quads_layer, 'QUADNAME')
            utm_zone = intersect_and_get_attributes(layer, utm_zone_layer, 'UTM_Zone')
            # Counties
            county_text = 'County(s):\n{}'.format(', '.join(counties))
            arcpy.AddMessage(county_text)
            # Quads
            quad_text = "7.5' Quad(s):\n{}".format(', '.join(quads))
            arcpy.AddMessage(quad_text)
            # PLSS: FRSTDIVID is sliced positionally into principal meridian,
            # township, range and section — assumes a fixed-width id layout;
            # TODO confirm against the PLSS schema.
            plss_data = defaultdict(list)
            for row in plss:
                pm = int(row[2:4])
                tw = row[5:7] + row[8]
                rg = row[10:12] + row[13]
                sn = int(row[17:19])
                plss_data[(pm, tw, rg)].append(sn)
            plss_text = '\n'.join(
                [
                    'PM {} | Twn {} | Rng {} \nSections: {}'.format(
                        pm, tw, rg, ', '.join([str(s) for s in sorted(secs)])
                    )
                    for (pm, tw, rg), secs in plss_data.items()
                ]
            )
            arcpy.AddMessage(plss_text)
            # UTM Coordinates: centroid of the dissolved input geometry.
            dissolve = arcpy.Dissolve_management(layer, r'in_memory\dissolve')
            dissolve_layer = arcpy.MakeFeatureLayer_management(dissolve, r'in_memory\dissolve_layer')
            with arcpy.da.SearchCursor(dissolve_layer, "SHAPE@XY") as cur:
                for pt, in cur:
                    mX, mY = pt
            utm_e = round(mX, 0)
            utm_n = round(mY, 0)
            utm_text = '{}N | {} mN | {} mE'.format(max(utm_zone), utm_n, utm_e)
            arcpy.AddMessage(utm_text)
            # Date
            now = datetime.datetime.now()
            date_text = r'Map Date: {}/{}/{}'.format(now.month, now.day, now.year)
            arcpy.AddMessage(date_text)
            # Get and update the layout elements
            layout_elements = {le.name: le for le in arcpy.mapping.ListLayoutElements(mxd)}
            for field, update in {
                'County': county_text,
                'Quad': quad_text,
                'PLSS': plss_text,
                'UTM': utm_text,
                'Date': date_text,
                'Project ID': project_id.valueAsText,
                'Title': title.valueAsText,
                'Author': author.valueAsText,
            }.items():
                if update:
                    try:
                        layout_elements[field].text = update
                    except KeyError:
                        # Template lacks this element; skip it silently.
                        pass
            # Update map and save output
            arcpy.mapping.AddLayer(df, arcpy.mapping.Layer(input_fc.valueAsText), "TOP")
            df.extent = arcpy.Describe(layer).extent
            df.scale = round(df.scale * 1.25, -2)
            # NOTE(review): the next two lines reference the refresh functions
            # without calling them (missing parentheses), so no refresh occurs.
            arcpy.RefreshActiveView
            arcpy.RefreshTOC
            output = output_mxd.valueAsText
            if not output.endswith('.mxd'):
                output += '.mxd'
            mxd.saveACopy(output)
            # Clean up
            for item in (layer, counties_layer, quads_layer, plss_layer, utm_zone_layer, dissolve):
                try:
                    arcpy.Delete_management(item)
                except:
                    pass
        except:
            arcpy.AddError(str(traceback.format_exc()))
        return
| MichaelTroyer/ArcGIS_NRCS_Production_Mapper | Production_Mapper.pyt | Production_Mapper.pyt | pyt | 8,168 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "arcpy.env",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "arcpy.env",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "arcpy.AddFieldDelimiters",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "arcpy.Describe... |
32279135386 | #!/usr/bin/env python
"""Simple script to fetch data from the bslparlour home stream"""
import datetime
import os
import subprocess as sp
import yaml
import tweepy
import myconf
def dictify(results):
    """Map each result's ``id`` attribute to the result object itself."""
    return {result.id: result for result in results}
def merge_all_yamls(tweet_data_dir="tweet_data"):
    """Load every YAML dump in *tweet_data_dir* and merge them into one dict
    keyed by tweet id (duplicate ids collapse; merge order follows
    os.listdir, which is not guaranteed to be sorted)."""
    yamls = []
    for f in os.listdir(tweet_data_dir):
        # NOTE(review): yaml.load without a Loader is unsafe on untrusted
        # input; these dumps round-trip tweepy objects, which is presumably
        # why safe_load cannot be used as-is — confirm before changing.
        yamls.append(yaml.load(open(os.path.join(tweet_data_dir, f), "r")))
    all_yamls_gen = (dictify(x) for x in yamls)
    all_yamls = dict()
    for x in all_yamls_gen:
        all_yamls.update(x)
    return all_yamls
def main():
    """Fetch the home timeline, archive it as a timestamped YAML dump, merge
    all dumps into tweepy_all.yaml when new tweets appeared, and commit the
    data directory to git."""
    tweepy_auth = tweepy.OAuthHandler(
        myconf.consumer_key,
        myconf.consumer_secret,
    )
    tweepy_auth.set_access_token(
        myconf.access_key,
        myconf.access_secret,
    )
    tweepy_api = tweepy.API(tweepy_auth)
    timestamp = datetime.datetime.now().timestamp()
    with open("tweet_data/tweepy_{}.yaml".format(timestamp), "w") as f:
        yaml.dump(tweepy_api.home_timeline(count=40), f)
    all_yamls = merge_all_yamls()
    try:
        all_yamls_previous = yaml.load(open("tweet_data/tweepy_all.yaml", "r"))
    except FileNotFoundError:
        all_yamls_previous = dict()
    # Only rewrite the aggregate file when the merged set actually grew.
    if len(all_yamls_previous) < len(all_yamls):
        with open("tweet_data/tweepy_all.yaml", "w") as f:
            yaml.dump(all_yamls, f)
    # Commit to repo
    sp.check_call("git add tweet_data/* && git commit -m 'Automated data commit.' && git push",
                  shell=True)
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| natfarleydev/mr-retweet | get_tweet_data.py | get_tweet_data.py | py | 1,627 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "yaml.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
... |
8770897157 | import asyncio
import logging
import sys
import time
import pyautogui
import pydirectinput
import qasync
from PySide6 import QtWidgets, QtCore
from front import Ui_MainWindow
def use_quick_access_inventory():
    """Press quick-access slots 1-4 in order, holding each key for 0.1 s.

    Collapses the four copy-pasted press/release sequences into one loop;
    the key order and timing are unchanged.
    """
    print("Use quick access of inventory")
    for key in ('1', '2', '3', '4'):
        pydirectinput.keyDown(key)
        time.sleep(0.1)
        pydirectinput.keyUp(key)
def logout():
    """Click the in-game logout button, then confirm the dialog.

    Uses hard-coded screen coordinates — assumes the client window sits at
    (0, 0) at a fixed resolution; TODO confirm.
    """
    print("Logout")
    time.sleep(0.1)
    pydirectinput.leftClick(1264, 967)  # logout button
    time.sleep(1)
    pydirectinput.leftClick(655, 573)  # confirmation dialog button
@qasync.asyncSlot()
async def terminate_spot(time_attack):
    """Clear the current spot: tap '1', hold space for *time_attack* seconds,
    then tap 'z' (presumably the pickup key — verify in-game binding)."""
    pydirectinput.keyDown('1')
    await asyncio.sleep(0.1)
    pydirectinput.keyUp('1')
    await asyncio.sleep(0.1)
    pydirectinput.keyDown('space')
    await asyncio.sleep(time_attack)
    pydirectinput.keyUp('space')
    pydirectinput.keyDown('z')
    await asyncio.sleep(0.1)
    pydirectinput.keyUp('z')
@qasync.asyncSlot()
async def change_chanel(chanel, time_change):
    """Click the UI button for channel *chanel* (1-4) and wait *time_change*
    seconds for the switch to complete.

    Invalid channel numbers are logged and ignored. (The old if-chain let
    channel 0 through its range guard and then silently did nothing after
    focusing the windows; 0 is now rejected up front with the others.)
    """
    # Hard-coded screen coordinates of the four channel buttons — assumes
    # the client window is at (0, 0); TODO confirm.
    coords = {1: (1156, 81), 2: (1154, 102), 3: (1160, 127), 4: (1174, 147)}
    if chanel not in coords:
        print(f'Chanel cant be {chanel}')
        return
    # Focus the helper away and bring the game client to the foreground.
    get_window("Insomnia Helper", False)
    get_window('InsomniaMT2', True)
    print(f'Chanel is changing to chanel {chanel}')
    await asyncio.sleep(0.1)
    pydirectinput.leftClick(*coords[chanel])
    await asyncio.sleep(time_change)
def use_main_inventory_slots(number):
    """Click main-inventory slot *number* (1-5).

    Returns True when a slot was clicked, False for out-of-range numbers.
    Replaces the five copy-pasted branches with a coordinate table; all
    slots share y=645 and differ only in x.
    """
    slot_x = {1: 1137, 2: 1168, 3: 1200, 4: 1234, 5: 1264}
    if number not in slot_x:
        return False
    print(f'Use slot {number} of main inventory')
    time.sleep(0.1)
    pydirectinput.leftClick(slot_x[number], 645)
    return True
def open_main_inventory():
    """Click the main-inventory button (hard-coded screen coordinates)."""
    print("Opening main inventory")
    time.sleep(0.1)
    pydirectinput.leftClick(1192, 970)
def get_window(title, client):
    """Find and focus the window titled *title*.

    Returns the matching window list, or None when no window exists or the
    user cancels the duplicate-removal prompt. With *client* True the window
    is moved to the top-left corner, otherwise to x=1280 (helper side).
    """
    while True:
        hwnd = pyautogui.getWindowsWithTitle(title)
        if len(hwnd) <= 0:
            return None
        if len(hwnd) >= 2:
            # Duplicate windows: interactively offer to close the extra
            # "InsomniaMT2 Klient" window, then loop to re-query.
            print(f'Number of finding windows is {len(hwnd)}')
            window_to_remove = pyautogui.getWindowsWithTitle("InsomniaMT2 Klient")
            window_to_remove[0].maximize()
            decison = pyautogui.confirm(text='Remove this window?', title='Remove', buttons=['OK', 'Cancel'])
            if decison == 'Cancel':
                return None
            if decison == 'OK':
                window_to_remove[0].close()
                time.sleep(0.1)
        if len(hwnd) == 1:
            hwnd[0].activate()
            if client:
                hwnd[0].moveTo(0, 0)
            else:
                hwnd[0].moveTo(1280, 0)
            return hwnd
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """Main Qt window: page navigation plus the async auto-dropping loop."""
    def __init__(self):
        super(MainWindow, self).__init__()
        self.setupUi(self)
        self.setWindowTitle("Insomnia Helper")
        self.stackedWidget.setCurrentIndex(0)
        # Variable
        self.can_running = False  # whether the dropping loop should keep going
        self.counter_chanel = 1  # next channel (1-4) to farm
        # Connect push button
        self.autoDropping.clicked.connect(self.auto_dropping)
        self.bossHelpper.clicked.connect(self.boss_helper)
        self.back_button.clicked.connect(self.back_start_page)
        self.back_button_1.clicked.connect(self.back_start_page)
        self.start_dropping.clicked.connect(self.start_dropping_fun)
    def auto_dropping(self):
        # Show the auto-dropping page.
        self.stackedWidget.setCurrentIndex(1)
    def boss_helper(self):
        # Show the boss-helper page.
        self.stackedWidget.setCurrentIndex(2)
    def back_start_page(self):
        # Return to the start page.
        self.stackedWidget.setCurrentIndex(0)
    def start_dropping_fun(self):
        # Toggle the loop flag; start the loop only on the off -> on edge.
        print("button clicked")
        self.can_running = not self.can_running
        if self.can_running:
            self.main()
    def keyPressEvent(self, event):
        """Keyboard shortcuts are currently disabled (see commented code)."""
        key = event.key()
        # if key == 61:
        # self.can_running = True
        # print("Key started is clicked")
        # self.main()
        # return
        #
        # if key == 45:
        # print("Key stopped is clicked")
        # self.can_running = False
        # return
        print(f"Found key {key} --> dont have action for this key")
    @qasync.asyncSlot()
    async def main(self):
        """Run the dropping loop until can_running is cleared."""
        print("Dropping is started")
        try:
            while True:
                print(self.can_running)
                if not self.can_running:
                    print("Dropping is stopped")
                    break
                await self.auto_dropping_fun()
        except:
            # NOTE(review): bare except hides the real error; narrow if possible.
            print("Error with dropping, try again")
    @qasync.asyncSlot()
    async def auto_dropping_fun(self):
        """One loop iteration: skip unchecked channels, attack, switch channel."""
        if not self.ch1_check_box.isChecked() and self.counter_chanel == 1:
            self.counter_chanel += 1
        if not self.ch2_check_box.isChecked() and self.counter_chanel == 2:
            self.counter_chanel += 1
        if not self.ch3_check_box.isChecked() and self.counter_chanel == 3:
            self.counter_chanel += 1
        if not self.ch4_check_box.isChecked() and self.counter_chanel == 4:
            self.counter_chanel += 1
        if self.counter_chanel > 4:
            # Wrapped past the last channel: reset and yield briefly.
            self.counter_chanel = 1
            await asyncio.sleep(0.1)
            return
        await terminate_spot(self.atact_time.value())
        await change_chanel(self.counter_chanel, self.change_chanel_time.value())
        self.counter_chanel += 1
def start_application(): # Start Application with qasync
    """Create the Qt application, attach a qasync event loop and block forever."""
    app = QtWidgets.QApplication(sys.argv)
    # qasync bridges the Qt event loop and asyncio so the async slots above work.
    loop = qasync.QEventLoop(app)
    window = MainWindow()
    window.show()
    logging.info("Starting application Insomnia bot ...")
    with loop:
        loop.run_forever()
# Module entry point: the GUI launches immediately when this file runs/imports.
start_application()
| arekszatan/botClicker | InsomniaBot.py | InsomniaBot.py | py | 6,684 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pydirectinput.keyDown",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pydirectinput.keyUp",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pydirectinput.ke... |
46051636676 | # -*- coding: utf-8 -*-
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.contrib import messages
from django.utils.translation import gettext as _
from djconfig import config
from spirit.core.utils.views import is_post, post_data
from spirit.core.utils.paginator import yt_paginate
from spirit.core.utils.decorators import administrator_required
from .forms import CommentFlagForm
from ..models import CommentFlag, Flag
@administrator_required
def detail(request, pk):
    """Moderate one comment flag and list every flag on the same comment."""
    comment_flag = get_object_or_404(CommentFlag, pk=pk)
    form = CommentFlagForm(
        user=request.user,
        data=post_data(request),
        instance=comment_flag)
    if is_post(request) and form.is_valid():
        form.save()
        messages.info(request, _("The flag has been moderated!"))
        return redirect(reverse("spirit:admin:flag:index"))
    related_flags = yt_paginate(
        Flag.objects.filter(comment=comment_flag.comment),
        per_page=config.comments_per_page,
        page_number=request.GET.get('page', 1))
    context = {
        'flag': comment_flag,
        'flags': related_flags,
        'form': form}
    return render(
        request=request,
        template_name='spirit/comment/flag/admin/detail.html',
        context=context)
@administrator_required
def _index(request, queryset, template):
    """Render a paginated listing of flags for the given queryset."""
    page_number = request.GET.get('page', 1)
    paginated_flags = yt_paginate(
        queryset,
        per_page=config.comments_per_page,
        page_number=page_number)
    return render(request, template, {'flags': paginated_flags})
def opened(request):
    """List flags that have not been closed yet."""
    open_flags = CommentFlag.objects.filter(is_closed=False)
    return _index(
        request,
        queryset=open_flags,
        template='spirit/comment/flag/admin/open.html')
def closed(request):
    """List flags that have already been closed."""
    closed_flags = CommentFlag.objects.filter(is_closed=True)
    return _index(
        request,
        queryset=closed_flags,
        template='spirit/comment/flag/admin/closed.html')
| nitely/Spirit | spirit/comment/flag/admin/views.py | views.py | py | 1,916 | python | en | code | 1,153 | github-code | 6 | [
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "models.CommentFlag",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "forms.CommentFlagForm",
"line_number": 20,
"usage_type": "call"
},
{
"a... |
2768444611 | import pandas as pd
import math
from sklearn import linear_model
import numpy as np
def predict_using_sklearn(csv_path=r'C:\Users\Polina\Desktop\Python\Pandas\test_scores.csv'):
    """Fit cs ~ math with sklearn's closed-form linear regression.

    Parameters
    ----------
    csv_path : str
        CSV file containing 'math' and 'cs' columns. Defaults to the
        original hard-coded location for backward compatibility.

    Returns
    -------
    tuple
        (coefficients array, intercept) of the fitted model.
    """
    test_scores = pd.read_csv(csv_path)
    reg = linear_model.LinearRegression()
    reg.fit(test_scores[['math']], test_scores.cs)
    return reg.coef_, reg.intercept_
def gradient_descent(x, y, iterations=1000000, learning_rate=0.0002, rel_tol=1e-20):
    """Fit y ≈ m*x + b by batch gradient descent on the mean squared error.

    Parameters
    ----------
    x, y : numpy arrays of equal length.
    iterations : int
        Maximum number of descent steps (default keeps original behaviour).
    learning_rate : float
        Step size applied to both parameters.
    rel_tol : float
        Relative tolerance on the cost used for early stopping.

    Returns
    -------
    (m, b) : fitted slope and intercept.
    """
    m_curr = b_curr = 0
    n = len(x)
    cost_previous = 0
    for _ in range(iterations):
        y_predicted = m_curr * x + b_curr
        # Mean squared error; vectorised instead of a Python list comprehension.
        cost = (1 / n) * sum((y - y_predicted) ** 2)
        md = -(2 / n) * sum(x * (y - y_predicted))
        bd = -(2 / n) * sum(y - y_predicted)
        m_curr = m_curr - learning_rate * md
        b_curr = b_curr - learning_rate * bd
        # Stop once the cost has effectively converged.
        if math.isclose(cost, cost_previous, rel_tol=rel_tol):
            break
        cost_previous = cost
    return m_curr, b_curr
if __name__ == '__main__':
    # Demo: fit the same dataset with both implementations and compare results.
    df = pd.read_csv(r"C:\Users\Polina\Desktop\Python\Pandas\test_scores.csv")
    x = np.array(df.math)
    y = np.array(df.cs)
    m, b = gradient_descent(x, y)
    print("Using gradient descent function: Coef {} Intercept {}".format(m, b))
    m_sklearn, b_sklearn = predict_using_sklearn()
print("Using sklearn: Coef {} Intercept {}".format(m_sklearn,b_sklearn)) | CarlBrendt/Data-Analysis | Gradient_descent_with_no_train_test.py | Gradient_descent_with_no_train_test.py | py | 1,356 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model",
"line_number": 8,
"usage_type": "name"
},
{
"api_name"... |
72757346749 | from syscore.objects import arg_not_supplied
from syscore.genutils import flatten_list
from dataclasses import dataclass
import pandas as pd
EMPTY_INSTRUMENT = ""
class futuresInstrument(object):
    """Lightweight identifier for a futures instrument (wraps the code string)."""

    def __init__(self, instrument_code: str):
        self._instrument_code = instrument_code

    @property
    def instrument_code(self):
        return self._instrument_code

    def empty(self):
        """True when this is the empty placeholder instrument."""
        return self.instrument_code == EMPTY_INSTRUMENT

    @classmethod
    def create_from_dict(futuresInstrument, input_dict):
        # Might seem pointless, but (a) is used in original code, (b) gives a nice consistent feel
        return futuresInstrument(input_dict["instrument_code"])

    def as_dict(self):
        # Might seem pointless, but (a) is used in original code, (b) gives a nice consistent feel
        return dict(instrument_code=self.instrument_code)

    def __eq__(self, other):
        # Defer to the other operand for foreign types instead of raising
        # AttributeError on a missing .instrument_code.
        if not isinstance(other, futuresInstrument):
            return NotImplemented
        return self.instrument_code == other.instrument_code

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ to None, making instances
        # unhashable; hash by the same key used for equality so instruments
        # can be used in sets and as dict keys.
        return hash(self.instrument_code)

    @property
    def key(self):
        return self.instrument_code

    def __repr__(self):
        return str(self.instrument_code)
@dataclass
class instrumentMetaData:
    """Static configuration for an instrument: description, sizing and costs."""

    Description: str = ""
    Pointsize: float = 0.0
    Currency: str = ""
    AssetClass: str = ""
    Slippage: float = 0.0
    PerBlock: float = 0.0
    Percentage: float = 0.0
    PerTrade: float = 0.0

    def as_dict(self) -> dict:
        # Field order follows the dataclass declaration.
        return {name: getattr(self, name) for name in self.__dataclass_fields__.keys()}

    @classmethod
    def from_dict(instrumentMetaData, input_dict):
        """Rebuild from a dict produced by as_dict()."""
        field_names = instrumentMetaData.__dataclass_fields__.keys()
        return instrumentMetaData(*(input_dict[name] for name in field_names))
@dataclass
class futuresInstrumentWithMetaData:
    """Pairs an instrument identifier with its static meta data."""

    instrument: futuresInstrument
    meta_data: instrumentMetaData

    @property
    def instrument_code(self) -> str:
        return self.instrument.instrument_code

    @property
    def key(self) -> str:
        return self.instrument_code

    def as_dict(self) -> dict:
        # Flatten: meta-data fields plus the instrument code in one dict.
        flat = self.meta_data.as_dict()
        flat["instrument_code"] = self.instrument_code
        return flat

    @classmethod
    def from_dict(futuresInstrumentWithMetaData, input_dict):
        # NOTE: pops 'instrument_code', mutating the caller's dict
        # (matches long-standing behaviour).
        code = input_dict.pop("instrument_code")
        return futuresInstrumentWithMetaData(
            futuresInstrument(code),
            instrumentMetaData.from_dict(input_dict),
        )

    @classmethod
    def create_empty(futuresInstrumentWithMetaData):
        return futuresInstrumentWithMetaData(
            futuresInstrument(EMPTY_INSTRUMENT),
            instrumentMetaData(),
        )

    def empty(self):
        return self.instrument.empty()
class listOfFuturesInstrumentWithMetaData(list):
    """A list of futuresInstrumentWithMetaData with a tabular view."""

    def as_df(self):
        """Return the meta data as a DataFrame indexed by instrument code."""
        codes = [item.instrument_code for item in self]
        # Union of all meta-data field names seen across the list.
        per_item_keys = [item.meta_data.as_dict().keys() for item in self]
        unique_keys = list(set(flatten_list(per_item_keys)))
        columns = {}
        for field_name in unique_keys:
            columns[field_name] = [
                getattr(item.meta_data, field_name) for item in self
            ]
        return pd.DataFrame(columns, index=codes)
class assetClassesAndInstruments(dict):
    """Mapping of instrument code -> asset class name."""

    def __repr__(self):
        return str(self.as_pd())

    def get_instrument_list(self) -> list:
        return list(self.keys())

    @classmethod
    def from_pd_series(self, pd_series: pd.Series):
        """Build from a Series whose index is instrument codes and values asset classes."""
        pairs = zip(list(pd_series.index), list(pd_series.values))
        return assetClassesAndInstruments(
            {code: asset_class for code, asset_class in pairs}
        )

    def all_asset_classes(self) -> list:
        # Unique asset classes, alphabetically ordered.
        return sorted(set(self.values()))

    def as_pd(self) -> pd.Series:
        codes = list(self.keys())
        classes = list(self.values())
        return pd.Series(classes, index=codes)

    def all_instruments_in_asset_class(
        self, asset_class: str, must_be_in=arg_not_supplied
    ) -> list:
        """Instruments in `asset_class`, optionally restricted to `must_be_in`."""
        matching = [
            code
            for code, code_asset_class in self.items()
            if code_asset_class == asset_class
        ]
        if must_be_in is arg_not_supplied:
            return matching
        # Keep only instruments that are also part of the supplied universe.
        return [code for code in matching if code in must_be_in]
class instrumentCosts(object):
    """Trading-cost model: price slippage plus three commission schedules.

    Commissions are combined with max() — i.e. the broker charges whichever
    of per-block / per-trade / percentage-of-value is largest. Adjust
    calculate_total_commission if your broker's schedule differs.
    """

    def __init__(
        self,
        price_slippage: float = 0.0,
        value_of_block_commission: float = 0.0,
        percentage_cost: float = 0.0,
        value_of_pertrade_commission: float = 0.0,
    ):
        self._price_slippage = price_slippage
        self._value_of_block_commission = value_of_block_commission
        self._percentage_cost = percentage_cost
        self._value_of_pertrade_commission = value_of_pertrade_commission

    @classmethod
    def from_meta_data(instrumentCosts, meta_data: "instrumentMetaData"):
        """Build a cost model from an instrument's static meta data fields."""
        # Forward-reference annotation so this class does not require
        # instrumentMetaData to exist at class-creation time.
        return instrumentCosts(
            price_slippage=meta_data.Slippage,
            value_of_block_commission=meta_data.PerBlock,
            percentage_cost=meta_data.Percentage,
            value_of_pertrade_commission=meta_data.PerTrade,
        )

    def __repr__(self):
        return (
            "instrumentCosts slippage %f block_commission %f percentage cost %f per trade commission %f "
            % (
                self.price_slippage,
                self.value_of_block_commission,
                self.percentage_cost,
                self.value_of_pertrade_commission,
            )
        )

    @property
    def price_slippage(self):
        return self._price_slippage

    @property
    def value_of_block_commission(self):
        return self._value_of_block_commission

    @property
    def percentage_cost(self):
        return self._percentage_cost

    @property
    def value_of_pertrade_commission(self):
        return self._value_of_pertrade_commission

    def calculate_cost_percentage_terms(
        self, blocks_traded: float, block_price_multiplier: float, price: float
    ) -> float:
        """Total trade cost expressed as a fraction of the traded value."""
        cost_in_currency_terms = self.calculate_cost_instrument_currency(
            blocks_traded, block_price_multiplier=block_price_multiplier, price=price
        )
        value_per_block = price * block_price_multiplier
        total_value = blocks_traded * value_per_block
        cost_in_percentage_terms = cost_in_currency_terms / total_value
        return cost_in_percentage_terms

    def calculate_cost_instrument_currency(
        self, blocks_traded: float, block_price_multiplier: float, price: float
    ) -> float:
        """Slippage plus commission, in the instrument's own currency."""
        value_per_block = price * block_price_multiplier
        slippage = self.calculate_slippage_instrument_currency(
            blocks_traded, block_price_multiplier=block_price_multiplier
        )
        commission = self.calculate_total_commission(
            blocks_traded, value_per_block=value_per_block
        )
        return slippage + commission

    def calculate_total_commission(self, blocks_traded: float, value_per_block: float):
        ### YOU WILL NEED TO CHANGE THIS IF YOUR BROKER HAS A MORE COMPLEX STRUCTURE
        per_trade_commission = self.calculate_per_trade_commission()
        per_block_commission = self.calculate_cost_per_block_commission(blocks_traded)
        percentage_commission = self.calculate_percentage_commission(
            blocks_traded, value_per_block
        )
        return max([per_block_commission, per_trade_commission, percentage_commission])

    def calculate_slippage_instrument_currency(
        self, blocks_traded: float, block_price_multiplier: float
    ) -> float:
        return abs(blocks_traded) * self.price_slippage * block_price_multiplier

    def calculate_per_trade_commission(self):
        return self.value_of_pertrade_commission

    def calculate_cost_per_block_commission(self, blocks_traded):
        return abs(blocks_traded) * self.value_of_block_commission

    def calculate_percentage_commission(self, blocks_traded, price_per_block):
        trade_value = self.calculate_trade_value(blocks_traded, price_per_block)
        # Consistency fix: use the public property like the sibling methods do,
        # instead of reaching into the private attribute.
        return self.percentage_cost * trade_value

    def calculate_trade_value(self, blocks_traded, value_per_block):
        return abs(blocks_traded) * value_per_block
| ahalsall/pysystrade | sysobjects/instruments.py | instruments.py | py | 9,474 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "dataclasses.dataclass",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "syscore.genutils.flatten_list",
"line_number": 113,
"usage_type": "call"
},
{
"api_na... |
26038332576 | from __future__ import annotations
import pytest
@pytest.mark.parametrize(
    "variables, expected_data",
    [
        (
            {"name": r"pants_explorer\."},
            {
                "rules": [
                    {"name": "pants_explorer.server.graphql.rules.get_graphql_uvicorn_setup"},
                ]
            },
        ),
        (
            {"name": r"\.graphql\."},
            {
                "rules": [
                    {"name": "pants_explorer.server.graphql.rules.get_graphql_uvicorn_setup"},
                ]
            },
        ),
        # A zero limit must return no rules. (This case was previously
        # duplicated verbatim, running the identical test twice.)
        (
            {"limit": 0},
            {"rules": []},
        ),
    ],
)
def test_rules_query(
    schema, queries: str, variables: dict, expected_data: dict, context: dict
) -> None:
    """TestRulesQuery filters rules by name regex and honours `limit`."""
    actual_result = schema.execute_sync(
        queries, variable_values=variables, context_value=context, operation_name="TestRulesQuery"
    )
    assert actual_result.errors is None
    assert actual_result.data == expected_data
| pantsbuild/pants | pants-plugins/pants_explorer/server/graphql/query/rules_test.py | rules_test.py | py | 1,078 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "pytest.mark.parametrize",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 6,
"usage_type": "attribute"
}
] |
39254517776 | from django.db import models
from django.db.models import Case, Count, IntegerField, When
class CountryManager(models.Manager):
def aggregate_integration_statuses(self):
from proco.connection_statistics.models import CountryWeeklyStatus
return self.get_queryset().aggregate(
countries_joined=Count(Case(When(
last_weekly_status__integration_status__in=[
CountryWeeklyStatus.SCHOOL_MAPPED,
CountryWeeklyStatus.STATIC_MAPPED,
CountryWeeklyStatus.REALTIME_MAPPED,
], then=1),
output_field=IntegerField())),
countries_connected_to_realtime=Count(Case(When(
last_weekly_status__integration_status=CountryWeeklyStatus.REALTIME_MAPPED, then=1),
output_field=IntegerField())),
countries_with_static_data=Count(Case(When(
last_weekly_status__integration_status=CountryWeeklyStatus.STATIC_MAPPED, then=1),
output_field=IntegerField()),
),
countries_with_static_and_realtime_data=Count(Case(When(
last_weekly_status__integration_status= (CountryWeeklyStatus.STATIC_MAPPED or CountryWeeklyStatus.REALTIME_MAPPED), then=1),
output_field=IntegerField()) ,
),
)
| unicef/Project-Connect-BE | proco/locations/managers.py | managers.py | py | 1,353 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "django.db.models.Manager",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.Count",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dj... |
3580079410 | import json
from statistics import mean
import numpy as np
import os
from bokeh.plotting import output_file, figure, save
from bokeh.layouts import gridplot
from src.utils.tools import listdir, hash_append
def combined(ids, name, legend=None, y_range=(0, 900)):
    """Plot median score curves with interquartile bands for the given run ids.

    Reads every result folder registered in the module-level `settings`
    dict, aggregates scores per episode across seeds, and writes an HTML
    Bokeh plot named after `name` into ./res/plots.
    """
    summaries = {}
    episodes = []
    for key_ in settings:  # [key_ for key_ in settings if len(settings[key_]) > 1]:
        # The hyperparameter text contains one "id := <value>" line; extract it.
        id = [f.split(' := ') for f in key_.split('\n') if f.split(' := ')[0] == 'id'][0][1]
        if not any([id == id_ for id_ in ids]):
            continue
        rewards = {}
        for res_folder in settings[key_]:
            with open(os.path.join(res_folder, 'metrics.json'), 'r') as f:
                metrics = json.load(f)
            # Collect scores every 25 episodes, pooling across seeds/folders.
            for episode, score in zip(metrics['episodes'], metrics['t_scores']):
                if score is not None and episode % 25 == 0:
                    hash_append(rewards, episode, score)
        # NOTE(review): `episodes` keeps the value from the LAST matching id;
        # this assumes all plotted ids share the same episode grid — confirm.
        episodes = sorted(rewards.keys())
        episodes = episodes[:min(39, len(episodes))]
        quart1 = [np.percentile(rewards[ep], 25) for ep in episodes]
        median = [np.percentile(rewards[ep], 50) for ep in episodes]
        quart3 = [np.percentile(rewards[ep], 75) for ep in episodes]
        summaries[id] = (quart1, median, quart3)
    COLORS = ("royalblue", "orchid", "seagreen", "sienna", "darkkhaki")
    output_file(os.path.join('./res/plots', name.lower() + '.html'), title=name)
    # Plot height is scaled so the y-axis keeps a constant pixels-per-unit ratio.
    s = figure(width=720, height=int(360 * (y_range[1] - y_range[0]) / 900), title="Performance",
               x_axis_label='episodes', y_axis_label='score', y_range=y_range)
    for id in sorted(summaries.keys(), key=lambda x: ids.index(x)):
        ii = ids.index(id)
        # Median line plus a translucent interquartile band per run id.
        s.line(episodes[:len(summaries[id][1])], summaries[id][1], line_color=COLORS[ii], line_width=2, line_alpha=0.75,
               legend_label=legend[ids.index(id)])
        s.varea(episodes[:len(summaries[id][1])], summaries[id][0], summaries[id][2], fill_color=COLORS[ii],
                fill_alpha=0.25)
    # s.legend.location = "top_left"
    s.legend.location = "bottom_right"
    save(gridplot([[s]]))
# Scan the result folders of all three algorithms and group them by their
# hyperparameter settings (seed excluded), then render the comparison plots.
settings = {}
folders = []
for fname in ('gradient', 'imitation', 'planet'):
    for folder in listdir(f"../{fname}/res/results"):
        folders.append((folder, fname))
for results_folder, algorithm in folders:
    with open(os.path.join(results_folder, 'hyperparameters.txt'), 'r') as f:
        # Build a canonical settings string: drop the seed line and prefix
        # the id with the algorithm name so ids are unique across algorithms.
        s = ""
        for line in f.readlines():
            if 'seed' not in line:
                if line.split(' ')[0] == 'id':
                    id = f"id := {algorithm}_{line.split(' ')[-1]}"
                    print(id.replace('\n', ''))
                    s += id
                else:
                    s += line
    try:
        with open(os.path.join(results_folder, 'metrics.json'), 'r') as f:
            metrics = json.load(f)
        # Only keep runs that are long enough (or data-aggregation runs,
        # which are expected to be shorter).
        if len(metrics['steps']) >= 1000 or 'data_aggregation' in id:
            hash_append(settings, s, results_folder)
    except FileNotFoundError:
        # Run has no metrics yet; skip it.
        pass
# Render all comparison figures.
combined(['planet_lov', 'planet_vanilla'], 'planet_validation',
         legend=['With Latent Overshooting', 'Without Latent Overshooting'])
combined(['planet_lov', 'gradient_ar4'], 'planet_gradient',
         legend=['CEM Planner', 'Gradient-Based Optimization'])
combined(['planet_lov', 'imitation_data_aggregation', 'imitation_policy_aggregation'], 'planet_imitation',
         legend=['CEM Planner', 'Data Aggregation', 'Policy Aggregation'], y_range=(-200, 900))
combined(['planet_lov', 'planet_latency', 'planet_latency2', 'planet_latency4'], 'latency_planet',
         legend=['no latency', '1 timestep', '2 timesteps', '4 timesteps'])
combined(['gradient_ar4', 'gradient_ar4_lat1', 'gradient_ar4_lat2', 'gradient_ar4_lat4', 'gradient_ar4_lat8'], 'latency_gradient',
         legend=['no latency', '1 timestep', '2 timesteps', '4 timesteps', '8 timesteps'])
combined(['planet_lov', 'planet_ar4', 'planet_ar8', 'planet_ar12'], 'planet_cf',
         legend=['2 timesteps', '4 timesteps', '8 timesteps', '12 timesteps'])
combined(['gradient_ar2_4andreas', 'gradient_ar4', 'gradient_ar8', 'gradient_ar12'], 'gradient_cf',
         legend=['2 timesteps', '4 timesteps', '8 timesteps', '12 timesteps'])
| MatthijsBiondina/WorldModels | planet/plots.py | plots.py | py | 4,228 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "src.utils.tools.hash_append",
... |
41475591670 | import httpx
import asyncio
import logging
import discord
from discord.ext import tasks
from redbot.core import Config, commands
IDENTIFIER = 4175987634255572345 # Random to this cog
ishtakar_world_id = "3f1cd819f97e"
default_server = "Ishtakar"
realm_data_url = "https://nwdb.info/server-status/data.json"
default_guild = {
"default_realm": "Ishtakar",
"server_channel": None,
}
logger = logging.getLogger("red.psykzz.cogs")
logger.setLevel(logging.DEBUG)
class ServerStatus(commands.Cog):
    """Provide New World server status and queue information."""

    def __init__(self, bot):
        self.bot = bot
        self.config = Config.get_conf(
            self, identifier=IDENTIFIER, force_registration=True
        )
        self.config.register_guild(**default_guild)
        # Start empty so commands issued before the first refresh don't hit
        # an AttributeError on self.queue_data.
        self.queue_data = {}
        self.refresh_queue_data.start()

    def cog_unload(self):
        self.refresh_queue_data.cancel()

    @tasks.loop(minutes=5.0)
    async def refresh_queue_data(self):
        """Periodic task: download fresh queue data, then update channels."""
        logger.info("Starting queue task")
        try:
            data = await self.get_queue_data(worldId=None)
            if data:
                # Keep the last good snapshot if the download failed.
                self.queue_data = data
            await self.update_monitor_channels()
        except Exception:
            logger.exception("Error in task")
        logger.info("Finished queue task")

    async def get_queue_data(self, worldId=ishtakar_world_id):
        """Fetch server data from nwdb.info.

        Returns a dict keyed by world name, or None on any failure.
        """
        try:
            extra_qs = f"worldId={worldId}" if worldId else ""
            response = await http_get(
                f"https://nwdb.info/server-status/servers.json?{extra_qs}"
            )
            # http_get returns None when all retries fail; treat that the
            # same as an unsuccessful payload.
            if not response or not response.get("success"):
                logger.error("Failed to get server status data")
                return
            servers = response.get("data", {}).get("servers", [])
            return {
                self.parse_server(server).get("worldName"): self.parse_server(server)
                for server in servers
            }
        except Exception:
            logger.exception("Exception while downloading new data")

    def parse_server(self, server):
        """Map the positional server record from the API to a named dict."""
        (
            connectionCountMax,
            connectionCount,
            queueCount,
            queueTime,
            worldName,
            worldSetName,
            region,
            status,
            active,
            worldId,
            a,
            b,
        ) = server
        return {
            "connectionCountMax": connectionCountMax,
            "connectionCount": connectionCount,
            "queueCount": queueCount,
            "queueTime": queueTime,
            "worldName": worldName,
            "worldSetName": worldSetName,
            "region": region,
            "status": status,
            "active": active,
            "worldId": worldId,
            "a-val": a,
            "b-val": b,
        }

    async def get_guild_monitor_channel(self, guild):
        """Return the configured monitor channel for `guild`, or None."""
        guild_config = self.config.guild(guild)
        channel_id = await guild_config.server_channel()
        # Check if the channel is valid
        if not channel_id or channel_id == "0":
            # Fix: was logging.warn() (deprecated alias on the root logger);
            # use this cog's logger instead.
            logger.warning(f"Skipping {guild}...")
            return
        # If the channel doesn't exist anymore, reset configuration and return
        channel = self.bot.get_channel(channel_id)
        if not channel:
            await guild_config.server_channel.set(None)
            return
        return channel

    async def update_guild_channel(self, guild):
        """Rename the guild's monitor channel to reflect the realm status."""
        logger.info(f"Updating guild {guild}...")
        channel = await self.get_guild_monitor_channel(guild)
        if not channel:
            return
        # BUG FIX: realm_name was previously undefined in this method (it was
        # only read inside get_guild_monitor_channel), raising NameError
        # before any channel update could happen.
        realm_name = await self.config.guild(guild).default_realm()
        server_status = await self.get_server_status(realm_name)
        if not server_status:
            return
        new_channel_name = server_status.split("-")[1]
        # Avoid updates (and API calls) if the name already matches.
        if channel.name == new_channel_name:
            return
        await channel.edit(name=new_channel_name)

    async def update_monitor_channels(self):
        """Update the monitor channel of every guild the bot is in."""
        # BUG FIX: the coroutine was previously called without await, so it
        # was never actually executed.
        for guild in self.bot.guilds:
            await self.update_guild_channel(guild)

    async def get_server_status(self, server_name, data=None):
        """Build the human-readable one-line status for `server_name`."""
        if not data:
            data = self.queue_data
        server_data = (data or {}).get(server_name)
        if not server_data:
            return "No server data available - Loading data..."
        online = server_data.get("connectionCount", -1)
        max_online = server_data.get("connectionCountMax", -1)
        in_queue_raw = int(server_data.get("queueCount", -1))
        # Tiny queue values (<=1) are reported by the API even when empty.
        in_queue = in_queue_raw if in_queue_raw > 1 else 0
        status = server_data.get("status", -1)
        if status == 4:
            return f"{server_name}: {online}/{max_online} Offline - Server maintenance"
        return f"{server_name}: {online}/{max_online} Online - {in_queue} in queue."

    async def get_world_id(self, server_name):
        """Look up the API world id for a server name, if known."""
        if not self.queue_data:
            return
        server_data = self.queue_data.get(server_name)
        if not server_data:
            return
        return server_data.get("worldId")

    @commands.command()
    async def queue(self, ctx, server: str = None):
        "Get current queue information"
        if ctx.guild and server is None:
            guild_config = self.config.guild(ctx.guild)
            server = await guild_config.default_realm()
        if not server:
            await ctx.send("You must provide a server in DMs. `.queue <server>`")
            return
        worldId = await self.get_world_id(server)
        data = await self.get_queue_data(worldId=worldId)
        msg = await self.get_server_status(server, data)
        await ctx.send(msg)

    @commands.command()
    @commands.guild_only()
    @commands.admin_or_permissions(manage_channels=True)
    async def monitor(self, ctx, voice_channel: discord.VoiceChannel = None):
        "Start updating a channel with the current realm status"
        guild_config = self.config.guild(ctx.guild)
        if voice_channel is None:
            # BUG FIX: previously voice_channel.permissions_for() ran before
            # the None case was handled, crashing when disabling the monitor.
            await guild_config.server_channel.set(None)
            await ctx.send("Disabled monitor channel.")
            return
        # Check that the bot can manage the chosen channel.
        bot_perms = voice_channel.permissions_for(ctx.me)
        if not bot_perms.manage_channels:
            await ctx.send(f'I require the "Manage Channels" permission for {voice_channel.mention} to execute that command.')
            return
        await guild_config.server_channel.set(voice_channel.id)
        await ctx.send(f"Setup {voice_channel} as the monitor channel.")

    @commands.command()
    @commands.guild_only()
    @commands.bot_has_permissions(manage_channels=True)
    @commands.admin_or_permissions(manage_channels=True)
    async def forcemonitor(self, ctx):
        "Force an update of the monitor voice channel with the current realm status"
        voice_channel = await self.get_guild_monitor_channel(ctx.guild)
        if not voice_channel:
            # Guard: no channel configured (previously an AttributeError).
            await ctx.send("No monitor channel is configured.")
            return
        bot_perms = voice_channel.permissions_for(ctx.me)
        if not bot_perms.manage_channels:
            await ctx.send(f'I require the "Manage Channels" permission for {voice_channel.mention} to execute that command.')
            return
        await self.update_guild_channel(ctx.guild)
        await ctx.send("Forced monitor channel update.")

    @commands.command()
    @commands.guild_only()
    @commands.admin_or_permissions(manage_channels=True)
    async def queueset(self, ctx, server: str = None):
        "Set the default server for this discord server"
        guild_config = self.config.guild(ctx.guild)
        if server is None:
            realm = await guild_config.default_realm()
            await ctx.send(f"Current server: '{realm}'.")
            return
        server_data = self.queue_data.get(server)
        if not server_data:
            await ctx.send(f"Can't find '{server}' in the server list.")
            return
        await guild_config.default_realm.set(server)
        await ctx.send(f"Server updated to '{server}'.")
async def http_get(url):
    """GET `url` and return the parsed JSON body, or None after 3 failures.

    httpx has no built-in retry support, so this wraps the request in a
    small retry loop with a 5-second pause between attempts.
    """
    max_attempts = 3
    attempt = 0
    while max_attempts > attempt:
        try:
            async with httpx.AsyncClient() as client:
                r = await client.get(url, headers={"user-agent": "psykzz-cogs/1.0.0"})
                if r.status_code == 200:
                    return r.json()
                else:
                    attempt += 1
                    await asyncio.sleep(5)
        # Fix: use the public exception names; httpx._exceptions is a private
        # module and was removed in newer httpx releases.
        except (httpx.ConnectTimeout, httpx.HTTPError):
            attempt += 1
            await asyncio.sleep(5)
| psykzz/cogs | nw_server_status/server_status.py | server_status.py | py | 8,630 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "redbot.core.commands.Cog",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "re... |
15366112732 | import numpy as np
import os
try:
import welib.fastlib.fastlib as fastlib
except:
import fastlib
def CPLambdaExample():
    """ Example to determine the CP-CT Lambda Pitch matrices of a turbine.
    This scrip uses the function CPCT_LambdaPitch which basically does the same as ParametricExample
    above.
    """
    ref_dir = 'NREL5MW/'  # Folder where the fast input files are located (will be copied)
    main_file = 'Main_Onshore_OF2.fst'  # Main file in ref_dir, used as a template
    FAST_EXE = 'NREL5MW/OpenFAST2_x64s_ebra.exe'  # Location of a FAST exe (and dll)

    # --- Computing CP and CT matrices for range of lambda and pitches
    Lambda = np.linspace(0.1, 10, 3)
    Pitch = np.linspace(-10, 10, 4)
    CP, CT, Lambda, Pitch, MaxVal, result = fastlib.CPCT_LambdaPitch(
        ref_dir, main_file, Lambda, Pitch, fastExe=FAST_EXE, ShowOutputs=False, nCores=4, TMax=10)
    print('CP max', MaxVal)

    # --- Plotting matrix of CP values
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib import cm
    import matplotlib.pyplot as plt
    fig = plt.figure()
    # Fix: fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and
    # removed in 3.6; add_subplot is the supported way to create 3D axes.
    ax = fig.add_subplot(projection='3d')
    LAMBDA, PITCH = np.meshgrid(Lambda, Pitch)
    # Clip negative power coefficients for a cleaner surface plot.
    CP[CP < 0] = 0
    surf = ax.plot_surface(LAMBDA, PITCH, np.transpose(CP), cmap=cm.coolwarm,
                           linewidth=0, antialiased=True, alpha=0.8)
    # Mark the optimum (lambda, pitch) operating point.
    ax.scatter(MaxVal['lambda_opt'], MaxVal['pitch_opt'], MaxVal['CP_max'], c='k', marker='o', s=20)
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()
if __name__=='__main__':
    # Run the example when executed as a script.
    CPLambdaExample()
| rhaghi/welib | welib/fastlib/_examples/Example_CPLambdaPitch.py | Example_CPLambdaPitch.py | py | 1,520 | python | en | code | null | github-code | 6 | [
{
"api_name": "numpy.linspace",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "fastlib.CPCT_LambdaPitch",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyp... |
72531866429 | from typing import Final
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
from ._common import column_created_datetime, column_modified_datetime
from .base import metadata
# Intentionally includes the term "SECRET" to avoid leaking this value on a public domain
VENDOR_SECRET_PREFIX: Final[str] = "OSPARC_VARIABLE_VENDOR_SECRET_"
# One row per (service key, base version, product); see the composite PK below.
services_vendor_secrets = sa.Table(
    "services_vendor_secrets",
    #
    # - A secret is an environment value passed to the service at runtime
    # - A vendor can associate secrets (e.g. a license code) to any of the services it owns
    # - secrets_map
    #   - keys should be prefixed with OSPARC_VARIABLE_VENDOR_SECRET_ (can still normalize on read)
    #   - values might be encrypted
    #
    metadata,
    sa.Column(
        "service_key",
        sa.String,
        doc="A single environment is allowed per service",
    ),
    sa.Column(
        "service_base_version",
        sa.String,
        doc="Defines the minimum version (included) from which these secrets apply",
    ),
    sa.Column(
        "product_name",
        sa.String,
        sa.ForeignKey(
            "products.name",
            name="fk_services_name_products",
            onupdate="CASCADE",
            ondelete="CASCADE",
        ),
        # NOTE: since this is part of the primary key this is required
        # NOTE: an alternative would be to not use this as a primary key
        server_default="osparc",
        doc="Product Identifier",
    ),
    sa.Column(
        "secrets_map",
        JSONB,
        nullable=False,
        server_default=sa.text("'{}'::jsonb"),
        doc="Maps OSPARC_VARIABLE_VENDOR_SECRET_* identifiers to a secret value (could be encrypted) "
        "that can be replaced at runtime if found in the compose-specs",
    ),
    # TIME STAMPS ----
    column_created_datetime(timezone=True),
    column_modified_datetime(timezone=True),
    # CONSTRAINTS --
    # Secrets are tied to a concrete (key, version) row in services_meta_data.
    sa.ForeignKeyConstraint(
        ["service_key", "service_base_version"],
        ["services_meta_data.key", "services_meta_data.version"],
        onupdate="CASCADE",
        ondelete="CASCADE",
        # NOTE: this might be a problem: if a version in the metadata is deleted,
        # all versions above will take the secret_map for the previous one.
    ),
    sa.PrimaryKeyConstraint(
        "service_key",
        "service_base_version",
        "product_name",
        name="services_vendor_secrets_pk",
    ),
)
| ITISFoundation/osparc-simcore | packages/postgres-database/src/simcore_postgres_database/models/services_environments.py | services_environments.py | py | 2,469 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "typing.Final",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Table",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "base.metadata",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "sqlalchemy.Column",
... |
1921452426 | import pyomo.environ as pe
from lci import LifeCycleInventory
from superstructure import Superstructure
from utils.properties import molar_weight
from utils.utils import sum_rule
from utils.save_results import ResultManager
from utils.solve_model import Solver
import time as time
import pickle
def range_len(start, stop, len):
    """Return `len` evenly spaced values from `start` to `stop` inclusive.

    Pure-Python analogue of numpy.linspace. `len` must be >= 2 (the step
    is (stop - start) / (len - 1), as in the original implementation).
    Note: the parameter name `len` shadows the builtin inside this function;
    it is kept for backward compatibility with keyword callers.
    """
    step = (stop - start) / (len - 1)
    # BUG FIX: the original accumulated `i += step` in a `while i < stop`
    # loop; float rounding could leave i just below `stop` after len-1
    # steps, yielding len+1 values (e.g. range_len(0, 1, 11) gave 12).
    # Computing each point from its index guarantees exactly `len` values.
    points = [start + k * step for k in range(len - 1)]
    points.append(stop)
    return points
def rep_solve_tcm(x):
    """Build and solve the pure-LP "TCM" model for one electricity scenario.

    Couples the fixed steel-mill streams (coke-oven gas, blast-furnace gas,
    electricity and heat) to the life-cycle-inventory LP of the chemical
    industry and solves it with GLPK.

    Parameters
    ----------
    x : user-defined electricity impact factor forwarded to the
        'Electricity user-defined' scenario.

    Returns
    -------
    dict
        ``{'x': x, 'z': <re-scaled objective value>}``.
    """
    lci = LifeCycleInventory('millgas2what')
    lci.model = pe.ConcreteModel('millgas2what')
    scale = 1000000000  # all flows are scaled down by 1e9 for numerical stability
    # Steel-mill streams as fixed parameters
    'Ströme des Stahlwerks als Parameter'
    lci.model.cog_steelMill = pe.Param(initialize=39700000000 / scale)  # in kg, scaled
    lci.model.bfg_steelMill = pe.Param(initialize=1740550000000 / scale)  # in kg, scaled
    lci.model.electricity_steelMill = pe.Param(initialize=-2298568545000 / scale)
    lci.model.heat_steelMill = pe.Param(initialize=-894991475000 / scale)
    # Definition of the connection points between mill and chemical industry
    'Definition der Verbindungspunkte'
    connect_list = ['Mill gas COG [kg]', 'Mill gas BFG/BOFG [kg]', 'Electricity [MJ]', 'Heat [MJ]']
    connector_lp = {}
    lci.model.connect_lp = pe.Var(connect_list, initialize=0, bounds=(-10000, 10000))
    for c in connect_list:
        connector_lp[c] = lci.model.connect_lp[c]
    # Overall balances: mill output must equal what the LP side takes up
    'Gesamtbilanzen'
    lci.model.cog_balance = pe.Constraint(
        expr=0 == - lci.model.cog_steelMill + lci.model.connect_lp['Mill gas COG [kg]'])
    lci.model.bfg_balance = pe.Constraint(
        expr=0 == - lci.model.bfg_steelMill + lci.model.connect_lp['Mill gas BFG/BOFG [kg]'])
    lci.model.electricity_balance = pe.Constraint(
        expr=0 == - lci.model.electricity_steelMill + lci.model.connect_lp['Electricity [MJ]'])
    lci.model.heat_balance = pe.Constraint(expr=0 == - lci.model.heat_steelMill + lci.model.connect_lp['Heat [MJ]'])
    # From here on the model is assembled via the LCI class
    'Ab hier wird das Modell mit der LCI-Klasse zusammengebaut'
    lci.import_from_excel('Life Cycle Inventory_v19.xlsx', 'A-Matrix', 'End of life')
    lci.set_up_lp(scale)
    lci.import_connector(connector_lp)  # deactivating this line restricts the model to the chemical industry only
    # lci.activate_scenario('Electricity Today')
    # lci.activate_scenario('Electricity Best Case')
    lci.activate_scenario('Electricity user-defined', x)
    lci.activate_scenario('Separation GDP')  # switches off all linear mill-gas separation processes
    # lci.deactivate_process('CARBON DIOXIDE as emission to air')
    # lci.deactivate_process('AMMONIA FROM NATURAL GAS BY STEAM REFORMING BY ICI "AMV" PROCESS incl CO2 capture')
    lci.activate_scenario('CCU high TRL only')
    # lci.activate_scenario('No high TRL CCU')
    lci.deactivate_process('CARBON DIOXIDE - ammonia plant')
    lci.deactivate_process('Water gas shift reaction')
    # cap ammonia production at 228.9 (scaled units)
    lci.lp.ammonia_constraint = pe.Constraint(
        expr=lci.lp.s['AMMONIA FROM NATURAL GAS BY STEAM REFORMING BY ICI "AMV" PROCESS incl CO2 capture'] - 228.9 <= 0)
    lci.deactivate_process('TDI - neue Route_v2 exklusive Methylformate production')
    lci.deactivate_process('Polycarbonate - neue Route')
    lci.deactivate_process('Methylformate productionaus TDI neue Route v2')
    lci.construct_demand_constraints()
    lci.construct_objective()
    lci.model.add_component('lp', lci.lp)
    # Solve and collect results
    'Lösen und Ergebnisse darstellen'
    # lci.model.pprint()
    solver = Solver()
    solver.solve_lp(lci, 'glpk')
    # solver.test_feasibility()  # would need adapting to the new demand constraints
    results = {'x': x, 'z': pe.value(lci.objective) * lci.scale}
    return results
def rep_solve_gdp(x):
    """Build and solve the combined GDP superstructure + LCI model.

    Constructs the mill-gas separation flowsheet (COG and B(O)FG trains,
    including the POR/CDR methane-reforming and CCA/PSA CO2-capture
    disjunctions), couples it to the life-cycle-inventory LP via connector
    variables, and solves the resulting GDP.

    Parameters
    ----------
    x : user-defined electricity impact factor forwarded to the
        'Electricity user-defined' scenario.

    Returns
    -------
    dict with the objective ('z'), disjunct indicator values ('i'),
    connector stream values ('c'), recoveries ('rec'), heat/work duties
    ('q'/'w'), tracked LCI process levels ('s') and COG/B(O)FG product
    breakdowns ('cog'/'bfg'). On an infeasible solve (ValueError) the
    objective is reported as 0 and the dicts stay empty.
    """
    # --- Separation System / Flowsheet construction ---
    # COG Separation
    s = Superstructure("Combined Model")
    s.initial_stream(31, 0, 300, 1, 'COG')
    s.create_unit('Compressor', 'C3', 31, 32)
    s.create_unit('PSA', 'PSA3', 32, 33, 34, 'H2')
    s.create_unit('Splitter', 'S5', 34, 35, 36)
    s.create_streams(37)
    s.model.p_35 = pe.Constraint(expr=s.model.p[37] == 1)
    s.model.t_35 = pe.Constraint(expr=s.model.t[37] == 300)
    s.model.cc_35 = pe.Constraint(expr=1 == sum_rule(s.streams[37].y, s.streams[37].substances))
    s.mix_streams('M3', 36, 37, 38)
    s.create_unit('Heat Exchanger', 'HE5', 38, 39)
    # Disjunction: partial oxidation (POR) vs. CO2 dry reforming (CDR)
    s.create_disjunct_reactor('methane_reforming', 'por', 'R2_POR', 39, 40, 'POR')
    s.create_disjunct_reactor('methane_reforming', 'cdr', 'R2_CDR', 39, 40, 'CDR')
    s.model.cdr.y_37 = pe.Constraint(expr=s.model.y[37, 'CO2'] == 1)
    s.model.cdr.add = pe.Constraint(expr=s.model.n[37] == 1 * s.model.n[36] * s.model.y[36, 'CH4'])
    s.model.cdr.q_constr = pe.Constraint(expr=s.model.q['R2_POR'] == 0)
    s.model.por.y_37 = pe.Constraint(expr=s.model.y[37, 'O2'] == 1)
    s.model.por.add = pe.Constraint(expr=s.model.n[37] == 0.48 * s.model.n[36] * s.model.y[36, 'CH4'])
    s.model.por.q_constr = pe.Constraint(expr=s.model.q['R2_CDR'] == 0)
    # B(O)FG Separation
    s.initial_stream(1, 0, 300, 1, 'B(O)FG')
    s.create_unit('Heat Exchanger', 'HE1', 1, 2)
    s.create_unit('TSA', 'TSA1', 2, 3, 4, 'CO')
    s.create_unit('Splitter', 'S1', 3, 20, 21)
    s.initial_stream(22, 0, 400, 1, {'H2O': 1})
    s.model.add_h2o = pe.Constraint(expr=s.model.n[22] == s.model.n[21])
    s.mix_streams('M1', 21, 22, 23)
    s.create_unit('Heat Exchanger', 'HE3', 23, 24)
    s.create_reactor('R1', 24, 25, 'WGSR')
    s.mix_streams('M2', 25, 4, 5)
    s.create_unit('Heat Exchanger', 'HE4', 5, 6)
    s.create_unit('Compressor', 'C1', 6, 7)
    # Disjunction: chemical CO2 absorption (CCA) vs. PSA for CO2 capture
    s.create_disjunct_unit('co2_bofg', 'cca_1', 'CCA', 'CCA1', 7, 8, 9, 'CO2')
    s.create_disjunct_unit('co2_bofg', 'psa_1', 'PSA', 'PSA1', 7, 8, 9, 'CO2')
    s.model.psa_1.q_cca = pe.Constraint(expr=s.model.q['CCA1'] == 0)
    s.model.psa_1.t6_constraint = pe.Constraint(expr=s.model.t[6] >= 273.15 + 30)
    # s.model.t6_constraint = pe.Constraint(expr=s.model.t[6] >= 273.15 + 30)
    # s.create_unit('PSA', 'PSA1', 7, 8, 9, 'CO2')
    # s.create_unit('CCA', 'CCA1', 7, 8, 9, 'CO2')
    s.create_unit('Compressor', 'C2', 9, 10)
    s.create_unit('PSA', 'PSA2', 10, 11, 12, 'H2')
    scale = 1000000000  # all flows are scaled down by 1e9 for numerical stability
    # Steel-mill streams as fixed parameters
    'Ströme des Stahlwerks als Parameter'
    s.model.cog_steelMill = pe.Param(initialize=39700000000 / scale)  # in kg, scaled
    s.model.bfg_steelMill = pe.Param(initialize=1740550000000 / scale)  # in kg, scaled
    s.model.electricity_steelMill = pe.Param(initialize=-2298568545000 / scale)
    s.model.heat_steelMill = pe.Param(initialize=-894991475000 / scale)
    s.connect_list = ['Mill gas COG [kg]', 'Hydrogen (H2) [kg]', 'Electricity [MJ]', 'Heat [MJ]', 'SYNTHESIS GAS (1:1)',
                      'SYNTHESIS GAS (2:1)', 'Carbon dioxide (CO2) [kg]', 'Methane (CH4) [kg]', 'Oxygen (O2) [kg]',
                      'Mill gas BFG/BOFG [kg]', 'Carbon monoxide (CO) [kg]', 'CO2 to atm [kg]', 'STEAM [kg]'
                      ]
    connector_lp = {}
    s.model.connect_lp = pe.Var(s.connect_list, initialize=0, bounds=(-10000, 10000))
    for c in s.connect_list:
        connector_lp[c] = s.model.connect_lp[c]
    # Here one can limit the share of mill gas that may be utilized:
    # x = 0.0001  # CAUTION: x = 0 makes the model infeasible
    # s.model.force_cog = pe.Constraint(expr=s.model.n[31] <= x * s.model.cog_steelMill / molar_weight('COG'))
    # s.model.force_bfg = pe.Constraint(expr=s.model.n[1] <= x * s.model.bfg_steelMill / molar_weight('B(O)FG'))
    s.model.cog_balance = pe.Constraint(
        expr=0 == - s.model.cog_steelMill + s.model.connect_lp['Mill gas COG [kg]'] + s.model.n[31] * molar_weight(
            'COG'))
    s.model.bfg_balance = pe.Constraint(
        expr=0 == - s.model.bfg_steelMill + s.model.connect_lp['Mill gas BFG/BOFG [kg]'] + s.model.n[1] * molar_weight(
            'B(O)FG'))
    s.model.el_balance = pe.Constraint(
        expr=0 == - s.model.connect_lp['Electricity [MJ]'] - sum_rule(s.model.w,
                                                                      s.model.workSet) + s.model.electricity_steelMill)
    s.model.heat_balance = pe.Constraint(
        expr=0 == - s.model.connect_lp['Heat [MJ]'] - sum_rule(s.model.q, s.model.heatSet) + s.model.heat_steelMill)
    s.model.co2_atm_balance = pe.Constraint(
        expr=0 == s.model.connect_lp['CO2 to atm [kg]'] - s.model.n[12] * molar_weight('CO2') * (
                s.model.y[12, 'CO2'] + s.model.y[12, 'CO']))
    # s.model.co2_atm_balance = pe.Constraint(expr=0 == s.model.connect_lp['CO2 to atm [kg]'] - s.model.n[12] * molar_weight('CO2') * (s.model.y[12, 'CO2'] + s.model.y[12, 'CO']) * 0.05)
    # s.model.co2_atm_balance = pe.Constraint(expr=0 == s.model.connect_lp['CO2 to atm [kg]'])
    s.model.steam_balance = pe.Constraint(
        expr=0 == s.model.connect_lp['STEAM [kg]'] + s.model.n[22] * molar_weight('H2O'))
    s.model.co_balance = pe.Constraint(
        expr=0 == s.model.connect_lp['Carbon monoxide (CO) [kg]'] - s.model.n[20] * molar_weight('CO'))
    s.model.ch4_balance = pe.Constraint(
        expr=s.model.connect_lp['Methane (CH4) [kg]'] == s.model.n[35] * s.model.y[35, 'CH4'] * molar_weight('CH4'))
    s.model.h2_balance = pe.Constraint(
        expr=s.model.connect_lp['Hydrogen (H2) [kg]'] == s.model.n[33] * molar_weight('H2') + s.model.n[
            11] * molar_weight('H2'))
    # s.model.cdr.syngas11_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (1:1)'] == s.model.n[40] * molar_weight({'H2': 0.5, 'CO': 0.5}))  # N2 would be counted as syngas
    s.model.cdr.syngas11_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (1:1)'] == s.model.n[40] * (
            s.model.y[40, 'H2'] * molar_weight('H2') + s.model.y[40, 'CO'] * molar_weight('CO')))
    s.model.por.syngas11_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (1:1)'] == 0)
    # s.model.por.syngas21_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (2:1)'] == s.model.n[40] * molar_weight({'H2': 0.67, 'CO': 0.33}))  # N2 would be counted as syngas
    s.model.por.syngas21_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (2:1)'] == s.model.n[40] * (
            s.model.y[40, 'H2'] * molar_weight('H2') + s.model.y[40, 'CO'] * molar_weight('CO')))
    s.model.cdr.syngas21_balance = pe.Constraint(expr=s.model.connect_lp['SYNTHESIS GAS (2:1)'] == 0)
    s.model.cdr.co2_balance = pe.Constraint(
        expr=s.model.connect_lp['Carbon dioxide (CO2) [kg]'] == - s.model.n[37] * molar_weight('CO2') + s.model.n[
            8] * molar_weight('CO2'))
    s.model.por.co2_balance = pe.Constraint(
        expr=s.model.connect_lp['Carbon dioxide (CO2) [kg]'] == s.model.n[8] * molar_weight('CO2'))
    # s.model.cdr.co2_balance = pe.Constraint(expr=s.model.connect_lp['Carbon dioxide (CO2) [kg]'] == 0)
    # s.model.por.co2_balance = pe.Constraint(expr=s.model.connect_lp['Carbon dioxide (CO2) [kg]'] == 0)
    s.model.por.o2_balance = pe.Constraint(
        expr=s.model.connect_lp['Oxygen (O2) [kg]'] == - s.model.n[37] * molar_weight('O2'))
    s.model.cdr.o2_balance = pe.Constraint(expr=s.model.connect_lp['Oxygen (O2) [kg]'] == 0)
    # s.model.no_co2_sep = pe.Constraint(expr=s.model.n[8] * molar_weight('CO2') == 147)
    # s.model.no_h2_sep = pe.Constraint(expr=s.model.n[11] == 0)
    # --- Set up TCM (the LCI side, same configuration as rep_solve_tcm) ---
    lci = LifeCycleInventory('millgas2what')
    lci.import_from_excel('Life Cycle Inventory_v19.xlsx', 'A-Matrix', 'End of life')
    lci.set_up_lp(scale)
    lci.import_connector(connector_lp)  # deactivating this line restricts the model to the chemical industry only
    # lci.activate_scenario('Electricity Today')
    # lci.activate_scenario('Electricity Best Case')
    lci.activate_scenario('Electricity user-defined', x)
    lci.activate_scenario('Separation GDP')  # switches off all linear mill-gas separation processes
    # lci.deactivate_process('CARBON DIOXIDE as emission to air')
    # lci.deactivate_process('AMMONIA FROM NATURAL GAS BY STEAM REFORMING BY ICI "AMV" PROCESS incl CO2 capture')
    lci.activate_scenario('CCU high TRL only')
    lci.deactivate_process('CARBON DIOXIDE - ammonia plant')
    lci.deactivate_process('Water gas shift reaction')
    lci.lp.ammonia_constraint = pe.Constraint(
        expr=lci.lp.s['AMMONIA FROM NATURAL GAS BY STEAM REFORMING BY ICI "AMV" PROCESS incl CO2 capture'] - 228.9 <= 0)
    lci.deactivate_process('TDI - neue Route_v2 exklusive Methylformate production')
    lci.deactivate_process('Polycarbonate - neue Route')
    lci.deactivate_process('Methylformate productionaus TDI neue Route v2')
    lci.construct_demand_constraints()
    lci.construct_objective()
    s.import_lci(lci)
    s.create_disjunctions()
    # --- Solve overall model ---
    solver = Solver()
    # --- Save values in dicts ---
    ind_var = {}
    c_val = {}
    rec_val = {}
    heat_val = {}
    el_val = {}
    s_dict = {}
    cog_dict = {}
    bfg_dict = {}
    # LCI processes whose activity levels are tracked in the results
    s_dict_list = ['CARBON DIOXIDE as emission to air', 'INC Carbon monoxide',
                   'treatment of blast furnace gas, in power plant, DE', 'Verbrennung COG in BHKW',
                   'AMMONIA FROM NATURAL GAS BY STEAM REFORMING BY ICI "AMV" PROCESS incl CO2 capture',
                   'CARBON DIOXIDE - air capture', 'Electricity, user-defined']
    try:
        solver.solve_gdp(s)
        obj = pe.value(s.objective)
        for d in s.disjuncts.keys():
            ind_var[d] = pe.value(s.disjuncts[d].indicator_var)
        for c in s.connect_list:
            c_val[c] = pe.value(connector_lp[c]) * lci.scale
        for c in s.model.zetaSet:
            rec_val[c] = pe.value(s.model.zeta[c])
        for c in s.model.heatSet:
            heat_val[c] = pe.value(s.model.q[c]) * lci.scale
        for c in s.model.workSet:
            el_val[c] = pe.value(s.model.w[c]) * lci.scale
        for i in s_dict_list:
            s_dict[i] = pe.value(s.model.utilization.s[i]) * lci.scale
        # Product breakdowns for the COG / B(O)FG diagrams
        'Diagramme für COG / B(O)FG'
        cog_dict['H2'] = pe.value(s.model.n[33]) * molar_weight({'H2': 1}) * lci.scale
        cog_dict['N2'] = pe.value(s.model.n[35]) * pe.value(s.model.y[35, 'N2']) * molar_weight({'N2': 1}) * lci.scale
        bfg_dict['H2'] = pe.value(s.model.n[11]) * molar_weight({'H2': 1}) * lci.scale
        # BUG FIX: this line previously wrote to cog_dict['N2'] again,
        # clobbering the COG value set two lines above and never filling
        # bfg_dict['N2']. Stream 12 belongs to the B(O)FG train, so the
        # nitrogen share goes into bfg_dict.
        bfg_dict['N2'] = pe.value(s.model.n[12]) * pe.value(s.model.y[12, 'N2']) * molar_weight({'N2': 1}) * lci.scale
        bfg_dict['CO2'] = pe.value(s.model.n[8]) * molar_weight({'CO2': 1}) * lci.scale
    except ValueError:
        # infeasible / failed solve: report a zero objective and empty dicts
        obj = 0
    return {'x': x, 'z': obj * lci.scale, 'i': ind_var, 'c': c_val, 'rec': rec_val, 'q': heat_val, 'w': el_val,
            's': s_dict, 'cog': cog_dict, 'bfg': bfg_dict}
# Sweep the user-defined electricity impact factor over 30 evenly spaced
# values and solve both model variants (pure LP "TCM" and the GDP
# superstructure) at every point, keyed by the sweep index.
x_vector = range_len(0.002, 0.2, 30)
results_tcm = {}
results_gdp = {}
for n, x in enumerate(x_vector):
    results_tcm[n] = rep_solve_tcm(x)
    print('TCM solved', n + 1)
    results_gdp[n] = rep_solve_gdp(x)
    print('GDP solved', n + 1)
results = {'tcm': results_tcm, 'gdp': results_gdp}
def save_object(obj, filename):
    """Serialize *obj* to *filename* with pickle, overwriting any existing file."""
    with open(filename, 'wb') as sink:
        pickle.dump(obj, sink, protocol=pickle.HIGHEST_PROTOCOL)
# name = input("enter file name")
# Persist the full sweep results (both model variants) to disk for later plotting.
save_object(results, '20200825_v19_tcm')
| jkleinekorte/millgas2what | src/repeated_solving_el_impact.py | repeated_solving_el_impact.py | py | 15,332 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "lci.LifeCycleInventory",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "lci.model",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pyomo.environ.ConcreteModel",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pyo... |
'''
Detect faces in an image with an OpenCV Haar cascade and display the result.

1. import opencv
2. load image
3. load the Haar-cascade face model
4. convert the image to grayscale (the cascade operates on gray images)
5. detect faces and mark them with green rectangles
6. show the annotated image in a window until a key is pressed
7. close the window
'''
import numpy as np
import cv2

# print(cv2.__version__)

# Load image
img = cv2.imread("./images/ufc.jpeg")

# Load model
face_cascade = cv2.CascadeClassifier(
    './data/haarcascade_frontalface_default.xml')

# Adjust image gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces
faces = face_cascade.detectMultiScale(
    gray,
    # scaleFactor=1.15,
    # minNeighbors=5,
    # minSize=(5, 5),
    # flags = cv2.HAAR_SCALE_IMAGE
)

# Mark faces
for (x, y, w, h) in faces:
    # image, top-left corner, bottom-right corner, color (BGR), line width
    # BUG FIX: bottom-right corner must use the height h, not the width w
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

# Create window
cv2.namedWindow("faceImage")
# Show image
cv2.imshow('ufc show', img)
# Block until a key is pressed
cv2.waitKey(0)
# BUG FIX: the API name is destroyAllWindows (plural); destroyAllWindow
# does not exist and raised AttributeError
cv2.destroyAllWindows()
| benjaminhuanghuang/opencv-study | cv-find-face.py | cv-find-face.py | py | 900 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
... |
36849582573 | import os
os.chdir("/home/ghiggi/Projects/deepsphere-weather")
import sys
sys.path.append("../")
import shutil
import argparse
import dask
import glob
import time
import torch
import zarr
import numpy as np
import xarray as xr
## DeepSphere-Weather
from modules.utils_config import read_config_file
from modules.utils_config import get_model_settings
from modules.utils_config import get_training_settings
from modules.utils_config import get_ar_settings
from modules.utils_config import get_dataloader_settings
from modules.utils_config import check_same_dict
from modules.utils_config import get_pytorch_model
from modules.utils_config import set_pytorch_settings
from modules.utils_config import load_pretrained_model
from modules.utils_config import print_tensor_info
from modules.utils_io import get_ar_model_tensor_info
from modules.predictions_autoregressive import AutoregressivePredictions
## Functions within AutoregressivePredictions
from modules.dataloader_autoregressive import remove_unused_Y
from modules.dataloader_autoregressive import get_aligned_ar_batch
from modules.dataloader_autoregressive import AutoregressiveDataset
from modules.dataloader_autoregressive import AutoregressiveDataLoader
from modules.utils_autoregressive import check_ar_settings
from modules.utils_autoregressive import check_input_k
from modules.utils_autoregressive import check_output_k
from modules.utils_io import _get_feature_order
from modules.utils_zarr import check_chunks
from modules.utils_zarr import check_rounding
from modules.utils_zarr import rechunk_Dataset
from modules.utils_zarr import write_zarr
from modules.utils_torch import check_device
from modules.utils_torch import check_pin_memory
from modules.utils_torch import check_asyncronous_gpu_transfer
from modules.utils_torch import check_prefetch_in_gpu
from modules.utils_torch import check_prefetch_factor
from modules.utils_swag import bn_update
## Project specific functions
import modules.my_models_graph as my_architectures
## Side-project utils (maybe migrating to separate packages in future)
from modules.xscaler import LoadScaler
from modules.xscaler import SequentialScaler
# -------------------------------------------------------------------------.
# --- Debug/scratch script: configure the experiment and load data + model ---
# NOTE(review): paths are hard-coded to a specific cluster layout.
data_dir = "/ltenas3/DeepSphere/data/preprocessed_ds/ERA5_HRES"
model_dir = "/data/weather_prediction/experiments_GG/new/RNN-AR6-UNetSpherical-Healpix_400km-Graph_knn-k20-MaxAreaPooli/"
# -------------------------------------------------------------------------.
# Read config file
cfg_path = os.path.join(model_dir, "config.json")
cfg = read_config_file(fpath=cfg_path)
# Some special options to adjust for prediction
cfg["dataloader_settings"]["autotune_num_workers"] = False
cfg["training_settings"]["gpu_training"] = True  # to run prediction in GPU if possible
##------------------------------------------------------------------------.
### Retrieve experiment-specific configuration settings
model_settings = get_model_settings(cfg)
ar_settings = get_ar_settings(cfg)
training_settings = get_training_settings(cfg)
dataloader_settings = get_dataloader_settings(cfg)
dataloader_settings["num_workers"] = 10
##------------------------------------------------------------------------.
#### Load Zarr Datasets
data_sampling_dir = os.path.join(data_dir, cfg["model_settings"]["sampling_name"])
data_dynamic = xr.open_zarr(
    os.path.join(data_sampling_dir, "Data", "dynamic", "time_chunked", "dynamic.zarr")
)
data_bc = xr.open_zarr(
    os.path.join(data_sampling_dir, "Data", "bc", "time_chunked", "bc.zarr")
)
data_static = xr.open_zarr(os.path.join(data_sampling_dir, "Data", "static.zarr"))
# - Select dynamic features
# data_dynamic = data_dynamic[['z500','t850']]
##------------------------------------------------------------------------.
### Prepare static data
# - Keep land-surface mask as it is
# - Keep sin of latitude and remove longitude information
data_static = data_static.drop(["sin_longitude", "cos_longitude"])
# - Scale orography between 0 and 1 (is already left 0 bounded)
data_static["orog"] = data_static["orog"] / data_static["orog"].max()
# - One Hot Encode soil type
# ds_slt_OHE = xscaler.OneHotEnconding(data_static['slt'])
# data_static = xr.merge([data_static, ds_slt_OHE])
# data_static = data_static.drop('slt')
# - Load static data
data_static = data_static.load()
##------------------------------------------------------------------------.
#### Define scaler to apply on the fly within DataLoader
# - Load scalers
dynamic_scaler = LoadScaler(
    os.path.join(data_sampling_dir, "Scalers", "GlobalStandardScaler_dynamic.nc")
)
bc_scaler = LoadScaler(
    os.path.join(data_sampling_dir, "Scalers", "GlobalStandardScaler_bc.nc")
)
# # - Create single scaler
scaler = SequentialScaler(dynamic_scaler, bc_scaler)
##------------------------------------------------------------------------.
### Define pyTorch settings (before PyTorch model definition)
# - Here inside is eventually set the seed for fixing model weights initialization
# - Here inside the training precision is set (currently only float32 works)
device = set_pytorch_settings(training_settings)
##------------------------------------------------------------------------.
## Retrieve dimension info of input-output Torch Tensors
tensor_info = get_ar_model_tensor_info(
    ar_settings=ar_settings,
    data_dynamic=data_dynamic,
    data_static=data_static,
    data_bc=data_bc,
)
print_tensor_info(tensor_info)
# Check that tensor_info match between model training and now
check_same_dict(model_settings["tensor_info"], tensor_info)
##------------------------------------------------------------------------.
### Define the model architecture
model = get_pytorch_model(module=my_architectures, model_settings=model_settings)
###-----------------------------------------------------------------------.
## Load a pre-trained model
load_pretrained_model(model=model, model_dir=model_dir)
###-----------------------------------------------------------------------.
### Transfer model to the device (i.e. GPU)
model = model.to(device)
## AutoregressivePredictions arguments
# NOTE: this is a scratch/debug section — several variables are deliberately
# re-assigned many times while testing; the last assignment before use wins.
forecast_reference_times = np.datetime64("2016-12-26T23:00:00.000000000")
forecast_reference_times1 = np.datetime64("2016-06-26T23:00:00.000000000")
forecast_reference_times = [forecast_reference_times, forecast_reference_times1]
ar_iterations = 2 * 365 * 4
ar_iterations = 20
batch_size = 32
ar_blocks = None
forecast_zarr_fpath = None
num_workers = 10  # dataloader_settings['num_workers']
bc_generator = None
ar_batch_fun = get_aligned_ar_batch
scaler_transform = scaler
scaler_inverse = scaler
# Dataloader options
device = device
batch_size = batch_size  # number of forecasts per batch
prefetch_factor = dataloader_settings["prefetch_factor"]
prefetch_in_gpu = dataloader_settings["prefetch_in_gpu"]
pin_memory = dataloader_settings["pin_memory"]
asyncronous_gpu_transfer = dataloader_settings["asyncronous_gpu_transfer"]
# Autoregressive settings
input_k = ar_settings["input_k"]
output_k = ar_settings["output_k"]
forecast_cycle = ar_settings["forecast_cycle"]
stack_most_recent_prediction = ar_settings["stack_most_recent_prediction"]
# Prediction options
forecast_reference_times = forecast_reference_times
ar_blocks = ar_blocks
ar_iterations = ar_iterations  # How many time to autoregressive iterate
keep_first_prediction = True
# Save options
zarr_fpath = forecast_zarr_fpath  # None --> do not write to disk
rounding = 2  # Default None. Accept also a dictionary
compressor = "auto"  # Accept also a dictionary per variable
chunks = "auto"
# --- Manual test cases for forecast_reference_times validation ---
# 1 Valid timestep : OK
forecast_reference_times = np.datetime64("2018-12-26T23:00:00.000000000")
### 2 (valid) timesteps --> OK
forecast_reference_times1 = np.datetime64("2018-12-26T22:00:00.000000000")
forecast_reference_times2 = np.datetime64("2018-12-26T23:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
## One valid, one unvalid
forecast_reference_times1 = np.datetime64("2018-12-26T23:00:00.000000000")
forecast_reference_times2 = np.datetime64("2018-12-27T00:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
## 1 Unvalid (future) timestep --> OK: raise correct error
forecast_reference_times = np.datetime64("2018-12-27T00:00:00.000000000")
## 1 Unvalid timestep (past) --> OK: raise correct error
forecast_reference_times = np.datetime64("1980-01-01T07:00:00.000000000")
forecast_reference_times = np.datetime64("1970-01-01T07:00:00.000000000")
## 2 unvalid (future) timesteps --> OK: raise correct error
forecast_reference_times1 = np.datetime64("2018-12-27T00:00:00.000000000")
forecast_reference_times2 = np.datetime64("2018-12-27T01:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
## 2 unvalid (past) timesteps --> OK: raise correct error
forecast_reference_times1 = np.datetime64("1980-01-01T07:00:00.000000000")
forecast_reference_times2 = np.datetime64("1980-01-01T06:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
# ----
### No duplicate (unvalid) timesteps --> OK raise correct error
forecast_reference_times1 = np.datetime64("2018-12-27T00:00:00.000000000")
forecast_reference_times2 = np.datetime64("2018-12-27T00:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
### No duplicate (valid) timesteps --> OK raise correct error
forecast_reference_times1 = np.datetime64("2018-12-26T23:00:00.000000000")
forecast_reference_times2 = np.datetime64("2018-12-26T23:00:00.000000000")
forecast_reference_times = [forecast_reference_times2, forecast_reference_times1]
## Empty list --> OK raise correct error
forecast_reference_times = []
# ----
## AutoregressivePredictions arguments (final values actually used below)
forecast_reference_times = np.datetime64("2016-12-26T23:00:00.000000000")
forecast_reference_times1 = np.datetime64("2016-06-26T23:00:00.000000000")
forecast_reference_times = [forecast_reference_times, forecast_reference_times1]
ar_iterations = 2 * 365 * 4
dask.config.set(scheduler="synchronous")
ds_forecasts = AutoregressivePredictions(
    model=model,
    # Data
    data_dynamic=data_dynamic,
    data_static=data_static,
    data_bc=data_bc,
    scaler_transform=scaler,
    scaler_inverse=scaler,
    # Dataloader options
    device=device,
    batch_size=batch_size,  # number of forecasts per batch
    num_workers=dataloader_settings["num_workers"],
    prefetch_factor=dataloader_settings["prefetch_factor"],
    prefetch_in_gpu=dataloader_settings["prefetch_in_gpu"],
    pin_memory=dataloader_settings["pin_memory"],
    asyncronous_gpu_transfer=dataloader_settings["asyncronous_gpu_transfer"],
    # Autoregressive settings
    input_k=ar_settings["input_k"],
    output_k=ar_settings["output_k"],
    forecast_cycle=ar_settings["forecast_cycle"],
    stack_most_recent_prediction=ar_settings["stack_most_recent_prediction"],
    # Prediction options
    forecast_reference_times=forecast_reference_times,
    ar_blocks=ar_blocks,
    ar_iterations=ar_iterations,  # How many time to autoregressive iterate
    # Save options
    zarr_fpath=forecast_zarr_fpath,  # None --> do not write to disk
    rounding=2,  # Default None. Accept also a dictionary
    compressor="auto",  # Accept also a dictionary per variable
    chunks="auto",
)
print(ds_forecasts)
ds_forecasts.to_zarr("/ltenas3/DeepSphere/tmp/2ysim.zarr")
###-----------------------------------------------------------------------.
## DEBUG Code within AutoregressivePredictions
## (step-by-step replica of the function body, using the variables above)
##------------------------------------------------------------------------.
## Checks arguments
device = check_device(device)
pin_memory = check_pin_memory(
    pin_memory=pin_memory, num_workers=num_workers, device=device
)
asyncronous_gpu_transfer = check_asyncronous_gpu_transfer(
    asyncronous_gpu_transfer=asyncronous_gpu_transfer, device=device
)
prefetch_in_gpu = check_prefetch_in_gpu(
    prefetch_in_gpu=prefetch_in_gpu, num_workers=num_workers, device=device
)
prefetch_factor = check_prefetch_factor(
    prefetch_factor=prefetch_factor, num_workers=num_workers
)
##------------------------------------------------------------------------.
# Check that autoregressive settings are valid
# - input_k and output_k must be numpy arrays hereafter !
input_k = check_input_k(input_k=input_k, ar_iterations=ar_iterations)
output_k = check_output_k(output_k=output_k)
check_ar_settings(
    input_k=input_k,
    output_k=output_k,
    forecast_cycle=forecast_cycle,
    ar_iterations=ar_iterations,
    stack_most_recent_prediction=stack_most_recent_prediction,
)
ar_iterations = int(ar_iterations)
##------------------------------------------------------------------------.
### Retrieve feature info of the forecast
features = _get_feature_order(data_dynamic)
##------------------------------------------------------------------------.
# Check Zarr settings
WRITE_TO_ZARR = zarr_fpath is not None
if WRITE_TO_ZARR:
    # - If zarr fpath provided, create the required folder
    if not os.path.exists(os.path.dirname(zarr_fpath)):
        os.makedirs(os.path.dirname(zarr_fpath))
    # - Set default chunks and compressors
    # ---> -1 to all optional dimensions (i..e nodes, lat, lon, ens, plevels,...)
    dims = list(data_dynamic.dims)
    dims_optional = np.array(dims)[
        np.isin(dims, ["time", "feature"], invert=True)
    ].tolist()
    default_chunks = {dim: -1 for dim in dims_optional}
    default_chunks["forecast_reference_time"] = 1
    default_chunks["leadtime"] = 1
    default_compressor = zarr.Blosc(cname="zstd", clevel=0, shuffle=2)
    # - Check rounding settings
    rounding = check_rounding(rounding=rounding, variable_names=features)
##------------------------------------------------------------------------.
# Check ar_blocks
if not isinstance(ar_blocks, (int, float, type(None))):
    raise TypeError("'ar_blocks' must be int or None.")
if isinstance(ar_blocks, float):
    ar_blocks = int(ar_blocks)
if not WRITE_TO_ZARR and isinstance(ar_blocks, int):
    raise ValueError("If 'zarr_fpath' not specified, 'ar_blocks' must be None.")
if ar_blocks is None:
    ar_blocks = ar_iterations + 1
if ar_blocks > ar_iterations + 1:
    raise ValueError("'ar_blocks' must be equal or smaller to 'ar_iterations'")
PREDICT_ar_BLOCKS = ar_blocks != (ar_iterations + 1)
##------------------------------------------------------------------------.
### Define DataLoader subset_timesteps
# NOTE(review): check_timesteps_format / check_no_duplicate_timesteps are not
# imported at the top of this script — confirm they are in scope when running.
forecast_reference_times = check_timesteps_format(forecast_reference_times)
check_no_duplicate_timesteps(
    forecast_reference_times, var_name="forecast_reference_times"
)
# NOTE(review): .sort() runs before the None-check below; a None value would
# raise here — confirm this ordering is intended.
forecast_reference_times.sort()  # ensure the temporal order
subset_timesteps = None
if forecast_reference_times is not None:
    if len(forecast_reference_times) == 0:
        raise ValueError(
            "If you don't want to specify specific 'forecast_reference_times', set it to None"
        )
    t_res_timedelta = np.diff(data_dynamic.time.values)[0]
    subset_timesteps = forecast_reference_times + -1 * max(input_k) * t_res_timedelta
##------------------------------------------------------------------------.
### Create training Autoregressive Dataset and DataLoader
dataset = AutoregressiveDataset(
    data_dynamic=data_dynamic,
    data_bc=data_bc,
    data_static=data_static,
    bc_generator=bc_generator,
    scaler=scaler_transform,
    # Dataset options
    subset_timesteps=subset_timesteps,
    training_mode=False,
    # Autoregressive settings
    input_k=input_k,
    output_k=output_k,
    forecast_cycle=forecast_cycle,
    ar_iterations=ar_iterations,
    stack_most_recent_prediction=stack_most_recent_prediction,
    # GPU settings
    device=device,
)
# Interactive inspection of the dataset internals
dataset[0]
self = dataset
self.subset_timesteps
self.idxs
len(self)
| deepsphere/deepsphere-weather | dev/w_debug_predictions.py | w_debug_predictions.py | py | 15,927 | python | en | code | 56 | github-code | 6 | [
{
"api_name": "os.chdir",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number":... |
2727040132 | import pathlib
# Output directory for the generated files (assumed to already exist).
data_folder = pathlib.Path('data')
# print(data_folder.exists(), data_folder.is_dir())
def make_text(i):
    """Build the payload for index *i*: the index, 24*i, and a bar of 12*i '#'."""
    lines = [str(i), str(i * 24), "#" * (i * 12)]
    return "\n".join(lines)
# Generate twenty files in data_folder; each name is the zero-padded index
# followed by idx repetitions of "ihatezoom", each body produced by make_text.
for idx in range(20):
    file_name = f"{idx:04d}." + "ihatezoom" * idx
    target = data_folder / pathlib.Path(file_name)
    target.write_text(make_text(idx))
| elliewix/IS305-2022-Fall | week 5/monday.py | monday.py | py | 399 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "call"
}
] |
7265936310 | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from pylab import *
# Use a font that can render the Chinese characters in the chart title.
mpl.rcParams['font.sans-serif'] = ['SimHei']

# Accumulate, per country, the daily new-case counts over 2020-12-01..15.
res = {}
for day in range(1, 16):
    file_name = './202012' + str(day).zfill(2) + '.csv'  # one CSV per day
    df = pd.read_csv(file_name, encoding='utf-8')
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    df['increase'] = df['increase'].astype(int)
    for idx in range(len(df)):  # fold every country's daily increase in
        country_name = df['country'][idx]
        res[country_name] = res.get(country_name, 0) + df['increase'][idx]

# Top-10 countries by total new cases (slicing also tolerates < 10 countries,
# where the original indexing loop would raise IndexError).
top10 = sorted(res.items(), key=lambda item: item[1], reverse=True)[:10]
country = [name for name, _ in top10]
increase = [count for _, count in top10]

plt.title("20201201~20201215 新冠病毒新增人数国家TOP10")
plt.bar(country, increase, label='increase')
plt.legend()
plt.show() | Seizzzz/DailyCodes | Course 202009/Python/final/c.py | c.py | py | 1,009 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.int",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyp... |
73944785148 | import pathlib
import numpy as np
import h5py
import cv2
import argparse
def load_episodes(directory, capacity=None):
    """Load saved episodes from *directory*.

    The returned dict maps filename -> episode and iterates in temporally
    sorted order. If *capacity* is given, only the most recent episodes whose
    combined step count reaches *capacity* are loaded.
    """
    paths = sorted(directory.glob('*.npz'))
    if capacity:
        total_steps, kept = 0, 0
        for path in reversed(paths):
            # The episode length is encoded in the filename: "...-<length>.npz".
            total_steps += int(str(path).split('-')[-1][:-4])
            kept += 1
            if total_steps >= capacity:
                break
        paths = paths[-kept:]
    episodes = {}
    for path in paths:
        try:
            with path.open('rb') as handle:
                archive = np.load(handle)
                episode = {key: archive[key] for key in archive.keys()}
            # Older npz versions lack 'is_terminal'; derive it from discount.
            if 'is_terminal' not in episode:
                episode['is_terminal'] = episode['discount'] == 0.
        except Exception as exc:
            # Best-effort loading: skip unreadable episodes, keep the rest.
            print(f'Could not load episode {str(path)}: {exc}')
            continue
        episodes[str(path)] = episode
    return episodes
def main():
    """CLI entry point: convert a directory of npz episodes to one hdf5 shard.

    Frames are resized to 84x84, transposed to channel-first, and all episode
    fields are concatenated along the time axis before writing.
    """
    arg_parser = argparse.ArgumentParser(description='Convert npz files to hdf5.')
    arg_parser.add_argument('--input_dir', type=str, required=True,
                            help='Path to input files')
    arg_parser.add_argument('--output_dir', type=str, required=True,
                            help='Path to output files')
    options = arg_parser.parse_args()

    # Step-type vector for a fixed 501-step episode: 0 = FIRST, 1 = MID, 2 = LAST.
    step_type = np.ones(501)
    step_type[0] = 0
    step_type[500] = 2

    loaded = list(load_episodes(pathlib.Path(options.input_dir)).values())

    # Resize every frame and move channels first: (T, H, W, C) -> (T, C, H, W).
    observations = []
    for episode in loaded:
        frames = np.empty((501, 84, 84, 3), dtype=episode['image'].dtype)
        for index, frame in enumerate(episode['image']):
            frames[index] = cv2.resize(frame, dsize=(84, 84),
                                       interpolation=cv2.INTER_CUBIC)
        observations.append(frames.transpose(0, 3, 1, 2))

    output = {
        'action': np.concatenate([e['action'] for e in loaded]),
        'discount': np.concatenate([e['discount'] for e in loaded]),
        'observation': np.concatenate(observations),
        'reward': np.concatenate([e['reward'] for e in loaded]),
        'step_type': np.concatenate([step_type for _ in loaded]),
    }

    destination = pathlib.Path(options.output_dir)
    destination.mkdir(parents=True, exist_ok=True)
    with h5py.File(destination / 'data.hdf5', 'w') as shard_file:
        for name, values in output.items():
            shard_file.create_dataset(name, data=values, compression='gzip')


if __name__ == '__main__':
    main()
| conglu1997/v-d4rl | conversion_scripts/npz_to_hdf5.py | npz_to_hdf5.py | py | 2,833 | python | en | code | 64 | github-code | 6 | [
{
"api_name": "numpy.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"lin... |
17371120296 | import json
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
from django.contrib.auth import get_user_model
from rest_framework_simplejwt.tokens import RefreshToken
# Concrete user model configured for this Django project.
User = get_user_model()

# Fixture usernames, emails and roles shared by the tests below.
AUTHOR = 'author'
EXECUTOR = 'executor'
AUTHOR_EMAIL = 'author@gmail.com'
NEW_AUTHOR_EMAIL = '123@gmail.com'
EXECUTOR_EMAIL = 'executor@gmail.com'
AUTHOR_ROLE = 'author'
EXECUTOR_ROLE = 'executor'

# Balance values: every fixture user starts at START_BALANCE so the
# balance-change assertions are easy to state.
START_BALANCE = 500
NEW_BALANCE = 1

# Resolved once at import time; the per-user detail URLs are built in setUp.
USERS_LIST_URL = reverse('users-list')
# NOTE(review): appears unused in this file's visible tests — verify before removing.
RESPOND_NEW_DATA = {'author': 2, 'task': 1}
class TaskModelTest(APITestCase):
    """API tests for the user endpoints: listing, balance top-up and updates."""

    def setUp(self):
        # One superuser plus an author and an executor, all starting with
        # identical balances.
        self.admin = User.objects.create_superuser(
            username='admin',
            email='admin@gmail.com',
            balance=START_BALANCE,
            freeze_balance=START_BALANCE,
            is_staff=True,
            is_superuser=True)
        self.author = User.objects.create_user(
            username=AUTHOR,
            email=AUTHOR_EMAIL,
            balance=START_BALANCE,
            freeze_balance=START_BALANCE,
            role=AUTHOR_ROLE)
        self.executor = User.objects.create_user(
            username=EXECUTOR,
            email=EXECUTOR_EMAIL,
            balance=START_BALANCE,
            freeze_balance=START_BALANCE,
            role=EXECUTOR_ROLE)
        # Detail/action URLs resolved once per test for reuse below.
        self.ADMIN_DETAIL_URL = reverse('users-detail', args=[self.admin.id])
        self.AUTHOR_ADD_BALANCE_URL = reverse('users-balance')
        self.USER_CHANGE_DATA_URL = reverse('users-me')
        self.AUTHOR_DETAIL_URL = reverse('users-detail', args=[self.author.id])
        self.EXECUTOR_DETAIL_URL = reverse('users-detail', args=[self.executor.id])
        # Executor and admin clients use force_authenticate; the author client
        # authenticates with a real SimpleJWT access token in the header.
        self.executor_client = APIClient()
        self.executor_client.force_authenticate(user=self.executor)
        self.admin_client = APIClient()
        self.admin_client.force_authenticate(user=self.admin)
        self.token = RefreshToken.for_user(self.author)
        self.author_token = self.token.access_token
        self.auth_client = APIClient()
        self.auth_client.credentials(HTTP_AUTHORIZATION=f'Bearer {self.author_token}')

    def test_admin_get_users_list(self):
        """An admin can list users; all three fixtures are returned."""
        response = self.admin_client.get(USERS_LIST_URL)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data['results']), 3)

    def test_not_admin_cant_get_users_list(self):
        """A regular (author) user is forbidden from listing users."""
        response = self.auth_client.get(USERS_LIST_URL)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_admin_can_update_user_data(self):
        """An admin may patch a user's balance via the detail endpoint."""
        balance_before = self.author.balance
        response = self.admin_client.patch(
            self.AUTHOR_DETAIL_URL,
            data=json.dumps({'balance': NEW_BALANCE}),
            content_type='application/json')
        self.author.refresh_from_db()
        balance_after = self.author.balance
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(balance_before, START_BALANCE)
        self.assertEqual(balance_after, NEW_BALANCE)

    def test_balance_url_add_money_to_balance(self):
        """PATCHing the balance action adds money (does not replace it)."""
        balance_before = self.author.balance
        response = self.auth_client.patch(
            self.AUTHOR_ADD_BALANCE_URL,
            data=json.dumps({'balance': NEW_BALANCE}),
            content_type='application/json')
        self.author.refresh_from_db()
        balance_after = self.author.balance
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(balance_before, balance_after-NEW_BALANCE)

    def test_not_admin_cant_update_user_data(self):
        """The detail endpoint is admin-only: a plain user's PATCH is rejected
        and the stored email stays unchanged."""
        email_before = self.author.email
        response = self.auth_client.patch(
            self.AUTHOR_DETAIL_URL,
            data=json.dumps({'email': NEW_AUTHOR_EMAIL}),
            content_type='application/json')
        self.author.refresh_from_db()
        email_after = self.author.email
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(email_before, email_after)

    def test_user_can_update_own_data(self):
        """A user can change their own email through the 'me' endpoint."""
        email_before = self.author.email
        response = self.auth_client.patch(
            self.USER_CHANGE_DATA_URL,
            data=json.dumps({'email': NEW_AUTHOR_EMAIL}),
            content_type='application/json')
        self.author.refresh_from_db()
        email_after = self.author.email
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(email_before, AUTHOR_EMAIL)
        self.assertEqual(email_after, NEW_AUTHOR_EMAIL)
| vavsar/freelance_t | tests/users/test_views.py | test_views.py | py | 4,583 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "rest_framework.test.APITestCase",
"line_number": 25,
"usage_type": "name"
},
{
... |
42510539573 | import os
from cffi import FFI
from OpenSSL.SSL import Context as SSLContext, _ffi, _lib as lib
from utils import OutputGrabber
# Stand-alone FFI instance used to declare the few OpenSSL symbols that
# pyOpenSSL's bundled bindings (_ffi/_lib) do not expose.
ffi = FFI()
NULL = ffi.NULL
ffi.cdef(
    "int SSL_CTX_set_client_cert_engine(void *ctx, void *e);"
    "int ENGINE_set_default(void *e, unsigned int flags);"
)
# NOTE(review): the DLL names below are Windows / OpenSSL 1.1 specific —
# confirm they match the deployed OpenSSL build.
libcrypto = ffi.dlopen("libcrypto-1_1.dll")
libssl = ffi.dlopen("libssl-1_1.dll")


class ENGINE_DEFAULT:
    # Flag mask for ENGINE_set_default: register the engine for all methods.
    ALL = 0xFFFF


class CAPI_LIST_DISP_FMT:
    # Output-format flags for the capi engine's "list_options" control command.
    SUMMARY = 1
    FRIENDLY_NAME = 2
    FULL = 4
    PEM = 8
    XXX = 16
    PRIV_KEY_INFO = 32
class SSLEngine:
    """Thin wrapper around an OpenSSL ENGINE pointer.

    Accepts an engine id string (built-in engines are tried first, dynamic
    loading is the fallback), another SSLEngine, or a raw cffi pointer.
    """

    def __init__(self, id: str | FFI.CData) -> None:
        if isinstance(id, str):
            # Prefer a built-in engine; fall back to loading it dynamically.
            try:
                engine = SSLEngine.load_by_id(id)
            except Exception:
                engine = SSLEngine.load_dynamic(id)
            ptr = engine.ptr
        elif isinstance(id, SSLEngine):
            ptr = id.ptr
        else:
            ptr = id
        self.ptr = ptr

    def init(self):
        """Acquire a functional reference; frees the engine and raises on failure."""
        if not lib.ENGINE_init(self.ptr):
            # BUG FIX: previously called self.__exit__() without its three
            # required arguments, raising TypeError and masking the real error.
            self.free()
            raise Exception("Could not initialize engine")

    def free(self):
        """Release the structural reference on the underlying ENGINE."""
        lib.ENGINE_free(self.ptr)

    def __enter__(self):
        self.init()
        return self

    def __exit__(self, type, value, traceback):
        self.free()

    def set_default(self, flags: int = ENGINE_DEFAULT.ALL):
        """Register this engine as the default implementation for *flags*."""
        if not libcrypto.ENGINE_set_default(self.ptr, flags):
            self.free()
            raise Exception(
                "Not able to set engine as default for all flags:%s" % flags
            )

    def ctrl_cmd_string(
        self,
        cmd: str,
        value: str | None = None,
        optional: bool = False,
        capture: bool = False,
    ) -> None | bytes:
        """Send a string control command to the engine.

        When *capture* is true, stdout produced by the command is grabbed via
        OutputGrabber and returned as bytes; otherwise None is returned.
        Raises on command failure (the grabber is stopped first).
        """
        io: OutputGrabber | None = None
        if capture:
            io = OutputGrabber(threaded=True)
            io.start()
        if not lib.ENGINE_ctrl_cmd_string(
            self.ptr,
            cmd.encode("utf-8"),
            NULL if value is None else value.encode("utf-8"),
            1 if optional else 0,
        ):
            if capture:
                io.stop()
            raise Exception(
                "Error with engine string control command: %s%s"
                % (cmd, "" if value is None else ":" + value)
            )
        if capture:
            io.stop()
            return io.captured

    @staticmethod
    def load_by_id(id: str):
        """Factory: load a built-in engine by id.

        Marked @staticmethod so instance-level calls no longer bind the
        instance as *id*; class-level calls behave exactly as before.
        """
        if not id:
            raise ValueError("Id value must be provided")
        lib.ENGINE_load_builtin_engines()
        ptr = lib.ENGINE_by_id(id.encode())
        if ptr == NULL:
            raise ValueError("Could not load the {0} engine by id".format(id))
        return SSLEngine(ptr)

    @staticmethod
    def load_dynamic(
        id: str,
        path: str = None,
        search_path: str = None,
        check_version: bool = True,
    ):
        """Factory: load an engine shared object via OpenSSL's "dynamic" engine.

        Falls back to the OPENSSL_ENGINES environment variable as a search
        path when neither *path* nor *search_path* is given.
        """
        if not id:
            raise ValueError("Id value must be provided")
        dyn = SSLEngine.load_by_id("dynamic")
        dyn.ctrl_cmd_string("ID", id)
        if path:
            dyn.ctrl_cmd_string("SO_PATH", path)
        dyn.ctrl_cmd_string("LIST_ADD", "1")
        if not check_version:
            dyn.ctrl_cmd_string("NO_VCHECK", "1")
        if search_path is None and path is None and "OPENSSL_ENGINES" in os.environ:
            search_path = os.environ["OPENSSL_ENGINES"]
        if search_path:
            dyn.ctrl_cmd_string("DIR_LOAD", "2")
            dyn.ctrl_cmd_string("DIR_ADD", search_path)
        dyn.ctrl_cmd_string("LOAD")
        return dyn
class CAPIEngine(SSLEngine):
    """OpenSSL "capi" engine (Windows CryptoAPI certificate/key access)."""

    def __init__(self, src: FFI.CData | str | SSLEngine | None = None) -> None:
        # Default to the "capi" engine id when no explicit source is given.
        super().__init__("capi" if src == None else src)

    def set_store(self, name: str):
        """Select the Windows certificate store the engine reads from."""
        self.ctrl_cmd_string("store_name", name)

    def list_certs(
        self, store: str | None = None, format: int | None = None
    ) -> list[bytes]:
        """Return the certificates found in *store* as raw listing chunks.

        *format* is a CAPI_LIST_DISP_FMT flag passed via "list_options".
        The engine's captured stdout is split on its "Certificate " banner
        lines and each banner prefix is stripped from the chunk.
        """
        if format:
            self.ctrl_cmd_string("list_options", str(format))
        if store:
            self.set_store(store)
        return [
            cert.split(sep=b"\n", maxsplit=1)[1]
            for cert in self.ctrl_cmd_string("list_certs", capture=True)
            .strip(b"\n")
            .split(b"\nCertificate ")
        ]
def set_client_cert_engine(self: SSLContext, engine: FFI.CData | SSLEngine):
    """Bind *engine* as the client-certificate engine of a pyOpenSSL Context.

    Accepts either an SSLEngine wrapper or a raw cffi ENGINE pointer; raises
    if the underlying SSL_CTX_set_client_cert_engine call fails.
    """
    if not libssl.SSL_CTX_set_client_cert_engine(
        self._context, engine.ptr if isinstance(engine, SSLEngine) else engine
    ):
        raise Exception("Was not able to set client cert engine")


# Monkey-patch the helper onto pyOpenSSL's Context class.
SSLContext.set_client_cert_engine = set_client_cert_engine
| jose-pr/openssl-engines | src/openssl_engines.py | openssl_engines.py | py | 4,561 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cffi.FFI",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cffi.FFI.CData",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "cffi.FFI",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "OpenSSL.SSL._lib.ENGINE_init",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.