blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
4
721
content_id
stringlengths
40
40
detected_licenses
listlengths
0
57
license_type
stringclasses
2 values
repo_name
stringlengths
5
91
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
321 values
visit_date
timestamp[ns]date
2016-08-12 09:31:09
2023-09-06 10:45:07
revision_date
timestamp[ns]date
2010-09-28 14:01:40
2023-09-06 06:22:19
committer_date
timestamp[ns]date
2010-09-28 14:01:40
2023-09-06 06:22:19
github_id
int64
426
681M
star_events_count
int64
101
243k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
23 values
gha_event_created_at
timestamp[ns]date
2012-06-28 18:51:49
2023-09-14 21:59:16
gha_created_at
timestamp[ns]date
2008-02-11 22:55:26
2023-08-10 11:14:58
gha_language
stringclasses
147 values
src_encoding
stringclasses
26 values
language
stringclasses
2 values
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
6
10.2M
extension
stringclasses
115 values
filename
stringlengths
3
113
content
stringlengths
6
10.2M
fa35a5e7b561ff09f97de9e703eacc67c66cf844
503bf74961cc3d52236d52439eeb7e8059860b7a
/src/borg/borg6.c
c9d3b188509d12926750fa0d19b95fd305dc67e3
[]
no_license
angband/angband
f00cce82a82b73f8c58a0dc12cbd2f3957dab5ce
e2f4b269f276b9fd7a25cba2b9a49fac84642416
refs/heads/master
2023-08-29T15:58:27.283723
2023-08-25T16:50:33
2023-08-28T21:40:44
1,033,705
1,206
386
null
2023-09-13T21:18:28
2010-10-29T01:17:48
C
IBM852
C
false
false
572,587
c
borg6.c
/* File: borg6.c */ /* Purpose: Medium level stuff for the Borg -BEN- */ #include "../angband.h" #ifdef ALLOW_BORG #include "../cave.h" #include "../game-input.h" #include "../mon-spell.h" #include "../obj-knowledge.h" #include "../obj-slays.h" #include "../player-timed.h" #include "../project.h" #include "../trap.h" #include "borg1.h" #include "borg2.h" #include "borg3.h" #include "borg4.h" #include "borg5.h" #include "borg6.h" #include "borg7.h" extern const int adj_str_hold[STAT_RANGE]; extern const int adj_str_blow[STAT_RANGE]; static bool borg_desperate = false; static int borg_thrust_damage_one(int i); /* * This file is responsible for the low level dungeon goals. * * This includes calculating the danger from monsters, determining * how and when to attack monsters, and calculating "flow" paths * from place to place for various reasons. * * Notes: * We assume that invisible/offscreen monsters are dangerous * We consider physical attacks, missile attacks, spell attacks, * wand attacks, etc, as variations on a single theme. * We take account of monster resistances and susceptibilities * We try not to wake up sleeping monsters by throwing things * * * Bugs: */ /* * Given a "source" and "target" locations, extract a "direction", * which will move one step from the "source" towards the "target". * * Note that we use "diagonal" motion whenever possible. * * We return "5" if no motion is needed. */ static int borg_extract_dir(int y1, int x1, int y2, int x2) { /* No movement required */ if ((y1 == y2) && (x1 == x2)) return (5); /* South or North */ if (x1 == x2) return ((y1 < y2) ? 2 : 8); /* East or West */ if (y1 == y2) return ((x1 < x2) ? 6 : 4); /* South-east or South-west */ if (y1 < y2) return ((x1 < x2) ? 3 : 1); /* North-east or North-west */ if (y1 > y2) return ((x1 < x2) ? 9 : 7); /* Paranoia */ return (5); } /* * Given a "source" and "target" locations, extract a "direction", * which will move one step from the "source" towards the "target". 
* * We prefer "non-diagonal" motion, which allows us to save the * "diagonal" moves for avoiding pillars and other obstacles. * * If no "obvious" path is available, we use "borg_extract_dir()". * * We return "5" if no motion is needed. */ static int borg_goto_dir(int y1, int x1, int y2, int x2) { int d, e; int ay = (y2 > y1) ? (y2 - y1) : (y1 - y2); int ax = (x2 > x1) ? (x2 - x1) : (x1 - x2); /* Default direction */ e = borg_extract_dir(y1, x1, y2, x2); /* Adjacent location, use default */ if ((ax <= 1) && (ay <= 1)) return (e); /* Try south/north (primary) */ if (ay > ax) { d = (y1 < y2) ? 2 : 8; if (borg_cave_floor_bold(y1 + ddy[d], x1 + ddx[d])) return (d); } /* Try east/west (primary) */ if (ay < ax) { d = (x1 < x2) ? 6 : 4; if (borg_cave_floor_bold(y1 + ddy[d], x1 + ddx[d])) return (d); } /* Try diagonal */ d = borg_extract_dir(y1, x1, y2, x2); /* Check for walls */ if (borg_cave_floor_bold(y1 + ddy[d], x1 + ddx[d])) return (d); /* Try south/north (secondary) */ if (ay <= ax) { d = (y1 < y2) ? 2 : 8; if (borg_cave_floor_bold(y1 + ddy[d], x1 + ddx[d])) return (d); } /* Try east/west (secondary) */ if (ay >= ax) { d = (x1 < x2) ? 6 : 4; if (borg_cave_floor_bold(y1 + ddy[d], x1 + ddx[d])) return (d); } /* Circle obstacles */ if (!ay) { /* Circle to the south */ d = (x1 < x2) ? 3 : 1; if (borg_cave_floor_bold(y1 + ddy[d], x1 + ddx[d])) return (d); /* Circle to the north */ d = (x1 < x2) ? 9 : 7; if (borg_cave_floor_bold(y1 + ddy[d], x1 + ddx[d])) return (d); } /* Circle obstacles */ if (!ax) { /* Circle to the east */ d = (y1 < y2) ? 3 : 9; if (borg_cave_floor_bold(y1 + ddy[d], x1 + ddx[d])) return (d); /* Circle to the west */ d = (y1 < y2) ? 
1 : 7; if (borg_cave_floor_bold(y1 + ddy[d], x1 + ddx[d])) return (d); } /* Oops */ return (e); } /* * Clear the "flow" information * */ static void borg_flow_clear(void) { /* Reset the "cost" fields */ memcpy(borg_data_cost, borg_data_hard, sizeof(borg_data)); /* Wipe costs and danger */ if (borg_danger_wipe) { /* Wipe the "know" flags */ memset(borg_data_know, 0, sizeof(borg_data)); /* Wipe the "icky" flags */ memset(borg_data_icky, 0, sizeof(borg_data)); /* Wipe complete */ borg_danger_wipe = false; } /* Start over */ flow_head = 0; flow_tail = 0; } /* * Spread a "flow" from the "destination" grids outwards * * We fill in the "cost" field of every grid that the player can * "reach" with the number of steps needed to reach that grid, * if the grid is "reachable", and otherwise, with "255", which * is the largest possible value that can be stored in a byte. * * Thus, certain grids which are actually "reachable" but only by * a path which is at least 255 steps in length will thus appear * to be "unreachable", but this is not a major concern. * * We use the "flow" array as a "circular queue", and thus we must * be careful not to allow the "queue" to "overflow". This could * only happen with a large number of distinct destination points, * each several units away from every other destination point, and * in a dungeon with no walls and no dangerous monsters. But this * is technically possible, so we must check for it just in case. * * We do not need a "priority queue" because the cost from grid to * grid is always "one" and we process them in order. If we did * use a priority queue, this function might become unusably slow, * unless we reactivated the "room building" code. * * We handle both "walls" and "danger" by marking every grid which * is "impassible", due to either walls, or danger, as "ICKY", and * marking every grid which has been "checked" as "KNOW", allowing * us to only check the wall/danger status of any grid once. 
This * provides some important optimization, since many "flows" can be * done before the "ICKY" and "KNOW" flags must be reset. * * Note that the "borg_enqueue_grid()" function should refuse to * enqueue "dangeous" destination grids, but does not need to set * the "KNOW" or "ICKY" flags, since having a "cost" field of zero * means that these grids will never be queued again. In fact, * the "borg_enqueue_grid()" function can be used to enqueue grids * which are "walls", such as "doors" or "rubble". * * This function is extremely expensive, and is a major bottleneck * in the code, due more to internal processing than to the use of * the "borg_danger()" function, especially now that the use of the * "borg_danger()" function has been optimized several times. * * The "optimize" flag allows this function to stop as soon as it * finds any path which reaches the player, since in general we are * looking for paths to destination grids which the player can take, * and we can stop this function as soon as we find any usable path, * since it will always be as short a path as possible. * * We queue the "children" in reverse order, to allow any "diagonal" * neighbors to be processed first, since this may boost efficiency. * * Note that we should recalculate "danger", and reset all "flows" * if we notice that a wall has disappeared, and if one appears, we * must give it a maximal cost, and mark it as "icky", in case it * was currently included in any flow. * * If a "depth" is given, then the flow will only be spread to that * depth, note that the maximum legal value of "depth" is 250. * * "Avoid" flag means the borg will not move onto unknown grids, * nor to Monster grids if borg_desperate or borg_lunal_mode are * set. * * "Sneak" will have the borg avoid grids which are adjacent to a monster. 
* */ static void borg_flow_spread(int depth, bool optimize, bool avoid, bool tunneling, int stair_idx, bool sneak) { int i; int n, o = 0; int x1, y1; int x, y; int fear = 0; int ii; int yy, xx; bool bad_sneak = false; int origin_y, origin_x; bool twitchy = false; /* Default starting points */ origin_y = c_y; origin_x = c_x; /* Is the borg moving under boosted bravery? */ if (avoidance > borg_skill[BI_CURHP]) twitchy = true; /* Use the closest stair for calculation distance (cost) from the stair to the goal */ if (stair_idx >= 0 && borg_skill[BI_CLEVEL] < 15) { origin_y = track_less.y[stair_idx]; origin_x = track_less.x[stair_idx]; optimize = false; } /* Now process the queue */ while (flow_head != flow_tail) { /* Extract the next entry */ x1 = borg_flow_x[flow_tail]; y1 = borg_flow_y[flow_tail]; /* Circular queue -- dequeue the next entry */ if (++flow_tail == AUTO_FLOW_MAX) flow_tail = 0; /* Cost (one per movement grid) */ n = borg_data_cost->data[y1][x1] + 1; /* New depth */ if (n > o) { /* Optimize (if requested) */ if (optimize && (n > borg_data_cost->data[origin_y][origin_x])) break; /* Limit depth */ if (n > depth) break; /* Save */ o = n; } /* Queue the "children" */ for (i = 0; i < 8; i++) { int old_head; borg_grid* ag; /* reset bad_sneak */ bad_sneak = false; /* Neighbor grid */ x = x1 + ddx_ddd[i]; y = y1 + ddy_ddd[i]; /* only on legal grids */ if (!square_in_bounds_fully(cave, loc(x, y))) continue; /* Skip "reached" grids */ if (borg_data_cost->data[y][x] <= n) continue; /* Access the grid */ ag = &borg_grids[y][x]; if (sneak) { /* Scan the neighbors */ for (ii = 0; ii < 8; ii++) { /* Neighbor grid */ xx = x + ddx_ddd[ii]; yy = y + ddy_ddd[ii]; /* only on legal grids */ if (!square_in_bounds_fully(cave, loc(xx, yy))) continue; /* Make sure no monster is on this grid, which is * adjacent to the grid on which, I am thinking about stepping. 
*/ if (borg_grids[yy][xx].kill) { bad_sneak = true; break; } } } /* The grid I am thinking about is adjacent to a monster */ if (sneak && bad_sneak && !borg_desperate && !twitchy) continue; /* Avoid "wall" grids (not doors) unless tunneling*/ /* HACK depends on FEAT order, kinda evil */ if (!tunneling && (ag->feat >= FEAT_SECRET && ag->feat != FEAT_PASS_RUBBLE && ag->feat != FEAT_LAVA)) continue; /* Avoid "perma-wall" grids */ if (ag->feat == FEAT_PERM) continue; /* Avoid "Lava" grids (for now) */ if (ag->feat == FEAT_LAVA && !borg_skill[BI_IFIRE]) continue; /* Avoid unknown grids (if requested or retreating) * unless twitchy. In which case, expore it */ if ((avoid || borg_desperate) && (ag->feat == FEAT_NONE) && !twitchy) continue; /* Avoid Monsters if Desprerate, lunal */ if ((ag->kill) && (borg_desperate || borg_lunal_mode || borg_munchkin_mode)) continue; /* Avoid Monsters if low level, unless twitchy */ if ((ag->kill) && !twitchy && borg_skill[BI_FOOD] >= 2 && borg_skill[BI_MAXCLEVEL] < 5) continue; /* Avoid shop entry points if I am not heading to that shop */ if (goal_shop >= 0 && feat_is_shop(ag->feat) && (ag->store != goal_shop) && y != c_y && x != c_x) continue; /* Avoid Traps if low level-- unless brave */ if (ag->trap && !ag->glyph && !twitchy) { /* Do not disarm when you could end up dead */ if (borg_skill[BI_CURHP] < 60) continue; /* Do not disarm when clumsy */ /* since traps can be physical or magical, gotta check both */ if (borg_skill[BI_DISP] < 30 && borg_skill[BI_CLEVEL] < 20) continue; if (borg_skill[BI_DISP] < 45 && borg_skill[BI_CLEVEL] < 10) continue; if (borg_skill[BI_DISM] < 30 && borg_skill[BI_CLEVEL] < 20) continue; if (borg_skill[BI_DISM] < 45 && borg_skill[BI_CLEVEL] < 10) continue; /* NOTE: Traps are tough to deal with as a low * level character. 
If any modifications are made above, * then the same changes must be made to borg_flow_direct() * and borg_flow_interesting() */ } /* Ignore "icky" grids */ if (borg_data_icky->data[y][x]) continue; /* Analyze every grid once */ if (!borg_data_know->data[y][x]) { int p; /* Mark as known */ borg_data_know->data[y][x] = true; if (!borg_desperate && !borg_lunal_mode && !borg_munchkin_mode && !borg_digging) { /* Get the danger */ p = borg_danger(y, x, 1, true, false); /* Increase bravery */ if (borg_skill[BI_MAXCLEVEL] == 50) fear = avoidance * 5 / 10; if (borg_skill[BI_MAXCLEVEL] != 50) fear = avoidance * 3 / 10; if (scaryguy_on_level) fear = avoidance * 2; if (unique_on_level && vault_on_level && borg_skill[BI_MAXCLEVEL] == 50) fear = avoidance * 3; if (scaryguy_on_level && borg_skill[BI_CLEVEL] <= 5) fear = avoidance * 3; if (goal_ignoring) fear = avoidance * 5; if (borg_t - borg_began > 5000) fear = avoidance * 25; if (borg_skill[BI_FOOD] == 0) fear = avoidance * 100; /* Normal in town */ if (borg_skill[BI_CLEVEL] == 0) fear = avoidance * 3 / 10; /* Dangerous grid */ if (p > fear) { /* Mark as icky */ borg_data_icky->data[y][x] = true; /* Ignore this grid */ continue; } } } /* Save the flow cost */ borg_data_cost->data[y][x] = n; /* Enqueue that entry */ borg_flow_x[flow_head] = x; borg_flow_y[flow_head] = y; /* Circular queue -- memorize head */ old_head = flow_head; /* Circular queue -- insert with wrap */ if (++flow_head == AUTO_FLOW_MAX) flow_head = 0; /* Circular queue -- handle overflow (badly) */ if (flow_head == flow_tail) flow_head = old_head; } } /* Forget the flow info */ flow_head = flow_tail = 0; } /* * Enqueue a fresh (legal) starting grid, if it is safe */ static void borg_flow_enqueue_grid(int y, int x) { int old_head; int fear = 0; int p; /* Avoid icky grids */ if (borg_data_icky->data[y][x]) return; /* Unknown */ if (!borg_data_know->data[y][x]) { /* Mark as known */ borg_data_know->data[y][x] = true; /** Mark dangerous grids as icky **/ /* Get 
the danger */ p = borg_danger(y, x, 1, true, false); /* Increase bravery */ if (borg_skill[BI_MAXCLEVEL] == 50) fear = avoidance * 5 / 10; if (borg_skill[BI_MAXCLEVEL] != 50) fear = avoidance * 3 / 10; if (scaryguy_on_level) fear = avoidance * 2; if (unique_on_level && vault_on_level && borg_skill[BI_MAXCLEVEL] == 50) fear = avoidance * 3; if (scaryguy_on_level && borg_skill[BI_CLEVEL] <= 5) fear = avoidance * 3; if (goal_ignoring) fear = avoidance * 5; if (borg_t - borg_began > 5000) fear = avoidance * 25; if (borg_skill[BI_FOOD] == 0) fear = avoidance * 100; /* Normal in town */ if (borg_skill[BI_CLEVEL] == 0) fear = avoidance * 3 / 10; /* Dangerous grid */ if ((p > fear) && !borg_desperate && !borg_lunal_mode && !borg_munchkin_mode && !borg_digging) { /* Icky */ borg_data_icky->data[y][x] = true; /* Avoid */ return; } } /* Only enqueue a grid once */ if (!borg_data_cost->data[y][x]) return; /* Save the flow cost (zero) */ borg_data_cost->data[y][x] = 0; /* Enqueue that entry */ borg_flow_y[flow_head] = y; borg_flow_x[flow_head] = x; /* Circular queue -- memorize head */ old_head = flow_head; /* Circular queue -- insert with wrap */ if (++flow_head == AUTO_FLOW_MAX) flow_head = 0; /* Circular queue -- handle overflow */ if (flow_head == flow_tail) flow_head = old_head; } /* Do a Stair-Flow. 
Look at how far away this grid is to my closest stair */ static int borg_flow_cost_stair(int y, int x, int b_stair) { int cost = 255; /* Clear the flow codes */ borg_flow_clear(); /* Paranoid */ if (b_stair == -1) return (0); /* Enqueue the player's grid */ borg_flow_enqueue_grid(track_less.y[b_stair], track_less.x[b_stair]); /* Spread, but do NOT optimize */ borg_flow_spread(250, false, false, false, b_stair, false); /* Distance from the grid to the stair */ cost = borg_data_cost->data[y][x]; return (cost); } /* * Do a "reverse" flow from the player outwards */ static void borg_flow_reverse(int depth, bool optimize, bool avoid, bool tunneling, int stair_idx, bool sneak) { /* Clear the flow codes */ borg_flow_clear(); /* Enqueue the player's grid */ borg_flow_enqueue_grid(c_y, c_x); /* Spread, but do NOT optimize */ borg_flow_spread(depth, optimize, avoid, tunneling, stair_idx, sneak); } /* * Attempt to induce WORD_OF_RECALL * artifact activations added throughout this code */ bool borg_recall(void) { /* Multiple "recall" fails */ if (!goal_recalling) { /* Try to "recall" */ if (borg_zap_rod(sv_rod_recall) || borg_activate_item(act_recall) || borg_spell_fail(WORD_OF_RECALL, 60) || borg_read_scroll(sv_scroll_word_of_recall)) { /* Do reset depth at certain times. 
*/ if (borg_skill[BI_CDEPTH] < borg_skill[BI_MAXDEPTH] && ((borg_skill[BI_MAXDEPTH] >= 60 && borg_skill[BI_CDEPTH] >= 40) || (borg_skill[BI_CLEVEL] < 48 && borg_skill[BI_CDEPTH] >= borg_skill[BI_MAXDEPTH] - 3) || (borg_skill[BI_CLEVEL] < 48 && borg_skill[BI_CDEPTH] >= 15 && borg_skill[BI_MAXDEPTH] - borg_skill[BI_CDEPTH] > 10))) { /* Special check on deep levels */ if (borg_skill[BI_CDEPTH] >= 80 && borg_skill[BI_CDEPTH] < 100 && /* Deep */ borg_race_death[546] != 0) /* Sauron is Dead */ { /* Do reset Depth */ borg_note("# Resetting recall depth."); borg_keypress('y'); } else if (goal_fleeing_munchkin == true) { /* Do not reset Depth */ borg_note("# Resetting recall depth during munchkin mode."); borg_keypress('y'); } else if (borg_skill[BI_CDEPTH] >= 100 && !borg_skill[BI_KING]) { /* Do reset Depth */ borg_note("# Not Resetting recall depth."); borg_keypress('n'); } else { /* Do reset Depth */ borg_note("# Resetting recall depth."); borg_keypress('y'); } } /* reset recall depth in dungeon? 
*/ else if (borg_skill[BI_CDEPTH] < borg_skill[BI_MAXDEPTH] && borg_skill[BI_CDEPTH] != 0) { /* Do not reset Depth */ borg_note("# Not resetting recall depth."); borg_keypress('n'); } borg_keypress(ESCAPE); /* Success */ return (true); } } /* Nothing */ return (false); } /* * Prevent starvation by any means possible */ static bool borg_eat_food_any(void) { int i; /* Scan the inventory for "normal" food */ for (i = 0; i < z_info->pack_size; i++) { borg_item* item = &borg_items[i]; /* Skip empty items */ if (!item->iqty) continue; /* Skip unknown food */ if (!item->kind) continue; /* Skip non-food */ if (item->tval != TV_FOOD) continue; /* Eat something of that type */ if (borg_eat_food(item->tval, item->sval)) return (true); } /* Scan the inventory for "okay" food */ for (i = 0; i < z_info->pack_size; i++) { borg_item* item = &borg_items[i]; /* Skip empty items */ if (!item->iqty) continue; /* Skip unknown food */ if (!item->kind) continue; /* Skip non-food */ if (item->tval != TV_FOOD && item->tval != TV_MUSHROOM) continue; /* Skip non-food */ if (!borg_obj_has_effect(item->kind, EF_NOURISH, 0) && !borg_obj_has_effect(item->kind, EF_NOURISH, 2) && !borg_obj_has_effect(item->kind, EF_NOURISH, 3)) continue; /* Eat something of that type */ if (borg_eat_food(item->tval, item->sval)) return (true); } /* * Try potions that can provide nutrition. First try ones that are * pure nutrition without additional effects. */ if (borg_quaff_potion(sv_potion_slime_mold)) return (true); /* * Then try those that, besides the nourishment, only have negative * effects. But only try if there's protection against the negative effect. 
*/ if (((borg_skill[BI_FRACT]) && (borg_quaff_potion(sv_potion_sleep) || borg_quaff_potion(sv_potion_slowness))) || ((borg_skill[BI_RBLIND]) && (borg_quaff_potion(sv_potion_blindness))) || ((borg_skill[BI_RCONF]) && (borg_quaff_potion(sv_potion_confusion)))) { return (true); } /* Consume in order, when hurting */ if ((borg_skill[BI_CURHP] < 4 || (borg_skill[BI_CURHP] <= borg_skill[BI_MAXHP])) && (borg_quaff_potion(sv_potion_cure_light) || borg_quaff_potion(sv_potion_cure_serious) || borg_quaff_potion(sv_potion_cure_critical) || borg_quaff_potion(sv_potion_healing))) { return (true); } /* Nothing */ return (false); } /* * Hack -- evaluate the likelihood of the borg getting surrounded * by a bunch of monsters. This is called from borg_danger() when * he looking for a strategic retreat. It is hopeful that the borg * will see that several monsters are approaching him and he may * become surrouned then die. This routine looks at near-by monsters * and determines the likelihood of him getting surrouned. */ static bool borg_surrounded(void) { borg_kill* kill; struct monster_race* r_ptr; int safe_grids = 8; int non_safe_grids = 0; int monsters = 0; int adjacent_monsters = 0; int x9, y9, ax, ay, d; int i; /* Evaluate the local monsters */ for (i = 1; i < borg_kills_nxt; i++) { kill = &borg_kills[i]; r_ptr = &r_info[kill->r_idx]; /* Skip dead monsters */ if (!kill->r_idx) continue; x9 = kill->x; y9 = kill->y; /* Distance components */ ax = (x9 > c_x) ? (x9 - c_x) : (c_x - x9); ay = (y9 > c_y) ? (y9 - c_y) : (c_y - y9); /* Distance */ d = MAX(ax, ay); /* if the monster is too far then skip it. 
*/ if (d > 3) continue; /* if he cant see me then forget it.*/ if (!borg_los(c_y, c_x, y9, x9)) continue; /* if asleep, don't consider this one */ if (!kill->awake) continue; /* Monsters with Pass Wall are dangerous, no escape from them */ if (rf_has(r_ptr->flags, RF_PASS_WALL)) continue; if (rf_has(r_ptr->flags, RF_KILL_WALL)) continue; /* Cant really run away from Breeders very well */ /* if (rf_has(r_ptr->flags, RF_MULTIPLY)) continue; */ /* Monsters who never move cant surround */ /* if (rf_has(r_ptr->flags, RF_NEVER_MOVE)) continue; */ /* keep track of monsters touching me */ if (d == 1) adjacent_monsters++; /* Add them up. */ monsters++; } /* Evaluate the Non Safe Grids, (walls, closed doors, traps, monsters) */ for (i = 0; i < 8; i++) { int x = c_x + ddx_ddd[i]; int y = c_y + ddy_ddd[i]; /* Access the grid */ borg_grid* ag = &borg_grids[y][x]; /* Bound check */ if (!square_in_bounds_fully(cave, loc(x, y))) continue; /* Skip walls/doors */ if (!borg_cave_floor_grid(ag)) non_safe_grids++; /* Skip unknown grids */ else if (ag->feat == FEAT_NONE) non_safe_grids++; /* Skip monster grids */ else if (ag->kill) non_safe_grids++; /* Mega-Hack -- skip stores XXX XXX XXX */ else if (feat_is_shop(ag->feat)) non_safe_grids++; /* Mega-Hack -- skip traps XXX XXX XXX */ if (ag->trap && !ag->glyph) non_safe_grids++; } /* Safe grids are decreased */ safe_grids = safe_grids - non_safe_grids; /* Am I in hallway? If so don't worry about it */ if (safe_grids == 1 && adjacent_monsters == 1) return (false); /* I am likely to get surrouned */ if (monsters > safe_grids) { borg_note(format("# Possibility of being surrounded (monsters/safegrids)(%d/%d)", monsters, safe_grids)); /* The borg can get trapped by continuing to flee * into a dead-end. So he needs to be able to trump this * routine. 
*/ if (goal_ignoring) { /* borg_note("# Ignoring the fact that I am surrounded."); * return (false); */ } else return (true); } /* Probably will not be surrouned */ return (false); } /* * Mega-Hack -- evaluate the "freedom" of the given location * * The theory is that often, two grids will have equal "danger", * but one will be "safer" than the other, perhaps because it * is closer to stairs, or because it is in a corridor, or has * some other characteristic that makes it "safer". * * Then, if the Borg is in danger, say, from a normal speed monster * which is standing next to him, he will know that walking away from * the monster is "pointless", because the monster will follow him, * but if the resulting location is "safer" for some reason, then * he will consider it. This will allow him to flee towards stairs * in the town, and perhaps towards corridors in the dungeon. * * This method is used in town to chase the stairs. * * XXX XXX XXX We should attempt to walk "around" buildings. */ static int borg_freedom(int y, int x) { int d, f = 0; /* Hack -- chase down stairs in town */ if (!borg_skill[BI_CDEPTH] && track_more.num) { /* Love the stairs! */ d = double_distance(y, x, track_more.y[0], track_more.x[0]); /* Proximity is good */ f += (1000 - d); /* Close proximity is great */ if (d < 4) f += (2000 - (d * 500)); } /* Hack -- chase Up Stairs in dungeon */ if (borg_skill[BI_CDEPTH] && track_less.num) { /* Love the stairs! */ d = double_distance(y, x, track_less.y[0], track_less.x[0]); /* Proximity is good */ f += (1000 - d); /* Close proximity is great */ if (d < 4) f += (2000 - (d * 500)); } /* Freedom */ return (f); } /* * Check a floor grid for "happy" status * * These grids are floor grids which contain stairs, or which * are non-corners in corridors, or which are directly adjacent * to pillars, or grids which we have stepped on before. * Stairs are good because they can be used to leave * the level. 
Corridors are good because you can back into them * to avoid groups of monsters and because they can be used for * escaping. Pillars are good because while standing next to a * pillar, you can walk "around" it in two different directions, * allowing you to retreat from a single normal monster forever. * Stepped on grids are good because they likely stem from an area * which has been cleared of monsters. */ static bool borg_happy_grid_bold(int y, int x) { int i; borg_grid* ag = &borg_grids[y][x]; /* Bounds Check */ if (y >= DUNGEON_HGT - 2 || y <= 2 || x >= DUNGEON_WID - 2 || x <= 2) return (false); /* Accept stairs */ if (ag->feat == FEAT_LESS) return (true); if (ag->feat == FEAT_MORE) return (true); if (ag->glyph) return (true); if (ag->feat == FEAT_LAVA && !borg_skill[BI_IFIRE]) return (false); /* Hack -- weak/dark is very unhappy */ if (borg_skill[BI_ISWEAK] || borg_skill[BI_CURLITE] == 0) return (false); /* Apply a control effect so that he does not get stuck in a loop */ if ((borg_t - borg_began) >= 2000) return (false); /* Case 1a: north-south corridor */ if (borg_cave_floor_bold(y - 1, x) && borg_cave_floor_bold(y + 1, x) && !borg_cave_floor_bold(y, x - 1) && !borg_cave_floor_bold(y, x + 1) && !borg_cave_floor_bold(y + 1, x - 1) && !borg_cave_floor_bold(y + 1, x + 1) && !borg_cave_floor_bold(y - 1, x - 1) && !borg_cave_floor_bold(y - 1, x + 1)) { /* Happy */ return (true); } /* Case 1b: east-west corridor */ if (borg_cave_floor_bold(y, x - 1) && borg_cave_floor_bold(y, x + 1) && !borg_cave_floor_bold(y - 1, x) && !borg_cave_floor_bold(y + 1, x) && !borg_cave_floor_bold(y + 1, x - 1) && !borg_cave_floor_bold(y + 1, x + 1) && !borg_cave_floor_bold(y - 1, x - 1) && !borg_cave_floor_bold(y - 1, x + 1)) { /* Happy */ return (true); } /* Case 1aa: north-south doorway */ if (borg_cave_floor_bold(y - 1, x) && borg_cave_floor_bold(y + 1, x) && !borg_cave_floor_bold(y, x - 1) && !borg_cave_floor_bold(y, x + 1)) { /* Happy */ return (true); } /* Case 1ba: east-west 
doorway */ if (borg_cave_floor_bold(y, x - 1) && borg_cave_floor_bold(y, x + 1) && !borg_cave_floor_bold(y - 1, x) && !borg_cave_floor_bold(y + 1, x)) { /* Happy */ return (true); } /* Case 2a: north pillar */ if (!borg_cave_floor_bold(y - 1, x) && borg_cave_floor_bold(y - 1, x - 1) && borg_cave_floor_bold(y - 1, x + 1) && borg_cave_floor_bold(y - 2, x)) { /* Happy */ return (true); } /* Case 2b: south pillar */ if (!borg_cave_floor_bold(y + 1, x) && borg_cave_floor_bold(y + 1, x - 1) && borg_cave_floor_bold(y + 1, x + 1) && borg_cave_floor_bold(y + 2, x)) { /* Happy */ return (true); } /* Case 2c: east pillar */ if (!borg_cave_floor_bold(y, x + 1) && borg_cave_floor_bold(y - 1, x + 1) && borg_cave_floor_bold(y + 1, x + 1) && borg_cave_floor_bold(y, x + 2)) { /* Happy */ return (true); } /* Case 2d: west pillar */ if (!borg_cave_floor_bold(y, x - 1) && borg_cave_floor_bold(y - 1, x - 1) && borg_cave_floor_bold(y + 1, x - 1) && borg_cave_floor_bold(y, x - 2)) { /* Happy */ return (true); } /* check for grids that have been stepped on before */ for (i = 0; i < track_step.num; i++) { /* Enqueue the grid */ if ((track_step.y[i] == y) && (track_step.x[i] == x)) { /* Recent step is good */ if (i < 25) { return (true); } } } /* Not happy */ return (false); } /* This will look down a hallway and possibly light it up using * the Light Beam mage spell. This spell is mostly used when * the borg is moving through the dungeon under boosted bravery. * This will allow him to "see" if anyone is there. * * It might also come in handy if he's in a hallway and gets shot, or * if resting in a hallway. He may want to cast it to make * sure no previously unknown monsters are in the hall. * NOTE: ESP will alter the value of this spell. * * Borg has a problem when not on map centering mode and casting the beam * repeatedly, down or up when at the edge of a panel. 
*/ bool borg_LIGHT_beam(bool simulation) { int dir = 5; bool spell_ok = false; int i; bool blocked = false; borg_grid* ag = &borg_grids[c_y][c_x]; /* Hack -- weak/dark is very unhappy */ if (borg_skill[BI_ISWEAK]) return (false); /* Require the abilitdy */ if (borg_spell_okay_fail(SPEAR_OF_LIGHT, 20) || (-1 != borg_slot(TV_WAND, sv_wand_light) && borg_items[borg_slot(TV_WAND, sv_wand_light)].pval) || borg_equips_rod(sv_rod_light)) spell_ok = true; /*** North Direction Test***/ /* Quick Boundary check */ if (c_y - borg_skill[BI_CURLITE] - 1 > 0) { /* Look just beyond my light */ ag = &borg_grids[c_y - borg_skill[BI_CURLITE] - 1][c_x]; /* Must be on the panel */ if (panel_contains(c_y - borg_skill[BI_CURLITE] - 1, c_x)) { /* Check each grid in our light radius along the course */ for (i = 0; i <= borg_skill[BI_CURLITE]; i++) { if (borg_cave_floor_bold(c_y - i, c_x) && !borg_cave_floor_bold(c_y - borg_skill[BI_CURLITE] - 1, c_x) && ag->feat < FEAT_OPEN && blocked == false) { /* note the direction */ dir = 8; } else { dir = 5; blocked = true; } } } } /*** South Direction Test***/ /* Quick Boundary check */ if (c_y + borg_skill[BI_CURLITE] + 1 < AUTO_MAX_Y && dir == 5) { /* Look just beyond my light */ ag = &borg_grids[c_y + borg_skill[BI_CURLITE] + 1][c_x]; /* Must be on the panel */ if (panel_contains(c_y + borg_skill[BI_CURLITE] + 1, c_x)) { /* Check each grid in our light radius along the course */ for (i = 0; i <= borg_skill[BI_CURLITE]; i++) { if (borg_cave_floor_bold(c_y + i, c_x) && /* all floors */ !borg_cave_floor_bold(c_y + borg_skill[BI_CURLITE] + 1, c_x) && ag->feat < FEAT_OPEN && blocked == false) { /* note the direction */ dir = 2; } else { dir = 5; blocked = true; } } } } /*** East Direction Test***/ /* Quick Boundary check */ if (c_x + borg_skill[BI_CURLITE] + 1 < AUTO_MAX_X && dir == 5) { /* Look just beyond my light */ ag = &borg_grids[c_y][c_x + borg_skill[BI_CURLITE] + 1]; /* Must be on the panel */ if (panel_contains(c_y, c_x + 
borg_skill[BI_CURLITE] + 1)) { /* Check each grid in our light radius along the course */ for (i = 0; i <= borg_skill[BI_CURLITE]; i++) { if (borg_cave_floor_bold(c_y, c_x + i) && /* all floors */ !borg_cave_floor_bold(c_y, c_x + borg_skill[BI_CURLITE] + 1) && ag->feat < FEAT_OPEN && blocked == false) { /* note the direction */ dir = 6; } else { dir = 5; blocked = true; } } } } /*** West Direction Test***/ /* Quick Boundary check */ if (c_x - borg_skill[BI_CURLITE] - 1 > 0 && dir == 5) { /* Look just beyond my light */ ag = &borg_grids[c_y][c_x - borg_skill[BI_CURLITE] - 1]; /* Must be on the panel */ if (panel_contains(c_y, c_x - borg_skill[BI_CURLITE] - 1)) { /* Check each grid in our light radius along the course */ for (i = 1; i <= borg_skill[BI_CURLITE]; i++) { /* Verify that there are no blockers in my light radius and * the 1st grid beyond my light is not a floor nor a blocker */ if (borg_cave_floor_bold(c_y, c_x - i) && /* all see through */ !borg_cave_floor_bold(c_y, c_x - borg_skill[BI_CURLITE] - 1) && ag->feat < FEAT_OPEN && blocked == false) { /* note the direction */ dir = 4; } else { dir = 5; blocked = true; } } } } /* Dont do it if on the edge of shifting the panel. */ if (dir == 5 || spell_ok == false || blocked == true || (dir == 2 && (c_y == 18 || c_y == 19 || c_y == 29 || c_y == 30 || c_y == 40 || c_y == 41 || c_y == 51 || c_y == 52)) || (dir == 8 && (c_y == 13 || c_y == 14 || c_y == 24 || c_y == 25 || c_y == 35 || c_y == 36 || c_y == 46 || c_y == 47))) return (false); /* simulation */ if (simulation) return (true); /* cast the light beam */ if (borg_spell_fail(SPEAR_OF_LIGHT, 20) || borg_zap_rod(sv_rod_light) || borg_aim_wand(sv_wand_light)) { /* apply the direction */ borg_keypress(I2D(dir)); borg_note("# Illuminating this hallway"); return(true); } /* cant do it */ return (false); } /* * Scan the monster lists for certain types of monster that we * should be concerned over. * This only works for monsters we know about. 
 * If one of the
 * monsters around is misidentified then it may be a unique
 * and we wouldn't know. Special consideration is given to Morgoth.
 *
 * Side effects: sets the globals scaryguy_on_level, unique_on_level,
 * borg_fighting_summoner, borg_fighting_unique, borg_fighting_evil_unique,
 * borg_kills_summoner and may clear borg_needs_searching.
 *
 * dist - distance cutoff; monsters beyond it are ignored for the
 *        unique/summoner flags while in the dungeon (scary-guy checks
 *        are level-wide and happen before the distance cut).
 */
void borg_near_monster_type(int dist)
{
    borg_kill* kill;
    struct monster_race* r_ptr;

    int x9, y9, ax, ay, d;
    int i;
    int breeder_count = 0;

    /* reset the borg flags */
    borg_fighting_summoner = false;
    borg_fighting_unique = 0;
    borg_fighting_evil_unique = false;
    borg_kills_summoner = -1;

    /* Scan the monsters */
    for (i = 1; i < borg_kills_nxt; i++) {
        kill = &borg_kills[i];
        r_ptr = &r_info[kill->r_idx];

        /* Skip dead monsters */
        if (!kill->r_idx) continue;

        /* Count breeders */
        if (rf_has(r_ptr->flags, RF_MULTIPLY)) breeder_count++;

        /*** Scan for Scary Guys ***/

        /* Do ScaryGuys now, before distance checks. We are
         * Looking for scary guys on level, not scary guys
         * near me */

        /* run from certain scaries */
        if (borg_skill[BI_CLEVEL] <= 5 && (strstr(r_ptr->name, "Squint")))
            scaryguy_on_level = true;

        /* Mage and priest are extra fearful */
        if (borg_skill[BI_CLEVEL] <= 6
            && (borg_class == CLASS_MAGE || borg_class == CLASS_PRIEST)
            && (strstr(r_ptr->name, "Squint")))
            scaryguy_on_level = true;

        /* run from certain dungeon scaries */
        if (borg_skill[BI_CLEVEL] <= 5
            && (strstr(r_ptr->name, "Grip") || strstr(r_ptr->name, "Fang")
                || strstr(r_ptr->name, "Small kobold")))
            scaryguy_on_level = true;

        /* run from certain scaries */
        if (borg_skill[BI_CLEVEL] <= 8
            && (strstr(r_ptr->name, "Novice") || strstr(r_ptr->name, "Kobold")
                || strstr(r_ptr->name, "Kobold archer")
                || strstr(r_ptr->name, "Jackal")
                || strstr(r_ptr->name, "Shrieker")
                || strstr(r_ptr->name, "Farmer Maggot")
                || strstr(r_ptr->name, "Filthy street urchin")
                || strstr(r_ptr->name, "Battle-scarred veteran")
                || strstr(r_ptr->name, "Mean-looking mercenary")))
            scaryguy_on_level = true;

        /* Worm masses only scary once they have bred enough to outnumber
         * our character level */
        if (borg_skill[BI_CLEVEL] <= 15
            && (strstr(r_ptr->name, "Bullr")
                || ((strstr(r_ptr->name, "Giant white mouse")
                        || strstr(r_ptr->name, "White worm mass")
                        || strstr(r_ptr->name, "Green worm mass"))
                    && breeder_count >= borg_skill[BI_CLEVEL])))
            scaryguy_on_level = true;

        if (borg_skill[BI_CLEVEL] <= 20
            && (strstr(r_ptr->name, "Cave spider")
                || strstr(r_ptr->name, "Pink naga")
                || strstr(r_ptr->name, "Giant pink frog")
                || strstr(r_ptr->name, "Radiation eye")
                || (strstr(r_ptr->name, "Yellow worm mass")
                    && breeder_count >= borg_skill[BI_CLEVEL])))
            scaryguy_on_level = true;

        if (borg_skill[BI_CLEVEL] < 45
            && (strstr(r_ptr->name, "Gravity") || strstr(r_ptr->name, "Inertia")
                || strstr(r_ptr->name, "Ancient")
                || strstr(r_ptr->name, "Beorn")
                || strstr(r_ptr->name, "Dread") /* Appear in Groups */))
            scaryguy_on_level = true;

        /* Nether breath is bad */
        if (!borg_skill[BI_SRNTHR]
            && (strstr(r_ptr->name, "Azriel")
                || strstr(r_ptr->name, "Dracolich")
                || strstr(r_ptr->name, "Dracolisk")))
            scaryguy_on_level = true;

        /* Blindness is really bad */
        if ((!borg_skill[BI_SRBLIND])
            && ((strstr(r_ptr->name, "Light hound") && !borg_skill[BI_SRLITE])
                || (strstr(r_ptr->name, "Dark hound")
                    && !borg_skill[BI_SRDARK])))
            scaryguy_on_level = true;

        /* Chaos and Confusion are really bad */
        if ((!borg_skill[BI_SRKAOS] && !borg_skill[BI_SRCONF])
            && (strstr(r_ptr->name, "Chaos")))
            scaryguy_on_level = true;
        if (!borg_skill[BI_SRCONF]
            && (strstr(r_ptr->name, "Pukelman")
                || strstr(r_ptr->name, "Nightmare")))
            scaryguy_on_level = true;

        /* Poison is really Bad */
        if (!borg_skill[BI_RPOIS] && /* Note the RPois not SRPois */
            (strstr(r_ptr->name, "Drolem")))
            scaryguy_on_level = true;

        /* Now do distance considerations */
        x9 = kill->x;
        y9 = kill->y;

        /* Distance components */
        ax = (x9 > c_x) ? (x9 - c_x) : (c_x - x9);
        ay = (y9 > c_y) ? (y9 - c_y) : (c_y - y9);

        /* Distance (Chebyshev metric) */
        d = MAX(ax, ay);

        /* if the guy is too far then skip it unless in town. */
        if (d > dist && borg_skill[BI_CDEPTH]) continue;

        /* Special check here for Searching since we are
         * already scanning the monster list */
        if (borg_needs_searching) {
            if (d < 7) borg_needs_searching = false;
        }

        /*** Scan for Uniques ***/

        /* this is a unique. */
        if (rf_has(r_ptr->flags, RF_UNIQUE)) {
            /* Set a flag for use with certain types of spells */
            unique_on_level = kill->r_idx;

            /* return 1 if not Morgy, +10 if it is Morgy or Sauron */
            if (rf_has(r_ptr->flags, RF_QUESTOR)) {
                borg_fighting_unique += 10;
            }

            /* regular unique */
            borg_fighting_unique++;
            /* Note that fighting a Questor would result in a 11 value */

            if (rf_has(r_ptr->flags, RF_EVIL)) borg_fighting_evil_unique = true;
        }

        /*** Scan for Summoners ***/
        if ((rsf_has(r_ptr->spell_flags, RSF_S_KIN))
            || (rsf_has(r_ptr->spell_flags, RSF_S_HI_DEMON))
            || (rsf_has(r_ptr->spell_flags, RSF_S_MONSTER))
            || (rsf_has(r_ptr->spell_flags, RSF_S_MONSTERS))
            || (rsf_has(r_ptr->spell_flags, RSF_S_ANIMAL))
            || (rsf_has(r_ptr->spell_flags, RSF_S_SPIDER))
            || (rsf_has(r_ptr->spell_flags, RSF_S_HOUND))
            || (rsf_has(r_ptr->spell_flags, RSF_S_HYDRA))
            || (rsf_has(r_ptr->spell_flags, RSF_S_AINU))
            || (rsf_has(r_ptr->spell_flags, RSF_S_DEMON))
            || (rsf_has(r_ptr->spell_flags, RSF_S_UNDEAD))
            || (rsf_has(r_ptr->spell_flags, RSF_S_DRAGON))
            || (rsf_has(r_ptr->spell_flags, RSF_S_HI_DRAGON))
            || (rsf_has(r_ptr->spell_flags, RSF_S_HI_UNDEAD))
            || (rsf_has(r_ptr->spell_flags, RSF_S_WRAITH))
            || (rsf_has(r_ptr->spell_flags, RSF_S_UNIQUE))) {
            /* mark the flag */
            borg_fighting_summoner = true;

            /* recheck the distance to see if close
             * and mark the index for as-corridor */
            if (d < 8) {
                borg_kills_summoner = i;
            }
        }
    }
}

#ifdef INCLUDE_UNUSED_FUNCTIONS
/*
 * Estimate the value of a melee thrust against monster ag_kill at (x,y):
 * expected damage plus a bonus for the danger the monster poses to us.
 * Returns 0 when the attack is not worthwhile.
 */
static int borg_damage_and_power(int ag_kill, int x, int y)
{
    borg_kill* kill;
    int d, p;

    /* Calculate "average" damage */
    d = borg_thrust_damage_one(ag_kill);

    /* No damage */
    if (d <= 0) return 0;

    /* Obtain the monster */
    kill = &borg_kills[ag_kill];

    /* Hack -- avoid waking most "hard" sleeping monsters */
    if (!kill->awake && (d <= kill->power) && !borg_munchkin_mode) {
        /* Calculate danger */
        p = borg_danger_aux(y, x, 1, ag_kill, true, true);

        if (p > avoidance * 2) return 0;
    }

    /* Hack -- ignore sleeping town monsters */
    if (!borg_skill[BI_CDEPTH] && !kill->awake) return 0;

    /* Calculate "danger" to player */
    p = borg_danger_aux(c_y, c_x, 2, ag_kill, true, true);

    /* Reduce "bonus" of partial kills when higher level */
    if (d <= kill->power && borg_skill[BI_MAXCLEVEL] > 15) p = p / 10;

    /* Add the danger-bonus to the damage */
    d += p;

    return d;
}
#endif

/*
 * Help determine if PHASE_DOOR seems like a good idea.
 *
 * Samples 100 random phase-door destinations; each dangerous (or unknown,
 * when weak) landing counts against us.
 *
 * emergency - how many bad samples (out of 100) we will tolerate.
 * turns     - how many game turns of danger to project at each landing.
 */
bool borg_caution_phase(int emergency, int turns)
{
    int n, k, i, d, x, y, p;

    int dis = 10;    /* phase door range */
    int min = dis / 2;

    borg_grid* ag = &borg_grids[c_y][c_x];

    /* must have the ability */
    if (!borg_skill[BI_APHASE]) return (false);

    /* Simulate 100 attempts */
    for (n = k = 0; k < 100; k++) {
        /* Pick a location */
        for (i = 0; i < 100; i++) {
            /* Pick a (possibly illegal) location */
            while (1) {
                y = rand_spread(c_y, dis);
                x = rand_spread(c_x, dis);
                d = borg_distance(c_y, c_x, y, x);
                if ((d >= min) && (d <= dis)) break;
            }

            /* Ignore illegal locations */
            if ((y <= 0) || (y >= AUTO_MAX_Y - 1)) continue;
            if ((x <= 0) || (x >= AUTO_MAX_X - 1)) continue;

            /* Access */
            ag = &borg_grids[y][x];

            /* Skip unknown grids */
            if (ag->feat == FEAT_NONE) continue;

            /* Skip walls */
            if (!borg_cave_floor_bold(y, x)) continue;

            /* Skip monsters */
            if (ag->kill) continue;

            /* Stop looking */
            break;
        }

        /* If low level, unknown squares are scary.
         * (ag still points at the last grid examined above.) */
        if (ag->feat == FEAT_NONE && borg_skill[BI_MAXHP] < 30) {
            n++;
            continue;
        }

        /* No location */
        /* in the real code it would keep trying but here we should */
        /* assume that there is unknown spots that you would be able */
        /* to go but may be dangerous. */
        if (i >= 100) {
            n++;
            continue;
        }

        /* Examine */
        p = borg_danger(y, x, turns, true, false);

        /* if *very* scary, do not allow jumps at all */
        if (p > borg_skill[BI_CURHP]) n++;
    }

    /* Too much danger */
    /* in an emergency try with extra danger allowed */
    if (n > emergency) {
        borg_note(format("# No Phase. scary squares: %d", n));
        return (false);
    } else
        borg_note(format("# Safe to Phase. scary squares: %d", n));

    /* Okay */
    return (true);
}

/*
 * Help determine if PHASE_DOOR with Shoot N Scoot seems like
 * a good idea.
 * Good Idea on two levels:
 * 1. We are the right class, we got some good ranged weapons
 * 2. The possible landing grids are ok.
 * Almost a copy of the borg_caution_phase above.
 * The emergency is the number of dangerous grids out of 100
 * that we tolerate.  If we have 80, then we accept the risk
 * of landing on a grid that is 80% likely to be bad.  A low
 * number, like 20, means that we are less like to risk the
 * phase door and we require more of the possible grids to be
 * safe.
 *
 * The pattern of ShootN'Scoot works like this:
 * 1. Shoot monster that is far away.
 * 2. Monsters walks closer and closer each turn
 * 3. Borg shoots monster each step it takes as it approaches.
 * 4. Monster gets within 1 grid of the borg.
 * 5. Borg phases away.
 * 6. Go back to #1
 */
static bool borg_shoot_scoot_safe(int emergency, int turns, int b_p)
{
    int n, k, i, d, x, y, p, u;

    int dis = 10;
    int min = dis / 2;

    bool adjacent_monster = false;

    borg_grid* ag;
    borg_kill* kill;
    struct monster_race* r_ptr;

    /* no need if high level in town */
    if (borg_skill[BI_CLEVEL] >= 8 && borg_skill[BI_CDEPTH] == 0)
        return (false);

    /* must have the ability */
    if (!borg_skill[BI_APHASE]) return (false);

    /* Not if No Light */
    if (!borg_skill[BI_CURLITE]) return (false);

    /* Cheat the floor grid */
    /* Not if in a vault since it throws us out of the vault */
    if (square_isvault(cave, loc(c_x, c_y))) return (false);

    /*** Need Missiles or cheap spells ***/

    /* classes that are mainly spellcaster */
    if (player->class->magic.num_books > 3) {
        /* Low mana */
        if (borg_skill[BI_CLEVEL] >= 45 && borg_skill[BI_CURSP] < 15)
            return (false);

        /* Low mana, low level, generally OK */
        if (borg_skill[BI_CLEVEL] < 45 && borg_skill[BI_CURSP] < 5)
            return (false);
    } else /* Other classes need some missiles */
    {
        if (borg_skill[BI_AMISSILES] < 5 || borg_skill[BI_CLEVEL] >= 45)
            return (false);
    }

    /* Not if I am
in a safe spot for killing special monsters */
    if (borg_morgoth_position || borg_as_position) return (false);

    /* scan the adjacent grids for an awake monster */
    for (i = 0; i < 8; i++) {
        /* Grid in that direction */
        x = c_x + ddx_ddd[i];
        y = c_y + ddy_ddd[i];

        /* Access the grid */
        ag = &borg_grids[y][x];

        /* Obtain the monster */
        kill = &borg_kills[ag->kill];
        r_ptr = &r_info[kill->r_idx];

        /* If a qualifying monster is adjacent to me. */
        if ((ag->kill && kill->awake) && !(rf_has(r_ptr->flags, RF_NEVER_MOVE))
            && !(rf_has(r_ptr->flags, RF_PASS_WALL))
            && !(rf_has(r_ptr->flags, RF_KILL_WALL))
            && (kill->power >= borg_skill[BI_CLEVEL])) {
            /* Spell casters shoot at everything */
            if (borg_spell_okay(MAGIC_MISSILE)) {
                adjacent_monster = true;
            } else if (borg_spell_okay(ORB_OF_DRAINING)) {
                adjacent_monster = true;
            } else if (borg_spell_okay(NETHER_BOLT)) {
                adjacent_monster = true;
            }

            /* All other borgs need to make sure he would shoot.
             * In an effort to conserve missiles, the borg will
             * not shoot at certain types of monsters.  That list
             * is defined in borg_launch_damage_one().
             *
             * We need this aforementioned list to match the one
             * following.  Otherwise Rogues and Warriors will
             * burn up Phases as he scoots away but never fire
             * the missiles.  That totally defeats the purpose
             * of this routine.
             *
             * The following criteria are exactly the same as the
             * list in borg_launch_damage_one()
             */
            else if ((borg_danger_aux(kill->y, kill->x, 1, i, true, false)
                         > avoidance * 3 / 10)
                || ((r_ptr->friends
                        || r_ptr->friends_base) /* monster has friends*/
                    && kill->level
                        >= borg_skill[BI_CLEVEL] - 5 /* close levels */)
                || (kill->ranged_attack /* monster has a ranged attack */)
                || (rf_has(r_ptr->flags, RF_UNIQUE))
                || (rf_has(r_ptr->flags, RF_MULTIPLY))
                || (borg_skill[BI_CLEVEL] <= 5 /* still very weak */)) {
                adjacent_monster = true;
            }
        }
    }

    /* if No Adjacent_monster no need for it */
    if (adjacent_monster == false) return (false);

    /* Simulate 100 attempts */
    for (n = k = 0; k < 100; k++) {
        /* Pick a location */
        for (i = 0; i < 100; i++) {
            /* Pick a (possibly illegal) location */
            while (1) {
                y = rand_spread(c_y, dis);
                x = rand_spread(c_x, dis);
                d = borg_distance(c_y, c_x, y, x);
                if ((d >= min) && (d <= dis)) break;
            }

            /* Ignore illegal locations */
            if ((y <= 0) || (y >= AUTO_MAX_Y - 2)) continue;
            if ((x <= 0) || (x >= AUTO_MAX_X - 2)) continue;

            /* Access */
            ag = &borg_grids[y][x];

            /* Skip unknown grids */
            if (ag->feat == FEAT_NONE) continue;

            /* Skip walls */
            if (!borg_cave_floor_bold(y, x)) continue;

            /* Skip monsters */
            if (ag->kill) continue;

            /* Stop looking.  Really, the game would keep
             * looking for a grid.  The borg could check
             * all the known grids but I don't think that
             * is not a good idea, especially if the area is
             * not fully explored.
             */
            break;
        }

        /* No location */
        /* In the real code it would keep trying but here we should */
        /* assume that there is unknown spots that you would be able */
        /* to go but we define it as dangerous. */
        if (i >= 100) {
            n++;
            continue;
        }

        /* Examine danger of that grid */
        p = borg_danger(y, x, turns, true, false);

        /* if more scary than my current one, do not allow jumps at all */
        if (p > b_p) {
            n++;
            continue;
        }

        /* Should not land next to a monster either.
         * Scan the adjacent grids for a monster.
         * Reuse the adjacent_monster variable.
         */
        for (u = 0; u < 8; u++) {
            /* Access the grid */
            ag = &borg_grids[y + ddy_ddd[u]][x + ddx_ddd[u]];

            /* Obtain the monster */
            kill = &borg_kills[ag->kill];

            /* If monster adjacent to that grid... */
            if (ag->kill && kill->awake) n++;
        }
    }

    /* Too much danger */
    /* in an emergency try with extra danger allowed */
    if (n > emergency) {
        borg_note(format("# No Shoot'N'Scoot. scary squares: %d/100", n));
        return (false);
    } else
        borg_note(format("# Safe to Shoot'N'Scoot. scary squares: %d/100", n));

    /* Okay */
    return (true);
}

/*
 * Help determine if "Teleport" seems like a good idea.
 * Same sampling scheme as borg_caution_phase() but with the full
 * teleport range (100) and a detection-aware test for unknown grids.
 */
static bool borg_caution_teleport(int emergency, int turns)
{
    int n, k, i, d, x, y, p;

    int dis = 100;    /* teleport range */
    int min = dis / 2;
    int q_x, q_y;

    borg_grid* ag = &borg_grids[c_y][c_x];

    /* Extract panel */
    q_x = w_x / borg_panel_wid();
    q_y = w_y / borg_panel_hgt();

    /* must have the ability */
    if (!borg_skill[BI_ATELEPORT] || !borg_skill[BI_AESCAPE]) return (false);

    /* Simulate 100 attempts */
    for (n = k = 0; k < 100; k++) {
        /* Pick a location */
        for (i = 0; i < 100; i++) {
            /* Pick a (possibly illegal) location */
            while (1) {
                y = rand_spread(c_y, dis);
                x = rand_spread(c_x, dis);
                d = borg_distance(c_y, c_x, y, x);
                if ((d >= min) && (d <= dis)) break;
            }

            /* Ignore illegal locations */
            if ((y <= 0) || (y >= AUTO_MAX_Y - 1)) continue;
            if ((x <= 0) || (x >= AUTO_MAX_X - 1)) continue;

            /* Access */
            ag = &borg_grids[y][x];

            /* Skip unknown grids if explored, or been on level for a while,
             * otherwise, consider ok */
            if (ag->feat == FEAT_NONE
                && ((borg_detect_wall[q_y + 0][q_x + 0] == true
                        && borg_detect_wall[q_y + 0][q_x + 1] == true
                        && borg_detect_wall[q_y + 1][q_x + 0] == true
                        && borg_detect_wall[q_y + 1][q_x + 1] == true)
                    || borg_t > 2000))
                continue;

            /* Skip walls */
            if (!borg_cave_floor_bold(y, x)) continue;

            /* Skip monsters */
            if (ag->kill) continue;

            /* Stop looking */
            break;
        }

        /* If low level, unknown squares are scary */
        if (ag->feat == FEAT_NONE && borg_skill[BI_MAXHP] < 30) {
            n++;
            continue;
        }

        /* No location */
        /*
 in the real code it would keep trying but here we should */
        /* assume that there is unknown spots that you would be able */
        /* to go but may be dangerous. */
        if (i >= 100) {
            n++;
            continue;
        }

        /* Examine */
        p = borg_danger(y, x, turns, true, false);

        /* if *very* scary, do not allow jumps at all */
        if (p > borg_skill[BI_CURHP]) n++;
    }

    /* Too much danger */
    /* in an emergency try with extra danger allowed */
    if (n > emergency) {
        borg_note(format("# No Teleport. scary squares: %d", n));
        return (false);
    }

    /* Okay */
    return (true);
}

/*
 * Hack -- If the borg is standing on a stair and is in some danger, just
 * leave the level.
 * No need to hang around on that level, try conserving the teleport scrolls
 */
static bool borg_escape_stair(void)
{
    /* Current grid */
    borg_grid* ag = &borg_grids[c_y][c_x];

    /* Usable stairs */
    if (ag->feat == FEAT_LESS) {
        /* Take the stairs (after '<' we arrive standing on a down stair) */
        borg_on_dnstairs = true;
        borg_note("# Escaping level via stairs.");
        borg_keypress('<');

        /* Success */
        return (true);
    }

    return (false);
}

/*
 * Check whether teleportation is currently permitted at all
 * (arena level, no-teleport grid, or no-teleport curse forbid it).
 */
bool borg_allow_teleport(void)
{
    /* No teleporting in arena levels */
    if (player->upkeep->arena_level) return false;

    /* Check for a no teleport grid */
    if (square_isno_teleport(cave, loc(c_x, c_y))) return false;

    /* Check for a no teleport curse */
    if (borg_skill[BI_CRSNOTEL]) return false;

    return true;
}

/* short range teleport + pain */
bool borg_shadow_shift(int allow_fail)
{
    /* disallow if hp too low (the shift itself hurts) */
    if (borg_skill[BI_CURHP] < 12) return (false);

    return borg_spell_fail(SHADOW_SHIFT, allow_fail);
}

/*
 * Cast Dimension Door at the least dangerous grid within range.
 * allow_fail - maximum acceptable spell failure percentage.
 * Returns true if the spell was cast.
 */
bool borg_dimension_door(int allow_fail)
{
    int x_off, y_off;
    int t_x, t_y;
    /* NOTE(review): best_t_x/best_t_y look uninitialized but every read is
     * guarded by the best_d improvement check below -- confirm before
     * silencing any compiler warning by initializing to c_x/c_y. */
    int best_t_x, best_t_y;
    int d, best_d = 0;
    struct loc target;

    /* for now keep the range at under 50, for performance */
    int range = 50;

    /* Require ability (right now) */
    if (!borg_spell_okay_fail(DIMENSION_DOOR, allow_fail)) return (0);

    /* if we are attacking, calculate gains, but if this is just a teleport */
    /* the current danger is the starting point */
    best_d = borg_fear_region[c_y][c_x];

    /* Pick
a location */
    for (x_off = range * -1; x_off < range; x_off++) {
        for (y_off = range * -1; y_off < range; y_off++) {
            t_x = c_x + x_off;
            t_y = c_y + y_off;
            if (t_x < 0 || t_y < 0) continue;
            target = loc(c_x + x_off, c_y + y_off);
            if (!square_in_bounds_fully(cave, target)) continue;
            d = borg_danger(t_y, t_x, 2, true, false);
            if (d < best_d) {
                best_d = d;
                best_t_x = t_x;
                best_t_y = t_y;
            }
        }
    }

    /* Only cast if some grid is strictly safer than where we stand */
    if (best_d < borg_fear_region[c_y][c_x]) {
        borg_target(best_t_y, best_t_x);
        borg_spell(DIMENSION_DOOR);

        /* pick target */
        borg_keypress('5');

        return true;
    }

    return false;
}

/*
 * Try to phase door or teleport
 * b_q is the danger of the least dangerous square around us.
 */
static bool borg_escape(int b_q)
{
    int risky_boost = 0;
    int j;
    int glyphs = 0;

    borg_grid* ag;

    /* only escape with spell if fail is low */
    int allow_fail = 25;
    int sv_mana;

    /* if very healthy, allow extra fail */
    if (((borg_skill[BI_CURHP] * 100) / borg_skill[BI_MAXHP]) > 70)
        allow_fail = 10;

    /* compromised, get out of the fight */
    if (borg_skill[BI_ISHEAVYSTUN]) allow_fail = 35;

    /* for emergencies */
    sv_mana = borg_skill[BI_CURSP];

    /* Borgs who are bleeding to death or dying of poison may sometimes
     * phase around the last two hit points right before they enter a
     * shop.  He knows to make a bee-line for the temple but the danger
     * trips this routine.  So we must bypass this routine for some
     * particular circumstances.
 */
    if (!borg_skill[BI_CDEPTH]
        && (borg_skill[BI_ISPOISONED] || borg_skill[BI_ISWEAK]
            || borg_skill[BI_ISCUT]))
        return (false);

    /* Borgs who are in a sea of runes or trying to build one
     * and mostly healthy stay put */
    if ((borg_skill[BI_CDEPTH] == 100)
        && borg_skill[BI_CURHP] >= (borg_skill[BI_MAXHP] * 5 / 10)) {
        /* In a sea of runes */
        if (borg_morgoth_position) return (false);

        /* Scan neighbors */
        for (j = 0; j < 8; j++) {
            int y = c_y + ddy_ddd[j];
            int x = c_x + ddx_ddd[j];

            /* Get the grid */
            ag = &borg_grids[y][x];

            /* Skip unknown grids (important) */
            if (ag->glyph) glyphs++;
        }

        /* Touching at least 3 glyphs */
        if (glyphs >= 3) return (false);
    }

    /* Hack -- If the borg is weak (no food, starving) on depth 1 and he has
     * no idea where the stairs may be, run the risk of diving deeper against
     * the benefit of rising to town. */
    if (borg_skill[BI_ISWEAK] && borg_skill[BI_CDEPTH] == 1) {
        if (borg_read_scroll(sv_scroll_teleport_level)) {
            borg_note("# Attempting to get to town immediately");
            return (true);
        }
    }

    /* Risky borgs are more likely to stay in a fight */
    if (borg_cfg[BORG_PLAYS_RISKY]) risky_boost = 3;

    /* 1. really scary, I'm about to die */
    /* Try an emergency teleport, or phase door as last resort */
    if (borg_skill[BI_ISHEAVYSTUN]
        || (b_q > avoidance * (45 + risky_boost) / 10)
        || ((b_q > avoidance * (40 + risky_boost) / 10)
            && borg_fighting_unique >= 10 && borg_skill[BI_CDEPTH] == 100
            && borg_skill[BI_CURHP] < 600)
        || ((b_q > avoidance * (30 + risky_boost) / 10)
            && borg_fighting_unique >= 10 && borg_skill[BI_CDEPTH] == 99
            && borg_skill[BI_CURHP] < 600)
        || ((b_q > avoidance * (25 + risky_boost) / 10)
            && borg_fighting_unique >= 1 && borg_fighting_unique <= 8
            && borg_skill[BI_CDEPTH] >= 95 && borg_skill[BI_CURHP] < 550)
        || ((b_q > avoidance * (17 + risky_boost) / 10)
            && borg_fighting_unique >= 1 && borg_fighting_unique <= 8
            && borg_skill[BI_CDEPTH] < 95)
        || ((b_q > avoidance * (15 + risky_boost) / 10)
            && !borg_fighting_unique)) {
        int tmp_allow_fail = 15;

        if (borg_escape_stair()
            || (borg_allow_teleport()
                && (borg_dimension_door(tmp_allow_fail - 10)
                    || borg_spell_fail(TELEPORT_SELF, tmp_allow_fail - 10)
                    || borg_spell_fail(PORTAL, tmp_allow_fail - 10)
                    || borg_shadow_shift(tmp_allow_fail - 10)
                    || borg_read_scroll(sv_scroll_teleport)
                    || borg_read_scroll(sv_scroll_teleport_level)
                    || borg_use_staff_fail(sv_staff_teleportation)
                    || borg_activate_item(act_tele_long) ||
                    /* revisit spells, increased fail rate */
                    borg_dimension_door(tmp_allow_fail + 9)
                    || borg_spell_fail(TELEPORT_SELF, tmp_allow_fail + 9)
                    || borg_spell_fail(PORTAL, tmp_allow_fail + 9)
                    || borg_shadow_shift(tmp_allow_fail + 9) ||
                    /* revisit teleport, increased fail rate */
                    borg_use_staff(sv_staff_teleportation) ||
                    /* Attempt Teleport Level */
                    borg_spell_fail(TELEPORT_LEVEL, tmp_allow_fail + 9) ||
                    /* try phase at least, with some hedging of the safety of
                     * landing zone */
                    (borg_caution_phase(75, 2)
                        && (borg_read_scroll(sv_scroll_phase_door)
                            || borg_activate_item(act_tele_phase)
                            || borg_spell_fail(PHASE_DOOR, tmp_allow_fail)
                            || borg_spell_fail(PORTAL, tmp_allow_fail)))))) {
            /* Flee! */
            borg_note("# Danger Level 1.");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            return (true);
        }

        /* NOTE(review): mana is faked to maximum here so the critical-escape
         * casts below are attempted, then restored from sv_mana at the end
         * of this section -- confirm the early returns above never leave the
         * faked value in place. */
        borg_skill[BI_CURSP] = borg_skill[BI_MAXSP];

        /* try to teleport, get far away from here */
        if (borg_skill[BI_CDEPTH] && borg_skill[BI_CLEVEL] < 10
            && (borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] * 1 / 10)
            && borg_allow_teleport()
            && (borg_dimension_door(90) || borg_spell(TELEPORT_SELF)
                || borg_spell(PORTAL))) {
            /* verify use of spell */
            /* borg_keypress('y'); */

            /* Flee! */
            borg_note("# Danger Level 1.1 Critical Attempt");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            return (true);
        }

        /* emergency phase activation no concern for safety of landing zone. */
        if (borg_skill[BI_CDEPTH]
            && ((borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] * 1 / 10
                    || b_q > avoidance * (45 + risky_boost) / 10)
                && (borg_activate_item(act_tele_phase)
                    || borg_read_scroll(sv_scroll_phase_door)))) {
            /* Flee! */
            borg_escapes--; /* a phase isn't really an escape */
            borg_note("# Danger Level 1.2 Critical Phase");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            return (true);
        }

        /* emergency phase spell */
        if (borg_skill[BI_CDEPTH] && borg_skill[BI_CLEVEL] < 10
            && (borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] * 1 / 10)
            && ((borg_spell_fail(PHASE_DOOR, 15) || borg_spell(PORTAL)))) {
            /* verify use of spell */
            /* borg_keypress('y'); */

            /* Flee! */
            borg_note("# Danger Level 1.3 Critical Attempt");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            return (true);
        }

        /* Restore the real mana level */
        borg_skill[BI_CURSP] = sv_mana;
    }

    /* If fighting a unique and at the end of the game try to stay and
     * finish the fight.  Only bail out in extreme danger as above.
     */
    if (b_q < avoidance * (25 + risky_boost) / 10 && borg_fighting_unique >= 1
        && borg_fighting_unique <= 3 && borg_skill[BI_CDEPTH] >= 97)
        return (false);

    /* 2 - a bit more scary/
     * Attempt to teleport (usually)
     * do not escape from uniques so quick
     */
    if (borg_skill[BI_ISHEAVYSTUN]
        || ((b_q > avoidance * (3 + risky_boost) / 10)
            && borg_class == CLASS_MAGE && borg_skill[BI_CURSP] <= 20
            && borg_skill[BI_MAXCLEVEL] >= 45)
        || ((b_q > avoidance * (13 + risky_boost) / 10)
            && borg_fighting_unique >= 1 && borg_fighting_unique <= 8
            && borg_skill[BI_CDEPTH] != 99)
        || ((b_q > avoidance * (11 + risky_boost) / 10)
            && !borg_fighting_unique)) {
        /* Try teleportation */
        if (borg_escape_stair()
            || (borg_allow_teleport()
                && (borg_dimension_door(allow_fail - 10)
                    || borg_spell_fail(TELEPORT_SELF, allow_fail - 10)
                    || borg_spell_fail(PORTAL, allow_fail - 10)
                    || borg_shadow_shift(allow_fail - 10)
                    || borg_use_staff_fail(sv_staff_teleportation)
                    || borg_activate_item(act_tele_long)
                    || borg_read_scroll(sv_scroll_teleport)
                    || borg_read_scroll(sv_scroll_teleport_level)
                    || borg_dimension_door(allow_fail)
                    || borg_spell_fail(TELEPORT_SELF, allow_fail)
                    || borg_spell_fail(PORTAL, allow_fail)
                    || borg_shadow_shift(allow_fail)
                    || borg_use_staff(sv_staff_teleportation)))) {
            /* Flee! */
            borg_note("# Danger Level 2.1");

            /* Success */
            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            return (true);
        }

        /* Phase door, if useful */
        if (borg_caution_phase(50, 2) && borg_t - borg_t_antisummon > 50
            && (borg_spell(PHASE_DOOR) || borg_spell(PORTAL)
                || borg_read_scroll(sv_scroll_phase_door)
                || borg_activate_item(act_tele_phase))) {
            /* Flee! */
            borg_note("# Danger Level 2.2");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }
    }

    /* 3- not too bad */
    /* also run if stunned or it is scary here */
    if (borg_skill[BI_ISHEAVYSTUN]
        || ((b_q > avoidance * (13 + risky_boost) / 10)
            && borg_fighting_unique >= 2 && borg_fighting_unique <= 8)
        || ((b_q > avoidance * (10 + risky_boost) / 10)
            && !borg_fighting_unique)
        || ((b_q > avoidance * (10 + risky_boost) / 10)
            && borg_skill[BI_ISAFRAID]
            && (borg_skill[BI_AMISSILES] <= 0
                && borg_class == CLASS_WARRIOR))) {
        /* Phase door, if useful */
        if ((borg_escape_stair() || borg_caution_phase(25, 2))
            && borg_t - borg_t_antisummon > 50
            && (borg_spell_fail(PHASE_DOOR, allow_fail)
                || borg_spell_fail(PORTAL, allow_fail)
                || borg_activate_item(act_tele_phase)
                || borg_read_scroll(sv_scroll_phase_door))) {
            /* Flee! */
            borg_escapes--; /* a phase isn't really an escape */
            borg_note("# Danger Level 3.1");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }

        /* Teleport via spell */
        if (borg_allow_teleport()
            && (borg_dimension_door(allow_fail)
                || borg_spell_fail(TELEPORT_SELF, allow_fail)
                || borg_spell_fail(PORTAL, allow_fail)
                || borg_shadow_shift(allow_fail)
                || borg_activate_item(act_tele_long)
                || borg_use_staff_fail(sv_staff_teleportation)
                || borg_read_scroll(sv_scroll_teleport)
                || borg_activate_item(act_tele_phase))) {
            /* Flee! */
            borg_note("# Danger Level 3.2");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }

        /* Phase door, if useful */
        if (borg_caution_phase(75, 2) && borg_t - borg_t_antisummon > 50
            && (borg_spell_fail(PHASE_DOOR, allow_fail)
                || borg_spell_fail(PORTAL, allow_fail)
                || borg_shadow_shift(allow_fail)
                || borg_activate_item(act_tele_phase)
                || borg_read_scroll(sv_scroll_phase_door))) {
            /* Flee! */
            borg_escapes--; /* a phase isn't really an escape */
            borg_note("# Danger Level 3.3");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }

        /* Use Tport Level after the above attempts failed. */
        if (borg_read_scroll(sv_scroll_teleport_level)) {
            /* Flee! */
            borg_note("# Danger Level 3.4");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }

        /* if we got this far we tried to escape but couldn't... */
        /* time to flee */
        if (!goal_fleeing
            && (!borg_fighting_unique || borg_skill[BI_CLEVEL] < 35)
            && !vault_on_level) {
            /* Note */
            borg_note("# Fleeing (failed to teleport)");

            /* Start fleeing */
            goal_fleeing = true;
        }

        /* Flee now */
        if (!goal_leaving
            && (!borg_fighting_unique || borg_skill[BI_CLEVEL] < 35)
            && !vault_on_level) {
            /* Flee! */
            borg_note("# Leaving (failed to teleport)");

            /* Start leaving */
            goal_leaving = true;
        }
    }

    /* 4- not too scary but I'm compromised */
    if ((b_q > avoidance * (8 + risky_boost) / 10
            && (borg_skill[BI_CLEVEL] < 35
                || borg_skill[BI_CURHP] <= borg_skill[BI_MAXHP] / 3))
        || ((b_q > avoidance * (9 + risky_boost) / 10)
            && borg_fighting_unique >= 1 && borg_fighting_unique <= 8
            && (borg_skill[BI_CLEVEL] < 35
                || borg_skill[BI_CURHP] <= borg_skill[BI_MAXHP] / 3))
        || ((b_q > avoidance * (6 + risky_boost) / 10)
            && borg_skill[BI_CLEVEL] <= 20 && !borg_fighting_unique)
        || ((b_q > avoidance * (6 + risky_boost) / 10)
            && borg_skill[BI_CLEVEL] <= 35)) {
        /* Phase door, if useful */
        if ((borg_escape_stair() || borg_caution_phase(20, 2))
            && borg_t - borg_t_antisummon > 50
            && (borg_spell_fail(PHASE_DOOR, allow_fail)
                || borg_spell_fail(PORTAL, allow_fail)
                || borg_activate_item(act_tele_phase)
                || borg_shadow_shift(allow_fail)
                || borg_read_scroll(sv_scroll_phase_door))) {
            /* Flee! */
            borg_escapes--; /* a phase isn't really an escape */
            borg_note("# Danger Level 4.1");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }

        /* Teleport via spell */
        if (borg_allow_teleport()
            && (borg_dimension_door(allow_fail)
                || borg_spell_fail(TELEPORT_SELF, allow_fail)
                || borg_spell_fail(PORTAL, allow_fail)
                || borg_activate_item(act_tele_long)
                || borg_shadow_shift(allow_fail)
                || borg_read_scroll(sv_scroll_teleport)
                || borg_use_staff_fail(sv_staff_teleportation))) {
            /* Flee! */
            borg_note("# Danger Level 4.2");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }

        /* if we got this far we tried to escape but couldn't... */
        /* time to flee */
        if (!goal_fleeing && !borg_fighting_unique
            && borg_skill[BI_CLEVEL] < 25 && !vault_on_level) {
            /* Note */
            borg_note("# Fleeing (failed to teleport)");

            /* Start fleeing */
            goal_fleeing = true;
        }

        /* Flee now */
        if (!goal_leaving && !borg_fighting_unique && !vault_on_level) {
            /* Flee! */
            borg_note("# Leaving (failed to teleport)");

            /* Start leaving */
            goal_leaving = true;
        }

        /* Emergency Phase door if a weak mage */
        if (((borg_class == CLASS_MAGE || borg_class == CLASS_NECROMANCER)
                && borg_skill[BI_CLEVEL] <= 35)
            && borg_caution_phase(65, 2) && borg_t - borg_t_antisummon > 50
            && (borg_spell_fail(PHASE_DOOR, allow_fail)
                || borg_activate_item(act_tele_phase)
                || borg_activate_item(act_tele_long)
                || borg_read_scroll(sv_scroll_phase_door))) {
            /* Flee! */
            borg_escapes--; /* a phase isn't really an escape */
            borg_note("# Danger Level 4.3");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }
    }

    /* 5- not too scary but I'm very low level */
    if (borg_skill[BI_CLEVEL] < 10
        && (b_q > avoidance * (5 + risky_boost) / 10
            || (b_q > avoidance * (7 + risky_boost) / 10
                && borg_fighting_unique >= 1 && borg_fighting_unique <= 8))) {
        /* Phase door, if useful */
        if ((borg_escape_stair() || borg_caution_phase(20, 2))
            && (borg_spell_fail(PHASE_DOOR, allow_fail)
                || borg_spell_fail(PORTAL, allow_fail)
                || borg_activate_item(act_tele_phase)
                || borg_shadow_shift(allow_fail)
                || borg_read_scroll(sv_scroll_phase_door))) {
            /* Flee! */
            borg_note("# Danger Level 5.1");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }

        /* Teleport via spell */
        if (borg_allow_teleport()
            && (borg_dimension_door(allow_fail)
                || borg_spell_fail(TELEPORT_SELF, allow_fail)
                || borg_spell_fail(PORTAL, allow_fail)
                || borg_shadow_shift(allow_fail)
                || borg_activate_item(act_tele_long)
                || borg_read_scroll(sv_scroll_teleport)
                || borg_use_staff_fail(sv_staff_teleportation))) {
            /* Flee! */
            borg_note("# Danger Level 5.2");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }

        /* if we got this far we tried to escape but couldn't... */
        /* time to flee */
        if (!goal_fleeing && !borg_fighting_unique) {
            /* Note */
            borg_note("# Fleeing (failed to teleport)");

            /* Start fleeing */
            goal_fleeing = true;
        }

        /* Flee now */
        if (!goal_leaving && !borg_fighting_unique) {
            /* Flee! */
            borg_note("# Leaving (failed to teleport)");

            /* Start leaving */
            goal_leaving = true;
        }

        /* Emergency Phase door if a weak mage */
        if (((borg_class == CLASS_MAGE || borg_class == CLASS_NECROMANCER)
                && borg_skill[BI_CLEVEL] <= 8)
            && borg_caution_phase(65, 2)
            && (borg_spell_fail(PHASE_DOOR, allow_fail)
                || borg_activate_item(act_tele_phase)
                || borg_read_scroll(sv_scroll_phase_door)
                || borg_activate_item(act_tele_long))) {
            /* Flee! */
            borg_escapes--; /* a phase isn't really an escape */
            borg_note("# Danger Level 5.3");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }
    }

    /* 6- not too scary but I'm out of mana */
    if ((borg_class == CLASS_MAGE || borg_class == CLASS_PRIEST
            || borg_class == CLASS_NECROMANCER)
        && (b_q > avoidance * (6 + risky_boost) / 10
            || (b_q > avoidance * (8 + risky_boost) / 10
                && borg_fighting_unique >= 1 && borg_fighting_unique <= 8))
        && (borg_skill[BI_CURSP] <= (borg_skill[BI_MAXSP] * 1 / 10)
            && borg_skill[BI_MAXSP] >= 100)) {
        /* Phase door, if useful */
        if ((borg_escape_stair() || borg_caution_phase(20, 2))
            && borg_t - borg_t_antisummon > 50
            && (borg_spell_fail(PHASE_DOOR, allow_fail)
                || borg_spell_fail(PORTAL, allow_fail)
                || borg_activate_item(act_tele_phase)
                || borg_read_scroll(sv_scroll_phase_door))) {
            /* Flee! */
            borg_note("# Danger Level 6.1");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }

        /* Teleport via spell */
        if (borg_allow_teleport()
            && (borg_dimension_door(allow_fail)
                || borg_spell_fail(TELEPORT_SELF, allow_fail)
                || borg_spell_fail(PORTAL, allow_fail)
                || borg_activate_item(act_tele_long)
                || borg_read_scroll(sv_scroll_teleport)
                || borg_use_staff_fail(sv_staff_teleportation))) {
            /* Flee! */
            borg_note("# Danger Level 6.2");

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }
    }

    /* 7- Shoot N Scoot */
    if ((borg_spell_okay_fail(PHASE_DOOR, allow_fail)
            || borg_spell_okay_fail(PORTAL, allow_fail))
        && borg_shoot_scoot_safe(20, 2, b_q)) {
        /* Phase door */
        if (borg_spell_fail(PHASE_DOOR, allow_fail)
            || borg_spell_fail(PORTAL, allow_fail)) {
            /* Flee! */
            borg_note("# Shoot N Scoot. (Danger Level 7.1)");
            borg_escapes--; /* a phase isn't really an escape */

            /* Reset timer if borg was in a anti-summon corridor */
            if (borg_t - borg_t_antisummon < 50) borg_t_antisummon = 0;

            /* Success */
            return (true);
        }
    }

    return (false);
}

/*
 * ** Try healing **
 * this function tries to heal the borg before trying to flee.
 * The ez_heal items (*Heal* and Life) are reserved for Morgoth.
 * In severe emergencies the borg can drink an ez_heal item but that is
 * checked in borg_caution().  He should bail out of the fight before
 * using an ez_heal.
 */
static bool borg_heal(int danger)
{
    int hp_down;
    int pct_down;
    int allow_fail = 15;
    int chance;

    int clw_heal = 15;
    int csw_heal = 25;
    int ccw_heal = 30;
    int cmw_heal = 50;
    int heal_heal = 300;

    int stats_needing_fix = 0;

    bool rod_good = false;

    hp_down = borg_skill[BI_MAXHP] - borg_skill[BI_CURHP];
    pct_down = ((borg_skill[BI_MAXHP] - borg_skill[BI_CURHP]) * 100
        / borg_skill[BI_MAXHP]);

    clw_heal = ((borg_skill[BI_MAXHP] - borg_skill[BI_CURHP]) * 15 / 100);
    csw_heal = ((borg_skill[BI_MAXHP] - borg_skill[BI_CURHP]) * 20 / 100);
    ccw_heal = ((borg_skill[BI_MAXHP] - borg_skill[BI_CURHP]) * 25 / 100);
    cmw_heal = ((borg_skill[BI_MAXHP] - borg_skill[BI_CURHP]) * 30 / 100);
    heal_heal = ((borg_skill[BI_MAXHP] - borg_skill[BI_CURHP]) * 35 / 100);
    if (clw_heal < 15) clw_heal = 15;
    if (csw_heal < 25) csw_heal = 25;
    if (ccw_heal < 30) ccw_heal = 30;
    if (cmw_heal < 50) cmw_heal = 50;
    if (heal_heal < 300) heal_heal = 300;

    /* Quick check for rod success (used later on) */
    if (borg_slot(TV_ROD, sv_rod_healing) != -1) {
        /* Reasonable chance of success */
        if (borg_activate_failure(TV_ROD, sv_rod_healing) < 500)
            rod_good = true;
    }

    /* when fighting Morgoth, we want the borg to use Life potion to fix his
     * stats.  So we need to add up the ones that are dropped.
*/ if (borg_skill[BI_ISFIXSTR]) stats_needing_fix++; if (borg_skill[BI_ISFIXINT]) stats_needing_fix++; if (borg_skill[BI_ISFIXWIS]) stats_needing_fix++; if (borg_skill[BI_ISFIXDEX]) stats_needing_fix++; if (borg_skill[BI_ISFIXCON]) stats_needing_fix++; /* Special cases get a second vote */ if (borg_class == CLASS_MAGE && borg_skill[BI_ISFIXINT]) stats_needing_fix++; if (borg_class == CLASS_PRIEST && borg_skill[BI_ISFIXWIS]) stats_needing_fix++; if (borg_class == CLASS_DRUID && borg_skill[BI_ISFIXWIS]) stats_needing_fix++; if (borg_class == CLASS_NECROMANCER && borg_skill[BI_ISFIXINT]) stats_needing_fix++; if (borg_class == CLASS_WARRIOR && borg_skill[BI_ISFIXCON]) stats_needing_fix++; if (borg_skill[BI_MAXHP] <= 850 && borg_skill[BI_ISFIXCON]) stats_needing_fix++; if (borg_skill[BI_MAXHP] <= 700 && borg_skill[BI_ISFIXCON]) stats_needing_fix += 3; if (borg_class == CLASS_PRIEST && borg_skill[BI_MAXSP] < 100 && borg_skill[BI_ISFIXWIS]) stats_needing_fix += 5; if (borg_class == CLASS_MAGE && borg_skill[BI_MAXSP] < 100 && borg_skill[BI_ISFIXINT]) stats_needing_fix += 5; /* Hack -- heal when confused. This is deadly.*/ /* This is checked twice, once, here, to see if he is in low danger * and again at the end of borg_caution, when all other avenues have failed */ if (borg_skill[BI_ISCONFUSED]) { if ((pct_down >= 80) && danger - heal_heal < borg_skill[BI_CURHP] && borg_quaff_potion(sv_potion_healing)) { borg_note("# Fixing Confusion. Level 1"); return (true); } if ((pct_down >= 85) && danger >= borg_skill[BI_CURHP] * 2 && (borg_quaff_potion(sv_potion_star_healing) || borg_quaff_potion(sv_potion_life))) { borg_note("# Fixing Confusion. 
Level 1.a"); return (true); } if (danger < borg_skill[BI_CURHP] + csw_heal && (borg_eat_food(TV_MUSHROOM, sv_mush_cure_mind) || borg_quaff_potion(sv_potion_cure_serious) || borg_quaff_crit(false) || borg_quaff_potion(sv_potion_healing) || borg_use_staff_fail(sv_staff_healing) || borg_use_staff_fail(sv_staff_curing))) { borg_note("# Fixing Confusion. Level 2"); return (true); } /* If my ability to use a teleport staff is really * bad, then I should heal up then use the staff. */ /* Check for a charged teleport staff */ if (borg_equips_staff_fail(sv_staff_teleportation)) { /* check my skill, drink a potion */ if ((borg_activate_failure(TV_STAFF, sv_staff_teleportation) > 650) && (danger < (avoidance + ccw_heal) * 15 / 10) && (borg_quaff_crit(true) || borg_quaff_potion(sv_potion_healing))) { borg_note("# Fixing Confusion. Level 3"); return (true); } /* However, if I am in really big trouble and there is no way * I am going to be able to * survive another round, take my chances on the staff. */ else if (danger > avoidance * 2) { borg_note("# Too scary to fix Confusion. Level 4"); return (false); } } else { /* If I do not have a staff to teleport, take the potion * and try to fix the confusion */ if ((borg_quaff_crit(true) || borg_quaff_potion(sv_potion_cure_serious) || borg_quaff_potion(sv_potion_healing))) { borg_note("# Fixing Confusion. Level 5"); return (true); } } } /* Hack -- heal when blind. This is deadly.*/ if (borg_skill[BI_ISBLIND] && (randint0(100) < 85)) { /* if in extreme danger, use teleport then fix the * blindness later. 
*/ if (danger > avoidance * 25 / 10) { /* Check for a charged teleport staff */ if (borg_equips_staff_fail(sv_staff_teleportation)) return (0); } if ((hp_down >= 300) && borg_quaff_potion(sv_potion_healing)) { return (true); } /* Warriors with ESP won't need it so quickly */ if (!(borg_class == CLASS_WARRIOR && borg_skill[BI_CURHP] > borg_skill[BI_MAXHP] / 4 && borg_skill[BI_ESP])) { if (borg_eat_food(TV_MUSHROOM, sv_mush_fast_recovery) || borg_quaff_potion(sv_potion_cure_light) || borg_quaff_potion(sv_potion_cure_serious) || borg_quaff_crit(true) || borg_use_staff_fail(sv_staff_healing) || borg_use_staff_fail(sv_staff_curing) || borg_quaff_potion(sv_potion_healing)) { borg_note("# Fixing Blindness."); return (true); } } } /* We generally try to conserve ez-heal pots */ if ((borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]) && ((hp_down >= 400) || (danger > borg_skill[BI_CURHP] * 5 && hp_down > 100)) && borg_quaff_potion(sv_potion_star_healing)) { borg_note("# Fixing Confusion/Blind."); return (true); } /* Healing and fighting Morgoth. 
*/ if (borg_fighting_unique >= 10) { if (borg_skill[BI_CURHP] <= 700 && ((borg_skill[BI_CURHP] > 250 && borg_spell_fail(HOLY_WORD, 14)) || /* Holy Word */ /* Choose Life over *Healing* to fix stats*/ (stats_needing_fix >= 5 && borg_quaff_potion(sv_potion_life)) || /* Choose Life over Healing if way down on pts*/ (hp_down > 500 && borg_has[borg_lookup_kind(TV_POTION, sv_potion_star_healing)] <= 0 && borg_quaff_potion(sv_potion_life)) || borg_quaff_potion(sv_potion_star_healing) || borg_quaff_potion(sv_potion_healing) || borg_activate_item(act_heal1) || borg_activate_item(act_heal2) || borg_activate_item(act_heal3) || (borg_skill[BI_CURHP] < 250 && borg_spell_fail(HOLY_WORD, 5)) || /* Holy Word */ (borg_skill[BI_CURHP] > 550 && borg_spell_fail(HOLY_WORD, 15)) || /* Holy Word */ borg_spell_fail(HEALING, 15) || borg_quaff_potion(sv_potion_life) || borg_zap_rod(sv_rod_healing))) { borg_note("# Healing in Questor Combat."); return (true); } } /* restore Mana */ /* note, blow the staff charges easy because the staff will not last. 
*/ if (borg_skill[BI_CURSP] < (borg_skill[BI_MAXSP] / 5) && (randint0(100) < 50)) { if (borg_use_staff_fail(sv_staff_the_magi)) { borg_note("# Use Magi Staff"); return (true); } } /* blowing potions is harder */ /* NOTE: must have enough mana to keep up or do a HEAL */ if (borg_skill[BI_CURSP] < (borg_skill[BI_MAXSP] / 10) || ((borg_skill[BI_CURSP] < 70 && borg_skill[BI_MAXSP] > 200))) { /* use the potion if battling a unique and not too dangerous */ if (borg_fighting_unique >= 10 || (borg_fighting_unique && danger < avoidance * 2) || (borg_skill[BI_ATELEPORT] + borg_skill[BI_AESCAPE] == 0 && danger > avoidance)) { if (borg_use_staff_fail(sv_staff_the_magi) || borg_quaff_potion(sv_potion_restore_mana)) { borg_note("# Restored My Mana"); return (true); } } } /* if unhurt no healing needed */ if (hp_down == 0) return false; /* Don't bother healing if not in danger */ if (danger == 0 && !borg_skill[BI_ISPOISONED] && !borg_skill[BI_ISCUT]) return (false); /* Restoring while fighting Morgoth */ if (stats_needing_fix >= 5 && borg_fighting_unique >= 10 && borg_skill[BI_CURHP] > 650 && borg_eat_food(TV_MUSHROOM, sv_mush_restoring)) { borg_note("# Trying to fix stats in combat."); return(true); } /* No further Healing considerations if fighting Questors */ if (borg_fighting_unique >= 10) { /* No further healing considerations right now */ return (false); } /* Hack -- heal when wounded a percent of the time */ chance = randint0(100); /* if we are fighting a unique increase the odds of healing */ if (borg_fighting_unique) chance -= 10; /* if danger is close to the hp and healing will help, do it */ if (danger >= borg_skill[BI_CURHP] && danger < borg_skill[BI_MAXHP]) chance -= 75; else { if (borg_class != CLASS_PRIEST && borg_class != CLASS_PALADIN) chance -= 25; } /* Risky Borgs are less likely to heal in the fight */ if (borg_cfg[BORG_PLAYS_RISKY]) chance += 5; if (((pct_down <= 15 && chance < 98) || (pct_down >= 16 && pct_down <= 25 && chance < 95) || (pct_down >= 26 && 
pct_down <= 50 && chance < 80) || (pct_down >= 51 && pct_down <= 65 && chance < 50) || (pct_down >= 66 && pct_down <= 74 && chance < 25) || (pct_down >= 75 && chance < 1)) && (!borg_skill[BI_ISHEAVYSTUN] && !borg_skill[BI_ISSTUN] && !borg_skill[BI_ISPOISONED] && !borg_skill[BI_ISCUT])) return false; /* Cure light Wounds (2d10) */ if (pct_down >= 30 && (pct_down <= 40 || borg_skill[BI_CLEVEL] < 10) && ((danger) < borg_skill[BI_CURHP] + clw_heal) && (clw_heal > danger / 3) && /* No rope-a-doping */ (borg_spell_fail(MINOR_HEALING, allow_fail) || borg_quaff_potion(sv_potion_cure_light) || borg_activate_item(act_cure_light))) { borg_note("# Healing Level 1."); return (true); } /* Cure Serious Wounds (4d10) */ if (pct_down >= 40 && (pct_down <= 50 || borg_skill[BI_CLEVEL] < 20) && ((danger) < borg_skill[BI_CURHP] + csw_heal) && (csw_heal > danger / 3) && /* No rope-a-doping */ (borg_quaff_potion(sv_potion_cure_serious) || borg_activate_item(act_cure_serious))) { borg_note("# Healing Level 2."); return (true); } /* Cure Critical Wounds (6d10) */ if (pct_down >= 50 && pct_down <= 55 && ((danger) < borg_skill[BI_CURHP] + ccw_heal) && (ccw_heal > danger / 3) && /* No rope-a-doping */ (borg_activate_item(act_cure_critical) || borg_quaff_crit(false))) { borg_note("# Healing Level 3."); return (true); } /* If in danger try one more Cure Critical if it will help */ if (danger >= borg_skill[BI_CURHP] && danger < borg_skill[BI_MAXHP] && borg_skill[BI_CURHP] < 50 && danger < ccw_heal && borg_quaff_crit(true)) { borg_note("# Healing Level 5."); return (true); } /* if deep, and low on HP, but in a zero danger spot, drink some CCW to add a few HP before resting */ if (borg_skill[BI_CDEPTH] >= 80 && danger < 50 && pct_down >= 20 && borg_quaff_potion(sv_potion_cure_critical)) { borg_note("# Healing Level 5B."); return (true); } /* Heal step one (200hp) */ if (pct_down >= 55 && danger < borg_skill[BI_CURHP] + heal_heal && ((((!borg_skill[BI_ATELEPORT] && !borg_skill[BI_AESCAPE]) || 
rod_good) && borg_zap_rod(sv_rod_healing)) || borg_activate_item(act_cure_full) || borg_activate_item(act_cure_full2) || borg_activate_item(act_cure_nonorlybig) || borg_activate_item(act_heal1) || borg_activate_item(act_heal2) || borg_activate_item(act_heal3) || borg_use_staff_fail(sv_staff_healing) || borg_spell_fail(HEALING, allow_fail))) { borg_note("# Healing Level 6."); return (true); } /* Generally continue to heal. But if we are preparing for the end * game uniques, then bail out here in order to save our heal pots. * (unless morgoth is dead) * Priests wont need to bail, they have good heal spells. */ if (borg_skill[BI_MAXDEPTH] >= 98 && !borg_skill[BI_KING] && !borg_fighting_unique && borg_class != CLASS_PRIEST) { /* Bail out to save the heal pots for Morgoth*/ return (false); } /* Heal step two (300hp) */ if (pct_down > 50 && danger < borg_skill[BI_CURHP] + heal_heal && (borg_use_staff_fail(sv_staff_healing) || (borg_fighting_evil_unique && borg_spell_fail(HOLY_WORD, allow_fail)) || /* holy word */ borg_spell_fail(HEALING, allow_fail) || (((!borg_skill[BI_ATELEPORT] && !borg_skill[BI_AESCAPE]) || rod_good) && borg_zap_rod(sv_rod_healing)) || borg_zap_rod(sv_rod_healing) || borg_quaff_potion(sv_potion_healing))) { borg_note("# Healing Level 7."); return (true); } /* Healing step three (300hp). */ if (pct_down > 60 && danger < borg_skill[BI_CURHP] + heal_heal && ((borg_fighting_evil_unique && borg_spell_fail(HOLY_WORD, allow_fail)) || /* holy word */ (((!borg_skill[BI_ATELEPORT] && !borg_skill[BI_AESCAPE]) || rod_good) && borg_zap_rod(sv_rod_healing)) || borg_spell_fail(HEALING, allow_fail) || borg_use_staff_fail(sv_staff_healing) || borg_quaff_potion(sv_potion_healing) || borg_activate_item(act_cure_full) || borg_activate_item(act_cure_full2) || borg_activate_item(act_cure_nonorlybig) || borg_activate_item(act_heal1) || borg_activate_item(act_heal2) || borg_activate_item(act_heal3))) { borg_note("# Healing Level 8."); return (true); } /* Healing. 
First use of EZ_Heals */ if (pct_down > 65 && (danger < borg_skill[BI_CURHP] + heal_heal) && ((borg_fighting_evil_unique && borg_spell_fail(HOLY_WORD, allow_fail)) || /* holy word */ borg_spell_fail(HEALING, allow_fail) || borg_use_staff_fail(sv_staff_healing) || (((!borg_skill[BI_ATELEPORT] && !borg_skill[BI_AESCAPE]) || rod_good) && borg_zap_rod(sv_rod_healing)) || borg_quaff_potion(sv_potion_healing) || borg_activate_item(act_cure_full) || borg_activate_item(act_cure_full2) || borg_activate_item(act_cure_nonorlybig) || borg_activate_item(act_heal1) || borg_activate_item(act_heal2) || borg_activate_item(act_heal3) || (borg_fighting_unique && (borg_quaff_potion(sv_potion_star_healing) || borg_quaff_potion(sv_potion_healing) || borg_quaff_potion(sv_potion_life))))) { borg_note("# Healing Level 9."); return (true); } /* Healing final check. Note that *heal* and Life potions are not * wasted. They are saved for Morgoth and emergencies. The * Emergency check is at the end of borg_caution() which is after the * borg_escape() routine. */ if (pct_down > 75 && danger > borg_skill[BI_CURHP] && borg_skill[BI_ATELEPORT] + borg_skill[BI_AESCAPE] <= 0 && (borg_quaff_potion(sv_potion_healing) || borg_quaff_potion(sv_potion_star_healing) || borg_quaff_potion(sv_potion_life))) { borg_note("# Healing Level 10."); return (true); } /*** Cures ***/ /* Dont do these in the middle of a fight, teleport out then try it */ if (danger > avoidance * 2 / 10) return (false); /* Hack -- cure poison when poisoned * This was moved from borg_caution. 
*/ if (borg_skill[BI_ISPOISONED] && (borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 2)) { if (borg_spell_fail(CURE_POISON, 60) || borg_spell_fail(HERBAL_CURING, 60) || borg_quaff_potion(sv_potion_cure_poison) || borg_activate_item(act_cure_body) || borg_activate_item(act_cure_critical) || borg_activate_item(act_cure_temp) || borg_activate_item(act_rem_fear_pois) || borg_activate_item(act_cure_full) || borg_activate_item(act_cure_full2) || borg_activate_item(act_cure_nonorlybig) || borg_use_staff(sv_staff_curing) || borg_eat_food(TV_MUSHROOM, sv_mush_fast_recovery) || borg_eat_food(TV_MUSHROOM, sv_mush_purging) || /* buy time */ borg_quaff_crit(true) || borg_spell_fail(HEALING, 60) || borg_spell_fail(HOLY_WORD, 60) || borg_use_staff_fail(sv_staff_healing)) { borg_note("# Curing."); return (true); } /* attempt to fix mana then poison on next round */ if ((borg_spell_legal(CURE_POISON) || borg_spell_legal(HERBAL_CURING)) && (borg_quaff_potion(sv_potion_restore_mana))) { borg_note("# Curing next round."); return (true); } } /* Hack -- cure poison when poisoned CRITICAL CHECK */ if (borg_skill[BI_ISPOISONED] && (borg_skill[BI_CURHP] < 2 || borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 20)) { int sv_mana = borg_skill[BI_CURSP]; borg_skill[BI_CURSP] = borg_skill[BI_MAXSP]; if (borg_spell(CURE_POISON) || borg_spell(HERBAL_CURING) || borg_spell(HOLY_WORD) || borg_spell(HEALING)) { /* verify use of spell */ /* borg_keypress('y'); */ /* Flee! */ borg_note("# Emergency Cure Poison! Gasp!!!...."); return (true); } borg_skill[BI_CURSP] = sv_mana; /* Quaff healing pots to buy some time- in this emergency. */ if (borg_quaff_potion(sv_potion_cure_light) || borg_quaff_potion(sv_potion_cure_serious)) return (true); /* Try to Restore Mana */ if (borg_quaff_potion(sv_potion_restore_mana)) return (true); /* Emergency check on healing. Borg_heal has already been checked but * but we did not use our ez_heal potions. All other attempts to save * ourself have failed. 
Use the ez_heal if I have it. */ if (borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 20 && (borg_quaff_potion(sv_potion_star_healing) || borg_quaff_potion(sv_potion_life) || borg_quaff_potion(sv_potion_healing))) { borg_note("# Healing. Curing section."); return (true); } /* Quaff unknown potions in this emergency. We might get luck */ if (borg_quaff_unknown()) return (true); /* Eat unknown mushroom in this emergency. We might get luck */ if (borg_eat_unknown()) return (true); /* Use unknown Staff in this emergency. We might get luck */ if (borg_use_unknown()) return (true); } /* Hack -- cure wounds when bleeding, also critical check */ if (borg_skill[BI_ISCUT] && (borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 3 || randint0(100) < 20)) { if (borg_quaff_potion(sv_potion_cure_serious) || borg_quaff_potion(sv_potion_cure_light) || borg_quaff_crit(borg_skill[BI_CURHP] < 10) || borg_spell(MINOR_HEALING) || borg_quaff_potion(sv_potion_cure_critical)) { return (true); } } /* bleeding and about to die CRITICAL CHECK*/ if (borg_skill[BI_ISCUT] && ((borg_skill[BI_CURHP] < 2) || borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 20)) { int sv_mana = borg_skill[BI_CURSP]; borg_skill[BI_CURSP] = borg_skill[BI_MAXSP]; /* Quaff healing pots to buy some time- in this emergency. */ if (borg_quaff_potion(sv_potion_cure_light) || borg_quaff_potion(sv_potion_cure_serious)) return (true); /* Try to Restore Mana */ if (borg_quaff_potion(sv_potion_restore_mana)) return (true); /* Emergency check on healing. Borg_heal has already been checked but * but we did not use our ez_heal potions. All other attempts to save * ourself have failed. Use the ez_heal if I have it. */ if (borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 20 && (borg_quaff_potion(sv_potion_healing) || borg_quaff_potion(sv_potion_star_healing) || borg_quaff_potion(sv_potion_life))) { borg_note("# Healing. 
Bleeding."); return (true); } /* Cast a spell, go into negative mana */ if (borg_spell(MINOR_HEALING)) { /* verify use of spell */ /* borg_keypress('y'); */ /* Flee! */ borg_note("# Emergency Wound Patch! Gasp!!!...."); return (true); } borg_skill[BI_CURSP] = sv_mana; /* Quaff unknown potions in this emergency. We might get luck */ if (borg_quaff_unknown()) return (true); /* Eat unknown mushroom in this emergency. We might get luck */ if (borg_eat_unknown()) return (true); /* Use unknown Staff in this emergency. We might get luck */ if (borg_use_unknown()) return (true); } /* nothing to do */ return (false); } static bool borg_prep_leave_level_spells(void) { /* if we are running away, just run. */ if (goal_fleeing) return (false); /* if we are low on mana, don't prep. */ if (borg_skill[BI_CURSP] < ((borg_skill[BI_MAXSP] * 6) / 10)) return (false); /* Cast haste */ if (!borg_speed && borg_spell_fail(HASTE_SELF, 15)) { borg_note("# Casting speed spell before leaving level."); borg_no_rest_prep = 5000; return (true); } /* Cast resistance */ if (borg_skill[BI_TRFIRE] + borg_skill[BI_TRCOLD] + borg_skill[BI_TRACID] + borg_skill[BI_TRELEC] + borg_skill[BI_TRPOIS] < 3 && borg_spell_fail(RESISTANCE, 15)) { borg_note("# Casting Resistance spell before leaving level."); borg_no_rest_prep = 21000; return (true); } /* Cast fastcast */ if (!borg_fastcast && borg_spell_fail(MANA_CHANNEL, 15)) { borg_note("# Casting Mana Channel spell before leaving level."); borg_no_rest_prep = 6000; return (true); } /* Cast Berserk Strength */ if (!borg_berserk && borg_spell_fail(BERSERK_STRENGTH, 15)) { borg_note("# Casting Berserk Strength spell before leaving level."); borg_no_rest_prep = 10000; return (true); } /* Cast heroism */ if (!borg_hero && borg_spell_fail(HEROISM, 15)) { borg_note("# Casting Heroism spell before leaving level."); borg_no_rest_prep = 3000; return (true); } /* Cast regen just before returning to dungeon */ if (!borg_regen && borg_spell_fail(RAPID_REGENERATION, 15)) { 
borg_note("# Casting Regen before leaving level."); borg_no_rest_prep = 6000; return (true); } /* Cast Smite Evil just before returning to dungeon */ if (!borg_smite_evil && !borg_skill[BI_WS_EVIL] && borg_spell_fail(SMITE_EVIL, 15)) { borg_note("# Casting Smite Evil before leaving level."); borg_no_rest_prep = 21000; return (true); } /* Cast Venom just before returning to dungeon */ if (!borg_venom && !borg_skill[BI_WB_POIS] && borg_spell_fail(VENOM, 15)) { borg_note("# Casting Venom before leaving level."); borg_no_rest_prep = 18000; return (true); } /* Cast PFE just before returning to dungeon */ if (!borg_prot_from_evil && borg_spell_fail(PROTECTION_FROM_EVIL, 15)) { borg_note("# Casting PFE before leaving level."); borg_no_rest_prep = borg_skill[BI_CLEVEL] * 1000; return (true); } /* Cast bless prep things */ if ((!borg_bless && (borg_spell_fail(BLESS, 15) || borg_spell_fail(DEMON_BANE, 15)))) { borg_note("# Casting blessing before leaving level."); borg_no_rest_prep = 11000; return (true); } return (false); } /* * Be "cautious" and attempt to prevent death or dishonor. * * Strategy: * * (1) Caution * (1a) Analyze the situation * (1a1) try to heal * (1a2) try a defence * (1b) Teleport from danger * (1c) Handle critical stuff * (1d) Retreat to happy grids * (1e) Back away from danger * (1f) Heal various conditions * * (2) Attack * (2a) Simulate possible attacks * (2b) Perform optimal attack * * (3) Recover * (3a) Recover by spells/prayers * (3b) Recover by items/etc * (3c) Recover by resting * * XXX XXX XXX * In certain situations, the "proper" course of action is to simply * attack a nearby monster, since often most of the danger is due to * a single monster which can sometimes be killed in a single blow. * * Actually, both "borg_caution()" and "borg_recover()" need to * be more intelligent, and should probably take into account * such things as nearby monsters, and/or the relative advantage * of simply pummeling nearby monsters instead of recovering. 
* * Note that invisible/offscreen monsters contribute to the danger * of an extended "region" surrounding the observation, so we will * no longer rest near invisible monsters if they are dangerous. * * XXX XXX XXX * We should perhaps reduce the "fear" values of each region over * time, to take account of obsolete invisible monsters. * * Note that walking away from a fast monster is counter-productive, * since the monster will often just follow us, so we use a special * method which allows us to factor in the speed of the monster and * predict the state of the world after we move one step. Of course, * walking away from a spell casting monster is even worse, since the * monster will just get to use the spell attack multiple times. But, * if we are trying to get to known safety, then fleeing in such a way * might make sense. Actually, this has been done too well, note that * it makes sense to flee some monsters, if they "stumble", or if we * are trying to get to stairs. XXX XXX XXX * * Note that the "flow" routines attempt to avoid entering into * situations that are dangerous, but sometimes we do not see the * danger coming, and then we must attempt to survive by any means. * * We will attempt to "teleport" if the danger in the current situation, * as well as that resulting from attempting to "back away" from danger, * are sufficient to kill us in one or two blows. This allows us to * avoid teleportation in situations where simply backing away is the * proper course of action, for example, when standing next to a nasty * stationary monster, but also to teleport when backing away will not * reduce the danger sufficiently. * * But note that in "nasty" situations (when we are running out of light, * or when we are starving, blind, confused, or hallucinating), we will * ignore the possibility of "backing away" from danger, when considering * the possibility of using "teleport" to escape. 
But if the teleport * fails, we will still attempt to "retreat" or "back away" if possible. * * XXX XXX XXX Note that it should be possible to do some kind of nasty * "flow" algorithm which would use a priority queue, or some reasonably * efficient normal queue stuff, to determine the path which incurs the * smallest "cumulative danger", and minimizes the total path length. * It may even be sufficient to treat each step as having a cost equal * to the danger of the destination grid, plus one for the actual step. * This would allow the Borg to prefer a ten step path passing through * one grid with danger 10, to a five step path, where each step has * danger 9. Currently, he often chooses paths of constant danger over * paths with small amounts of high danger. However, the current method * is very fast, which is certainly a point in its favor... * * When in danger, attempt to "flee" by "teleport" or "recall", and if * this is not possible, attempt to "heal" damage, if needed, and else * attempt to "flee" by "running". * * XXX XXX XXX Both "borg_caution()" and "borg_recover()" should only * perform the HEALING tasks if they will cure more "damage"/"stuff" * than may be re-applied in the next turn, this should prevent using * wimpy healing spells next to dangerous monsters, and resting to regain * mana near a mana-drainer. * * Whenever we are in a situation in which, even when fully healed, we * could die in a single round, we set the "goal_fleeing" flag, and if * we could die in two rounds, we set the "goal_leaving" flag. * * In town, whenever we could die in two rounds if we were to stay still, * we set the "goal_leaving" flag. In combination with the "retreat" and * the "back away" code, this should allow us to leave town before getting * into situations which might be fatal. * * Flag "goal_fleeing" means get off this level right now, using recall * if possible when we get a chance, and otherwise, take stairs, even if * it is very dangerous to do so. 
 *
 * Flag "goal_leaving" means get off this level when possible, using
 * stairs if possible when we get a chance.
 *
 * We will also take stairs if we happen to be standing on them, and we
 * could die in two rounds.  This is often "safer" than teleportation,
 * and allows the "retreat" code to retreat towards stairs, knowing that
 * once there, we will leave the level.
 *
 * If we can, we should try to hit a monster with an offset spell.
 * A Druj cannot move but they are really dangerous.  So we should retreat
 * to a happy grid (meaning we have los and it does not), we should target
 * one space away from the bad guy then blast away with ball spells.
 *
 * Hack -- Special checks for dealing with Morgoth.
 * The borg would like to stay put on level 100 and use
 * spells to attack Morgoth then use Teleport Other as he
 * gets too close.
 * 1. Make certain borg is sitting in a central room.
 * 2. Attack Morgoth with spells.
 * 3. Use Teleport Other on Morgoth as he approaches.
 * 4. Use Teleport Other/Mass Banishment on all other monsters
 *    if borg is correctly positioned in a good room.
 * 5. Stay put and rest until Morgoth returns.
*/ bool borg_caution(void) { int j, pos_danger; bool borg_surround = false; bool nasty = false; bool on_dnstair = false; bool on_upstair = false; /*** Notice "nasty" situations ***/ /* About to run out of light is extremely nasty */ if (!borg_skill[BI_LIGHT] && borg_items[INVEN_LIGHT].timeout < 250) nasty = true; /* Starvation is nasty */ if (borg_skill[BI_ISWEAK]) nasty = true; /* Blind-ness is nasty */ if (borg_skill[BI_ISBLIND]) nasty = true; /* Confusion is nasty */ if (borg_skill[BI_ISCONFUSED]) nasty = true; /* Hallucination is nasty */ if (borg_skill[BI_ISIMAGE]) nasty = true; /* if on level 100 and not ready for Morgoth, run */ if (borg_skill[BI_CDEPTH] == 100 && borg_t - borg_began < 10 && !borg_morgoth_position) { if (borg_ready_morgoth == 0 && !borg_skill[BI_KING]) { /* teleport level up to 99 to finish uniques */ if (borg_spell(TELEPORT_LEVEL) || borg_read_scroll(sv_scroll_teleport_level)) { borg_note("# Rising one dlevel (Not ready for Morgoth)"); return (true); } /* Start leaving */ if (!goal_leaving) { /* Note */ borg_note("# Leaving (Not ready for Morgoth now)"); /* Start leaving */ goal_leaving = true; } } } /*** Evaluate local danger ***/ /* Monsters on all sides of me? */ borg_surround = borg_surrounded(); /* No searching if scary guys on the level */ if (scaryguy_on_level == true) borg_needs_searching = false; /* Only allow three 'escapes' per level unless heading for morogoth or fighting a unique, then allow 85. */ if ((borg_escapes > 3 && !unique_on_level && !borg_ready_morgoth) || borg_escapes > 55) { /* No leaving if going after questors */ if (borg_skill[BI_CDEPTH] <= 98) { /* Start leaving */ if (!goal_leaving) { /* Note */ borg_note("# Leaving (Too many escapes)"); /* Start leaving */ goal_leaving = true; } /* Start fleeing */ if (!goal_fleeing && borg_escapes > 3) { /* Note */ borg_note("# Fleeing (Too many escapes)"); /* Start fleeing */ goal_fleeing = true; } } } /* No hanging around if nasty here. 
*/ if (scaryguy_on_level) { /* Note */ borg_note("# Scary guy on level."); /* Start leaving */ if (!goal_leaving) { /* Note */ borg_note("# Leaving (Scary guy on level)"); /* Start leaving */ goal_leaving = true; } /* Start fleeing */ if (!goal_fleeing) { /* Note */ borg_note("# Fleeing (Scary guy on level)"); /* Start fleeing */ goal_fleeing = true; } /* Return to town quickly after leaving town */ if (borg_skill[BI_CDEPTH] == 0) goal_fleeing_to_town = true; } /* Make a note if Ignoring monsters (no fighting) */ if (goal_ignoring) { /* Note */ borg_note("# Ignoring combat with monsters."); } /* Note if ignorig messages */ if (borg_dont_react) { borg_note("# Borg ignoring messges."); } /* Look around */ pos_danger = borg_danger(c_y, c_x, 1, true, false); /* Describe (briefly) the current situation */ /* Danger (ignore stupid "fear" danger) */ if ((((pos_danger > avoidance / 10) || (pos_danger > borg_fear_region[c_y / 11][c_x / 11]) || borg_morgoth_position || borg_skill[BI_ISWEAK]) || borg_skill[BI_CDEPTH] == 100) && !borg_skill[BI_KING]) { /* Describe (briefly) the current situation */ borg_note(format("# Loc:%d,%d Dep:%d Lev:%d HP:%d/%d SP:%d/%d Danger:p=%d", c_y, c_x, borg_skill[BI_CDEPTH], borg_skill[BI_CLEVEL], borg_skill[BI_CURHP], borg_skill[BI_MAXHP], borg_skill[BI_CURSP], borg_skill[BI_MAXSP], pos_danger)); if (borg_resistance) { borg_note(format("# Protected by Resistance (borg turns:%d; game turns:%d)", borg_resistance / borg_game_ratio, player->timed[TMD_OPP_ACID])); } if (borg_shield) { borg_note("# Protected by Mystic Shield"); } if (borg_prot_from_evil) { borg_note("# Protected by PFE"); } if (borg_morgoth_position) { borg_note("# Protected by Sea of Runes."); } if (borg_fighting_unique >= 10) { borg_note("# Questor Combat."); } if (borg_as_position) { borg_note("# Protected by anti-summon corridor."); } } /* Comment on glyph */ if (track_glyph.num) { int i; for (i = 0; i < track_glyph.num; i++) { /* Enqueue the grid */ if ((track_glyph.y[i] == c_y) 
&& (track_glyph.x[i] == c_x)) { /* if standing on one */ borg_note(format("# Standing on Glyph")); } } } /* Comment on stair */ if (track_less.num) { int i; for (i = 0; i < track_less.num; i++) { /* Enqueue the grid */ if ((track_less.y[i] == c_y) && (track_less.x[i] == c_x)) { /* if standing on one */ borg_note(format("# Standing on up-stairs")); on_upstair = false; } } } /* Comment on stair */ if (track_more.num) { int i; for (i = 0; i < track_more.num; i++) { /* Enqueue the grid */ if ((track_more.y[i] == c_y) && (track_more.x[i] == c_x)) { /* if standing on one */ borg_note(format("# Standing on dn-stairs")); on_dnstair = false; } } } if (!goal_fleeing) { /* Start being cautious and trying to not die */ if (borg_class == CLASS_MAGE && !borg_morgoth_position && !borg_as_position && !borg_skill[BI_ISBLIND] && !borg_skill[BI_ISCUT] && !borg_skill[BI_ISPOISONED] && !borg_skill[BI_ISCONFUSED]) { /* do some defence before running away */ if (borg_defend(pos_danger)) return true; /* try healing before running away */ if (borg_heal(pos_danger)) return true; } else { /* try healing before running away */ if (borg_heal(pos_danger)) return true; /* do some defence before running away! */ if (borg_defend(pos_danger)) return true; } } if (borg_cfg[BORG_USES_SWAPS]) { /* do some swapping before running away! */ if (pos_danger > (avoidance / 3)) { if (borg_backup_swap(pos_danger)) return true; } } /* If I am waiting for recall, & safe, then stay put. 
*/ if (goal_recalling && borg_check_rest(c_y, c_x) && borg_skill[BI_CDEPTH] && !borg_skill[BI_ISHUNGRY]) { /* rest here until lift off */ borg_note("# Resting for Recall."); borg_keypress('R'); borg_keypress('5'); borg_keypress('0'); borg_keypress('0'); borg_keypress(KC_ENTER); /* I'm not in a store */ borg_in_shop = false; return (true); } /* If I am waiting for recall in town */ if (goal_recalling && goal_recalling <= (borg_game_ratio * 2) && !borg_skill[BI_CDEPTH]) { if (borg_prep_leave_level_spells()) return (true); } /*** Danger ***/ /* Impending doom */ /* Don't take off in the middle of a fight */ /* just to restock and it is useless to restock */ /* if you have just left town. */ if (borg_restock(borg_skill[BI_CDEPTH]) && !borg_fighting_unique && (borg_time_town + (borg_t - borg_began)) > 200) { /* Start leaving */ if (!goal_leaving) { /* Note */ borg_note(format("# Leaving (restock) %s", borg_restock(borg_skill[BI_CDEPTH]))); /* Start leaving */ goal_leaving = true; } /* Start fleeing */ if (!goal_fleeing && borg_skill[BI_ACCW] < 2 && borg_skill[BI_FOOD] > 3 && borg_skill[BI_AFUEL] > 2) { /* Flee */ borg_note(format("# Fleeing (restock) %s", borg_restock(borg_skill[BI_CDEPTH]))); /* Start fleeing */ goal_fleeing = true; } } /* Excessive danger */ else if (pos_danger > (borg_skill[BI_CURHP] * 2)) { /* Start fleeing */ /* do not flee level if going after Morgoth or fighting a unique */ if (!goal_fleeing && !borg_fighting_unique && (borg_skill[BI_CLEVEL] < 50) && !vault_on_level && (borg_skill[BI_CDEPTH] < 100 && borg_ready_morgoth == 1)) { /* Note */ borg_note("# Fleeing (excessive danger)"); /* Start fleeing */ goal_fleeing = true; } } /* Potential danger (near death) in town */ else if (!borg_skill[BI_CDEPTH] && (pos_danger > borg_skill[BI_CURHP]) && (borg_skill[BI_CLEVEL] < 50)) { /* Flee now */ if (!goal_leaving) { /* Flee! 
*/ borg_note("# Leaving (potential danger)"); /* Start leaving */ goal_leaving = true; } } /*** Stairs ***/ /* Leaving or Fleeing, take stairs */ if (goal_leaving || goal_fleeing || scaryguy_on_level || goal_fleeing_lunal || goal_fleeing_munchkin || ((pos_danger > avoidance || (borg_skill[BI_CLEVEL] < 5 && pos_danger > avoidance / 2)) && borg_grids[c_y][c_x].feat == FEAT_LESS)) /* danger and standing on stair */ { if (borg_ready_morgoth == 0 && !borg_skill[BI_KING]) { stair_less = true; if (scaryguy_on_level) borg_note("# Fleeing and leaving the level. (scaryguy)"); if (goal_fleeing_lunal) borg_note("# Fleeing and leaving the level. (fleeing_lunal)"); if (goal_fleeing_munchkin) borg_note("# Fleeing and leaving the level. (fleeing munchkin)"); if (pos_danger > avoidance && borg_skill[BI_CLEVEL] <= 49 && borg_grids[c_y][c_x].feat == FEAT_LESS) borg_note("# Leaving level, Some danger but I'm on a stair."); } if (scaryguy_on_level) stair_less = true; /* Only go down if fleeing or prepared */ if (goal_fleeing == true || goal_fleeing_lunal == true || goal_fleeing_munchkin) stair_more = true; if ((char*)NULL == borg_prepared(borg_skill[BI_CDEPTH] + 1)) stair_more = true; if (!track_less.num && (borg_skill[BI_CURLITE] == 0 || borg_skill[BI_ISHUNGRY] || borg_skill[BI_ISWEAK] || borg_skill[BI_FOOD] < 2)) stair_more = false; /* If I need to sell crap, then don't go down */ if (borg_skill[BI_CDEPTH] && borg_skill[BI_CLEVEL] < 25 && borg_gold < 25000 && borg_count_sell() >= 13) stair_more = false; /* Its ok to go one level deep if evading scary guy */ if (scaryguy_on_level) stair_more = true; /* if fleeing town, then dive */ if (!borg_skill[BI_CDEPTH]) stair_more = true; } /* Take stairs up */ if (stair_less) { /* Current grid */ borg_grid* ag = &borg_grids[c_y][c_x]; /* Usable stairs */ if (ag->feat == FEAT_LESS || borg_on_upstairs || on_upstair) { /* Log it */ borg_note(format("# Leaving via up stairs.")); /* Take the stairs */ borg_on_dnstairs = true; borg_keypress('<'); /* 
Success */ return (true); } } /* Take stairs down */ if (stair_more && !goal_recalling) { /* Current grid */ borg_grid* ag = &borg_grids[c_y][c_x]; /* Usable stairs */ if (ag->feat == FEAT_MORE || borg_on_dnstairs || on_dnstair) { /* Do these if not lunal mode */ if (!goal_fleeing_lunal && !goal_fleeing_munchkin) { if (borg_prep_leave_level_spells()) return (true); } /* Take the stairs */ borg_on_upstairs = true; borg_keypress('>'); /* Success */ return (true); } } /*** Deal with critical situations ***/ /* Hack -- require light */ if (!borg_skill[BI_CURLITE] && !borg_skill[BI_LIGHT]) /* No Lite, AND Not Glowing */ { enum borg_need need = borg_maintain_light(); if (need == BORG_MET_NEED) return true; else if ((need == BORG_UNMET_NEED) && borg_skill[BI_CDEPTH]) { /* Flee for fuel */ /* Start leaving */ if (!goal_leaving) { /* Flee */ borg_note("# Leaving (need fuel)"); /* Start leaving */ goal_leaving = true; } } } /* Hack -- prevent starvation */ if (borg_skill[BI_ISWEAK]) { /* Attempt to satisfy hunger */ if (borg_eat_food_any() || borg_spell(REMOVE_HUNGER) || borg_spell(HERBAL_CURING)) { /* Success */ return (true); } /* Try to restore mana then cast the spell next round */ if (borg_quaff_potion(sv_potion_restore_mana)) return (true); /* Flee for food */ if (borg_skill[BI_CDEPTH]) { /* Start leaving */ if (!goal_leaving) { /* Flee */ borg_note("# Leaving (need food)"); /* Start leaving */ goal_leaving = true; } /* Start fleeing */ if (!goal_fleeing) { /* Flee */ borg_note("# Fleeing (need food)"); /* Start fleeing */ goal_fleeing = true; } } } /* Prevent breeder explosions when low level */ if (breeder_level && borg_skill[BI_CLEVEL] < 15) { /* Start leaving */ if (!goal_fleeing) { /* Flee */ borg_note("# Fleeing (breeder level)"); /* Start fleeing */ goal_fleeing = true; } } /*** Flee on foot ***/ /* Desperation Head for stairs */ /* If you are low level and near the stairs and you can */ /* hop onto them in very few steps, try to head to them */ /* out of 
desperation */ if ((track_less.num || track_more.num) && (goal_fleeing || scaryguy_on_level || (pos_danger > avoidance && borg_skill[BI_CLEVEL] < 35))) { int y, x, i; int b_j = -1; int m; int b_m = -1; bool safe = true; borg_grid* ag; /* Check for an existing "up stairs" */ for (i = 0; i < track_less.num; i++) { x = track_less.x[i]; y = track_less.y[i]; ag = &borg_grids[y][x]; /* How far is the nearest up stairs */ j = borg_distance(c_y, c_x, y, x); /* Skip stairs if a monster is on the stair */ if (ag->kill) continue; /* skip the closer ones */ if (b_j >= j) continue; /* track it */ b_j = j; } /* Check for an existing "down stairs" */ for (i = 0; i < track_more.num; i++) { x = track_more.x[i]; y = track_more.y[i]; ag = &borg_grids[y][x]; /* How far is the nearest up stairs */ m = borg_distance(c_y, c_x, y, x); /* Skip stairs if a monster is on the stair */ if (ag->kill) continue; /* skip the closer ones */ if (b_m >= m) continue; /* track it */ b_m = m; } /* If you are within a few (3) steps of the stairs */ /* and you can take some damage to get there */ /* go for it */ if (b_j < 3 && b_j != -1 && pos_danger < borg_skill[BI_CURHP]) { borg_desperate = true; if (borg_flow_stair_less(GOAL_FLEE, false)) { /* Note */ borg_note("# Desperate for Stairs (one)"); borg_desperate = false; return (true); } borg_desperate = false; } /* If you are next to steps of the stairs go for it */ if (b_j <= 2 && b_j != -1) { borg_desperate = true; if (borg_flow_stair_less(GOAL_FLEE, false)) { /* Note */ borg_note("# Desperate for Stairs (two)"); borg_desperate = false; return (true); } borg_desperate = false; } /* Low level guys tend to waste money reading the recall scrolls */ if (b_j < 20 && b_j != -1 && scaryguy_on_level && borg_skill[BI_CLEVEL] < 20) { /* do not attempt it if an adjacent monster is faster than me */ for (i = 0; i < 8; i++) { x = c_x + ddx_ddd[i]; y = c_y + ddy_ddd[i]; /* check for bounds */ if (!square_in_bounds(cave, loc(x, y))) continue; /* Monster there ? 
*/ if (!borg_grids[y][x].kill) continue; /* Access the monster and check it's speed */ if (borg_kills[borg_grids[y][x].kill].speed > borg_skill[BI_SPEED]) safe = false; } /* Dont run from Grip or Fang */ if ((borg_skill[BI_CDEPTH] <= 5 && borg_skill[BI_CDEPTH] != 0 && borg_fighting_unique) || !safe) { /* try to take them on, you cant outrun them */ } else { borg_desperate = true; if (borg_flow_stair_less(GOAL_FLEE, false)) { /* Note */ borg_note("# Desperate for Stairs (three)"); borg_desperate = false; return (true); } borg_desperate = false; } } /* If you are next to steps of the down stairs go for it */ if (b_m <= 2 && b_m != -1) { borg_desperate = true; if (borg_flow_stair_more(GOAL_FLEE, false, false)) { /* Note */ borg_note("# Desperate for Stairs (four)"); borg_desperate = false; return (true); } borg_desperate = false; } } /* * Strategic retreat * * Do not retreat if * 1) we are icky (poisoned, blind, confused etc * 2) we have boosted our avoidance because we are stuck * 3) we are in a Sea of Runes * 4) we are not in a vault */ if (((pos_danger > avoidance / 3 && !nasty && !borg_no_retreat) || (borg_surround && pos_danger != 0)) && !borg_morgoth_position && (borg_t - borg_t_antisummon >= 50) && !borg_skill[BI_ISCONFUSED] && !square_isvault(cave, loc(c_x, c_y)) && borg_skill[BI_CURHP] < 500) { int d, b_d = -1; int r, b_r = -1; int b_p = -1, p1 = -1; int b_x = c_x; int b_y = c_y; int ii; /* Scan the useful viewable grids */ for (j = 1; j < borg_view_n; j++) { int x1 = c_x; int y1 = c_y; int x2 = borg_view_x[j]; int y2 = borg_view_y[j]; /* Cant if confused: no way to predict motion */ if (borg_skill[BI_ISCONFUSED]) continue; /* Require "floor" grids */ if (!borg_cave_floor_bold(y2, x2)) continue; /* Try to avoid pillar dancing if at good health */ if ((borg_skill[BI_CURHP] >= borg_skill[BI_MAXHP] * 7 / 10 && ((track_step.num > 2 && (track_step.y[track_step.num - 2] == y2 && track_step.x[track_step.num - 2] == x2 && track_step.y[track_step.num - 3] == c_y && 
track_step.x[track_step.num - 3] == c_x)))) || time_this_panel >= 300) continue; /* XXX -- Borgs in an unexplored hall (& with only a torch) * will always return false for Happy Grids: * * 222222 Where 2 = unknown grid. Borg has a torch. * 2221.# Borg will consider both the . and the 1 * #@# for a retreat from the C. But the . will be * #C# false d/t adjacent wall to the east. 1 will * #'# will be false d/t unknown grid to the west. * So he makes no attempt to retreat. * However, the next function (backing away), allows him * to back up to 1 safely. * * To play safer, the borg should not retreat to grids where * he has not previously been. This tends to run him into * more monsters. It is better for him to retreat to grids * previously travelled, where the monsters are most likely * dead, and the path is clear. However, there is not (yet) * tag for those grids. Something like BORG_BEEN would work. */ /* Require "happy" grids (most of the time)*/ if (!borg_happy_grid_bold(y2, x2)) continue; /* Track "nearest" grid */ if (b_r >= 0) { int ay = ((y2 > y1) ? (y2 - y1) : (y1 - y2)); int ax = ((x2 > x1) ? 
(x2 - x1) : (x1 - x2)); /* Ignore "distant" locations */ if ((ax > b_r) || (ay > b_r)) continue; } /* Reset */ r = 0; /* Simulate movement */ while (1) { borg_grid* ag; /* Obtain direction */ d = borg_goto_dir(y1, x1, y2, x2); /* Verify direction */ if ((d == 0) || (d == 5)) break; /* Track distance */ r++; /* Simulate the step */ y1 += ddy[d]; x1 += ddx[d]; /* Obtain the grid */ ag = &borg_grids[y1][x1]; /* Lets make one more check that we are not bouncing */ if ((borg_skill[BI_CURHP] >= borg_skill[BI_MAXHP] * 7 / 10 && ((track_step.num > 2 && (track_step.y[track_step.num - 2] == y1 && track_step.x[track_step.num - 2] == x1 && track_step.y[track_step.num - 3] == c_y && track_step.x[track_step.num - 3] == c_x)))) || time_this_panel >= 300) break; /* Require floor */ if (!borg_cave_floor_grid(ag) || (ag->feat == FEAT_LAVA && !borg_skill[BI_IFIRE])) break; /* Require it to be somewhat close */ if (r >= 10) break; /* Check danger of that spot */ p1 = borg_danger(y1, x1, 1, true, false); if (p1 >= pos_danger) break; /* make sure it is not dangerous to take the first step; unless surrounded. */ if (r == 1) { /* Not surrounded or surrounded and ignoring*/ if (!borg_surround || (borg_surround && goal_ignoring)) { if (p1 >= borg_skill[BI_CURHP] * 4 / 10) break; /* Ought to be worth it */; if (p1 > pos_danger * 5 / 10) break; } else /* Surrounded, try to back-up */ { if (borg_skill[BI_CLEVEL] >= 20) { if (p1 >= (b_r <= 5 ? borg_skill[BI_CURHP] * 15 / 10 : borg_skill[BI_CURHP])) break; } else { if (p1 >= borg_skill[BI_CURHP] * 4) break; } } /* * Skip this grid if it is adjacent to a monster. He will just hit me * when I land on that grid. 
*/ for (ii = 1; ii < borg_kills_nxt; ii++) { borg_kill* kill; /* Monster */ kill = &borg_kills[ii]; /* Skip dead monsters */ if (!kill->r_idx) continue; /* Require current knowledge */ if (kill->when < borg_t - 2) continue; /* Check distance -- 1 grid away */ if (borg_distance(kill->y, kill->x, y1, x1) <= 1 && kill->speed > borg_skill[BI_SPEED] && !borg_surround) break; } } /* Skip monsters */ if (ag->kill) break; /* Skip traps */ if (ag->trap && !ag->glyph) break; /* Safe arrival */ if ((x1 == x2) && (y1 == y2)) { /* Save distance */ b_r = r; b_p = p1; /* Save location */ b_x = x2; b_y = y2; /* Done */ break; } } } /* Retreat */ if (b_r >= 0) { /* Save direction */ b_d = borg_goto_dir(c_y, c_x, b_y, b_x); /* Hack -- set goal */ g_x = c_x + ddx[b_d]; g_y = c_y + ddy[b_d]; /* Note */ borg_note(format("# Retreating to %d,%d (distance %d) via %d,%d (%d > %d)", b_y, b_x, b_r, g_y, g_x, pos_danger, b_p)); /* Strategic retreat */ borg_keypress(I2D(b_d)); /* Reset my Movement and Flow Goals */ goal = 0; /* Success */ return (true); } } /*** Escape if possible ***/ /* Attempt to escape via spells */ if (borg_escape(pos_danger)) { /* increment the escapes this level counter */ borg_escapes++; /* Clear any Flow queues */ goal = 0; /* Success */ return (true); } /*** Back away ***/ /* Do not back up if * 1) we are nasty (poisoned, blind, confused etc * 2) we are boosting our avoidance because we are stuck * 3) we are in a sweet Morgoth position (sea of runes) * 4) the monster causing concern is asleep * 5) we are not in a vault * 6) loads of HP */ if (((pos_danger > (avoidance * 4 / 10) && !nasty && !borg_no_retreat) || (borg_surround && pos_danger != 0)) && !borg_morgoth_position && (borg_t - borg_t_antisummon >= 50) && !borg_skill[BI_ISCONFUSED] && !square_isvault(cave, loc(c_x, c_y)) && borg_skill[BI_CURHP] < 500) { int i = -1, b_i = -1; int k = -1, b_k = -1; int f = -1, b_f = -1; int g_k = 0; int ii; bool adjacent_monster = false; /* Current danger */ b_k = pos_danger; /* 
Fake the danger down if surounded so that he can move. */ if (borg_surround) b_k = (b_k * 12 / 10); /* Check the freedom */ b_f = borg_freedom(c_y, c_x); /* Attempt to find a better grid */ for (i = 0; i < 8; i++) { int x = c_x + ddx_ddd[i]; int y = c_y + ddy_ddd[i]; /* Access the grid */ borg_grid* ag = &borg_grids[y][x]; /* Cant if confused: no way to predict motion */ if (borg_skill[BI_ISCONFUSED]) continue; /* Skip walls/doors */ if (!borg_cave_floor_grid(ag)) continue; /* Skip monster grids */ if (ag->kill) continue; /* Mega-Hack -- skip stores XXX XXX XXX */ if (feat_is_shop(ag->feat)) continue; /* Mega-Hack -- skip traps XXX XXX XXX */ if (ag->trap && !ag->glyph) break; /* If i was here last round and 3 rounds ago, suggesting a "bounce" */ if ((borg_skill[BI_CURHP] >= borg_skill[BI_MAXHP] * 7 / 10 && ((track_step.num > 2 && (track_step.y[track_step.num - 2] == y && track_step.x[track_step.num - 2] == x && track_step.y[track_step.num - 3] == c_y && track_step.x[track_step.num - 3] == c_x)))) || time_this_panel >= 300) continue; /* * Skip this grid if it is adjacent to a monster. He will just hit me * when I land on that grid. */ for (ii = 1; ii < borg_kills_nxt; ii++) { borg_kill* kill; /* Monster */ kill = &borg_kills[ii]; /* Skip dead monsters */ if (!kill->r_idx) continue; /* Require current knowledge */ if (kill->when < borg_t - 2) continue; /* Check distance -- 1 grid away */ if (borg_distance(kill->y, kill->x, y, x) <= 1 && !borg_surround) adjacent_monster = true; /* Check distance -- 2 grids away and he is faster than me */ if (borg_distance(kill->y, kill->x, y, x) <= 2 && kill->speed > borg_skill[BI_SPEED] && !borg_surround) adjacent_monster = true; } /* Skip this grid consideration because it is next to a monster */ if (adjacent_monster == true) continue; /* Extract the danger there */ k = borg_danger(y, x, 1, true, false); /* Skip this grid if danger is higher than my HP. * Take my chances with fighting. 
*/ if (k > avoidance) continue; /* Skip this grid if it is not really worth backing up. Look for a 40% * reduction in the danger if higher level. If the danger of the new grid * is close to the danger of my current grid, I'll stay and fight. */ if (borg_skill[BI_MAXCLEVEL] >= 35 && k > b_k * 6 / 10) continue; /* Skip this grid if it is not really worth backing up. If the danger of the new grid * is close to the danger of my current grid, I'll stay and fight unless I am low * level and there is an adjacent monster. */ if (borg_skill[BI_MAXCLEVEL] < 35 && adjacent_monster == false && k > b_k * 8 / 10) continue; /* Skip higher danger */ /* note: if surrounded, then b_k has been adjusted to a higher number to make his current * grid seem more dangerous. This will encourage him to Back-Up. */ if (k > b_k) continue; /* Record the danger of this prefered grid */ g_k = k; /* Check the freedom there */ f = borg_freedom(y, x); /* Danger is the same, so look at the nature of the grid */ if (b_k == k) { /* If I am low level, reward backing-up if safe */ if (borg_skill[BI_CLEVEL] <= 10 && borg_skill[BI_CDEPTH] && (borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] || borg_skill[BI_CURSP] < borg_skill[BI_MAXSP])) { /* do consider the retreat */ } else { /* Freedom of my grid is better than the next grid * so stay put and fight. 
*/ if (b_f > f || borg_skill[BI_CDEPTH] >= 85) continue; } } /* Save the info */ b_i = i; b_k = k; b_f = f; } /* Back away */ if (b_i >= 0) { /* Hack -- set goal */ g_x = c_x + ddx_ddd[b_i]; g_y = c_y + ddy_ddd[b_i]; /* Note */ borg_note(format("# Backing up to %d,%d (%d > %d)", g_x, g_y, pos_danger, g_k)); /* Back away from danger */ borg_keypress(I2D(ddd[b_i])); /* Reset my Movement and Flow Goals */ goal = 0; /* Success */ return (true); } } /*** Cures ***/ /* cure confusion, second check, first (slightly different) in borg_heal */ if (borg_skill[BI_ISCONFUSED]) { if (borg_skill[BI_MAXHP] - borg_skill[BI_CURHP] >= 300 && (borg_quaff_potion(sv_potion_healing) || borg_quaff_potion(sv_potion_star_healing) || borg_quaff_potion(sv_potion_life))) { borg_note("# Healing. Confusion."); return (true); } if (borg_eat_food(TV_MUSHROOM, sv_mush_cure_mind) || borg_quaff_potion(sv_potion_cure_serious) || borg_quaff_crit(false) || borg_quaff_potion(sv_potion_healing) || borg_use_staff_fail(sv_staff_healing)) { borg_note("# Healing. 
Confusion."); return (true); } } /* Hack -- cure fear when afraid */ if ((borg_skill[BI_ISAFRAID] && !borg_skill[BI_CRSFEAR]) && (randint0(100) < 70 || (borg_class == CLASS_WARRIOR && borg_skill[BI_AMISSILES] <= 0))) { if (borg_eat_food(TV_MUSHROOM, sv_mush_cure_mind) || borg_quaff_potion(sv_potion_boldness) || borg_quaff_potion(sv_potion_heroism) || borg_quaff_potion(sv_potion_berserk) || borg_spell_fail(BERSERK_STRENGTH, 25) || /* berserk */ borg_spell_fail(HEROISM, 25) || /* hero */ borg_activate_item(act_cure_paranoia) || borg_activate_item(act_hero) || borg_activate_item(act_shero) || borg_activate_item(act_cure_mind) || borg_activate_item(act_rage_bless_resist) || borg_activate_item(act_rem_fear_pois) || borg_spell_fail(HOLY_WORD, 25)) { return (true); } } /*** Note impending death XXX XXX XXX ***/ /* Flee from low hit-points */ if (((borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 3) || ((borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 2) && borg_skill[BI_CURHP] < (borg_skill[BI_CLEVEL] * 3))) && (borg_skill[BI_ACCW] < 3) && (borg_skill[BI_AHEAL] < 1)) { /* Flee from low hit-points */ if (borg_skill[BI_CDEPTH] && (randint0(100) < 25)) { /* Start leaving */ if (!goal_leaving) { /* Flee */ borg_note("# Leaving (low hit-points)"); /* Start leaving */ goal_leaving = true; } /* Start fleeing */ if (!goal_fleeing) { /* Flee */ borg_note("# Fleeing (low hit-points)"); /* Start fleeing */ goal_fleeing = true; } } } /* Flee from bleeding wounds or poison and no heals */ if ((borg_skill[BI_ISCUT] || borg_skill[BI_ISPOISONED]) && (borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 2)) { /* Flee from bleeding wounds */ if (borg_skill[BI_CDEPTH] && (randint0(100) < 25)) { /* Start leaving */ if (!goal_leaving) { /* Flee */ borg_note("# Leaving (bleeding/posion)"); /* Start leaving */ goal_leaving = true; } /* Start fleeing */ if (!goal_fleeing) { /* Flee */ borg_note("# Fleeing (bleeding/poison)"); /* Start fleeing */ goal_fleeing = true; } } } /* Emergency check on healing. 
Borg_heal has already been checked but * but we did not use our ez_heal potions. All other attempts to save * ourself have failed. Use the ez_heal if I have it. */ if ((borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 10 || /* dangerously low HP -OR-*/ (pos_danger > borg_skill[BI_CURHP] && /* extreme danger -AND-*/ (borg_skill[BI_ATELEPORT] + borg_skill[BI_AESCAPE] <= 2 && borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 4)) || /* low on escapes */ (borg_skill[BI_AEZHEAL] > 5 && borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 4) || /* moderate danger, lots of heals */ (borg_skill[BI_MAXHP] - borg_skill[BI_CURHP] >= 600 && borg_fighting_unique && borg_skill[BI_CDEPTH] >= 85)) && /* moderate danger, unique, deep */ (borg_quaff_potion(sv_potion_star_healing) || borg_quaff_potion(sv_potion_healing) || borg_quaff_potion(sv_potion_life))) { borg_note("# Using reserve EZ_Heal."); return (true); } /* Hack -- use "recall" to flee if possible */ if (goal_fleeing && !goal_fleeing_munchkin && !goal_fleeing_to_town && borg_skill[BI_CDEPTH] >= 1 && (borg_recall())) { /* Note */ borg_note("# Fleeing the level (recall)"); /* Success */ return (true); } /* If I am waiting for recall,and in danger, buy time with * phase and cure_anythings. 
*/ if (goal_recalling && (pos_danger > avoidance * 2)) { if (!borg_skill[BI_ISCONFUSED] && !borg_skill[BI_ISBLIND] && borg_skill[BI_MAXSP] > 60 && borg_skill[BI_CURSP] < (borg_skill[BI_CURSP] / 4) && borg_quaff_potion(sv_potion_restore_mana)) { borg_note("# Buying time waiting for Recall.(1)"); return (true); } if (borg_caution_phase(50, 1) && (borg_read_scroll(sv_scroll_phase_door) || borg_spell_fail(PHASE_DOOR, 30) || borg_spell_fail(PORTAL, 30) || borg_activate_item(act_tele_phase))) { borg_note("# Buying time waiting for Recall.(2)"); return (true); } if ((borg_skill[BI_MAXHP] - borg_skill[BI_CURHP] < 100) && (borg_quaff_crit(true) || borg_quaff_potion(sv_potion_cure_serious) || borg_quaff_potion(sv_potion_cure_light))) { borg_note("# Buying time waiting for Recall.(3)"); return (true); } if ((borg_skill[BI_MAXHP] - borg_skill[BI_CURHP] > 150) && (borg_zap_rod(sv_rod_healing) || borg_quaff_potion(sv_potion_healing) || borg_quaff_potion(sv_potion_star_healing) || borg_quaff_potion(sv_potion_life) || borg_quaff_crit(true) || borg_quaff_potion(sv_potion_cure_serious) || borg_quaff_potion(sv_potion_cure_light))) { borg_note("# Buying time waiting for Recall.(4)"); return (true); } } /* if I am gonna die next round, and I have no way to escape * use the unknown stuff (if I am low level). */ if (pos_danger > (borg_skill[BI_CURHP] * 4) && borg_skill[BI_CLEVEL] < 20 && !borg_skill[BI_MAXSP]) { if (borg_use_unknown() || borg_read_unknown() || borg_quaff_unknown() || borg_eat_unknown()) return (true); } /* Nothing */ return (false); } /* * New method for handling attacks, missiles, and spells * * Every turn, we evaluate every known method of causing damage * to monsters, and evaluate the "reward" inherent in each of * the known methods which is usable at that time, and then * we actually use whichever method, if any, scores highest. * * For each attack, we need a function which will determine the best * possible result of using that attack, and return its value. 
Also,
 * if requested, the function should actually perform the action.
 *
 * Note that the functions should return zero if the action is not
 * usable, or if the action is not useful.
 *
 * These functions need to apply some form of "cost" evaluation, to
 * prevent the use of expensive spells with minimal reward.  Also,
 * we should always prefer attacking by hand to using spells if the
 * damage difference is "small", since there is no "cost" in making
 * a physical attack.
 *
 * We should take account of "spell failure", as well as "missile
 * missing" and "blow missing" probabilities.
 *
 * Note that the functions may store local state information when
 * doing a "simulation" and then they can use this information if
 * they are asked to implement their strategy.
 *
 * There are several types of damage inducers:
 *
 *   Attacking physically
 *   Launching missiles
 *   Throwing objects
 *   Casting spells
 *   Praying prayers
 *   Using wands
 *   Using rods
 *   Using staffs
 *   Using scrolls
 *   Activating Artifacts
 *   Activate Dragon Armour
 */

/*
 * One index per known method of dealing damage.  The attack selector
 * simulates each usable BF_* method in turn and performs whichever
 * scores the highest expected reward.
 */
enum {
    /* Non-magical actions */
    BF_REST,
    BF_THRUST,
    BF_OBJECT,
    BF_LAUNCH,

    /* Caster spells */
    BF_SPELL_MAGIC_MISSILE,
    BF_SPELL_MAGIC_MISSILE_RESERVE,
    BF_SPELL_STINK_CLOUD,
    BF_SPELL_LIGHT_BEAM,
    BF_SPELL_COLD_BOLT,
    BF_SPELL_STONE_TO_MUD,
    BF_SPELL_SLOW_MONSTER,
    BF_SPELL_SLEEP_III,
    BF_SPELL_FIRE_BALL,
    BF_SPELL_SHOCK_WAVE,
    BF_SPELL_EXPLOSION,
    BF_SPELL_CONFUSE_MONSTER,
    BF_SPELL_COLD_STORM,
    BF_SPELL_METEOR_SWARM,
    BF_SPELL_RIFT,
    BF_SPELL_MANA_STORM,
    BF_SPELL_BLIND_CREATURE,
    BF_SPELL_TRANCE,

    /* Priest prayers */
    BF_PRAYER_HOLY_ORB_BALL,
    BF_PRAYER_DISP_UNDEAD,
    BF_PRAYER_DISP_EVIL,
    BF_PRAYER_DISP_SPIRITS,
    BF_PRAYER_HOLY_WORD,

    BF_SPELL_ANNIHILATE,
    BF_SPELL_ELECTRIC_ARC,

    /* new spells in 4.x */
    BF_SPELL_ACID_SPRAY,
    BF_SPELL_MANA_BOLT,
    BF_SPELL_THRUST_AWAY,
    BF_SPELL_LIGHTNING_STRIKE,
    BF_SPELL_EARTH_RISING,
    BF_SPELL_VOLCANIC_ERUPTION,
    BF_SPELL_RIVER_OF_LIGHTNING,
    BF_SPELL_SPEAR_OF_OROME,
    BF_SPELL_LIGHT_OF_MANWE,
    BF_SPELL_NETHER_BOLT,
    BF_SPELL_TAP_UNLIFE,
    BF_SPELL_CRUSH,
    BF_SPELL_SLEEP_EVIL,
    BF_SPELL_DISENCHANT,
    BF_SPELL_FRIGHTEN,
    BF_SPELL_VAMPIRE_STRIKE,
    BF_PRAYER_DISPEL_LIFE,
    BF_SPELL_DARK_SPEAR,
    BF_SPELL_UNLEASH_CHAOS,
    BF_SPELL_STORM_OF_DARKNESS,
    BF_SPELL_CURSE,
    BF_SPELL_WHIRLWIND_ATTACK,
    BF_SPELL_LEAP_INTO_BATTLE,
    BF_SPELL_MAIM_FOE,
    BF_SPELL_HOWL_OF_THE_DAMNED,

    /* Rods */
    BF_ROD_ELEC_BOLT,
    BF_ROD_COLD_BOLT,
    BF_ROD_ACID_BOLT,
    BF_ROD_FIRE_BOLT,
    BF_ROD_LIGHT_BEAM,
    BF_ROD_DRAIN_LIFE,
    BF_ROD_ELEC_BALL,
    BF_ROD_COLD_BALL,
    BF_ROD_ACID_BALL,
    BF_ROD_FIRE_BALL,
    BF_ROD_SLOW_MONSTER,
    BF_ROD_SLEEP_MONSTER,
    BF_ROD_UNKNOWN,

    /* Staffs */
    BF_STAFF_SLEEP_MONSTERS,
    BF_STAFF_SLOW_MONSTERS,
    BF_STAFF_DISPEL_EVIL,
    BF_STAFF_POWER,
    BF_STAFF_HOLINESS,

    /* Wands */
    BF_WAND_UNKNOWN,
    BF_WAND_MAGIC_MISSILE,
    BF_WAND_ELEC_BOLT,
    BF_WAND_COLD_BOLT,
    BF_WAND_ACID_BOLT,
    BF_WAND_FIRE_BOLT,
    BF_WAND_SLOW_MONSTER,
    BF_WAND_HOLD_MONSTER,
    BF_WAND_CONFUSE_MONSTER,
    BF_WAND_FEAR_MONSTER,
    BF_WAND_ANNIHILATION,
    BF_WAND_DRAIN_LIFE,
    BF_WAND_LIGHT_BEAM,
    BF_WAND_STINKING_CLOUD,
    BF_WAND_ELEC_BALL,
    BF_WAND_COLD_BALL,
    BF_WAND_ACID_BALL,
    BF_WAND_FIRE_BALL,
    BF_WAND_WONDER,
    BF_WAND_DRAGON_COLD,
    BF_WAND_DRAGON_FIRE,

    /* Artifact/object activations */
    BF_EF_FIRE1,
    BF_EF_FIRE2,
    BF_EF_FIRE3,
    BF_EF_FROST1,
    BF_EF_FROST2,
    BF_EF_FROST3,
    BF_EF_FROST4,
    BF_EF_DRAIN_LIFE1,
    BF_EF_DRAIN_LIFE2,
    BF_EF_STINKING_CLOUD,
    BF_EF_CONFUSE,
    BF_EF_ARROW,
    BF_EF_MISSILE,
    BF_EF_SLEEP,
    BF_EF_LIGHTNING_BOLT,
    BF_EF_ACID1,
    BF_EF_DISP_EVIL,
    BF_EF_MANA_BOLT,
    BF_EF_STAR_BALL, /* Razorback and Mediator */
    BF_EF_STARLIGHT2,
    BF_EF_STARLIGHT,

    /* Elemental rings */
    BF_RING_ACID,
    BF_RING_FIRE,
    BF_RING_ICE,
    BF_RING_LIGHTNING,

    /* Dragon scale mail breath activations */
    BF_DRAGON_BLUE,
    BF_DRAGON_WHITE,
    BF_DRAGON_BLACK,
    BF_DRAGON_GREEN,
    BF_DRAGON_RED,
    BF_DRAGON_MULTIHUED,
    BF_DRAGON_GOLD,
    BF_DRAGON_CHAOS,
    BF_DRAGON_LAW,
    BF_DRAGON_BALANCE,
    BF_DRAGON_SHINING,
    BF_DRAGON_POWER,

    BF_MAX
};

/*
 * Guess how much damage a physical attack will do to a monster
 */
static int borg_thrust_damage_one(int i)
{
    int dam;
    int mult;

    borg_kill* kill;

    struct monster_race* r_ptr;

    borg_item* item;

    int chance;

    /* Examine current weapon */
    item = &borg_items[INVEN_WIELD];

    /* Monster record */
    kill = &borg_kills[i];

    /* Monster
race */
    r_ptr = &r_info[kill->r_idx];

    /* Damage: average of one weapon die roll (dd * (ds+1)/2) */
    dam = (item->dd * (item->ds + 1) / 2);

    /* here is the place for slays and such */
    mult = 1;

    /* x2 multiplier for basic slays (animal, evil) */
    if (((borg_skill[BI_WS_ANIMAL]) && (rf_has(r_ptr->flags, RF_ANIMAL)))
        || ((borg_skill[BI_WS_EVIL]) && (rf_has(r_ptr->flags, RF_EVIL))))
        mult = 2;

    /* x3 multiplier for stronger slays, and brands (a brand applies
     * unless the monster is immune to that element) */
    if (((borg_skill[BI_WS_UNDEAD]) && (rf_has(r_ptr->flags, RF_UNDEAD)))
        || ((borg_skill[BI_WS_DEMON]) && (rf_has(r_ptr->flags, RF_DEMON)))
        || ((borg_skill[BI_WS_ORC]) && (rf_has(r_ptr->flags, RF_ORC)))
        || ((borg_skill[BI_WS_TROLL]) && (rf_has(r_ptr->flags, RF_TROLL)))
        || ((borg_skill[BI_WS_GIANT]) && (rf_has(r_ptr->flags, RF_GIANT)))
        || ((borg_skill[BI_WS_DRAGON]) && (rf_has(r_ptr->flags, RF_DRAGON)))
        || ((borg_skill[BI_WB_ACID]) && !(rf_has(r_ptr->flags, RF_IM_ACID)))
        || ((borg_skill[BI_WB_FIRE]) && !(rf_has(r_ptr->flags, RF_IM_FIRE)))
        || ((borg_skill[BI_WB_COLD]) && !(rf_has(r_ptr->flags, RF_IM_COLD)))
        || ((borg_skill[BI_WB_POIS]) && !(rf_has(r_ptr->flags, RF_IM_POIS)))
        || ((borg_skill[BI_WB_ELEC]) && !(rf_has(r_ptr->flags, RF_IM_ELEC))))
        mult = 3;

    /* x5 multiplier for "kill" (x5 slay) weapon flags */
    if (((borg_skill[BI_WK_UNDEAD]) && (rf_has(r_ptr->flags, RF_UNDEAD)))
        || ((borg_skill[BI_WK_DEMON]) && (rf_has(r_ptr->flags, RF_DEMON)))
        || ((borg_skill[BI_WK_DRAGON]) && (rf_has(r_ptr->flags, RF_DRAGON))))
        mult = 5;

    /* add the multiplier */
    dam *= mult;

    /* add weapon bonuses */
    dam += item->to_d;

    /* add player bonuses */
    dam += borg_skill[BI_TODAM];

    /* multiply the damage for the whole round of attacks */
    dam *= borg_skill[BI_BLOWS];

    /* Bonuses for combat */
    chance = (borg_skill[BI_THN] + ((borg_skill[BI_TOHIT] + item->to_h) * 3));

    /* Chance of hitting the monsters AC: treat a very poor to-hit as no damage */
    if (chance < (r_ptr->ac * 3 / 4) * 8 / 10) dam = 0;

    /* 5% automatic success/fail */
    if (chance > 95) chance = 95;
    if (chance < 5) chance = 5;

    /* add 10% to chance to give a bit more weight to weapons */
    if (borg_skill[BI_CLEVEL] > 15) chance += 10;

    /* Mages with Mana do not get that bonus, they should cast */
    if ((borg_class == CLASS_MAGE || borg_class == CLASS_NECROMANCER)
        && borg_skill[BI_CURSP] > 1)
        chance -= 10;

    /* reduce damage by the % chance to hit */
    dam = (dam * chance) / 100;

    /* Try to place a minimal amount of damage */
    if (dam <= 0) dam = 1;

    /* Limit damage to twice maximal hitpoints (except against uniques) */
    if (dam > kill->power * 2 && !rf_has(r_ptr->flags, RF_UNIQUE))
        dam = kill->power * 2;

    /* Reduce the damage if a mage, they should not melee if they can avoid it */
    if ((borg_class == CLASS_MAGE || borg_class == CLASS_NECROMANCER)
        && borg_skill[BI_MAXCLEVEL] < 40 && borg_skill[BI_CURSP] > 1)
        dam = (dam * 8 / 10) + 1;

    /*
     * Enhance the perceived damage on Uniques.  This way we target them.
     * Keep in mind that he should hit the uniques but if he has a
     * x5 great bane of dragons, he will tend attack the dragon since the
     * perceived (and actual) damage is higher.  But don't select
     * the town uniques (maggot does no damage)
     */
    if ((rf_has(r_ptr->flags, RF_UNIQUE)) && borg_skill[BI_CDEPTH] >= 1)
        dam += (dam * 5);

    /* Hack -- ignore Maggot until later.  Player will chase Maggot
     * down all across the screen waking up all the monsters.  Then
     * he is stuck in a compromised situation.
     */
    if ((rf_has(r_ptr->flags, RF_UNIQUE)) && borg_skill[BI_CDEPTH] == 0) {
        dam = dam * 2 / 3;

        /* Dont hunt maggot until later */
        if (borg_skill[BI_CLEVEL] < 5) dam = 0;
    }

    /* give a small bonus for whacking a breeder */
    if (rf_has(r_ptr->flags, RF_MULTIPLY)) dam = (dam * 3 / 2);

    /* Enhance the perceived damage to summoner in order to influence the
     * choice of targets.
*/ if ((rsf_has(r_ptr->spell_flags, RSF_S_KIN)) || (rsf_has(r_ptr->spell_flags, RSF_S_HI_DEMON)) || (rsf_has(r_ptr->spell_flags, RSF_S_MONSTER)) || (rsf_has(r_ptr->spell_flags, RSF_S_MONSTERS)) || (rsf_has(r_ptr->spell_flags, RSF_S_ANIMAL)) || (rsf_has(r_ptr->spell_flags, RSF_S_SPIDER)) || (rsf_has(r_ptr->spell_flags, RSF_S_HOUND)) || (rsf_has(r_ptr->spell_flags, RSF_S_HYDRA)) || (rsf_has(r_ptr->spell_flags, RSF_S_AINU)) || (rsf_has(r_ptr->spell_flags, RSF_S_DEMON)) || (rsf_has(r_ptr->spell_flags, RSF_S_UNDEAD)) || (rsf_has(r_ptr->spell_flags, RSF_S_DRAGON)) || (rsf_has(r_ptr->spell_flags, RSF_S_HI_UNDEAD)) || (rsf_has(r_ptr->spell_flags, RSF_S_WRAITH)) || (rsf_has(r_ptr->spell_flags, RSF_S_UNIQUE))) dam += ((dam * 3) / 2); /* * Apply massive damage bonus to Questor monsters to * encourage borg to strike them. */ if (rf_has(r_ptr->flags, RF_QUESTOR)) dam += (dam * 5); /* Damage */ return (dam); } /* * Simulate/Apply the optimal result of making a physical attack */ extern int borg_attack_aux_thrust(void) { int p, dir; int i, b_i = -1; int d, b_d = -1; borg_grid* ag; borg_kill* kill; /* Too afraid to attack */ if (borg_skill[BI_ISAFRAID] || borg_skill[BI_CRSFEAR]) return (0); /* Examine possible destinations */ for (i = 0; i < borg_temp_n; i++) { int x = borg_temp_x[i]; int y = borg_temp_y[i]; /* Require "adjacent" */ if (borg_distance(c_y, c_x, y, x) > 1) continue; /* Acquire grid */ ag = &borg_grids[y][x]; /* Calculate "average" damage */ d = borg_thrust_damage_one(ag->kill); /* No damage */ if (d <= 0) continue; /* Obtain the monster */ kill = &borg_kills[ag->kill]; /* Hack -- avoid waking most "hard" sleeping monsters */ if (!kill->awake && (d <= kill->power) && !borg_munchkin_mode) { /* Calculate danger */ p = borg_danger_aux(y, x, 1, ag->kill, true, true); if (p > avoidance * 2) continue; } /* Hack -- ignore sleeping town monsters */ if (!borg_skill[BI_CDEPTH] && !kill->awake) continue; /* Calculate "danger" to player */ p = borg_danger_aux(c_y, c_x, 2, 
ag->kill, true, true); /* Reduce "bonus" of partial kills when higher level */ if (d <= kill->power && borg_skill[BI_MAXCLEVEL] > 15) p = p / 10; /* Add the danger-bonus to the damage */ d += p; /* Ignore lower damage */ if ((b_i >= 0) && (d < b_d)) continue; /* Save the info */ b_i = i; b_d = d; } /* Nothing to attack */ if (b_i < 0) return (0); /* Simulation */ if (borg_simulate) return (b_d); /* Save the location */ g_x = borg_temp_x[b_i]; g_y = borg_temp_y[b_i]; ag = &borg_grids[g_y][g_x]; kill = &borg_kills[ag->kill]; /* Note */ borg_note(format("# Facing %s at (%d,%d) who has %d Hit Points.", (r_info[kill->r_idx].name), g_y, g_x, kill->power)); borg_note(format("# Attacking with weapon '%s'", borg_items[INVEN_WIELD].desc)); /* Get a direction for attacking */ dir = borg_extract_dir(c_y, c_x, g_y, g_x); /* Attack the grid */ borg_keypress('+'); borg_keypress(I2D(dir)); /* Success */ return (b_d); } /* * Target a location. Can be used alone or at "Direction?" prompt. * * Warning -- This will only work for locations on the current panel */ bool borg_target(int y, int x) { int x1, y1, x2, y2; borg_grid* ag; borg_kill* kill; ag = &borg_grids[y][x]; kill = &borg_kills[ag->kill]; /* Log */ /* Report a little bit */ if (ag->kill) { borg_note(format("# Targeting %s who has %d Hit Points (%d,%d).", (r_info[kill->r_idx].name), kill->power, y, x)); } else { borg_note(format("# Targetting location (%d,%d)", y, x)); } /* Target mode */ borg_keypress('*'); /* Target a location */ borg_keypress('p'); /* Determine "path" */ x1 = c_x; y1 = c_y; x2 = x; y2 = y; /* Move to the location (diagonals) */ for (; (y1 < y2) && (x1 < x2); y1++, x1++) borg_keypress('3'); for (; (y1 < y2) && (x1 > x2); y1++, x1--) borg_keypress('1'); for (; (y1 > y2) && (x1 < x2); y1--, x1++) borg_keypress('9'); for (; (y1 > y2) && (x1 > x2); y1--, x1--) borg_keypress('7'); /* Move to the location */ for (; y1 < y2; y1++) borg_keypress('2'); for (; y1 > y2; y1--) borg_keypress('8'); for (; x1 < x2; x1++) 
borg_keypress('6'); for (; x1 > x2; x1--) borg_keypress('4'); /* Select the target */ borg_keypress('5'); /* Carry these variables to be used on reporting spell * pathway */ borg_target_y = y; borg_target_x = x; /* Success */ return (true); } /* * Mark spot along the target path a wall. * This will mark the unknown squares as a wall. This might not be * the wall we ran into but also might be. * * Warning -- This will only work for locations on the current panel */ bool borg_target_unknown_wall(int y, int x) { int n_x, n_y; bool found = false; bool y_hall = false; bool x_hall = false; borg_grid* ag; struct monster_race* r_ptr; borg_kill* kill; borg_note(format("# Perhaps wall near targetted location (%d,%d)", y, x)); /* Determine "path" */ n_x = c_x; n_y = c_y; /* check for 'in a hall' x axis */ /* This check is for this: */ /* * x x * ..@. or .@.. * x x * * 'x' being 'not a floor' and '.' being a floor. * * We would like to know if in a hall so we can place * the suspect wall off the hallway path. * like this:######x P * ........@.... * ################## * The shot may miss and we want the borg to guess the * wall to be at the X instead of first unkown grid which * is 3 west and 1 south of the X. */ if ((borg_grids[c_y + 1][c_x].feat == FEAT_FLOOR && borg_grids[c_y - 1][c_x].feat == FEAT_FLOOR && (borg_grids[c_y + 2][c_x].feat == FEAT_FLOOR || borg_grids[c_y - 2][c_x].feat == FEAT_FLOOR)) && (borg_grids[c_y][c_x + 1].feat != FEAT_FLOOR && borg_grids[c_y][c_x - 1].feat != FEAT_FLOOR)) x_hall = true; /* check for 'in a hall' y axis. * Again, we want to place the suspected wall off our * hallway. 
*/ if ((borg_grids[c_y][c_x + 1].feat == FEAT_FLOOR && borg_grids[c_y][c_x - 1].feat == FEAT_FLOOR && (borg_grids[c_y][c_x + 2].feat == FEAT_FLOOR || borg_grids[c_y][c_x - 2].feat == FEAT_FLOOR)) && (borg_grids[c_y + 1][c_x].feat != FEAT_FLOOR && borg_grids[c_y - 1][c_x].feat != FEAT_FLOOR)) y_hall = true; while (1) { ag = &borg_grids[n_y][n_x]; kill = &borg_kills[ag->kill]; r_ptr = &r_info[kill->r_idx]; if (rf_has(r_ptr->flags, RF_PASS_WALL)) { borg_note(format("# Guessing wall (%d,%d) under ghostly target (%d,%d)", n_y, n_x, n_y, n_x)); borg_grids[n_y][n_x].feat = FEAT_GRANITE; found = true; return (found); /* not sure... should we return here? */ } if (borg_grids[n_y][n_x].feat == FEAT_NONE && ((n_y != c_y) || !y_hall) && ((n_x != c_x) || !x_hall)) { borg_note(format("# Guessing wall (%d,%d) near target (%d,%d)", n_y, n_x, y, x)); borg_grids[n_y][n_x].feat = FEAT_GRANITE; found = true; return (found); /* not sure... should we return here? maybe should mark ALL unknowns in path... */ } /* Pathway found the target. */ if (n_x == x && n_y == y) { /* end of the pathway */ mmove2(&n_y, &n_x, y, x, c_y, c_x); borg_note(format("# Guessing wall (%d,%d) near target (%d,%d)", n_y, n_x, y, x)); borg_grids[n_y][n_x].feat = FEAT_GRANITE; found = true; return (found); } /* Calculate the new location */ mmove2(&n_y, &n_x, c_y, c_x, y, x); } } /* adapted from player_attack.c make_ranged_shot() */ static int borg_best_mult(borg_item* obj, struct monster_race* r_ptr) { int i; int max_mult = 1; /* Brands */ for (i = 1; i < z_info->brand_max; i++) { struct brand* brand = &brands[i]; if (obj) { /* Brand is on an object */ if (!obj->brands[i]) continue; } else { /* Temporary brand */ if (!player_has_temporary_brand(player, i)) continue; } /* Is the monster vulnerable? 
*/
        if (!rf_has(r_ptr->flags, brand->resist_flag)) {
            int mult = brand->multiplier;

            /* Double the brand when the monster is specifically vulnerable */
            if (brand->vuln_flag && rf_has(r_ptr->flags, brand->vuln_flag)) {
                mult *= 2;
            }
            max_mult = MAX(mult, max_mult);
        }
    }

    /* Slays */
    for (i = 1; i < z_info->slay_max; i++) {
        struct slay* slay = &slays[i];
        if (obj) {
            /* Slay is on an object */
            if (!obj->slays || !obj->slays[i])
                continue;
        } else {
            /* Temporary slay */
            if (!player_has_temporary_slay(player, i))
                continue;
        }
        /* A slay only applies when the monster has the matching race flag */
        if (rf_has(r_ptr->flags, slay->race_flag)) {
            max_mult = MAX(slay->multiplier, max_mult);
        }
    }

    /* Best single multiplier found; 1 when nothing applies */
    return max_mult;
}

/*
 * Guess how much damage a spell attack will do to a monster
 *
 * We only handle the "standard" damage types.
 *
 * We are paranoid about monster resistances
 *
 * He tends to waste all of his arrows on a monsters immediately adjacent
 * to him.  Then he has no arrows for the rest of the level.  We will
 * decrease the damage if the monster is adjacent and we are getting low
 * on missiles.
 *
 * We will also decrease the value of the missile attack on breeders or
 * high clevel borgs town scumming.
 */
int borg_launch_damage_one(int i, int dam, int typ, int ammo_location)
{
    int p1, p2 = 0;
    int j;
    bool borg_use_missile = false;
    int ii;
    int vault_grids = 0;
    int x, y;
    int k;
    bool gold_eater = false;
    int chance = 0;
    int bonus = 0;
    int cur_dis = 0;
    int armor = 0;

    borg_kill* kill;
    borg_grid* ag;

    struct monster_race* r_ptr;

    /* Monster record */
    kill = &borg_kills[i];

    /* Monster race */
    r_ptr = &r_info[kill->r_idx];

    /* How far away is the target? */
    cur_dis = borg_distance(c_y, c_x, kill->y, kill->x);

    /* Calculation our chance of hitting.
Player bonuses, Bow bonuses, Ammo Bonuses */ bonus = (borg_skill[BI_TOHIT] + borg_items[INVEN_BOW].to_h + borg_items[ammo_location].to_h); chance = (borg_skill[BI_THB] + (bonus * BTH_PLUS_ADJ)); armor = r_ptr->ac + cur_dis; /* Very quickly look for gold eating monsters */ for (k = 0; k < 4; k++) { /* gold eater */ if (r_ptr->blow[k].effect && borg_mon_blow_effect(r_ptr->blow[k].effect->name) == MONBLOW_EAT_GOLD) gold_eater = true; } /* Analyze the damage type */ switch (typ) { /* Magic Missile */ case BORG_ATTACK_MISSILE: break; case BORG_ATTACK_ARROW: { borg_item* bow = &borg_items[INVEN_BOW]; borg_item* ammo = &borg_items[ammo_location]; int mult = borg_best_mult(bow, r_ptr); mult = MAX(mult, borg_best_mult(ammo, r_ptr)); dam *= mult; /* don't point blank non-uniques */ if (cur_dis == 1 && !(rf_has(r_ptr->flags, RF_UNIQUE))) dam /= 5; /* Do I hit regularly? (80%)*/ if (chance < armor * 8 / 10) dam = 0; } break; /* Pure damage */ case BORG_ATTACK_MANA: if (borg_fighting_unique && borg_has[kv_potion_restore_mana] > 3) dam *= 2; break; /* Meteor -- powerful magic missile */ case BORG_ATTACK_METEOR: break; /* Acid */ case BORG_ATTACK_ACID: if (rf_has(r_ptr->flags, RF_IM_ACID)) dam = 0; break; /* Electricity */ case BORG_ATTACK_ELEC: if (rf_has(r_ptr->flags, RF_IM_ELEC)) dam = 0; break; /* Fire damage */ case BORG_ATTACK_FIRE: if (rf_has(r_ptr->flags, RF_IM_FIRE)) dam = 0; if ((rf_has(r_ptr->flags, RF_HURT_FIRE))) dam *= 2; break; /* Cold */ case BORG_ATTACK_COLD: if (rf_has(r_ptr->flags, RF_IM_COLD)) dam = 0; if (rf_has(r_ptr->flags, RF_HURT_COLD)) dam *= 2; break; /* Poison */ case BORG_ATTACK_POIS: if (rf_has(r_ptr->flags, RF_IM_POIS)) dam = 0; break; /* Ice */ case BORG_ATTACK_ICE: if (rf_has(r_ptr->flags, RF_IM_COLD)) dam = 0; break; /* Holy Orb */ case BORG_ATTACK_HOLY_ORB: if (rf_has(r_ptr->flags, RF_EVIL)) dam *= 2; break; /* dispel undead */ case BORG_ATTACK_DISP_UNDEAD: if (!(rf_has(r_ptr->flags, RF_UNDEAD))) dam = 0; break; /* dispel spirits */ case 
BORG_ATTACK_DISP_SPIRITS:
        if (!(rf_has(r_ptr->flags, RF_SPIRIT)))
            dam = 0;
        break;

    /* Dispel Evil */
    case BORG_ATTACK_DISP_EVIL:
        if (!(rf_has(r_ptr->flags, RF_EVIL)))
            dam = 0;
        break;

    /* Dispel life */
    /* NOTE(review): these two tests leave damage non-zero only when the
     * monster is BOTH nonliving AND undead.  Drain-life style attacks
     * normally fail AGAINST nonliving/undead targets, so the negations
     * here look inverted -- confirm against the game's effect code. */
    case BORG_ATTACK_DRAIN_LIFE:
        if (!(rf_has(r_ptr->flags, RF_NONLIVING)))
            dam = 0;
        if (!(rf_has(r_ptr->flags, RF_UNDEAD)))
            dam = 0;
        break;

    /* Holy Word */
    case BORG_ATTACK_HOLY_WORD:
        if (!(rf_has(r_ptr->flags, RF_EVIL)))
            dam = 0;
        break;

    /* Weak Lite */
    case BORG_ATTACK_LIGHT_WEAK:
        if (!(rf_has(r_ptr->flags, RF_HURT_LIGHT)))
            dam = 0;
        break;

    /* Drain Life */
    case BORG_ATTACK_OLD_DRAIN:
        /* Poor value at point-blank range */
        if (borg_distance(c_y, c_x, kill->y, kill->x) == 1)
            dam /= 5;
        /* No effect on undead, demons, or 'E', 'g', 'v' monsters */
        if ((rf_has(r_ptr->flags, RF_UNDEAD)) || (rf_has(r_ptr->flags, RF_DEMON))
            || (strchr("Egv", r_ptr->d_char))) {
            dam = 0;
        }
        break;

    /* Stone to Mud */
    case BORG_ATTACK_KILL_WALL:
        if (!(rf_has(r_ptr->flags, RF_HURT_ROCK)))
            dam = 0;
        break;

    /* New mage spell */
    case BORG_ATTACK_NETHER: {
        if (rf_has(r_ptr->flags, RF_UNDEAD)) {
            dam = 0;
        } else if (rsf_has(r_ptr->spell_flags, RSF_BR_NETH)) {
            /* Breathers of an element resist it: 3/9 (one third) damage */
            dam *= 3;
            dam /= 9;
        } else if (rf_has(r_ptr->flags, RF_EVIL)) {
            dam /= 2;
        }
    } break;

    /* New mage spell */
    case BORG_ATTACK_CHAOS:
        if (rsf_has(r_ptr->spell_flags, RSF_BR_CHAO)) {
            dam *= 3;
            dam /= 9;
        }
        /* If the monster is Unique full damage ok.
* Otherwise, polymorphing will reset HP */ if (!(rf_has(r_ptr->flags, RF_UNIQUE))) dam = -999; break; /* New mage spell */ case BORG_ATTACK_GRAVITY: if (rsf_has(r_ptr->spell_flags, RSF_BR_GRAV)) { dam *= 3; dam /= 9; } break; /* New mage spell */ case BORG_ATTACK_SHARD: if (rsf_has(r_ptr->spell_flags, RSF_BR_SHAR)) { dam *= 3; dam /= 9; } break; /* New mage spell */ case BORG_ATTACK_SOUND: if (rsf_has(r_ptr->spell_flags, RSF_BR_SOUN)) { dam *= 3; dam /= 9; } break; /* Weird attacks */ case BORG_ATTACK_PLASMA: if (rsf_has(r_ptr->spell_flags, RSF_BR_PLAS)) { dam *= 3; dam /= 9; } break; case BORG_ATTACK_CONFU: if (rf_has(r_ptr->flags, RF_NO_CONF)) { dam = 0; } break; case BORG_ATTACK_DISEN: if (rsf_has(r_ptr->spell_flags, RSF_BR_DISE)) { dam *= 3; dam /= 9; } break; case BORG_ATTACK_NEXUS: if (rsf_has(r_ptr->spell_flags, RSF_BR_NEXU)) { dam *= 3; dam /= 9; } break; case BORG_ATTACK_FORCE: if (rsf_has(r_ptr->spell_flags, RSF_BR_WALL)) { dam *= 3; dam /= 9; } break; case BORG_ATTACK_INERTIA: if (rsf_has(r_ptr->spell_flags, RSF_BR_INER)) { dam *= 3; dam /= 9; } break; case BORG_ATTACK_TIME: if (rsf_has(r_ptr->spell_flags, RSF_BR_TIME)) { dam *= 3; dam /= 9; } break; case BORG_ATTACK_LIGHT: if (rsf_has(r_ptr->spell_flags, RSF_BR_LIGHT)) { dam *= 3; dam /= 9; } break; case BORG_ATTACK_DARK: if (rsf_has(r_ptr->spell_flags, RSF_BR_DARK)) { dam *= 3; dam /= 9; } break; case BORG_ATTACK_WATER: if (rsf_has(r_ptr->spell_flags, RSF_BA_WATE)) { dam *= 3; dam /= 9; } dam /= 2; break; /* Various */ case BORG_ATTACK_OLD_HEAL: case BORG_ATTACK_OLD_CLONE: case BORG_ATTACK_OLD_SPEED: case BORG_ATTACK_DARK_WEAK: case BORG_ATTACK_KILL_DOOR: case BORG_ATTACK_KILL_TRAP: case BORG_ATTACK_MAKE_WALL: case BORG_ATTACK_MAKE_DOOR: case BORG_ATTACK_MAKE_TRAP: case BORG_ATTACK_AWAY_UNDEAD: case BORG_ATTACK_TURN_EVIL: dam = 0; break; /* These spells which put the monster out of commission, we * look at the danger of the monster prior to and after being * put out of commission. 
The difference is the damage. * The following factors are considered when we * consider the spell: * * 1. Is it already comprised by that spell? * 2. Is it comprimised by another spell? * 3. Does it resist the modality? * 4. Will it make it's savings throw better than half the time? * 5. We generally ignore these spells for breeders. * * The spell sleep II and sanctuary have a special consideration * since the monsters must be adjacent to the player. */ case BORG_ATTACK_AWAY_ALL: /* Teleport Other works differently. Basically the borg * will keep a list of all the monsters in the line of * fire. Then when he checks the danger, he will not * include those monsters. */ /* try not to teleport away uniques. These are the guys you are trying */ /* to kill! */ if (rf_has(r_ptr->flags, RF_UNIQUE)) { /* This unique is low on HP, finish it off */ if (kill->injury >= 60) dam = -9999; /* I am sitting pretty in an AS-Corridor */ else if (borg_as_position) dam = -9999; /* If this unique is causing the danger, get rid of it */ else if (dam > avoidance * 13 / 10 && borg_skill[BI_CDEPTH] <= 98) { /* get rid of this unique by storing his info */ borg_tp_other_index[borg_tp_other_n] = i; borg_tp_other_y[borg_tp_other_n] = kill->y; borg_tp_other_x[borg_tp_other_n] = kill->x; borg_tp_other_n++; } /* If fighting multiple uniques, get rid of one */ else if (borg_fighting_unique >= 2 && borg_fighting_unique <= 8) { /* get rid of one unique or both if they are in a beam-line */ borg_tp_other_index[borg_tp_other_n] = i; borg_tp_other_y[borg_tp_other_n] = kill->y; borg_tp_other_x[borg_tp_other_n] = kill->x; borg_tp_other_n++; } /* Unique is adjacent to Borg */ else if (borg_class == CLASS_MAGE && borg_distance(c_y, c_x, kill->y, kill->x) <= 2) { /* get rid of unique next to me */ borg_tp_other_index[borg_tp_other_n] = i; borg_tp_other_y[borg_tp_other_n] = kill->y; borg_tp_other_x[borg_tp_other_n] = kill->x; borg_tp_other_n++; } /* Unique in a vault, get rid of it, clean vault */ else if 
(vault_on_level) { /* Scan grids adjacent to monster */ for (ii = 0; ii < 8; ii++) { x = kill->x + ddx_ddd[ii]; y = kill->y + ddy_ddd[ii]; /* Access the grid */ ag = &borg_grids[y][x]; /* Skip unknown grids (important) */ if (ag->feat == FEAT_NONE) continue; /* Count adjacent Permas */ if (ag->feat == FEAT_PERM) vault_grids++; } /* Near enough perma grids? */ if (vault_grids >= 2) { /* get rid of unique next to perma grids */ borg_tp_other_index[borg_tp_other_n] = i; borg_tp_other_y[borg_tp_other_n] = kill->y; borg_tp_other_x[borg_tp_other_n] = kill->x; borg_tp_other_n++; } } else dam = -999; } else /* not a unique */ { /* get rid of this non-unique by storing his info */ borg_tp_other_index[borg_tp_other_n] = i; borg_tp_other_y[borg_tp_other_n] = kill->y; borg_tp_other_x[borg_tp_other_n] = kill->x; borg_tp_other_n++; } break; /* This teleport away is used to teleport away all monsters * as the borg goes through his special attacks. */ case BORG_ATTACK_AWAY_ALL_MORGOTH: /* Mostly no damage */ dam = 0; /* If its touching a glyph grid, nail it. */ for (j = 0; j < 8; j++) { int y2 = kill->y + ddy_ddd[j]; int x2 = kill->x + ddx_ddd[j]; /* Get the grid */ ag = &borg_grids[y2][x2]; /* If its touching a glyph grid, nail it. */ if (ag->glyph) { /* get rid of this one by storing his info */ borg_tp_other_index[borg_tp_other_n] = i; borg_tp_other_y[borg_tp_other_n] = kill->y; borg_tp_other_x[borg_tp_other_n] = kill->x; borg_tp_other_n++; dam = 300; } } /* If the borg is not in a good position, do it */ if (morgoth_on_level && !borg_morgoth_position) { /* get rid of this one by storing his info */ borg_tp_other_index[borg_tp_other_n] = i; borg_tp_other_y[borg_tp_other_n] = kill->y; borg_tp_other_x[borg_tp_other_n] = kill->x; borg_tp_other_n++; dam = 100; } /* If the borg does not have enough Mana to attack this * round and cast Teleport Away next round, then do it now. 
*/ if (borg_skill[BI_CURSP] <= 35) { /* get rid of this unique by storing his info */ borg_tp_other_index[borg_tp_other_n] = i; borg_tp_other_y[borg_tp_other_n] = kill->y; borg_tp_other_x[borg_tp_other_n] = kill->x; borg_tp_other_n++; dam = 150; } break; /* This BORG_ATTACK_ is hacked to work for Mass Genocide. Since * we cannot mass gen uniques. */ case BORG_ATTACK_DISP_ALL: if (rf_has(r_ptr->flags, RF_UNIQUE)) { dam = 0; break; } dam = borg_danger_aux(c_y, c_x, 1, i, true, true); break; case BORG_ATTACK_OLD_CONF: dam = 0; if (rf_has(r_ptr->flags, RF_NO_CONF)) break; if (rf_has(r_ptr->flags, RF_MULTIPLY)) break; if (kill->speed < r_ptr->speed - 5) break; if (kill->confused) break; if (!kill->awake) break; if ((kill->level > (borg_skill[BI_CLEVEL] < 13 ? 10 : (((borg_skill[BI_CLEVEL] - 10) / 4) * 3) + 10))) break; dam = -999; if (rf_has(r_ptr->flags, RF_UNIQUE)) break; borg_confuse_spell = false; p1 = borg_danger_aux(c_y, c_x, 1, i, true, true); /* Make certain monsters appear to have more danger so the borg is more likely to use this attack */ if (kill->afraid && borg_skill[BI_CLEVEL] <= 10) p1 = p1 + 20; borg_confuse_spell = true; p2 = borg_danger_aux(c_y, c_x, 1, i, true, true); borg_confuse_spell = false; dam = (p1 - p2); break; case BORG_ATTACK_TURN_ALL: dam = 0; if (kill->speed < r_ptr->speed - 5) break; if (rf_has(r_ptr->flags, RF_NO_FEAR)) break; if (kill->confused) break; if (!kill->awake) break; if ((kill->level > (borg_skill[BI_CLEVEL] < 13 ? 
10 : (((borg_skill[BI_CLEVEL] - 10) / 4) * 3) + 10))) break; dam = -999; if (rf_has(r_ptr->flags, RF_UNIQUE)) break; borg_fear_mon_spell = false; p1 = borg_danger_aux(c_y, c_x, 1, i, true, true); /* Make certain monsters appear to have more danger so the borg is more likely to use this attack */ if (kill->afraid && borg_skill[BI_CLEVEL] <= 10) p1 = p1 + 20; borg_fear_mon_spell = true; p2 = borg_danger_aux(c_y, c_x, 1, i, true, true); borg_fear_mon_spell = false; dam = (p1 - p2); break; case BORG_ATTACK_OLD_SLOW: dam = 0; if (kill->speed < r_ptr->speed - 5) break; if (kill->confused) break; if (!kill->awake) break; if ((kill->level > (borg_skill[BI_CLEVEL] < 13 ? 10 : (((borg_skill[BI_CLEVEL] - 10) / 4) * 3) + 10))) break; dam = -999; if (rf_has(r_ptr->flags, RF_UNIQUE)) break; borg_slow_spell = false; p1 = borg_danger_aux(c_y, c_x, 1, i, true, true); /* Make certain monsters appear to have more danger so the borg is more likely to use this attack */ if (kill->afraid && borg_skill[BI_CLEVEL] <= 10) p1 = p1 + 20; borg_slow_spell = true; p2 = borg_danger_aux(c_y, c_x, 1, i, true, true); borg_slow_spell = false; dam = (p1 - p2); break; case BORG_ATTACK_OLD_SLEEP: case BORG_ATTACK_SLEEP_EVIL: dam = 0; if (rf_has(r_ptr->flags, RF_NO_SLEEP)) break; if (!rf_has(r_ptr->flags, RF_EVIL) && typ == BORG_ATTACK_SLEEP_EVIL) break; if (kill->speed < r_ptr->speed - 5) break; if (kill->confused) break; if (!kill->awake) break; if ((kill->level > (borg_skill[BI_CLEVEL] < 13 ? 
10 : (((borg_skill[BI_CLEVEL] - 10) / 4) * 3) + 10))) break; dam = -999; if (rf_has(r_ptr->flags, RF_UNIQUE)) break; borg_sleep_spell = false; p1 = borg_danger_aux(c_y, c_x, 1, i, true, true); /* Make certain monsters appear to have more danger so the borg is more likely to use this attack */ if (kill->afraid && borg_skill[BI_CLEVEL] <= 10) p1 = p1 + 20; borg_sleep_spell = true; p2 = borg_danger_aux(c_y, c_x, 1, i, true, true); borg_sleep_spell = false; dam = (p1 - p2); break; case BORG_ATTACK_OLD_POLY: dam = 0; if ((kill->level > (borg_skill[BI_CLEVEL] < 13 ? 10 : (((borg_skill[BI_CLEVEL] - 10) / 4) * 3) + 10))) break; dam = -999; if (rf_has(r_ptr->flags, RF_UNIQUE)) break; dam = borg_danger_aux(c_y, c_x, 2, i, true, true); /* dont bother unless he is a scary monster */ if ((dam < avoidance * 2) && !kill->afraid) dam = 0; break; case BORG_ATTACK_TURN_UNDEAD: if (rf_has(r_ptr->flags, RF_UNDEAD)) { dam = 0; if (kill->confused) break; if (kill->speed < r_ptr->speed - 5) break; if (!kill->awake) break; if (kill->level > borg_skill[BI_CLEVEL] - 5) break; borg_fear_mon_spell = false; p1 = borg_danger_aux(c_y, c_x, 1, i, true, true); borg_fear_mon_spell = true; p2 = borg_danger_aux(c_y, c_x, 1, i, true, true); borg_fear_mon_spell = false; dam = (p1 - p2); } else { dam = 0; } break; /* Banishment-- cast when in extreme danger (checked in borg_defense). */ case BORG_ATTACK_AWAY_EVIL: if (rf_has(r_ptr->flags, RF_EVIL)) { /* try not teleport away uniques. 
*/
            if (rf_has(r_ptr->flags, RF_UNIQUE)) {
                /* Banish ones with escorts */
                if (r_ptr->friends || r_ptr->friends_base) {
                    dam = 0;
                } else {
                    /* try not Banish non escorted uniques */
                    dam = -500;
                }
            } else {
                /* damage is the danger of the baddie */
                dam = borg_danger_aux(c_y, c_x, 1, i, true, true);
            }
        } else {
            dam = 0;
        }
        break;

    case BORG_ATTACK_TAP_UNLIFE:
        /* for now ignore the gain in sp */
        if (!(rf_has(r_ptr->flags, RF_UNDEAD)))
            dam = 0;
        else {
            /* NOTE(review): BI_CURSP - BI_CURSP is always zero, so sp_drain
             * never reduces the damage below kill->power; this looks like it
             * was meant to be BI_MAXSP - BI_CURSP (room left in the mana
             * pool) -- confirm against the spell effect before changing. */
            int sp_drain = borg_skill[BI_CURSP] - borg_skill[BI_CURSP];
            if (sp_drain < kill->power)
                dam = kill->power - sp_drain;
        }
        break;
    }

    /* use Missiles on certain types of monsters */
    if ((borg_skill[BI_CDEPTH] >= 1)
        && (borg_danger_aux(kill->y, kill->x, 1, i, true, true)
                > avoidance * 2 / 10
            || ((r_ptr->friends || r_ptr->friends_base) /* monster has friends*/
                && kill->level >= borg_skill[BI_CLEVEL] - 5 /* close levels */)
            || kill->ranged_attack /* monster has a ranged attack */
            || rf_has(r_ptr->flags, RF_UNIQUE)
            || rf_has(r_ptr->flags, RF_MULTIPLY)
            || gold_eater || /* Monster can steal gold */
            rf_has(r_ptr->flags, RF_NEVER_MOVE) /* monster never moves */
            || borg_skill[BI_CLEVEL] <= 20 /* still very weak */)) {
        borg_use_missile = true;
    }

    /* Return Damage as pure danger of the monster */
    if (typ == BORG_ATTACK_AWAY_ALL || typ == BORG_ATTACK_AWAY_EVIL
        || typ == BORG_ATTACK_AWAY_ALL_MORGOTH)
        return (dam);

    /* Limit damage to twice maximal hitpoints */
    if (dam > kill->power * 2 && !rf_has(r_ptr->flags, RF_UNIQUE))
        dam = kill->power * 2;

    /* give a small bonus for whacking a unique */
    /* this should be just enough to give preference to whacking uniques */
    if ((rf_has(r_ptr->flags, RF_UNIQUE)) && borg_skill[BI_CDEPTH] >= 1)
        dam = (dam * 3);

    /* Hack -- ignore Maggot until later.  Player will chase Maggot
     * down all across the screen waking up all the monsters.  Then
     * he is stuck in a compromised situation.
*/ if ((rf_has(r_ptr->flags, RF_UNIQUE)) && borg_skill[BI_CDEPTH] == 0) { dam = dam * 2 / 3; /* Dont hunt maggot until later */ if (borg_skill[BI_CLEVEL] < 5) dam = 0; } /* give a small bonus for whacking a breeder */ if (rf_has(r_ptr->flags, RF_MULTIPLY)) dam = (dam * 3 / 2); /* Enhance the perceived damage to summoner in order to influence the * choice of targets. */ if ((rsf_has(r_ptr->spell_flags, RSF_S_KIN)) || (rsf_has(r_ptr->spell_flags, RSF_S_HI_DEMON)) || (rsf_has(r_ptr->spell_flags, RSF_S_MONSTER)) || (rsf_has(r_ptr->spell_flags, RSF_S_MONSTERS)) || (rsf_has(r_ptr->spell_flags, RSF_S_ANIMAL)) || (rsf_has(r_ptr->spell_flags, RSF_S_SPIDER)) || (rsf_has(r_ptr->spell_flags, RSF_S_HOUND)) || (rsf_has(r_ptr->spell_flags, RSF_S_HYDRA)) || (rsf_has(r_ptr->spell_flags, RSF_S_AINU)) || (rsf_has(r_ptr->spell_flags, RSF_S_DEMON)) || (rsf_has(r_ptr->spell_flags, RSF_S_UNDEAD)) || (rsf_has(r_ptr->spell_flags, RSF_S_DRAGON)) || (rsf_has(r_ptr->spell_flags, RSF_S_HI_DRAGON)) || (rsf_has(r_ptr->spell_flags, RSF_S_HI_UNDEAD)) || (rsf_has(r_ptr->spell_flags, RSF_S_WRAITH)) || (rsf_has(r_ptr->spell_flags, RSF_S_UNIQUE))) dam += ((dam * 3) / 2); /* * Apply massive damage bonus to Questor monsters to * encourage borg to strike them. */ if (rf_has(r_ptr->flags, RF_QUESTOR)) dam += (dam * 9); /* Try to conserve missiles. 
*/ if (typ == BORG_ATTACK_ARROW) { if (!borg_use_missile) /* set damage to zero, force borg to melee attack */ dam = 0; } /* Damage */ return (dam); } /* * Simulate / Invoke the launching of a bolt at a monster */ static int borg_launch_bolt_aux_hack(int i, int dam, int typ, int ammo_location) { int d, p2, p1, x, y; int o_y = 0; int o_x = 0; int walls = 0; int unknown = 0; borg_grid* ag; borg_kill* kill; struct monster_race* r_ptr; /* Monster */ kill = &borg_kills[i]; /* monster race */ r_ptr = &r_info[kill->r_idx]; /* Skip dead monsters */ if (!kill->r_idx) return (0); /* Require current knowledge */ if (kill->when < borg_t - 2) return (0); /* Acquire location */ x = kill->x; y = kill->y; /* Acquire the grid */ ag = &borg_grids[y][x]; /* Never shoot walls/doors */ if (!borg_cave_floor_grid(ag)) return (0); /* dont shoot at ghosts if not on known floor grid */ if ((rf_has(r_ptr->flags, RF_PASS_WALL)) && (ag->feat != FEAT_FLOOR && ag->feat != FEAT_OPEN && ag->feat != FEAT_BROKEN && !ag->trap)) return (0); /* dont shoot at ghosts in walls, not perfect */ if (rf_has(r_ptr->flags, RF_PASS_WALL)) { /* if 2 walls and 1 unknown skip this monster */ /* Acquire location */ x = kill->x; y = kill->y; /* Get grid */ for (o_x = -1; o_x <= 1; o_x++) { for (o_y = -1; o_y <= 1; o_y++) { /* Acquire location */ x = kill->x + o_x; y = kill->y + o_y; ag = &borg_grids[y][x]; if (ag->feat >= FEAT_MAGMA && ag->feat <= FEAT_PERM) walls++; if (ag->feat == FEAT_NONE) unknown++; } } /* Is the ghost likely in a wall? 
*/ if (walls >= 2 && unknown >= 1) return (0); } /* Calculate damage */ d = borg_launch_damage_one(i, dam, typ, ammo_location); /* Return Damage, on Teleport Other, true damage is * calculated elsewhere */ if (typ == BORG_ATTACK_AWAY_ALL || typ == BORG_ATTACK_AWAY_ALL_MORGOTH) return (d); /* Return Damage as pure danger of the monster */ if (typ == BORG_ATTACK_AWAY_EVIL) return (d); /* Return 0 if the true damge (w/o the danger bonus) is 0 */ if (d <= 0) return (d); /* Calculate danger */ p2 = borg_danger_aux(y, x, 1, i, true, false); /* Hack -- avoid waking most "hard" sleeping monsters */ if (!kill->awake && (p2 > avoidance / 2) && (d < kill->power) && !borg_munchkin_mode) { return (-999); } /* Hack -- ignore sleeping town monsters */ if (!borg_skill[BI_CDEPTH] && !kill->awake) { return (0); } /* Hack -- ignore nonthreatening town monsters when low level */ if (!borg_skill[BI_CDEPTH] && borg_skill[BI_CLEVEL] < 3 /* && monster_is_nonthreatening_test */) { /* Nothing yet */ } /* Calculate "danger" to player */ p1 = borg_danger_aux(c_y, c_x, 1, i, true, false); /* Extra "bonus" if attack kills */ if (d >= kill->power) d = 2 * d; /* Add in dangers */ d = d + p1; /* Result */ return (d); } /* * Determine the "reward" of launching a beam/bolt/ball at a location * * An "unreachable" location always has zero reward. * * Basically, we sum the "rewards" of doing the appropriate amount of * damage to each of the "affected" monsters. 
* * We will attempt to apply the offset-ball attack here */ static int borg_launch_bolt_aux(int y, int x, int rad, int dam, int typ, int max, int ammo_location) { int ry, rx; int x1, y1; int x2, y2; int dist; int r, n; borg_grid* ag; struct monster_race* r_ptr; borg_kill* kill; int q_x, q_y; /* Extract panel */ q_x = w_x / borg_panel_wid(); q_y = w_y / borg_panel_hgt(); /* Reset damage */ n = 0; /* Initial location */ x1 = c_x; y1 = c_y; /* Final location */ x2 = x; y2 = y; /* Bounds Check */ if (!square_in_bounds_fully(cave, loc(x, y))) return 0; /* Start over */ x = x1; y = y1; /* Simulate the spell/missile path */ for (dist = 0; dist < max; dist++) { /* Bounds Check */ if (dist && !square_in_bounds_fully(cave, loc(x, y))) break; /* Get the grid of the targetted monster */ ag = &borg_grids[y2][x2]; kill = &borg_kills[ag->kill]; r_ptr = &r_info[kill->r_idx]; /* Get the grid of the pathway */ ag = &borg_grids[y][x]; /* Stop at walls */ /* note: beams end at walls. */ if (dist) { /* Stop at walls */ /* note if beam, this is the end of the beam */ /* dispel spells act like beams (sort of) */ if (!borg_cave_floor_grid(ag) || ag->feat == FEAT_PASS_RUBBLE) { if (rad != -1 && rad != 10) return (0); else return (n); } } /* Collect damage (bolts/beams) */ if (rad <= 0 || rad == 10) n += borg_launch_bolt_aux_hack(ag->kill, dam, typ, ammo_location); /* Check for arrival at "final target" */ /* except beams, which keep going. */ if ((rad != -1 && rad != 10) && ((x == x2) && (y == y2))) break; /* Stop bolts at monsters */ if (!rad && ag->kill) return (n); /* The missile path can be complicated. There are several checks * which need to be made. First we assume that we targetting * a monster. That monster could be known from either sight or * ESP. If the entire pathway from us to the monster is known, * then there is no concern. 
But if the borg is shooting through * unknown grids, then there is a concern when he has ESP; without * ESP he would not see that monster if the unknown grids * contained walls or closed doors. * * 1. ESP Inactive * A. No Infravision * -Then the monster must be in a lit grid. OK to shoot * B. Yes Infravision * -Then the monster must be projectable() OK to shoot * 2. ESP Active * A. No Infravision * -Then the monster could be in a lit grid. Try to shoot * -Or I detect it with ESP and it's not projectable(). * B. Yes Infravision * -Then the monster could be projectable() * -Or I detect it with ESP and it's not projectable(). * -In the cases of ESP Active, the borg will test fire a missile. * Then wait for a 'painful ouch' from the monster. * * Low level borgs will not take the shot unless they have * a clean and known pathway. Borgs over a certain clevel, * will attempt the shot and listen for the 'ouch' repsonse * to know that the clear. If no 'Ouch' is heard, then the * borg will assume there is a wall in the way. Exception to * this is with arrows. Arrows can miss the target or fall * fall short, in which case no 'ouch' is heard. So the borg * allowed to miss two shots with arrows/bolts/thrown objects. 
*/ /* dont do the check if esp */ if (!borg_skill[BI_ESP]) { /* Check the missile path--no Infra, no HAS_LIGHT */ if (dist && (borg_skill[BI_INFRA] <= 0) && !(r_ptr->light > 0)) { /* Stop at unknown grids (see above) */ /* note if beam, dispel, this is the end of the beam */ if (ag->feat == FEAT_NONE) { if (rad != -1 && rad != 10) return (0); else return (n); } /* Stop at unseen walls */ /* We just shot and missed, this is our next shot */ if (successful_target < 0) { /* When throwing things, it is common to just 'miss' */ /* Skip only one round in this case */ if (successful_target <= -12) successful_target = 0; if (rad != -1 && rad != 10) return (0); else return (n); } } else /* I do have infravision or it's a lite monster */ { /* Stop at unseen walls */ /* We just shot and missed, this is our next shot */ if (successful_target < 0) { /* When throwing things, it is common to just 'miss' */ /* Skip only one round in this case */ if (successful_target <= -12) successful_target = 0; if (rad != -1 && rad != 10) return (0); else return (n); } } } else /* I do have ESP */ { /* Check the missile path */ if (dist) { /* if this area has been magic mapped, * ok to shoot in the dark */ if (!borg_detect_wall[q_y + 0][q_x + 0] && !borg_detect_wall[q_y + 0][q_x + 1] && !borg_detect_wall[q_y + 1][q_x + 0] && !borg_detect_wall[q_y + 1][q_x + 1] && borg_fear_region[c_y / 11][c_x / 11] < avoidance / 20) { /* Stop at unknown grids (see above) */ /* note if beam, dispel, this is the end of the beam */ if (ag->feat == FEAT_NONE) { if (rad != -1 && rad != 10) return (0); else return (n); } /* Stop at unseen walls */ /* We just shot and missed, this is our next shot */ if (successful_target < 0) { /* When throwing things, it is common to just 'miss' */ /* Skip only one round in this case */ if (successful_target <= -12) successful_target = 0; if (rad != -1 && rad != 10) return (0); else return (n); } } /* Stop at unseen walls */ /* We just shot and missed, this is our next shot */ if 
(successful_target < 0) { /* When throwing things, it is common to just 'miss' */ /* Skip only one round in this case */ if (successful_target <= -12) successful_target = 0; if (rad != -1 && rad != 10) return (0); else return (n); } } } /* Calculate the new location */ mmove2(&y, &x, y1, x1, y2, x2); } /* Bolt/Beam attack */ if (rad <= 0) return (n); /* Excessive distance */ if (dist >= max) return (0); /* Check monsters and objects in blast radius */ for (ry = y2 - rad; ry < y2 + rad; ry++) { for (rx = x2 - rad; rx < x2 + rad; rx++) { /* Bounds check */ if (!square_in_bounds(cave, loc(rx, ry))) continue; /* Get the grid */ ag = &borg_grids[ry][rx]; /* Check distance */ r = borg_distance(y2, x2, ry, rx); /* Maximal distance */ if (r > rad) continue; /* Never pass through walls*/ if (!borg_los(y2, x2, ry, rx)) continue; /* dispel spells should hurt the same no matter the rad: make r= y and x */ if (rad == 10) r = 0; /* Collect damage, lowered by distance */ n += borg_launch_bolt_aux_hack(ag->kill, dam / (r + 1), typ, ammo_location); /* probable damage int was just changed by b_l_b_a_h*/ /* check destroyed stuff. */ if (ag->take && borg_takes[ag->take].kind) { struct borg_take* take = &borg_takes[ag->take]; struct object_kind* k_ptr = take->kind; switch (typ) { case BORG_ATTACK_ACID: { /* rings/boots cost extra (might be speed!) */ if (k_ptr->tval == TV_BOOTS && !k_ptr->aware) { n -= 20; } break; } case BORG_ATTACK_ELEC: { /* rings/boots cost extra (might be speed!) */ if (k_ptr->tval == TV_RING && !k_ptr->aware) { n -= 20; } if (k_ptr->tval == TV_RING && k_ptr->sval == sv_ring_speed) { n -= 2000; } break; } case BORG_ATTACK_FIRE: { /* rings/boots cost extra (might be speed!) 
*/ if (k_ptr->tval == TV_BOOTS && !k_ptr->aware) { n -= 20; } break; } case BORG_ATTACK_COLD: { if (k_ptr->tval == TV_POTION) { n -= 20; /* Extra penalty for cool potions */ if (!k_ptr->aware || k_ptr->sval == sv_potion_healing || k_ptr->sval == sv_potion_star_healing || k_ptr->sval == sv_potion_life || (k_ptr->sval == sv_potion_inc_str && amt_add_stat[STAT_STR] >= 1000) || (k_ptr->sval == sv_potion_inc_int && amt_add_stat[STAT_INT] >= 1000) || (k_ptr->sval == sv_potion_inc_wis && amt_add_stat[STAT_WIS] >= 1000) || (k_ptr->sval == sv_potion_inc_dex && amt_add_stat[STAT_DEX] >= 1000) || (k_ptr->sval == sv_potion_inc_con && amt_add_stat[STAT_CON] >= 1000)) n -= 2000; } break; } case BORG_ATTACK_MANA: { /* Used against uniques, allow the stuff to burn */ break; } } } } } /* Result */ return (n); } /* * Simulate/Apply the optimal result of launching a beam/bolt/ball * * Note that "beams" have a "rad" of "-1", "bolts" have a "rad" of "0", * and "balls" have a "rad" of "2" or "3", depending on "blast radius". * dispel spells have a rad of 10 */ static int borg_launch_bolt(int rad, int dam, int typ, int max, int ammo_location) { int i = 0; int b_i = -1; int n = 0; int b_n = -1; int b_o_y = 0, b_o_x = 0; int o_y = 0, o_x = 0; int d, b_d = z_info->max_range; /* Examine possible destinations */ /* This will allow the borg to target places adjacent to a monster * in order to exploit and abuse a feature of the game. Whereas, * the borg, while targeting a monster will not score d/t walls, he * could land a successful hit by targeting adjacent to the monster. * For example: * ###################### * #####....@......###### * ############Px........ * ###################### * In order to hit the P, the borg must target the x and not the P. 
* */ for (i = 0; i < borg_temp_n; i++) { int x = borg_temp_x[i]; int y = borg_temp_y[i]; /* Consider each adjacent spot to and on top of the monster */ for (o_x = -1; o_x <= 1; o_x++) { for (o_y = -1; o_y <= 1; o_y++) { /* Acquire location */ x = borg_temp_x[i] + o_x; y = borg_temp_y[i] + o_y; /* Reset Teleport Other variables */ borg_tp_other_n = 0; n = 0; /* Bounds check */ if (!square_in_bounds(cave, loc(x, y))) continue; /* Remember how far away the monster is */ d = borg_distance(c_y, c_x, borg_temp_y[i], borg_temp_x[i]); /* Skip certain types of Offset attacks */ if ((x != borg_temp_x[i] || y != borg_temp_y[i]) && typ == BORG_ATTACK_AWAY_ALL) continue; /* Skip places that are out of range */ if (borg_distance(c_y, c_x, y, x) > max) continue; /* Consider it if its a ball spell or right on top of it */ if ((rad >= 2 && borg_grids[y][x].feat != FEAT_NONE) || (y == borg_temp_y[i] && x == borg_temp_x[i])) n = borg_launch_bolt_aux(y, x, rad, dam, typ, max, ammo_location); /* Teleport Other is now considered */ if (typ == BORG_ATTACK_AWAY_ALL && n > 0) { /* Consider danger with certain monsters removed * from the danger check. They were removed from the list of * considered monsters (borg_tp_other array) */ n = borg_danger(c_y, c_x, 1, true, false); /* Skip Offsets that do only 1 damage */ if (n == 1) n = -10; } /* Reset Teleport Other variables */ borg_tp_other_n = 0; /* Skip useless attacks */ if (n <= 0) continue; /* The game forbids targetting the outside walls */ if (x == 0 || y == 0 || x == DUNGEON_WID - 1 || y == DUNGEON_HGT - 1) continue; /* Collect best attack */ if ((b_i >= 0) && (n < b_n)) continue; /* Skip attacking farther monster if rewards are equal. 
*/ if (n == b_n && d > b_d) continue; /* Track it */ b_i = i; b_n = n; b_o_y = o_y; b_o_x = o_x; b_d = d; } } } if (b_i == -1) return (b_n); /* Reset Teleport Other variables */ borg_tp_other_n = 0; /* Simulation */ if (borg_simulate) return (b_n); /* Save the location */ g_x = borg_temp_x[b_i] + b_o_x; g_y = borg_temp_y[b_i] + b_o_y; /* Target the location */ (void)borg_target(g_y, g_x); /* Result */ return (b_n); } /* * Simulate/Apply the optimal result of launching a missile */ static int borg_attack_aux_launch(void) { int n, b_n = 0; int k, b_k = -1; int d = -1; int v, b_v = -1; borg_item* bow = &borg_items[INVEN_BOW]; /* skip if we don't have a bow */ if (!bow || bow->iqty == 0) return 0; /* No firing while blind, confused, or hallucinating */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISIMAGE]) return (0); /* Scan the quiver */ for (k = QUIVER_START; k < QUIVER_END; k++) { borg_item* item = &borg_items[k]; /* Skip empty items */ if (!item->iqty) break; /* Skip missiles that don't match the bow */ if (item->tval != borg_skill[BI_AMMO_TVAL]) continue; /* Skip worthless missiles */ if (item->value <= 0) continue; /* Determine average damage */ d = (item->dd * (item->ds + 1) / 2); d = d + item->to_d + bow->to_d; d = d * borg_skill[BI_AMMO_POWER] * borg_skill[BI_SHOTS]; v = item->value; /* Boost the perceived damage on unID'd ones so he can get a quick pseudoID on it */ if (borg_item_note_needs_id(item)) d = d * 99; /* Paranoia */ if (d <= 0) continue; /* Choose optimal target of bolt */ n = borg_launch_bolt(0, d, BORG_ATTACK_ARROW, 6 + 2 * borg_skill[BI_AMMO_POWER], k); /* if two attacks are equal, pick the cheaper ammo */ if (n == b_n && v >= b_v) continue; if (n >= b_n) { b_n = n; b_v = v; b_k = k; } } /* Nothing to use */ if (b_n < 0) return (0); /* Simulation */ if (borg_simulate) return (b_n); /* Do it */ borg_note(format("# Firing missile '%s'", borg_items[b_k].desc)); /* Fire */ borg_keypress('f'); /* Use the missile from the 
quiver */
    borg_keypress(((b_k - QUIVER_START) + '0'));

    /* Use target */
    borg_keypress('5');

    /* Set our shooting flag (expect the miss/ouch bookkeeping cycle) */
    successful_target = -2;

    /* Value */
    return (b_n);
}

/*
 * Attempt to rest on the grid to allow the monster to approach me.
 * Make sure the monster does not have a ranged attack and that I am
 * inclined to attack him.
 *
 * Returns 1 if resting here is worthwhile (queuing the rest keypress
 * when not simulating), 0 otherwise.
 */
static int borg_attack_aux_rest(void)
{
    int i;
    bool resting_is_good = false;

    /* Danger of my current grid over the next game turn */
    int my_danger = borg_danger(c_y, c_x, 1, false, false);

    /* Examine all the monsters */
    for (i = 1; i < borg_kills_nxt; i++) {
        borg_kill* kill = &borg_kills[i];

        int x9 = kill->x;
        int y9 = kill->y;
        int ax, ay, d;

        /* Skip dead monsters */
        if (!kill->r_idx) continue;

        /* Distance components */
        ax = (x9 > c_x) ? (x9 - c_x) : (c_x - x9);
        ay = (y9 > c_y) ? (y9 - c_y) : (c_y - y9);

        /* Distance */
        d = MAX(ax, ay);

        /* Minimal and maximal distance -- monster must be exactly two grids away */
        if (d != 2) continue;

        /* Ranged Attacks, don't rest. */
        if (kill->ranged_attack) continue;

        /* Skip the sleeping ones */
        if (!kill->awake) continue;

        /* need to have seen it recently */
        if (borg_t - kill->when > 10) continue;

        /* Skip monsters that dont chase */
        if (rf_has(r_info[kill->r_idx].flags, RF_NEVER_MOVE)) continue;

        /* Monster better not be faster than me */
        if (kill->speed - borg_skill[BI_SPEED] >= 5) continue;

        /* Should be flowing towards the monster */
        /* NOTE(review): only the y coordinate of the flow goal is compared;
         * presumably borg_flow_x[0] should be checked as well -- confirm */
        if (goal != GOAL_KILL || borg_flow_y[0] != kill->y) continue;

        /* Cant have an obstacle between us */
        if (!borg_los(c_y, c_x, kill->y, kill->x)) continue;

        /* Might be a little dangerous to just wait here */
        if (my_danger > borg_skill[BI_CURHP]) continue;

        /* Should be a good idea to wait for monster here. */
        resting_is_good = true;
    }

    /* Not a good idea */
    if (resting_is_good == false) return (0);

    /* Return some value for this rest */
    if (borg_simulate) return (1);

    /* Rest */
    borg_keypress(',');
    borg_note(format(
        "# Resting on grid (%d, %d), waiting for monster to approach.",
        c_y, c_x));

    /* All done */
    return (1);
}

/* look for a throwable item */
static bool borg_has_throwable(void)
{
    int i;

    for (i = 0; i < QUIVER_END; i++) {
        /* it will show wield in the list */
        /* but not if that is the only thing */
        if (i == INVEN_WIELD) continue;

        if (!borg_items[i].iqty) continue;

        if (of_has(borg_items[i].flags, OF_THROWING)) return true;
    }
    return false;
}

/*
 * Simulate/Apply the optimal result of throwing an object
 *
 * First choose the "best" object to throw, then check targets.
 */
static int borg_attack_aux_object(void)
{
    int b_n;
    int b_r = 0;

    int k, b_k = -1;
    int d, b_d = -1;

    int div, mul;

    /* Scan the pack */
    for (k = 0; k < z_info->pack_size; k++) {
        borg_item* item = &borg_items[k];

        /* Skip empty items */
        if (!item->iqty) continue;

        /* Skip my spell/prayer book */
        if (obj_kind_can_browse(&k_info[item->kind])) continue;

        /* Skip "equipment" items (not ammo) */
        if (borg_wield_slot(item) >= 0) continue;

        /* Determine average damage from object */
        d = (k_info[item->kind].dd * (k_info[item->kind].ds + 1) / 2);

        /* Skip things that are worth money unless they do a lot of damage */
        if (item->value > 100 && d < 5) continue;

        /* Skip useless stuff */
        if (d <= 0) continue;

        /* Hack -- Save Heals and cool stuff */
        if (item->tval == TV_POTION) continue;

        /* Hack -- Save last flasks for fuel, if needed */
        if (item->tval == TV_FLASK
            && (borg_skill[BI_AFUEL] <= 1 && !borg_fighting_unique))
            continue;

        /* Dont throw wands or rods */
        if (item->tval == TV_WAND || item->tval == TV_ROD) continue;

        /* Ignore worse damage */
        if ((b_k >= 0) && (d <= b_d)) continue;

        /* Track */
        b_k = k;
        b_d = d;

        /* Extract a "distance multiplier" */
        mul = 10;

        /* Enforce a minimum "weight" of one pound */
        div = ((item->weight >
10) ? item->weight : 10); /* Hack -- Distance -- Reward strength, penalize weight */ b_r = (adj_str_blow[my_stat_ind[STAT_STR]] + 20) * mul / div; /* Max distance of 10 */ if (b_r > 10) b_r = 10; } /* Nothing to use */ if (b_k < 0) return (0); /* No firing while blind, confused, or hallucinating */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISIMAGE]) return (0); /* Choose optimal location */ b_n = borg_launch_bolt(0, b_d, BORG_ATTACK_ARROW, 6 + 2 * borg_skill[BI_AMMO_POWER], b_k); /* Simulation */ if (borg_simulate) return (b_n); /* Do it */ borg_note(format("# Throwing painful object '%s'", borg_items[b_k].desc)); /* Fire */ borg_keypress('v'); if (borg_has_throwable()) borg_keypress('/'); /* Use the object */ borg_keypress(all_letters_nohjkl[b_k]); /* Use target */ borg_keypress('5'); /* Set our shooting flag */ successful_target = -2; /* Value */ return (b_n); } /* * Simulate/Apply the optimal result of using a "normal" attack spell * * Take into account the failure rate of spells/objects/etc. XXX XXX XXX */ static int borg_attack_aux_spell_bolt(const enum borg_spells spell, int rad, int dam, int typ, int max_range) { int b_n; int penalty = 0; /* No firing while blind, confused, or hallucinating */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISIMAGE]) return (0); /* Paranoia */ if (borg_simulate && ((borg_class != CLASS_MAGE && borg_class != CLASS_NECROMANCER) && borg_skill[BI_CLEVEL] <= 2) && (randint0(100) < 1)) return (0); /* Not if money scumming in town */ if (borg_cfg[BORG_MONEY_SCUM_AMOUNT] && borg_skill[BI_CDEPTH] == 0) return (0); /* Not if low on food */ if (borg_skill[BI_FOOD] == 0 && (borg_skill[BI_ISWEAK] && (borg_spell_legal(REMOVE_HUNGER) || borg_spell_legal(HERBAL_CURING)))) return (0); /* Require ability (right now) */ if (!borg_spell_okay_fail(spell, (borg_fighting_unique ? 
40 : 25))) return (0); /* Choose optimal location */ b_n = borg_launch_bolt(rad, dam, typ, max_range, 0); enum borg_spells primary_spell_for_class = MAGIC_MISSILE; switch (borg_class) { case CLASS_MAGE: primary_spell_for_class = MAGIC_MISSILE; break; case CLASS_DRUID: primary_spell_for_class = STINKING_CLOUD; break; case CLASS_PRIEST: primary_spell_for_class = ORB_OF_DRAINING; break; case CLASS_NECROMANCER: primary_spell_for_class = NETHER_BOLT; break; case CLASS_PALADIN: case CLASS_ROGUE: case CLASS_RANGER: case CLASS_BLACKGUARD: break; } /* weak mages need that spell, they dont get penalized */ /* weak == those that can't teleport reliably anyway */ if (spell == primary_spell_for_class && (!borg_spell_legal_fail(TELEPORT_SELF, 15) || borg_skill[BI_MAXCLEVEL] <= 30)) { if (borg_simulate) return (b_n); } /* Penalize mana usage except on MM */ int spell_power = borg_get_spell_power(spell); if (spell != primary_spell_for_class) { /* Standard penalty */ b_n = b_n - spell_power; /* Extra penalty if the cost far outweighs the damage */ if (borg_skill[BI_MAXSP] < 50 && spell_power > b_n) b_n = b_n - spell_power; /* Penalize use of reserve mana */ if (borg_skill[BI_CURSP] - spell_power < borg_skill[BI_MAXSP] / 2) b_n = b_n - (spell_power * 3); /* Penalize use of deep reserve mana */ if (borg_skill[BI_CURSP] - spell_power < borg_skill[BI_MAXSP] / 3) b_n = b_n - (spell_power * 5); } /* Really penalize use of mana needed for final teleport */ if (borg_class == CLASS_MAGE) penalty = 6; if (borg_class == CLASS_RANGER) penalty = 22; if (borg_class == CLASS_NECROMANCER) penalty = 10; if (borg_class == CLASS_ROGUE) penalty = 20; if (borg_class == CLASS_PRIEST) penalty = 8; if (borg_class == CLASS_PALADIN) penalty = 20; if ((borg_skill[BI_MAXSP] > 30) && (borg_skill[BI_CURSP] - spell_power < penalty)) b_n = b_n - (spell_power * 750); /* Simulation */ if (borg_simulate) return (b_n); /* Cast the spell */ (void)borg_spell(spell); /* Use target */ borg_keypress('5'); /* Set our 
shooting flag */ successful_target = -1; /* Value */ return (b_n); } /* This routine is the same as the one above only in an emergency case. * The borg will enter negative mana casting this */ static int borg_attack_aux_spell_bolt_reserve(const enum borg_spells spell, int rad, int dam, int typ, int max_range) { int b_n; int i; int x9, y9, ax, ay, d; int near_monsters = 0; /* Fake our Mana */ int sv_mana = borg_skill[BI_CURSP]; /* Only Weak guys should try this */ if (borg_skill[BI_CLEVEL] >= 15) return (0); /* No firing while blind, confused, or hallucinating */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISIMAGE]) return (0); /* Not if low on food */ if (borg_skill[BI_FOOD] == 0 && (borg_skill[BI_ISWEAK] && borg_spell_legal(REMOVE_HUNGER))) return (0); /* Must not have enough mana right now */ if (borg_spell_okay_fail(spell, 25)) return (0); /* Must be dangerous */ if (borg_danger(c_y, c_x, 1, true, false) < avoidance * 2) return (0); /* Find the monster */ for (i = 1; i < borg_kills_nxt; i++) { borg_kill* kill; /* Monster */ kill = &borg_kills[i]; /* Skip dead monsters */ if (!kill->r_idx) continue; /* check the location */ x9 = kill->x; y9 = kill->y; /* Distance components */ ax = (x9 > c_x) ? (x9 - c_x) : (c_x - x9); ay = (y9 > c_y) ? (y9 - c_y) : (c_y - y9); /* Distance */ d = MAX(ax, ay); /* Count the number of close monsters * There should only be one close monster. * We do not want to risk fainting. 
*/ if (d < 7) near_monsters++; /* If it has too many hp to be taken out with this */ /* spell, don't bother trying */ /* NOTE: the +4 is because the damage is toned down as an 'average damage' */ if (kill->power > (dam + 4)) return (0); /* Do not use it in town */ if (borg_skill[BI_CDEPTH] == 0) return (0); break; } /* Should only be 1 near monster */ if (near_monsters > 1) return (0); /* Require ability (with faked mana) */ borg_skill[BI_CURSP] = borg_skill[BI_MAXSP]; if (!borg_spell_okay_fail(spell, 25)) { /* Restore Mana */ borg_skill[BI_CURSP] = sv_mana; return (0); } /* Choose optimal location */ b_n = borg_launch_bolt(rad, dam, typ, max_range, 0); /* return the value */ if (borg_simulate) { /* Restore Mana */ borg_skill[BI_CURSP] = sv_mana; return (b_n); } /* Cast the spell with fake mana */ borg_skill[BI_CURSP] = borg_skill[BI_MAXSP]; if (borg_spell_fail(spell, 25)) { /* Note the use of the emergency spell */ borg_note("# Emergency use of an Attack Spell."); /* verify use of spell */ /* borg_keypress('y'); */ } /* Use target */ borg_queue_direction('5'); /* Set our shooting flag */ successful_target = -1; /* restore true mana */ borg_skill[BI_CURSP] = 0; /* Value */ return (b_n); } /* * Simulate/Apply the optimal result of using a "dispel" attack spell */ static int borg_attack_aux_spell_dispel(const enum borg_spells spell, int dam, int typ) { int b_n; int penalty = 0; /* No firing while blind, confused, or hallucinating */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISIMAGE]) return (0); /* Not if low on food */ if (borg_skill[BI_FOOD] == 0 && (borg_skill[BI_ISWEAK] && (borg_spell_legal(REMOVE_HUNGER) || borg_spell_legal(HERBAL_CURING)))) return (0); /* Paranoia */ if (borg_simulate && (randint0(100) < 2)) return (0); /* Require ability */ if (!borg_spell_okay_fail(spell, 25)) return (0); /* Choose optimal location--radius defined as 10 */ b_n = borg_launch_bolt(10, dam, typ, z_info->max_range, 0); int spell_power = 
borg_get_spell_power(spell);

    /* Penalize mana usage */
    b_n = b_n - spell_power;

    /* Penalize use of reserve mana */
    if (borg_skill[BI_CURSP] - spell_power < borg_skill[BI_MAXSP] / 2)
        b_n = b_n - (spell_power * 3);

    /* Penalize use of deep reserve mana */
    if (borg_skill[BI_CURSP] - spell_power < borg_skill[BI_MAXSP] / 3)
        b_n = b_n - (spell_power * 5);

    /* Really penalize use of mana needed for final teleport */
    if (borg_class == CLASS_MAGE) penalty = 6;
    if (borg_class == CLASS_RANGER) penalty = 22;
    if (borg_class == CLASS_ROGUE) penalty = 20;
    if (borg_class == CLASS_PRIEST) penalty = 8;
    if (borg_class == CLASS_PALADIN) penalty = 20;
    if (borg_class == CLASS_NECROMANCER) penalty = 10;
    if ((borg_skill[BI_MAXSP] > 30)
        && (borg_skill[BI_CURSP] - spell_power < penalty))
        b_n = b_n - (spell_power * 750);

    /* Really penalize use of mana needed for final teleport */
    /* (6 pts for mage) */
    /* NOTE(review): this duplicates the class-based check above with a
     * hard-coded mage threshold of 6; presumably redundant -- confirm */
    if ((borg_skill[BI_MAXSP] > 30) && (borg_skill[BI_CURSP] - spell_power) < 6)
        b_n = b_n - (spell_power * 750);

    /* Simulation */
    if (borg_simulate) return (b_n);

    /* Cast the spell */
    (void)borg_spell(spell);

    /* Value */
    return (b_n);
}

/*
 * Simulate/Apply the optimal result of using a "dispel" staff
 * Which would be dispel evil, power, holiness. Genocide handled later.
 */
static int borg_attack_aux_staff_dispel(int sval, int rad, int dam, int typ)
{
    int b_n;

    /* No firing while blind, confused, or hallucinating */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]
        || borg_skill[BI_ISIMAGE])
        return (0);

    /* Paranoia */
    if (borg_simulate && (randint0(100) < 2)) return (0);

    /* look for the staff */
    if (!borg_equips_staff_fail(sval)) return (0);

    /* Choose optimal location--radius defined as 10 */
    /* NOTE(review): the rad parameter is unused here; the radius is
     * always the "dispel" value of 10 */
    b_n = borg_launch_bolt(10, dam, typ, z_info->max_range, 0);

    /* Big Penalize charge usage */
    b_n = b_n - 50;

    /* Simulation */
    if (borg_simulate) return (b_n);

    /* Use the staff */
    (void)borg_use_staff(sval);

    /* Value */
    return (b_n);
}

/*
 * Simulate/Apply the optimal result of using a "normal" attack rod
 */
static int borg_attack_aux_rod_bolt(int sval, int rad, int dam, int typ)
{
    int b_n;

    /* No firing while blind, confused, or hallucinating */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]
        || borg_skill[BI_ISIMAGE])
        return (0);

    /* Paranoia */
    if (borg_simulate && (randint0(100) < 2)) return (0);

    /* Not likely to be successful in the activation */
    if (500 < borg_activate_failure(TV_ROD, sval)) return (0);

    /* Look for that rod */
    if (!borg_equips_rod(sval)) return (0);

    /* Choose optimal location */
    b_n = borg_launch_bolt(rad, dam, typ, z_info->max_range, 0);

    /* Simulation */
    if (borg_simulate) return (b_n);

    /* Zap the rod */
    (void)borg_zap_rod(sval);

    /* Use target */
    borg_keypress('5');

    /* Set our shooting flag */
    successful_target = -1;

    /* Value */
    return (b_n);
}

/*
 * Simulate/Apply the optimal result of using a "normal" attack wand
 */
static int borg_attack_aux_wand_bolt(
    int sval, int rad, int dam, int typ, int selection)
{
    int i;
    int b_n;

    /* No firing while blind, confused, or hallucinating */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]
        || borg_skill[BI_ISIMAGE])
        return (0);

    /* Dont use wands in town, charges are too spendy */
    if (!borg_skill[BI_CDEPTH]) return (0);

    /* Paranoia */
    if (borg_simulate &&
(randint0(100) < 2)) return (0); /* Look for that wand */ i = borg_slot(TV_WAND, sval); /* None available */ if (i < 0) return (0); /* No charges */ if (!borg_items[i].pval) return (0); /* Not likely to be successful in the activation */ if (500 < borg_activate_failure(TV_WAND, sval)) return (0); /* Choose optimal location */ b_n = borg_launch_bolt(rad, dam, typ, z_info->max_range, 0); /* Penalize charge usage */ if (borg_skill[BI_CLEVEL] > 5) b_n = b_n - 5; /* Wands of wonder are used in last ditch efforts. They behave * randomly, so the best use of them is an emergency. I have seen * borgs die from hill orcs with fully charged wonder wands. Odds * are he could have taken the orcs with the wand. So use them in * an emergency after all the borg_caution() steps have failed */ if (sval == sv_wand_wonder && !borg_munchkin_mode) { /* check the danger */ if (b_n > 0 && borg_danger(c_y, c_x, 1, true, false) >= (avoidance * 7 / 10)) { /* make the wand appear deadly */ b_n = 999; /* note the use of the wand in the emergency */ borg_note(format("# Emergency use of a Wand of Wonder.")); } else { b_n = 0; } } /* Simulation */ if (borg_simulate) return (b_n); /* Aim the wand */ (void)borg_aim_wand(sval); /* Use target */ borg_keypress('5'); /* Set our shooting flag */ successful_target = -1; /* select the correct effect */ if (selection != -1) borg_keypress('b' + selection); /* Value */ return (b_n); } /* * Simulate/Apply the optimal result of using an un-id'd wand */ static int borg_attack_aux_wand_bolt_unknown(int dam, int typ) { int i; int b_i = -1; int b_n; /* No firing while blind, confused, or hallucinating */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISIMAGE]) return (0); /* Paranoia */ if (borg_simulate && (randint0(100) < 5)) return (0); /* Look for an un-id'd wand */ for (i = 0; i < z_info->pack_size; i++) { if (borg_items[i].tval != TV_WAND) continue; /* known */ if (borg_items[i].kind) continue; /* No charges */ if 
(!borg_items[i].pval) continue; if (strstr(borg_items[i].desc, "empty")) continue; /* Select this wand */ b_i = i; } /* None available */ if (b_i < 0) return (0); /* Choose optimal location */ b_n = borg_launch_bolt(0, dam, typ, z_info->max_range, 0); /* Simulation */ if (borg_simulate) return (b_n); /* Log the message */ borg_note(format("# Aiming unknown wand '%s.'", borg_items[b_i].desc)); /* record the address to avoid certain bugs with inscriptions&amnesia */ borg_zap_slot = b_i; /* Perform the action */ borg_keypress('a'); borg_keypress(all_letters_nohjkl[b_i]); /* Use target */ borg_keypress('5'); /* Set our shooting flag */ successful_target = -1; /* Value */ return (b_n); } /* * Simulate/Apply the optimal result of using an un-id'd rod */ static int borg_attack_aux_rod_bolt_unknown(int dam, int typ) { int i; int b_i = -1; int b_n; /* No firing while blind, confused, or hallucinating */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISIMAGE]) return (0); /* Paranoia */ if (borg_simulate && (randint0(100) < 5)) return (0); /* Look for an un-id'd wand */ for (i = 0; i < z_info->pack_size; i++) { if (borg_items[i].tval != TV_ROD) continue; /* known */ if (borg_items[i].kind) continue; /* No charges */ if (!borg_items[i].pval) continue; /* Not an attacker */ if (strstr(borg_items[i].desc, "tried")) continue; /* Select this rod */ b_i = i; } /* None available */ if (b_i < 0) return (0); /* Choose optimal location */ b_n = borg_launch_bolt(0, dam, typ, z_info->max_range, 0); /* Simulation */ if (borg_simulate) return (b_n); /* Log the message */ borg_note(format("# Aiming unknown rod '%s.'", borg_items[b_i].desc)); /* record the address to avoid certain bugs with inscriptions&amnesia */ borg_zap_slot = b_i; /* Perform the action */ borg_keypress('z'); borg_keypress(all_letters_nohjkl[b_i]); /* Use target */ borg_keypress('5'); /* Set our shooting flag */ successful_target = -1; /* Value */ return (b_n); } /* * Simulate/Apply the optimal 
result of ACTIVATING an attack artifact
 */
static int borg_attack_aux_activation(
    int activation, int rad, int dam, int typ, bool aim, int selection)
{
    int b_n;

    /* No firing while blind, confused, or hallucinating */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]
        || borg_skill[BI_ISIMAGE])
        return (0);

    /* Paranoia */
    if (borg_simulate && (randint0(100) < 2)) return (0);

    /* Look for an item with that activation and to see if it is charged */
    if (!borg_equips_item(activation, true)) return (0);

    /* Choose optimal location */
    b_n = borg_launch_bolt(rad, dam, typ, z_info->max_range, 0);

    /* Simulation */
    if (borg_simulate) return (b_n);

    /* Activate the artifact */
    (void)borg_activate_item(activation);

    /* Use target */
    if (aim) {
        borg_keypress('5');

        /* Set our shooting flag */
        successful_target = -1;
    }

    /* select the correct effect */
    if (selection != -1) borg_keypress('b' + selection);

    /* Value */
    return (b_n);
}

/*
 * Simulate/Apply the optimal result of ACTIVATING an attack ring
 */
static int borg_attack_aux_ring(int ring_name, int rad, int dam, int typ)
{
    int b_n;

    /* No firing while blind, confused, or hallucinating */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]
        || borg_skill[BI_ISIMAGE])
        return (0);

    /* Paranoia */
    if (borg_simulate && (randint0(100) < 2)) return (0);

    /* Look for that ring and to see if it is charged */
    if (!borg_equips_ring(ring_name)) return (0);

    /* Choose optimal location */
    b_n = borg_launch_bolt(rad, dam, typ, z_info->max_range, 0);

    /* Simulation */
    if (borg_simulate) return (b_n);

    /* Activate the artifact */
    (void)borg_activate_ring(ring_name);

    /* Use target */
    borg_keypress('5');

    /* Set our shooting flag */
    successful_target = -1;

    /* Value */
    return (b_n);
}

/*
 * Simulate/Apply the optimal result of ACTIVATING a DRAGON ARMOUR
 */
static int borg_attack_aux_dragon(int sval, int rad, int dam, int typ, int selection)
{
    int b_n;

    /* No firing while blind, confused, or hallucinating */
    if (borg_skill[BI_ISBLIND] ||
borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISIMAGE])
        return (0);

    /* Paranoia */
    if (borg_simulate && (randint0(100) < 2)) return (0);

    /* Randart dragon armors do not activate for breath */
    if (borg_items[INVEN_BODY].art_idx) return (0);

    /* Look for that scale mail and charged */
    if (!borg_equips_dragon(sval)) return (0);

    /* Choose optimal location */
    b_n = borg_launch_bolt(rad, dam, typ, z_info->max_range, 0);

    /* Simulation */
    if (borg_simulate) return (b_n);

    /* Activate the scale mail */
    (void)borg_activate_dragon(sval);

    /* Use target */
    borg_keypress('5');

    /* Set our shooting flag */
    successful_target = -1;

    /* select the correct effect */
    if (selection != -1) borg_keypress('b' + selection);

    /* Value */
    return (b_n);
}

/*
 * trying the Whirlwind Attack spell
 *
 * Sums expected melee damage (plus danger bonus) over every adjacent
 * monster, since the spell strikes all of them.
 */
static int borg_attack_aux_whirlwind_attack(void)
{
    int p;
    int i;
    int d;
    int total_d = 0;

    borg_grid* ag;
    borg_kill* kill;

    /* Can I do it */
    if (!borg_spell_okay_fail(WHIRLWIND_ATTACK, (borg_fighting_unique ? 40 : 25)))
        return (0);

    /* int original_danger = borg_danger(c_y, c_x, 1, false, false); */

    /* Number of blows granted by the spell scales with level */
    int blows = (borg_skill[BI_CLEVEL] + 10) / 15;

    /* Examine possible destinations */
    for (i = 0; i < borg_temp_n; i++) {
        int x = borg_temp_x[i];
        int y = borg_temp_y[i];

        /* Require "adjacent" */
        if (borg_distance(c_y, c_x, y, x) > 1) continue;

        /* Acquire grid */
        ag = &borg_grids[y][x];

        /* Calculate "average" damage */
        d = borg_thrust_damage_one(ag->kill);

        /* No damage */
        if (d <= 0) continue;

        /* get to do "blows" attacks */
        d = d * blows;

        /* Obtain the monster */
        kill = &borg_kills[ag->kill];

        /* Hack -- avoid waking most "hard" sleeping monsters */
        if (!kill->awake && (d <= kill->power) && !borg_munchkin_mode) {
            /* Calculate danger */
            p = borg_danger_aux(y, x, 1, ag->kill, true, true);
            if (p > avoidance * 2) continue;
        }

        /* Hack -- ignore sleeping town monsters */
        if (!borg_skill[BI_CDEPTH] && !kill->awake) continue;

        /* Calculate "danger" to player */
        p = borg_danger_aux(c_y, c_x, 2, ag->kill, true, true);

        /* Reduce "bonus" of partial kills when higher level */
        if (d <= kill->power && borg_skill[BI_MAXCLEVEL] > 15) p = p / 10;

        /* Add the danger-bonus to the damage */
        d += p;
        total_d += d;
    }

    /* Nothing to attack */
    /* NOTE(review): total_d starts at 0 and only grows, so this check
     * presumably was meant to be "<= 0"; a zero total still ends up
     * returning 0 below, so the effect is the same -- confirm */
    if (total_d < 0) return (0);

    /* Simulation */
    if (borg_simulate) return (total_d);

    /* try the spell */
    if (borg_spell(WHIRLWIND_ATTACK)) return (total_d);
    return (0);
}

/* trying the Leap into Battle spell */
static int borg_attack_aux_leap_into_battle(void)
{
    int p;
    int i, b_i = -1;
    int d, b_d = -1;

    borg_grid* ag;
    borg_kill* kill;

    /* Can I do it */
    if (!borg_spell_okay_fail(LEAP_INTO_BATTLE, (borg_fighting_unique ? 40 : 25)))
        return (0);

    /* Too afraid to attack */
    if (borg_skill[BI_ISAFRAID] || borg_skill[BI_CRSFEAR]) return (0);

    /* Examine possible destinations */
    for (i = 0; i < borg_temp_n; i++) {
        int blows;
        int x = borg_temp_x[i];
        int y = borg_temp_y[i];

        /* Require up to distance 4 */
        int m_dist = borg_distance(c_y, c_x, y, x);
        if (m_dist > 4) continue;

        /* Acquire grid */
        ag = &borg_grids[y][x];

        /* Calculate "average" damage */
        d = borg_thrust_damage_one(ag->kill);

        /* Blows scale with level and with the leap distance */
        blows = (borg_skill[BI_CLEVEL] + 5) / 15;
        blows = ((blows * m_dist + 2) / 4) + 1;
        d *= blows;

        /* No damage */
        if (d <= 0) continue;

        /* Obtain the monster */
        kill = &borg_kills[ag->kill];

        /* Hack -- avoid waking most "hard" sleeping monsters */
        if (!kill->awake && (d <= kill->power) && !borg_munchkin_mode) {
            /* Calculate danger */
            p = borg_danger_aux(y, x, 1, ag->kill, true, true);
            if (p > avoidance * 2) continue;
        }

        /* Hack -- ignore sleeping town monsters */
        if (!borg_skill[BI_CDEPTH] && !kill->awake) continue;

        /* Calculate "danger" to player */
        p = borg_danger_aux(c_y, c_x, 2, ag->kill, true, true);

        /* Reduce "bonus" of partial kills when higher level */
        if (d <= kill->power && borg_skill[BI_MAXCLEVEL] > 15) p = p / 10;

        /* Add the danger-bonus to the damage */
        d += p;

        /* Ignore lower damage */
        if ((b_i >= 0) && (d < b_d)) continue;

        /* Save the info */
        b_i = i;
        b_d = d;
    }

    /* Nothing to attack */
    if (b_i < 0) return (0);

    /* Simulation */
    if (borg_simulate) return (b_d);

    /* Save the location */
    g_x = borg_temp_x[b_i];
    g_y = borg_temp_y[b_i];
    ag = &borg_grids[g_y][g_x];
    kill = &borg_kills[ag->kill];

    /* Note */
    borg_note(format("# Leaping at %s at (%d,%d dist %d) who has %d Hit Points.",
        (r_info[kill->r_idx].name), g_y, g_x,
        borg_distance(c_y, c_x, g_y, g_x), kill->power));
    borg_note(format("# Attacking with weapon '%s'",
        borg_items[INVEN_WIELD].desc));

    /* Attack the grid */
    borg_target(g_y, g_x);
    borg_spell(LEAP_INTO_BATTLE);

    /* Use target */
    borg_keypress('5');

    /* Set our shooting flag */
    successful_target = -1;

    /* Success */
    return (b_d);
}

/* trying the Maim Foe spell */
/* this is a thrust but you get 1 blow/15 levels */
/* it also has a chance to stun but ignoring that for now. */
static int borg_attack_aux_maim_foe(void)
{
    int blows;
    int p, dir;
    int i, b_i = -1;
    int d, b_d = -1;

    borg_grid* ag;
    borg_kill* kill;

    /* Too afraid to attack */
    if (borg_skill[BI_ISAFRAID] || borg_skill[BI_CRSFEAR]) return (0);

    /* Can I do it */
    if (!borg_spell_okay_fail(MAIM_FOE, (borg_fighting_unique ?
40 : 25))) return (0); blows = borg_skill[BI_CLEVEL] / 15; /* Examine possible destinations */ for (i = 0; i < borg_temp_n; i++) { int x = borg_temp_x[i]; int y = borg_temp_y[i]; /* Require "adjacent" */ if (borg_distance(c_y, c_x, y, x) > 1) continue; /* Acquire grid */ ag = &borg_grids[y][x]; /* Calculate "average" damage */ d = borg_thrust_damage_one(ag->kill) * blows; /* No damage */ if (d <= 0) continue; /* Obtain the monster */ kill = &borg_kills[ag->kill]; /* Hack -- avoid waking most "hard" sleeping monsters */ if (!kill->awake && (d <= kill->power) && !borg_munchkin_mode) { /* Calculate danger */ p = borg_danger_aux(y, x, 1, ag->kill, true, true); if (p > avoidance * 2) continue; } /* Hack -- ignore sleeping town monsters */ if (!borg_skill[BI_CDEPTH] && !kill->awake) continue; /* Calculate "danger" to player */ p = borg_danger_aux(c_y, c_x, 2, ag->kill, true, true); /* Reduce "bonus" of partial kills when higher level */ if (d <= kill->power && borg_skill[BI_MAXCLEVEL] > 15) p = p / 10; /* Add the danger-bonus to the damage */ d += p; /* Ignore lower damage */ if ((b_i >= 0) && (d < b_d)) continue; /* Save the info */ b_i = i; b_d = d; } /* Nothing to attack */ if (b_i < 0) return (0); /* Simulation */ if (borg_simulate) return (b_d); /* Save the location */ g_x = borg_temp_x[b_i]; g_y = borg_temp_y[b_i]; ag = &borg_grids[g_y][g_x]; kill = &borg_kills[ag->kill]; /* Get a direction for attacking */ dir = borg_extract_dir(c_y, c_x, g_y, g_x); /* Simulation */ if (borg_simulate) return (d); borg_spell(MAIM_FOE); borg_keypress(I2D(dir)); return (d); } /* trying the Curse spell */ static int borg_attack_aux_curse(void) { int p; int i, b_i = -1; int d, b_d = -1; borg_grid* ag; borg_kill* kill; /* costs 100hp to cast. Don't kill yourself doing it */ if (borg_skill[BI_CURHP] < 120) return (0); /* Can I do it */ if (!borg_spell_okay_fail(CURSE, (borg_fighting_unique ? 
40 : 25))) return (0); /* Too afraid to attack */ if (borg_skill[BI_ISAFRAID] || borg_skill[BI_CRSFEAR]) return (0); /* Examine possible kills */ for (i = 0; i < borg_temp_n; i++) { int x = borg_temp_x[i]; int y = borg_temp_y[i]; /* Acquire grid */ ag = &borg_grids[y][x]; /* Obtain the monster */ kill = &borg_kills[ag->kill]; /* Calculate "average" damage -- scales with the monster's existing injury and the caster's level */ d = (((((kill->injury * kill->power) / 100) + 1) / 2) + 50) * (borg_skill[BI_CLEVEL] / 12 + 1); /* No damage */ if (d <= 0) continue; /* Hack -- avoid waking most "hard" sleeping monsters */ if (!kill->awake && (d <= kill->power) && !borg_munchkin_mode) { /* Calculate danger */ p = borg_danger_aux(y, x, 1, ag->kill, true, true); if (p > avoidance * 2) continue; } /* Hack -- ignore sleeping town monsters */ if (!borg_skill[BI_CDEPTH] && !kill->awake) continue; /* Calculate "danger" to player */ p = borg_danger_aux(c_y, c_x, 2, ag->kill, true, true); /* Reduce "bonus" of partial kills when higher level */ if (d <= kill->power && borg_skill[BI_MAXCLEVEL] > 15) p = p / 10; /* Add the danger-bonus to the damage */ d += p; /* Ignore lower damage */ if ((b_i >= 0) && (d < b_d)) continue; /* Save the info */ b_i = i; b_d = d; } /* Nothing to attack */ if (b_i < 0) return (0); /* Simulation */ if (borg_simulate) return (b_d); /* Save the location */ g_x = borg_temp_x[b_i]; g_y = borg_temp_y[b_i]; ag = &borg_grids[g_y][g_x]; kill = &borg_kills[ag->kill]; /* Attack the grid */ borg_target(g_y, g_x); borg_spell(CURSE); /* Use target */ borg_keypress('5'); /* Set our shooting flag */ successful_target = -1; /* Success */ return (b_d); }
/*
 * Simulate (or cast) the Vampire Strike spell.
 *
 * The spell hits the *closest* eligible monster, so this routine
 * tracks the minimum distance and, when several monsters tie at that
 * distance, averages the expected damage.  The monster must have an
 * empty floor grid adjacent to it (other than the borg's own grid),
 * must be projectable, and non-living/undead targets are skipped.
 */
static int borg_attack_aux_vampire_strike(void) { int p; int i /* , b_i */ = -1; int d, b_d = -1; int dist, best_dist = z_info->max_range; bool abort_attack = false; borg_grid* ag; borg_kill* kill; /* Can I do it */ if (!borg_spell_okay_fail(VAMPIRE_STRIKE, (borg_fighting_unique ?
40 : 25))) return (0); /* Examine possible destinations */ for (i = 0; i < borg_temp_n; i++) { bool new_low = false; int x = borg_temp_x[i]; int y = borg_temp_y[i]; int o_x, o_y, x2, y2; /* Consider each adjacent spot to the monster */ /* there must be an empty spot */ bool found = false; for (o_x = -1; o_x <= 1 && !found; o_x++) { for (o_y = -1; o_y <= 1 && !found; o_y++) { /* but not the monsters location */ if (!o_x && !o_y) continue; /* Acquire location */ x2 = borg_temp_x[i] + o_x; y2 = borg_temp_y[i] + o_y; ag = &borg_grids[y2][x2]; if (!ag->kill && ag->feat == FEAT_FLOOR && (y2 != c_y || x2 != c_x)) found = true; } } /* must have an empty square next to the monster */ if (!found) continue; /* Check the projectable, assume unknown grids are walls */ if (!borg_offset_projectable(c_y, c_x, y, x)) continue; /* closest distance */ dist = borg_distance(c_y, c_x, y, x); if (dist > best_dist) continue; if (dist < best_dist) { best_dist = dist; new_low = true; abort_attack = false; } /* Acquire grid */ ag = &borg_grids[y][x]; /* Calculate "average" damage */ d = borg_skill[BI_CLEVEL] * 2; /* Obtain the monster */ kill = &borg_kills[ag->kill]; struct monster_race* r_ptr = &r_info[kill->r_idx]; /* Vampire Strike only drains the living */ if (rf_has(r_ptr->flags, RF_NONLIVING) || rf_has(r_ptr->flags, RF_UNDEAD)) continue; /* Hack -- avoid waking most "hard" sleeping monsters */ if (!kill->awake && (d <= kill->power) && !borg_munchkin_mode) { /* Calculate danger */ p = borg_danger_aux(y, x, 1, ag->kill, true, true); if (p > avoidance * 2) { abort_attack = true; continue; } } /* Calculate "danger" to player */ p = borg_danger_aux(c_y, c_x, 2, ag->kill, true, true); /* Reduce "bonus" of partial kills when higher level */ if (d <= kill->power && borg_skill[BI_MAXCLEVEL] > 15) p = p / 10; /* Add the danger-bonus to the damage */ d += p; /* if this is a new closest, save the damage otherwise, average it in */ /* since we will only hit one */ if (new_low) b_d = d; else b_d = (d + b_d) / 2; } /* Nothing to attack,
require relatively close */ if (best_dist > 20 || abort_attack) return (0); /* Simulation */ if (borg_simulate) return (b_d); /* cast the spell */ borg_spell(VAMPIRE_STRIKE); /* Success */ return (b_d); }
/*
 * Simulate (or cast) the Crush spell.
 *
 * Value is the reduction in overall danger (before minus after), less
 * a penalty for the mana spent.  The spell costs hit points as well,
 * so the routine refuses to cast when the resulting HP would leave the
 * borg in danger.  Right now it is coded so that if a monster is only
 * partially crushed, it is still counted as fully dangerous.
 */
static int borg_attack_aux_crush(void) { int p1 = 0; int p2 = 0; int d = 0; /* Can I do it */ if (!borg_spell_okay(CRUSH)) return (0); /* don't kill yourself or leave less than 10hp */ if ((borg_skill[BI_CURHP] + 10) < (borg_skill[BI_CLEVEL] * 4)) return (0); /* Obtain initial danger */ borg_crush_spell = false; p1 = borg_danger(c_y, c_x, 4, true, false); /* What effect is there? */ borg_crush_spell = true; p2 = borg_danger(c_y, c_x, 4, true, false); borg_crush_spell = false; /* damage is reduction in danger */ d = (p1 - p2); /* if there is still danger afterward, make sure the reductioning in HP */ /* doesn't make this put us in danger */ int new_hp = (borg_skill[BI_CURHP] - (borg_skill[BI_CLEVEL] * 2)); if (borg_simulate && (p2 >= new_hp || new_hp <= 5)) return 0; int spell_power = borg_get_spell_power(CRUSH); /* Penalize mana usage */ d = d - spell_power; /* Penalize use of reserve mana */ if (borg_skill[BI_CURSP] - spell_power < borg_skill[BI_MAXSP] / 2) d = d - (spell_power * 10); /* Simulation */ if (borg_simulate) return (d); /* Cast the spell */ if (borg_spell(CRUSH)) return (d); else return (0); } /* * Try to sleep an adjacent bad guy * This had been a defence maneuver, which explains the format. * This is used for the sleep ii spell and the sanctuary prayer, * also the holcolleth activation. * * There is a slight concern with the level of the artifact and the * savings throw. Currently the borg uses his own level to determine * the save. The artifact level may be lower and the borg will have * the false impression that spell will work when in fact the monster * may easily save against the attack.
*/
/* Trance: value is the drop in danger from putting nearby monsters to
 * sleep (simulated via the borg_sleep_spell_ii flag), less a mana
 * penalty. */
static int borg_attack_aux_trance(void) { int p1 = 0; int p2 = 0; int d = 0; /* Can I do it */ if (!borg_spell_okay(TRANCE)) return (0); /* Obtain initial danger */ borg_sleep_spell_ii = false; p1 = borg_danger(c_y, c_x, 4, true, false); /* What effect is there? */ borg_sleep_spell_ii = true; p2 = borg_danger(c_y, c_x, 4, true, false); borg_sleep_spell_ii = false; /* value is d -- NOTE(review): the "rogues and rangers" remark below looks like stale copy-paste from the melee code; the value here is just the danger reduction. * they can use their critical hits. */ d = (p1 - p2); int spell_power = borg_get_spell_power(TRANCE); /* Penalize mana usage */ d = d - spell_power; /* Penalize use of reserve mana */ if (borg_skill[BI_CURSP] - spell_power < borg_skill[BI_MAXSP] / 2) d = d - (spell_power * 10); /* Simulation */ if (borg_simulate) return (d); /* Cast the spell */ if (borg_spell(TRANCE)) return (d); return (0); }
/* Activate The Cloak of Thingol (Holcolleth, act_sleepii): same
 * danger-reduction valuation as Trance, but via the artifact
 * activation, so there is no mana penalty. */
static int borg_attack_aux_artifact_holcolleth(void) { int p1 = 0; int p2 = 0; int d = 0; if (!borg_equips_item(act_sleepii, true)) return (0); /* Obtain initial danger */ borg_sleep_spell_ii = false; p1 = borg_danger(c_y, c_x, 4, true, false); /* What effect is there? */ borg_sleep_spell_ii = true; p2 = borg_danger(c_y, c_x, 4, true, false); borg_sleep_spell_ii = false; /* value is d, enhance the value for rogues and rangers so that * they can use their critical hits.
*/ d = (p1 - p2); /* Simulation */ if (borg_simulate) return (d); /* Cast the spell */ if (borg_activate_item(act_sleepii)) { /* Value */ return (d); } else { borg_note("# Failed to properly activate the artifact"); return (0); } } /* * Simulate/Apply the optimal result of using the given "type" of attack. * Dispatches on the BF_* attack code to the matching helper, passing the * hard-coded radius/damage estimate for that spell, rod, wand, staff, * artifact activation, ring, or dragon scale mail. */ static int borg_attack_aux(int what) { int dam = 0; int rad = 0; /* Analyze */ switch (what) { /* Wait on grid for monster to approach me */ case BF_REST: return (borg_attack_aux_rest()); /* Physical attack */ case BF_THRUST: return (borg_attack_aux_thrust()); /* Fired missile attack */ case BF_LAUNCH: return (borg_attack_aux_launch()); /* Object attack */ case BF_OBJECT: return (borg_attack_aux_object()); /* Spell -- slow monster */ case BF_SPELL_SLOW_MONSTER: dam = 10; return (borg_attack_aux_spell_bolt(SLOW_MONSTER, rad, dam, BORG_ATTACK_OLD_SLOW, z_info->max_range)); /* Spell -- confuse monster */ case BF_SPELL_CONFUSE_MONSTER: rad = 0; dam = 10; return (borg_attack_aux_spell_bolt(CONFUSE_MONSTER, rad, dam, BORG_ATTACK_OLD_CONF, z_info->max_range)); case BF_SPELL_SLEEP_III: dam = 10; return (borg_attack_aux_spell_dispel(MASS_SLEEP, dam, BORG_ATTACK_OLD_SLEEP)); /* Spell -- magic missile */ case BF_SPELL_MAGIC_MISSILE: rad = 0; dam = ((((borg_skill[BI_CLEVEL] - 1) / 5) + 3) * (4 + 1)) / 2; return (borg_attack_aux_spell_bolt(MAGIC_MISSILE, rad, dam, BORG_ATTACK_MISSILE, z_info->max_range)); /* Spell -- magic missile EMERGENCY*/ case BF_SPELL_MAGIC_MISSILE_RESERVE: rad = 0; dam = ((((borg_skill[BI_CLEVEL] - 1) / 5) + 3) * (4 + 1)); return (borg_attack_aux_spell_bolt_reserve(MAGIC_MISSILE, rad, dam, BORG_ATTACK_MISSILE, z_info->max_range)); /* Spell -- cold bolt */ case BF_SPELL_COLD_BOLT: rad = 0; dam = ((((borg_skill[BI_CLEVEL] - 5) / 3) + 6) * (8 + 1)) / 2; return (borg_attack_aux_spell_bolt(FROST_BOLT, rad, dam, BORG_ATTACK_COLD, z_info->max_range)); /* Spell -- kill wall */ case BF_SPELL_STONE_TO_MUD: rad = 0; dam = (20 + (30 / 2)); return
(borg_attack_aux_spell_bolt(TURN_STONE_TO_MUD, rad, dam, BORG_ATTACK_KILL_WALL, z_info->max_range)); /* Spell -- light beam */ case BF_SPELL_LIGHT_BEAM: rad = -1; dam = (6 * (8 + 1) / 2); return (borg_attack_aux_spell_bolt(SPEAR_OF_LIGHT, rad, dam, BORG_ATTACK_LIGHT_WEAK, z_info->max_range)); /* Spell -- stinking cloud */ case BF_SPELL_STINK_CLOUD: rad = 2; dam = (10 + (borg_skill[BI_CLEVEL] / 2)); return (borg_attack_aux_spell_bolt(STINKING_CLOUD, rad, dam, BORG_ATTACK_POIS, z_info->max_range)); /* Spell -- fire ball */ case BF_SPELL_FIRE_BALL: rad = 2; dam = (borg_skill[BI_CLEVEL] * 2); return (borg_attack_aux_spell_bolt(FIRE_BALL, rad, dam, BORG_ATTACK_FIRE, z_info->max_range)); /* Spell -- Ice Storm */ case BF_SPELL_COLD_STORM: rad = 3; dam = (3 * ((borg_skill[BI_CLEVEL] * 3) + 1)) / 2; return (borg_attack_aux_spell_bolt(ICE_STORM, rad, dam, BORG_ATTACK_ICE, z_info->max_range)); /* Spell -- Meteor Swarm */ case BF_SPELL_METEOR_SWARM: rad = 3; dam = (30 + borg_skill[BI_CLEVEL] / 2) + (borg_skill[BI_CLEVEL] / 20) + 2; return (borg_attack_aux_spell_bolt(METEOR_SWARM, rad, dam, BORG_ATTACK_METEOR, z_info->max_range)); /* Spell -- Rift */ case BF_SPELL_RIFT: rad = -1; dam = ((borg_skill[BI_CLEVEL] * 3) + 40); return (borg_attack_aux_spell_bolt(RIFT, rad, dam, BORG_ATTACK_GRAVITY, z_info->max_range)); /* Spell -- mana storm */ case BF_SPELL_MANA_STORM: rad = 3; dam = (300 + (borg_skill[BI_CLEVEL] * 2)); return (borg_attack_aux_spell_bolt(MANA_STORM, rad, dam, BORG_ATTACK_MANA, z_info->max_range)); /* Spell -- Shock Wave */ case BF_SPELL_SHOCK_WAVE: dam = (borg_skill[BI_CLEVEL] * 2); rad = 2; return (borg_attack_aux_spell_bolt(SHOCK_WAVE, rad, dam, BORG_ATTACK_SOUND, z_info->max_range)); /* Spell -- Explosion */ case BF_SPELL_EXPLOSION: dam = ((borg_skill[BI_CLEVEL] * 2) + (borg_skill[BI_CLEVEL] / 5)); /* hack pretend it is all shards */ rad = 2; return (borg_attack_aux_spell_bolt(EXPLOSION, rad, dam, BORG_ATTACK_SHARD, z_info->max_range)); /* Prayer -- orb of
draining */ case BF_PRAYER_HOLY_ORB_BALL: rad = ((borg_skill[BI_CLEVEL] >= 30) ? 3 : 2); dam = ((borg_skill[BI_CLEVEL] * 3) / 2) + (3 * (6 + 1)) / 2; return (borg_attack_aux_spell_bolt(ORB_OF_DRAINING, rad, dam, BORG_ATTACK_HOLY_ORB, z_info->max_range)); /* Prayer -- blind creature */ case BF_SPELL_BLIND_CREATURE: rad = 0; dam = 10; return (borg_attack_aux_spell_bolt(FRIGHTEN, rad, dam, BORG_ATTACK_OLD_CONF, z_info->max_range)); /* Druid - Trance */ case BF_SPELL_TRANCE: return (borg_attack_aux_trance()); /* Prayer -- Dispel Undead */ case BF_PRAYER_DISP_UNDEAD: dam = (((borg_skill[BI_CLEVEL] * 5) + 1) / 2); return (borg_attack_aux_spell_dispel(DISPEL_UNDEAD, dam, BORG_ATTACK_DISP_UNDEAD)); /* Prayer -- Dispel Evil */ case BF_PRAYER_DISP_EVIL: dam = (((borg_skill[BI_CLEVEL] * 5) + 1) / 2); return (borg_attack_aux_spell_dispel(DISPEL_EVIL, dam, BORG_ATTACK_DISP_EVIL)); /* Prayer -- Dispel Spirits */ case BF_PRAYER_DISP_SPIRITS: dam = (100); return (borg_attack_aux_spell_dispel(BANISH_SPIRITS, dam, BORG_ATTACK_DISP_SPIRITS)); /* Prayer -- Banishment (teleport evil away)*/ /* This is a defense spell: done in borg_defense() */ /* Prayer -- Holy Word also has heal effect and is considered in borg_heal */ case BF_PRAYER_HOLY_WORD: if (borg_skill[BI_MAXHP] - borg_skill[BI_CURHP] >= 300) /* force him to think the spell is more deadly to get him to * cast it. This will provide some healing for him. */ { dam = ((borg_skill[BI_CLEVEL] * 10)); return (borg_attack_aux_spell_dispel(HOLY_WORD, dam, BORG_ATTACK_DISP_EVIL)); } else /* If he is not wounded don't cast this, use Disp Evil instead.
*/ { dam = ((borg_skill[BI_CLEVEL] * 3) / 2) - 50; return (borg_attack_aux_spell_dispel(DISPEL_EVIL, dam, BORG_ATTACK_DISP_EVIL)); } /* Prayer -- Annihilate */ case BF_SPELL_ANNIHILATE: rad = 0; dam = (borg_skill[BI_CLEVEL] * 4); return (borg_attack_aux_spell_bolt(ANNIHILATE, rad, dam, BORG_ATTACK_OLD_DRAIN, z_info->max_range)); /* Spell -- Electric Arc */ case BF_SPELL_ELECTRIC_ARC: rad = 0; dam = ((((borg_skill[BI_CLEVEL] - 1) / 5) + 3) * (6 + 1)) / 2; return (borg_attack_aux_spell_bolt(ELECTRIC_ARC, rad, dam, BORG_ATTACK_ELEC, borg_skill[BI_CLEVEL])); case BF_SPELL_ACID_SPRAY: rad = 3; /* HACK just pretend it is wide. */ dam = ((borg_skill[BI_CLEVEL] / 2) * (8 + 1)) / 2; return (borg_attack_aux_spell_bolt(ACID_SPRAY, rad, dam, BORG_ATTACK_ACID, 10)); /* Spell -- mana bolt */ case BF_SPELL_MANA_BOLT: rad = 0; dam = ((borg_skill[BI_CLEVEL] - 10) * (8 + 1) / 2); return (borg_attack_aux_spell_bolt(MANA_BOLT, rad, dam, BORG_ATTACK_MANA, z_info->max_range)); /* Spell -- thrust away */ case BF_SPELL_THRUST_AWAY: rad = 0; dam = (borg_skill[BI_CLEVEL] * (8 + 1) / 2); return (borg_attack_aux_spell_bolt(THRUST_AWAY, rad, dam, BORG_ATTACK_FORCE, (borg_skill[BI_CLEVEL] / 10) + 1)); /* Spell -- Lightning Strike */ case BF_SPELL_LIGHTNING_STRIKE: rad = 0; dam = ((borg_skill[BI_CLEVEL] / 4) * (4 + 1) / 2) + borg_skill[BI_CLEVEL] + 5; /* HACK pretend it is all elec */ return (borg_attack_aux_spell_bolt(LIGHTNING_STRIKE, rad, dam, BORG_ATTACK_ELEC, z_info->max_range)); /* Spell -- Earth Rising */ case BF_SPELL_EARTH_RISING: rad = 0; dam = (((borg_skill[BI_CLEVEL] / 3) + 2) * (6 + 1) / 2) + borg_skill[BI_CLEVEL] + 5; return (borg_attack_aux_spell_bolt(EARTH_RISING, rad, dam, BORG_ATTACK_SHARD, (borg_skill[BI_CLEVEL] / 5) + 4)); /* Spell -- Volcanic Eruption */ /* just count the damage. The earthquake defence is a side bennie, perhaps...
*/ case BF_SPELL_VOLCANIC_ERUPTION: rad = 0; dam = (((borg_skill[BI_CLEVEL] * 3) / 2) * ((borg_skill[BI_CLEVEL] * 3) + 1)) / 2; return (borg_attack_aux_spell_bolt(VOLCANIC_ERUPTION, rad, dam, BORG_ATTACK_FIRE, z_info->max_range)); /* Spell -- River of Lightning */ case BF_SPELL_RIVER_OF_LIGHTNING: rad = 2; dam = (borg_skill[BI_CLEVEL] + 10) * (8 + 1) / 2; return (borg_attack_aux_spell_bolt(RIVER_OF_LIGHTNING, rad, dam, BORG_ATTACK_PLASMA, 20)); /* spell -- Spear of Oromë */ case BF_SPELL_SPEAR_OF_OROME: rad = 0; dam = ((borg_skill[BI_CLEVEL] / 2) + (8 + 1)) / 2; return (borg_attack_aux_spell_bolt(SPEAR_OF_OROME, rad, dam, BORG_ATTACK_HOLY_ORB, z_info->max_range)); /* spell -- Light of Manwë */ case BF_SPELL_LIGHT_OF_MANWE: rad = 0; dam = borg_skill[BI_CLEVEL] * 5 + 100; return (borg_attack_aux_spell_bolt(LIGHT_OF_MANWE, rad, dam, BORG_ATTACK_LIGHT, z_info->max_range)); /* spell -- Nether Bolt */ case BF_SPELL_NETHER_BOLT: rad = 0; dam = ((((borg_skill[BI_CLEVEL] / 4) + 3) * (4 + 1)) / 2); return (borg_attack_aux_spell_bolt(NETHER_BOLT, rad, dam, BORG_ATTACK_NETHER, z_info->max_range)); /* spell -- Tap Unlife */ case BF_SPELL_TAP_UNLIFE: dam = ((((borg_skill[BI_CLEVEL] / 4) + 3) * (4 + 1)) / 2); return (borg_attack_aux_spell_dispel(TAP_UNLIFE, dam, BORG_ATTACK_TAP_UNLIFE)); /* Spell - Crush */ case BF_SPELL_CRUSH: return (borg_attack_aux_crush()); case BF_SPELL_SLEEP_EVIL: dam = borg_skill[BI_CLEVEL] * 10 + 500; return (borg_attack_aux_spell_dispel(SLEEP_EVIL, dam, BORG_ATTACK_SLEEP_EVIL)); /* spell -- Disenchant */ case BF_SPELL_DISENCHANT: rad = 0; dam = ((((borg_skill[BI_CLEVEL] * 2) + 10) + 1) / 2) * 2; return (borg_attack_aux_spell_bolt(DISENCHANT, rad, dam, BORG_ATTACK_DISEN, z_info->max_range)); /* spell -- Frighten */ case BF_SPELL_FRIGHTEN: rad = 0; dam = borg_skill[BI_CLEVEL]; return (borg_attack_aux_spell_bolt(FRIGHTEN, rad, dam, BORG_ATTACK_TURN_ALL, z_info->max_range)); /* Spell - Vampire Strike*/ case BF_SPELL_VAMPIRE_STRIKE: return
(borg_attack_aux_vampire_strike()); /* Spell - Dispel Life */ case BF_PRAYER_DISPEL_LIFE: rad = 0; dam = ((borg_skill[BI_CLEVEL] * 3) + 1) / 2; return (borg_attack_aux_spell_bolt(DISPEL_LIFE, rad, dam, BORG_ATTACK_DRAIN_LIFE, z_info->max_range)); /* spell -- Dark Spear */ case BF_SPELL_DARK_SPEAR: rad = 0; dam = (((borg_skill[BI_CLEVEL] * 2) + 1) / 2) * 2; return (borg_attack_aux_spell_bolt(DARK_SPEAR, rad, dam, BORG_ATTACK_DARK, z_info->max_range)); /* spell -- Unleash Chaos */ case BF_SPELL_UNLEASH_CHAOS: rad = 0; dam = ((borg_skill[BI_CLEVEL] + 1) / 2) * 8; return (borg_attack_aux_spell_bolt(UNLEASH_CHAOS, rad, dam, BORG_ATTACK_CHAOS, z_info->max_range)); /* Spell -- Storm of Darkness */ case BF_SPELL_STORM_OF_DARKNESS: rad = 4; dam = (((borg_skill[BI_CLEVEL] * 2) + 1) / 2) * 4; return (borg_attack_aux_spell_bolt(STORM_OF_DARKNESS, rad, dam, BORG_ATTACK_DARK, z_info->max_range)); /* Spell - Curse */ case BF_SPELL_CURSE: return (borg_attack_aux_curse()); /* spell - Whirlwind Attack */ case BF_SPELL_WHIRLWIND_ATTACK: return (borg_attack_aux_whirlwind_attack()); /* spell - Leap into Battle */ case BF_SPELL_LEAP_INTO_BATTLE: return (borg_attack_aux_leap_into_battle()); /* spell - Maim Foe */ case BF_SPELL_MAIM_FOE: return (borg_attack_aux_maim_foe()); /* spell - Howl of the Damned */ case BF_SPELL_HOWL_OF_THE_DAMNED: dam = borg_skill[BI_CLEVEL]; return (borg_attack_aux_spell_dispel(HOWL_OF_THE_DAMNED, dam, BORG_ATTACK_TURN_ALL)); /* ROD -- slow monster */ case BF_ROD_SLOW_MONSTER: dam = 10; rad = 0; return (borg_attack_aux_rod_bolt(sv_rod_slow_monster, rad, dam, BORG_ATTACK_OLD_SLOW)); /* ROD -- sleep monster */ case BF_ROD_SLEEP_MONSTER: dam = 10; rad = 0; return (borg_attack_aux_rod_bolt(sv_rod_sleep_monster, rad, dam, BORG_ATTACK_OLD_SLEEP)); /* Rod -- elec bolt */ case BF_ROD_ELEC_BOLT: rad = -1; dam = 6 * (6 + 1) / 2; return (borg_attack_aux_rod_bolt(sv_rod_elec_bolt, rad, dam, BORG_ATTACK_ELEC)); /* Rod -- cold bolt */ case BF_ROD_COLD_BOLT: rad = 0;
dam = 12 * (8 + 1) / 2; return (borg_attack_aux_rod_bolt(sv_rod_cold_bolt, rad, dam, BORG_ATTACK_COLD)); /* Rod -- acid bolt */ case BF_ROD_ACID_BOLT: rad = 0; dam = 12 * (8 + 1) / 2; return (borg_attack_aux_rod_bolt(sv_rod_acid_bolt, rad, dam, BORG_ATTACK_ACID)); /* Rod -- fire bolt */ case BF_ROD_FIRE_BOLT: rad = 0; dam = 12 * (8 + 1) / 2; return (borg_attack_aux_rod_bolt(sv_rod_fire_bolt, rad, dam, BORG_ATTACK_FIRE)); /* Rod -- light beam */ case BF_ROD_LIGHT_BEAM: rad = -1; dam = (6 * (8 + 1) / 2); return (borg_attack_aux_rod_bolt(sv_rod_light, rad, dam, BORG_ATTACK_LIGHT_WEAK)); /* Rod -- drain life */ case BF_ROD_DRAIN_LIFE: rad = 0; dam = (150); return (borg_attack_aux_rod_bolt(sv_rod_drain_life, rad, dam, BORG_ATTACK_OLD_DRAIN)); /* Rod -- elec ball */ case BF_ROD_ELEC_BALL: rad = 2; dam = 64; return (borg_attack_aux_rod_bolt(sv_rod_elec_ball, rad, dam, BORG_ATTACK_ELEC)); /* Rod -- cold ball */ case BF_ROD_COLD_BALL: rad = 2; dam = 100; return (borg_attack_aux_rod_bolt(sv_rod_cold_ball, rad, dam, BORG_ATTACK_COLD)); /* Rod -- acid ball */ case BF_ROD_ACID_BALL: rad = 2; dam = 120; return (borg_attack_aux_rod_bolt(sv_rod_acid_ball, rad, dam, BORG_ATTACK_ACID)); /* Rod -- fire ball */ case BF_ROD_FIRE_BALL: rad = 2; dam = 144; return (borg_attack_aux_rod_bolt(sv_rod_fire_ball, rad, dam, BORG_ATTACK_FIRE)); /* Rod -- unid'd rod */ case BF_ROD_UNKNOWN: rad = 0; dam = 75; return (borg_attack_aux_rod_bolt_unknown(dam, BORG_ATTACK_MISSILE)); /* Wand -- unid'd wand */ case BF_WAND_UNKNOWN: rad = 0; dam = 75; return (borg_attack_aux_wand_bolt_unknown(dam, BORG_ATTACK_MISSILE)); /* Wand -- magic missile */ case BF_WAND_MAGIC_MISSILE: rad = 0; dam = 3 * (4 + 1) / 2; return (borg_attack_aux_wand_bolt(sv_wand_magic_missile, rad, dam, BORG_ATTACK_MISSILE, -1)); /* Wand -- slow monster */ case BF_WAND_SLOW_MONSTER: rad = 0; dam = 10; return (borg_attack_aux_wand_bolt(sv_wand_slow_monster, rad, dam, BORG_ATTACK_OLD_SLOW, -1)); /* Wand -- hold monster */ case
BF_WAND_HOLD_MONSTER: rad = 0; dam = 10; return (borg_attack_aux_wand_bolt(sv_wand_hold_monster, rad, dam, BORG_ATTACK_OLD_SLEEP, -1)); /* Wand -- fear monster */ case BF_WAND_FEAR_MONSTER: rad = 0; dam = 2 * (6 + 1) / 2; return (borg_attack_aux_wand_bolt(sv_wand_fear_monster, rad, dam, BORG_ATTACK_TURN_ALL, -1)); /* Wand -- conf monster */ case BF_WAND_CONFUSE_MONSTER: rad = 0; dam = 2 * (6 + 1) / 2; return (borg_attack_aux_wand_bolt(sv_wand_confuse_monster, rad, dam, BORG_ATTACK_OLD_CONF, -1)); /* Wand -- elec bolt */ case BF_WAND_ELEC_BOLT: dam = 6 * (6 + 1) / 2; rad = -1; return (borg_attack_aux_wand_bolt(sv_wand_elec_bolt, rad, dam, BORG_ATTACK_ELEC, -1)); /* Wand -- cold bolt */ case BF_WAND_COLD_BOLT: dam = 12 * (8 + 1) / 2; rad = 0; return (borg_attack_aux_wand_bolt(sv_wand_cold_bolt, rad, dam, BORG_ATTACK_COLD, -1)); /* Wand -- acid bolt */ case BF_WAND_ACID_BOLT: rad = 0; dam = 5 * (8 + 1) / 2; return (borg_attack_aux_wand_bolt(sv_wand_acid_bolt, rad, dam, BORG_ATTACK_ACID, -1)); /* Wand -- fire bolt */ case BF_WAND_FIRE_BOLT: rad = 0; dam = 12 * (8 + 1) / 2; return (borg_attack_aux_wand_bolt(sv_wand_fire_bolt, rad, dam, BORG_ATTACK_FIRE, -1)); /* Wand -- light beam */ case BF_WAND_LIGHT_BEAM: rad = -1; dam = (6 * (8 + 1) / 2); return (borg_attack_aux_wand_bolt(sv_wand_light, rad, dam, BORG_ATTACK_LIGHT_WEAK, -1)); /* Wand -- stinking cloud */ case BF_WAND_STINKING_CLOUD: rad = 2; dam = 12; return (borg_attack_aux_wand_bolt(sv_wand_stinking_cloud, rad, dam, BORG_ATTACK_POIS, -1)); /* Wand -- elec ball */ case BF_WAND_ELEC_BALL: rad = 2; dam = 64; return (borg_attack_aux_wand_bolt(sv_wand_elec_ball, rad, dam, BORG_ATTACK_ELEC, -1)); /* Wand -- cold ball */ case BF_WAND_COLD_BALL: rad = 2; dam = 100; return (borg_attack_aux_wand_bolt(sv_wand_cold_ball, rad, dam, BORG_ATTACK_COLD, -1)); /* Wand -- acid ball */ case BF_WAND_ACID_BALL: rad = 2; dam = 120; return (borg_attack_aux_wand_bolt(sv_wand_acid_ball, rad, dam, BORG_ATTACK_ACID, -1)); /* Wand -- fire
ball */ case BF_WAND_FIRE_BALL: rad = 2; dam = 144; return (borg_attack_aux_wand_bolt(sv_wand_fire_ball, rad, dam, BORG_ATTACK_FIRE, -1)); /* Wand -- dragon cold */ case BF_WAND_DRAGON_COLD: rad = 3; dam = 160; return (borg_attack_aux_wand_bolt(sv_wand_dragon_cold, rad, dam, BORG_ATTACK_COLD, -1)); /* Wand -- dragon fire */ case BF_WAND_DRAGON_FIRE: rad = 3; dam = 200; return (borg_attack_aux_wand_bolt(sv_wand_dragon_fire, rad, dam, BORG_ATTACK_FIRE, -1)); /* Wand -- annihilation */ case BF_WAND_ANNIHILATION: dam = 250; return (borg_attack_aux_wand_bolt(sv_wand_annihilation, rad, dam, BORG_ATTACK_OLD_DRAIN, -1)); /* Wand -- drain life */ case BF_WAND_DRAIN_LIFE: dam = 150; return (borg_attack_aux_wand_bolt(sv_wand_drain_life, rad, dam, BORG_ATTACK_OLD_DRAIN, -1)); /* Wand -- wand of wonder */ case BF_WAND_WONDER: dam = 35; return (borg_attack_aux_wand_bolt(sv_wand_wonder, rad, dam, BORG_ATTACK_MISSILE, -1)); /* Staff -- Sleep Monsters */ case BF_STAFF_SLEEP_MONSTERS: dam = 60; return (borg_attack_aux_staff_dispel(sv_staff_sleep_monsters, rad, dam, BORG_ATTACK_OLD_SLEEP)); /* Staff -- Slow Monsters */ case BF_STAFF_SLOW_MONSTERS: dam = 60; rad = 10; return (borg_attack_aux_staff_dispel(sv_staff_slow_monsters, rad, dam, BORG_ATTACK_OLD_SLOW)); /* Staff -- Dispel Evil */ case BF_STAFF_DISPEL_EVIL: dam = 60; return (borg_attack_aux_staff_dispel(sv_staff_dispel_evil, rad, dam, BORG_ATTACK_DISP_EVIL)); /* Staff -- Power */ case BF_STAFF_POWER: dam = 120; return (borg_attack_aux_staff_dispel(sv_staff_power, rad, dam, BORG_ATTACK_TURN_ALL)); /* Staff -- holiness */ case BF_STAFF_HOLINESS: if (borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 2) dam = 500; else dam = 120; return (borg_attack_aux_staff_dispel(sv_staff_holiness, rad, dam, BORG_ATTACK_DISP_EVIL)); /* Artifact -- Narthanc- fire bolt 9d8*/ case BF_EF_FIRE1: rad = 0; dam = (9 * (8 + 1) / 2); return (borg_attack_aux_activation(act_fire_bolt, rad, dam, BORG_ATTACK_FIRE, true, -1)); /* Artifact -- Anduril & Firestar-
fire bolt 72*/ case BF_EF_FIRE2: rad = 2; dam = 72; return (borg_attack_aux_activation(act_fire_bolt72, rad, dam, BORG_ATTACK_FIRE, true, -1)); /* Artifact -- Gothmog- FIRE BALL 144 */ case BF_EF_FIRE3: rad = 2; dam = 144; return (borg_attack_aux_activation(act_fire_ball, rad, dam, BORG_ATTACK_FIRE, true, -1)); /* Artifact -- Nimthanc & Paurnimmen- frost bolt 6d8*/ case BF_EF_FROST1: rad = 0; dam = (6 * (8 + 1) / 2); return (borg_attack_aux_activation(act_cold_bolt, rad, dam, BORG_ATTACK_COLD, true, -1)); /* Artifact -- Belangil- frost ball 50 */ case BF_EF_FROST2: rad = 2; dam = 50; return (borg_attack_aux_activation(act_cold_ball50, rad, dam, BORG_ATTACK_COLD, true, -1)); /* Artifact -- Aranrúth- frost bolt 12d8*/ case BF_EF_FROST4: rad = 0; dam = (12 * (8 + 1) / 2); return (borg_attack_aux_activation(act_cold_bolt2, rad, dam, BORG_ATTACK_COLD, true, -1)); /* Artifact -- Ringil- frost ball 100*/ case BF_EF_FROST3: rad = 2; dam = 100; return (borg_attack_aux_activation(act_cold_ball100, rad, dam, BORG_ATTACK_COLD, true, -1)); /* Artifact -- Dethanc- electric bolt 6d6*/ case BF_EF_LIGHTNING_BOLT: rad = -1; dam = (6 * (6 + 1) / 2); return (borg_attack_aux_activation(act_elec_bolt, rad, dam, BORG_ATTACK_ELEC, true, -1)); /* Artifact -- Rilia- poison gas 12*/ case BF_EF_STINKING_CLOUD: rad = 2; dam = 12; return (borg_attack_aux_activation(act_stinking_cloud, rad, dam, BORG_ATTACK_POIS, true, -1)); /* Artifact -- Theoden- drain Life 120*/ case BF_EF_DRAIN_LIFE2: rad = 0; dam = 120; return (borg_attack_aux_activation(act_drain_life2, rad, dam, BORG_ATTACK_OLD_DRAIN, true, -1)); /* Artifact -- Totila- confusion */ case BF_EF_CONFUSE: rad = 0; dam = 20; return (borg_attack_aux_activation(act_confuse2, rad, dam, BORG_ATTACK_OLD_CONF, true, -1)); /* Artifact -- Holcolleth -- sleep ii and sanctuary */ case BF_EF_SLEEP: dam = 10; return (borg_attack_aux_artifact_holcolleth()); /* Artifact -- TURMIL- drain life 90 */ case BF_EF_DRAIN_LIFE1: rad = 0; dam = 90; return
(borg_attack_aux_activation(act_drain_life1, rad, dam, BORG_ATTACK_OLD_DRAIN, true, -1)); /* Artifact -- Fingolfin- spikes 150 */ case BF_EF_ARROW: rad = 0; dam = 150; return (borg_attack_aux_activation(act_arrow, rad, dam, BORG_ATTACK_MISSILE, true, -1)); /* Artifact -- Cammithrim- Magic Missile 3d4 */ case BF_EF_MISSILE: rad = 0; dam = (3 * (4 + 1) / 2); return (borg_attack_aux_activation(act_missile, rad, dam, BORG_ATTACK_MISSILE, true, -1)); /* Artifact -- Paurnen- ACID bolt 5d8 */ case BF_EF_ACID1: rad = 0; dam = (5 * (8 + 1) / 2); return (borg_attack_aux_activation(act_acid_bolt, rad, dam, BORG_ATTACK_ACID, true, -1)); /* Artifact -- INGWE- DISPEL EVIL X5 */ case BF_EF_DISP_EVIL: rad = 10; dam = (10 + (borg_skill[BI_CLEVEL] * 5) / 2); return (borg_attack_aux_activation(act_dispel_evil, rad, dam, BORG_ATTACK_DISP_EVIL, true, -1)); /* Artifact -- Eöl -- Mana Bolt 12d8 */ case BF_EF_MANA_BOLT: rad = 0; dam = (12 * (8 + 1)) / 2; return (borg_attack_aux_activation(act_mana_bolt, rad, dam, BORG_ATTACK_MANA, true, -1)); /* Artifact -- Razorback and Mediator */ case BF_EF_STAR_BALL: rad = 3; dam = 150; return (borg_attack_aux_activation(act_star_ball, rad, dam, BORG_ATTACK_ELEC, true, -1)); /* Artifact -- Gil-galad */ case BF_EF_STARLIGHT2: rad = 7; dam = (10 * (8 + 1)) / 2; return (borg_attack_aux_activation(act_starlight2, rad, dam, BORG_ATTACK_LIGHT, false, -1)); /* Artifact -- randart */ case BF_EF_STARLIGHT: rad = 7; dam = (6 * (8 + 1)) / 2; return (borg_attack_aux_activation(act_starlight, rad, dam, BORG_ATTACK_LIGHT, false, -1)); /* Ring of ACID */ case BF_RING_ACID: rad = 2; dam = 70; return (borg_attack_aux_ring(sv_ring_acid, rad, dam, BORG_ATTACK_ACID)); /* Ring of FLAMES */ case BF_RING_FIRE: rad = 2; dam = 80; return (borg_attack_aux_ring(sv_ring_flames, rad, dam, BORG_ATTACK_FIRE)); /* Ring of ICE */ case BF_RING_ICE: rad = 2; dam = 75; return (borg_attack_aux_ring(sv_ring_ice, rad, dam, BORG_ATTACK_ICE)); /* Ring of LIGHTNING */ case BF_RING_LIGHTNING:
rad = 2; dam = 85; return (borg_attack_aux_ring(sv_ring_lightning, rad, dam, BORG_ATTACK_ELEC)); /* Hack -- Dragon Scale Mail can be activated as well */ case BF_DRAGON_BLUE: rad = 2; dam = 150; return (borg_attack_aux_dragon(sv_dragon_blue, rad, dam, BORG_ATTACK_ELEC, -1)); case BF_DRAGON_WHITE: rad = 2; dam = 100; return (borg_attack_aux_dragon(sv_dragon_white, rad, dam, BORG_ATTACK_COLD, -1)); case BF_DRAGON_BLACK: rad = 2; dam = 120; return (borg_attack_aux_dragon(sv_dragon_black, rad, dam, BORG_ATTACK_ACID, -1)); case BF_DRAGON_GREEN: rad = 2; dam = 150; return (borg_attack_aux_dragon(sv_dragon_green, rad, dam, BORG_ATTACK_POIS, -1)); case BF_DRAGON_RED: rad = 2; dam = 200; return (borg_attack_aux_dragon(sv_dragon_red, rad, dam, BORG_ATTACK_FIRE, -1)); /* Multi-hued DSM: simulate each breath element and use the best one */ case BF_DRAGON_MULTIHUED: { int value[5]; int type[5] = {BORG_ATTACK_ELEC, BORG_ATTACK_COLD, BORG_ATTACK_ACID, BORG_ATTACK_POIS, BORG_ATTACK_FIRE}; int biggest = 0; bool tmp_simulate = borg_simulate; rad = 2; dam = 250; if (!borg_simulate) borg_simulate = true; for (int x = 0; x < 5; x++) value[x] = borg_attack_aux_dragon(sv_dragon_multihued, rad, dam, type[x], x); for (int x = 1; x < 5; x++) if (value[x] > value[biggest]) biggest = x; borg_simulate = tmp_simulate; if (!borg_simulate) value[biggest] = borg_attack_aux_dragon(sv_dragon_multihued, rad, dam, type[biggest], biggest); return value[biggest]; } case BF_DRAGON_GOLD: rad = 2; dam = 150; return (borg_attack_aux_dragon(sv_dragon_gold, rad, dam, BORG_ATTACK_SOUND, -1)); case BF_DRAGON_CHAOS: { int value[2]; int type[2] = { BORG_ATTACK_CHAOS, BORG_ATTACK_DISEN }; int biggest = 0; bool tmp_simulate = borg_simulate; rad = 2; dam = 220; if (!borg_simulate) borg_simulate = true; for (int x = 0; x < 2; x++) value[x] = borg_attack_aux_dragon(sv_dragon_chaos, rad, dam, type[x], x); for (int x = 1; x < 2; x++) if (value[x] > value[biggest]) biggest = x; borg_simulate = tmp_simulate; if (!borg_simulate) value[biggest] = borg_attack_aux_dragon(sv_dragon_chaos, rad,
dam, type[biggest], biggest); return value[biggest]; } case BF_DRAGON_LAW: { int value[2]; int type[2] ={ BORG_ATTACK_SOUND, BORG_ATTACK_SHARD }; int biggest = 0; bool tmp_simulate = borg_simulate; rad = 2; dam = 220; if (!borg_simulate) borg_simulate = true; for (int x = 0; x < 2; x++) value[x] = borg_attack_aux_dragon(sv_dragon_law, rad, dam, type[x], x); for (int x = 1; x < 2; x++) if (value[x] > value[biggest]) biggest = x; borg_simulate = tmp_simulate; if (!borg_simulate) value[biggest] = borg_attack_aux_dragon(sv_dragon_law, rad, dam, type[biggest], biggest); return value[biggest]; } case BF_DRAGON_BALANCE: { int value[4]; int type[4] = {BORG_ATTACK_CHAOS, BORG_ATTACK_DISEN, BORG_ATTACK_SOUND, BORG_ATTACK_SHARD}; int biggest = 0; bool tmp_simulate = borg_simulate; rad = 2; dam = 250; if (!borg_simulate) borg_simulate = true; for (int x = 0; x < 4; x++) value[x] = borg_attack_aux_dragon(sv_dragon_balance, rad, dam, type[x], x); for (int x = 1; x < 4; x++) if (value[x] > value[biggest]) biggest = x; borg_simulate = tmp_simulate; if (!borg_simulate) value[biggest] = borg_attack_aux_dragon(sv_dragon_balance, rad, dam, type[biggest], biggest); return value[biggest]; } case BF_DRAGON_SHINING: { int value[2]; int type[2] = { BORG_ATTACK_LIGHT, BORG_ATTACK_DARK }; int biggest = 0; bool tmp_simulate = borg_simulate; rad = 2; dam = 200; if (!borg_simulate) borg_simulate = true; for (int x = 0; x < 2; x++) value[x] = borg_attack_aux_dragon(sv_dragon_shining, rad, dam, type[x], x); for (int x = 1; x < 2; x++) if (value[x] > value[biggest]) biggest = x; borg_simulate = tmp_simulate; if (!borg_simulate) value[biggest] = borg_attack_aux_dragon(sv_dragon_shining, rad, dam, type[biggest], biggest); return value[biggest]; } case BF_DRAGON_POWER: rad = 2; dam = 300; return (borg_attack_aux_dragon(sv_dragon_power, rad, dam, BORG_ATTACK_MISSILE, -1)); } /* Oops */ return (0); } /* * Attack nearby monsters, in the best possible way, if any.
 *
 * We consider a variety of possible attacks, including physical attacks
 * on adjacent monsters, missile attacks on nearby monsters, spell/prayer
 * attacks on nearby monsters, and wand/rod attacks on nearby monsters.
 *
 * Basically, for each of the known "types" of attack, we "simulate" the
 * "optimal" result of using that attack, and then we "apply" the "type"
 * of attack which appears to have the "optimal" result.
 *
 * When calculating the "result" of using an attack, we only consider the
 * effect of the attack on visible, on-screen, known monsters, which are
 * within 16 grids of the player. This prevents most "spurious" attacks,
 * but we can still be fooled by situations like creeping coins which die
 * while out of sight, leaving behind a pile of coins, which we then find
 * again, and attack with distance attacks, which have no effect. Perhaps
 * we should "expect" certain results, and take note of failure to observe
 * those effects. XXX XXX XXX
 *
 * See above for the "semantics" of each "type" of attack.
 */
bool borg_attack(bool boosted_bravery)
{
    int i, x, y;
    int a_y, a_x;

    int n, b_n = 0;
    int g, b_g = -1;

    bool adjacent_monster = false;

    borg_grid* ag;
    struct monster_race* r_ptr;

    /* Nobody around */
    if (!borg_kills_cnt) return (false);

    /* Set the attacking flag so that danger is boosted for monsters */
    /* we want to attack first. */
    borg_attacking = true;

    /* Reset list of candidate target grids (shared borg_temp arrays are
     * consumed by the borg_attack_aux() simulations below). */
    borg_temp_n = 0;

    /* Find "nearby" monsters and collect their grids as attack targets */
    for (i = 1; i < borg_kills_nxt; i++)
    {
        borg_kill* kill;

        /* Monster */
        kill = &borg_kills[i];
        r_ptr = &r_info[kill->r_idx];

        /* Skip dead monsters */
        if (!kill->r_idx) continue;

        /* Require current knowledge (seen within the last 2 game turns) */
        if (kill->when < borg_t - 2) continue;

        /* Ignore multiplying monsters and when fleeing from scaries*/
        if (goal_ignoring && !borg_skill[BI_ISAFRAID] &&
            (rf_has(r_info[kill->r_idx].flags, RF_MULTIPLY))) continue;

        /* Acquire location */
        a_x = kill->x;
        a_y = kill->y;

        /* Low level mages need to conserve the mana in town.
         * These guys don't fight back */
        if (borg_class == CLASS_MAGE &&
            borg_skill[BI_MAXCLEVEL] < 10 &&
            borg_skill[BI_CDEPTH] == 0 &&
            (strstr(r_ptr->name, "Farmer")
            /* strstr(r_ptr->name, "Blubbering") || */
            /* strstr(r_ptr->name, "Boil") || */
            /* strstr(r_ptr->name, "Village") || */
            /*strstr(r_ptr->name, "Pitiful") || */
            /* strstr(r_ptr->name, "Mangy") */))
            continue;

        /* Check if there is a monster adjacent to me or he's close and fast. */
        if ((kill->speed > borg_skill[BI_SPEED] &&
             borg_distance(c_y, c_x, a_y, a_x) <= 2) ||
            borg_distance(c_y, c_x, a_y, a_x) <= 1)
            adjacent_monster = true;

        /* no attacking most scaryguys, try to get off the level.
         * Each empty branch below is a deliberate "do fight" exception. */
        if (scaryguy_on_level)
        {
            /* probably Grip or Fang. */
            if (strstr(r_ptr->name, "Grip") ||
                strstr(r_ptr->name, "Fang"))
            {
                /* Try to fight Grip and Fang. */
            }
            else if (borg_skill[BI_CDEPTH] <= 5 && borg_skill[BI_CDEPTH] != 0 &&
                (rf_has(r_info[kill->r_idx].flags, RF_MULTIPLY)))
            {
                /* Try to fight single worms and mice. */
            }
            else if (borg_t - borg_began >= 2000 ||
                borg_time_town + (borg_t - borg_began) >= 3000)
            {
                /* Try to fight been there too long. */
            }
            else if (boosted_bravery ||
                borg_no_retreat >= 1 ||
                goal_recalling)
            {
                /* Try to fight if being Boosted or recall engaged. */
                borg_note("# Bored, or recalling and fighting a monster on Scaryguy Level.");
            }
            else if (borg_skill[BI_CDEPTH] * 4 <= borg_skill[BI_CLEVEL] &&
                borg_skill[BI_CLEVEL] > 10)
            {
                /* Try to fight anyway. */
                borg_note("# High clevel fighting monster on Scaryguy Level.");
            }
            else if (adjacent_monster)
            {
                /* Try to fight if there is a monster next to me */
                borg_note("# Adjacent to monster on Scaryguy Level.");
            }
            else
            {
                /* Flee from other scary guys */
                continue;
            }
        }

        /* Acquire location */
        x = kill->x;
        y = kill->y;

        /* Get grid */
        ag = &borg_grids[y][x];

        /* Never shoot off-screen */
        if (!(ag->info & BORG_OKAY)) continue;

        /* Never shoot through walls */
        if (!(ag->info & BORG_VIEW)) continue;

        /* Check the distance XXX XXX XXX */
        if (borg_distance(c_y, c_x, y, x) > z_info->max_range) continue;

        /* Sometimes the borg can lose a monster index in the grid if there
         * are lots of monsters on screen.  If he does lose one, reinject
         * the index here. */
        if (!ag->kill) borg_grids[kill->y][kill->x].kill = i;

        /* Save the location (careful) */
        borg_temp_x[borg_temp_n] = x;
        borg_temp_y[borg_temp_n] = y;
        borg_temp_n++;
    }

    /* No destinations */
    if (!borg_temp_n)
    {
        borg_attacking = false;
        return (false);
    }

    /* Phase 1: simulate every attack type and keep the best value.
     * borg_attack_aux() only computes a score while borg_simulate is set. */
    borg_simulate = true;

    /* Analyze the possible attacks */
    for (g = 0; g < BF_MAX; g++)
    {
        /* Simulate */
        n = borg_attack_aux(g);

        /* Track "best" attack <= */
        if (n <= b_n) continue;

        /* Track best */
        b_g = g;
        b_n = n;
    }

    /* Nothing good */
    if (b_n <= 0)
    {
        borg_attacking = false;
        return (false);
    }

    /* Note */
    borg_note(format("# Performing attack type %d with value %d.", b_g, b_n));

    /* Phase 2: re-run the winning attack for real (queues keypresses). */
    borg_simulate = false;

    /* Instantiate */
    (void)borg_attack_aux(b_g);

    borg_attacking = false;

    /* Success */
    return (true);
}

/* Munchkin Attack - Magic
 *
 * The early mages have a very difficult time surviving until they level up some.
 * This routine will allow the mage to do some very limited attacking while he is
 * doing the munchking start (stair scumming for items).
 *
 * Basically, he will rest on stairs to recuperate mana, then use MM to attack some
 * easy to kill monsters. If the monster gets too close, he will flee via the stairs.
* He hope to be able to kill the monster in two shots from the MM. A perfect scenario * would be a mold which does not move, then he could rest/shoot/rest. */ bool borg_munchkin_mage(void) { int i, x, y; int a_y, a_x; int b_dam = -1, dam = 0; int b_n = -1; borg_grid* ag; /* Must be standing on a stair */ if (borg_grids[c_y][c_x].feat != FEAT_MORE && borg_grids[c_y][c_x].feat != FEAT_LESS) return (false); /* Not if too dangerous */ if ((borg_danger(c_y, c_x, 1, true, true) > avoidance * 7 / 10) || borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 3) return (false); if (borg_skill[BI_ISCONFUSED]) return (false); /* Nobody around */ if (!borg_kills_cnt) return (false); /* Set the attacking flag so that danger is boosted for monsters */ /* we want to attack first. */ borg_attacking = true; /* Reset list */ borg_temp_n = 0; /* Find "nearby" monsters */ for (i = 1; i < borg_kills_nxt; i++) { borg_kill* kill; /* Monster */ kill = &borg_kills[i]; /* Skip dead monsters */ if (!kill->r_idx) continue; /* Require current knowledge */ if (kill->when < borg_t - 2) continue; /* Acquire location */ a_x = kill->x; a_y = kill->y; /* Not in town. This should not be reached, but just in case we add it */ if (borg_skill[BI_CDEPTH] == 0) continue; /* Check if there is a monster adjacent to me or he's close and fast. 
*/ if ((kill->speed > borg_skill[BI_SPEED] && borg_distance(c_y, c_x, a_y, a_x) <= 2) || borg_distance(c_y, c_x, a_y, a_x) <= 1) return (false); /* no attacking most scaryguys, try to get off the level */ if (scaryguy_on_level) return (false); /* Acquire location */ x = kill->x; y = kill->y; /* Get grid */ ag = &borg_grids[y][x]; /* Never shoot off-screen */ if (!(ag->info & BORG_OKAY)) continue; /* Never shoot through walls */ if (!(ag->info & BORG_VIEW)) continue; /* Check the distance XXX XXX XXX */ if (borg_distance(c_y, c_x, y, x) > z_info->max_range) continue; /* Save the location (careful) */ borg_temp_x[borg_temp_n] = x; borg_temp_y[borg_temp_n] = y; borg_temp_n++; } /* No destinations */ if (!borg_temp_n) { borg_attacking = false; return (false); } /* Simulate */ borg_simulate = true; /* Simulated */ for (i = 0; i < BF_MAX; i++) { /* Skip certain ones */ if (i <= 1) continue; dam = borg_attack_aux(i); /* Track the best attack method */ if (dam >= b_dam && dam > 0) { b_dam = dam; b_n = i; } } /* Nothing good */ if (b_n < 0 || b_dam <= 0) { borg_attacking = false; return (false); } /* Note */ borg_note(format("# Performing munchkin attack with value %d.", b_dam)); /* Instantiate */ borg_simulate = false; /* Instantiate */ (void)borg_attack_aux(b_n); borg_attacking = false; /* Success */ return (true); } /* Munchkin Attack - Melee * * The early borgs have a very difficult time surviving until they level up some. * This routine will allow the borg to do some very limited attacking while he is * doing the munchking start (stair scumming for items). * * Basically, he will rest on stairs to recuperate HP, then use melee to attack some * easy to kill adjacent monsters. 
*/ bool borg_munchkin_melee(void) { int i, x, y; int n = 0; borg_grid* ag; /* No Mages for now */ if ((borg_class == CLASS_MAGE || borg_class == CLASS_NECROMANCER)) return (false); /* Must be standing on a stair */ if (borg_grids[c_y][c_x].feat != FEAT_MORE && borg_grids[c_y][c_x].feat != FEAT_LESS) return (false); /* Nobody around */ if (!borg_kills_cnt) return (false); /* Not if too dangerous */ if ((borg_danger(c_y, c_x, 1, true, true) > avoidance * 7 / 10) || borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 3) return (false); if (borg_skill[BI_ISCONFUSED]) return (false); /* Set the attacking flag so that danger is boosted for monsters */ /* we want to attack first. */ borg_attacking = true; /* Reset list */ borg_temp_n = 0; /* Find "nearby" monsters */ for (i = 1; i < borg_kills_nxt; i++) { borg_kill* kill; /* Monster */ kill = &borg_kills[i]; /* Skip dead monsters */ if (!kill->r_idx) continue; /* Require current knowledge */ if (kill->when < borg_t - 2) continue; /* Not in town. This should not be reached, but just in case we add it */ if (borg_skill[BI_CDEPTH] == 0) continue; /* no attacking most scaryguys, try to get off the level */ if (scaryguy_on_level) return (false); /* Acquire location */ x = kill->x; y = kill->y; /* Get grid */ ag = &borg_grids[y][x]; /* Never shoot off-screen */ if (!(ag->info & BORG_OKAY)) continue; /* Never shoot through walls */ if (!(ag->info & BORG_VIEW)) continue; /* Check the distance XXX XXX XXX */ if (borg_distance(c_y, c_x, y, x) != 1) continue; /* Save the location (careful) */ borg_temp_x[borg_temp_n] = x; borg_temp_y[borg_temp_n] = y; borg_temp_n++; } /* No destinations */ if (!borg_temp_n) { borg_attacking = false; return (false); } /* Simulate */ borg_simulate = true; /* Simulated */ n = borg_attack_aux(BF_THRUST); /* Nothing good */ if (n <= 0) { borg_attacking = false; return (false); } /* Note */ borg_note(format("# Performing munchkin attack with value %d.", n)); /* Instantiate */ borg_simulate = false; /* 
Instantiate */ (void)borg_attack_aux(BF_THRUST); borg_attacking = false; /* Success */ return (true); } /* Log the pathway and feature of the spell pathway * Useful for debugging beams and Tport Other spell */ static void borg_log_spellpath(bool beam) { int n_x, n_y, x, y; int dist = 0; borg_grid* ag; borg_kill* kill; y = borg_target_y; x = borg_target_x; n_x = c_x; n_y = c_y; while (1) { ag = &borg_grids[n_y][n_x]; kill = &borg_kills[ag->kill]; /* Note the Pathway */ if (!borg_cave_floor_grid(ag)) { borg_note(format("# Logging Spell pathway (%d,%d): Wall grid.", n_y, n_x)); break; } else if (ag->kill) { borg_note(format("# Logging Spell pathway (%d,%d): %s, danger %d", n_y, n_x, (r_info[kill->r_idx].name), borg_danger_aux(c_y, c_x, 1, ag->kill, true, false))); } else if (n_y == c_y && n_x == c_x) { borg_note(format("# Logging Spell pathway (%d,%d): My grid.", n_y, n_x)); } else { borg_note(format("# Logging Spell pathway (%d,%d).", n_y, n_x)); } /* Stop loop if we reach our target if using bolt */ if (n_x == x && n_y == y) break; /* Safegaurd not to loop */ dist++; if (dist >= z_info->max_range) break; /* Calculate the new location */ mmove2(&n_y, &n_x, c_y, c_x, y, x); } } /* * * There are several types of setup moves: * * Temporary speed * Protect From Evil * Bless\Prayer * Berserk\Heroism * Temp Resist (either all or just cold/fire?) 
 *     Shield
 *     Teleport away
 *     Glyph of Warding
 *     See inviso
 *
 *
 * and many others
 */
/* Index of each defence manuever simulated by the borg_defend_aux_*()
 * routines (index comments corrected to the actual enumerator values). */
enum
{
    BD_BLESS,
    BD_SPEED,
    BD_GRIM_PURPOSE,
    BD_RESIST_FECAP,
    BD_RESIST_F,
    BD_RESIST_C, /* 5 */
    BD_RESIST_A,
    BD_RESIST_P,
    BD_PROT_FROM_EVIL,
    BD_SHIELD,
    BD_TELE_AWAY, /* 10 */
    BD_HERO,
    BD_BERSERK,
    BD_SMITE_EVIL,
    BD_REGEN,
    BD_GLYPH, /* 15 */
    BD_CREATE_DOOR,
    BD_MASS_GENOCIDE,
    BD_GENOCIDE,
    BD_GENOCIDE_NASTIES,
    BD_EARTHQUAKE, /* 20 */
    BD_DESTRUCTION,
    BD_TPORTLEVEL,
    BD_BANISHMENT, /* Priest spell */
    BD_DETECT_INVISO,
    BD_LIGHT_BEAM,
    BD_SHIFT_PANEL,
    BD_REST,
    BD_TELE_AWAY_MORGOTH,
    BD_BANISHMENT_MORGOTH,
    BD_LIGHT_MORGOTH,

    BD_MAX
};

/*
 * Bless/Prayer to prepare for battle
 *
 * p1 is the current danger at the player's grid; returns a small positive
 * value (1) when blessing is worthwhile, 0 otherwise.  When borg_simulate
 * is clear the bless is actually performed.
 */
static int borg_defend_aux_bless(int p1)
{
    int fail_allowed = 25;
    borg_grid* ag = &borg_grids[c_y][c_x];
    int i;
    bool borg_near_kill = false;

    /* already blessed */
    if (borg_bless) return (0);

    /* Cant when Blind */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] ||
        borg_skill[BI_ISFORGET]) return (0);

    /* Dark */
    if (!(ag->info & BORG_GLOW) && borg_skill[BI_CURLITE] == 0) return (0);

    /* no spell and no scroll fallback available */
    if (!borg_spell_okay_fail(BLESS, fail_allowed) &&
        -1 == borg_slot(TV_SCROLL, sv_scroll_blessing) &&
        -1 == borg_slot(TV_SCROLL, sv_scroll_holy_chant) &&
        -1 == borg_slot(TV_SCROLL, sv_scroll_holy_prayer)) return (0);

    /* Check if a monster is close to me (within 3 grids). */
    for (i = 1; i < borg_kills_nxt; i++)
    {
        borg_kill* kill;

        /* Monster */
        kill = &borg_kills[i];

        /* Skip dead monsters */
        if (!kill->r_idx) continue;

        /* Require current knowledge */
        if (kill->when < borg_t - 5) continue;

        /* Check the distance XXX XXX XXX */
        if (borg_distance(c_y, c_x, kill->y, kill->x) > 3) continue;

        /* kill near me */
        borg_near_kill = true;
    }

    /* if we are in some danger but not much, go for a quick bless */
    if ((p1 > avoidance / 12 || borg_skill[BI_CLEVEL] <= 15) &&
        p1 > 0 && borg_near_kill && p1 < avoidance / 2)
    {
        /* Simulation */
        /* bless is a low priority */
        if (borg_simulate) return (1);

        borg_note("# Attempting to cast Bless");

        /* No resting to recoop mana */
        borg_no_rest_prep = 11000;

        /* do it! */
        if (borg_spell(BLESS) ||
            borg_read_scroll(sv_scroll_blessing) ||
            borg_read_scroll(sv_scroll_holy_chant) ||
            borg_read_scroll(sv_scroll_holy_prayer))
            return 1;
    }

    return (0);
}

/*
 * Speed to prepare for battle
 */
static int borg_defend_aux_speed(int p1)
{
    int p2 = 0;
    bool good_speed = false;
    bool speed_spell = false;
    bool speed_staff = false;
    bool speed_rod = false;
    int fail_allowed = 25;

    /* already fast */
    if (borg_speed) return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] ||
        borg_skill[BI_ISFORGET]) return (0);

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance) fail_allowed -= 19;
    else /* a little scary */
        if (p1 > (avoidance * 2) / 3) fail_allowed -= 10;
        else /* not very scary, allow lots of fail */
            if (p1 < avoidance / 3) fail_allowed += 10;

    /* only cast defence spells if fail rate is not too high */
    if (borg_spell_okay_fail(HASTE_SELF, fail_allowed)) speed_spell = true;

    /* staff must have charges */
    if (borg_equips_staff_fail(sv_staff_speed)) speed_staff = true;

    /* rod can't be charging */
    if (borg_equips_rod(sv_rod_speed)) speed_rod = true;

    /* Need some form of speed source */
    if (0 > borg_slot(TV_POTION, sv_potion_speed) && !speed_staff &&
        !speed_rod && !speed_spell &&
        !borg_equips_item(act_haste, true) &&
        !borg_equips_item(act_haste1, true) &&
        !borg_equips_item(act_haste2, true)) return (0);

    /* if we have an infinite/large suppy of speed we can */
    /* be generious with our use */
    if (speed_rod || speed_spell || speed_staff ||
        borg_equips_item(act_haste, true) ||
        borg_equips_item(act_haste1, true) ||
        borg_equips_item(act_haste2, true)) good_speed = true;

    /* pretend we are protected and look again: p2 is the danger as if
     * already hasted; the p1-p2 difference is the value of casting. */
    borg_speed = true;
    p2 = borg_danger(c_y, c_x, 1, true, false);
    borg_speed = false;

    /* if scaryguy around cast it. */
    if (scaryguy_on_level)
    {
        /* HACK pretend that it was scary and will be safer */
        p2 = p2 * 3 / 10;
    }

    /* if we are fighting a unique cast it. */
    if (good_speed && borg_fighting_unique)
    {
        /* HACK pretend that it was scary and will be safer */
        p2 = p2 * 7 / 10;
    }

    /* if we are fighting a unique and a summoner cast it. */
    if (borg_fighting_summoner && borg_fighting_unique)
    {
        /* HACK pretend that it was scary and will be safer */
        p2 = p2 * 7 / 10;
    }

    /* if the unique is Sauron cast it */
    if (borg_skill[BI_CDEPTH] == 99 && borg_fighting_unique >= 10)
    {
        p2 = p2 * 6 / 10;
    }

    /* if the unique is a rather nasty one. */
    if (borg_fighting_unique &&
        (streq(r_info[unique_on_level].name, "Bullroarer the Hobbit") ||
         streq(r_info[unique_on_level].name, "Mughash the Kobold Lord") ||
         streq(r_info[unique_on_level].name, "Wormtongue, Agent of Saruman") ||
         streq(r_info[unique_on_level].name, "Lagduf, the Snaga") ||
         streq(r_info[unique_on_level].name, "Brodda, the Easterling") ||
         streq(r_info[unique_on_level].name, "Orfax, Son of Boldor")))
    {
        p2 = p2 * 6 / 10;
    }

    /* if the unique is Morgoth cast it */
    if (borg_skill[BI_CDEPTH] == 100 && borg_fighting_unique >= 10)
    {
        p2 = p2 * 5 / 10;
    }

    /* Attempt to conserve Speed at end of game */
    if (borg_skill[BI_CDEPTH] >= 97 && !borg_fighting_unique && !good_speed)
        p2 = 9999;

    /* if this is an improvement and we may not avoid monster now and */
    /* we may have before (looser threshold when supply is plentiful) */
    if (((p1 > p2) &&
         p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 2)) &&
         (p1 > (avoidance / 5)) && good_speed) ||
        ((p1 > p2) &&
         p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 3)) &&
         (p1 > (avoidance / 7))))
    {
        /* Simulation */
        if (borg_simulate) return (p1 - p2);

        borg_note("# Attempting to cast Speed");

        /* No resting to recoop mana */
        borg_no_rest_prep = borg_skill[BI_CLEVEL] * 1000;

        /* do it! */
        if (borg_zap_rod(sv_rod_speed) ||
            borg_activate_item(act_haste) ||
            borg_activate_item(act_haste1) ||
            borg_activate_item(act_haste2) ||
            borg_use_staff(sv_staff_speed) ||
            borg_quaff_potion(sv_potion_speed))
            /* Value */
            return (p1 - p2);

        if (borg_spell_fail(HASTE_SELF, fail_allowed)) return (p1 - p2);
    }

    /* default to can't do it. */
    return (0);
}

/* Grim Purpose
 *
 * Grants temporary confusion resistance and free action; value is the
 * danger reduction (plus a small bonus to break ties with no-op).
 */
static int borg_defend_aux_grim_purpose(int p1)
{
    int p2 = 0;
    int fail_allowed = 25;
    bool save_conf = borg_skill[BI_RCONF];
    bool save_fa = borg_skill[BI_FRACT];

    /* already protected */
    if (save_conf && save_fa) return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] ||
        borg_skill[BI_ISFORGET]) return (0);

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance) fail_allowed -= 19;
    else /* a little scary */
        if (p1 > (avoidance * 2) / 3) fail_allowed -= 10;
        else /* not very scary, allow lots of fail */
            if (p1 < avoidance / 3) fail_allowed += 10;

    if (!borg_spell_okay_fail(GRIM_PURPOSE, fail_allowed)) return (0);

    /* elemental and PFE use the 'averaging' method for danger.
     * Redefine p1 as such. */
    p1 = borg_danger(c_y, c_x, 1, false, false);

    /* pretend we are protected and look again; restore the real flags after */
    borg_skill[BI_RCONF] = true;
    borg_skill[BI_FRACT] = true;
    p2 = borg_danger(c_y, c_x, 1, false, false);
    borg_skill[BI_RCONF] = save_conf;
    borg_skill[BI_FRACT] = save_fa;

    /* if this is an improvement and we may not avoid monster now and */
    /* we may have before */
    if (p1 > p2 &&
        p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 2)) &&
        p1 > (avoidance / 7))
    {
        /* Simulation */
        if (borg_simulate) return (p1 - p2 + 2);

        borg_note("# Attempting to cast Grim Purpose");

        /* do it! */
        if (borg_spell(GRIM_PURPOSE))
            /* No resting to recoop mana */
            borg_no_rest_prep = 13000;

        /* Value */
        return (p1 - p2 + 2);
    }

    /* default to can't do it.
     */
    return (0);
}

/* all resists (Fire/Elec/Cold/Acid/Poison at once, via activation only) */
static int borg_defend_aux_resist_fecap(int p1)
{
    int p2 = 0;
    bool save_fire = false, save_acid = false, save_poison = false,
         save_elec = false, save_cold = false;

    /* already covered on all five elements */
    if (borg_skill[BI_TRFIRE] && borg_skill[BI_TRACID] &&
        borg_skill[BI_TRPOIS] && borg_skill[BI_TRELEC] &&
        borg_skill[BI_TRCOLD]) return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] ||
        borg_skill[BI_ISFORGET]) return (0);

    if (!borg_equips_item(act_resist_all, true) &&
        !borg_equips_item(act_rage_bless_resist, true)) return (0);

    /* elemental and PFE use the 'averaging' method for danger.
     * Redefine p1 as such. */
    p1 = borg_danger(c_y, c_x, 1, false, false);

    /* pretend we are protected and look again; restore real flags after */
    save_fire = borg_skill[BI_TRFIRE];
    save_elec = borg_skill[BI_TRELEC];
    save_cold = borg_skill[BI_TRCOLD];
    save_acid = borg_skill[BI_TRACID];
    save_poison = borg_skill[BI_TRPOIS];
    borg_skill[BI_TRFIRE] = true;
    borg_skill[BI_TRELEC] = true;
    borg_skill[BI_TRCOLD] = true;
    borg_skill[BI_TRACID] = true;
    borg_skill[BI_TRPOIS] = true;
    p2 = borg_danger(c_y, c_x, 1, false, false);
    borg_skill[BI_TRFIRE] = save_fire;
    borg_skill[BI_TRELEC] = save_elec;
    borg_skill[BI_TRCOLD] = save_cold;
    borg_skill[BI_TRACID] = save_acid;
    borg_skill[BI_TRPOIS] = save_poison;

    /* Hack -
     * If the borg is fighting a particular unique enhance the
     * benefit of the spell.
     */
    if (borg_fighting_unique &&
        (streq(r_info[unique_on_level].name, "The Tarrasque")))
        p2 = p2 * 8 / 10;

    /* Hack -
     * If borg is high enough level, he does not need to worry
     * about mana consumption.  Cast the good spell.
     */
    if (borg_skill[BI_CLEVEL] >= 45) p2 = p2 * 8 / 10;

    /* if this is an improvement and we may not avoid monster now and */
    /* we may have before */
    if (p1 > p2 &&
        p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 2)) &&
        p1 > (avoidance / 7))
    {
        /* Simulation */
        if (borg_simulate) return (p1 - p2 + 2);

        borg_note("# Attempting to cast FECAP");

        /* do it! */
        if (borg_activate_item(act_resist_all) ||
            borg_activate_item(act_rage_bless_resist))
            /* No resting to recoop mana */
            borg_no_rest_prep = 21000;

        /* Value */
        return (p1 - p2 + 2);
    }

    /* default to can't do it. */
    return (0);
}

/* fire */
static int borg_defend_aux_resist_f(int p1)
{
    int p2 = 0;
    int fail_allowed = 25;
    bool save_fire = false;

    save_fire = borg_skill[BI_TRFIRE];

    /* already resistant */
    if (borg_skill[BI_TRFIRE]) return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] ||
        borg_skill[BI_ISFORGET]) return (0);

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance) fail_allowed -= 19;
    else /* a little scary */
        if (p1 > (avoidance * 2) / 3) fail_allowed -= 10;
        else /* not very scary, allow lots of fail */
            if (p1 < avoidance / 3) fail_allowed += 10;

    if (!borg_spell_okay_fail(RESISTANCE, fail_allowed) &&
        !borg_equips_item(act_resist_all, true) &&
        !borg_equips_item(act_rage_bless_resist, true) &&
        !borg_equips_ring(sv_ring_flames) &&
        -1 == borg_slot(TV_POTION, sv_potion_resist_heat)) return (0);

    /* elemental and PFE use the 'averaging' method for danger.
     * Redefine p1 as such. */
    p1 = borg_danger(c_y, c_x, 1, false, false);

    /* pretend we are protected and look again */
    borg_skill[BI_TRFIRE] = true;
    p2 = borg_danger(c_y, c_x, 1, false, false);
    borg_skill[BI_TRFIRE] = save_fire;

    /* Hack -
     * If the borg is fighting a particular unique enhance the
     * benefit of the spell.
     */
    if (borg_fighting_unique &&
        (streq(r_info[unique_on_level].name, "The Tarrasque")))
        p2 = p2 * 8 / 10;

    /* if this is an improvement and we may not avoid monster now and */
    /* we may have before */
    if (p1 > p2 &&
        p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 2)) &&
        p1 > (avoidance / 7))
    {
        /* Simulation */
        if (borg_simulate) return (p1 - p2);

        borg_note("# Attempting to cast RFire");

        /* do it!
         */
        if (borg_activate_ring(sv_ring_flames))
        {
            /* Ring also attacks so target self */
            borg_keypress('*');
            borg_keypress('5');
            return (p1 - p2);
        }

        if (borg_activate_item(act_resist_all) ||
            borg_activate_item(act_rage_bless_resist) ||
            borg_spell_fail(RESISTANCE, fail_allowed) ||
            borg_quaff_potion(sv_potion_resist_heat))
            /* No resting to recoop mana */
            borg_no_rest_prep = 21000;

        /* Value */
        return (p1 - p2);
    }

    /* default to can't do it. */
    return (0);
}

/* cold */
static int borg_defend_aux_resist_c(int p1)
{
    int p2 = 0;
    int fail_allowed = 25;
    bool save_cold = false;

    /* already resistant */
    if (borg_skill[BI_TRCOLD]) return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] ||
        borg_skill[BI_ISFORGET]) return (0);

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance) fail_allowed -= 19;
    else /* a little scary */
        if (p1 > (avoidance * 2) / 3) fail_allowed -= 10;
        else /* not very scary, allow lots of fail */
            if (p1 < avoidance / 3) fail_allowed += 10;

    if (!borg_spell_okay_fail(RESISTANCE, fail_allowed) &&
        !borg_equips_item(act_resist_all, true) &&
        !borg_equips_item(act_rage_bless_resist, true) &&
        !borg_equips_ring(sv_ring_ice) &&
        -1 == borg_slot(TV_POTION, sv_potion_resist_cold)) return (0);

    /* elemental and PFE use the 'averaging' method for danger.
     * Redefine p1 as such. */
    p1 = borg_danger(c_y, c_x, 1, false, false);

    save_cold = borg_skill[BI_TRCOLD];

    /* pretend we are protected and look again */
    borg_skill[BI_TRCOLD] = true;
    p2 = borg_danger(c_y, c_x, 1, false, false);
    borg_skill[BI_TRCOLD] = save_cold;

    /* Hack -
     * If the borg is fighting a particular unique enhance the
     * benefit of the spell.
     */
    if (borg_fighting_unique &&
        (streq(r_info[unique_on_level].name, "The Tarrasque")))
        p2 = p2 * 8 / 10;

    /* if this is an improvement and we may not avoid monster now and */
    /* we may have before */
    if (p1 > p2 &&
        p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 2)) &&
        p1 > (avoidance / 7))
    {
        /* Simulation */
        if (borg_simulate) return (p1 - p2);

        borg_note("# Attempting to cast RCold");

        /* do it! */
        if (borg_activate_ring(sv_ring_ice))
        {
            /* Ring also attacks so target self */
            borg_keypress('*');
            borg_keypress('5');
            return (p1 - p2);
        }

        if (borg_activate_item(act_resist_all) ||
            borg_activate_item(act_rage_bless_resist) ||
            borg_spell_fail(RESISTANCE, fail_allowed) ||
            borg_quaff_potion(sv_potion_resist_cold))
            /* No resting to recoop mana */
            borg_no_rest_prep = 21000;

        /* Value */
        return (p1 - p2);
    }

    /* default to can't do it. */
    return (0);
}

/* acid */
static int borg_defend_aux_resist_a(int p1)
{
    int p2 = 0;
    int fail_allowed = 25;
    bool save_acid = false;

    /* already resistant */
    if (borg_skill[BI_TRACID]) return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] ||
        borg_skill[BI_ISFORGET]) return (0);

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance) fail_allowed -= 19;
    else /* a little scary */
        if (p1 > (avoidance * 2) / 3) fail_allowed -= 10;
        else /* not very scary, allow lots of fail */
            if (p1 < avoidance / 3) fail_allowed += 10;

    if (!borg_spell_okay_fail(RESISTANCE, fail_allowed) &&
        !borg_equips_item(act_resist_all, true) &&
        !borg_equips_item(act_rage_bless_resist, true) &&
        !borg_equips_ring(sv_ring_acid)) return (0);

    /* elemental and PFE use the 'averaging' method for danger.
     * Redefine p1 as such. */
    p1 = borg_danger(c_y, c_x, 1, false, false);

    save_acid = borg_skill[BI_TRACID];

    /* pretend we are protected and look again */
    borg_skill[BI_TRACID] = true;
    p2 = borg_danger(c_y, c_x, 1, false, false);
    borg_skill[BI_TRACID] = save_acid;

    /* if this is an improvement and we may not avoid monster now and */
    /* we may have before */
    if (p1 > p2 &&
        p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 2)) &&
        p1 > (avoidance / 7))
    {
        /* Simulation */
        if (borg_simulate) return (p1 - p2);

        borg_note("# Attempting to cast RAcid");

        /* do it!
         */
        if (borg_spell(RESISTANCE))
        {
            return (p1 - p2);
        }

        if (borg_activate_ring(sv_ring_acid))
        {
            /* Ring also attacks so target self */
            borg_keypress('*');
            borg_keypress('5');
            return (p1 - p2);
        }

        if (borg_activate_item(act_resist_all) ||
            borg_activate_item(act_rage_bless_resist))
            /* No resting to recoop mana */
            borg_no_rest_prep = 21000;

        /* Value */
        return (p1 - p2);
    }

    /* default to can't do it. */
    return (0);
}

/* poison */
static int borg_defend_aux_resist_p(int p1)
{
    int p2 = 0;
    int fail_allowed = 25;
    bool save_poison = false;

    /* already resistant */
    if (borg_skill[BI_TRPOIS]) return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] ||
        borg_skill[BI_ISFORGET]) return (0);

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance) fail_allowed -= 19;
    else /* a little scary */
        if (p1 > (avoidance * 2) / 3) fail_allowed -= 10;
        else /* not very scary, allow lots of fail */
            if (p1 < avoidance / 3) fail_allowed += 10;

    if (!borg_spell_okay_fail(RESIST_POISON, fail_allowed) &&
        !borg_equips_item(act_resist_all, true) &&
        !borg_equips_item(act_rage_bless_resist, true) &&
        !borg_spell_okay_fail(RESISTANCE, fail_allowed)) return (0);

    /* elemental and PFE use the 'averaging' method for danger.
     * Redefine p1 as such. */
    p1 = borg_danger(c_y, c_x, 1, false, false);

    save_poison = borg_skill[BI_TRPOIS];

    /* pretend we are protected and look again */
    borg_skill[BI_TRPOIS] = true;
    p2 = borg_danger(c_y, c_x, 1, false, false);
    borg_skill[BI_TRPOIS] = save_poison;

    /* if this is an improvement and we may not avoid monster now and */
    /* we may have before */
    if (p1 > p2 &&
        p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 2)) &&
        p1 > (avoidance / 7))
    {
        /* Simulation */
        if (borg_simulate) return (p1 - p2);

        borg_note("# Attempting to cast RPois");

        /* do it! */
        if (borg_spell_fail(RESIST_POISON, fail_allowed) ||
            borg_activate_item(act_resist_all) ||
            borg_activate_item(act_rage_bless_resist) ||
            borg_spell_fail(RESISTANCE, fail_allowed))
            /* No resting to recoop mana */
            borg_no_rest_prep = 21000;

        /* Value */
        return (p1 - p2);
    }

    /* default to can't do it. */
    return (0);
}

/* Protection From Evil -- value is the (averaged) danger reduction. */
static int borg_defend_aux_prot_evil(int p1)
{
    int p2 = 0;
    int fail_allowed = 25;
    bool pfe_spell = false;
    borg_grid* ag = &borg_grids[c_y][c_x];

    /* if already protected */
    if (borg_prot_from_evil) return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] ||
        borg_skill[BI_ISFORGET]) return (0);

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance) fail_allowed -= 19;
    else /* a little scary */
        if (p1 > (avoidance * 2) / 3) fail_allowed -= 5;
        else /* not very scary, allow lots of fail */
            if (p1 < avoidance / 3) fail_allowed += 10;

    if (borg_spell_okay_fail(PROTECTION_FROM_EVIL, fail_allowed)) pfe_spell = true;

    if (0 <= borg_slot(TV_SCROLL, sv_scroll_protection_from_evil)) pfe_spell = true;

    /* cannot read a scroll while blind/confused/hallucinating or in the dark */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] ||
        borg_skill[BI_ISIMAGE]) pfe_spell = false;
    if (!(ag->info & BORG_GLOW) && borg_skill[BI_CURLITE] == 0) pfe_spell = false;

    if (borg_equips_item(act_protevil, true)) pfe_spell = true;

    if (pfe_spell == false) return (0);

    /* elemental and PFE use the 'averaging' method for danger.
     * Redefine p1 as such. */
    p1 = borg_danger(c_y, c_x, 1, false, false);

    /* pretend we are protected and look again */
    borg_prot_from_evil = true;
    p2 = borg_danger(c_y, c_x, 1, false, false);
    borg_prot_from_evil = false;

    /* if this is an improvement and we may not avoid monster now and */
    /* we may have before */
    if ((p1 > p2 &&
         p2 <= (borg_fighting_unique ?
((avoidance * 2) / 3) : (avoidance / 2)) && p1 > (avoidance / 7)) || (borg_cfg[BORG_MONEY_SCUM_AMOUNT] >= 1 && borg_skill[BI_CDEPTH] == 0)) { /* Simulation */ if (borg_simulate) return (p1 - p2); borg_note("# Attempting to cast PFE"); /* do it! */ if (borg_spell_fail(PROTECTION_FROM_EVIL, fail_allowed) || borg_activate_item(act_protevil) || borg_read_scroll(sv_scroll_protection_from_evil)) /* No resting to recoop mana */ borg_no_rest_prep = borg_skill[BI_CLEVEL] * 1000; /* Value */ return (p1 - p2); } /* default to can't do it. */ return (0); } static int borg_defend_aux_shield(int p1) { int p2 = 0; /* if already protected */ if (borg_shield) return (0); /* Cant when screwed */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET]) return (0); if (borg_has[kv_mush_stoneskin] <= 0) return (0); /* pretend we are protected and look again */ borg_shield = true; p2 = borg_danger(c_y, c_x, 1, true, false); borg_shield = false; /* slightly enhance the value if fighting a unique */ if (borg_fighting_unique) p2 = (p2 * 7 / 10); /* if this is an improvement and we may not avoid monster now and */ /* we may have before */ if (p1 > p2 && p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 2)) && p1 > (avoidance / 7)) { /* Simulation */ if (borg_simulate) return (p1 - p2); borg_note("# Attempting to eat a stone skin"); /* do it! */ if (borg_eat_food(TV_MUSHROOM, sv_mush_stoneskin)) { /* No resting to recoop mana */ borg_no_rest_prep = 2000; return (p1 - p2); } } /* default to can't do it. */ return (0); } /* * Try to get rid of all of the non-uniques around so you can go at it * 'mano-e-mano' with the unique. Teleport Other. 
 */
/*
 * Teleport Other as a defence: target the single most dangerous monster
 * and bounce it away.  p1 is the current danger; returns the simulated
 * danger reduction.  Runs in two modes: simulation (value only) and
 * execution (actually casts, using the target set during simulation).
 */
static int borg_defend_aux_tele_away(int p1)
{
    int p2 = p1;
    int fail_allowed = 50;
    bool spell_ok = false;
    int i, x, y;

    borg_grid* ag;

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    /*
     * Only tport monster away if scared or getting low on mana
     */
    if (borg_fighting_unique)
    {
        if (p1 < avoidance * 7 / 10 &&
            borg_skill[BI_CURSP] > 30 && borg_simulate)
            return (0);
    }
    else
    {
        if (p1 < avoidance * 5 / 10 &&
            borg_skill[BI_CURSP] > 30 && borg_simulate)
            return (0);
    }

    /* No real Danger to speak of */
    if (p1 < avoidance * 4 / 10 && borg_simulate)
        return (0);

    spell_ok = false;

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance * 3)
        fail_allowed -= 10;
    else
        /* scary */
        if (p1 > avoidance * 2)
            fail_allowed -= 5;
        else
            /* a little scary */
            if (p1 > (avoidance * 5) / 2)
                fail_allowed += 5;

    /* do I have the ability? */
    if (borg_spell_okay_fail(TELEPORT_OTHER, fail_allowed) ||
        borg_equips_item(act_tele_other, true) ||
        (-1 != borg_slot(TV_WAND, sv_wand_teleport_away) &&
         borg_items[borg_slot(TV_WAND, sv_wand_teleport_away)].pval))
        spell_ok = true;

    if (!spell_ok)
        return (0);

    /* No Teleport Other if surrounded */
    if (borg_surrounded() == true)
        return (0);

    /* Borg_temp_n temporarily stores several things.
     * Some of the borg_attack() sub-routines use these numbers,
     * which would have been filled in borg_attack().
     * Since this is a defence manuever which will move into
     * and borrow some of the borg_attack() subroutines, we need
     * to make sure that the borg_temp_n arrays are properly
     * filled.  Otherwise, the borg will attempt to consider
     * these grids which were left filled by some other routine.
     * Which was probably a flow routine which stored about 200
     * grids into the array.
     * Any change in inclusion/exclusion criteria for filling this
     * array in borg_attack() should be included here also.
     */
    /* Nobody around so dont worry */
    if (!borg_kills_cnt && borg_simulate)
        return (0);

    /* Reset list */
    borg_temp_n = 0;
    borg_tp_other_n = 0;

    /* Find "nearby" monsters */
    for (i = 1; i < borg_kills_nxt; i++)
    {
        borg_kill* kill;

        /* Monster */
        kill = &borg_kills[i];

        /* Skip dead monsters */
        if (!kill->r_idx)
            continue;

        /* Require current knowledge */
        if (kill->when < borg_t - 2)
            continue;

        /* Acquire location */
        x = kill->x;
        y = kill->y;

        /* Get grid */
        ag = &borg_grids[y][x];

        /* Never shoot off-screen */
        if (!(ag->info & BORG_OKAY))
            continue;

        /* Never shoot through walls */
        if (!(ag->info & BORG_VIEW))
            continue;
        if ((ag->feat >= FEAT_RUBBLE) && (ag->feat <= FEAT_PERM))
            continue;

        /* Check the distance XXX XXX XXX */
        if (borg_distance(c_y, c_x, y, x) > z_info->max_range)
            continue;

        /* Save the location (careful) */
        borg_temp_x[borg_temp_n] = x;
        borg_temp_y[borg_temp_n] = y;
        borg_temp_n++;
    }

    /* No targets for me. */
    if (!borg_temp_n && borg_simulate)
        return (0);

    /* choose, then target a bad guy.
     * Damage will be the danger to my grid which the monster creates.
     * We are targetting the single most dangerous monster.
     * p2 will be the original danger (p1) minus the danger from the most dangerous
     * monster eliminated.
     * ie: if we are fighting only a single monster who is generating 500 danger and we
     * target him, then p2 _should_ end up 0, since p1 - his danger is 500-500.
     * If we are fighting two guys each creating 500 danger, then p2 will be 500, since
     * 1000-500 = 500.
     */
    p2 = p1 - borg_launch_bolt(-1, p1, BORG_ATTACK_AWAY_ALL, z_info->max_range, 0);

    /* check to see if I am left better off */
    if (borg_simulate)
    {
        /* Reset list */
        borg_temp_n = 0;
        borg_tp_other_n = 0;

        if (p1 > p2 &&
            p2 < avoidance / 2)
        {
            /* Simulation */
            return (p1 - p2);
        }
        else
            return (0);
    }

    /* Log the Path for Debug */
    borg_log_spellpath(true);

    /* Log additional info for debug */
    for (i = 0; i < borg_tp_other_n; i++)
    {
        borg_note(format("# T.O. %d, index %d (%d,%d)",
                         borg_tp_other_n,
                         borg_tp_other_index[i],
                         borg_tp_other_y[i],
                         borg_tp_other_x[i]));
    }

    /* Reset list */
    borg_temp_n = 0;
    borg_tp_other_n = 0;

    /* Cast the spell */
    if (borg_spell(TELEPORT_OTHER) ||
        borg_activate_item(act_tele_other) ||
        borg_aim_wand(sv_wand_teleport_away))
    {
        /* Use target */
        borg_keypress('5');

        /* Set our shooting flag */
        successful_target = -1;

        /* Value */
        return (p2);
    }

    return (0);
}

/*
 * Hero to prepare for battle, +12 tohit.
 * Returns a small fixed value (1) since this is a low-priority buff.
 */
static int borg_defend_aux_hero(int p1)
{
    int fail_allowed = 15;

    /* already hero */
    if (borg_hero)
        return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    if (!borg_spell_okay_fail(HEROISM, fail_allowed) &&
        -1 == borg_slot(TV_POTION, sv_potion_heroism))
        return (0);

    /* if we are in some danger but not much, go for a quick bless */
    if ((p1 > avoidance * 1 / 10 && p1 < avoidance * 5 / 10) ||
        (borg_fighting_unique && p1 < avoidance * 7 / 10))
    {
        /* Simulation */
        /* hero is a low priority */
        if (borg_simulate)
            return (1);

        borg_note("# Attempting to cast Hero");

        /* do it! */
        if (borg_spell(HEROISM) ||
            borg_quaff_potion(sv_potion_heroism))
        {
            /* No resting to recoop mana */
            borg_no_rest_prep = 10000;
            return 1;
        }
    }

    return (0);
}

/*
 * Rapid Regen to prepare for battle
 * Low-priority buff; only worth it with a sizable HP pool.
 */
static int borg_defend_aux_regen(int p1)
{
    int fail_allowed = 15;

    /* already regenerating */
    if (borg_regen)
        return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    /* don't bother if not much to regenerate */
    if (borg_skill[BI_MAXHP] < 100)
        return (0);

    if (!borg_spell_okay_fail(RAPID_REGENERATION, fail_allowed))
        return (0);

    /* if we are in some danger but not much, go for a quick bless */
    if ((p1 > avoidance * 1 / 10 && p1 < avoidance * 5 / 10) ||
        (borg_fighting_unique && p1 < avoidance * 7 / 10))
    {
        /* Simulation */
        /* regen is a low priority */
        if (borg_simulate)
            return (1);

        /* do it!
 */
        if (borg_spell(RAPID_REGENERATION))
            return 1;
    }

    return (0);
}

/*
 * Berserk to prepare for battle, +24 tohit, -10 AC
 * Returns a small fixed value (5); low-priority buff.
 */
static int borg_defend_aux_berserk(int p1)
{
    int fail_allowed = 15;

    /* already berserk */
    if (borg_berserk)
        return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    if (!borg_spell_okay_fail(BERSERK_STRENGTH, fail_allowed) &&
        -1 == borg_slot(TV_POTION, sv_potion_berserk) &&
        !borg_equips_item(act_berserker, true) &&
        !borg_equips_item(act_rage_bless_resist, true) &&
        !borg_equips_item(act_shero, true))
        return (0);

    /* if we are in some danger but not much, go for a quick bless */
    if ((p1 > avoidance * 1 / 10 && p1 < avoidance * 5 / 10) ||
        (borg_fighting_unique && p1 < avoidance * 7 / 10))
    {
        /* Simulation */
        /* berserk is a low priority */
        if (borg_simulate)
            return (5);

        /* do it! */
        if (borg_spell(BERSERK_STRENGTH) ||
            borg_activate_item(act_berserker) ||
            borg_activate_item(act_rage_bless_resist) ||
            borg_activate_item(act_shero) ||
            borg_quaff_potion(sv_potion_berserk))
            return (5);
    }

    return (0);
}

/*
 * Smite Evil to prepare for battle
 * Returns a small fixed value (5); low-priority buff.
 */
static int borg_defend_aux_smite_evil(int p1)
{
    int fail_allowed = 15;

    /* already smiting evil */
    if (borg_smite_evil || borg_skill[BI_WS_EVIL])
        return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    if (!borg_spell_okay_fail(SMITE_EVIL, fail_allowed))
        return (0);

    // !FIX !TODO !AJG we should probably figure out if we are about to fight something evil.

    /* if we are in some danger but not much, go for a quick bless */
    if ((p1 > avoidance * 1 / 10 && p1 < avoidance * 5 / 10) ||
        (borg_fighting_unique && p1 < avoidance * 7 / 10))
    {
        /* Simulation */
        /* smite evil is a low priority */
        if (borg_simulate)
            return (5);

        /* do it!
         */
        if (borg_spell(SMITE_EVIL))
            return (5);
    }

    return (0);
}

/* Glyph of Warding and Rune of Protection
 *
 * Place a glyph under our feet if that would measurably reduce danger.
 * p1 is the current danger; returns the simulated danger reduction.
 */
static int borg_defend_aux_glyph(int p1)
{
    int p2 = 0, i;
    int fail_allowed = 25;
    bool glyph_spell = false;

    borg_grid* ag = &borg_grids[c_y][c_x];

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    /* He should not cast it while on an object.
     * I have addressed this inadequately in borg9.c when dealing with
     * messages.  The message "the object resists" will delete the glyph
     * from the array.  Then I set a broken door on that spot, the borg ignores
     * broken doors, so he won't loop.
     */
    if ((ag->take) || (ag->trap) ||
        (ag->feat == FEAT_LESS) || (ag->feat == FEAT_MORE) ||
        (ag->feat == FEAT_OPEN) || (ag->feat == FEAT_BROKEN))
    {
        return (0);
    }

    /* Morgoth breaks these in one try so its a waste of mana against him */
    if (borg_fighting_unique >= 10)
        return (0);

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance)
        fail_allowed -= 19;
    else
        /* a little scary */
        if (p1 > (avoidance * 2) / 3)
            fail_allowed -= 5;
        else
            /* not very scary, allow lots of fail */
            if (p1 < avoidance / 3)
                fail_allowed += 20;

    if (borg_spell_okay_fail(GLYPH_OF_WARDING, fail_allowed))
        glyph_spell = true;

    if (0 <= borg_slot(TV_SCROLL, sv_scroll_rune_of_protection))
        glyph_spell = true;

    /* Cannot read a scroll while blind/confused/hallucinating or in the dark */
    if ((borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISIMAGE]) && glyph_spell)
        glyph_spell = false;
    if (!(ag->info & BORG_GLOW) && borg_skill[BI_CURLITE] == 0)
        glyph_spell = false;

    if (!glyph_spell)
        return (0);

    /* pretend we are protected and look again */
    borg_on_glyph = true;
    p2 = borg_danger(c_y, c_x, 1, true, false);
    borg_on_glyph = false;

    /* if this is an improvement and we may not avoid monster now and */
    /* we may have before */
    if (p1 > p2 &&
        p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 2)) &&
        p1 > (avoidance / 7))
    {
        /* Simulation */
        if (borg_simulate)
            return (p1 - p2);

        /* do it!
         */
        if (borg_spell_fail(GLYPH_OF_WARDING, fail_allowed) ||
            borg_read_scroll(sv_scroll_rune_of_protection))
        {
            /* Check for an existing glyph */
            for (i = 0; i < track_glyph.num; i++)
            {
                /* Stop if we already new about this glyph */
                if ((track_glyph.x[i] == c_x) && (track_glyph.y[i] == c_y))
                    return (p1 - p2);
            }

            /* Track the newly discovered glyph */
            if (track_glyph.num < track_glyph.size)
            {
                borg_note("# Noting the creation of a glyph.");
                track_glyph.x[track_glyph.num] = c_x;
                track_glyph.y[track_glyph.num] = c_y;
                track_glyph.num++;
            }
            return (p1 - p2);
        }
    }

    /* default to can't do it. */
    return (0);
}

/* Create Door
 *
 * Wall ourselves in with doors when fighting a summoner.
 * p1 is the current danger; returns the simulated danger reduction.
 */
static int borg_defend_aux_create_door(int p1)
{
    int p2 = 0;
    int fail_allowed = 30;
    int door_bad = 0;
    int door_x = 0, door_y = 0, x = 0, y = 0;

    borg_grid* ag;

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    /* any summoners near?*/
    if (!borg_fighting_summoner)
        return (0);

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance)
        fail_allowed -= 19;
    else
        /* a little scary */
        if (p1 > (avoidance * 2) / 3)
            fail_allowed -= 5;
        else
            /* not very scary, allow lots of fail */
            if (p1 < avoidance / 3)
                fail_allowed += 20;

    if (!borg_spell_okay_fail(DOOR_CREATION, fail_allowed))
        return (0);

    /* Do not cast if surounded by doors or something */
    /* Get grid */
    for (door_x = -1; door_x <= 1; door_x++)
    {
        for (door_y = -1; door_y <= 1; door_y++)
        {
            /* Acquire location */
            x = door_x + c_x;
            y = door_y + c_y;

            ag = &borg_grids[y][x];

            /* track spaces already protected */
            if ((ag->glyph) || ag->kill ||
                ((ag->feat == FEAT_GRANITE) || (ag->feat == FEAT_PERM) ||
                 (ag->feat == FEAT_CLOSED)))
            {
                door_bad++;
            }

            /* track spaces that cannot be protected */
            if ((ag->take) || (ag->trap) ||
                (ag->feat == FEAT_LESS) || (ag->feat == FEAT_MORE) ||
                (ag->feat == FEAT_OPEN) || (ag->feat == FEAT_BROKEN) ||
                (ag->kill))
            {
                door_bad++;
            }
        }
    }

    /* Track it */
    /* lets make sure that we going to be benifited */
    if (door_bad
 >= 6)
    {
        /* not really worth it. Only 2 spaces protected */
        return (0);
    }

    /* pretend we are protected and look again */
    borg_create_door = true;
    p2 = borg_danger(c_y, c_x, 1, true, false);
    borg_create_door = false;

    /* if this is an improvement and we may not avoid monster now and */
    /* we may have before */
    if (p1 > p2 &&
        p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 2)) &&
        p1 > (avoidance / 7))
    {
        /* Simulation */
        if (borg_simulate)
            return (p1 - p2);

        /* do it! */
        if (borg_spell_fail(DOOR_CREATION, fail_allowed))
        {
            /* Set the breeder flag to keep doors closed. Avoid summons */
            breeder_level = true;

            /* Must make a new Sea too */
            borg_needs_new_sea = true;

            /* Value */
            return (p1 - p2);
        }
    }

    /* default to can't do it. */
    return (0);
}

/* This will simulate and cast the mass genocide spell.
 *
 * Mass Banishment zaps all non-unique monsters within 20 grids at the
 * cost of HP strain (estimated here at 3 HP per monster).  p1 is the
 * current danger; returns the simulated danger reduction.
 */
static int borg_defend_aux_mass_genocide(int p1)
{
    int hit = 0, i = 0, p2;
    int b_p = 0, p;

    borg_kill* kill;
    struct monster_race* r_ptr;

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    /* see if prayer is legal */
    if (!borg_spell_okay_fail(MASS_BANISHMENT, 40) &&
        !borg_equips_item(act_banishment, true) &&
        (borg_skill[BI_AMASSBAN] == 0))/* Mass Banishment scroll */
        return (0);

    /* See if he is in real danger */
    if (p1 < avoidance * 12 / 10 && borg_simulate)
        return (0);

    /* Find a monster and calculate its danger */
    for (i = 1; i < borg_kills_nxt; i++)
    {
        /* Monster */
        kill = &borg_kills[i];
        r_ptr = &r_info[kill->r_idx];

        /* Skip dead monsters */
        if (!kill->r_idx)
            continue;

        /* Check the distance */
        if (borg_distance(c_y, c_x, kill->y, kill->x) > 20)
            continue;

        /* we try not to genocide uniques */
        if (rf_has(r_ptr->flags, RF_UNIQUE))
            continue;

        /* Calculate danger */
        p = borg_danger_aux(c_y, c_x, 1, i, true, true);

        /* store the danger for this type of monster */
        b_p = b_p + p;
        /* each banished monster strains us for 3 HP */
        hit = hit + 3;
    }

    /* normalize the value */
    p2 = (p1 - b_p);
    if (p2 < 0)
        p2 = 0;

    /* if strain (plus a pad incase we did not know about some monsters)
     * is greater than hp, don't cast it
     */
    if ((hit * 12 / 10) >= borg_skill[BI_CURHP])
        return (0);

    /* Penalize the strain from casting the spell */
    p2 = p2 + hit;

    /* Be more likely to use this if fighting Morgoth */
    if (borg_fighting_unique >= 10 && (hit / 3 > 8))
    {
        p2 = p2 * 6 / 10;
    }

    /* if this is an improvement and we may not avoid monster now and */
    /* we may have before */
    if (p1 > p2 &&
        p2 <= (borg_fighting_unique ? (avoidance * 2 / 3) : (avoidance / 2)))
    {
        /* Simulation */
        if (borg_simulate)
            return (p1 - p2);

        /* Cast the spell */
        if (borg_read_scroll(sv_scroll_mass_banishment) ||
            borg_activate_item(act_banishment) ||
            borg_spell(MASS_BANISHMENT))
        {
            /* Remove monsters from the borg_kill */
            for (i = 1; i < borg_kills_nxt; i++)
            {
                borg_kill* tmp_kill;
                struct monster_race* tmp_r_ptr;

                /* Monster */
                tmp_kill = &borg_kills[i];
                tmp_r_ptr = &r_info[tmp_kill->r_idx];

                /* Cant kill uniques like this */
                if (rf_has(tmp_r_ptr->flags, RF_UNIQUE))
                    continue;

                /* remove this monster */
                borg_delete_kill(i);
            }

            /* Value */
            return (p1 - p2);
        }
    }

    /* Not worth it */
    return (0);
}

/* This will simulate and cast the genocide spell.
 * There are two seperate functions happening here.
 * 1. will genocide the race which is immediately threatening the borg.
 * 2. will genocide the race which is most dangerous on the level.  Though it may not be
 * threatening the borg right now.  It was considered to nuke the escorts of a unique.
 * But it could also be used to nuke a race if it becomes too dangerous, for example
 * a summoner called up 15-20 hounds, and they must be dealt with.
 * The first option may be called at any time.  While the 2nd option is only called when the
 * borg is in relatively good health.
 */
static int borg_defend_aux_genocide(int p1)
{
    int i, p, u, b_i = 0;
    int p2 = 0;
    int threat = 0;
    int max = 1;

    /* Per-race (keyed by monster display char) accumulators */
    int b_p[256];          /* danger to me from this race */
    int b_num[256];        /* count of this race (immediate-threat check) */
    int b_threat[256];     /* danger this race poses on its own grids */
    int b_threat_num[256]; /* count of this race (level-threat check) */

    int total_danger_to_me = 0;

    char tmp_genocide_target = (char)0;
    unsigned char b_threat_id = (char)0;

    bool genocide_spell = false;
    int fail_allowed = 25;

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance)
        fail_allowed -= 19;
    else
        /* a little scary */
        if (p1 > (avoidance * 2) / 3)
            fail_allowed -= 10;
        else
            /* not very scary, allow lots of fail */
            if (p1 < avoidance / 3)
                fail_allowed += 10;

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    /* Normalize the p1 value.  It contains danger added from
     * regional fear and monster fear.  Which wont be counted
     * in the post-genocide checks
     */
    if (borg_fear_region[c_y / 11][c_x / 11])
        p1 -= borg_fear_region[c_y / 11][c_x / 11];
    if (borg_fear_monsters[c_y][c_x])
        p1 -= borg_fear_monsters[c_y][c_x];

    /* Make sure I have the spell */
    if (borg_spell_okay_fail(BANISHMENT, fail_allowed) ||
        borg_equips_item(act_banishment, true) ||
        borg_equips_staff_fail(sv_staff_banishment) ||
        (-1 != borg_slot(TV_SCROLL, sv_scroll_banishment)))
    {
        genocide_spell = true;
    }

    if (genocide_spell == false)
        return (0);

    /* Don't try it if really weak */
    if (borg_skill[BI_CURHP] <= 75)
        return (0);

    /* two methods to calculate the threat:
     *1. cycle each character of monsters on screen
     *   collect collective threat of each char
     *2 select race of most dangerous guy, and choose him.
     * Method 2 is cheaper and faster.
     *
     * The borg uses method #1
     */

    /* Clear previous dangers */
    for (i = 0; i < 256; i++)
    {
        b_p[i] = 0;
        b_num[i] = 0;
        b_threat[i] = 0;
        b_threat_num[i] = 0;
    }

    /* Find a monster and calculate its danger */
    for (i = 1; i < borg_kills_nxt; i++)
    {
        borg_kill* kill;
        struct monster_race* r_ptr;

        /* Monster */
        kill = &borg_kills[i];
        r_ptr = &r_info[kill->r_idx];

        /* Our char of the monster */
        /* NOTE(review): d_char is a plain char; if it could ever be
         * negative (signed char), b_p[u] would index out of bounds --
         * presumably d_char is always ASCII here, but confirm. */
        u = r_ptr->d_char;

        /* Skip dead monsters */
        if (!kill->r_idx)
            continue;

        /* we try not to genocide uniques */
        if (rf_has(r_ptr->flags, RF_UNIQUE))
            continue;

        /* Calculate danger */
        /* Danger to me by this monster */
        p = borg_danger_aux(c_y, c_x, 1, i, true, true);

        /* Danger of this monster to his own grid */
        threat = borg_danger_aux(kill->y, kill->x, 1, i, true, true);

        /* store the danger for this type of monster */
        b_p[u] = b_p[u] + p; /* Danger to me */
        total_danger_to_me += p;
        b_threat[u] = b_threat[u] + threat; /* Danger to monsters grid */

        /* Store the number of this type of monster */
        b_num[u]++;
        b_threat_num[u]++;
    }

    /* Now, see which race contributes the most danger
     * both to me and danger on the level
     * NOTE(review): both trackers share the single 'max' variable, so the
     * level-threat winner must exceed the immediate-threat winner to be
     * recorded -- verify this interplay is intended.
     */
    for (i = 0; i < 256; i++)
    {
        /* skip the empty ones */
        if (b_num[i] == 0 && b_threat_num[i] == 0)
            continue;

        /* for the race threatening me right now */
        if (b_p[i] > max)
        {
            /* track the race */
            max = b_p[i];
            b_i = i;

            /* note the danger with this race gone.  Note that the borg does max his danger
             * at 2000 points.  It could be much, much higher at depth 99 or so.
             * What the borg should do is recalculate the danger without considering this monster
             * instead of this hack which does not yeild the true danger.
             */
            p2 = total_danger_to_me - b_p[b_i];
        }

        /* for this race on the whole level */
        if (b_threat[i] > max)
        {
            /* track the race */
            max = b_threat[i];
            b_threat_id = i;
        }

        /* Leave an interesting note for debugging */
        if (!borg_simulate)
            borg_note(format("# Race '%c' is a threat with total danger %d from %d individuals.", i, b_threat[i], b_threat_num[i]));
    }

    /* This will track and decide if it is worth genociding this dangerous race for the level */
    if (b_threat_id)
    {
        /* Not if I am weak (should have 400 HP really in case of a Pit) */
        if (borg_skill[BI_CURHP] < 375)
            b_threat_id = 0;

        /* The threat must be real */
        if (b_threat[b_threat_id] < borg_skill[BI_MAXHP] * 3)
            b_threat_id = 0;

        /* Too painful to cast it (padded to be safe incase of unknown monsters) */
        if ((b_num[b_threat_id] * 4) * 12 / 10 >= borg_skill[BI_CURHP])
            b_threat_id = 0;

        /* Loads of monsters might be a pit, in which case, try not to nuke them */
        if (b_num[b_threat_id] >= 75)
            b_threat_id = 0;

        /* Do not perform in Danger */
        if (p1 > avoidance / 5)
            b_threat_id = 0;

        /* report the danger and most dangerous race */
        if (b_threat_id)
        {
            borg_note(format("# Race '%c' is a real threat with total danger %d from %d individuals.", b_threat_id, b_threat[b_threat_id], b_threat_num[b_threat_id]));
        }

        /* Genociding this race would reduce the danger of the level */
        tmp_genocide_target = b_threat_id;
    }

    /* Consider the immediate threat genocide */
    if (b_i)
    {
        /* Too painful to cast it (padded to be safe incase of unknown monsters) */
        if ((b_num[b_i] * 4) * 12 / 10 >= borg_skill[BI_CURHP])
            b_i = 0;

        /* See if he is in real danger, generally,
         * or deeper in the dungeon, conservatively,
         */
        if (p1 < avoidance * 7 / 10 ||
            (borg_skill[BI_CDEPTH] > 75 && p1 < avoidance * 6 / 10))
            b_i = 0;

        /* Did this help improve my situation?
         */
        if (p2 <= (avoidance / 2))
            b_i = 0;

        /* Genociding this race would help me immediately */
        tmp_genocide_target = b_i;
    }

    /* Complete the genocide routine */
    if (tmp_genocide_target)
    {
        if (borg_simulate)
        {
            /* Simulation for immediate threat */
            if (b_i)
                return (p1 - p2);

            /* Simulation for immediate threat */
            if (b_threat_id)
                return (b_threat[b_threat_id]);
        }

        if (b_i)
            borg_note(format("# Banishing race '%c' (qty:%d). Danger after spell:%d", tmp_genocide_target, b_num[b_i], p2));
        if (b_threat_id)
            borg_note(format("# Banishing race '%c' (qty:%d). Danger from them:%d", tmp_genocide_target, b_threat_num[b_threat_id], b_threat[b_threat_id]));

        /* do it! ---use scrolls first since they clutter inventory */
        if (borg_read_scroll(sv_scroll_banishment) ||
            borg_spell(BANISHMENT) ||
            borg_activate_item(act_banishment) ||
            borg_use_staff(sv_staff_banishment))
        {
            /* and the winner is.....*/
            borg_keypress((tmp_genocide_target));
        }

        /* Remove this race from the borg_kill */
        for (i = 1; i < borg_kills_nxt; i++)
        {
            borg_kill* kill;
            struct monster_race* r_ptr;

            /* Monster */
            kill = &borg_kills[i];
            r_ptr = &r_info[kill->r_idx];

            /* Our char of the monster */
            if (r_ptr->d_char != tmp_genocide_target)
                continue;

            /* we do not genocide uniques */
            if (rf_has(r_ptr->flags, RF_UNIQUE))
                continue;

            /* remove this monster */
            borg_delete_kill(i);
        }

        return (p1 - p2);
    }

    /* default to can't do it. */
    return (0);
}

/* This will cast the genocide spell on Hounds and other
 * really nasty guys like Angels, Demons, Dragons and Liches
 * at the beginning of each level or when they get too numerous.
 * The acceptable numbers are defined in borg_nasties_limit[]
 * The definition for the list is in borg1.c
 * borg_nasties[7] = "ZAVULWD"
 *
 * Returns a fixed small value (10) when a nasty race is over its limit
 * and we can afford to banish it; 0 otherwise.
 */
static int borg_defend_aux_genocide_nasties(int p1)
{
    int i = 0;
    int b_i = -1;

    bool genocide_spell = false;

    /* Not if I am weak */
    if (borg_skill[BI_CURHP] < (borg_skill[BI_MAXHP] * 7 / 10) ||
        borg_skill[BI_CURHP] < 250)
        return (0);

    /* only do it when Hounds start to show up, */
    if (borg_skill[BI_CDEPTH] < 25)
        return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    /* Do not perform in Danger */
    if (p1 > avoidance / 4)
        return (0);

    if (borg_spell_okay_fail(BANISHMENT, 35) ||
        borg_equips_item(act_banishment, true) ||
        borg_equips_staff_fail(sv_staff_banishment))
    {
        genocide_spell = true;
    }

    if (genocide_spell == false)
        return (0);

    /* Find the numerous nasty in order of nastiness */
    for (i = 0; i < borg_nasties_num; i++)
    {
        if (borg_nasties_count[i] >= borg_nasties_limit[i])
            b_i = i;
    }

    /* Nothing good to Genocide */
    if (b_i == -1)
        return (0);

    if (borg_simulate)
        return (10);

    /* Note it */
    borg_note(format("# Banishing nasties '%c' (qty:%d).", borg_nasties[b_i], borg_nasties_count[b_i]));

    /* Execute -- Nice pun*/
    if (borg_activate_item(act_banishment) ||
        borg_use_staff(sv_staff_banishment) ||
        borg_spell(BANISHMENT))
    {
        /* and the winner is.....*/
        borg_keypress(borg_nasties[b_i]);

        /* set the count to not do it again */
        borg_nasties_count[b_i] = 0;

        /* Remove this race from the borg_kill */
        for (i = 1; i < borg_kills_nxt; i++)
        {
            borg_kill* kill;
            struct monster_race* r_ptr;

            /* Monster */
            kill = &borg_kills[i];
            r_ptr = &r_info[kill->r_idx];

            /* Our char of the monster */
            if (r_ptr->d_char != borg_nasties[b_i])
                continue;

            /* remove this monster */
            borg_delete_kill(i);
        }

        return (10);
    }

    /* default to can't do it. */
    return (0);
}

/* Earthquake, priest and mage spells.
 *
 * Shake the dungeon to break line of sight with multiple ranged
 * attackers.  p1 is the current danger; returns the simulated
 * danger reduction.
 */
static int borg_defend_aux_earthquake(int p1)
{
    int p2 = 9999;
    int i;
    int threat_count = 0;

    borg_kill* kill;

    /* Cast the spell */
    if (!borg_simulate &&
        (borg_spell(TREMOR) ||
         borg_spell(QUAKE) ||
         borg_spell(GRONDS_BLOW)))
    {
        /* Must make a new Sea too */
        borg_needs_new_sea = true;

        return (p2);
    }

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    /* Can I cast the spell? */
    if (!borg_spell_okay_fail(TREMOR, 35) &&
        !borg_spell_okay_fail(QUAKE, 35) &&
        !borg_spell_okay_fail(GRONDS_BLOW, 35))
        return (0);

    /* See if he is in real danger or fighting summoner*/
    if (p1 < avoidance * 6 / 10 && !borg_fighting_summoner)
        return (0);

    /* Several monsters can see the borg and they have ranged attacks */
    for (i = 0; i < borg_kills_nxt; i++)
    {
        kill = &borg_kills[i];

        /* Look for threats */
        if (borg_los(c_y, c_x, kill->y, kill->x) &&
            kill->ranged_attack &&
            borg_distance(kill->y, kill->x, c_y, c_x) >= 2)
        {
            /* They can hit me */
            threat_count++;
        }
    }

    /* Real danger? */
    if (threat_count >= 4 && p1 > avoidance * 7 / 10)
        p2 = p1 / 3;
    if (threat_count == 3 && p1 > avoidance * 7 / 10)
        p2 = p1 * 6 / 10;

    if (p1 > p2 &&
        p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3) : (avoidance / 2)) &&
        p1 > (avoidance / 5))
    {
        /* Simulation */
        if (borg_simulate)
            return (p1 - p2);
    }

    return (0);
}

/* Word of Destruction, priest and mage spells.  Death is right around the
 * corner, so kill everything.
 *
 * p1 is the current danger; the simulated value is the full danger
 * removed (everything nearby is destroyed).
 */
static int borg_defend_aux_destruction(int p1)
{
    int p2 = 0;
    int d = 0;
    bool spell = false;
    bool real_danger = false;

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    /* Cast the spell */
    if (!borg_simulate)
    {
        if (borg_spell(WORD_OF_DESTRUCTION) ||
            borg_use_staff(sv_staff_destruction))
        {
            /* Must make a new Sea too */
            borg_needs_new_sea = true;

            /* borg9.c will check for the success of the spell and remove the danger from the grids.
             */
        }

        return (500);
    }

    /* Not if in a sea of runes */
    if (borg_morgoth_position)
        return (0);

    /* See if he is in real danger */
    if (p1 > avoidance)
        real_danger = true;
    if (p1 > avoidance * 8 / 10 &&
        borg_skill[BI_CDEPTH] >= 90 &&
        borg_skill[BI_CURHP] <= 300)
        real_danger = true;

    if (real_danger == false)
        return (0);

    /* Borg_defend() is called before borg_escape().  He may have some
     * easy ways to escape (teleport scroll) but he may attempt this spell
     * of Destruction instead of using the scrolls.
     * Note that there will be some times when it is better for
     * the borg to use Destruction instead of Teleport; too
     * often he will die out-of-the-fryingpan-into-the-fire.
     * So we have him to a quick check on safe landing zones.
     */

    /* Examine landing zones from teleport scrolls instead of WoD */
    if ((borg_skill[BI_ATELEPORT] || borg_skill[BI_ATELEPORTLVL]) &&
        !borg_skill[BI_ISBLIND] && !borg_skill[BI_ISCONFUSED] &&
        borg_fighting_unique <= 4 &&
        borg_skill[BI_CURHP] >= 275)
    {
        if (borg_caution_teleport(75, 2))
            return (0);
    }

    /* Examine Landing zones from teleport staff instead of WoD */
    if (borg_skill[BI_AESCAPE] >= 2 &&
        borg_skill[BI_CURHP] >= 275)
    {
        if (borg_caution_teleport(75, 2))
            return (0);
    }

    /* capable of casting the spell */
    if (borg_spell_okay_fail(WORD_OF_DESTRUCTION, 55) ||
        borg_equips_staff_fail(sv_staff_destruction))
        spell = true;

    /* Special check for super danger--no fail check */
    if ((p1 > (avoidance * 4) ||
         (p1 > avoidance && borg_skill[BI_CURHP] <= 150)) &&
        borg_equips_staff_fail(sv_staff_destruction))
        spell = true;

    if (spell == false)
        return (0);

    /* What effect is there? */
    p2 = 0;

    /* value is d */
    d = (p1 - p2);

    /* Try not to cast this against uniques */
    if (borg_fighting_unique <= 2 && p1 < avoidance * 2)
        d = 0;
    if (borg_fighting_unique >= 10)
        d = 0;

    /* Simulation */
    if (borg_simulate)
        return (d);

    return (0);
}

/* Teleport Level, priest and mage spells.  Death is right around the
 * corner, Get off the level now.
 *
 * p1 is the current danger; the simulated value is the full danger
 * (we escape all of it by leaving the level).
 */
static int borg_defend_aux_teleportlevel(int p1)
{
    /* Cast the spell */
    if (!borg_simulate)
    {
        if (borg_spell(TELEPORT_LEVEL))
        {
            /* Must make a new Sea too */
            borg_needs_new_sea = true;

            return (500);
        }
    }

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET])
        return (0);

    /* See if he is in real danger */
    if (p1 < avoidance * 2)
        return (0);

    /* Borg_defend() is called before borg_escape().  He may have some
     * easy ways to escape (teleport scroll) but he may attempt this spell
     * of this spell instead of using the scrolls.
     * Note that there will be some times when it is better for
     * the borg to use this instead of Teleport; too
     * often he will die out-of-the-fryingpan-into-the-fire.
     * So we have him to a quick check on safe landing zones.
     */

    /* Use teleport scrolls instead if safe to land */
    if ((borg_skill[BI_ATELEPORT] || borg_skill[BI_ATELEPORTLVL]) &&
        !borg_skill[BI_ISBLIND] && !borg_skill[BI_ISCONFUSED])
    {
        if (borg_caution_teleport(65, 2))
            return (0);
    }

    /* Use teleport staff instead if safe to land */
    if (borg_skill[BI_AESCAPE] >= 2)
    {
        if (borg_caution_teleport(65, 2))
            return (0);
    }

    /* capable of casting the spell */
    if (!borg_spell_okay_fail(TELEPORT_LEVEL, 55))
        return (0);

    /* Try not to cast this against special uniques */
    if (morgoth_on_level ||
        (borg_fighting_unique >= 1 && borg_as_position))
        return (0);

    /* Simulation */
    if (borg_simulate)
        return (p1);

    return (0);
}

/* Remove Evil guys within LOS.
   The Priest Spell */
static int borg_defend_aux_banishment(int p1)
{
    int p2 = 0;
    int fail_allowed = 15;
    int i;
    int banished_monsters = 0;
    bool using_artifact;
    borg_grid* ag;

    /* Only tell away if scared */
    if (p1 < avoidance * 1 / 10)
        return (0);

    /* if very scary, do not allow for much chance of fail */
    if (p1 > avoidance * 4)
        fail_allowed -= 10;

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]
        || borg_skill[BI_ISFORGET])
        return (0);

    /* The LOS-kill activation is only considered with a decent hp reserve */
    using_artifact = borg_equips_item(act_loskill, true)
        && borg_has[BI_CURHP] > 100;

    if (!using_artifact && !borg_spell_okay_fail(BANISH_EVIL, fail_allowed))
        return (0);

    /* reset initial danger */
    p1 = 1;

    /* Two passes to determine exact danger */
    /* Pass one -- sum the danger of every living monster in LOS */
    for (i = 1; i < borg_kills_nxt; i++) {
        borg_kill* kill;

        /* Monster */
        kill = &borg_kills[i];
        ag = &borg_grids[kill->y][kill->x];

        /* Skip dead monsters */
        if (!kill->r_idx)
            continue;

        /* Check the LOS */
        if (!borg_projectable(c_y, c_x, kill->y, kill->x))
            continue;

        /* Calculate danger of who is left over */
        p1 += borg_danger_aux(c_y, c_x, 1, i, true, true);
    }

    /* Set P2 to be P1 and subtract the danger from each monster
     * which will be booted.  Non booted monsters wont decrement
     * the p2 */
    p2 = p1;

    /* Pass two -- Find a monster and calculate its danger */
    for (i = 1; i < borg_kills_nxt; i++) {
        borg_kill* kill;
        struct monster_race* r_ptr;

        /* Monster */
        kill = &borg_kills[i];
        r_ptr = &r_info[kill->r_idx];
        ag = &borg_grids[kill->y][kill->x];

        /* Skip dead monsters */
        if (!kill->r_idx)
            continue;

        /* Check the LOS */
        if (!borg_projectable(c_y, c_x, kill->y, kill->x))
            continue;

        /* Note who gets considered */
        if (!borg_simulate) {
            borg_note(format(
                "# Banishing Evil: (%d,%d): %s, danger %d. is considered.",
                kill->y, kill->x, (r_info[kill->r_idx].name),
                borg_danger_aux(c_y, c_x, 1, ag->kill, true, false)));
        }

        /* Non evil monsters*/
        if (!(rf_has(r_ptr->flags, RF_EVIL))) {
            /* Note who gets to stay */
            if (!borg_simulate) {
                borg_note(format(
                    "# Banishing Evil: (%d,%d): %s, danger %d. Stays (not evil).",
                    kill->y, kill->x, (r_info[kill->r_idx].name),
                    borg_danger_aux(c_y, c_x, 1, ag->kill, true, false)));
            }
            continue;
        }

        /* Unique Monster in good health*/
        /* NOTE(review): injury > 60 actually means badly hurt; the comment
         * and condition look inverted -- confirm intended behavior */
        if (rf_has(r_ptr->flags, RF_UNIQUE) && kill->injury > 60) {
            /* Note who gets to stay */
            if (!borg_simulate) {
                borg_note(format(
                    "# Banishing Evil: (%d,%d): %s, danger %d. Unique not considered: Injury %d.",
                    kill->y, kill->x, (r_info[kill->r_idx].name),
                    borg_danger_aux(c_y, c_x, 1, ag->kill, true, false),
                    kill->injury));
            }
            continue;
        }

        /* Monsters in walls cant be booted */
        if (!borg_cave_floor_bold(kill->y, kill->x)) {
            /* Note who gets banished */
            if (!borg_simulate) {
                borg_note(format(
                    "# Banishing Evil: (%d,%d): %s, danger %d. Stays (in wall).",
                    kill->y, kill->x, (r_info[kill->r_idx].name),
                    borg_danger_aux(c_y, c_x, 1, ag->kill, true, true)));
            }
            continue;
        }

        /* Note who gets banished */
        if (!borg_simulate) {
            borg_note(format(
                "# Banishing Evil: (%d,%d): %s, danger %d. Booted.",
                kill->y, kill->x, (r_info[kill->r_idx].name),
                borg_danger_aux(c_y, c_x, 1, ag->kill, true, true)));
            borg_delete_kill(i);
        }

        /* Count */
        banished_monsters++;

        /* Calculate danger of who is left over */
        /* NOTE(review): in the execution pass the kill was deleted just
         * above, before this danger is subtracted -- confirm the
         * subtraction still sees the intended value */
        p2 -= borg_danger_aux(c_y, c_x, 1, i, true, true);
    }

    if (!borg_simulate) {
        /* attempt the banish */
        if (using_artifact)
            if (borg_activate_item(act_loskill))
                return (p1 - p2);
        if (borg_spell(BANISH_EVIL))
            return (p1 - p2);
    }

    /* p2 is the danger after all the bad guys are removed. */
    /* no negatives */
    if (p2 <= 0)
        p2 = 0;

    /* No monsters get booted */
    if (banished_monsters == 0)
        p2 = 9999;

    /* Try not to cast this against Morgy/Sauron */
    if (borg_fighting_unique >= 10 && borg_skill[BI_CURHP] > 250
        && borg_skill[BI_CDEPTH] == 99)
        p2 = 9999;
    if (borg_fighting_unique >= 10 && borg_skill[BI_CURHP] > 350
        && borg_skill[BI_CDEPTH] == 100)
        p2 = 9999;

    /* check to see if I am left better off */
    if (p1 > p2
        && p2 <= (borg_fighting_unique ? ((avoidance * 2) / 3)
                                       : (avoidance / 2))) {
        /* Simulation */
        if (borg_simulate)
            return (p1 - p2);
    }
    return (0);
}

/*
 * Detect Inviso/Monsters
 * Used only if I am hit by an unseen guy.
 * Casts detect invis.
 */
static int borg_defend_aux_inviso(int p1)
{
    int fail_allowed = 25;
    borg_grid* ag = &borg_grids[c_y][c_x];

    /* no need */
    if (borg_skill[BI_ISFORGET] || borg_skill[BI_ISBLIND]
        || borg_skill[BI_ISCONFUSED] || borg_see_inv)
        return (0);

    /* not recent */
    if (borg_t > need_see_inviso + 5)
        return (0);

    /* too dangerous to cast */
    if (p1 > avoidance * 2)
        return (0);

    /* Do I have anything that will work? */
    if (-1 == borg_slot(TV_POTION, sv_potion_detect_invis)
        && -1 == borg_slot(TV_SCROLL, sv_scroll_detect_invis)
        && !borg_equips_staff_fail(sv_staff_detect_invis)
        && !borg_equips_staff_fail(sv_staff_detect_evil)
        && !borg_spell_okay_fail(SENSE_INVISIBLE, fail_allowed)
        && !borg_spell_okay_fail(DETECTION, fail_allowed))
        return (0);

    /* Darkness */
    if (!(ag->info & BORG_GLOW) && !borg_skill[BI_CURLITE])
        return (0);

    /* No real value known, but lets cast it to find the bad guys. */
    if (borg_simulate)
        return (10);

    /* smoke em if you got em */
    /* short time */
    /* snap shot */
    if (borg_spell_fail(REVEAL_MONSTERS, fail_allowed)
        || borg_read_scroll(sv_scroll_detect_invis)
        || borg_use_staff(sv_staff_detect_invis)
        || borg_use_staff(sv_staff_detect_evil)) {
        borg_see_inv = 3000; /* hack, actually a snap shot, no ignition message */
        return (10);
    }
    if (borg_quaff_potion(sv_potion_detect_invis)) {
        borg_see_inv = 18000;
        borg_no_rest_prep = 18000;
        return (10);
    }

    /* long time */
    if (borg_spell_fail(SENSE_INVISIBLE, fail_allowed)) {
        borg_see_inv = 30000;
        borg_no_rest_prep = 16000;
        return (10);
    }

    /* ah crap, I guess I wont be able to see them */
    return (0);
}

/*
 * Light Beam to spot lurkers
 * Used only if I am hit by an unseen guy.
 * Lights up a hallway.
*/
static int borg_defend_aux_lbeam(void)
{
    bool hallway = false;
    int x = c_x;
    int y = c_y;

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]
        || borg_skill[BI_ISFORGET])
        return (0);

    /* Light Beam section to spot non seen guys */
    /* not recent, dont bother */
    if (borg_t > (need_see_inviso + 2))
        return (0);

    /* Check to see if I am in a hallway */
    /* Case 1a: north-south corridor */
    if (borg_cave_floor_bold(y - 1, x) && borg_cave_floor_bold(y + 1, x)
        && !borg_cave_floor_bold(y, x - 1) && !borg_cave_floor_bold(y, x + 1)
        && !borg_cave_floor_bold(y + 1, x - 1)
        && !borg_cave_floor_bold(y + 1, x + 1)
        && !borg_cave_floor_bold(y - 1, x - 1)
        && !borg_cave_floor_bold(y - 1, x + 1)) {
        /* ok to light up */
        hallway = true;
    }

    /* Case 1b: east-west corridor */
    if (borg_cave_floor_bold(y, x - 1) && borg_cave_floor_bold(y, x + 1)
        && !borg_cave_floor_bold(y - 1, x) && !borg_cave_floor_bold(y + 1, x)
        && !borg_cave_floor_bold(y + 1, x - 1)
        && !borg_cave_floor_bold(y + 1, x + 1)
        && !borg_cave_floor_bold(y - 1, x - 1)
        && !borg_cave_floor_bold(y - 1, x + 1)) {
        /* ok to light up */
        hallway = true;
    }

    /* Case 1aa: north-south doorway */
    if (borg_cave_floor_bold(y - 1, x) && borg_cave_floor_bold(y + 1, x)
        && !borg_cave_floor_bold(y, x - 1)
        && !borg_cave_floor_bold(y, x + 1)) {
        /* ok to light up */
        hallway = true;
    }

    /* Case 1ba: east-west doorway */
    if (borg_cave_floor_bold(y, x - 1) && borg_cave_floor_bold(y, x + 1)
        && !borg_cave_floor_bold(y - 1, x)
        && !borg_cave_floor_bold(y + 1, x)) {
        /* ok to light up */
        hallway = true;
    }

    /* not in a hallway */
    if (!hallway)
        return (0);

    /* Make sure I am not in too much danger */
    /* XXX '(' replaces previous use of global variable that was always
     * '('.  This is a BUG.  I however have no idea how to fix it because
     * I don't know the code well enough.  -AS */
    if (borg_simulate && '(' > avoidance * 3 / 4)
        return (0);

    /* test the beam function */
    if (!borg_LIGHT_beam(true))
        return (0);

    /* return some value */
    if (borg_simulate)
        return (10);

    /* if in a hallway call the Light Beam routine */
    if (borg_LIGHT_beam(false)) {
        return (10);
    }

    return (0);
}

/* Shift the panel to locate offscreen monsters */
static int borg_defend_aux_panel_shift(void)
{
    int dir = 0;
    int wx = Term->offset_x / borg_panel_wid();
    int wy = Term->offset_y / borg_panel_hgt();

    /* no need */
    if (!need_shift_panel && borg_skill[BI_CDEPTH] < 70)
        return (0);

    /* if Morgy is on my panel, dont do it */
    if (borg_skill[BI_CDEPTH] == 100 && w_y == morgy_panel_y
        && w_x == morgy_panel_x)
        return (0);

    /* Which direction do we need to move? */
    /* Shift panel to the right */
    if (c_x >= 52 && c_x <= 60 && wx == 0) dir = 6;
    if (c_x >= 84 && c_x <= 94 && wx == 1) dir = 6;
    if (c_x >= 116 && c_x <= 123 && wx == 2) dir = 6;
    if (c_x >= 148 && c_x <= 159 && wx == 3) dir = 6;

    /* Shift panel to the left */
    if (c_x <= 142 && c_x >= 136 && wx == 4) dir = 4;
    if (c_x <= 110 && c_x >= 103 && wx == 3) dir = 4;
    if (c_x <= 78 && c_x >= 70 && wx == 2) dir = 4;
    if (c_x <= 46 && c_x >= 37 && wx == 1) dir = 4;

    /* Shift panel down */
    if (c_y >= 15 && c_y <= 19 && wy == 0) dir = 2;
    if (c_y >= 25 && c_y <= 30 && wy == 1) dir = 2;
    if (c_y >= 36 && c_y <= 41 && wy == 2) dir = 2;
    if (c_y >= 48 && c_y <= 52 && wy == 3) dir = 2;

    /* Shift panel up */
    if (c_y <= 51 && c_y >= 47 && wy == 4) dir = 8;
    if (c_y <= 39 && c_y >= 35 && wy == 3) dir = 8;
    if (c_y <= 28 && c_y >= 24 && wy == 2) dir = 8;
    if (c_y <= 17 && c_y >= 13 && wy == 1) dir = 8;

    /* Do the Shift if needed, then note it, reset the flag */
    if (need_shift_panel == true) {
        /* Send action (view panel info) */
        borg_keypress('L');
        if (dir) borg_keypress(I2D(dir));
        borg_keypress(ESCAPE);

        borg_note("# Shifted panel to locate offscreen monster.");
        need_shift_panel = false;

        /* Leave the panel shift mode */
        borg_keypress(ESCAPE);
    } else /* check to make sure its appropriate */
    {
        /* Hack Not if I just did one */
        if (when_shift_panel
            && (borg_t - when_shift_panel <= 10
                || borg_t - borg_t_morgoth <= 10)) {
            /* do nothing */
        } else {
            /* if not the first step */
            if (track_step.num) {
                /* shift up? only if a north corridor */
                if (dir == 8 && borg_projectable_pure(c_y, c_x, c_y - 2, c_x)
                    && track_step.y[track_step.num - 1] != c_y - 1) {
                    /* Send action (view panel info) */
                    borg_keypress('L');
                    if (dir) borg_keypress(I2D(dir));
                    borg_note("# Shifted panel as a precaution.");

                    /* Mark the time to avoid loops */
                    when_shift_panel = borg_t;

                    /* Leave the panel shift mode */
                    borg_keypress(ESCAPE);
                }
                /* shift down? only if a south corridor */
                else if (dir == 2
                    && borg_projectable_pure(c_y, c_x, c_y + 2, c_x)
                    && track_step.y[track_step.num - 1] != c_y + 1) {
                    /* Send action (view panel info) */
                    borg_keypress('L');
                    borg_keypress(I2D(dir));
                    borg_note("# Shifted panel as a precaution.");

                    /* Mark the time to avoid loops */
                    when_shift_panel = borg_t;

                    /* Leave the panel shift mode */
                    borg_keypress(ESCAPE);
                }
                /* shift Left? only if a west corridor */
                else if (dir == 4
                    && borg_projectable_pure(c_y, c_x, c_y, c_x - 2)
                    && track_step.x[track_step.num - 1] != c_x - 1) {
                    /* Send action (view panel info) */
                    borg_keypress('L');
                    if (dir) borg_keypress(I2D(dir));
                    borg_note("# Shifted panel as a precaution.");

                    /* Mark the time to avoid loops */
                    when_shift_panel = borg_t;

                    /* Leave the panel shift mode */
                    borg_keypress(ESCAPE);
                }
                /* shift Right? only if a east corridor */
                else if (dir == 6
                    && borg_projectable_pure(c_y, c_x, c_y, c_x + 2)
                    && track_step.x[track_step.num - 1] != c_x + 1) {
                    /* Send action (view panel info) */
                    borg_keypress('L');
                    if (dir) borg_keypress(I2D(dir));
                    borg_note("# Shifted panel as a precaution.");

                    /* Mark the time to avoid loops */
                    when_shift_panel = borg_t;

                    /* Leave the panel shift mode */
                    borg_keypress(ESCAPE);
                }
            }
        }
    }

    /* This uses no energy */
    return (0);
}

/* This and the next routine is used on level 100 and when
 * attacking Morgoth. The borg has found a safe place to wait
 * for Morgoth to show.
 *
 * If the borg is not being threatened immediately by a monster,
 * then rest right here.
 *
 * Only borgs with teleport away and a good attack spell do this
 * routine.
 */
static int borg_defend_aux_rest(void)
{
    int i;

    if (!borg_morgoth_position
        && (!borg_as_position || borg_t - borg_t_antisummon >= 50))
        return (0);

    /* Not if Morgoth is not on this level */
    if (!morgoth_on_level
        && (!borg_as_position || borg_t - borg_t_antisummon >= 50))
        return (0);

    /* Not if I can not teleport others away */
#if 0
    if (!borg_spell_okay_fail(3, 1, 30) && !borg_spell_okay_fail(4, 2, 30))
        return (0);
#endif

    /* Not if a monster can see me */
    /* Examine all the monsters */
    for (i = 1; i < borg_kills_nxt; i++) {
        borg_kill* kill = &borg_kills[i];

        int x9 = kill->x;
        int y9 = kill->y;
        int ax, ay, d;

        /* Skip dead monsters */
        if (!kill->r_idx)
            continue;

        /* Distance components */
        ax = (x9 > c_x) ? (x9 - c_x) : (c_x - x9);
        ay = (y9 > c_y) ? (y9 - c_y) : (c_y - y9);

        /* Distance */
        d = MAX(ax, ay);

        /* Minimal distance */
        if (d > z_info->max_range)
            continue;

        /* If I can see Morgoth or a guy with Ranged Attacks, don't rest. */
        if (borg_los(c_y, c_x, kill->y, kill->x)
            && (kill->r_idx == borg_morgoth_id || kill->ranged_attack)
            && avoidance <= borg_skill[BI_CURHP]) {
            borg_note("# Not resting. I can see Morgoth or a shooter.");
            return (0);
        }

        /* If a little twitchy, its ok to stay put */
        if (avoidance > borg_skill[BI_CURHP])
            continue;
    }

    /* Return some value for this rest */
    if (borg_simulate)
        return (200);

    /* Rest */
    borg_keypress(',');
    borg_note(
        format("# Resting on grid (%d, %d), waiting for Morgoth.", c_y, c_x));

    /* All done */
    return (200);
}

/*
 * Try to get rid of all of the monsters while I build my
 * Sea of Runes.
 */
static int borg_defend_aux_tele_away_morgoth(void)
{
    int p2 = 0;
    int fail_allowed = 40;
    int i, x, y;

    borg_grid* ag;

    /* Only if on level 100 */
    if (!(borg_skill[BI_CDEPTH] == 100))
        return (0);

    /* Not if Morgoth is not on this level */
    if (!morgoth_on_level)
        return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]
        || borg_skill[BI_ISFORGET])
        return (0);

    /* Do I have the T.O. spell? */
    if (!borg_spell_okay_fail(TELEPORT_OTHER, fail_allowed))
        return (0);

    /* Do I have the Glyph spell? No good to use TO if I cant build the
     * sea of runes */
    if (borg_skill[BI_AGLYPH] < 10)
        return (0);

    /* No Teleport Other if surrounded */
    if (borg_surrounded() == true)
        return (0);

    /* Borg_temp_n temporarily stores several things.
     * Some of the borg_attack() sub-routines use these numbers,
     * which would have been filled in borg_attack().
     * Since this is a defence manuever which will move into
     * and borrow some of the borg_attack() subroutines, we need
     * to make sure that the borg_temp_n arrays are properly
     * filled.  Otherwise, the borg will attempt to consider
     * these grids which were left filled by some other routine.
     * Which was probably a flow routine which stored about 200
     * grids into the array.
     * Any change in inclusion/exclusion criteria for filling this
     * array in borg_attack() should be included here also.
     */

    /* Nobody around so dont worry */
    if (!borg_kills_cnt && borg_simulate)
        return (0);

    /* Reset list */
    borg_temp_n = 0;
    borg_tp_other_n = 0;

    /* Find "nearby" monsters */
    for (i = 0; i < borg_kills_nxt; i++) {
        borg_kill* kill;

        /* Monster */
        kill = &borg_kills[i];

        /* Skip dead monsters */
        if (!kill->r_idx)
            continue;

        /* Require current knowledge */
        if (kill->when < borg_t - 2)
            continue;

        /* Acquire location */
        x = kill->x;
        y = kill->y;

        /* Get grid */
        ag = &borg_grids[y][x];

        /* Never shoot off-screen */
        if (!(ag->info & BORG_OKAY))
            continue;

        /* Never shoot through walls */
        if (!(ag->info & BORG_VIEW))
            continue;

        /* Check the distance XXX XXX XXX */
        if (borg_distance(c_y, c_x, y, x) > z_info->max_range)
            continue;

        /* Check the LOS */
        if (!borg_projectable(c_y, c_x, kill->y, kill->x))
            continue;

        /* Save the location (careful) */
        borg_temp_x[borg_temp_n] = x;
        borg_temp_y[borg_temp_n] = y;
        borg_temp_n++;
    }

    /* No destinations */
    if (!borg_temp_n && borg_simulate)
        return (0);

    /* choose then target a bad guy or several
     * If left as bolt, he targets the single most nasty guy.
     * If left as beam, he targets the collection of monsters.
     */
    p2 = borg_launch_bolt(
        -1, 50, BORG_ATTACK_AWAY_ALL_MORGOTH, z_info->max_range, 0);

    /* Normalize the value a bit */
    if (p2 > 1000)
        p2 = 1000;

    /* Reset list */
    borg_temp_n = 0;
    borg_tp_other_n = 0;

    /* Return a good score to make him do it */
    if (borg_simulate)
        return (p2);

    /* Log the Path for Debug */
    borg_log_spellpath(true);

    /* Log additional info for debug */
    for (i = 0; i < borg_tp_other_n; i++) {
        borg_note(format("# %d, index %d (%d,%d)", borg_tp_other_n,
            borg_tp_other_index[i], borg_tp_other_y[i],
            borg_tp_other_x[i]));
    }

    borg_note("# Attempting to cast T.O. for depth 100.");

    /* Cast the spell */
    if (borg_spell(TELEPORT_OTHER) || borg_activate_item(act_tele_other)
        || borg_aim_wand(sv_wand_teleport_away)) {
        /* Use target */
        borg_keypress('5');

        /* Set our shooting flag */
        successful_target = -1;

        /* Value */
        return (p2);
    }

    return (0);
}

/*
 * Try to get rid of all of the monsters while I build my
 * Sea of Runes.
 */
static int borg_defend_aux_banishment_morgoth(void)
{
    int fail_allowed = 50;
    int i, x, y;
    int count = 0;
    int glyphs = 0;

    borg_grid* ag;
    borg_kill* kill;
    struct monster_race* r_ptr;

    /* Not if Morgoth is not on this level */
    if (!morgoth_on_level)
        return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]
        || borg_skill[BI_ISFORGET])
        return (0);

    /* Scan grids looking for glyphs */
    /* (glyphs is only consumed by the disabled check below) */
    for (i = 0; i < 8; i++) {
        /* Access offset */
        x = c_x + ddx_ddd[i];
        y = c_y + ddy_ddd[i];

        /* Access the grid */
        ag = &borg_grids[y][x];

        /* Check for Glyphs */
        if (ag->glyph)
            glyphs++;
    }

    /* Only if on level 100 and in a sea of runes or
     * in the process of building one */
#if 0
    if (!borg_morgoth_position && glyphs < 3)
        return (0);
#endif

    /* Do I have the spell? (Banish Evil) */
    if (!borg_spell_okay_fail(MASS_BANISHMENT, fail_allowed)
        && !borg_spell_okay_fail(BANISH_EVIL, fail_allowed))
        return (0);

    /* Nobody around so dont worry */
    if (!borg_kills_cnt && borg_simulate)
        return (0);

    /* Find "nearby" monsters */
    for (i = 1; i < borg_kills_nxt; i++) {
        /* Monster */
        kill = &borg_kills[i];

        /* Skip dead monsters */
        if (!kill->r_idx)
            continue;

        r_ptr = &r_info[kill->r_idx];

        /* Require current knowledge */
        if (kill->when < borg_t - 2)
            continue;

        /* Acquire location */
        x = kill->x;
        y = kill->y;

        /* Get grid */
        ag = &borg_grids[y][x];

        /* Never try on non-evil guys if Priest */
        if (borg_class == CLASS_PRIEST && !(rf_has(r_ptr->flags, RF_EVIL)))
            continue;

        /* Check the distance */
        if (borg_distance(c_y, c_x, y, x) > z_info->max_range)
            continue;

        /* Monster must be LOS */
        if (!borg_projectable(c_y, c_x, kill->y, kill->x))
            continue;

        /* Count the number of monsters too close double*/
        if (borg_distance(c_y, c_x, y, x) <= 7)
            count++;

        /* Count the number of monster on screen */
        count++;
    }

    /* No destinations */
    if (count <= 7 && borg_simulate)
        return (0);

    /* Return a good score to make him do it */
    if (borg_simulate)
        return (1500);

    borg_note(format(
        "# Attempting to cast Banishment for depth 100. %d monsters ",
        count));

    /* Cast the spell */
    if (borg_spell(MASS_BANISHMENT) || borg_spell(BANISH_EVIL)) {
        /* Remove this race from the borg_kill */
        for (i = 0; i < borg_kills_nxt; i++) {
            borg_kill* tmp_kill;
            struct monster_race* tmp_r_ptr;

            /* Monster */
            tmp_kill = &borg_kills[i];
            tmp_r_ptr = &r_info[tmp_kill->r_idx];

            /* Cant kill uniques like this */
            if (rf_has(tmp_r_ptr->flags, RF_UNIQUE))
                continue;

            /* remove this monster */
            borg_delete_kill(i);
        }

        /* Value */
        return (1000);
    }

    return (0);
}

/*
 * Sometimes the borg will not fire on Morgoth as he approaches
 * while tunneling through rock.  The borg still remembers and
 * assumes that the rock is unknown grid.
*/
static int borg_defend_aux_light_morgoth(void)
{
    int fail_allowed = 50;
    int i, x, y;
    int b_y = -1;
    int b_x = -1;
    int count = 0;

    borg_kill* kill;

    /* Only if on level 100 and in a sea of runes */
    if (!borg_morgoth_position)
        return (0);

    /* Not if Morgoth is not on this level */
    if (!morgoth_on_level)
        return (0);

    /* Cant when screwed */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]
        || borg_skill[BI_ISFORGET])
        return (0);

    /* Do I have the spell? */
    if (!borg_spell_okay_fail(SPEAR_OF_LIGHT, fail_allowed)
        && !borg_spell_okay_fail(CLAIRVOYANCE, fail_allowed)
        && !borg_spell_okay_fail(FUME_OF_MORDOR, fail_allowed))
        return (0);

    /* Nobody around so dont worry */
    if (!borg_kills_cnt && borg_simulate)
        return (0);

    /* Find "nearby" monsters */
    for (i = 1; i < borg_kills_nxt; i++) {
        /* Monster */
        kill = &borg_kills[i];

        /* Skip dead monsters */
        if (!kill->r_idx)
            continue;

        /* Skip non- Morgoth monsters */
        if (kill->r_idx != borg_morgoth_id)
            continue;

        /* Require current knowledge */
        if (kill->when < borg_t - 2)
            continue;

        /* Acquire location */
        x = kill->x;
        y = kill->y;

        /* Check the distance */
        if (borg_distance(c_y, c_x, y, x) > z_info->max_range)
            continue;
        if (borg_distance(c_y, c_x, y, x) <= 5)
            continue;

        /* We want at least one dark spot on the path */
        if (!borg_projectable_dark(c_y, c_x, y, x))
            continue;

        /* Count Morgoth so I try the spell */
        count++;
        b_y = y;
        b_x = x;
    }

    /* No destinations */
    if (count <= 0 && borg_simulate)
        return (0);

    /* Return a good score to make him do it */
    if (borg_simulate)
        return (500);

    borg_note(
        format("# Attempting to Illuminate a Pathway to (%d, %d)", b_y, b_x));

    /* Target Morgoth Grid */
    (void)borg_target(b_y, b_x);

    /* Cast the spell */
    if (borg_spell(SPEAR_OF_LIGHT) || borg_spell(CLAIRVOYANCE)
        || borg_spell(FUME_OF_MORDOR)) {
        /* Select the target */
        borg_keypress('5');

        /* Value */
        return (200);
    }

    return (0);
}

/*
 * Simulate/Apply the optimal result of using the given "type" of defence
 * p1 is the current danger level (passed in for efficiency)
 */
static int borg_defend_aux(int what, int p1)
{
    /* Analyze */
    switch (what) {
    case BD_SPEED: {
        return (borg_defend_aux_speed(p1));
    }
    case BD_PROT_FROM_EVIL: {
        return (borg_defend_aux_prot_evil(p1));
    }
    case BD_GRIM_PURPOSE: {
        return (borg_defend_aux_grim_purpose(p1));
    }
    case BD_RESIST_FECAP: {
        return (borg_defend_aux_resist_fecap(p1));
    }
    case BD_RESIST_F: {
        return (borg_defend_aux_resist_f(p1));
    }
    case BD_RESIST_C: {
        return (borg_defend_aux_resist_c(p1));
    }
    case BD_RESIST_A: {
        return (borg_defend_aux_resist_a(p1));
    }
    case BD_RESIST_P: {
        return (borg_defend_aux_resist_p(p1));
    }
    case BD_BLESS: {
        return (borg_defend_aux_bless(p1));
    }
    case BD_HERO: {
        return (borg_defend_aux_hero(p1));
    }
    case BD_BERSERK: {
        return (borg_defend_aux_berserk(p1));
    }
    case BD_SMITE_EVIL: {
        return (borg_defend_aux_smite_evil(p1));
    }
    case BD_REGEN: {
        return (borg_defend_aux_regen(p1));
    }
    case BD_SHIELD: {
        return (borg_defend_aux_shield(p1));
    }
    case BD_TELE_AWAY: {
        return (borg_defend_aux_tele_away(p1));
    }
    case BD_GLYPH: {
        return (borg_defend_aux_glyph(p1));
    }
    case BD_CREATE_DOOR: {
        return (borg_defend_aux_create_door(p1));
    }
    case BD_MASS_GENOCIDE: {
        return (borg_defend_aux_mass_genocide(p1));
    }
    case BD_GENOCIDE: {
        return (borg_defend_aux_genocide(p1));
    }
    case BD_GENOCIDE_NASTIES: {
        return (borg_defend_aux_genocide_nasties(p1));
    }
    case BD_EARTHQUAKE: {
        return (borg_defend_aux_earthquake(p1));
    }
    case BD_TPORTLEVEL: {
        return (borg_defend_aux_teleportlevel(p1));
    }
    case BD_DESTRUCTION: {
        return (borg_defend_aux_destruction(p1));
    }
    case BD_BANISHMENT: {
        return (borg_defend_aux_banishment(p1));
    }
    case BD_DETECT_INVISO: {
        return (borg_defend_aux_inviso(p1));
    }
    case BD_LIGHT_BEAM: {
        return (borg_defend_aux_lbeam());
    }
    case BD_SHIFT_PANEL: {
        return (borg_defend_aux_panel_shift());
    }
    case BD_REST: {
        return (borg_defend_aux_rest());
    }
    case BD_TELE_AWAY_MORGOTH: {
        return (borg_defend_aux_tele_away_morgoth());
    }
    case BD_BANISHMENT_MORGOTH: {
        return (borg_defend_aux_banishment_morgoth());
    }
    case BD_LIGHT_MORGOTH: {
        return (borg_defend_aux_light_morgoth());
    }
    }
    return (0);
}

/*
 * prepare to attack... this is setup for a battle.
 */
bool borg_defend(int p1)
{
    int n, b_n = 0;
    int g, b_g = -1;

    /* Simulate */
    borg_simulate = true;

    /* if you have Resist All and it is about to drop, */
    /* refresh it (if you can) */
    if (borg_resistance && borg_resistance < (borg_game_ratio * 2)) {
        int p;

        /* check 'true' danger. This will make sure we do not */
        /* refresh our Resistance if no-one is around */
        borg_attacking = true;
        p = borg_danger(c_y, c_x, 1, false, false);
        /* Note false for danger!! */
        borg_attacking = false;
        if (p > borg_fear_region[c_y / 11][c_x / 11]
            || borg_fighting_unique) {
            if (borg_spell(RESISTANCE)) {
                borg_note(format(
                    "# Refreshing Resistance.  borg_resistance=%d, player->=%d, (ratio=%d)",
                    borg_resistance, player->timed[TMD_OPP_ACID],
                    borg_game_ratio));
                borg_attempting_refresh_resist = true;
                borg_resistance = 25000;
                return (true);
            }
        }
    }

    /* Analyze the possible setup moves */
    for (g = 0; g < BD_MAX; g++) {
        /* Simulate */
        n = borg_defend_aux(g, p1);

        /* Track "best" attack */
        if (n <= b_n)
            continue;

        /* Track best */
        b_g = g;
        b_n = n;
    }

    /* Nothing good */
    if (b_n <= 0) {
        return (false);
    }

    /* Note */
    borg_note(format("# Performing defence type %d with value %d", b_g, b_n));

    /* Instantiate */
    borg_simulate = false;

    /* Instantiate */
    (void)borg_defend_aux(b_g, p1);

    /* Success */
    return (true);
}

/*
 * Perma spells.  Some are cool to have on all the time, so long as their
 * mana cost is not too much.
 * There are several types of setup moves:
 *
 * Temporary speed
 * Protect From Evil
 * Prayer
 * Temp Resist (either all or just cold/fire?
* Shield */ enum { BP_SPEED, BP_PROT_FROM_EVIL, BP_BLESS, BP_RESIST_ALL, BP_RESIST_ALL_COLLUIN, BP_RESIST_P, BP_FASTCAST, BP_HERO, BP_BERSERK, BP_BERSERK_POTION, BP_SMITE_EVIL, BP_VENOM, BP_REGEN, BP_GLYPH, BP_SEE_INV, BP_MAX }; /* * Prayer to prepare for battle */ static int borg_perma_aux_bless(void) { int fail_allowed = 15, cost; /* increase the threshold */ if (unique_on_level) fail_allowed = 20; if (borg_fighting_unique) fail_allowed = 25; /* already blessed */ if (borg_bless) return (0); /* Cant when Blind */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]) return (0); /* XXX Dark */ if (!borg_spell_okay_fail(BLESS, fail_allowed)) return (0); /* Obtain the cost of the spell */ cost = borg_get_spell_power(BLESS); /* If its cheap, go ahead */ if (borg_skill[BI_CLEVEL] > 10 && cost >= ((unique_on_level) ? borg_skill[BI_CURSP] / 7 : borg_skill[BI_CURSP] / 10)) return (0); /* Simulation */ /* bless is a low priority */ if (borg_simulate) return (1); /* do it! */ borg_spell(BLESS); /* No resting to recoop mana */ borg_no_rest_prep = 10000; return (1); } /* all resists FECAP*/ static int borg_perma_aux_resist(void) { int cost = 0; int fail_allowed = 5; /* increase the threshold */ if (unique_on_level) fail_allowed = 10; if (borg_fighting_unique) fail_allowed = 15; if (borg_skill[BI_TRFIRE] + borg_skill[BI_TRACID] + borg_skill[BI_TRPOIS] + borg_skill[BI_TRELEC] + borg_skill[BI_TRCOLD] >= 3) return (0); if (!borg_spell_okay_fail(RESISTANCE, fail_allowed)) return (0); /* Obtain the cost of the spell */ cost = borg_get_spell_power(RESISTANCE); /* If its cheap, go ahead */ if (cost >= ((unique_on_level) ? borg_skill[BI_CURSP] / 7 : borg_skill[BI_CURSP] / 10)) return (0); /* Simulation */ if (borg_simulate) return (2); /* do it! */ borg_spell_fail(RESISTANCE, fail_allowed); /* No resting to recoop mana */ borg_no_rest_prep = 21000; /* default to can't do it. 
*/ return (2); } /* all resists from the cloak*/ static int borg_perma_aux_resist_colluin(void) { if (borg_skill[BI_TRFIRE] + borg_skill[BI_TRACID] + borg_skill[BI_TRPOIS] + borg_skill[BI_TRELEC] + borg_skill[BI_TRCOLD] >= 3) return (0); /* Only use it when Unique is close */ if (!borg_fighting_unique) return (0); if (!borg_equips_item(act_resist_all, true) && !borg_equips_item(act_rage_bless_resist, true)) return (0); /* Simulation */ if (borg_simulate) return (2); /* do it! */ if (borg_activate_item(act_resist_all) || borg_activate_item(act_rage_bless_resist)) { /* No resting to recoop mana */ borg_no_rest_prep = 21000; } /* Value */ return (2); } /* resists--- Only bother if a Unique is on the level.*/ static int borg_perma_aux_resist_p(void) { int cost = 0; int fail_allowed = 5; /* increase the threshold */ if (unique_on_level) fail_allowed = 10; if (borg_fighting_unique) fail_allowed = 15; if (borg_skill[BI_TRPOIS] || !unique_on_level) return (0); if (!borg_spell_okay_fail(RESIST_POISON, fail_allowed)) return (0); /* Skip it if I can do the big spell */ if (borg_spell_okay_fail(RESISTANCE, fail_allowed)) return (0); /* Obtain the cost of the spell */ cost = borg_get_spell_power(RESIST_POISON); /* If its cheap, go ahead */ if (cost >= borg_skill[BI_CURSP] / 20) return (0); /* Simulation */ if (borg_simulate) return (1); /* do it! */ if (borg_spell_fail(RESIST_POISON, fail_allowed)) { /* No resting to recoop mana */ borg_no_rest_prep = 21000; /* Value */ return (1); } /* default to can't do it. 
*/ return (0); } /* * Speed to prepare for battle */ static int borg_perma_aux_speed(void) { int fail_allowed = 7; int cost; /* increase the threshold */ if (unique_on_level) fail_allowed = 10; if (borg_fighting_unique) fail_allowed = 15; /* already fast */ if (borg_speed) return (0); /* only cast defence spells if fail rate is not too high */ if (!borg_spell_okay_fail(HASTE_SELF, fail_allowed)) return (0); /* Obtain the cost of the spell */ cost = borg_get_spell_power(HASTE_SELF); /* If its cheap, go ahead */ if (cost >= ((unique_on_level) ? borg_skill[BI_CURSP] / 7 : borg_skill[BI_CURSP] / 10)) return (0); /* Simulation */ if (borg_simulate) return (5); /* do it! */ if (borg_spell_fail(HASTE_SELF, fail_allowed)) { /* No resting to recoop mana */ borg_no_rest_prep = borg_skill[BI_CLEVEL] * 1000; return (5); } /* default to can't do it. */ return (0); } static int borg_perma_aux_prot_evil(void) { int cost = 0; int fail_allowed = 5; /* if already protected */ if (borg_prot_from_evil) return (0); /* increase the threshold */ if (unique_on_level) fail_allowed = 10; if (borg_fighting_unique) fail_allowed = 15; if (!borg_spell_okay_fail(PROTECTION_FROM_EVIL, fail_allowed)) return (0); /* Obtain the cost of the spell */ cost = borg_get_spell_power(PROTECTION_FROM_EVIL); /* If its cheap, go ahead */ if (cost >= ((unique_on_level) ? borg_skill[BI_CURSP] / 7 : borg_skill[BI_CURSP] / 10)) return (0); /* Simulation */ if (borg_simulate) return (3); /* do it! */ if (borg_spell_fail(PROTECTION_FROM_EVIL, fail_allowed)) { /* No resting to recoop mana */ borg_no_rest_prep = borg_skill[BI_CLEVEL] * 1000; /* Value */ return (3); } /* default to can't do it. 
*/ return (0); } /* * Mana Channel to prepare for battle */ static int borg_perma_aux_fastcast(void) { int fail_allowed = 5, cost; /* increase the threshold */ if (unique_on_level) fail_allowed = 10; if (borg_fighting_unique) fail_allowed = 15; /* already fast */ if (borg_fastcast) return (0); /* Cant when Blind */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]) return (0); if (!borg_spell_okay_fail(MANA_CHANNEL, fail_allowed)) return (0); /* Obtain the cost of the spell */ cost = borg_get_spell_power(MANA_CHANNEL); /* If its cheap, go ahead */ if (cost >= ((unique_on_level) ? borg_skill[BI_CURSP] / 7 : borg_skill[BI_CURSP] / 10)) return (0); /* Simulation */ /* fastcast is a low priority */ if (borg_simulate) return (5); /* do it! */ if (borg_spell(MANA_CHANNEL)) { /* No resting to recoop mana */ borg_no_rest_prep = 6000; return 1; } return (0); } /* * Hero to prepare for battle */ static int borg_perma_aux_hero(void) { int fail_allowed = 5, cost; /* increase the threshold */ if (unique_on_level) fail_allowed = 10; if (borg_fighting_unique) fail_allowed = 15; /* already blessed */ if (borg_hero) return (0); /* Cant when Blind */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]) return (0); /* XXX Dark */ if (!borg_spell_okay_fail(HEROISM, fail_allowed)) return (0); /* Obtain the cost of the spell */ cost = borg_get_spell_power(HEROISM); /* If its cheap, go ahead */ if (cost >= ((unique_on_level) ? borg_skill[BI_CURSP] / 7 : borg_skill[BI_CURSP] / 10)) return (0); /* Simulation */ /* hero is a low priority */ if (borg_simulate) return (1); /* do it! 
*/ if (borg_spell(HEROISM)) { /* No resting to recoop mana */ borg_no_rest_prep = 3000; return 1; } return (0); } /* * Rapid Regen to prepare for battle */ static int borg_perma_aux_regen(void) { int fail_allowed = 5, cost; /* increase the threshold */ if (unique_on_level) fail_allowed = 10; if (borg_fighting_unique) fail_allowed = 15; /* already regenerating */ if (borg_regen) return (0); /* Cant when screwed */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISFORGET]) return (0); /* don't bother if not much to regenerate */ if (borg_skill[BI_MAXHP] < 100) return (0); if (!borg_spell_okay_fail(RAPID_REGENERATION, fail_allowed)) return (0); /* Obtain the cost of the spell */ cost = borg_get_spell_power(RAPID_REGENERATION); /* If its cheap, go ahead */ if (cost >= ((unique_on_level) ? borg_skill[BI_CURSP] / 7 : borg_skill[BI_CURSP] / 10)) return (0); /* do it! */ if (borg_spell(RAPID_REGENERATION)) { /* No resting to recoop mana */ borg_no_rest_prep = 6000; return 1; } return (0); } /* * Smite evil to prepare for battle */ static int borg_perma_aux_smite_evil(void) { int fail_allowed = 5, cost; /* increase the threshold */ if (unique_on_level) fail_allowed = 10; if (borg_fighting_unique) fail_allowed = 15; /* already smoting */ if (borg_smite_evil || borg_skill[BI_WS_EVIL]) return (0); /* Cant when Blind */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]) return (0); if (!borg_spell_okay_fail(SMITE_EVIL, fail_allowed)) return (0); /* Obtain the cost of the spell */ cost = borg_get_spell_power(SMITE_EVIL); /* If its cheap, go ahead */ if (cost >= ((unique_on_level) ? borg_skill[BI_CURSP] / 7 : borg_skill[BI_CURSP] / 10)) return (0); /* Simulation */ /* smite evil is a low priority */ if (borg_simulate) return (3); /* do it! 
*/ if (borg_spell(SMITE_EVIL)) { /* No resting to recoop mana */ borg_no_rest_prep = 21000; return 3; } return (0); } /* * Poison your weapon to prepare for battle */ static int borg_perma_aux_venom(void) { int fail_allowed = 5, cost; /* increase the threshold */ if (unique_on_level) fail_allowed = 10; if (borg_fighting_unique) fail_allowed = 15; /* already smoting */ if (borg_venom || borg_skill[BI_WB_POIS]) return (0); /* Cant when Blind */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]) return (0); if (!borg_spell_okay_fail(VENOM, fail_allowed)) return (0); /* Obtain the cost of the spell */ cost = borg_get_spell_power(SMITE_EVIL); /* If its cheap, go ahead */ if (cost >= ((unique_on_level) ? borg_skill[BI_CURSP] / 7 : borg_skill[BI_CURSP] / 10)) return (0); /* Simulation */ /* smite evil is a low priority */ if (borg_simulate) return (3); /* do it! */ if (borg_spell(VENOM)) { /* No resting to recoop mana */ borg_no_rest_prep = 19000; return 3; } return (0); } /* * Berserk to prepare for battle */ static int borg_perma_aux_berserk(void) { int fail_allowed = 5, cost; /* increase the threshold */ if (unique_on_level) fail_allowed = 10; if (borg_fighting_unique) fail_allowed = 15; /* already blessed */ if (borg_berserk) return (0); /* Cant when Blind */ if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]) return (0); if (!borg_spell_okay_fail(BERSERK_STRENGTH, fail_allowed)) return (0); /* Obtain the cost of the spell */ cost = borg_get_spell_power(BERSERK_STRENGTH); /* If its cheap, go ahead */ if (cost >= ((unique_on_level) ? borg_skill[BI_CURSP] / 7 : borg_skill[BI_CURSP] / 10)) return (0); /* Simulation */ /* Berserk is a low priority */ if (borg_simulate) return (2); /* do it! 
*/
    if (borg_spell(BERSERK_STRENGTH)) {
        /* No resting to recoop mana */
        borg_no_rest_prep = 11000;
        return 2;
    }

    return (0);
}

/*
 * Berserk (via a potion of Berserk Strength) to prepare for battle.
 * Potions are only spent when fighting a unique.
 */
static int borg_perma_aux_berserk_potion(void)
{
    /* Saver the potions */
    if (!borg_fighting_unique) return (0);

    /* already hero/berserk */
    if (borg_hero || borg_berserk) return (0);

    /* do I have any? */
    if (-1 == borg_slot(TV_POTION, sv_potion_berserk)) return (0);

    /* Simulation */
    /* Berserk is a low priority */
    if (borg_simulate) return (2);

    /* do it! */
    if (borg_quaff_potion(sv_potion_berserk)) return (2);

    return (0);
}

#ifdef UNUSED
/* Glyph of Warding in an anti-summon corridor.
 * NOTE(review): dead code -- compiled out.  It still uses the old
 * (book, entry) form of borg_spell_okay_fail()/borg_spell_fail() and
 * returns undefined locals (p1, p2), so it will not compile if
 * re-enabled as-is. */
static int borg_perma_aux_glyph(void)
{
    int i, wall_y, wall_x, wall_count = 0, y, x;
    int fail_allowed = 20;

    borg_grid* ag = &borg_grids[c_y][c_x];

    /* check to make sure a summoner is near */
    if (borg_kills_summoner == -1) return (0);

    /* make sure I have the spell */
    if (!borg_spell_okay_fail(3, 4, fail_allowed)
        && !borg_spell_okay_fail(6, 4, fail_allowed))
        return (0);

    /* He should not cast it while on an object.
     * I have addressed this inadequately in borg9.c when dealing with
     * messages.  The message "the object resists" will delete the glyph
     * from the array.  Then I set a broken door on that spot, the borg
     * ignores broken doors, so he won't loop.
     */
    if ((ag->take) || (ag->feat == FEAT_GLYPH)
        || ((ag->feat >= FEAT_TRAP_HEAD) && (ag->feat <= FEAT_TRAP_TAIL))
        || ((ag->feat >= FEAT_DOOR_HEAD) && (ag->feat <= FEAT_DOOR_TAIL))
        || (ag->feat == FEAT_LESS) || (ag->feat == FEAT_MORE)
        || (ag->feat == FEAT_OPEN) || (ag->feat == FEAT_BROKEN)) {
        return (0);
    }

    /* Check for an existing glyph that is not found in the auto_grid */
    for (i = 0; i < track_glyph.num; i++) {
        /* Stop if we are on a glyph */
        if ((track_glyph.x[i] == c_x) && (track_glyph.y[i] == c_y)) return (0);
    }

    /* This spell is cast while he is digging and AS Corridor */
    /* Get grid */
    for (wall_x = -1; wall_x <= 1; wall_x++) {
        for (wall_y = -1; wall_y <= 1; wall_y++) {
            /* Acquire location */
            x = wall_x + c_x;
            y = wall_y + c_y;

            ag = &borg_grids[y][x];

            /* track adjacent walls */
            if ((ag->feat == FEAT_GLYPH)
                || ((ag->feat >= FEAT_MAGMA)
                    && (ag->feat <= FEAT_WALL_SOLID))) {
                wall_count++;
            }
        }
    }

    /* must be in a corridor */
    if (wall_count < 6) return (0);

    /* Simulation */
    if (borg_simulate) return (10);

    /* do it! */
    if (borg_spell_fail(3, 4, fail_allowed)
        || borg_spell_fail(6, 4, fail_allowed)
        || borg_read_scroll(sv_scroll_rune_of_protection)) {
        /* Check for an existing glyph */
        for (i = 0; i < track_glyph.num; i++) {
            /* Stop if we already new about this glyph */
            if ((track_glyph.x[i] == c_x) && (track_glyph.y[i] == c_y))
                return (p1 - p2);
        }

        /* Track the newly discovered glyph */
        if (track_glyph.num < track_glyph.size) {
            borg_note("# Noting the creation of a corridor glyph.");
            track_glyph.x[track_glyph.num] = c_x;
            track_glyph.y[track_glyph.num] = c_y;
            track_glyph.num++;
        }
        return (p1 - p2);
    }

    /* default to can't do it. */
    return (0);
}
#endif

/*
 * Detect Inviso/Monsters
 * Casts detect invis.
 */
static int borg_perma_aux_see_inv(void)
{
    int fail_allowed = 25;
    borg_grid* ag = &borg_grids[c_y][c_x];

    /* no need */
    if (borg_skill[BI_ISBLIND] || borg_skill[BI_ISCONFUSED]
        || borg_skill[BI_SINV] || borg_see_inv)
        return (0);

    /* Do I have anything that will work? */
    if (!borg_spell_okay_fail(SENSE_INVISIBLE, fail_allowed)
        /* && !borg_spell_okay_fail(2, 6, fail_allowed) */)
        return (0);

    /* Darkness */
    if (!(ag->info & BORG_GLOW) && !borg_skill[BI_CURLITE]) return (0);

    /* No real value known, but lets cast it to find the bad guys. */
    if (borg_simulate) return (10);

    /* long time */
    if (borg_spell_fail(SENSE_INVISIBLE, fail_allowed)
        /* || borg_spell_fail(2, 6, fail_allowed) */) {
        borg_see_inv = 32000;
        borg_no_rest_prep = 16000;
        return (10);
    }

    /* ah crap, I guess I wont be able to see them */
    return (0);
}

/*
 * Simulate/Apply the optimal result of using the given "type" of set-up.
 * Dispatches to the per-effect helpers above.
 */
static int borg_perma_aux(int what)
{
    /* Analyze */
    switch (what) {
    case BP_SPEED: {
        return (borg_perma_aux_speed());
    }

    case BP_PROT_FROM_EVIL: {
        return (borg_perma_aux_prot_evil());
    }

    case BP_RESIST_ALL: {
        return (borg_perma_aux_resist());
    }

    case BP_RESIST_ALL_COLLUIN: {
        return (borg_perma_aux_resist_colluin());
    }

    case BP_RESIST_P: {
        return (borg_perma_aux_resist_p());
    }

    case BP_BLESS: {
        return (borg_perma_aux_bless());
    }

    case BP_FASTCAST: {
        return (borg_perma_aux_fastcast());
    }

    case BP_HERO: {
        return (borg_perma_aux_hero());
    }

    case BP_BERSERK: {
        return (borg_perma_aux_berserk());
    }

    case BP_BERSERK_POTION: {
        return (borg_perma_aux_berserk_potion());
    }

    case BP_SMITE_EVIL: {
        return (borg_perma_aux_smite_evil());
    }

    case BP_VENOM: {
        return (borg_perma_aux_venom());
    }

    case BP_REGEN: {
        return (borg_perma_aux_regen());
    }

    case BP_GLYPH: {
        /* return (borg_perma_aux_glyph());
         * Tends to use too much mana doing this */
        return (0);
    }

    case BP_SEE_INV: {
        return (borg_perma_aux_see_inv());
    }
    }

    return (0);
}

/*
 * Walk around with certain spells on if you can afford to do so.
*/ bool borg_perma_spell() { int n, b_n = 0; int g, b_g = -1; /* Simulate */ borg_simulate = true; /* Not in town */ if (!borg_skill[BI_CDEPTH]) return (false); /* Not in shallow dungeon */ if (borg_skill[BI_CDEPTH] < borg_skill[BI_CLEVEL] / 3 || borg_skill[BI_CDEPTH] < 7) return (false); /* Low Level, save your mana, use the Defence maneuvers above */ if (borg_skill[BI_CLEVEL] <= 10) return (false); /* Only when lots of mana is on hand */ if (borg_skill[BI_CURSP] < borg_skill[BI_MAXSP] * 75 / 100) return (false); /* Analyze the possible setup moves */ for (g = 0; g < BP_MAX; g++) { /* Simulate */ n = borg_perma_aux(g); /* Track "best" move */ if (n <= b_n) continue; /* Track best */ b_g = g; b_n = n; } /* Nothing good */ if (b_n <= 0) { return (false); } /* Note */ borg_note(format("# Performing perma-spell type %d with value %d", b_g, b_n)); /* Instantiate */ borg_simulate = false; /* Instantiate */ (void)borg_perma_aux(b_g); /* Success */ return (true); } /* * check to make sure there are no monsters around * that should prevent resting */ bool borg_check_rest(int y, int x) { int i, ii; bool borg_in_vault = false; /* never rest to recover SP (if HP at max) if you only recover */ /* sp in combat */ if (borg_skill[BI_CURHP] == borg_skill[BI_MAXHP] && player_has(player, PF_COMBAT_REGEN)) return false; /* Do not rest recently after killing a multiplier */ /* This will avoid the problem of resting next to */ /* an unkown area full of breeders */ if (when_last_kill_mult > (borg_t - 4) && when_last_kill_mult <= borg_t) return (false); /* No resting if Blessed and good HP and good SP */ /* don't rest for SP if you do combat regen */ if ((borg_bless || borg_hero || borg_berserk || borg_fastcast) && !borg_munchkin_mode && (borg_skill[BI_CURHP] >= borg_skill[BI_MAXHP] * 8 / 10) && (borg_skill[BI_CURSP] >= borg_skill[BI_MAXSP] * 7 / 10)) return (false); /* Set this to Zero */ when_last_kill_mult = 0; /* Most of the time, its ok to rest in a vault */ if (vault_on_level) { 
for (i = -1; i < 1; i++) { for (ii = -1; ii < 1; ii++) { /* check bounds */ if (!square_in_bounds_fully(cave, loc(c_x + ii, c_y + i))) continue; if (borg_grids[c_y + i][c_x + ii].feat == FEAT_PERM) borg_in_vault = true; } } } /* No resting to recover if I just cast a prepatory spell * which is what I like to do right before I take a stair, * Unless I am down by half my SP. */ if (borg_no_rest_prep >= 1 && !borg_munchkin_mode && borg_skill[BI_CURSP] > borg_skill[BI_MAXSP] / 2 && borg_skill[BI_CDEPTH] < 85) return (false); /* Don't rest on lava unless we are immune to fire */ if (borg_grids[y][x].feat == FEAT_LAVA && !borg_skill[BI_IFIRE]) return (false); /* Dont worry about fears if in a vault */ if (!borg_in_vault) { /* Be concerned about the Regional Fear. */ if (borg_fear_region[y / 11][x / 11] > borg_skill[BI_CURHP] / 20 && borg_skill[BI_CDEPTH] != 100) return (false); /* Be concerned about the Monster Fear. */ if (borg_fear_monsters[y][x] > borg_skill[BI_CURHP] / 10 && borg_skill[BI_CDEPTH] != 100) return (false); /* Be concerned about the Monster Danger. */ if (borg_danger(y, x, 1, true, false) > borg_skill[BI_CURHP] / 40 && borg_skill[BI_CDEPTH] >= 85) return (false); /* Be concerned if low on food */ if ((borg_skill[BI_CURLITE] == 0 || borg_skill[BI_ISWEAK] || borg_skill[BI_FOOD] < 2) && !borg_munchkin_mode) return (false); } /* Examine all the monsters */ for (i = 1; i < borg_kills_nxt; i++) { borg_kill* kill = &borg_kills[i]; struct monster_race* r_ptr = &r_info[kill->r_idx]; int x9 = kill->x; int y9 = kill->y; int ax, ay, d; int p = 0; /* Skip dead monsters */ if (!kill->r_idx) continue; /* Distance components */ ax = (x9 > x) ? (x9 - x) : (x - x9); ay = (y9 > y) ? 
(y9 - y) : (y - y9); /* Distance */ d = MAX(ax, ay); /* Minimal distance */ if (d > z_info->max_range) continue; /* if too close to a Mold or other Never-Mover, don't rest */ if (d < 2 && !(rf_has(r_ptr->flags, RF_NEVER_MOVE))) return (false); if (d == 1) return (false); /* if too close to a Multiplier, don't rest */ if (d < 10 && (rf_has(r_ptr->flags, RF_MULTIPLY))) return (false); /* If monster is asleep, dont worry */ if (!kill->awake && d > 8 && !borg_munchkin_mode) continue; /* one call for dangers */ p = borg_danger_aux(y9, x9, 1, i, true, true); /* Ignore proximity checks while inside a vault */ if (!borg_in_vault) { /* Real scary guys pretty close */ if (d < 5 && (p > avoidance / 3) && !borg_munchkin_mode) return (false); /* scary guys far away */ /*if (d < 17 && d > 5 && (p > avoidance/3)) return (false); */ } /* should check LOS... monster to me concerned for Ranged Attacks */ if (borg_los(y9, x9, y, x) && kill->ranged_attack) return false; /* Special handling for the munchkin mode */ if (borg_munchkin_mode && borg_los(y9, x9, y, x) && (kill->awake && !(rf_has(r_ptr->flags, RF_NEVER_MOVE)))) return false; /* if it walks through walls, not safe */ if ((rf_has(r_ptr->flags, RF_PASS_WALL)) && !borg_in_vault) return false; if (rf_has(r_ptr->flags, RF_KILL_WALL) && !borg_in_vault) return false; } return true; } /* * Attempt to recover from damage and such after a battle * * Note that resting while in danger is counter-productive, unless * the danger is low, in which case it may induce "farming". * * Note that resting while recall is active will often cause you * to lose hard-won treasure from nasty monsters, so we disable * resting when waiting for recall in the dungeon near objects. * * First we try spells/prayers, which are "free", and then we * try food, potions, scrolls, staffs, rods, artifacts, etc. 
* * XXX XXX XXX * Currently, we use healing spells as if they were "free", but really, * this is only true if the "danger" is less than the "reward" of doing * the healing spell, and if there are no monsters which might soon step * around the corner and attack. */ bool borg_recover(void) { int p = 0; int q; enum borg_need need; /*** Handle annoying situations ***/ need = borg_maintain_light(); if (need == BORG_MET_NEED) return true; else if (need == BORG_UNMET_NEED) borg_note(format("# Need to refuel but cant!")); /*** Do not recover when in danger ***/ /* Look around for danger */ p = borg_danger(c_y, c_x, 1, true, false); /* Never recover in dangerous situations */ if (p > avoidance / 4) return (false); /*** Roll for "paranoia" ***/ /* Base roll */ q = randint0(100); /* Half dead */ if (borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 2) q = q - 10; /* Almost dead */ if (borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 4) q = q - 10; /*** Use "cheap" cures ***/ /* Hack -- cure stun */ if (borg_skill[BI_ISSTUN] && (q < 75)) { if (borg_activate_item(act_cure_body) || borg_activate_item(act_cure_critical) || borg_activate_item(act_cure_full) || borg_activate_item(act_cure_full2) || borg_activate_item(act_cure_temp) || borg_activate_item(act_heal3) || borg_spell(MINOR_HEALING) || borg_spell(HEALING) || borg_spell(HERBAL_CURING) || borg_spell(HOLY_WORD)) { /* Take note */ borg_note(format("# Cure Stun - danger %d", p)); return (true); } } /* Hack -- cure stun */ if (borg_skill[BI_ISHEAVYSTUN]) { if (borg_eat_food(TV_MUSHROOM, sv_mush_fast_recovery) || borg_activate_item(act_cure_body) || borg_activate_item(act_cure_critical) || borg_activate_item(act_cure_full) || borg_activate_item(act_cure_full2) || borg_activate_item(act_cure_temp) || borg_activate_item(act_heal3) || borg_spell(MINOR_HEALING) || borg_spell(HEALING) || borg_spell(HERBAL_CURING) || borg_spell(HOLY_WORD)) { /* Take note */ borg_note(format("# Cure Heavy Stun - danger %d", p)); return (true); } } /* Hack -- 
cure cuts */ if (borg_skill[BI_ISCUT] && (q < 75)) { if (borg_activate_item(act_cure_light) || borg_spell(MINOR_HEALING) || borg_spell(HEALING) || borg_spell(HERBAL_CURING) || borg_spell(HOLY_WORD)) { /* Take note */ borg_note(format("# Cure Cuts - danger %d", p)); return (true); } } /* Hack -- cure poison */ if (borg_skill[BI_ISPOISONED] && (q < 75)) { if (borg_eat_food(TV_MUSHROOM, sv_mush_fast_recovery) || borg_activate_item(act_rem_fear_pois) || borg_spell(HERBAL_CURING) || borg_spell(CURE_POISON)) { /* Take note */ borg_note(format("# Cure poison - danger %d", p)); return (true); } } /* Hack -- cure fear */ if (borg_skill[BI_ISAFRAID] && !borg_skill[BI_CRSFEAR] && (q < 75)) { if (borg_eat_food(TV_MUSHROOM, sv_mush_cure_mind) || borg_activate_item(act_rem_fear_pois) || borg_spell(HEROISM) || borg_spell(BERSERK_STRENGTH) || borg_spell(HOLY_WORD)) { /* Take note */ borg_note(format("# Cure fear - danger %d", p)); return (true); } } /* Hack -- satisfy hunger */ if ((borg_skill[BI_ISHUNGRY] || borg_skill[BI_ISWEAK]) && (q < 75)) { if (borg_spell(REMOVE_HUNGER) || borg_spell(HERBAL_CURING)) { return (true); } } /* Hack -- hallucination */ if (borg_skill[BI_ISIMAGE] && (q < 75)) { if (borg_eat_food(TV_MUSHROOM, sv_mush_cure_mind)) { return (true); } } /* Hack -- heal damage */ if ((borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 2) && (q < 75) && p == 0 && (borg_skill[BI_CURSP] > borg_skill[BI_MAXSP] / 4)) { if (borg_activate_item(act_heal1) || borg_activate_item(act_heal2) || borg_activate_item(act_heal3) || borg_spell(HEALING) || borg_spell(HOLY_WORD) || borg_spell(MINOR_HEALING) || borg_spell(HEROISM)) { /* Take note */ borg_note(format("# heal damage (recovering)")); return (true); } } /* cure experience loss with prayer */ if (borg_skill[BI_ISFIXEXP] && (borg_activate_item(act_restore_exp) || borg_activate_item(act_restore_life) || borg_spell(REVITALIZE) || borg_spell(REMEMBRANCE) || (borg_skill[BI_CURHP] > 90 && borg_spell(UNHOLY_REPRIEVE)))) { return (true); } 
/* cure stat drain with prayer */ if ((borg_skill[BI_ISFIXSTR] || borg_skill[BI_ISFIXINT] || borg_skill[BI_ISFIXWIS] || borg_skill[BI_ISFIXDEX] || borg_skill[BI_ISFIXCON] || borg_skill[BI_ISFIXALL]) && (borg_spell(RESTORATION) || borg_spell(REVITALIZE))) { return (true); } /* cure stat drain with prayer */ if ((borg_skill[BI_ISFIXSTR] || borg_skill[BI_ISFIXINT] || borg_skill[BI_ISFIXCON]) && borg_skill[BI_CURHP] > 90 && borg_spell(UNHOLY_REPRIEVE)) { return (true); } /*** Use "expensive" cures ***/ /* Hack -- cure stun */ if (borg_skill[BI_ISSTUN] && (q < 25)) { if (borg_use_staff_fail(sv_staff_curing) || borg_zap_rod(sv_rod_curing) || borg_zap_rod(sv_rod_healing) || borg_activate_item(act_heal1) || borg_activate_item(act_heal2) || borg_quaff_crit(false)) { return (true); } } /* Hack -- cure heavy stun */ if (borg_skill[BI_ISHEAVYSTUN] && (q < 95)) { if (borg_quaff_crit(true) || borg_use_staff_fail(sv_staff_curing) || borg_zap_rod(sv_rod_curing) || borg_zap_rod(sv_rod_healing) || borg_activate_item(act_heal1) || borg_activate_item(act_heal2)) { return (true); } } /* Hack -- cure cuts */ if (borg_skill[BI_ISCUT] && (q < 25)) { if (borg_use_staff_fail(sv_staff_curing) || borg_zap_rod(sv_rod_curing) || borg_zap_rod(sv_rod_healing) || borg_activate_item(act_heal1) || borg_activate_item(act_heal2) || borg_quaff_crit(borg_skill[BI_CURHP] < 10)) { return (true); } } /* Hack -- cure poison */ if (borg_skill[BI_ISPOISONED] && (q < 25)) { if (borg_eat_food(TV_MUSHROOM, sv_mush_fast_recovery) || borg_quaff_potion(sv_potion_cure_poison) || borg_eat_food(TV_FOOD, sv_food_waybread) || borg_eat_food(TV_MUSHROOM, sv_mush_fast_recovery) || borg_quaff_crit(borg_skill[BI_CURHP] < 10) || borg_use_staff_fail(sv_staff_curing) || borg_zap_rod(sv_rod_curing) || borg_activate_item(act_rem_fear_pois)) { return (true); } } /* Hack -- cure blindness */ if (borg_skill[BI_ISBLIND] && (q < 25)) { if (borg_eat_food(TV_MUSHROOM, sv_mush_fast_recovery) || borg_eat_food(TV_FOOD, sv_food_waybread) || 
borg_quaff_potion(sv_potion_cure_light) || borg_quaff_potion(sv_potion_cure_serious) || borg_quaff_crit(false) || borg_use_staff_fail(sv_staff_curing) || borg_zap_rod(sv_rod_curing)) { return (true); } } /* Hack -- cure confusion */ if (borg_skill[BI_ISCONFUSED] && (q < 25)) { if (borg_eat_food(TV_MUSHROOM, sv_mush_cure_mind) || borg_quaff_potion(sv_potion_cure_serious) || borg_quaff_crit(false) || borg_use_staff_fail(sv_staff_curing) || borg_zap_rod(sv_rod_curing)) { return (true); } } /* Hack -- cure fear */ if (borg_skill[BI_ISAFRAID] && !borg_skill[BI_CRSFEAR] && (q < 25)) { if (borg_eat_food(TV_MUSHROOM, sv_mush_cure_mind) || borg_quaff_potion(sv_potion_boldness) || borg_quaff_potion(sv_potion_heroism) || borg_quaff_potion(sv_potion_berserk) || borg_activate_item(act_rem_fear_pois)) { return (true); } } /* Hack -- satisfy hunger */ if ((borg_skill[BI_ISHUNGRY] || borg_skill[BI_ISWEAK]) && (q < 25)) { if (borg_read_scroll(sv_scroll_satisfy_hunger)) { return (true); } } /* Hack -- heal damage */ if ((borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] / 2) && (q < 25)) { if (borg_zap_rod(sv_rod_healing) || borg_quaff_potion(sv_potion_cure_serious) || borg_quaff_crit(false) || borg_activate_item(act_cure_serious)) { return (true); } } /* Hack -- Rest to recharge Rods of Healing or Recall*/ if (borg_has[kv_rod_recall] || borg_has[kv_rod_healing]) { /* Step 1. Recharge just 1 rod. 
*/ if ((borg_has[kv_rod_healing] && !borg_items[borg_slot(TV_ROD, sv_rod_healing)].pval) || (borg_has[kv_rod_recall] && !borg_items[borg_slot(TV_ROD, sv_rod_recall)].pval)) { /* Mages can cast the recharge spell */ /* Rest until at least one recharges */ if (!borg_skill[BI_ISWEAK] && !borg_skill[BI_ISCUT] && !borg_skill[BI_ISHUNGRY] && !borg_skill[BI_ISPOISONED] && borg_check_rest(c_y, c_x) && !borg_spell_okay(RECHARGING)) { /* Take note */ borg_note("# Resting to recharge a rod..."); /* Reset the Bouncing-borg Timer */ time_this_panel = 0; /* Rest until done */ borg_keypress('R'); borg_keypress('1'); borg_keypress('0'); borg_keypress('0'); borg_keypress(KC_ENTER); /* I'm not in a store */ borg_in_shop = false; /* Done */ return (true); } } } /*** Just Rest ***/ /* Hack -- rest until healed */ if (!borg_skill[BI_ISBLIND] && !borg_skill[BI_ISPOISONED] && !borg_skill[BI_ISCUT] && !borg_skill[BI_ISWEAK] && !borg_skill[BI_ISHUNGRY] && (borg_skill[BI_ISCONFUSED] || borg_skill[BI_ISIMAGE] || borg_skill[BI_ISAFRAID] || borg_skill[BI_ISSTUN] || borg_skill[BI_ISHEAVYSTUN] || borg_skill[BI_CURHP] < borg_skill[BI_MAXHP] || borg_skill[BI_CURSP] < borg_skill[BI_MAXSP] * (borg_skill[BI_CDEPTH] > 85 ? 
7 : 6) / 10)) { if (borg_check_rest(c_y, c_x) && !scaryguy_on_level && p <= borg_fear_region[c_y / 11][c_x / 11] && goal != GOAL_RECOVER) { /* check for then call lite in dark room before resting */ if (!borg_check_LIGHT_only()) { /* Take note */ borg_note(format("# Resting to recover HP/SP...")); /* Rest until done */ borg_keypress('R'); borg_keypress('&'); borg_keypress(KC_ENTER); /* Reset our panel clock, we need to be here */ time_this_panel = 0; /* reset the inviso clock to avoid loops */ need_see_inviso = borg_t - 50; /* Done */ return (true); } else { /* Must have been a dark room */ borg_note(format("# Lighted the darkened room instead of resting.")); return (true); } } } /* Hack to recharge mana if a low level mage or priest */ if (borg_skill[BI_MAXSP] && (borg_skill[BI_CLEVEL] <= 40 || borg_skill[BI_CDEPTH] >= 85) && borg_skill[BI_CURSP] < (borg_skill[BI_MAXSP] * 8 / 10) && p < avoidance * 1 / 10 && borg_check_rest(c_y, c_x)) { if (!borg_skill[BI_ISWEAK] && !borg_skill[BI_ISCUT] && !borg_skill[BI_ISHUNGRY] && !borg_skill[BI_ISPOISONED] && borg_skill[BI_FOOD] > 2 && !borg_munchkin_mode) { /* Take note */ borg_note(format("# Resting to gain Mana. (danger %d)...", p)); /* Rest until done */ borg_keypress('R'); borg_keypress('*'); borg_keypress(KC_ENTER); /* I'm not in a store */ borg_in_shop = false; /* Done */ return (true); } } /* Hack to recharge mana if a low level mage in munchkin mode */ if (borg_skill[BI_MAXSP] && borg_munchkin_mode == true && (borg_skill[BI_CURSP] < borg_skill[BI_MAXSP] || borg_skill[BI_CURHP] < borg_skill[BI_MAXHP]) && borg_check_rest(c_y, c_x)) { if (!borg_skill[BI_ISWEAK] && !borg_skill[BI_ISCUT] && !borg_skill[BI_ISHUNGRY] && !borg_skill[BI_ISPOISONED] && borg_skill[BI_FOOD] > 2 && (borg_grids[c_y][c_x].feat == FEAT_MORE || borg_grids[c_y][c_x].feat == FEAT_LESS)) { /* Take note */ borg_note(format("# Resting to gain munchkin HP/mana. 
(danger %d)...", p)); /* Rest until done */ borg_keypress('R'); borg_keypress('*'); borg_keypress(KC_ENTER); /* I'm not in a store */ borg_in_shop = false; /* Done */ return (true); } } /* Hack to heal blindness if in munchkin mode */ if (borg_skill[BI_ISBLIND] && borg_munchkin_mode == true) { /* Take note */ borg_note("# Resting to cure problem. (danger %d)..."); /* Rest until done */ borg_keypress('R'); borg_keypress('*'); borg_keypress(KC_ENTER); /* I'm not in a store */ borg_in_shop = false; /* Done */ return (true); } /* Nope */ return (false); } /* * Check if the borg can dig. * check_fail = check if the spell failure rate is too high * hard = check if hard things, like granite, can be dug */ static bool borg_can_dig(bool check_fail, bool hard) { int dig_check = hard ? BORG_DIG_HARD : BORG_DIG; if ((weapon_swap && borg_skill[BI_DIG] >= dig_check && borg_items[weapon_swap -1].tval == TV_DIGGING) || (borg_skill[BI_DIG] >= dig_check + 20)) return true; if (check_fail) { if (borg_spell_legal_fail(TURN_STONE_TO_MUD, 40) || borg_spell_legal_fail(SHATTER_STONE, 40) || borg_equips_item(act_stone_to_mud, true) || borg_equips_ring(sv_ring_digging)) return true; } else { if (borg_spell_legal(TURN_STONE_TO_MUD) || borg_spell_legal(SHATTER_STONE) || borg_equips_item(act_stone_to_mud, false) || borg_equips_ring(sv_ring_digging)) return true; } return false; } /* * Take one "step" towards the given location, return true if possible */ static bool borg_play_step(int y2, int x2) { borg_grid* ag; borg_grid* ag2; ui_event ch_evt = EVENT_EMPTY; int dir, x, y, ox, oy, i; int o_y = 0, o_x = 0, door_found = 0; /* Breeder levels, close all doors */ if (breeder_level) { /* scan the adjacent grids */ for (ox = -1; ox <= 1; ox++) { for (oy = -1; oy <= 1; oy++) { /* skip our own spot */ if ((oy + c_y == c_y) && (ox + c_x == c_x)) continue; /* Acquire location */ ag = &borg_grids[oy + c_y][ox + c_x]; /* skip non open doors */ if (ag->feat != FEAT_OPEN) continue; /* skip monster on door 
*/ if (ag->kill) continue; /* Skip repeatedly closed doors */ if (track_door.num >= 255) continue; /* skip our orignal goal */ if ((oy + c_y == y2) && (ox + c_x == x2)) continue; /* save this spot */ o_y = oy; o_x = ox; door_found++; } } /* Is there a door to close? */ if (door_found) { /* Get a direction, if possible */ dir = borg_goto_dir(c_y, c_x, c_y + o_y, c_x + o_x); /* Obtain the destination */ x = c_x + ddx[dir]; y = c_y + ddy[dir]; /* Hack -- set goal */ g_x = x; g_y = y; /* Close */ borg_note("# Closing a door"); borg_keypress('c'); borg_queue_direction(I2D(dir)); /* Check for an existing flag */ for (i = 0; i < track_door.num; i++) { /* Stop if we already new about this door */ if ((track_door.x[i] == x) && (track_door.y[i] == y)) return (true); } /* Track the newly closed door */ if (i == track_door.num && i < track_door.size) { borg_note("# Noting the closing of a door."); track_door.num++; track_door.x[i] = x; track_door.y[i] = y; } return (true); } } /* Stand stairs up */ if (goal_less) { /* Define the grid we are looking at to be our own grid */ ag = &borg_grids[c_y][c_x]; /* Up stairs. Cheat the game grid info in. (cave_feat[c_y][c_x] == FEAT_LESS) */ if (ag->feat == FEAT_LESS) { /* Stand on stairs */ borg_on_dnstairs = true; goal_less = false; borg_keypress('<'); /* Success */ return (true); } } /* Get a direction, if possible */ dir = borg_goto_dir(c_y, c_x, y2, x2); /* We have arrived */ if (dir == 5) return (false); /* Obtain the destination */ x = c_x + ddx[dir]; y = c_y + ddy[dir]; /* Access the grid we are stepping on */ ag = &borg_grids[y][x]; /* Hack -- set goal */ g_x = x; g_y = y; /* Monsters -- Attack */ if (ag->kill) { borg_kill* kill = &borg_kills[ag->kill]; /* can't attack someone if afraid! */ if (borg_skill[BI_ISAFRAID] || borg_skill[BI_CRSFEAR]) return (false); /* Hack -- ignore Maggot until later. 
*/ if ((rf_has(r_info[kill->r_idx].flags, RF_UNIQUE)) && borg_skill[BI_CDEPTH] == 0 && borg_skill[BI_CLEVEL] < 5) return (false); /* Message */ borg_note(format("# Walking into a '%s' at (%d,%d)", r_info[kill->r_idx].name, kill->y, kill->x)); /* Walk into it */ if (my_no_alter) { borg_keypress(';'); my_no_alter = false; } else { borg_keypress('+'); } borg_keypress(I2D(dir)); return (true); } /* Objects -- Take */ if (ag->take && borg_takes[ag->take].kind) { borg_take* take = &borg_takes[ag->take]; /*** Handle Chests ***/ /* The borg will cheat when it comes to chests. * He does not have to but it makes him faster and * it does not give him any information that a * person would not know by looking at the trap. * So there is no advantage to the borg. */ if (strstr(take->kind->name, "chest") && !strstr(take->kind->name, "Ruined")) { struct object* o_ptr = square_object(cave, loc(x2, y2)); /* this should only happen when something picks up the chest */ /* outside the borgs view. */ if (!o_ptr) { borg_delete_take(ag->take); return false; } /* Traps. Disarm it w/ fail check */ if (o_ptr->pval > 1 && o_ptr->known && borg_skill[BI_DEV] - o_ptr->pval >= borg_cfg[BORG_CHEST_FAIL_TOLERANCE]) { borg_note(format("# Disarming a '%s' at (%d,%d)", take->kind->name, take->y, take->x)); /* Open it */ borg_keypress('D'); borg_queue_direction(I2D(dir)); return (true); } /* No trap, or unknown trap that passed above checks - Open it */ if (o_ptr->pval < 0 || !o_ptr->known) { borg_note(format("# Opening a '%s' at (%d,%d)", take->kind->name, take->y, take->x)); /* Open it */ borg_keypress('o'); borg_queue_direction(I2D(dir)); return (true); } /* Empty chest */ /* continue in routine and pick it up */ } /*** Handle Orb of Draining ***/ /* Priest/Paladin borgs who have very limited ID ability can save some money and * inventory space my casting Orb of Draining on objects. Cursed objects will melt * under the Orb of Draining spell. 
This will save the borg from carrying the item * around until he can ID it. * * By default, the flag ORBED is set to false when an item is created. If the borg * gets close to an item, and the conditions are favorable, he will cast OoD on the * item and change the flag. */ if (take->orbed == false && (take->tval >= TV_SHOT && take->tval < TV_STAFF)) { if (borg_distance(take->y, take->x, c_y, c_x) == 1) { if (borg_spell_okay_fail(ORB_OF_DRAINING, 25)) { /* Target the Take location */ borg_target(take->y, take->x); /* Cast the prayer */ borg_spell(ORB_OF_DRAINING); /* Message */ borg_note("# Orbing an object to check for cursed item."); /* use the old target */ borg_keypress('5'); /* Change the take flag */ take->orbed = true; /* check the blast radius of the prayer for other items */ for (i = 0; i < 24; i++) { /* Extract the location */ int xx = take->x + borg_ddx_ddd[i]; int yy = take->y + borg_ddy_ddd[i]; /* Check the grid for a take */ if (!square_in_bounds_fully(cave, loc(xx, yy))) continue; ag2 = &borg_grids[yy][xx]; if (ag2->take) { /* This item was orbed (mostly true)*/ borg_takes[borg_grids[yy][xx].take].orbed = true; } } /* Return */ return (true); } } } /*** Handle other takes ***/ /* Message */ borg_note(format("# Walking onto and deleting a '%s' at (%d,%d)", take->kind->name, take->y, take->x)); /* Delete the item from the list */ borg_delete_take(ag->take); /* Walk onto it */ borg_keypress(I2D(dir)); return (true); } /* Glyph of Warding */ if (ag->glyph) { /* Message */ borg_note(format("# Walking onto a glyph of warding.")); /* Walk onto it */ borg_keypress(I2D(dir)); return (true); } /* Traps -- disarm -- */ if (borg_skill[BI_CURLITE] && !borg_skill[BI_ISBLIND] && !borg_skill[BI_ISCONFUSED] && !scaryguy_on_level && ag->trap) { /* NOTE: If a scary guy is on the level, we allow the borg to run over the * trap in order to escape this level. 
*/ /* allow "destroy doors" */ /* don't bother unless we are near full mana */ if (borg_skill[BI_CURSP] > ((borg_skill[BI_MAXSP] * 4) / 5)) { if (borg_spell(DISABLE_TRAPS_DESTROY_DOORS)) { borg_note("# Disable Traps, Destroy Doors"); ag->trap = 0; /* since this just disables the trap and doesn't remove it, */ /* don't rest next to it */ borg_no_rest_prep = 3000; return (true); } } /* Disarm */ borg_note("# Disarming a trap"); borg_keypress('D'); borg_queue_direction(I2D(dir)); /* We are not sure if the trap will get 'untrapped'. pretend it will*/ ag->trap = 0; return (true); } /* Closed Doors -- Open */ if (ag->feat == FEAT_CLOSED) { /* Paranoia XXX XXX XXX */ if (!randint0(100)) return (false); /* Not a good idea to open locked doors if a monster * is next to the borg beating on him */ /* scan the adjacent grids */ for (i = 0; i < 8; i++) { /* Grid in that direction */ x = c_x + ddx_ddd[i]; y = c_y + ddy_ddd[i]; /* Access the grid */ ag2 = &borg_grids[y][x]; /* If monster adjacent to me and I'm weak, dont * even try to open the door */ if (ag2->kill && borg_skill[BI_CLEVEL] < 15 && !borg_skill[BI_ISAFRAID]) return (false); } /* Use other techniques from time to time */ if (!randint0(100) || time_this_panel >= 500) { /* Mega-Hack -- allow "destroy doors" */ if (borg_spell(DISABLE_TRAPS_DESTROY_DOORS)) { borg_note("# Disable Traps, Destroy Doors"); return (true); } /* Mega-Hack -- allow "stone to mud" */ if (borg_spell(TURN_STONE_TO_MUD) || borg_spell(SHATTER_STONE) || borg_activate_ring(sv_ring_digging) || borg_activate_item(act_stone_to_mud)) { borg_note("# Melting a door"); borg_keypress(I2D(dir)); /* Remove this closed door from the list. * Its faster to clear all doors from the list * then rebuild the list. 
*/ if (track_closed.num) { track_closed.num = 0; } return (true); } } /* Open */ if (my_need_alter) { borg_keypress('+'); my_need_alter = false; } else { borg_note("# Opening a door"); borg_keypress('o'); } borg_queue_direction(I2D(dir)); /* Remove this closed door from the list. * Its faster to clear all doors from the list * then rebuild the list. */ if (track_closed.num) { track_closed.num = 0; } return (true); } /* Rubble, Treasure, Seams, Walls -- Tunnel or Melt */ /* HACK depends on FEAT order, kinda evil. */ if (ag->feat >= FEAT_SECRET && ag->feat <= FEAT_GRANITE) { /* No digging when hungry */ if (borg_skill[BI_ISHUNGRY]) return false; /* Don't dig walls and seams when exploring (do dig rubble) */ if (ag->feat != FEAT_RUBBLE && goal == GOAL_DARK) return false; /* Don't bother digging without sufficient dig ability */ if (!borg_can_dig(false, false) && ag->feat != FEAT_RUBBLE) { goal = 0; return false; } if (ag->feat == FEAT_GRANITE && !borg_can_dig(false, true)) { goal = 0; return false; } /* Use Stone to Mud when available */ if (borg_spell(TURN_STONE_TO_MUD) || borg_spell(SHATTER_STONE) || borg_activate_ring(sv_ring_digging) || borg_activate_item(act_stone_to_mud)) { borg_note("# Melting a wall/etc"); borg_keypress(I2D(dir)); /* Forget number of mineral veins to force rebuild of vein list */ track_vein.num = 0; return true; } /* Mega-Hack -- prevent infinite loops */ if (randint0(500) <= 5 && !vault_on_level) return false; /* Switch to a digger if we have one is automatic */ /* Dig */ borg_note("# Digging through wall/etc"); borg_keypress('T'); borg_keypress(I2D(dir)); /* Forget number of mineral veins to force rebuild of vein list */ /* XXX Maybe only do this if successful? 
*/ track_vein.num = 0; return true; } /* Shops -- Enter */ if (feat_is_shop(ag->feat)) { /* Message */ borg_note(format("# Entering a '%d' shop", ag->store)); /* Enter the shop */ borg_keypress(I2D(dir)); return (true); } /* Walk in that direction */ if (my_need_alter) { borg_keypress('+'); my_need_alter = false; } else { /* nothing */ } /* Actually enter the direction */ borg_keypress(I2D(dir)); /* I'm not in a store */ borg_in_shop = false; /* for some reason, selling and buying in the store sets the event handler to Select. * This is a game bug not a borg bug. The borg is trying to overcome the game bug. * But he still has some troubles unhooking in town after shopping. Again, this is * due to the event handler. The handler should release the EVT_SELECT but it does not. */ if (ch_evt.type & EVT_SELECT) ch_evt.type = EVT_KBRD; if (ch_evt.type & EVT_MOVE) ch_evt.type = EVT_KBRD; /* Did something */ return (true); } /* * Act twitchy */ bool borg_twitchy(void) { int dir = 5; int count; /* This is a bad thing */ borg_note("# Twitchy!"); /* try to phase out of it */ if (borg_allow_teleport()) { if (borg_caution_phase(15, 2) && (borg_spell_fail(PHASE_DOOR, 40) || borg_spell_fail(PORTAL, 40) || borg_shadow_shift(40) || borg_activate_item(act_tele_phase) || borg_activate_item(act_tele_long) || borg_read_scroll(sv_scroll_phase_door))) { /* We did something */ return (true); } } /* Pick a random direction */ count = 100; while (true) { dir = randint0(9); if (dir == 5 || dir == 0) continue; if (!(count--)) break; /* Hack -- set goal */ g_x = c_x + ddx[dir]; g_y = c_y + ddy[dir]; if (!square_in_bounds_fully(cave, loc(g_x, g_y))) continue; if (borg_grids[g_y][g_x].feat >= FEAT_SECRET && borg_grids[g_y][g_x].feat <= FEAT_PERM) continue; break; } if (!count) { bool all_walls = true; for (dir = 1; dir < 10; dir++) { if (dir == 5) continue; if (!square_in_bounds_fully(cave, loc(g_x, g_y))) continue; if (borg_grids[g_y][g_x].feat >= FEAT_SECRET && borg_grids[g_y][g_x].feat <= 
FEAT_PERM) continue; all_walls = false; break; } if (all_walls) { /* Rest until done */ borg_keypress('R'); borg_keypress('1'); borg_keypress('0'); borg_keypress('0'); borg_keypress(KC_ENTER); /* We did something */ return (true); } } /* Normally move */ /* Send direction */ borg_keypress(I2D(dir)); /* We did something */ return (true); } /* * Commit the current "flow" */ static bool borg_flow_commit(const char* who, int why) { int cost; /* Cost of current grid */ cost = borg_data_cost->data[c_y][c_x]; /* Verify the total "cost" */ if (cost >= 250) return (false); /* Message */ if (who) borg_note(format("# Flowing toward %s at cost %d", who, cost)); /* Obtain the "flow" information */ memcpy(borg_data_flow, borg_data_cost, sizeof(borg_data)); /* Save the goal type */ goal = why; /* Success */ return (true); } /* * Attempt to take an optimal step towards the current goal location * * Note that the "borg_update()" routine notices new monsters and objects, * and movement of monsters and objects, and cancels any flow in progress. * * Note that the "borg_update()" routine notices when a grid which was * not thought to block motion is discovered to in fact be a grid which * blocks motion, and removes that grid from any flow in progress. * * When given multiple alternative steps, this function attempts to choose * the "safest" path, by penalizing grids containing embedded gold, monsters, * rubble, doors, traps, store doors, and even floors. This allows the Borg * to "step around" dangerous grids, even if this means extending the path by * a step or two, and encourages him to prefer grids such as objects and stairs * which are not only interesting but which are known not to be invisible traps. * * XXX XXX XXX XXX This function needs some work. It should attempt to * analyze the "local region" around the player and determine the optimal * choice of locations based on some useful computations. * * If it works, return true, otherwise, cancel the goal and return false. 
 */
bool borg_flow_old(int why)
{
    int x, y;

    /* Continue only if the active flow matches the requested goal type */
    if (goal == why) {
        int b_n = 0;

        int i, b_i = -1;

        int c, b_c;

        /* Flow cost of current grid (scaled by 10 for the tie-break math) */
        b_c = borg_data_flow->data[c_y][c_x] * 10;

        /* Prevent loops -- only accept strictly cheaper neighbors */
        b_c = b_c - 5;

        /* Look around at the eight neighbors */
        for (i = 0; i < 8; i++) {
            /* Grid in that direction */
            x = c_x + ddx_ddd[i];
            y = c_y + ddy_ddd[i];

            /* Flow cost at that grid */
            c = borg_data_flow->data[y][x] * 10;

            /* Never backtrack */
            if (c > b_c) continue;

            /* avoid screen edges */
            if (x > AUTO_MAX_X - 1 || x < 1 || y > AUTO_MAX_Y - 1 || y < 1)
                continue;

            /* Notice new best value -- reset the equivalence counter */
            if (c < b_c) b_n = 0;

            /* Apply the randomizer to equivalent values.  In town the tie is
             * broken randomly; in the dungeon the first candidate wins. */
            if (borg_skill[BI_CDEPTH] == 0 && (++b_n >= 2)
                && (randint0(b_n) != 0))
                continue;
            else if (borg_skill[BI_CDEPTH] >= 1 && ++b_n >= 2)
                continue;

            /* Special case when digging anti-summon corridor */
            if (goal == GOAL_DIGGING && (ddx_ddd[i] == 0 || ddy_ddd[i] == 0)) {
                /* No straight lines near the corridor destination */
                if (borg_distance(c_y, c_x, borg_flow_y[0], borg_flow_x[0]) <= 2)
                    continue;
            }

            /* Track it */
            b_i = i;
            b_c = c;
        }

        /* Try it */
        if (b_i >= 0) {
            /* Access the location */
            x = c_x + ddx_ddd[b_i];
            y = c_y + ddy_ddd[b_i];

            /* Attempt motion */
            if (borg_play_step(y, x)) return (true);
        }

        /* Mark a timestamp to wait on a anti-summon spot for a few turns */
        if (goal == GOAL_DIGGING && c_y == borg_flow_y[0]
            && c_x == borg_flow_x[0])
            borg_t_antisummon = borg_t;

        /* Cancel goal */
        goal = 0;
    }

    /* Nothing to do */
    return (false);
}

/*
 * Prepare to flee the level via stairs (either direction).
 *
 * Enqueues every known up- and down-staircase that is not occupied by a
 * monster, spreads the flow, and takes one step.  Returns true if a step
 * was taken.
 */
bool borg_flow_stair_both(int why, bool sneak)
{
    int i;

    /* None to flow to */
    if (!track_less.num && !track_more.num) return (false);

    /* don't go down if hungry or low on food, unless fleeing a scary town */
    if (!goal_fleeing && !scaryguy_on_level && !track_less.num
        && (avoidance <= borg_skill[BI_CURHP] * 15 / 10)
        && (borg_skill[BI_ISWEAK] || borg_skill[BI_ISHUNGRY]
            || borg_skill[BI_FOOD] < 2))
        return (false);

    /* Absolutely no diving if no light */
    if (borg_skill[BI_CURLITE] == 0 && borg_skill[BI_CDEPTH] != 0
        && borg_munchkin_mode == false)
        return (false);

    /* clear the possible searching flag */
    borg_needs_searching = false;

    /* Clear the flow codes */
    borg_flow_clear();

    /* Enqueue useful grids -- up staircases */
    for (i = 0; i < track_less.num; i++) {
        /* Not if a monster is parked on the stair */
        if (borg_grids[track_less.y[i]][track_less.x[i]].kill) continue;

        /* Enqueue the grid */
        borg_flow_enqueue_grid(track_less.y[i], track_less.x[i]);
    }

    /* Enqueue useful grids -- down staircases */
    for (i = 0; i < track_more.num; i++) {
        /* Not if a monster is parked on the stair */
        if (borg_grids[track_more.y[i]][track_more.x[i]].kill) continue;

        /* Enqueue the grid */
        borg_flow_enqueue_grid(track_more.y[i], track_more.x[i]);
    }

    /* Spread the flow */
    borg_flow_spread(250, false, false, false, -1, sneak);

    /* Attempt to Commit the flow */
    if (!borg_flow_commit("stairs", why)) return (false);

    /* Take one step */
    if (!borg_flow_old(why)) return (false);

    /* Success */
    return (true);
}

/*
 * Prepare to flow towards "up" stairs.
 *
 * Returns true if a step towards a known up-staircase was taken.
 */
bool borg_flow_stair_less(int why, bool sneak)
{
    int i;

    /* None to flow to */
    if (!track_less.num) return (false);

    /* Clear the flow codes */
    borg_flow_clear();

    /* clear the possible searching flag */
    borg_needs_searching = false;

    /* Enqueue useful grids */
    for (i = 0; i < track_less.num; i++) {
        /* Not if a monster is parked on the stair */
        if (borg_grids[track_less.y[i]][track_less.x[i]].kill) continue;

        /* Enqueue the grid */
        borg_flow_enqueue_grid(track_less.y[i], track_less.x[i]);
    }

    /* High-level (or blind) borgs take the direct path; low-level borgs
     * avoid dangerous grids along the way. */
    if (borg_skill[BI_CLEVEL] > 35 || borg_skill[BI_CURLITE] == 0) {
        /* Spread the flow */
        borg_flow_spread(250, true, false, false, -1, sneak);
    } else {
        /* Spread the flow, No Optimize, Avoid */
        borg_flow_spread(250, false, !borg_desperate, false, -1, sneak);
    }

    /* Attempt to Commit the flow */
    if (!borg_flow_commit("up-stairs", why)) return (false);

    /* Take one step */
    if (!borg_flow_old(why)) return (false);

    /* Success */
    return (true);
}

/*
 * Prepare to flow towards "down" stairs.
 *
 * `brave` bypasses the usual preparation/food/selling safety checks.
 * Returns true if a step towards a known down-staircase was taken.
 */
bool borg_flow_stair_more(int why, bool sneak, bool brave)
{
    int i;

    /* None to flow to */
    if (!track_more.num) return (false);

    /* not unless safe or munchkin/Lunal Mode or brave */
    if (!borg_lunal_mode && !borg_munchkin_mode && !brave
        && (char*)NULL != borg_prepared(borg_skill[BI_CDEPTH] + 1))
        return (false);

    /* don't go down if hungry or low on food, unless fleeing a scary town */
    if (!brave && borg_skill[BI_CDEPTH] && !scaryguy_on_level
        && (borg_skill[BI_ISWEAK] || borg_skill[BI_ISHUNGRY]
            || borg_skill[BI_FOOD] < 2))
        return (false);

    /* If I need to sell crap, then don't go down */
    if (borg_skill[BI_CDEPTH] && borg_skill[BI_CLEVEL] < 25
        && borg_gold < 25000 && borg_count_sell() >= 13
        && !borg_munchkin_mode)
        return (false);

    /* No diving if no light */
    if (borg_skill[BI_CURLITE] == 0 && borg_munchkin_mode == false)
        return (false);

    /* don't head for the stairs if you are recalling, */
    /* even if you are fleeing. */
    if (goal_recalling) return (false);

    /* Clear the flow codes */
    borg_flow_clear();

    /* Enqueue useful grids */
    for (i = 0; i < track_more.num; i++) {
        /* Not if a monster is parked on the stair */
        if (borg_grids[track_more.y[i]][track_more.x[i]].kill) continue;

        /* Enqueue the grid */
        borg_flow_enqueue_grid(track_more.y[i], track_more.x[i]);
    }

    /* Spread the flow */
    borg_flow_spread(250, true, false, false, -1, sneak);

    /* Attempt to Commit the flow */
    if (!borg_flow_commit("down-stairs", why)) return (false);

    /* Take one step */
    if (!borg_flow_old(why)) return (false);

    /* Success */
    return (true);
}

/*
 * Hack -- Glyph creating
 */
static uint8_t glyph_x;
static uint8_t glyph_y;
static uint8_t glyph_y_center = 0;
static uint8_t glyph_x_center = 0;

/*
 * Prepare to flow towards a location and create a
 * special glyph of warding pattern.
 *
 * The borg will look for a room that is at least 7x7.
 * ##########
 * #3.......#
 * #2.xxxxx.#
 * #1.xxxxx.#
 * #0.xx@xx.#
 * #1.xxxxx.#
 * #2.xxxxx.#
 * #3.......#
 * # 3210123#
 * ##########
 * and when he locates one, he will attempt to:
 * 1.
flow to a central location and * 2. begin planting Runes in a pattern. When complete, * 3. move to the center of it. */ /* * ghijk The borg will use the following ddx and ddy to search * d827a for a suitable grid in an open room. * e4@3b * f615c * lmnop 24 grids * */ bool borg_flow_glyph(int why) { int i; int cost; int x, y; int v = 0; int b_x = c_x; int b_y = c_y; int b_v = -1; int goal_glyph = 0; int glyph = 0; borg_grid* ag; if ((glyph_y_center == 0 && glyph_x_center == 0) || borg_distance(c_y, c_x, glyph_y_center, glyph_x_center) >= 50) { borg_needs_new_sea = true; } /* We have arrived */ if ((glyph_x == c_x) && (glyph_y == c_y)) { /* Cancel */ glyph_x = 0; glyph_y = 0; /* Store the center of the glyphs */ if (borg_needs_new_sea) { glyph_y_center = c_y; glyph_x_center = c_x; } borg_needs_new_sea = false; /* Take note */ borg_note(format("# Glyph Creating at (%d,%d)", c_x, c_y)); /* Create the Glyph */ if (borg_spell_fail(GLYPH_OF_WARDING, 30) || borg_read_scroll(sv_scroll_rune_of_protection)) { /* Check for an existing glyph */ for (i = 0; i < track_glyph.num; i++) { /* Stop if we already new about this glyph */ if ((track_glyph.x[i] == c_x) && (track_glyph.y[i] == c_y)) return (false); } /* Track the newly discovered glyph */ if (track_glyph.num < track_glyph.size) { borg_note("# Noting the creation of a glyph."); track_glyph.x[track_glyph.num] = c_x; track_glyph.y[track_glyph.num] = c_y; track_glyph.num++; } /* Success */ return (true); } /* Nope */ return (false); } /* Reverse flow */ borg_flow_reverse(250, true, false, false, -1, false); /* Scan the entire map */ for (y = 15; y < AUTO_MAX_Y - 15; y++) { for (x = 50; x < AUTO_MAX_X - 50; x++) { borg_grid* ag_ptr[24]; int floor = 0; int tmp_glyph = 0; /* Acquire the grid */ ag = &borg_grids[y][x]; /* Skip every non floor/glyph */ if (ag->feat != FEAT_FLOOR && ag->glyph) continue; /* Acquire the cost */ cost = borg_data_cost->data[y][x]; /* Skip grids that are really far away. 
He probably * won't be able to safely get there */ if (cost >= 75) continue; /* Extract adjacent locations to each considered grid */ for (i = 0; i < 24; i++) { /* Extract the location */ int xx = x + borg_ddx_ddd[i]; int yy = y + borg_ddy_ddd[i]; /* Get the grid contents */ ag_ptr[i] = &borg_grids[yy][xx]; } /* Center Grid */ if (borg_needs_new_sea) { goal_glyph = 24; /* Count Adjacent Flooors */ for (i = 0; i < 24; i++) { ag = ag_ptr[i]; if (ag->feat == FEAT_FLOOR || ag->glyph) floor++; } /* Not a good location if not the center of the sea */ if (floor != 24) { continue; } /* Count floors already glyphed */ for (i = 0; i < 24; i++) { ag = ag_ptr[i]; /* Glyphs */ if (ag->glyph) { tmp_glyph++; } } /* Tweak -- Reward certain floors, punish distance */ v = 100 + (tmp_glyph * 500) - (cost * 1); if (borg_grids[y][x].feat == FEAT_FLOOR) v += 3000; /* If this grid is surrounded by glyphs, select it */ if (tmp_glyph == goal_glyph) v += 5000; /* If this grid is already glyphed but not * surrounded by glyphs, then choose another. 
*/ if (tmp_glyph != goal_glyph && borg_grids[y][x].glyph) v = -1; /* The grid is not searchable */ if (v <= 0) continue; /* Track "best" grid */ if ((b_v >= 0) && (v < b_v)) continue; /* Save the data */ b_v = v; b_x = x; b_y = y; } /* old center, making outlying glyphs, */ else { /* Count Adjacent Flooors */ for (i = 0; i < 24; i++) { /* Leave if this grid is not in good array */ if (glyph_x_center + borg_ddx_ddd[i] != x) continue; if (glyph_y_center + borg_ddy_ddd[i] != y) continue; /* Already got a glyph on it */ if (borg_grids[y][x].glyph) continue; /* Tweak -- Reward certain floors, punish distance */ v = 500 + (tmp_glyph * 500) - (cost * 1); /* The grid is not searchable */ if (v <= 0) continue; /* Track "best" grid */ if ((b_v >= 0) && (v < b_v)) continue; /* Save the data */ b_v = v; b_x = x; b_y = y; } } } } /* Extract adjacent locations to each considered grid */ if (glyph_y_center != 0 && glyph_x_center != 0) { for (i = 0; i < 24; i++) { /* Extract the location */ int xx = glyph_x_center + borg_ddx_ddd[i]; int yy = glyph_y_center + borg_ddy_ddd[i]; borg_grid* ag_ptr[24]; /* Get the grid contents */ ag_ptr[i] = &borg_grids[yy][xx]; ag = ag_ptr[i]; /* If it is not a glyph, skip it */ if (ag->glyph) glyph++; /* Save the data */ if (glyph == 24) { b_v = 5000; b_x = glyph_x_center; b_y = glyph_y_center; } } } /* Clear the flow codes */ borg_flow_clear(); /* Hack -- Nothing found */ if (b_v < 0) return (false); /* Access grid */ ag = &borg_grids[b_y][b_x]; /* Memorize */ glyph_x = b_x; glyph_y = b_y; /* Enqueue the grid */ borg_flow_enqueue_grid(b_y, b_x); /* Spread the flow */ borg_flow_spread(250, true, false, false, -1, false); /* Attempt to Commit the flow */ if (!borg_flow_commit("Glyph", GOAL_MISC)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_MISC)) return (false); /* Success */ return (true); } /* * Prepare to flow towards light */ bool borg_flow_light(int why) { int y, x, i; /* reset counters */ borg_glow_n = 0; i = 0; /* build the glow 
array */
    /* Scan map for perma-lit grids */
    for (y = w_y; y < w_y + SCREEN_HGT; y++) {
        for (x = w_x; x < w_x + SCREEN_WID; x++) {
            borg_grid* ag = &borg_grids[y][x];

            /* Only consider grids known to glow */
            if (!(ag->info & BORG_GLOW)) continue;

            /* keep count */
            borg_glow_y[borg_glow_n] = y;
            borg_glow_x[borg_glow_n] = x;
            borg_glow_n++;
        }
    }

    /* None to flow to */
    if (!borg_glow_n) return (false);

    /* Clear the flow codes */
    borg_flow_clear();

    /* Enqueue useful grids */
    for (i = 0; i < borg_glow_n; i++) {
        /* Enqueue the grid */
        borg_flow_enqueue_grid(borg_glow_y[i], borg_glow_x[i]);
    }

    /* Spread the flow */
    borg_flow_spread(250, true, false, false, -1, false);

    /* Attempt to Commit the flow */
    if (!borg_flow_commit("a lighted area", why)) return (false);

    /* Take one step */
    if (!borg_flow_old(why)) return (false);

    /* Success */
    return (true);
}

/*
 * Prepare to flow towards a vault grid which can be excavated.
 *
 * Collects every nearby diggable grid that touches a permanent (vault)
 * wall, then flows toward the closest one.  `nearness` bounds the search
 * radius.  Returns true if a step was taken.
 */
bool borg_flow_vault(int nearness)
{
    int y, x, i;
    int b_y, b_x;
    bool can_dig_hard;

    borg_grid* ag;

    /* reset counters */
    borg_temp_n = 0;
    i = 0;

    /* no need if no vault on level */
    if (!vault_on_level) return (false);

    /* no need if we can't dig */
    if (!borg_can_dig(false, false)) return (false);

    can_dig_hard = borg_can_dig(false, true);

    /* build the array -- Scan screen */
    for (y = w_y; y < w_y + SCREEN_HGT; y++) {
        for (x = w_x; x < w_x + SCREEN_WID; x++) {
            /* only bother with near ones */
            if (borg_distance(c_y, c_x, y, x) > nearness) continue;

            /* only deal with excavatable walls; with strong digging,
             * granite/quartz/magma are also acceptable */
            if (can_dig_hard) {
                if (borg_grids[y][x].feat != FEAT_FLOOR
                    && borg_grids[y][x].feat != FEAT_LAVA
                    && borg_grids[y][x].feat != FEAT_GRANITE
                    && borg_grids[y][x].feat != FEAT_RUBBLE
                    && borg_grids[y][x].feat != FEAT_QUARTZ
                    && borg_grids[y][x].feat != FEAT_MAGMA
                    && borg_grids[y][x].feat != FEAT_QUARTZ_K
                    && borg_grids[y][x].feat != FEAT_MAGMA_K)
                    continue;
            } else {
                if (borg_grids[y][x].feat != FEAT_FLOOR
                    && borg_grids[y][x].feat != FEAT_LAVA
                    && borg_grids[y][x].feat != FEAT_RUBBLE
                    && borg_grids[y][x].feat != FEAT_QUARTZ_K
                    && borg_grids[y][x].feat != FEAT_MAGMA_K)
                    continue;
            }

            /* Examine grids adjacent to this grid to see if there is a perma
             * wall adjacent */
            for (i = 0; i < 8; i++) {
                b_x = x + ddx_ddd[i];
                b_y = y + ddy_ddd[i];

                /* Bounds check */
                if (!square_in_bounds_fully(cave, loc(b_x, b_y))) continue;

                /* Access the grid */
                ag = &borg_grids[b_y][b_x];

                /* Not a perma, and not our spot. */
                if (ag->feat != FEAT_PERM) continue;

                /* keep count.
                 * NOTE(review): a grid adjacent to several perma walls is
                 * added once per wall, and borg_temp_n is not clamped to
                 * AUTO_TEMP_MAX here -- verify the array cannot overflow. */
                borg_temp_y[borg_temp_n] = y;
                borg_temp_x[borg_temp_n] = x;
                borg_temp_n++;
            }
        }
    }

    /* None to flow to */
    if (!borg_temp_n) return (false);

    /* Examine each ones */
    for (i = 0; i < borg_temp_n; i++) {
        /* Enqueue the grid */
        borg_flow_enqueue_grid(borg_temp_y[i], borg_temp_x[i]);
    }

    /* Spread the flow */
    borg_flow_spread(250, true, false, false, -1, false);

    /* Attempt to Commit the flow */
    if (!borg_flow_commit("vault excavation", GOAL_VAULT)) return (false);

    /* Take one step */
    if (!borg_flow_old(GOAL_VAULT)) return (false);

    /* Success */
    return (true);
}

/* Excavate an existing vault using ranged spells.
 * Stand where you are, use stone to mud to excavate the vault.  This will allow the mage
 * borgs to get a few more attack spells on the monster.  Without this routine, he would
 * approach the vault and use Stone to Mud when he was adjacent to the wall, giving him
 * only 1 or 2 shots before the monster is next to the borg.
* */ bool borg_excavate_vault(int range) { int y, x, i, ii; int b_y, b_x; borg_grid* ag; /* reset counters */ borg_temp_n = 0; i = 0; ii = 0; /* no need if no vault on level */ if (!vault_on_level) return (false); /* only if you can cast the spell */ if (!borg_spell_okay_fail(TURN_STONE_TO_MUD, 30) && !borg_spell_okay_fail(SHATTER_STONE, 30)) return (false); /* Danger/bad idea checks */ /* build the array -- Scan screen */ for (y = w_y; y < w_y + SCREEN_HGT; y++) { for (x = w_x; x < w_x + SCREEN_WID; x++) { /* only bother with near ones */ if (borg_distance(c_y, c_x, y, x) > range) continue; /* only deal with excavatable walls */ if (borg_grids[y][x].feat != FEAT_FLOOR && borg_grids[y][x].feat != FEAT_LAVA && borg_grids[y][x].feat != FEAT_GRANITE && borg_grids[y][x].feat != FEAT_RUBBLE && borg_grids[y][x].feat != FEAT_QUARTZ && borg_grids[y][x].feat != FEAT_MAGMA && borg_grids[y][x].feat != FEAT_QUARTZ_K && borg_grids[y][x].feat != FEAT_MAGMA_K) continue; continue; /* Examine grids adjacent to this grid to see if there is a perma wall adjacent */ for (i = 0; i < 8; i++) { b_x = x + ddx_ddd[i]; b_y = y + ddy_ddd[i]; /* Bounds check */ if (!square_in_bounds_fully(cave, loc(b_x, b_y))) continue; ag = &borg_grids[b_y][b_x]; /* Not a perma, and not our spot. 
*/ if (ag->feat != FEAT_PERM) continue; /* Track the new grid */ for (ii = 0; ii < borg_temp_n; ii++) { if (borg_temp_y[ii] == y && borg_temp_x[ii] == x) break; } /* Track the newly discovered excavatable wall */ if ((ii == borg_temp_n) && (ii < AUTO_TEMP_MAX)) { borg_temp_x[ii] = x; borg_temp_y[ii] = y; borg_temp_n++; /* do not overflow */ if (borg_temp_n > AUTO_TEMP_MAX) borg_temp_n = AUTO_TEMP_MAX; } } } } /* None to excavate */ if (!borg_temp_n) return (false); /* Review the useful grids */ for (i = 0; i < borg_temp_n; i++) { /* skip non-projectable grids grid (I cant shoot them) */ if (!borg_los(c_y, c_x, borg_temp_y[i], borg_temp_x[i])) continue; /* Attempt to target the grid */ borg_target(borg_temp_y[i], borg_temp_x[i]); /* Attempt to excavate it with "stone to mud" */ if (borg_spell(TURN_STONE_TO_MUD) || borg_spell(SHATTER_STONE) || borg_activate_ring(sv_ring_digging) || borg_activate_item(act_stone_to_mud)) { borg_note("# Excavation of vault"); borg_keypress('5'); /* turn that wall into a floor grid. If the spell failed, it will still look * like a wall and the borg_update routine will redefine it as a wall */ borg_do_update_view = true; borg_do_update_lite = true; /* Not Lit */ borg_grids[borg_temp_y[i]][borg_temp_x[i]].info &= ~BORG_GLOW; /* Dark */ borg_grids[borg_temp_y[i]][borg_temp_x[i]].info |= BORG_GLOW; /* Feat Floor */ borg_grids[borg_temp_y[i]][borg_temp_x[i]].feat = FEAT_FLOOR; return (true); } /* Success */ return (true); } /* No grid to excavate */ return (false); } /* * Prepare to "flow" towards any non-visited shop */ bool borg_flow_shop_visit(void) { /* Borg is allowed to cheat the store inventory as of 320. No need to visit each one */ return (false); #if 0 int i, x, y; /* Must be in town */ if (borg_skill[BI_CDEPTH]) return (false); /* Clear the flow codes */ borg_flow_clear(); /* Visit the shops */ for (i = 0; i < MAX_STORES; i++) { /* If low Level skip certain buildings in town * in order to reduce time spent in town. 
         */
        if (borg_skill[BI_CLEVEL] <= 10) {
            /* Skip Magic Shop unless Mage */
            if (i == 5 && (borg_class != CLASS_MAGE)) {
                borg_shops[i].when = borg_t;
                continue;
            }

            /* Skip Black Market */
            if (i == 6) {
                borg_shops[i].when = borg_t;
                continue;
            }

            /* Skip Home */
            if (i == 7) {
                borg_shops[i].when = borg_t;
                continue;
            }
        }

        /* Must not be visited */
        if (borg_shops[i].when) continue;

        /* if poisoned or bleeding skip non temples */
        if ((borg_skill[BI_ISCUT] || borg_skill[BI_ISPOISONED])
            && (i != 3 && i != 7)) continue;

        /* if starving--skip non food places */
        if (borg_skill[BI_FOOD] == 0 && (i != 0 && i != 7)) continue;

        /* if dark--skip non food places */
        if (borg_skill[BI_CURLITE] == 0 && (i != 0)
            && borg_skill[BI_CLEVEL] >= 2) continue;

        /* if only torch-- go directly to Gen Store --Get a Lantern */
        if (borg_skill[BI_CURLITE] == 1 && i != 0 &&
            /* !borg_shops[0].when && */ borg_gold >= 75) continue;

        /* Obtain the location */
        x = track_shop_x[i];
        y = track_shop_y[i];

        /* Hack -- Must be known and not under the player */
        if (!x || !y || ((c_x == x) && (c_y == y))) continue;

        /* Enqueue the grid */
        borg_flow_enqueue_grid(y, x);
    }

    /* Spread the flow */
    borg_flow_spread(250, true, false, false, -1, false);

    /* Attempt to Commit the flow */
    if (!borg_flow_commit("un-visited shops", GOAL_MISC)) return (false);

    /* Take one step */
    if (!borg_flow_old(GOAL_MISC)) return (false);

    /* Success */
    return (true);
#endif
}

/*
 * Prepare to "flow" towards a specific shop entry.
 *
 * `i` is the store index; returns true if a step (or a re-entry keypress)
 * was queued.
 */
bool borg_flow_shop_entry(int i)
{
    int x, y;

    const char* name = (f_info[stores[i].feat].name);

    /* Must be in town */
    if (borg_skill[BI_CDEPTH]) return (false);

    /* Obtain the location */
    x = track_shop_x[i];
    y = track_shop_y[i];

    /* Hack -- Must be known */
    if (!x || !y) return (false);

    /* Hack -- re-enter a shop if needed */
    if ((x == c_x) && (y == c_y)) {
        /* Note */
        borg_note("# Re-entering a shop");

        /* Enter the store */
        borg_keypress('5');

        /* Success */
        return (true);
    }

    /* Clear the flow codes */
    borg_flow_clear();

    /* Enqueue the grid */
    borg_flow_enqueue_grid(y, x);

    /* Spread the flow */
    borg_flow_spread(250, true, false, false, -1, false);

    /* Attempt to Commit the flow */
    if (!borg_flow_commit(name, GOAL_MISC)) return (false);

    /* Take one step */
    if (!borg_flow_old(GOAL_MISC)) return (false);

    /* Success */
    return (true);
}

/*
 * The borg can take a shot from a distance
 * (checks the attack options covering each class; queues nothing itself)
 */
static bool borg_has_distance_attack(void)
{
    /* line up Magic Missile shots (covers Mages) */
    if (borg_attack_aux_spell_bolt(MAGIC_MISSILE, 0, 10, BORG_ATTACK_MISSILE,
            z_info->max_range))
        return true;

    /* line up Nether Bolt shots (covers Necromancers) */
    if (borg_attack_aux_spell_bolt(NETHER_BOLT, 0, 10, BORG_ATTACK_NETHER,
            z_info->max_range))
        return true;

    /* or arrows (covers warrior/ranger/paladins/rogues) */
    if (borg_attack_aux_launch() > 0) return true;

    /* not lining up Priests (OOD has area of effect, will line up more
     * naturally) */
    /* or Druids (Stinking cloud is area of effect again) */
    /* Blackguards should be doing HTH */
    return false;
}

/*
 * Take a couple of steps to line up a shot.
 *
 * Scans the positions within 2 grids of the player by temporarily
 * overwriting the global player coordinates (c_x/c_y) -- they are restored
 * on every exit path, so keep any early return aware of that.
 */
bool borg_flow_kill_aim(bool viewable)
{
    int o_y, o_x;
    int s_c_y = c_y;
    int s_c_x = c_x;
    int i;

    /* Efficiency -- Nothing to kill */
    if (!borg_kills_cnt) return (false);

    /* Sometimes we loop on this if we back up to a point where */
    /* the monster is out of sight */
    if (time_this_panel > 500) return (false);

    /* Not if Weak from hunger or no food */
    if (borg_skill[BI_ISHUNGRY] || borg_skill[BI_ISWEAK]
        || borg_skill[BI_FOOD] == 0) return (false);

    /* If you can shoot from where you are, don't bother reaiming */
    if (borg_has_distance_attack()) return (false);

    /* Consider each nearby spot (within 2 grids) */
    for (o_x = -2; o_x <= 2; o_x++) {
        for (o_y = -2; o_y <= 2; o_y++) {
            /* borg_attack would have already checked for a shot from where I
             * currently am */
            if (o_x == 0 && o_y == 0) continue;

            /* XXX Mess with where the program thinks the player is */
            c_x = s_c_x + o_x;
            c_y = s_c_y + o_y;

            /* avoid screen edges */
            if (c_x > AUTO_MAX_X - 2 || c_x < 2 || c_y > AUTO_MAX_Y - 2
                || c_y < 2) continue;

            /* Make sure we do not end up next to a monster */
            for (i = 0; i < borg_kills_nxt; i++) {
                if (borg_distance(c_y, c_x, borg_kills[i].y, borg_kills[i].x)
                    == 1)
                    break;
            }
            if (i != borg_kills_nxt) continue;

            /* Check for a distance attack from here */
            if (borg_has_distance_attack()) {
                /* Clear the flow codes */
                borg_flow_clear();

                /* Enqueue the grid */
                borg_flow_enqueue_grid(c_y, c_x);

                /* restore the saved player position */
                c_x = s_c_x;
                c_y = s_c_y;

                /* Spread the flow */
                borg_flow_spread(5, true, !viewable, false, -1, false);

                /* Attempt to Commit the flow */
                if (!borg_flow_commit("targetable position", GOAL_KILL))
                    return (false);

                /* Take one step */
                if (!borg_flow_old(GOAL_KILL)) return (false);

                return (true);
            }
        }
    }

    /* restore the saved player position */
    c_x = s_c_x;
    c_y = s_c_y;

    return false;
}

/*
 * Dig an anti-summon corridor.  Type I
 *
 *   ##############     We want the borg to dig a tunnel which
 *   #............#     limits the LOS of summoned monsters.
 * ###............#     It works better in hallways.
 * ##@#............#
 * #p##............#    The borg will build an array of grids
 * ########## #######+###### near him.  Then look at specific patterns
 *          #               # to find the good grids to excavate.
 *          # ################
 *          # #
 *          ### #
 *
 * Look at wall array to see if it is acceptable
 * We want to find this in the array:
 *
 * #####  ..@..  ####.  .####
 * ##.##  ##.##  ##.#.  .#.##
 * #.#.#  #.#.#  #.#.@  @.#.#
 * ##.##  ##.##  ##.#.  .#.##
 * ..@..  #####  ####.
.#### * * NORTH SOUTH WEST East * */ bool borg_flow_kill_corridor_1(bool viewable) { int o_y = 0; int o_x = 0; int m_x = 0; int m_y = 0; int b_y = 0, b_x = 0; int b_distance = 99; int i; bool b_n = false; bool b_s = false; bool b_e = false; bool b_w = false; int n_array[25] = { 1,0,0,0,1, 1,0,1,0,1, 0,1,0,1,0, 0,0,1,0,0, 1,1,1,1,1 }; int ny[25] = { -4,-4,-4,-4,-4,-3,-3,-3,-3,-3,-2,-2,-2,-2,-2,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0 }; int nx[25] = { -2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2 }; int s_array[25] = { 1,1,1,1,1, 0,0,1,0,0, 0,1,0,1,0, 1,0,1,0,1, 1,0,0,0,1 }; int sy[25] = { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4 }; int sx[25] = { -2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2,-2,-1, 0, 1, 2 }; int e_array[25] = { 1,0,0,1,1, 1,0,1,0,0, 1,1,0,1,0, 1,0,1,0,0, 1,0,0,1,1 }; int ey[25] = { -2,-2,-2,-2,-2,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2 }; int ex[25] = { 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4 }; int w_array[25] = { 1,1,0,0,1, 0,0,1,0,1, 0,1,0,1,1, 0,0,1,0,1, 1,1,0,0,1 }; int wy[25] = { -2,-2,-2,-2,-2,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2 }; int wx[25] = { -4,-3,-2,-1, 0,-4,-3,-2,-1, 0,-4,-3,-2,-1, 0,-4,-3,-2,-1, 0,-4,-3,-2,-1, 0 }; int wall_north = 0; int wall_south = 0; int wall_east = 0; int wall_west = 0; int q_x; int q_y; borg_kill* kill; borg_digging = false; /* Efficiency -- Nothing to kill */ if (!borg_kills_cnt) return (false); /* Only do this to summoners when they are close*/ if (borg_kills_summoner == -1) return (false); /* Hungry,starving */ if (borg_skill[BI_ISHUNGRY] || borg_skill[BI_ISWEAK]) return (false); /* Sometimes we loop on this */ if (time_this_panel > 500) return (false); /* Do not dig when confused */ if (borg_skill[BI_ISCONFUSED]) return (false); /* Not when darkened */ if (borg_skill[BI_CURLITE] == 0) return (false); /* Not if sitting in a sea of runes */ if (borg_morgoth_position) 
return (false); if (borg_as_position) return (false); /* get the summoning monster */ kill = &borg_kills[borg_kills_summoner]; /* Summoner must be mobile */ if (rf_has(r_info[kill->r_idx].flags, RF_NEVER_MOVE)) return(false); /* Summoner must be able to pass through walls */ if (rf_has(r_info[kill->r_idx].flags, RF_PASS_WALL)) return(false); if (rf_has(r_info[kill->r_idx].flags, RF_KILL_WALL)) return(false); /* Summoner has to be awake (so he will chase me */ if (!kill->awake) return (false); /* Must have Stone to Mud spell */ if (!borg_spell_okay(TURN_STONE_TO_MUD) && !borg_spell_okay(SHATTER_STONE) && !borg_equips_ring(sv_ring_digging) && !borg_equips_item(act_stone_to_mud, true)) return (false); /* Summoner needs to be able to follow me. * So I either need to be able to * 1) have LOS on him or * 2) this panel needs to have had Magic Map or Wizard light cast on it. * If Mapped, then the flow codes needs to be used. */ if (!borg_los(kill->y, kill->x, c_y, c_x)) { /* Extract panel */ q_x = w_x / borg_panel_wid(); q_y = w_y / borg_panel_hgt(); if (borg_detect_wall[q_y + 0][q_x + 0] == true && borg_detect_wall[q_y + 0][q_x + 1] == true && borg_detect_wall[q_y + 1][q_x + 0] == true && borg_detect_wall[q_y + 1][q_x + 1] == true) { borg_flow_clear(); borg_digging = true; borg_flow_enqueue_grid(kill->y, kill->x); borg_flow_spread(10, true, false, false, -1, false); if (!borg_flow_commit("Monster Path", GOAL_KILL)) return (false); } else { borg_flow_clear(); borg_digging = true; borg_flow_enqueue_grid(kill->y, kill->x); borg_flow_spread(10, true, true, false, -1, false); if (!borg_flow_commit("Monster Path", GOAL_KILL)) return (false); } } /* NORTH -- Consider each area near the borg, looking for a good spot to hide */ for (o_y = -2; o_y < 1; o_y++) { /* Resest Wall count */ wall_north = 0; /* No E-W offset when looking North-South */ o_x = 0; for (i = 0; i < 25; i++) { borg_grid* ag; /* Check grids near borg */ m_y = c_y + o_y + ny[i]; m_x = c_x + o_x + nx[i]; /* avoid 
screen edgeds */ if (!square_in_bounds_fully(cave, loc(m_x, m_y))) { continue; } /* grid the grid */ ag = &borg_grids[m_y][m_x]; /* Certain grids must not be floor types */ if (n_array[i] == 0 && ((ag->feat == FEAT_NONE) || (ag->feat >= FEAT_MAGMA && ag->feat <= FEAT_QUARTZ_K) || ag->feat == FEAT_GRANITE)) { /* This is a good grid */ wall_north++; } if (n_array[i] == 1 && ((ag->feat <= FEAT_MORE) || (ag->feat >= FEAT_MAGMA && ag->feat <= FEAT_QUARTZ_K) || ag->feat == FEAT_GRANITE)) { /* A good wall would score 25. */ wall_north++; } } /* If I found 25 grids, then that spot will work well */ if (wall_north == 25) { if (borg_distance(c_y, c_x, c_y + o_y + ny[7], c_x + o_x + nx[7]) < b_distance) { b_y = o_y; b_x = o_x; b_n = true; b_distance = borg_distance(c_y, c_x, c_y + o_y + ny[7], c_x + o_x + nx[7]); } } } /* SOUTH -- Consider each area near the borg, looking for a good spot to hide */ for (o_y = -1; o_y < 2; o_y++) { /* Resest Wall count */ wall_south = 0; for (i = 0; i < 25; i++) { borg_grid* ag; /* No lateral offset on South check */ o_x = 0; /* Check grids near borg */ m_y = c_y + o_y + sy[i]; m_x = c_x + o_x + sx[i]; /* avoid screen edgeds */ if (!square_in_bounds_fully(cave, loc(m_x, m_y))) continue; /* grid the grid */ ag = &borg_grids[m_y][m_x]; /* Certain grids must not be floor types */ if (s_array[i] == 0 && ((ag->feat == FEAT_NONE) || (ag->feat >= FEAT_MAGMA && ag->feat <= FEAT_QUARTZ_K) || ag->feat == FEAT_GRANITE)) { /* This is a good grid */ wall_south++; } if (s_array[i] == 1 && ((ag->feat <= FEAT_MORE) || (ag->feat >= FEAT_MAGMA && ag->feat <= FEAT_QUARTZ_K) || ag->feat == FEAT_GRANITE)) { /* A good wall would score 25. 
*/ wall_south++; } } /* If I found 25 grids, then that spot will work well */ if (wall_south == 25) { if (borg_distance(c_y, c_x, c_y + o_y + sy[17], c_x + o_x + sx[17]) < b_distance) { b_y = o_y; b_x = o_x; b_s = true; b_n = false; b_distance = borg_distance(c_y, c_x, c_y + b_y + sy[17], c_x + b_x + sx[17]); } } } /* EAST -- Consider each area near the borg, looking for a good spot to hide */ for (o_x = -1; o_x < 2; o_x++) { /* Resest Wall count */ wall_east = 0; /* No N-S offset check when looking E-W */ o_y = 0; for (i = 0; i < 25; i++) { borg_grid* ag; /* Check grids near borg */ m_y = c_y + o_y + ey[i]; m_x = c_x + o_x + ex[i]; /* avoid screen edgeds */ if (!square_in_bounds_fully(cave, loc(m_x, m_y))) continue; /* grid the grid */ ag = &borg_grids[m_y][m_x]; /* Certain grids must not be floor types */ if (e_array[i] == 0 && ((ag->feat == FEAT_NONE) || (ag->feat >= FEAT_MAGMA && ag->feat <= FEAT_QUARTZ_K) || ag->feat == FEAT_GRANITE)) { /* This is a good grid */ wall_east++; } if (e_array[i] == 1 && ((ag->feat <= FEAT_MORE) || (ag->feat >= FEAT_MAGMA && ag->feat <= FEAT_QUARTZ_K) || ag->feat == FEAT_GRANITE)) { /* A good wall would score 25. 
*/ wall_east++; } } /* If I found 25 grids, then that spot will work well */ if (wall_east == 25) { if (borg_distance(c_y, c_x, c_y + o_y + ey[13], c_x + o_x + ex[13]) < b_distance) { b_y = o_y; b_x = o_x; b_e = true; b_s = false; b_n = false; b_distance = borg_distance(c_y, c_x, c_y + b_y + ey[13], c_x + b_x + ex[13]); } } } /* WEST -- Consider each area near the borg, looking for a good spot to hide */ for (o_x = -2; o_x < 1; o_x++) { /* Resest Wall count */ wall_west = 0; /* No N-S offset check when looking E-W */ o_y = 0; for (i = 0; i < 25; i++) { borg_grid* ag; /* Check grids near borg */ m_y = c_y + o_y + wy[i]; m_x = c_x + o_x + wx[i]; /* avoid screen edgeds */ if (!square_in_bounds_fully(cave, loc(m_x, m_y))) continue; /* grid the grid */ ag = &borg_grids[m_y][m_x]; /* Certain grids must not be floor types */ if (w_array[i] == 0 && ((ag->feat == FEAT_NONE) || (ag->feat >= FEAT_MAGMA && ag->feat <= FEAT_QUARTZ_K) || ag->feat == FEAT_GRANITE)) { /* This is a good grid */ wall_west++; } if (w_array[i] == 1 && ((ag->feat <= FEAT_MORE) || (ag->feat >= FEAT_MAGMA && ag->feat <= FEAT_QUARTZ_K) || ag->feat == FEAT_GRANITE)) { /* A good wall would score 25. 
*/ wall_west++; } } /* If I found 25 grids, then that spot will work well */ if (wall_west == 25) { if (borg_distance(c_y, c_x, c_y + o_y + wy[11], c_x + o_x + wx[11]) < b_distance) { b_y = o_y; b_x = o_x; b_w = true; b_e = false; b_s = false; b_n = false; b_distance = borg_distance(c_y, c_x, c_y + o_y + wy[11], c_x + o_x + wx[11]); } } } /* Attempt to enqueu the grids that should be floor grids and have the borg * move onto those grids */ if (b_n == true) { /* Clear the flow codes */ borg_flow_clear(); /* Enqueue the grid where I will hide */ borg_digging = true; borg_flow_enqueue_grid(c_y + b_y + ny[7], c_x + b_x + nx[7]); /* Spread the flow */ borg_flow_spread(5, true, false, true, -1, false); /* Attempt to Commit the flow */ if (!borg_flow_commit("anti-summon corridor north type 1", GOAL_DIGGING)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_DIGGING)) return (false); return (true); } if (b_s == true) { /* Clear the flow codes */ borg_flow_clear(); /* Enqueue the grid where I will hide */ borg_digging = true; borg_flow_enqueue_grid(c_y + b_y + sy[17], c_x + b_x + sx[17]); /* Spread the flow */ borg_flow_spread(6, true, false, true, -1, false); /* Attempt to Commit the flow */ if (!borg_flow_commit("anti-summon corridor south type 1", GOAL_DIGGING)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_DIGGING)) return (false); return (true); } if (b_e == true) { /* Clear the flow codes */ borg_flow_clear(); /* Enqueue the grid where I will hide */ borg_digging = true; borg_flow_enqueue_grid(c_y + b_y + ey[13], c_x + b_x + ex[13]); /* Spread the flow */ borg_digging = true; borg_flow_spread(5, true, false, true, -1, false); /* Attempt to Commit the flow */ if (!borg_flow_commit("anti-summon corridor east type 1", GOAL_DIGGING)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_DIGGING)) return (false); return (true); } if (b_w == true) { /* Clear the flow codes */ borg_flow_clear(); /* Enqueue the grid where I will hide */ 
borg_digging = true; borg_flow_enqueue_grid(c_y + b_y + wy[11], c_x + b_x + wx[11]); /* Spread the flow */ borg_flow_spread(5, true, false, true, -1, false); /* Attempt to Commit the flow */ if (!borg_flow_commit("anti-summon corridor west type 1", GOAL_DIGGING)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_DIGGING)) return (false); return (true); } return false; } /* * Dig an anti-summon corridor * * ############## We want the borg to not dig #1 * #............# but to dig #2, and hopefully shoot from the * #######............# last #2 and try to avoid standing on #3. * #222223............# This is great for offset ball attacks but * #2#####..s.........# not for melee. Warriors need to dig a wall * ######2###########+###### adjacent to the monsters so he can swing on them. * # 1 # * # ################ # * # # # * ### # # * */ bool borg_flow_kill_corridor_2(bool viewable) { int o_y, o_x; int m_x, m_y; int f_y, f_x; int floors = 0; int b_y = 0, b_x = 0; int perma_grids = 0; borg_kill* kill; /* Efficiency -- Nothing to kill */ if (!borg_kills_cnt) return (false); /* Only do this to summoners when they are close*/ if (borg_kills_summoner == -1) return (false); /* Do not dig when weak. 
It takes too long */ if (borg_skill[BI_STR] < 17) return (false); /* Hungry,starving */ if (borg_skill[BI_ISHUNGRY] || borg_skill[BI_ISWEAK]) return (false); /* Sometimes we loop on this */ if (time_this_panel > 500) return (false); /* Do not dig when confused */ if (borg_skill[BI_ISCONFUSED]) return (false); /* Not when darkened */ if (borg_skill[BI_CURLITE] == 0) return (false); /* Not if sitting in a sea of runes */ if (borg_morgoth_position) return (false); /* get the summoning monster */ kill = &borg_kills[borg_kills_summoner]; /* Consider each adjacent spot to monster*/ for (o_x = -1; o_x <= 1; o_x++) { for (o_y = -1; o_y <= 1; o_y++) { borg_grid* ag; /* Check grids near monster */ m_x = kill->x + o_x; m_y = kill->y + o_y; /* grid the grid */ ag = &borg_grids[m_y][m_x]; /* avoid screen edgeds */ if (m_x > AUTO_MAX_X - 2 || m_x < 2 || m_y > AUTO_MAX_Y - 2 || m_y < 2) continue; /* Can't tunnel a non wall or permawall*/ if (ag->feat != FEAT_NONE && ag->feat < FEAT_MAGMA) continue; if (ag->feat == FEAT_PERM) { perma_grids++; continue; } /* Do not dig unless we appear strong enough to succeed or we have a digger */ if (!borg_can_dig(false, false)) continue; /* reset floors counter */ floors = 0; /* That grid must not have too many floors adjacent */ for (f_x = -1; f_x <= 1; f_x++) { for (f_y = -1; f_y <= 1; f_y++) { /* grid the grid */ ag = &borg_grids[m_y + f_y][m_x + f_x]; /* check if this neighbor is a floor */ if (ag->feat == FEAT_FLOOR || ag->feat == FEAT_BROKEN) floors++; } } /* Do not dig if too many floors near. */ if (floors >= 5) continue; /* Track the good location */ b_y = m_y; b_x = m_x; } } /* NOTE: Perma_grids count the number of grids which contain permawalls. * The borg may try to flow to an unknown grid but may get stuck on a perma * wall. This will keep him from flowing to a summoner if the summoner is * near a perma grid. The real fix would to be in the flow_spread so that * he will not flow through perma_grids. I will work on that next. 
*/ if (b_y != 0 && b_x != 0 && perma_grids == 0) { /* Clear the flow codes */ borg_flow_clear(); /* Enqueue the grid */ borg_flow_enqueue_grid(m_y, m_x); /* Spread the flow */ borg_flow_spread(15, true, false, true, -1, false); /* Attempt to Commit the flow */ if (!borg_flow_commit("anti-summon corridor", GOAL_KILL)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_KILL)) return (false); return (true); } return false; } /* * Attempt to flow to a safe grid in order to rest up properly. Following a battle, a borg needs to heal up. * He will attempt to heal up right where the fight was, but if he cannot, then he needs to retreat a bit. * This will help him find a good safe place to hide. * */ bool borg_flow_recover(bool viewable, int dist) { int i, x, y; /* Sometimes we loop on this */ if (time_this_panel > 500) return (false); /* No retreating and recovering when low level */ if (borg_skill[BI_CLEVEL] <= 5) return (false); /* Mana for spell casters */ if (player->class->magic.num_books > 3) { if (borg_skill[BI_CURHP] > borg_skill[BI_MAXHP] / 3 && borg_skill[BI_CURSP] > borg_skill[BI_MAXSP] / 4 && /* Non spell casters? */ !borg_skill[BI_ISCUT] && !borg_skill[BI_ISSTUN] && !borg_skill[BI_ISHEAVYSTUN] && !borg_skill[BI_ISAFRAID]) return (false); } else /* Non Spell Casters */ { /* do I need to recover some? */ if (borg_skill[BI_CURHP] > borg_skill[BI_MAXHP] / 3 && !borg_skill[BI_ISCUT] && !borg_skill[BI_ISSTUN] && !borg_skill[BI_ISHEAVYSTUN] && !borg_skill[BI_ISAFRAID]) return (false); } /* If Fleeing, then do not rest */ if (goal_fleeing) return (false); /* If Scumming, then do not rest */ if (borg_lunal_mode || borg_munchkin_mode) return (false); /* No need if hungry */ if (borg_skill[BI_ISHUNGRY]) return (false); /* Nothing found */ borg_temp_n = 0; /* Scan some known Grids * Favor the following types of grids: * 1. 
Happy grids */ /* look at grids within 20 grids of me */ for (y = c_y - 25; y < c_y + 25; y++) { for (x = c_x - 25; x < c_x + 25; x++) { /* Stay in bounds */ if (!square_in_bounds(cave, loc(x, y))) continue; /* Skip my own grid */ if (y == c_y && x == c_x) continue; /* Skip grids that are too close to me */ if (borg_distance(c_y, c_x, y, x) < 7) continue; /* Is this grid a happy grid? */ if (!borg_happy_grid_bold(y, x)) continue; /* Can't rest on a wall grid. */ /* HACK depends on FEAT order, kinda evil */ if (borg_grids[y][x].feat >= FEAT_SECRET && borg_grids[y][x].feat != FEAT_PASS_RUBBLE) continue; /* Can I rest on that one? */ if (!borg_check_rest(y, x)) continue; /* Careful -- Remember it */ borg_temp_x[borg_temp_n] = x; borg_temp_y[borg_temp_n] = y; borg_temp_n++; } } /* Nothing to kill */ if (!borg_temp_n) return (false); /* Clear the flow codes */ borg_flow_clear(); /* Look through the good grids */ for (i = 0; i < borg_temp_n; i++) { /* Enqueue the grid */ borg_flow_enqueue_grid(borg_temp_y[i], borg_temp_x[i]); } /* Spread the flow */ borg_flow_spread(dist, false, true, false, -1, false); /* Attempt to Commit the flow */ if (!borg_flow_commit("Recover Grid", GOAL_RECOVER)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_RECOVER)) return (false); return (true); } /* * Prepare to "flow" towards monsters to "kill" * But in a few phases, viewable, near and far. * Note that monsters under the player are always deleted */ bool borg_flow_kill(bool viewable, int nearness) { int i, x, y, p, j, b_j = -1; int b_stair = -1; bool borg_in_hall = false; int hall_y, hall_x, hall_walls = 0; bool skip_monster = false; borg_grid* ag; /* Efficiency -- Nothing to kill */ if (!borg_kills_cnt) return (false); /* Don't chase down town monsters when you are just starting out */ if (borg_skill[BI_CDEPTH] == 0 && borg_skill[BI_CLEVEL] < 20) return (false); /* YOU ARE NOT A WARRIOR!! DON'T ACT LIKE ONE!! 
*/ if ((borg_class == CLASS_MAGE || borg_class == CLASS_NECROMANCER) && borg_skill[BI_CLEVEL] < (borg_skill[BI_CDEPTH] ? 35 : 25)) return (false); /* Not if Weak from hunger or no food */ if (borg_skill[BI_ISHUNGRY] || borg_skill[BI_ISWEAK] || borg_skill[BI_FOOD] == 0) return (false); /* Not if sitting in a sea of runes */ if (borg_morgoth_position) return (false); /* Nothing found */ borg_temp_n = 0; /* check to see if in a hall, used later */ for (hall_x = -1; hall_x <= 1; hall_x++) { for (hall_y = -1; hall_y <= 1; hall_y++) { /* Acquire location */ x = hall_x + c_x; y = hall_y + c_y; ag = &borg_grids[y][x]; /* track walls */ if ((ag->glyph) || ((ag->feat >= FEAT_MAGMA) && (ag->feat <= FEAT_PERM))) { hall_walls++; } /* addem up */ if (hall_walls >= 5) borg_in_hall = true; } } /* Check distance away from stairs, used later */ /* Check for an existing "up stairs" */ for (i = 0; i < track_less.num; i++) { x = track_less.x[i]; y = track_less.y[i]; /* How far is the nearest up stairs */ j = borg_distance(c_y, c_x, y, x); /* skip the closer ones */ if (b_j >= j) continue; /* track it */ b_j = j; b_stair = i; } /* Scan the monster list */ for (i = 1; i < borg_kills_nxt; i++) { borg_kill* kill = &borg_kills[i]; int x9 = kill->x; int y9 = kill->y; int ax, ay, d; /* Skip dead monsters */ if (!kill->r_idx) continue; /* Distance components */ ax = (x9 > c_x) ? (x9 - c_x) : (c_x - x9); ay = (y9 > c_y) ? 
(y9 - c_y) : (c_y - y9); /* Distance */ d = MAX(ax, ay); /* dont bother flowing to an adjacent monster when I am afraid */ if (d == 1 && (borg_skill[BI_ISAFRAID] || borg_skill[BI_CRSFEAR])) continue; /* Ignore multiplying monsters */ if (goal_ignoring && !borg_skill[BI_ISAFRAID] && (rf_has(r_info[kill->r_idx].flags, RF_MULTIPLY))) continue; /* Ignore molds when low level */ if (borg_skill[BI_MAXCLEVEL] < 10 && (rf_has(r_info[kill->r_idx].flags, RF_NEVER_MOVE))) continue; /* Avoid flowing to a fight if a scary guy is on the level */ if (scaryguy_on_level) continue; /* Avoid multiplying monsters when low level */ if (borg_skill[BI_CLEVEL] < 10 && (rf_has(r_info[kill->r_idx].flags, RF_MULTIPLY))) continue; /* Hack -- ignore Maggot until later. Player will chase Maggot * down all accross the screen waking up all the monsters. Then * he is stuck in a compromised situation. */ if ((rf_has(r_info[kill->r_idx].flags, RF_UNIQUE)) && borg_skill[BI_CDEPTH] == 0 && borg_skill[BI_CLEVEL] < 5) continue; /* Access the location */ x = kill->x; y = kill->y; /* Get the grid */ ag = &borg_grids[y][x]; /* Require line of sight if requested */ if (viewable && !(ag->info & BORG_VIEW)) continue; /* Calculate danger */ p = borg_danger(y, x, 1, true, false); /* Hack -- Skip "deadly" monsters unless uniques*/ if (borg_skill[BI_CLEVEL] > 25 && (!rf_has(r_info->flags, RF_UNIQUE)) && p > avoidance / 2) continue; if (borg_skill[BI_CLEVEL] <= 15 && p > avoidance / 3) continue; /* Skip ones that make me wander too far */ if (b_stair != -1 && borg_skill[BI_CLEVEL] < 10) { /* Check the distance of this monster to the stair */ j = borg_distance(track_less.y[b_stair], track_less.x[b_stair], y, x); /* skip far away monsters while I am close to stair */ if (b_j <= borg_skill[BI_CLEVEL] * 5 + 9 && j >= borg_skill[BI_CLEVEL] * 5 + 9) continue; } /* Hack -- Avoid getting surrounded */ if (borg_in_hall && (rf_has(r_info[kill->r_idx].flags, RF_GROUP_AI))) { /* check to see if monster is in a hall, */ for 
(hall_x = -1; hall_x <= 1; hall_x++) { for (hall_y = -1; hall_y <= 1; hall_y++) { if (!square_in_bounds_fully(cave, loc(hall_x + x, hall_y + y))) continue; ag = &borg_grids[hall_y + y][hall_x + x]; /* track walls */ if ((ag->glyph) || ((ag->feat >= FEAT_MAGMA) && (ag->feat <= FEAT_PERM))) { hall_walls++; } /* we want the monster to be in a hall also * * ######################## * ############ S ### * # @' SSS ### * # ########## SS### * # # # Ss### * # # ###### ###### * # # # # * Currently, we would like the borg to avoid * flowing to a situation like the one above. * We would like him to stay in the hall and * attack from a distance. One problem is the * lower case 's' in the corner, He will show * up as being in a corner, and the borg may * flow to it. Let's hope that is a rare case. * * The borg might flow to the 'dark' south exit * of the room. This would be dangerous for * him as well. */ /* add 'em up */ if (hall_walls < 4) { /* This monster is not in a hallway. * It may not be safe to fight. */ skip_monster = true; } } } } /* Skip this one if it is just 2 grids from me and it can attack me as soon as I * move 1 grid closer to it. Note that some monsters are faster than me and it * could still cover the 1 grid and hit me. I'll fix it (based on my speed) later XXX */ if (d == 2 && /* Spacing is important */ (!(kill->ranged_attack)) && /* Ranged Attacks, don't rest. 
*/ (!(rf_has(r_info[kill->r_idx].flags, RF_NEVER_MOVE)))) /* Skip monsters that dont chase */ { skip_monster = true; } /* skip certain ones */ if (skip_monster) continue; /* Clear the flow codes */ borg_flow_clear(); /* Check the distance to stair for this proposed grid and leash*/ if (borg_flow_cost_stair(y, x, b_stair) > borg_skill[BI_CLEVEL] * 3 + 9 && borg_skill[BI_CLEVEL] < 20) continue; /* Careful -- Remember it */ borg_temp_x[borg_temp_n] = x; borg_temp_y[borg_temp_n] = y; borg_temp_n++; } /* Nothing to kill */ if (!borg_temp_n) return (false); /* Clear the flow codes */ borg_flow_clear(); /* Look for something to kill */ for (i = 0; i < borg_temp_n; i++) { /* Enqueue the grid */ borg_flow_enqueue_grid(borg_temp_y[i], borg_temp_x[i]); } /* Spread the flow */ /* if we are not flowing toward monsters that we can see, make sure they */ /* are at least easily reachable. The second flag is whether or not */ /* to avoid unknown squares. This was for performance when we have ESP. */ borg_flow_spread(nearness, true, !viewable, false, -1, false); /* Attempt to Commit the flow */ if (!borg_flow_commit("kill", GOAL_KILL)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_KILL)) return (false); /* Success */ return (true); } /* * Prepare to "flow" towards mineral veins with treasure * */ bool borg_flow_vein(bool viewable, int nearness) { int i, x, y; int b_stair = -1, j, b_j = -1; int cost = 0; int leash = borg_skill[BI_CLEVEL] * 3 + 9; borg_grid* ag; /* Efficiency -- Nothing to take */ if (!track_vein.num) return (false); /* Increase leash */ if (borg_skill[BI_CLEVEL] >= 20) leash = 250; /* Not needed if rich */ if (borg_gold >= 100000) return (false); /* Require digger, capacity, or skill */ if (!borg_can_dig(true, false)) return (false); /* Nothing yet */ borg_temp_n = 0; /* Set the searching flag for low level borgs */ borg_needs_searching = true; /* Check distance away from stairs, used later */ /* Check for an existing "up stairs" */ for (i = 0; i < 
track_less.num; i++) { x = track_less.x[i]; y = track_less.y[i]; /* How far is the nearest up stairs */ j = borg_distance(c_y, c_x, y, x); /* skip the closer ones */ if (b_j >= j) continue; /* track it */ b_j = j; b_stair = i; } /* Scan the vein list */ for (i = 0; i < track_vein.num; i++) { /* Access the location */ x = track_vein.x[i]; y = track_vein.y[i]; /* Get the grid */ ag = &borg_grids[y][x]; /* Require line of sight if requested */ if (viewable && !(ag->info & BORG_VIEW)) continue; /* Clear the flow codes */ borg_flow_clear(); /* obtain the number of steps from this take to the stairs */ cost = borg_flow_cost_stair(y, x, b_stair); /* Check the distance to stair for this proposed grid, unless i am looking for very close items (leash) */ if (nearness > 5 && cost > leash && borg_skill[BI_CLEVEL] < 20) continue; /* Careful -- Remember it */ borg_temp_x[borg_temp_n] = x; borg_temp_y[borg_temp_n] = y; borg_temp_n++; } /* Nothing to mine */ if (!borg_temp_n) return (false); /* Clear the flow codes */ borg_flow_clear(); /* Look for something to take */ for (i = 0; i < borg_temp_n; i++) { /* Enqueue the grid */ borg_flow_enqueue_grid(borg_temp_y[i], borg_temp_x[i]); } /* Spread the flow */ /* if we are not flowing toward items that we can see, make sure they */ /* are at least easily reachable. The second flag is weather or not */ /* to avoid unkown squares. This was for performance. 
*/ borg_flow_spread(nearness, true, !viewable, false, -1, false); /* Attempt to Commit the flow */ if (!borg_flow_commit("vein", GOAL_TAKE)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_TAKE)) return (false); /* Success */ return (true); } /* * Prepare to "flow" towards objects to "take" * * Note that objects under the player are always deleted */ bool borg_flow_take(bool viewable, int nearness) { int i, x, y; int b_stair = -1, j, b_j = -1; int leash = borg_skill[BI_CLEVEL] * 3 + 9; int full_quiver; borg_grid* ag; /* Missile carry limit */ /* allow shooters to two quiver slots full */ if (player_has(player, PF_FAST_SHOT)) full_quiver = (z_info->quiver_slot_size - 1) * 2; else full_quiver = z_info->quiver_slot_size - 1; /* Efficiency -- Nothing to take */ if (!borg_takes_cnt) return (false); /* Require one empty slot */ if (borg_items[PACK_SLOTS - 1].iqty) return (false); /* If ScaryGuy, no chasing down items */ if (scaryguy_on_level) return (false); /* If out of fuel, don't mess around */ if (!borg_skill[BI_CURLITE]) return (false); /* Not if sitting in a sea of runes */ if (borg_morgoth_position) return (false); /* increase leash */ if (borg_skill[BI_CLEVEL] >= 20) leash = 250; /* Starting over on count */ borg_temp_n = 0; /* Set the searching flag for low level borgs */ borg_needs_searching = true; /* if the borg is running on Boosted Bravery, no * searching */ if (borg_no_retreat >= 1) borg_needs_searching = false; /* Check distance away from stairs, used later */ /* Check for an existing "up stairs" */ for (i = 0; i < track_less.num; i++) { x = track_less.x[i]; y = track_less.y[i]; /* How far is the nearest up stairs */ j = borg_distance(c_y, c_x, y, x); /* skip the closer ones */ if (b_j >= j) continue; /* track it */ b_j = j; b_stair = i; } /* Scan the object list */ for (i = 1; i < borg_takes_nxt; i++) { borg_take* take = &borg_takes[i]; /* Skip dead objects */ if (!take->kind) continue; /* Access the location */ x = take->x; y = take->y; /* 
Skip ones that make me wander too far */ if (b_stair != -1 && borg_skill[BI_CLEVEL] < 10) { /* Check the distance of this 'take' to the stair */ j = borg_distance(track_less.y[b_stair], track_less.x[b_stair], y, x); /* skip far away takes while I am close to stair*/ if (b_j <= leash && j >= leash) continue; } /* skip worthless items */ if (take->value <= 0) continue; /* Get the grid */ ag = &borg_grids[y][x]; /* Require line of sight if requested */ if (viewable && !(ag->info & BORG_VIEW)) continue; /* Don't bother with ammo if I am at capacity */ if (take->tval == borg_skill[BI_AMMO_TVAL] && borg_skill[BI_AMISSILES] >= full_quiver) continue; /* No need to chase certain things down after a certain amount. Dont chase: * Money * Other spell books * Wrong ammo */ if (borg_gold >= 500000) { if (take->tval == TV_GOLD) continue; if (!obj_kind_can_browse(&k_info[take->kind->kidx])) continue; if ((take->tval == TV_SHOT || take->tval == TV_ARROW || take->tval == TV_BOLT) && take->tval != borg_skill[BI_AMMO_TVAL] ) continue; /* Restore Mana for warriors? low level potions low level scrolls */ } /* Clear the flow codes */ borg_flow_clear(); /* Check the distance to stair for this proposed grid and leash*/ if (nearness > 5 && borg_flow_cost_stair(y, x, b_stair) > leash && borg_skill[BI_CLEVEL] < 20) continue; /* Careful -- Remember it */ borg_temp_x[borg_temp_n] = x; borg_temp_y[borg_temp_n] = y; borg_temp_n++; } /* Nothing to take */ if (!borg_temp_n) return (false); /* Clear the flow codes */ borg_flow_clear(); /* Look for something to take */ for (i = 0; i < borg_temp_n; i++) { /* Enqueue the grid */ borg_flow_enqueue_grid(borg_temp_y[i], borg_temp_x[i]); } /* Spread the flow */ /* if we are not flowing toward items that we can see, make sure they */ /* are at least easily reachable. The second flag is weather or not */ /* to avoid unkown squares. This was for performance. 
*/ borg_flow_spread(nearness, true, !viewable, false, -1, false); /* Attempt to Commit the flow */ if (!borg_flow_commit("item", GOAL_TAKE)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_TAKE)) return (false); /* Success */ return (true); } /* * Prepare to "flow" towards special objects to "take" * * Note that objects under the player are always deleted */ bool borg_flow_take_scum(bool viewable, int nearness) { int i, x, y; int j; int b_j = -1; int b_stair = -1; borg_grid* ag; /* Efficiency -- Nothing to take */ if (!borg_takes_cnt) return (false); /* Require one empty slot */ if (borg_items[PACK_SLOTS - 1].iqty) return (false); /* Nothing yet */ borg_temp_n = 0; /* Set the searching flag for low level borgs */ borg_needs_searching = true; /* Check distance away from stairs, used later */ /* Check for an existing "up stairs" */ for (i = 0; i < track_less.num; i++) { x = track_less.x[i]; y = track_less.y[i]; /* How far is the nearest up stairs */ j = borg_distance(c_y, c_x, y, x); /* skip the closer ones */ if (b_j >= j) continue; /* track it */ b_j = j; b_stair = i; } /* Scan the object list -- set filter*/ for (i = 1; i < borg_takes_nxt; i++) { borg_take* take = &borg_takes[i]; /* Skip dead objects */ if (!take->kind) continue; /* Access the location */ x = take->x; y = take->y; /* Get the grid */ ag = &borg_grids[y][x]; /* skip worthless items */ if (take->value <= 0) continue; /* Require line of sight if requested */ if (viewable && !(ag->info & BORG_VIEW)) continue; /* Clear the flow codes */ borg_flow_clear(); /* Check the distance to stair for this proposed grid with leash */ if (borg_flow_cost_stair(y, x, b_stair) > borg_skill[BI_CLEVEL] * 3 + 9 && borg_skill[BI_CLEVEL] < 20) continue; /* Careful -- Remember it */ borg_temp_x[borg_temp_n] = x; borg_temp_y[borg_temp_n] = y; borg_temp_n++; } /* Nothing to take */ if (!borg_temp_n) return (false); /* Clear the flow codes */ borg_flow_clear(); /* Look for something to take */ for (i = 0; i < 
borg_temp_n; i++) { /* Enqueue the grid */ borg_flow_enqueue_grid(borg_temp_y[i], borg_temp_x[i]); } /* Spread the flow */ /* if we are not flowing toward items that we can see, make sure they */ /* are at least easily reachable. The second flag is weather or not */ /* to avoid unknown squares. This was for performance. */ borg_flow_spread(nearness, true, !viewable, false, -1, true); /* Attempt to Commit the flow */ if (!borg_flow_commit("Scum item", GOAL_TAKE)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_TAKE)) return (false); /* Success */ return (true); } /* * Prepare to "flow" towards special objects to "take" * * Note that objects under the player are always deleted */ bool borg_flow_take_lunal(bool viewable, int nearness) { int i, ii, x, y; int j; int b_j = -1; int b_stair = -1; borg_grid* ag; /* Efficiency -- Nothing to take */ if (!borg_takes_cnt) return (false); /* Check for an existing "up stairs" */ for (i = 0; i < track_less.num; i++) { x = track_less.x[i]; y = track_less.y[i]; /* How far is the nearest up stairs */ j = borg_distance(c_y, c_x, y, x); /* skip the closer ones */ if (b_j >= j) continue; /* track it */ b_j = j; b_stair = i; } /* Nothing yet */ borg_temp_n = 0; /* Set the searching flag for low level borgs */ borg_needs_searching = true; /* Scan the object list -- set filter*/ for (i = 1; i < borg_takes_nxt; i++) { borg_take* take = &borg_takes[i]; struct object_kind* k_ptr = take->kind; bool item_bad; /* Skip dead objects */ if (!k_ptr) continue; /* Access the location */ x = take->x; y = take->y; /* all items start bad */ item_bad = true; /* Gold is good to have */ if (take->tval == TV_GOLD) { borg_note(format("# Lunal Item %s, at %d,%d", take->kind->name, y, x)); item_bad = false; } /* If full can I absorb the item into an existing stack */ if (item_bad && take->value > 0) { if (borg_is_ammo(take->tval)) { /* Scan the quiver */ for (ii = QUIVER_START; ii < QUIVER_END; ii++) { /* skip empty slots */ if 
(!borg_items[ii].iqty) continue; /* skip fullslots */ if (borg_items[ii].iqty == z_info->quiver_slot_size) continue; /* Both objects should have the same ID value */ if (take->kind->kidx != borg_items[ii].kind) continue; if (k_ptr->sval == borg_items[ii].sval && k_ptr->tval == borg_items[ii].tval) { item_bad = false; } } } else if (borg_items[PACK_SLOTS - 1].iqty) { /* Scan the inventory */ for (ii = 0; ii < PACK_SLOTS; ii++) { /* skip empty slots */ if (!borg_items[ii].iqty) continue; /* Both objects should have the same ID value */ if (take->kind->kidx != borg_items[ii].kind) continue; /* Certain types of items can stack */ if (k_ptr->sval == borg_items[ii].sval && k_ptr->tval == borg_items[ii].tval && (borg_items[ii].tval == TV_POTION || borg_items[ii].tval == TV_SCROLL || borg_items[ii].tval == TV_ROD)) { item_bad = false; } } } } /* Require one empty slot */ if (!borg_items[PACK_SLOTS - 1].iqty && item_bad == true) { /* for ammo, make sure the quiver isn't full */ if (!borg_is_ammo(take->tval) || borg_items[QUIVER_END - 1].iqty == 0) { /* Certain Potions are worthless */ if (take->tval == TV_POTION && (take->kind->sval >= sv_potion_inc_str) && (take->kind->sval <= sv_potion_detect_invis)) { borg_note(format("# Lunal Item %s, at %d,%d", take->kind->name, y, x)); item_bad = false; } /* Certain insta_arts are good. Note that there is no top end of this. So if an item * were added after the last artifact, it would also be picked up. */ if (kf_has(take->kind->kind_flags, KF_INSTA_ART)) { borg_note(format("# Lunal Item %s, at %d,%d", take->kind->name, y, x)); item_bad = false; } /* if scumming the start of the game, take all items to sell them */ if (borg_cfg[BORG_MUNCHKIN_START]) { /* Certain known items are junky and should be ignored. 
Grab only * things of value */ if (take->value >= 1) item_bad = false; } } } /* Get the grid */ ag = &borg_grids[y][x]; /* Require line of sight if requested */ if (viewable && !(ag->info & BORG_VIEW)) continue; /* Clear the flow codes */ borg_flow_clear(); /* Check the distance to stair for this proposed grid */ if (borg_flow_cost_stair(y, x, b_stair) > borg_skill[BI_CLEVEL] * 3 + 9 && borg_skill[BI_CLEVEL] < 20) continue; /* Careful -- Remember it */ borg_temp_x[borg_temp_n] = x; borg_temp_y[borg_temp_n] = y; borg_temp_n++; } /* Nothing to take */ if (!borg_temp_n) return (false); /* Clear the flow codes */ borg_flow_clear(); /* Look for something to take */ for (i = 0; i < borg_temp_n; i++) { /* Enqueue the grid */ borg_flow_enqueue_grid(borg_temp_y[i], borg_temp_x[i]); } /* Spread the flow */ /* if we are not flowing toward items that we can see, make sure they */ /* are at least easily reachable. The second flag is weather or not */ /* to avoid unknown squares. This was for performance. */ borg_flow_spread(nearness, false, !viewable, false, -1, true); /* Attempt to Commit the flow */ if (!borg_flow_commit("munchkin item", GOAL_TAKE)) return (false); /* Check for monsters before walking over to the item */ if (borg_check_LIGHT()) return (true); /* Take one step */ if (!borg_flow_old(GOAL_TAKE)) return (false); /* Success */ return (true); } /* * Determine if a grid is "interesting" (and should be explored) * * A grid is "interesting" if it is a closed door, rubble, hidden treasure, * or a visible trap, or an "unknown" grid. * or a non-perma-wall adjacent to a perma-wall. (GCV) * * b_stair is the index to the closest upstairs. 
 */

/*
 * Determine if the given grid is an "interesting" exploration target:
 * unknown terrain, known treasure veins, possible vault walls, rubble,
 * closed doors, or visible traps worth disarming.
 *
 * NOTE(review): the "b_stair" parameter is never read in this function;
 * it appears to exist only to match the signature expected by callers.
 */
static bool borg_flow_dark_interesting(int y, int x, int b_stair)
{
    int oy;
    int ox, i;

    borg_grid* ag;

    /* Have the borg do some Searching */
    borg_needs_searching = true;

    /* Get the borg_grid */
    ag = &borg_grids[y][x];

    /* Explore unknown grids */
    if (ag->feat == FEAT_NONE) return (true);

    /* Efficiency -- Ignore "boring" grids */
    if (ag->feat < FEAT_SECRET) return (false);

    /* Explore "known treasure" (magma/quartz veins with visible gold) */
    if ((ag->feat == FEAT_MAGMA_K) || (ag->feat == FEAT_QUARTZ_K)) {
        /* Do not dig when confused */
        if (borg_skill[BI_ISCONFUSED]) return (false);

        /* Do not bother if super rich */
        if (borg_gold >= 100000) return (false);

        /* Not when darkened */
        if (borg_skill[BI_CURLITE] == 0) return (false);

        /* don't try to dig if we can't */
        if (!borg_can_dig(false, false)) return (false);

        /* Okay */
        return (true);
    }

    /* "Vaults" Explore non perma-walls adjacent to a perma wall */
    if (ag->feat == FEAT_GRANITE || ag->feat == FEAT_MAGMA
        || ag->feat == FEAT_QUARTZ) {
        /* Do not attempt when confused */
        if (borg_skill[BI_ISCONFUSED]) return (false);

        /* hack and cheat.  No vaults on this level */
        if (!vault_on_level) return (false);

        /* AJG Do not attempt on the edge */
        if (x < AUTO_MAX_X - 1 && y < AUTO_MAX_Y - 1 && x > 1 && y > 1) {
            /* scan the adjacent grids (includes the grid itself at 0,0) */
            for (ox = -1; ox <= 1; ox++) {
                for (oy = -1; oy <= 1; oy++) {
                    /* Acquire location */
                    ag = &borg_grids[oy + y][ox + x];

                    /* skip non perma grids wall */
                    if (ag->feat != FEAT_PERM) continue;

                    /* make sure we can dig */
                    if (!borg_can_dig(false, false)) return (false);

                    /* Glove up and dig in */
                    return (true);
                }
            }
        }

        /* not adjacent to a GCV, Restore Grid */
        ag = &borg_grids[y][x];
    }

    /* Explore "rubble" (unless too weak to clear it) */
    if (ag->feat == FEAT_RUBBLE && !borg_skill[BI_ISWEAK]) {
        return (true);
    }

    /* Explore "closed doors" */
    if (ag->feat == FEAT_CLOSED) {
        /* some closed doors leave alone */
        if (breeder_level) {
            /* Did I close this one */
            for (i = 0; i < track_door.num; i++) {
                /* mark as icky if I closed this one */
                if ((track_door.x[i] == x) && (track_door.y[i] == y)) {
                    /* not interesting */
                    return (false);
                }
            }
        }

        /* this door should be ok to open */
        return (true);
    }

    /* Explore "visible traps" */
    if (feat_is_trap_holding(ag->feat)) {
        /* Do not disarm when blind */
        if (borg_skill[BI_ISBLIND]) return (false);

        /* Do not disarm when confused */
        if (borg_skill[BI_ISCONFUSED]) return (false);

        /* Do not disarm when hallucinating */
        if (borg_skill[BI_ISIMAGE]) return (false);

        /* Do not flow without lite */
        if (borg_skill[BI_CURLITE] == 0) return (false);

        /* Do not disarm trap doors on level 99 */
        if (borg_skill[BI_CDEPTH] == 99 && ag->trap && !ag->glyph)
            return (false);

        /* Do not disarm when you could end up dead */
        if (borg_skill[BI_CURHP] < 60) return (false);

        /* Do not disarm when clumsy (low disarm skill for the level) */
        if (borg_skill[BI_DISP] < 30 && borg_skill[BI_CLEVEL] < 20)
            return (false);
        if (borg_skill[BI_DISP] < 45 && borg_skill[BI_CLEVEL] < 10)
            return (false);
        if (borg_skill[BI_DISM] < 30 && borg_skill[BI_CLEVEL] < 20)
            return (false);
        if (borg_skill[BI_DISM] < 45 && borg_skill[BI_CLEVEL] < 10)
            return (false);

        /* Do not explore if a Scaryguy on the Level */
        if (scaryguy_on_level) return (false);

        /* NOTE: the flow code allows a borg to flow through a trap and so he
         * may still try to disarm one on his way to the other interesting
         * grid.  If mods are made to the above criteria for disarming traps,
         * then mods must also be made to borg_flow_spread() and
         * borg_flow_direct() */

        /* Okay */
        return (true);
    }

    /* Ignore other grids */
    return (false);
}

/*
 * Determine if a grid is "reachable" (and can be explored):
 * true if any of the eight neighbors is a known floor grid.
 */
static bool borg_flow_dark_reachable(int y, int x)
{
    int j;

    borg_grid* ag;

    /* Scan neighbors */
    for (j = 0; j < 8; j++) {
        int y2 = y + ddy_ddd[j];
        int x2 = x + ddx_ddd[j];

        /* Get the grid */
        ag = &borg_grids[y2][x2];

        /* Skip unknown grids (important) */
        if (ag->feat == FEAT_NONE) continue;

        /* Accept known floor grids */
        if (borg_cave_floor_grid(ag)) return (true);
    }

    /* Failure */
    return (false);
}

/*
 * Dig a straight Tunnel to a close monster.
 *
 * Used when the borg has been stuck on a level too long (or is twitchy):
 * flow toward the nearest known monster, or toward the map center if no
 * monster is known, digging through walls along the way.
 *
 * NOTE(review): the "viewable" parameter is never read in this function.
 */
bool borg_flow_kill_direct(bool viewable, bool twitchy)
{
    int i;
    int b_i = -1;
    int d;
    int b_d = z_info->max_sight;

    borg_kill* kill;

    /* Do not dig when weak.  It takes too long */
    if (!borg_can_dig(false, false)) return (false);

    /* Not if Weak from hunger or no food */
    if (!twitchy
        && (borg_skill[BI_ISHUNGRY] || borg_skill[BI_ISWEAK]
            || borg_skill[BI_FOOD] == 0))
        return (false);

    /* Only when sitting for too long or twitchy */
    if (!twitchy && borg_t - borg_began < 3000 && borg_times_twitch < 5)
        return (false);

    /* Do not dig when confused */
    if (borg_skill[BI_ISCONFUSED]) return (false);

    /* Not when darkened */
    if (borg_skill[BI_CURLITE] == 0) return (false);

    /* Efficiency -- Nothing to kill */
    if (borg_kills_cnt) {
        /* Scan the monsters */
        for (i = 1; i < borg_kills_nxt; i++) {
            kill = &borg_kills[i];

            /* Skip "dead" monsters */
            if (!kill->r_idx) continue;

            /* Distance away */
            d = borg_distance(kill->y, kill->x, c_y, c_x);

            /* Track closest one (ties go to the later monster) */
            if (d > b_d) continue;

            /* Track it */
            b_i = i;
            b_d = d;
        }
    }

    /* If no Kill, then pick the center of the map */
    if (b_i == -1) {
        /* Clear the flow codes */
        borg_flow_clear();

        /* Enqueue the grid */
        borg_flow_enqueue_grid(AUTO_MAX_Y / 2, AUTO_MAX_X / 2);

        /* Spread the flow */
        borg_flow_spread(150, true, false, true, -1, false);

        /* Attempt to Commit the flow */
        if (!borg_flow_commit("center direct", GOAL_KILL)) return (false);

        /* Take one step */
        if (!borg_flow_old(GOAL_KILL)) return (false);

        return (true);
    }

    if (b_i) /* don't want it near permawall */
    {
        /* get the closest monster */
        kill = &borg_kills[b_i];

        /* Clear the flow codes */
        borg_flow_clear();

        /* Enqueue the grid */
        borg_flow_enqueue_grid(kill->y, kill->x);

        /* Spread the flow */
        borg_flow_spread(15, true, false, true, -1, false);

        /* Attempt to Commit the flow */
        if (!borg_flow_commit("kill direct", GOAL_KILL)) return (false);

        /* Take one step */
        if (!borg_flow_old(GOAL_KILL)) return (false);

        return (true);
    }

    return false;
}

/*
 * Place a "direct path" into the flow array, checking danger
 *
 * Modify the "cost" array in such a way that from any point on
 * one "direct" path from the player to the given grid, as long
 * as the rest
 * of the path is "safe" and "clear", the Borg will
 * walk along the path to the given grid.
 *
 * This function is used by "borg_flow_dark_1()" to provide an
 * optimized "flow" during the initial exploration of a level.
 * It is also used by "borg_flow_dark_2()" in a similar fashion.
 */
static void borg_flow_direct(int y, int x)
{
    int n = 0;

    int x1, y1, x2, y2;

    int ay, ax;

    int shift;

    int p, fear = 0;

    borg_grid* ag;

    /* Avoid icky grids */
    if (borg_data_icky->data[y][x]) return;

    /* Unknown */
    if (!borg_data_know->data[y][x]) {
        /* Mark as known */
        borg_data_know->data[y][x] = true;

        /* Get the danger */
        p = borg_danger(y, x, 1, true, false);

        /* Increase bravery -- pick the danger threshold ("fear") based on
         * character level, level contents and how long we have been here */
        if (borg_skill[BI_MAXCLEVEL] == 50) fear = avoidance * 5 / 10;
        if (borg_skill[BI_MAXCLEVEL] != 50) fear = avoidance * 3 / 10;
        if (scaryguy_on_level) fear = avoidance * 2;
        if (unique_on_level && vault_on_level
            && borg_skill[BI_MAXCLEVEL] == 50)
            fear = avoidance * 3;
        if (scaryguy_on_level && borg_skill[BI_CLEVEL] <= 5)
            fear = avoidance * 3;
        if (goal_ignoring) fear = avoidance * 5;
        if (borg_t - borg_began > 5000) fear = avoidance * 25;
        if (borg_skill[BI_FOOD] == 0) fear = avoidance * 100;

        /* Normal in town */
        if (borg_skill[BI_CLEVEL] == 0) fear = avoidance * 1 / 10;

        /* Mark dangerous grids as icky */
        if (p > fear) {
            /* Icky */
            borg_data_icky->data[y][x] = true;

            /* Avoid */
            return;
        }
    }

    /* Save the flow cost (zero) */
    borg_data_cost->data[y][x] = 0;

    /* Save "origin" */
    y1 = y;
    x1 = x;

    /* Save "destination" */
    y2 = c_y;
    x2 = c_x;

    /* Calculate distance components */
    ay = (y2 < y1) ? (y1 - y2) : (y2 - y1);
    ax = (x2 < x1) ? (x1 - x2) : (x2 - x1);

    /* Path -- walk a Bresenham-style line from the target back to the
     * player, writing increasing costs along the way */
    while (1) {
        /* Check for arrival at player */
        if ((x == x2) && (y == y2)) return;

        /* Next */
        n++;

        /* Move mostly vertically */
        if (ay > ax) {
            /* Extract a shift factor XXX */
            shift = (n * ax + (ay - 1) / 2) / ay;

            /* Sometimes move along the minor axis */
            x = (x2 < x1) ? (x1 - shift) : (x1 + shift);

            /* Always move along major axis */
            y = (y2 < y1) ? (y1 - n) : (y1 + n);
        }

        /* Move mostly horizontally */
        else {
            /* Extract a shift factor XXX */
            shift = (n * ay + (ax - 1) / 2) / ax;

            /* Sometimes move along the minor axis */
            y = (y2 < y1) ? (y1 - shift) : (y1 + shift);

            /* Always move along major axis */
            x = (x2 < x1) ? (x1 - n) : (x1 + n);
        }

        /* Access the grid */
        ag = &borg_grids[y][x];

        /* Ignore "wall" grids */
        if (!borg_cave_floor_grid(ag)) return;

        /* Avoid Traps if low level-- unless brave or scaryguy. */
        if (ag->trap && avoidance <= borg_skill[BI_CURHP]
            && !scaryguy_on_level) {
            /* Do not disarm when you could end up dead */
            if (borg_skill[BI_CURHP] < 60) return;

            /* Do not disarm when clumsy */
            if (borg_skill[BI_DISP] < 30 && borg_skill[BI_CLEVEL] < 20) return;
            if (borg_skill[BI_DISP] < 45 && borg_skill[BI_CLEVEL] < 10) return;
            if (borg_skill[BI_DISM] < 30 && borg_skill[BI_CLEVEL] < 20) return;
            if (borg_skill[BI_DISM] < 45 && borg_skill[BI_CLEVEL] < 10) return;
        }

        /* Abort at "icky" grids */
        if (borg_data_icky->data[y][x]) return;

        /* Analyze every grid once */
        if (!borg_data_know->data[y][x]) {
            /* Mark as known */
            borg_data_know->data[y][x] = true;

            /* Get the danger */
            p = borg_danger(y, x, 1, true, false);

            /* Increase bravery (same thresholds as above) */
            if (borg_skill[BI_MAXCLEVEL] == 50) fear = avoidance * 5 / 10;
            if (borg_skill[BI_MAXCLEVEL] != 50) fear = avoidance * 3 / 10;
            if (scaryguy_on_level) fear = avoidance * 2;
            if (unique_on_level && vault_on_level
                && borg_skill[BI_MAXCLEVEL] == 50)
                fear = avoidance * 3;
            if (scaryguy_on_level && borg_skill[BI_CLEVEL] <= 5)
                fear = avoidance * 3;
            if (goal_ignoring) fear = avoidance * 5;
            if (borg_t - borg_began > 5000) fear = avoidance * 25;
            if (borg_skill[BI_FOOD] == 0) fear = avoidance * 100;

            /* Normal in town */
            if (borg_skill[BI_CLEVEL] == 0) fear = avoidance * 1 / 10;

            /* Avoid dangerous grids (forever) */
            if (p > fear) {
                /* Mark as icky */
                borg_data_icky->data[y][x] = true;

                /* Abort */
                return;
            }
        }

        /* Abort "pointless" paths if possible */
        if (borg_data_cost->data[y][x] <= n) break;

        /* Save the new flow cost */
        borg_data_cost->data[y][x] = n;
    }
}

/* Currently not used, I thought I might need it for anti-summoning.
 * Same path-marking as borg_flow_direct() but without the wall and
 * trap checks, so the path may pass through diggable terrain. */
extern void borg_flow_direct_dig(int y, int x)
{
    int n = 0;

    int x1, y1, x2, y2;

    int ay, ax;

    int shift;

    int p, fear = 0;

#if 0
    /* Avoid icky grids */
    if (borg_data_icky->data[y][x]) return;

    /* Unknown */
    if (!borg_data_know->data[y][x])
    {
        /* Mark as known */
        borg_data_know->data[y][x] = true;

        /* Mark dangerous grids as icky */
        if (borg_danger(y, x, 1, true, false) > avoidance / 3)
        {
            /* Icky */
            borg_data_icky->data[y][x] = true;

            /* Avoid */
            return;
        }
    }
#endif

    /* Save the flow cost (zero) */
    borg_data_cost->data[y][x] = 0;

    /* Save "origin" */
    y1 = y;
    x1 = x;

    /* Save "destination" */
    y2 = c_y;
    x2 = c_x;

    /* Calculate distance components */
    ay = (y2 < y1) ? (y1 - y2) : (y2 - y1);
    ax = (x2 < x1) ? (x1 - x2) : (x2 - x1);

    /* Path */
    while (1) {
        /* Check for arrival at player */
        if ((x == x2) && (y == y2)) return;

        /* Next */
        n++;

        /* Move mostly vertically */
        if (ay > ax) {
            /* Extract a shift factor XXX */
            shift = (n * ax + (ay - 1) / 2) / ay;

            /* Sometimes move along the minor axis */
            x = (x2 < x1) ? (x1 - shift) : (x1 + shift);

            /* Always move along major axis */
            y = (y2 < y1) ? (y1 - n) : (y1 + n);
        }

        /* Move mostly horizontally */
        else {
            /* Extract a shift factor XXX */
            shift = (n * ay + (ax - 1) / 2) / ax;

            /* Sometimes move along the minor axis */
            y = (y2 < y1) ? (y1 - shift) : (y1 + shift);

            /* Always move along major axis */
            x = (x2 < x1) ? (x1 - n) : (x1 + n);
        }

        /* Abort at "icky" grids */
        if (borg_data_icky->data[y][x]) return;

        /* Analyze every grid once */
        if (!borg_data_know->data[y][x]) {
            /* Mark as known */
            borg_data_know->data[y][x] = true;

            /* Get the danger */
            p = borg_danger(y, x, 1, true, false);

            /* Increase bravery */
            if (borg_skill[BI_MAXCLEVEL] == 50) fear = avoidance * 5 / 10;
            if (borg_skill[BI_MAXCLEVEL] != 50) fear = avoidance * 3 / 10;
            if (scaryguy_on_level) fear = avoidance * 2;
            if (unique_on_level && vault_on_level
                && borg_skill[BI_MAXCLEVEL] == 50)
                fear = avoidance * 3;
            if (scaryguy_on_level && borg_skill[BI_CLEVEL] <= 5)
                fear = avoidance * 3;
            if (goal_ignoring) fear = avoidance * 5;
            if (borg_t - borg_began > 5000) fear = avoidance * 25;
            if (borg_skill[BI_FOOD] == 0) fear = avoidance * 100;

            /* Normal in town */
            if (borg_skill[BI_CLEVEL] == 0) fear = avoidance * 1 / 10;

            /* Avoid dangerous grids (forever) */
            if (p > fear) {
                /* Mark as icky */
                borg_data_icky->data[y][x] = true;

                /* Abort */
                return;
            }
        }

        /* Abort "pointless" paths if possible */
        if (borg_data_cost->data[y][x] <= n) break;

        /* Save the new flow cost */
        borg_data_cost->data[y][x] = n;
    }
}

/*
 * Hack -- mark off the edges of a rectangle as "avoid" or "clear"
 * (stop == true sets both "know" and "icky" on every border grid,
 * stop == false clears them again).
 */
static void borg_flow_border(int y1, int x1, int y2, int x2, bool stop)
{
    int x, y;

    /* Scan west/east edges */
    for (y = y1; y <= y2; y++) {
        /* Avoid/Clear west edge */
        borg_data_know->data[y][x1] = stop;
        borg_data_icky->data[y][x1] = stop;

        /* Avoid/Clear east edge */
        borg_data_know->data[y][x2] = stop;
        borg_data_icky->data[y][x2] = stop;
    }

    /* Scan north/south edges */
    for (x = x1; x <= x2; x++) {
        /* Avoid/Clear north edge */
        borg_data_know->data[y1][x] = stop;
        borg_data_icky->data[y1][x] = stop;

        /* Avoid/Clear south edge */
        borg_data_know->data[y2][x] = stop;
        borg_data_icky->data[y2][x] = stop;
    }
}

/*
 * Prepare to "flow" towards "interesting" grids (method 1)
 *
 * This function examines the torch-lit grids for "interesting" grids.
*/ static bool borg_flow_dark_1(int b_stair) { int i; int cost; int x, y; /* Hack -- not in town */ if (!borg_skill[BI_CDEPTH]) return (false); /* Reset */ borg_temp_n = 0; /* Scan torch-lit grids */ for (i = 0; i < borg_LIGHT_n; i++) { y = borg_LIGHT_y[i]; x = borg_LIGHT_x[i]; /* Skip "boring" grids (assume reachable) */ if (!borg_flow_dark_interesting(y, x, b_stair)) continue; /* Clear the flow codes */ borg_flow_clear(); /* obtain the number of steps from this take to the stairs */ cost = borg_flow_cost_stair(y, x, b_stair); /* Check the distance to stair for this proposed grid if dangerous */ if (borg_skill[BI_CDEPTH] >= borg_skill[BI_CLEVEL] - 5 && cost > borg_skill[BI_CLEVEL] * 3 + 9 && borg_skill[BI_CLEVEL] < 20) continue; /* Careful -- Remember it */ borg_temp_x[borg_temp_n] = x; borg_temp_y[borg_temp_n] = y; borg_temp_n++; } /* Nothing */ if (!borg_temp_n) return (false); /* Wipe icky codes from grids if needed */ if (goal_ignoring || scaryguy_on_level) borg_danger_wipe = true; /* Clear the flow codes */ borg_flow_clear(); /* Create paths to useful grids */ for (i = 0; i < borg_temp_n; i++) { y = borg_temp_y[i]; x = borg_temp_x[i]; /* Create a path */ borg_flow_direct(y, x); } /* Attempt to Commit the flow */ if (!borg_flow_commit(NULL, GOAL_DARK)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_DARK)) return (false); /* Forget goal */ /* goal = 0; */ /* Success */ return (true); } /* * Prepare to "flow" towards "interesting" grids (method 2) * * This function is only used when the player is at least 4 grids away * from the outer dungeon wall, to prevent any nasty memory errors. * * This function examines the grids just outside the torch-lit grids * for "unknown" grids, and flows directly towards them (one step). 
*/ static bool borg_flow_dark_2(int b_stair) { int i, r; int cost; int x, y; borg_grid* ag; /* Hack -- not in town */ if (!borg_skill[BI_CDEPTH]) return (false); /* Set the searching flag for low level borgs */ borg_needs_searching = true; /* Maximal radius */ r = borg_skill[BI_CURLITE] + 1; /* Reset */ borg_temp_n = 0; /* Four directions */ for (i = 0; i < 4; i++) { y = c_y + ddy_ddd[i] * r; x = c_x + ddx_ddd[i] * r; /* Check legality */ if (y < 1) continue; if (x < 1) continue; if (y > AUTO_MAX_Y - 2) continue; if (x > AUTO_MAX_X - 2) continue; /* Acquire grid */ ag = &borg_grids[y][x]; /* Require unknown */ if (ag->feat != FEAT_NONE) continue; /* Require viewable */ if (!(ag->info & BORG_VIEW)) continue; /* if it makes me wander, skip it */ /* Clear the flow codes */ borg_flow_clear(); /* obtain the number of steps from this take to the stairs */ cost = borg_flow_cost_stair(y, x, b_stair); /* Check the distance to stair for this proposed grid */ if (borg_skill[BI_CDEPTH] >= borg_skill[BI_CLEVEL] - 5 && cost > borg_skill[BI_CLEVEL] * 3 + 9 && borg_skill[BI_CLEVEL] < 20) continue; /* Careful -- Remember it */ borg_temp_x[borg_temp_n] = x; borg_temp_y[borg_temp_n] = y; borg_temp_n++; } /* Nothing */ if (!borg_temp_n) return (false); /* Wipe icky codes from grids if needed */ if (goal_ignoring || scaryguy_on_level) borg_danger_wipe = true; /* Clear the flow codes */ borg_flow_clear(); /* Create paths to useful grids */ for (i = 0; i < borg_temp_n; i++) { y = borg_temp_y[i]; x = borg_temp_x[i]; /* Create a path */ borg_flow_direct(y, x); } /* Attempt to Commit the flow */ if (!borg_flow_commit(NULL, GOAL_DARK)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_DARK)) return (false); /* Forget goal */ /* goal = 0; */ /* Success */ return (true); } /* * Prepare to "flow" towards "interesting" grids (method 3) * * Note the use of a limit on the "depth" of the flow, and of the flag * which avoids "unknown" grids when calculating the flow, both of which * help 
optimize this function to only handle "easily reachable" grids. * * The "borg_temp" array is much larger than any "local region". */ static bool borg_flow_dark_3(int b_stair) { int i; int cost; int x, y; int x1, y1, x2, y2; /* Hack -- not in town */ if (!borg_skill[BI_CDEPTH]) return (false); /* Local region */ y1 = c_y - 4; x1 = c_x - 4; y2 = c_y + 4; x2 = c_x + 4; /* Restrict to "legal" grids */ if (y1 < 1) y1 = 1; if (x1 < 1) x1 = 1; if (y2 > AUTO_MAX_Y - 2) y2 = AUTO_MAX_Y - 2; if (x2 > AUTO_MAX_X - 2) x2 = AUTO_MAX_X - 2; /* Reset */ borg_temp_n = 0; /* Examine the region */ for (y = y1; y <= y2; y++) { /* Examine the region */ for (x = x1; x <= x2; x++) { /* Skip "boring" grids */ if (!borg_flow_dark_interesting(y, x, b_stair)) continue; /* Skip "unreachable" grids */ if (!borg_flow_dark_reachable(y, x)) continue; /* Clear the flow codes */ borg_flow_clear(); /* obtain the number of steps from this take to the stairs */ cost = borg_flow_cost_stair(y, x, b_stair); /* Check the distance to stair for this proposed grid */ if (borg_skill[BI_CDEPTH] >= borg_skill[BI_CLEVEL] - 5 && cost > borg_skill[BI_CLEVEL] * 3 + 9 && borg_skill[BI_CLEVEL] < 20) continue; /* Careful -- Remember it */ borg_temp_x[borg_temp_n] = x; borg_temp_y[borg_temp_n] = y; borg_temp_n++; } } /* Nothing interesting */ if (!borg_temp_n) return (false); /* Wipe icky codes from grids if needed */ if (goal_ignoring || scaryguy_on_level) borg_danger_wipe = true; /* Clear the flow codes */ borg_flow_clear(); /* Enqueue useful grids */ for (i = 0; i < borg_temp_n; i++) { y = borg_temp_y[i]; x = borg_temp_x[i]; /* Enqueue the grid */ borg_flow_enqueue_grid(y, x); } /* Spread the flow (limit depth) */ borg_flow_spread(5, false, true, false, -1, false); /* Attempt to Commit the flow */ if (!borg_flow_commit(NULL, GOAL_DARK)) return (false); /* Take one step */ if (!borg_flow_old(GOAL_DARK)) return (false); /* Success */ return (true); } /* * Prepare to "flow" towards "interesting" grids (method 4) * * 
 * Note that we avoid grids close to the edge of the panel, since they
 * induce panel scrolling, which is "expensive" in terms of CPU usage,
 * and because this allows us to "expand" the border by several grids
 * to lay down the "avoidance" border in known legal grids.
 *
 * We avoid paths that would take us into different panels by setting
 * the "icky" flag for the "border" grids to prevent path construction,
 * and then clearing them when done, to prevent confusion elsewhere.
 *
 * The "borg_temp" array is large enough to hold one panel full of grids.
 */
static bool borg_flow_dark_4(int b_stair)
{
    int i, x, y;
    int cost;

    int x1, y1, x2, y2;

    int leash = 250;

    /* Hack -- not in town */
    if (!borg_skill[BI_CDEPTH]) return (false);

    /* Hack -- Not if a vault is on the level */
    if (vault_on_level) return (false);

    /* Local region -- a 23x23 box around the player */
    y1 = c_y - 11;
    x1 = c_x - 11;
    y2 = c_y + 11;
    x2 = c_x + 11;

    /* Restrict to "legal" grids */
    if (y1 < 1) y1 = 1;
    if (x1 < 1) x1 = 1;
    if (y2 > AUTO_MAX_Y - 2) y2 = AUTO_MAX_Y - 2;
    if (x2 > AUTO_MAX_X - 2) x2 = AUTO_MAX_X - 2;

    /* Nothing yet */
    borg_temp_n = 0;

    /* check the leash length */
    if (borg_skill[BI_CDEPTH] >= borg_skill[BI_CLEVEL] - 5)
        leash = borg_skill[BI_CLEVEL] * 3 + 9;

    /* Examine the panel */
    for (y = y1; y <= y2; y++) {
        /* Examine the panel */
        for (x = x1; x <= x2; x++) {
            /* Skip "boring" grids */
            if (!borg_flow_dark_interesting(y, x, b_stair)) continue;

            /* Skip "unreachable" grids */
            if (!borg_flow_dark_reachable(y, x)) continue;

            /* Clear the flow codes */
            borg_flow_clear();

            /* obtain the number of steps from this take to the stairs */
            cost = borg_flow_cost_stair(y, x, b_stair);

            /* Check the distance to stair for this proposed grid */
            if (cost > borg_skill[BI_CLEVEL] * 3 + 9
                && borg_skill[BI_CLEVEL] < 20)
                continue;

            /* Careful -- Remember it */
            borg_temp_x[borg_temp_n] = x;
            borg_temp_y[borg_temp_n] = y;
            borg_temp_n++;
        }
    }

    /* Nothing useful */
    if (!borg_temp_n) return (false);

    /* Wipe icky codes from grids if needed */
    if (goal_ignoring || scaryguy_on_level) borg_danger_wipe = true;

    /* Clear the flow codes */
    borg_flow_clear();

    /* Enqueue useful grids */
    for (i = 0; i < borg_temp_n; i++) {
        y = borg_temp_y[i];
        x = borg_temp_x[i];

        /* Enqueue the grid */
        borg_flow_enqueue_grid(y, x);
    }

    /* Expand borders */
    y1--;
    x1--;
    y2++;
    x2++;

    /* Avoid the edges */
    borg_flow_border(y1, x1, y2, x2, true);

    /* Spread the flow (limit depth Leash) */
    if (borg_skill[BI_CLEVEL] < 15) {
        /* Short Leash */
        borg_flow_spread(leash, true, true, false, -1, false);
    } else {
        /* Long Leash */
        borg_flow_spread(250, true, true, false, -1, false);
    }

    /* Clear the edges */
    borg_flow_border(y1, x1, y2, x2, false);

    /* Attempt to Commit the flow */
    if (!borg_flow_commit("dark-4", GOAL_DARK)) return (false);

    /* Take one step */
    if (!borg_flow_old(GOAL_DARK)) return (false);

    /* Success */
    return (true);
}

/*
 * Prepare to "flow" towards "interesting" grids (method 5)
 * (whole-map scan; the last resort of the exploration methods)
 */
static bool borg_flow_dark_5(int b_stair)
{
    int i, x, y;
    int cost;

    int leash = 250;

    /* Hack -- not in town */
    if (!borg_skill[BI_CDEPTH]) return (false);

    /* Nothing yet */
    borg_temp_n = 0;

    /* check the leash length */
    if (borg_skill[BI_CDEPTH] >= borg_skill[BI_CLEVEL] - 5)
        leash = borg_skill[BI_CLEVEL] * 3 + 9;

    /* Examine every "legal" grid */
    for (y = 1; y < AUTO_MAX_Y - 1; y++) {
        for (x = 1; x < AUTO_MAX_X - 1; x++) {
            /* Skip "boring" grids */
            if (!borg_flow_dark_interesting(y, x, b_stair)) continue;

            /* Skip "unreachable" grids */
            if (!borg_flow_dark_reachable(y, x)) continue;

            /* Clear the flow codes */
            borg_flow_clear();

            /* obtain the number of steps from this take to the stairs */
            cost = borg_flow_cost_stair(y, x, b_stair);

            /* Check the distance to stair for this proposed grid */
            if (cost > borg_skill[BI_CLEVEL] * 3 + 9
                && borg_skill[BI_CLEVEL] < 20)
                continue;

            /* Careful -- Remember it */
            borg_temp_x[borg_temp_n] = x;
            borg_temp_y[borg_temp_n] = y;
            borg_temp_n++;

            /* Paranoia -- Check for overflow */
            if (borg_temp_n == AUTO_TEMP_MAX) {
                /* Hack -- Double break */
                y = AUTO_MAX_Y;
                x = AUTO_MAX_X;
                break;
            }
        }
    }

    /* Nothing useful */
    if (!borg_temp_n) return (false);

    /* Wipe icky codes from grids if needed */
    if (goal_ignoring || scaryguy_on_level) borg_danger_wipe = true;

    /* Clear the flow codes */
    borg_flow_clear();

    /* Enqueue useful grids */
    for (i = 0; i < borg_temp_n; i++) {
        y = borg_temp_y[i];
        x = borg_temp_x[i];

        /* Enqueue the grid */
        borg_flow_enqueue_grid(y, x);
    }

    /* Spread the flow */
    if (borg_skill[BI_CLEVEL] <= 5 && avoidance <= borg_skill[BI_CURHP]) {
        /* Short Leash */
        borg_flow_spread(leash, true, true, false, -1, false);
    } else if (borg_skill[BI_CLEVEL] <= 30
               && avoidance <= borg_skill[BI_CURHP]) {
        /* Short Leash */
        borg_flow_spread(leash, true, true, false, -1, false);
    } else {
        /* Long Leash */
        borg_flow_spread(250, true, true, false, -1, false);
    }

    /* Attempt to Commit the flow */
    if (!borg_flow_commit("dark-5", GOAL_DARK)) return (false);

    /* Take one step */
    if (!borg_flow_old(GOAL_DARK)) return (false);

    /* Success */
    return (true);
}

/*
 * Prepare to "flow" towards "interesting" grids
 *
 * The "exploration" routines are broken into "near" and "far"
 * exploration, and each set is chosen via the flag below.
 */
bool borg_flow_dark(bool neer)
{
    int i;
    int x, y, j, b_j = -1;
    int b_stair = -1;

    /* Not if sitting in a sea of runes and we saw Morgoth recently */
    if (borg_morgoth_position && morgoth_on_level) return (false);

    /* Paranoia -- the grid we are standing on is itself interesting */
    if (borg_flow_dark_interesting(c_y, c_x, -1)) {
        return (false);
    }

    /* Check distance away from stairs, used later */

    /* Check for an existing "up stairs" */
    for (i = 0; i < track_less.num; i++) {
        x = track_less.x[i];
        y = track_less.y[i];

        /* How far is the nearest up stairs */
        j = borg_distance(c_y, c_x, y, x);

        /* skip the closer ones
         * NOTE(review): this keeps the stair with the LARGEST distance,
         * despite the "nearest" wording above -- confirm intent. */
        if (b_j >= j) continue;

        /* track it */
        b_j = j;
        b_stair = i;
    }

    /* Near */
    if (neer) {
        /* Method 1 */
        if (borg_flow_dark_1(b_stair)) return (true);

        /* Method 2 */
        if (borg_flow_dark_2(b_stair)) return (true);

        /* Method 3 */
        if (borg_flow_dark_3(b_stair)) return (true);
    }

    /* Far */
    else {
        /* Method 4 */
        if (borg_flow_dark_4(b_stair)) return (true);

        /* Method 5 */
        if (borg_flow_dark_5(b_stair)) return (true);
    }

    /* Fail */
    return (false);
}

/*
 * Hack -- spastic searching
 * (the grid currently chosen as the "search here" destination)
 */
static uint8_t spastic_x;
static uint8_t spastic_y;

/*
 * Search carefully for secret doors and such
 *
 * Scores every reachable floor grid on the likelihood that a secret
 * door is adjacent (walls orthogonally, walls diagonally, no monsters),
 * flows to the best one, and searches there once we arrive.
 */
bool borg_flow_spastic(bool bored)
{
    int cost;

    int i, x, y, v;

    int b_x = c_x;
    int b_y = c_y;
    int b_v = -1;
    int j, b_j = -1;
    int b_stair = -1;

    borg_grid* ag;

    /* Hack -- not in town */
    if (!borg_skill[BI_CDEPTH]) return (false);

    /* Hack -- Not if starving */
    if (borg_skill[BI_ISWEAK]) return (false);

    /* Hack -- Not if hopeless unless twitchy */
    if (borg_t - borg_began > 3000 && avoidance <= borg_skill[BI_CURHP])
        return (false);

    /* Not bored */
    if (!bored) {
        /* Look around for danger */
        int p = borg_danger(c_y, c_x, 1, true, false);

        /* Avoid searching when in danger */
        if (p > avoidance / 4) return (false);
    }

    /* Check distance away from stairs, used later */

    /* Check for an existing "up stairs" */
    for (i = 0; i < track_less.num; i++) {
        x = track_less.x[i];
        y = track_less.y[i];

        /* How far is the nearest up stairs */
        j = borg_distance(c_y, c_x, y, x);

        /* skip the closer ones
         * NOTE(review): as in borg_flow_dark(), this actually keeps the
         * farthest stair -- confirm intent. */
        if (b_j >= j) continue;

        /* track it */
        b_j = j;
        b_stair = i;
    }

    /* We have arrived */
    if ((spastic_x == c_x) && (spastic_y == c_y)) {
        /* Cancel */
        spastic_x = 0;
        spastic_y = 0;

        ag = &borg_grids[c_y][c_x];

        /* Take note */
        borg_note(format("# Spastic Searching at (%d,%d)...value:%d", c_x, c_y,
            ag->xtra));

        /* Count searching */
        for (i = 0; i < 9; i++) {
            /* Extract the location */
            int xx = c_x + ddx_ddd[i];
            int yy = c_y + ddy_ddd[i];

            /* Current grid */
            ag = &borg_grids[yy][xx];

            /* Tweak -- Remember the search */
            if (ag->xtra < 100) ag->xtra += 5;
        }

        /* we searched here */
        return (false);
    }

    /* Reverse flow -- fill borg_data_cost with distances from the player */
    borg_flow_reverse(250, true, false, false, -1, false);

    /* Scan the entire map */
    for (y = 1; y < AUTO_MAX_Y - 1; y++) {
        for (x = 1; x < AUTO_MAX_X - 1; x++) {
            borg_grid* ag_ptr[8];

            int wall = 0;
            int supp = 0;
            int diag = 0;
            int monsters = 0;

            /* Acquire the grid */
            ag = &borg_grids[y][x];

            /* Skip unknown grids */
            if (ag->feat == FEAT_NONE) continue;

            /* Skip trap grids */
            if (ag->trap) continue;

            /* Skip walls/doors */
            if (!borg_cave_floor_grid(ag)) continue;

            /* Acquire the cost */
            cost = borg_data_cost->data[y][x];

            /* Skip "unreachable" grids */
            if (cost >= 250) continue;

            /* Skip grids that are really far away.  He probably
             * won't find anything and it takes lots of turns */
            if (cost >= 25 && borg_skill[BI_CLEVEL] < 30) continue;
            if (cost >= 50) continue;

            /* Tweak -- Limit total searches */
            if (ag->xtra >= 50) continue;
            if (ag->xtra >= borg_skill[BI_CLEVEL]) continue;

            /* Limit initial searches until bored */
            if (!bored && (ag->xtra > 5)) continue;

            /* Avoid searching detected sectors */
            if (borg_detect_door[y / borg_panel_hgt()][x / borg_panel_wid()])
                continue;

            /* Skip ones that make me wander too far unless twitchy (Leash)*/
            if (b_stair != -1 && borg_skill[BI_CLEVEL] < 15
                && avoidance <= borg_skill[BI_CURHP]) {
                /* Check the distance of this grid to the stair */
                j = borg_distance(
                    track_less.y[b_stair], track_less.x[b_stair], y, x);

                /* Distance of me to the stairs */
                b_j = borg_distance(
                    c_y, c_x, track_less.y[b_stair], track_less.x[b_stair]);

                /* skip far away grids while I am close to stair*/
                if (b_j <= borg_skill[BI_CLEVEL] * 3 + 9
                    && j >= borg_skill[BI_CLEVEL] * 3 + 9)
                    continue;

                /* If really low level don't do this much */
                if (borg_skill[BI_CLEVEL] <= 3
                    && b_j <= borg_skill[BI_CLEVEL] + 9
                    && j >= borg_skill[BI_CLEVEL] + 9)
                    continue;

                /* Do not Venture too far from stair */
                if (borg_skill[BI_CLEVEL] <= 3
                    && j >= borg_skill[BI_CLEVEL] + 5)
                    continue;

                /* Do not Venture too far from stair */
                if (borg_skill[BI_CLEVEL] <= 10
                    && j >= borg_skill[BI_CLEVEL] + 9)
                    continue;
            }

            /* Extract adjacent locations */
            for (i = 0; i < 8; i++) {
                /* Extract the location */
                int xx = x + ddx_ddd[i];
                int yy = y + ddy_ddd[i];

                /* Get the grid contents */
                ag_ptr[i] = &borg_grids[yy][xx];
            }

            /* Count possible door locations (orthogonal neighbors) */
            for (i = 0; i < 4; i++) {
                ag = ag_ptr[i];
                if (ag->feat >= FEAT_GRANITE) wall++;
            }

            /* No possible secret doors */
            if (wall < 1) continue;

            /* Count supporting evidence for secret doors */
            for (i = 0; i < 4; i++) {
                ag = ag_ptr[i];

                /* Rubble */
                if (ag->feat == FEAT_RUBBLE) continue;

                /* Walls, Doors */
                if (((ag->feat >= FEAT_SECRET) && (ag->feat <= FEAT_GRANITE))
                    || ((ag->feat == FEAT_OPEN) || (ag->feat == FEAT_BROKEN))
                    || (ag->feat == FEAT_CLOSED)) {
                    supp++;
                }
            }

            /* Count supporting evidence for secret doors (diagonals) */
            for (i = 4; i < 8; i++) {
                ag = ag_ptr[i];

                /* Rubble */
                if (ag->feat == FEAT_RUBBLE) continue;

                /* Walls */
                if (ag->feat >= FEAT_SECRET) {
                    diag++;
                }
            }

            /* No possible secret doors */
            if (diag < 2) continue;

            /* Count monsters */
            for (i = 0; i < 8; i++) {
                ag = ag_ptr[i];

                /* monster */
                if (ag->kill) monsters++;
            }

            /* No search near monsters */
            if (monsters >= 1) continue;

            /* Tweak -- Reward walls, punish visitation, distance, time on
             * level */
            v = (supp * 500) + (diag * 100) - (ag->xtra * 40) - (cost * 2)
                - (borg_t - borg_began);

            /* Punish low level and searching too much */
            v -= (50 - borg_skill[BI_CLEVEL]) * 5;

            /* The grid is not searchable */
            if (v <= 0) continue;

            /* Tweak -- Minimal interest until bored */
            if (!bored && (v < 1500)) continue;

            /* Track "best" grid */
            if ((b_v >= 0) && (v < b_v)) continue;

            /* Save the data */
            b_v = v;
            b_x = x;
            b_y = y;
        }
    }

    /* Clear the flow codes */
    borg_flow_clear();

    /* Hack -- Nothing found */
    if (b_v < 0) return (false);

    /* Access grid */
    ag = &borg_grids[b_y][b_x];

    /* Memorize */
    spastic_x = b_x;
    spastic_y = b_y;

    /* Enqueue the grid */
    borg_flow_enqueue_grid(b_y, b_x);

    /* Spread the flow */
    borg_flow_spread(250, true, false, false, -1, false);

    /* Attempt to Commit the flow */
    if (!borg_flow_commit("spastic", GOAL_XTRA)) return (false);

    /* Take one step */
    if (!borg_flow_old(GOAL_XTRA)) return (false);

    /* Success */
    return (true);
}

/*
 * Initialize this file
 */
void borg_init_6(void)
{
    /* Nothing */
}

#ifdef MACINTOSH
static int HACK = 0;
#endif

#endif /* ALLOW_BORG */
965d43904d40916d074863861042c1dce977d57d
9ceacf33fd96913cac7ef15492c126d96cae6911
/sys/dev/fdt/sxirintc.c
142b59db9025680ac8aaad7457f48c0606c30ee0
[]
no_license
openbsd/src
ab97ef834fd2d5a7f6729814665e9782b586c130
9e79f3a0ebd11a25b4bff61e900cb6de9e7795e9
refs/heads/master
2023-09-02T18:54:56.624627
2023-09-02T15:16:12
2023-09-02T15:16:12
66,966,208
3,394
1,235
null
2023-08-08T02:42:25
2016-08-30T18:18:25
C
UTF-8
C
false
false
3,252
c
sxirintc.c
/*	$OpenBSD: sxirintc.c,v 1.1 2022/07/14 19:06:29 kettenis Exp $	*/
/*
 * Copyright (c) 2022 Mark Kettenis <kettenis@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Allwinner A31 "R_INTC" wakeup interrupt controller.
 * Its only job here is to enable the NMI wakeup source across suspend;
 * normal interrupt delivery is handled by the GIC.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <machine/bus.h>
#include <machine/fdt.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

/* Register offsets and bits. */
#define RINTC_IRQ_PENDING	0x10
#define RINTC_IRQ_ENABLE	0x40
#define  RINTC_IRQ_ENABLE_NMI	(1 << 0)

/* Register access helpers. */
#define HREAD4(sc, reg)							\
	(bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg)))
#define HWRITE4(sc, reg, val)						\
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
#define HSET4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) | (bits))
#define HCLR4(sc, reg, bits)						\
	HWRITE4((sc), (reg), HREAD4((sc), (reg)) & ~(bits))

struct sxirintc_softc {
	struct device		sc_dev;
	bus_space_tag_t		sc_iot;		/* register bus tag */
	bus_space_handle_t	sc_ioh;		/* mapped register window */
};

int	sxirintc_match(struct device *, void *, void *);
void	sxirintc_attach(struct device *, struct device *, void *);
int	sxirintc_activate(struct device *, int);

const struct cfattach sxirintc_ca = {
	sizeof(struct sxirintc_softc), sxirintc_match, sxirintc_attach,
	NULL, sxirintc_activate
};

struct cfdriver sxirintc_cd = {
	NULL, "sxirintc", DV_DULL
};

/*
 * Match on the device tree compatible string.
 */
int
sxirintc_match(struct device *parent, void *match, void *aux)
{
	struct fdt_attach_args *faa = aux;

	return OF_is_compatible(faa->fa_node, "allwinner,sun6i-a31-r-intc");
}

/*
 * Map the controller's register window; no interrupts are established.
 */
void
sxirintc_attach(struct device *parent, struct device *self, void *aux)
{
	struct sxirintc_softc *sc = (struct sxirintc_softc *)self;
	struct fdt_attach_args *faa = aux;

	if (faa->fa_nreg < 1) {
		printf(": no registers\n");
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh)) {
		printf(": can't map registers\n");
		return;
	}

	printf("\n");
}

/*
 * Suspend/resume hook: arm the NMI as a wakeup source on suspend
 * (clearing any stale pending bits first) and disarm it on resume.
 */
int
sxirintc_activate(struct device *self, int act)
{
	struct sxirintc_softc *sc = (struct sxirintc_softc *)self;

	/*
	 * Typically the "NMI" interrupt is controlled by the PMIC.
	 * This interrupt is routed in parallel to the GIC and the
	 * ARISC coprocessor.  Enable this interrupt when we suspend
	 * such that the firmware running on the ARISC coprocessor can
	 * wake up the SoC when the PMIC triggers this interrupt.
	 */

	switch (act) {
	case DVACT_SUSPEND:
		HWRITE4(sc, RINTC_IRQ_PENDING, ~0);
		HSET4(sc, RINTC_IRQ_ENABLE, RINTC_IRQ_ENABLE_NMI);
		break;
	case DVACT_RESUME:
		HCLR4(sc, RINTC_IRQ_ENABLE, RINTC_IRQ_ENABLE_NMI);
		break;
	}

	return 0;
}
fff5477bf7c753e1e1cce30cafcb433a40a48d59
a9b450f72db333e4712976ea048402cd23842bd4
/ext/ruby2d/font.c
da00745ec975060e656887772ba5d950263c227c
[ "MIT" ]
permissive
ruby2d/ruby2d
dfbadb9e776771ec6af8b7aad3d86ad4aba01947
14ddfe7228029db4b85cf462307cdce2e26180d8
refs/heads/main
2023-07-06T22:28:50.071480
2023-02-21T23:27:31
2023-02-21T23:27:31
43,657,793
661
104
MIT
2023-08-25T20:41:26
2015-10-05T00:13:00
Ruby
UTF-8
C
false
false
879
c
font.c
// font.c #include "ruby2d.h" /* * Create a TTF_Font object given a path to a font and a size */ TTF_Font *R2D_FontCreateTTFFont(const char *path, int size, const char *style) { // Check if font file exists if (!R2D_FileExists(path)) { R2D_Error("R2D_FontCreateTTFFont", "Font file `%s` not found", path); return NULL; } TTF_Font *font = TTF_OpenFont(path, size); if (!font) { R2D_Error("TTF_OpenFont", TTF_GetError()); return NULL; } if(strncmp(style, "bold", 4) == 0) { TTF_SetFontStyle(font, TTF_STYLE_BOLD); } else if(strncmp(style, "italic", 6) == 0) { TTF_SetFontStyle(font, TTF_STYLE_ITALIC); } else if(strncmp(style, "underline", 9) == 0) { TTF_SetFontStyle(font, TTF_STYLE_UNDERLINE); } else if(strncmp(style, "strikethrough", 13) == 0) { TTF_SetFontStyle(font, TTF_STYLE_STRIKETHROUGH); } return font; }
ddf0d1675638e37200d7f453859985fed1a25d9b
28d0f8c01599f8f6c711bdde0b59f9c2cd221203
/sys/arch/hpcarm/dev/wzero3_kbd.c
058cd779c6ff70c8d56ee6f57b6790b8c52f213c
[]
no_license
NetBSD/src
1a9cbc22ed778be638b37869ed4fb5c8dd616166
23ee83f7c0aea0777bd89d8ebd7f0cde9880d13c
refs/heads/trunk
2023-08-31T13:24:58.105962
2023-08-27T15:50:47
2023-08-27T15:50:47
88,439,547
656
348
null
2023-07-20T20:07:24
2017-04-16T20:03:43
null
UTF-8
C
false
false
19,846
c
wzero3_kbd.c
/* $NetBSD: wzero3_kbd.c,v 1.12 2021/08/07 16:18:53 thorpej Exp $ */ /*- * Copyright (C) 2008, 2009, 2010 NONAKA Kimihiro <nonaka@netbsd.org> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <sys/cdefs.h> __KERNEL_RCSID(0, "$NetBSD: wzero3_kbd.c,v 1.12 2021/08/07 16:18:53 thorpej Exp $"); #include <sys/param.h> #include <sys/systm.h> #include <sys/device.h> #include <sys/kernel.h> #include <sys/kmem.h> #include <sys/callout.h> #include <sys/bus.h> #include <dev/sysmon/sysmonvar.h> #include <dev/sysmon/sysmon_taskq.h> #include <arm/xscale/pxa2x0cpu.h> #include <arm/xscale/pxa2x0var.h> #include <arm/xscale/pxa2x0_gpio.h> #include <machine/bootinfo.h> #include <machine/config_hook.h> #include <machine/platid.h> #include <machine/platid_mask.h> #include <dev/hpc/hpckbdvar.h> #include <arch/hpcarm/dev/wzero3_reg.h> #ifdef DEBUG #define DPRINTF(arg) printf arg #else #define DPRINTF(arg) /* nothing */ #endif #define CSR_READ1(r) bus_space_read_1(sc->sc_iot, sc->sc_ioh, (r)) #define CSR_WRITE1(r,v) bus_space_write_1(sc->sc_iot, sc->sc_ioh, (r), (v)) #define CSR_READ2(r) bus_space_read_2(sc->sc_iot, sc->sc_ioh, (r)) #define CSR_WRITE2(r,v) bus_space_write_2(sc->sc_iot, sc->sc_ioh, (r), (v)) #define CSR_READ4(r) bus_space_read_4(sc->sc_iot, sc->sc_ioh, (r)) #define CSR_WRITE4(r,v) bus_space_write_4(sc->sc_iot, sc->sc_ioh, (r), (v)) /* register */ #define KBDCOL_L (0x00) /* Write */ #define KBDCOL_U (0x04) /* Write */ #define KBDCHARGE (0x08) /* Write */ #define KBDDATA (0x08) /* Read */ #define REGMAPSIZE 0x0c #define KEYWAIT 20 /* us */ #define WS003SH_NCOLUMN 12 #define WS003SH_NROW 7 struct wzero3kbd_softc { device_t sc_dev; bus_space_tag_t sc_iot; bus_space_handle_t sc_ioh; int sc_ncolumn; int sc_nrow; uint8_t *sc_okeystat; uint8_t *sc_keystat; void *sc_key_ih; void *sc_power_ih; void *sc_reset_ih; int sc_key_pin; int sc_power_pin; int sc_reset_pin; struct hpckbd_ic_if sc_if; struct hpckbd_if *sc_hpckbd; struct sysmon_pswitch sc_smpsw; /* for reset key */ int sc_enabled; /* polling stuff */ struct callout sc_keyscan_ch; int sc_interval; #define KEY_INTERVAL 50 /* ms */ #if defined(KEYTEST) || defined(KEYTEST2) || defined(KEYTEST3) || 
defined(KEYTEST4) || defined(KEYTEST5) void *sc_test_ih; int sc_test_pin; int sc_nouse_pin; int sc_nouse_pin2; int sc_nouse_pin3; int sc_bit; #endif }; static int wzero3kbd_match(device_t, cfdata_t, void *); static void wzero3kbd_attach(device_t, device_t, void *); CFATTACH_DECL_NEW(wzero3kbd, sizeof(struct wzero3kbd_softc), wzero3kbd_match, wzero3kbd_attach, NULL, NULL); static int wzero3kbd_intr(void *arg); #if defined(KEYTEST) static int wzero3kbd_intr2(void *arg); #endif #if defined(KEYTEST3) static int wzero3kbd_intr3(void *arg); #endif static void wzero3kbd_tick(void *arg); static int wzero3kbd_power_intr(void *arg); static int wzero3kbd_reset_intr(void *arg); static int wzero3kbd_input_establish(void *arg, struct hpckbd_if *kbdif); static void wzero3kbd_sysmon_reset_event(void *arg); static int wzero3kbd_poll(void *arg); static int wzero3kbd_poll1(void *arg); /* * WS003SH/WS004SH/WS007SH keyscan map col#0 col#1 col#2 col#3 col#4 col#5 col#6 col#7 col#8 col#9 col#10 col#11 row#0: CTRL 1 3 5 6 7 9 0 BS (none) ROTATE CAMERA row#1: (none) 2 4 r y 8 i o p (none) VOL- VOL+ row#2: TAB q e t g u j k (none) (none) (none) (none) row#3: (none) w s f v h m l (none) (none) SHIFT (none) row#4: CALL a d c b n . (none) ENTER (none) WIN (none) row#5: MAIL z x - SPACE / (none) UP (none) (none) LSOFT FN row#6: IE MOJI (none) OK ACTION , LEFT DOWN RIGHT (none) RSOFT (none) */ /* * WS011SH keyscan map col#0 col#1 col#2 col#3 col#4 col#5 col#6 col#7 col#8 col#9 col#10 col#11 row#0 Ctrl (none) (none) (none) (none) (none) (none) (none) Del (none) ROTATE (none) row#1 (none) (none) (none) R Y (none) I O P (none) (none) (none) row#2 Tab Q E T G U J K (none) (none) (none) (none) row#3 (none) W S F V H M L (none) (none) Shift (none) row#4 (none) A D C B N . 
(none) Enter (none) (none) (none) row#5 (none) Z X - Space / (none) UP (none) (none) (none) Fn row#6 (none) MOJI HAN/ZEN OK (none) , LEFT DOWN RIGHT (none) (none) (none) */ /* * WS020SH keyscan map col#0 col#1 col#2 col#3 col#4 col#5 col#6 col#7 col#8 col#9 col#10 col#11 row#0 Ctrl (none) (none) (none) (none) (none) (none) (none) Del (none) ROTATE (none) row#1 (none) (none) (none) R Y (none) I O P (none) MEDIA (none) row#2 Tab Q E T G U J K (none) (none) (none) (none) row#3 (none) W S F V H M L (none) (none) LShift (none) row#4 (none) A D C B N . (none) Enter (none) RShift (none) row#5 (none) Z X - Space / (none) UP (none) DOWN (none) Fn row#6 (none) MOJI HAN/ZEN OK (none) , LEFT (none) RIGHT (none) (none) (none) */ static const struct wzero3kbd_model { platid_mask_t *platid; int key_pin; int power_pin; int reset_pin; int ncolumn; int nrow; } wzero3kbd_table[] = { /* WS003SH */ { &platid_mask_MACH_SHARP_WZERO3_WS003SH, -1, /* XXX */ GPIO_WS003SH_POWER_BUTTON, -1, /* None */ WS003SH_NCOLUMN, WS003SH_NROW, }, /* WS004SH */ { &platid_mask_MACH_SHARP_WZERO3_WS004SH, -1, /* XXX */ GPIO_WS003SH_POWER_BUTTON, -1, /* None */ WS003SH_NCOLUMN, WS003SH_NROW, }, /* WS007SH */ { &platid_mask_MACH_SHARP_WZERO3_WS007SH, -1, /* XXX */ GPIO_WS007SH_POWER_BUTTON, GPIO_WS007SH_RESET_BUTTON, WS003SH_NCOLUMN, WS003SH_NROW, }, /* WS011SH */ { &platid_mask_MACH_SHARP_WZERO3_WS011SH, -1, /* XXX */ GPIO_WS011SH_POWER_BUTTON, GPIO_WS011SH_RESET_BUTTON, WS003SH_NCOLUMN, WS003SH_NROW, }, /* WS020SH */ { &platid_mask_MACH_SHARP_WZERO3_WS020SH, -1, /* XXX */ GPIO_WS020SH_POWER_BUTTON, GPIO_WS020SH_RESET_BUTTON, WS003SH_NCOLUMN, WS003SH_NROW, }, { NULL, -1, -1, -1, 0, 0, } }; static const struct wzero3kbd_model * wzero3kbd_lookup(void) { const struct wzero3kbd_model *model; for (model = wzero3kbd_table; model->platid != NULL; model++) { if (platid_match(&platid, model->platid)) { return model; } } return NULL; } static int wzero3kbd_match(device_t parent, cfdata_t cf, void *aux) { if 
(strcmp(cf->cf_name, "wzero3kbd") != 0) return 0; if (wzero3kbd_lookup() == NULL) return 0; return 1; } static void wzero3kbd_attach(device_t parent, device_t self, void *aux) { struct wzero3kbd_softc *sc = device_private(self); struct pxaip_attach_args *pxa = (struct pxaip_attach_args *)aux; struct hpckbd_attach_args haa; const struct wzero3kbd_model *model; sc->sc_dev = self; model = wzero3kbd_lookup(); if (model == NULL) { aprint_error(": unknown model\n"); return; } aprint_normal(": keyboard\n"); aprint_naive("\n"); sc->sc_key_pin = model->key_pin; sc->sc_power_pin = model->power_pin; sc->sc_reset_pin = model->reset_pin; sc->sc_ncolumn = model->ncolumn; sc->sc_nrow = model->nrow; sc->sc_iot = pxa->pxa_iot; if (bus_space_map(sc->sc_iot, PXA2X0_CS2_START, REGMAPSIZE, 0, &sc->sc_ioh)) { aprint_error_dev(self, "couldn't map registers.\n"); return; } sc->sc_okeystat = kmem_zalloc(sc->sc_nrow * sc->sc_ncolumn, KM_SLEEP); sc->sc_keystat = kmem_zalloc(sc->sc_nrow * sc->sc_ncolumn, KM_SLEEP); sc->sc_if.hii_ctx = sc; sc->sc_if.hii_establish = wzero3kbd_input_establish; sc->sc_if.hii_poll = wzero3kbd_poll; /* Attach console if not using serial. */ if (!(bootinfo->bi_cnuse & BI_CNUSE_SERIAL)) hpckbd_cnattach(&sc->sc_if); /* Install interrupt handler. 
*/ if (sc->sc_key_pin >= 0) { pxa2x0_gpio_set_function(sc->sc_key_pin, GPIO_IN); sc->sc_key_ih = pxa2x0_gpio_intr_establish(sc->sc_key_pin, IST_EDGE_BOTH, IPL_TTY, wzero3kbd_intr, sc); if (sc->sc_key_ih == NULL) { aprint_error_dev(sc->sc_dev, "couldn't establish key interrupt\n"); } } else { sc->sc_interval = KEY_INTERVAL / (1000 / hz); if (sc->sc_interval < 1) sc->sc_interval = 1; callout_init(&sc->sc_keyscan_ch, 0); callout_reset(&sc->sc_keyscan_ch, sc->sc_interval, wzero3kbd_tick, sc); } /* power key */ if (sc->sc_power_pin >= 0) { pxa2x0_gpio_set_function(sc->sc_power_pin, GPIO_IN); sc->sc_power_ih = pxa2x0_gpio_intr_establish( sc->sc_power_pin, IST_EDGE_BOTH, IPL_TTY, wzero3kbd_power_intr, sc); if (sc->sc_power_ih == NULL) { aprint_error_dev(sc->sc_dev, "couldn't establish power key interrupt\n"); } } /* reset button */ if (sc->sc_reset_pin >= 0) { pxa2x0_gpio_set_function(sc->sc_reset_pin, GPIO_IN); sc->sc_reset_ih = pxa2x0_gpio_intr_establish( sc->sc_reset_pin, IST_EDGE_BOTH, IPL_TTY, wzero3kbd_reset_intr, sc); if (sc->sc_reset_ih == NULL) { aprint_error_dev(sc->sc_dev, "couldn't establish reset key interrupt\n"); } sc->sc_smpsw.smpsw_name = device_xname(self); sc->sc_smpsw.smpsw_type = PSWITCH_TYPE_RESET; if (sysmon_pswitch_register(&sc->sc_smpsw) != 0) { aprint_error_dev(sc->sc_dev, "unable to register reset event handler\n"); } } /* Attach hpckbd. */ haa.haa_ic = &sc->sc_if; config_found(self, &haa, hpckbd_print, CFARGS_NONE); #if defined(KEYTEST) || defined(KEYTEST2) || defined(KEYTEST3) || defined(KEYTEST4) || defined(KEYTEST5) sc->sc_test_ih = NULL; sc->sc_test_pin = -1; sc->sc_nouse_pin = -1; sc->sc_nouse_pin2 = -1; sc->sc_nouse_pin3 = -1; sc->sc_bit = 0x01; if (platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS003SH) || platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS004SH)) { sc->sc_nouse_pin = GPIO_WS003SH_SD_DETECT; /* SD_DETECT */ sc->sc_nouse_pin2 = 86; /* Vsync? */ sc->sc_nouse_pin3 = 89; /* RESET? 
*/ } if (platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS007SH)) { sc->sc_nouse_pin = GPIO_WS007SH_SD_DETECT; /* SD_DETECT */ sc->sc_nouse_pin2 = 77; /* Vsync? */ } if (platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS011SH)) { sc->sc_nouse_pin = GPIO_WS011SH_SD_DETECT; /* SD_DETECT */ sc->sc_nouse_pin2 = 77; /* Vsync? */ } if (platid_match(&platid, &platid_mask_MACH_SHARP_WZERO3_WS020SH)) { sc->sc_nouse_pin = GPIO_WS020SH_SD_DETECT; /* SD_DETECT */ sc->sc_nouse_pin2 = 77; /* Vsync? */ } #ifdef KEYTEST for (sc->sc_test_pin = 2; sc->sc_test_pin < PXA270_GPIO_NPINS; sc->sc_test_pin++) { if (sc->sc_test_pin != sc->sc_nouse_pin && sc->sc_test_pin != sc->sc_nouse_pin2 && sc->sc_test_pin != sc->sc_nouse_pin3 && sc->sc_test_pin != sc->sc_key_pin && sc->sc_test_pin != sc->sc_power_pin && sc->sc_test_pin != sc->sc_reset_pin && GPIO_IS_GPIO_IN(pxa2x0_gpio_get_function(sc->sc_test_pin))) break; } if (sc->sc_test_pin < PXA270_GPIO_NPINS) { printf("GPIO_IN: GPIO pin #%d\n", sc->sc_test_pin); sc->sc_test_ih = pxa2x0_gpio_intr_establish(sc->sc_test_pin, IST_EDGE_BOTH, IPL_TTY, wzero3kbd_intr2, sc); } else { sc->sc_test_pin = -1; } #endif #ifdef KEYTEST3 { int i; printf("pin: "); for (i = 0; i < PXA270_GPIO_NPINS; i++) { if (i == sc->sc_nouse_pin || i == sc->sc_nouse_pin2 || i == sc->sc_nouse_pin3 || i == sc->sc_key_pin || i == sc->sc_power_pin || i == sc->sc_reset_pin) continue; printf("%d, ", i); if (GPIO_IS_GPIO_IN(pxa2x0_gpio_get_function(i))) { pxa2x0_gpio_intr_establish(i, IST_EDGE_BOTH, IPL_TTY, wzero3kbd_intr3, (void *)(long)i); } } } #endif #ifdef KEYTEST4 for (sc->sc_test_pin = 2; sc->sc_test_pin < PXA270_GPIO_NPINS; sc->sc_test_pin++) { if (sc->sc_test_pin != sc->sc_nouse_pin && sc->sc_test_pin != sc->sc_nouse_pin2 && sc->sc_test_pin != sc->sc_nouse_pin3 && sc->sc_test_pin != sc->sc_key_pin && sc->sc_test_pin != sc->sc_power_pin && sc->sc_test_pin != sc->sc_reset_pin && GPIO_IS_GPIO_OUT(pxa2x0_gpio_get_function(sc->sc_test_pin))) break; } if 
(sc->sc_test_pin < PXA270_GPIO_NPINS) { printf("GPIO_OUT: GPIO pin #%d\n", sc->sc_test_pin); } else { sc->sc_test_pin = -1; } #endif #ifdef KEYTEST5 sc->sc_test_pin = 0x00; sc->sc_bit = 0x01; #endif #endif } static int wzero3kbd_intr(void *arg) { struct wzero3kbd_softc *sc = (struct wzero3kbd_softc *)arg; #if defined(KEYTEST) || defined(KEYTEST2) || defined(KEYTEST3) || defined(KEYTEST4) || defined(KEYTEST5) printf("wzero3kbd_intr: GPIO pin #%d = %s\n", sc->sc_key_pin, pxa2x0_gpio_get_bit(sc->sc_key_pin) ? "on" : "off"); #endif #if defined(KEYTEST4) if (sc->sc_test_pin >= 0) { if (pxa2x0_gpio_get_bit(sc->sc_test_pin)) { printf("GPIO_OUT: GPIO pin #%d: L\n",sc->sc_test_pin); pxa2x0_gpio_clear_bit(sc->sc_test_pin); } else { printf("GPIO_OUT: GPIO pin #%d: H\n", sc->sc_test_pin); pxa2x0_gpio_set_bit(sc->sc_test_pin); } } #endif #if defined(KEYTEST5) printf("CPLD(%#x): value=%#x, mask=%#x\n", sc->sc_test_pin, CSR_READ4(sc->sc_test_pin), sc->sc_bit); if (CSR_READ4(sc->sc_test_pin) & sc->sc_bit) { printf("CPLD_OUT: CPLD: L\n"); CSR_WRITE4(sc->sc_test_pin, CSR_READ4(sc->sc_test_pin) & ~sc->sc_bit); } else { printf("CPLD_OUT: CPLD: H\n"); CSR_WRITE4(sc->sc_test_pin, CSR_READ4(sc->sc_test_pin) | sc->sc_bit); } #endif (void) wzero3kbd_poll1(sc); pxa2x0_gpio_clear_intr(sc->sc_key_pin); return 1; } #if defined(KEYTEST) static int wzero3kbd_intr2(void *arg) { struct wzero3kbd_softc *sc = (struct wzero3kbd_softc *)arg; printf("wzero3kbd_intr2: GPIO_IN: GPIO pin #%d = %s\n", sc->sc_test_pin, pxa2x0_gpio_get_bit(sc->sc_test_pin) ? "on" : "off"); return 1; } #endif #if defined(KEYTEST3) static int wzero3kbd_intr3(void *arg) { int pin = (int)arg; printf("wzero3kbd_intr3: GPIO pin #%d = %s\n", pin, pxa2x0_gpio_get_bit(pin) ? 
"on" : "off"); return 1; } #endif static void wzero3kbd_tick(void *arg) { struct wzero3kbd_softc *sc = (struct wzero3kbd_softc *)arg; (void) wzero3kbd_poll1(sc); callout_schedule(&sc->sc_keyscan_ch, sc->sc_interval); } static int wzero3kbd_power_intr(void *arg) { struct wzero3kbd_softc *sc = (struct wzero3kbd_softc *)arg; #if defined(KEYTEST) || defined(KEYTEST2) || defined(KEYTEST3) || defined(KEYTEST4) printf("wzero3kbd_power_intr: status = %s\n", pxa2x0_gpio_get_bit(sc->sc_power_pin) ? "on" : "off"); #endif #if defined(KEYTEST) if (pxa2x0_gpio_get_bit(sc->sc_power_pin)) { if (sc->sc_test_pin >= 0) { int orig_pin = sc->sc_test_pin; pxa2x0_gpio_intr_disestablish(sc->sc_test_ih); sc->sc_test_ih = NULL; for (;;) { if (++sc->sc_test_pin >= PXA270_GPIO_NPINS) sc->sc_test_pin = 2; if (sc->sc_test_pin == orig_pin) break; if (sc->sc_test_pin != sc->sc_nouse_pin && sc->sc_test_pin != sc->sc_nouse_pin2 && sc->sc_test_pin != sc->sc_nouse_pin3 && sc->sc_test_pin != sc->sc_key_pin && sc->sc_test_pin != sc->sc_power_pin && sc->sc_test_pin != sc->sc_reset_pin && GPIO_IS_GPIO_IN(pxa2x0_gpio_get_function(sc->sc_test_pin))) break; } if (sc->sc_test_pin != orig_pin) { printf("GPIO_IN: GPIO pin #%d\n", sc->sc_test_pin); sc->sc_test_ih = pxa2x0_gpio_intr_establish(sc->sc_test_pin, IST_EDGE_BOTH, IPL_TTY, wzero3kbd_intr2,sc); } else { sc->sc_test_pin = -1; } } } #endif #if defined(KEYTEST2) if (pxa2x0_gpio_get_bit(sc->sc_power_pin)) { sc->sc_enabled ^= 2; if (sc->sc_enabled & 2) { printf("print col/row\n"); } else { printf("keyscan\n"); } } #endif #if defined(KEYTEST4) if (pxa2x0_gpio_get_bit(sc->sc_power_pin)) { if (sc->sc_test_pin >= 0) { int orig_pin = sc->sc_test_pin; for (;;) { if (++sc->sc_test_pin >= PXA270_GPIO_NPINS) sc->sc_test_pin = 2; if (sc->sc_test_pin == orig_pin) break; if (sc->sc_test_pin != sc->sc_nouse_pin && sc->sc_test_pin != sc->sc_nouse_pin2 && sc->sc_test_pin != sc->sc_nouse_pin3 && sc->sc_test_pin != sc->sc_key_pin && sc->sc_test_pin != sc->sc_power_pin && 
sc->sc_test_pin != sc->sc_reset_pin && GPIO_IS_GPIO_OUT(pxa2x0_gpio_get_function(sc->sc_test_pin))) break; } if (sc->sc_test_pin != orig_pin) { printf("GPIO_OUT: GPIO pin #%d\n", sc->sc_test_pin); } else { sc->sc_test_pin = -1; } } } #endif #if defined(KEYTEST5) if (pxa2x0_gpio_get_bit(sc->sc_power_pin)) { sc->sc_bit <<= 1; if (sc->sc_bit & ~0xff) { sc->sc_bit = 0x01; sc->sc_test_pin += 0x4; if (sc->sc_test_pin >= 0x20) { sc->sc_test_pin = 0x00; } } printf("CPLD(%#x), mask=%#x\n", sc->sc_test_pin, sc->sc_bit); } #endif pxa2x0_gpio_clear_intr(sc->sc_power_pin); return 1; } static int wzero3kbd_reset_intr(void *arg) { struct wzero3kbd_softc *sc = (struct wzero3kbd_softc *)arg; sysmon_task_queue_sched(0, wzero3kbd_sysmon_reset_event, sc); pxa2x0_gpio_clear_intr(sc->sc_reset_pin); return 1; } static int wzero3kbd_input_establish(void *arg, struct hpckbd_if *kbdif) { struct wzero3kbd_softc *sc = (struct wzero3kbd_softc *)arg; /* Save hpckbd interface. */ sc->sc_hpckbd = kbdif; sc->sc_enabled = 1; return 0; } static void wzero3kbd_sysmon_reset_event(void *arg) { struct wzero3kbd_softc *sc = (struct wzero3kbd_softc *)arg; sysmon_pswitch_event(&sc->sc_smpsw, PSWITCH_EVENT_PRESSED); } static int wzero3kbd_poll(void *arg) { int keydown; keydown = wzero3kbd_poll1(arg); return keydown; } static int wzero3kbd_poll1(void *arg) { struct wzero3kbd_softc *sc = (struct wzero3kbd_softc *)arg; int row, col, data; int keycol; int keydown; int i; int s; if (!sc->sc_enabled) { DPRINTF(("wzero3kbd_poll: disabled\n")); return 0; } s = spltty(); for (col = 0; col < sc->sc_ncolumn; col++) { /* deselect column# and charge */ CSR_WRITE1(KBDCOL_L, 0); CSR_WRITE1(KBDCOL_U, 0); CSR_WRITE1(KBDCHARGE, 1); delay(KEYWAIT); CSR_WRITE1(KBDCHARGE, 0); /* select scan column# */ keycol = 1 << col; CSR_WRITE1(KBDCOL_L, keycol & 0xff); CSR_WRITE1(KBDCOL_U, keycol >> 8); delay(KEYWAIT); CSR_WRITE1(KBDCHARGE, 0); /* read key data */ data = CSR_READ1(KBDDATA); for (row = 0; row < sc->sc_nrow; row++) { #ifdef 
KEYTEST2 if (!(sc->sc_enabled & 2)) { #endif sc->sc_keystat[row + col * sc->sc_nrow] = (data >> row) & 1; #ifdef KEYTEST2 } else if (data & (1 << row)) { printf("col = %d, row = %d, idx = %d, data = 0x%02x\n", col, row, row + col * sc->sc_nrow, data); } #endif } } /* deselect column# and charge */ CSR_WRITE1(KBDCOL_L, 0); CSR_WRITE1(KBDCOL_U, 0); CSR_WRITE1(KBDCHARGE, 1); delay(KEYWAIT); CSR_WRITE1(KBDCHARGE, 0); /* send key scan code */ keydown = 0; for (i = 0; i < sc->sc_nrow * sc->sc_ncolumn; i++) { if (sc->sc_keystat[i] == sc->sc_okeystat[i]) continue; keydown |= sc->sc_keystat[i]; hpckbd_input(sc->sc_hpckbd, sc->sc_keystat[i], i); sc->sc_okeystat[i] = sc->sc_keystat[i]; } splx(s); return keydown; }
8576ce708121f341d99ea21624373f91fdb1d11b
2a7d1897f3524c5ef205906b23f3c7dc6fac6828
/odpi/src/dpiObjectType.c
a8788673831593081c73322cbb5eb8c91c5fb9e9
[ "Apache-2.0", "UPL-1.0", "BSD-3-Clause", "MIT" ]
permissive
godror/godror
8f0991a970e66a7813bc14b75667e47f0245e8aa
fe4f08126df5815ab2668aba06f0a212fddb1056
refs/heads/main
2023-09-01T13:43:54.743051
2023-08-31T09:03:59
2023-08-31T09:03:59
223,268,020
480
127
NOASSERTION
2023-08-20T06:02:15
2019-11-21T21:23:17
C
UTF-8
C
false
false
14,713
c
dpiObjectType.c
//----------------------------------------------------------------------------- // Copyright (c) 2016, 2022, Oracle and/or its affiliates. // // This software is dual-licensed to you under the Universal Permissive License // (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License // 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose // either license. // // If you elect to accept the software under the Apache License, Version 2.0, // the following applies: // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- // dpiObjectType.c // Implementation of object types. //----------------------------------------------------------------------------- #include "dpiImpl.h" // forward declarations of internal functions only used in this file static int dpiObjectType__init(dpiObjectType *objType, void *handle, uint32_t handleType, dpiError *error); //----------------------------------------------------------------------------- // dpiObjectType__allocate() [INTERNAL] // Allocate and initialize an object type structure. 
//----------------------------------------------------------------------------- int dpiObjectType__allocate(dpiConn *conn, void *handle, uint32_t handleType, dpiObjectType **objType, dpiError *error) { dpiObjectType *tempObjType; // create structure and retain reference to connection *objType = NULL; if (dpiGen__allocate(DPI_HTYPE_OBJECT_TYPE, conn->env, (void**) &tempObjType, error) < 0) return DPI_FAILURE; dpiGen__setRefCount(conn, error, 1); tempObjType->conn = conn; // perform initialization if (dpiObjectType__init(tempObjType, handle, handleType, error) < 0) { dpiObjectType__free(tempObjType, error); return DPI_FAILURE; } *objType = tempObjType; return DPI_SUCCESS; } //----------------------------------------------------------------------------- // dpiObjectType__check() [INTERNAL] // Validate that the connection from which the object type was created is // still connected and issue an error if it is not. //----------------------------------------------------------------------------- static int dpiObjectType__check(dpiObjectType *objType, const char *fnName, dpiError *error) { if (dpiGen__startPublicFn(objType, DPI_HTYPE_OBJECT_TYPE, fnName, error) < 0) return DPI_FAILURE; return dpiConn__checkConnected(objType->conn, error); } //----------------------------------------------------------------------------- // dpiObjectType__describe() [INTERNAL] // Describe the object type and store information about it. Note that a // separate call to OCIDescribeAny() is made in order to support nested types; // an illegal attribute value is returned if this is not done. 
//----------------------------------------------------------------------------- static int dpiObjectType__describe(dpiObjectType *objType, void *describeHandle, dpiError *error) { void *collectionParam, *param; uint16_t typeCode; // describe the type if (dpiOci__describeAny(objType->conn, objType->tdo, 0, DPI_OCI_OTYPE_PTR, describeHandle, error) < 0) return DPI_FAILURE; // get top level parameter descriptor if (dpiOci__attrGet(describeHandle, DPI_OCI_HTYPE_DESCRIBE, &param, 0, DPI_OCI_ATTR_PARAM, "get top level parameter", error) < 0) return DPI_FAILURE; // determine type code if (dpiOci__attrGet(param, DPI_OCI_DTYPE_PARAM, &typeCode, 0, DPI_OCI_ATTR_TYPECODE, "get type code", error) < 0) return DPI_FAILURE; objType->typeCode = typeCode; // determine the schema of the type if (dpiUtils__getAttrStringWithDup("get schema", param, DPI_OCI_DTYPE_PARAM, DPI_OCI_ATTR_SCHEMA_NAME, &objType->schema, &objType->schemaLength, error) < 0) return DPI_FAILURE; // determine the name of the type if (dpiUtils__getAttrStringWithDup("get name", param, DPI_OCI_DTYPE_PARAM, DPI_OCI_ATTR_NAME, &objType->name, &objType->nameLength, error) < 0) return DPI_FAILURE; // determine the package name of the type if (dpiUtils__getAttrStringWithDup("get package name", param, DPI_OCI_DTYPE_PARAM, DPI_OCI_ATTR_PACKAGE_NAME, &objType->packageName, &objType->packageNameLength, error) < 0) return DPI_FAILURE; // determine the number of attributes if (dpiOci__attrGet(param, DPI_OCI_DTYPE_PARAM, (void*) &objType->numAttributes, 0, DPI_OCI_ATTR_NUM_TYPE_ATTRS, "get number of attributes", error) < 0) return DPI_FAILURE; // if a collection, need to determine the element type if (typeCode == DPI_SQLT_NCO) { objType->isCollection = 1; // acquire collection parameter descriptor if (dpiOci__attrGet(param, DPI_OCI_DTYPE_PARAM, &collectionParam, 0, DPI_OCI_ATTR_COLLECTION_ELEMENT, "get collection descriptor", error) < 0) return DPI_FAILURE; // determine type of element if 
(dpiOracleType__populateTypeInfo(objType->conn, collectionParam, DPI_OCI_DTYPE_PARAM, &objType->elementTypeInfo, error) < 0) return DPI_FAILURE; } return DPI_SUCCESS; } //----------------------------------------------------------------------------- // dpiObjectType__free() [INTERNAL] // Free the memory for an object type. //----------------------------------------------------------------------------- void dpiObjectType__free(dpiObjectType *objType, dpiError *error) { if (objType->conn) { dpiGen__setRefCount(objType->conn, error, -1); objType->conn = NULL; } if (objType->elementTypeInfo.objectType) { dpiGen__setRefCount(objType->elementTypeInfo.objectType, error, -1); objType->elementTypeInfo.objectType = NULL; } if (objType->schema) { dpiUtils__freeMemory((void*) objType->schema); objType->schema = NULL; } if (objType->name) { dpiUtils__freeMemory((void*) objType->name); objType->name = NULL; } if (objType->packageName) { dpiUtils__freeMemory((void*) objType->packageName); objType->packageName = NULL; } dpiUtils__freeMemory(objType); } //----------------------------------------------------------------------------- // dpiObjectType__init() [INTERNAL] // Initialize the object type. 
//----------------------------------------------------------------------------- static int dpiObjectType__init(dpiObjectType *objType, void *handle, uint32_t handleType, dpiError *error) { void *describeHandle, *tdoReference; // retrieve TDO of the parameter and pin it in the cache if (dpiOci__attrGet(handle, handleType, (void*) &tdoReference, 0, DPI_OCI_ATTR_REF_TDO, "get TDO reference", error) < 0) return DPI_FAILURE; if (dpiOci__objectPin(objType->env->handle, tdoReference, &objType->tdo, error) < 0) return DPI_FAILURE; // acquire a describe handle if (dpiOci__handleAlloc(objType->env->handle, &describeHandle, DPI_OCI_HTYPE_DESCRIBE, "allocate describe handle", error) < 0) return DPI_FAILURE; // describe the type if (dpiObjectType__describe(objType, describeHandle, error) < 0) { dpiOci__handleFree(describeHandle, DPI_OCI_HTYPE_DESCRIBE); return DPI_FAILURE; } // free the describe handle dpiOci__handleFree(describeHandle, DPI_OCI_HTYPE_DESCRIBE); return DPI_SUCCESS; } //----------------------------------------------------------------------------- // dpiObjectType__isXmlType() [INTERNAL] // Returns a boolean indicating if the object type in question refers to the // type SYS.XMLTYPE. //----------------------------------------------------------------------------- int dpiObjectType__isXmlType(dpiObjectType *objType) { static const char *schema = "SYS", *name = "XMLTYPE"; size_t schemaLength, nameLength; schemaLength = strlen(schema); nameLength = strlen(name); return (objType->schemaLength == schemaLength && strncmp(objType->schema, schema, schemaLength) == 0 && objType->nameLength == nameLength && strncmp(objType->name, name, nameLength) == 0); } //----------------------------------------------------------------------------- // dpiObjectType_addRef() [PUBLIC] // Add a reference to the object type. 
//----------------------------------------------------------------------------- int dpiObjectType_addRef(dpiObjectType *objType) { return dpiGen__addRef(objType, DPI_HTYPE_OBJECT_TYPE, __func__); } //----------------------------------------------------------------------------- // dpiObjectType_createObject() [PUBLIC] // Create a new object of the specified type and return it. Return NULL on // error. //----------------------------------------------------------------------------- int dpiObjectType_createObject(dpiObjectType *objType, dpiObject **obj) { dpiError error; int status; // validate parameters if (dpiObjectType__check(objType, __func__, &error) < 0) return dpiGen__endPublicFn(objType, DPI_FAILURE, &error); DPI_CHECK_PTR_NOT_NULL(objType, obj) status = dpiObject__allocate(objType, NULL, NULL, NULL, obj, &error); return dpiGen__endPublicFn(objType, status, &error); } //----------------------------------------------------------------------------- // dpiObjectType_getAttributes() [PUBLIC] // Get the attributes for the object type in the provided array. 
//----------------------------------------------------------------------------- int dpiObjectType_getAttributes(dpiObjectType *objType, uint16_t numAttributes, dpiObjectAttr **attributes) { void *topLevelParam, *attrListParam, *attrParam, *describeHandle; dpiError error; uint16_t i; // validate object type and the number of attributes if (dpiObjectType__check(objType, __func__, &error) < 0) return dpiGen__endPublicFn(objType, DPI_FAILURE, &error); DPI_CHECK_PTR_NOT_NULL(objType, attributes) if (numAttributes < objType->numAttributes) { dpiError__set(&error, "get attributes", DPI_ERR_ARRAY_SIZE_TOO_SMALL, numAttributes); return dpiGen__endPublicFn(objType, DPI_FAILURE, &error); } if (numAttributes == 0) return dpiGen__endPublicFn(objType, DPI_SUCCESS, &error); // acquire a describe handle if (dpiOci__handleAlloc(objType->env->handle, &describeHandle, DPI_OCI_HTYPE_DESCRIBE, "allocate describe handle", &error) < 0) return dpiGen__endPublicFn(objType, DPI_FAILURE, &error); // describe the type if (dpiOci__describeAny(objType->conn, objType->tdo, 0, DPI_OCI_OTYPE_PTR, describeHandle, &error) < 0) { dpiOci__handleFree(describeHandle, DPI_OCI_HTYPE_DESCRIBE); return dpiGen__endPublicFn(objType, DPI_FAILURE, &error); } // get the top level parameter descriptor if (dpiOci__attrGet(describeHandle, DPI_OCI_HTYPE_DESCRIBE, &topLevelParam, 0, DPI_OCI_ATTR_PARAM, "get top level param", &error) < 0) { dpiOci__handleFree(describeHandle, DPI_OCI_HTYPE_DESCRIBE); return dpiGen__endPublicFn(objType, DPI_FAILURE, &error); } // get the attribute list parameter descriptor if (dpiOci__attrGet(topLevelParam, DPI_OCI_DTYPE_PARAM, (void*) &attrListParam, 0, DPI_OCI_ATTR_LIST_TYPE_ATTRS, "get attr list param", &error) < 0) { dpiOci__handleFree(describeHandle, DPI_OCI_HTYPE_DESCRIBE); return dpiGen__endPublicFn(objType, DPI_FAILURE, &error); } // create attribute structure for each attribute for (i = 0; i < objType->numAttributes; i++) { if (dpiOci__paramGet(attrListParam, 
DPI_OCI_DTYPE_PARAM, &attrParam, (uint32_t) i + 1, "get attribute param", &error) < 0) { dpiOci__handleFree(describeHandle, DPI_OCI_HTYPE_DESCRIBE); return dpiGen__endPublicFn(objType, DPI_FAILURE, &error); } if (dpiObjectAttr__allocate(objType, attrParam, &attributes[i], &error) < 0) { dpiOci__handleFree(describeHandle, DPI_OCI_HTYPE_DESCRIBE); return dpiGen__endPublicFn(objType, DPI_FAILURE, &error); } } // free the describe handle dpiOci__handleFree(describeHandle, DPI_OCI_HTYPE_DESCRIBE); return dpiGen__endPublicFn(objType, DPI_SUCCESS, &error); } //----------------------------------------------------------------------------- // dpiObjectType_getInfo() [PUBLIC] // Return information about the object type. //----------------------------------------------------------------------------- int dpiObjectType_getInfo(dpiObjectType *objType, dpiObjectTypeInfo *info) { dpiError error; if (dpiGen__startPublicFn(objType, DPI_HTYPE_OBJECT_TYPE, __func__, &error) < 0) return dpiGen__endPublicFn(objType, DPI_FAILURE, &error); DPI_CHECK_PTR_NOT_NULL(objType, info) info->name = objType->name; info->nameLength = objType->nameLength; if (objType->env->context->dpiMinorVersion > 5) { info->packageName = objType->packageName; info->packageNameLength = objType->packageNameLength; } info->schema = objType->schema; info->schemaLength = objType->schemaLength; info->isCollection = objType->isCollection; info->elementTypeInfo = objType->elementTypeInfo; info->numAttributes = objType->numAttributes; return dpiGen__endPublicFn(objType, DPI_SUCCESS, &error); } //----------------------------------------------------------------------------- // dpiObjectType_release() [PUBLIC] // Release a reference to the object type. //----------------------------------------------------------------------------- int dpiObjectType_release(dpiObjectType *objType) { return dpiGen__release(objType, DPI_HTYPE_OBJECT_TYPE, __func__); }
38252e115bb7c0dba00d877fb3569c744b7cfa19
b6acd6eed2b8946c1c1e19fa30081cbab0a2954f
/starry_fmu/Framework/source/Sensor/sensor_manager.c
d7d85c35d9f3345b4ea16fc01448ac5bf8ba8c9e
[ "BSD-3-Clause" ]
permissive
JcZou/StarryPilot
7ce1ed454f133ccd30d71916811e2bf23196d2eb
97af0338a54e1eeece877c72222aeaf4b7e80ad7
refs/heads/master
2023-03-12T19:10:17.225314
2021-11-27T19:44:26
2021-11-27T19:44:26
137,048,745
304
172
BSD-3-Clause
2020-08-26T07:34:49
2018-06-12T09:27:59
C
UTF-8
C
false
false
27,633
c
sensor_manager.c
/* * File : sensor_manager.c * * Change Logs: * Date Author Notes * 2016-06-20 zoujiachi first version. */ #include <rthw.h> #include <rtdevice.h> #include <rtthread.h> #include <string.h> #include <math.h> #include <stdlib.h> #include "console.h" #include "ms5611.h" #include "gps.h" #include "param.h" #include "sensor_manager.h" #include "lsm303d.h" #include "l3gd20h.h" #include "hmc5883.h" #include "mpu6000.h" #include "uMCN.h" #include "filter.h" #include "delay.h" #include "lidar.h" #include "ap_math.h" #include "hil_interface.h" #include "control_main.h" #include "att_estimator.h" #include "pos_estimator.h" #include "calibration.h" #define ADDR_CMD_CONVERT_D1 0x48 /* write to this address to start pressure conversion */ #define ADDR_CMD_CONVERT_D2 0x58 /* write to this address to start temperature conversion */ #define BARO_UPDATE_INTERVAL 10 #define EARTH_RADIUS 6371000 static char *TAG = "Sensor"; static uint32_t gyr_read_time_stamp = 0; static uint32_t acc_read_time_stamp = 0; static uint32_t mag_read_time_stamp = 0; static uint32_t _baro_update_time_stamp = 0; static rt_device_t acc_device_t; static rt_device_t mag_device_t; static rt_device_t gyr_device_t; static rt_device_t baro_device_t; static rt_device_t gps_device_t; //static rt_device_t lidar_device_t; //for debug use static struct vehicle_gps_position_s gps_position; static struct satellite_info_s satellite_info; static McnNode_t gps_node_t; struct rt_event event_vehicle; static volatile bool _baro_update_flag = false; static volatile bool _mag_update_flag = false; static GPS_Status _gps_status; float _lidar_dis = 0.0f; uint32_t _lidar_recv_stamp = 0; static uint32_t _lidar_time = 0; static float _baro_last_alt = 0.0f; static uint32_t _baro_last_time = 0; static BaroPosition _baro_pos = {0.0f, 0.0f, 0.0f}; static GPS_Driv_Vel _gps_driv_vel; static bool _gps_connected = false; MCN_DEFINE(SENSOR_MEASURE_GYR, 12); MCN_DEFINE(SENSOR_MEASURE_ACC, 12); MCN_DEFINE(SENSOR_MEASURE_MAG, 12); 
MCN_DEFINE(SENSOR_GYR, 12); MCN_DEFINE(SENSOR_ACC, 12); MCN_DEFINE(SENSOR_MAG, 12); MCN_DEFINE(SENSOR_FILTER_GYR, 12); MCN_DEFINE(SENSOR_FILTER_ACC, 12); MCN_DEFINE(SENSOR_FILTER_MAG, 12); MCN_DEFINE(SENSOR_BARO, sizeof(MS5611_REPORT_Def)); MCN_DEFINE(SENSOR_LIDAR, sizeof(float)); MCN_DEFINE(CORRECT_LIDAR, sizeof(float)); MCN_DEFINE(BARO_POSITION, sizeof(BaroPosition)); MCN_DEFINE(GPS_STATUS, sizeof(GPS_Status)); MCN_DECLARE(GPS_POSITION); /************************** ACC API **************************/ bool sensor_acc_ready(void) { uint32_t time_now = time_nowMs(); if(acc_read_time_stamp - time_now >= 2){ return true; }else{ return false; } } rt_err_t sensor_acc_raw_measure(int16_t acc[3]) { rt_size_t r_byte; r_byte = rt_device_read(acc_device_t, ACC_RAW_POS, (void*)acc, 6); return r_byte == 6 ? RT_EOK : RT_ERROR; } rt_err_t sensor_acc_measure(float acc[3]) { rt_size_t r_byte; acc_read_time_stamp = time_nowMs(); r_byte = rt_device_read(acc_device_t, ACC_SCALE_POS, (void*)acc, 12); return r_byte == 12 ? 
RT_EOK : RT_ERROR; } rt_err_t sensor_acc_get_calibrated_data(float acc[3]) { float acc_f[3]; rt_err_t res; res = sensor_acc_measure(acc_f); // publish non-calibrated data for calibration mcn_publish(MCN_ID(SENSOR_MEASURE_ACC), acc_f); float ofs[3] = {PARAM_GET_FLOAT(CALIBRATION, ACC_X_OFFSET), PARAM_GET_FLOAT(CALIBRATION, ACC_Y_OFFSET), PARAM_GET_FLOAT(CALIBRATION, ACC_Z_OFFSET)}; float transM[3][3] = { {PARAM_GET_FLOAT(CALIBRATION, ACC_TRANS_MAT00), PARAM_GET_FLOAT(CALIBRATION, ACC_TRANS_MAT01), PARAM_GET_FLOAT(CALIBRATION, ACC_TRANS_MAT02)}, {PARAM_GET_FLOAT(CALIBRATION, ACC_TRANS_MAT10), PARAM_GET_FLOAT(CALIBRATION, ACC_TRANS_MAT11), PARAM_GET_FLOAT(CALIBRATION, ACC_TRANS_MAT12)}, {PARAM_GET_FLOAT(CALIBRATION, ACC_TRANS_MAT20), PARAM_GET_FLOAT(CALIBRATION, ACC_TRANS_MAT21), PARAM_GET_FLOAT(CALIBRATION, ACC_TRANS_MAT22)}, }; float ofs_acc[3]; for(uint8_t i=0 ; i<3 ; i++){ ofs_acc[i] = acc_f[i] - ofs[i]; } for(uint8_t i=0 ; i<3 ; i++){ acc[i] = ofs_acc[0]*transM[0][i] + ofs_acc[1]*transM[1][i] + ofs_acc[2]*transM[2][i]; } return res; } /************************** MAG API **************************/ bool sensor_mag_ready(void) { uint32_t time_now = time_nowMs(); if( (time_now - mag_read_time_stamp) >= 10){ return true; }else{ return false; } } rt_err_t sensor_mag_raw_measure(int16_t mag[3]) { rt_size_t r_byte; r_byte = rt_device_read(mag_device_t, MAG_RAW_POS, (void*)mag, 6); return r_byte == 6 ? RT_EOK : RT_ERROR; } rt_err_t sensor_mag_measure(float mag[3]) { rt_size_t r_byte; mag_read_time_stamp = time_nowMs(); r_byte = rt_device_read(mag_device_t, MAG_SCLAE_POS, (void*)mag, 12); return r_byte == 12 ? 
RT_EOK : RT_ERROR; } rt_err_t sensor_mag_get_calibrated_data(float mag[3]) { float mag_f[3]; rt_err_t res; res = sensor_mag_measure(mag_f); // publish non-calibrated data for calibration mcn_publish(MCN_ID(SENSOR_MEASURE_MAG), mag_f); #ifdef USE_EXTERNAL_MAG_DEV float ofs[3] = {0.16833, 0.051961, -0.030025}; float transM[3][3] = { {1.8408, -0.028278, -0.013698}, {-0.028278, 1.7414, 0.0057671}, {-0.013698, 0.0057671, 1.9104} }; #else float ofs[3] = {PARAM_GET_FLOAT(CALIBRATION, MAG_X_OFFSET), PARAM_GET_FLOAT(CALIBRATION, MAG_Y_OFFSET), PARAM_GET_FLOAT(CALIBRATION, MAG_Z_OFFSET)}; float transM[3][3] = { {PARAM_GET_FLOAT(CALIBRATION, MAG_TRANS_MAT00), PARAM_GET_FLOAT(CALIBRATION, MAG_TRANS_MAT01), PARAM_GET_FLOAT(CALIBRATION, MAG_TRANS_MAT02)}, {PARAM_GET_FLOAT(CALIBRATION, MAG_TRANS_MAT10), PARAM_GET_FLOAT(CALIBRATION, MAG_TRANS_MAT11), PARAM_GET_FLOAT(CALIBRATION, MAG_TRANS_MAT12)}, {PARAM_GET_FLOAT(CALIBRATION, MAG_TRANS_MAT20), PARAM_GET_FLOAT(CALIBRATION, MAG_TRANS_MAT21), PARAM_GET_FLOAT(CALIBRATION, MAG_TRANS_MAT22)}, }; #endif float ofs_mag[3]; for(uint8_t i=0 ; i<3 ; i++){ ofs_mag[i] = mag_f[i] - ofs[i]; } for(uint8_t i=0 ; i<3 ; i++){ mag[i] = ofs_mag[0]*transM[0][i] + ofs_mag[1]*transM[1][i] + ofs_mag[2]*transM[2][i]; } return res; } bool sensor_mag_get_update_flag(void) { return _mag_update_flag; } void sensor_mag_clear_update_flag(void) { _mag_update_flag = false; } /************************** GYR API **************************/ bool sensor_gyr_ready(void) { uint32_t time_now = time_nowMs(); if(gyr_read_time_stamp - time_now >= 2){ return true; }else{ return false; } } rt_err_t sensor_gyr_raw_measure(int16_t gyr[3]) { rt_size_t r_size; r_size = rt_device_read(gyr_device_t, GYR_RAW_POS, (void*)gyr, 6); return r_size == 6 ? RT_EOK : RT_ERROR; } rt_err_t sensor_gyr_measure(float gyr[3]) { rt_size_t r_size; gyr_read_time_stamp = time_nowMs(); r_size = rt_device_read(gyr_device_t, GYR_SCALE_POS, (void*)gyr, 12); return r_size == 12 ? 
RT_EOK : RT_ERROR; } rt_err_t sensor_gyr_get_calibrated_data(float gyr[3]) { float gyr_dps[3]; rt_err_t res; float gyr_offset[3] = {PARAM_GET_FLOAT(CALIBRATION, GYR_X_OFFSET), PARAM_GET_FLOAT(CALIBRATION, GYR_Y_OFFSET), PARAM_GET_FLOAT(CALIBRATION, GYR_Z_OFFSET)}; float gyr_gain[3] = {PARAM_GET_FLOAT(CALIBRATION, GYR_X_GAIN), PARAM_GET_FLOAT(CALIBRATION, GYR_Y_GAIN), PARAM_GET_FLOAT(CALIBRATION, GYR_Z_GAIN)}; res = sensor_gyr_measure(gyr_dps); // publish non-calibrated data for calibration mcn_publish(MCN_ID(SENSOR_MEASURE_GYR), gyr_dps); for(uint8_t i=0 ; i<3 ; i++) { gyr[i] = (gyr_dps[i] + gyr_offset[i]) * gyr_gain[i]; } return res; } uint8_t sensor_get_device_id(char* device_name) { uint8_t device_id = 0xFF; //unknown device if(strcmp(device_name , ACC_DEVICE_NAME) == 0) { rt_device_control(acc_device_t, SENSOR_GET_DEVICE_ID, (void*)&device_id); } else if(strcmp(device_name , MAG_DEVICE_NAME) == 0) { rt_device_control(mag_device_t, SENSOR_GET_DEVICE_ID, (void*)&device_id); } else if(strcmp(device_name , GYR_DEVICE_NAME) == 0) { rt_device_control(gyr_device_t, SENSOR_GET_DEVICE_ID, (void*)&device_id); } return device_id; } /************************** BARO API **************************/ static Baro_Machine_State baro_state; static MS5611_REPORT_Def report_baro; rt_err_t _baro_trig_conversion(uint8_t addr) { return rt_device_control(baro_device_t, SENSOR_CONVERSION, (void*)&addr); } rt_bool_t _baro_is_conv_finish(void) { if(rt_device_control(baro_device_t, SENSOR_IS_CONV_FIN, RT_NULL) == RT_EOK) { return RT_TRUE; }else { return RT_FALSE; } } rt_err_t _baro_read_raw_temp(void) { rt_err_t err; if(rt_device_read(baro_device_t, RAW_TEMPERATURE_POS, NULL, 1)) err = RT_EOK; else err = RT_ERROR; return err; } rt_err_t _baro_read_raw_press(void) { rt_err_t err; if(rt_device_read(baro_device_t, RAW_PRESSURE_POS, NULL, 1)) err = RT_EOK; else err = RT_ERROR; return err; } /* * There are 5 steps to get barometer report * 1: convert D1 * 2: read pressure raw data * 3: convert 
D2 * 4: read temperature raw dara * 5: compute temperature,pressure,altitute according to prom param. */ rt_err_t sensor_process_baro_state_machine(void) { rt_err_t err = RT_ERROR; switch((uint8_t)baro_state) { case S_CONV_1: { err = _baro_trig_conversion(ADDR_CMD_CONVERT_D1); if(err == RT_EOK) baro_state = S_CONV_2; }break; case S_CONV_2: { if(!_baro_is_conv_finish()){ //need 9.04ms to converse err = RT_EBUSY; }else{ err = _baro_read_raw_press(); if(err == RT_EOK){ /* directly start D2 conversion */ err = _baro_trig_conversion(ADDR_CMD_CONVERT_D2); if(err == RT_EOK) baro_state = S_COLLECT_REPORT; else baro_state = S_CONV_1; } else baro_state = S_CONV_1; //if err, restart } }break; case S_COLLECT_REPORT: { if(!_baro_is_conv_finish()){ //need 9.04ms to converse err = RT_EBUSY; }else{ baro_state = S_CONV_1; err = _baro_read_raw_temp(); if(err == RT_EOK){ if(rt_device_read(baro_device_t, COLLECT_DATA_POS, (void*)&report_baro, 1)){ /* start D1 conversion */ if(_baro_trig_conversion(ADDR_CMD_CONVERT_D1) == RT_EOK) baro_state = S_CONV_2; }else{ err = RT_ERROR; } } } }break; } return err; } bool sensor_baro_ready(void) { uint32_t time_now = time_nowMs(); if( (time_now - _baro_update_time_stamp) >= 10){ _baro_update_time_stamp = time_now; return true; }else{ return false; } } bool sensor_baro_get_update_flag(void) { #ifdef HIL_SIMULATION return hil_baro_poll(); #else return _baro_update_flag; #endif } void sensor_baro_clear_update_flag(void) { _baro_update_flag = false; } bool sensor_baro_update(void) { rt_err_t res; if(sensor_baro_get_state() == S_COLLECT_REPORT){ res = sensor_process_baro_state_machine(); //get report; if(res == RT_EOK){ _baro_update_flag = true; return true; } }else{ res = sensor_process_baro_state_machine(); } return false; } Baro_Machine_State sensor_baro_get_state(void) { return baro_state; } MS5611_REPORT_Def* sensor_baro_get_report(void) { #ifdef HIL_SIMULATION mcn_copy_from_hub(MCN_ID(SENSOR_BARO), &report_baro); #endif return &report_baro; } 
BaroPosition sensor_baro_get_position(void) { return _baro_pos; } /************************** LIDAR-LITE API **************************/ void lidar_lite_store(float dis) { OS_ENTER_CRITICAL; _lidar_dis = dis; _lidar_recv_stamp = time_nowMs(); OS_EXIT_CRITICAL; } float lidar_lite_get_dis(void) { float distance; #ifdef USE_LIDAR_PWM OS_ENTER_CRITICAL; distance = _lidar_dis; OS_EXIT_CRITICAL; _lidar_time = time_nowMs(); #elif defined USE_LIDAR_I2C rt_size_t size = rt_device_read(lidar_device_t, 1, &distance, 1); if(size != 1) return -1.0f; _lidar_time = time_nowMs(); #else Console.e(TAG, "err, do not define to use lidar\n"); #endif /* compensate distance with angle */ quaternion att = attitude_est_get_quaternion(); // float zn[3] = {0.0f, 0.0f, 1.0f}; // float zb[3]; // quaternion_inv_rotateVector(att, zn, zb); // float cos_tilt = fabs(Vector3_DotProduct(zn, zb)); // float cor_dis = distance * cos_theta; Euler e; quaternion_toEuler(&att, &e); float cos_tilt = arm_cos_f32(e.roll)*arm_cos_f32(e.pitch); float cor_dis = distance * cos_tilt; mcn_publish(MCN_ID(SENSOR_LIDAR), &distance); mcn_publish(MCN_ID(CORRECT_LIDAR), &cor_dis); return cor_dis; } bool lidar_lite_is_connect(void) { uint32_t time_now = time_nowMs(); uint32_t time_elapse = (time_now>=_lidar_recv_stamp) ? (time_now-_lidar_recv_stamp) : (0xFFFFFFFF-_lidar_recv_stamp+time_now); /* if more than 50ms no lidar data is received, then we think lidar is disconected */ if(time_elapse < 50){ return true; }else{ return false; } } bool lidar_is_ready(void) { uint32_t time_now = time_nowMs(); /* read lidar each 20ms */ uint32_t time_elapse = (time_now>=_lidar_time) ? 
(time_now-_lidar_time) : (0xFFFFFFFF-_lidar_time+time_now); if(time_elapse >= 20){ return true; }else{ return false; } } //////////////// GPS Function /////////////////////// void gps_calc_geometry_distance(Vector3f_t* dis, double ref_lat, double ref_lon, double lat, double lon) { double delta_lat = Deg2Rad(lat - ref_lat); double delta_lon = Deg2Rad(lon - ref_lon); dis->x = (float)(delta_lat * EARTH_RADIUS); dis->y = (float)(delta_lon * EARTH_RADIUS * arm_cos_f32(lat)); } void gps_calc_geometry_distance2(Vector3f_t* dis, double ref_lat, double ref_lon, double lat, double lon) { const double lat_rad = Deg2Rad(lat); const double lon_rad = Deg2Rad(lon); const double sin_lat = sin(lat_rad); const double cos_lat = cos(lat_rad); const double cos_d_lon = cos(lon_rad - Deg2Rad(ref_lon)); const double arg = constrain_float(sin(Deg2Rad(ref_lat)) * sin_lat + cos(Deg2Rad(ref_lat)) * cos_lat * cos_d_lon, -1.0, 1.0); const double c = acos(arg); double k = 1.0; if (fabs(c) > 0) { k = (c / sin(c)); } dis->x = (float)(k * (cos(Deg2Rad(ref_lat)) * sin_lat - sin(Deg2Rad(ref_lat)) * cos_lat * cos_d_lon) * EARTH_RADIUS); dis->y = (float)(k * cos_lat * sin(lon_rad - Deg2Rad(ref_lon)) * EARTH_RADIUS); } struct vehicle_gps_position_s gps_get_report(void) { struct vehicle_gps_position_s gps_pos_t; mcn_copy_from_hub(MCN_ID(GPS_POSITION), &gps_pos_t); return gps_pos_t; } int gps_get_position(Vector3f_t* gps_pos, struct vehicle_gps_position_s gps_report) { HOME_Pos home = pos_home_get(); if(home.gps_coordinate_set == false){ // gps home have not set yet return -1; } //gps_calc_geometry_distance2(gps_pos, home.lat, home.lon, (double)gps_report.lat*1e-7, (double)gps_report.lon*1e-7); gps_calc_geometry_distance(gps_pos, home.lat, home.lon, (double)gps_report.lat*1e-7, (double)gps_report.lon*1e-7); gps_pos->z = (float)gps_report.alt*1e-3; return 0; } int gps_get_velocity(Vector3f_t* gps_vel, struct vehicle_gps_position_s gps_report) { #ifdef USE_GPS_VEL gps_vel->x = gps_report.vel_n_m_s; 
gps_vel->y = gps_report.vel_e_m_s; gps_vel->z = gps_report.vel_d_m_s; #else OS_ENTER_CRITICAL; gps_vel->x = _gps_driv_vel.velocity.x; gps_vel->y = _gps_driv_vel.velocity.y; gps_vel->z = _gps_driv_vel.velocity.z; OS_EXIT_CRITICAL; #endif return 0; } void gps_get_status(GPS_Status* gps_sta) { mcn_copy_from_hub(MCN_ID(GPS_STATUS), gps_sta); } /************************** Public API ***************************/ void sensor_get_gyr(float gyr[3]) { mcn_copy_from_hub(MCN_ID(SENSOR_FILTER_GYR), gyr); } void sensor_get_acc(float acc[3]) { mcn_copy_from_hub(MCN_ID(SENSOR_FILTER_ACC), acc); } void sensor_get_mag(float mag[3]) { mcn_copy_from_hub(MCN_ID(SENSOR_FILTER_MAG), mag); } /************************** INIT FUNC **************************/ rt_err_t device_sensor_init(void) { rt_err_t res = RT_EOK; /* init all sensor drivers */ res |= rt_lsm303d_init("spi_d1"); res |= rt_l3gd20h_init("spi_d2"); #ifdef USE_EXTERNAL_MAG_DEV res |= rt_hmc5883_init("i2c1"); #endif res |= rt_ms5611_init("spi_d3"); res |= rt_mpu6000_init("spi_d4"); res |= rt_gps_init("uart4" , &gps_position , &satellite_info); /* init acc device */ acc_device_t = rt_device_find(ACC_DEVICE_NAME); if(acc_device_t == RT_NULL) { Console.e(TAG, "can't find acc device\r\n"); return RT_EEMPTY; } rt_device_open(acc_device_t , RT_DEVICE_OFLAG_RDWR); /* init mag device */ mag_device_t = rt_device_find(MAG_DEVICE_NAME); if(mag_device_t == RT_NULL) { Console.e(TAG, "can't find mag device\r\n"); return RT_EEMPTY; }else{ rt_device_open(mag_device_t , RT_DEVICE_OFLAG_RDWR); } /* init gyr device */ gyr_device_t = rt_device_find(GYR_DEVICE_NAME); if(gyr_device_t == RT_NULL) { Console.e(TAG, "can't find gyr device\r\n"); return RT_EEMPTY; } rt_device_open(gyr_device_t , RT_DEVICE_OFLAG_RDWR); /* init barometer device */ baro_state = S_CONV_1; baro_device_t = rt_device_find(BARO_DEVICE_NAME); if(baro_device_t == RT_NULL) { Console.e(TAG, "can't find baro device\r\n"); return RT_EEMPTY; } rt_device_open(baro_device_t , 
RT_DEVICE_OFLAG_RDWR); /* init gps device */ gps_device_t = rt_device_find(GPS_DEVICE_NAME); if(gps_device_t == RT_NULL) { Console.e(TAG, "can't find gps device\r\n"); return RT_EEMPTY; } rt_err_t gps_open_res = rt_device_open(gps_device_t , RT_DEVICE_OFLAG_RDWR); _gps_connected = gps_open_res == RT_EOK ? true : false; #ifdef USE_LIDAR_I2C /* init lidar lite device */ rt_lidar_init("i2c1"); lidar_device_t = rt_device_find(LIDAR_DEVICE_NAME); if(lidar_device_t == RT_NULL) { Console.e(TAG, "can't find %s device\r\n", LIDAR_DEVICE_NAME); return RT_EEMPTY; } rt_device_open(lidar_device_t , RT_DEVICE_OFLAG_RDWR); #endif float null_data[3] = {0, 0, 0}; /* advertise sensor data */ int mcn_res; mcn_res = mcn_advertise(MCN_ID(SENSOR_MEASURE_GYR)); if(mcn_res != 0){ Console.e(TAG, "err:%d, SENSOR_MEASURE_GYR advertise fail!\n", mcn_res); } mcn_res = mcn_advertise(MCN_ID(SENSOR_MEASURE_ACC)); if(mcn_res != 0){ Console.e(TAG, "err:%d, SENSOR_MEASURE_ACC advertise fail!\n", mcn_res); } mcn_res = mcn_advertise(MCN_ID(SENSOR_MEASURE_MAG)); if(mcn_res != 0){ Console.e(TAG, "err:%d, SENSOR_MEASURE_MAG advertise fail!\n", mcn_res); } mcn_res = mcn_advertise(MCN_ID(SENSOR_GYR)); if(mcn_res != 0){ Console.e(TAG, "err:%d, sensor_gyr advertise fail!\n", mcn_res); } mcn_res = mcn_advertise(MCN_ID(SENSOR_ACC)); if(mcn_res != 0){ Console.e(TAG, "err:%d, sensor_acc advertise fail!\n", mcn_res); } mcn_res = mcn_advertise(MCN_ID(SENSOR_MAG)); if(mcn_res != 0){ Console.e(TAG, "err:%d, sensor_mag advertise fail!\n", mcn_res); } mcn_res = mcn_advertise(MCN_ID(SENSOR_FILTER_GYR)); if(mcn_res != 0){ Console.e(TAG, "err:%d, sensor_filter_gyr advertise fail!\n", mcn_res); } mcn_publish(MCN_ID(SENSOR_FILTER_GYR), &null_data); mcn_res = mcn_advertise(MCN_ID(SENSOR_FILTER_ACC)); if(mcn_res != 0){ Console.e(TAG, "err:%d, sensor_filter_acc advertise fail!\n", mcn_res); } mcn_publish(MCN_ID(SENSOR_FILTER_ACC), &null_data); mcn_res = mcn_advertise(MCN_ID(SENSOR_FILTER_MAG)); if(mcn_res != 0){ 
Console.e(TAG, "err:%d, sensor_filter_mag advertise fail!\n", mcn_res); } mcn_publish(MCN_ID(SENSOR_FILTER_MAG), &null_data); mcn_res = mcn_advertise(MCN_ID(SENSOR_BARO)); if(mcn_res != 0){ Console.e(TAG, "err:%d, sensor_baro advertise fail!\n", mcn_res); } mcn_res = mcn_advertise(MCN_ID(SENSOR_LIDAR)); if(mcn_res != 0){ Console.e(TAG, "err:%d, sensor_lidar advertise fail!\n", mcn_res); } mcn_res = mcn_advertise(MCN_ID(CORRECT_LIDAR)); if(mcn_res != 0){ Console.e(TAG, "err:%d, correct_lidar advertise fail!\n", mcn_res); } mcn_res = mcn_advertise(MCN_ID(BARO_POSITION)); if(mcn_res != 0){ Console.e(TAG, "err:%d, baro_position advertise fail!\n", mcn_res); } mcn_res = mcn_advertise(MCN_ID(GPS_STATUS)); if(mcn_res != 0){ Console.e(TAG, "err:%d, GPS_STATUS advertise fail!\n", mcn_res); } gps_node_t = mcn_subscribe(MCN_ID(GPS_POSITION), NULL); if(gps_node_t == NULL) Console.e(TAG, "gps_node_t subscribe err\n"); _baro_last_alt = 0.0f; _gps_status.status = GPS_UNDETECTED; _gps_status.fix_cnt = 0; // publish init gps status mcn_publish(MCN_ID(GPS_STATUS), &_gps_status); _gps_driv_vel.velocity.x = _gps_driv_vel.velocity.y = _gps_driv_vel.velocity.z = 0.0f; _gps_driv_vel.last_pos.x = _gps_driv_vel.last_pos.y = _gps_driv_vel.last_pos.z = 0.0f; return res; } void sensor_collect(void) { float gyr[3], acc[3], mag[3]; if(sensor_gyr_get_calibrated_data(gyr) == RT_EOK){ gyrfilter_input(gyr); mcn_publish(MCN_ID(SENSOR_GYR), gyr); mcn_publish(MCN_ID(SENSOR_FILTER_GYR), gyrfilter_current()); }else{ Console.e(TAG, "fail to get gyr data\n"); } if(sensor_acc_get_calibrated_data(acc) == RT_EOK){ accfilter_input(acc); mcn_publish(MCN_ID(SENSOR_ACC), acc); mcn_publish(MCN_ID(SENSOR_FILTER_ACC), accfilter_current()); }else{ Console.e(TAG, "fail to get acc data\n"); } if(sensor_mag_ready()){ if(sensor_mag_get_calibrated_data(mag) == RT_EOK){ magfilter_input(mag); mcn_publish(MCN_ID(SENSOR_MAG), mag); mcn_publish(MCN_ID(SENSOR_FILTER_MAG), magfilter_current()); _mag_update_flag = true; }else{ 
Console.e(TAG, "fail to get mag data\n"); } } if(sensor_baro_ready()){ if(sensor_baro_update()){ MS5611_REPORT_Def* baro_report = sensor_baro_get_report(); float dt = (float)(baro_report->time_stamp-_baro_last_time)*1e-3; _baro_pos.time_stamp = baro_report->time_stamp; if(dt <= 0.0f) dt = 0.02f; _baro_pos.altitude = -baro_report->altitude; // change to NED coordinate float vel = (_baro_pos.altitude-_baro_last_alt)/dt; _baro_pos.velocity = _baro_pos.velocity + 0.05*(vel-_baro_pos.velocity); mcn_publish(MCN_ID(SENSOR_BARO), baro_report); mcn_publish(MCN_ID(BARO_POSITION), &_baro_pos); _baro_last_alt = _baro_pos.altitude; _baro_last_time = baro_report->time_stamp; } } if(mcn_poll(gps_node_t)){ struct vehicle_gps_position_s gps_pos_t; mcn_copy(MCN_ID(GPS_POSITION), gps_node_t, &gps_pos_t); HOME_Pos home = pos_home_get(); if(home.gps_coordinate_set == true){ Vector3f_t pos; gps_get_position(&pos, gps_pos_t); _gps_driv_vel.velocity.x = (pos.x - _gps_driv_vel.last_pos.x) / 0.1f; // the gps update interval is 100ms _gps_driv_vel.velocity.y = (pos.y - _gps_driv_vel.last_pos.y) / 0.1f; _gps_driv_vel.velocity.z = (pos.z - _gps_driv_vel.last_pos.z) / 0.1f; _gps_driv_vel.last_pos.x = pos.x; _gps_driv_vel.last_pos.y = pos.y; _gps_driv_vel.last_pos.z = pos.z; } // check legality if(_gps_status.status!=GPS_AVAILABLE && gps_pos_t.satellites_used>=6 && IN_RANGE(gps_pos_t.eph, 0.0f, 2.5f)){ _gps_status.fix_cnt++; if(_gps_status.fix_cnt >= 10){ _gps_status.status = GPS_AVAILABLE; // gps becomes available, publish mcn_publish(MCN_ID(GPS_STATUS), &_gps_status); } } if(_gps_status.status!=GPS_INAVAILABLE && (gps_pos_t.satellites_used<=4 || gps_pos_t.eph>3.5f)){ _gps_status.status = GPS_INAVAILABLE; _gps_status.fix_cnt = 0; mcn_publish(MCN_ID(GPS_STATUS), &_gps_status); } } } void sensor_manager_init(void) { // do something here } int handle_gps_shell_cmd(int argc, char** argv) { if(argc > 1){ if(strcmp(argv[1], "status") == 0){ char status_str[20] = ""; GPS_Status gps_status; 
mcn_copy_from_hub(MCN_ID(GPS_STATUS), &gps_status); if(gps_status.status == GPS_UNDETECTED){ strcpy(status_str, "UNDETECTED"); } else if(gps_status.status == GPS_AVAILABLE){ strcpy(status_str, "AVAILABLE"); } else{ strcpy(status_str, "INAVAILABLE"); } struct vehicle_gps_position_s gps_pos = gps_get_report(); Console.print("gps status: %s, satelites:%d, fix type:%d [eph,epv]:[%.3f %.3f], [hdop,vdop]:[%.3f %.3f]\n", status_str, gps_pos.satellites_used, gps_pos.fix_type, gps_pos.eph, gps_pos.epv, gps_pos.hdop, gps_pos.vdop); } } return 0; } int handle_sensor_shell_cmd(int argc, char** argv) { uint8_t sensor_type = 0; uint32_t interval = 1000; //default is 1s uint32_t cnt = 1; uint8_t raw_data = 0; uint8_t no_cali = 0; if(argc > 1){ if(strcmp(argv[1], "acc") == 0){ sensor_type = 1; } else if(strcmp(argv[1], "mag") == 0){ sensor_type = 2; } else if(strcmp(argv[1], "gyr") == 0){ sensor_type = 3; }else if(strcmp(argv[1], "gps") == 0){ sensor_type = 4; }else{ Console.print("unknow parameter:%s\n", argv[1]); return 1; } for(uint16_t i = 2 ; i < argc ; i++){ if(strcmp(argv[i], "-t") == 0){ i++; if(i >= argc){ Console.print("wrong cmd format.\n"); return 2; } interval = atoi(argv[i]); } if(strcmp(argv[i], "-n") == 0){ i++; if(i >= argc){ Console.print("wrong cmd format.\n"); return 2; } cnt = atoi(argv[i]); } if(strcmp(argv[i], "-r") == 0){ raw_data = 1; } if(strcmp(argv[i], "-nc") == 0){ no_cali = 1; } } switch(sensor_type) { case 1: //acc { for(uint32_t i = 0 ; i < cnt ; i++){ if(raw_data){ int16_t raw_acc[3]; sensor_acc_raw_measure(raw_acc); Console.print("raw acc:%d %d %d\n", raw_acc[0], raw_acc[1], raw_acc[2]); }else if(no_cali){ float acc[3]; sensor_acc_measure(acc); Console.print("acc:%f %f %f\n", acc[0], acc[1], acc[2]); }else{ float acc[3]; /* read from topics instead of remeasuring */ mcn_copy_from_hub(MCN_ID(SENSOR_ACC), acc); //sensor_acc_get_calibrated_data(acc); Console.print("cali acc:%f %f %f\n", acc[0], acc[1], acc[2]); } if(cnt > 1) 
rt_thread_delay(interval); } }break; case 2: //mag { for(uint32_t i = 0 ; i < cnt ; i++){ if(raw_data){ int16_t raw_mag[3]; sensor_mag_raw_measure(raw_mag); Console.print("raw mag:%d %d %d\n", raw_mag[0], raw_mag[1], raw_mag[2]); }else if(no_cali){ float mag[3]; sensor_mag_measure(mag); Console.print("mag:%f %f %f\n", mag[0], mag[1], mag[2]); }else{ float mag[3]; /* read from topics instead of remeasuring */ mcn_copy_from_hub(MCN_ID(SENSOR_MAG), mag); Console.print("cali mag:%f %f %f\n", mag[0], mag[1], mag[2]); } if(cnt > 1) rt_thread_delay(interval); } }break; case 3: //gyr { for(uint32_t i = 0 ; i < cnt ; i++){ if(raw_data){ int16_t raw_gyr[3]; sensor_gyr_raw_measure(raw_gyr); Console.print("raw gyr:%d %d %d\n", raw_gyr[0], raw_gyr[1], raw_gyr[2]); }else if(no_cali){ float gyr[3]; sensor_gyr_measure(gyr); Console.print("gyr:%f %f %f\n", gyr[0], gyr[1], gyr[2]); }else{ float gyr[3]; //sensor_gyr_get_calibrated_data(gyr); mcn_copy_from_hub(MCN_ID(SENSOR_GYR), gyr); Console.print("cali gyr:%f %f %f\n", gyr[0], gyr[1], gyr[2]); } if(cnt > 1) rt_thread_delay(interval); } }break; case 4: //gps { if(argc > 2){ if(strcmp(argv[2], "sethome") == 0){ ctrl_set_home(); Console.print("set home success!\n"); }else{ for(uint32_t i = 0 ; i < cnt ; i++){ struct vehicle_gps_position_s gps_report = gps_get_report(); Console.print("sv:%d lat:%f lon:%f vn:%f ve:%f eph:%f hdop:%f\n", gps_report.satellites_used, (float)gps_report.lat*1e-7, (float)gps_report.lon*1e-7, gps_report.vel_n_m_s, gps_report.vel_e_m_s, gps_report.eph, gps_report.hdop); } } } if(cnt > 1) rt_thread_delay(interval); }break; default: break; } } return 0; }
c41ee4b07316022abdbbaa1fa24d170c13ee34ff
83e7dc1281874779c46dfadcc15b2bb66d8e599c
/src/draw/sw/lv_draw_sw_border.c
11de501ceb8d0c3e5d25d56061bb53bd3acef1ce
[ "MIT" ]
permissive
lvgl/lvgl
7d51d6774d6ac71df7101fc7ded56fea4b70be01
5c984b4a5364b6455966eb3a860153806c51626f
refs/heads/master
2023-08-30T22:39:20.283922
2023-08-30T19:55:29
2023-08-30T19:55:29
60,667,730
9,296
2,218
MIT
2023-09-14T17:59:34
2016-06-08T04:14:34
C
UTF-8
C
false
false
11,320
c
lv_draw_sw_border.c
/** * @file lv_draw_rect.c * */ /********************* * INCLUDES *********************/ #include "lv_draw_sw.h" #if LV_USE_DRAW_SW #include "blend/lv_draw_sw_blend.h" #include "../../misc/lv_math.h" #include "../../misc/lv_txt_ap.h" #include "../../core/lv_refr.h" #include "../../misc/lv_assert.h" #include "../../stdlib/lv_string.h" #include "../lv_draw_mask.h" /********************* * DEFINES *********************/ #define SPLIT_LIMIT 50 /********************** * TYPEDEFS **********************/ /********************** * STATIC PROTOTYPES **********************/ static void draw_border_complex(lv_draw_unit_t * draw_unit, const lv_area_t * outer_area, const lv_area_t * inner_area, lv_coord_t rout, lv_coord_t rin, lv_color_t color, lv_opa_t opa); static void draw_border_simple(lv_draw_unit_t * draw_unit, const lv_area_t * outer_area, const lv_area_t * inner_area, lv_color_t color, lv_opa_t opa); /********************** * STATIC VARIABLES **********************/ /********************** * MACROS **********************/ /********************** * GLOBAL FUNCTIONS **********************/ void lv_draw_sw_border(lv_draw_unit_t * draw_unit, const lv_draw_border_dsc_t * dsc, const lv_area_t * coords) { if(dsc->opa <= LV_OPA_MIN) return; if(dsc->width == 0) return; if(dsc->side == LV_BORDER_SIDE_NONE) return; int32_t coords_w = lv_area_get_width(coords); int32_t coords_h = lv_area_get_height(coords); int32_t rout = dsc->radius; int32_t short_side = LV_MIN(coords_w, coords_h); if(rout > short_side >> 1) rout = short_side >> 1; /*Get the inner area*/ lv_area_t area_inner; lv_area_copy(&area_inner, coords); area_inner.x1 += ((dsc->side & LV_BORDER_SIDE_LEFT) ? dsc->width : - (dsc->width + rout)); area_inner.x2 -= ((dsc->side & LV_BORDER_SIDE_RIGHT) ? dsc->width : - (dsc->width + rout)); area_inner.y1 += ((dsc->side & LV_BORDER_SIDE_TOP) ? dsc->width : - (dsc->width + rout)); area_inner.y2 -= ((dsc->side & LV_BORDER_SIDE_BOTTOM) ? 
dsc->width : - (dsc->width + rout)); lv_coord_t rin = rout - dsc->width; if(rin < 0) rin = 0; if(rout == 0 && rin == 0) { draw_border_simple(draw_unit, coords, &area_inner, dsc->color, dsc->opa); } else { draw_border_complex(draw_unit, coords, &area_inner, rout, rin, dsc->color, dsc->opa); } } /********************** * STATIC FUNCTIONS **********************/ void draw_border_complex(lv_draw_unit_t * draw_unit, const lv_area_t * outer_area, const lv_area_t * inner_area, lv_coord_t rout, lv_coord_t rin, lv_color_t color, lv_opa_t opa) { #if LV_DRAW_SW_COMPLEX /*Get clipped draw area which is the real draw area. *It is always the same or inside `coords`*/ lv_area_t draw_area; if(!_lv_area_intersect(&draw_area, outer_area, draw_unit->clip_area)) return; int32_t draw_area_w = lv_area_get_width(&draw_area); lv_draw_sw_blend_dsc_t blend_dsc; lv_memzero(&blend_dsc, sizeof(blend_dsc)); lv_opa_t * mask_buf = lv_malloc(draw_area_w); blend_dsc.mask_buf = mask_buf; void * mask_list[3] = {0}; /*Create mask for the inner mask*/ lv_draw_sw_mask_radius_param_t mask_rin_param; lv_draw_sw_mask_radius_init(&mask_rin_param, inner_area, rin, true); mask_list[0] = &mask_rin_param; /*Create mask for the outer area*/ lv_draw_sw_mask_radius_param_t mask_rout_param; if(rout > 0) { lv_draw_sw_mask_radius_init(&mask_rout_param, outer_area, rout, false); mask_list[1] = &mask_rout_param; } int32_t h; lv_area_t blend_area; blend_dsc.blend_area = &blend_area; blend_dsc.mask_area = &blend_area; blend_dsc.color = color; blend_dsc.opa = opa; /*Calculate the x and y coordinates where the straight parts area is*/ lv_area_t core_area; core_area.x1 = LV_MAX(outer_area->x1 + rout, inner_area->x1); core_area.x2 = LV_MIN(outer_area->x2 - rout, inner_area->x2); core_area.y1 = LV_MAX(outer_area->y1 + rout, inner_area->y1); core_area.y2 = LV_MIN(outer_area->y2 - rout, inner_area->y2); lv_coord_t core_w = lv_area_get_width(&core_area); bool top_side = outer_area->y1 <= inner_area->y1; bool bottom_side = 
outer_area->y2 >= inner_area->y2; /*No masks*/ bool left_side = outer_area->x1 <= inner_area->x1; bool right_side = outer_area->x2 >= inner_area->x2; bool split_hor = true; if(left_side && right_side && top_side && bottom_side && core_w < SPLIT_LIMIT) { split_hor = false; } blend_dsc.mask_res = LV_DRAW_SW_MASK_RES_FULL_COVER; /*Draw the straight lines first if they are long enough*/ if(top_side && split_hor) { blend_area.x1 = core_area.x1; blend_area.x2 = core_area.x2; blend_area.y1 = outer_area->y1; blend_area.y2 = inner_area->y1 - 1; lv_draw_sw_blend(draw_unit, &blend_dsc); } if(bottom_side && split_hor) { blend_area.x1 = core_area.x1; blend_area.x2 = core_area.x2; blend_area.y1 = inner_area->y2 + 1; blend_area.y2 = outer_area->y2; lv_draw_sw_blend(draw_unit, &blend_dsc); } /*If the border is very thick and the vertical sides overlap horizontally draw a single rectangle*/ if(inner_area->x1 >= inner_area->x2 && left_side && right_side) { blend_area.x1 = outer_area->x1; blend_area.x2 = outer_area->x2; blend_area.y1 = core_area.y1; blend_area.y2 = core_area.y2; lv_draw_sw_blend(draw_unit, &blend_dsc); } else { if(left_side) { blend_area.x1 = outer_area->x1; blend_area.x2 = inner_area->x1 - 1; blend_area.y1 = core_area.y1; blend_area.y2 = core_area.y2; lv_draw_sw_blend(draw_unit, &blend_dsc); } if(right_side) { blend_area.x1 = inner_area->x2 + 1; blend_area.x2 = outer_area->x2; blend_area.y1 = core_area.y1; blend_area.y2 = core_area.y2; lv_draw_sw_blend(draw_unit, &blend_dsc); } } /*Draw the corners*/ lv_coord_t blend_w; /*Left and right corner together if they are close to each other*/ if(!split_hor) { /*Calculate the top corner and mirror it to the bottom*/ blend_area.x1 = draw_area.x1; blend_area.x2 = draw_area.x2; lv_coord_t max_h = LV_MAX(rout, inner_area->y1 - outer_area->y1); for(h = 0; h < max_h; h++) { lv_coord_t top_y = outer_area->y1 + h; lv_coord_t bottom_y = outer_area->y2 - h; if(top_y < draw_area.y1 && bottom_y > draw_area.y2) continue; /*This line is 
clipped now*/ lv_memset(mask_buf, 0xff, draw_area_w); blend_dsc.mask_res = lv_draw_sw_mask_apply(mask_list, mask_buf, blend_area.x1, top_y, draw_area_w); if(top_y >= draw_area.y1) { blend_area.y1 = top_y; blend_area.y2 = top_y; lv_draw_sw_blend(draw_unit, &blend_dsc); } if(bottom_y <= draw_area.y2) { blend_area.y1 = bottom_y; blend_area.y2 = bottom_y; lv_draw_sw_blend(draw_unit, &blend_dsc); } } } else { /*Left corners*/ blend_area.x1 = draw_area.x1; blend_area.x2 = LV_MIN(draw_area.x2, core_area.x1 - 1); blend_w = lv_area_get_width(&blend_area); if(blend_w > 0) { if(left_side || top_side) { for(h = draw_area.y1; h < core_area.y1; h++) { blend_area.y1 = h; blend_area.y2 = h; lv_memset(mask_buf, 0xff, blend_w); blend_dsc.mask_res = lv_draw_sw_mask_apply(mask_list, mask_buf, blend_area.x1, h, blend_w); lv_draw_sw_blend(draw_unit, &blend_dsc); } } if(left_side || bottom_side) { for(h = core_area.y2 + 1; h <= draw_area.y2; h++) { blend_area.y1 = h; blend_area.y2 = h; lv_memset(mask_buf, 0xff, blend_w); blend_dsc.mask_res = lv_draw_sw_mask_apply(mask_list, mask_buf, blend_area.x1, h, blend_w); lv_draw_sw_blend(draw_unit, &blend_dsc); } } } /*Right corners*/ blend_area.x1 = LV_MAX(draw_area.x1, blend_area.x2 + 1); /*To not overlap with the left side*/ blend_area.x1 = LV_MAX(draw_area.x1, core_area.x2 + 1); blend_area.x2 = draw_area.x2; blend_w = lv_area_get_width(&blend_area); if(blend_w > 0) { if(right_side || top_side) { for(h = draw_area.y1; h < core_area.y1; h++) { blend_area.y1 = h; blend_area.y2 = h; lv_memset(mask_buf, 0xff, blend_w); blend_dsc.mask_res = lv_draw_sw_mask_apply(mask_list, mask_buf, blend_area.x1, h, blend_w); lv_draw_sw_blend(draw_unit, &blend_dsc); } } if(right_side || bottom_side) { for(h = core_area.y2 + 1; h <= draw_area.y2; h++) { blend_area.y1 = h; blend_area.y2 = h; lv_memset(mask_buf, 0xff, blend_w); blend_dsc.mask_res = lv_draw_sw_mask_apply(mask_list, mask_buf, blend_area.x1, h, blend_w); lv_draw_sw_blend(draw_unit, &blend_dsc); } } } } 
lv_draw_sw_mask_free_param(&mask_rin_param); if(rout > 0) lv_draw_sw_mask_free_param(&mask_rout_param); lv_free(mask_buf); #endif /*LV_DRAW_SW_COMPLEX*/ } static void draw_border_simple(lv_draw_unit_t * draw_unit, const lv_area_t * outer_area, const lv_area_t * inner_area, lv_color_t color, lv_opa_t opa) { lv_area_t a; lv_draw_sw_blend_dsc_t blend_dsc; lv_memzero(&blend_dsc, sizeof(lv_draw_sw_blend_dsc_t)); blend_dsc.blend_area = &a; blend_dsc.color = color; blend_dsc.opa = opa; bool top_side = outer_area->y1 <= inner_area->y1; bool bottom_side = outer_area->y2 >= inner_area->y2; bool left_side = outer_area->x1 <= inner_area->x1; bool right_side = outer_area->x2 >= inner_area->x2; /*Top*/ a.x1 = outer_area->x1; a.x2 = outer_area->x2; a.y1 = outer_area->y1; a.y2 = inner_area->y1 - 1; if(top_side) { lv_draw_sw_blend(draw_unit, &blend_dsc); } /*Bottom*/ a.y1 = inner_area->y2 + 1; a.y2 = outer_area->y2; if(bottom_side) { lv_draw_sw_blend(draw_unit, &blend_dsc); } /*Left*/ a.x1 = outer_area->x1; a.x2 = inner_area->x1 - 1; a.y1 = (top_side) ? inner_area->y1 : outer_area->y1; a.y2 = (bottom_side) ? inner_area->y2 : outer_area->y2; if(left_side) { lv_draw_sw_blend(draw_unit, &blend_dsc); } /*Right*/ a.x1 = inner_area->x2 + 1; a.x2 = outer_area->x2; if(right_side) { lv_draw_sw_blend(draw_unit, &blend_dsc); } } #endif /*LV_USE_DRAW_SW*/
937af791d2261aad0629e4652aaefa9820ad1132
7973881afbe0be1da0afc2c6cee74b4601846afa
/src/wordbuf.c
696b205832240fed430892a8926c932fe2ae97e1
[ "DOC", "MIT" ]
permissive
koron/cmigemo
c199d95b9d06c9cac570b635ab95a67d7db405e2
e0f6145f61e0b7058c3006f344e58571d9fdd83a
refs/heads/master
2022-07-17T07:27:36.165886
2022-06-23T01:52:51
2022-06-23T01:52:51
7,397,118
114
30
null
2022-06-23T01:52:52
2013-01-01T17:07:57
C
SHIFT_JIS
C
false
false
2,415
c
wordbuf.c
/* vim:set ts=8 sts=4 sw=4 tw=0: */ /* * wordbuf.h - * * Written By: MURAOKA Taro <koron.kaoriya@gmail.com> * Last Change: 25-Oct-2011. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include "wordbuf.h" #define WORDLEN_DEF 64 int n_wordbuf_open = 0; /* for DEBUG */ int n_wordbuf_close = 0; /* for DEBUG */ /* function pre-declaration */ static int wordbuf_extend(wordbuf_p p, int len); wordbuf_p wordbuf_open() { wordbuf_p p = (wordbuf_p)malloc(sizeof(wordbuf_t)); if (p) { ++n_wordbuf_open; /* for DEBUG */ p->len = WORDLEN_DEF; p->buf = (unsigned char*)malloc(p->len); p->last = 0; p->buf[0] = '\0'; } return p; } void wordbuf_close(wordbuf_p p) { if (p) { ++n_wordbuf_close; /* for DEBUG */ free(p->buf); free(p); } } void wordbuf_reset(wordbuf_p p) { p->last = 0; p->buf[0] = '\0'; } /* * wordbuf_extend(wordbuf_p p, int req_len); * バッファの伸長。エラー時には0が帰る。 * 高速化のために伸ばすべきかは呼出側で判断する。 */ static int wordbuf_extend(wordbuf_p p, int req_len) { int newlen = p->len * 2; unsigned char *newbuf; while (req_len > newlen) newlen *= 2; if (!(newbuf = (unsigned char*)realloc(p->buf, newlen))) { /*fprintf(stderr, "wordbuf_add(): failed to extend buffer\n");*/ return 0; } else { p->len = newlen; p->buf = newbuf; return req_len; } } int wordbuf_last(wordbuf_p p) { return p->last; } int wordbuf_add(wordbuf_p p, unsigned char ch) { int newlen = p->last + 2; if (newlen > p->len && !wordbuf_extend(p, newlen)) return 0; else { #if 1 unsigned char *buf = p->buf + p->last; buf[0] = ch; buf[1] = '\0'; #else /* リトルエンディアンを仮定するなら使えるが… */ *(unsigned short*)&p->buf[p->last] = (unsigned short)ch; #endif return ++p->last; } } int wordbuf_cat(wordbuf_p p, const unsigned char* sz) { int len = 0; if (sz != NULL) { size_t l = strlen(sz); len = l < INT_MAX ? 
(int)l : INT_MAX; } if (len > 0) { int newlen = p->last + len + 1; if (newlen > p->len && !wordbuf_extend(p, newlen)) return 0; memcpy(&p->buf[p->last], sz, len + 1); p->last = p->last + len; } return p->last; } unsigned char* wordbuf_get(wordbuf_p p) { return p->buf; }
78b2df3833a12b979b21d64996b43bcf7a2628ef
39b8d37edbc228c0ee43644f620fd5f7ce448ff8
/src/sclite/boolpr1.c
b70ac4afbc0a983cf46c5e538e0bee142b7ea74d
[ "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-public-domain", "NIST-Software" ]
permissive
usnistgov/SCTK
e29feea7744e7a2a8204f5e90d0a6fd81e6ed690
f48376a203ab17f0d479995d87275db6772dcb4a
refs/heads/master
2023-01-31T15:08:23.855355
2022-09-08T11:53:45
2022-09-08T11:53:45
57,993,905
170
52
NOASSERTION
2023-01-27T22:28:02
2016-05-03T19:00:47
C
UTF-8
C
false
false
627
c
boolpr1.c
/* file boolpr1.c */

#include "sctk.h"

/**********************************************************************/
/*                                                                    */
/*  char *bool_print(x)                                               */
/*                                                                    */
/*  Maps a boolean value onto its one-character display form:         */
/*  "T" for true, "F" for false.                                      */
/*                                                                    */
/**********************************************************************/
char *bool_print(boolean x)
{
    return (x) ? "T" : "F";
}
311941ef14cc3c25ccf370c743794c2b74662acd
fb47ab6337a71029dee71933e449cf7f6805fc0f
/app/lkboot/commands.c
04e2739afedfdf565dc58b630b09fc53cd0fc3e5
[ "MIT" ]
permissive
littlekernel/lk
7e7ba50b87b1f2e0b6e2f052c59249825c91975b
30dc320054f70910e1c1ee40a6948ee99672acec
refs/heads/master
2023-09-02T00:47:52.203963
2023-06-21T22:42:35
2023-06-21T22:42:35
3,058,456
3,077
618
MIT
2023-08-30T09:41:31
2011-12-27T19:19:36
C
UTF-8
C
false
false
11,286
c
commands.c
/* * Copyright (c) 2014 Brian Swetland * * Use of this source code is governed by a MIT-style * license that can be found in the LICENSE file or at * https://opensource.org/licenses/MIT */ #include <platform.h> #include <stdio.h> #include <stdlib.h> #include <lk/debug.h> #include <string.h> #include <endian.h> #include <malloc.h> #include <arch.h> #include <lk/err.h> #include <lk/trace.h> #include <lk/pow2.h> #include <kernel/thread.h> #include <kernel/vm.h> #include <lib/bio.h> #include <lib/bootargs.h> #include <lib/bootimage.h> #include <lib/ptable.h> #include <lib/sysparam.h> #include <app/lkboot.h> #if PLATFORM_ZYNQ #include <platform/fpga.h> #include <platform/zynq.h> #endif #include "lkboot.h" #define bootdevice "spi0" #define LOCAL_TRACE 0 struct lkb_command { struct lkb_command *next; const char *name; lkb_handler_t handler; void *cookie; }; struct lkb_command *lkb_cmd_list = NULL; void lkb_register(const char *name, lkb_handler_t handler, void *cookie) { struct lkb_command *cmd = malloc(sizeof(struct lkb_command)); if (cmd != NULL) { cmd->next = lkb_cmd_list; cmd->name = name; cmd->handler = handler; cmd->cookie = cookie; lkb_cmd_list = cmd; } } static int do_reboot(void *arg) { thread_sleep(250); platform_halt(HALT_ACTION_REBOOT, HALT_REASON_SW_RESET); return 0; } struct chainload_args { void *func; ulong args[4]; }; static int chainload_thread(void *arg) { struct chainload_args *args = (struct chainload_args *)arg; thread_sleep(250); TRACEF("chain loading address %p, args 0x%lx 0x%lx 0x%lx 0x%lx\n", args->func, args->args[0], args->args[1], args->args[2], args->args[3]); arch_chain_load((void *)args->func, args->args[0], args->args[1], args->args[2], args->args[3]); for (;;); } static int do_boot(lkb_t *lkb, size_t len, const char **result) { LTRACEF("lkb %p, len %zu, result %p\n", lkb, len, result); void *buf; paddr_t buf_phys; if (vmm_alloc_contiguous(vmm_get_kernel_aspace(), "lkboot_iobuf", len, &buf, log2_uint(1024*1024), 0, ARCH_MMU_FLAG_UNCACHED) 
< 0) { *result = "not enough memory"; return -1; } buf_phys = vaddr_to_paddr(buf); LTRACEF("iobuffer %p (phys 0x%lx)\n", buf, buf_phys); if (lkb_read(lkb, buf, len)) { *result = "io error"; // XXX free buffer here return -1; } /* construct a boot argument list */ const size_t bootargs_size = PAGE_SIZE; #if 0 void *args = (void *)((uintptr_t)lkb_iobuffer + lkb_iobuffer_size - bootargs_size); paddr_t args_phys = lkb_iobuffer_phys + lkb_iobuffer_size - bootargs_size; #elif PLATFORM_ZYNQ /* grab the top page of sram */ /* XXX do this better */ paddr_t args_phys = SRAM_BASE + SRAM_SIZE - bootargs_size; void *args = paddr_to_kvaddr(args_phys); #else #error need better way #endif LTRACEF("boot args %p, phys 0x%lx, len %zu\n", args, args_phys, bootargs_size); bootargs_start(args, bootargs_size); bootargs_add_command_line(args, bootargs_size, "what what"); arch_clean_cache_range((vaddr_t)args, bootargs_size); ulong lk_args[4]; bootargs_generate_lk_arg_values(args_phys, lk_args); const void *ptr; /* sniff it to see if it's a bootimage or a raw image */ bootimage_t *bi; if (bootimage_open(buf, len, &bi) >= 0) { /* it's a bootimage */ TRACEF("detected bootimage\n"); /* find the lk image */ if (bootimage_get_file_section(bi, TYPE_LK, &ptr, NULL) >= 0) { TRACEF("found lk section at %p\n", ptr); /* add the boot image to the argument list */ size_t bootimage_size; bootimage_get_range(bi, NULL, &bootimage_size); bootargs_add_bootimage_pointer(args, bootargs_size, "pmem", buf_phys, bootimage_size); } } else { /* raw image, just chain load it directly */ TRACEF("raw image, chainloading\n"); ptr = buf; } /* start a boot thread to complete the startup */ static struct chainload_args cl_args; cl_args.func = (void *)ptr; cl_args.args[0] = lk_args[0]; cl_args.args[1] = lk_args[1]; cl_args.args[2] = lk_args[2]; cl_args.args[3] = lk_args[3]; thread_resume(thread_create("boot", &chainload_thread, &cl_args, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE)); return 0; } /* try to boot the system from a 
flash partition */ status_t do_flash_boot(void) { status_t err; LTRACE_ENTRY; /* construct a boot argument list */ const size_t bootargs_size = PAGE_SIZE; #if 0 /* old code */ void *args = (void *)((uintptr_t)lkb_iobuffer + lkb_iobuffer_size - bootargs_size); paddr_t args_phys = lkb_iobuffer_phys + lkb_iobuffer_size - bootargs_size; #elif PLATFORM_ZYNQ /* grab the top page of sram */ paddr_t args_phys = SRAM_BASE + SRAM_SIZE - bootargs_size; void *args = paddr_to_kvaddr(args_phys); #else #error need better way #endif LTRACEF("boot args %p, phys 0x%lx, len %zu\n", args, args_phys, bootargs_size); bootargs_start(args, bootargs_size); bootargs_add_command_line(args, bootargs_size, "what what"); arch_clean_cache_range((vaddr_t)args, bootargs_size); ulong lk_args[4]; bootargs_generate_lk_arg_values(args_phys, lk_args); const void *ptr; if (!ptable_found_valid()) { TRACEF("ptable not found\n"); return ERR_NOT_FOUND; } /* find the system partition */ struct ptable_entry entry; err = ptable_find("system", &entry); if (err < 0) { TRACEF("cannot find system partition\n"); return ERR_NOT_FOUND; } /* get a direct pointer to the device */ bdev_t *bdev = ptable_get_device(); if (!bdev) { TRACEF("error opening boot device\n"); return ERR_NOT_FOUND; } /* convert the bdev to a memory pointer */ err = bio_ioctl(bdev, BIO_IOCTL_GET_MEM_MAP, (void *)&ptr); TRACEF("err %d, ptr %p\n", err, ptr); if (err < 0) { TRACEF("error getting direct pointer to block device\n"); return ERR_NOT_FOUND; } /* sniff it to see if it's a bootimage or a raw image */ bootimage_t *bi; if (bootimage_open((char *)ptr + entry.offset, entry.length, &bi) >= 0) { size_t len; /* it's a bootimage */ TRACEF("detected bootimage\n"); /* find the lk image */ if (bootimage_get_file_section(bi, TYPE_LK, &ptr, &len) >= 0) { TRACEF("found lk section at %p\n", ptr); /* add the boot image to the argument list */ size_t bootimage_size; bootimage_get_range(bi, NULL, &bootimage_size); bootargs_add_bootimage_pointer(args, 
bootargs_size, bdev->name, entry.offset, bootimage_size); } } else { /* did not find a bootimage, abort */ bio_ioctl(bdev, BIO_IOCTL_PUT_MEM_MAP, NULL); return ERR_NOT_FOUND; } TRACEF("chain loading binary at %p\n", ptr); arch_chain_load((void *)ptr, lk_args[0], lk_args[1], lk_args[2], lk_args[3]); /* put the block device back into block mode (though we never get here) */ bio_ioctl(bdev, BIO_IOCTL_PUT_MEM_MAP, NULL); return NO_ERROR; } // return NULL for success, error string for failure int lkb_handle_command(lkb_t *lkb, const char *cmd, const char *arg, size_t len, const char **result) { *result = NULL; struct lkb_command *lcmd; for (lcmd = lkb_cmd_list; lcmd; lcmd = lcmd->next) { if (!strcmp(lcmd->name, cmd)) { *result = lcmd->handler(lkb, arg, len, lcmd->cookie); return 0; } } if (!strcmp(cmd, "flash") || !strcmp(cmd, "erase")) { struct ptable_entry entry; bdev_t *bdev; if (ptable_find(arg, &entry) < 0) { size_t plen = len; /* doesn't exist, make one */ if (ptable_add(arg, plen, 0) < 0) { *result = "error creating partition"; return -1; } if (ptable_find(arg, &entry) < 0) { *result = "couldn't find partition after creating it"; return -1; } } if (len > entry.length) { *result = "partition too small"; return -1; } if (!(bdev = ptable_get_device())) { *result = "ptable_get_device failed"; return -1; } printf("lkboot: erasing partition of size %llu\n", entry.length); if (bio_erase(bdev, entry.offset, entry.length) != (ssize_t)entry.length) { *result = "bio_erase failed"; return -1; } if (!strcmp(cmd, "flash")) { printf("lkboot: writing to partition\n"); void *buf = malloc(bdev->block_size); if (!buf) { *result = "memory allocation failed"; return -1; } size_t pos = 0; while (pos < len) { size_t toread = MIN(len - pos, bdev->block_size); LTRACEF("offset %zu, toread %zu\n", pos, toread); if (lkb_read(lkb, buf, toread)) { *result = "io error"; free(buf); return -1; } if (bio_write(bdev, buf, entry.offset + pos, toread) != (ssize_t)toread) { *result = "bio_write 
failed"; free(buf); return -1; } pos += toread; } free(buf); } } else if (!strcmp(cmd, "remove")) { if (ptable_remove(arg) < 0) { *result = "remove failed"; return -1; } } else if (!strcmp(cmd, "fpga")) { #if PLATFORM_ZYNQ void *buf = malloc(len); if (!buf) { *result = "error allocating buffer"; return -1; } /* translate to physical address */ paddr_t pa = vaddr_to_paddr(buf); if (pa == 0) { *result = "error allocating buffer"; free(buf); return -1; } if (lkb_read(lkb, buf, len)) { *result = "io error"; free(buf); return -1; } /* make sure the cache is flushed for this buffer for DMA coherency purposes */ arch_clean_cache_range((vaddr_t)buf, len); /* program the fpga */ zynq_reset_fpga(); zynq_program_fpga(pa, len); free(buf); #else *result = "no fpga"; return -1; #endif } else if (!strcmp(cmd, "boot")) { return do_boot(lkb, len, result); } else if (!strcmp(cmd, "getsysparam")) { const void *ptr; size_t len_local; if (sysparam_get_ptr(arg, &ptr, &len_local) == 0) { lkb_write(lkb, ptr, len_local); } } else if (!strcmp(cmd, "reboot")) { thread_resume(thread_create("reboot", &do_reboot, NULL, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE)); } else { *result = "unknown command"; return -1; } return 0; }
1bc4b5c8079da1294514daa918b22f6e58d657aa
28d0f8c01599f8f6c711bdde0b59f9c2cd221203
/sys/arch/iyonix/include/asm.h
53c9eda5a2f24dc72ecbce93c9c6a1d0f4ce2bce
[]
no_license
NetBSD/src
1a9cbc22ed778be638b37869ed4fb5c8dd616166
23ee83f7c0aea0777bd89d8ebd7f0cde9880d13c
refs/heads/trunk
2023-08-31T13:24:58.105962
2023-08-27T15:50:47
2023-08-27T15:50:47
88,439,547
656
348
null
2023-07-20T20:07:24
2017-04-16T20:03:43
null
UTF-8
C
false
false
84
h
asm.h
/* $NetBSD: asm.h,v 1.2 2005/12/11 12:17:51 christos Exp $ */ #include <arm/asm.h>
472c988d9bbc579b28c0e2243adcee3422fa8434
c8b39acfd4a857dc15ed3375e0d93e75fa3f1f64
/Engine/Plugins/Experimental/AlembicImporter/Source/ThirdParty/Alembic/hdf5/test/gen_bad_compound.c
b8641957486359d987425fe9da048bfb07e1f358
[ "MIT", "LicenseRef-scancode-proprietary-license" ]
permissive
windystrife/UnrealEngine_NVIDIAGameWorks
c3c7863083653caf1bc67d3ef104fb4b9f302e2a
b50e6338a7c5b26374d66306ebc7807541ff815e
refs/heads/4.18-GameWorks
2023-03-11T02:50:08.471040
2022-01-13T20:50:29
2022-01-13T20:50:29
124,100,479
262
179
MIT
2022-12-16T05:36:38
2018-03-06T15:44:09
C++
UTF-8
C
false
false
2,918
c
gen_bad_compound.c
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * Copyright by The HDF Group. * * Copyright by the Board of Trustees of the University of Illinois. * * All rights reserved. * * * * This file is part of HDF5. The full HDF5 copyright notice, including * * terms governing use, modification, and redistribution, is contained in * * the files COPYING and Copyright.html. COPYING can be found at the root * * of the source code distribution tree; Copyright.html can be found at the * * root level of an installed copy of the electronic HDF5 document set and * * is linked from the top-level documents page. It can also be found at * * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have * * access to either file, you may request a copy from help@hdfgroup.org. * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* * Programmer: Quincey Koziol <koziol@hdfgroup.org> * April 14, 2011 * * Purpose: This program is run to generate an HDF5 data file with objects * that use compound datatypes with no fields (now forbidden to * be created by the library, as of v1.4.x). It must be built/run * with a copy of the 1.2.x library. 
*/ #include <assert.h> #include "hdf5.h" #define FILENAME "bad_compound.h5" int main() { hid_t file; hid_t cmpd_dt; hid_t sid; hid_t did; hid_t aid; hid_t gid; hsize_t dim = 1; herr_t ret; /* Create compound datatype, but don't insert fields */ cmpd_dt = H5Tcreate(H5T_COMPOUND, (size_t)8); assert(cmpd_dt > 0); /* Create File */ file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); assert(file > 0); /* Create a dataspace to use */ sid = H5Screate_simple(1, &dim, NULL); assert(sid > 0); /* Create a dataset with the bad compound datatype */ did = H5Dcreate(file, "dataset", cmpd_dt, sid, H5P_DEFAULT); assert(did > 0); /* Create a group */ gid = H5Gcreate(file, "group", (size_t)0); assert(gid > 0); /* Create an attribute with the bad compound datatype */ aid = H5Acreate(gid, "attr", cmpd_dt, sid, H5P_DEFAULT); assert(aid > 0); /* Commit the datatype */ ret = H5Tcommit(file, "cmpnd", cmpd_dt); assert(ret >= 0); /* Close IDs */ ret = H5Gclose(gid); assert(ret >= 0); ret = H5Aclose(aid); assert(ret >= 0); ret = H5Sclose(sid); assert(ret >= 0); ret = H5Dclose(did); assert(ret >= 0); ret = H5Tclose(cmpd_dt); assert(ret >= 0); ret = H5Fclose(file); assert(ret >= 0); return(0); }
f4c4e6a8dd91361253ecc3980710e6d8b1dff94d
aa3befea459382dc5c01c925653d54f435b3fb0f
/drivers/clk/clk.c
5559fd151735042addf14f5e30187c9422d8a67f
[ "MIT-open-group", "BSD-3-Clause", "HPND-sell-variant", "BSD-4-Clause-UC", "LicenseRef-scancode-warranty-disclaimer", "MIT-0", "LicenseRef-scancode-bsd-atmel", "LicenseRef-scancode-gary-s-brown", "LicenseRef-scancode-proprietary-license", "SunPro", "MIT", "LicenseRef-scancode-public-domain-disclaimer", "LicenseRef-scancode-other-permissive", "HPND", "ISC", "Apache-2.0", "LicenseRef-scancode-public-domain", "BSD-2-Clause", "GPL-1.0-or-later", "CC-BY-2.0", "CC-BY-4.0" ]
permissive
apache/nuttx
14519a7bff4a87935d94fb8fb2b19edb501c7cec
606b6d9310fb25c7d92c6f95bf61737e3c79fa0f
refs/heads/master
2023-08-25T06:55:45.822534
2023-08-23T16:03:31
2023-08-24T21:25:47
228,103,273
407
241
Apache-2.0
2023-09-14T18:26:05
2019-12-14T23:27:55
C
UTF-8
C
false
false
26,699
c
clk.c
/**************************************************************************** * drivers/clk/clk.c * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. The * ASF licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include <nuttx/clk/clk.h> #include <nuttx/clk/clk_provider.h> #include <nuttx/fs/fs.h> #include <nuttx/fs/procfs.h> #include <nuttx/kmalloc.h> #include <nuttx/list.h> #include <nuttx/mutex.h> #include <debug.h> #include <fcntl.h> #include <stdio.h> #include <sys/stat.h> /**************************************************************************** * Pre-processor Definitions ****************************************************************************/ #define CLK_PROCFS_LINELEN 80 /**************************************************************************** * Private Datas ****************************************************************************/ static mutex_t g_clk_list_lock = NXMUTEX_INITIALIZER; static struct list_node g_clk_root_list = LIST_INITIAL_VALUE(g_clk_root_list); static struct list_node g_clk_orphan_list = 
LIST_INITIAL_VALUE(g_clk_orphan_list); /**************************************************************************** * Private Function Prototypes ****************************************************************************/ static irqstate_t clk_list_lock(void); static void clk_list_unlock(irqstate_t flags); static int clk_fetch_parent_index(FAR struct clk_s *clk, FAR struct clk_s *parent); static void clk_init_parent(FAR struct clk_s *clk); static void clk_reparent(FAR struct clk_s *clk, FAR struct clk_s *parent); static uint32_t clk_recalc(FAR struct clk_s *clk, uint32_t parent_rate); static void __clk_recalc_rate(FAR struct clk_s *clk); static void clk_calc_subtree(FAR struct clk_s *clk, uint32_t new_rate, FAR struct clk_s *new_parent, uint8_t p_index); static FAR struct clk_s *clk_calc_new_rates(FAR struct clk_s *clk, uint32_t rate); static void clk_change_rate(FAR struct clk_s *clk, uint32_t best_parent_rate); static uint32_t __clk_get_rate(FAR struct clk_s *clk); static uint32_t __clk_round_rate(FAR struct clk_s *clk, uint32_t rate); static int __clk_enable(FAR struct clk_s *clk); static int __clk_disable(FAR struct clk_s *clk); static struct clk_s *__clk_lookup(FAR const char *name, FAR struct clk_s *clk); static int __clk_register(FAR struct clk_s *clk); static void clk_disable_unused_subtree(FAR struct clk_s *clk); /* File system methods */ #if !defined(CONFIG_FS_PROCFS_EXCLUDE_CLK) && defined(CONFIG_FS_PROCFS) static int clk_procfs_open(FAR struct file *filep, FAR const char *relpath, int oflags, mode_t mode); static int clk_procfs_close(FAR struct file *filep); static ssize_t clk_procfs_read(FAR struct file *filep, FAR char *buffer, size_t buflen); static int clk_procfs_dup(FAR const struct file *oldp, FAR struct file *newp); static int clk_procfs_stat(const char *relpath, struct stat *buf); #endif /* !defined(CONFIG_FS_PROCFS_EXCLUDE_CLK) && defined(CONFIG_FS_PROCFS) */ /**************************************************************************** * 
Public Data ****************************************************************************/ #if !defined(CONFIG_FS_PROCFS_EXCLUDE_CLK) && defined(CONFIG_FS_PROCFS) const struct procfs_operations clk_procfsoperations = { clk_procfs_open, /* open */ clk_procfs_close, /* close */ clk_procfs_read, /* read */ NULL, /* write */ clk_procfs_dup, /* dup */ NULL, /* opendir */ NULL, /* closedir */ NULL, /* readdir */ NULL, /* rewinddir */ clk_procfs_stat, /* stat */ }; #endif /* !defined(CONFIG_FS_PROCFS_EXCLUDE_CLK) && defined(CONFIG_FS_PROCFS) */ /**************************************************************************** * Private Function ****************************************************************************/ #if !defined(CONFIG_FS_PROCFS_EXCLUDE_CLK) && defined(CONFIG_FS_PROCFS) static int clk_procfs_open(FAR struct file *filep, FAR const char *relpath, int oflags, mode_t mode) { FAR struct procfs_file_s *priv; if ((oflags & O_WRONLY) != 0 || (oflags & O_RDONLY) == 0) { return -EACCES; } priv = kmm_zalloc(sizeof(struct procfs_file_s)); if (!priv) { return -ENOMEM; } filep->f_priv = priv; return OK; } static int clk_procfs_close(FAR struct file *filep) { FAR struct procfs_file_s *priv = filep->f_priv; kmm_free(priv); filep->f_priv = NULL; return OK; } static size_t clk_procfs_printf(FAR char *buffer, size_t buflen, off_t *pos, FAR const char *fmt, ...) 
{ char tmp[CLK_PROCFS_LINELEN]; size_t tmplen; va_list ap; va_start(ap, fmt); tmplen = vsnprintf(tmp, sizeof(tmp), fmt, ap); va_end(ap); return procfs_memcpy(tmp, tmplen, buffer, buflen, pos); } static size_t clk_procfs_show_subtree(FAR struct clk_s *clk, int level, FAR char *buffer, size_t buflen, off_t *pos, FAR irqstate_t *flags) { FAR struct clk_s *child; size_t oldlen = buflen; size_t ret; if (strchr(clk_get_name(clk), '/')) { clk_list_unlock(*flags); } ret = clk_procfs_printf(buffer, buflen, pos, "%*s%-*s %11d %11u %11d\n", level * 2, "", 40 - level * 2, clk_get_name(clk), clk_is_enabled(clk), clk_get_rate(clk), clk_get_phase(clk)); buffer += ret; buflen -= ret; if (strchr(clk_get_name(clk), '/')) { *flags = clk_list_lock(); } if (buflen > 0) { list_for_every_entry(&clk->children, child, struct clk_s, node) { ret = clk_procfs_show_subtree(child, level + 1, buffer, buflen, pos, flags); buffer += ret; buflen -= ret; if (buflen == 0) { break; /* No enough space, return */ } } } return oldlen - buflen; } static size_t clk_procfs_showtree(FAR char *buffer, size_t buflen, off_t *pos) { FAR struct clk_s *clk; size_t oldlen = buflen; irqstate_t flags; size_t ret; flags = clk_list_lock(); list_for_every_entry(&g_clk_root_list, clk, struct clk_s, node) { ret = clk_procfs_show_subtree(clk, 0, buffer, buflen, pos, &flags); buffer += ret; buflen -= ret; if (buflen == 0) { goto out; /* No enough space, return */ } } list_for_every_entry(&g_clk_orphan_list, clk, struct clk_s, node) { ret = clk_procfs_show_subtree(clk, 0, buffer, buflen, pos, &flags); buffer += ret; buflen -= ret; if (buflen == 0) { goto out; /* No enough space, return */ } } out: clk_list_unlock(flags); return oldlen - buflen; } static ssize_t clk_procfs_read(FAR struct file *filep, FAR char *buffer, size_t buflen) { off_t pos = filep->f_pos; size_t oldlen = buflen; size_t ret; ret = clk_procfs_printf(buffer, buflen, &pos, "%8s%44s%12s%12s\n", "clock", "enable_cnt", "rate", "phase"); buffer += ret; buflen 
-= ret; if (buflen > 0) { ret = clk_procfs_showtree(buffer, buflen, &pos); buffer += ret; buflen -= ret; } filep->f_pos += oldlen - buflen; return oldlen - buflen; } static int clk_procfs_dup(FAR const struct file *oldp, FAR struct file *newp) { FAR struct procfs_file_s *oldpriv; FAR struct procfs_file_s *newpriv; oldpriv = oldp->f_priv; DEBUGASSERT(oldpriv); newpriv = kmm_zalloc(sizeof(struct procfs_file_s)); if (!newpriv) { return -ENOMEM; } memcpy(newpriv, oldpriv, sizeof(struct procfs_file_s)); newp->f_priv = newpriv; return OK; } static int clk_procfs_stat(const char *relpath, struct stat *buf) { /* File/directory size, access block size */ buf->st_mode = S_IFREG | S_IROTH | S_IRGRP | S_IRUSR; buf->st_size = 0; buf->st_blksize = 0; buf->st_blocks = 0; return OK; } #endif /* !defined(CONFIG_FS_PROCFS_EXCLUDE_CLK) && defined(CONFIG_FS_PROCFS) */ static irqstate_t clk_list_lock(void) { if (!up_interrupt_context() && !sched_idletask()) { nxmutex_lock(&g_clk_list_lock); } return enter_critical_section(); } static void clk_list_unlock(irqstate_t flags) { leave_critical_section(flags); if (!up_interrupt_context() && !sched_idletask()) { nxmutex_unlock(&g_clk_list_lock); } } static int clk_fetch_parent_index(FAR struct clk_s *clk, FAR struct clk_s *parent) { int i; if (!parent) { return -EINVAL; } for (i = 0; i < clk->num_parents; i++) { if (!strcmp(clk->parent_names[i], parent->name)) { return i; } } return -EINVAL; } static void clk_reparent(FAR struct clk_s *clk, FAR struct clk_s *parent) { list_delete(&clk->node); if (parent) { if (parent->new_child == clk) { parent->new_child = NULL; } list_add_head(&parent->children, &clk->node); } clk->parent = parent; } static uint32_t clk_recalc(FAR struct clk_s *clk, uint32_t parent_rate) { if (clk->ops->recalc_rate) { return clk->ops->recalc_rate(clk, parent_rate); } return parent_rate; } static void __clk_recalc_rate(FAR struct clk_s *clk) { uint32_t parent_rate = 0; FAR struct clk_s *child; if (clk->parent) { parent_rate 
= __clk_get_rate(clk->parent); } clk->rate = clk_recalc(clk, parent_rate); list_for_every_entry(&clk->children, child, struct clk_s, node) { __clk_recalc_rate(child); } } static void clk_calc_subtree(FAR struct clk_s *clk, uint32_t new_rate, FAR struct clk_s *new_parent, uint8_t p_index) { FAR struct clk_s *child; clk->new_rate = new_rate; clk->new_parent = new_parent; clk->new_parent_index = p_index; clk->new_child = NULL; if (new_parent && new_parent != clk->parent) { new_parent->new_child = clk; } list_for_every_entry(&clk->children, child, struct clk_s, node) { child->new_rate = clk_recalc(child, new_rate); clk_calc_subtree(child, child->new_rate, NULL, 0); } } static FAR struct clk_s *clk_calc_new_rates(FAR struct clk_s *clk, uint32_t rate) { FAR struct clk_s *top = clk; FAR struct clk_s *old_parent; FAR struct clk_s *parent; uint32_t best_parent_rate = 0; uint32_t new_rate = 0; int p_index = 0; if (!clk) { return NULL; } parent = old_parent = clk->parent; if (parent) { best_parent_rate = __clk_get_rate(parent); } if (clk->ops->determine_rate) { new_rate = clk->ops->determine_rate(clk, rate, &best_parent_rate, &parent); } else if (clk->ops->round_rate) { new_rate = clk->ops->round_rate(clk, rate, &best_parent_rate); } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) { clk->new_rate = clk->rate; return NULL; } else { top = clk_calc_new_rates(parent, rate); new_rate = parent->new_rate; goto out; } if (parent) { p_index = clk_fetch_parent_index(clk, parent); if (p_index < 0) { return NULL; } } if ((clk->flags & CLK_SET_RATE_PARENT) && parent && best_parent_rate != __clk_get_rate(parent)) { top = clk_calc_new_rates(parent, best_parent_rate); } out: clk_calc_subtree(clk, new_rate, parent, p_index); return top; } static void clk_change_rate(FAR struct clk_s *clk, uint32_t best_parent_rate) { FAR struct clk_s *child; FAR struct clk_s *old_parent; bool skip_set_rate = false; list_for_every_entry(&clk->children, child, struct clk_s, node) { if 
(child->new_parent && child->new_parent != clk) { continue; } if (child->new_rate > __clk_get_rate(child)) { clk_change_rate(child, clk->new_rate); } } old_parent = clk->parent; if (clk->new_parent && clk->new_parent != clk->parent) { if (clk->enable_count) { clk_enable(clk->new_parent); clk_enable(clk); } clk_reparent(clk, clk->new_parent); if (clk->ops->set_rate_and_parent) { skip_set_rate = true; clk->ops->set_rate_and_parent(clk, clk->new_rate, best_parent_rate, clk->new_parent_index); } else if (clk->ops->set_parent) { clk->ops->set_parent(clk, clk->new_parent_index); } if (clk->enable_count) { clk_disable(clk); clk_disable(old_parent); } } if (!skip_set_rate && clk->ops->set_rate) { clk->ops->set_rate(clk, clk->new_rate, best_parent_rate); } clk->rate = clk->new_rate; list_for_every_entry(&clk->children, child, struct clk_s, node) { if (child->new_parent && child->new_parent != clk) { continue; } if (child->new_rate != __clk_get_rate(child)) { clk_change_rate(child, clk->new_rate); } } if (clk->new_child && clk->new_child->new_rate != __clk_get_rate(clk->new_child)) { clk_change_rate(clk->new_child, clk->new_rate); } } static struct clk_s *__clk_lookup(FAR const char *name, FAR struct clk_s *clk) { FAR struct clk_s *child; FAR struct clk_s *ret; if (!strcmp(clk->name, name)) { return clk; } list_for_every_entry(&clk->children, child, struct clk_s, node) { ret = __clk_lookup(name, child); if (ret) { return ret; } } return NULL; } static uint32_t __clk_get_rate(FAR struct clk_s *clk) { uint32_t parent_rate; if (!clk) { return 0; } if (clk->rate == 0) { parent_rate = __clk_get_rate(clk->parent); clk->rate = clk_recalc(clk, parent_rate); } return clk->rate; } static uint32_t __clk_round_rate(FAR struct clk_s *clk, uint32_t rate) { uint32_t parent_rate = 0; FAR struct clk_s *parent; if (!clk) { return 0; } parent = clk->parent; if (parent) { parent_rate = __clk_get_rate(parent); } if (clk->ops->determine_rate) { return clk->ops->determine_rate(clk, rate, 
&parent_rate, &parent); } else if (clk->ops->round_rate) { return clk->ops->round_rate(clk, rate, &parent_rate); } else if (clk->flags & CLK_SET_RATE_PARENT) { return __clk_round_rate(clk->parent, rate); } else { return __clk_get_rate(clk); } } static int __clk_enable(FAR struct clk_s *clk) { int ret = 0; if (!clk) { return 0; } if (clk->enable_count == 0) { ret = __clk_enable(clk->parent); if (ret < 0) { return ret; } if (clk->ops->enable) { ret = clk->ops->enable(clk); if (ret < 0) { __clk_disable(clk->parent); return ret; } } } return ++clk->enable_count; } static int __clk_disable(FAR struct clk_s *clk) { if (!clk || clk->enable_count == 0) { return 0; } if (clk->flags & CLK_IS_CRITICAL) { return 0; } if (--clk->enable_count == 0) { if (clk->ops->disable) { clk->ops->disable(clk); } if (clk->parent) { __clk_disable(clk->parent); } } return clk->enable_count; } static void clk_init_parent(FAR struct clk_s *clk) { uint8_t index; if (!clk->num_parents) { return; } if (clk->num_parents == 1) { clk->parent = clk_get(clk->parent_names[0]); return; } if (!clk->ops->get_parent) { return; }; index = clk->ops->get_parent(clk); clk->parent = clk_get_parent_by_index(clk, index); } static int __clk_register(FAR struct clk_s *clk) { FAR struct clk_s *orphan; FAR struct clk_s *temp; irqstate_t flags; uint8_t i; if (!clk) { return -EINVAL; } if (clk->ops->set_rate && !((clk->ops->round_rate || clk->ops->determine_rate) && clk->ops->recalc_rate)) { return -EINVAL; } if (clk->ops->set_parent && !clk->ops->get_parent) { return -EINVAL; } if (clk->ops->set_rate_and_parent && !(clk->ops->set_parent && clk->ops->set_rate)) { return -EINVAL; } clk_init_parent(clk); flags = clk_list_lock(); if (clk->parent) { list_add_head(&clk->parent->children, &clk->node); } else if (!clk->num_parents) { list_add_head(&g_clk_root_list, &clk->node); } else { list_add_head(&g_clk_orphan_list, &clk->node); } list_for_every_entry_safe(&g_clk_orphan_list, orphan, temp, struct clk_s, node) { if 
(orphan->num_parents && orphan->ops->get_parent) { i = orphan->ops->get_parent(orphan); if (!strcmp(clk->name, orphan->parent_names[i])) { clk_reparent(orphan, clk); } } else if (orphan->num_parents) { for (i = 0; i < orphan->num_parents; i++) { if (!strcmp(clk->name, orphan->parent_names[i])) { clk_reparent(orphan, clk); break; } } } } clk_list_unlock(flags); return 0; } static void clk_disable_unused_subtree(FAR struct clk_s *clk) { FAR struct clk_s *child = NULL; list_for_every_entry(&clk->children, child, struct clk_s, node) { clk_disable_unused_subtree(child); } if (clk->enable_count) { return; } if (clk_is_enabled(clk)) { if (clk->flags & CLK_IS_CRITICAL) { __clk_enable(clk); } else if (clk->ops->disable) { clk->ops->disable(clk); } } } /**************************************************************************** * Public Functions ****************************************************************************/ void clk_disable_unused(void) { FAR struct clk_s *root_clk = NULL; irqstate_t flags; flags = clk_list_lock(); list_for_every_entry(&g_clk_root_list, root_clk, struct clk_s, node) { clk_disable_unused_subtree(root_clk); } list_for_every_entry(&g_clk_orphan_list, root_clk, struct clk_s, node) { clk_disable_unused_subtree(root_clk); } clk_list_unlock(flags); } int clk_disable(FAR struct clk_s *clk) { return __clk_disable(clk); } int clk_enable(FAR struct clk_s *clk) { return __clk_enable(clk); } uint32_t clk_round_rate(FAR struct clk_s *clk, uint32_t rate) { return __clk_round_rate(clk, rate); } int clk_set_rate(FAR struct clk_s *clk, uint32_t rate) { uint32_t parent_rate; FAR struct clk_s *top; int ret = 0; if (!clk) { return 0; } if (rate == __clk_get_rate(clk)) { goto out; } if ((clk->flags & CLK_SET_RATE_GATE) && clk->enable_count) { ret = -EBUSY; goto out; } top = clk_calc_new_rates(clk, rate); if (!top) { ret = -EINVAL; goto out; } if (top->new_parent) { parent_rate = __clk_get_rate(top->new_parent); } else if (top->parent) { parent_rate = 
__clk_get_rate(top->parent); } else { parent_rate = 0; } clk_change_rate(top, parent_rate); out: return ret; } int clk_set_rates(FAR const struct clk_rate_s *rates) { FAR struct clk_s *clk; int ret; if (!rates) { return 0; } while (rates->name) { clk = clk_get(rates->name); if (!clk) { return -EINVAL; } ret = clk_set_rate(clk, rates->rate); if (ret < 0) { return ret; } rates++; } return 0; } int clk_set_phase(FAR struct clk_s *clk, int degrees) { int ret = -EINVAL; if (!clk) { return 0; } degrees %= 360; if (degrees < 0) { degrees += 360; } if (clk->ops->set_phase) { ret = clk->ops->set_phase(clk, degrees); } return ret; } int clk_get_phase(FAR struct clk_s *clk) { if (!clk || !clk->ops->get_phase) { return 0; } return clk->ops->get_phase(clk); } FAR const char *clk_get_name(FAR const struct clk_s *clk) { return !clk ? NULL : clk->name; } int clk_is_enabled(FAR struct clk_s *clk) { if (!clk) { return 0; } /* when hardware .is_enabled missing, used software counter */ if (!clk->ops->is_enabled) { return clk->enable_count; } return clk->ops->is_enabled(clk); } FAR struct clk_s *clk_get(FAR const char *name) { FAR struct clk_s *root_clk = NULL; FAR struct clk_s *ret = NULL; irqstate_t flags; if (!name) { return NULL; } flags = clk_list_lock(); list_for_every_entry(&g_clk_root_list, root_clk, struct clk_s, node) { ret = __clk_lookup(name, root_clk); if (ret) { goto out; } } list_for_every_entry(&g_clk_orphan_list, root_clk, struct clk_s, node) { ret = __clk_lookup(name, root_clk); if (ret) { goto out; } } out: clk_list_unlock(flags); #ifdef CONFIG_CLK_RPMSG if (ret == NULL) { ret = clk_register_rpmsg(name, CLK_GET_RATE_NOCACHE); } #endif return ret; } int clk_set_parent(FAR struct clk_s *clk, FAR struct clk_s *parent) { FAR struct clk_s *old_parent = NULL; int ret = 0; int index = 0; if (!clk) { return 0; } if (clk->num_parents > 1 && !clk->ops->set_parent) { return -ENOSYS; } if (clk->parent == parent) { goto out; } if ((clk->flags & CLK_SET_PARENT_GATE) && 
clk->enable_count) { ret = -EBUSY; goto out; } if (parent) { index = clk_fetch_parent_index(clk, parent); if (index < 0) { ret = index; goto out; } } old_parent = clk->parent; if (clk->enable_count) { clk_enable(parent); clk_enable(clk); } clk_reparent(clk, parent); if (parent && clk->ops->set_parent) { ret = clk->ops->set_parent(clk, index); } if (ret < 0) { clk_reparent(clk, old_parent); if (clk->enable_count) { clk_disable(clk); clk_disable(parent); } goto out; } if (clk->enable_count) { clk_disable(clk); clk_disable(old_parent); } __clk_recalc_rate(clk); out: return ret; } FAR struct clk_s *clk_get_parent_by_index(FAR struct clk_s *clk, uint8_t index) { if (!clk || index >= clk->num_parents) { return NULL; } return clk_get(clk->parent_names[index]); } FAR struct clk_s *clk_get_parent(FAR struct clk_s *clk) { return !clk ? NULL : clk->parent; } uint32_t clk_get_rate(FAR struct clk_s *clk) { if (!clk) { return 0; } if (clk->flags & CLK_GET_RATE_NOCACHE) { __clk_recalc_rate(clk); } return __clk_get_rate(clk); } FAR struct clk_s *clk_register(FAR const char *name, FAR const char * const *parent_names, uint8_t num_parents, uint8_t flags, FAR const struct clk_ops_s *ops, FAR void *private_data, size_t private_size) { FAR struct clk_s *clk; size_t size; size_t off; size_t len; int i; off = len = sizeof(struct clk_s) + num_parents * sizeof(char *); if (!(flags & CLK_PARENT_NAME_IS_STATIC)) { for (i = 0; i < num_parents; i++) { len += strlen(parent_names[i]) + 1; } } len += private_size; if (flags & CLK_NAME_IS_STATIC) { clk = kmm_zalloc(len); if (!clk) { return NULL; } clk->name = name; } else { size = strlen(name) + 1; clk = kmm_zalloc(len + size); if (!clk) { return NULL; } clk->name = (char *)clk + len; strlcpy((char *)clk->name, name, size); } clk->ops = ops; clk->num_parents = num_parents; clk->flags = flags; clk->private_data = (char *)clk + off; memcpy(clk->private_data, private_data, private_size); off += private_size; for (i = 0; i < num_parents; i++) { if 
(flags & CLK_PARENT_NAME_IS_STATIC) { clk->parent_names[i] = parent_names[i]; } else { clk->parent_names[i] = (char *)clk + off; strlcpy((char *)clk->parent_names[i], parent_names[i], len - off); off += strlen(parent_names[i]) + 1; } } list_initialize(&clk->node); list_initialize(&clk->children); if (!__clk_register(clk)) { return clk; } kmm_free(clk); return NULL; }
abacf97342e8cfab70571f5602c023ab817847fa
2c73a693c2b3c162eae2ab94f649d8c4494878ba
/components/network/paho.mqtt.c/src/MQTTPacketOut.c
fdff8cf060099b16bfcc7061fda00a05953b1f50
[ "MIT" ]
permissive
openLuat/LuatOS
185e1e140aed908434168133571ddcafe98f4e12
4b29d5121ab4f7133630331e8502c526c7856897
refs/heads/master
2023-08-23T04:57:23.263539
2023-08-23T04:46:46
2023-08-23T04:46:46
230,403,844
378
93
MIT
2021-12-17T02:19:30
2019-12-27T08:29:19
C
UTF-8
C
false
false
13,080
c
MQTTPacketOut.c
/******************************************************************************* * Copyright (c) 2009, 2021 IBM Corp. and Ian Craggs * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v2.0 * and Eclipse Distribution License v1.0 which accompany this distribution. * * The Eclipse Public License is available at * https://www.eclipse.org/legal/epl-2.0/ * and the Eclipse Distribution License is available at * http://www.eclipse.org/org/documents/edl-v10.php. * * Contributors: * Ian Craggs - initial API and implementation and/or initial documentation * Ian Craggs, Allan Stockdill-Mander - SSL updates * Ian Craggs - MQTT 3.1.1 support * Rong Xiang, Ian Craggs - C++ compatibility * Ian Craggs - binary password and will payload * Ian Craggs - MQTT 5.0 support *******************************************************************************/ /** * @file * \brief functions to deal with reading and writing of MQTT packets from and to sockets * * Some other related functions are in the MQTTPacket module */ #include "MQTTPacketOut.h" #include "Log.h" #include "StackTrace.h" #include <string.h> #include <stdlib.h> #include "Heap.h" /** * Send an MQTT CONNECT packet down a socket for V5 or later * @param client a structure from which to get all the required values * @param MQTTVersion the MQTT version to connect with * @param connectProperties MQTT V5 properties for the connect packet * @param willProperties MQTT V5 properties for the will message, if any * @return the completion code (e.g. TCPSOCKET_COMPLETE) */ int MQTTPacket_send_connect(Clients* client, int MQTTVersion, MQTTProperties* connectProperties, MQTTProperties* willProperties) { char *buf, *ptr; Connect packet; int rc = SOCKET_ERROR, len; FUNC_ENTRY; packet.header.byte = 0; packet.header.bits.type = CONNECT; len = ((MQTTVersion == MQTTVERSION_3_1) ? 
12 : 10) + (int)strlen(client->clientID)+2; if (client->will) len += (int)strlen(client->will->topic)+2 + client->will->payloadlen+2; if (client->username) len += (int)strlen(client->username)+2; if (client->password) len += client->passwordlen+2; if (MQTTVersion >= MQTTVERSION_5) { len += MQTTProperties_len(connectProperties); if (client->will) len += MQTTProperties_len(willProperties); } ptr = buf = malloc(len); if (ptr == NULL) goto exit_nofree; if (MQTTVersion == MQTTVERSION_3_1) { writeUTF(&ptr, "MQIsdp"); writeChar(&ptr, (char)MQTTVERSION_3_1); } else if (MQTTVersion == MQTTVERSION_3_1_1 || MQTTVersion == MQTTVERSION_5) { writeUTF(&ptr, "MQTT"); writeChar(&ptr, (char)MQTTVersion); } else goto exit; packet.flags.all = 0; if (MQTTVersion >= MQTTVERSION_5) packet.flags.bits.cleanstart = client->cleanstart; else packet.flags.bits.cleanstart = client->cleansession; packet.flags.bits.will = (client->will) ? 1 : 0; if (packet.flags.bits.will) { packet.flags.bits.willQoS = client->will->qos; packet.flags.bits.willRetain = client->will->retained; } if (client->username) packet.flags.bits.username = 1; if (client->password) packet.flags.bits.password = 1; writeChar(&ptr, packet.flags.all); writeInt(&ptr, client->keepAliveInterval); if (MQTTVersion >= MQTTVERSION_5) MQTTProperties_write(&ptr, connectProperties); writeUTF(&ptr, client->clientID); if (client->will) { if (MQTTVersion >= MQTTVERSION_5) MQTTProperties_write(&ptr, willProperties); writeUTF(&ptr, client->will->topic); writeData(&ptr, client->will->payload, client->will->payloadlen); } if (client->username) writeUTF(&ptr, client->username); if (client->password) writeData(&ptr, client->password, client->passwordlen); rc = MQTTPacket_send(&client->net, packet.header, buf, len, 1, MQTTVersion); Log(LOG_PROTOCOL, 0, NULL, client->net.socket, client->clientID, MQTTVersion, client->cleansession, rc); exit: if (rc != TCPSOCKET_INTERRUPTED) free(buf); exit_nofree: FUNC_EXIT_RC(rc); return rc; } /** * Function used in 
the new packets table to create connack packets. * @param MQTTVersion MQTT 5 or less? * @param aHeader the MQTT header byte * @param data the rest of the packet * @param datalen the length of the rest of the packet * @return pointer to the packet structure */ void* MQTTPacket_connack(int MQTTVersion, unsigned char aHeader, char* data, size_t datalen) { Connack* pack = NULL; char* curdata = data; char* enddata = &data[datalen]; FUNC_ENTRY; if ((pack = malloc(sizeof(Connack))) == NULL) goto exit; pack->MQTTVersion = MQTTVersion; pack->header.byte = aHeader; if (datalen < 2) /* enough data for connect flags and reason code? */ { free(pack); pack = NULL; goto exit; } pack->flags.all = readChar(&curdata); /* connect flags */ pack->rc = readChar(&curdata); /* reason code */ if (MQTTVersion >= MQTTVERSION_5 && datalen > 2) { MQTTProperties props = MQTTProperties_initializer; pack->properties = props; if (MQTTProperties_read(&pack->properties, &curdata, enddata) != 1) { if (pack->properties.array) free(pack->properties.array); if (pack) free(pack); pack = NULL; /* signal protocol error */ goto exit; } } exit: FUNC_EXIT; return pack; } /** * Free allocated storage for a connack packet. * @param pack pointer to the connack packet structure */ void MQTTPacket_freeConnack(Connack* pack) { FUNC_ENTRY; if (pack->MQTTVersion >= MQTTVERSION_5) MQTTProperties_free(&pack->properties); free(pack); FUNC_EXIT; } /** * Send an MQTT PINGREQ packet down a socket. * @param socket the open socket to send the data to * @param clientID the string client identifier, only used for tracing * @return the completion code (e.g. 
TCPSOCKET_COMPLETE) */ int MQTTPacket_send_pingreq(networkHandles* net, const char* clientID) { Header header; int rc = 0; FUNC_ENTRY; header.byte = 0; header.bits.type = PINGREQ; rc = MQTTPacket_send(net, header, NULL, 0, 0, MQTTVERSION_3_1_1); Log(LOG_PROTOCOL, 20, NULL, net->socket, clientID, rc); FUNC_EXIT_RC(rc); return rc; } /** * Send an MQTT subscribe packet down a socket. * @param topics list of topics * @param qoss list of corresponding QoSs * @param msgid the MQTT message id to use * @param dup boolean - whether to set the MQTT DUP flag * @param socket the open socket to send the data to * @param clientID the string client identifier, only used for tracing * @return the completion code (e.g. TCPSOCKET_COMPLETE) */ int MQTTPacket_send_subscribe(List* topics, List* qoss, MQTTSubscribe_options* opts, MQTTProperties* props, int msgid, int dup, Clients* client) { Header header; char *data, *ptr; int rc = -1; ListElement *elem = NULL, *qosElem = NULL; int datalen, i = 0; FUNC_ENTRY; header.bits.type = SUBSCRIBE; header.bits.dup = dup; header.bits.qos = 1; header.bits.retain = 0; datalen = 2 + topics->count * 3; /* utf length + char qos == 3 */ while (ListNextElement(topics, &elem)) datalen += (int)strlen((char*)(elem->content)); if (client->MQTTVersion >= MQTTVERSION_5) datalen += MQTTProperties_len(props); ptr = data = malloc(datalen); if (ptr == NULL) goto exit; writeInt(&ptr, msgid); if (client->MQTTVersion >= MQTTVERSION_5) MQTTProperties_write(&ptr, props); elem = NULL; while (ListNextElement(topics, &elem)) { char subopts = 0; ListNextElement(qoss, &qosElem); writeUTF(&ptr, (char*)(elem->content)); subopts = *(int*)(qosElem->content); if (client->MQTTVersion >= MQTTVERSION_5 && opts != NULL) { subopts |= (opts[i].noLocal << 2); /* 1 bit */ subopts |= (opts[i].retainAsPublished << 3); /* 1 bit */ subopts |= (opts[i].retainHandling << 4); /* 2 bits */ } writeChar(&ptr, subopts); ++i; } rc = MQTTPacket_send(&client->net, header, data, datalen, 1, 
client->MQTTVersion); Log(LOG_PROTOCOL, 22, NULL, client->net.socket, client->clientID, msgid, rc); if (rc != TCPSOCKET_INTERRUPTED) free(data); exit: FUNC_EXIT_RC(rc); return rc; } /** * Function used in the new packets table to create suback packets. * @param MQTTVersion the version of MQTT * @param aHeader the MQTT header byte * @param data the rest of the packet * @param datalen the length of the rest of the packet * @return pointer to the packet structure */ void* MQTTPacket_suback(int MQTTVersion, unsigned char aHeader, char* data, size_t datalen) { Suback* pack = NULL; char* curdata = data; char* enddata = &data[datalen]; FUNC_ENTRY; if ((pack = malloc(sizeof(Suback))) == NULL) goto exit; pack->MQTTVersion = MQTTVersion; pack->header.byte = aHeader; if (enddata - curdata < 2) /* Is there enough data to read the msgid? */ { free(pack); pack = NULL; goto exit; } pack->msgId = readInt(&curdata); if (MQTTVersion >= MQTTVERSION_5) { MQTTProperties props = MQTTProperties_initializer; pack->properties = props; if (MQTTProperties_read(&pack->properties, &curdata, enddata) != 1) { if (pack->properties.array) free(pack->properties.array); if (pack) free(pack); pack = NULL; /* signal protocol error */ goto exit; } } pack->qoss = ListInitialize(); while ((size_t)(curdata - data) < datalen) { unsigned int* newint; newint = malloc(sizeof(unsigned int)); if (newint == NULL) { if (pack->properties.array) free(pack->properties.array); if (pack) free(pack); pack = NULL; /* signal protocol error */ goto exit; } *newint = (unsigned int)readChar(&curdata); ListAppend(pack->qoss, newint, sizeof(unsigned int)); } if (pack->qoss->count == 0) { if (pack->properties.array) free(pack->properties.array); ListFree(pack->qoss); free(pack); pack = NULL; } exit: FUNC_EXIT; return pack; } /** * Send an MQTT unsubscribe packet down a socket. 
* @param topics list of topics * @param msgid the MQTT message id to use * @param dup boolean - whether to set the MQTT DUP flag * @param socket the open socket to send the data to * @param clientID the string client identifier, only used for tracing * @return the completion code (e.g. TCPSOCKET_COMPLETE) */ int MQTTPacket_send_unsubscribe(List* topics, MQTTProperties* props, int msgid, int dup, Clients* client) { Header header; char *data, *ptr; int rc = SOCKET_ERROR; ListElement *elem = NULL; int datalen; FUNC_ENTRY; header.bits.type = UNSUBSCRIBE; header.bits.dup = dup; header.bits.qos = 1; header.bits.retain = 0; datalen = 2 + topics->count * 2; /* utf length == 2 */ while (ListNextElement(topics, &elem)) datalen += (int)strlen((char*)(elem->content)); if (client->MQTTVersion >= MQTTVERSION_5) datalen += MQTTProperties_len(props); ptr = data = malloc(datalen); if (ptr == NULL) goto exit; writeInt(&ptr, msgid); if (client->MQTTVersion >= MQTTVERSION_5) MQTTProperties_write(&ptr, props); elem = NULL; while (ListNextElement(topics, &elem)) writeUTF(&ptr, (char*)(elem->content)); rc = MQTTPacket_send(&client->net, header, data, datalen, 1, client->MQTTVersion); Log(LOG_PROTOCOL, 25, NULL, client->net.socket, client->clientID, msgid, rc); if (rc != TCPSOCKET_INTERRUPTED) free(data); exit: FUNC_EXIT_RC(rc); return rc; } /** * Function used in the new packets table to create unsuback packets. * @param MQTTVersion the version of MQTT * @param aHeader the MQTT header byte * @param data the rest of the packet * @param datalen the length of the rest of the packet * @return pointer to the packet structure */ void* MQTTPacket_unsuback(int MQTTVersion, unsigned char aHeader, char* data, size_t datalen) { Unsuback* pack = NULL; char* curdata = data; char* enddata = &data[datalen]; FUNC_ENTRY; if ((pack = malloc(sizeof(Unsuback))) == NULL) goto exit; pack->MQTTVersion = MQTTVersion; pack->header.byte = aHeader; if (enddata - curdata < 2) /* Is there enough data? 
*/ { free(pack); pack = NULL; goto exit; } pack->msgId = readInt(&curdata); pack->reasonCodes = NULL; if (MQTTVersion >= MQTTVERSION_5) { MQTTProperties props = MQTTProperties_initializer; pack->properties = props; if (MQTTProperties_read(&pack->properties, &curdata, enddata) != 1) { if (pack->properties.array) free(pack->properties.array); if (pack) free(pack); pack = NULL; /* signal protocol error */ goto exit; } pack->reasonCodes = ListInitialize(); while ((size_t)(curdata - data) < datalen) { enum MQTTReasonCodes* newrc; newrc = malloc(sizeof(enum MQTTReasonCodes)); if (newrc == NULL) { if (pack->properties.array) free(pack->properties.array); if (pack) free(pack); pack = NULL; /* signal protocol error */ goto exit; } *newrc = (enum MQTTReasonCodes)readChar(&curdata); ListAppend(pack->reasonCodes, newrc, sizeof(enum MQTTReasonCodes)); } if (pack->reasonCodes->count == 0) { ListFree(pack->reasonCodes); if (pack->properties.array) free(pack->properties.array); if (pack) free(pack); pack = NULL; } } exit: FUNC_EXIT; return pack; }
81139184ef40b93d70e21171efe96098302bbf9f
99bdb3251fecee538e0630f15f6574054dfc1468
/bsp/imx6sx/iMX6_Platform_SDK/sdk/common/usb_stack/Device/app/msd/disk.c
5e0b25ad85cef01d13404eabaeb574ff59c86348
[ "Apache-2.0", "Zlib", "LicenseRef-scancode-proprietary-license", "MIT", "BSD-3-Clause", "X11", "BSD-4-Clause-UC", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-warranty-disclaimer" ]
permissive
RT-Thread/rt-thread
03a7c52c2aeb1b06a544143b0e803d72f47d1ece
3602f891211904a27dcbd51e5ba72fefce7326b2
refs/heads/master
2023-09-01T04:10:20.295801
2023-08-31T16:20:55
2023-08-31T16:20:55
7,408,108
9,599
5,805
Apache-2.0
2023-09-14T13:37:26
2013-01-02T14:49:21
C
UTF-8
C
false
false
13,067
c
disk.c
/****************************************************************************** * * Freescale Semiconductor Inc. * (c) Copyright 2004-2010 Freescale Semiconductor, Inc. * ALL RIGHTS RESERVED. * **************************************************************************//*! * * @file disk.c * * @author * * @version * * @date May-08-2009 * * @brief RAM Disk has been emulated via this Mass Storage Demo *****************************************************************************/ /****************************************************************************** * Includes *****************************************************************************/ #include "types.h" /* User Defined Data Types */ #include "hidef.h" /* for EnableInterrupts macro */ #include "derivative.h" /* include peripheral declarations */ #include "usb_msc.h" /* USB MSC Class Header File */ #include "disk.h" /* Disk Application Header File */ #include "usb_class.h" #ifdef __MCF52xxx_H__ #include "Wdt_cfv2.h" #endif #if (defined _MCF51MM256_H) || (defined _MCF51JE256_H) #include "exceptions.h" #endif /* skip the inclusion in dependency stage */ #ifndef __NO_SETJMP #include <stdio.h> #endif #include <stdlib.h> #include <string.h> /***************************************************************************** * Constant and Macro's - None *****************************************************************************/ /***************************************************************************** * Global Functions Prototypes *****************************************************************************/ void TestApp_Init(void); extern void Watchdog_Reset(void); /**************************************************************************** * Global Variables ****************************************************************************/ #ifdef HIGH_SPEED_DEVICE uint_8 msd_buff[BULK_OUT_ENDP_PACKET_SIZE>>1]; #endif /* Add all the variables needed for disk.c to this structure */ DISK_GLOBAL_VARIABLE_STRUCT g_disk; 
/***************************************************************************** * Local Types - None *****************************************************************************/ /***************************************************************************** * Local Functions Prototypes *****************************************************************************/ void USB_App_Callback(uint_8 controller_ID, uint_8 event_type, void* val); void MSD_Event_Callback(uint_8 controller_ID, uint_8 event_type, void* val); void Disk_App(void); /***************************************************************************** * Local Variables *****************************************************************************/ /***************************************************************************** * Local Functions *****************************************************************************/ /****************************************************************************** * * @name Disk_App * * @brief * * @param None * * @return None * *****************************************************************************/ void Disk_App(void) { /* User Code */ return; } /****************************************************************************** * * @name USB_App_Callback * * @brief This function handles the callback * * @param controller_ID : To Identify the controller * @param event_type : value of the event * @param val : gives the configuration value * * @return None * *****************************************************************************/ void USB_App_Callback(uint_8 controller_ID, uint_8 event_type, void* val) { UNUSED (controller_ID) UNUSED (val) if(event_type == USB_APP_BUS_RESET) { g_disk.start_app=FALSE; } else if(event_type == USB_APP_ENUM_COMPLETE) { #if HIGH_SPEED_DEVICE _usb_device_recv_data(&controller_ID, BULK_OUT_ENDPOINT, (uint_8_ptr)msd_buff, BULK_OUT_ENDP_PACKET_SIZE); #endif // HIGH_SPEED_DEVICE g_disk.start_app=TRUE; } else if(event_type == USB_APP_ERROR) { 
/* add user code for error handling */ } else if(event_type == USB_APP_SEND_COMPLETE){ #if HIGH_SPEED_DEVICE _usb_device_recv_data(&controller_ID, BULK_OUT_ENDPOINT, (uint_8_ptr)msd_buff, BULK_OUT_ENDP_PACKET_SIZE); #endif // HIGH_SPEED_DEVICE } return; } /****************************************************************************** * * @name MSD_Event_Callback * * @brief This function handles the callback * * @param controller_ID : To Identify the controller * @param event_type : value of the event * @param val : gives the configuration value * * @return None * *****************************************************************************/ void MSD_Event_Callback(uint_8 controller_ID, uint_8 event_type, void* val) { PTR_LBA_APP_STRUCT lba_data_ptr; uint_8_ptr prevent_removal_ptr, load_eject_start_ptr; PTR_DEVICE_LBA_INFO_STRUCT device_lba_info_ptr; UNUSED (controller_ID) switch(event_type) { case USB_APP_DATA_RECEIVED : break; case USB_APP_SEND_COMPLETE : break; case USB_MSC_START_STOP_EJECT_MEDIA : load_eject_start_ptr = (uint_8_ptr)val; /* Code to be added by user for starting, stopping or ejecting the disk drive. e.g. 
starting/stopping the motor in case of CD/DVD*/ break; case USB_MSC_DEVICE_READ_REQUEST : /* copy data from storage device before sending it on USB Bus (Called before calling send_data on BULK IN endpoints)*/ lba_data_ptr = (PTR_LBA_APP_STRUCT)val; /* read data from mass storage device to driver buffer */ #if RAM_DISK_APP USB_memcopy(g_disk.storage_disk + lba_data_ptr->offset, lba_data_ptr->buff_ptr, lba_data_ptr->size); #elif SD_CARD_APP SD_Read_Block(lba_data_ptr); #endif break; case USB_MSC_DEVICE_WRITE_REQUEST : /* copy data from USb buffer to Storage device (Called before after recv_data on BULK OUT endpoints)*/ lba_data_ptr = (PTR_LBA_APP_STRUCT)val; /* read data from driver buffer to mass storage device */ #if RAM_DISK_APP USB_memcopy(lba_data_ptr->buff_ptr, g_disk.storage_disk + lba_data_ptr->offset, lba_data_ptr->size); #elif SD_CARD_APP SD_Write_Block(lba_data_ptr); #endif break; case USB_MSC_DEVICE_FORMAT_COMPLETE : break; case USB_MSC_DEVICE_REMOVAL_REQUEST : prevent_removal_ptr = (uint_8_ptr) val; if(SUPPORT_DISK_LOCKING_MECHANISM) { g_disk.disk_lock = *prevent_removal_ptr; } else if((!SUPPORT_DISK_LOCKING_MECHANISM)&&(!(*prevent_removal_ptr))) { /*there is no support for disk locking and removal of medium is enabled*/ /* code to be added here for this condition, if required */ } break; case USB_MSC_DEVICE_GET_INFO : device_lba_info_ptr = (PTR_DEVICE_LBA_INFO_STRUCT)val; #if RAM_DISK_APP device_lba_info_ptr->total_lba_device_supports = TOTAL_LOGICAL_BLOCKS_ADDRESS; device_lba_info_ptr->length_of_each_lba_of_device = LENGTH_OF_EACH_LBA; #elif SD_CARD_APP SD_Card_Info(&device_lba_info_ptr->total_lba_device_supports, &device_lba_info_ptr->length_of_each_lba_of_device); #endif device_lba_info_ptr->num_lun_supported = LOGICAL_UNIT_SUPPORTED; break; default : break; } return; } /****************************************************************************** * * @name TestApp_Init * * @brief This function is the entry for mouse (or other usuage) * * @param 
None * * @return None ** *****************************************************************************/ void TestApp_Init(void) { uint_8 error; /* initialize the Global Variable Structure */ USB_memzero(&g_disk, sizeof(DISK_GLOBAL_VARIABLE_STRUCT)); g_disk.app_controller_ID = USB_CONTROLLER_ID; DisableInterrupts; #if (defined _MCF51MM256_H) || (defined _MCF51JE256_H) usb_int_dis(); #endif #if SD_CARD_APP #if(defined(__MCF52259_H__) || defined(__MCF52221_H__)) /* PAN0 is configured to be GPIO */ MCF_GPIO_PANPAR &= ~(MCF_GPIO_PANPAR_PANPAR0 | MCF_GPIO_PANPAR_PANPAR1 | MCF_GPIO_PANPAR_PANPAR2); /* PAN0 is input */ MCF_GPIO_DDRAN &= ~MCF_GPIO_DDRAN_DDRAN0; _SD_DE; /* Card detection */ _SD_WR; /* Write protection */ #else #ifdef __MCF52277_H__ /* IRQ1 is configured as input */ MCF_PAD_PAR_IRQ = 0; _SD_DE; /* Card detection */ _SD_WR; /* Write protect */ #else #if (defined __MK_xxx_H__) #if USE_SPI_PROTOCOL #if (defined MCU_MK40N512VMD100)||(defined MCU_MK53N512CMD100) SIM_SCGC5 |= SIM_SCGC5_PORTB_MASK |SIM_SCGC5_PORTE_MASK; GPIOB_PDIR |= 1 << 8; PORTB_PCR8 |= PORT_PCR_MUX(1); GPIOB_PDDR &= ~((uint_32)1 << 8); PORTB_PCR8 |= PORT_PCR_PE_MASK|PORT_PCR_PS_MASK; #elif (defined MCU_MK60N512VMD100) || (defined MCU_MK70F12) GPIOA_PDIR |= 1 << 27; PORTA_PCR27 |= PORT_PCR_MUX(1); GPIOA_PDDR &= ~((uint_32)1 << 27); PORTA_PCR27 |= PORT_PCR_PE_MASK|PORT_PCR_PS_MASK; #elif defined MCU_MKL25Z4 GPIOC_PDIR |= 1 << 5; PORTC_PCR5 |= PORT_PCR_MUX(1); GPIOC_PDDR &= ~((uint_32)1 << 5); PORTC_PCR5 |= PORT_PCR_PE_MASK|PORT_PCR_PS_MASK; #else GPIOE_PDIR |= 1 << 5; PORTE_PCR5 |= PORT_PCR_MUX(1); GPIOE_PDDR &= ~((uint_32)1 << 5); PORTE_PCR5 |= PORT_PCR_PE_MASK|PORT_PCR_PS_MASK; #endif #elif USE_SDHC_PROTOCOL #if (defined MCU_MK40N512VMD100) SIM_SCGC5 |= SIM_SCGC5_PORTA_MASK |SIM_SCGC5_PORTE_MASK; GPIOA_PDIR |= 1 << 16; PORTA_PCR16 |= PORT_PCR_MUX(1); GPIOA_PDDR &= ~((uint_32)1 << 16); PORTA_PCR16 |= PORT_PCR_PE_MASK|PORT_PCR_PS_MASK; #else #if (defined MCU_MK53N512CMD100) SIM_SCGC5 |= 
SIM_SCGC5_PORTC_MASK |SIM_SCGC5_PORTE_MASK; SIM_SCGC3 |= SIM_SCGC3_SDHC_MASK; #endif GPIOE_PDIR |= 1 << 28; PORTE_PCR28 |= PORT_PCR_MUX(1); GPIOE_PDDR &= ~((uint_32)1 << 28); PORTE_PCR28 |= PORT_PCR_PE_MASK|PORT_PCR_PS_MASK; #endif // MCU_MK40N512VMD100 #if (defined MCU_MK53N512CMD100) GPIOC_PDIR |= 1 << 9; PORTC_PCR9 |= PORT_PCR_MUX(1); GPIOC_PDDR &= ~((uint_32)1 << 9); PORTC_PCR9 |= PORT_PCR_PE_MASK|PORT_PCR_PS_MASK; #else GPIOE_PDIR |= 1 << 27; PORTE_PCR27 |= PORT_PCR_MUX(1); GPIOE_PDDR &= ~((uint_32)1 << 27); PORTE_PCR27 |= PORT_PCR_PE_MASK|PORT_PCR_PS_MASK; #endif // MCU_MK53N512CMD100 #endif // USE_SPI_PROTOCOL _SD_DE; /* Card detection */ _SD_WR; /* Write protect */ #elif defined(MCU_mcf51jf128) SIM_SCGC6 |= SIM_SCGC6_PORTC_MASK; PTC_DD &=~0x20; /* Clear PTF4 to input for SD_DE*/ PCTLC_PUE |=0x20;/* Enable pull up resistor on PTC4 pin*/ MXC_PTCPF2 &=~MXC_PTCPF2_C4_MASK; MXC_PTCPF2 |= MXC_PTCPF2_C4(1);/* Set GPIO funtionality to PTC4 pin for SD card detecting*/ _SD_DE; #else PTGDD_PTGDD0 = 0; /* PTG0 is input*/ PTGPE_PTGPE0 = 1; /* internal pullup for PTG0 */ _SD_DE = 0; /* Card detection */ _SD_WR = 0; /* Write protect */ #endif #endif // __MCF52277_H__ #endif #if (defined __MK_xxx_H__) || defined(MCU_mcf51jf128) while(SD_DE&kSD_Desert) { Watchdog_Reset(); } /* SD Card inserted */ #else while(SD_DE == kSD_Desert) { Watchdog_Reset(); } /* SD Card inserted */ #endif if(!SD_Init()) return; /* Initialize SD_CARD and SPI Interface */ #if (USE_SPI_PROTOCOL && !(defined _MCF51MM256_H) && !defined(MCU_mcf51jf128)) (void)SD_ReadCSD(); #endif #endif // SD_CARD_APP /* Initialize the USB interface */ error = USB_Class_MSC_Init(g_disk.app_controller_ID, USB_App_Callback,NULL, MSD_Event_Callback); #ifdef SERIAL_DEBUG if(error!= USB_OK) printf_error("ERROR(%d)", error) else printf_ok("OK\n") #endif EnableInterrupts; #if (defined _MCF51MM256_H) || (defined _MCF51JE256_H) usb_int_en(); #endif } /****************************************************************************** * 
* @name TestApp_Task * * @brief Application task function. It is called from the main loop * * @param None * * @return None * ***************************************************************************** * Application task function. It is called from the main loop *****************************************************************************/ void TestApp_Task(void) { /* call the periodic task function */ USB_MSC_Periodic_Task(); /*check whether enumeration is complete or not */ if(g_disk.start_app==TRUE) { Disk_App(); } } /* EOF */
391402e3118cfc5baba9b644b2057d8f545da7e2
88ae8695987ada722184307301e221e1ba3cc2fa
/third_party/ffmpeg/libavfilter/vf_noise.c
8ed12f74098231006f10078a34858fe519fdfdf5
[ "Apache-2.0", "LGPL-2.0-or-later", "MIT", "GPL-1.0-or-later", "BSD-3-Clause", "LGPL-2.1-only", "LGPL-3.0-only", "GPL-2.0-only", "LGPL-2.1-or-later", "GPL-3.0-or-later", "LGPL-3.0-or-later", "IJG", "LicenseRef-scancode-other-permissive", "GPL-2.0-or-later", "GPL-3.0-only" ]
permissive
iridium-browser/iridium-browser
71d9c5ff76e014e6900b825f67389ab0ccd01329
5ee297f53dc7f8e70183031cff62f37b0f19d25f
refs/heads/master
2023-08-03T16:44:16.844552
2023-07-20T15:17:00
2023-07-23T16:09:30
220,016,632
341
40
BSD-3-Clause
2021-08-13T13:54:45
2019-11-06T14:32:31
null
UTF-8
C
false
false
11,103
c
vf_noise.c
/* * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at> * Copyright (c) 2013 Paul B Mahol * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * noise generator */ #include "libavutil/opt.h" #include "libavutil/imgutils.h" #include "libavutil/lfg.h" #include "libavutil/parseutils.h" #include "libavutil/pixdesc.h" #include "avfilter.h" #include "formats.h" #include "internal.h" #include "vf_noise.h" #include "video.h" typedef struct ThreadData { AVFrame *in, *out; } ThreadData; #define OFFSET(x) offsetof(NoiseContext, x) #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM #define NOISE_PARAMS(name, x, param) \ {#name"_seed", "set component #"#x" noise seed", OFFSET(param.seed), AV_OPT_TYPE_INT, {.i64=-1}, -1, INT_MAX, FLAGS}, \ {#name"_strength", "set component #"#x" strength", OFFSET(param.strength), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, FLAGS}, \ {#name"s", "set component #"#x" strength", OFFSET(param.strength), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, FLAGS}, \ {#name"_flags", "set component #"#x" flags", OFFSET(param.flags), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, 31, FLAGS, #name"_flags"}, \ {#name"f", "set component #"#x" flags", OFFSET(param.flags), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, 31, FLAGS, #name"_flags"}, \ {"a", "averaged noise", 0, AV_OPT_TYPE_CONST, 
{.i64=NOISE_AVERAGED}, 0, 0, FLAGS, #name"_flags"}, \ {"p", "(semi)regular pattern", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_PATTERN}, 0, 0, FLAGS, #name"_flags"}, \ {"t", "temporal noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_TEMPORAL}, 0, 0, FLAGS, #name"_flags"}, \ {"u", "uniform noise", 0, AV_OPT_TYPE_CONST, {.i64=NOISE_UNIFORM}, 0, 0, FLAGS, #name"_flags"}, static const AVOption noise_options[] = { NOISE_PARAMS(all, 0, all) NOISE_PARAMS(c0, 0, param[0]) NOISE_PARAMS(c1, 1, param[1]) NOISE_PARAMS(c2, 2, param[2]) NOISE_PARAMS(c3, 3, param[3]) {NULL} }; AVFILTER_DEFINE_CLASS(noise); static const int8_t patt[4] = { -1, 0, 1, 0 }; #define RAND_N(range) ((int) ((double) range * av_lfg_get(lfg) / (UINT_MAX + 1.0))) static av_cold int init_noise(NoiseContext *n, int comp) { int8_t *noise = av_malloc(MAX_NOISE * sizeof(int8_t)); FilterParams *fp = &n->param[comp]; AVLFG *lfg = &n->param[comp].lfg; int strength = fp->strength; int flags = fp->flags; int i, j; if (!noise) return AVERROR(ENOMEM); av_lfg_init(&fp->lfg, fp->seed + comp*31415U); for (i = 0, j = 0; i < MAX_NOISE; i++, j++) { if (flags & NOISE_UNIFORM) { if (flags & NOISE_AVERAGED) { if (flags & NOISE_PATTERN) { noise[i] = (RAND_N(strength) - strength / 2) / 6 + patt[j % 4] * strength * 0.25 / 3; } else { noise[i] = (RAND_N(strength) - strength / 2) / 3; } } else { if (flags & NOISE_PATTERN) { noise[i] = (RAND_N(strength) - strength / 2) / 2 + patt[j % 4] * strength * 0.25; } else { noise[i] = RAND_N(strength) - strength / 2; } } } else { double x1, x2, w, y1; do { x1 = 2.0 * av_lfg_get(lfg) / (float)UINT_MAX - 1.0; x2 = 2.0 * av_lfg_get(lfg) / (float)UINT_MAX - 1.0; w = x1 * x1 + x2 * x2; } while (w >= 1.0); w = sqrt((-2.0 * log(w)) / w); y1 = x1 * w; y1 *= strength / sqrt(3.0); if (flags & NOISE_PATTERN) { y1 /= 2; y1 += patt[j % 4] * strength * 0.35; } y1 = av_clipf(y1, -128, 127); if (flags & NOISE_AVERAGED) y1 /= 3.0; noise[i] = (int)y1; } if (RAND_N(6) == 0) j--; } for (i = 0; i < MAX_RES; i++) for (j = 0; j < 
3; j++) fp->prev_shift[i][j] = noise + (av_lfg_get(lfg) & (MAX_SHIFT - 1)); fp->noise = noise; return 0; } static int query_formats(AVFilterContext *ctx) { AVFilterFormats *formats = NULL; int fmt, ret; for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt); if (desc->flags & AV_PIX_FMT_FLAG_PLANAR && !(desc->comp[0].depth & 7) && (ret = ff_add_format(&formats, fmt)) < 0) return ret; } return ff_set_common_formats(ctx, formats); } static int config_input(AVFilterLink *inlink) { NoiseContext *n = inlink->dst->priv; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); int ret; n->nb_planes = av_pix_fmt_count_planes(inlink->format); if ((ret = av_image_fill_linesizes(n->bytewidth, inlink->format, inlink->w)) < 0) return ret; n->height[1] = n->height[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h); n->height[0] = n->height[3] = inlink->h; return 0; } void ff_line_noise_c(uint8_t *dst, const uint8_t *src, const int8_t *noise, int len, int shift) { int i; noise += shift; for (i = 0; i < len; i++) { int v = src[i] + noise[i]; dst[i] = av_clip_uint8(v); } } void ff_line_noise_avg_c(uint8_t *dst, const uint8_t *src, int len, const int8_t * const *shift) { int i; const int8_t *src2 = (const int8_t*)src; for (i = 0; i < len; i++) { const int n = shift[0][i] + shift[1][i] + shift[2][i]; dst[i] = src2[i] + ((n * src2[i]) >> 7); } } static void noise(uint8_t *dst, const uint8_t *src, int dst_linesize, int src_linesize, int width, int start, int end, NoiseContext *n, int comp) { FilterParams *p = &n->param[comp]; int8_t *noise = p->noise; const int flags = p->flags; int y; if (!noise) { if (dst != src) av_image_copy_plane(dst, dst_linesize, src, src_linesize, width, end - start); return; } for (y = start; y < end; y++) { const int ix = y & (MAX_RES - 1); int x; for (x=0; x < width; x+= MAX_RES) { int w = FFMIN(width - x, MAX_RES); int shift = p->rand_shift[ix]; if (flags & NOISE_AVERAGED) { 
n->line_noise_avg(dst + x, src + x, w, (const int8_t**)p->prev_shift[ix]); p->prev_shift[ix][shift & 3] = noise + shift; } else { n->line_noise(dst + x, src + x, noise, w, shift); } } dst += dst_linesize; src += src_linesize; } } static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) { NoiseContext *s = ctx->priv; ThreadData *td = arg; int plane; for (plane = 0; plane < s->nb_planes; plane++) { const int height = s->height[plane]; const int start = (height * jobnr ) / nb_jobs; const int end = (height * (jobnr+1)) / nb_jobs; noise(td->out->data[plane] + start * td->out->linesize[plane], td->in->data[plane] + start * td->in->linesize[plane], td->out->linesize[plane], td->in->linesize[plane], s->bytewidth[plane], start, end, s, plane); } return 0; } static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref) { AVFilterContext *ctx = inlink->dst; AVFilterLink *outlink = ctx->outputs[0]; NoiseContext *n = ctx->priv; ThreadData td; AVFrame *out; int comp, i; if (av_frame_is_writable(inpicref)) { out = inpicref; } else { out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&inpicref); return AVERROR(ENOMEM); } av_frame_copy_props(out, inpicref); } for (comp = 0; comp < 4; comp++) { FilterParams *fp = &n->param[comp]; if ((!fp->rand_shift_init || (fp->flags & NOISE_TEMPORAL)) && fp->strength) { for (i = 0; i < MAX_RES; i++) { fp->rand_shift[i] = av_lfg_get(&fp->lfg) & (MAX_SHIFT - 1); } fp->rand_shift_init = 1; } } td.in = inpicref; td.out = out; ff_filter_execute(ctx, filter_slice, &td, NULL, FFMIN(n->height[0], ff_filter_get_nb_threads(ctx))); emms_c(); if (inpicref != out) av_frame_free(&inpicref); return ff_filter_frame(outlink, out); } static av_cold int init(AVFilterContext *ctx) { NoiseContext *n = ctx->priv; int ret, i; for (i = 0; i < 4; i++) { if (n->all.seed >= 0) n->param[i].seed = n->all.seed; else n->param[i].seed = 123457; if (n->all.strength) n->param[i].strength = n->all.strength; if 
(n->all.flags) n->param[i].flags = n->all.flags; } for (i = 0; i < 4; i++) { if (n->param[i].strength && ((ret = init_noise(n, i)) < 0)) return ret; } n->line_noise = ff_line_noise_c; n->line_noise_avg = ff_line_noise_avg_c; #if ARCH_X86 ff_noise_init_x86(n); #endif return 0; } static av_cold void uninit(AVFilterContext *ctx) { NoiseContext *n = ctx->priv; int i; for (i = 0; i < 4; i++) av_freep(&n->param[i].noise); } static const AVFilterPad noise_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .filter_frame = filter_frame, .config_props = config_input, }, }; static const AVFilterPad noise_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, }, }; const AVFilter ff_vf_noise = { .name = "noise", .description = NULL_IF_CONFIG_SMALL("Add noise."), .priv_size = sizeof(NoiseContext), .init = init, .uninit = uninit, FILTER_INPUTS(noise_inputs), FILTER_OUTPUTS(noise_outputs), FILTER_QUERY_FUNC(query_formats), .priv_class = &noise_class, .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS, };
e7196cbe00b1dfa2694820b8cd25b0c9de74247e
de21f9075f55640514c29ef0f1fe3f0690845764
/regression/cprover/pointers/pointers4.c
05e396d25bde2e65fb28d77da7b7378a2213122a
[ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "BSD-4-Clause" ]
permissive
diffblue/cbmc
975a074ac445febb3b5715f8792beb545522dc18
decd2839c2f51a54b2ad0f3e89fdc1b4bf78cd16
refs/heads/develop
2023-08-31T05:52:05.342195
2023-08-30T13:31:51
2023-08-30T13:31:51
51,877,056
589
309
NOASSERTION
2023-09-14T18:49:17
2016-02-16T23:03:52
C++
UTF-8
C
false
false
184
c
pointers4.c
int x; int main() { int *p; __CPROVER_assume(*p == 10); p = &x; // not provable, since p may have pointed elsewhere __CPROVER_assert(*p == 10, "property 1"); return 0; }
caa712e3c95915cf6d2f2b8456cc50801e989eca
f25de16d19fb949ae09a7a8c8467f23a664fcfc6
/Pods/Headers/Private/AVOSCloud/AVHTTPRequestOperation.h
ee527b74f9ed79d2e0774ff57fe608e2b4880cdf
[]
no_license
pthtc/Loveprogress
ebe69a41484984b27137cb9e85c346e70d469fb6
c6909932176cc11015c92f1020aa11c478569734
refs/heads/master
2021-05-11T01:36:45.706475
2018-02-11T08:14:52
2018-02-11T08:14:52
118,331,958
128
23
null
null
null
null
UTF-8
C
false
false
82
h
AVHTTPRequestOperation.h
../../../AVOSCloud/AVOS/AVOSCloud/ThirdParty/AFNetworking/AVHTTPRequestOperation.h
1fa2ef6b233ec67e4c991c2efe4755f1ba0d01d2
0744dcc5394cebf57ebcba343747af6871b67017
/external/iotivity/iotivity_1.2-rel/resource/csdk/stack/include/oickeepalive.h
d813665d882239f1f38f7bacc406fbffb9cac268
[ "MIT", "Apache-2.0", "GPL-2.0-only", "BSD-3-Clause" ]
permissive
Samsung/TizenRT
96abf62f1853f61fcf91ff14671a5e0c6ca48fdb
1a5c2e00a4b1bbf4c505bbf5cc6a8259e926f686
refs/heads/master
2023-08-31T08:59:33.327998
2023-08-08T06:09:20
2023-08-31T04:38:20
82,517,252
590
719
Apache-2.0
2023-09-14T06:54:49
2017-02-20T04:38:30
C
UTF-8
C
false
false
3,613
h
oickeepalive.h
/* **************************************************************** * * Copyright 2016 Samsung Electronics All Rights Reserved. * * * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************/ /** * @file * This file contains the APIs for KeepAlive Mechanism. * In order to ensure that the connection between an OIC Devices, * when using CoAP over TCP, OIC Device should send application layer * KeepAlive messages. */ #ifndef KEEP_ALIVE_H_ #define KEEP_ALIVE_H_ #include "octypes.h" #ifdef __cplusplus extern "C" { #endif /** * This function discovers on a /oic/ping resource. * * @param handle To refer to the request sent out on behalf of * calling this API. This handle can be used to cancel this operation * via the OCCancel API. * @note: This reference is handled internally, and should not be free'd by * the consumer. A NULL handle is permitted in the event where the caller * has no use for the return value. * @param remoteAddr The target device address to discovery a resource. * @param cbData Asynchronous callback function that is invoked by the stack when * discovery or resource interaction is received. The discovery could be * related to filtered/scoped/particular resource. The callback is * generated for each response received. * * @return ::OC_STACK_OK on success, some other value upon failure. 
*/ OCStackResult OCFindKeepAliveResource(OCDoHandle *handle, const char *remoteAddr, OCCallbackData *cbData); /** * Send ping message to remote endpoint. * * @param handle To refer to the request sent out on behalf of * calling this API. This handle can be used to cancel this operation * via the OCCancel API. * @note: This reference is handled internally, and should not be free'd by * the consumer. A NULL handle is permitted in the event where the caller * has no use for the return value. * @param remoteAddr The target device address to discovery a resource. * @param payload Encoded request payload. * @param cbData Asynchronous callback function that is invoked by the stack when * discovery or resource interaction is received. The discovery could be * related to filtered/scoped/particular resource. The callback is * generated for each response received. * * @return ::OC_STACK_OK on success, some other value upon failure. */ OCStackResult OCSendKeepAliveRequest(OCDoHandle *handle, const char *remoteAddr, OCPayload *payload, OCCallbackData *cbData); #ifdef __cplusplus } // extern "C" #endif #endif // KEEP_ALIVE_H_
ea0407fe5db156609ea8ecd39ec5f040d0ac46f1
e73547787354afd9b717ea57fe8dd0695d161821
/src/world/area_pra/pra_37/pra_37_2_main.c
ede256d9d3154b129890bda37e3ea10814f41263
[]
no_license
pmret/papermario
8b514b19653cef8d6145e47499b3636b8c474a37
9774b26d93f1045dd2a67e502b6efc9599fb6c31
refs/heads/main
2023-08-31T07:09:48.951514
2023-08-21T18:07:08
2023-08-21T18:07:08
287,151,133
904
139
null
2023-09-14T02:44:23
2020-08-13T01:22:57
C
UTF-8
C
false
false
2,818
c
pra_37_2_main.c
#include "pra_37.h" s32 N(map_init)(void) { gGameStatusPtr->playerSpriteSet = PLAYER_SPRITES_MARIO_REFLECT_FLOOR; sprintf(wMapShapeName, "pra_10_shape"); sprintf(wMapHitName, "pra_10_hit"); return FALSE; } #include "../common/Reflection.inc.c" #include "../common/Reflection.data.inc.c" EvtScript N(EVS_ExitDoors_pra_22_1) = { EVT_SET_GROUP(EVT_GROUP_1B) EVT_CALL(DisablePlayerInput, TRUE) EVT_SET(LVar0, pra_37_ENTRY_0) EVT_SET(LVar1, COLLIDER_deilittsw) EVT_SET(LVar2, MODEL_o772) EVT_SET(LVar3, MODEL_o768) EVT_SET(LVar4, MODEL_o844) EVT_SET(LVar5, MODEL_o846) EVT_EXEC(ExitSplitDoubleDoor) EVT_WAIT(17) EVT_CALL(GotoMap, EVT_PTR("pra_22"), pra_22_ENTRY_1) EVT_WAIT(100) EVT_RETURN EVT_END }; EvtScript N(EVS_ExitDoors_pra_28_0) = { EVT_SET_GROUP(EVT_GROUP_1B) EVT_CALL(DisablePlayerInput, TRUE) EVT_SET(LVar0, pra_37_ENTRY_1) EVT_SET(LVar1, COLLIDER_deilittse) EVT_SET(LVar2, MODEL_o1019) EVT_SET(LVar3, MODEL_o1021) EVT_SET(LVar4, MODEL_o1020) EVT_SET(LVar5, MODEL_o1022) EVT_EXEC(ExitSplitDoubleDoor) EVT_WAIT(17) EVT_CALL(GotoMap, EVT_PTR("pra_28"), pra_28_ENTRY_0) EVT_WAIT(100) EVT_RETURN EVT_END }; EvtScript N(EVS_BindExitTriggers) = { EVT_BIND_TRIGGER(EVT_PTR(N(EVS_ExitDoors_pra_22_1)), TRIGGER_WALL_PRESS_A, COLLIDER_deilittsw, 1, 0) EVT_BIND_TRIGGER(EVT_PTR(N(EVS_ExitDoors_pra_28_0)), TRIGGER_WALL_PRESS_A, COLLIDER_deilittse, 1, 0) EVT_RETURN EVT_END }; EvtScript N(EVS_EnterMap) = { EVT_CALL(GetEntryID, LVar0) EVT_SWITCH(LVar0) EVT_CASE_EQ(pra_37_ENTRY_0) EVT_SET(LVar2, MODEL_o772) EVT_SET(LVar3, MODEL_o768) EVT_SET(LVar4, MODEL_o844) EVT_SET(LVar5, MODEL_o846) EVT_EXEC_WAIT(EnterSplitDoubleDoor) EVT_CASE_EQ(pra_37_ENTRY_1) EVT_SET(LVar2, MODEL_o1019) EVT_SET(LVar3, MODEL_o1021) EVT_SET(LVar4, MODEL_o1020) EVT_SET(LVar5, MODEL_o1022) EVT_EXEC_WAIT(EnterSplitDoubleDoor) EVT_END_SWITCH EVT_EXEC(N(EVS_BindExitTriggers)) EVT_RETURN EVT_END }; EvtScript N(EVS_Main) = { EVT_SET(GB_WorldLocation, LOCATION_CRYSTAL_PALACE) EVT_CALL(SetSpriteShading, SHADING_NONE) 
EVT_CALL(SetCamPerspective, CAM_DEFAULT, CAM_UPDATE_FROM_ZONE, 25, 16, 4096) EVT_CALL(SetCamBGColor, CAM_DEFAULT, 24, 24, 40) EVT_CALL(SetCamLeadPlayer, CAM_DEFAULT, FALSE) EVT_CALL(SetCamEnabled, CAM_DEFAULT, TRUE) EVT_CALL(MakeNpcs, TRUE, EVT_PTR(N(DefaultNPCs))) EVT_EXEC(N(EVS_SetupMusic)) EVT_SET(LVar0, REFLECTION_FLOOR_ONLY) EVT_SET(LVar1, GF_PRA_BrokeIllusion) EVT_EXEC(N(EVS_SetupReflections)) EVT_EXEC(N(EVS_EnterMap)) EVT_WAIT(1) EVT_RETURN EVT_END };
eda03a6ad889e6a0e4183a1166fa53e4aab66fb4
e9911598c43e8526da22b2773a73d9b5966f602a
/imap/partlist.h
a346a892cd52ec32b7cd4343916c7bee6192ce6f
[ "LicenseRef-scancode-warranty-disclaimer", "BSD-2-Clause" ]
permissive
cyrusimap/cyrus-imapd
07236dfd887ed92c147938cf1ed2591449d7e8fd
315441d067ba85814768f840f20bc3bb7f20ea6b
refs/heads/master
2023-09-05T09:57:10.683822
2023-09-05T06:09:43
2023-09-05T06:09:43
59,071,965
508
164
NOASSERTION
2023-09-13T04:34:31
2016-05-18T01:33:49
C
UTF-8
C
false
false
6,153
h
partlist.h
/* partlist.h - Partition/backend selection functions * * Copyright (c) 1994-2010 Carnegie Mellon University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. The name "Carnegie Mellon University" must not be used to * endorse or promote products derived from this software without * prior written permission. For permission or any legal * details, please contact * Carnegie Mellon University * Center for Technology Transfer and Enterprise Creation * 4615 Forbes Avenue * Suite 302 * Pittsburgh, PA 15213 * (412) 268-7393, fax: (412) 268-7395 * innovation@andrew.cmu.edu * * 4. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by Computing Services * at Carnegie Mellon University (http://www.cmu.edu/computing/)." * * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "config.h" typedef struct partitem { /** Item name */ char *item; /** Item value */ char *value; /** Item underlying id (filesystem id) */ unsigned long id; /** Item available space (KiB) */ uint64_t available; /** Item total space (KiB) */ uint64_t total; /** Item selection data */ double quota; } partitem_t; typedef enum partmode { /** Random */ PART_SELECT_MODE_RANDOM, /** Most free space. */ PART_SELECT_MODE_FREESPACE_MOST, /** Most free space (percent). */ PART_SELECT_MODE_FREESPACE_PERCENT_MOST, /** Weighted free space (percent) */ PART_SELECT_MODE_FREESPACE_PERCENT_WEIGHTED, /** Weighted free space (percent) delta */ PART_SELECT_MODE_FREESPACE_PERCENT_WEIGHTED_DELTA } partmode_t; struct partlist; /** * \brief Item data callback. * * @param inout part_list items list structure * @param in idx item index */ typedef void (*cb_part_filldata)(struct partlist *part_list, int idx); typedef struct partlist { /** Data callback */ cb_part_filldata filldata; /** Number of items */ int size; /** Items */ partitem_t *items; /** Mode */ partmode_t mode; /** Whether to actually use random mode */ int force_random; /** Usage limit */ int soft_usage_limit; /** Reinit limit */ int reinit; /** Reinit counter */ int reinit_counter; } partlist_t; /** * \brief Gets enumerated mode from string. */ extern partmode_t partlist_getmode(const char *mode); /** * \brief Initializes items list. 
* * @param inout part_list items list structure * @param in filldata items data callback, NULL for default (physical partitions) * @param in key_prefix key prefix for items to search for in configuration * @param in key_value key value, to be used if list of items is stored in one option * @param in excluded excluded items list * @param in mode items mode * @param in soft_usage_limit usage limit * @param in reinit reinit items data after given amount of operations */ extern void partlist_initialize(partlist_t *part_list, cb_part_filldata filldata, const char *key_prefix, const char *key_value, const char *excluded, partmode_t mode, int soft_usage_limit, int reinit); /** * \brief Frees items list. * * @param inout part_list items list structure */ extern void partlist_free(partlist_t *part_list); /** * \brief Selects item value from list. * * @param inout part_list items list structure * @return selected item value, according to requested mode, or NULL if none found */ extern const char *partlist_select_value(partlist_t *part_list); /** * \brief Iterate items in list * * @param inout part_list items list structure * @param in proc callback function, called for each item * @param in rock argument to pass through to callback function * @return return value from callback function */ typedef int (*partlist_foreach_cb)(partitem_t *part_item, void *rock); extern int partlist_foreach(partlist_t *part_list, partlist_foreach_cb proc, void *rock); /** * \brief Selects local partitions. * * @return selected partition, according to requested mode, or NULL if none found */ const char *partlist_local_select(void); /** * \brief Finds partition with most freespace (bytes or percents). 
* * @param out available number of KiB available on partition * @param out total total number of KiB on partition * @param out tavailable number of KiB available on server * @param out ttotal total number of KiB on server * @return partition, or NULL if none found */ const char *partlist_local_find_freespace_most(int percent, uint64_t *available, uint64_t *total, uint64_t *tavailable, uint64_t *ttotal); /** * \brief Frees local partition data. */ extern void partlist_local_done(void);
efe1d90fdb7c332365b9ec1a33d6bb2f8a38e55a
e1d9c54e9925e30e388a255b53a93cccad0b94cb
/kubernetes/unit-test/test_v1_volume_error.c
e97d3864c7b485217e60dd1fd01f5b0f43ccfb28
[ "curl", "Apache-2.0" ]
permissive
kubernetes-client/c
dd4fd8095485c083e0f40f2b48159b1609a6141b
5ac5ff25e9809a92a48111b1f77574b6d040b711
refs/heads/master
2023-08-13T10:51:03.702497
2023-08-07T19:18:32
2023-08-07T19:18:32
247,958,425
127
47
Apache-2.0
2023-09-07T20:07:00
2020-03-17T11:59:05
C
UTF-8
C
false
false
1,576
c
test_v1_volume_error.c
#ifndef v1_volume_error_TEST #define v1_volume_error_TEST // the following is to include only the main from the first c file #ifndef TEST_MAIN #define TEST_MAIN #define v1_volume_error_MAIN #endif // TEST_MAIN #include <stdlib.h> #include <string.h> #include <stdio.h> #include <stdbool.h> #include "../external/cJSON.h" #include "../model/v1_volume_error.h" v1_volume_error_t* instantiate_v1_volume_error(int include_optional); v1_volume_error_t* instantiate_v1_volume_error(int include_optional) { v1_volume_error_t* v1_volume_error = NULL; if (include_optional) { v1_volume_error = v1_volume_error_create( "0", "2013-10-20T19:20:30+01:00" ); } else { v1_volume_error = v1_volume_error_create( "0", "2013-10-20T19:20:30+01:00" ); } return v1_volume_error; } #ifdef v1_volume_error_MAIN void test_v1_volume_error(int include_optional) { v1_volume_error_t* v1_volume_error_1 = instantiate_v1_volume_error(include_optional); cJSON* jsonv1_volume_error_1 = v1_volume_error_convertToJSON(v1_volume_error_1); printf("v1_volume_error :\n%s\n", cJSON_Print(jsonv1_volume_error_1)); v1_volume_error_t* v1_volume_error_2 = v1_volume_error_parseFromJSON(jsonv1_volume_error_1); cJSON* jsonv1_volume_error_2 = v1_volume_error_convertToJSON(v1_volume_error_2); printf("repeating v1_volume_error:\n%s\n", cJSON_Print(jsonv1_volume_error_2)); } int main() { test_v1_volume_error(1); test_v1_volume_error(0); printf("Hello world \n"); return 0; } #endif // v1_volume_error_MAIN #endif // v1_volume_error_TEST
4f0a6277a20ccc2b885de5df3d21f56cb5cbc65a
fce81b804cae23f525a5ad4370b684bf0dc531a5
/numpy/core/src/multiarray/common.c
573d0d6063c72e9876f4dcf754ff95a303752230
[ "Zlib", "BSD-3-Clause", "MIT", "Apache-2.0" ]
permissive
numpy/numpy
ba2abcc1d2d46affbb6aabe5aed6407b4b57507e
dc2ff125493777a1084044e6cd6857a42ee323d4
refs/heads/main
2023-09-05T10:10:52.767363
2023-09-04T18:03:29
2023-09-04T18:03:29
908,607
25,725
11,968
BSD-3-Clause
2023-09-14T21:26:09
2010-09-13T23:02:39
Python
UTF-8
C
false
false
13,793
c
common.c
#define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE #define PY_SSIZE_T_CLEAN #include <Python.h> #include "numpy/arrayobject.h" #include "npy_config.h" #include "npy_pycompat.h" #include "common.h" #include "abstractdtypes.h" #include "usertypes.h" #include "npy_buffer.h" #include "get_attr_string.h" #include "mem_overlap.h" #include "array_coercion.h" /* * The casting to use for implicit assignment operations resulting from * in-place operations (like +=) and out= arguments. (Notice that this * variable is misnamed, but it's part of the public API so I'm not sure we * can just change it. Maybe someone should try and see if anyone notices. */ /* * In numpy 1.6 and earlier, this was NPY_UNSAFE_CASTING. In a future * release, it will become NPY_SAME_KIND_CASTING. Right now, during the * transitional period, we continue to follow the NPY_UNSAFE_CASTING rules (to * avoid breaking people's code), but we also check for whether the cast would * be allowed under the NPY_SAME_KIND_CASTING rules, and if not we issue a * warning (that people's code will be broken in a future release.) */ NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING = NPY_SAME_KIND_CASTING; NPY_NO_EXPORT PyArray_Descr * _array_find_python_scalar_type(PyObject *op) { if (PyFloat_Check(op)) { return PyArray_DescrFromType(NPY_DOUBLE); } else if (PyComplex_Check(op)) { return PyArray_DescrFromType(NPY_CDOUBLE); } else if (PyLong_Check(op)) { return NPY_DT_CALL_discover_descr_from_pyobject( &PyArray_PyIntAbstractDType, op); } return NULL; } /* * Get a suitable string dtype by calling `__str__`. * For `np.bytes_`, this assumes an ASCII encoding. 
*/ NPY_NO_EXPORT PyArray_Descr * PyArray_DTypeFromObjectStringDiscovery( PyObject *obj, PyArray_Descr *last_dtype, int string_type) { int itemsize; if (string_type == NPY_STRING) { PyObject *temp = PyObject_Str(obj); if (temp == NULL) { return NULL; } /* assume that when we do the encoding elsewhere we'll use ASCII */ itemsize = PyUnicode_GetLength(temp); Py_DECREF(temp); if (itemsize < 0) { return NULL; } } else if (string_type == NPY_UNICODE) { PyObject *temp = PyObject_Str(obj); if (temp == NULL) { return NULL; } itemsize = PyUnicode_GetLength(temp); Py_DECREF(temp); if (itemsize < 0) { return NULL; } itemsize *= 4; /* convert UCS4 codepoints to bytes */ } else { return NULL; } if (last_dtype != NULL && last_dtype->type_num == string_type && last_dtype->elsize >= itemsize) { Py_INCREF(last_dtype); return last_dtype; } PyArray_Descr *dtype = PyArray_DescrNewFromType(string_type); if (dtype == NULL) { return NULL; } dtype->elsize = itemsize; return dtype; } /* * This function is now identical to the new PyArray_DiscoverDTypeAndShape * but only returns the dtype. It should in most cases be slowly phased out. * (Which may need some refactoring to PyArray_FromAny to make it simpler) */ NPY_NO_EXPORT int PyArray_DTypeFromObject(PyObject *obj, int maxdims, PyArray_Descr **out_dtype) { coercion_cache_obj *cache = NULL; npy_intp shape[NPY_MAXDIMS]; int ndim; ndim = PyArray_DiscoverDTypeAndShape( obj, maxdims, shape, &cache, NULL, NULL, out_dtype, 0); if (ndim < 0) { return -1; } npy_free_coercion_cache(cache); return 0; } NPY_NO_EXPORT npy_bool _IsWriteable(PyArrayObject *ap) { PyObject *base = PyArray_BASE(ap); Py_buffer view; /* * C-data wrapping arrays may not own their data while not having a base; * WRITEBACKIFCOPY arrays have a base, but do own their data. 
*/ if (base == NULL || PyArray_CHKFLAGS(ap, NPY_ARRAY_OWNDATA)) { /* * This is somewhat unsafe for directly wrapped non-writable C-arrays, * which do not know whether the memory area is writable or not and * do not own their data (but have no base). * It would be better if this returned PyArray_ISWRITEABLE(ap). * Since it is hard to deprecate, this is deprecated only on the Python * side, but not on in PyArray_UpdateFlags. */ return NPY_TRUE; } /* * Get to the final base object. * If it is a writeable array, then return True if we can * find an array object or a writeable buffer object as * the final base object. */ while (PyArray_Check(base)) { ap = (PyArrayObject *)base; base = PyArray_BASE(ap); if (PyArray_ISWRITEABLE(ap)) { /* * If any base is writeable, it must be OK to switch, note that * bases are typically collapsed to always point to the most * general one. */ return NPY_TRUE; } if (base == NULL || PyArray_CHKFLAGS(ap, NPY_ARRAY_OWNDATA)) { /* there is no further base to test the writeable flag for */ return NPY_FALSE; } assert(!PyArray_CHKFLAGS(ap, NPY_ARRAY_OWNDATA)); } if (PyObject_GetBuffer(base, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) { PyErr_Clear(); return NPY_FALSE; } PyBuffer_Release(&view); return NPY_TRUE; } /** * Convert an array shape to a string such as "(1, 2)". * * @param Dimensionality of the shape * @param npy_intp pointer to shape array * @param String to append after the shape `(1, 2)%s`. * * @return Python unicode string */ NPY_NO_EXPORT PyObject * convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending) { npy_intp i; /* * Negative dimension indicates "newaxis", which can * be discarded for printing if it's a leading dimension. * Find the first non-"newaxis" dimension. 
*/ for (i = 0; i < n && vals[i] < 0; i++); if (i == n) { return PyUnicode_FromFormat("()%s", ending); } PyObject *ret = PyUnicode_FromFormat("%" NPY_INTP_FMT, vals[i++]); if (ret == NULL) { return NULL; } for (; i < n; ++i) { PyObject *tmp; if (vals[i] < 0) { tmp = PyUnicode_FromString(",newaxis"); } else { tmp = PyUnicode_FromFormat(",%" NPY_INTP_FMT, vals[i]); } if (tmp == NULL) { Py_DECREF(ret); return NULL; } Py_SETREF(ret, PyUnicode_Concat(ret, tmp)); Py_DECREF(tmp); if (ret == NULL) { return NULL; } } if (i == 1) { Py_SETREF(ret, PyUnicode_FromFormat("(%S,)%s", ret, ending)); } else { Py_SETREF(ret, PyUnicode_FromFormat("(%S)%s", ret, ending)); } return ret; } NPY_NO_EXPORT void dot_alignment_error(PyArrayObject *a, int i, PyArrayObject *b, int j) { PyObject *errmsg = NULL, *format = NULL, *fmt_args = NULL, *i_obj = NULL, *j_obj = NULL, *shape1 = NULL, *shape2 = NULL, *shape1_i = NULL, *shape2_j = NULL; format = PyUnicode_FromString("shapes %s and %s not aligned:" " %d (dim %d) != %d (dim %d)"); shape1 = convert_shape_to_string(PyArray_NDIM(a), PyArray_DIMS(a), ""); shape2 = convert_shape_to_string(PyArray_NDIM(b), PyArray_DIMS(b), ""); i_obj = PyLong_FromLong(i); j_obj = PyLong_FromLong(j); shape1_i = PyLong_FromSsize_t(PyArray_DIM(a, i)); shape2_j = PyLong_FromSsize_t(PyArray_DIM(b, j)); if (!format || !shape1 || !shape2 || !i_obj || !j_obj || !shape1_i || !shape2_j) { goto end; } fmt_args = PyTuple_Pack(6, shape1, shape2, shape1_i, i_obj, shape2_j, j_obj); if (fmt_args == NULL) { goto end; } errmsg = PyUnicode_Format(format, fmt_args); if (errmsg != NULL) { PyErr_SetObject(PyExc_ValueError, errmsg); } else { PyErr_SetString(PyExc_ValueError, "shapes are not aligned"); } end: Py_XDECREF(errmsg); Py_XDECREF(fmt_args); Py_XDECREF(format); Py_XDECREF(i_obj); Py_XDECREF(j_obj); Py_XDECREF(shape1); Py_XDECREF(shape2); Py_XDECREF(shape1_i); Py_XDECREF(shape2_j); } /** * unpack tuple of dtype->fields (descr, offset, title[not-needed]) * * @param "value" should be 
the tuple. * * @return "descr" will be set to the field's dtype * @return "offset" will be set to the field's offset * * returns -1 on failure, 0 on success. */ NPY_NO_EXPORT int _unpack_field(PyObject *value, PyArray_Descr **descr, npy_intp *offset) { PyObject * off; if (PyTuple_GET_SIZE(value) < 2) { return -1; } *descr = (PyArray_Descr *)PyTuple_GET_ITEM(value, 0); off = PyTuple_GET_ITEM(value, 1); if (PyLong_Check(off)) { *offset = PyLong_AsSsize_t(off); } else { PyErr_SetString(PyExc_IndexError, "can't convert offset"); return -1; } return 0; } /* * check whether arrays with datatype dtype might have object fields. This will * only happen for structured dtypes (which may have hidden objects even if the * HASOBJECT flag is false), object dtypes, or subarray dtypes whose base type * is either of these. */ NPY_NO_EXPORT int _may_have_objects(PyArray_Descr *dtype) { PyArray_Descr *base = dtype; if (PyDataType_HASSUBARRAY(dtype)) { base = dtype->subarray->base; } return (PyDataType_HASFIELDS(base) || PyDataType_FLAGCHK(base, NPY_ITEM_HASOBJECT) ); } /* * Make a new empty array, of the passed size, of a type that takes the * priority of ap1 and ap2 into account. * * If `out` is non-NULL, memory overlap is checked with ap1 and ap2, and an * updateifcopy temporary array may be returned. If `result` is non-NULL, the * output array to be returned (`out` if non-NULL and the newly allocated array * otherwise) is incref'd and put to *result. 
*/ NPY_NO_EXPORT PyArrayObject * new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out, int nd, npy_intp dimensions[], int typenum, PyArrayObject **result) { PyArrayObject *out_buf; if (out) { int d; /* verify that out is usable */ if (PyArray_NDIM(out) != nd || PyArray_TYPE(out) != typenum || !PyArray_ISCARRAY(out)) { PyErr_SetString(PyExc_ValueError, "output array is not acceptable (must have the right datatype, " "number of dimensions, and be a C-Array)"); return 0; } for (d = 0; d < nd; ++d) { if (dimensions[d] != PyArray_DIM(out, d)) { PyErr_SetString(PyExc_ValueError, "output array has wrong dimensions"); return 0; } } /* check for memory overlap */ if (!(solve_may_share_memory(out, ap1, 1) == 0 && solve_may_share_memory(out, ap2, 1) == 0)) { /* allocate temporary output array */ out_buf = (PyArrayObject *)PyArray_NewLikeArray(out, NPY_CORDER, NULL, 0); if (out_buf == NULL) { return NULL; } /* set copy-back */ Py_INCREF(out); if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) { Py_DECREF(out); Py_DECREF(out_buf); return NULL; } } else { Py_INCREF(out); out_buf = out; } if (result) { Py_INCREF(out); *result = out; } return out_buf; } else { PyTypeObject *subtype; double prior1, prior2; /* * Need to choose an output array that can hold a sum * -- use priority to determine which subtype. */ if (Py_TYPE(ap2) != Py_TYPE(ap1)) { prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0); prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0); subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1)); } else { prior1 = prior2 = 0.0; subtype = Py_TYPE(ap1); } out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions, typenum, NULL, NULL, 0, 0, (PyObject *) (prior2 > prior1 ? 
ap2 : ap1)); if (out_buf != NULL && result) { Py_INCREF(out_buf); *result = out_buf; } return out_buf; } } NPY_NO_EXPORT int check_is_convertible_to_scalar(PyArrayObject *v) { if (PyArray_NDIM(v) == 0) { return 0; } /* Remove this if-else block when the deprecation expires */ if (PyArray_SIZE(v) == 1) { /* Numpy 1.25.0, 2023-01-02 */ if (DEPRECATE( "Conversion of an array with ndim > 0 to a scalar " "is deprecated, and will error in future. " "Ensure you extract a single element from your array " "before performing this operation. " "(Deprecated NumPy 1.25.)") < 0) { return -1; } return 0; } else { PyErr_SetString(PyExc_TypeError, "only length-1 arrays can be converted to Python scalars"); return -1; } PyErr_SetString(PyExc_TypeError, "only 0-dimensional arrays can be converted to Python scalars"); return -1; }
7fb6ae1ad7b7e60842979c5dd80aade8516037ea
0cc343d927d5db6693006018986715c43acab961
/examples/default_clause_test.c
64f67973f7ea86a021bb4a48947ea2eeae7e01ed
[ "MIT" ]
permissive
verifast/verifast
ec0101fc4a69bd33c5f66be4444169c4e060ead8
d152da790c7ebf72ce616533a6c83082629adbdb
refs/heads/master
2023-08-25T00:13:51.464802
2023-08-13T10:00:00
2023-08-13T10:00:00
14,519,163
325
64
NOASSERTION
2023-08-10T21:16:08
2013-11-19T08:57:02
OCaml
UTF-8
C
false
false
1,565
c
default_clause_test.c
/*@ inductive foo = A | B | C | D; fixpoint bool isA(foo f) { switch (f) { case A: return true; default: return false; } } fixpoint bool bothA(foo f1, foo f2) { switch (f1) { case A: return switch (f2) { case A: return true; default: return false; }; default: return false; } } fixpoint bool eitherA(foo f1, foo f2) { switch (f1) { case A: return true; default: return switch (f2) { case A: return true; default: return false; }; } } fixpoint bool allA(foo f1, foo f2, foo f3) { switch (f1) { case A: return switch (f2) { case A: return switch (f3) { case A: return true; default: return false; }; default: return false; }; default: return false; } } fixpoint bool anyA(foo f1, foo f2, foo f3) { switch (f1) { case A: return true; default: return switch (f2) { case A: return true; default: return switch (f3) { case A: return true; default: return false; }; }; } } lemma void test() requires true; ensures true; { assert !!isA(A); assert !isA(B); assert !!bothA(A, A); assert !bothA(A, D); assert !bothA(C, A); assert !!eitherA(A, B); assert !!eitherA(D, A); assert !eitherA(B, D); assert !!allA(A, A, A); assert !allA(A, B, A); assert !allA(B, A, A); assert !allA(A, A, B); assert !allA(A, B, C); assert !!anyA(A, B, D); assert !!anyA(B, A, D); assert !!anyA(B, B, A); assert !anyA(B, C, C); } @*/
25edfa7ed606202a126677daded48b4eb2198c02
2a505950da885d50ef8942344765ee2570b8ab66
/libs/signature/src/sha1.c
7e7c804fdc4ab98ada7d6fdfc8ac9b6dcd60a024
[ "Apache-2.0", "BSD-3-Clause" ]
permissive
apache/rocketmq-client-cpp
87b909128b2d1e920d57cedcaeb768251d7041fa
de41701d7e9a6a78bbe4d34578e444e27a499394
refs/heads/master
2023-08-23T23:33:17.744449
2022-10-25T08:48:49
2022-10-25T08:48:49
152,519,793
361
193
Apache-2.0
2023-06-22T11:04:37
2018-10-11T02:36:54
C++
UTF-8
C
false
false
15,393
c
sha1.c
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sha1.h" #include <stddef.h> #include <string.h> #if USE_UNLOCKED_IO # include "unlocked-io.h" #endif #ifdef __cplusplus namespace rocketmqSignature { #endif #ifdef WORDS_BIGENDIAN # define SWAP(n) (n) #else # define SWAP(n) \ (((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24)) #endif #define BLOCKSIZE 4096 #if BLOCKSIZE % 64 != 0 # error "invalid BLOCKSIZE" #endif /* This array contains the bytes used to pad the buffer to the next 64-byte boundary. (RFC 1321, 3.1: Step 1) */ static const unsigned char fillbuf[64] = { 0x80, 0 /* , 0, 0, ... */ }; /*! * @fn void sha1_init_ctx (struct sha1_ctx *ctx) * * @brief initialize a context with start constants * * @details Take a pointer to a 160 bit block of data (five 32 bit ints) and * initialize it to the start constants of the SHA1 algorithm. This * must be called before using hash in the call to sha1_hash. * * @param[out] ctx pointer to a context to be initialized */ void sha1_init_ctx (struct sha1_ctx *ctx) { ctx->A = 0x67452301; ctx->B = 0xefcdab89; ctx->C = 0x98badcfe; ctx->D = 0x10325476; ctx->E = 0xc3d2e1f0; ctx->total[0] = ctx->total[1] = 0; ctx->buflen = 0; } /*! 
* @fn static __inline__ void set_uint32 (char *cp, uint32_t v) * * @brief Copy the 4 byte value from v into the memory location pointed to by *cp * * @details Copy the 4 byte value from v into the memory location pointed to by * *cp, If your architecture allows unaligned access this is equivalent * to * (uint32_t *) cp = v * * @param[out] cp memory location to copy v into * @param[in] v 4 byte value to be copied */ #ifdef WIN32 static _inline void #else static __inline__ void #endif set_uint32 (char *cp, uint32_t v) { memcpy (cp, &v, sizeof v); } /*! * @fn void *sha1_read_ctx (const struct sha1_ctx *ctx, void *resbuf) * * @brief Put result from CTX in first 20 bytes following RESBUF * * @details Put result from CTX in first 20 bytes following RESBUF. The result * must be in little endian byte order. * * @param[in] ctx context whose results will be copied * @param[out] resbuf result of copies saved in little endian byte order * @return resbuf */ void * sha1_read_ctx (const struct sha1_ctx *ctx, void *resbuf) { char *r = (char*)resbuf; set_uint32 (r + 0 * sizeof ctx->A, SWAP (ctx->A)); set_uint32 (r + 1 * sizeof ctx->B, SWAP (ctx->B)); set_uint32 (r + 2 * sizeof ctx->C, SWAP (ctx->C)); set_uint32 (r + 3 * sizeof ctx->D, SWAP (ctx->D)); set_uint32 (r + 4 * sizeof ctx->E, SWAP (ctx->E)); return resbuf; } /*! * @fn void *sha1_finish_ctx (struct sha1_ctx *ctx, void *resbuf) * * @brief Process the remaining bytes in the internal buffer and write the result to RESBUF. * * @details Process the remaining bytes in the internal buffer and the usual * prolog according to the standard and write the result to RESBUF. * * @param[in] ctx context to be used * @param[out] resbuf resultant SHA1 hash * @return resultant SHA1 hash */ void * sha1_finish_ctx (struct sha1_ctx *ctx, void *resbuf) { /* Take yet unprocessed bytes into account. */ uint32_t bytes = ctx->buflen; size_t size = (bytes < 56) ? 64 / 4 : 64 * 2 / 4; /* Now count remaining bytes. 
*/ ctx->total[0] += bytes; if (ctx->total[0] < bytes) ++ctx->total[1]; /* Put the 64-bit file length in *bits* at the end of the buffer. */ ctx->buffer[size - 2] = SWAP ((ctx->total[1] << 3) | (ctx->total[0] >> 29)); ctx->buffer[size - 1] = SWAP (ctx->total[0] << 3); memcpy (&((char *) ctx->buffer)[bytes], fillbuf, (size - 2) * 4 - bytes); /* Process last bytes. */ sha1_process_block (ctx->buffer, size * 4, ctx); return sha1_read_ctx (ctx, resbuf); } /* * @fn void *sha1_stream (FILE *stream, void *resblock) * * @brief Compute SHA1 message digest for A Stream. * * @details Compute SHA1 message digest for Stream. The * result is always in little endian byte order, so that a byte-wise * output yields to the wanted ASCII representation of the message * digest. * * @param[in] stream message stream to be hashed * @param[out] resblock resultant hash in little endian byte order * @return resultant hash in little endian byte order */ int sha1_stream (FILE *stream, void *resblock) { struct sha1_ctx ctx; char buffer[BLOCKSIZE + 72]; size_t sum; /* Initialize the computation context. */ sha1_init_ctx (&ctx); /* Iterate over full file contents. */ while (1) { /* We read the file in blocks of BLOCKSIZE bytes. One call of the computation function processes the whole buffer so that with the next round of the loop another block can be read. */ size_t n; sum = 0; /* Read block. Take care for partial reads. */ while (1) { n = fread (buffer + sum, 1, BLOCKSIZE - sum, stream); sum += n; if (sum == BLOCKSIZE) break; if (n == 0) { /* Check for the error flag IFF N == 0, so that we don't exit the loop after a partial read due to e.g., EAGAIN or EWOULDBLOCK. */ if (ferror (stream)) return 1; goto process_partial_block; } /* We've read at least one byte, so ignore errors. But always check for EOF, since feof may be true even though N > 0. Otherwise, we could end up calling fread after EOF. */ if (feof (stream)) goto process_partial_block; } /* Process buffer with BLOCKSIZE bytes. 
Note that BLOCKSIZE % 64 == 0 */ sha1_process_block (buffer, BLOCKSIZE, &ctx); } process_partial_block:; /* Process any remaining bytes. */ if (sum > 0) sha1_process_bytes (buffer, sum, &ctx); /* Construct result in desired memory. */ sha1_finish_ctx (&ctx, resblock); return 0; } /* * @fn void *sha1_buffer (const char *buffer, size_t len, void *resblock) * * @brief Compute SHA1 message digest for LEN bytes beginning at BUFFER. * * @details Compute SHA1 message digest for LEN bytes beginning at BUFFER. The * result is always in little endian byte order, so that a byte-wise * output yields to the wanted ASCII representation of the message * digest. * * @param[in] buffer message to be hashed * @param[in] len length of buffer * @param[out] resblock resultant hash in little endian byte order * @return resultant hash in little endian byte order */ void * sha1_buffer (const char *buffer, size_t len, void *resblock) { struct sha1_ctx ctx; /* Initialize the computation context. */ sha1_init_ctx (&ctx); /* Process whole buffer but last len % 64 bytes. */ sha1_process_bytes (buffer, len, &ctx); /* Put result in desired memory area. */ return sha1_finish_ctx (&ctx, resblock); } /*! * @fn void sha1_process_bytes (const void *buffer, size_t len, struct sha1_ctx *ctx) * * @brief update the context for the next LEN bytes starting at BUFFER. * * @details Starting with the result of former calls of this function (or the * initialization function) update the context for the next LEN bytes * starting at BUFFER. * It is NOT required that LEN is a multiple of 64. * * @param[in] buffer buffer used to update context values * @param[in] len length of buffer * @param[out] ctx context to be updated */ void sha1_process_bytes (const void *buffer, size_t len, struct sha1_ctx *ctx) { /* When we already have some bits in our internal buffer concatenate both inputs first. */ if (ctx->buflen != 0) { size_t left_over = ctx->buflen; size_t add = 128 - left_over > len ? 
len : 128 - left_over; memcpy (&((char *) ctx->buffer)[left_over], buffer, add); ctx->buflen += add; if (ctx->buflen > 64) { sha1_process_block (ctx->buffer, ctx->buflen & ~63, ctx); ctx->buflen &= 63; /* The regions in the following copy operation cannot overlap. */ memcpy (ctx->buffer, &((char *) ctx->buffer)[(left_over + add) & ~63], ctx->buflen); } buffer = (const char *) buffer + add; len -= add; } /* Process available complete blocks. */ if (len >= 64) { #if !_STRING_ARCH_unaligned # define alignof(type) offsetof (struct { char c; type x; }, x) # define UNALIGNED_P(p) (((size_t) p) % alignof (uint32_t) != 0) if (UNALIGNED_P (buffer)) while (len > 64) { sha1_process_block (memcpy (ctx->buffer, buffer, 64), 64, ctx); buffer = (const char *) buffer + 64; len -= 64; } else #endif { sha1_process_block (buffer, len & ~63, ctx); buffer = (const char *) buffer + (len & ~63); len &= 63; } } /* Move remaining bytes in internal buffer. */ if (len > 0) { size_t left_over = ctx->buflen; memcpy (&((char *) ctx->buffer)[left_over], buffer, len); left_over += len; if (left_over >= 64) { sha1_process_block (ctx->buffer, 64, ctx); left_over -= 64; memcpy (ctx->buffer, &ctx->buffer[16], left_over); } ctx->buflen = left_over; } } /* --- Code below is the primary difference between md5.c and sha1.c --- */ /* SHA1 round constants */ #define K1 0x5a827999 #define K2 0x6ed9eba1 #define K3 0x8f1bbcdc #define K4 0xca62c1d6 /* Round functions. Note that F2 is the same as F4. */ #define F1(B,C,D) ( D ^ ( B & ( C ^ D ) ) ) #define F2(B,C,D) (B ^ C ^ D) #define F3(B,C,D) ( ( B & C ) | ( D & ( B | C ) ) ) #define F4(B,C,D) (B ^ C ^ D) /*! * @fn void sha1_process_block (const void *buffer, size_t len, struct sha1_ctx *ctx) * * @brief Process LEN bytes of BUFFER, accumulating context into CTX. * * @details Process LEN bytes of BUFFER, accumulating context into CTX. * It is assumed that LEN % 64 == 0. * Most of this code comes from GnuPG's cipher/sha1.c. 
* * @param[in] buffer buffer to be processed * @param[in] len length of buffer * @param[out] ctx context used to accumulate results */ void sha1_process_block (const void *buffer, size_t len, struct sha1_ctx *ctx) { const uint32_t *words = (const uint32_t*)buffer; size_t nwords = len / sizeof (uint32_t); const uint32_t *endp = words + nwords; uint32_t x[16]; uint32_t a = ctx->A; uint32_t b = ctx->B; uint32_t c = ctx->C; uint32_t d = ctx->D; uint32_t e = ctx->E; /* First increment the byte count. RFC 1321 specifies the possible length of the file up to 2^64 bits. Here we only compute the number of bytes. Do a double word increment. */ ctx->total[0] += len; if (ctx->total[0] < len) ++ctx->total[1]; #define rol(x, n) (((x) << (n)) | ((uint32_t) (x) >> (32 - (n)))) #define M(I) ( tm = x[I&0x0f] ^ x[(I-14)&0x0f] \ ^ x[(I-8)&0x0f] ^ x[(I-3)&0x0f] \ , (x[I&0x0f] = rol(tm, 1)) ) #define R(A,B,C,D,E,F,K,M) do { E += rol( A, 5 ) \ + F( B, C, D ) \ + K \ + M; \ B = rol( B, 30 ); \ } while(0) while (words < endp) { uint32_t tm; int t; for (t = 0; t < 16; t++) { x[t] = SWAP (*words); words++; } R( a, b, c, d, e, F1, K1, x[ 0] ); R( e, a, b, c, d, F1, K1, x[ 1] ); R( d, e, a, b, c, F1, K1, x[ 2] ); R( c, d, e, a, b, F1, K1, x[ 3] ); R( b, c, d, e, a, F1, K1, x[ 4] ); R( a, b, c, d, e, F1, K1, x[ 5] ); R( e, a, b, c, d, F1, K1, x[ 6] ); R( d, e, a, b, c, F1, K1, x[ 7] ); R( c, d, e, a, b, F1, K1, x[ 8] ); R( b, c, d, e, a, F1, K1, x[ 9] ); R( a, b, c, d, e, F1, K1, x[10] ); R( e, a, b, c, d, F1, K1, x[11] ); R( d, e, a, b, c, F1, K1, x[12] ); R( c, d, e, a, b, F1, K1, x[13] ); R( b, c, d, e, a, F1, K1, x[14] ); R( a, b, c, d, e, F1, K1, x[15] ); R( e, a, b, c, d, F1, K1, M(16) ); R( d, e, a, b, c, F1, K1, M(17) ); R( c, d, e, a, b, F1, K1, M(18) ); R( b, c, d, e, a, F1, K1, M(19) ); R( a, b, c, d, e, F2, K2, M(20) ); R( e, a, b, c, d, F2, K2, M(21) ); R( d, e, a, b, c, F2, K2, M(22) ); R( c, d, e, a, b, F2, K2, M(23) ); R( b, c, d, e, a, F2, K2, M(24) ); R( a, b, c, d, e, F2, K2, 
M(25) ); R( e, a, b, c, d, F2, K2, M(26) ); R( d, e, a, b, c, F2, K2, M(27) ); R( c, d, e, a, b, F2, K2, M(28) ); R( b, c, d, e, a, F2, K2, M(29) ); R( a, b, c, d, e, F2, K2, M(30) ); R( e, a, b, c, d, F2, K2, M(31) ); R( d, e, a, b, c, F2, K2, M(32) ); R( c, d, e, a, b, F2, K2, M(33) ); R( b, c, d, e, a, F2, K2, M(34) ); R( a, b, c, d, e, F2, K2, M(35) ); R( e, a, b, c, d, F2, K2, M(36) ); R( d, e, a, b, c, F2, K2, M(37) ); R( c, d, e, a, b, F2, K2, M(38) ); R( b, c, d, e, a, F2, K2, M(39) ); R( a, b, c, d, e, F3, K3, M(40) ); R( e, a, b, c, d, F3, K3, M(41) ); R( d, e, a, b, c, F3, K3, M(42) ); R( c, d, e, a, b, F3, K3, M(43) ); R( b, c, d, e, a, F3, K3, M(44) ); R( a, b, c, d, e, F3, K3, M(45) ); R( e, a, b, c, d, F3, K3, M(46) ); R( d, e, a, b, c, F3, K3, M(47) ); R( c, d, e, a, b, F3, K3, M(48) ); R( b, c, d, e, a, F3, K3, M(49) ); R( a, b, c, d, e, F3, K3, M(50) ); R( e, a, b, c, d, F3, K3, M(51) ); R( d, e, a, b, c, F3, K3, M(52) ); R( c, d, e, a, b, F3, K3, M(53) ); R( b, c, d, e, a, F3, K3, M(54) ); R( a, b, c, d, e, F3, K3, M(55) ); R( e, a, b, c, d, F3, K3, M(56) ); R( d, e, a, b, c, F3, K3, M(57) ); R( c, d, e, a, b, F3, K3, M(58) ); R( b, c, d, e, a, F3, K3, M(59) ); R( a, b, c, d, e, F4, K4, M(60) ); R( e, a, b, c, d, F4, K4, M(61) ); R( d, e, a, b, c, F4, K4, M(62) ); R( c, d, e, a, b, F4, K4, M(63) ); R( b, c, d, e, a, F4, K4, M(64) ); R( a, b, c, d, e, F4, K4, M(65) ); R( e, a, b, c, d, F4, K4, M(66) ); R( d, e, a, b, c, F4, K4, M(67) ); R( c, d, e, a, b, F4, K4, M(68) ); R( b, c, d, e, a, F4, K4, M(69) ); R( a, b, c, d, e, F4, K4, M(70) ); R( e, a, b, c, d, F4, K4, M(71) ); R( d, e, a, b, c, F4, K4, M(72) ); R( c, d, e, a, b, F4, K4, M(73) ); R( b, c, d, e, a, F4, K4, M(74) ); R( a, b, c, d, e, F4, K4, M(75) ); R( e, a, b, c, d, F4, K4, M(76) ); R( d, e, a, b, c, F4, K4, M(77) ); R( c, d, e, a, b, F4, K4, M(78) ); R( b, c, d, e, a, F4, K4, M(79) ); a = ctx->A += a; b = ctx->B += b; c = ctx->C += c; d = ctx->D += d; e = ctx->E += e; } } #ifdef 
__cplusplus } #endif
01750bdb750356da9b522b605fd26b03a99e672c
909095842af0bbf2e769aff361b5af344abc7433
/engine/source/platform/platform_ScriptBinding.h
2967ad67b725e5991caa3b965bd06e7c1caa741c
[ "MIT" ]
permissive
TorqueGameEngines/Torque2D
316105e8b91cebf8660ff43871440e1c4d0b1c5e
2c555d6dd0172a05ddb6a14f014d22f335b4ccad
refs/heads/master
2023-09-01T02:22:53.663431
2023-05-02T20:45:37
2023-05-02T20:45:37
268,352,960
1,001
117
MIT
2023-05-02T20:19:19
2020-05-31T19:51:55
C
UTF-8
C
false
false
3,422
h
platform_ScriptBinding.h
//----------------------------------------------------------------------------- // Copyright (c) 2013 GarageGames, LLC // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. //----------------------------------------------------------------------------- /*! @defgroup PlatformFunctions Platform @ingroup TorqueScriptFunctions @{ */ /*! Use the setMouseLock function to un/lock the mouse. @param isLocked A boolean value. @return No return value */ ConsoleFunctionWithDocs( setMouseLock, ConsoleVoid, 2, 2, ( isLocked )) { Platform::setMouseLock(dAtob(argv[1])); } //----------------------------------------------------------------------------- /*! Use the getRealTime function to the computer time in milliseconds. @return Returns the current real time in milliseconds. @sa getSimTime */ ConsoleFunctionWithDocs( getRealTime, ConsoleInt, 1, 1, ()) { return Platform::getRealMilliseconds(); } //----------------------------------------------------------------------------- /*! 
Get the local time @return the local time formatted as: monthday/month/year hour/minute/second */ ConsoleFunctionWithDocs( getLocalTime, ConsoleString, 1, 1, ()) { char* buf = Con::getReturnBuffer(128); Platform::LocalTime lt; Platform::getLocalTime(lt); dSprintf(buf, 128, "%d/%d/%d %02d:%02d:%02d", lt.monthday, lt.month + 1, lt.year + 1900, lt.hour, lt.min, lt.sec); return buf; } /*! Use the getClipboard function to get the contents of the GUI clipboard. @return Returns a string equal to the current contents of the copy the clipboard, or a NULL strain if the copy clipboard is empty. @sa setClipboard */ ConsoleFunctionWithDocs( getClipboard, ConsoleString, 1, 1, ()) { return Platform::getClipboard(); }; /*! Use the setClipboard function to Set value on clipboard to string. @param string The new value to place in the GUI clipboard. @return Returns true if successful, false otherwise. @sa getClipoard") */ ConsoleFunctionWithDocs( setClipboard, ConsoleBool, 2, 2, ( string )) { return Platform::setClipboard(argv[1]); }; /*! Creates a UUID string. */ ConsoleFunctionWithDocs( createUUID, ConsoleString, 1, 1, () ) { return Platform::createUUID(); } /*! @} */ // group PlatformFunctions
b623e4bcf3bf907f0de30e455aa037ae1a7de9e0
35c04ea32351dc95bc18d46e5c70dda9c1e08668
/Examples/CodeWarrior/MCF51JM128_BadgeBoard/BadgeBoard/Sources/Events.h
ec888e840ac6f22d4c5f56afd7fdc08421da7030
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
ErichStyger/mcuoneclipse
0f8e7a2056a26ed79d9d4a0afd64777ff0b2b2fe
04ad311b11860ae5f8285316010961a87fa06d0c
refs/heads/master
2023-08-28T22:54:08.501719
2023-08-25T15:11:44
2023-08-25T15:11:44
7,446,094
620
1,191
NOASSERTION
2020-10-16T03:13:28
2013-01-04T19:38:12
Batchfile
UTF-8
C
false
false
1,422
h
Events.h
/** ################################################################### ** Filename : Events.h ** Project : ProcessorExpert ** Processor : MCF51JM128VLK ** Component : Events ** Version : Driver 01.02 ** Compiler : CodeWarrior ColdFireV1 C Compiler ** Date/Time : 2012-01-21, 18:57, # CodeGen: 0 ** Abstract : ** This is user's event module. ** Put your event handler code here. ** Settings : ** Contents : ** No public methods ** ** ###################################################################*/ #ifndef __Events_H #define __Events_H /* MODULE Events */ #include "PE_Types.h" #include "PE_Error.h" #include "PE_Const.h" #include "IO_Map.h" #include "LEDM1.h" #include "Inhr1.h" #include "LEDpin1.h" #include "Inhr2.h" #include "LEDpin2.h" #include "Inhr3.h" #include "LEDpin3.h" #include "Inhr4.h" #include "LEDpin4.h" #include "Inhr5.h" #include "LEDpin5.h" #include "Inhr6.h" #include "LEDpin6.h" #include "Inhr7.h" #include "LEDpin7.h" #include "MCUC1.h" #include "GDisp1.h" #include "WAIT1.h" /* END Events */ #endif /* __Events_H*/ /* ** ################################################################### ** ** This file was created by Processor Expert 5.3 [05.01] ** for the Freescale ColdFireV1 series of microcontrollers. ** ** ################################################################### */
e9b024b68da6d04586f688397753fadc01acc9d5
ebadb390ddc4aadc372cf91511aa3678efa599b5
/arduino_style/arduino/arduino_spi.h
4be1805ffc8ca3a584f490c928ef5106d6674783
[ "Apache-2.0" ]
permissive
OLIMEX/ESP8266
fa73177a5f62ac7e6988d1e7c4e61b8c899f3e34
3be4013892a5ac1b8991e1b922d393b6e0703581
refs/heads/master
2023-08-15T05:57:42.393727
2021-09-02T13:46:22
2021-09-02T13:46:22
30,020,689
318
164
null
2017-10-09T16:19:18
2015-01-29T13:15:11
Eagle
UTF-8
C
false
false
1,789
h
arduino_spi.h
/* * * arduino_spi.h is part of esp_arduino_style. * * esp_arduino_style is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * esp_arduino_style is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with esp_arduino_style. If not, see <http://www.gnu.org/licenses/>. * * Created on: Mar 3, 2015 * Author: Stefan Mavrodiev * Company: Olimex LTD. * Contact: support@olimex.com */ #ifndef USER_ARDUINO_ARDUINO_SPI_H_ #define USER_ARDUINO_ARDUINO_SPI_H_ /* SPI */ typedef enum { SPI_MODE0, SPI_MODE1, SPI_MODE2, SPI_MODE3 } spiMode_t; typedef enum { MSBFIRST = 0, LSBFIRST } spiOrder_t; typedef enum { SPI_CONTINUE, SPI_LAST } spiTransferMode_t; typedef struct { spiOrder_t order; spiMode_t mode; uint16 delay; } spi_config_t; typedef struct { void (*begin)(uint8 pin); void (*setClockDivider)(uint16 predivider, uint8 divider); void (*setDataMode)(spiMode_t mode); void (*setBitOrder)(spiOrder_t order); uint8 (*transfer)(uint8 pin, uint8 val, spiTransferMode_t transferMode); } spi_t; void spi_begin(uint8 pin); void spi_set_clock_divider(uint16 predivider, uint8 divider); void spi_set_data_mode(spiMode_t mode); void spi_set_bit_order(spiOrder_t order); uint8 spi_transfer(uint8 pin, uint8 val, spiTransferMode_t transferMode); extern spi_t spi; #endif /* USER_ARDUINO_ARDUINO_SPI_H_ */
13a2be5ce30cf6c826b60404be46341bb0a93315
aa3befea459382dc5c01c925653d54f435b3fb0f
/fs/nxffs/nxffs.h
a2da06a36e89c8d21f00a8ffffceec805e5e8d38
[ "MIT-open-group", "BSD-3-Clause", "HPND-sell-variant", "BSD-4-Clause-UC", "LicenseRef-scancode-warranty-disclaimer", "MIT-0", "LicenseRef-scancode-bsd-atmel", "LicenseRef-scancode-gary-s-brown", "LicenseRef-scancode-proprietary-license", "SunPro", "MIT", "LicenseRef-scancode-public-domain-disclaimer", "LicenseRef-scancode-other-permissive", "HPND", "ISC", "Apache-2.0", "LicenseRef-scancode-public-domain", "BSD-2-Clause", "GPL-1.0-or-later", "CC-BY-2.0", "CC-BY-4.0" ]
permissive
apache/nuttx
14519a7bff4a87935d94fb8fb2b19edb501c7cec
606b6d9310fb25c7d92c6f95bf61737e3c79fa0f
refs/heads/master
2023-08-25T06:55:45.822534
2023-08-23T16:03:31
2023-08-24T21:25:47
228,103,273
407
241
Apache-2.0
2023-09-14T18:26:05
2019-12-14T23:27:55
C
UTF-8
C
false
false
40,800
h
nxffs.h
/**************************************************************************** * fs/nxffs/nxffs.h * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. The * ASF licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * ****************************************************************************/ #ifndef __FS_NXFFS_NXFFS_H #define __FS_NXFFS_NXFFS_H /**************************************************************************** * Included Files ****************************************************************************/ #include <nuttx/config.h> #include <sys/param.h> #include <sys/types.h> #include <stdint.h> #include <stdbool.h> #include <nuttx/mtd/mtd.h> #include <nuttx/fs/nxffs.h> #include <nuttx/mutex.h> #include <nuttx/semaphore.h> /**************************************************************************** * Pre-processor Definitions ****************************************************************************/ /* NXFFS Definitions ********************************************************/ /* General NXFFS organization. The following example assumes 4 logical * blocks per FLASH erase block. The actual relationship is determined by * the FLASH geometry reported by the MTD driver. * * ERASE LOGICAL Inodes begin with a inode header which may * BLOCK BLOCK CONTENTS be marked as deleted, pending re-packing. 
* n 4*n --+--------------+ * |BBBBBBBBBBBBBB| Logic block header * |IIIIIIIIIIIIII| Inodes begin with a inode header * |DDDDDDDDDDDDDD| Data block containing inode data block * | (Inode Data) | * 4*n+1 --+--------------+ * |BBBBBBBBBBBBBB| Logic block header * |DDDDDDDDDDDDDD| Inodes may consist multiple data blocks * | (Inode Data) | * |IIIIIIIIIIIIII| Next inode header * | | Possibly unused bytes at the end of block * 4*n+2 --+--------------+ * |BBBBBBBBBBBBBB| Logic block header * |DDDDDDDDDDDDDD| * | (Inode Data) | * 4*n+3 --+--------------+ * |BBBBBBBBBBBBBB| Logic block header * |IIIIIIIIIIIIII| Next inode header * |DDDDDDDDDDDDDD| * | (Inode Data) | * n+1 4*(n+1) --+--------------+ * |BBBBBBBBBBBBBB| Logic block header * | | All FLASH is unused after the end of the * | | final inode. * --+--------------+ * * General operation: * Inodes are written starting at the beginning of FLASH. As inodes are * deleted, they are marked as deleted but not removed. As new inodes are * written, allocations proceed to toward the end of the FLASH -- thus, * supporting wear leveling by using all FLASH blocks equally. * * When the FLASH becomes full (no more space at the end of the FLASH), a * re-packing operation must be performed: All inodes marked deleted are * finally removed and the remaining inodes are packed at the beginning of * the FLASH. Allocations then continue at the freed FLASH memory at the * end of the FLASH. * * BLOCK HEADER: * The block header is used to determine if the block has every been * formatted and also indicates bad blocks which should never be used. * * INODE HEADER: * Each inode begins with an inode header that contains, among other * things, the name of the inode, the offset to the first data block, * and the length of the inode data. * * At present, the only kind of inode support is a file. So for now, the * term file and inode are interchangeable. * * INODE DATA HEADER: * Inode data is enclosed in a data header. 
For a given inode, there * is at most one inode data block per logical block. If the inode data * spans more than one logical block, then the inode data may be enclosed * in multiple data blocks, one per logical block. * * NXFFS Limitations: * 1. Since the files are contiguous in FLASH and since allocations always * proceed toward the end of the FLASH, there can only be one file opened * for writing at a time. Multiple files may be opened for reading. * 2. Files may not be increased in size after they have been closed. The * O_APPEND open flag is not supported. * 3. Files are always written sequential. Seeking within a file opened for * writing will not work. * 4. There are no directories, however, '/' may be used within a file name * string providing some illusion of directories. * 5. Files may be opened for reading or for writing, but not both: The * O_RDWR open flag is not supported. * 6. The re-packing process occurs only during a write when the free FLASH * memory at the end of the FLASH is exhausted. Thus, occasionally, file * writing may take a long time. * 7. Another limitation is that there can be only a single NXFFS volume * mounted at any time. This has to do with the fact that we bind to * an MTD driver (instead of a block driver) and bypass all of the normal * mount operations. */ /* Values for logical block state. Basically, there are only two, perhaps * three, states: * * BLOCK_STATE_GOOD - The block is not known to be bad. * BLOCK_STATE_BAD - An error was found on the block and it is marked bad. * Other values - The block is bad and has an invalid state. * * Care is taken so that the GOOD to BAD transition only involves burning * bits from the erased to non-erased state. */ #define BLOCK_STATE_GOOD (CONFIG_NXFFS_ERASEDSTATE ^ 0x44) #define BLOCK_STATE_BAD (CONFIG_NXFFS_ERASEDSTATE ^ 0x55) /* Values for NXFFS inode state. 
Similar there are 2 (maybe 3) inode states: * * INODE_STATE_FILE - The inode is a valid usable, file * INODE_STATE_DELETED - The inode has been deleted. * Other values - The inode is bad and has an invalid state. * * Care is taken so that the VALID to DELETED transition only involves * burning bits from the erased to non-erased state. */ #define INODE_STATE_FILE (CONFIG_NXFFS_ERASEDSTATE ^ 0x22) #define INODE_STATE_DELETED (CONFIG_NXFFS_ERASEDSTATE ^ 0xaa) /* Number of bytes in an the NXFFS magic sequences */ #define NXFFS_MAGICSIZE 4 /* When we allocate FLASH for a new inode data block, we will require that * space is available to hold this minimum number of data bytes in addition * to the size of the data block headeer. */ #define NXFFS_MINDATA 16 /* Internal definitions *****************************************************/ /* If we encounter this number of erased bytes, we assume that all of the * flash beyond this point is erased. */ #define NXFFS_NERASED 128 /**************************************************************************** * Public Types ****************************************************************************/ /* This structure defines each packed block on the FLASH media */ struct nxffs_block_s { uint8_t magic[4]; /* 0-3: Magic number for valid block */ uint8_t state; /* 4: Block state: See BLOCK_STATE_* */ }; #define SIZEOF_NXFFS_BLOCK_HDR 5 /* This structure defines each packed NXFFS inode header on the FLASH media */ struct nxffs_inode_s { uint8_t magic[4]; /* 0-3: Magic number for valid inode */ uint8_t state; /* 4: Inode state: See INODE_STATE_* */ uint8_t namlen; /* 5: Length of the inode name */ uint8_t noffs[4]; /* 6-9: FLASH offset to the file name */ uint8_t doffs[4]; /* 10-13: FLASH offset to the first data block */ uint8_t utc[4]; /* 14-17: Creation time */ uint8_t crc[4]; /* 18-21: CRC32 */ uint8_t datlen[4]; /* 22-25: Length of data in bytes */ }; #define SIZEOF_NXFFS_INODE_HDR 26 /* This structure defines each packed NXFFS data 
header on the FLASH media */ struct nxffs_data_s { uint8_t magic[4]; /* 0-3: Magic number for valid data */ uint8_t crc[4]; /* 4-7: CRC32 */ uint8_t datlen[2]; /* 8-9: Length of data in bytes */ }; #define SIZEOF_NXFFS_DATA_HDR 10 /* This is an in-memory representation of the NXFFS inode as extracted from * FLASH and with additional state information. */ struct nxffs_entry_s { off_t hoffset; /* FLASH offset to the inode header */ off_t noffset; /* FLASH offset to the inode name */ off_t doffset; /* FLASH offset to the first data header */ FAR char *name; /* inode name */ uint32_t utc; /* Time stamp */ uint32_t datlen; /* Length of inode data */ }; /* This structure describes int in-memory representation of the data block */ struct nxffs_blkentry_s { off_t hoffset; /* Offset to the block data header */ uint16_t datlen; /* Length of data following the header */ uint16_t foffset; /* Offset to start of data */ }; /* This structure describes the state of one open file. This structure * is protected by the volume semaphore. */ struct nxffs_ofile_s { struct nxffs_ofile_s *flink; /* Supports a singly linked list */ int16_t crefs; /* Reference count */ mode_t oflags; /* Open mode */ struct nxffs_entry_s entry; /* Describes the NXFFS inode entry */ }; /* A file opened for writing require some additional information */ struct nxffs_wrfile_s { /* The following fields provide the common open file information. */ struct nxffs_ofile_s ofile; /* The following fields are required to support the write operation */ bool truncate; /* Delete a file of the same name */ uint16_t datlen; /* Number of bytes written in data block */ off_t doffset; /* FLASH offset to the current data header */ uint32_t crc; /* Accumulated data block CRC */ }; /* This structure represents the overall state of on NXFFS instance. 
*/ struct nxffs_volume_s { FAR struct mtd_dev_s *mtd; /* Supports FLASH access */ mutex_t lock; /* Used to assure thread-safe access */ sem_t wrsem; /* Enforces single writer restriction */ struct mtd_geometry_s geo; /* Device geometry */ uint8_t blkper; /* R/W blocks per erase block */ uint16_t iooffset; /* Next offset in read/write access (in ioblock) */ off_t inoffset; /* Offset to the first valid inode header */ off_t froffset; /* Offset to the first free byte */ off_t nblocks; /* Number of R/W blocks on volume */ off_t ioblock; /* Current block number being accessed */ off_t cblock; /* Starting block number in cache */ FAR struct nxffs_ofile_s *ofiles; /* A singly-linked list of open files */ FAR uint8_t *cache; /* On cached erase block for general I/O */ FAR uint8_t *pack; /* A full erase block to support packing */ }; /* This structure describes the state of the blocks on the NXFFS volume */ struct nxffs_blkstats_s { off_t nblocks; /* Total number of FLASH blocks */ off_t ngood; /* Number of good FLASH blocks found */ off_t nbad; /* Number of well-formatted FLASH blocks marked as bad */ off_t nunformat; /* Number of unformatted FLASH blocks */ off_t ncorrupt; /* Number of blocks with corrupted format info */ off_t nbadread; /* Number of blocks that could not be read */ }; /**************************************************************************** * Public Data ****************************************************************************/ /* The magic number that appears that the beginning of each NXFFS (logical) * block */ extern const uint8_t g_blockmagic[NXFFS_MAGICSIZE]; /* The magic number that appears that the beginning of each NXFFS inode */ extern const uint8_t g_inodemagic[NXFFS_MAGICSIZE]; /* The magic number that appears that the beginning of each NXFFS inode * data block. */ extern const uint8_t g_datamagic[NXFFS_MAGICSIZE]; /* If CONFIG_NXFFS_PREALLOCATED is defined, then this is the single, pre- * allocated NXFFS volume instance. 
*/ #ifdef CONFIG_NXFFS_PREALLOCATED extern struct nxffs_volume_s g_volume; #endif /**************************************************************************** * Public Function Prototypes ****************************************************************************/ /**************************************************************************** * Name: nxffs_limits * * Description: * Recalculate file system limits: (1) the FLASH offset to the first, * valid inode, and (2) the FLASH offset to the first, unused byte after * the last inode (invalid or not). * * The first, lower limit must be recalculated: (1) initially, (2) * whenever the first inode is deleted, or (3) whenever inode is moved * as part of the file system packing operation. * * The second, upper limit must be (1) incremented whenever new file * data is written, or (2) recalculated as part of the file system packing * operation. * * Input Parameters: * volume - Identifies the NXFFS volume * * Returned Value: * Zero on success. Otherwise, a negated error is returned indicating the * nature of the failure. * * Defined in nxffs_initialize.c * ****************************************************************************/ int nxffs_limits(FAR struct nxffs_volume_s *volume); /**************************************************************************** * Name: nxffs_rdle16 * * Description: * Get a (possibly unaligned) 16-bit little endian value. * * Input Parameters: * val - A pointer to the first byte of the little endian value. * * Returned Value: * A uint16_t representing the whole 16-bit integer value * * Defined in nxffs_util.c * ****************************************************************************/ uint16_t nxffs_rdle16(FAR const uint8_t *val); /**************************************************************************** * Name: nxffs_wrle16 * * Description: * Put a (possibly unaligned) 16-bit little endian value. 
* * Input Parameters: * dest - A pointer to the first byte to save the little endian value. * val - The 16-bit value to be saved. * * Returned Value: * None * * Defined in nxffs_util.c * ****************************************************************************/ void nxffs_wrle16(uint8_t *dest, uint16_t val); /**************************************************************************** * Name: nxffs_rdle32 * * Description: * Get a (possibly unaligned) 32-bit little endian value. * * Input Parameters: * val - A pointer to the first byte of the little endian value. * * Returned Value: * A uint32_t representing the whole 32-bit integer value * * Defined in nxffs_util.c * ****************************************************************************/ uint32_t nxffs_rdle32(FAR const uint8_t *val); /**************************************************************************** * Name: nxffs_wrle32 * * Description: * Put a (possibly unaligned) 32-bit little endian value. * * Input Parameters: * dest - A pointer to the first byte to save the little endian value. * val - The 32-bit value to be saved. * * Returned Value: * None * * Defined in nxffs_util.c * ****************************************************************************/ void nxffs_wrle32(uint8_t *dest, uint32_t val); /**************************************************************************** * Name: nxffs_erased * * Description: * Check if a block of memory is in the erased state. * * Input Parameters: * buffer - Address of the start of the memory to check. * buflen - The number of bytes to check. * * Returned Value: * The number of erased bytes found at the beginning of the memory region. 
* * Defined in nxffs_util.c * ****************************************************************************/ size_t nxffs_erased(FAR const uint8_t *buffer, size_t buflen); /**************************************************************************** * Name: nxffs_rdcache * * Description: * Read one I/O block into the volume cache memory. * * Input Parameters: * volume - Describes the current volume * block - The first logical block to read * * Returned Value: * Negated errnos are returned only in the case of MTD reported failures. * Nothing in the volume data itself will generate errors. * * Defined in nxffs_cache.c * ****************************************************************************/ int nxffs_rdcache(FAR struct nxffs_volume_s *volume, off_t block); /**************************************************************************** * Name: nxffs_wrcache * * Description: * Write one or more logical blocks from the volume cache memory. * * Input Parameters: * volume - Describes the current volume * * Returned Value: * Negated errnos are returned only in the case of MTD reported failures. * * Defined in nxffs_cache.c * ****************************************************************************/ int nxffs_wrcache(FAR struct nxffs_volume_s *volume); /**************************************************************************** * Name: nxffs_ioseek * * Description: * Seek to a position in FLASH memory. This simply sets up the offsets * and pointer values. This is a necessary step prior to using * nxffs_getc(). * * Input Parameters: * volume - Describes the NXFFS volume * offset - The physical offset in bytes from the beginning of the FLASH * in bytes. 
* * Defined in nxffs_cache.c * ****************************************************************************/ void nxffs_ioseek(FAR struct nxffs_volume_s *volume, off_t offset); /**************************************************************************** * Name: nxffs_iotell * * Description: * Report the current position. * * Input Parameters: * volume - Describes the NXFFS volume * * Returned Value: * The offset from the beginning of FLASH to the current seek position. * * Defined in nxffs_cache.c * ****************************************************************************/ off_t nxffs_iotell(FAR struct nxffs_volume_s *volume); /**************************************************************************** * Name: nxffs_getc * * Description: * Get the next byte from FLASH. This function allows the data in the * formatted FLASH blocks to be read as a continuous byte stream, skipping * over bad blocks and block headers as necessary. * * Input Parameters: * volume - Describes the NXFFS volume. The parameters ioblock and iooffset * in the volume structure determine the behavior of nxffs_getc(). * reserve - If less than this much space is available at the end of the * block, then skip to the next block. * * Returned Value: * Zero is returned on success. Otherwise, a negated errno indicating the * nature of the failure. * * Defined in nxffs_cache.c * ****************************************************************************/ int nxffs_getc(FAR struct nxffs_volume_s *volume, uint16_t reserve); /**************************************************************************** * Name: nxffs_freeentry * * Description: * The inode values returned by nxffs_nextentry() include allocated memory * (specifically, the file name string). This function should be called * to dispose of that memory when the inode entry is no longer needed. * * Note that the nxffs_entry_s containing structure is not freed. 
The * caller may call kmm_free upon return of this function if necessary to * free the entry container. * * Input Parameters: * entry - The entry to be freed. * * Returned Value: * None * * Defined in nxffs_inode.c * ****************************************************************************/ void nxffs_freeentry(FAR struct nxffs_entry_s *entry); /**************************************************************************** * Name: nxffs_nextentry * * Description: * Search for the next valid inode starting at the provided FLASH offset. * * Input Parameters: * volume - Describes the NXFFS volume. * offset - The FLASH memory offset to begin searching. * entry - A pointer to memory provided by the caller in which to return * the inode description. * * Returned Value: * Zero is returned on success. Otherwise, a negated errno is returned * that indicates the nature of the failure. * * Defined in nxffs_inode.c * ****************************************************************************/ int nxffs_nextentry(FAR struct nxffs_volume_s *volume, off_t offset, FAR struct nxffs_entry_s *entry); /**************************************************************************** * Name: nxffs_findinode * * Description: * Search for an inode with the provided name starting with the first * valid inode and proceeding to the end FLASH or until the matching * inode is found. * * Input Parameters: * volume - Describes the NXFFS volume * name - The name of the inode to find * entry - The location to return information about the inode. * * Returned Value: * Zero is returned on success. Otherwise, a negated errno is returned * that indicates the nature of the failure. 
* * Defined in nxffs_inode.c * ****************************************************************************/ int nxffs_findinode(FAR struct nxffs_volume_s *volume, FAR const char *name, FAR struct nxffs_entry_s *entry); /**************************************************************************** * Name: nxffs_inodeend * * Description: * Return an *approximiate* FLASH offset to end of the inode data. The * returned value is guaranteed to be be less then or equal to the offset * of the thing-of-interest in FLASH. Parsing for interesting things * can begin at that point. * * Assumption: The inode header has been verified by the caller and is * known to contain valid data. * * Input Parameters: * volume - Describes the NXFFS volume * entry - Describes the inode. * * Returned Value: * A FLASH offset to the (approximate) end of the inode data. No errors * are detected. * * Defined in nxffs_inode.c * ****************************************************************************/ off_t nxffs_inodeend(FAR struct nxffs_volume_s *volume, FAR struct nxffs_entry_s *entry); /**************************************************************************** * Name: nxffs_verifyblock * * Description: * Assure that the provided (logical) block number is in the block cache * and that it has a valid block header (i.e., proper magic and * marked good) * * Input Parameters: * volume - Describes the NXFFS volume * block - The (logical) block number to load and verify. * * Returned Value: * OK (zero( is returned on success. Otherwise, a negated errno value is * returned indicating the nature of the failure: * * -EIO is returned if we failed to read the block. If we are using * NAND memory, then this probably means that the block has * uncorrectable bit errors. * -ENOENT is returned if the block is a bad block. 
* * Defined in nxffs_block.c * ****************************************************************************/ int nxffs_verifyblock(FAR struct nxffs_volume_s *volume, off_t block); /**************************************************************************** * Name: nxffs_validblock * * Description: * Find the next valid (logical) block in the volume. * * Input Parameters: * volume - Describes the NXFFS volume * block - On entry, this provides the starting block number. If the * function is succesfful, then this memory location will hold the * block number of the next valid block on return. * * Returned Value: * Zero on success otherwise a negated errno value indicating the nature * of the failure. * * Defined in nxffs_block.c * ****************************************************************************/ int nxffs_validblock(struct nxffs_volume_s *volume, off_t *block); /**************************************************************************** * Name: nxffs_blockstats * * Description: * Analyze the NXFFS volume. This operation must be performed when the * volume is first mounted in order to detect if the volume has been * formatted and contains a usable NXFFS file system. * * Input Parameters: * volume - Describes the current NXFFS volume. * stats - On return, will hold nformation describing the state of the * volume. * * Returned Value: * Negated errnos are returned only in the case of MTD reported failures. * Nothing in the volume data itself will generate errors. * * Defined in nxffs_blockstats.c * ****************************************************************************/ int nxffs_blockstats(FAR struct nxffs_volume_s *volume, FAR struct nxffs_blkstats_s *stats); /**************************************************************************** * Name: nxffs_reformat * * Description: * Erase and reformat the entire volume. Verify each block and mark * improperly erased blocks as bad. * * Input Parameters: * volume - Describes the NXFFS volume to be reformatted. 
 *
 * Returned Value:
 *   Zero on success or a negated errno on a failure.  Failures will be
 *   returned in the case of MTD reported failures only.
 *   Nothing in the volume data itself will generate errors.
 *
 * Defined in nxffs_reformat.c
 *
 ****************************************************************************/

int nxffs_reformat(FAR struct nxffs_volume_s *volume);

/****************************************************************************
 * Name: nxffs_blkinit
 *
 * Description:
 *   Initialize an NXFFS block to the erased state with the specified block
 *   status.
 *
 * Input Parameters:
 *   volume - Describes the NXFFS volume (needed for the blocksize).
 *   blkptr - Pointer to the logic block to initialize.
 *   state  - Either BLOCK_STATE_GOOD or BLOCK_STATE_BAD.
 *
 * Returned Value:
 *   None.
 *
 ****************************************************************************/

void nxffs_blkinit(FAR struct nxffs_volume_s *volume, FAR uint8_t *blkptr,
                   uint8_t state);

/****************************************************************************
 * Name: nxffs_findofile
 *
 * Description:
 *   Search the list of already opened files to see if the inode of this
 *   name is one of the opened files.
 *
 * Input Parameters:
 *   volume - Describes the NXFFS volume.
 *   name   - The name of the inode to check.
 *
 * Returned Value:
 *   If an inode of this name is found in the list of opened inodes, then
 *   a reference to the open file structure is returned.  NULL is returned
 *   otherwise.
 *
 * Defined in nxffs_open.c
 *
 ****************************************************************************/

FAR struct nxffs_ofile_s *nxffs_findofile(FAR struct nxffs_volume_s *volume,
                                          FAR const char *name);

/****************************************************************************
 * Name: nxffs_findwriter
 *
 * Description:
 *   Search the list of already opened files and return the open file
 *   instance for the write.
 *
 * Input Parameters:
 *   volume - Describes the NXFFS volume.
* * Returned Value: * If there is an active writer of the volume, its open file instance is * returned. NULL is returned otherwise. * * Defined in nxffs_open.c * ****************************************************************************/ FAR struct nxffs_wrfile_s * nxffs_findwriter(FAR struct nxffs_volume_s *volume); /**************************************************************************** * Name: nxffs_wrinode * * Description: * Write the inode header (only to FLASH. This is done in two contexts: * * 1. When an inode is closed, or * 2. As part of the file system packing logic when an inode is moved. * * Note that in either case, the inode name has already been written to * FLASH. * * Input Parameters: * volume - Describes the NXFFS volume * entry - Describes the inode header to write * * Returned Value: * Zero is returned on success; Otherwise, a negated errno value is * returned indicating the nature of the failure. * * Defined in nxffs_open.c * ****************************************************************************/ int nxffs_wrinode(FAR struct nxffs_volume_s *volume, FAR struct nxffs_entry_s *entry); /**************************************************************************** * Name: nxffs_updateinode * * Description: * The packing logic has moved an inode. Check if any open files are using * this inode and, if so, move the data in the open file structure as well. * * Input Parameters: * volume - Describes the NXFFS volume * entry - Describes the new inode entry * * Returned Value: * Zero is returned on success; Otherwise, a negated errno value is * returned indicating the nature of the failure. * ****************************************************************************/ int nxffs_updateinode(FAR struct nxffs_volume_s *volume, FAR struct nxffs_entry_s *entry); /**************************************************************************** * Name: nxffs_wrextend * * Description: * Zero-extend a file. 
* * Input Parameters: * volume - Describes the NXFFS volume * entry - Describes the new inode entry * length - The new, extended length of the file * * Assumptions: * The caller holds the NXFFS semaphore. * The caller has verified that the file is writable. * ****************************************************************************/ #ifdef __NO_TRUNCATE_SUPPORT__ int nxffs_wrextend(FAR struct nxffs_volume_s *volume, FAR struct nxffs_wrfile_s *wrfile, off_t length); #endif /**************************************************************************** * Name: nxffs_wrreserve * * Description: * Find a valid location for a file system object of 'size'. A valid * location will have these properties: * * 1. It will lie in the free flash region. * 2. It will have enough contiguous memory to hold the entire object * 3. The memory at this location will be fully erased. * * This function will only perform the checks of 1) and 2). The * end-of-filesystem offset, froffset, is update past this memory which, * in effect, reserves the memory. * * Input Parameters: * volume - Describes the NXFFS volume * size - The size of the object to be reserved. * * Returned Value: * Zero is returned on success. Otherwise, a negated errno value is * returned indicating the nature of the failure. Of special interest * the return error of -ENOSPC which means that the FLASH volume is * full and should be repacked. * * On successful return the following are also valid: * * volume->ioblock - Read/write block number of the block containing the * candidate object position * volume->iooffset - The offset in the block to the candidate object * position. * volume->froffset - Updated offset to the first free FLASH block after * the reserved memory. 
* * Defined in nxffs_write.c * ****************************************************************************/ int nxffs_wrreserve(FAR struct nxffs_volume_s *volume, size_t size); /**************************************************************************** * Name: nxffs_wrverify * * Description: * Find a valid location for the object. A valid location will have * these properties: * * 1. It will lie in the free flash region. * 2. It will have enough contiguous memory to hold the entire header * (excluding the file name which may lie in the next block). * 3. The memory at this location will be fully erased. * * This function will only perform the check 3). On entry it assumes the * following settings (left by nxffs_wrreserve()): * * volume->ioblock - Read/write block number of the block containing the * candidate object position * volume->iooffset - The offset in the block to the candidate object * position. * * Input Parameters: * volume - Describes the NXFFS volume * size - The size of the object to be verified. * * Returned Value: * Zero is returned on success. Otherwise, a negated errno value is * returned indicating the nature of the failure. Of special interest * the return error of -ENOSPC which means that the FLASH volume is * full and should be repacked. * * On successful return the following are also valid: * * volume->ioblock - Read/write block number of the block containing the * verified object position * volume->iooffset - The offset in the block to the verified object * position. * volume->froffset - Updated offset to the first free FLASH block. * * Defined in nxffs_write.c * ****************************************************************************/ int nxffs_wrverify(FAR struct nxffs_volume_s *volume, size_t size); /**************************************************************************** * Name: nxffs_wrblkhdr * * Description: * Write the block header information. 
This is done (1) whenever the end- * block is encountered and (2) also when the file is closed in order to * flush the final block of data to FLASH. * * Input Parameters: * volume - Describes the state of the NXFFS volume * wrfile - Describes the state of the open file * * Returned Value: * Zero is returned on success; Otherwise, a negated errno value is * returned to indicate the nature of the failure. * * Defined in nxffs_write.c * ****************************************************************************/ int nxffs_wrblkhdr(FAR struct nxffs_volume_s *volume, FAR struct nxffs_wrfile_s *wrfile); /**************************************************************************** * Name: nxffs_nextblock * * Description: * Search for the next valid data block starting at the provided * FLASH offset. * * Input Parameters: * volume - Describes the NXFFS volume. * datlen - A memory location to return the data block length. * * Returned Value: * Zero is returned on success. Otherwise, a negated errno is returned * that indicates the nature of the failure. * * Defined in nxffs_read.c * ****************************************************************************/ int nxffs_nextblock(FAR struct nxffs_volume_s *volume, off_t offset, FAR struct nxffs_blkentry_s *blkentry); /**************************************************************************** * Name: nxffs_rdblkhdr * * Description: * Read and verify the data block header at the specified offset. * * Input Parameters: * volume - Describes the current volume. * offset - The byte offset from the beginning of FLASH where the data * block header is expected. * datlen - A memory location to return the data block length. * * Returned Value: * Zero on success. Otherwise, a negated errno value is returned * indicating the nature of the failure. 
* * Defined in nxffs_read.c * ****************************************************************************/ int nxffs_rdblkhdr(FAR struct nxffs_volume_s *volume, off_t offset, FAR uint16_t *datlen); /**************************************************************************** * Name: nxffs_rminode * * Description: * Remove an inode from FLASH. This is the internal implementation of * the file system unlink operation. * * Input Parameters: * volume - Describes the NXFFS volume. * name - the name of the inode to be deleted. * * Returned Value: * Zero is returned if the inode is successfully deleted. Otherwise, a * negated errno value is returned indicating the nature of the failure. * ****************************************************************************/ int nxffs_rminode(FAR struct nxffs_volume_s *volume, FAR const char *name); /**************************************************************************** * Name: nxffs_pack * * Description: * Pack and re-write the filesystem in order to free up memory at the end * of FLASH. * * Input Parameters: * volume - The volume to be packed. * * Returned Value: * Zero on success; Otherwise, a negated errno value is returned to * indicate the nature of the failure. 
* ****************************************************************************/ int nxffs_pack(FAR struct nxffs_volume_s *volume); /**************************************************************************** * Standard mountpoint operation methods * * Description: * See include/nuttx/fs/fs.h * * - nxffs_open() and nxffs_close() are defined in nxffs_open.c * - nxffs_read() is defined in nxffs_read.c * - nxffs_write() is defined in nxffs_write.c * - nxffs_ioctl() is defined in nxffs_ioctl.c * - nxffs_dup() is defined in nxffs_open.c * - nxffs_opendir(), nxffs_readdir(), and nxffs_rewindir() are defined in * nxffs_dirent.c * - nxffs_bind() and nxffs_unbind() are defined in nxffs_initialize.c * - nxffs_stat() and nxffs_statfs() are defined in nxffs_stat.c * - nxffs_unlink() is defined nxffs_unlink.c * ****************************************************************************/ struct file; /* Forward references */ struct inode; struct fs_dirent_s; struct statfs; struct stat; int nxffs_open(FAR struct file *filep, FAR const char *relpath, int oflags, mode_t mode); int nxffs_close(FAR struct file *filep); ssize_t nxffs_read(FAR struct file *filep, FAR char *buffer, size_t buflen); ssize_t nxffs_write(FAR struct file *filep, FAR const char *buffer, size_t buflen); int nxffs_ioctl(FAR struct file *filep, int cmd, unsigned long arg); int nxffs_dup(FAR const struct file *oldp, FAR struct file *newp); int nxffs_fstat(FAR const struct file *filep, FAR struct stat *buf); #ifdef __NO_TRUNCATE_SUPPORT__ int nxffs_truncate(FAR struct file *filep, off_t length); #endif int nxffs_opendir(FAR struct inode *mountpt, FAR const char *relpath, FAR struct fs_dirent_s **dir); int nxffs_closedir(FAR struct inode *mountpt, FAR struct fs_dirent_s *dir); int nxffs_readdir(FAR struct inode *mountpt, FAR struct fs_dirent_s *dir, FAR struct dirent *entry); int nxffs_rewinddir(FAR struct inode *mountpt, FAR struct fs_dirent_s *dir); int nxffs_bind(FAR struct inode *blkdriver, FAR const void 
*data, FAR void **handle); int nxffs_unbind(FAR void *handle, FAR struct inode **blkdriver, unsigned int flags); int nxffs_statfs(FAR struct inode *mountpt, FAR struct statfs *buf); int nxffs_stat(FAR struct inode *mountpt, FAR const char *relpath, FAR struct stat *buf); int nxffs_unlink(FAR struct inode *mountpt, FAR const char *relpath); #endif /* __FS_NXFFS_NXFFS_H */
2d1b6fc45ac410d67edb971a96edca99b92d1d41
05819963250c2ae0ba59ffef48d7c99a5b6b7cfd
/examples/low_power_mode/event-config.c
b2c6d491c3856a8f41c19edb36e222741be9f989
[ "LicenseRef-scancode-bsd-atmel" ]
permissive
atmelcorp/atmel-software-package
cefa3213069995d453d3b47b8b3aa7a7aca683ac
e0428c7c8175a42a2460cff27bb0501db0bbe160
refs/heads/master
2023-04-13T16:34:56.181081
2023-04-06T17:30:10
2023-04-11T06:05:12
47,840,424
117
94
NOASSERTION
2022-10-20T03:07:15
2015-12-11T17:18:56
C
UTF-8
C
false
false
6,098
c
event-config.c
/* ---------------------------------------------------------------------------- * SAM Software Package License * ---------------------------------------------------------------------------- * Copyright (c) 2016, Atmel Corporation * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the disclaimer below. * * Atmel's name may not be used to endorse or promote products derived from * this software without specific prior written permission. * * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ---------------------------------------------------------------------------- */ /*---------------------------------------------------------------------------- * Headers *----------------------------------------------------------------------------*/ #include "assert.h" #include <stdio.h> #include "board.h" #include "irq/irq.h" #include "gpio/pio.h" #include "peripherals/pmc.h" #include "peripherals/rtc.h" #include "event-config.h" /*---------------------------------------------------------------------------- * Variables *----------------------------------------------------------------------------*/ #ifdef PINS_PUSHBUTTONS /* Pushbutton \#1 pin instance. */ static const struct _pin button_pins[] = PINS_PUSHBUTTONS; #endif const char *event_menu[] = { "user_btn: Use PB_USER button to wake up", "rtc_int: Auto wakeup by RTC interrupt after 3 second", "rtc_alarm: Auto wakeup by RTC alarm after 3 second", "wake_up: Use WKUP button to wake up", NULL, }; /*---------------------------------------------------------------------------- * Local functions *---------------------------------------------------------------------------- */ /** * \brief Handler for Buttons rising edge interrupt. * * Handle process led1 status change. */ #ifdef PINS_PUSHBUTTONS static void _pio_handler(uint32_t group, uint32_t status, void *user_arg) { /* unused */ (void)group; (void)user_arg; /* Handle process led1 status change: to do */ } #endif /** * \brief Interrupt handler for the RTC. Refreshes the display. 
*/ static void _rtc_handler(uint32_t source, void* user_arg) { uint32_t dwStatus = rtc_get_sr(0xffffffff); assert(source == ID_RTC); /* Time or date alarm */ if ((dwStatus & RTC_SR_ALARM) == RTC_SR_ALARM) { /* Disable RTC interrupt */ rtc_disable_it(RTC_IDR_ALRDIS); rtc_clear_sccr(RTC_SCCR_ALRCLR); } } /** * \brief Configure rtc event * wakup_in_seconds set wait time * enable true: enable rtc irq; false: disable rtc irq */ static void _start_rtc_timer_for_wakeup(unsigned int wakup_in_seconds, bool enable) { struct _time new_time; rtc_disable_it(RTC_IER_SECEN | RTC_IER_ALREN); /* Default RTC configuration */ /* 24-hour mode */ rtc_set_hour_mode(RTC_HOUR_MODE_24); struct _time empty_time = {0, 0, 0}; if (rtc_set_time_alarm(&empty_time)) printf("\r\n Disable time alarm fail!"); struct _date empty_date = {0, 0, 0}; if (rtc_set_date_alarm(&empty_date)) printf("\r\n Disable date alarm fail!"); new_time.hour = 0; new_time.min = 0; new_time.sec = 0; rtc_set_time(&new_time); /* Configure RTC interrupts */ if (enable == true) { rtc_enable_it(RTC_IER_ALREN); irq_add_handler(ID_RTC, _rtc_handler, NULL); irq_enable(ID_RTC); } new_time.hour = 0; new_time.min = 0; new_time.sec = wakup_in_seconds; rtc_set_time_alarm(&new_time); } /** * \brief Configure the Pushbuttons * * Configure the PIO as inputs and generate corresponding interrupt when * pressed or released. */ static void _configure_buttons(void) { #ifdef PINS_PUSHBUTTONS int i; /* Adjust pio debounce filter parameters, uses 10 Hz filter. */ pio_set_debounce_filter(10); for (i = 0; i < ARRAY_SIZE(button_pins); ++i) { /* Configure pios as inputs. */ pio_configure(&button_pins[i], 1); /* Check handler in group before add it */ if (!pio_check_handler_in_group(button_pins[i].group, button_pins[i].mask, _pio_handler, NULL)) { /* Initialize pios interrupt with its handlers, see */ /* PIO definition in board.h. 
*/ pio_add_handler_to_group(button_pins[i].group, button_pins[i].mask, _pio_handler, NULL); } /* Enable PIO line interrupts. */ pio_enable_it(button_pins); } #endif } /*---------------------------------------------------------------------------- * Exported function *----------------------------------------------------------------------------*/ void wakeup_event(uint32_t event){ /* set wake up event */ switch (event) { case USER_BTN: _configure_buttons(); break; case RTC_INT: /* set RTC interrupt */ _start_rtc_timer_for_wakeup(3, true); break; case RTC_ALARM: /* set RTC alarm */ _start_rtc_timer_for_wakeup(3, false); #if defined(PMC_FSMR_LPM) && defined(PMC_FSMR_RTCAL) /* config wake up sources */ pmc_set_fast_startup_mode(PMC_FSMR_RTCAL | PMC_FSMR_LPM); #elif defined(PMC_FSMR_RTCAL) pmc_set_fast_startup_mode(PMC_FSMR_RTCAL); #endif break; case WAKE_UP: /* set WKUP0 pin */ #ifdef PMC_FSMR_LPM /* config wake up sources */ pmc_set_fast_startup_mode(PMC_FSMR_FSTT0 | PMC_FSMR_LPM); #endif break; default: break; } }
5ad21b734e4597839e615218f049655ce54d3fb3
65089dbc386e1184983c15fe3a2282763ae65960
/gear-lib/libhomekit/streaming_session.h
962ac3920ccf13f9aea2efb7e249ad202c1630c4
[ "MIT" ]
permissive
gozfree/gear-lib
9f4db1bce799ded1cf1f3411cb51bdfbcbe7c7bc
bffbfd25af4ff7b04ebfafdab391b55270b0273e
refs/heads/master
2023-08-14T16:01:29.449910
2023-07-24T16:08:47
2023-07-24T16:09:41
40,359,871
1,771
488
MIT
2023-05-27T11:08:46
2015-08-07T12:41:05
C
UTF-8
C
false
false
704
h
streaming_session.h
#pragma once #include <sys/socket.h> #include "camera_session.h" typedef struct { uint8_t key[16]; uint8_t salt[14]; uint8_t auth[20]; } srtp_keys_t; typedef struct _streaming_session { struct sockaddr_in controller_addr; bool started; bool failed; uint32_t timestamp; uint16_t sequence; int sequence_largest; uint32_t rtcp_index; uint32_t roc; uint8_t buffered_nals; uint8_t *video_buffer; uint8_t *video_buffer_ptr; srtp_keys_t video_rtp; srtp_keys_t video_rtcp; /* srtp_keys_t audio_rtp; srtp_keys_t audio_rtcp; */ camera_session_t *settings; struct _streaming_session *next; } streaming_session_t;
65bac606710664257da7659468b0297ca4f3d9aa
54a60696114ae0fc8233baf0111f3b0cf72be5b9
/inform6/Tests/Assistants/dumb-frotz/redirect.c
fc92b418de9d34a4e0c984c53e94c06976674d71
[ "Artistic-2.0", "LicenseRef-scancode-free-unknown", "Glulxe" ]
permissive
ganelson/inform
de89a3df0fa5b40660707a7c66853cf5b066b1c9
56be55c4879b133a37fcd55fcd2452868a881551
refs/heads/master
2023-09-01T01:08:43.133920
2023-08-31T23:06:22
2023-08-31T23:06:22
169,170,146
1,130
63
Artistic-2.0
2023-09-12T09:56:18
2019-02-05T00:18:15
C
UTF-8
C
false
false
2,546
c
redirect.c
/*
 * redirect.c
 *
 * Output redirection to Z-machine memory.
 *
 * The Z-machine's output stream #3 redirects text into a table in game
 * memory: the first word of the table receives the character count and
 * the ZSCII characters follow.  Redirections may nest up to MAX_NESTING
 * levels deep.
 */

#include "frotz.h"

#define MAX_NESTING 16

extern zword get_max_width (zword);

/* Current nesting level; -1 means no redirection is active.
 * Fix: was declared `static depth = -1;` with implicit int, which is
 * invalid since C99 and an error under modern compilers. */
static int depth = -1;

/* One stack entry per nested redirection. */
static struct { zword xsize; /* line width limit, or 0xffff when unbuffered */ zword table; /* address of the current output table */ zword width; /* width of the current (partial) line */ zword total; /* accumulated width of completed lines */ } redirect[MAX_NESTING]; /* * memory_open * * Begin output redirection to the memory of the Z-machine. * * table - address of the table that receives the text * xsize - line width limit; <= 0 selects the width of a given window * buffering - when false, no line buffering is done (xsize = 0xffff) * */ void memory_open (zword table, zword xsize, bool buffering) { if (++depth < MAX_NESTING) { if (!buffering) xsize = 0xffff; if (buffering && (short) xsize <= 0) xsize = get_max_width ((zword) (- (short) xsize)); /* The first word of the table holds the character count */ storew (table, 0); redirect[depth].table = table; redirect[depth].width = 0; redirect[depth].total = 0; redirect[depth].xsize = xsize; ostream_memory = TRUE; } else /* NOTE(review): depth is left incremented here; harmless only if runtime_error() does not return. */ runtime_error ("Nesting stream #3 too deep"); }/* memory_open */ /* * memory_new_line * * Redirect a newline to the memory of the Z-machine. When buffering, * a new table is started after the current one; otherwise character 13 * (ZSCII newline) is appended. * */ void memory_new_line (void) { zword size; zword addr; redirect[depth].total += redirect[depth].width; redirect[depth].width = 0; addr = redirect[depth].table; LOW_WORD (addr, size) addr += 2; if (redirect[depth].xsize != 0xffff) { redirect[depth].table = addr + size; size = 0; } else storeb ((zword) (addr + (size++)), 13); storew (redirect[depth].table, size); }/* memory_new_line */ /* * memory_word * * Redirect a string of characters to the memory of the Z-machine, * breaking the line first if it would exceed the buffered width (V6). * */ void memory_word (const zchar *s) { zword size; zword addr; zchar c; if (h_version == V6) { int width = os_string_width (s); if (redirect[depth].xsize != 0xffff) if (redirect[depth].width + width > redirect[depth].xsize) { /* Drop a leading space/indent before wrapping */ if (*s == ' ' || *s == ZC_INDENT || *s == ZC_GAP) width = os_string_width (++s); memory_new_line (); } redirect[depth].width += width; } addr = redirect[depth].table; LOW_WORD (addr, size) addr += 2; while ((c = *s++) != 0) storeb ((zword) (addr + (size++)), translate_to_zscii (c)); storew (redirect[depth].table, size); }/* memory_word */ /* * memory_close * * End of output redirection: flush the pending line, update the header * line-width word for V6, and pop one nesting level. * */ void memory_close (void) { if (depth >= 0) { if (redirect[depth].xsize != 0xffff) memory_new_line (); if (h_version == V6) { h_line_width = (redirect[depth].xsize != 0xffff) ? redirect[depth].total : redirect[depth].width; SET_WORD (H_LINE_WIDTH, h_line_width) } if (depth == 0) ostream_memory = FALSE; depth--; } }/* memory_close */
404b1c21d33c265fd399b5bb2155bfdbdc0f0b65
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
/SOFTWARE/A64-TERES/linux-a64/drivers/video/pxafb.h
26ba9fa3f7370642e46596219a1f20d0e7c400ed
[ "LicenseRef-scancode-free-unknown", "Apache-2.0", "Linux-syscall-note", "GPL-2.0-only", "GPL-1.0-or-later" ]
permissive
OLIMEX/DIY-LAPTOP
ae82f4ee79c641d9aee444db9a75f3f6709afa92
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
refs/heads/rel3
2023-08-04T01:54:19.483792
2023-04-03T07:18:12
2023-04-03T07:18:12
80,094,055
507
92
Apache-2.0
2023-04-03T07:05:59
2017-01-26T07:25:50
C
UTF-8
C
false
false
4,348
h
pxafb.h
#ifndef __PXAFB_H__ #define __PXAFB_H__ /* * linux/drivers/video/pxafb.h * -- Intel PXA250/210 LCD Controller Frame Buffer Device * * Copyright (C) 1999 Eric A. Thomas. * Copyright (C) 2004 Jean-Frederic Clere. * Copyright (C) 2004 Ian Campbell. * Copyright (C) 2004 Jeff Lackey. * Based on sa1100fb.c Copyright (C) 1999 Eric A. Thomas * which in turn is * Based on acornfb.c Copyright (C) Russell King. * * 2001-08-03: Cliff Brake <cbrake@acclent.com> * - ported SA1100 code to PXA * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* PXA LCD DMA descriptor */ struct pxafb_dma_descriptor { unsigned int fdadr; unsigned int fsadr; unsigned int fidr; unsigned int ldcmd; }; enum { PAL_NONE = -1, PAL_BASE = 0, PAL_OV1 = 1, PAL_OV2 = 2, PAL_MAX, }; enum { DMA_BASE = 0, DMA_UPPER = 0, DMA_LOWER = 1, DMA_OV1 = 1, DMA_OV2_Y = 2, DMA_OV2_Cb = 3, DMA_OV2_Cr = 4, DMA_CURSOR = 5, DMA_CMD = 6, DMA_MAX, }; /* maximum palette size - 256 entries, each 4 bytes long */ #define PALETTE_SIZE (256 * 4) #define CMD_BUFF_SIZE (1024 * 50) /* NOTE: the palette and frame dma descriptors are doubled to allow * the 2nd set for branch settings (FBRx) */ struct pxafb_dma_buff { unsigned char palette[PAL_MAX * PALETTE_SIZE]; uint16_t cmd_buff[CMD_BUFF_SIZE]; struct pxafb_dma_descriptor pal_desc[PAL_MAX * 2]; struct pxafb_dma_descriptor dma_desc[DMA_MAX * 2]; }; enum { OVERLAY1, OVERLAY2, }; enum { OVERLAY_FORMAT_RGB = 0, OVERLAY_FORMAT_YUV444_PACKED, OVERLAY_FORMAT_YUV444_PLANAR, OVERLAY_FORMAT_YUV422_PLANAR, OVERLAY_FORMAT_YUV420_PLANAR, }; #define NONSTD_TO_XPOS(x) (((x) >> 0) & 0x3ff) #define NONSTD_TO_YPOS(x) (((x) >> 10) & 0x3ff) #define NONSTD_TO_PFOR(x) (((x) >> 20) & 0x7) struct pxafb_layer; struct pxafb_layer_ops { void (*enable)(struct pxafb_layer *); void (*disable)(struct pxafb_layer *); void (*setup)(struct pxafb_layer *); }; struct pxafb_layer { struct fb_info fb; int 
id; int registered; uint32_t usage; uint32_t control[2]; struct pxafb_layer_ops *ops; void __iomem *video_mem; unsigned long video_mem_phys; size_t video_mem_size; struct completion branch_done; struct pxafb_info *fbi; }; struct pxafb_info { struct fb_info fb; struct device *dev; struct clk *clk; void __iomem *mmio_base; struct pxafb_dma_buff *dma_buff; size_t dma_buff_size; dma_addr_t dma_buff_phys; dma_addr_t fdadr[DMA_MAX * 2]; void __iomem *video_mem; /* virtual address of frame buffer */ unsigned long video_mem_phys; /* physical address of frame buffer */ size_t video_mem_size; /* size of the frame buffer */ u16 * palette_cpu; /* virtual address of palette memory */ u_int palette_size; u_int lccr0; u_int lccr3; u_int lccr4; u_int cmap_inverse:1, cmap_static:1, unused:30; u_int reg_lccr0; u_int reg_lccr1; u_int reg_lccr2; u_int reg_lccr3; u_int reg_lccr4; u_int reg_cmdcr; unsigned long hsync_time; volatile u_char state; volatile u_char task_state; struct mutex ctrlr_lock; wait_queue_head_t ctrlr_wait; struct work_struct task; struct completion disable_done; #ifdef CONFIG_FB_PXA_SMARTPANEL uint16_t *smart_cmds; size_t n_smart_cmds; struct completion command_done; struct completion refresh_done; struct task_struct *smart_thread; #endif #ifdef CONFIG_FB_PXA_OVERLAY struct pxafb_layer overlay[2]; #endif #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; struct notifier_block freq_policy; #endif void (*lcd_power)(int, struct fb_var_screeninfo *); void (*backlight_power)(int); }; #define TO_INF(ptr,member) container_of(ptr,struct pxafb_info,member) /* * These are the actions for set_ctrlr_state */ #define C_DISABLE (0) #define C_ENABLE (1) #define C_DISABLE_CLKCHANGE (2) #define C_ENABLE_CLKCHANGE (3) #define C_REENABLE (4) #define C_DISABLE_PM (5) #define C_ENABLE_PM (6) #define C_STARTUP (7) #define PXA_NAME "PXA" /* * Minimum X and Y resolutions */ #define MIN_XRES 64 #define MIN_YRES 64 /* maximum X and Y resolutions - note these are limits from the 
register * bits length instead of the real ones */ #define MAX_XRES 1024 #define MAX_YRES 1024 #endif /* __PXAFB_H__ */
fdb7833a273d983e879c5b7502e84b920e0f07e6
75196819c910f3fd523f1a4d28e5d0fe12570ab1
/src/bdb53/src/os/os_handle.c
2ce5cc6a1dc9678d06fad43ff095bae4fbee863a
[ "BSD-3-Clause", "Sleepycat", "MIT" ]
permissive
gridcoin-community/Gridcoin-Research
889967579b5b05bcc3ae836697a0f173b5ae91ea
21414dd0f63c9a34060f7f762f08d1a52aeb9e87
refs/heads/development
2023-09-04T09:21:55.006935
2023-09-03T17:45:20
2023-09-03T17:45:20
23,332,350
292
117
MIT
2023-09-11T10:22:10
2014-08-25T23:41:04
HTML
UTF-8
C
false
false
6,044
c
os_handle.c
/*-
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 1998, 2013 Oracle and/or its affiliates.  All rights reserved.
 *
 * $Id$
 */

#include "db_config.h"

#include "db_int.h"

/*
 * __os_openhandle --
 *	Open a file, using POSIX 1003.1 open flags.
 *
 *	On success, *fhpp receives a newly allocated DB_FH linked into the
 *	environment's file-handle list (when env != NULL); the caller frees
 *	it with __os_closehandle().
 *
 * PUBLIC: int __os_openhandle
 * PUBLIC:    __P((ENV *, const char *, int, int, DB_FH **));
 */
int
__os_openhandle(env, name, flags, mode, fhpp)
	ENV *env;
	const char *name;
	int flags, mode;
	DB_FH **fhpp;
{
	DB_FH *fhp;
	u_int nrepeat, retries;
	int fcntl_flags, ret;
#ifdef HAVE_VXWORKS
	int newflags;
#endif
	/*
	 * Allocate the file handle and copy the file name.  We generally only
	 * use the name for verbose or error messages, but on systems where we
	 * can't unlink temporary files immediately, we use the name to unlink
	 * the temporary file when the file handle is closed.
	 *
	 * Lock the ENV handle and insert the new file handle on the list.
	 */
	if ((ret = __os_calloc(env, 1, sizeof(DB_FH), &fhp)) != 0)
		return (ret);
	if ((ret = __os_strdup(env, name, &fhp->name)) != 0)
		goto err;

	if (env != NULL) {
		MUTEX_LOCK(env, env->mtx_env);
		TAILQ_INSERT_TAIL(&env->fdlist, fhp, q);
		MUTEX_UNLOCK(env, env->mtx_env);
		F_SET(fhp, DB_FH_ENVLINK);
	}

	/* If the application specified an interface, use it. */
	if (DB_GLOBAL(j_open) != NULL) {
		if ((fhp->fd = DB_GLOBAL(j_open)(name, flags, mode)) == -1) {
			ret = __os_posix_err(__os_get_syserr());
			goto err;
		}
		goto done;
	}

	/*
	 * Retry loop: nrepeat counts the open attempts (up to 3 waits);
	 * retries counts immediate EAGAIN/EBUSY/EINTR re-tries, which do
	 * not consume an nrepeat attempt until DB_RETRY is reached.
	 */
	retries = 0;
	for (nrepeat = 1; nrepeat < 4; ++nrepeat) {
		ret = 0;
#ifdef HAVE_VXWORKS
		/*
		 * VxWorks does not support O_CREAT on open, you have to use
		 * creat() instead.  (It does not support O_EXCL or O_TRUNC
		 * either, even though they are defined "for future support".)
		 * We really want the POSIX behavior that if O_CREAT is set,
		 * we open if it exists, or create it if it doesn't exist.
		 * If O_CREAT is specified, single thread and try to open the
		 * file.  If successful, and O_EXCL return EEXIST.  If
		 * unsuccessful call creat and then end single threading.
		 */
		if (LF_ISSET(O_CREAT)) {
			DB_BEGIN_SINGLE_THREAD;
			newflags = flags & ~(O_CREAT | O_EXCL);
			if ((fhp->fd = open(name, newflags, mode)) != -1) {
				/*
				 * We need to mark the file opened at this
				 * point so that if we get any error below
				 * we will properly close the fd we just
				 * opened on the error path.
				 */
				F_SET(fhp, DB_FH_OPENED);
				if (LF_ISSET(O_EXCL)) {
					/*
					 * If we get here, want O_EXCL create,
					 * and the file exists.  Close and
					 * return EEXIST.
					 */
					DB_END_SINGLE_THREAD;
					ret = EEXIST;
					goto err;
				}
				/*
				 * XXX
				 * Assume any error means non-existence.
				 * Unfortunately return values (even for
				 * non-existence) are driver specific so
				 * there is no single error we can use to
				 * verify we truly got the equivalent of
				 * ENOENT.
				 */
			} else
				fhp->fd = creat(name, newflags);
			DB_END_SINGLE_THREAD;
		} else
		/* FALLTHROUGH */
#endif
#ifdef __VMS
		/*
		 * !!!
		 * Open with full sharing on VMS.
		 *
		 * We use these flags because they are the ones set by the VMS
		 * CRTL mmap() call when it opens a file, and we have to be
		 * able to open files that mmap() has previously opened, e.g.,
		 * when we're joining already existing DB regions.
		 */
		fhp->fd = open(name, flags, mode, "shr=get,put,upd,del,upi");
#else
		fhp->fd = open(name, flags, mode);
#endif
		if (fhp->fd != -1) {
			ret = 0;
			break;
		}

		switch (ret = __os_posix_err(__os_get_syserr())) {
		case EMFILE:
		case ENFILE:
		case ENOSPC:
			/*
			 * If it's a "temporary" error, we retry up to 3 times,
			 * waiting up to 12 seconds.  While it's not a problem
			 * if we can't open a database, an inability to open a
			 * log file is cause for serious dismay.
			 */
			__os_yield(env, nrepeat * 2, 0);
			break;
		case EAGAIN:
		case EBUSY:
		case EINTR:
			/*
			 * If an EAGAIN, EBUSY or EINTR, retry immediately for
			 * DB_RETRY times.
			 */
			if (++retries < DB_RETRY)
				--nrepeat;
			break;
		default:
			/* Open is silent on error. */
			goto err;
		}
	}

	if (ret == 0) {
#if defined(HAVE_FCNTL_F_SETFD)
		/* Deny file descriptor access to any child process. */
		if ((fcntl_flags = fcntl(fhp->fd, F_GETFD)) == -1 ||
		    fcntl(fhp->fd, F_SETFD, fcntl_flags | FD_CLOEXEC) == -1) {
			ret = __os_get_syserr();
			__db_syserr(env, ret, DB_STR("0162",
			    "fcntl(F_SETFD)"));
			ret = __os_posix_err(ret);
			goto err;
		}
#else
		COMPQUIET(fcntl_flags, 0);
#endif

		/* Success: hand the open handle back to the caller. */
done:		F_SET(fhp, DB_FH_OPENED);
		*fhpp = fhp;
		return (0);
	}

	/* Failure: __os_closehandle() unlinks the handle from the ENV list
	 * and releases the name and the DB_FH allocation. */
err:	(void)__os_closehandle(env, fhp);
	return (ret);
}

/*
 * __os_closehandle --
 *	Close a file.
 *
 *	Unlinks the handle from the ENV list, closes the descriptor (if
 *	open), unlinks the file when DB_FH_UNLINK is set, and frees the
 *	DB_FH and its name.
 *
 * PUBLIC: int __os_closehandle __P((ENV *, DB_FH *));
 */
int
__os_closehandle(env, fhp)
	ENV *env;
	DB_FH *fhp;
{
	DB_ENV *dbenv;
	int ret;

	ret = 0;

	/*
	 * If we linked the DB_FH handle into the ENV, it needs to be
	 * unlinked.
	 */
	DB_ASSERT(env, env != NULL || !F_ISSET(fhp, DB_FH_ENVLINK));

	if (env != NULL) {
		dbenv = env->dbenv;
		if (fhp->name != NULL && FLD_ISSET(
		    dbenv->verbose, DB_VERB_FILEOPS | DB_VERB_FILEOPS_ALL))
			__db_msg(env, DB_STR_A("0163", "fileops: close %s",
			    "%s"), fhp->name);

		if (F_ISSET(fhp, DB_FH_ENVLINK)) {
			/*
			 * Lock the ENV handle and remove this file
			 * handle from the list.
			 */
			MUTEX_LOCK(env, env->mtx_env);
			TAILQ_REMOVE(&env->fdlist, fhp, q);
			MUTEX_UNLOCK(env, env->mtx_env);
		}
	}

	/* Discard any underlying system file reference. */
	if (F_ISSET(fhp, DB_FH_OPENED)) {
		if (DB_GLOBAL(j_close) != NULL)
			ret = DB_GLOBAL(j_close)(fhp->fd);
		else
			RETRY_CHK((close(fhp->fd)), ret);
		if (ret != 0) {
			__db_syserr(env, ret, DB_STR("0164", "close"));
			ret = __os_posix_err(ret);
		}
	}

	/* Unlink the file if we haven't already done so. */
	if (F_ISSET(fhp, DB_FH_UNLINK))
		(void)__os_unlink(env, fhp->name, 0);

	if (fhp->name != NULL)
		__os_free(env, fhp->name);
	__os_free(env, fhp);

	return (ret);
}
c51781d882f5d589823da0fa4bd1393a53b711b0
9de0cec678bc4a3bec2b4adabef9f39ff5b4afac
/PWGJE/macros/AddTaskFastEmbedding.C
0506dc504454c8c2ee267e942e0831908269acb4
[]
permissive
alisw/AliPhysics
91bf1bd01ab2af656a25ff10b25e618a63667d3e
5df28b2b415e78e81273b0d9bf5c1b99feda3348
refs/heads/master
2023-08-31T20:41:44.927176
2023-08-31T14:51:12
2023-08-31T14:51:12
61,661,378
129
1,150
BSD-3-Clause
2023-09-14T18:48:45
2016-06-21T19:31:29
C++
UTF-8
C
false
false
6,790
c
AddTaskFastEmbedding.C
AliAnalysisTaskFastEmbedding* AddTaskFastEmbedding(){ AliAnalysisManager *mgr = AliAnalysisManager::GetAnalysisManager(); if(!mgr){ ::Error("AddTaskFastEmbedding", "No analysis manager to connect ot."); return NULL; } if(!mgr->GetInputEventHandler()){ ::Error("AddTaskFastEmbedding", "This task requires an input event handler."); return NULL; } AliAnalysisTaskFastEmbedding *task = new AliAnalysisTaskFastEmbedding("FastEmbedding"); // ## set embedding mode ## // kAODFull=0, kAODJetTracks, kAODJet4Mom, kToySingle4Mom //task->SetEmbedMode(AliAnalysisTaskFastEmbedding::kToyTracks); task->SetEmbedMode(AliAnalysisTaskFastEmbedding::kAODFull); // embed full event: all tracks in PYTHIA event are added to PbPb data event task->SetJetFriends("AliAOD.Jets.root"); // ## set ranges for toy ## //SetToyTrackRanges( Double_t minPt = 50.; Double_t maxPt = 100.; Double_t minEta = -0.5; Double_t maxEta = 0.5; //Double_t minEta = -0.4; Double_t maxEta = 0.4; // for LHC10h pass1 Double_t minPhi = 0.; Double_t maxPhi = 2*TMath::Pi(); //fToyDistributionTrackPt: 0 = uniform distribution // else = exponential / power law (not implemented yet) //task->SetToyNumberOfTrackRange(4,4); //task->SetToyTrackRanges(0.15, 300., 5,-.9, .9, 0., 2*TMath::Pi()); task->SetToyTrackRanges(minPt,maxPt,0.,minEta,maxEta,minPhi,maxPhi); task->SetToyFilterMap((1<<32)-1); // ## set event selection for events of the addition AOD ## // kEventsAll=0; kEventsJetPt task->SetEvtSelecMode(AliAnalysisTaskFastEmbedding::kEventsJetPt); task->SetQAMode(kTRUE); task->SetEPMode(0);//in-plane embedding, 0: no EP, 1: in-plane, 2: out-of-plane // event selection task->SetOfflineTrgMask(AliVEvent::kMB); task->SetCentMin(0.); task->SetCentMax(10.); //task->SetVtxMin(-10.); //task->SetVtxMax(10.); // ## set jet pT range for event selection ## // SetEvtSelJetPtRange(Float_t minPt, Float_t maxPt) task->SetEvtSelJetPtRange(5.,110.); //task->SetEvtSelJetEtaRange(-0.4, 0.4); // smaller eta window for LHC10h pass1 
task->SetEvtSelJetEtaRange(-0.5, 0.5); task->SetTrackFilterMap(272); task->SetJetBranch("clustersAOD_ANTIKT04_B0_Filter00272_Cut00150_Skip00"); task->SetEvtSelJetPtRange(5.,10000.); //jet min pt cut //task->SetEvtSelJetPtRange(10.,10000.); //jet min pt cut task->SetEvtSelJetEtaRange(-0.5,0.5); task->SetEvtSelJetMinLConstPt(1); task->SetFFRadius(0.2); //jet cone size //v0 cuts for embedded candidates Int_t K0type = AliAnalysisTaskFastEmbedding::kOffl; Int_t Latype = AliAnalysisTaskFastEmbedding::kOffl; Int_t ALatype = AliAnalysisTaskFastEmbedding::kOffl; TString strK0type; if(K0type == AliAnalysisTaskFastEmbedding::kOnFly) strK0type = "OnFly"; if(K0type == AliAnalysisTaskFastEmbedding::kOffl) strK0type = "Offl"; TString strLatype; if(Latype == AliAnalysisTaskFastEmbedding::kOnFly) strLatype = "OnFly"; if(Latype == AliAnalysisTaskFastEmbedding::kOffl) strLatype = "Offl"; TString strALatype; if(ALatype == AliAnalysisTaskFastEmbedding::kOnFly) strALatype = "OnFly"; if(ALatype == AliAnalysisTaskFastEmbedding::kOffl) strALatype = "Offl"; //pp V0 cut selection task->SetK0Type(K0type); task->SetLaType(Latype); task->SetALaType(ALatype); mgr->AddTask(task); // ## create the output containers ## AliAnalysisDataContainer *coutputFastEmbedding = mgr->CreateContainer( "fastembedding", TList::Class(), AliAnalysisManager::kOutputContainer, Form("%s:PWGJE_FastEmbedding", AliAnalysisManager::GetCommonFileName())); mgr->ConnectInput (task, 0, mgr->GetCommonInputContainer()); mgr->ConnectOutput(task, 0, mgr->GetCommonOutputContainer()); mgr->ConnectOutput(task, 1, coutputFastEmbedding); return task; } AliAnalysisTaskFastEmbedding* AddTaskFastEmbedding(TObjArray* aodarray){ AliAnalysisTaskFastEmbedding *task = AddTaskFastEmbedding(); if(aodarray){ task->SetArrayOfAODPaths(aodarray); task->SetEmbedMode(AliAnalysisTaskFastEmbedding::kAODFull); } return task; } AliAnalysisTaskFastEmbedding* AddTaskFastEmbedding(const char* filepath, Int_t mode = 0){ AliAnalysisTaskFastEmbedding *task = 
AddTaskFastEmbedding(); if(strlen(filepath)){ if(mode==0){ // path to single AOD task->SetAODPath(filepath); } if(mode==1){ // path to text file with list of paths of multiple AODs Printf("Read aod paths from file %s", filepath); TObjArray* array = new TObjArray(); TObjString* ostr = 0; TString line; ifstream in; in.open(filepath); while(in.good()){ in >> line; if(line.Length() == 0) continue; Printf("found aod path %s", line.Data()); ostr = new TObjString(line.Data()); array->Add(ostr); } Printf("-> %d aod paths found", array->GetEntries()); task->SetArrayOfAODPaths(array); } if(mode==2) { //read root file which contains object array TFile *f = TFile::Open(filepath); TObjArray *objarray; f->GetObject("array",objarray); Printf("-> %d aod paths found", objarray->GetEntries()); task->SetArrayOfAODPaths(objarray); Int_t count = 0; Int_t iEntry = -1; Int_t iEntrySum = 0; Int_t iEntryMax = 0; TArrayI* array = new TArrayI(); for(int i=0; i<objarray->GetEntriesFast(); i++) { TObjString *objStr = (TObjString*) objarray->At(i); TString str = objStr->GetString(); iEntry = str.Atoi(); array->Set(count+1); array->AddAt(iEntry,count); count++; iEntrySum += iEntry; if(iEntry>iEntryMax) iEntryMax = iEntry; } task->SetArrayOfAODEntries(array); task->SetAODEntriesSum(iEntrySum); task->SetAODEntriesMax(iEntryMax); } task->SetEmbedMode(AliAnalysisTaskFastEmbedding::kAODFull); } return task; } AliAnalysisTaskFastEmbedding* AddTaskFastEmbedding(const char* aodpath, const char* entriespath){ AliAnalysisTaskFastEmbedding *task = AddTaskFastEmbedding(aodpath, 1); Printf("Read entries of aod files from %s", entriespath); TArrayI* array = new TArrayI(); Int_t count = 0; Int_t iEntry = -1; Int_t iEntrySum = 0; Int_t iEntryMax = 0; TString line; ifstream in; in.open(entriespath); while(in.good()){ in >> line; iEntry = line.Atoi(); array->Set(count+1); array->AddAt(iEntry,count); count++; iEntrySum += iEntry; if(iEntry>iEntryMax) iEntryMax = iEntry; } task->SetArrayOfAODEntries(array); 
task->SetAODEntriesSum(iEntrySum); task->SetAODEntriesMax(iEntryMax); return task; }
f27f79a86f168acd5c30b4a918e3abc9efc5797f
971b2cea2d1c3001aadc8ca1a48110b7db1ed5f2
/deps/abc/src/map/amap/amapGraph.c
2438425b9234ba15cb72545aa0f450a33dc5ff2e
[ "LicenseRef-scancode-warranty-disclaimer", "MIT", "MIT-Modern-Variant" ]
permissive
emsec/hal
70ad2921739967d914dd458984bd7d6d497d3b0a
e4fae37bec9168a61100eacfda37a1f291b4d0be
refs/heads/master
2023-09-02T20:27:32.909426
2023-09-01T13:03:24
2023-09-01T13:03:24
169,076,171
510
72
MIT
2023-09-01T13:03:26
2019-02-04T12:37:20
C++
UTF-8
C
false
false
13,360
c
amapGraph.c
/**CFile**************************************************************** FileName [amapGraph.c] SystemName [ABC: Logic synthesis and verification system.] PackageName [Technology mapper for standard cells.] Synopsis [Internal AIG manager.] Author [Alan Mishchenko] Affiliation [UC Berkeley] Date [Ver. 1.0. Started - June 20, 2005.] Revision [$Id: amapGraph.c,v 1.00 2005/06/20 00:00:00 alanmi Exp $] ***********************************************************************/ #include "amapInt.h" ABC_NAMESPACE_IMPL_START //////////////////////////////////////////////////////////////////////// /// DECLARATIONS /// //////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////// /// FUNCTION DEFINITIONS /// //////////////////////////////////////////////////////////////////////// /**Function************************************************************* Synopsis [Creates object.] Description [] SideEffects [] SeeAlso [] ***********************************************************************/ Amap_Obj_t * Amap_ManSetupObj( Amap_Man_t * p ) { Amap_Obj_t * pObj; pObj = (Amap_Obj_t *)Aig_MmFixedEntryFetch( p->pMemObj ); memset( pObj, 0, sizeof(Amap_Obj_t) ); pObj->nFouts[0] = 1; // needed for flow to work in the first pass pObj->Id = Vec_PtrSize(p->vObjs); Vec_PtrPush( p->vObjs, pObj ); return pObj; } /**Function************************************************************* Synopsis [Creates constant 1 node.] Description [] SideEffects [] SeeAlso [] ***********************************************************************/ Amap_Obj_t * Amap_ManCreateConst1( Amap_Man_t * p ) { Amap_Obj_t * pObj; pObj = Amap_ManSetupObj( p ); pObj->Type = AMAP_OBJ_CONST1; pObj->fPhase = 1; p->nObjs[AMAP_OBJ_CONST1]++; return pObj; } /**Function************************************************************* Synopsis [Creates primary input.] 
Description [] SideEffects [] SeeAlso [] ***********************************************************************/ Amap_Obj_t * Amap_ManCreatePi( Amap_Man_t * p ) { Amap_Obj_t * pObj; pObj = Amap_ManSetupObj( p ); pObj->Type = AMAP_OBJ_PI; pObj->IdPio = Vec_PtrSize( p->vPis ); Vec_PtrPush( p->vPis, pObj ); p->nObjs[AMAP_OBJ_PI]++; return pObj; } /**Function************************************************************* Synopsis [Creates primary output with the given driver.] Description [] SideEffects [] SeeAlso [] ***********************************************************************/ Amap_Obj_t * Amap_ManCreatePo( Amap_Man_t * p, Amap_Obj_t * pFan0 ) { Amap_Obj_t * pObj; pObj = Amap_ManSetupObj( p ); pObj->IdPio = Vec_PtrSize( p->vPos ); Vec_PtrPush( p->vPos, pObj ); pObj->Type = AMAP_OBJ_PO; pObj->Fan[0] = Amap_ObjToLit(pFan0); Amap_Regular(pFan0)->nRefs++; pObj->Level = Amap_Regular(pFan0)->Level; if ( p->nLevelMax < (int)pObj->Level ) p->nLevelMax = (int)pObj->Level; assert( p->nLevelMax < 4094 ); // 2^12-2 p->nObjs[AMAP_OBJ_PO]++; return pObj; } /**Function************************************************************* Synopsis [Create the new node assuming it does not exist.] 
Description [] SideEffects [] SeeAlso [] ***********************************************************************/ Amap_Obj_t * Amap_ManCreateAnd( Amap_Man_t * p, Amap_Obj_t * pFan0, Amap_Obj_t * pFan1 ) { Amap_Obj_t * pObj; pObj = Amap_ManSetupObj( p ); pObj->Type = AMAP_OBJ_AND; pObj->Fan[0] = Amap_ObjToLit(pFan0); Amap_Regular(pFan0)->nRefs++; pObj->Fan[1] = Amap_ObjToLit(pFan1); Amap_Regular(pFan1)->nRefs++; assert( Abc_Lit2Var(pObj->Fan[0]) != Abc_Lit2Var(pObj->Fan[1]) ); pObj->fPhase = Amap_ObjPhaseReal(pFan0) & Amap_ObjPhaseReal(pFan1); pObj->Level = 1 + Abc_MaxInt( Amap_Regular(pFan0)->Level, Amap_Regular(pFan1)->Level ); if ( p->nLevelMax < (int)pObj->Level ) p->nLevelMax = (int)pObj->Level; assert( p->nLevelMax < 4094 ); // 2^12-2 p->nObjs[AMAP_OBJ_AND]++; return pObj; } /**Function************************************************************* Synopsis [Create the new node assuming it does not exist.] Description [] SideEffects [] SeeAlso [] ***********************************************************************/ Amap_Obj_t * Amap_ManCreateXor( Amap_Man_t * p, Amap_Obj_t * pFan0, Amap_Obj_t * pFan1 ) { Amap_Obj_t * pObj; pObj = Amap_ManSetupObj( p ); pObj->Type = AMAP_OBJ_XOR; pObj->Fan[0] = Amap_ObjToLit(pFan0); Amap_Regular(pFan0)->nRefs++; pObj->Fan[1] = Amap_ObjToLit(pFan1); Amap_Regular(pFan1)->nRefs++; pObj->fPhase = Amap_ObjPhaseReal(pFan0) ^ Amap_ObjPhaseReal(pFan1); pObj->Level = 2 + Abc_MaxInt( Amap_Regular(pFan0)->Level, Amap_Regular(pFan1)->Level ); if ( p->nLevelMax < (int)pObj->Level ) p->nLevelMax = (int)pObj->Level; assert( p->nLevelMax < 4094 ); // 2^12-2 p->nObjs[AMAP_OBJ_XOR]++; return pObj; } /**Function************************************************************* Synopsis [Create the new node assuming it does not exist.] 
Description [] SideEffects [] SeeAlso [] ***********************************************************************/ Amap_Obj_t * Amap_ManCreateMux( Amap_Man_t * p, Amap_Obj_t * pFan0, Amap_Obj_t * pFan1, Amap_Obj_t * pFanC ) { Amap_Obj_t * pObj; pObj = Amap_ManSetupObj( p ); pObj->Type = AMAP_OBJ_MUX; pObj->Fan[0] = Amap_ObjToLit(pFan0); Amap_Regular(pFan0)->nRefs++; pObj->Fan[1] = Amap_ObjToLit(pFan1); Amap_Regular(pFan1)->nRefs++; pObj->Fan[2] = Amap_ObjToLit(pFanC); Amap_Regular(pFanC)->nRefs++; pObj->fPhase = (Amap_ObjPhaseReal(pFan1) & Amap_ObjPhaseReal(pFanC)) | (Amap_ObjPhaseReal(pFan0) & ~Amap_ObjPhaseReal(pFanC)); pObj->Level = Abc_MaxInt( Amap_Regular(pFan0)->Level, Amap_Regular(pFan1)->Level ); pObj->Level = 2 + Abc_MaxInt( pObj->Level, Amap_Regular(pFanC)->Level ); if ( p->nLevelMax < (int)pObj->Level ) p->nLevelMax = (int)pObj->Level; assert( p->nLevelMax < 4094 ); // 2^12-2 p->nObjs[AMAP_OBJ_MUX]++; return pObj; } /**Function************************************************************* Synopsis [Creates the choice node.] Description [Should be called after the equivalence class nodes are linked.] SideEffects [] SeeAlso [] ***********************************************************************/ void Amap_ManCreateChoice( Amap_Man_t * p, Amap_Obj_t * pObj ) { Amap_Obj_t * pTemp; // mark the node as a representative if its class // assert( pObj->fRepr == 0 ); pObj->fRepr = 1; // update the level of this node (needed for correct required time computation) for ( pTemp = pObj; pTemp; pTemp = Amap_ObjChoice(p, pTemp) ) { pObj->Level = Abc_MaxInt( pObj->Level, pTemp->Level ); // pTemp->nVisits++; pTemp->nVisitsCopy++; } // mark the largest level if ( p->nLevelMax < (int)pObj->Level ) p->nLevelMax = (int)pObj->Level; assert( p->nLevelMax < 4094 ); // 2^12-2 } /**Function************************************************************* Synopsis [Creates XOR/MUX choices for the node.] 
Description [] SideEffects [] SeeAlso [] ***********************************************************************/ void Amap_ManCreateXorChoices( Amap_Man_t * p, Amap_Obj_t * pFan0, Amap_Obj_t * pFan1, Amap_Obj_t * pChoices[] ) { pChoices[0] = Amap_ManCreateXor( p, pFan0, pFan1 ); pChoices[1] = Amap_ManCreateXor( p, Amap_Not(pFan0), pFan1 ); pChoices[2] = Amap_ManCreateXor( p, pFan0, Amap_Not(pFan1) ); pChoices[3] = Amap_ManCreateXor( p, Amap_Not(pFan0), Amap_Not(pFan1) ); } /**Function************************************************************* Synopsis [Creates XOR/MUX choices for the node.] Description [] SideEffects [] SeeAlso [] ***********************************************************************/ void Amap_ManCreateMuxChoices( Amap_Man_t * p, Amap_Obj_t * pFan0, Amap_Obj_t * pFan1, Amap_Obj_t * pFanC, Amap_Obj_t * pChoices[] ) { pChoices[0] = Amap_ManCreateMux( p, pFan0, pFan1, pFanC ); pChoices[1] = Amap_ManCreateMux( p, Amap_Not(pFan0), Amap_Not(pFan1), pFanC ); pChoices[2] = Amap_ManCreateMux( p, pFan1, pFan0, Amap_Not(pFanC) ); pChoices[3] = Amap_ManCreateMux( p, Amap_Not(pFan1), Amap_Not(pFan0), Amap_Not(pFanC) ); } /**Function************************************************************* Synopsis [Drags pointer out through the copy.] Description [] SideEffects [] SeeAlso [] ***********************************************************************/ static inline Amap_Obj_t * Amap_AndToObj( Aig_Obj_t * pObj ) { return Amap_NotCond( (Amap_Obj_t *)Aig_Regular(pObj)->pData, Aig_IsComplement(pObj) ); } /**Function************************************************************* Synopsis [Starts the AIG manager.] 
Description [] SideEffects [] SeeAlso [] ***********************************************************************/ Amap_Obj_t * Amap_ManGetLast_rec( Amap_Man_t * p, Amap_Obj_t * pObj ) { if ( pObj->Equiv == 0 ) return pObj; return Amap_ManGetLast_rec( p, Amap_ObjChoice(p, pObj) ); } /**Function************************************************************* Synopsis [Starts the AIG manager.] Description [] SideEffects [] SeeAlso [] ***********************************************************************/ void Amap_ManCreate( Amap_Man_t * p, Aig_Man_t * pAig ) { Vec_Ptr_t * vNodes; Amap_Obj_t * pChoices[4]; Aig_Obj_t * pObj, * pFanin, * pPrev, * pFan0, * pFan1, * pFanC; int i, fChoices; if ( pAig->pEquivs ) vNodes = Aig_ManDfsChoices( pAig ); else vNodes = Aig_ManDfs( pAig, 1 ); p->pConst1 = Amap_ManCreateConst1( p ); // print warning about excessive memory usage if ( p->pPars->fVerbose ) { if ( 1.0 * Aig_ManObjNum(pAig) * sizeof(Amap_Obj_t) / (1<<30) > 0.1 ) printf( "Warning: Mapper allocates %.3f GB for subject graph with %d objects.\n", 1.0 * Aig_ManObjNum(pAig) * sizeof(Amap_Obj_t) / (1<<30), Aig_ManObjNum(pAig) ); } // create PIs and remember them in the old nodes Aig_ManCleanData(pAig); Aig_ManConst1(pAig)->pData = Amap_ManConst1( p ); Aig_ManForEachCi( pAig, pObj, i ) pObj->pData = Amap_ManCreatePi( p ); // load the AIG into the mapper Vec_PtrForEachEntry( Aig_Obj_t *, vNodes, pObj, i ) { fChoices = 0; if ( p->fUseXor && Aig_ObjRecognizeExor(pObj, &pFan0, &pFan1 ) ) { Amap_ManCreateXorChoices( p, Amap_AndToObj(pFan0), Amap_AndToObj(pFan1), pChoices ); fChoices = 1; } else if ( p->fUseMux && Aig_ObjIsMuxType(pObj) ) { pFanC = Aig_ObjRecognizeMux( pObj, &pFan1, &pFan0 ); Amap_ManCreateMuxChoices( p, Amap_AndToObj(pFan0), Amap_AndToObj(pFan1), Amap_AndToObj(pFanC), pChoices ); fChoices = 1; } pObj->pData = Amap_ManCreateAnd( p, (Amap_Obj_t *)Aig_ObjChild0Copy(pObj), (Amap_Obj_t *)Aig_ObjChild1Copy(pObj) ); if ( fChoices ) { p->nChoicesAdded++; Amap_ObjSetChoice( 
(Amap_Obj_t *)pObj->pData, pChoices[0] ); Amap_ObjSetChoice( pChoices[0], pChoices[1] ); Amap_ObjSetChoice( pChoices[1], pChoices[2] ); Amap_ObjSetChoice( pChoices[2], pChoices[3] ); Amap_ManCreateChoice( p, (Amap_Obj_t *)pObj->pData ); } if ( Aig_ObjIsChoice( pAig, pObj ) ) { // assert( !fChoices ); p->nChoicesGiven++; for ( pPrev = pObj, pFanin = Aig_ObjEquiv(pAig, pObj); pFanin; pPrev = pFanin, pFanin = Aig_ObjEquiv(pAig, pFanin) ) { ((Amap_Obj_t *)pFanin->pData)->fRepr = 0; Amap_ObjSetChoice( Amap_ManGetLast_rec(p, (Amap_Obj_t *)pPrev->pData), (Amap_Obj_t *)pFanin->pData ); } Amap_ManCreateChoice( p, (Amap_Obj_t *)pObj->pData ); } } Vec_PtrFree( vNodes ); // set the primary outputs without copying the phase Aig_ManForEachCo( pAig, pObj, i ) pObj->pData = Amap_ManCreatePo( p, (Amap_Obj_t *)Aig_ObjChild0Copy(pObj) ); if ( p->pPars->fVerbose ) printf( "Performing mapping with %d given and %d created choices.\n", p->nChoicesGiven, p->nChoicesAdded ); } //////////////////////////////////////////////////////////////////////// /// END OF FILE /// //////////////////////////////////////////////////////////////////////// ABC_NAMESPACE_IMPL_END
c5674a6da6b423a0714c0ca46ebe8e62925c80af
9ceacf33fd96913cac7ef15492c126d96cae6911
/sys/dev/acpi/acpidmar.c
1a789278348e0f99fabc610a8eeba6878973149c
[]
no_license
openbsd/src
ab97ef834fd2d5a7f6729814665e9782b586c130
9e79f3a0ebd11a25b4bff61e900cb6de9e7795e9
refs/heads/master
2023-09-02T18:54:56.624627
2023-09-02T15:16:12
2023-09-02T15:16:12
66,966,208
3,394
1,235
null
2023-08-08T02:42:25
2016-08-30T18:18:25
C
UTF-8
C
false
false
75,116
c
acpidmar.c
/* * Copyright (c) 2015 Jordan Hargrave <jordan_hargrave@hotmail.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <sys/param.h> #include <sys/systm.h> #include <sys/kernel.h> #include <sys/device.h> #include <sys/malloc.h> #include <sys/queue.h> #include <sys/types.h> #include <sys/mbuf.h> #include <sys/proc.h> #include <uvm/uvm_extern.h> #include <machine/apicvar.h> #include <machine/biosvar.h> #include <machine/cpuvar.h> #include <machine/bus.h> #include <dev/acpi/acpireg.h> #include <dev/acpi/acpivar.h> #include <dev/acpi/acpidev.h> #include <dev/acpi/amltypes.h> #include <dev/acpi/dsdt.h> #include <machine/i8259.h> #include <machine/i82093reg.h> #include <machine/i82093var.h> #include <machine/i82489reg.h> #include <machine/i82489var.h> #include <machine/mpbiosvar.h> #include <dev/pci/pcireg.h> #include <dev/pci/pcivar.h> #include <dev/pci/pcidevs.h> #include <dev/pci/ppbreg.h> #include "ioapic.h" #include "acpidmar.h" #include "amd_iommu.h" /* We don't want IOMMU to remap MSI */ #define MSI_BASE_ADDRESS 0xFEE00000L #define MSI_BASE_SIZE 0x00100000L #define MAX_DEVFN 65536 #ifdef IOMMU_DEBUG int acpidmar_dbg_lvl = 0; #define DPRINTF(lvl,x...) if (acpidmar_dbg_lvl >= lvl) { printf(x); } #else #define DPRINTF(lvl,x...) 
#endif #ifdef DDB int acpidmar_ddb = 0; #endif int acpidmar_force_cm = 1; /* Page Table Entry per domain */ struct iommu_softc; static inline int mksid(int b, int d, int f) { return (b << 8) + (d << 3) + f; } static inline int sid_devfn(int sid) { return sid & 0xff; } static inline int sid_bus(int sid) { return (sid >> 8) & 0xff; } static inline int sid_dev(int sid) { return (sid >> 3) & 0x1f; } static inline int sid_fun(int sid) { return (sid >> 0) & 0x7; } /* Alias mapping */ #define SID_INVALID 0x80000000L static uint32_t sid_flag[MAX_DEVFN]; struct domain_dev { int sid; int sec; int sub; TAILQ_ENTRY(domain_dev) link; }; struct domain { struct iommu_softc *iommu; int did; int gaw; struct pte_entry *pte; paddr_t ptep; struct bus_dma_tag dmat; int flag; struct mutex exlck; char exname[32]; struct extent *iovamap; TAILQ_HEAD(,domain_dev) devices; TAILQ_ENTRY(domain) link; }; #define DOM_DEBUG 0x1 #define DOM_NOMAP 0x2 struct dmar_devlist { int type; int bus; int ndp; struct acpidmar_devpath *dp; TAILQ_ENTRY(dmar_devlist) link; }; TAILQ_HEAD(devlist_head, dmar_devlist); struct ivhd_devlist { int start_id; int end_id; int cfg; TAILQ_ENTRY(ivhd_devlist) link; }; struct rmrr_softc { TAILQ_ENTRY(rmrr_softc) link; struct devlist_head devices; int segment; uint64_t start; uint64_t end; }; struct atsr_softc { TAILQ_ENTRY(atsr_softc) link; struct devlist_head devices; int segment; int flags; }; struct iommu_pic { struct pic pic; struct iommu_softc *iommu; }; #define IOMMU_FLAGS_CATCHALL 0x1 #define IOMMU_FLAGS_BAD 0x2 #define IOMMU_FLAGS_SUSPEND 0x4 struct iommu_softc { TAILQ_ENTRY(iommu_softc)link; struct devlist_head devices; int id; int flags; int segment; struct mutex reg_lock; bus_space_tag_t iot; bus_space_handle_t ioh; uint64_t cap; uint64_t ecap; uint32_t gcmd; int mgaw; int agaw; int ndoms; struct root_entry *root; struct context_entry *ctx[256]; void *intr; struct iommu_pic pic; int fedata; uint64_t feaddr; uint64_t rtaddr; /* Queued Invalidation */ int qi_head; 
int qi_tail; paddr_t qip; struct qi_entry *qi; struct domain *unity; TAILQ_HEAD(,domain) domains; /* AMD iommu */ struct ivhd_dte *dte; void *cmd_tbl; void *evt_tbl; paddr_t cmd_tblp; paddr_t evt_tblp; }; static inline int iommu_bad(struct iommu_softc *sc) { return (sc->flags & IOMMU_FLAGS_BAD); } static inline int iommu_enabled(struct iommu_softc *sc) { if (sc->dte) { return 1; } return (sc->gcmd & GCMD_TE); } struct acpidmar_softc { struct device sc_dev; pci_chipset_tag_t sc_pc; bus_space_tag_t sc_memt; int sc_haw; int sc_flags; bus_dma_tag_t sc_dmat; struct ivhd_dte *sc_hwdte; paddr_t sc_hwdtep; TAILQ_HEAD(,iommu_softc)sc_drhds; TAILQ_HEAD(,rmrr_softc) sc_rmrrs; TAILQ_HEAD(,atsr_softc) sc_atsrs; }; int acpidmar_activate(struct device *, int); int acpidmar_match(struct device *, void *, void *); void acpidmar_attach(struct device *, struct device *, void *); struct domain *acpidmar_pci_attach(struct acpidmar_softc *, int, int, int); const struct cfattach acpidmar_ca = { sizeof(struct acpidmar_softc), acpidmar_match, acpidmar_attach, NULL, acpidmar_activate }; struct cfdriver acpidmar_cd = { NULL, "acpidmar", DV_DULL }; struct acpidmar_softc *acpidmar_sc; int acpidmar_intr(void *); int acpiivhd_intr(void *); #define DID_UNITY 0x1 void _dumppte(struct pte_entry *, int, vaddr_t); struct domain *domain_create(struct iommu_softc *, int); struct domain *domain_lookup(struct acpidmar_softc *, int, int); void domain_unload_map(struct domain *, bus_dmamap_t); void domain_load_map(struct domain *, bus_dmamap_t, int, int, const char *); void (*domain_map_page)(struct domain *, vaddr_t, paddr_t, uint64_t); void domain_map_page_amd(struct domain *, vaddr_t, paddr_t, uint64_t); void domain_map_page_intel(struct domain *, vaddr_t, paddr_t, uint64_t); void domain_map_pthru(struct domain *, paddr_t, paddr_t); void acpidmar_pci_hook(pci_chipset_tag_t, struct pci_attach_args *); void acpidmar_parse_devscope(union acpidmar_entry *, int, int, struct devlist_head *); int 
acpidmar_match_devscope(struct devlist_head *, pci_chipset_tag_t, int); void acpidmar_init(struct acpidmar_softc *, struct acpi_dmar *); void acpidmar_drhd(struct acpidmar_softc *, union acpidmar_entry *); void acpidmar_rmrr(struct acpidmar_softc *, union acpidmar_entry *); void acpidmar_atsr(struct acpidmar_softc *, union acpidmar_entry *); void acpiivrs_init(struct acpidmar_softc *, struct acpi_ivrs *); void *acpidmar_intr_establish(void *, int, int (*)(void *), void *, const char *); void iommu_write_4(struct iommu_softc *, int, uint32_t); uint32_t iommu_read_4(struct iommu_softc *, int); void iommu_write_8(struct iommu_softc *, int, uint64_t); uint64_t iommu_read_8(struct iommu_softc *, int); void iommu_showfault(struct iommu_softc *, int, struct fault_entry *); void iommu_showcfg(struct iommu_softc *, int); int iommu_init(struct acpidmar_softc *, struct iommu_softc *, struct acpidmar_drhd *); int iommu_enable_translation(struct iommu_softc *, int); void iommu_enable_qi(struct iommu_softc *, int); void iommu_flush_cache(struct iommu_softc *, void *, size_t); void *iommu_alloc_page(struct iommu_softc *, paddr_t *); void iommu_flush_write_buffer(struct iommu_softc *); void iommu_issue_qi(struct iommu_softc *, struct qi_entry *); void iommu_flush_ctx(struct iommu_softc *, int, int, int, int); void iommu_flush_ctx_qi(struct iommu_softc *, int, int, int, int); void iommu_flush_tlb(struct iommu_softc *, int, int); void iommu_flush_tlb_qi(struct iommu_softc *, int, int); void iommu_set_rtaddr(struct iommu_softc *, paddr_t); void *iommu_alloc_hwdte(struct acpidmar_softc *, size_t, paddr_t *); const char *dmar_bdf(int); const char * dmar_bdf(int sid) { static char bdf[32]; snprintf(bdf, sizeof(bdf), "%.4x:%.2x:%.2x.%x", 0, sid_bus(sid), sid_dev(sid), sid_fun(sid)); return (bdf); } /* busdma */ static int dmar_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t, bus_size_t, int, bus_dmamap_t *); static void dmar_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t); static 
int dmar_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t, struct proc *, int); static int dmar_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t, struct mbuf *, int); static int dmar_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t, struct uio *, int); static int dmar_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *, int, bus_size_t, int); static void dmar_dmamap_unload(bus_dma_tag_t, bus_dmamap_t); static void dmar_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int); static int dmar_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t, bus_size_t, bus_dma_segment_t *, int, int *, int); static void dmar_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int); static int dmar_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *, int, size_t, caddr_t *, int); static void dmar_dmamem_unmap(bus_dma_tag_t, caddr_t, size_t); static paddr_t dmar_dmamem_mmap(bus_dma_tag_t, bus_dma_segment_t *, int, off_t, int, int); static void dmar_dumpseg(bus_dma_tag_t, int, bus_dma_segment_t *, const char *); const char *dom_bdf(struct domain *); void domain_map_check(struct domain *); struct pte_entry *pte_lvl(struct iommu_softc *, struct pte_entry *, vaddr_t, int, uint64_t); int ivhd_poll_events(struct iommu_softc *); void ivhd_showreg(struct iommu_softc *); void ivhd_showdte(struct iommu_softc *); void ivhd_showcmd(struct iommu_softc *); static inline int debugme(struct domain *dom) { return 0; return (dom->flag & DOM_DEBUG); } void domain_map_check(struct domain *dom) { struct iommu_softc *iommu; struct domain_dev *dd; struct context_entry *ctx; int v; iommu = dom->iommu; TAILQ_FOREACH(dd, &dom->devices, link) { acpidmar_pci_attach(acpidmar_sc, iommu->segment, dd->sid, 1); if (iommu->dte) continue; /* Check if this is the first time we are mapped */ ctx = &iommu->ctx[sid_bus(dd->sid)][sid_devfn(dd->sid)]; v = context_user(ctx); if (v != 0xA) { printf(" map: %.4x:%.2x:%.2x.%x iommu:%d did:%.4x\n", iommu->segment, sid_bus(dd->sid), sid_dev(dd->sid), 
sid_fun(dd->sid), iommu->id, dom->did);
			/* Tag the entry so the diagnostic prints only once */
			context_set_user(ctx, 0xA);
		}
	}
}

/* Map a single page as passthrough - used for DRM.
 * Identity-maps one page (IOVA == PA) into the device's domain so the
 * device can DMA to/from it untranslated. No-op when no DMAR hardware
 * was attached. */
void
dmar_ptmap(bus_dma_tag_t tag, bus_addr_t addr)
{
	struct domain *dom = tag->_cookie;

	if (!acpidmar_sc)
		return;
	domain_map_check(dom);
	domain_map_page(dom, addr, addr, PTE_P | PTE_R | PTE_W);
}

/* Map a range of pages 1:1 (identity mapping), [start, end) in
 * VTD_PAGE_SIZE steps. */
void
domain_map_pthru(struct domain *dom, paddr_t start, paddr_t end)
{
	domain_map_check(dom);
	while (start < end) {
		domain_map_page(dom, start, start, PTE_P | PTE_R | PTE_W);
		start += VTD_PAGE_SIZE;
	}
}

/* Map a single paddr to IOMMU paddr (Intel flavor).
 * Walks the page table from the top level (derived from the adjusted
 * guest address width) down to VTD_LEVEL0, allocating intermediate
 * table pages on demand, then stores pa|flags in the leaf entry. */
void
domain_map_page_intel(struct domain *dom, vaddr_t va, paddr_t pa,
    uint64_t flags)
{
	paddr_t paddr;
	struct pte_entry *pte, *npte;
	int lvl, idx;
	struct iommu_softc *iommu;

	iommu = dom->iommu;
	/* Insert physical address into virtual address map
	 * XXX: could we use private pmap here?
	 * essentially doing a pmap_enter(map, va, pa, prot);
	 */
	/* Only handle 4k pages for now */
	npte = dom->pte;
	for (lvl = iommu->agaw - VTD_STRIDE_SIZE; lvl >= VTD_LEVEL0;
	    lvl -= VTD_STRIDE_SIZE) {
		idx = (va >> lvl) & VTD_STRIDE_MASK;
		pte = &npte[idx];
		if (lvl == VTD_LEVEL0) {
			/* Level 1: Page Table - add physical address */
			pte->val = pa | flags;
			iommu_flush_cache(iommu, pte, sizeof(*pte));
			break;
		} else if (!(pte->val & PTE_P)) {
			/* Level N: Point to lower level table */
			iommu_alloc_page(iommu, &paddr);
			pte->val = paddr | PTE_P | PTE_R | PTE_W;
			iommu_flush_cache(iommu, pte, sizeof(*pte));
		}
		/* Descend to the next-level table via the direct map */
		npte = (void *)PMAP_DIRECT_MAP((pte->val & VTD_PTE_MASK));
	}
}

/* Map a single paddr to IOMMU paddr: AMD
 * physical address breakdown into levels:
 * xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
 * 5.55555555.44444444.43333333,33222222.22211111.1111----.--------
 * mode:
 * 000 = none shift
 * 001 = 1 [21].12
 * 010 = 2 [30].21
 * 011 = 3 [39].30
 * 100 = 4 [48].39
 * 101 = 5 [57]
 * 110 = 6
 * 111 = reserved
 */
struct pte_entry *
pte_lvl(struct iommu_softc *iommu, struct pte_entry *pte, vaddr_t
va, int shift, uint64_t flags)
{
	paddr_t paddr;
	int idx;

	idx = (va >> shift) & VTD_STRIDE_MASK;
	if (!(pte[idx].val & PTE_P)) {
		/* Page Table entry is not present... create a new page entry */
		iommu_alloc_page(iommu, &paddr);
		pte[idx].val = paddr | flags;
		iommu_flush_cache(iommu, &pte[idx], sizeof(pte[idx]));
	}
	/* Return (direct-mapped) virtual address of the next-level table */
	return (void *)PMAP_DIRECT_MAP((pte[idx].val & PTE_PADDR_MASK));
}

/* Map one 4k page into an AMD-Vi domain: walk/allocate the table with
 * pte_lvl(), then fill the level-1 entry. A zero 'flags' clears the
 * leaf entry (unmap). */
void
domain_map_page_amd(struct domain *dom, vaddr_t va, paddr_t pa,
    uint64_t flags)
{
	struct pte_entry *pte;
	struct iommu_softc *iommu;
	int idx;

	iommu = dom->iommu;
	/* Insert physical address into virtual address map
	 * XXX: could we use private pmap here?
	 * essentially doing a pmap_enter(map, va, pa, prot);
	 */
	/* Always assume AMD levels=4 */
	/* 39 30 21 12 */
	/* ---------|---------|---------|---------|------------ */
	pte = dom->pte;
	pte = pte_lvl(iommu, pte, va, 30, PTE_NXTLVL(2) | PTE_IR | PTE_IW |
	    PTE_P);
	pte = pte_lvl(iommu, pte, va, 21, PTE_NXTLVL(1) | PTE_IR | PTE_IW |
	    PTE_P);
	/* Rewrite the caller's flags into the AMD leaf-entry bit layout */
	if (flags)
		flags = PTE_P | PTE_R | PTE_W | PTE_IW | PTE_IR | PTE_NXTLVL(0);

	/* Level 1: Page Table - add physical address */
	idx = (va >> 12) & 0x1FF;
	pte[idx].val = pa | flags;
	iommu_flush_cache(iommu, pte, sizeof(*pte));
}

/* Debug helper: dump a DMA segment list. The unconditional 'return'
 * deliberately disables the output; remove it to re-enable. */
static void
dmar_dumpseg(bus_dma_tag_t tag, int nseg, bus_dma_segment_t *segs,
    const char *lbl)
{
	struct domain *dom = tag->_cookie;
	int i;

	return;
	if (!debugme(dom))
		return;
	printf("%s: %s\n", lbl, dom_bdf(dom));
	for (i = 0; i < nseg; i++) {
		printf(" %.16llx %.8x\n",
		    (uint64_t)segs[i].ds_addr, (uint32_t)segs[i].ds_len);
	}
}

/* Unload mapping: clear the PTEs covering each DMA segment and give
 * the IOVA ranges back to the domain's extent allocator. */
void
domain_unload_map(struct domain *dom, bus_dmamap_t dmam)
{
	bus_dma_segment_t *seg;
	paddr_t base, end, idx;
	psize_t alen;
	int i;

	if (iommu_bad(dom->iommu)) {
		printf("unload map no iommu\n");
		return;
	}

	for (i = 0; i < dmam->dm_nsegs; i++) {
		seg = &dmam->dm_segs[i];

		base = trunc_page(seg->ds_addr);
		end = roundup(seg->ds_addr + seg->ds_len, VTD_PAGE_SIZE);
		alen = end - base;

		if (debugme(dom)) {
			printf(" va:%.16llx len:%x\n", (uint64_t)base,
(uint32_t)alen); } /* Clear PTE */ for (idx = 0; idx < alen; idx += VTD_PAGE_SIZE) domain_map_page(dom, base + idx, 0, 0); if (dom->flag & DOM_NOMAP) { printf("%s: nomap %.16llx\n", dom_bdf(dom), (uint64_t)base); continue; } mtx_enter(&dom->exlck); if (extent_free(dom->iovamap, base, alen, EX_NOWAIT)) { panic("domain_unload_map: extent_free"); } mtx_leave(&dom->exlck); } } /* map.segs[x].ds_addr is modified to IOMMU virtual PA */ void domain_load_map(struct domain *dom, bus_dmamap_t map, int flags, int pteflag, const char *fn) { bus_dma_segment_t *seg; struct iommu_softc *iommu; paddr_t base, end, idx; psize_t alen; u_long res; int i; iommu = dom->iommu; if (!iommu_enabled(iommu)) { /* Lazy enable translation when required */ if (iommu_enable_translation(iommu, 1)) { return; } } domain_map_check(dom); for (i = 0; i < map->dm_nsegs; i++) { seg = &map->dm_segs[i]; base = trunc_page(seg->ds_addr); end = roundup(seg->ds_addr + seg->ds_len, VTD_PAGE_SIZE); alen = end - base; res = base; if (dom->flag & DOM_NOMAP) { goto nomap; } /* Allocate DMA Virtual Address */ mtx_enter(&dom->exlck); if (extent_alloc(dom->iovamap, alen, VTD_PAGE_SIZE, 0, map->_dm_boundary, EX_NOWAIT, &res)) { panic("domain_load_map: extent_alloc"); } if (res == -1) { panic("got -1 address"); } mtx_leave(&dom->exlck); /* Reassign DMA address */ seg->ds_addr = res | (seg->ds_addr & VTD_PAGE_MASK); nomap: if (debugme(dom)) { printf(" LOADMAP: %.16llx %x => %.16llx\n", (uint64_t)seg->ds_addr, (uint32_t)seg->ds_len, (uint64_t)res); } for (idx = 0; idx < alen; idx += VTD_PAGE_SIZE) { domain_map_page(dom, res + idx, base + idx, PTE_P | pteflag); } } if ((iommu->cap & CAP_CM) || acpidmar_force_cm) { iommu_flush_tlb(iommu, IOTLB_DOMAIN, dom->did); } else { iommu_flush_write_buffer(iommu); } } const char * dom_bdf(struct domain *dom) { struct domain_dev *dd; static char mmm[48]; dd = TAILQ_FIRST(&dom->devices); snprintf(mmm, sizeof(mmm), "%s iommu:%d did:%.4x%s", dmar_bdf(dd->sid), dom->iommu->id, dom->did, 
dom->did == DID_UNITY ? " [unity]" : ""); return (mmm); } /* Bus DMA Map functions */ static int dmar_dmamap_create(bus_dma_tag_t tag, bus_size_t size, int nsegments, bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp) { int rc; rc = _bus_dmamap_create(tag, size, nsegments, maxsegsz, boundary, flags, dmamp); if (!rc) { dmar_dumpseg(tag, (*dmamp)->dm_nsegs, (*dmamp)->dm_segs, __FUNCTION__); } return (rc); } static void dmar_dmamap_destroy(bus_dma_tag_t tag, bus_dmamap_t dmam) { dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__); _bus_dmamap_destroy(tag, dmam); } static int dmar_dmamap_load(bus_dma_tag_t tag, bus_dmamap_t dmam, void *buf, bus_size_t buflen, struct proc *p, int flags) { struct domain *dom = tag->_cookie; int rc; rc = _bus_dmamap_load(tag, dmam, buf, buflen, p, flags); if (!rc) { dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__); domain_load_map(dom, dmam, flags, PTE_R|PTE_W, __FUNCTION__); dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__); } return (rc); } static int dmar_dmamap_load_mbuf(bus_dma_tag_t tag, bus_dmamap_t dmam, struct mbuf *chain, int flags) { struct domain *dom = tag->_cookie; int rc; rc = _bus_dmamap_load_mbuf(tag, dmam, chain, flags); if (!rc) { dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__); domain_load_map(dom, dmam, flags, PTE_R|PTE_W,__FUNCTION__); dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__); } return (rc); } static int dmar_dmamap_load_uio(bus_dma_tag_t tag, bus_dmamap_t dmam, struct uio *uio, int flags) { struct domain *dom = tag->_cookie; int rc; rc = _bus_dmamap_load_uio(tag, dmam, uio, flags); if (!rc) { dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__); domain_load_map(dom, dmam, flags, PTE_R|PTE_W, __FUNCTION__); dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__); } return (rc); } static int dmar_dmamap_load_raw(bus_dma_tag_t tag, bus_dmamap_t dmam, bus_dma_segment_t *segs, int nsegs, bus_size_t size, int 
flags) { struct domain *dom = tag->_cookie; int rc; rc = _bus_dmamap_load_raw(tag, dmam, segs, nsegs, size, flags); if (!rc) { dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__); domain_load_map(dom, dmam, flags, PTE_R|PTE_W, __FUNCTION__); dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__); } return (rc); } static void dmar_dmamap_unload(bus_dma_tag_t tag, bus_dmamap_t dmam) { struct domain *dom = tag->_cookie; dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__); domain_unload_map(dom, dmam); _bus_dmamap_unload(tag, dmam); } static void dmar_dmamap_sync(bus_dma_tag_t tag, bus_dmamap_t dmam, bus_addr_t offset, bus_size_t len, int ops) { #if 0 struct domain *dom = tag->_cookie; int flag; flag = PTE_P; if (ops == BUS_DMASYNC_PREREAD) { /* make readable */ flag |= PTE_R; } else if (ops == BUS_DMASYNC_PREWRITE) { /* make writeable */ flag |= PTE_W; } dmar_dumpseg(tag, dmam->dm_nsegs, dmam->dm_segs, __FUNCTION__); #endif _bus_dmamap_sync(tag, dmam, offset, len, ops); } static int dmar_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size, bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags) { int rc; rc = _bus_dmamem_alloc(tag, size, alignment, boundary, segs, nsegs, rsegs, flags); if (!rc) { dmar_dumpseg(tag, *rsegs, segs, __FUNCTION__); } return (rc); } static void dmar_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs) { dmar_dumpseg(tag, nsegs, segs, __FUNCTION__); _bus_dmamem_free(tag, segs, nsegs); } static int dmar_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs, size_t size, caddr_t *kvap, int flags) { dmar_dumpseg(tag, nsegs, segs, __FUNCTION__); return (_bus_dmamem_map(tag, segs, nsegs, size, kvap, flags)); } static void dmar_dmamem_unmap(bus_dma_tag_t tag, caddr_t kva, size_t size) { struct domain *dom = tag->_cookie; if (debugme(dom)) { printf("dmamap_unmap: %s\n", dom_bdf(dom)); } _bus_dmamem_unmap(tag, kva, size); } static paddr_t 
dmar_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs, int nsegs, off_t off, int prot, int flags) { dmar_dumpseg(tag, nsegs, segs, __FUNCTION__); return (_bus_dmamem_mmap(tag, segs, nsegs, off, prot, flags)); } /*=================================== * IOMMU code *===================================*/ /* Intel: Set Context Root Address */ void iommu_set_rtaddr(struct iommu_softc *iommu, paddr_t paddr) { int i, sts; mtx_enter(&iommu->reg_lock); iommu_write_8(iommu, DMAR_RTADDR_REG, paddr); iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd | GCMD_SRTP); for (i = 0; i < 5; i++) { sts = iommu_read_4(iommu, DMAR_GSTS_REG); if (sts & GSTS_RTPS) break; } mtx_leave(&iommu->reg_lock); if (i == 5) { printf("set_rtaddr fails\n"); } } /* Allocate contiguous memory (1Mb) for the Device Table Entries */ void * iommu_alloc_hwdte(struct acpidmar_softc *sc, size_t size, paddr_t *paddr) { caddr_t vaddr; bus_dmamap_t map; bus_dma_segment_t seg; bus_dma_tag_t dmat = sc->sc_dmat; int rc, nsegs; rc = _bus_dmamap_create(dmat, size, 1, size, 0, BUS_DMA_NOWAIT, &map); if (rc != 0) { printf("hwdte_create fails\n"); return NULL; } rc = _bus_dmamem_alloc(dmat, size, 4, 0, &seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO); if (rc != 0) { printf("hwdte alloc fails\n"); return NULL; } rc = _bus_dmamem_map(dmat, &seg, 1, size, &vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT); if (rc != 0) { printf("hwdte map fails\n"); return NULL; } rc = _bus_dmamap_load_raw(dmat, map, &seg, 1, size, BUS_DMA_NOWAIT); if (rc != 0) { printf("hwdte load raw fails\n"); return NULL; } *paddr = map->dm_segs[0].ds_addr; return vaddr; } /* COMMON: Allocate a new memory page */ void * iommu_alloc_page(struct iommu_softc *iommu, paddr_t *paddr) { void *va; *paddr = 0; va = km_alloc(VTD_PAGE_SIZE, &kv_page, &kp_zero, &kd_nowait); if (va == NULL) { panic("can't allocate page"); } pmap_extract(pmap_kernel(), (vaddr_t)va, paddr); return (va); } /* Intel: Issue command via queued invalidation */ void iommu_issue_qi(struct 
iommu_softc *iommu, struct qi_entry *qi)
{
	/* Queued-invalidation submission is not implemented yet; the
	 * sketch below is kept for reference only. */
#if 0
	struct qi_entry *pi, *pw;

	idx = iommu->qi_head;
	pi = &iommu->qi[idx];
	pw = &iommu->qi[(idx+1) % MAXQ];
	iommu->qi_head = (idx+2) % MAXQ;

	memcpy(pw, &qi, sizeof(qi));
	issue command;
	while (pw->xxx)
		;
#endif
}

/* Intel: Flush TLB entries, Queued Invalidation mode */
void
iommu_flush_tlb_qi(struct iommu_softc *iommu, int mode, int did)
{
	struct qi_entry qi;

	/* Use queued invalidation */
	qi.hi = 0;
	switch (mode) {
	case IOTLB_GLOBAL:
		qi.lo = QI_IOTLB | QI_IOTLB_IG_GLOBAL;
		break;
	case IOTLB_DOMAIN:
		qi.lo = QI_IOTLB | QI_IOTLB_IG_DOMAIN | QI_IOTLB_DID(did);
		break;
	case IOTLB_PAGE:
		qi.lo = QI_IOTLB | QI_IOTLB_IG_PAGE | QI_IOTLB_DID(did);
		qi.hi = 0;
		break;
	}
	/* Request read/write drain when the hardware supports it */
	if (iommu->cap & CAP_DRD)
		qi.lo |= QI_IOTLB_DR;
	if (iommu->cap & CAP_DWD)
		qi.lo |= QI_IOTLB_DW;
	iommu_issue_qi(iommu, &qi);
}

/* Intel: Flush Context entries, Queued Invalidation mode */
void
iommu_flush_ctx_qi(struct iommu_softc *iommu, int mode, int did, int sid,
    int fm)
{
	struct qi_entry qi;

	/* Use queued invalidation */
	qi.hi = 0;
	switch (mode) {
	case CTX_GLOBAL:
		qi.lo = QI_CTX | QI_CTX_IG_GLOBAL;
		break;
	case CTX_DOMAIN:
		qi.lo = QI_CTX | QI_CTX_IG_DOMAIN | QI_CTX_DID(did);
		break;
	case CTX_DEVICE:
		qi.lo = QI_CTX | QI_CTX_IG_DEVICE | QI_CTX_DID(did) |
		    QI_CTX_SID(sid) | QI_CTX_FM(fm);
		break;
	}
	iommu_issue_qi(iommu, &qi);
}

/* Intel: Flush write buffers.
 * No-op on AMD (dte set) or when the hardware does not require
 * write-buffer flushing (CAP_RWBF clear). */
void
iommu_flush_write_buffer(struct iommu_softc *iommu)
{
	int i, sts;

	if (iommu->dte)
		return;
	if (!(iommu->cap & CAP_RWBF))
		return;
	DPRINTF(1,"writebuf\n");
	iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd | GCMD_WBF);
	/* NOTE(review): the loop exits when GSTS_WBFS becomes set;
	 * confirm against the VT-d spec whether completion is signalled
	 * by the bit setting or clearing. */
	for (i = 0; i < 5; i++) {
		sts = iommu_read_4(iommu, DMAR_GSTS_REG);
		if (sts & GSTS_WBFS)
			break;
		delay(10000);
	}
	if (i == 5) {
		printf("write buffer flush fails\n");
	}
}

/* Flush CPU cache lines backing IOMMU tables when the unit is not
 * coherent: always on AMD, on Intel only when ECAP_C is clear. */
void
iommu_flush_cache(struct iommu_softc *iommu, void *addr, size_t size)
{
	if (iommu->dte) {
		pmap_flush_cache((vaddr_t)addr, size);
		return;
	}
	if (!(iommu->ecap & ECAP_C))
		pmap_flush_cache((vaddr_t)addr, size);
}

/*
 * Intel: Flush IOMMU TLB
Entries * Flushing can occur globally, per domain or per page */ void iommu_flush_tlb(struct iommu_softc *iommu, int mode, int did) { int n; uint64_t val; /* Call AMD */ if (iommu->dte) { ivhd_invalidate_domain(iommu, did); return; } val = IOTLB_IVT; switch (mode) { case IOTLB_GLOBAL: val |= IIG_GLOBAL; break; case IOTLB_DOMAIN: val |= IIG_DOMAIN | IOTLB_DID(did); break; case IOTLB_PAGE: val |= IIG_PAGE | IOTLB_DID(did); break; } /* Check for Read/Write Drain */ if (iommu->cap & CAP_DRD) val |= IOTLB_DR; if (iommu->cap & CAP_DWD) val |= IOTLB_DW; mtx_enter(&iommu->reg_lock); iommu_write_8(iommu, DMAR_IOTLB_REG(iommu), val); n = 0; do { val = iommu_read_8(iommu, DMAR_IOTLB_REG(iommu)); } while (n++ < 5 && val & IOTLB_IVT); mtx_leave(&iommu->reg_lock); } /* Intel: Flush IOMMU settings * Flushes can occur globally, per domain, or per device */ void iommu_flush_ctx(struct iommu_softc *iommu, int mode, int did, int sid, int fm) { uint64_t val; int n; if (iommu->dte) return; val = CCMD_ICC; switch (mode) { case CTX_GLOBAL: val |= CIG_GLOBAL; break; case CTX_DOMAIN: val |= CIG_DOMAIN | CCMD_DID(did); break; case CTX_DEVICE: val |= CIG_DEVICE | CCMD_DID(did) | CCMD_SID(sid) | CCMD_FM(fm); break; } mtx_enter(&iommu->reg_lock); n = 0; iommu_write_8(iommu, DMAR_CCMD_REG, val); do { val = iommu_read_8(iommu, DMAR_CCMD_REG); } while (n++ < 5 && val & CCMD_ICC); mtx_leave(&iommu->reg_lock); } /* Intel: Enable Queued Invalidation */ void iommu_enable_qi(struct iommu_softc *iommu, int enable) { int n = 0; int sts; if (!(iommu->ecap & ECAP_QI)) return; if (enable) { iommu->gcmd |= GCMD_QIE; mtx_enter(&iommu->reg_lock); iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd); do { sts = iommu_read_4(iommu, DMAR_GSTS_REG); } while (n++ < 5 && !(sts & GSTS_QIES)); mtx_leave(&iommu->reg_lock); DPRINTF(1,"set.qie: %d\n", n); } else { iommu->gcmd &= ~GCMD_QIE; mtx_enter(&iommu->reg_lock); iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd); do { sts = iommu_read_4(iommu, DMAR_GSTS_REG); } while 
(n++ < 5 && sts & GSTS_QIES); mtx_leave(&iommu->reg_lock); DPRINTF(1,"clr.qie: %d\n", n); } } /* Intel: Enable IOMMU translation */ int iommu_enable_translation(struct iommu_softc *iommu, int enable) { uint32_t sts; uint64_t reg; int n = 0; if (iommu->dte) return (0); reg = 0; if (enable) { DPRINTF(0,"enable iommu %d\n", iommu->id); iommu_showcfg(iommu, -1); iommu->gcmd |= GCMD_TE; /* Enable translation */ printf(" pre tes: "); mtx_enter(&iommu->reg_lock); iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd); printf("xxx"); do { printf("yyy"); sts = iommu_read_4(iommu, DMAR_GSTS_REG); delay(n * 10000); } while (n++ < 5 && !(sts & GSTS_TES)); mtx_leave(&iommu->reg_lock); printf(" set.tes: %d\n", n); if (n >= 5) { printf("error.. unable to initialize iommu %d\n", iommu->id); iommu->flags |= IOMMU_FLAGS_BAD; /* Disable IOMMU */ iommu->gcmd &= ~GCMD_TE; mtx_enter(&iommu->reg_lock); iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd); mtx_leave(&iommu->reg_lock); return (1); } iommu_flush_ctx(iommu, CTX_GLOBAL, 0, 0, 0); iommu_flush_tlb(iommu, IOTLB_GLOBAL, 0); } else { iommu->gcmd &= ~GCMD_TE; mtx_enter(&iommu->reg_lock); iommu_write_4(iommu, DMAR_GCMD_REG, iommu->gcmd); do { sts = iommu_read_4(iommu, DMAR_GSTS_REG); } while (n++ < 5 && sts & GSTS_TES); mtx_leave(&iommu->reg_lock); printf(" clr.tes: %d\n", n); } return (0); } /* Intel: Initialize IOMMU */ int iommu_init(struct acpidmar_softc *sc, struct iommu_softc *iommu, struct acpidmar_drhd *dh) { static int niommu; int len = VTD_PAGE_SIZE; int i, gaw; uint32_t sts; paddr_t paddr; if (_bus_space_map(sc->sc_memt, dh->address, len, 0, &iommu->ioh) != 0) { return (-1); } TAILQ_INIT(&iommu->domains); iommu->id = ++niommu; iommu->flags = dh->flags; iommu->segment = dh->segment; iommu->iot = sc->sc_memt; iommu->cap = iommu_read_8(iommu, DMAR_CAP_REG); iommu->ecap = iommu_read_8(iommu, DMAR_ECAP_REG); iommu->ndoms = cap_nd(iommu->cap); /* Print Capabilities & Extended Capabilities */ DPRINTF(0, " caps: %s%s%s%s%s%s%s%s%s%s%s\n", 
iommu->cap & CAP_AFL ? "afl " : "", /* adv fault */ iommu->cap & CAP_RWBF ? "rwbf " : "", /* write-buffer flush */ iommu->cap & CAP_PLMR ? "plmr " : "", /* protected lo region */ iommu->cap & CAP_PHMR ? "phmr " : "", /* protected hi region */ iommu->cap & CAP_CM ? "cm " : "", /* caching mode */ iommu->cap & CAP_ZLR ? "zlr " : "", /* zero-length read */ iommu->cap & CAP_PSI ? "psi " : "", /* page invalidate */ iommu->cap & CAP_DWD ? "dwd " : "", /* write drain */ iommu->cap & CAP_DRD ? "drd " : "", /* read drain */ iommu->cap & CAP_FL1GP ? "Gb " : "", /* 1Gb pages */ iommu->cap & CAP_PI ? "pi " : ""); /* posted interrupts */ DPRINTF(0, " ecap: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", iommu->ecap & ECAP_C ? "c " : "", /* coherent */ iommu->ecap & ECAP_QI ? "qi " : "", /* queued invalidate */ iommu->ecap & ECAP_DT ? "dt " : "", /* device iotlb */ iommu->ecap & ECAP_IR ? "ir " : "", /* intr remap */ iommu->ecap & ECAP_EIM ? "eim " : "", /* x2apic */ iommu->ecap & ECAP_PT ? "pt " : "", /* passthrough */ iommu->ecap & ECAP_SC ? "sc " : "", /* snoop control */ iommu->ecap & ECAP_ECS ? "ecs " : "", /* extended context */ iommu->ecap & ECAP_MTS ? "mts " : "", /* memory type */ iommu->ecap & ECAP_NEST ? "nest " : "", /* nested translations */ iommu->ecap & ECAP_DIS ? "dis " : "", /* deferred invalidation */ iommu->ecap & ECAP_PASID ? "pas " : "", /* pasid */ iommu->ecap & ECAP_PRS ? "prs " : "", /* page request */ iommu->ecap & ECAP_ERS ? "ers " : "", /* execute request */ iommu->ecap & ECAP_SRS ? "srs " : "", /* supervisor request */ iommu->ecap & ECAP_NWFS ? "nwfs " : "", /* no write flag */ iommu->ecap & ECAP_EAFS ? 
"eafs " : ""); /* extended accessed flag */ mtx_init(&iommu->reg_lock, IPL_HIGH); /* Clear Interrupt Masking */ iommu_write_4(iommu, DMAR_FSTS_REG, FSTS_PFO | FSTS_PPF); iommu->intr = acpidmar_intr_establish(iommu, IPL_HIGH, acpidmar_intr, iommu, "dmarintr"); /* Enable interrupts */ sts = iommu_read_4(iommu, DMAR_FECTL_REG); iommu_write_4(iommu, DMAR_FECTL_REG, sts & ~FECTL_IM); /* Allocate root pointer */ iommu->root = iommu_alloc_page(iommu, &paddr); DPRINTF(0, "Allocated root pointer: pa:%.16llx va:%p\n", (uint64_t)paddr, iommu->root); iommu->rtaddr = paddr; iommu_flush_write_buffer(iommu); iommu_set_rtaddr(iommu, paddr); #if 0 if (iommu->ecap & ECAP_QI) { /* Queued Invalidation support */ iommu->qi = iommu_alloc_page(iommu, &iommu->qip); iommu_write_8(iommu, DMAR_IQT_REG, 0); iommu_write_8(iommu, DMAR_IQA_REG, iommu->qip | IQA_QS_256); } if (iommu->ecap & ECAP_IR) { /* Interrupt remapping support */ iommu_write_8(iommu, DMAR_IRTA_REG, 0); } #endif /* Calculate guest address width and supported guest widths */ gaw = -1; iommu->mgaw = cap_mgaw(iommu->cap); DPRINTF(0, "gaw: %d { ", iommu->mgaw); for (i = 0; i < 5; i++) { if (cap_sagaw(iommu->cap) & (1L << i)) { gaw = VTD_LEVELTOAW(i); DPRINTF(0, "%d ", gaw); iommu->agaw = gaw; } } DPRINTF(0, "}\n"); /* Cache current status register bits */ sts = iommu_read_4(iommu, DMAR_GSTS_REG); if (sts & GSTS_TES) iommu->gcmd |= GCMD_TE; if (sts & GSTS_QIES) iommu->gcmd |= GCMD_QIE; if (sts & GSTS_IRES) iommu->gcmd |= GCMD_IRE; DPRINTF(0, "gcmd: %x preset\n", iommu->gcmd); acpidmar_intr(iommu); return (0); } /* Read/Write IOMMU register */ uint32_t iommu_read_4(struct iommu_softc *iommu, int reg) { uint32_t v; v = bus_space_read_4(iommu->iot, iommu->ioh, reg); return (v); } void iommu_write_4(struct iommu_softc *iommu, int reg, uint32_t v) { bus_space_write_4(iommu->iot, iommu->ioh, reg, (uint32_t)v); } uint64_t iommu_read_8(struct iommu_softc *iommu, int reg) { uint64_t v; v = bus_space_read_8(iommu->iot, iommu->ioh, reg); 
return (v); } void iommu_write_8(struct iommu_softc *iommu, int reg, uint64_t v) { bus_space_write_8(iommu->iot, iommu->ioh, reg, v); } /* Check if a device is within a device scope */ int acpidmar_match_devscope(struct devlist_head *devlist, pci_chipset_tag_t pc, int sid) { struct dmar_devlist *ds; int sub, sec, i; int bus, dev, fun, sbus; pcireg_t reg; pcitag_t tag; sbus = sid_bus(sid); TAILQ_FOREACH(ds, devlist, link) { bus = ds->bus; dev = ds->dp[0].device; fun = ds->dp[0].function; /* Walk PCI bridges in path */ for (i = 1; i < ds->ndp; i++) { tag = pci_make_tag(pc, bus, dev, fun); reg = pci_conf_read(pc, tag, PPB_REG_BUSINFO); bus = PPB_BUSINFO_SECONDARY(reg); dev = ds->dp[i].device; fun = ds->dp[i].function; } /* Check for device exact match */ if (sid == mksid(bus, dev, fun)) { return DMAR_ENDPOINT; } /* Check for device subtree match */ if (ds->type == DMAR_BRIDGE) { tag = pci_make_tag(pc, bus, dev, fun); reg = pci_conf_read(pc, tag, PPB_REG_BUSINFO); sec = PPB_BUSINFO_SECONDARY(reg); sub = PPB_BUSINFO_SUBORDINATE(reg); if (sec <= sbus && sbus <= sub) { return DMAR_BRIDGE; } } } return (0); } struct domain * domain_create(struct iommu_softc *iommu, int did) { struct domain *dom; int gaw; DPRINTF(0, "iommu%d: create domain: %.4x\n", iommu->id, did); dom = malloc(sizeof(*dom), M_DEVBUF, M_ZERO | M_WAITOK); dom->did = did; dom->iommu = iommu; dom->pte = iommu_alloc_page(iommu, &dom->ptep); TAILQ_INIT(&dom->devices); /* Setup DMA */ dom->dmat._cookie = dom; dom->dmat._dmamap_create = dmar_dmamap_create; /* nop */ dom->dmat._dmamap_destroy = dmar_dmamap_destroy; /* nop */ dom->dmat._dmamap_load = dmar_dmamap_load; /* lm */ dom->dmat._dmamap_load_mbuf = dmar_dmamap_load_mbuf; /* lm */ dom->dmat._dmamap_load_uio = dmar_dmamap_load_uio; /* lm */ dom->dmat._dmamap_load_raw = dmar_dmamap_load_raw; /* lm */ dom->dmat._dmamap_unload = dmar_dmamap_unload; /* um */ dom->dmat._dmamap_sync = dmar_dmamap_sync; /* lm */ dom->dmat._dmamem_alloc = dmar_dmamem_alloc; /* nop */ 
dom->dmat._dmamem_free = dmar_dmamem_free; /* nop */ dom->dmat._dmamem_map = dmar_dmamem_map; /* nop */ dom->dmat._dmamem_unmap = dmar_dmamem_unmap; /* nop */ dom->dmat._dmamem_mmap = dmar_dmamem_mmap; snprintf(dom->exname, sizeof(dom->exname), "did:%x.%.4x", iommu->id, dom->did); /* Setup IOMMU address map */ gaw = min(iommu->agaw, iommu->mgaw); dom->iovamap = extent_create(dom->exname, 0, (1LL << gaw)-1, M_DEVBUF, NULL, 0, EX_WAITOK | EX_NOCOALESCE); /* Reserve the first 16M */ extent_alloc_region(dom->iovamap, 0, 16*1024*1024, EX_WAITOK); /* Zero out MSI Interrupt region */ extent_alloc_region(dom->iovamap, MSI_BASE_ADDRESS, MSI_BASE_SIZE, EX_WAITOK); mtx_init(&dom->exlck, IPL_HIGH); TAILQ_INSERT_TAIL(&iommu->domains, dom, link); return dom; } void domain_add_device(struct domain *dom, int sid) { struct domain_dev *ddev; DPRINTF(0, "add %s to iommu%d.%.4x\n", dmar_bdf(sid), dom->iommu->id, dom->did); ddev = malloc(sizeof(*ddev), M_DEVBUF, M_ZERO | M_WAITOK); ddev->sid = sid; TAILQ_INSERT_TAIL(&dom->devices, ddev, link); /* Should set context entry here?? 
*/
}

/* Remove (and free) a device from a domain's device list. */
void
domain_remove_device(struct domain *dom, int sid)
{
	struct domain_dev *ddev, *tmp;

	TAILQ_FOREACH_SAFE(ddev, &dom->devices, link, tmp) {
		if (ddev->sid == sid) {
			TAILQ_REMOVE(&dom->devices, ddev, link);
			free(ddev, sizeof(*ddev), M_DEVBUF);
		}
	}
}

/* Lookup domain by segment & source id (bus.device.function).
 * Finds the IOMMU responsible for the device (devscope match or a
 * catch-all unit), returns the existing domain already containing the
 * device, or falls through to create one. Returns NULL when no IOMMU
 * claims the device. */
struct domain *
domain_lookup(struct acpidmar_softc *sc, int segment, int sid)
{
	struct iommu_softc *iommu;
	struct domain_dev *ddev;
	struct domain *dom;
	int rc;

	if (sc == NULL) {
		return NULL;
	}

	/* Lookup IOMMU for this device */
	TAILQ_FOREACH(iommu, &sc->sc_drhds, link) {
		if (iommu->segment != segment)
			continue;
		/* Check for devscope match or catchall iommu */
		rc = acpidmar_match_devscope(&iommu->devices, sc->sc_pc, sid);
		if (rc != 0 || iommu->flags) {
			break;
		}
	}
	/* TAILQ_FOREACH leaves iommu NULL when the list is exhausted */
	if (!iommu) {
		printf("%s: no iommu found\n", dmar_bdf(sid));
		return NULL;
	}

	/* Search domain devices */
	TAILQ_FOREACH(dom, &iommu->domains, link) {
		TAILQ_FOREACH(ddev, &dom->devices, link) {
			/* XXX: match all functions? */
			if (ddev->sid == sid) {
				return dom;
			}
		}
	}
	if (iommu->ndoms <= 2) {
		/* Running out of domains..
create catchall domain */ if (!iommu->unity) { iommu->unity = domain_create(iommu, 1); } dom = iommu->unity; } else { dom = domain_create(iommu, --iommu->ndoms); } if (!dom) { printf("no domain here\n"); return NULL; } /* Add device to domain */ domain_add_device(dom, sid); return dom; } /* Map Guest Pages into IOMMU */ void _iommu_map(void *dom, vaddr_t va, bus_addr_t gpa, bus_size_t len) { bus_size_t i; paddr_t hpa; if (dom == NULL) { return; } DPRINTF(1, "Mapping dma: %lx = %lx/%lx\n", va, gpa, len); for (i = 0; i < len; i += PAGE_SIZE) { hpa = 0; pmap_extract(curproc->p_vmspace->vm_map.pmap, va, &hpa); domain_map_page(dom, gpa, hpa, PTE_P | PTE_R | PTE_W); gpa += PAGE_SIZE; va += PAGE_SIZE; } } /* Find IOMMU for a given PCI device */ void *_iommu_domain(int segment, int bus, int dev, int func, int *id) { struct domain *dom; dom = domain_lookup(acpidmar_sc, segment, mksid(bus, dev, func)); if (dom) { *id = dom->did; } return dom; } void domain_map_device(struct domain *dom, int sid); void domain_map_device(struct domain *dom, int sid) { struct iommu_softc *iommu; struct context_entry *ctx; paddr_t paddr; int bus, devfn; int tt, lvl; iommu = dom->iommu; bus = sid_bus(sid); devfn = sid_devfn(sid); /* AMD attach device */ if (iommu->dte) { struct ivhd_dte *dte = &iommu->dte[sid]; if (!dte->dw0) { /* Setup Device Table Entry: bus.devfn */ DPRINTF(1, "@@@ PCI Attach: %.4x[%s] %.4x\n", sid, dmar_bdf(sid), dom->did); dte_set_host_page_table_root_ptr(dte, dom->ptep); dte_set_domain(dte, dom->did); dte_set_mode(dte, 3); /* Set 3 level PTE */ dte_set_tv(dte); dte_set_valid(dte); ivhd_flush_devtab(iommu, dom->did); #ifdef IOMMU_DEBUG //ivhd_showreg(iommu); ivhd_showdte(iommu); #endif } return; } /* Create Bus mapping */ if (!root_entry_is_valid(&iommu->root[bus])) { iommu->ctx[bus] = iommu_alloc_page(iommu, &paddr); iommu->root[bus].lo = paddr | ROOT_P; iommu_flush_cache(iommu, &iommu->root[bus], sizeof(struct root_entry)); DPRINTF(0, "iommu%d: Allocate context for bus: 
%.2x pa:%.16llx va:%p\n", iommu->id, bus, (uint64_t)paddr, iommu->ctx[bus]); } /* Create DevFn mapping */ ctx = iommu->ctx[bus] + devfn; if (!context_entry_is_valid(ctx)) { tt = CTX_T_MULTI; lvl = VTD_AWTOLEVEL(iommu->agaw); /* Initialize context */ context_set_slpte(ctx, dom->ptep); context_set_translation_type(ctx, tt); context_set_domain_id(ctx, dom->did); context_set_address_width(ctx, lvl); context_set_present(ctx); /* Flush it */ iommu_flush_cache(iommu, ctx, sizeof(struct context_entry)); if ((iommu->cap & CAP_CM) || acpidmar_force_cm) { iommu_flush_ctx(iommu, CTX_DEVICE, dom->did, sid, 0); iommu_flush_tlb(iommu, IOTLB_GLOBAL, 0); } else { iommu_flush_write_buffer(iommu); } DPRINTF(0, "iommu%d: %s set context ptep:%.16llx lvl:%d did:%.4x tt:%d\n", iommu->id, dmar_bdf(sid), (uint64_t)dom->ptep, lvl, dom->did, tt); } } struct domain * acpidmar_pci_attach(struct acpidmar_softc *sc, int segment, int sid, int mapctx) { static struct domain *dom; dom = domain_lookup(sc, segment, sid); if (!dom) { printf("no domain: %s\n", dmar_bdf(sid)); return NULL; } if (mapctx) { domain_map_device(dom, sid); } return dom; } void acpidmar_pci_hook(pci_chipset_tag_t pc, struct pci_attach_args *pa) { int bus, dev, fun, sid; struct domain *dom; pcireg_t reg; if (!acpidmar_sc) { /* No DMAR, ignore */ return; } /* Add device to our list if valid */ pci_decompose_tag(pc, pa->pa_tag, &bus, &dev, &fun); sid = mksid(bus, dev, fun); if (sid_flag[sid] & SID_INVALID) return; reg = pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG); /* Add device to domain */ dom = acpidmar_pci_attach(acpidmar_sc, pa->pa_domain, sid, 0); if (dom == NULL) return; if (PCI_CLASS(reg) == PCI_CLASS_DISPLAY && PCI_SUBCLASS(reg) == PCI_SUBCLASS_DISPLAY_VGA) { dom->flag = DOM_NOMAP; } if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE && PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_ISA) { /* For ISA Bridges, map 0-16Mb as 1:1 */ printf("dmar: %.4x:%.2x:%.2x.%x mapping ISA\n", pa->pa_domain, bus, dev, fun); domain_map_pthru(dom, 0x00, 
16*1024*1024); } /* Change DMA tag */ pa->pa_dmat = &dom->dmat; } /* Create list of device scope entries from ACPI table */ void acpidmar_parse_devscope(union acpidmar_entry *de, int off, int segment, struct devlist_head *devlist) { struct acpidmar_devscope *ds; struct dmar_devlist *d; int dplen, i; TAILQ_INIT(devlist); while (off < de->length) { ds = (struct acpidmar_devscope *)((unsigned char *)de + off); off += ds->length; /* We only care about bridges and endpoints */ if (ds->type != DMAR_ENDPOINT && ds->type != DMAR_BRIDGE) continue; dplen = ds->length - sizeof(*ds); d = malloc(sizeof(*d) + dplen, M_DEVBUF, M_ZERO | M_WAITOK); d->bus = ds->bus; d->type = ds->type; d->ndp = dplen / 2; d->dp = (void *)&d[1]; memcpy(d->dp, &ds[1], dplen); TAILQ_INSERT_TAIL(devlist, d, link); DPRINTF(1, " %8s %.4x:%.2x.%.2x.%x {", ds->type == DMAR_BRIDGE ? "bridge" : "endpoint", segment, ds->bus, d->dp[0].device, d->dp[0].function); for (i = 1; i < d->ndp; i++) { DPRINTF(1, " %2x.%x ", d->dp[i].device, d->dp[i].function); } DPRINTF(1, "}\n"); } } /* DMA Remapping Hardware Unit */ void acpidmar_drhd(struct acpidmar_softc *sc, union acpidmar_entry *de) { struct iommu_softc *iommu; printf("DRHD: segment:%.4x base:%.16llx flags:%.2x\n", de->drhd.segment, de->drhd.address, de->drhd.flags); iommu = malloc(sizeof(*iommu), M_DEVBUF, M_ZERO | M_WAITOK); acpidmar_parse_devscope(de, sizeof(de->drhd), de->drhd.segment, &iommu->devices); iommu_init(sc, iommu, &de->drhd); if (de->drhd.flags) { /* Catchall IOMMU goes at end of list */ TAILQ_INSERT_TAIL(&sc->sc_drhds, iommu, link); } else { TAILQ_INSERT_HEAD(&sc->sc_drhds, iommu, link); } } /* Reserved Memory Region Reporting */ void acpidmar_rmrr(struct acpidmar_softc *sc, union acpidmar_entry *de) { struct rmrr_softc *rmrr; bios_memmap_t *im, *jm; uint64_t start, end; printf("RMRR: segment:%.4x range:%.16llx-%.16llx\n", de->rmrr.segment, de->rmrr.base, de->rmrr.limit); if (de->rmrr.limit <= de->rmrr.base) { printf(" buggy BIOS\n"); return; } 
rmrr = malloc(sizeof(*rmrr), M_DEVBUF, M_ZERO | M_WAITOK); rmrr->start = trunc_page(de->rmrr.base); rmrr->end = round_page(de->rmrr.limit); rmrr->segment = de->rmrr.segment; acpidmar_parse_devscope(de, sizeof(de->rmrr), de->rmrr.segment, &rmrr->devices); for (im = bios_memmap; im->type != BIOS_MAP_END; im++) { if (im->type != BIOS_MAP_RES) continue; /* Search for adjacent reserved regions */ start = im->addr; end = im->addr+im->size; for (jm = im+1; jm->type == BIOS_MAP_RES && end == jm->addr; jm++) { end = jm->addr+jm->size; } printf("e820: %.16llx - %.16llx\n", start, end); if (start <= rmrr->start && rmrr->end <= end) { /* Bah.. some buggy BIOS stomp outside RMRR */ printf(" ** inside E820 Reserved %.16llx %.16llx\n", start, end); rmrr->start = trunc_page(start); rmrr->end = round_page(end); break; } } TAILQ_INSERT_TAIL(&sc->sc_rmrrs, rmrr, link); } /* Root Port ATS Reporting */ void acpidmar_atsr(struct acpidmar_softc *sc, union acpidmar_entry *de) { struct atsr_softc *atsr; printf("ATSR: segment:%.4x flags:%x\n", de->atsr.segment, de->atsr.flags); atsr = malloc(sizeof(*atsr), M_DEVBUF, M_ZERO | M_WAITOK); atsr->flags = de->atsr.flags; atsr->segment = de->atsr.segment; acpidmar_parse_devscope(de, sizeof(de->atsr), de->atsr.segment, &atsr->devices); TAILQ_INSERT_TAIL(&sc->sc_atsrs, atsr, link); } void acpidmar_init(struct acpidmar_softc *sc, struct acpi_dmar *dmar) { struct rmrr_softc *rmrr; struct iommu_softc *iommu; struct domain *dom; struct dmar_devlist *dl; union acpidmar_entry *de; int off, sid, rc; domain_map_page = domain_map_page_intel; printf(": hardware width: %d, intr_remap:%d x2apic_opt_out:%d\n", dmar->haw+1, !!(dmar->flags & 0x1), !!(dmar->flags & 0x2)); sc->sc_haw = dmar->haw+1; sc->sc_flags = dmar->flags; TAILQ_INIT(&sc->sc_drhds); TAILQ_INIT(&sc->sc_rmrrs); TAILQ_INIT(&sc->sc_atsrs); off = sizeof(*dmar); while (off < dmar->hdr.length) { de = (union acpidmar_entry *)((unsigned char *)dmar + off); switch (de->type) { case DMAR_DRHD: 
acpidmar_drhd(sc, de); break; case DMAR_RMRR: acpidmar_rmrr(sc, de); break; case DMAR_ATSR: acpidmar_atsr(sc, de); break; default: printf("DMAR: unknown %x\n", de->type); break; } off += de->length; } /* Pre-create domains for iommu devices */ TAILQ_FOREACH(iommu, &sc->sc_drhds, link) { TAILQ_FOREACH(dl, &iommu->devices, link) { sid = mksid(dl->bus, dl->dp[0].device, dl->dp[0].function); dom = acpidmar_pci_attach(sc, iommu->segment, sid, 0); if (dom != NULL) { printf("%.4x:%.2x:%.2x.%x iommu:%d did:%.4x\n", iommu->segment, dl->bus, dl->dp[0].device, dl->dp[0].function, iommu->id, dom->did); } } } /* Map passthrough pages for RMRR */ TAILQ_FOREACH(rmrr, &sc->sc_rmrrs, link) { TAILQ_FOREACH(dl, &rmrr->devices, link) { sid = mksid(dl->bus, dl->dp[0].device, dl->dp[0].function); dom = acpidmar_pci_attach(sc, rmrr->segment, sid, 0); if (dom != NULL) { printf("%s map ident: %.16llx %.16llx\n", dom_bdf(dom), rmrr->start, rmrr->end); domain_map_pthru(dom, rmrr->start, rmrr->end); rc = extent_alloc_region(dom->iovamap, rmrr->start, rmrr->end, EX_WAITOK | EX_CONFLICTOK); } } } } /*===================================================== * AMD Vi *=====================================================*/ void acpiivrs_ivhd(struct acpidmar_softc *, struct acpi_ivhd *); int ivhd_iommu_init(struct acpidmar_softc *, struct iommu_softc *, struct acpi_ivhd *); int _ivhd_issue_command(struct iommu_softc *, const struct ivhd_command *); void ivhd_show_event(struct iommu_softc *, struct ivhd_event *evt, int); int ivhd_issue_command(struct iommu_softc *, const struct ivhd_command *, int); int ivhd_invalidate_domain(struct iommu_softc *, int); void ivhd_intr_map(struct iommu_softc *, int); void ivhd_checkerr(struct iommu_softc *iommu); int acpiivhd_intr(void *); int acpiivhd_intr(void *ctx) { struct iommu_softc *iommu = ctx; if (!iommu->dte) return (0); ivhd_poll_events(iommu); return (1); } /* Setup interrupt for AMD */ void ivhd_intr_map(struct iommu_softc *iommu, int devid) { 
pci_intr_handle_t ih; if (iommu->intr) return; ih.tag = pci_make_tag(NULL, sid_bus(devid), sid_dev(devid), sid_fun(devid)); ih.line = APIC_INT_VIA_MSG; ih.pin = 0; iommu->intr = pci_intr_establish(NULL, ih, IPL_NET | IPL_MPSAFE, acpiivhd_intr, iommu, "amd_iommu"); printf("amd iommu intr: %p\n", iommu->intr); } void _dumppte(struct pte_entry *pte, int lvl, vaddr_t va) { char *pfx[] = { " ", " ", " ", " ", "" }; uint64_t i, sh; struct pte_entry *npte; for (i = 0; i < 512; i++) { sh = (i << (((lvl-1) * 9) + 12)); if (pte[i].val & PTE_P) { if (lvl > 1) { npte = (void *)PMAP_DIRECT_MAP((pte[i].val & PTE_PADDR_MASK)); printf("%slvl%d: %.16llx nxt:%llu\n", pfx[lvl], lvl, pte[i].val, (pte[i].val >> 9) & 7); _dumppte(npte, lvl-1, va | sh); } else { printf("%slvl%d: %.16llx <- %.16llx \n", pfx[lvl], lvl, pte[i].val, va | sh); } } } } void ivhd_showpage(struct iommu_softc *iommu, int sid, paddr_t paddr) { struct domain *dom; static int show = 0; if (show > 10) return; show++; dom = acpidmar_pci_attach(acpidmar_sc, 0, sid, 0); if (!dom) return; printf("DTE: %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", iommu->dte[sid].dw0, iommu->dte[sid].dw1, iommu->dte[sid].dw2, iommu->dte[sid].dw3, iommu->dte[sid].dw4, iommu->dte[sid].dw5, iommu->dte[sid].dw6, iommu->dte[sid].dw7); _dumppte(dom->pte, 3, 0); } /* Display AMD IOMMU Error */ void ivhd_show_event(struct iommu_softc *iommu, struct ivhd_event *evt, int head) { int type, sid, did, flag; uint64_t address; /* Get Device, Domain, Address and Type of event */ sid = __EXTRACT(evt->dw0, EVT_SID); type = __EXTRACT(evt->dw1, EVT_TYPE); did = __EXTRACT(evt->dw1, EVT_DID); flag = __EXTRACT(evt->dw1, EVT_FLAG); address = _get64(&evt->dw2); printf("=== IOMMU Error[%.4x]: ", head); switch (type) { case ILLEGAL_DEV_TABLE_ENTRY: printf("illegal dev table entry dev=%s addr=0x%.16llx %s, %s, %s, %s\n", dmar_bdf(sid), address, evt->dw1 & EVT_TR ? "translation" : "transaction", evt->dw1 & EVT_RZ ? "reserved bit" : "invalid level", evt->dw1 & EVT_RW ? 
"write" : "read", evt->dw1 & EVT_I ? "interrupt" : "memory"); ivhd_showdte(iommu); break; case IO_PAGE_FAULT: printf("io page fault dev=%s did=0x%.4x addr=0x%.16llx\n%s, %s, %s, %s, %s, %s\n", dmar_bdf(sid), did, address, evt->dw1 & EVT_TR ? "translation" : "transaction", evt->dw1 & EVT_RZ ? "reserved bit" : "invalid level", evt->dw1 & EVT_PE ? "no perm" : "perm", evt->dw1 & EVT_RW ? "write" : "read", evt->dw1 & EVT_PR ? "present" : "not present", evt->dw1 & EVT_I ? "interrupt" : "memory"); ivhd_showdte(iommu); ivhd_showpage(iommu, sid, address); break; case DEV_TAB_HARDWARE_ERROR: printf("device table hardware error dev=%s addr=0x%.16llx %s, %s, %s\n", dmar_bdf(sid), address, evt->dw1 & EVT_TR ? "translation" : "transaction", evt->dw1 & EVT_RW ? "write" : "read", evt->dw1 & EVT_I ? "interrupt" : "memory"); ivhd_showdte(iommu); break; case PAGE_TAB_HARDWARE_ERROR: printf("page table hardware error dev=%s addr=0x%.16llx %s, %s, %s\n", dmar_bdf(sid), address, evt->dw1 & EVT_TR ? "translation" : "transaction", evt->dw1 & EVT_RW ? "write" : "read", evt->dw1 & EVT_I ? 
"interrupt" : "memory"); ivhd_showdte(iommu); break; case ILLEGAL_COMMAND_ERROR: printf("illegal command addr=0x%.16llx\n", address); ivhd_showcmd(iommu); break; case COMMAND_HARDWARE_ERROR: printf("command hardware error addr=0x%.16llx flag=0x%.4x\n", address, flag); ivhd_showcmd(iommu); break; case IOTLB_INV_TIMEOUT: printf("iotlb invalidation timeout dev=%s address=0x%.16llx\n", dmar_bdf(sid), address); break; case INVALID_DEVICE_REQUEST: printf("invalid device request dev=%s addr=0x%.16llx flag=0x%.4x\n", dmar_bdf(sid), address, flag); break; default: printf("unknown type=0x%.2x\n", type); break; } /* Clear old event */ evt->dw0 = 0; evt->dw1 = 0; evt->dw2 = 0; evt->dw3 = 0; } /* AMD: Process IOMMU error from hardware */ int ivhd_poll_events(struct iommu_softc *iommu) { uint32_t head, tail; int sz; sz = sizeof(struct ivhd_event); head = iommu_read_4(iommu, EVT_HEAD_REG); tail = iommu_read_4(iommu, EVT_TAIL_REG); if (head == tail) { /* No pending events */ return (0); } while (head != tail) { ivhd_show_event(iommu, iommu->evt_tbl + head, head); head = (head + sz) % EVT_TBL_SIZE; } iommu_write_4(iommu, EVT_HEAD_REG, head); return (0); } /* AMD: Issue command to IOMMU queue */ int _ivhd_issue_command(struct iommu_softc *iommu, const struct ivhd_command *cmd) { u_long rf; uint32_t head, tail, next; int sz; head = iommu_read_4(iommu, CMD_HEAD_REG); sz = sizeof(*cmd); rf = intr_disable(); tail = iommu_read_4(iommu, CMD_TAIL_REG); next = (tail + sz) % CMD_TBL_SIZE; if (next == head) { printf("FULL\n"); /* Queue is full */ intr_restore(rf); return -EBUSY; } memcpy(iommu->cmd_tbl + tail, cmd, sz); iommu_write_4(iommu, CMD_TAIL_REG, next); intr_restore(rf); return (tail / sz); } #define IVHD_MAXDELAY 8 int ivhd_issue_command(struct iommu_softc *iommu, const struct ivhd_command *cmd, int wait) { struct ivhd_command wq = { 0 }; volatile uint64_t wv __aligned(16) = 0LL; paddr_t paddr; int rc, i; rc = _ivhd_issue_command(iommu, cmd); if (rc >= 0 && wait) { /* Wait for 
previous commands to complete. * Store address of completion variable to command */ pmap_extract(pmap_kernel(), (vaddr_t)&wv, &paddr); wq.dw0 = (paddr & ~0xF) | 0x1; wq.dw1 = (COMPLETION_WAIT << CMD_SHIFT) | ((paddr >> 32) & 0xFFFFF); wq.dw2 = 0xDEADBEEF; wq.dw3 = 0xFEEDC0DE; rc = _ivhd_issue_command(iommu, &wq); /* wv will change to value in dw2/dw3 when command is complete */ for (i = 0; i < IVHD_MAXDELAY && !wv; i++) { DELAY(10 << i); } if (i == IVHD_MAXDELAY) { printf("ivhd command timeout: %.8x %.8x %.8x %.8x wv:%llx idx:%x\n", cmd->dw0, cmd->dw1, cmd->dw2, cmd->dw3, wv, rc); } } return rc; } /* AMD: Flush changes to Device Table Entry for a specific domain */ int ivhd_flush_devtab(struct iommu_softc *iommu, int did) { struct ivhd_command cmd = { .dw0 = did, .dw1 = INVALIDATE_DEVTAB_ENTRY << CMD_SHIFT }; return ivhd_issue_command(iommu, &cmd, 1); } /* AMD: Invalidate all IOMMU device and page tables */ int ivhd_invalidate_iommu_all(struct iommu_softc *iommu) { struct ivhd_command cmd = { .dw1 = INVALIDATE_IOMMU_ALL << CMD_SHIFT }; return ivhd_issue_command(iommu, &cmd, 0); } /* AMD: Invalidate interrupt remapping */ int ivhd_invalidate_interrupt_table(struct iommu_softc *iommu, int did) { struct ivhd_command cmd = { .dw0 = did, .dw1 = INVALIDATE_INTERRUPT_TABLE << CMD_SHIFT }; return ivhd_issue_command(iommu, &cmd, 0); } /* AMD: Invalidate all page tables in a domain */ int ivhd_invalidate_domain(struct iommu_softc *iommu, int did) { struct ivhd_command cmd = { .dw1 = did | (INVALIDATE_IOMMU_PAGES << CMD_SHIFT) }; cmd.dw2 = 0xFFFFF000 | 0x3; cmd.dw3 = 0x7FFFFFFF; return ivhd_issue_command(iommu, &cmd, 1); } /* AMD: Display Registers */ void ivhd_showreg(struct iommu_softc *iommu) { printf("---- dt:%.16llx cmd:%.16llx evt:%.16llx ctl:%.16llx sts:%.16llx\n", iommu_read_8(iommu, DEV_TAB_BASE_REG), iommu_read_8(iommu, CMD_BASE_REG), iommu_read_8(iommu, EVT_BASE_REG), iommu_read_8(iommu, IOMMUCTL_REG), iommu_read_8(iommu, IOMMUSTS_REG)); printf("---- cmd 
queue:%.16llx %.16llx evt queue:%.16llx %.16llx\n", iommu_read_8(iommu, CMD_HEAD_REG), iommu_read_8(iommu, CMD_TAIL_REG), iommu_read_8(iommu, EVT_HEAD_REG), iommu_read_8(iommu, EVT_TAIL_REG)); } /* AMD: Generate Errors to test event handler */ void ivhd_checkerr(struct iommu_softc *iommu) { struct ivhd_command cmd = { -1, -1, -1, -1 }; /* Generate ILLEGAL DEV TAB entry? */ iommu->dte[0x2303].dw0 = -1; /* invalid */ iommu->dte[0x2303].dw2 = 0x1234; /* domain */ iommu->dte[0x2303].dw7 = -1; /* reserved */ ivhd_flush_devtab(iommu, 0x1234); ivhd_poll_events(iommu); /* Generate ILLEGAL_COMMAND_ERROR : ok */ ivhd_issue_command(iommu, &cmd, 0); ivhd_poll_events(iommu); /* Generate page hardware error */ } /* AMD: Show Device Table Entry */ void ivhd_showdte(struct iommu_softc *iommu) { int i; for (i = 0; i < 65536; i++) { if (iommu->dte[i].dw0) { printf("%.2x:%.2x.%x: %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n", i >> 8, (i >> 3) & 0x1F, i & 0x7, iommu->dte[i].dw0, iommu->dte[i].dw1, iommu->dte[i].dw2, iommu->dte[i].dw3, iommu->dte[i].dw4, iommu->dte[i].dw5, iommu->dte[i].dw6, iommu->dte[i].dw7); } } } /* AMD: Show command entries */ void ivhd_showcmd(struct iommu_softc *iommu) { struct ivhd_command *ihd; paddr_t phd; int i; ihd = iommu->cmd_tbl; phd = iommu_read_8(iommu, CMD_BASE_REG) & CMD_BASE_MASK; for (i = 0; i < 4096 / 128; i++) { printf("%.2x: %.16llx %.8x %.8x %.8x %.8x\n", i, (uint64_t)phd + i * sizeof(*ihd), ihd[i].dw0,ihd[i].dw1,ihd[i].dw2,ihd[i].dw3); } } #define _c(x) (int)((iommu->ecap >> x ##_SHIFT) & x ## _MASK) /* AMD: Initialize IOMMU */ int ivhd_iommu_init(struct acpidmar_softc *sc, struct iommu_softc *iommu, struct acpi_ivhd *ivhd) { static int niommu; paddr_t paddr; uint64_t ov; if (sc == NULL || iommu == NULL || ivhd == NULL) { printf("Bad pointer to iommu_init!\n"); return -1; } if (_bus_space_map(sc->sc_memt, ivhd->address, 0x80000, 0, &iommu->ioh) != 0) { printf("Bus Space Map fails\n"); return -1; } TAILQ_INIT(&iommu->domains); 
TAILQ_INIT(&iommu->devices); /* Setup address width and number of domains */ iommu->id = ++niommu; iommu->iot = sc->sc_memt; iommu->mgaw = 48; iommu->agaw = 48; iommu->flags = 1; iommu->segment = 0; iommu->ndoms = 256; printf(": AMD iommu%d at 0x%.8llx\n", iommu->id, ivhd->address); iommu->ecap = iommu_read_8(iommu, EXTFEAT_REG); DPRINTF(0,"iommu%d: ecap:%.16llx ", iommu->id, iommu->ecap); DPRINTF(0,"%s%s%s%s%s%s%s%s\n", iommu->ecap & EFR_PREFSUP ? "pref " : "", iommu->ecap & EFR_PPRSUP ? "ppr " : "", iommu->ecap & EFR_NXSUP ? "nx " : "", iommu->ecap & EFR_GTSUP ? "gt " : "", iommu->ecap & EFR_IASUP ? "ia " : "", iommu->ecap & EFR_GASUP ? "ga " : "", iommu->ecap & EFR_HESUP ? "he " : "", iommu->ecap & EFR_PCSUP ? "pc " : ""); DPRINTF(0,"hats:%x gats:%x glxsup:%x smif:%x smifrc:%x gam:%x\n", _c(EFR_HATS), _c(EFR_GATS), _c(EFR_GLXSUP), _c(EFR_SMIFSUP), _c(EFR_SMIFRC), _c(EFR_GAMSUP)); /* Turn off iommu */ ov = iommu_read_8(iommu, IOMMUCTL_REG); iommu_write_8(iommu, IOMMUCTL_REG, ov & ~(CTL_IOMMUEN | CTL_COHERENT | CTL_HTTUNEN | CTL_RESPASSPW | CTL_PASSPW | CTL_ISOC)); /* Enable intr, mark IOMMU device as invalid for remap */ sid_flag[ivhd->devid] |= SID_INVALID; ivhd_intr_map(iommu, ivhd->devid); /* Setup command buffer with 4k buffer (128 entries) */ iommu->cmd_tbl = iommu_alloc_page(iommu, &paddr); iommu_write_8(iommu, CMD_BASE_REG, (paddr & CMD_BASE_MASK) | CMD_TBL_LEN_4K); iommu_write_4(iommu, CMD_HEAD_REG, 0x00); iommu_write_4(iommu, CMD_TAIL_REG, 0x00); iommu->cmd_tblp = paddr; /* Setup event log with 4k buffer (128 entries) */ iommu->evt_tbl = iommu_alloc_page(iommu, &paddr); iommu_write_8(iommu, EVT_BASE_REG, (paddr & EVT_BASE_MASK) | EVT_TBL_LEN_4K); iommu_write_4(iommu, EVT_HEAD_REG, 0x00); iommu_write_4(iommu, EVT_TAIL_REG, 0x00); iommu->evt_tblp = paddr; /* Setup device table * 1 entry per source ID (bus:device:function - 64k entries) */ iommu->dte = sc->sc_hwdte; iommu_write_8(iommu, DEV_TAB_BASE_REG, (sc->sc_hwdtep & DEV_TAB_MASK) | DEV_TAB_LEN); /* 
Enable IOMMU */ ov |= (CTL_IOMMUEN | CTL_EVENTLOGEN | CTL_CMDBUFEN | CTL_EVENTINTEN); if (ivhd->flags & IVHD_COHERENT) ov |= CTL_COHERENT; if (ivhd->flags & IVHD_HTTUNEN) ov |= CTL_HTTUNEN; if (ivhd->flags & IVHD_RESPASSPW) ov |= CTL_RESPASSPW; if (ivhd->flags & IVHD_PASSPW) ov |= CTL_PASSPW; if (ivhd->flags & IVHD_ISOC) ov |= CTL_ISOC; ov &= ~(CTL_INVTIMEOUT_MASK << CTL_INVTIMEOUT_SHIFT); ov |= (CTL_INVTIMEOUT_10MS << CTL_INVTIMEOUT_SHIFT); iommu_write_8(iommu, IOMMUCTL_REG, ov); ivhd_invalidate_iommu_all(iommu); TAILQ_INSERT_TAIL(&sc->sc_drhds, iommu, link); return 0; } void acpiivrs_ivhd(struct acpidmar_softc *sc, struct acpi_ivhd *ivhd) { struct iommu_softc *iommu; struct acpi_ivhd_ext *ext; union acpi_ivhd_entry *ie; int start, off, dte, all_dte = 0; if (ivhd->type == IVRS_IVHD_EXT) { ext = (struct acpi_ivhd_ext *)ivhd; DPRINTF(0,"ivhd: %.2x %.2x %.4x %.4x:%s %.4x %.16llx %.4x %.8x %.16llx\n", ext->type, ext->flags, ext->length, ext->segment, dmar_bdf(ext->devid), ext->cap, ext->address, ext->info, ext->attrib, ext->efr); if (ext->flags & IVHD_PPRSUP) DPRINTF(0," PPRSup"); if (ext->flags & IVHD_PREFSUP) DPRINTF(0," PreFSup"); if (ext->flags & IVHD_COHERENT) DPRINTF(0," Coherent"); if (ext->flags & IVHD_IOTLB) DPRINTF(0," Iotlb"); if (ext->flags & IVHD_ISOC) DPRINTF(0," ISoc"); if (ext->flags & IVHD_RESPASSPW) DPRINTF(0," ResPassPW"); if (ext->flags & IVHD_PASSPW) DPRINTF(0," PassPW"); if (ext->flags & IVHD_HTTUNEN) DPRINTF(0, " HtTunEn"); if (ext->flags) DPRINTF(0,"\n"); off = sizeof(*ext); iommu = malloc(sizeof(*iommu), M_DEVBUF, M_ZERO|M_WAITOK); ivhd_iommu_init(sc, iommu, ivhd); } else { DPRINTF(0,"ivhd: %.2x %.2x %.4x %.4x:%s %.4x %.16llx %.4x %.8x\n", ivhd->type, ivhd->flags, ivhd->length, ivhd->segment, dmar_bdf(ivhd->devid), ivhd->cap, ivhd->address, ivhd->info, ivhd->feature); if (ivhd->flags & IVHD_PPRSUP) DPRINTF(0," PPRSup"); if (ivhd->flags & IVHD_PREFSUP) DPRINTF(0," PreFSup"); if (ivhd->flags & IVHD_COHERENT) DPRINTF(0," Coherent"); if 
(ivhd->flags & IVHD_IOTLB) DPRINTF(0," Iotlb"); if (ivhd->flags & IVHD_ISOC) DPRINTF(0," ISoc"); if (ivhd->flags & IVHD_RESPASSPW) DPRINTF(0," ResPassPW"); if (ivhd->flags & IVHD_PASSPW) DPRINTF(0," PassPW"); if (ivhd->flags & IVHD_HTTUNEN) DPRINTF(0, " HtTunEn"); if (ivhd->flags) DPRINTF(0,"\n"); off = sizeof(*ivhd); } while (off < ivhd->length) { ie = (void *)ivhd + off; switch (ie->type) { case IVHD_ALL: all_dte = ie->all.data; DPRINTF(0," ALL %.4x\n", dte); off += sizeof(ie->all); break; case IVHD_SEL: dte = ie->sel.data; DPRINTF(0," SELECT: %s %.4x\n", dmar_bdf(ie->sel.devid), dte); off += sizeof(ie->sel); break; case IVHD_SOR: dte = ie->sor.data; start = ie->sor.devid; DPRINTF(0," SOR: %s %.4x\n", dmar_bdf(start), dte); off += sizeof(ie->sor); break; case IVHD_EOR: DPRINTF(0," EOR: %s\n", dmar_bdf(ie->eor.devid)); off += sizeof(ie->eor); break; case IVHD_ALIAS_SEL: dte = ie->alias.data; DPRINTF(0," ALIAS: src=%s: ", dmar_bdf(ie->alias.srcid)); DPRINTF(0," %s %.4x\n", dmar_bdf(ie->alias.devid), dte); off += sizeof(ie->alias); break; case IVHD_ALIAS_SOR: dte = ie->alias.data; DPRINTF(0," ALIAS_SOR: %s %.4x ", dmar_bdf(ie->alias.devid), dte); DPRINTF(0," src=%s\n", dmar_bdf(ie->alias.srcid)); off += sizeof(ie->alias); break; case IVHD_EXT_SEL: dte = ie->ext.data; DPRINTF(0," EXT SEL: %s %.4x %.8x\n", dmar_bdf(ie->ext.devid), dte, ie->ext.extdata); off += sizeof(ie->ext); break; case IVHD_EXT_SOR: dte = ie->ext.data; DPRINTF(0," EXT SOR: %s %.4x %.8x\n", dmar_bdf(ie->ext.devid), dte, ie->ext.extdata); off += sizeof(ie->ext); break; case IVHD_SPECIAL: DPRINTF(0," SPECIAL\n"); off += sizeof(ie->special); break; default: DPRINTF(0," 2:unknown %x\n", ie->type); off = ivhd->length; break; } } } void acpiivrs_init(struct acpidmar_softc *sc, struct acpi_ivrs *ivrs) { union acpi_ivrs_entry *ie; int off; if (!sc->sc_hwdte) { sc->sc_hwdte = iommu_alloc_hwdte(sc, HWDTE_SIZE, &sc->sc_hwdtep); if (sc->sc_hwdte == NULL) panic("Can't allocate HWDTE!"); } domain_map_page = 
domain_map_page_amd; DPRINTF(0,"IVRS Version: %d\n", ivrs->hdr.revision); DPRINTF(0," VA Size: %d\n", (ivrs->ivinfo >> IVRS_VASIZE_SHIFT) & IVRS_VASIZE_MASK); DPRINTF(0," PA Size: %d\n", (ivrs->ivinfo >> IVRS_PASIZE_SHIFT) & IVRS_PASIZE_MASK); TAILQ_INIT(&sc->sc_drhds); TAILQ_INIT(&sc->sc_rmrrs); TAILQ_INIT(&sc->sc_atsrs); DPRINTF(0,"======== IVRS\n"); off = sizeof(*ivrs); while (off < ivrs->hdr.length) { ie = (void *)ivrs + off; switch (ie->type) { case IVRS_IVHD: case IVRS_IVHD_EXT: acpiivrs_ivhd(sc, &ie->ivhd); break; case IVRS_IVMD_ALL: case IVRS_IVMD_SPECIFIED: case IVRS_IVMD_RANGE: DPRINTF(0,"ivmd\n"); break; default: DPRINTF(0,"1:unknown: %x\n", ie->type); break; } off += ie->length; } DPRINTF(0,"======== End IVRS\n"); } static int acpiivhd_activate(struct iommu_softc *iommu, int act) { switch (act) { case DVACT_SUSPEND: iommu->flags |= IOMMU_FLAGS_SUSPEND; break; case DVACT_RESUME: iommu->flags &= ~IOMMU_FLAGS_SUSPEND; break; } return (0); } int acpidmar_activate(struct device *self, int act) { struct acpidmar_softc *sc = (struct acpidmar_softc *)self; struct iommu_softc *iommu; printf("called acpidmar_activate %d %p\n", act, sc); if (sc == NULL) { return (0); } switch (act) { case DVACT_RESUME: TAILQ_FOREACH(iommu, &sc->sc_drhds, link) { printf("iommu%d resume\n", iommu->id); if (iommu->dte) { acpiivhd_activate(iommu, act); continue; } iommu_flush_write_buffer(iommu); iommu_set_rtaddr(iommu, iommu->rtaddr); iommu_write_4(iommu, DMAR_FEDATA_REG, iommu->fedata); iommu_write_4(iommu, DMAR_FEADDR_REG, iommu->feaddr); iommu_write_4(iommu, DMAR_FEUADDR_REG, iommu->feaddr >> 32); if ((iommu->flags & (IOMMU_FLAGS_BAD|IOMMU_FLAGS_SUSPEND)) == IOMMU_FLAGS_SUSPEND) { printf("enable wakeup translation\n"); iommu_enable_translation(iommu, 1); } iommu_showcfg(iommu, -1); } break; case DVACT_SUSPEND: TAILQ_FOREACH(iommu, &sc->sc_drhds, link) { printf("iommu%d suspend\n", iommu->id); if (iommu->flags & IOMMU_FLAGS_BAD) continue; if (iommu->dte) { acpiivhd_activate(iommu, 
act); continue; } iommu->flags |= IOMMU_FLAGS_SUSPEND; iommu_enable_translation(iommu, 0); iommu_showcfg(iommu, -1); } break; } return (0); } int acpidmar_match(struct device *parent, void *match, void *aux) { struct acpi_attach_args *aaa = aux; struct acpi_table_header *hdr; /* If we do not have a table, it is not us */ if (aaa->aaa_table == NULL) return (0); /* If it is an DMAR table, we can attach */ hdr = (struct acpi_table_header *)aaa->aaa_table; if (memcmp(hdr->signature, DMAR_SIG, sizeof(DMAR_SIG) - 1) == 0) return (1); if (memcmp(hdr->signature, IVRS_SIG, sizeof(IVRS_SIG) - 1) == 0) return (1); return (0); } void acpidmar_attach(struct device *parent, struct device *self, void *aux) { struct acpidmar_softc *sc = (void *)self; struct acpi_attach_args *aaa = aux; struct acpi_dmar *dmar = (struct acpi_dmar *)aaa->aaa_table; struct acpi_ivrs *ivrs = (struct acpi_ivrs *)aaa->aaa_table; struct acpi_table_header *hdr; hdr = (struct acpi_table_header *)aaa->aaa_table; sc->sc_memt = aaa->aaa_memt; sc->sc_dmat = aaa->aaa_dmat; if (memcmp(hdr->signature, DMAR_SIG, sizeof(DMAR_SIG) - 1) == 0) { acpidmar_sc = sc; acpidmar_init(sc, dmar); } if (memcmp(hdr->signature, IVRS_SIG, sizeof(IVRS_SIG) - 1) == 0) { acpidmar_sc = sc; acpiivrs_init(sc, ivrs); } } /* Interrupt shiz */ void acpidmar_msi_hwmask(struct pic *, int); void acpidmar_msi_hwunmask(struct pic *, int); void acpidmar_msi_addroute(struct pic *, struct cpu_info *, int, int, int); void acpidmar_msi_delroute(struct pic *, struct cpu_info *, int, int, int); void acpidmar_msi_hwmask(struct pic *pic, int pin) { struct iommu_pic *ip = (void *)pic; struct iommu_softc *iommu = ip->iommu; printf("msi_hwmask\n"); mtx_enter(&iommu->reg_lock); iommu_write_4(iommu, DMAR_FECTL_REG, FECTL_IM); iommu_read_4(iommu, DMAR_FECTL_REG); mtx_leave(&iommu->reg_lock); } void acpidmar_msi_hwunmask(struct pic *pic, int pin) { struct iommu_pic *ip = (void *)pic; struct iommu_softc *iommu = ip->iommu; printf("msi_hwunmask\n"); 
mtx_enter(&iommu->reg_lock); iommu_write_4(iommu, DMAR_FECTL_REG, 0); iommu_read_4(iommu, DMAR_FECTL_REG); mtx_leave(&iommu->reg_lock); } void acpidmar_msi_addroute(struct pic *pic, struct cpu_info *ci, int pin, int vec, int type) { struct iommu_pic *ip = (void *)pic; struct iommu_softc *iommu = ip->iommu; mtx_enter(&iommu->reg_lock); iommu->fedata = vec; iommu->feaddr = 0xfee00000L | (ci->ci_apicid << 12); iommu_write_4(iommu, DMAR_FEDATA_REG, vec); iommu_write_4(iommu, DMAR_FEADDR_REG, iommu->feaddr); iommu_write_4(iommu, DMAR_FEUADDR_REG, iommu->feaddr >> 32); mtx_leave(&iommu->reg_lock); } void acpidmar_msi_delroute(struct pic *pic, struct cpu_info *ci, int pin, int vec, int type) { printf("msi_delroute\n"); } void * acpidmar_intr_establish(void *ctx, int level, int (*func)(void *), void *arg, const char *what) { struct iommu_softc *iommu = ctx; struct pic *pic; pic = &iommu->pic.pic; iommu->pic.iommu = iommu; strlcpy(pic->pic_dev.dv_xname, "dmarpic", sizeof(pic->pic_dev.dv_xname)); pic->pic_type = PIC_MSI; pic->pic_hwmask = acpidmar_msi_hwmask; pic->pic_hwunmask = acpidmar_msi_hwunmask; pic->pic_addroute = acpidmar_msi_addroute; pic->pic_delroute = acpidmar_msi_delroute; pic->pic_edge_stubs = ioapic_edge_stubs; #ifdef MULTIPROCESSOR mtx_init(&pic->pic_mutex, level); #endif return intr_establish(-1, pic, 0, IST_PULSE, level, NULL, func, arg, what); } /* Intel: Handle DMAR Interrupt */ int acpidmar_intr(void *ctx) { struct iommu_softc *iommu = ctx; struct fault_entry fe; static struct fault_entry ofe; int fro, nfr, fri, i; uint32_t sts; /*splassert(IPL_HIGH);*/ if (!(iommu->gcmd & GCMD_TE)) { return (1); } mtx_enter(&iommu->reg_lock); sts = iommu_read_4(iommu, DMAR_FECTL_REG); sts = iommu_read_4(iommu, DMAR_FSTS_REG); if (!(sts & FSTS_PPF)) { mtx_leave(&iommu->reg_lock); return (1); } nfr = cap_nfr(iommu->cap); fro = cap_fro(iommu->cap); fri = (sts >> FSTS_FRI_SHIFT) & FSTS_FRI_MASK; for (i = 0; i < nfr; i++) { fe.hi = iommu_read_8(iommu, fro + (fri*16) + 8); if 
(!(fe.hi & FRCD_HI_F)) break; fe.lo = iommu_read_8(iommu, fro + (fri*16)); if (ofe.hi != fe.hi || ofe.lo != fe.lo) { iommu_showfault(iommu, fri, &fe); ofe.hi = fe.hi; ofe.lo = fe.lo; } fri = (fri + 1) % nfr; } iommu_write_4(iommu, DMAR_FSTS_REG, FSTS_PFO | FSTS_PPF); mtx_leave(&iommu->reg_lock); return (1); } const char *vtd_faults[] = { "Software", "Root Entry Not Present", /* ok (rtaddr + 4096) */ "Context Entry Not Present", /* ok (no CTX_P) */ "Context Entry Invalid", /* ok (tt = 3) */ "Address Beyond MGAW", "Write", /* ok */ "Read", /* ok */ "Paging Entry Invalid", /* ok */ "Root Table Invalid", "Context Table Invalid", "Root Entry Reserved", /* ok (root.lo |= 0x4) */ "Context Entry Reserved", "Paging Entry Reserved", "Context Entry TT", "Reserved", }; void iommu_showpte(uint64_t, int, uint64_t); /* Intel: Show IOMMU page table entry */ void iommu_showpte(uint64_t ptep, int lvl, uint64_t base) { uint64_t nb, pb, i; struct pte_entry *pte; pte = (void *)PMAP_DIRECT_MAP(ptep); for (i = 0; i < 512; i++) { if (!(pte[i].val & PTE_P)) continue; nb = base + (i << lvl); pb = pte[i].val & ~VTD_PAGE_MASK; if(lvl == VTD_LEVEL0) { printf(" %3llx %.16llx = %.16llx %c%c %s\n", i, nb, pb, pte[i].val == PTE_R ? 'r' : ' ', pte[i].val & PTE_W ? 'w' : ' ', (nb == pb) ? " ident" : ""); if (nb == pb) return; } else { iommu_showpte(pb, lvl - VTD_STRIDE_SIZE, nb); } } } /* Intel: Show IOMMU configuration */ void iommu_showcfg(struct iommu_softc *iommu, int sid) { int i, j, sts, cmd; struct context_entry *ctx; pcitag_t tag; pcireg_t clc; cmd = iommu_read_4(iommu, DMAR_GCMD_REG); sts = iommu_read_4(iommu, DMAR_GSTS_REG); printf("iommu%d: flags:%d root pa:%.16llx %s %s %s %.8x %.8x\n", iommu->id, iommu->flags, iommu_read_8(iommu, DMAR_RTADDR_REG), sts & GSTS_TES ? "enabled" : "disabled", sts & GSTS_QIES ? "qi" : "ccmd", sts & GSTS_IRES ? 
"ir" : "", cmd, sts); for (i = 0; i < 256; i++) { if (!root_entry_is_valid(&iommu->root[i])) { continue; } for (j = 0; j < 256; j++) { ctx = iommu->ctx[i] + j; if (!context_entry_is_valid(ctx)) { continue; } tag = pci_make_tag(NULL, i, (j >> 3), j & 0x7); clc = pci_conf_read(NULL, tag, 0x08) >> 8; printf(" %.2x:%.2x.%x lvl:%d did:%.4x tt:%d ptep:%.16llx flag:%x cc:%.6x\n", i, (j >> 3), j & 7, context_address_width(ctx), context_domain_id(ctx), context_translation_type(ctx), context_pte(ctx), context_user(ctx), clc); #if 0 /* dump pagetables */ iommu_showpte(ctx->lo & ~VTD_PAGE_MASK, iommu->agaw - VTD_STRIDE_SIZE, 0); #endif } } } /* Intel: Show IOMMU fault */ void iommu_showfault(struct iommu_softc *iommu, int fri, struct fault_entry *fe) { int bus, dev, fun, type, fr, df; bios_memmap_t *im; const char *mapped; if (!(fe->hi & FRCD_HI_F)) return; type = (fe->hi & FRCD_HI_T) ? 'r' : 'w'; fr = (fe->hi >> FRCD_HI_FR_SHIFT) & FRCD_HI_FR_MASK; bus = (fe->hi >> FRCD_HI_BUS_SHIFT) & FRCD_HI_BUS_MASK; dev = (fe->hi >> FRCD_HI_DEV_SHIFT) & FRCD_HI_DEV_MASK; fun = (fe->hi >> FRCD_HI_FUN_SHIFT) & FRCD_HI_FUN_MASK; df = (fe->hi >> FRCD_HI_FUN_SHIFT) & 0xFF; iommu_showcfg(iommu, mksid(bus,dev,fun)); if (!iommu->ctx[bus]) { /* Bus is not initialized */ mapped = "nobus"; } else if (!context_entry_is_valid(&iommu->ctx[bus][df])) { /* DevFn not initialized */ mapped = "nodevfn"; } else if (context_user(&iommu->ctx[bus][df]) != 0xA) { /* no bus_space_map */ mapped = "nomap"; } else { /* bus_space_map */ mapped = "mapped"; } printf("fri%d: dmar: %.2x:%.2x.%x %s error at %llx fr:%d [%s] iommu:%d [%s]\n", fri, bus, dev, fun, type == 'r' ? "read" : "write", fe->lo, fr, fr <= 13 ? vtd_faults[fr] : "unknown", iommu->id, mapped); for (im = bios_memmap; im->type != BIOS_MAP_END; im++) { if ((im->type == BIOS_MAP_RES) && (im->addr <= fe->lo) && (fe->lo <= im->addr+im->size)) { printf("mem in e820.reserved\n"); } } #ifdef DDB if (acpidmar_ddb) db_enter(); #endif }
a44132b40a8e841716de917838f734659153f1a1
51de1ebe7fa09fb262e015fb454829987ed5fc83
/iothub_client/tests/common_longhaul/iothub_client_statistics.h
2be52af3d3bcdd850904c99199ee8010fe3ae015
[ "MIT" ]
permissive
Azure/azure-iot-sdk-c
54bf46938f5b8089ba06081cb4d7967fa3a8f777
1f3d95b4dae09927ae4bbe479e52d48acef1f93c
refs/heads/main
2023-08-31T03:36:13.694208
2023-08-30T23:56:13
2023-08-30T23:56:13
70,934,373
629
902
NOASSERTION
2023-09-06T17:41:38
2016-10-14T17:54:57
C
UTF-8
C
false
false
5,240
h
iothub_client_statistics.h
// Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. #ifndef IOTHUB_CLIENT_STATISTICS_H #define IOTHUB_CLIENT_STATISTICS_H #include <stdlib.h> #include <stddef.h> #include <time.h> #include "iothub_messaging_ll.h" #include "iothub_devicemethod.h" #include "iothub_devicetwin.h" #include "iothub_client_ll.h" #define TELEMETRY_EVENT_TYPE_VALUES \ TELEMETRY_QUEUED, \ TELEMETRY_SENT, \ TELEMETRY_RECEIVED MU_DEFINE_ENUM(TELEMETRY_EVENT_TYPE, TELEMETRY_EVENT_TYPE_VALUES) #define C2D_EVENT_TYPE_VALUES \ C2D_QUEUED, \ C2D_SENT, \ C2D_RECEIVED MU_DEFINE_ENUM(C2D_EVENT_TYPE, C2D_EVENT_TYPE_VALUES) #define DEVICE_METHOD_EVENT_TYPE_VALUES \ DEVICE_METHOD_INVOKED, \ DEVICE_METHOD_RECEIVED MU_DEFINE_ENUM(DEVICE_METHOD_EVENT_TYPE, DEVICE_METHOD_EVENT_TYPE_VALUES) #define DEVICE_TWIN_EVENT_TYPE_VALUES \ DEVICE_TWIN_UPDATE_QUEUED, \ DEVICE_TWIN_UPDATE_SENT, \ DEVICE_TWIN_UPDATE_RECEIVED MU_DEFINE_ENUM(DEVICE_TWIN_EVENT_TYPE, DEVICE_TWIN_EVENT_TYPE_VALUES) typedef struct TELEMETRY_INFO_TAG { size_t message_id; time_t time_queued; size_t send_result; time_t time_sent; IOTHUB_CLIENT_CONFIRMATION_RESULT send_callback_result; time_t time_received; } TELEMETRY_INFO; typedef struct IOTHUB_CLIENT_STATISTICS_TELEMETRY_SUMMARY_TAG { size_t messages_sent; size_t messages_received; double min_travel_time_secs; double max_travel_time_secs; } IOTHUB_CLIENT_STATISTICS_TELEMETRY_SUMMARY; typedef struct C2D_MESSAGE_INFO_TAG { size_t message_id; time_t time_queued; size_t send_result; time_t time_sent; IOTHUB_MESSAGING_RESULT send_callback_result; time_t time_received; } C2D_MESSAGE_INFO; typedef struct IOTHUB_CLIENT_STATISTICS_C2D_SUMMARY_TAG { size_t messages_sent; size_t messages_received; double min_travel_time_secs; double max_travel_time_secs; } IOTHUB_CLIENT_STATISTICS_C2D_SUMMARY; typedef struct DEVICE_METHOD_INFO_TAG { size_t method_id; time_t time_invoked; IOTHUB_DEVICE_METHOD_RESULT 
method_result; time_t time_received; } DEVICE_METHOD_INFO; typedef struct IOTHUB_CLIENT_STATISTICS_DEVICE_METHOD_SUMMARY_TAG { size_t methods_invoked; size_t methods_received; double min_travel_time_secs; double max_travel_time_secs; } IOTHUB_CLIENT_STATISTICS_DEVICE_METHOD_SUMMARY; typedef struct DEVICE_TWIN_DESIRED_INFO_TAG { size_t update_id; time_t time_updated; int update_result; int version; time_t time_received; } DEVICE_TWIN_DESIRED_INFO; typedef struct DEVICE_TWIN_REPORTED_INFO_TAG { size_t update_id; time_t time_queued; IOTHUB_CLIENT_RESULT update_result; time_t time_sent; int send_status_code; time_t time_received; } DEVICE_TWIN_REPORTED_INFO; typedef struct IOTHUB_CLIENT_STATISTICS_DEVICE_TWIN_SUMMARY_TAG { size_t updates_sent; size_t updates_received; double min_travel_time_secs; double max_travel_time_secs; } IOTHUB_CLIENT_STATISTICS_DEVICE_TWIN_SUMMARY; typedef struct IOTHUB_CLIENT_STATISTICS_TAG* IOTHUB_CLIENT_STATISTICS_HANDLE; extern IOTHUB_CLIENT_STATISTICS_HANDLE iothub_client_statistics_create(void); extern char* iothub_client_statistics_to_json(IOTHUB_CLIENT_STATISTICS_HANDLE handle); extern int iothub_client_statistics_add_connection_status(IOTHUB_CLIENT_STATISTICS_HANDLE handle, IOTHUB_CLIENT_CONNECTION_STATUS status, IOTHUB_CLIENT_CONNECTION_STATUS_REASON reason); extern int iothub_client_statistics_add_telemetry_info(IOTHUB_CLIENT_STATISTICS_HANDLE handle, TELEMETRY_EVENT_TYPE type, TELEMETRY_INFO* info); extern int iothub_client_statistics_get_telemetry_summary(IOTHUB_CLIENT_STATISTICS_HANDLE handle, IOTHUB_CLIENT_STATISTICS_TELEMETRY_SUMMARY* summary); extern int iothub_client_statistics_add_c2d_info(IOTHUB_CLIENT_STATISTICS_HANDLE handle, C2D_EVENT_TYPE type, C2D_MESSAGE_INFO* info); extern int iothub_client_statistics_get_c2d_summary(IOTHUB_CLIENT_STATISTICS_HANDLE handle, IOTHUB_CLIENT_STATISTICS_C2D_SUMMARY* summary); extern int iothub_client_statistics_add_device_method_info(IOTHUB_CLIENT_STATISTICS_HANDLE handle, 
DEVICE_METHOD_EVENT_TYPE type, DEVICE_METHOD_INFO* info); extern int iothub_client_statistics_get_device_method_summary(IOTHUB_CLIENT_STATISTICS_HANDLE handle, IOTHUB_CLIENT_STATISTICS_DEVICE_METHOD_SUMMARY* summary); extern int iothub_client_statistics_add_device_twin_desired_info(IOTHUB_CLIENT_STATISTICS_HANDLE handle, DEVICE_TWIN_EVENT_TYPE type, DEVICE_TWIN_DESIRED_INFO* info); extern int iothub_client_statistics_get_device_twin_desired_summary(IOTHUB_CLIENT_STATISTICS_HANDLE handle, IOTHUB_CLIENT_STATISTICS_DEVICE_TWIN_SUMMARY* summary); extern int iothub_client_statistics_add_device_twin_reported_info(IOTHUB_CLIENT_STATISTICS_HANDLE handle, DEVICE_TWIN_EVENT_TYPE type, DEVICE_TWIN_REPORTED_INFO* info); extern int iothub_client_statistics_get_device_twin_reported_summary(IOTHUB_CLIENT_STATISTICS_HANDLE handle, IOTHUB_CLIENT_STATISTICS_DEVICE_TWIN_SUMMARY* summary); extern void iothub_client_statistics_destroy(IOTHUB_CLIENT_STATISTICS_HANDLE handle); #endif // IOTHUB_CLIENT_STATISTICS_H
5560c9c283d5719912293b106a9a281e259b9138
4a57a29d0a47932dc44dd5affdd37418f48a0945
/subprojects/libswvkc-wl/extensions/fullscreen-shell-unstable-v1/zwp_fullscreen_shell_v1.c
7fea82e054adfbd61c6b00e869a9cd0f24e0e5e0
[ "MIT" ]
permissive
st3r4g/swvkc
ee3f5a33205dafdbdbb558bfa0485ad0bd51e756
dd8d124948ad57d3e7b12c49f53dcd0ec4c7d407
refs/heads/master
2021-12-24T06:50:01.380350
2021-08-28T21:56:57
2021-08-28T21:56:57
195,874,209
171
12
MIT
2021-06-29T08:36:41
2019-07-08T19:30:15
C
UTF-8
C
false
false
1,388
c
zwp_fullscreen_shell_v1.c
#define _POSIX_C_SOURCE 200809L #include <core/wl_surface.h> #include <util/log.h> #include <fullscreen-shell-unstable-v1-server-protocol.h> static void release(struct wl_client *client, struct wl_resource *resource) { wl_resource_destroy(resource); } static void present_surface(struct wl_client *client, struct wl_resource *resource, struct wl_resource *surface_resource, uint32_t method, struct wl_resource *output) { errlog("PRESENT SURFACE"); struct surface *surface = wl_resource_get_user_data(surface_resource); surface->role = ROLE_FULLSCREEN; surface->surface_events.map(surface, surface->surface_events.user_data); surface->is_mapped = true; } static void present_surface_for_mode(struct wl_client *client, struct wl_resource *resource, struct wl_resource *surface, struct wl_resource *output, int32_t framerate, uint32_t feedback) { errlog("PRESENT SURFACE FOR MODE"); } static const struct zwp_fullscreen_shell_v1_interface impl = {release, present_surface, present_surface_for_mode}; void zwp_fullscreen_shell_v1_new(struct wl_resource *resource) { errlog("BOUND TO FULLSCREEN SHELL"); wl_resource_set_implementation(resource, &impl, NULL, NULL); // zwp_fullscreen_shell_v1_send_capability(resource, ZWP_FULLSCREEN_SHELL_V1_CAPABILITY_ARBITRARY_MODES); // zwp_fullscreen_shell_v1_send_capability(resource, ZWP_FULLSCREEN_SHELL_V1_CAPABILITY_CURSOR_PLANE); }
946c8b052ae9e5e2c0a0f49ca7d3a7351e45e623
0744dcc5394cebf57ebcba343747af6871b67017
/os/drivers/wireless/realtek/rtk/include/autoconf.h
b29c0cb08d961acc44f5c2aa293a786b6adc18fd
[ "GPL-1.0-or-later", "BSD-3-Clause", "ISC", "MIT", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-other-permissive", "Apache-2.0" ]
permissive
Samsung/TizenRT
96abf62f1853f61fcf91ff14671a5e0c6ca48fdb
1a5c2e00a4b1bbf4c505bbf5cc6a8259e926f686
refs/heads/master
2023-08-31T08:59:33.327998
2023-08-08T06:09:20
2023-08-31T04:38:20
82,517,252
590
719
Apache-2.0
2023-09-14T06:54:49
2017-02-20T04:38:30
C
UTF-8
C
false
false
22,172
h
autoconf.h
/****************************************************************************** * Copyright (c) 2013-2016 Realtek Semiconductor Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ #ifndef WLANCONFIG_H #define WLANCONFIG_H /* * Include user defined options first. Anything not defined in these files * will be set to standard values. Override anything you dont like! */ #include "platform_opts.h" #if defined(CONFIG_PLATFORM_8195A) || defined(CONFIG_PLATFORM_8711B) || defined(CONFIG_PLATFORM_8721D) || defined(CONFIG_PLATFORM_8195BHP) || defined(CONFIG_PLATFORM_8710C) #ifndef CONFIG_PLATFORM_AMEBA_X #define CONFIG_PLATFORM_AMEBA_X 1 #endif #else #define CONFIG_PLATFORM_AMEBA_X 0 #include "autoconf_tizenrt.h" #endif #if (CONFIG_PLATFORM_AMEBA_X == 1) #if (CONFIG_PLATFORM_AMEBA_X == 1) #if defined(CONFIG_PLATFORM_8195BHP) || defined(CONFIG_PLATFORM_8710C) #define CONFIG_AXI_HCI #else #define CONFIG_LX_HCI #endif #else #if defined(CONFIG_PLATFOMR_CUSTOMER_RTOS) #define PLATFORM_CUSTOMER_RTOS 1 #define CONFIG_LWIP_LAYER 0 #else #define PLATFORM_FREERTOS 1 #endif #ifdef USE_SDIO_INTERFACE #define CONFIG_SDIO_HCI #else #define CONFIG_GSPI_HCI #endif #endif // #if (CONFIG_PLATFORM_AMEBA_X == 1) #if defined(CONFIG_HARDWARE_8188F) || defined(CONFIG_HARDWARE_8192E) || defined(CONFIG_HARDWARE_8723D) || defined(CONFIG_HARDWARE_8821C) || defined(CONFIG_PLATFORM_8195BHP) || defined(CONFIG_HARDWARE_8188E) || 
defined(CONFIG_PLATFORM_8721D) || defined(CONFIG_PLATFORM_8710C) //#define CONFIG_IEEE80211W #define CONFIG_FW_C2H_PKT #define PHYDM_LINUX_CODING_STYLE 1 #else #define PHYDM_LINUX_CODING_STYLE 0 #endif #if (PHYDM_LINUX_CODING_STYLE == 1) #define PHYDM_NEW_INTERFACE 1 #else #define PHYDM_NEW_INTERFACE 0 #endif #ifndef CONFIG_INIC_EN #define CONFIG_INIC_EN 0 //For iNIC project #endif #if CONFIG_INIC_EN #define CONFIG_LWIP_LAYER 0 #endif #ifndef CONFIG_WIFI_CRITICAL_CODE_SECTION #define CONFIG_WIFI_CRITICAL_CODE_SECTION #endif #define CONFIG_LITTLE_ENDIAN #define CONFIG_80211N_HT //#define CONFIG_RECV_REORDERING_CTRL #define RTW_NOTCH_FILTER 0 #define CONFIG_EMBEDDED_FWIMG #define CONFIG_PHY_SETTING_WITH_ODM #if (CONFIG_PLATFORM_AMEBA_X == 0) #define HAL_MAC_ENABLE 1 #define HAL_BB_ENABLE 1 #define HAL_RF_ENABLE 1 #endif #if (CONFIG_PLATFORM_AMEBA_X == 1) /* Patch when dynamic mechanism is not ready */ //#define CONFIG_DM_PATCH #endif //#define CONFIG_DEBUG //#define CONFIG_DEBUG_RTL871X #if (CONFIG_PLATFORM_AMEBA_X == 1) #define CONFIG_MEM_MONITOR MEM_MONITOR_SIMPLE #define WLAN_INTF_DBG 0 //#define CONFIG_DEBUG_DYNAMIC //#define DBG_TX 1 //#define DBG_XMIT_BUF 1 //#define DBG_XMIT_BUF_EXT 1 #define DBG_TX_DROP_FRAME #else #define CONFIG_MEM_MONITOR MEM_MONITOR_SIMPLE //#define CONFIG_TRACE_SKB //#define WLAN_INTF_DBG #endif // CONFIG_PLATFORM_AMEBA_X //#define CONFIG_DONT_CARE_TP //#define CONFIG_HIGH_TP //#define CONFIG_MEMORY_ACCESS_ALIGNED #ifdef CONFIG_BT_EN #define CONFIG_FTL_ENABLED #endif #if !defined(CONFIG_PLATFORM_8710C) #define CONFIG_POWER_SAVING #endif #ifdef CONFIG_POWER_SAVING #define CONFIG_IPS #define CONFIG_LPS #ifdef CONFIG_LPS #define CONFIG_LPS_CHK_BY_TP #endif //#define CONFIG_LPS_LCLK #if (CONFIG_PLATFORM_AMEBA_X == 0) #ifdef CONFIG_LPS_LCLK #define CONFIG_DETECT_CPWM_BY_POLLING #define LPS_RPWM_WAIT_MS 300 #endif #else #define CONFIG_LPS_32K #define TDMA_POWER_SAVING #endif #define CONFIG_WAIT_PS_ACK #define CONFIG_FW_PSTIMEOUT #endif #define 
BAD_MIC_COUNTERMEASURE 1 #define DEFRAGMENTATION 1 #define WIFI_LOGO_CERTIFICATION 0 #if WIFI_LOGO_CERTIFICATION #define RX_AGGREGATION 1 #define RX_AMSDU 1 #else #ifdef CONFIG_HIGH_TP_TEST #define RX_AGGREGATION 1 #else #define RX_AGGREGATION 0 #endif #define RX_AMSDU 0 #endif #if defined(CONFIG_PLATFORM_8711B) #define CONFIG_FW_C2H_PKT #endif #if (CONFIG_PLATFORM_AMEBA_X == 1) #if defined(CONFIG_PLATFORM_8195A) #define CONFIG_USE_TCM_HEAP 1 /* USE TCM HEAP */ #endif #define CONFIG_RECV_TASKLET_THREAD #define CONFIG_XMIT_TASKLET_THREAD #else #define CONFIG_XMIT_THREAD_MODE #endif // CONFIG_PLATFORM_AMEBA_X //#define CONFIG_RECV_THREAD_MODE /* Wlan IRQ Polling Mode*/ //#define CONFIG_ISR_THREAD_MODE_POLLING /* Wlan IRQ Polling Mode*/ //1 Chris #ifndef CONFIG_SDIO_HCI #define CONFIG_ISR_THREAD_MODE_INTERRUPT /* Wlan IRQ Interrupt Mode*/ #endif #if defined(CONFIG_ISR_THREAD_MODE_POLLING) && defined(CONFIG_ISR_THREAD_MODE_INTERRUPT) #error "CONFIG_ISR_THREAD_MODE_POLLING and CONFIG_ISR_THREAD_MODE_INTERRUPT are mutually exclusive. 
" #endif //#define CONFIG_RECV_TASK_THREAD_MODE #if (CONFIG_PLATFORM_AMEBA_X == 1) /* CRC DMEM optimized mode consume 1k less SRM memory consumption */ #define CRC_IMPLEMENTATION_MODE CRC_IMPLEMENTATION_DMEM_OPTIMIZED #endif /* AES DMEM optimized mode comsume 10k less memory compare to IMEM optimized mode AES_IMPLEMENTATION_IMEM_OPTIMIZED */ #define AES_IMPLEMENTATION_MODE AES_IMPLEMENTATION_DMEM_OPTIMIZED #define USE_SKB_AS_XMITBUF 1 #if (CONFIG_PLATFORM_AMEBA_X == 1) #define USE_XMIT_EXTBUFF 1 #else #define USE_XMIT_EXTBUFF 0 #endif #define USE_MUTEX_FOR_SPINLOCK 1 // remove function to reduce code #define NOT_SUPPORT_5G #if !defined(CONFIG_HARDWARE_8192E) #define NOT_SUPPORT_RF_MULTIPATH #endif #define NOT_SUPPORT_VHT #define NOT_SUPPORT_40M #define NOT_SUPPORT_80M #if defined(CONFIG_PLATFORM_8195A) #define NOT_SUPPORT_BBSWING #endif #ifdef CONFIG_HIGH_TP_TEST #undef NOT_SUPPORT_40M #endif #define NOT_SUPPORT_OLD_CHANNEL_PLAN #define NOT_SUPPORT_BT #define CONFIG_WIFI_SPEC 0 #define CONFIG_FAKE_EFUSE 0 #if CONFIG_FAKE_EFUSE #define FAKE_CHIPID CHIPID_8710BN #endif #define CONFIG_AUTO_RECONNECT 1 #define ENABLE_HWPDN_PIN #define SUPPORT_SCAN_BUF 1 #if (CONFIG_PLATFORM_AMEBA_X == 0) #define BE_I_CUT 1 #endif /* For WPA2 */ #define CONFIG_INCLUDE_WPA_PSK #ifdef CONFIG_INCLUDE_WPA_PSK #define CONFIG_MULTIPLE_WPA_STA //#define CONFIG_WPA2_PREAUTH #define PSK_SUPPORT_TKIP 1 #endif /* For promiscuous mode */ #define CONFIG_PROMISC #define PROMISC_DENY_PAIRWISE 0 /* For Simple Link */ #ifndef CONFIG_INCLUDE_SIMPLE_CONFIG //#define CONFIG_INCLUDE_SIMPLE_CONFIG 1 #endif // for probe request with custom vendor specific IE #define CONFIG_CUSTOM_IE #if (CONFIG_PLATFORM_AMEBA_X == 0) /* For multicast */ #define CONFIG_MULTICAST #endif /* For STA+AP Concurrent MODE */ #define CONFIG_CONCURRENT_MODE #ifdef CONFIG_CONCURRENT_MODE //#define CONFIG_MCC_MODE #if defined(CONFIG_PLATFORM_8195A) || defined(CONFIG_PLATFORM_8195BHP) #define CONFIG_RUNTIME_PORT_SWITCH #endif #define 
NET_IF_NUM ((CONFIG_ETHERNET) + (CONFIG_WLAN) + 1) #else #define NET_IF_NUM ((CONFIG_ETHERNET) + (CONFIG_WLAN)) #endif /****************** For EAP auth configurations *******************/ #define CONFIG_TLS 0 #define CONFIG_PEAP 0 #define CONFIG_TTLS 0 // DO NOT change the below config of EAP #ifdef PRE_CONFIG_EAP #undef CONFIG_TLS #define CONFIG_TLS 1 #undef CONFIG_PEAP #define CONFIG_PEAP 1 #undef CONFIG_TTLS #define CONFIG_TTLS 1 #endif // enable 1X code in lib_wlan as default (increase 380 bytes) #define CONFIG_EAP #if CONFIG_TLS || CONFIG_PEAP || CONFIG_TTLS #define EAP_REMOVE_UNUSED_CODE 1 #endif #define EAP_SSL_VERIFY_SERVER #if CONFIG_TLS #define EAP_SSL_VERIFY_CLIENT #endif #if CONFIG_TTLS #define EAP_MSCHAPv2 #define EAP_TTLS_MSCHAPv2 //#define EAP_TTLS_EAP //#define EAP_TTLS_MSCHAP //#define EAP_TTLS_PAP //#define EAP_TTLS_CHAP #endif /****************** End of EAP configurations *******************/ /* For WPS and P2P */ #define CONFIG_WPS #if 0 #define CONFIG_WPS_AP #define CONFIG_P2P_NEW #if (!defined(SUPPORT_SCAN_BUF) || !defined(CONFIG_WPS_AP)) && defined(CONFIG_P2P_NEW) #error "If CONFIG_P2P_NEW, need to SUPPORT_SCAN_BUF" #endif #endif #define CONFIG_NEW_SIGNAL_STAT_PROCESS #define CONFIG_SKIP_SIGNAL_SCALE_MAPPING /* For AP_MODE */ #define CONFIG_AP_MODE extern unsigned char g_user_ap_sta_num; #define USER_AP_STA_NUM g_user_ap_sta_num #if (CONFIG_PLATFORM_AMEBA_X == 1) #define AP_STA_NUM 3 //2014/10/27 modify to 3 #define USE_DEDICATED_BCN_TX 0 #if USE_DEDICATED_BCN_TX #error "WLAN driver for Ameba should not enable USE_DEDICATED_BCN_TX" #endif #else extern unsigned int g_ap_sta_num; #define AP_STA_NUM 3 //g_ap_sta_num #endif #ifdef CONFIG_AP_MODE #if defined(CONFIG_PLATFORM_8195A) //softap sent qos null0 polling client alive or not #define CONFIG_AP_POLLING_CLIENT_ALIVE #endif #define CONFIG_NATIVEAP_MLME #if (CONFIG_PLATFORM_AMEBA_X == 1) #define CONFIG_INTERRUPT_BASED_TXBCN #endif #ifdef CONFIG_INTERRUPT_BASED_TXBCN //#define 
CONFIG_INTERRUPT_BASED_TXBCN_EARLY_INT #define CONFIG_INTERRUPT_BASED_TXBCN_BCN_OK_ERR #endif // #define CONFIG_GK_REKEY #if (CONFIG_PLATFORM_AMEBA_X == 0) #define USE_DEDICATED_BCN_TX 1 #endif #if CONFIG_INIC_EN // #define REPORT_STA_EVENT //useless #endif #else #if (CONFIG_PLATFORM_AMEBA_X == 0) #define USE_DEDICATED_BCN_TX 0 #endif #endif #if defined(CONFIG_AP_MODE) && defined(CONFIG_GK_REKEY) && !defined(CONFIG_MULTIPLE_WPA_STA) #error "If CONFIG_GK_REKEY when CONFIG_AP_MODE, need to CONFIG_MULTIPLE_WPA_STA" #endif #if (CONFIG_PLATFORM_AMEBA_X == 0) #if !defined(CONFIG_AP_MODE) && defined(CONFIG_CONCURRENT_MODE) #error "If CONFIG_CONCURRENT_MODEE, need to CONFIG_AP_MODE" #endif #endif /* For efuse or flash config */ #if (CONFIG_PLATFORM_AMEBA_X == 1) #define CONFIG_RW_PHYSICAL_EFUSE 0 // Mask efuse user blocks #define CONFIG_HIDE_PROTECT_EFUSE 1 #define CONFIG_ADAPTOR_INFO_CACHING_FLASH 1 #define CHECK_FLASH_VALID_MASK 1 #define CHECK_EFUSE_VALID_MASK 1 /* For K-free */ #define CONFIG_RF_GAIN_OFFSET #endif // CONFIG_PLATFORM_AMEBA_X /* For MP_MODE */ //#define CONFIG_MP_INCLUDED #ifdef CONFIG_MP_INCLUDED #define MP_DRIVER 1 #define CONFIG_MP_IWPRIV_SUPPORT // #define HAL_EFUSE_MEMORY #if (CONFIG_PLATFORM_AMEBA_X == 1) #define MP_REG_TEST #endif #else #define MP_DRIVER 0 #if defined(CONFIG_PLATFORM_8195A) //Control wifi mcu function #define CONFIG_LITTLE_WIFI_MCU_FUNCTION_THREAD #define CONFIG_ODM_REFRESH_RAMASK //#define CONFIG_ANTENNA_DIVERSITY //#define CONFIG_BT_COEXIST #endif #if defined(CONFIG_PLATFORM_8721D) #define CONFIG_ANTENNA_DIVERSITY //#define CONFIG_BT_COEXIST //#define CONFIG_SW_MAILBOX_EN //#define NEW_BT_COEX #endif #if defined(CONFIG_PLATFORM_8710C) //#define CONFIG_ANTENNA_DIVERSITY //#define CONFIG_BT_COEXIST //#define CONFIG_SW_MAILBOX_EN //#define NEW_BT_COEX #endif #endif // #ifdef CONFIG_MP_INCLUDED #ifdef CONFIG_BT_COEXIST #undef NOT_SUPPORT_BT #define CONFIG_BT_MAILBOX #define CONFIG_BT_EFUSE //#define CONFIG_BT_TWO_ANTENNA #endif // 
for Debug message #define DBG 0 #if (CONFIG_PLATFORM_AMEBA_X == 1) #if (DBG == 0) #define ROM_E_RTW_MSG 1 #define ROM_F_RTW_MSG 1 #if (CONFIG_INIC_EN == 0) && (PHYDM_LINUX_CODING_STYLE == 0) /* For DM debug*/ // BB #define DBG_RX_INFO 1 #define DBG_DM_DIG 1 // DebugComponents: bit0 #define DBG_DM_RA_MASK 1 // DebugComponents: bit1 #define DBG_DM_ANT_DIV 1 // DebugComponents: bit6 #define DBG_TX_RATE 1 // DebugComponents: bit9 #define DBG_DM_RA 1 // DebugComponents: bit9 #define DBG_DM_ADAPTIVITY 1 // DebugComponents: bit17 // RF #define DBG_PWR_TRACKING 1 // DebugComponents: bit24 #define DBG_RF_IQK 1 // DebugComponents: bit26 // Common #define DBG_PWR_INDEX 1 // DebugComponents: bit30 #endif #endif #endif #if (CONFIG_PLATFORM_AMEBA_X == 1) #if defined(CONFIG_PLATFORM_8195A) #undef CONFIG_RTL8195A #define CONFIG_RTL8195A #endif #if defined(CONFIG_PLATFORM_8711B) #ifndef CONFIG_RTL8711B #define CONFIG_RTL8711B #endif #undef CONFIG_ADAPTOR_INFO_CACHING_FLASH #define CONFIG_ADAPTOR_INFO_CACHING_FLASH 0 //#undef CONFIG_EAP //#undef CONFIG_IPS #define CONFIG_8710B_MOVE_TO_ROM #define CONFIG_EFUSE_SEPARATE #define CONFIG_MOVE_PSK_TO_ROM #define CONFIG_WOWLAN #define CONFIG_TRAFFIC_PROTECT #define CONFIG_FABVERSION_UMC 1 #if (CONFIG_INIC_EN == 1) #undef CONFIG_PROMISC #undef CONFIG_WPS #undef CONFIG_AP_MODE #undef CONFIG_NATIVEAP_MLME #undef CONFIG_INTERRUPT_BASED_TXBCN #undef CONFIG_INTERRUPT_BASED_TXBCN_BCN_OK_ERR #undef USE_DEDICATED_BCN_TX //#undef SUPPORT_SCAN_BUF #undef CONFIG_CONCURRENT_MODE #undef CONFIG_AUTO_RECONNECT #endif #endif #if defined(CONFIG_PLATFORM_8721D) #define CONFIG_EMPTY_EFUSE_PG_ENABLE #ifndef CONFIG_RTL8721D #define CONFIG_RTL8721D #endif #undef NOT_SUPPORT_5G #undef CONFIG_ADAPTOR_INFO_CACHING_FLASH #define CONFIG_ADAPTOR_INFO_CACHING_FLASH 0 #define CONFIG_EFUSE_SEPARATE #define CONFIG_WOWLAN //#define CONFIG_TRAFFIC_PROTECT #define SUPPORT_5G_CHANNEL 1 #define CONFIG_DFS #define CONFIG_XMIT_ACK #ifdef CONFIG_DFS #define CONFIG_DFS_ACTION 
#endif #define DBG_DM_DIG 0 // DebugComponents: bit0 //#define CONFIG_SUPPORT_DYNAMIC_TXPWR //rtw_phydm_fill_desc_dpt todo #if (CONFIG_INIC_EN == 1) #undef CONFIG_PROMISC #undef CONFIG_WPS #undef CONFIG_AP_MODE #undef CONFIG_NATIVEAP_MLME #undef CONFIG_INTERRUPT_BASED_TXBCN #undef CONFIG_INTERRUPT_BASED_TXBCN_BCN_OK_ERR #undef USE_DEDICATED_BCN_TX //#undef SUPPORT_SCAN_BUF #undef CONFIG_CONCURRENT_MODE #undef CONFIG_AUTO_RECONNECT #endif #endif #if defined(CONFIG_PLATFORM_8195BHP) #define CONFIG_RTL8195B #undef CONFIG_EAP // #undef CONFIG_ADAPTOR_INFO_CACHING_FLASH // #define CONFIG_ADAPTOR_INFO_CACHING_FLASH 0 #undef CHECK_FLASH_VALID_MASK #define CHECK_FLASH_VALID_MASK 0 #undef CHECK_EFUSE_VALID_MASK #define CHECK_EFUSE_VALID_MASK 0 #undef CONFIG_RW_PHYSICAL_EFUSE #define CONFIG_RW_PHYSICAL_EFUSE 1 // efuse_get realraw #undef NOT_SUPPORT_5G #undef NOT_SUPPORT_VHT // #undef NOT_SUPPORT_40M // #undef NOT_SUPPORT_80M #undef DBG #define DBG 1 #ifdef CONFIG_POWER_SAVING #define CONFIG_LPS_LCLK #ifdef CONFIG_LPS_LCLK #define CONFIG_DETECT_CPWM_BY_POLLING #define LPS_RPWM_WAIT_MS 300 #endif #define CONFIG_LPS_PG #endif #define CONFIG_80211AC_VHT #undef CONFIG_IPS // #define CONFIG_NO_FW #define CONFIG_EX_FW_BIN #define CONFIG_WOWLAN #define CONFIG_WOWLAN_HW_CAM #define CONFIG_WOWLAN_CUSTOM_PATTERN #define LOAD_FW_HEADER_FROM_DRIVER #define FW_IQK // #define RTW_IQK_FW_OFFLOAD #define CONFIG_PHY_CAPABILITY_QUERY #define CONFIG_ISR_THREAD_MODE_INTERRUPT /* Wlan IRQ Interrupt Mode*/ // #define CONFIG_WLAN_RF_CNTL #define SUPPORT_5G_CHANNEL 1 #define CONFIG_DFS #ifdef CONFIG_DFS #define CONFIG_DFS_ACTION #endif #define DBG_DM_DIG 0 // DebugComponents: bit0 // #define CONFIG_DEBUG #define RTW_HALMAC /* Use HALMAC architecture */ #define RTW_HALMAC_MU_BF 0 #define RTW_HALMAC_SU_BF 0 #define RTW_HALMAC_BT_COEX 0 #define RTW_HALMAC_DUMP_INFO 0 #define RTW_HALMAC_TXBF 0 #define RTW_HALMAC_FW_OFFLOAD 0 #define RTW_HALMAC_PHYSICAL_EFUSE 0 #define RTW_HALMAC_SIZE_OPTIMIZATION 1 
#define RTW_HALMAC_SDIO_CIA_READ 0 #define RTW_HALMAC_LTE_COEX 0 #define CONFIG_MAC_LOOPBACK_DRIVER_RTL8195B 0 #endif #if defined(CONFIG_PLATFORM_8710C) #ifndef CONFIG_RTL8710C #define CONFIG_RTL8710C #endif #undef CONFIG_EAP #undef CONFIG_ADAPTOR_INFO_CACHING_FLASH #define CONFIG_ADAPTOR_INFO_CACHING_FLASH 0 #define NOT_SUPPORT_5G #define NOT_SUPPORT_VHT #undef NOT_SUPPORT_40M #define NOT_SUPPORT_80M #undef CONFIG_BW_80 #undef CONFIG_80211AC_VHT #undef RTK_AC_SUPPORT #define LOAD_FW_HEADER_FROM_DRIVER //#define CONFIG_EFUSE_SEPARATE //#define CONFIG_WOWLAN #define CONFIG_TRAFFIC_PROTECT #define DBG_DM_DIG 0 // DebugComponents: bit0 // #define CONFIG_DEBUG //#define RTW_HALMAC /* Use HALMAC architecture */ //#define RTW_HALMAC_MU_BF 0 //#define RTW_HALMAC_SU_BF 0 //#define RTW_HALMAC_BT_COEX 0 //#define RTW_HALMAC_DUMP_INFO 0 //#define RTW_HALMAC_TXBF 0 //#define RTW_HALMAC_FW_OFFLOAD 0 //#define RTW_HALMAC_PHYSICAL_EFUSE 0 //#define RTW_HALMAC_SIZE_OPTIMIZATION 1 //#define RTW_HALMAC_SDIO_CIA_READ 0 //#define RTW_HALMAC_LTE_COEX 0 //#define CONFIG_MAC_LOOPBACK_DRIVER_RTL8710C 1 // 1: HAL+MAC LOOPBACK, 2: HAL+MAC+BB LOOPBACK 3: DRV+HAL+MAC LOOPBACK #if defined(CONFIG_MAC_LOOPBACK_DRIVER_RTL8710C) && (CONFIG_MAC_LOOPBACK_DRIVER_RTL8710C == 3) #define CONFIG_MAC_LOOPBACK_DRIVER_AMEBA #endif #undef CONFIG_CONCURRENT_MODE #endif #elif defined(CONFIG_HARDWARE_8188F) #define CONFIG_RTL8188F #elif defined(CONFIG_HARDWARE_8192E) #define CONFIG_RTL8192E #elif defined(CONFIG_HARDWARE_8821C) #define CONFIG_RTL8821C #elif defined(CONFIG_HARDWARE_8723D) #define CONFIG_RTL8723D #elif defined(CONFIG_HARDWARE_8188E) #define CONFIG_RTL8188E #else #define CONFIG_RTL8188E #endif #define RTL8192E_SUPPORT 0 #define RTL8812A_SUPPORT 0 #define RTL8821A_SUPPORT 0 #define RTL8723B_SUPPORT 0 #define RTL8195A_SUPPORT 0 #define RTL8188E_SUPPORT 0 #define RTL8188F_SUPPORT 0 #define RTL8711B_SUPPORT 0 #define RTL8721D_SUPPORT 0 #define RTL8821C_SUPPORT 0 #define RTL8723D_SUPPORT 0 #define 
RTL8195B_SUPPORT 0 #if defined(CONFIG_PLATFORM_8195A) #undef RTL8195A_SUPPORT #define RTL8195A_SUPPORT 1 #elif defined(CONFIG_PLATFORM_8711B) #undef RTL8711B_SUPPORT #define RTL8711B_SUPPORT 1 #elif defined(CONFIG_PLATFORM_8721D) #undef RTL8721D_SUPPORT #define RTL8721D_SUPPORT 1 #elif defined(CONFIG_PLATFORM_8195BHP) #undef RTL8195B_SUPPORT #define RTL8195B_SUPPORT 1 #elif defined(CONFIG_PLATFORM_8710C) #undef RTL8710C_SUPPORT #define RTL8710C_SUPPORT 1 #elif defined(CONFIG_HARDWARE_8188F) #undef RTL8188F_SUPPORT #define RTL8188F_SUPPORT 1 #elif defined(CONFIG_HARDWARE_8192E) #undef RTL8192E_SUPPORT #define RTL8192E_SUPPORT 1 #elif defined(CONFIG_HARDWARE_8821C) #undef RTL8821C_SUPPORT #define RTL8821C_SUPPORT 1 #elif defined(CONFIG_HARDWARE_8723D) #undef RTL8723D_SUPPORT #define RTL8723D_SUPPORT 1 #elif defined(CONFIG_HARDWARE_8188E) #undef RTL8188E_SUPPORT #define RTL8188E_SUPPORT 1 #else #undef RTL8188E_SUPPORT #define RTL8188E_SUPPORT 1 #endif /* For DM support */ #if defined(CONFIG_RTL8188F) #define RATE_ADAPTIVE_SUPPORT 0 #elif defined(CONFIG_RTL8821C) #define RATE_ADAPTIVE_SUPPORT 0 #elif defined(CONFIG_RTL8192E) #define RATE_ADAPTIVE_SUPPORT 0 #elif defined(CONFIG_RTL8723D) #define RATE_ADAPTIVE_SUPPORT 0 #elif defined(CONFIG_PLATFORM_8711B) #define RATE_ADAPTIVE_SUPPORT 0 #define CONFIG_ODM_REFRESH_RAMASK #elif defined(CONFIG_PLATFORM_8721D) #define RATE_ADAPTIVE_SUPPORT 0 //#define CONFIG_ODM_REFRESH_RAMASK #elif defined(CONFIG_PLATFORM_8710C) #define RATE_ADAPTIVE_SUPPORT 0 //#define CONFIG_ODM_REFRESH_RAMASK #else #define RATE_ADAPTIVE_SUPPORT 1 #endif // adaptivity #define RTW_ADAPTIVITY_EN_DISABLE 0 #define RTW_ADAPTIVITY_EN_ENABLE 1 #define CONFIG_RTW_ADAPTIVITY_EN RTW_ADAPTIVITY_EN_DISABLE #define RTW_ADAPTIVITY_MODE_NORMAL 0 #define RTW_ADAPTIVITY_MODE_CARRIER_SENSE 1 #define CONFIG_RTW_ADAPTIVITY_MODE RTW_ADAPTIVITY_MODE_CARRIER_SENSE #define CONFIG_RTW_ADAPTIVITY_DML 0 #if (CONFIG_PLATFORM_AMEBA_X == 1) #define CONFIG_POWER_TRAINING_WIL 0 // in 
RA #else #define POWER_BY_RATE_SUPPORT 0 #endif #if (CONFIG_PLATFORM_AMEBA_X == 1) #define RTL8195A_FOR_TEST_CHIP 0 //#define CONFIG_WIFI_TEST 1 //#define CONFIG_MAC_LOOPBACK_DRIVER 1 //#define CONFIG_WLAN_HAL_TEST 1 //#define SKB_PRE_ALLOCATE_TX 1 #ifdef CONFIG_HIGH_TP_TEST #define SKB_PRE_ALLOCATE_RX 1 #else #define SKB_PRE_ALLOCATE_RX 0 #endif #if (!defined(CONFIG_PLATFORM_8721D)) #define TX_CHECK_DSEC_ALWAYS 1 #endif #define CONFIG_DBG_DISABLE_RDU_INTERRUPT //#define CONFIG_WLAN_HAL_RX_TASK #if (SKB_PRE_ALLOCATE_RX == 1) #ifdef CONFIG_HIGH_TP_TEST #define EXCHANGE_LXBUS_RX_SKB 1 #else #define EXCHANGE_LXBUS_RX_SKB 0 #endif #endif #if (defined(CONFIG_FPGA) && !defined(CONFIG_PLATFORM_8710C)) || (defined(CONFIG_PLATFORM_8710C) && defined(CONFIG_MAC_LOOPBACK_DRIVER_RTL8710C) && (CONFIG_MAC_LOOPBACK_DRIVER_RTL8710C == 1)) //Enable mac loopback for test mode (Ameba) #ifdef CONFIG_WIFI_NORMAL #define CONFIG_TWO_MAC_DRIVER // for test mode #else //CONFIG_WIFI_VERIFY #define ENABLE_MAC_LB_FOR_TEST_MODE #endif #define AP_PSK_SUPPORT_TKIP #endif #ifdef ENABLE_MAC_LB_FOR_TEST_MODE #define CONFIG_SUDO_PHY_SETTING #define INT_HANDLE_IN_ISR 1 #ifdef CONFIG_LWIP_LAYER #undef CONFIG_LWIP_LAYER #define CONFIG_LWIP_LAYER 0 #else #define CONFIG_LWIP_LAYER 0 #endif #define CONFIG_WLAN_HAL_TEST #define CONFIG_WLAN_HAL_RX_TASK #define CONFIG_MAC_LOOPBACK_DRIVER_AMEBA 1 #define HAL_MAC_ENABLE 1 #if !defined(CONFIG_PLATFORM_8710C) #define CONFIG_TWO_MAC_TEST_MODE #endif #if defined(CONFIG_MAC_LOOPBACK_DRIVER_RTL8710C) && (CONFIG_MAC_LOOPBACK_DRIVER_RTL8710C == 2) // Enable BB loopback test #define HAL_BB_ENABLE 1 #define HAL_RF_ENABLE 1 #define DISABLE_BB_RF 0 #else #define DISABLE_BB_RF 1 #endif #else //#define CONFIG_TWO_MAC_DRIVER //for mornal driver; two mac #if defined(CONFIG_TWO_MAC_DRIVER) || defined(CONFIG_MAC_LOOPBACK_DRIVER_AMEBA) #define CONFIG_SUDO_PHY_SETTING #define HAL_MAC_ENABLE 1 #define DISABLE_BB_RF 1 #else #define HAL_MAC_ENABLE 1 #define HAL_BB_ENABLE 1 #define 
HAL_RF_ENABLE 1 #define DISABLE_BB_RF 0 #endif //#define INT_HANDLE_IN_ISR 1 #endif #endif // CONFIG_PLATFORM_AMEBA_X #ifndef CONFIG_LWIP_LAYER #define CONFIG_LWIP_LAYER 1 #endif #define CONFIG_MAC_ADDRESS 0 //fast reconnection //#define CONFIG_FAST_RECONNECTION 1 #if defined(CONFIG_INIC_EN) && (CONFIG_INIC_EN == 1) #define CONFIG_RECV_REORDERING_CTRL //enable reordering for iNIC high throughput #undef RX_AGGREGATION #define RX_AGGREGATION 1 #undef NOT_SUPPORT_40M #undef CONFIG_CONCURRENT_MODE #endif #if defined(CONFIG_HARDWARE_8821C) #define FW_IQK #define RTW_HALMAC #define LOAD_FW_HEADER_FROM_DRIVER #define RTW_HALMAC_SIZE_OPTIMIZATION 1 //#define CONFIG_NO_FW #ifdef NOT_SUPPORT_5G #undef NOT_SUPPORT_5G #define SUPPORT_5G_CHANNEL 1 #endif #endif //#define CONFIG_ADDRESS_ALIGNMENT #ifdef CONFIG_ADDRESS_ALIGNMENT #define ALIGNMENT_SIZE 32 #endif #define CONFIG_DFS //#define CONFIG_EMPTY_EFUSE_PG_ENABLE #define WLAN_WRAPPER_VERSION 1 #define TIME_THRES 20 #endif //CONFIG_PLATFORM_AMEBA_X == 1 #endif //WLANCONFIG_H
2039b19f705a59c44715c254ff782cdf719e3ea4
f367e4b66a1ee42e85830b31df88f63723c36a47
/src/aws/flb_aws_credentials.c
850142e24f43356e1ce42c8ad6e5aed49a434742
[ "Apache-2.0" ]
permissive
fluent/fluent-bit
06873e441162b92941024e9a7e9e8fc934150bf7
1a41f49dc2f3ae31a780caa9ffd6137b1d703065
refs/heads/master
2023-09-05T13:44:55.347372
2023-09-05T10:14:33
2023-09-05T10:14:33
29,933,948
4,907
1,565
Apache-2.0
2023-09-14T10:17:02
2015-01-27T20:41:52
C
UTF-8
C
false
false
29,833
c
flb_aws_credentials.c
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* Fluent Bit * ========== * Copyright (C) 2015-2022 The Fluent Bit Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <fluent-bit/flb_info.h> #include <fluent-bit/flb_sds.h> #include <fluent-bit/flb_http_client.h> #include <fluent-bit/flb_aws_credentials.h> #include <fluent-bit/flb_aws_util.h> #include <fluent-bit/flb_jsmn.h> #include <fluent-bit/flb_output_plugin.h> #include <stdlib.h> #include <time.h> #define FIVE_MINUTES 300 #define TWELVE_HOURS 43200 /* Credentials Environment Variables */ #define AWS_ACCESS_KEY_ID "AWS_ACCESS_KEY_ID" #define AWS_SECRET_ACCESS_KEY "AWS_SECRET_ACCESS_KEY" #define AWS_SESSION_TOKEN "AWS_SESSION_TOKEN" #define EKS_POD_EXECUTION_ROLE "EKS_POD_EXECUTION_ROLE" /* declarations */ static struct flb_aws_provider *standard_chain_create(struct flb_config *config, struct flb_tls *tls, char *region, char *sts_endpoint, char *proxy, struct flb_aws_client_generator *generator, int eks_irsa, char *profile); /* * The standard credential provider chain: * 1. Environment variables * 2. Shared credentials file (AWS Profile) * 3. EKS OIDC * 4. EC2 IMDS * 5. ECS HTTP credentials endpoint * * This provider will evaluate each provider in order, returning the result * from the first provider that returns valid credentials. * * Note: Client code should use this provider by default. 
*/ struct flb_aws_provider_chain { struct mk_list sub_providers; /* * The standard chain provider picks the first successful provider and * then uses it until a call to refresh is made. */ struct flb_aws_provider *sub_provider; }; /* * Iterates through the chain and returns credentials from the first provider * that successfully returns creds. Caches this provider on the implementation. */ struct flb_aws_credentials *get_from_chain(struct flb_aws_provider_chain *implementation) { struct flb_aws_provider *sub_provider = NULL; struct mk_list *tmp; struct mk_list *head; struct flb_aws_credentials *creds = NULL; /* find the first provider that produces a valid set of creds */ mk_list_foreach_safe(head, tmp, &implementation->sub_providers) { sub_provider = mk_list_entry(head, struct flb_aws_provider, _head); creds = sub_provider->provider_vtable->get_credentials(sub_provider); if (creds) { implementation->sub_provider = sub_provider; return creds; } } return NULL; } struct flb_aws_credentials *get_credentials_fn_standard_chain(struct flb_aws_provider *provider) { struct flb_aws_credentials *creds = NULL; struct flb_aws_provider_chain *implementation = provider->implementation; struct flb_aws_provider *sub_provider = implementation->sub_provider; if (sub_provider) { return sub_provider->provider_vtable->get_credentials(sub_provider); } if (try_lock_provider(provider)) { creds = get_from_chain(implementation); unlock_provider(provider); return creds; } /* * We failed to lock the provider and sub_provider is unset. This means that * another co-routine is selecting a provider from the chain. */ flb_warn("[aws_credentials] No cached credentials are available and " "a credential refresh is already in progress. 
The current " "co-routine will retry."); return NULL; } int init_fn_standard_chain(struct flb_aws_provider *provider) { struct flb_aws_provider_chain *implementation = provider->implementation; struct flb_aws_provider *sub_provider = NULL; struct mk_list *tmp; struct mk_list *head; int ret = -1; if (try_lock_provider(provider)) { /* find the first provider that indicates successful init */ mk_list_foreach_safe(head, tmp, &implementation->sub_providers) { sub_provider = mk_list_entry(head, struct flb_aws_provider, _head); ret = sub_provider->provider_vtable->init(sub_provider); if (ret >= 0) { implementation->sub_provider = sub_provider; break; } } unlock_provider(provider); } return ret; } /* * Client code should only call refresh if there has been an * error from the AWS APIs indicating creds are expired/invalid. * Refresh may change the current sub_provider. */ int refresh_fn_standard_chain(struct flb_aws_provider *provider) { struct flb_aws_provider_chain *implementation = provider->implementation; struct flb_aws_provider *sub_provider = NULL; struct mk_list *tmp; struct mk_list *head; int ret = -1; if (try_lock_provider(provider)) { /* find the first provider that indicates successful refresh */ mk_list_foreach_safe(head, tmp, &implementation->sub_providers) { sub_provider = mk_list_entry(head, struct flb_aws_provider, _head); ret = sub_provider->provider_vtable->refresh(sub_provider); if (ret >= 0) { implementation->sub_provider = sub_provider; break; } } unlock_provider(provider); } return ret; } void sync_fn_standard_chain(struct flb_aws_provider *provider) { struct flb_aws_provider_chain *implementation = provider->implementation; struct flb_aws_provider *sub_provider = NULL; struct mk_list *tmp; struct mk_list *head; /* set all providers to sync mode */ mk_list_foreach_safe(head, tmp, &implementation->sub_providers) { sub_provider = mk_list_entry(head, struct flb_aws_provider, _head); sub_provider->provider_vtable->sync(sub_provider); } } void 
async_fn_standard_chain(struct flb_aws_provider *provider) { struct flb_aws_provider_chain *implementation = provider->implementation; struct flb_aws_provider *sub_provider = NULL; struct mk_list *tmp; struct mk_list *head; /* set all providers to async mode */ mk_list_foreach_safe(head, tmp, &implementation->sub_providers) { sub_provider = mk_list_entry(head, struct flb_aws_provider, _head); sub_provider->provider_vtable->async(sub_provider); } } void upstream_set_fn_standard_chain(struct flb_aws_provider *provider, struct flb_output_instance *ins) { struct flb_aws_provider_chain *implementation = provider->implementation; struct flb_aws_provider *sub_provider = NULL; struct mk_list *tmp; struct mk_list *head; /* set all providers to async mode */ mk_list_foreach_safe(head, tmp, &implementation->sub_providers) { sub_provider = mk_list_entry(head, struct flb_aws_provider, _head); sub_provider->provider_vtable->upstream_set(sub_provider, ins); } } void destroy_fn_standard_chain(struct flb_aws_provider *provider) { struct flb_aws_provider *sub_provider; struct flb_aws_provider_chain *implementation; struct mk_list *tmp; struct mk_list *head; implementation = provider->implementation; if (implementation) { mk_list_foreach_safe(head, tmp, &implementation->sub_providers) { sub_provider = mk_list_entry(head, struct flb_aws_provider, _head); mk_list_del(&sub_provider->_head); flb_aws_provider_destroy(sub_provider); } flb_free(implementation); } } static struct flb_aws_provider_vtable standard_chain_provider_vtable = { .get_credentials = get_credentials_fn_standard_chain, .init = init_fn_standard_chain, .refresh = refresh_fn_standard_chain, .destroy = destroy_fn_standard_chain, .sync = sync_fn_standard_chain, .async = async_fn_standard_chain, .upstream_set = upstream_set_fn_standard_chain, }; struct flb_aws_provider *flb_standard_chain_provider_create(struct flb_config *config, struct flb_tls *tls, char *region, char *sts_endpoint, char *proxy, struct 
flb_aws_client_generator *generator, char *profile) { struct flb_aws_provider *provider; struct flb_aws_provider *tmp_provider; char *eks_pod_role = NULL; char *session_name; eks_pod_role = getenv(EKS_POD_EXECUTION_ROLE); if (eks_pod_role && strlen(eks_pod_role) > 0) { /* * eks fargate * standard chain will be base provider used to * assume the EKS_POD_EXECUTION_ROLE */ flb_debug("[aws_credentials] Using EKS_POD_EXECUTION_ROLE=%s", eks_pod_role); tmp_provider = standard_chain_create(config, tls, region, sts_endpoint, proxy, generator, FLB_FALSE, profile); if (!tmp_provider) { return NULL; } session_name = flb_sts_session_name(); if (!session_name) { flb_error("Failed to generate random STS session name"); flb_aws_provider_destroy(tmp_provider); return NULL; } provider = flb_sts_provider_create(config, tls, tmp_provider, NULL, eks_pod_role, session_name, region, sts_endpoint, NULL, generator); if (!provider) { flb_error("Failed to create EKS Fargate Credential Provider"); flb_aws_provider_destroy(tmp_provider); return NULL; } /* session name can freed after provider is created */ flb_free(session_name); session_name = NULL; return provider; } /* standard case- not in EKS Fargate */ provider = standard_chain_create(config, tls, region, sts_endpoint, proxy, generator, FLB_TRUE, profile); return provider; } struct flb_aws_provider *flb_managed_chain_provider_create(struct flb_output_instance *ins, struct flb_config *config, char *config_key_prefix, char *proxy, struct flb_aws_client_generator *generator) { flb_sds_t config_key_region; flb_sds_t config_key_sts_endpoint; flb_sds_t config_key_role_arn; flb_sds_t config_key_external_id; flb_sds_t config_key_profile; const char *region = NULL; const char *sts_endpoint = NULL; const char *role_arn = NULL; const char *external_id = NULL; const char *profile = NULL; char *session_name = NULL; int key_prefix_len; int key_max_len; /* Provider managed dependencies */ struct flb_aws_provider *aws_provider = NULL; struct 
flb_aws_provider *base_aws_provider = NULL; struct flb_tls *cred_tls = NULL; struct flb_tls *sts_tls = NULL; /* Config keys */ key_prefix_len = strlen(config_key_prefix); key_max_len = key_prefix_len + 12; /* max length of "region", "sts_endpoint", "role_arn", "external_id" */ /* Evaluate full config keys */ config_key_region = flb_sds_create_len(config_key_prefix, key_max_len); strcpy(config_key_region + key_prefix_len, "region"); config_key_sts_endpoint = flb_sds_create_len(config_key_prefix, key_max_len); strcpy(config_key_sts_endpoint + key_prefix_len, "sts_endpoint"); config_key_role_arn = flb_sds_create_len(config_key_prefix, key_max_len); strcpy(config_key_role_arn + key_prefix_len, "role_arn"); config_key_external_id = flb_sds_create_len(config_key_prefix, key_max_len); strcpy(config_key_external_id + key_prefix_len, "external_id"); config_key_profile = flb_sds_create_len(config_key_prefix, key_max_len); strcpy(config_key_profile + key_prefix_len, "profile"); /* AWS provider needs a separate TLS instance */ cred_tls = flb_tls_create(FLB_TLS_CLIENT_MODE, FLB_TRUE, ins->tls_debug, ins->tls_vhost, ins->tls_ca_path, ins->tls_ca_file, ins->tls_crt_file, ins->tls_key_file, ins->tls_key_passwd); if (!cred_tls) { flb_plg_error(ins, "Failed to create TLS instance for AWS Provider"); flb_errno(); goto error; } region = flb_output_get_property(config_key_region, ins); if (!region) { flb_plg_error(ins, "aws_auth enabled but %s not set", config_key_region); goto error; } /* Use null sts_endpoint if none provided */ sts_endpoint = flb_output_get_property(config_key_sts_endpoint, ins); /* Get the profile from configuration */ profile = flb_output_get_property(config_key_profile, ins); aws_provider = flb_standard_chain_provider_create(config, cred_tls, (char *) region, (char *) sts_endpoint, NULL, flb_aws_client_generator(), profile); if (!aws_provider) { flb_plg_error(ins, "Failed to create AWS Credential Provider"); goto error; } role_arn = 
flb_output_get_property(config_key_role_arn, ins); if (role_arn) { /* Use the STS Provider */ base_aws_provider = aws_provider; external_id = flb_output_get_property(config_key_external_id, ins); session_name = flb_sts_session_name(); if (!session_name) { flb_plg_error(ins, "Failed to generate aws iam role " "session name"); goto error; } /* STS provider needs yet another separate TLS instance */ sts_tls = flb_tls_create(FLB_TLS_CLIENT_MODE, FLB_TRUE, ins->tls_debug, ins->tls_vhost, ins->tls_ca_path, ins->tls_ca_file, ins->tls_crt_file, ins->tls_key_file, ins->tls_key_passwd); if (!sts_tls) { flb_plg_error(ins, "Failed to create TLS instance for AWS STS Credential " "Provider"); flb_errno(); goto error; } aws_provider = flb_sts_provider_create(config, sts_tls, base_aws_provider, (char *) external_id, (char *) role_arn, session_name, (char *) region, (char *) sts_endpoint, NULL, flb_aws_client_generator()); if (!aws_provider) { flb_plg_error(ins, "Failed to create AWS STS Credential " "Provider"); goto error; } } /* initialize credentials in sync mode */ aws_provider->provider_vtable->sync(aws_provider); aws_provider->provider_vtable->init(aws_provider); /* set back to async */ aws_provider->provider_vtable->async(aws_provider); /* store dependencies in aws_provider for managed cleanup */ aws_provider->base_aws_provider = base_aws_provider; aws_provider->cred_tls = cred_tls; aws_provider->sts_tls = sts_tls; goto cleanup; error: if (aws_provider) { /* disconnect dependencies */ aws_provider->base_aws_provider = NULL; aws_provider->cred_tls = NULL; aws_provider->sts_tls = NULL; /* destroy */ flb_aws_provider_destroy(aws_provider); } /* free dependencies */ if (base_aws_provider) { flb_aws_provider_destroy(base_aws_provider); } if (cred_tls) { flb_tls_destroy(cred_tls); } if (sts_tls) { flb_tls_destroy(sts_tls); } aws_provider = NULL; cleanup: if (config_key_region) { flb_sds_destroy(config_key_region); } if (config_key_sts_endpoint) { 
flb_sds_destroy(config_key_sts_endpoint); } if (config_key_role_arn) { flb_sds_destroy(config_key_role_arn); } if (config_key_external_id) { flb_sds_destroy(config_key_external_id); } if (session_name) { flb_free(session_name); } return aws_provider; } static struct flb_aws_provider *standard_chain_create(struct flb_config *config, struct flb_tls *tls, char *region, char *sts_endpoint, char *proxy, struct flb_aws_client_generator *generator, int eks_irsa, char *profile) { struct flb_aws_provider *sub_provider; struct flb_aws_provider *provider; struct flb_aws_provider_chain *implementation; provider = flb_calloc(1, sizeof(struct flb_aws_provider)); if (!provider) { flb_errno(); return NULL; } pthread_mutex_init(&provider->lock, NULL); implementation = flb_calloc(1, sizeof(struct flb_aws_provider_chain)); if (!implementation) { flb_errno(); flb_free(provider); return NULL; } provider->provider_vtable = &standard_chain_provider_vtable; provider->implementation = implementation; /* Create chain of providers */ mk_list_init(&implementation->sub_providers); sub_provider = flb_aws_env_provider_create(); if (!sub_provider) { /* Env provider will only fail creation if a memory alloc failed */ flb_aws_provider_destroy(provider); return NULL; } flb_debug("[aws_credentials] Initialized Env Provider in standard chain"); mk_list_add(&sub_provider->_head, &implementation->sub_providers); flb_debug("[aws_credentials] creating profile %s provider", profile); sub_provider = flb_profile_provider_create(profile); if (sub_provider) { /* Profile provider can fail if HOME env var is not set */; mk_list_add(&sub_provider->_head, &implementation->sub_providers); flb_debug("[aws_credentials] Initialized AWS Profile Provider in " "standard chain"); } if (eks_irsa == FLB_TRUE) { sub_provider = flb_eks_provider_create(config, tls, region, sts_endpoint, proxy, generator); if (sub_provider) { /* EKS provider can fail if we are not running in k8s */; mk_list_add(&sub_provider->_head, 
&implementation->sub_providers); flb_debug("[aws_credentials] Initialized EKS Provider in standard chain"); } } sub_provider = flb_ecs_provider_create(config, generator); if (sub_provider) { /* ECS Provider will fail creation if we are not running in ECS */ mk_list_add(&sub_provider->_head, &implementation->sub_providers); flb_debug("[aws_credentials] Initialized ECS Provider in standard chain"); } sub_provider = flb_ec2_provider_create(config, generator); if (!sub_provider) { /* EC2 provider will only fail creation if a memory alloc failed */ flb_aws_provider_destroy(provider); return NULL; } mk_list_add(&sub_provider->_head, &implementation->sub_providers); flb_debug("[aws_credentials] Initialized EC2 Provider in standard chain"); return provider; } /* Environment Provider */ struct flb_aws_credentials *get_credentials_fn_environment(struct flb_aws_provider *provider) { char *access_key = NULL; char *secret_key = NULL; char *session_token = NULL; struct flb_aws_credentials *creds = NULL; flb_debug("[aws_credentials] Requesting credentials from the " "env provider.."); access_key = getenv(AWS_ACCESS_KEY_ID); if (!access_key || strlen(access_key) <= 0) { return NULL; } secret_key = getenv(AWS_SECRET_ACCESS_KEY); if (!secret_key || strlen(secret_key) <= 0) { return NULL; } creds = flb_calloc(1, sizeof(struct flb_aws_credentials)); if (!creds) { flb_errno(); return NULL; } creds->access_key_id = flb_sds_create(access_key); if (!creds->access_key_id) { flb_aws_credentials_destroy(creds); flb_errno(); return NULL; } creds->secret_access_key = flb_sds_create(secret_key); if (!creds->secret_access_key) { flb_aws_credentials_destroy(creds); flb_errno(); return NULL; } session_token = getenv(AWS_SESSION_TOKEN); if (session_token && strlen(session_token) > 0) { creds->session_token = flb_sds_create(session_token); if (!creds->session_token) { flb_aws_credentials_destroy(creds); flb_errno(); return NULL; } } else { creds->session_token = NULL; } return creds; } int 
refresh_env(struct flb_aws_provider *provider) { char *access_key = NULL; char *secret_key = NULL; access_key = getenv(AWS_ACCESS_KEY_ID); if (!access_key || strlen(access_key) <= 0) { return -1; } secret_key = getenv(AWS_SECRET_ACCESS_KEY); if (!secret_key || strlen(secret_key) <= 0) { return -1; } return 0; } /* * For the env provider, refresh simply checks if the environment * variables are available. */ int refresh_fn_environment(struct flb_aws_provider *provider) { flb_debug("[aws_credentials] Refresh called on the env provider"); return refresh_env(provider); } int init_fn_environment(struct flb_aws_provider *provider) { flb_debug("[aws_credentials] Init called on the env provider"); return refresh_env(provider); } /* * sync and async are no-ops for the env provider because it does not make * network IO calls */ void sync_fn_environment(struct flb_aws_provider *provider) { return; } void async_fn_environment(struct flb_aws_provider *provider) { return; } void upstream_set_fn_environment(struct flb_aws_provider *provider, struct flb_output_instance *ins) { return; } /* Destroy is a no-op for the env provider */ void destroy_fn_environment(struct flb_aws_provider *provider) { return; } static struct flb_aws_provider_vtable environment_provider_vtable = { .get_credentials = get_credentials_fn_environment, .init = init_fn_environment, .refresh = refresh_fn_environment, .destroy = destroy_fn_environment, .sync = sync_fn_environment, .async = async_fn_environment, .upstream_set = upstream_set_fn_environment, }; struct flb_aws_provider *flb_aws_env_provider_create() { struct flb_aws_provider *provider = flb_calloc(1, sizeof( struct flb_aws_provider)); if (!provider) { flb_errno(); return NULL; } provider->provider_vtable = &environment_provider_vtable; provider->implementation = NULL; return provider; } void flb_aws_credentials_destroy(struct flb_aws_credentials *creds) { if (creds) { if (creds->access_key_id) { flb_sds_destroy(creds->access_key_id); } if 
(creds->secret_access_key) { flb_sds_destroy(creds->secret_access_key); } if (creds->session_token) { flb_sds_destroy(creds->session_token); } flb_free(creds); } } void flb_aws_provider_destroy(struct flb_aws_provider *provider) { if (provider) { if (provider->implementation) { provider->provider_vtable->destroy(provider); } pthread_mutex_destroy(&provider->lock); /* free managed dependencies */ if (provider->base_aws_provider) { flb_aws_provider_destroy(provider->base_aws_provider); } if (provider->cred_tls) { flb_tls_destroy(provider->cred_tls); } if (provider->sts_tls) { flb_tls_destroy(provider->sts_tls); } flb_free(provider); } } time_t timestamp_to_epoch(const char *timestamp) { struct tm tm = {0}; time_t seconds; int r; r = sscanf(timestamp, "%d-%d-%dT%d:%d:%dZ", &tm.tm_year, &tm.tm_mon, &tm.tm_mday, &tm.tm_hour, &tm.tm_min, &tm.tm_sec); if (r != 6) { return -1; } tm.tm_year -= 1900; tm.tm_mon -= 1; tm.tm_isdst = -1; seconds = timegm(&tm); if (seconds < 0) { return -1; } return seconds; } time_t flb_aws_cred_expiration(const char *timestamp) { time_t now; time_t expiration = timestamp_to_epoch(timestamp); if (expiration < 0) { flb_warn("[aws_credentials] Could not parse expiration: %s", timestamp); return -1; } /* * Sanity check - expiration should be ~10 minutes to 12 hours in the future * (> 12 hours is impossible with the current APIs and would likely indicate * a bug in how this code processes timestamps.) */ now = time(NULL); if (expiration < (now + FIVE_MINUTES)) { flb_warn("[aws_credentials] Credential expiration '%s' is less than " "5 minutes in the future.", timestamp); } if (expiration > (now + TWELVE_HOURS)) { flb_warn("[aws_credentials] Credential expiration '%s' is greater than " "12 hours in the future. This should not be possible.", timestamp); } return expiration; } /* * Fluent Bit is now multi-threaded and asynchonous with coros. * The trylock prevents deadlock, and protects the provider * when a cred refresh happens. 
The refresh frees and * sets the shared cred cache, a double free could occur * if two threads do it at the same exact time. */ /* Like a traditional try lock- it does not block if the lock is not obtained */ int try_lock_provider(struct flb_aws_provider *provider) { int ret = 0; ret = pthread_mutex_trylock(&provider->lock); if (ret != 0) { return FLB_FALSE; } return FLB_TRUE; } void unlock_provider(struct flb_aws_provider *provider) { pthread_mutex_unlock(&provider->lock); }
74b52b88082b14843592fc493cf3c5d0ffa5e1c2
8a87f5b889a9ce7d81421515f06d9c9cbf6ce64a
/3rdParty/boost/1.78.0/boost/predef/platform/mingw32.h
73e99e685ddb8335c814cf98b0ded5f3e7b9651b
[ "Apache-2.0", "BSD-3-Clause", "ICU", "Zlib", "GPL-1.0-or-later", "OpenSSL", "ISC", "LicenseRef-scancode-gutenberg-2020", "MIT", "GPL-2.0-only", "CC0-1.0", "BSL-1.0", "LicenseRef-scancode-autoconf-simple-exception", "LicenseRef-scancode-pcre", "Bison-exception-2.2", "LicenseRef-scancode-public-domain", "JSON", "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "Unlicense", "BSD-4-Clause", "Python-2.0", "LGPL-2.1-or-later" ]
permissive
arangodb/arangodb
0980625e76c56a2449d90dcb8d8f2c485e28a83b
43c40535cee37fc7349a21793dc33b1833735af5
refs/heads/devel
2023-08-31T09:34:47.451950
2023-08-31T07:25:02
2023-08-31T07:25:02
2,649,214
13,385
982
Apache-2.0
2023-09-14T17:02:16
2011-10-26T06:42:00
C++
UTF-8
C
false
false
1,869
h
mingw32.h
/* Copyright Rene Rivera 2008-2015 Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ #ifndef BOOST_PREDEF_PLAT_MINGW32_H #define BOOST_PREDEF_PLAT_MINGW32_H #include <boost/predef/version_number.h> #include <boost/predef/make.h> /* tag::reference[] = `BOOST_PLAT_MINGW32` http://www.mingw.org/[MinGW] platform. Version number available as major, minor, and patch. [options="header"] |=== | {predef_symbol} | {predef_version} | `+__MINGW32__+` | {predef_detection} | `+__MINGW32_VERSION_MAJOR+`, `+__MINGW32_VERSION_MINOR+` | V.R.0 |=== */ // end::reference[] #define BOOST_PLAT_MINGW32 BOOST_VERSION_NUMBER_NOT_AVAILABLE #if defined(__MINGW32__) # include <_mingw.h> # if !defined(BOOST_PLAT_MINGW32_DETECTION) && (defined(__MINGW32_VERSION_MAJOR) && defined(__MINGW32_VERSION_MINOR)) # define BOOST_PLAT_MINGW32_DETECTION \ BOOST_VERSION_NUMBER(__MINGW32_VERSION_MAJOR,__MINGW32_VERSION_MINOR,0) # endif # if !defined(BOOST_PLAT_MINGW32_DETECTION) # define BOOST_PLAT_MINGW32_DETECTION BOOST_VERSION_NUMBER_AVAILABLE # endif #endif #ifdef BOOST_PLAT_MINGW32_DETECTION # define BOOST_PLAT_MINGW32_AVAILABLE # if defined(BOOST_PREDEF_DETAIL_PLAT_DETECTED) # define BOOST_PLAT_MINGW32_EMULATED BOOST_PLAT_MINGW32_DETECTION # else # undef BOOST_PLAT_MINGW32 # define BOOST_PLAT_MINGW32 BOOST_PLAT_MINGW32_DETECTION # endif # include <boost/predef/detail/platform_detected.h> #endif #define BOOST_PLAT_MINGW32_NAME "MinGW" #endif #include <boost/predef/detail/test.h> BOOST_PREDEF_DECLARE_TEST(BOOST_PLAT_MINGW32,BOOST_PLAT_MINGW32_NAME) #ifdef BOOST_PLAT_MINGW32_EMULATED #include <boost/predef/detail/test.h> BOOST_PREDEF_DECLARE_TEST(BOOST_PLAT_MINGW32_EMULATED,BOOST_PLAT_MINGW32_NAME) #endif
5a967815c11d5a290d00961b3d182955bfd672d9
376e1818d427b5e4d32fa6dd6c7b71e9fd88afdb
/lang/ruby30-base/patches/patch-include_ruby_internal_static__assert.h
0e93ffd892916b1ad0c5ba179533db45ede4fe3f
[]
no_license
NetBSD/pkgsrc
a0732c023519650ef821ab89c23ab6ab59e25bdb
d042034ec4896cc5b47ed6f2e5b8802d9bc5c556
refs/heads/trunk
2023-09-01T07:40:12.138283
2023-09-01T05:25:19
2023-09-01T05:25:19
88,439,572
321
138
null
2023-07-12T22:34:14
2017-04-16T20:04:15
null
UTF-8
C
false
false
598
h
patch-include_ruby_internal_static__assert.h
$NetBSD: patch-include_ruby_internal_static__assert.h,v 1.1 2022/05/04 16:44:53 taca Exp $ Add the way to stop using static_assert. --- include/ruby/internal/static_assert.h.orig 2022-04-12 11:48:55.000000000 +0000 +++ include/ruby/internal/static_assert.h @@ -50,7 +50,7 @@ #elif defined(__STDC_VERSION__) && RBIMPL_COMPILER_SINCE(GCC, 4, 6, 0) # define RBIMPL_STATIC_ASSERT0 __extension__ _Static_assert -#elif defined(static_assert) +#elif defined(static_assert) && !defined(RB_AVOID_STATIC_ASSERT) # /* Take <assert.h> definition */ # define RBIMPL_STATIC_ASSERT0 static_assert #endif
83c4eecc2bce6b865a82b1cdfcb6b0755801d18c
1efb2283837c9b70bc6449cec877799e4efa3268
/src/pm/hydra/lib/tools/bootstrap/external/pbs.h
6d07f82d3919d7220a218ddd3bd64841741203c6
[ "mpich2" ]
permissive
pmodels/mpich
d2392e8e30536cad3e500c16aa1e71211101d83f
2d265f9f5f93ebdd07ad547423bc6212868262a4
refs/heads/main
2023-09-04T05:50:15.041823
2023-09-01T23:07:33
2023-09-01T23:07:33
70,918,679
506
313
NOASSERTION
2023-09-14T14:38:36
2016-10-14T14:39:42
C
UTF-8
C
false
false
953
h
pbs.h
/* * Copyright (C) by Argonne National Laboratory * See COPYRIGHT in top-level directory */ #ifndef PBS_H_INCLUDED #define PBS_H_INCLUDED #include "hydra.h" #if defined(HAVE_TM_H) #include "tm.h" struct HYDT_bscd_pbs_sys_s { int spawn_count; tm_event_t *spawn_events; tm_task_id *task_id; /* Array of TM task(process) IDs */ }; extern struct HYDT_bscd_pbs_sys_s *HYDT_bscd_pbs_sys; HYD_status HYDT_bscd_pbs_launch_procs(char **args, struct HYD_proxy *proxy_list, int num_hosts, int use_rmk, int *control_fd); HYD_status HYDT_bscd_pbs_query_env_inherit(const char *env_name, int *ret); HYD_status HYDT_bscd_pbs_wait_for_completion(int timeout); HYD_status HYDT_bscd_pbs_launcher_finalize(void); #endif /* if defined(HAVE_TM_H) */ HYD_status HYDT_bscd_pbs_query_native_int(int *ret); HYD_status HYDT_bscd_pbs_query_node_list(struct HYD_node **node_list); #endif /* PBS_H_INCLUDED */
dba18baff661a3cb3c518fdfda5fd7c56ae8095f
de21f9075f55640514c29ef0f1fe3f0690845764
/regression/ansi-c/array_initialization2/main.c
47e266d2bed3ba2f19031b734b2007084acb5b34
[ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "BSD-4-Clause" ]
permissive
diffblue/cbmc
975a074ac445febb3b5715f8792beb545522dc18
decd2839c2f51a54b2ad0f3e89fdc1b4bf78cd16
refs/heads/develop
2023-08-31T05:52:05.342195
2023-08-30T13:31:51
2023-08-30T13:31:51
51,877,056
589
309
NOASSERTION
2023-09-14T18:49:17
2016-02-16T23:03:52
C++
UTF-8
C
false
false
296
c
main.c
#include <assert.h> int a[(int)(10./1.)]; int b[(int)2] = { 10, 20 }; int c[(int)(10./1.)] = { 10, 20 }; int d[(int)(10/1)] = { 10, 20 }; int main (void) { if (a[0]) { assert(b[1] + c[2] > 20); } return 1; } extern int g_array[]; int array[(int)(10./1)]; int array2[(int)(10./1)];
20551da9d32ec9d38b219095a955b91e07c33fb3
72f6d3ad72b2a4a9b6c5f93c5d1b744e2940b884
/lib/common/http3client.c
d6264edf74d606f181f3c02dd2e9d8851abbb9b3
[ "MIT" ]
permissive
h2o/h2o
70012b6527ceb54e9e2819c9c75242b18e381485
b165770ce704c782ddee7428ea4a0b23c8bb7894
refs/heads/master
2023-08-16T13:16:35.018003
2023-08-16T03:56:28
2023-08-16T03:56:28
23,029,617
9,377
983
MIT
2023-09-12T04:49:14
2014-08-16T23:59:03
C
UTF-8
C
false
false
35,951
c
http3client.c
/* * Copyright (c) 2018 Fastly, Kazuho Oku * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <assert.h> #include <errno.h> #include <stdlib.h> #include <sys/types.h> #include "quicly.h" #include "h2o/hostinfo.h" #include "h2o/httpclient.h" #include "h2o/http2_common.h" #include "h2o/http3_common.h" #include "h2o/http3_internal.h" #include "../probes_.h" /** * internal error code used for signalling EOS */ #define ERROR_EOS H2O_HTTP3_ERROR_USER1 /** * Maxmium amount of unsent bytes to be buffered when acting as a tunnel. 
*/ #define TUNNEL_MAX_UNSENT 16384 struct st_h2o_http3client_req_t { /** * superclass */ h2o_httpclient_t super; /** * pointer to the connection */ struct st_h2o_httpclient__h3_conn_t *conn; /** * is NULL until connection is established */ quicly_stream_t *quic; /** * currently only used for pending_requests */ h2o_linklist_t link; /** * */ uint64_t bytes_left_in_data_frame; /** * */ h2o_buffer_t *sendbuf; /** * */ struct { /** * HTTP-level buffer that contains (part of) response body received. Is the variable registered as `h2o_httpclient::buf`. */ h2o_buffer_t *body; /** * QUIC stream-level buffer that contains bytes that have not yet been processed at the HTTP/3 framing decoding level. This * buffer may have gaps. The beginning offset of `partial_frame` is equal to `recvstate.data_off`. */ h2o_buffer_t *stream; /** * Retains the amount of stream-level data that was available in the previous call. This value is used to see if processing * of new stream data is necessary. */ size_t prev_bytes_available; } recvbuf; /** * called when new contigious data becomes available */ int (*handle_input)(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, int err, const char **err_desc); /** * `proceed_req` callback. The callback is invoked when all bytes in the send buffer is emitted for the first time. * `bytes_inflight` contains the number of bytes being transmitted, or SIZE_MAX if nothing is inflight. 
*/ struct { h2o_httpclient_proceed_req_cb cb; size_t bytes_inflight; } proceed_req; /** * */ enum { H2O_HTTP3CLIENT_RESPONSE_STATE_HEAD, H2O_HTTP3CLIENT_RESPONSE_STATE_BODY, H2O_HTTP3CLIENT_RESPONSE_STATE_CLOSED } response_state; /** * callback used for forwarding CONNECT-UDP using H3_DATAGRAMS */ h2o_httpclient_forward_datagram_cb on_read_datagrams; /** * flags */ unsigned offered_datagram_flow_id : 1; }; static int handle_input_expect_data_frame(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, int err, const char **err_desc); static void start_request(struct st_h2o_http3client_req_t *req); static int do_write_req(h2o_httpclient_t *_client, h2o_iovec_t chunk, int is_end_stream); static size_t emit_data(struct st_h2o_http3client_req_t *req, h2o_iovec_t payload) { size_t nbytes; { /* emit header */ uint8_t buf[9], *p = buf; *p++ = H2O_HTTP3_FRAME_TYPE_DATA; p = quicly_encodev(p, payload.len); nbytes = p - buf; h2o_buffer_append(&req->sendbuf, buf, nbytes); } /* emit payload */ h2o_buffer_append(&req->sendbuf, payload.base, payload.len); nbytes += payload.len; return nbytes; } static void destroy_request(struct st_h2o_http3client_req_t *req) { assert(req->quic == NULL); h2o_buffer_dispose(&req->sendbuf); h2o_buffer_dispose(&req->recvbuf.body); h2o_buffer_dispose(&req->recvbuf.stream); if (h2o_timer_is_linked(&req->super._timeout)) h2o_timer_unlink(&req->super._timeout); if (h2o_linklist_is_linked(&req->link)) h2o_linklist_unlink(&req->link); free(req); } static void detach_stream(struct st_h2o_http3client_req_t *req) { req->quic->callbacks = &quicly_stream_noop_callbacks; req->quic->data = NULL; req->quic = NULL; } static void close_stream(struct st_h2o_http3client_req_t *req, int err) { /* TODO are we expected to send two error codes? 
*/ if (!quicly_sendstate_transfer_complete(&req->quic->sendstate)) quicly_reset_stream(req->quic, err); if (!quicly_recvstate_transfer_complete(&req->quic->recvstate)) quicly_request_stop(req->quic, err); detach_stream(req); } static void write_datagrams(h2o_httpclient_t *_client, h2o_iovec_t *datagrams, size_t num_datagrams) { struct st_h2o_http3client_req_t *req = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3client_req_t, super, _client); h2o_http3_send_h3_datagrams(&req->conn->super, req->quic->stream_id, datagrams, num_datagrams); } static struct st_h2o_httpclient__h3_conn_t *find_connection(h2o_httpclient_connection_pool_t *pool, h2o_url_t *origin) { int should_check_target = h2o_socketpool_is_global(pool->socketpool); /* FIXME: * - check connection state(e.g., max_concurrent_streams, if received GOAWAY) * - use hashmap */ for (h2o_linklist_t *l = pool->http3.conns.next; l != &pool->http3.conns; l = l->next) { struct st_h2o_httpclient__h3_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_httpclient__h3_conn_t, link, l); if (should_check_target && !(conn->server.origin_url.scheme == origin->scheme && h2o_memis(conn->server.origin_url.authority.base, conn->server.origin_url.authority.len, origin->authority.base, origin->authority.len))) continue; return conn; } return NULL; } static void start_pending_requests(struct st_h2o_httpclient__h3_conn_t *conn) { while (!h2o_linklist_is_empty(&conn->pending_requests)) { struct st_h2o_http3client_req_t *req = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3client_req_t, link, conn->pending_requests.next); h2o_linklist_unlink(&req->link); start_request(req); } } static void call_proceed_req(struct st_h2o_http3client_req_t *req, const char *errstr) { req->proceed_req.bytes_inflight = SIZE_MAX; req->proceed_req.cb(&req->super, errstr); } static void destroy_connection(struct st_h2o_httpclient__h3_conn_t *conn, const char *errstr) { assert(errstr != NULL); if (h2o_linklist_is_linked(&conn->link)) h2o_linklist_unlink(&conn->link); 
while (!h2o_linklist_is_empty(&conn->pending_requests)) { struct st_h2o_http3client_req_t *req = H2O_STRUCT_FROM_MEMBER(struct st_h2o_http3client_req_t, link, conn->pending_requests.next); h2o_linklist_unlink(&req->link); req->super._cb.on_connect(&req->super, errstr, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); destroy_request(req); } assert(h2o_linklist_is_empty(&conn->pending_requests)); if (conn->getaddr_req != NULL) h2o_hostinfo_getaddr_cancel(conn->getaddr_req); h2o_timer_unlink(&conn->timeout); free(conn->server.origin_url.authority.base); free(conn->server.origin_url.host.base); free(conn->handshake_properties.client.session_ticket.base); h2o_http3_dispose_conn(&conn->super); free(conn); } static void destroy_connection_on_transport_close(h2o_quic_conn_t *_conn) { struct st_h2o_httpclient__h3_conn_t *conn = (void *)_conn; /* When a connection gets closed while request is inflight, the most probable cause is some error in the transport (or at the * application protocol layer). But as we do not know the exact cause, we use a generic error here. 
*/ destroy_connection(conn, h2o_httpclient_error_io); } static void on_connect_timeout(h2o_timer_t *timeout) { struct st_h2o_httpclient__h3_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_httpclient__h3_conn_t, timeout, timeout); destroy_connection(conn, h2o_httpclient_error_connect_timeout); } static void start_connect(struct st_h2o_httpclient__h3_conn_t *conn, struct sockaddr *sa) { quicly_conn_t *qconn; ptls_iovec_t address_token = ptls_iovec_init(NULL, 0); quicly_transport_parameters_t resumed_tp; int ret; assert(conn->super.super.quic == NULL); assert(conn->getaddr_req == NULL); assert(h2o_timer_is_linked(&conn->timeout)); assert(conn->timeout.cb == on_connect_timeout); /* create QUIC connection context and attach */ if (conn->ctx->http3->load_session != NULL) { if (!conn->ctx->http3->load_session(conn->ctx, sa, conn->server.origin_url.host.base, &address_token, &conn->handshake_properties.client.session_ticket, &resumed_tp)) goto Fail; } if ((ret = quicly_connect(&qconn, &conn->ctx->http3->quic, conn->server.origin_url.host.base, sa, NULL, &conn->ctx->http3->h3.next_cid, address_token, &conn->handshake_properties, conn->handshake_properties.client.session_ticket.base != NULL ? 
&resumed_tp : NULL, NULL)) != 0) { conn->super.super.quic = NULL; /* just in case */ goto Fail; } ++conn->ctx->http3->h3.next_cid.master_id; /* FIXME check overlap */ if ((ret = h2o_http3_setup(&conn->super, qconn)) != 0) goto Fail; if (quicly_connection_is_ready(conn->super.super.quic)) start_pending_requests(conn); h2o_quic_send(&conn->super.super); free(address_token.base); return; Fail: free(address_token.base); destroy_connection(conn, h2o_httpclient_error_internal); } static void on_getaddr(h2o_hostinfo_getaddr_req_t *getaddr_req, const char *errstr, struct addrinfo *res, void *_conn) { struct st_h2o_httpclient__h3_conn_t *conn = _conn; assert(getaddr_req == conn->getaddr_req); conn->getaddr_req = NULL; if (errstr != NULL) { destroy_connection(conn, errstr); return; } struct addrinfo *selected = h2o_hostinfo_select_one(res); start_connect(conn, selected->ai_addr); } static void handle_control_stream_frame(h2o_http3_conn_t *_conn, uint64_t type, const uint8_t *payload, size_t len) { struct st_h2o_httpclient__h3_conn_t *conn = (void *)_conn; int err; const char *err_desc = NULL; if (!h2o_http3_has_received_settings(&conn->super)) { if (type != H2O_HTTP3_FRAME_TYPE_SETTINGS) { err = H2O_HTTP3_ERROR_MISSING_SETTINGS; goto Fail; } if ((err = h2o_http3_handle_settings_frame(&conn->super, payload, len, &err_desc)) != 0) goto Fail; assert(h2o_http3_has_received_settings(&conn->super)); /* issue requests (unless it has been done already due to 0-RTT key being available) */ start_pending_requests(conn); } else { switch (type) { case H2O_HTTP3_FRAME_TYPE_SETTINGS: err = H2O_HTTP3_ERROR_FRAME_UNEXPECTED; err_desc = "unexpected SETTINGS frame"; goto Fail; case H2O_HTTP3_FRAME_TYPE_GOAWAY: { h2o_http3_goaway_frame_t frame; if ((err = h2o_http3_decode_goaway_frame(&frame, payload, len, &err_desc)) != 0) goto Fail; /* FIXME: stop issuing new requests */ break; } default: break; } } return; Fail: h2o_quic_close_connection(&conn->super.super, err, err_desc); } struct 
st_h2o_httpclient__h3_conn_t *create_connection(h2o_httpclient_ctx_t *ctx, h2o_httpclient_connection_pool_t *pool, h2o_url_t *origin) { /* FIXME When using a non-global socket pool, let the socket pool load balance H3 connections among the list of targets being * available. But until then, we use the first entry. */ if (!h2o_socketpool_is_global(pool->socketpool)) origin = &pool->socketpool->targets.entries[0]->url; static const h2o_http3_conn_callbacks_t callbacks = {{destroy_connection_on_transport_close}, handle_control_stream_frame}; static const h2o_http3_qpack_context_t qpack_ctx = {0 /* TODO */}; struct st_h2o_httpclient__h3_conn_t *conn = h2o_mem_alloc(sizeof(*conn)); h2o_http3_init_conn(&conn->super, &ctx->http3->h3, &callbacks, &qpack_ctx); memset((char *)conn + sizeof(conn->super), 0, sizeof(*conn) - sizeof(conn->super)); conn->ctx = ctx; h2o_url_copy(NULL, &conn->server.origin_url, origin); sprintf(conn->server.named_serv, "%" PRIu16, h2o_url_get_port(origin)); conn->handshake_properties.client.negotiated_protocols.list = h2o_http3_alpn; conn->handshake_properties.client.negotiated_protocols.count = sizeof(h2o_http3_alpn) / sizeof(h2o_http3_alpn[0]); h2o_linklist_insert(&pool->http3.conns, &conn->link); h2o_linklist_init_anchor(&conn->pending_requests); conn->getaddr_req = h2o_hostinfo_getaddr(conn->ctx->getaddr_receiver, conn->server.origin_url.host, h2o_iovec_init(conn->server.named_serv, strlen(conn->server.named_serv)), AF_UNSPEC, SOCK_DGRAM, IPPROTO_UDP, AI_ADDRCONFIG | AI_NUMERICSERV, on_getaddr, conn); h2o_timer_link(conn->ctx->loop, conn->ctx->connect_timeout, &conn->timeout); conn->timeout.cb = on_connect_timeout; return conn; } static void notify_response_error(struct st_h2o_http3client_req_t *req, const char *errstr) { assert(errstr != NULL); switch (req->response_state) { case H2O_HTTP3CLIENT_RESPONSE_STATE_HEAD: req->super._cb.on_head(&req->super, errstr, NULL); break; case H2O_HTTP3CLIENT_RESPONSE_STATE_BODY: 
req->super._cb.on_body(&req->super, errstr, NULL, 0); break; default: break; } req->response_state = H2O_HTTP3CLIENT_RESPONSE_STATE_CLOSED; } static int call_on_body(struct st_h2o_http3client_req_t *req, const char *errstr) { assert(req->response_state == H2O_HTTP3CLIENT_RESPONSE_STATE_BODY); int ret = req->super._cb.on_body(&req->super, errstr, NULL, 0); if (errstr != NULL) req->response_state = H2O_HTTP3CLIENT_RESPONSE_STATE_CLOSED; return ret; } static int handle_input_data_payload(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, int err, const char **err_desc) { /* save data, update states */ if (req->bytes_left_in_data_frame != 0) { size_t payload_bytes = req->bytes_left_in_data_frame; if (src_end - *src < payload_bytes) payload_bytes = src_end - *src; h2o_buffer_append(&req->recvbuf.body, *src, payload_bytes); *src += payload_bytes; req->bytes_left_in_data_frame -= payload_bytes; } if (req->bytes_left_in_data_frame == 0) req->handle_input = handle_input_expect_data_frame; /* call the handler */ const char *errstr = NULL; if (*src == src_end && err != 0) { /* FIXME also check content-length? see what other protocol handlers do */ errstr = err == ERROR_EOS && req->bytes_left_in_data_frame == 0 ? 
h2o_httpclient_error_is_eos : h2o_httpclient_error_io; } if (call_on_body(req, errstr) != 0) return H2O_HTTP3_ERROR_INTERNAL; return 0; } int handle_input_expect_data_frame(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, int err, const char **err_desc) { assert(req->bytes_left_in_data_frame == 0); if (*src == src_end) { /* return early if no input, no state change */ if (err == 0) return 0; /* either EOS or an unexpected close; delegate the task to the payload processing function */ } else { /* otherwise, read the frame */ h2o_http3_read_frame_t frame; int ret; if ((ret = h2o_http3_read_frame(&frame, 1, H2O_HTTP3_STREAM_TYPE_REQUEST, src, src_end, err_desc)) != 0) { /* incomplete */ if (ret == H2O_HTTP3_ERROR_INCOMPLETE && err == 0) return ret; call_on_body(req, h2o_httpclient_error_malformed_frame); return ret; } switch (frame.type) { case H2O_HTTP3_FRAME_TYPE_DATA: break; case H2O_HTTP3_FRAME_TYPE_HEADERS: if (req->super.upgrade_to == h2o_httpclient_upgrade_to_connect) return H2O_HTTP3_ERROR_FRAME_UNEXPECTED; /* flow continues */ default: /* FIXME handle push_promise, trailers */ return 0; } req->bytes_left_in_data_frame = frame.length; } /* unexpected close of DATA frame is handled by handle_input_data_payload. 
We rely on the function to detect if the DATA frame * is closed right after the frame header */ req->handle_input = handle_input_data_payload; return handle_input_data_payload(req, src, src_end, err, err_desc); } static int handle_input_expect_headers(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, int err, const char **err_desc) { h2o_http3_read_frame_t frame; int status; h2o_headers_t headers = {NULL}; h2o_iovec_t datagram_flow_id = {}; uint8_t header_ack[H2O_HPACK_ENCODE_INT_MAX_LENGTH]; size_t header_ack_len; int ret, frame_is_eos; /* read HEADERS frame */ if ((ret = h2o_http3_read_frame(&frame, 1, H2O_HTTP3_STREAM_TYPE_REQUEST, src, src_end, err_desc)) != 0) { if (ret == H2O_HTTP3_ERROR_INCOMPLETE) { if (err != 0) { notify_response_error(req, h2o_httpclient_error_io); return 0; } return ret; } notify_response_error(req, "response header too large"); return H2O_HTTP3_ERROR_EXCESSIVE_LOAD; /* FIXME correct code? */ } frame_is_eos = *src == src_end && err != 0; if (frame.type != H2O_HTTP3_FRAME_TYPE_HEADERS) { switch (frame.type) { case H2O_HTTP3_FRAME_TYPE_DATA: *err_desc = "received DATA frame before HEADERS"; return H2O_HTTP3_ERROR_FRAME_UNEXPECTED; default: return 0; } } if ((ret = h2o_qpack_parse_response(req->super.pool, req->conn->super.qpack.dec, req->quic->stream_id, &status, &headers, &datagram_flow_id, header_ack, &header_ack_len, frame.payload, frame.length, err_desc)) != 0) { if (ret == H2O_HTTP2_ERROR_INCOMPLETE) { /* the request is blocked by the QPACK stream */ req->handle_input = NULL; /* FIXME */ return 0; } if (*err_desc == NULL) *err_desc = "qpack error"; notify_response_error(req, *err_desc); return H2O_HTTP3_ERROR_GENERAL_PROTOCOL; /* FIXME */ } if (header_ack_len != 0) h2o_http3_send_qpack_header_ack(&req->conn->super, header_ack, header_ack_len); if (datagram_flow_id.base != NULL) { if (!req->offered_datagram_flow_id) { *err_desc = "no offered datagram-flow-id"; return H2O_HTTP3_ERROR_GENERAL_PROTOCOL; } 
/* TODO validate the returned value */ } /* handle 1xx */ if (100 <= status && status <= 199) { if (status == 101) { *err_desc = "unexpected 101"; notify_response_error(req, *err_desc); return H2O_HTTP3_ERROR_GENERAL_PROTOCOL; } if (frame_is_eos) { notify_response_error(req, h2o_httpclient_error_io); return 0; } if (req->super.informational_cb != NULL && req->super.informational_cb(&req->super, 0x300, status, h2o_iovec_init(NULL, 0), headers.entries, headers.size) != 0) { return H2O_HTTP3_ERROR_INTERNAL; } return 0; } /* handle final response, creating tunnel object if necessary */ h2o_httpclient_on_head_t on_head = {.version = 0x300, .msg = h2o_iovec_init(NULL, 0), .status = status, .headers = headers.entries, .num_headers = headers.size}; if (h2o_httpclient__tunnel_is_ready(&req->super, status) && datagram_flow_id.base != NULL) { on_head.forward_datagram.write_ = write_datagrams; on_head.forward_datagram.read_ = &req->on_read_datagrams; } req->super._cb.on_body = req->super._cb.on_head(&req->super, frame_is_eos ? h2o_httpclient_error_is_eos : NULL, &on_head); req->response_state = H2O_HTTP3CLIENT_RESPONSE_STATE_BODY; if (req->super._cb.on_body == NULL) return frame_is_eos ? 
0 : H2O_HTTP3_ERROR_INTERNAL; /* handle body */ req->handle_input = handle_input_expect_data_frame; return 0; } static void on_stream_destroy(quicly_stream_t *qs, int err) { struct st_h2o_http3client_req_t *req; if ((req = qs->data) == NULL) return; notify_response_error(req, h2o_httpclient_error_io); detach_stream(req); destroy_request(req); } static void on_send_shift(quicly_stream_t *qs, size_t delta) { struct st_h2o_http3client_req_t *req = qs->data; assert(req != NULL); h2o_buffer_consume(&req->sendbuf, delta); } static void on_send_emit(quicly_stream_t *qs, size_t off, void *dst, size_t *len, int *wrote_all) { struct st_h2o_http3client_req_t *req = qs->data; if (*len >= req->sendbuf->size - off) { *len = req->sendbuf->size - off; *wrote_all = 1; } else { *wrote_all = 0; } memcpy(dst, req->sendbuf->bytes + off, *len); if (*wrote_all && req->proceed_req.bytes_inflight != SIZE_MAX) call_proceed_req(req, NULL); } static void on_send_stop(quicly_stream_t *qs, int err) { struct st_h2o_http3client_req_t *req; if ((req = qs->data) == NULL) return; if (!quicly_sendstate_transfer_complete(&req->quic->sendstate)) quicly_reset_stream(req->quic, err); if (req->proceed_req.bytes_inflight != SIZE_MAX) call_proceed_req(req, h2o_httpclient_error_io /* TODO better error code? */); if (!quicly_recvstate_transfer_complete(&req->quic->recvstate)) { quicly_request_stop(req->quic, H2O_HTTP3_ERROR_REQUEST_CANCELLED); notify_response_error(req, h2o_httpclient_error_io); } detach_stream(req); destroy_request(req); } static int on_receive_process_bytes(struct st_h2o_http3client_req_t *req, const uint8_t **src, const uint8_t *src_end, const char **err_desc) { int ret, is_eos = quicly_recvstate_transfer_complete(&req->quic->recvstate); assert(is_eos || *src != src_end); do { if ((ret = req->handle_input(req, src, src_end, is_eos ? ERROR_EOS : 0, err_desc)) != 0) { if (ret == H2O_HTTP3_ERROR_INCOMPLETE) ret = is_eos ? 
H2O_HTTP3_ERROR_FRAME : 0; break; } } while (*src != src_end); return ret; } static void on_receive(quicly_stream_t *qs, size_t off, const void *input, size_t len) { struct st_h2o_http3client_req_t *req = qs->data; size_t bytes_consumed; int err = 0; const char *err_desc = NULL; /* process the input, update stream-level receive buffer */ if (req->recvbuf.stream->size == 0 && off == 0) { /* fast path; process the input directly, save the remaining bytes */ const uint8_t *src = input; err = on_receive_process_bytes(req, &src, src + len, &err_desc); bytes_consumed = src - (const uint8_t *)input; if (bytes_consumed != len) h2o_buffer_append(&req->recvbuf.stream, src, len - bytes_consumed); } else { /* slow path; copy data to partial_frame */ size_t size_required = off + len; if (req->recvbuf.stream->size < size_required) { h2o_buffer_reserve(&req->recvbuf.stream, size_required - req->recvbuf.stream->size); req->recvbuf.stream->size = size_required; } memcpy(req->recvbuf.stream->bytes + off, input, len); /* just return if no new data is available */ size_t bytes_available = quicly_recvstate_bytes_available(&req->quic->recvstate); if (req->recvbuf.prev_bytes_available == bytes_available) return; /* process the bytes that have not been processed, update stream-level buffer */ const uint8_t *src = (const uint8_t *)req->recvbuf.stream->bytes; err = on_receive_process_bytes(req, &src, (const uint8_t *)req->recvbuf.stream->bytes + bytes_available, &err_desc); bytes_consumed = src - (const uint8_t *)req->recvbuf.stream->bytes; h2o_buffer_consume(&req->recvbuf.stream, bytes_consumed); } /* update QUIC stream-level state */ if (bytes_consumed != 0) quicly_stream_sync_recvbuf(req->quic, bytes_consumed); req->recvbuf.prev_bytes_available = quicly_recvstate_bytes_available(&req->quic->recvstate); /* cleanup */ if (quicly_recvstate_transfer_complete(&req->quic->recvstate)) { /* destroy the request if send-side is already closed, otherwise wait until the send-side gets closed */ if 
(quicly_sendstate_transfer_complete(&req->quic->sendstate)) { detach_stream(req); destroy_request(req); } } else if (err != 0) { notify_response_error(req, h2o_httpclient_error_io); int send_is_open = quicly_sendstate_is_open(&req->quic->sendstate); close_stream(req, err); /* immediately dispose of the request if possible, or wait for the send-side to close */ if (!send_is_open) { destroy_request(req); } else if (req->proceed_req.bytes_inflight != SIZE_MAX) { call_proceed_req(req, h2o_httpclient_error_io); destroy_request(req); } else { /* wait for write_req to be called */ } } } static void on_receive_reset(quicly_stream_t *qs, int err) { struct st_h2o_http3client_req_t *req = qs->data; notify_response_error(req, h2o_httpclient_error_io); close_stream(req, H2O_HTTP3_ERROR_REQUEST_CANCELLED); destroy_request(req); } void start_request(struct st_h2o_http3client_req_t *req) { h2o_iovec_t method; h2o_url_t url; const h2o_header_t *headers; size_t num_headers; h2o_iovec_t body; h2o_httpclient_properties_t props = {NULL}; char datagram_flow_id_buf[sizeof(H2O_UINT64_LONGEST_STR)]; int ret; assert(req->quic == NULL); assert(!h2o_linklist_is_linked(&req->link)); if ((req->super._cb.on_head = req->super._cb.on_connect(&req->super, NULL, &method, &url, &headers, &num_headers, &body, &req->proceed_req.cb, &props, &req->conn->server.origin_url)) == NULL) { destroy_request(req); return; } if ((ret = quicly_open_stream(req->conn->super.super.quic, &req->quic, 0)) != 0) { notify_response_error(req, "failed to open stream"); destroy_request(req); return; } req->quic->data = req; /* send request (TODO optimize) */ h2o_iovec_t datagram_flow_id = {}; if (req->super.upgrade_to == h2o_httpclient_upgrade_to_connect && h2o_memis(method.base, method.len, H2O_STRLIT("CONNECT-UDP")) && req->conn->super.peer_settings.h3_datagram) { datagram_flow_id.len = sprintf(datagram_flow_id_buf, "%" PRIu64, req->quic->stream_id); datagram_flow_id.base = datagram_flow_id_buf; 
req->offered_datagram_flow_id = 1; } h2o_iovec_t headers_frame = h2o_qpack_flatten_request(req->conn->super.qpack.enc, req->super.pool, req->quic->stream_id, NULL, method, url.scheme, url.authority, url.path, headers, num_headers, datagram_flow_id); h2o_buffer_append(&req->sendbuf, headers_frame.base, headers_frame.len); if (body.len != 0) emit_data(req, body); if (req->proceed_req.cb != NULL) { req->super.write_req = do_write_req; req->proceed_req.bytes_inflight = body.len; } if (req->proceed_req.cb == NULL && req->super.upgrade_to == NULL) quicly_sendstate_shutdown(&req->quic->sendstate, req->sendbuf->size); quicly_stream_sync_sendbuf(req->quic, 1); req->handle_input = handle_input_expect_headers; } static void cancel_request(h2o_httpclient_t *_client) { struct st_h2o_http3client_req_t *req = (void *)_client; if (req->quic != NULL) close_stream(req, H2O_HTTP3_ERROR_REQUEST_CANCELLED); destroy_request(req); } static void do_get_conn_properties(h2o_httpclient_t *_client, h2o_httpclient_conn_properties_t *properties) { struct st_h2o_http3client_req_t *req = (void *)_client; ptls_t *tls; ptls_cipher_suite_t *cipher; if (req->quic != NULL && (tls = quicly_get_tls(req->quic->conn), (cipher = ptls_get_cipher(tls)) != NULL)) { properties->ssl.protocol_version = "TLSv1.3"; properties->ssl.session_reused = ptls_is_psk_handshake(tls); properties->ssl.cipher = cipher->name; properties->ssl.cipher_bits = (int)cipher->aead->key_size; } else { properties->ssl.protocol_version = NULL; properties->ssl.session_reused = -1; properties->ssl.cipher = NULL; properties->ssl.cipher_bits = 0; } properties->sock = NULL; } static void do_update_window(h2o_httpclient_t *_client) { /* TODO Stop receiving data for the stream when `buf` grows to certain extent. Then, resume when this function is being called. 
*/ } int do_write_req(h2o_httpclient_t *_client, h2o_iovec_t chunk, int is_end_stream) { struct st_h2o_http3client_req_t *req = (void *)_client; assert(req->proceed_req.bytes_inflight == SIZE_MAX); /* Notify error to the application, if the stream has already been closed (due to e.g., a stream error) or if the send-side has * been closed (due to STOP_SENDING). Also, destroy the request if the receive side has already been closed. */ if (req->quic == NULL || !quicly_sendstate_is_open(&req->quic->sendstate)) { if (req->quic != NULL && quicly_recvstate_transfer_complete(&req->quic->recvstate)) close_stream(req, H2O_HTTP3_ERROR_REQUEST_CANCELLED); if (req->quic == NULL) destroy_request(req); return 1; } emit_data(req, chunk); /* shutdown if we've written all request body */ if (is_end_stream) { assert(quicly_sendstate_is_open(&req->quic->sendstate)); quicly_sendstate_shutdown(&req->quic->sendstate, req->quic->sendstate.acked.ranges[0].end + req->sendbuf->size); } else { assert(chunk.len != 0); } req->proceed_req.bytes_inflight = chunk.len; quicly_stream_sync_sendbuf(req->quic, 1); h2o_quic_schedule_timer(&req->conn->super.super); return 0; } void h2o_httpclient__connect_h3(h2o_httpclient_t **_client, h2o_mem_pool_t *pool, void *data, h2o_httpclient_ctx_t *ctx, h2o_httpclient_connection_pool_t *connpool, h2o_url_t *target, const char *upgrade_to, h2o_httpclient_connect_cb cb) { struct st_h2o_httpclient__h3_conn_t *conn; struct st_h2o_http3client_req_t *req; assert(upgrade_to == NULL || upgrade_to == h2o_httpclient_upgrade_to_connect); if ((conn = find_connection(connpool, target)) == NULL) conn = create_connection(ctx, connpool, target); req = h2o_mem_alloc(sizeof(*req)); *req = (struct st_h2o_http3client_req_t){ .super = {pool, ctx, connpool, &req->recvbuf.body, data, NULL, {h2o_gettimeofday(ctx->loop)}, upgrade_to, {0}, {0}, cancel_request, do_get_conn_properties, do_update_window}, .conn = conn, .proceed_req = {.cb = NULL, .bytes_inflight = SIZE_MAX}, }; 
req->super._cb.on_connect = cb; h2o_buffer_init(&req->sendbuf, &h2o_socket_buffer_prototype); h2o_buffer_init(&req->recvbuf.body, &h2o_socket_buffer_prototype); h2o_buffer_init(&req->recvbuf.stream, &h2o_socket_buffer_prototype); if (_client != NULL) *_client = &req->super; if (h2o_http3_has_received_settings(&conn->super)) { start_request(req); h2o_quic_schedule_timer(&conn->super.super); } else { h2o_linklist_insert(&conn->pending_requests, &req->link); } } void h2o_httpclient_http3_notify_connection_update(h2o_quic_ctx_t *ctx, h2o_quic_conn_t *_conn) { struct st_h2o_httpclient__h3_conn_t *conn = (void *)_conn; if (h2o_timer_is_linked(&conn->timeout) && conn->timeout.cb == on_connect_timeout) { /* TODO check connection state? */ h2o_timer_unlink(&conn->timeout); } } static int stream_open_cb(quicly_stream_open_t *self, quicly_stream_t *qs) { if (quicly_stream_is_unidirectional(qs->stream_id)) { h2o_http3_on_create_unidirectional_stream(qs); } else { static const quicly_stream_callbacks_t callbacks = {on_stream_destroy, on_send_shift, on_send_emit, on_send_stop, on_receive, on_receive_reset}; assert(quicly_stream_is_client_initiated(qs->stream_id)); qs->callbacks = &callbacks; } return 0; } quicly_stream_open_t h2o_httpclient_http3_on_stream_open = {stream_open_cb}; static void on_receive_datagram_frame(quicly_receive_datagram_frame_t *self, quicly_conn_t *qc, ptls_iovec_t datagram) { struct st_h2o_httpclient__h3_conn_t *conn = H2O_STRUCT_FROM_MEMBER(struct st_h2o_httpclient__h3_conn_t, super, *quicly_get_data(qc)); uint64_t flow_id; h2o_iovec_t payload; quicly_stream_t *qs; /* decode, validate, get stream */ if ((flow_id = h2o_http3_decode_h3_datagram(&payload, datagram.base, datagram.len)) == UINT64_MAX || !(quicly_stream_is_client_initiated(flow_id) && !quicly_stream_is_unidirectional(flow_id))) { h2o_quic_close_connection(&conn->super.super, H2O_HTTP3_ERROR_GENERAL_PROTOCOL, "invalid DATAGRAM frame"); return; } if ((qs = 
quicly_get_stream(conn->super.super.quic, flow_id)) == NULL) return; struct st_h2o_http3client_req_t *req = qs->data; if (req->on_read_datagrams != NULL) req->on_read_datagrams(&req->super, &payload, 1); } quicly_receive_datagram_frame_t h2o_httpclient_http3_on_receive_datagram_frame = {on_receive_datagram_frame};
8f2b91e77aef3aa1da301b7168ec6051e630fa40
431a5c28b8dfcc7d6ca6f4f97bf370cd770547a7
/src/tmx/Asn_J2735/src/r63/PositionConfidence.c
691168dd9db557c743bd6c7fd35a6ec02f61a09f
[ "Apache-2.0" ]
permissive
usdot-fhwa-OPS/V2X-Hub
134061cfb55d8c83e871f7fd4bbfa5d8d3092eb0
aae33e6a16b8a30e1faee31a7ee863d191be06b8
refs/heads/develop
2023-08-26T10:10:59.989176
2023-08-24T14:58:21
2023-08-24T14:58:21
168,020,929
106
63
null
2023-09-11T20:24:45
2019-01-28T19:16:45
C
UTF-8
C
false
false
2,533
c
PositionConfidence.c
/* * Generated by asn1c-0.9.29 (http://lionet.info/asn1c) * From ASN.1 module "DSRC" * found in "J2735_201603_ASN_CC.asn" * `asn1c -gen-PER -fcompound-names -fincludes-quoted -fskeletons-copy` */ #include "PositionConfidence.h" /* * This type is implemented using NativeEnumerated, * so here we adjust the DEF accordingly. */ static asn_oer_constraints_t asn_OER_type_PositionConfidence_constr_1 CC_NOTUSED = { { 0, 0 }, -1}; asn_per_constraints_t asn_PER_type_PositionConfidence_constr_1 CC_NOTUSED = { { APC_CONSTRAINED, 4, 4, 0, 15 } /* (0..15) */, { APC_UNCONSTRAINED, -1, -1, 0, 0 }, 0, 0 /* No PER value map */ }; static const asn_INTEGER_enum_map_t asn_MAP_PositionConfidence_value2enum_1[] = { { 0, 11, "unavailable" }, { 1, 5, "a500m" }, { 2, 5, "a200m" }, { 3, 5, "a100m" }, { 4, 4, "a50m" }, { 5, 4, "a20m" }, { 6, 4, "a10m" }, { 7, 3, "a5m" }, { 8, 3, "a2m" }, { 9, 3, "a1m" }, { 10, 5, "a50cm" }, { 11, 5, "a20cm" }, { 12, 5, "a10cm" }, { 13, 4, "a5cm" }, { 14, 4, "a2cm" }, { 15, 4, "a1cm" } }; static const unsigned int asn_MAP_PositionConfidence_enum2value_1[] = { 3, /* a100m(3) */ 12, /* a10cm(12) */ 6, /* a10m(6) */ 15, /* a1cm(15) */ 9, /* a1m(9) */ 2, /* a200m(2) */ 11, /* a20cm(11) */ 5, /* a20m(5) */ 14, /* a2cm(14) */ 8, /* a2m(8) */ 1, /* a500m(1) */ 10, /* a50cm(10) */ 4, /* a50m(4) */ 13, /* a5cm(13) */ 7, /* a5m(7) */ 0 /* unavailable(0) */ }; const asn_INTEGER_specifics_t asn_SPC_PositionConfidence_specs_1 = { asn_MAP_PositionConfidence_value2enum_1, /* "tag" => N; sorted by tag */ asn_MAP_PositionConfidence_enum2value_1, /* N => "tag"; sorted by N */ 16, /* Number of elements in the maps */ 0, /* Enumeration is not extensible */ 1, /* Strict enumeration */ 0, /* Native long size */ 0 }; static const ber_tlv_tag_t asn_DEF_PositionConfidence_tags_1[] = { (ASN_TAG_CLASS_UNIVERSAL | (10 << 2)) }; asn_TYPE_descriptor_t asn_DEF_PositionConfidence = { "PositionConfidence", "PositionConfidence", &asn_OP_NativeEnumerated, asn_DEF_PositionConfidence_tags_1, 
sizeof(asn_DEF_PositionConfidence_tags_1) /sizeof(asn_DEF_PositionConfidence_tags_1[0]), /* 1 */ asn_DEF_PositionConfidence_tags_1, /* Same as above */ sizeof(asn_DEF_PositionConfidence_tags_1) /sizeof(asn_DEF_PositionConfidence_tags_1[0]), /* 1 */ { &asn_OER_type_PositionConfidence_constr_1, &asn_PER_type_PositionConfidence_constr_1, NativeEnumerated_constraint }, 0, 0, /* Defined elsewhere */ &asn_SPC_PositionConfidence_specs_1 /* Additional specs */ };
7af0197c6a26d1d022ffe4e69d704241d7e57f67
526b17ad25eec622b8f266034df3506ca1580ce6
/cups/ppd-page.c
1f5860a83c0dc7fce4d427dd7afec68a828acc60
[ "LLVM-exception", "Apache-2.0", "LicenseRef-scancode-warranty-disclaimer", "EPL-1.0", "GPL-2.0-only", "LGPL-2.0-only" ]
permissive
apple/cups
906903c936f9ec702e50dcd9971ec71af90a56fb
ec055da6794984133d48cc376f04e10af62b64dc
refs/heads/master
2023-08-24T17:53:09.249969
2022-06-27T16:22:46
2022-06-27T16:22:46
44,137,852
1,875
570
Apache-2.0
2023-01-19T21:23:20
2015-10-12T22:33:18
C
UTF-8
C
false
false
9,071
c
ppd-page.c
/* * Page size functions for CUPS. * * Copyright 2007-2015 by Apple Inc. * Copyright 1997-2007 by Easy Software Products, all rights reserved. * * Licensed under Apache License v2.0. See the file "LICENSE" for more * information. * * PostScript is a trademark of Adobe Systems, Inc. */ /* * Include necessary headers... */ #include "string-private.h" #include "debug-internal.h" #include "ppd.h" /* * 'ppdPageSize()' - Get the page size record for the named size. */ ppd_size_t * /* O - Size record for page or NULL */ ppdPageSize(ppd_file_t *ppd, /* I - PPD file record */ const char *name) /* I - Size name */ { int i; /* Looping var */ ppd_size_t *size; /* Current page size */ double w, l; /* Width and length of page */ char *nameptr; /* Pointer into name */ struct lconv *loc; /* Locale data */ ppd_coption_t *coption; /* Custom option for page size */ ppd_cparam_t *cparam; /* Custom option parameter */ DEBUG_printf(("2ppdPageSize(ppd=%p, name=\"%s\")", ppd, name)); if (!ppd) { DEBUG_puts("3ppdPageSize: Bad PPD pointer, returning NULL..."); return (NULL); } if (name) { if (!strncmp(name, "Custom.", 7) && ppd->variable_sizes) { /* * Find the custom page size... 
*/ for (i = ppd->num_sizes, size = ppd->sizes; i > 0; i --, size ++) if (!strcmp("Custom", size->name)) break; if (!i) { DEBUG_puts("3ppdPageSize: No custom sizes, returning NULL..."); return (NULL); } /* * Variable size; size name can be one of the following: * * Custom.WIDTHxLENGTHin - Size in inches * Custom.WIDTHxLENGTHft - Size in feet * Custom.WIDTHxLENGTHcm - Size in centimeters * Custom.WIDTHxLENGTHmm - Size in millimeters * Custom.WIDTHxLENGTHm - Size in meters * Custom.WIDTHxLENGTH[pt] - Size in points */ loc = localeconv(); w = _cupsStrScand(name + 7, &nameptr, loc); if (!nameptr || *nameptr != 'x') return (NULL); l = _cupsStrScand(nameptr + 1, &nameptr, loc); if (!nameptr) return (NULL); if (!_cups_strcasecmp(nameptr, "in")) { w *= 72.0; l *= 72.0; } else if (!_cups_strcasecmp(nameptr, "ft")) { w *= 12.0 * 72.0; l *= 12.0 * 72.0; } else if (!_cups_strcasecmp(nameptr, "mm")) { w *= 72.0 / 25.4; l *= 72.0 / 25.4; } else if (!_cups_strcasecmp(nameptr, "cm")) { w *= 72.0 / 2.54; l *= 72.0 / 2.54; } else if (!_cups_strcasecmp(nameptr, "m")) { w *= 72.0 / 0.0254; l *= 72.0 / 0.0254; } size->width = (float)w; size->length = (float)l; size->left = ppd->custom_margins[0]; size->bottom = ppd->custom_margins[1]; size->right = (float)(w - ppd->custom_margins[2]); size->top = (float)(l - ppd->custom_margins[3]); /* * Update the custom option records for the page size, too... */ if ((coption = ppdFindCustomOption(ppd, "PageSize")) != NULL) { if ((cparam = ppdFindCustomParam(coption, "Width")) != NULL) cparam->current.custom_points = (float)w; if ((cparam = ppdFindCustomParam(coption, "Height")) != NULL) cparam->current.custom_points = (float)l; } /* * Return the page size... */ DEBUG_printf(("3ppdPageSize: Returning %p (\"%s\", %gx%g)", size, size->name, size->width, size->length)); return (size); } else { /* * Lookup by name... 
*/ for (i = ppd->num_sizes, size = ppd->sizes; i > 0; i --, size ++) if (!_cups_strcasecmp(name, size->name)) { DEBUG_printf(("3ppdPageSize: Returning %p (\"%s\", %gx%g)", size, size->name, size->width, size->length)); return (size); } } } else { /* * Find default... */ for (i = ppd->num_sizes, size = ppd->sizes; i > 0; i --, size ++) if (size->marked) { DEBUG_printf(("3ppdPageSize: Returning %p (\"%s\", %gx%g)", size, size->name, size->width, size->length)); return (size); } } DEBUG_puts("3ppdPageSize: Size not found, returning NULL"); return (NULL); } /* * 'ppdPageSizeLimits()' - Return the custom page size limits. * * This function returns the minimum and maximum custom page sizes and printable * areas based on the currently-marked (selected) options. * * If the specified PPD file does not support custom page sizes, both * "minimum" and "maximum" are filled with zeroes. * * @since CUPS 1.4/macOS 10.6@ */ int /* O - 1 if custom sizes are supported, 0 otherwise */ ppdPageSizeLimits(ppd_file_t *ppd, /* I - PPD file record */ ppd_size_t *minimum, /* O - Minimum custom size */ ppd_size_t *maximum) /* O - Maximum custom size */ { ppd_choice_t *qualifier2, /* Second media qualifier */ *qualifier3; /* Third media qualifier */ ppd_attr_t *attr; /* Attribute */ float width, /* Min/max width */ length; /* Min/max length */ char spec[PPD_MAX_NAME]; /* Selector for min/max */ /* * Range check input... */ if (!ppd || !ppd->variable_sizes || !minimum || !maximum) { if (minimum) memset(minimum, 0, sizeof(ppd_size_t)); if (maximum) memset(maximum, 0, sizeof(ppd_size_t)); return (0); } /* * See if we have the cupsMediaQualifier2 and cupsMediaQualifier3 attributes... 
*/ cupsArraySave(ppd->sorted_attrs); if ((attr = ppdFindAttr(ppd, "cupsMediaQualifier2", NULL)) != NULL && attr->value) qualifier2 = ppdFindMarkedChoice(ppd, attr->value); else qualifier2 = NULL; if ((attr = ppdFindAttr(ppd, "cupsMediaQualifier3", NULL)) != NULL && attr->value) qualifier3 = ppdFindMarkedChoice(ppd, attr->value); else qualifier3 = NULL; /* * Figure out the current minimum width and length... */ width = ppd->custom_min[0]; length = ppd->custom_min[1]; if (qualifier2) { /* * Try getting cupsMinSize... */ if (qualifier3) { snprintf(spec, sizeof(spec), ".%s.%s", qualifier2->choice, qualifier3->choice); attr = ppdFindAttr(ppd, "cupsMinSize", spec); } else attr = NULL; if (!attr) { snprintf(spec, sizeof(spec), ".%s.", qualifier2->choice); attr = ppdFindAttr(ppd, "cupsMinSize", spec); } if (!attr && qualifier3) { snprintf(spec, sizeof(spec), "..%s", qualifier3->choice); attr = ppdFindAttr(ppd, "cupsMinSize", spec); } if ((attr && attr->value && sscanf(attr->value, "%f%f", &width, &length) != 2) || !attr) { width = ppd->custom_min[0]; length = ppd->custom_min[1]; } } minimum->width = width; minimum->length = length; minimum->left = ppd->custom_margins[0]; minimum->bottom = ppd->custom_margins[1]; minimum->right = width - ppd->custom_margins[2]; minimum->top = length - ppd->custom_margins[3]; /* * Figure out the current maximum width and length... */ width = ppd->custom_max[0]; length = ppd->custom_max[1]; if (qualifier2) { /* * Try getting cupsMaxSize... 
*/ if (qualifier3) { snprintf(spec, sizeof(spec), ".%s.%s", qualifier2->choice, qualifier3->choice); attr = ppdFindAttr(ppd, "cupsMaxSize", spec); } else attr = NULL; if (!attr) { snprintf(spec, sizeof(spec), ".%s.", qualifier2->choice); attr = ppdFindAttr(ppd, "cupsMaxSize", spec); } if (!attr && qualifier3) { snprintf(spec, sizeof(spec), "..%s", qualifier3->choice); attr = ppdFindAttr(ppd, "cupsMaxSize", spec); } if (!attr || (attr->value && sscanf(attr->value, "%f%f", &width, &length) != 2)) { width = ppd->custom_max[0]; length = ppd->custom_max[1]; } } maximum->width = width; maximum->length = length; maximum->left = ppd->custom_margins[0]; maximum->bottom = ppd->custom_margins[1]; maximum->right = width - ppd->custom_margins[2]; maximum->top = length - ppd->custom_margins[3]; /* * Return the min and max... */ cupsArrayRestore(ppd->sorted_attrs); return (1); } /* * 'ppdPageWidth()' - Get the page width for the given size. */ float /* O - Width of page in points or 0.0 */ ppdPageWidth(ppd_file_t *ppd, /* I - PPD file record */ const char *name) /* I - Size name */ { ppd_size_t *size; /* Page size */ if ((size = ppdPageSize(ppd, name)) == NULL) return (0.0); else return (size->width); } /* * 'ppdPageLength()' - Get the page length for the given size. */ float /* O - Length of page in points or 0.0 */ ppdPageLength(ppd_file_t *ppd, /* I - PPD file */ const char *name) /* I - Size name */ { ppd_size_t *size; /* Page size */ if ((size = ppdPageSize(ppd, name)) == NULL) return (0.0); else return (size->length); }
5137ffbc02cb382316c04bbb8e41b9762028a70b
eecd5e4c50d8b78a769bcc2675250576bed34066
/src/mat/tests/ex195.c
ed266c2dad0ab93c3d85f8525f78c5c9ba5feeaf
[ "BSD-2-Clause" ]
permissive
petsc/petsc
3b1a04fea71858e0292f9fd4d04ea11618c50969
9c5460f9064ca60dd71a234a1f6faf93e7a6b0c9
refs/heads/main
2023-08-17T20:51:16.507070
2023-08-17T16:08:06
2023-08-17T16:08:06
8,691,401
341
169
NOASSERTION
2023-03-29T11:02:58
2013-03-10T20:55:21
C
UTF-8
C
false
false
4,645
c
ex195.c
/*
 * ex195.c
 *
 *  Created on: Aug 24, 2015
 *      Author: Fande Kong  <fdkong.jd@gmail.com>
 */

static char help[] = " Demonstrate the use of MatConvert_Nest_AIJ\n";

#include <petscmat.h>

/*
 * Builds a 2x2 MATNEST whose four sub-blocks are copies of a small
 * five-point-stencil matrix, converts it to AIJ, and exercises
 * nest-times-dense matrix products (MatMatMult and the
 * MatProductCreateWithMat reuse path), checking each result with
 * MatMatMultEqual / MatEqual.  Exits nonzero via PetscCheck on mismatch.
 */
int main(int argc, char **args)
{
  Mat         A1, A2, A3, A4, A5, B, C, C1, nest;
  Mat         mata[4];            /* the 2x2 grid of nest sub-blocks */
  Mat         aij;                /* AIJ copy of the nest matrix */
  MPI_Comm    comm;
  PetscInt    m, M, n, istart, iend, ii, i, J, j, K = 10; /* K = dense #cols */
  PetscScalar v;
  PetscMPIInt size;
  PetscBool   equal;

  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &args, (char *)0, help));
  comm = PETSC_COMM_WORLD;
  PetscCallMPI(MPI_Comm_size(comm, &size));

  /* Assemble the matrix for the five point stencil, YET AGAIN */
  PetscCall(MatCreate(comm, &A1));
  m = 2, n = 2;                   /* 2x2 grid -> 4x4 matrix */
  PetscCall(MatSetSizes(A1, PETSC_DECIDE, PETSC_DECIDE, m * n, m * n));
  PetscCall(MatSetFromOptions(A1));
  PetscCall(MatSetUp(A1));
  PetscCall(MatGetOwnershipRange(A1, &istart, &iend));
  for (ii = istart; ii < iend; ii++) {
    v = -1.0;                     /* off-diagonal stencil entries */
    i = ii / n;                   /* grid row of this matrix row */
    j = ii - i * n;               /* grid column of this matrix row */
    if (i > 0) {                  /* neighbor above */
      J = ii - n;
      PetscCall(MatSetValues(A1, 1, &ii, 1, &J, &v, INSERT_VALUES));
    }
    if (i < m - 1) {              /* neighbor below */
      J = ii + n;
      PetscCall(MatSetValues(A1, 1, &ii, 1, &J, &v, INSERT_VALUES));
    }
    if (j > 0) {                  /* neighbor to the left */
      J = ii - 1;
      PetscCall(MatSetValues(A1, 1, &ii, 1, &J, &v, INSERT_VALUES));
    }
    if (j < n - 1) {              /* neighbor to the right */
      J = ii + 1;
      PetscCall(MatSetValues(A1, 1, &ii, 1, &J, &v, INSERT_VALUES));
    }
    v = 4.0;                      /* diagonal entry */
    PetscCall(MatSetValues(A1, 1, &ii, 1, &ii, &v, INSERT_VALUES));
  }
  PetscCall(MatAssemblyBegin(A1, MAT_FINAL_ASSEMBLY));
  PetscCall(MatAssemblyEnd(A1, MAT_FINAL_ASSEMBLY));
  PetscCall(MatView(A1, PETSC_VIEWER_STDOUT_WORLD));

  PetscCall(MatDuplicate(A1, MAT_COPY_VALUES, &A2));
  PetscCall(MatDuplicate(A1, MAT_COPY_VALUES, &A3));
  PetscCall(MatDuplicate(A1, MAT_COPY_VALUES, &A4));

  /*create a nest matrix */
  PetscCall(MatCreate(comm, &nest));
  PetscCall(MatSetType(nest, MATNEST));
  mata[0] = A1, mata[1] = A2, mata[2] = A3, mata[3] = A4;
  PetscCall(MatNestSetSubMats(nest, 2, NULL, 2, NULL, mata));
  PetscCall(MatSetUp(nest));
  PetscCall(MatConvert(nest, MATAIJ, MAT_INITIAL_MATRIX, &aij));
  PetscCall(MatView(aij, PETSC_VIEWER_STDOUT_WORLD));

  /* create a dense matrix */
  PetscCall(MatGetSize(nest, &M, NULL));
  PetscCall(MatGetLocalSize(nest, &m, NULL));
  PetscCall(MatCreateDense(comm, m, PETSC_DECIDE, M, K, NULL, &B));
  PetscCall(MatSetRandom(B, NULL));

  /* C = nest*B_dense */
  PetscCall(MatMatMult(nest, B, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &C));
  PetscCall(MatMatMult(nest, B, MAT_REUSE_MATRIX, PETSC_DEFAULT, &C));
  PetscCall(MatMatMultEqual(nest, B, C, 10, &equal));
  PetscCheck(equal, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Error in C != nest*B_dense");

  /* Test B = nest*C, reuse C and B with MatProductCreateWithMat() */
  /* C has been obtained from nest*B. Clear internal data structures related to factors to prevent circular references */
  PetscCall(MatProductClear(C));
  PetscCall(MatProductCreateWithMat(nest, C, NULL, B));
  PetscCall(MatProductSetType(B, MATPRODUCT_AB));
  PetscCall(MatProductSetFromOptions(B));
  PetscCall(MatProductSymbolic(B));
  PetscCall(MatProductNumeric(B));
  PetscCall(MatMatMultEqual(nest, C, B, 10, &equal));
  PetscCheck(equal, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Error in B != nest*C_dense");

  /* In-place conversion must agree with the earlier out-of-place one */
  PetscCall(MatConvert(nest, MATAIJ, MAT_INPLACE_MATRIX, &nest));
  PetscCall(MatEqual(nest, aij, &equal));
  PetscCheck(equal, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Error in aij != nest");
  PetscCall(MatDestroy(&nest));

  if (size > 1) { /* Do not know why this test fails for size = 1 */
    /* Repeat the product test with a MATTRANSPOSEVIRTUAL sub-block */
    PetscCall(MatCreateTranspose(A1, &A5)); /* A1 is symmetric */
    mata[0] = A5;
    PetscCall(MatCreate(comm, &nest));
    PetscCall(MatSetType(nest, MATNEST));
    PetscCall(MatNestSetSubMats(nest, 2, NULL, 2, NULL, mata));
    PetscCall(MatSetUp(nest));
    PetscCall(MatMatMult(nest, B, MAT_INITIAL_MATRIX, PETSC_DEFAULT, &C1));
    PetscCall(MatMatMult(nest, B, MAT_REUSE_MATRIX, PETSC_DEFAULT, &C1));
    PetscCall(MatMatMultEqual(nest, B, C1, 10, &equal));
    PetscCheck(equal, PETSC_COMM_WORLD, PETSC_ERR_PLIB, "Error in C1 != C");
    PetscCall(MatDestroy(&C1));
    PetscCall(MatDestroy(&A5));
    PetscCall(MatDestroy(&nest));
  }
  PetscCall(MatDestroy(&C));
  PetscCall(MatDestroy(&B));
  PetscCall(MatDestroy(&aij));
  PetscCall(MatDestroy(&A1));
  PetscCall(MatDestroy(&A2));
  PetscCall(MatDestroy(&A3));
  PetscCall(MatDestroy(&A4));
  PetscCall(PetscFinalize());
  return 0;
}

/*TEST

   test:
      nsize: 2

   test:
      suffix: 2

TEST*/
066ce80fa4367207099e9ed4adb9e8c33d4dce5a
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
/SOFTWARE/A64-TERES/linux-a64/arch/blackfin/include/asm/pci.h
74352c4597d92f5802531ac0ca86c08766b6a62f
[ "BSD-3-Clause-Clear", "Linux-syscall-note", "GPL-2.0-only", "GPL-1.0-or-later", "LicenseRef-scancode-free-unknown", "Apache-2.0" ]
permissive
OLIMEX/DIY-LAPTOP
ae82f4ee79c641d9aee444db9a75f3f6709afa92
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
refs/heads/rel3
2023-08-04T01:54:19.483792
2023-04-03T07:18:12
2023-04-03T07:18:12
80,094,055
507
92
Apache-2.0
2023-04-03T07:05:59
2017-01-26T07:25:50
C
UTF-8
C
false
false
413
h
pci.h
/* Changed from asm-m68k version, Lineo Inc. May 2001 */ #ifndef _ASM_BFIN_PCI_H #define _ASM_BFIN_PCI_H #include <asm/scatterlist.h> #include <asm-generic/pci-dma-compat.h> #include <asm-generic/pci.h> #define PCIBIOS_MIN_IO 0x00001000 #define PCIBIOS_MIN_MEM 0x10000000 static inline void pcibios_penalize_isa_irq(int irq) { /* We don't do dynamic PCI IRQ allocation */ } #endif /* _ASM_BFIN_PCI_H */
2385ff31de6dc792f6cdf4317f9354237c655d37
3d144a23e67c839a4df1c073c6a2c842508f16b2
/test/SILOptimizer/Inputs/switch_enum_objc.h
7a79c853e663400a1782b4c96ffa961e8dd34ec9
[ "Apache-2.0", "Swift-exception" ]
permissive
apple/swift
c2724e388959f6623cf6e4ad6dc1cdd875fd0592
98ada1b200a43d090311b72eb45fe8ecebc97f81
refs/heads/main
2023-08-16T10:48:25.985330
2023-08-16T09:00:42
2023-08-16T09:00:42
44,838,949
78,897
15,074
Apache-2.0
2023-09-14T21:19:23
2015-10-23T21:15:07
C++
UTF-8
C
false
false
428
h
switch_enum_objc.h
// Even though these are marked "closed", Swift shouldn't trust it. enum Alpha { AlphaA __attribute__((swift_name("a"))), AlphaB __attribute__((swift_name("b"))), AlphaC __attribute__((swift_name("c"))), AlphaD __attribute__((swift_name("d"))), AlphaE __attribute__((swift_name("e"))) } __attribute__((enum_extensibility(closed))); enum Coin { CoinHeads, CoinTails } __attribute__((enum_extensibility(closed)));
4c92eef911a313a5913fa14fa0923be98226a1b9
e4200b764d0b4ffba65180e54cf84b30ee84efcc
/selfdrive/modeld/models/commonmodel.c
0369d16d5dd05de20466318abfae7342384587a5
[ "LicenseRef-scancode-warranty-disclaimer", "MIT" ]
permissive
kegman/openpilot
c9ba96a72d905956f02c684e065091e023942883
54a8614b5a6451154817a4c6c86141c96103ae47
refs/heads/kegman-0.7
2022-05-22T17:07:16.656336
2020-01-23T16:40:55
2020-01-23T16:40:55
229,979,925
105
212
MIT
2022-03-13T05:47:51
2019-12-24T17:27:11
C
UTF-8
C
false
false
2,421
c
commonmodel.c
#include "commonmodel.h" #include <czmq.h> #include "cereal/gen/c/log.capnp.h" #include "common/mat.h" #include "common/timing.h" void model_input_init(ModelInput* s, int width, int height, cl_device_id device_id, cl_context context) { int err; s->device_id = device_id; s->context = context; transform_init(&s->transform, context, device_id); s->transformed_width = width; s->transformed_height = height; s->transformed_y_cl = clCreateBuffer(s->context, CL_MEM_READ_WRITE, s->transformed_width*s->transformed_height, NULL, &err); assert(err == 0); s->transformed_u_cl = clCreateBuffer(s->context, CL_MEM_READ_WRITE, (s->transformed_width/2)*(s->transformed_height/2), NULL, &err); assert(err == 0); s->transformed_v_cl = clCreateBuffer(s->context, CL_MEM_READ_WRITE, (s->transformed_width/2)*(s->transformed_height/2), NULL, &err); assert(err == 0); s->net_input_size = ((width*height*3)/2)*sizeof(float); s->net_input = clCreateBuffer(s->context, CL_MEM_READ_WRITE, s->net_input_size, (void*)NULL, &err); assert(err == 0); loadyuv_init(&s->loadyuv, context, device_id, s->transformed_width, s->transformed_height); } float *model_input_prepare(ModelInput* s, cl_command_queue q, cl_mem yuv_cl, int width, int height, mat3 transform) { int err; int i = 0; transform_queue(&s->transform, q, yuv_cl, width, height, s->transformed_y_cl, s->transformed_u_cl, s->transformed_v_cl, s->transformed_width, s->transformed_height, transform); loadyuv_queue(&s->loadyuv, q, s->transformed_y_cl, s->transformed_u_cl, s->transformed_v_cl, s->net_input); float *net_input_buf = (float *)clEnqueueMapBuffer(q, s->net_input, CL_TRUE, CL_MAP_READ, 0, s->net_input_size, 0, NULL, NULL, &err); clFinish(q); return net_input_buf; } void model_input_free(ModelInput* s) { transform_destroy(&s->transform); loadyuv_destroy(&s->loadyuv); } float sigmoid(float input) { return 1 / (1 + expf(-input)); } float softplus(float input) { return log1p(expf(input)); }
d9f5d2bc68e01a07e4a8ffccffc21c41b7b726ab
0744dcc5394cebf57ebcba343747af6871b67017
/os/kernel/semaphore/sem_trywait.c
c8e0a1957770040a2cf9e2722874ec4680b1c935
[ "Apache-2.0", "GPL-1.0-or-later", "BSD-3-Clause", "ISC", "MIT", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-other-permissive" ]
permissive
Samsung/TizenRT
96abf62f1853f61fcf91ff14671a5e0c6ca48fdb
1a5c2e00a4b1bbf4c505bbf5cc6a8259e926f686
refs/heads/master
2023-08-31T08:59:33.327998
2023-08-08T06:09:20
2023-08-31T04:38:20
82,517,252
590
719
Apache-2.0
2023-09-14T06:54:49
2017-02-20T04:38:30
C
UTF-8
C
false
false
5,857
c
sem_trywait.c
/**************************************************************************** * * Copyright 2016 Samsung Electronics All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific * language governing permissions and limitations under the License. * ****************************************************************************/ /**************************************************************************** * kernel/semaphore/sem_trywait.c * * Copyright (C) 2007-2009 Gregory Nutt. All rights reserved. * Author: Gregory Nutt <gnutt@nuttx.org> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name NuttX nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include <tinyara/config.h> #include <stdbool.h> #include <semaphore.h> #include <sched.h> #include <errno.h> #include <tinyara/arch.h> #include "sched/sched.h" #include "semaphore/semaphore.h" #ifdef CONFIG_SEMAPHORE_HISTORY #include <tinyara/debug/sysdbg.h> #endif /**************************************************************************** * Pre-processor Definitions ****************************************************************************/ /**************************************************************************** * Private Type Declarations ****************************************************************************/ /**************************************************************************** * Global Variables ****************************************************************************/ /**************************************************************************** * Private Variables ****************************************************************************/ /**************************************************************************** * Private Functions ****************************************************************************/ 
/**************************************************************************** * Public Functions ****************************************************************************/ /**************************************************************************** * Name: sem_trywait * * Description: * This function locks the specified semaphore only if the semaphore is * currently not locked. Otherwise, it locks the semaphore. In either * case, the call returns without blocking. * * Parameters: * sem - the semaphore descriptor * * Return Value: * 0 (OK) or -1 (ERROR) if unsuccessful. If this function returns -1 * (ERROR),then the cause of the failure will be reported in "errno" as: * * EINVAL: Invalid attempt to get the semaphore * EAGAIN: The semaphore is not available. * * Assumptions: * ****************************************************************************/ int sem_trywait(FAR sem_t *sem) { FAR struct tcb_s *rtcb = this_task(); irqstate_t saved_state; int ret = ERROR; /* This API should not be called from interrupt handlers */ DEBUGASSERT(sem != NULL && up_interrupt_context() == false); if ((sem != NULL) && ((sem->flags & FLAGS_INITIALIZED) != 0)) { /* The following operations must be performed with interrupts disabled * because sem_post() may be called from an interrupt handler. */ saved_state = irqsave(); /* If the semaphore is available, give it to the requesting task */ if (sem->semcount > 0) { /* It is, let the task take the semaphore */ sem->semcount--; sem_addholder(sem); rtcb->waitsem = NULL; #ifdef CONFIG_SEMAPHORE_HISTORY save_semaphore_history(sem, (void *)rtcb, SEM_ACQUIRE); #endif ret = OK; } else { /* Semaphore is not available */ set_errno(EAGAIN); } /* Interrupts may now be enabled. */ irqrestore(saved_state); } else { set_errno(EINVAL); } return ret; }
6ada87f0655ac98bad8405dad3f74dd8131307c6
0cd893fddf3a43459030292dad953c3810713513
/clif/testing/t10.h
7ae1a9e50459560c92e5ea2a4dc18615bb13fdae
[ "Apache-2.0" ]
permissive
google/clif
8fc6d75f7e4a1a443f9bd596d05ea3e4c820e1c4
7501b3ca70a92a7a15022b3035bc4b1706f7569a
refs/heads/main
2023-08-19T06:26:17.321706
2023-08-18T23:18:45
2023-08-18T23:20:30
88,560,371
1,026
146
Apache-2.0
2023-08-31T23:41:41
2017-04-17T23:36:06
C++
UTF-8
C
false
false
951
h
t10.h
/* * Copyright 2017 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef CLIF_TESTING_T10_H_ #define CLIF_TESTING_T10_H_ #include <Python.h> #include "clif/testing/t2.h" K* CreateK() { static K obj(0); return &obj; } struct A : K { A() : K(0) {} PyObject* __str__() { return PyUnicode_FromString("A"); } }; inline PyObject* ConversionFunctionCheck(PyObject* x) { return x; } #endif // CLIF_TESTING_T10_H_
cb4966cb2c2b7d0d00d6a9d1f39e6b6848fbe2bf
de21f9075f55640514c29ef0f1fe3f0690845764
/regression/cprover/pointers/struct_pointer3.c
139ed6ca1011336c33c19acf868f549a4434dd27
[ "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "BSD-4-Clause" ]
permissive
diffblue/cbmc
975a074ac445febb3b5715f8792beb545522dc18
decd2839c2f51a54b2ad0f3e89fdc1b4bf78cd16
refs/heads/develop
2023-08-31T05:52:05.342195
2023-08-30T13:31:51
2023-08-30T13:31:51
51,877,056
589
309
NOASSERTION
2023-09-14T18:49:17
2016-02-16T23:03:52
C++
UTF-8
C
false
false
150
c
struct_pointer3.c
struct S { int x, y; }; int main() { struct S a; struct S *p = &a; __CPROVER_assert(p->x == a.x, "property 1"); // should pass return 0; }
5d6463a33e08ee8dedc4f98fadf91c1b3fb057b8
aa3befea459382dc5c01c925653d54f435b3fb0f
/arch/arm/src/sam34/hardware/sam_gpbr.h
05e98ad56882ae123611fe99dc1723c69fd5e724
[ "MIT-open-group", "BSD-3-Clause", "HPND-sell-variant", "BSD-4-Clause-UC", "LicenseRef-scancode-warranty-disclaimer", "MIT-0", "LicenseRef-scancode-bsd-atmel", "LicenseRef-scancode-gary-s-brown", "LicenseRef-scancode-proprietary-license", "SunPro", "MIT", "LicenseRef-scancode-public-domain-disclaimer", "LicenseRef-scancode-other-permissive", "HPND", "ISC", "Apache-2.0", "LicenseRef-scancode-public-domain", "BSD-2-Clause", "GPL-1.0-or-later", "CC-BY-2.0", "CC-BY-4.0" ]
permissive
apache/nuttx
14519a7bff4a87935d94fb8fb2b19edb501c7cec
606b6d9310fb25c7d92c6f95bf61737e3c79fa0f
refs/heads/master
2023-08-25T06:55:45.822534
2023-08-23T16:03:31
2023-08-24T21:25:47
228,103,273
407
241
Apache-2.0
2023-09-14T18:26:05
2019-12-14T23:27:55
C
UTF-8
C
false
false
4,541
h
sam_gpbr.h
/**************************************************************************** * arch/arm/src/sam34/hardware/sam_gpbr.h * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. The * ASF licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * ****************************************************************************/ #ifndef __ARCH_ARM_SRC_SAM34_HARDWARE_SAM_GPBR_H #define __ARCH_ARM_SRC_SAM34_HARDWARE_SAM_GPBR_H /**************************************************************************** * Included Files ****************************************************************************/ #include <nuttx/config.h> #include "chip.h" #include "hardware/sam_memorymap.h" /**************************************************************************** * Pre-processor Definitions ****************************************************************************/ /* GPBR register offsets ****************************************************/ #define SAM_GPBR_OFFSET(n) ((n)<<2) /* General purpose back-up registers */ #define SAM_GPBR0_OFFSET 0x00 #define SAM_GPBR1_OFFSET 0x04 #define SAM_GPBR2_OFFSET 0x08 #define SAM_GPBR3_OFFSET 0x0c #define SAM_GPBR4_OFFSET 0x10 #define SAM_GPBR5_OFFSET 0x14 #define SAM_GPBR6_OFFSET 0x18 #define SAM_GPBR7_OFFSET 0x1c #if defined(CONFIG_ARCH_CHIP_SAM4E) # define SAM_GPBR8_OFFSET 0x20 # define SAM_GPBR9_OFFSET 
0x24 # define SAM_GPBR10_OFFSET 0x28 # define SAM_GPBR11_OFFSET 0x2c # define SAM_GPBR12_OFFSET 0x30 # define SAM_GPBR13_OFFSET 0x34 # define SAM_GPBR14_OFFSET 0x38 # define SAM_GPBR15_OFFSET 0x3c # define SAM_GPBR16_OFFSET 0x40 # define SAM_GPBR17_OFFSET 0x44 # define SAM_GPBR18_OFFSET 0x48 # define SAM_GPBR19_OFFSET 0x4c #endif /* GPBR register addresses **************************************************/ #define SAM_GPBR(n)) (SAM_GPBR_BASE+SAM_GPBR_OFFSET(n)) #define SAM_GPBR0 (SAM_GPBR_BASE+SAM_GPBR0_OFFSET) #define SAM_GPBR1 (SAM_GPBR_BASE+SAM_GPBR1_OFFSET) #define SAM_GPBR2 (SAM_GPBR_BASE+SAM_GPBR2_OFFSET) #define SAM_GPBR3 (SAM_GPBR_BASE+SAM_GPBR3_OFFSET) #define SAM_GPBR4 (SAM_GPBR_BASE+SAM_GPBR4_OFFSET) #define SAM_GPBR5 (SAM_GPBR_BASE+SAM_GPBR5_OFFSET) #define SAM_GPBR6 (SAM_GPBR_BASE+SAM_GPBR6_OFFSET) #define SAM_GPBR7 (SAM_GPBR_BASE+SAM_GPBR7_OFFSET) #if defined(CONFIG_ARCH_CHIP_SAM4E) # define SAM_GPBR8 (SAM_GPBR_BASE+SAM_GPBR8_OFFSET) # define SAM_GPBR9 (SAM_GPBR_BASE+SAM_GPBR9_OFFSET) # define SAM_GPBR10 (SAM_GPBR_BASE+SAM_GPBR10_OFFSET) # define SAM_GPBR11 (SAM_GPBR_BASE+SAM_GPBR11_OFFSET) # define SAM_GPBR12 (SAM_GPBR_BASE+SAM_GPBR12_OFFSET) # define SAM_GPBR13 (SAM_GPBR_BASE+SAM_GPBR13_OFFSET) # define SAM_GPBR14 (SAM_GPBR_BASE+SAM_GPBR14_OFFSET) # define SAM_GPBR15 (SAM_GPBR_BASE+SAM_GPBR15_OFFSET) # define SAM_GPBR16 (SAM_GPBR_BASE+SAM_GPBR16_OFFSET) # define SAM_GPBR17 (SAM_GPBR_BASE+SAM_GPBR17_OFFSET) # define SAM_GPBR18 (SAM_GPBR_BASE+SAM_GPBR18_OFFSET) # define SAM_GPBR19 (SAM_GPBR_BASE+SAM_GPBR19_OFFSET) #endif /* GPBR register bit definitions ********************************************/ /* All 32-bit values */ /**************************************************************************** * Public Types ****************************************************************************/ /**************************************************************************** * Public Data 
****************************************************************************/ /**************************************************************************** * Public Functions Prototypes ****************************************************************************/ #endif /* __ARCH_ARM_SRC_SAM34_HARDWARE_SAM_GPBR_H */
ebb8ef6553e633747fd993cdedd318e8d486e07f
20e1c2f5cfac01f6b007124fa7792dd69751a6bb
/src/ut-stubs/osapi-idmap-handlers.c
296fb0705231028c0fe1729a92807a21fb15f04d
[ "Apache-2.0" ]
permissive
nasa/osal
71f159b767ba4a8c39df48f238b4f296cc571ac8
99e3b4007da51031b521d90390526e123ff740b4
refs/heads/main
2023-09-01T06:33:53.932829
2023-08-18T14:27:02
2023-08-18T14:27:02
4,814,601
493
229
Apache-2.0
2023-09-13T13:57:40
2012-06-27T23:10:37
C
UTF-8
C
false
false
7,257
c
osapi-idmap-handlers.c
/************************************************************************
 * NASA Docket No. GSC-18,719-1, and identified as “core Flight System: Bootes”
 *
 * Copyright (c) 2020 United States Government as represented by the
 * Administrator of the National Aeronautics and Space Administration.
 * All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License. You may obtain
 * a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ************************************************************************/

/**
 * \file
 *
 * Stub implementations for the functions defined in the OSAL API
 *
 * The stub implementation can be used for unit testing applications built
 * on top of OSAL.  The stubs do not do any real function, but allow
 * the return code to be crafted such that error paths in the application
 * can be executed.
 *
 * NOTE: The Object ID manipulation calls would not be called by applications.
 * However stubs are still defined in order to support things such as
 * coverage testing of the low-level implementation.  This set of stubs
 * is implemented separately here as it is only needed when coverage testing
 * OSAL itself (not for coverage testing other units).
 */

#include "osapi-idmap.h" /* OSAL public API for this subsystem */
#include "utstub-helpers.h"

/*
 * -----------------------------------------------------------------
 * Default handler implementation for 'OS_ObjectIdToArrayIndex' stub
 *
 * If the test case did not stage an index via UT_Stub_CopyToLocal and
 * the stub status is success, derive a valid index from the ID itself.
 * -----------------------------------------------------------------
 */
void UT_DefaultHandler_OS_ObjectIdToArrayIndex(void *UserObj, UT_EntryKey_t FuncKey, const UT_StubContext_t *Context)
{
    osal_id_t      object_id  = UT_Hook_GetArgValueByName(Context, "object_id", osal_id_t);
    osal_index_t * ArrayIndex = UT_Hook_GetArgValueByName(Context, "ArrayIndex", osal_index_t *);
    int32          Status;
    osal_objtype_t checktype;
    uint32         tempserial;

    UT_Stub_GetInt32StatusCode(Context, &Status);

    if (Status == 0 &&
        UT_Stub_CopyToLocal(UT_KEY(OS_ObjectIdToArrayIndex), ArrayIndex, sizeof(*ArrayIndex)) < sizeof(*ArrayIndex))
    {
        /* this needs to output something valid or code will break */
        UT_ObjIdDecompose(object_id, &tempserial, &checktype);
        *ArrayIndex = OSAL_INDEX_C(tempserial);
    }
}

/*
 * -----------------------------------------------------------------
 * Default handler implementation for 'OS_GetResourceName' stub
 *
 * Copies a staged name into the caller's buffer, or returns an empty
 * string when nothing was staged.
 * -----------------------------------------------------------------
 */
void UT_DefaultHandler_OS_GetResourceName(void *UserObj, UT_EntryKey_t FuncKey, const UT_StubContext_t *Context)
{
    char * buffer      = UT_Hook_GetArgValueByName(Context, "buffer", char *);
    size_t buffer_size = UT_Hook_GetArgValueByName(Context, "buffer_size", size_t);
    int32  status;

    UT_Stub_GetInt32StatusCode(Context, &status);

    if (status == OS_SUCCESS)
    {
        if (buffer_size > 0 && UT_Stub_CopyToLocal(UT_KEY(OS_GetResourceName), buffer, buffer_size) == 0)
        {
            /* return an empty string by default */
            buffer[0] = 0;
        }
    }
}

/*
 * -----------------------------------------------------------------
 * Default handler implementation for 'OS_ConvertToArrayIndex' stub
 *
 * On success, produces an index within range for the object's type;
 * on failure, deliberately poisons the output.
 * -----------------------------------------------------------------
 */
void UT_DefaultHandler_OS_ConvertToArrayIndex(void *UserObj, UT_EntryKey_t FuncKey, const UT_StubContext_t *Context)
{
    osal_id_t      object_id  = UT_Hook_GetArgValueByName(Context, "object_id", osal_id_t);
    osal_index_t * ArrayIndex = UT_Hook_GetArgValueByName(Context, "ArrayIndex", osal_index_t *);
    osal_objtype_t ObjType;
    int32          status;
    uint32         tempserial;

    UT_Stub_GetInt32StatusCode(Context, &status);

    if (status == OS_SUCCESS)
    {
        UT_ObjIdDecompose(object_id, &tempserial, &ObjType);
        if (ObjType != OS_OBJECT_TYPE_UNDEFINED && ObjType < OS_OBJECT_TYPE_USER)
        {
            tempserial %= UT_MAXOBJS[ObjType];
        }
    }
    else
    {
        /*
         * If set to fail, then set the output to something bizarre - if the code
         * actually tries to use this, chances are it will segfault and be fixed
         */
        tempserial = 0xDEADBEEFU;
    }

    *ArrayIndex = OSAL_INDEX_C(tempserial);
}

/*
 * -----------------------------------------------------------------
 * Default handler implementation for 'OS_ForEachObjectOfType' stub
 *
 * Invokes the callback once for each osal_id_t the test staged into
 * the stub's data buffer.
 * -----------------------------------------------------------------
 */
void UT_DefaultHandler_OS_ForEachObjectOfType(void *UserObj, UT_EntryKey_t FuncKey, const UT_StubContext_t *Context)
{
    OS_ArgCallback_t callback_ptr = UT_Hook_GetArgValueByName(Context, "callback_ptr", OS_ArgCallback_t);
    void *           callback_arg = UT_Hook_GetArgValueByName(Context, "callback_arg", void *);
    osal_id_t        NextId;
    size_t           IdSize;

    while (1)
    {
        IdSize = UT_Stub_CopyToLocal(UT_KEY(OS_ForEachObjectOfType), &NextId, sizeof(NextId));
        if (IdSize < sizeof(NextId))
        {
            break;
        }
        (*callback_ptr)(NextId, callback_arg);
    }
}

/*
 * -----------------------------------------------------------------
 * Default handler implementation for 'OS_ForEachObject' stub
 *
 * Same pattern as the OfType variant.  Now uses the UT_KEY() macro
 * like every other handler in this file, rather than a hand-written
 * (UT_EntryKey_t)&OS_ForEachObject cast.
 * -----------------------------------------------------------------
 */
void UT_DefaultHandler_OS_ForEachObject(void *UserObj, UT_EntryKey_t FuncKey, const UT_StubContext_t *Context)
{
    OS_ArgCallback_t callback_ptr = UT_Hook_GetArgValueByName(Context, "callback_ptr", OS_ArgCallback_t);
    void *           callback_arg = UT_Hook_GetArgValueByName(Context, "callback_arg", void *);
    osal_id_t        NextId;
    size_t           IdSize;

    while (1)
    {
        IdSize = UT_Stub_CopyToLocal(UT_KEY(OS_ForEachObject), &NextId, sizeof(NextId));
        if (IdSize < sizeof(NextId))
        {
            break;
        }
        (*callback_ptr)(NextId, callback_arg);
    }
}

/*
 * -----------------------------------------------------------------
 * Default handler implementation for 'OS_IdentifyObject' stub
 *
 * Returns either a forced object type (when the test set a status
 * code) or the type actually encoded in the ID.
 * -----------------------------------------------------------------
 */
void UT_DefaultHandler_OS_IdentifyObject(void *UserObj, UT_EntryKey_t FuncKey, const UT_StubContext_t *Context)
{
    osal_id_t      object_id = UT_Hook_GetArgValueByName(Context, "object_id", osal_id_t);
    osal_objtype_t ObjType;
    uint32         checkindx;
    int32          status;

    if (UT_Stub_GetInt32StatusCode(Context, &status))
    {
        /* Use the "status code" as the object type if it was set */
        ObjType = status;
    }
    else
    {
        /* output a type that will actually match the ID */
        UT_ObjIdDecompose(object_id, &checkindx, &ObjType);
    }

    UT_Stub_SetReturnValue(FuncKey, ObjType);
}
5ae00f1d0285e028bd474b13f4f0c420c5116f69
47c7e467826c9c02c36008e671605358d29f04b8
/cpg-language-cxx/src/test/resources/another-include.h
635026161f7c98bd9f9251d0532a7b4ba3f11d60
[ "LicenseRef-scancode-generic-cla", "Apache-2.0" ]
permissive
Fraunhofer-AISEC/cpg
c055c28d844fa486be8d682093e549405e8788ec
2ff78e10916eee5a4754925e3875d85eba313c8b
refs/heads/main
2023-08-31T09:07:38.441380
2023-08-30T17:22:24
2023-08-30T17:22:24
225,386,107
183
55
Apache-2.0
2023-09-13T00:44:00
2019-12-02T13:51:37
Kotlin
UTF-8
C
false
false
85
h
another-include.h
/* This file is just there to demonstrate include black- and white-listing */
d11bb79a5d1d013412815dfb4e14022ceb56dd21
ea49dd7d31d2e0b65ce6aadf1274f3bb70abfaf9
/problems/0551_Student_Attendance_Record_I/nicefuture.c
e3ecf825e2945ab108395280dff52785d0ad0043
[]
no_license
yychuyu/LeetCode
907a3d7d67ada9714e86103ac96422381e75d683
48384483a55e120caf5d8d353e9aa287fce3cf4a
refs/heads/master
2020-03-30T15:02:12.492378
2019-06-19T01:52:45
2019-06-19T01:52:45
151,345,944
134
331
null
2019-08-01T02:56:10
2018-10-03T01:26:28
C++
UTF-8
C
false
false
924
c
nicefuture.c
bool checkRecord(char* s) { char *p = s; int absent = 0, late = 0; while (*p != '\0') { if (*p == 'A') { absent++; late = 0; } else if (*p =='L') late++; else late = 0; if (absent > 1 || late > 2) return false; p++; } return true; } /* bool checkRecord(char* s) { char *p = s; int absent = 0, late = 0; while (*p != '\0') { if (*p == 'P') { p++; } if (*p != '\0' && *p == 'A') { absent++; p++; } if (*p != '\0' && *p == 'L') { int cnt = 0; for (int i=0; *p!='\0' && i<3; i++,p++) { // putchar(*p); if (*p == 'L') cnt++; else break; } if (cnt >= 3) late = 1; } if (absent > 1 || late == 1) return false; } return true; } */
aa6e0dd8a5c8c2acb2d7aeda94af9947e22667fb
6e1cde66aa5a649138babe297293962cdf97743e
/src/pl-util.c
883f041d8411e841bb58669d8887aa7f50b0fc03
[ "BSD-2-Clause" ]
permissive
SWI-Prolog/swipl-devel
db56676481984addc09f4f228bc1c41f7f53759f
41ac4a569c8e6c3d3b93a21449403962e3de1ece
refs/heads/master
2023-09-01T03:49:40.696481
2023-08-30T18:12:56
2023-08-30T18:12:56
17,516,793
935
328
NOASSERTION
2023-08-26T14:32:33
2014-03-07T14:43:14
C
UTF-8
C
false
false
7,890
c
pl-util.c
/* Part of SWI-Prolog Author: Jan Wielemaker E-mail: J.Wielemaker@vu.nl WWW: http://www.swi-prolog.org Copyright (c) 1985-2015, University of Amsterdam VU University Amsterdam All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "pl-util.h" #include "pl-fli.h" #include "pl-proc.h" #include "os/pl-ctype.h" #include "os/pl-utf8.h" static bool isUserSystemPredicate(Definition def); /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - These functions return a user-printable name of a predicate as name/arity or module:name/arity. The result is stored in the foreign buffer ring, so we are thread-safe, but the result needs to be copied before the ring is exhausted. See buffer_string() for details. For wide character versions, we use UTF-8 encoding. 
This isn't very elegant, but these functions are for debugging only. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ char * procedureName(Procedure proc) { return predicateName(proc->definition); } #define fetch_text(s, i) \ ((s)->encoding == ENC_ISO_LATIN_1 ? (s)->text.t[i]&0xff \ : (s)->text.w[i]) static const char * text_summary(PL_chars_t *txt, char q, unsigned int maxlen) { Buffer b; size_t i; if ( txt->encoding == ENC_ISO_LATIN_1 && txt->length < maxlen && !q ) { const unsigned char *s = (const unsigned char*) txt->text.t; const unsigned char *e = &s[txt->length]; for( ; s<e; s++ ) { if ( *s >= 0x80 ) break; } if ( s == e ) return txt->text.t; } b = findBuffer(BUF_STACK); if ( q ) { maxlen -= 2; addBuffer(b, q, char); } for(i=0; i<txt->length; i++) { char buf[6]; char *e; e = utf8_put_char(buf, fetch_text(txt, i)); addMultipleBuffer(b, buf, e-buf, char); if ( i == maxlen - 6 ) { addMultipleBuffer(b, "...", 3, char); i = txt->length - 4; maxlen = 0; /* make sure not to trap again */ } } if ( q ) addBuffer(b, q, char); addBuffer(b, 0, char); return baseBuffer(b, char); } const char * atom_summary(atom_t name, unsigned int maxlen) { PL_chars_t txt; if ( !get_atom_text(name, &txt) ) { if ( isNil(name) ) return "[]"; return "<blob>"; } return text_summary(&txt, 0, maxlen); } const char * string_summary(word string, unsigned int maxlen) { GET_LD PL_chars_t txt; if ( !get_string_text(string, &txt) ) return NULL; return text_summary(&txt, '"', maxlen); } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - predicateName() returns an UTF-8 representation of the name of the predicate. 
Note that we need for the buffer 6*max summary length, - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ char * predicateName(Definition def) { char tmp[650]; char *e = tmp; if ( !def ) return "(nil)"; if ( def->module != MODULE_user && !isUserSystemPredicate(def) ) { if ( def->module && GD->cleaning != CLN_DATA ) strcpy(e, atom_summary(def->module->name, 50)); else strcpy(e, "(nil)"); e += strlen(e); *e++ = ':'; } strcpy(e, atom_summary(def->functor->name, 50)); e += strlen(e); *e++ = '/'; Ssprintf(e, "%zd", def->functor->arity); return buffer_string(tmp, BUF_STACK); } char * functorName(functor_t f) { char tmp[650]; char *e = tmp; FunctorDef fd; if ( tagex(f) != (TAG_ATOM|STG_GLOBAL) ) return "<not-a-functor>"; fd = valueFunctor(f); strcpy(e, atom_summary(fd->name, 50)); e += strlen(e); *e++ = '/'; Ssprintf(e, "%zd", fd->arity); return buffer_string(tmp, BUF_STACK); } char * keyName(word key) { if ( tagex(key) == (TAG_ATOM|STG_GLOBAL) ) { return functorName(key); } else { char tmp[650]; if ( !key ) { strcpy(tmp, "<nil>"); } else { switch(tag(key)) { case TAG_INTEGER: case TAG_FLOAT: { GET_LD number n; get_number(key, &n); switch(n.type) { case V_INTEGER: Ssprintf(tmp, "%" PRIi64, n.value.i); break; case V_FLOAT: Ssprintf(tmp, "%f", n.value.f); break; default: strcpy(tmp, "<number>"); } break; } case TAG_ATOM: strcpy(tmp, atom_summary(key, 30)); break; case TAG_STRING: strcpy(tmp, string_summary(key, 30)); break; default: assert(0); } } return buffer_string(tmp, BUF_STACK); } } char * sourceFileName(SourceFile sf) { char tmp[650]; strcpy(tmp, atom_summary(sf->name, 50)); return buffer_string(tmp, BUF_STACK); } char * generationName(gen_t gen) { char tmp[256]; if ( gen == GEN_MAX ) return "GEN_MAX"; if ( gen == GEN_INFINITE ) return "GEN_INFINITE"; if ( gen > GEN_TRANSACTION_BASE ) { int tid = (gen-GEN_TRANSACTION_BASE)/GEN_TRANSACTION_SIZE; int64_t g2 = (gen-GEN_TRANSACTION_BASE)%GEN_TRANSACTION_SIZE; Ssprintf(tmp, "%d@%" PRIi64, tid, 
(int64_t)g2); } else { Ssprintf(tmp, "%" PRIi64, (int64_t)gen); } return buffer_string(tmp, BUF_STACK); } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - clauseNo() returns the clause index of the given clause at the given generation. Use the current generation if gen is 0; - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ int clauseNo(Clause cl, gen_t gen) { GET_LD int i; ClauseRef cref; Definition def = cl->predicate; if ( !gen ) gen = global_generation(); acquire_def(def); for(i=1, cref=def->impl.clauses.first_clause; cref; cref=cref->next) { Clause c = cref->value.clause; if ( visibleClause(c, gen) ) { if ( c == cl ) { release_def(def); return i; } } i++; } release_def(def); return -1; } /* succeeds if proc is a system predicate exported to the public module. ** Fri Sep 2 17:03:43 1988 jan@swivax.UUCP (Jan Wielemaker) */ static bool isUserSystemPredicate(Definition def) { GET_LD if ( true(def, P_LOCKED) && GD->cleaning != CLN_DATA && isCurrentProcedure(def->functor->functor, MODULE_user) ) succeed; fail; } int notImplemented(char *name, int arity) { return PL_error(NULL, 0, NULL, ERR_NOT_IMPLEMENTED_PROC, name, arity); } word setBoolean(int *flag, term_t old, term_t new) { if ( !PL_unify_bool_ex(old, *flag) || !PL_get_bool_ex(new, flag) ) fail; succeed; } word setInteger(int *flag, term_t old, term_t new) { GET_LD if ( !PL_unify_integer(old, *flag) || !PL_get_integer_ex(new, flag) ) fail; succeed; }
01e642e07b412fdcb10b219d758c56bc9aeac153
ed4e5caeb2b5c8f0aa68dbdb1fae5a4556d5b10f
/lib/GxEPD/src/imglib/gridicons_minus_small.h
f3736c4d04b42ea98af39461b3a9e6cd21c6c263
[ "MIT", "GPL-3.0-only" ]
permissive
Xinyuan-LilyGO/T5-Ink-Screen-Series
4d8fb8d98c981faf5efdcb414fa9cb0e66a61eb2
fba4a4afd316b7e014c2080b6f6a5bed9c672b59
refs/heads/master
2021-12-21T22:32:53.789295
2021-03-26T00:58:24
2021-03-26T00:58:24
168,290,783
148
45
MIT
2021-12-18T09:37:20
2019-01-30T06:24:14
C
UTF-8
C
false
false
668
h
gridicons_minus_small.h
#if defined(ESP8266) || defined(ESP32) #include <pgmspace.h> #else #include <avr/pgmspace.h> #endif // 24 x 24 gridicons_minus_small const unsigned char gridicons_minus_small[] PROGMEM = { /* 0X01,0X01,0XB4,0X00,0X40,0X00, */ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x3F, 0xFC, 0x00, 0x3F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5904c5077deae74eb70a5b072a4f32c747a5dd42
83e7dc1281874779c46dfadcc15b2bb66d8e599c
/src/misc/lv_fs.c
71df1e8f887d92f2ebc3200668c019cf7e6f581c
[ "MIT" ]
permissive
lvgl/lvgl
7d51d6774d6ac71df7101fc7ded56fea4b70be01
5c984b4a5364b6455966eb3a860153806c51626f
refs/heads/master
2023-08-30T22:39:20.283922
2023-08-30T19:55:29
2023-08-30T19:55:29
60,667,730
9,296
2,218
MIT
2023-09-14T17:59:34
2016-06-08T04:14:34
C
UTF-8
C
false
false
14,356
c
lv_fs.c
/** * @file lv_fs.c * */ /********************* * INCLUDES *********************/ #include "lv_fs.h" #include <string.h> #include "../misc/lv_assert.h" #include "../stdlib/lv_string.h" #include "lv_ll.h" #include "../core/lv_global.h" /********************* * DEFINES *********************/ #define fsdrv_ll_p &(LV_GLOBAL_DEFAULT()->fsdrv_ll) /********************** * TYPEDEFS **********************/ /********************** * STATIC PROTOTYPES **********************/ static const char * lv_fs_get_real_path(const char * path); /********************** * STATIC VARIABLES **********************/ /********************** * MACROS **********************/ /********************** * GLOBAL FUNCTIONS **********************/ void _lv_fs_init(void) { _lv_ll_init(fsdrv_ll_p, sizeof(lv_fs_drv_t *)); } bool lv_fs_is_ready(char letter) { lv_fs_drv_t * drv = lv_fs_get_drv(letter); if(drv == NULL) return false; /*An unknown driver in not ready*/ if(drv->ready_cb == NULL) return true; /*Assume the driver is always ready if no handler provided*/ return drv->ready_cb(drv); } lv_fs_res_t lv_fs_open(lv_fs_file_t * file_p, const char * path, lv_fs_mode_t mode) { if(path == NULL) { LV_LOG_WARN("Can't open file: path is NULL"); return LV_FS_RES_INV_PARAM; } char letter = path[0]; lv_fs_drv_t * drv = lv_fs_get_drv(letter); if(drv == NULL) { LV_LOG_WARN("Can't open file (%s): unknown driver letter", path); return LV_FS_RES_NOT_EX; } if(drv->ready_cb) { if(drv->ready_cb(drv) == false) { LV_LOG_WARN("Can't open file (%s): driver not ready", path); return LV_FS_RES_HW_ERR; } } if(drv->open_cb == NULL) { LV_LOG_WARN("Can't open file (%s): open function not exists", path); return LV_FS_RES_NOT_IMP; } const char * real_path = lv_fs_get_real_path(path); void * file_d = drv->open_cb(drv, real_path, mode); if(file_d == NULL || file_d == (void *)(-1)) { return LV_FS_RES_UNKNOWN; } file_p->drv = drv; file_p->file_d = file_d; if(drv->cache_size) { file_p->cache = lv_malloc(sizeof(lv_fs_file_cache_t)); 
LV_ASSERT_MALLOC(file_p->cache); lv_memzero(file_p->cache, sizeof(lv_fs_file_cache_t)); file_p->cache->start = UINT32_MAX; /*Set an invalid range by default*/ file_p->cache->end = UINT32_MAX - 1; } return LV_FS_RES_OK; } lv_fs_res_t lv_fs_close(lv_fs_file_t * file_p) { if(file_p->drv == NULL) { return LV_FS_RES_INV_PARAM; } if(file_p->drv->close_cb == NULL) { return LV_FS_RES_NOT_IMP; } lv_fs_res_t res = file_p->drv->close_cb(file_p->drv, file_p->file_d); if(file_p->drv->cache_size && file_p->cache) { if(file_p->cache->buffer) { lv_free(file_p->cache->buffer); } lv_free(file_p->cache); } file_p->file_d = NULL; file_p->drv = NULL; file_p->cache = NULL; return res; } static lv_fs_res_t lv_fs_read_cached(lv_fs_file_t * file_p, char * buf, uint32_t btr, uint32_t * br) { lv_fs_res_t res = LV_FS_RES_OK; uint32_t file_position = file_p->cache->file_position; uint32_t start = file_p->cache->start; uint32_t end = file_p->cache->end; char * buffer = file_p->cache->buffer; uint16_t buffer_size = file_p->drv->cache_size; if(start <= file_position && file_position <= end) { /* Data can be read from cache buffer */ uint32_t buffer_remaining_length = (uint32_t)end - file_position + 1; uint16_t buffer_offset = (end - start) - buffer_remaining_length + 1; if(btr <= buffer_remaining_length) { /*Data is in cache buffer, and buffer end not reached, no need to read from FS*/ lv_memcpy(buf, buffer + buffer_offset, btr); *br = btr; } else { /*First part of data is in cache buffer, but we need to read rest of data from FS*/ lv_memcpy(buf, buffer + buffer_offset, buffer_remaining_length); file_p->drv->seek_cb(file_p->drv, file_p->file_d, file_p->cache->end + 1, LV_FS_SEEK_SET); uint32_t bytes_read_to_buffer = 0; if(btr - buffer_remaining_length > buffer_size) { /*If remaining data chuck is bigger than buffer size, then do not use cache, instead read it directly from FS*/ res = file_p->drv->read_cb(file_p->drv, file_p->file_d, (void *)(buf + buffer_remaining_length), btr - 
buffer_remaining_length, &bytes_read_to_buffer); } else { /*If remaining data chunk is smaller than buffer size, then read into cache buffer*/ res = file_p->drv->read_cb(file_p->drv, file_p->file_d, (void *)buffer, buffer_size, &bytes_read_to_buffer); file_p->cache->start = file_p->cache->end + 1; file_p->cache->end = file_p->cache->start + bytes_read_to_buffer - 1; uint16_t data_chunk_remaining = LV_MIN(btr - buffer_remaining_length, bytes_read_to_buffer); lv_memcpy(buf + buffer_remaining_length, buffer, data_chunk_remaining); } *br = LV_MIN(buffer_remaining_length + bytes_read_to_buffer, btr); } } else { file_p->drv->seek_cb(file_p->drv, file_p->file_d, file_p->cache->file_position, LV_FS_SEEK_SET); /*Data is not in cache buffer*/ if(btr > buffer_size) { /*If bigger data is requested, then do not use cache, instead read it directly*/ res = file_p->drv->read_cb(file_p->drv, file_p->file_d, (void *)buf, btr, br); } else { /*If small data is requested, then read from FS into cache buffer*/ if(buffer == NULL) { file_p->cache->buffer = lv_malloc(buffer_size); LV_ASSERT_MALLOC(file_p->cache->buffer); buffer = file_p->cache->buffer; } uint32_t bytes_read_to_buffer = 0; res = file_p->drv->read_cb(file_p->drv, file_p->file_d, (void *)buffer, buffer_size, &bytes_read_to_buffer); file_p->cache->start = file_position; file_p->cache->end = file_p->cache->start + bytes_read_to_buffer - 1; *br = LV_MIN(btr, bytes_read_to_buffer); lv_memcpy(buf, buffer, *br); } } if(res == LV_FS_RES_OK) { file_p->cache->file_position += *br; } return res; } lv_fs_res_t lv_fs_read(lv_fs_file_t * file_p, void * buf, uint32_t btr, uint32_t * br) { if(br != NULL) *br = 0; if(file_p->drv == NULL) return LV_FS_RES_INV_PARAM; if(file_p->drv->read_cb == NULL) return LV_FS_RES_NOT_IMP; uint32_t br_tmp = 0; lv_fs_res_t res; if(file_p->drv->cache_size) { res = lv_fs_read_cached(file_p, (char *)buf, btr, &br_tmp); } else { res = file_p->drv->read_cb(file_p->drv, file_p->file_d, buf, btr, &br_tmp); } if(br 
!= NULL) *br = br_tmp; return res; } lv_fs_res_t lv_fs_write(lv_fs_file_t * file_p, const void * buf, uint32_t btw, uint32_t * bw) { if(bw != NULL) *bw = 0; if(file_p->drv == NULL) { return LV_FS_RES_INV_PARAM; } if(file_p->drv->write_cb == NULL) { return LV_FS_RES_NOT_IMP; } uint32_t bw_tmp = 0; lv_fs_res_t res = file_p->drv->write_cb(file_p->drv, file_p->file_d, buf, btw, &bw_tmp); if(bw != NULL) *bw = bw_tmp; return res; } lv_fs_res_t lv_fs_seek(lv_fs_file_t * file_p, uint32_t pos, lv_fs_whence_t whence) { if(file_p->drv == NULL) { return LV_FS_RES_INV_PARAM; } if(file_p->drv->seek_cb == NULL) { return LV_FS_RES_NOT_IMP; } lv_fs_res_t res = LV_FS_RES_OK; if(file_p->drv->cache_size) { switch(whence) { case LV_FS_SEEK_SET: { file_p->cache->file_position = pos; /*FS seek if new position is outside cache buffer*/ if(file_p->cache->file_position < file_p->cache->start || file_p->cache->file_position > file_p->cache->end) { res = file_p->drv->seek_cb(file_p->drv, file_p->file_d, file_p->cache->file_position, LV_FS_SEEK_SET); } break; } case LV_FS_SEEK_CUR: { file_p->cache->file_position += pos; /*FS seek if new position is outside cache buffer*/ if(file_p->cache->file_position < file_p->cache->start || file_p->cache->file_position > file_p->cache->end) { res = file_p->drv->seek_cb(file_p->drv, file_p->file_d, file_p->cache->file_position, LV_FS_SEEK_SET); } break; } case LV_FS_SEEK_END: { /*Because we don't know the file size, we do a little trick: do a FS seek, then get the new file position from FS*/ res = file_p->drv->seek_cb(file_p->drv, file_p->file_d, pos, whence); if(res == LV_FS_RES_OK) { uint32_t tmp_position; res = file_p->drv->tell_cb(file_p->drv, file_p->file_d, &tmp_position); if(res == LV_FS_RES_OK) { file_p->cache->file_position = tmp_position; } } break; } } } else { res = file_p->drv->seek_cb(file_p->drv, file_p->file_d, pos, whence); } return res; } lv_fs_res_t lv_fs_tell(lv_fs_file_t * file_p, uint32_t * pos) { if(file_p->drv == NULL) { *pos = 0; 
return LV_FS_RES_INV_PARAM; } if(file_p->drv->tell_cb == NULL) { *pos = 0; return LV_FS_RES_NOT_IMP; } lv_fs_res_t res; if(file_p->drv->cache_size) { *pos = file_p->cache->file_position; res = LV_FS_RES_OK; } else { res = file_p->drv->tell_cb(file_p->drv, file_p->file_d, pos); } return res; } lv_fs_res_t lv_fs_dir_open(lv_fs_dir_t * rddir_p, const char * path) { if(path == NULL) return LV_FS_RES_INV_PARAM; char letter = path[0]; lv_fs_drv_t * drv = lv_fs_get_drv(letter); if(drv == NULL) { return LV_FS_RES_NOT_EX; } if(drv->ready_cb) { if(drv->ready_cb(drv) == false) { return LV_FS_RES_HW_ERR; } } if(drv->dir_open_cb == NULL) { return LV_FS_RES_NOT_IMP; } const char * real_path = lv_fs_get_real_path(path); void * dir_d = drv->dir_open_cb(drv, real_path); if(dir_d == NULL || dir_d == (void *)(-1)) { return LV_FS_RES_UNKNOWN; } rddir_p->drv = drv; rddir_p->dir_d = dir_d; return LV_FS_RES_OK; } lv_fs_res_t lv_fs_dir_read(lv_fs_dir_t * rddir_p, char * fn) { if(rddir_p->drv == NULL || rddir_p->dir_d == NULL) { fn[0] = '\0'; return LV_FS_RES_INV_PARAM; } if(rddir_p->drv->dir_read_cb == NULL) { fn[0] = '\0'; return LV_FS_RES_NOT_IMP; } lv_fs_res_t res = rddir_p->drv->dir_read_cb(rddir_p->drv, rddir_p->dir_d, fn); return res; } lv_fs_res_t lv_fs_dir_close(lv_fs_dir_t * rddir_p) { if(rddir_p->drv == NULL || rddir_p->dir_d == NULL) { return LV_FS_RES_INV_PARAM; } if(rddir_p->drv->dir_close_cb == NULL) { return LV_FS_RES_NOT_IMP; } lv_fs_res_t res = rddir_p->drv->dir_close_cb(rddir_p->drv, rddir_p->dir_d); rddir_p->dir_d = NULL; rddir_p->drv = NULL; return res; } void lv_fs_drv_init(lv_fs_drv_t * drv) { lv_memzero(drv, sizeof(lv_fs_drv_t)); } void lv_fs_drv_register(lv_fs_drv_t * drv_p) { /*Save the new driver*/ lv_fs_drv_t ** new_drv; new_drv = _lv_ll_ins_head(fsdrv_ll_p); LV_ASSERT_MALLOC(new_drv); if(new_drv == NULL) return; *new_drv = drv_p; } lv_fs_drv_t * lv_fs_get_drv(char letter) { lv_fs_drv_t ** drv; _LV_LL_READ(fsdrv_ll_p, drv) { if((*drv)->letter == letter) { return 
*drv; } } return NULL; } char * lv_fs_get_letters(char * buf) { lv_fs_drv_t ** drv; uint8_t i = 0; _LV_LL_READ(fsdrv_ll_p, drv) { buf[i] = (*drv)->letter; i++; } buf[i] = '\0'; return buf; } const char * lv_fs_get_ext(const char * fn) { size_t i; for(i = lv_strlen(fn); i > 0; i--) { if(fn[i] == '.') { return &fn[i + 1]; } else if(fn[i] == '/' || fn[i] == '\\') { return ""; /*No extension if a '\' or '/' found*/ } } return ""; /*Empty string if no '.' in the file name.*/ } char * lv_fs_up(char * path) { size_t len = lv_strlen(path); if(len == 0) return path; len--; /*Go before the trailing '\0'*/ /*Ignore trailing '/' or '\'*/ while(path[len] == '/' || path[len] == '\\') { path[len] = '\0'; if(len > 0) len--; else return path; } size_t i; for(i = len; i > 0; i--) { if(path[i] == '/' || path[i] == '\\') break; } if(i > 0) path[i] = '\0'; return path; } const char * lv_fs_get_last(const char * path) { size_t len = lv_strlen(path); if(len == 0) return path; len--; /*Go before the trailing '\0'*/ /*Ignore trailing '/' or '\'*/ while(path[len] == '/' || path[len] == '\\') { if(len > 0) len--; else return path; } size_t i; for(i = len; i > 0; i--) { if(path[i] == '/' || path[i] == '\\') break; } /*No '/' or '\' in the path so return with path itself*/ if(i == 0) return path; return &path[i + 1]; } /********************** * STATIC FUNCTIONS **********************/ /** * Skip the driver letter and the possible : after the letter * @param path path string (E.g. S:/folder/file.txt) * @return pointer to the beginning of the real path (E.g. /folder/file.txt) */ static const char * lv_fs_get_real_path(const char * path) { path++; /*Ignore the driver letter*/ if(*path == ':') path++; return path; }
8eac47aad83a47fd68923a1de0bf11ff1db6ea08
229a28fc18c13bfe1ba7fc81c38b03651ed8e93b
/sw/tests/fp8alt_comparison_vector.c
25e27bfb9992c244931e8c902710156100b5a306
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
pulp-platform/snitch
d3967742434fa21e8af71afa6be35ea5420166ca
d026f47843f0ea6c269244c4e6851e0e09141ec3
refs/heads/master
2023-08-24T08:42:36.230951
2023-06-19T09:34:05
2023-06-19T09:34:05
289,236,605
194
44
Apache-2.0
2023-07-11T12:46:26
2020-08-21T09:57:34
SystemVerilog
UTF-8
C
false
false
8,637
c
fp8alt_comparison_vector.c
// Copyright 2020 ETH Zurich and University of Bologna. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 #include <snrt.h> #include "printf.h" int main() { int errs = 64; if (snrt_is_compute_core()) { uint32_t fa8 = 0x4048F5C3; // 0x4248 3.14 uint32_t fa8n = 0xC048F5C3; // 0xC248 -3.14 uint32_t fb8 = 0x3FCF1AA0; // 0x3E79 1.618 uint32_t fb8n = 0xBFCF1AA0; // 0xBE79 -1.618 int cmp0 = 0; int cmp1 = 0; int cmp2 = 0; int cmp3 = 0; write_csr(2048, 3); asm volatile( "fmv.s.x ft3, %0\n" "fmv.s.x ft4, %1\n" "vfcpka.ab.s ft5, ft4, ft3\n" "vfcpkb.ab.s ft5, ft4, ft3\n" "vfcpkc.ab.s ft5, ft4, ft3\n" "vfcpkd.ab.s ft5, ft4, ft3\n" "vfcpka.ab.s ft6, ft3, ft4\n" "vfcpkb.ab.s ft6, ft3, ft4\n" "vfcpkc.ab.s ft6, ft3, ft4\n" "vfcpkd.ab.s ft6, ft3, ft4\n" "vfcpka.ab.s ft7, ft3, ft3\n" "vfcpkb.ab.s ft7, ft3, ft3\n" "vfcpkc.ab.s ft7, ft3, ft3\n" "vfcpkd.ab.s ft7, ft3, ft3\n" "vfcpka.ab.s ft8, ft4, ft4\n" "vfcpkb.ab.s ft8, ft4, ft4\n" "vfcpkc.ab.s ft8, ft4, ft4\n" "vfcpkd.ab.s ft8, ft4, ft4\n" : "+r"(fa8), "+r"(fb8)); // vfeq asm volatile( "vfeq.ab %0, ft5, ft5\n" "vfeq.ab %1, ft6, ft6\n" "vfeq.ab %2, ft5, ft6\n" "vfeq.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0xff); // 1111 errs -= (cmp1 == 0xff); // 1111 errs -= (cmp2 == 0); errs -= (cmp3 == 0); // vfeq.R asm volatile( "vfeq.r.ab %0, ft5, ft5\n" "vfeq.r.ab %1, ft6, ft6\n" "vfeq.r.ab %2, ft5, ft6\n" "vfeq.r.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0x55); errs -= (cmp1 == 0x55); errs -= (cmp2 == 0xaa); errs -= (cmp3 == 0xaa); // vfne asm volatile( "vfne.ab %0, ft5, ft5\n" "vfne.ab %1, ft6, ft6\n" "vfne.ab %2, ft5, ft6\n" "vfne.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0); errs -= (cmp1 == 0); errs -= (cmp2 == 0xff); errs -= (cmp3 == 0xff); // vfne.R asm volatile( "vfne.r.ab %0, ft5, ft5\n" "vfne.r.ab %1, ft6, ft6\n" "vfne.r.ab %2, ft5, 
ft6\n" "vfne.r.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0xaa); errs -= (cmp1 == 0xaa); errs -= (cmp2 == 0x55); errs -= (cmp3 == 0x55); // vflt asm volatile( "vflt.ab %0, ft5, ft5\n" "vflt.ab %1, ft6, ft6\n" "vflt.ab %2, ft5, ft6\n" "vflt.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0); errs -= (cmp1 == 0); errs -= (cmp2 == 0x55); errs -= (cmp3 == 0xaa); // vflt.R asm volatile( "vflt.r.ab %0, ft5, ft5\n" "vflt.r.ab %1, ft6, ft6\n" "vflt.r.ab %2, ft5, ft6\n" "vflt.r.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0); errs -= (cmp1 == 0xaa); errs -= (cmp2 == 0x55); errs -= (cmp3 == 0); // vfle asm volatile( "vfle.ab %0, ft5, ft5\n" "vfle.ab %1, ft6, ft6\n" "vfle.ab %2, ft5, ft6\n" "vfle.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0xff); errs -= (cmp1 == 0xff); errs -= (cmp2 == 0x55); errs -= (cmp3 == 0xaa); // vfle.R asm volatile( "vfle.r.ab %0, ft5, ft5\n" "vfle.r.ab %1, ft6, ft6\n" "vfle.r.ab %2, ft5, ft6\n" "vfle.r.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0x55); errs -= (cmp1 == 0xff); errs -= (cmp2 == 0xff); errs -= (cmp3 == 0xaa); // vfgt asm volatile( "vfgt.ab %0, ft5, ft5\n" "vfgt.ab %1, ft6, ft6\n" "vfgt.ab %2, ft5, ft6\n" "vfgt.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0); errs -= (cmp1 == 0); errs -= (cmp2 == 0xaa); errs -= (cmp3 == 0x55); // vfgt.R asm volatile( "vfgt.r.ab %0, ft5, ft5\n" "vfgt.r.ab %1, ft6, ft6\n" "vfgt.r.ab %2, ft5, ft6\n" "vfgt.r.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0xaa); errs -= (cmp1 == 0); errs -= (cmp2 == 0); errs -= (cmp3 == 0x55); // vfge asm volatile( "vfge.ab %0, ft5, ft5\n" "vfge.ab %1, ft6, ft6\n" "vfge.ab %2, ft5, ft6\n" "vfge.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0xff); 
errs -= (cmp1 == 0xff); errs -= (cmp2 == 0xaa); errs -= (cmp3 == 0x55); // vfge.R asm volatile( "vfge.r.ab %0, ft5, ft5\n" "vfge.r.ab %1, ft6, ft6\n" "vfge.r.ab %2, ft5, ft6\n" "vfge.r.ab %3, ft6, ft5\n" : "+r"(cmp0), "+r"(cmp1), "+r"(cmp2), "+r"(cmp3)); errs -= (cmp0 == 0xff); errs -= (cmp1 == 0x55); errs -= (cmp2 == 0xaa); errs -= (cmp3 == 0xff); // vfmax asm volatile( "vfmax.ab ft0, ft5, ft5\n" "vfeq.ab %1, ft5, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( "vfmax.ab ft0, ft6, ft6\n" "vfeq.ab %1, ft6, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( "vfmax.ab ft0, ft5, ft6\n" "vfeq.ab %1, ft7, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( "vfmax.ab ft0, ft6, ft5\n" "vfeq.ab %1, ft7, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); // vfmax.R asm volatile( "vfmax.r.ab ft0, ft5, ft5\n" "vfeq.ab %1, ft5, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( "vfmax.r.ab ft0, ft6, ft6\n" "vfeq.ab %1, ft7, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( "vfmax.r.ab ft0, ft5, ft6\n" "vfeq.ab %1, ft7, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( "vfmax.r.ab ft0, ft6, ft5\n" "vfeq.ab %1, ft6, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); // vfmin asm volatile( "vfmin.ab ft0, ft5, ft5\n" "vfeq.ab %1, ft5, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( "vfmin.ab ft0, ft6, ft6\n" "vfeq.ab %1, ft6, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( "vfmin.ab ft0, ft5, ft6\n" "vfeq.ab %1, ft8, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( "vfmin.ab ft0, ft6, ft5\n" "vfeq.ab %1, ft8, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); // vfmin.R asm volatile( "vfmin.r.ab ft0, ft5, ft5\n" "vfeq.ab %1, ft8, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( "vfmin.r.ab ft0, ft6, ft6\n" "vfeq.ab %1, ft6, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( "vfmin.r.ab ft0, ft5, ft6\n" "vfeq.ab %1, ft5, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); asm volatile( 
"vfmin.r.ab ft0, ft6, ft5\n" "vfeq.ab %1, ft8, ft0\n" : "+r"(cmp0)); errs -= (cmp0 == 0xff); } return errs; }
0de5d4af9b5d3c0de1b893140be11fdfd5394d16
99bdb3251fecee538e0630f15f6574054dfc1468
/bsp/gd32/arm/libraries/GD32F4xx_Firmware_Library/GD32F4xx_standard_peripheral/Source/gd32f4xx_dci.c
730410ad2a60a0b74ff6e2d08c1dd74f3f8ffb21
[ "Zlib", "LicenseRef-scancode-proprietary-license", "MIT", "BSD-3-Clause", "X11", "BSD-4-Clause-UC", "LicenseRef-scancode-unknown-license-reference", "Apache-2.0" ]
permissive
RT-Thread/rt-thread
03a7c52c2aeb1b06a544143b0e803d72f47d1ece
3602f891211904a27dcbd51e5ba72fefce7326b2
refs/heads/master
2023-09-01T04:10:20.295801
2023-08-31T16:20:55
2023-08-31T16:20:55
7,408,108
9,599
5,805
Apache-2.0
2023-09-14T13:37:26
2013-01-02T14:49:21
C
UTF-8
C
false
false
10,112
c
gd32f4xx_dci.c
/*!
    \file    gd32f4xx_dci.c
    \brief   DCI driver

    Driver for the Digital Camera Interface (DCI) peripheral of the GD32F4xx
    family.  All functions below operate directly on the memory-mapped DCI
    registers (DCI_CTL, DCI_STAT0/1, DCI_INTEN, DCI_INTF, DCI_INTC, ...);
    none of them allocate memory or block.

    \version 2016-08-15, V1.0.0, firmware for GD32F4xx
    \version 2018-12-12, V2.0.0, firmware for GD32F4xx
    \version 2020-09-30, V2.1.0, firmware for GD32F4xx
    \version 2022-03-09, V3.0.0, firmware for GD32F4xx
*/

/*
    Copyright (c) 2022, GigaDevice Semiconductor Inc.

    Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

    1. Redistributions of source code must retain the above copyright notice, this
       list of conditions and the following disclaimer.
    2. Redistributions in binary form must reproduce the above copyright notice,
       this list of conditions and the following disclaimer in the documentation
       and/or other materials provided with the distribution.
    3. Neither the name of the copyright holder nor the names of its contributors
       may be used to endorse or promote products derived from this software without
       specific prior written permission.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/

#include "gd32f4xx_dci.h"

/*!
    \brief      DCI deinit
                Resets the DCI peripheral to its power-on state by pulsing its
                reset line through the RCU (reset-and-clock unit).
    \param[in]  none
    \param[out] none
    \retval     none
*/
void dci_deinit(void)
{
    /* assert then release the peripheral reset; all DCI registers return to defaults */
    rcu_periph_reset_enable(RCU_DCIRST);
    rcu_periph_reset_disable(RCU_DCIRST);
}

/*!
    \brief      initialize DCI registers
    \param[in]  dci_struct: DCI parameter initialization structure
                members of the structure and the member values are shown as below:
                capture_mode    : DCI_CAPTURE_MODE_CONTINUOUS, DCI_CAPTURE_MODE_SNAPSHOT
                clock_polarity  : DCI_CK_POLARITY_FALLING, DCI_CK_POLARITY_RISING
                hsync_polarity  : DCI_HSYNC_POLARITY_LOW, DCI_HSYNC_POLARITY_HIGH
                vsync_polarity  : DCI_VSYNC_POLARITY_LOW, DCI_VSYNC_POLARITY_HIGH
                frame_rate      : DCI_FRAME_RATE_ALL, DCI_FRAME_RATE_1_2, DCI_FRAME_RATE_1_4
                interface_format: DCI_INTERFACE_FORMAT_8BITS, DCI_INTERFACE_FORMAT_10BITS,
                                  DCI_INTERFACE_FORMAT_12BITS, DCI_INTERFACE_FORMAT_14BITS
    \param[out] none
    \retval     none
*/
void dci_init(dci_parameter_struct *dci_struct)
{
    uint32_t reg = 0U;

    /* disable capture function and DCI before reconfiguring the control register */
    DCI_CTL &= ~(DCI_CTL_CAP | DCI_CTL_DCIEN);

    /* OR all configuration fields into one value, then write DCI_CTL in a
       single store (note: this clears any CTL bits not covered by the struct) */
    reg |= dci_struct->capture_mode;
    reg |= dci_struct->clock_polarity;
    reg |= dci_struct->hsync_polarity;
    reg |= dci_struct->vsync_polarity;
    reg |= dci_struct->frame_rate;
    reg |= dci_struct->interface_format;

    DCI_CTL = reg;
}

/*!
    \brief      enable DCI function
    \param[in]  none
    \param[out] none
    \retval     none
*/
void dci_enable(void)
{
    DCI_CTL |= DCI_CTL_DCIEN;
}

/*!
    \brief      disable DCI function
    \param[in]  none
    \param[out] none
    \retval     none
*/
void dci_disable(void)
{
    DCI_CTL &= ~DCI_CTL_DCIEN;
}

/*!
    \brief      enable DCI capture
    \param[in]  none
    \param[out] none
    \retval     none
*/
void dci_capture_enable(void)
{
    DCI_CTL |= DCI_CTL_CAP;
}

/*!
    \brief      disable DCI capture
    \param[in]  none
    \param[out] none
    \retval     none
*/
void dci_capture_disable(void)
{
    DCI_CTL &= ~DCI_CTL_CAP;
}

/*!
    \brief      enable DCI jpeg mode
    \param[in]  none
    \param[out] none
    \retval     none
*/
void dci_jpeg_enable(void)
{
    DCI_CTL |= DCI_CTL_JM;
}

/*!
    \brief      disable DCI jpeg mode
    \param[in]  none
    \param[out] none
    \retval     none
*/
void dci_jpeg_disable(void)
{
    DCI_CTL &= ~DCI_CTL_JM;
}

/*!
    \brief      enable cropping window function
    \param[in]  none
    \param[out] none
    \retval     none
*/
void dci_crop_window_enable(void)
{
    DCI_CTL |= DCI_CTL_WDEN;
}

/*!
    \brief      disable cropping window function
    \param[in]  none
    \param[out] none
    \retval     none
*/
void dci_crop_window_disable(void)
{
    DCI_CTL &= ~DCI_CTL_WDEN;
}

/*!
    \brief      configure DCI cropping window
    \param[in]  start_x: window horizontal start position
    \param[in]  start_y: window vertical start position
    \param[in]  size_width: window horizontal size
    \param[in]  size_height: window vertical size
    \param[out] none
    \retval     none
*/
void dci_crop_window_config(uint16_t start_x, uint16_t start_y, uint16_t size_width, uint16_t size_height)
{
    /* x/width occupy the low half-word, y/height the high half-word of each register */
    DCI_CWSPOS = ((uint32_t)start_x | ((uint32_t)start_y << 16));
    DCI_CWSZ = ((uint32_t)size_width | ((uint32_t)size_height << 16));
}

/*!
    \brief      enable embedded synchronous mode
    \param[in]  none
    \param[out] none
    \retval     none
*/
void dci_embedded_sync_enable(void)
{
    DCI_CTL |= DCI_CTL_ESM;
}

/*!
    \brief      disable embedded synchronous mode
    \param[in]  none
    \param[out] none
    \retval     none
*/
void dci_embedded_sync_disable(void)
{
    DCI_CTL &= ~DCI_CTL_ESM;
}

/*!
    \brief      config synchronous codes in embedded synchronous mode
    \param[in]  frame_start: frame start code in embedded synchronous mode
    \param[in]  line_start: line start code in embedded synchronous mode
    \param[in]  line_end: line end code in embedded synchronous mode
    \param[out] frame_end: frame end code in embedded synchronous mode
    \param[out] none
    \retval     none
*/
void dci_sync_codes_config(uint8_t frame_start, uint8_t line_start, uint8_t line_end, uint8_t frame_end)
{
    /* pack the four 8-bit codes into DCI_SC: byte 0 = frame start, 1 = line start,
       2 = line end, 3 = frame end */
    DCI_SC = ((uint32_t)frame_start | ((uint32_t)line_start << 8) | ((uint32_t)line_end << 16) | ((uint32_t)frame_end << 24));
}

/*!
    \brief      config synchronous codes unmask in embedded synchronous mode
    \param[in]  frame_start: frame start code unmask bits in embedded synchronous mode
    \param[in]  line_start: line start code unmask bits in embedded synchronous mode
    \param[in]  line_end: line end code unmask bits in embedded synchronous mode
    \param[in]  frame_end: frame end code unmask bits in embedded synchronous mode
    \param[out] none
    \retval     none
*/
void dci_sync_codes_unmask_config(uint8_t frame_start, uint8_t line_start, uint8_t line_end, uint8_t frame_end)
{
    /* same byte layout as DCI_SC: byte 0 = frame start ... byte 3 = frame end */
    DCI_SCUMSK = ((uint32_t)frame_start | ((uint32_t)line_start << 8) | ((uint32_t)line_end << 16) | ((uint32_t)frame_end << 24));
}

/*!
    \brief      read DCI data register
    \param[in]  none
    \param[out] none
    \retval     data
*/
uint32_t dci_data_read(void)
{
    return DCI_DATA;
}

/*!
    \brief      get specified flag
    \param[in]  flag:
      \arg        DCI_FLAG_HS: HS line status
      \arg        DCI_FLAG_VS: VS line status
      \arg        DCI_FLAG_FV: FIFO valid
      \arg        DCI_FLAG_EF: end of frame flag
      \arg        DCI_FLAG_OVR: FIFO overrun flag
      \arg        DCI_FLAG_ESE: embedded synchronous error flag
      \arg        DCI_FLAG_VSYNC: vsync flag
      \arg        DCI_FLAG_EL: end of line flag
    \param[out] none
    \retval     FlagStatus: SET or RESET
*/
FlagStatus dci_flag_get(uint32_t flag)
{
    uint32_t stat = 0U;

    /* bit 31 of the flag constant selects which status register holds it:
       set -> DCI_STAT1, clear -> DCI_STAT0 */
    if(flag >> 31) {
        /* get flag status from DCI_STAT1 register */
        stat = DCI_STAT1;
    } else {
        /* get flag status from DCI_STAT0 register */
        stat = DCI_STAT0;
    }

    if(flag & stat) {
        return SET;
    } else {
        return RESET;
    }
}

/*!
    \brief      enable specified DCI interrupt
    \param[in]  interrupt:
      \arg        DCI_INT_EF: end of frame interrupt
      \arg        DCI_INT_OVR: FIFO overrun interrupt
      \arg        DCI_INT_ESE: embedded synchronous error interrupt
      \arg        DCI_INT_VSYNC: vsync interrupt
      \arg        DCI_INT_EL: end of line interrupt
    \param[out] none
    \retval     none
*/
void dci_interrupt_enable(uint32_t interrupt)
{
    DCI_INTEN |= interrupt;
}

/*!
    \brief      disable specified DCI interrupt
    \param[in]  interrupt:
      \arg        DCI_INT_EF: end of frame interrupt
      \arg        DCI_INT_OVR: FIFO overrun interrupt
      \arg        DCI_INT_ESE: embedded synchronous error interrupt
      \arg        DCI_INT_VSYNC: vsync interrupt
      \arg        DCI_INT_EL: end of line interrupt
    \param[out] none
    \retval     none
*/
void dci_interrupt_disable(uint32_t interrupt)
{
    DCI_INTEN &= ~interrupt;
}

/*!
    \brief      clear specified interrupt flag
    \param[in]  int_flag:
      \arg        DCI_INT_EF: end of frame interrupt
      \arg        DCI_INT_OVR: FIFO overrun interrupt
      \arg        DCI_INT_ESE: embedded synchronous error interrupt
      \arg        DCI_INT_VSYNC: vsync interrupt
      \arg        DCI_INT_EL: end of line interrupt
    \param[out] none
    \retval     none
*/
void dci_interrupt_flag_clear(uint32_t int_flag)
{
    /* NOTE(review): read-modify-write of the interrupt-clear register; presumably
       DCI_INTC is write-1-to-clear so the |= is harmless — verify against the
       GD32F4xx reference manual register description */
    DCI_INTC |= int_flag;
}

/*!
    \brief      get specified interrupt flag
    \param[in]  int_flag:
      \arg        DCI_INT_FLAG_EF: end of frame interrupt flag
      \arg        DCI_INT_FLAG_OVR: FIFO overrun interrupt flag
      \arg        DCI_INT_FLAG_ESE: embedded synchronous error interrupt flag
      \arg        DCI_INT_FLAG_VSYNC: vsync interrupt flag
      \arg        DCI_INT_FLAG_EL: end of line interrupt flag
    \param[out] none
    \retval     FlagStatus: SET or RESET
*/
FlagStatus dci_interrupt_flag_get(uint32_t int_flag)
{
    if(RESET == (DCI_INTF & int_flag)) {
        return RESET;
    } else {
        return SET;
    }
}
b86ec8fdbd9beaa90a5ea737b94d82964965d26e
d6d6893f488941edfc5d244221583c63572d9d5f
/code/parse/parsehi.h
ab2a2d3f361ce31078606e46655ecf67a4a0233b
[ "Unlicense" ]
permissive
scp-fs2open/fs2open.github.com
4170cc58b92577b41308a9e6343dd3fc3fb7a074
865f7e725c7a4d9c0b209a49ed0cbd8dc45e8ae7
refs/heads/master
2023-08-29T11:29:27.822804
2023-08-27T20:33:28
2023-08-27T20:33:28
7,700,081
382
311
NOASSERTION
2023-09-14T15:49:22
2013-01-19T06:10:53
C++
UTF-8
C
false
false
517
h
parsehi.h
#pragma once #include "parse/parselo.h" #include "graphics/color.h" //parsehi is intended for higher level, frequently used combinations of parselo functions //and also for collection of standardized parsing of complex types where appropriate extern bool parse_optional_float_into(const SCP_string& field_name, float* value_target); extern bool parse_optional_bool_into(const SCP_string& field_name, bool* value_target); extern bool parse_optional_color3i_into(const SCP_string &field_name, hdr_color *out_color);
d233057c9ea0ffb125b2de016b90dce12c7a60c3
af84faea52cfa5bf69f5a0f2ec71f3087554ddeb
/winewayland.drv/vulkan.c
7104b41865c8fa0394a680411b660912518a2595
[]
no_license
varmd/wine-wayland
4fa38396fbff716b9bc6892c30228d308e84638c
e3260f1a249fbc0dedc97f5548aa813ed66b5326
refs/heads/master
2023-04-06T18:57:43.607197
2023-02-19T17:21:22
2023-02-19T17:21:22
237,471,965
719
24
null
2023-02-19T23:12:05
2020-01-31T16:38:49
C
UTF-8
C
false
false
121,405
c
vulkan.c
/* WAYLANDDRV Vulkan+Wayland Implementation * * Copyright 2017 Roderick Colenbrander * Copyright 2018-2022 varmd (github.com/varmd) * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA */ #if 0 #pragma makedep unix #endif #include "config.h" #include <stdarg.h> #include <dlfcn.h> #include "winternl.h" #include "windef.h" #include "winbase.h" #include "wingdi.h" #include "wine/gdi_driver.h" #include "winuser.h" //TODO //#include "wine/heap.h" #include "wine/server.h" #include "wine/debug.h" #include "wine/unixlib.h" #include "wine/vulkan.h" #include "wine/vulkan_driver.h" #include "waylanddrv.h" #define VK_NO_PROTOTYPES #define WINE_VK_HOST //latest version is 5 #define WINE_WAYLAND_SEAT_VERSION 5 #include "wayland-protocols/xdg-shell-client-protocol.h" #include "wayland-protocols/pointer-constraints-unstable-v1-client-protocol.h" #include "wayland-protocols/relative-pointer-unstable-v1-client-protocol.h" WINE_DEFAULT_DEBUG_CHANNEL(waylanddrv); #ifndef SONAME_LIBVULKAN #define SONAME_LIBVULKAN "" #endif #ifdef _WIN64 #define HAS_FSR 1 #endif struct wl_compositor *wayland_compositor = NULL; unsigned int global_wayland_confine = 0; unsigned int global_wayland_full = 0; unsigned long global_sx = 0; unsigned long global_sy = 0; int global_cursor_set = 0; int global_cursor_gdi_fd = 0; int 
global_cursor_last_change = 0; int global_cursor_height = 0; int global_cursor_width = 0; int global_custom_cursors = 0; HCURSOR global_last_cursor_handle = NULL; void *global_cursor_shm_data = NULL; struct wl_shm_pool *global_cursor_pool = NULL; struct cursor_cache *global_cursor_cache[32768] = {0}; int global_wait_for_configure = 0; int global_is_vulkan = 0; int global_hide_cursor = 0; int global_disable_clip_cursor = 0; int global_fullscreen_grab_cursor = 0; int global_last_cursor_change = 0; int global_is_cursor_visible = 1; int global_is_always_fullscreen = 0; int global_fsr = 0; int global_fsr_set = 0; RECT global_vulkan_rect; int global_vulkan_rect_flag = 0; int global_gdi_fd = 0; int global_gdi_size = 0; int global_gdi_position_changing = 0; int global_gdi_lb_hold = 0; void *global_shm_data = NULL; struct wl_buffer *global_wl_buffer = NULL; struct wl_shm_pool *global_wl_pool = NULL; HWND global_vulkan_hwnd = NULL; HWND global_update_hwnd = NULL; INPUT global_input; //wl_output struct wl_output *global_first_wl_output = NULL; int global_output_width = 0; int global_output_height = 0; //Wayland defs struct xdg_wm_base *wm_base = NULL; static struct wl_seat *wayland_seat = NULL; static struct wl_pointer *wayland_pointer = NULL; static struct wl_keyboard *wayland_keyboard = NULL; static struct zwp_pointer_constraints_v1 *pointer_constraints = NULL; static struct zwp_relative_pointer_manager_v1 *relative_pointer_manager = NULL; struct zwp_locked_pointer_v1 *locked_pointer = NULL; struct zwp_confined_pointer_v1 *confined_pointer = NULL; struct zwp_relative_pointer_v1 *relative_pointer = NULL; struct wl_display *wayland_display = NULL; struct wl_cursor_theme *wayland_cursor_theme; struct wl_cursor *wayland_default_cursor; struct wl_surface *wayland_cursor_surface; uint32_t wayland_serial_id = 0; struct wl_shm *wayland_cursor_shm; struct wl_shm *global_shm; struct wl_subcompositor *wayland_subcompositor; struct wayland_window { struct wl_surface *surface; struct 
xdg_surface *xdg_surface; struct xdg_toplevel *xdg_toplevel; HWND pointer_to_hwnd; int test; int height; int width; }; struct wayland_window *vulkan_window = NULL; struct wayland_window *gdi_window = NULL; struct wl_surface_win_data { HWND hwnd; /* hwnd that this private data belongs to */ struct wl_subsurface *wayland_subsurface; struct wl_surface *wayland_surface; struct wayland_window *wayland_window; }; int is_buffer_busy = 0; UINT desktop_tid = 0; VkInstance *global_vk_instance = NULL; #define ZWP_RELATIVE_POINTER_MANAGER_V1_VERSION 1 static const struct vulkan_funcs vulkan_funcs; //Wayland /* Examples https://github.com/SaschaWillems/Vulkan/blob/b4fb49504e714ecbd4485dfe98514a47b4e9c2cc/external/vulkan/vulkan_wayland.h */ #include "keycodes-inc.c" /*********************************************************************** * WAYLAND_ToUnicodeEx */ INT WAYLANDDRV_ToUnicodeEx( UINT virt, UINT scan, const BYTE *state, LPWSTR buf, int size, UINT flags, HKL hkl ) { WCHAR buffer[2]; BOOL shift = state[VK_SHIFT] & 0x80; BOOL ctrl = state[VK_CONTROL] & 0x80; BOOL numlock = state[VK_NUMLOCK] & 0x01; buffer[0] = buffer[1] = 0; if (scan & 0x8000) return 0; /* key up */ /* FIXME: hardcoded layout */ if (!ctrl) { switch (virt) { case VK_BACK: buffer[0] = '\b'; break; case VK_OEM_1: buffer[0] = shift ? ':' : ';'; break; case VK_OEM_2: buffer[0] = shift ? '?' : '/'; break; case VK_OEM_3: buffer[0] = shift ? '~' : '`'; break; case VK_OEM_4: buffer[0] = shift ? '{' : '['; break; case VK_OEM_5: buffer[0] = shift ? '|' : '\\'; break; case VK_OEM_6: buffer[0] = shift ? '}' : ']'; break; case VK_OEM_7: buffer[0] = shift ? '"' : '\''; break; case VK_OEM_COMMA: buffer[0] = shift ? '<' : ','; break; case VK_OEM_MINUS: buffer[0] = shift ? '_' : '-'; break; case VK_OEM_PERIOD: buffer[0] = shift ? '>' : '.'; break; case VK_OEM_PLUS: buffer[0] = shift ? 
'+' : '='; break; case VK_RETURN: buffer[0] = '\r'; break; case VK_SPACE: buffer[0] = ' '; break; case VK_TAB: buffer[0] = '\t'; break; case VK_MULTIPLY: buffer[0] = '*'; break; case VK_ADD: buffer[0] = '+'; break; case VK_SUBTRACT: buffer[0] = '-'; break; case VK_DIVIDE: buffer[0] = '/'; break; default: if (virt >= '0' && virt <= '9') { buffer[0] = shift ? ")!@#$%^&*("[virt - '0'] : virt; break; } if (virt >= 'A' && virt <= 'Z') { buffer[0] = shift || (state[VK_CAPITAL] & 0x01) ? virt : virt + 'a' - 'A'; break; } if (virt >= VK_NUMPAD0 && virt <= VK_NUMPAD9 && numlock && !shift) { buffer[0] = '0' + virt - VK_NUMPAD0; break; } if (virt == VK_DECIMAL && numlock && !shift) { buffer[0] = '.'; break; } break; } } else /* Control codes */ { if (virt >= 'A' && virt <= 'Z') buffer[0] = virt - 'A' + 1; else { switch (virt) { case VK_OEM_4: buffer[0] = 0x1b; break; case VK_OEM_5: buffer[0] = 0x1c; break; case VK_OEM_6: buffer[0] = 0x1d; break; case VK_SUBTRACT: buffer[0] = 0x1e; break; } } } lstrcpynW( buf, buffer, size ); //TRACE( "returning %d / %s\n", strlenW( buffer ), debugstr_wn(buf, strlenW( buffer ))); return lstrlenW( buffer ); } /*********************************************************************** * WAYLAND_MapVirtualKeyEx */ UINT WAYLANDDRV_MapVirtualKeyEx( UINT code, UINT maptype, HKL hkl ) { UINT ret = 0; const char *s; char key; //TRACE( "code=%d %x, maptype=%d, hkl %p \n", code, code, maptype, hkl ); switch (maptype) { case MAPVK_VK_TO_VSC_EX: case MAPVK_VK_TO_VSC: /* vkey to scancode */ switch (code) { //case VK_LSHIFT: //case VK_RSHIFT: //case VK_SHIFT: // code = VK_SHIFT; // break; case VK_CONTROL: code = VK_CONTROL; break; case VK_MENU: code = VK_LMENU; break; } if (code < ( sizeof(vkey_to_scancode) / sizeof(vkey_to_scancode[0]) ) ) ret = vkey_to_scancode[code]; break; case MAPVK_VSC_TO_VK: case MAPVK_VSC_TO_VK_EX: /* scancode to vkey */ ret = scancode_to_vkey( code ); if (maptype == MAPVK_VSC_TO_VK) switch (ret) { //case VK_LSHIFT: //case VK_RSHIFT: 
//case VK_SHIFT: // ret = VK_SHIFT; break; case VK_LCONTROL: case VK_RCONTROL: ret = VK_CONTROL; break; case VK_LMENU: case VK_RMENU: ret = VK_MENU; break; } break; case MAPVK_VK_TO_CHAR: if ((code >= 0x30 && code <= 0x39) || (code >= 0x41 && code <= 0x5a)) { key = code; if (code >= 0x41) key += 0x20; ret = toupper(key); //TRACE( "returning char code of %d \n", ret ); } else { s = vkey_to_name( code ); if (s && (strlen( s ) == 1)) ret = s[0]; else ret = 0; } break; default: FIXME( "Unknown maptype %d\n", maptype ); break; } //TRACE( "returning 0x%04x %x %d \n", ret, ret, ret ); return ret; } /*********************************************************************** * WAYLAND_GetKeyboardLayout */ HKL WAYLANDDRV_GetKeyboardLayout( DWORD thread_id ) { return (HKL)MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US); } /*********************************************************************** * WAYLAND_VkKeyScanEx */ SHORT WAYLANDDRV_VkKeyScanEx( WCHAR ch, HKL hkl ) { //TRACE("%s \n", debugstr_w(ch)); SHORT ret = -1; if (ch < sizeof(char_vkey_map) / sizeof(char_vkey_map[0])) ret = char_vkey_map[ch]; return ret; } /*********************************************************************** * WAYLAND_GetKeyNameText */ INT WAYLANDDRV_GetKeyNameText( LONG lparam, LPWSTR buffer, INT size ) { int scancode, vkey; const char *name; char key[2]; DWORD len; scancode = (lparam >> 16) & 0x1FF; vkey = scancode_to_vkey( scancode ); TRACE( "scancode is %d %d\n", scancode, vkey); if (lparam & (1 << 25)) { /* Caller doesn't care about distinctions between left and right keys. 
*/ switch (vkey) { case VK_LSHIFT: case VK_RSHIFT: vkey = VK_SHIFT; break; case VK_LCONTROL: case VK_RCONTROL: vkey = VK_CONTROL; break; case VK_LMENU: case VK_RMENU: vkey = VK_MENU; break; } } if (scancode & 0x100) vkey |= 0x100; if ((vkey >= 0x30 && vkey <= 0x39) || (vkey >= 0x41 && vkey <= 0x5a)) { key[0] = vkey; if (vkey >= 0x41) key[0] += 0x20; key[1] = 0; name = key; } else { name = vkey_to_name( vkey ); } RtlUTF8ToUnicodeN( buffer, size * sizeof(WCHAR), &len, name, strlen( name ) + 1 ); len /= sizeof(WCHAR); if (len) len--; if (!len) { char name[16]; len = sprintf( name, "Key 0x%02x", vkey ); len = min( len + 1, size ); ascii_to_unicode( buffer, name, len ); if (len) buffer[--len] = 0; } // TRACE( "lparam 0x%08x -> %s\n", lparam, debugstr_w( buffer )); return len; } /*********************************************************************** * GetCursorPos (WAYLANDDRV.@) */ BOOL WAYLANDDRV_GetCursorPos(LPPOINT pos) { if(global_wayland_confine) { return TRUE; } if(!global_sx) { pos->x = 0; pos->y = 0; return TRUE; } pos->x = global_sx; pos->y = global_sy; //TRACE( "Global pointer at %d \n", pos->x, pos->y ); return TRUE; } //End Wayland keyboard arrays and funcs #define VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR 1000006000; typedef struct VkWaylandSurfaceCreateInfoKHR { VkStructureType sType; const void* pNext; VkWaylandSurfaceCreateFlagsKHR flags; struct wl_display* display; struct wl_surface* surface; } VkWaylandSurfaceCreateInfoKHR; static VkResult (*pvkCreateInstance)(const VkInstanceCreateInfo *, const VkAllocationCallbacks *, VkInstance *); static VkResult (*pvkCreateSwapchainKHR)(VkDevice, const VkSwapchainCreateInfoKHR *, const VkAllocationCallbacks *, VkSwapchainKHR *); static VkResult (*pvkCreateWaylandSurfaceKHR)(VkInstance, const VkWaylandSurfaceCreateInfoKHR *, const VkAllocationCallbacks *, VkSurfaceKHR *); static void (*pvkDestroyInstance)(VkInstance, const VkAllocationCallbacks *); static void (*pvkDestroySurfaceKHR)(VkInstance, 
VkSurfaceKHR, const VkAllocationCallbacks *); static void (*pvkDestroySwapchainKHR)(VkDevice, VkSwapchainKHR, const VkAllocationCallbacks *); static VkResult (*pvkEnumerateInstanceExtensionProperties)(const char *, uint32_t *, VkExtensionProperties *); static VkResult (*pvkGetDeviceGroupSurfacePresentModesKHR)(VkDevice, VkSurfaceKHR, VkDeviceGroupPresentModeFlagsKHR *); static void * (*pvkGetDeviceProcAddr)(VkDevice, const char *); static void * (*pvkGetInstanceProcAddr)(VkInstance, const char *); static VkResult (*pvkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice, VkSurfaceKHR, uint32_t *, VkRect2D *); static VkResult (*pvkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *, VkSurfaceCapabilities2KHR *); static VkResult (*pvkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice, VkSurfaceKHR, VkSurfaceCapabilitiesKHR *); static VkResult (*pvkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR *, uint32_t *, VkSurfaceFormat2KHR *); static VkResult (*pvkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice, VkSurfaceKHR, uint32_t *, VkSurfaceFormatKHR *); static VkResult (*pvkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice, VkSurfaceKHR, uint32_t *, VkPresentModeKHR *); static VkResult (*pvkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice, uint32_t, VkSurfaceKHR, VkBool32 *); static VkBool32 (*pvkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice, uint32_t, struct wl_display *); static VkResult (*pvkGetSwapchainImagesKHR)(VkDevice, VkSwapchainKHR, uint32_t *, VkImage *); static VkResult (*pvkQueuePresentKHR)(VkQueue, const VkPresentInfoKHR *); static void *vulkan_handle; static void wine_vk_init(void) { vulkan_handle = dlopen(SONAME_LIBVULKAN, RTLD_NOW); if (!vulkan_handle) { ERR("Failed to load vulkan library\n"); return; } #define LOAD_FUNCPTR(f) if (!(p##f = dlsym(vulkan_handle, #f))) goto fail; #define LOAD_OPTIONAL_FUNCPTR(f) p##f = 
dlsym(vulkan_handle, #f); LOAD_FUNCPTR(vkCreateInstance); LOAD_FUNCPTR(vkCreateSwapchainKHR); LOAD_FUNCPTR(vkCreateWaylandSurfaceKHR); LOAD_FUNCPTR(vkDestroyInstance); LOAD_FUNCPTR(vkDestroySurfaceKHR); LOAD_FUNCPTR(vkDestroySwapchainKHR); LOAD_FUNCPTR(vkEnumerateInstanceExtensionProperties); LOAD_FUNCPTR(vkGetDeviceProcAddr); LOAD_FUNCPTR(vkGetInstanceProcAddr); LOAD_OPTIONAL_FUNCPTR(vkGetPhysicalDeviceSurfaceCapabilities2KHR); LOAD_FUNCPTR(vkGetPhysicalDeviceSurfaceCapabilitiesKHR); LOAD_OPTIONAL_FUNCPTR(vkGetPhysicalDeviceSurfaceFormats2KHR); LOAD_FUNCPTR(vkGetPhysicalDeviceSurfaceFormatsKHR); LOAD_FUNCPTR(vkGetPhysicalDeviceSurfacePresentModesKHR); LOAD_FUNCPTR(vkGetPhysicalDeviceSurfaceSupportKHR); LOAD_FUNCPTR(vkGetPhysicalDeviceWaylandPresentationSupportKHR); LOAD_FUNCPTR(vkGetSwapchainImagesKHR); LOAD_FUNCPTR(vkQueuePresentKHR); LOAD_OPTIONAL_FUNCPTR(vkGetDeviceGroupSurfacePresentModesKHR); LOAD_OPTIONAL_FUNCPTR(vkGetPhysicalDevicePresentRectanglesKHR); #undef LOAD_FUNCPTR #undef LOAD_OPTIONAL_FUNCPTR return; fail: TRACE("VULKAN closed \n"); dlclose(vulkan_handle); vulkan_handle = NULL; return; } static struct wl_surface_win_data *wl_surface_data_context[32768] = {0}; static inline int context_wl_idx( struct wl_surface *wl_surface ) { return LOWORD( wl_surface ) >> 1; } static struct wl_surface_win_data *alloc_wl_win_data( struct wl_surface *surface, HWND hwnd, struct wayland_window *window ) { struct wl_surface_win_data *data; if ((data = calloc(1, sizeof(*data) ) )) { TRACE("Surface %d \n", context_wl_idx( surface )); data->hwnd = hwnd; //data->wayland_subsurface = surface; data->wayland_surface = surface; data->wayland_window = window; wl_surface_data_context[context_wl_idx(surface)] = data; } return data; } /*********************************************************************** * free_win_data */ static void free_wl_win_data( struct wl_surface_win_data *data ) { wl_surface_data_context[context_wl_idx( data->wayland_surface )] = NULL; 
//LeaveCriticalSection( &win_data_section ); free( data ); } /*********************************************************************** * get_wl_win_data * * Lock and return the data structure associated with a window. */ static struct wl_surface_win_data *get_wl_win_data( struct wl_surface *surface ) { struct wl_surface_win_data *data; if (!surface) return NULL; if ((data = wl_surface_data_context[context_wl_idx(surface)])) { return data; } return NULL; } //Cursors and cursor cache struct cursor_cache { HCURSOR handle; /* cursor that this cache belongs to */ uint32_t *cached_pixels; int width; int height; int xhotspot; int yhotspot; }; static inline int cursor_idx( HCURSOR handle ) { return LOWORD( handle ) >> 1; } static void alloc_cursor_cache( HCURSOR handle ) { struct cursor_cache *data; if ((data = calloc(1, sizeof(*data)))) { global_cursor_cache[cursor_idx(handle)] = data; } } //End Cursor cache //GDI win data struct gdi_win_data { HWND hwnd; /* hwnd that this private data belongs to */ HWND parent; /* parent hwnd for child windows */ RECT window_rect; /* USER window rectangle relative to parent */ RECT whole_rect; /* X window rectangle for the whole window relative to parent */ RECT client_rect; /* client area relative to parent */ struct wayland_window *window; /* native window wrapper that forwards calls to the desktop process */ struct window_surface *surface; struct wl_subsurface *wayland_subsurface; struct wl_surface *wayland_surface; void *shm_data; struct wl_shm_pool *wl_pool; struct wl_buffer *buffer; int gdi_fd; int surface_changed; int size_changed; int window_width; int window_height; int buffer_busy; int size; }; static struct gdi_win_data *win_data_context[32768]; static void set_surface_region( struct window_surface *window_surface, HRGN win_region ); struct gdi_window_surface { struct window_surface header; HWND hwnd; struct wayland_window *window; struct wl_subsurface *wayland_subsurface; struct wl_surface *wayland_surface; void *shm_data; 
    struct wl_shm_pool *wl_pool;
    int gdi_fd;
    RECT bounds;
    RGNDATA *region_data;
    HRGN region;
    BYTE alpha;
    COLORREF color_key;
    void *bits;
    CRITICAL_SECTION crit;
    BITMAPINFO info;   /* variable size, must be last */
};

// listeners
static struct gdi_win_data *get_win_data( HWND hwnd );

/* wl_buffer release callback: the compositor is done with the buffer, so
   clear the busy flags (per-window when we can resolve the HWND, and the
   global one unconditionally). The buffer itself is not destroyed here. */
static void buffer_release(void *data, struct wl_buffer *buffer)
{
    HWND hwnd = data;
    struct gdi_win_data *hwnd_data;
    if ( hwnd != NULL)
    {
        hwnd_data = get_win_data( hwnd );
        if(hwnd_data) hwnd_data->buffer_busy = 0;
    }
    is_buffer_busy = 0;
    //wl_buffer_destroy(buffer);
}

static const struct wl_buffer_listener buffer_listener = {
    buffer_release
};

/* Pointer entered one of our surfaces: remember the serial (needed for
   wl_pointer_set_cursor), route focus/activation to the owning HWND
   (from the surface user data, or the global Vulkan window), and re-hide
   the cursor if it was hidden before an alt-tab. */
void wayland_pointer_enter_cb(void *data, struct wl_pointer *pointer, uint32_t serial, struct wl_surface *surface, wl_fixed_t sx, wl_fixed_t sy)
{
    HWND temp;
#if 0
    struct wl_surface_win_data *hwnd_data;
    TRACE("Surface %p \n", surface );
    TRACE("Surface %d \n", context_wl_idx( surface ));
    if ( hwnd_data = get_wl_win_data( surface ))
    {
        TRACE("Data found \n");
        TRACE("Current hwnd is %p and surface %p %p \n", hwnd_data->hwnd, hwnd_data->wayland_subsurface, surface);
    }
#endif
    wayland_serial_id = serial;
    temp = wl_surface_get_user_data(surface);
    if(temp)
    {
        TRACE("Current hwnd is %p and surface %p \n", temp, surface);
        global_update_hwnd = temp;
        /* only top-level windows get focus/activation */
        if (NtUserGetAncestor(temp, GA_PARENT) == NtUserGetDesktopWindow())
        {
            NtUserSetFocus(temp);
            NtUserSetActiveWindow(temp);
            NtUserSetForegroundWindow(temp);
        }
    }
    else if (vulkan_window != NULL && vulkan_window->surface == surface && global_vulkan_hwnd != NULL)
    {
        //TRACE("Current vulkan hwnd is %p and surface %p \n", global_vulkan_hwnd, surface);
        NtUserSetActiveWindow( global_vulkan_hwnd );
        NtUserSetForegroundWindow( global_vulkan_hwnd );
        NtUserSetFocus(global_vulkan_hwnd);
    }
    global_last_cursor_change = 0;
    //Remove cursor if it's hidden on alt-tab
    if(!global_is_cursor_visible)
    {
        wl_pointer_set_cursor(wayland_pointer, wayland_serial_id, NULL, 0, 0);
    }
}

/* Pointer left our surface: nothing to do. */
void wayland_pointer_leave_cb(void *data, struct wl_pointer *pointer,
                              uint32_t serial, struct wl_surface *surface)
{
}

/* Pointer motion handler for the Vulkan-only path: converts surface-local
   coordinates to screen coordinates (adding the Vulkan window origin, with
   optional FSR coordinate scaling) and forwards an absolute mouse move to
   the wineserver. Does nothing while the pointer is confined/locked —
   relative motion is delivered via relative_pointer_handle_motion() then. */
void wayland_pointer_motion_cb_vulkan(void *data, struct wl_pointer *pointer, uint32_t time, wl_fixed_t sx, wl_fixed_t sy)
{
    POINT point;
    POINT client_point;
    if(global_wayland_confine)
    {
        return;
    }
    global_input.mi.dx = wl_fixed_to_int(sx);
    global_input.mi.dy = wl_fixed_to_int(sy);
    //It takes some time before correct rect is returned
    if(global_vulkan_rect_flag < 3)
    {
        global_vulkan_rect_flag++;
        NtUserGetWindowRect(global_vulkan_hwnd, &global_vulkan_rect);
    }
    //SetWindowPos crashes Unity if used elsewhere
    /* force the window to origin (0,0) so surface-local == screen coords */
    if(global_vulkan_rect.left != 0 || global_vulkan_rect.top != 0)
    {
        NtUserSetWindowPos( global_vulkan_hwnd, HWND_TOP, 0, 0, 0, 0, SWP_NOZORDER | SWP_NOSIZE | SWP_NOSENDCHANGING);
        NtUserGetWindowRect(global_vulkan_hwnd, &global_vulkan_rect);
    }
    if(global_fsr)
    {
        /* FSR upscaling: map window origin to real coords, add it, then map
           the summed position back to user (game-visible) coordinates. */
        //RECT title_rect;
        client_point.x = global_vulkan_rect.left;
        client_point.y = global_vulkan_rect.top;
        if(client_point.x != 0 || client_point.y != 0)
        {
            fsr_user_to_real(&client_point);
        }
        global_input.mi.dx = global_input.mi.dx + client_point.x;
        global_input.mi.dy = global_input.mi.dy + client_point.y;
        point.x = global_input.mi.dx;
        point.y = global_input.mi.dy;
        fsr_real_to_user(&point);
        global_input.mi.dx = point.x;
        global_input.mi.dy = point.y;
#if 0
        TRACE("Motion (x y - x y %d %d %d %d) %s %s \n", wl_fixed_to_int(sx), wl_fixed_to_int(sy), global_input.mi.dx, global_input.mi.dy, wine_dbgstr_rect( &global_vulkan_rect ), wine_dbgstr_rect( &title_rect ) );
#endif
    }
    else
    {
        global_input.mi.dx = global_input.mi.dx + global_vulkan_rect.left;
        global_input.mi.dy = global_input.mi.dy + global_vulkan_rect.top;
    }
    /* remember last absolute position for button/wheel events */
    global_sx = global_input.mi.dx;
    global_sy = global_input.mi.dy;
    SERVER_START_REQ( send_hardware_message )
    {
        req->win = wine_server_user_handle( global_vulkan_hwnd );
        req->flags = 0;
        req->input.type = INPUT_MOUSE;
        req->input.mouse.x = global_input.mi.dx;
        req->input.mouse.y = global_input.mi.dy;
        req->input.mouse.data = 0;
        req->input.mouse.flags = MOUSEEVENTF_MOVE | MOUSEEVENTF_ABSOLUTE;
        req->input.mouse.time = 0;
        req->input.mouse.info = 0;
        wine_server_call( req );
    }
    SERVER_END_REQ;
}

int global_last_sx, global_last_sy = 0;

/* Pointer motion handler for the GDI path: offsets surface-local coordinates
   by the rect of the window the pointer is over (global_update_hwnd) and
   forwards an absolute move to the wineserver. Delegates to the Vulkan
   variant when a Vulkan window exists. */
void wayland_pointer_motion_cb(void *data, struct wl_pointer *pointer, uint32_t time, wl_fixed_t sx, wl_fixed_t sy)
{
    HWND hwnd;
    RECT rect;
    if(global_vulkan_hwnd)
    {
        return wayland_pointer_motion_cb_vulkan(data, pointer, time, sx, sy);
    }
#if 0
    if(global_gdi_position_changing > 0)
    {
        if(global_gdi_position_changing == 1)
        {
            global_last_sx = 0;
            global_last_sy = 0;
            global_gdi_position_changing = 2;
            global_sx = wl_fixed_to_int(sx);
            global_sy = wl_fixed_to_int(sy);
            return;
        }
        else if(global_gdi_position_changing == 2)
        {
            global_last_sx = wl_fixed_to_int(sx) - global_sx;
            global_last_sy = wl_fixed_to_int(sy) - global_sy;
        }
    }
#endif
    global_input.mi.dx = wl_fixed_to_int(sx);
    global_input.mi.dy = wl_fixed_to_int(sy);
    global_sx = global_input.mi.dx;
    global_sy = global_input.mi.dy;
    hwnd = global_update_hwnd;
    NtUserGetWindowRect(hwnd, &rect);
    global_input.mi.dx = global_input.mi.dx + rect.left;
    global_input.mi.dy = global_input.mi.dy + rect.top;
#if 0
    TRACE("Motion x y %d %d %s hwnd %p \n", wl_fixed_to_int(sx), wl_fixed_to_int(sy), wine_dbgstr_rect( &rect ), hwnd);
#endif
#if 0
    if(global_gdi_position_changing == 2)
    {
        global_input.mi.dx = global_last_sx + rect.left;
        /* NOTE(review): dead code (#if 0), but this line assigns to mi.dx
           where mi.dy was presumably intended — fix before re-enabling. */
        global_input.mi.dx = global_last_sy + rect.top;
        TRACE("Rel. Motion x y %d %d \n", global_last_sx, global_last_sy );
    }
#endif
    //TRACE("Motion x y %d %d \n", global_sx, global_sy);
    SERVER_START_REQ( send_hardware_message )
    {
        req->win = wine_server_user_handle( hwnd );
        req->flags = 0;
        req->input.type = INPUT_MOUSE;
        req->input.mouse.x = global_input.mi.dx;
        req->input.mouse.y = global_input.mi.dy;
        req->input.mouse.data = 0;
        req->input.mouse.flags = MOUSEEVENTF_MOVE | MOUSEEVENTF_ABSOLUTE;
        req->input.mouse.time = 0;
        req->input.mouse.info = 0;
        wine_server_call( req );
    }
    SERVER_END_REQ;
}

/* Button press/release handler for the Vulkan path: translates evdev BTN_*
   codes into MOUSEEVENTF_* flags at the last known pointer position and
   forwards the event to the wineserver. While confined, sends a positionless
   (relative) event instead. */
void wayland_pointer_button_cb_vulkan(void *data, struct wl_pointer *pointer, uint32_t serial, uint32_t time, uint32_t button, uint32_t state)
{
    HWND hwnd;
    INPUT input;
    input.type = INPUT_MOUSE;
    input.mi.dx = (int)global_sx;
    input.mi.dy = (int)global_sy;
    input.mi.mouseData = 0;
    input.mi.dwFlags = MOUSEEVENTF_MOVE | MOUSEEVENTF_ABSOLUTE;
    if(global_wayland_confine)
    {
        input.mi.dx = 0;
        input.mi.dy = 0;
        input.mi.dwFlags = 0;
    }
    hwnd = global_vulkan_hwnd;
    //TRACE("Button code %p \n", button);
    switch (button)
    {
    case BTN_LEFT:
        if(state == WL_POINTER_BUTTON_STATE_PRESSED)
        {
            input.mi.dwFlags |= MOUSEEVENTF_LEFTDOWN;
        }
        else if(state == WL_POINTER_BUTTON_STATE_RELEASED)
        {
            input.mi.dwFlags |= MOUSEEVENTF_LEFTUP;
        }
        break;
    case BTN_MIDDLE:
        if(state == WL_POINTER_BUTTON_STATE_PRESSED) input.mi.dwFlags |= MOUSEEVENTF_MIDDLEDOWN;
        else if(state == WL_POINTER_BUTTON_STATE_RELEASED) input.mi.dwFlags |= MOUSEEVENTF_MIDDLEUP;
        break;
    case BTN_RIGHT:
        if(state == WL_POINTER_BUTTON_STATE_PRESSED) input.mi.dwFlags |= MOUSEEVENTF_RIGHTDOWN;
        else if(state == WL_POINTER_BUTTON_STATE_RELEASED) input.mi.dwFlags |= MOUSEEVENTF_RIGHTUP;
        break;
    case BTN_EXTRA:
    case BTN_FORWARD:
        TRACE("Forward Click \n");
        if(state == WL_POINTER_BUTTON_STATE_PRESSED) input.mi.dwFlags |= MOUSEEVENTF_XDOWN;
        else if(state == WL_POINTER_BUTTON_STATE_RELEASED) input.mi.dwFlags |= MOUSEEVENTF_XUP;
        input.mi.mouseData = XBUTTON1;
        break;
    case BTN_BACK:
    case BTN_SIDE:
        TRACE("Back Click \n");
        if(state ==
           WL_POINTER_BUTTON_STATE_PRESSED) input.mi.dwFlags |= MOUSEEVENTF_XDOWN;
        else if(state == WL_POINTER_BUTTON_STATE_RELEASED) input.mi.dwFlags |= MOUSEEVENTF_XUP;
        input.mi.mouseData = XBUTTON2;
        break;
    default:
        break;
    }
    SERVER_START_REQ( send_hardware_message )
    {
        req->win = wine_server_user_handle( hwnd );
        req->flags = 0;
        req->input.type = input.type;
        req->input.mouse.x = input.mi.dx;
        req->input.mouse.y = input.mi.dy;
        /* NOTE(review): data is hardcoded to 0, so input.mi.mouseData
           (XBUTTON1/XBUTTON2 set above) never reaches the server — X-button
           events lose which button was pressed; verify intended. */
        req->input.mouse.data = 0;
        req->input.mouse.flags = input.mi.dwFlags;
        req->input.mouse.time = 0;
        req->input.mouse.info = 0;
        wine_server_call( req );
    }
    SERVER_END_REQ;
}

/* Button press/release handler for the GDI path: like the Vulkan variant but
   offsets the position by the hovered window's rect and also tracks left
   button hold state in global_gdi_lb_hold. Delegates to the Vulkan variant
   when a Vulkan window exists. */
void wayland_pointer_button_cb(void *data, struct wl_pointer *pointer, uint32_t serial, uint32_t time, uint32_t button, uint32_t state)
{
    HWND hwnd;
    RECT rect;
    INPUT input;
    //Support running without WINE_VK_VULKAN_ONLY
    if(global_vulkan_hwnd)
    {
        return wayland_pointer_button_cb_vulkan(data, pointer, serial, time, button, state);
    }
    input.type = INPUT_MOUSE;
    input.mi.dx = (int)global_sx;
    input.mi.dy = (int)global_sy;
    input.mi.mouseData = 0;
    input.mi.dwFlags = MOUSEEVENTF_MOVE | MOUSEEVENTF_ABSOLUTE;
    if(global_wayland_confine)
    {
        input.mi.dx = 0;
        input.mi.dy = 0;
        input.mi.dwFlags = 0;
    }
    switch (button)
    {
    case BTN_LEFT:
        if(state == WL_POINTER_BUTTON_STATE_PRESSED)
        {
            input.mi.dwFlags |= MOUSEEVENTF_LEFTDOWN;
            global_gdi_lb_hold = 1;
        }
        else if(state == WL_POINTER_BUTTON_STATE_RELEASED)
        {
            input.mi.dwFlags |= MOUSEEVENTF_LEFTUP;
            global_gdi_lb_hold = 0;
        }
        break;
    case BTN_MIDDLE:
        if(state == WL_POINTER_BUTTON_STATE_PRESSED) input.mi.dwFlags |= MOUSEEVENTF_MIDDLEDOWN;
        else if(state == WL_POINTER_BUTTON_STATE_RELEASED) input.mi.dwFlags |= MOUSEEVENTF_MIDDLEUP;
        break;
    case BTN_RIGHT:
        if(state == WL_POINTER_BUTTON_STATE_PRESSED)
        {
            input.mi.dwFlags |= MOUSEEVENTF_RIGHTDOWN;
        }
        else if(state == WL_POINTER_BUTTON_STATE_RELEASED)
        {
            input.mi.dwFlags |= MOUSEEVENTF_RIGHTUP;
        }
        break;
    default:
        break;
    }
    hwnd = global_update_hwnd;
    NtUserGetWindowRect(global_update_hwnd, &rect);
    TRACE("Click x y %d %d %s \n", input.mi.dx, input.mi.dy, wine_dbgstr_rect( &rect ));
    input.mi.dx = input.mi.dx + rect.left;
    input.mi.dy = input.mi.dy + rect.top;
    TRACE("Click x y %d %d %s \n", input.mi.dx, input.mi.dy, wine_dbgstr_rect( &rect ));
    SERVER_START_REQ( send_hardware_message )
    {
        req->win = wine_server_user_handle( hwnd );
        req->flags = 0;
        req->input.type = input.type;
        req->input.mouse.x = input.mi.dx;
        req->input.mouse.y = input.mi.dy;
        req->input.mouse.data = 0;
        req->input.mouse.flags = input.mi.dwFlags;
        req->input.mouse.time = 0;
        req->input.mouse.info = 0;
        wine_server_call( req );
    }
    SERVER_END_REQ;
}

/* Remaining wl_pointer events: frame/axis-source/axis-stop are ignored,
   axis-discrete is only traced. */
static void wayland_pointer_frame_cb(void *data, struct wl_pointer *wl_pointer)
{
    //do nothing
}

static void wayland_pointer_axis_source_cb(void *data, struct wl_pointer *wl_pointer, uint32_t axis_source)
{
    TRACE("Pointer axis source \n");
    //do nothing
}

static void wayland_pointer_axis_stop_cb(void *data, struct wl_pointer *wl_pointer, uint32_t time, uint32_t axis)
{
    TRACE("Pointer axis stop \n");
    //do nothing
}

static void wayland_pointer_axis_discrete_cb(void *data, struct wl_pointer *wl_pointer, uint32_t axis, int32_t discrete)
{
    TRACE("Motion Wheel discrete %d \n", discrete);
}

//Mouse wheel
/* Scroll events: forwarded as a single +/- WHEEL_DELTA wheel message at the
   last known pointer position (positionless while confined). Always targets
   the Vulkan HWND. */
void wayland_pointer_axis_cb(void *data, struct wl_pointer *pointer, uint32_t time, uint32_t axis, wl_fixed_t value)
{
    SERVER_START_REQ( send_hardware_message )
    {
        req->win = wine_server_user_handle( global_vulkan_hwnd );
        req->flags = 0;
        req->input.type = INPUT_MOUSE;
        req->input.mouse.x = global_sx;
        req->input.mouse.y = global_sy;
        req->input.mouse.data = value > 0 ?
                -WHEEL_DELTA : WHEEL_DELTA;
        req->input.mouse.flags = MOUSEEVENTF_MOVE | MOUSEEVENTF_ABSOLUTE | MOUSEEVENTF_WHEEL;
        if(global_wayland_confine)
        {
            req->input.mouse.flags = MOUSEEVENTF_WHEEL;
            req->input.mouse.x = 0;
            req->input.mouse.y = 0;
        }
        req->input.mouse.time = 0;
        req->input.mouse.info = 0;
        wine_server_call( req );
    }
    SERVER_END_REQ;
}

//relative pointer for locked surface
/* Relative motion events delivered while the pointer is locked: forward the
   accelerated deltas as a relative MOUSEEVENTF_MOVE to the Vulkan HWND. */
static void relative_pointer_handle_motion(void *data, struct zwp_relative_pointer_v1 *pointer, uint32_t utime_hi, uint32_t utime_lo, wl_fixed_t dx, wl_fixed_t dy, wl_fixed_t dx_unaccel, wl_fixed_t dy_unaccel)
{
    INPUT input;
    input.type = INPUT_MOUSE;
    input.mi.mouseData = 0;
    input.mi.time = 0;
    input.mi.dwExtraInfo = 0;
    input.mi.dwFlags = MOUSEEVENTF_MOVE;
    //Slows mouse
#if 0
    if(global_fsr)
    {
        POINT fsr_point;
        double x, y;
        x = wl_fixed_to_double(dx);
        y = wl_fixed_to_double(dy);
        fsr_real_to_user_relative(&x, &y);
        input.mi.dx = x;
        input.mi.dy = y;
    }
    else
    {
        input.mi.dx = wl_fixed_to_double(dx);
        input.mi.dy = wl_fixed_to_double(dy);
    }
#endif
    input.mi.dx = wl_fixed_to_double(dx);
    input.mi.dy = wl_fixed_to_double(dy);
    __wine_send_input( global_vulkan_hwnd, &input, NULL );
}

static const struct zwp_relative_pointer_v1_listener relative_pointer_listener = {
    relative_pointer_handle_motion,
};

//relative pointer for locked surface
/* Lock the pointer to the Vulkan surface (persistent lifetime) and start
   receiving relative motion events. No-op when already confined.
   NOTE(review): dereferences vulkan_window unconditionally — callers must
   only use this on the Vulkan path; verify. */
void grab_wayland_screen(void)
{
    if(!global_wayland_confine)
    {
        global_wayland_confine = 1;
        locked_pointer = zwp_pointer_constraints_v1_lock_pointer( pointer_constraints, vulkan_window->surface, wayland_pointer, NULL,ZWP_POINTER_CONSTRAINTS_V1_LIFETIME_PERSISTENT);
        relative_pointer = zwp_relative_pointer_manager_v1_get_relative_pointer(relative_pointer_manager, wayland_pointer);
        zwp_relative_pointer_v1_add_listener(relative_pointer, &relative_pointer_listener, NULL);
        wl_surface_commit(vulkan_window->surface);
    }
}

/* Undo grab_wayland_screen(): destroy the lock and relative-pointer objects
   and clear the confine flag. No-op when not confined. */
void ungrab_wayland_screen(void)
{
    if(global_wayland_confine)
    {
        if(locked_pointer) zwp_locked_pointer_v1_destroy(locked_pointer);
        if(relative_pointer)
            zwp_relative_pointer_v1_destroy(relative_pointer);
        locked_pointer = NULL;
        relative_pointer = NULL;
        global_wayland_confine = 0;
    }
}

/* Keymap event: ignored (the driver uses its own keycode tables). */
void wayland_keyboard_keymap_cb(void *data, struct wl_keyboard *keyboard, uint32_t format, int fd, uint32_t size)
{
}

void wayland_keyboard_enter_cb(void *data, struct wl_keyboard *keyboard, uint32_t serial, struct wl_surface *surface, struct wl_array *keys)
{
    TRACE( "keyboard_event: Entered \n" );
}

void wayland_keyboard_leave_cb(void *data, struct wl_keyboard *keyboard, uint32_t serial, struct wl_surface *surface)
{
}

//https://stackoverflow.com/questions/8161741/handling-keyboard-input-in-win32-wm-char-or-wm-keydown-wm-keyup
//https://stackoverflow.com/questions/44897991/wm-keydown-repeat-count
/* Key press/release: map the evdev keycode to a Windows VK + scancode via
   the driver's lookup tables and inject it into the focused HWND. On key
   release, also handle driver hotkeys: F11 = fullscreen, F10 = toggle
   pointer confine (Vulkan or GDI window), F9 = toggle one-shot pointer
   lock, F8 = minimize. */
void wayland_keyboard_key_cb (void *data, struct wl_keyboard *keyboard, uint32_t serial, uint32_t time, uint32_t keycode, uint32_t state)
{
    INPUT input;
    HWND hwnd;
    //TRACE( "keyboard_event: %u keycode \n", keycode );
    //if ((unsigned int)keycode >= sizeof(keycode_to_vkey)/sizeof(keycode_to_vkey[0]) || !keycode_to_vkey[keycode]) {
    //TRACE( "keyboard_event: code %u unmapped key, ignoring \n", keycode );
    //}
    input.type = INPUT_KEYBOARD;
    input.ki.wVk = keycode_to_vkey[(unsigned int)keycode];
    if(!input.ki.wVk)
    {
        /* unmapped key: drop it */
        return;
    }
    input.ki.wScan = vkey_to_scancode[(int)input.ki.wVk];
    input.ki.time = 0;
    input.ki.dwExtraInfo = 0;
    input.ki.dwFlags = (input.ki.wScan & 0x100) ? KEYEVENTF_EXTENDEDKEY : 0;
    //TRACE("keyboard_event: code %u vkey %x scan %x meta %x \n",
    //      keycode, input.ki.wVk, input.ki.wScan, state );
    input.type = INPUT_KEYBOARD;
    hwnd = global_update_hwnd;
    if(global_vulkan_hwnd)
    {
        hwnd = global_vulkan_hwnd;
    }
    if (state == WL_KEYBOARD_KEY_STATE_RELEASED)
    {
        input.ki.dwFlags |= KEYEVENTF_KEYUP;
    }
    __wine_send_input( hwnd, &input, NULL );
    /* hotkeys below only trigger on release */
    if(state != WL_KEYBOARD_KEY_STATE_RELEASED)
    {
        return;
    }
    switch (keycode)
    {
    case KEY_F11:
        /* toggle on (only) compositor fullscreen for the Vulkan window */
        if(!global_wayland_full)
        {
            global_wait_for_configure = 1;
            xdg_toplevel_set_fullscreen(vulkan_window->xdg_toplevel, NULL);
            wl_surface_commit(vulkan_window->surface);
            wl_display_flush (wayland_display);
            while(global_wait_for_configure)
            {
                /* NOTE(review): sleep() takes unsigned seconds, so sleep(0.3)
                   truncates to sleep(0) — presumably meant as a short delay
                   (e.g. usleep); verify. */
                sleep(0.3);
                wl_display_dispatch(wayland_display);
            }
            global_wayland_full = 1;
        }
        break;
    case KEY_F10:
        /* toggle pointer confine + relative motion on whichever window exists */
        if(!global_wayland_confine)
        {
            global_wayland_confine = 1;
            if(vulkan_window)
            {
                TRACE("Enabling grab \n");
                grab_wayland_screen();
                TRACE("Enabling grab done \n");
            }
            else if(gdi_window)
            {
                confined_pointer = zwp_pointer_constraints_v1_confine_pointer( pointer_constraints, gdi_window->surface, wayland_pointer, NULL, ZWP_POINTER_CONSTRAINTS_V1_LIFETIME_PERSISTENT);
                relative_pointer = zwp_relative_pointer_manager_v1_get_relative_pointer(relative_pointer_manager, wayland_pointer);
                zwp_relative_pointer_v1_add_listener(relative_pointer, &relative_pointer_listener, NULL);
                wl_surface_commit(gdi_window->surface);
            }
            //hide cursor
            wl_pointer_set_cursor(wayland_pointer, wayland_serial_id, NULL, 0, 0);
            wl_surface_commit(wayland_cursor_surface);
        }
        else
        {
            zwp_locked_pointer_v1_destroy(locked_pointer);
            zwp_relative_pointer_v1_destroy(relative_pointer);
            locked_pointer = NULL;
            relative_pointer = NULL;
            global_wayland_confine = 0;
        }
        break;
    case KEY_F8:
        //lock pointer
        if(vulkan_window) xdg_toplevel_set_minimized(vulkan_window->xdg_toplevel);
        else if(gdi_window && gdi_window->xdg_toplevel) xdg_toplevel_set_minimized(gdi_window->xdg_toplevel);
        break;
    case KEY_F9:
        //lock pointer
        if(!global_wayland_confine)
        {
            global_wayland_confine = 1;
            locked_pointer = zwp_pointer_constraints_v1_lock_pointer( pointer_constraints, vulkan_window->surface, wayland_pointer, NULL,ZWP_POINTER_CONSTRAINTS_V1_LIFETIME_ONESHOT);
            relative_pointer = zwp_relative_pointer_manager_v1_get_relative_pointer(relative_pointer_manager, wayland_pointer);
            zwp_relative_pointer_v1_add_listener(relative_pointer, &relative_pointer_listener, NULL);
            wl_pointer_set_cursor(wayland_pointer, wayland_serial_id, NULL, 0, 0);
            wl_surface_commit(vulkan_window->surface);
        }
        else
        {
            if(confined_pointer) zwp_confined_pointer_v1_destroy(confined_pointer);
            if(relative_pointer) zwp_relative_pointer_v1_destroy(relative_pointer);
            locked_pointer = NULL;
            relative_pointer = NULL;
            global_wayland_confine = 0;
        }
        break;
    //end F9
    default:
        break;
    }
}

/* Modifier and repeat-rate events: ignored. */
void wayland_keyboard_modifiers_cb(void *data, struct wl_keyboard *keyboard, uint32_t serial, uint32_t mods_depressed, uint32_t mods_latched, uint32_t mods_locked, uint32_t group)
{
}

void wayland_keyboard_repeat_info(void* data, struct wl_keyboard *wl_keyboard, int rate, int delay)
{
}

static void seat_handle_name(void *data, struct wl_seat *wl_seat, const char *name)
{
}

/* Pointer listener vtables for the two input paths (GDI vs Vulkan-only). */
static const struct wl_pointer_listener pointer_listener_gdi = {
    wayland_pointer_enter_cb,
    wayland_pointer_leave_cb,
    wayland_pointer_motion_cb,
    wayland_pointer_button_cb,
    wayland_pointer_axis_cb,
    wayland_pointer_frame_cb,
    wayland_pointer_axis_source_cb,
    wayland_pointer_axis_stop_cb,
    wayland_pointer_axis_discrete_cb,
};

static const struct wl_pointer_listener pointer_listener_vulkan = {
    wayland_pointer_enter_cb,
    wayland_pointer_leave_cb,
    wayland_pointer_motion_cb_vulkan,
    wayland_pointer_button_cb_vulkan,
    wayland_pointer_axis_cb,
    wayland_pointer_frame_cb,
    wayland_pointer_axis_source_cb,
    wayland_pointer_axis_stop_cb,
    wayland_pointer_axis_discrete_cb,
};

/* Seat capability callback: (de)register pointer and keyboard devices, read
   the WINE_VK_* environment switches, and pick the GDI or Vulkan pointer
   listener. */
static void seat_caps_cb(void *data, struct wl_seat *seat, enum wl_seat_capability caps)
{
    char *is_vulkan;
    char *env_hide_cursor;
    char *env_fullscreen_grab_cursor;
    char
         *env_no_clip_cursor;
    char *env_use_custom_cursors;
    char *env_use_fsr;
    if ((caps & WL_SEAT_CAPABILITY_POINTER) && !wayland_pointer)
    {
        wayland_pointer = wl_seat_get_pointer(seat);
        is_vulkan = getenv( "WINE_VK_VULKAN_ONLY" );
        //Some games want to use their cursor
        env_hide_cursor = getenv( "WINE_VK_HIDE_CURSOR" );
        //Some games want to grab cursor when ClipCursor is passed fullscreen rect
        env_fullscreen_grab_cursor = getenv( "WINE_VK_FULLSCREEN_GRAB_CURSOR" );
        //Some games need ClipCursor disabled
        env_no_clip_cursor = getenv( "WINE_VK_NO_CLIP_CURSOR" );
        //Some games use custom cursors
        env_use_custom_cursors = getenv( "WINE_VK_USE_CUSTOM_CURSORS" );
        //Upscale
        env_use_fsr = getenv( "WINE_VK_USE_FSR" );
        if(env_hide_cursor)
        {
            global_hide_cursor = 1;
        }
        if(env_no_clip_cursor)
        {
            global_disable_clip_cursor = 1;
        }
        if(env_fullscreen_grab_cursor)
        {
            global_fullscreen_grab_cursor = 1;
        }
        if(env_use_custom_cursors)
        {
            global_custom_cursors = 1;
        }
        if(env_use_fsr)
        {
            global_fsr = 1;
            global_fsr_set = 1;
            global_is_always_fullscreen = 1; //enable fullscreen for FSR
        }
        /* choose the input path: GDI listener unless Vulkan-only is requested */
        if(!is_vulkan && !global_is_vulkan)
        {
            wl_pointer_add_listener(wayland_pointer, &pointer_listener_gdi, NULL);
        }
        else
        {
            wl_pointer_add_listener(wayland_pointer, &pointer_listener_vulkan, NULL);
        }
    }
    else if (!(caps & WL_SEAT_CAPABILITY_POINTER) && wayland_pointer)
    {
        wl_pointer_destroy(wayland_pointer);
        wayland_pointer = NULL;
    }
    if ((caps & WL_SEAT_CAPABILITY_KEYBOARD) && !wayland_keyboard)
    {
        wayland_keyboard = wl_seat_get_keyboard(seat);
        static const struct wl_keyboard_listener keyboard_listener = {
            wayland_keyboard_keymap_cb,
            wayland_keyboard_enter_cb,
            wayland_keyboard_leave_cb,
            wayland_keyboard_key_cb,
            wayland_keyboard_modifiers_cb,
            wayland_keyboard_repeat_info,
        };
        wl_keyboard_add_listener(wayland_keyboard, &keyboard_listener, NULL);
    }
    else if (!(caps & WL_SEAT_CAPABILITY_KEYBOARD) && wayland_keyboard)
    {
        wl_keyboard_destroy(wayland_keyboard);
        wayland_keyboard = NULL;
    }
}

/* SHM pixel-format announcement: ignored. */
void shm_format(void *data, struct wl_shm *wl_shm, uint32_t
                format)
{
}

struct wl_shm_listener shm_listener = {
    shm_format
};

/* Compositor liveness ping: must pong or the client is deemed unresponsive. */
static void xdg_wm_base_ping(void *data, struct xdg_wm_base *wm_base, uint32_t serial)
{
    xdg_wm_base_pong(wm_base, serial);
}

static const struct xdg_wm_base_listener xdg_wm_base_listener = {
    xdg_wm_base_ping,
};

//wl output
static void display_handle_geometry(void *data, struct wl_output *wl_output, int x, int y, int physical_width, int physical_height, int subpixel, const char *make, const char *model, int transform)
{
    //Do nothing
}

/* Record the current mode of the first output as the global screen size. */
static void display_handle_mode(void *data, struct wl_output *wl_output, uint32_t flags, int width, int height, int refresh)
{
    if (global_first_wl_output && wl_output == global_first_wl_output && (flags & WL_OUTPUT_MODE_CURRENT))
    {
        global_output_width = width;
        global_output_height = height;
        TRACE("Found output with WxH %d %d \n", global_output_width, global_output_height);
    }
}

static void display_handle_done(void *data, struct wl_output *wl_output)
{
}

static void display_handle_scale(void *data, struct wl_output *wl_output, int32_t scale)
{
}

static const struct wl_output_listener output_listener = {
    display_handle_geometry,
    display_handle_mode,
    display_handle_done,
    display_handle_scale,
};

/* Registry announcement: bind every global interface the driver uses
   (compositor, subcompositor, xdg_wm_base, seat, pointer constraints,
   relative pointer manager, shm + cursor theme, first output). */
static void registry_add_object (void *data, struct wl_registry *registry, uint32_t name, const char *wl_interface, uint32_t version)
{
    const char *cursor_theme;
    const char *cursor_size_str;
    int cursor_size = 32;   /* default when XCURSOR_SIZE is unset */
    if (!strcmp(wl_interface,"wl_compositor"))
    {
        wayland_compositor = wl_registry_bind (registry, name, &wl_compositor_interface, 4);
        //Sway calls wl_shm before wl_compositor
        if(wayland_compositor && !wayland_cursor_surface)
        {
            wayland_cursor_surface = wl_compositor_create_surface(wayland_compositor);
        }
    }
    else if (strcmp(wl_interface, "wl_subcompositor") == 0)
    {
        wayland_subcompositor = wl_registry_bind(registry, name, &wl_subcompositor_interface, 1);
    }
    else if (strcmp(wl_interface, "xdg_wm_base") == 0)
    {
        wm_base = wl_registry_bind(registry, name, &xdg_wm_base_interface, 2);
        xdg_wm_base_add_listener(wm_base, &xdg_wm_base_listener, NULL);
    }
    else if (!strcmp(wl_interface, "wl_seat"))
    {
        wayland_seat = (struct wl_seat *) wl_registry_bind(registry, name, &wl_seat_interface, WINE_WAYLAND_SEAT_VERSION);
        static const struct wl_seat_listener seat_listener = {
            seat_caps_cb, seat_handle_name
        };
        wl_seat_add_listener(wayland_seat, &seat_listener, data);
    }
    else if (strcmp(wl_interface, "zwp_pointer_constraints_v1") == 0)
    {
        pointer_constraints = wl_registry_bind(registry, name, &zwp_pointer_constraints_v1_interface, 1);
    }
    else if (strcmp(wl_interface, "zwp_relative_pointer_manager_v1") == 0)
    {
        relative_pointer_manager = wl_registry_bind(registry, name, &zwp_relative_pointer_manager_v1_interface, 1);
    }
    else if (strcmp(wl_interface, "wl_shm") == 0)
    {
        cursor_theme = getenv("XCURSOR_THEME");
        cursor_size_str = getenv("XCURSOR_SIZE");
        if (cursor_size_str)
        {
            cursor_size = atoi(cursor_size_str);
        }
        /* NOTE(review): this branch is a no-op — getenv already returned
           NULL when the variable is unset. */
        if (!cursor_theme)
        {
            cursor_theme = NULL;
        }
        global_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
        wl_shm_add_listener(global_shm, &shm_listener, NULL);
        /* separate shm binding for the cursor theme */
        wayland_cursor_shm = wl_registry_bind(registry, name, &wl_shm_interface, 1);
        wayland_cursor_theme = wl_cursor_theme_load(cursor_theme, cursor_size, wayland_cursor_shm);
        wayland_default_cursor = wl_cursor_theme_get_cursor(wayland_cursor_theme, "left_ptr");
        //Sway calls wl_shm before wl_compositor
        if(wayland_compositor && !wayland_cursor_surface)
            wayland_cursor_surface = wl_compositor_create_surface(wayland_compositor);
    }
    else if (strcmp(wl_interface, "wl_output") == 0)
    {
        global_first_wl_output = wl_registry_bind(registry, name, &wl_output_interface, 2);
        wl_output_add_listener(global_first_wl_output, &output_listener, NULL);
    }
}

static void registry_remove_object (void *data, struct wl_registry *registry, uint32_t name)
{
}

static struct wl_registry_listener registry_listener = {&registry_add_object, &registry_remove_object};

/* xdg_surface configure: ack it and release anyone busy-waiting on
   global_wait_for_configure. */
static void handle_xdg_surface_configure(void *data, struct xdg_surface
                *surface, uint32_t serial)
{
    TRACE( "Surface configured \n" );
    global_wait_for_configure = 0;
    xdg_surface_ack_configure(surface, serial);
}

static void handle_xdg_toplevel_configure(void *data, struct xdg_toplevel *toplevel, int32_t width, int32_t height, struct wl_array *states)
{
    //do nothing
}

static void handle_xdg_toplevel_close(void *data, struct xdg_toplevel *xdg_toplevel)
{
}

static const struct xdg_toplevel_listener xdg_toplevel_listener = {
    handle_xdg_toplevel_configure,
    handle_xdg_toplevel_close,
};

static const struct xdg_surface_listener xdg_surface_listener = {
    handle_xdg_surface_configure
};

/* store the display fd into the message queue */
/* Wrap the Wayland display fd in a server handle and hand it to the
   wineserver message queue so the queue wakes up on display events.
   Runs at most once (guarded by `done`); exits the process on failure. */
static void set_queue_display_fd( int esync_fd )
{
    HANDLE handle;
    static int done = 0;
    int ret;
    if(done)
    {
        return;
    }
    done = 1;
    if (wine_server_fd_to_handle( esync_fd, GENERIC_READ | SYNCHRONIZE, 0, &handle ))
    {
        TRACE( "waylanddrv: Can't allocate handle for display fd\n" );
        exit(1);
    }
    SERVER_START_REQ( set_queue_fd )
    {
        req->handle = wine_server_obj_handle( handle );
        ret = wine_server_call( req );
    }
    SERVER_END_REQ;
    if (ret)
    {
        MESSAGE( "waylanddrv: Can't store handle for display fd\n" );
        exit(1);
    }
    NtClose( handle );
}

/* Connect to the Wayland display (once per process, guarded by desktop_tid),
   read WINE_VK_ALWAYS_FULLSCREEN, bind all registry globals via three
   roundtrips, and register the display fd with the message queue.
   Exits the process when no compositor is reachable. */
static void create_wayland_display (void)
{
    int fd = 0;
    char *env_is_always_fullscreen;
    struct wl_registry *registry = NULL;
    if(desktop_tid) return;
    desktop_tid = GetCurrentThreadId();
    wayland_display = wl_display_connect (NULL);
    if(!wayland_display)
    {
        TRACE("wayland display is not working \n");
        exit(1);
        return;
    }
    //Automate fullscreen
    env_is_always_fullscreen = getenv( "WINE_VK_ALWAYS_FULLSCREEN" );
    if(env_is_always_fullscreen)
    {
        TRACE("Is always fullscreen \n");
        global_is_always_fullscreen = 1;
    }
    registry = wl_display_get_registry (wayland_display);
    wl_registry_add_listener (registry, &registry_listener, NULL);
    wl_display_roundtrip (wayland_display);
    wl_display_roundtrip (wayland_display);
    wl_display_roundtrip (wayland_display);
    fd = wl_display_get_fd(wayland_display);
    if(fd)
    {
        set_queue_display_fd(fd);
    }
    TRACE("Created wayland display thread id %d \n", desktop_tid);
}

//todo add delete
/* Create an xdg-shell toplevel for the given HWND, wait (busy loop) for the
   initial configure event, and register the surface in the wl_surface map.
   NOTE(review): the malloc result is used unchecked, and sleep(0.3)
   truncates to sleep(0) — verify both are intended. */
static struct wayland_window *create_wayland_window (HWND hwnd, int32_t width, int32_t height)
{
    //struct wl_region *region;
    struct wayland_window *window = malloc(sizeof(struct wayland_window));
    global_wait_for_configure = 1;
    window->surface = wl_compositor_create_surface (wayland_compositor);
    window->xdg_surface = xdg_wm_base_get_xdg_surface(wm_base, window->surface);
    xdg_surface_add_listener(window->xdg_surface, &xdg_surface_listener, window);
    window->xdg_toplevel = xdg_surface_get_toplevel(window->xdg_surface);
    xdg_toplevel_add_listener(window->xdg_toplevel, &xdg_toplevel_listener, window);
    /*
    region = wl_compositor_create_region(wayland_compositor);
    wl_region_add(region, 0, 0, width, height);
    wl_surface_set_opaque_region(window->surface, region);
    */
    window->pointer_to_hwnd = hwnd;
    window->width = width;
    window->height = height;
    if(global_is_always_fullscreen) xdg_toplevel_set_fullscreen(window->xdg_toplevel, NULL);
    wl_surface_commit(window->surface);
    wl_display_flush (wayland_display);
    while(global_wait_for_configure)
    {
        sleep(0.3);
        wl_display_dispatch(wayland_display);
    }
    alloc_wl_win_data(window->surface, hwnd, window);
    TRACE("Created wayland window %p \n", window);
    return window;
}

/* Tear down a window created by create_wayland_window(): unregister its
   surface record and destroy the xdg/wl objects.
   NOTE(review): the wayland_window struct itself is not free()d here —
   only the local pointer is nulled; presumably a leak, verify. */
static void delete_wayland_window (struct wayland_window *window)
{
    struct wl_surface_win_data *data;
    data = get_wl_win_data(window->surface);
    free_wl_win_data(data);
    TRACE("Deleting wayland window %p \n", window);
    if (window->xdg_toplevel) xdg_toplevel_destroy(window->xdg_toplevel);
    if (window->xdg_surface) xdg_surface_destroy(window->xdg_surface);
    wl_surface_destroy (window->surface);
    wl_display_dispatch(wayland_display);
    window = NULL;
    desktop_tid = 0;
}

/* Attach a fresh SHM buffer (sized from WINE_VK_WAYLAND_WIDTH/HEIGHT, default
   1600x900) to the main GDI window surface, lazily creating the backing
   memfd, mmap and wl_shm_pool on first use. */
static void draw_gdi_wayland_window (struct wayland_window *window)
{
    RECT rect;
    struct wl_region *region;
    int screen_width = 1600;
    int screen_height = 900;
    char *env_width = NULL;
    char *env_height = NULL;
    struct
           wl_buffer *buffer = NULL;
    int stride = 0;
    if(!wayland_display)
    {
        return;
    }
    if(is_buffer_busy)
    {
        /* compositor still holds the previous buffer; skip this frame */
        return;
    }
    TRACE( "Creating/Resetting main gdi wayland surface \n" );
    env_width = getenv( "WINE_VK_WAYLAND_WIDTH" );
    env_height = getenv( "WINE_VK_WAYLAND_HEIGHT" );
    NtUserGetWindowRect(window->pointer_to_hwnd, &rect);
    if(env_width)
    {
        screen_width = atoi(env_width);
    }
    if(env_height)
    {
        screen_height = atoi(env_height);
    }
    TRACE( "creating gdi window for hwnd %p wxh %dx%d \n", window->pointer_to_hwnd, screen_width, screen_height );
    stride = screen_width * 4; // 4 bytes per pixel
    global_gdi_size = stride * screen_height;
    if(!global_gdi_fd)
    {
        TRACE( "creating gdi fd \n" );
        global_gdi_fd = memfd_create("wine-shared", MFD_CLOEXEC | MFD_ALLOW_SEALING);
        if (global_gdi_fd >= 0)
        {
            fcntl(global_gdi_fd, F_ADD_SEALS, F_SEAL_SHRINK);
        }
        else
        {
            exit(1);
        }
        /* NOTE(review): posix_fallocate return value is ignored — a failure
           would surface later as SIGBUS on the mmap; verify acceptable. */
        posix_fallocate(global_gdi_fd, 0, global_gdi_size);
    }
    //MAP_SHARED
    if(!global_shm_data)
    {
        void *shm_data = mmap(NULL, global_gdi_size, PROT_READ | PROT_WRITE, MAP_SHARED, global_gdi_fd, 0);
        if (shm_data == MAP_FAILED)
        {
            fprintf(stderr, "mmap failed: %m\n");
            close(global_gdi_fd);
            return;
        }
        else
        {
            global_shm_data = shm_data;
        }
        TRACE( "creating wl_shm_data \n" );
    }
    if(!global_wl_pool)
    {
        TRACE( "creating wl_pool \n" );
        global_wl_pool = wl_shm_create_pool(global_shm, global_gdi_fd, global_gdi_size);
    }
    is_buffer_busy = 1;
    buffer = wl_shm_pool_create_buffer(global_wl_pool, 0, screen_width, screen_height, stride, WL_SHM_FORMAT_ARGB8888);
    wl_buffer_add_listener(buffer, &buffer_listener, NULL);
    wl_surface_attach(window->surface, buffer, 0, 0);
    /* shrink the input region to 1x1: input is handled via the pointer
       callbacks, not surface-local hit testing */
    region = wl_compositor_create_region(wayland_compositor);
    wl_region_add(region, 0, 0, 1, 1);
    wl_surface_set_input_region(window->surface, region);
    wl_surface_damage(window->surface, 0, 0, screen_width, screen_height);
    wl_surface_commit(window->surface);
}

/***********************************************************************
 *		ClipCursor (WAYLANDDRV.@)
 */
/* Map Win32 ClipCursor onto Wayland pointer constraints: a NULL clip releases
   the grab (restoring the default cursor unless hidden by env), a clip
   smaller than the virtual screen grabs, and a fullscreen clip grabs or
   releases depending on WINE_VK_FULLSCREEN_GRAB_CURSOR and cursor
   visibility. Only active on the Vulkan path. */
BOOL WAYLANDDRV_ClipCursor( LPCRECT clip )
{
    RECT virtual_rect = get_virtual_screen_rect();
    if(!global_is_vulkan || global_disable_clip_cursor)
    {
        return TRUE;
    }
    if (!clip )
    {
        TRACE( "Release Mouse Capture Called \n" );
        if(global_wayland_confine)
        {
            struct wl_cursor_image *image;
            struct wl_buffer *buffer;
            zwp_locked_pointer_v1_destroy(locked_pointer);
            zwp_relative_pointer_v1_destroy(relative_pointer);
            locked_pointer = NULL;
            relative_pointer = NULL;
            global_wayland_confine = 0;
            //show mouse if it's not hidden by env variable
            if(!global_hide_cursor && !global_custom_cursors)
            {
                image = wayland_default_cursor->images[0];
                buffer = wl_cursor_image_get_buffer(image);
                wl_pointer_set_cursor(wayland_pointer, wayland_serial_id, wayland_cursor_surface, image->hotspot_x, image->hotspot_y);
                wl_surface_attach(wayland_cursor_surface, buffer, 0, 0);
                wl_surface_damage(wayland_cursor_surface, 0, 0, image->width, image->height);
                wl_surface_commit(wayland_cursor_surface);
            }
        }
        return TRUE;
    }
#if 0
    TRACE( "virtual rect %s clip rect %s\n", wine_dbgstr_rect(&virtual_rect), wine_dbgstr_rect(clip) );
#endif
    // we are clipping if the clip rectangle is smaller than the screen
    if (clip->left > virtual_rect.left || clip->right < virtual_rect.right || clip->top > virtual_rect.top || clip->bottom < virtual_rect.bottom)
    {
        TRACE( "Set Mouse Capture %s \n", wine_dbgstr_rect(clip) );
        grab_wayland_screen();
    }
    else // if currently clipping, check if we should switch to fullscreen clipping
    {
        if ( global_fullscreen_grab_cursor && !global_is_cursor_visible )
        {
            //grab cursor if clip equals to desktop
            TRACE( "Set Mouse Capture - fullscreen grab \n" );
            grab_wayland_screen();
            return TRUE;
        }
        else if ( global_fullscreen_grab_cursor && global_is_cursor_visible )
        {
            TRACE( "Remove Mouse Capture - fullscreen grab \n" );
            ungrab_wayland_screen();
            TRACE( "Remove Mouse Capture - fullscreen grab done \n" );
            return TRUE;
        }
        //Release grab instead
        if(!global_hide_cursor)
        {
            TRACE( "Release Mouse Capture #2 \n" );
            ungrab_wayland_screen();
        }
    }
    return TRUE;
}

static uint32_t
/* get_bitmap_argb: convert a cursor's color + mask GDI bitmaps into a
 * freshly malloc'd 32-bit ARGB pixel array (caller frees).  On success
 * *width/*height receive the bitmap dimensions; on failure they are set
 * to 0 and NULL is returned.  (The "static uint32_t" return type is on
 * the preceding line.) */
*get_bitmap_argb( HDC hdc, HBITMAP color, HBITMAP mask, unsigned int *width, unsigned int *height )
{
    char buffer[FIELD_OFFSET( BITMAPINFO, bmiColors[256] )];
    BITMAPINFO *info = (BITMAPINFO *)buffer;
    BITMAP bm;
    uint32_t *ptr, *bits = NULL;
    unsigned char *mask_bits = NULL;
    int i, j;
    BOOL has_alpha = FALSE;
    int red, green, blue, alpha;
    int cClrBits = 0;
    unsigned int width_bytes = 0;

    if (!color) return NULL;
    if (!NtGdiExtGetObjectW( color, sizeof(bm), &bm )) return NULL;

    /* Request a top-down (negative height) 32bpp DIB. */
    info->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    info->bmiHeader.biWidth = bm.bmWidth;
    info->bmiHeader.biHeight = -bm.bmHeight;
    info->bmiHeader.biPlanes = 1;
    info->bmiHeader.biBitCount = 32;
    info->bmiHeader.biCompression = BI_RGB;
    info->bmiHeader.biSizeImage = bm.bmWidth * bm.bmHeight * 4;
    info->bmiHeader.biXPelsPerMeter = 0;
    info->bmiHeader.biYPelsPerMeter = 0;
    info->bmiHeader.biClrUsed = 0;
    info->bmiHeader.biClrImportant = 0;

    // Convert the color format to a count of bits.
    /* NOTE(review): cClrBits is only used by the TRACE below. */
    cClrBits = (WORD)(bm.bmPlanes * bm.bmBitsPixel);
    if (cClrBits == 1)
        cClrBits = 1;
    else if (cClrBits <= 4)
        cClrBits = 4;
    else if (cClrBits <= 8)
        cClrBits = 8;
    else if (cClrBits <= 16)
        cClrBits = 16;
    else if (cClrBits <= 24)
        cClrBits = 24;
    else
        cClrBits = 32;

    TRACE("Got %d format for cursor \n", cClrBits);

    if (!(bits = malloc( bm.bmWidth * bm.bmHeight * sizeof(unsigned int) ))) goto failed;
    // if (!GetDIBits( hdc, color, 0, bm.bmHeight, bits, info, DIB_RGB_COLORS )) goto failed;
    if (!NtGdiGetDIBitsInternal( hdc, color, 0, bm.bmHeight, bits, info, DIB_RGB_COLORS , 0, 0)) goto failed;

    *width = bm.bmWidth;
    *height = bm.bmHeight;

    /* Detect whether the color bitmap carries any alpha at all. */
    for (i = 0; i < bm.bmWidth * bm.bmHeight; i++)
        if ((has_alpha = (bits[i] & 0xff000000) != 0)) break;

    ptr = bits;

    if (!has_alpha)
    {
        TRACE("No alpha channel for cursor \n");
        /* 1bpp rows padded to 32-bit boundaries, in bytes. */
        width_bytes = (bm.bmWidth + 31) / 32 * 4;
        /* generate alpha channel from the mask */
        info->bmiHeader.biBitCount = 1;
        info->bmiHeader.biSizeImage = width_bytes * bm.bmHeight;
        if (!(mask_bits = malloc( info->bmiHeader.biSizeImage ))) goto failed;
        if (!NtGdiGetDIBitsInternal( hdc, mask, 0, bm.bmHeight, mask_bits, info, DIB_RGB_COLORS, 0, 0 )) goto failed;
        /* Mask bit clear => pixel is opaque. */
        for (i = 0; i < bm.bmHeight; i++)
        {
            for (j = 0; j < bm.bmWidth; j++, ptr++)
            {
                if (!((mask_bits[i * width_bytes + j / 8] << (j % 8)) & 0x80)) *ptr |= 0xff000000;
            }
        }
        free( mask_bits );
    }

    /* Post-process: drop nearly-transparent pixels entirely.
     * NOTE(review): the second branch is unreachable -- "alpha < 26" is
     * already covered by the first condition; the "!alpha" test is likewise
     * subsumed by "alpha < 26". */
    ptr = bits;
    for (i = 0; i < bm.bmWidth * bm.bmHeight; i++, ptr++)
    {
        red = (*ptr >> 16) & 0xff;
        green = (*ptr >> 8) & 0xff;
        blue = (*ptr >> 0) & 0xff;
        alpha = (*ptr >> 24);
        if(!alpha || alpha < 26)
        {
            *ptr = 0x00000000;
        }
        else if ( (red + green + blue < 0x40) && alpha < 26 )
        {
            *ptr = 0x00000000;
        }
        else if(*ptr)
        {
            //*ptr |= 0x50000000;
            //*ptr = blue | green << 8 | red << 16 | alpha << 24;
        }
    }
    return bits;

failed:
    /* free(NULL) is a no-op, so both frees are safe regardless of which
     * allocation/goto path led here. */
    free( bits );
    free( mask_bits );
    *width = *height = 0;
    return NULL;
}

/* set_custom_cursor: display the Win32 cursor 'handle' via a wayland shm
 * buffer.  Pixel data is cached per handle in global_cursor_cache; the shm
 * pool is recreated whenever the cursor dimensions change.
 * NOTE(review): on the cache-miss early returns ("!bits", "width < 1") the
 * compatible DC created just above leaks, and the ICONINFO bitmaps from
 * NtUserGetIconInfo are never deleted -- confirm intended. */
static void set_custom_cursor( HCURSOR handle )
{
    unsigned int width = 0, height = 0;
    unsigned int xhotspot = 0, yhotspot = 0;
    ICONINFO info;
    struct wl_buffer *buffer;
    uint32_t *dest_pixels = NULL;
    uint32_t *src_pixels = NULL;
    uint32_t *bits = NULL;
    HDC hdc = NULL;
    int stride = 0;
    int size = 0;
    int y, x;
    char sprint_buffer[200];
    struct cursor_cache *cached_cursor;

    /* Fast path: reuse previously converted pixels for this handle. */
    if ((cached_cursor = global_cursor_cache[cursor_idx(handle)]))
    {
        bits = global_cursor_cache[cursor_idx( handle )]->cached_pixels;
        width = global_cursor_cache[cursor_idx( handle )]->width;
        height = global_cursor_cache[cursor_idx( handle )]->height;
        xhotspot = global_cursor_cache[cursor_idx( handle )]->xhotspot;
        yhotspot = global_cursor_cache[cursor_idx( handle )]->yhotspot;
        //TRACE("Cursor cache hit w h %d %d %d \n", width, height, cursor_idx(handle));
    }
    else
    {
        //TRACE("Cursor cache miss w h %p %d \n", handle, cursor_idx(handle));
        if (!NtUserGetIconInfo( handle, &info, NULL, NULL, NULL, 0 )) return;
        hdc = NtGdiCreateCompatibleDC( 0 );
        bits = get_bitmap_argb( hdc, info.hbmColor, info.hbmMask, &width, &height);
        if(!bits) { return; }
        if(width < 1) { return; }
        /* make sure hotspot is valid */
        if (info.xHotspot >= width || info.yHotspot >= height)
        {
            info.xHotspot = width / 2;
            info.yHotspot = height / 2;
        }
        TRACE("Cursor cache set w h %p %d - %d %d \n", handle, cursor_idx(handle), info.xHotspot, info.yHotspot );
        /* Populate the per-handle cache entry. */
        alloc_cursor_cache(handle);
        global_cursor_cache[cursor_idx( handle )]->cached_pixels = bits;
        global_cursor_cache[cursor_idx( handle )]->handle = handle;
        global_cursor_cache[cursor_idx( handle )]->width = width;
        global_cursor_cache[cursor_idx( handle )]->height = height;
        global_cursor_cache[cursor_idx( handle )]->xhotspot = info.xHotspot;
        global_cursor_cache[cursor_idx( handle )]->yhotspot = info.yHotspot;
        xhotspot = info.xHotspot;
        yhotspot = info.yHotspot;
        NtGdiDeleteObjectApp( hdc );
    }

    //TRACE("Cursor width is %d %d\n", width, height);
    stride = width * 4; // 4 bytes per pixel
    size = stride * height;

    /* Recreate the cursor shm file/mapping/pool when the size changes. */
    if(width != global_cursor_width || height != global_cursor_height)
    {
        if(global_cursor_gdi_fd) { close(global_cursor_gdi_fd); }
        sprintf(sprint_buffer, "wine-shared-cursor-%d", width);
        TRACE( "creating gdi fd %s \n", sprint_buffer);
        global_cursor_gdi_fd = memfd_create(sprint_buffer, MFD_CLOEXEC | MFD_ALLOW_SEALING);
        if (global_cursor_gdi_fd >= 0)
        {
            fcntl(global_cursor_gdi_fd, F_ADD_SEALS, F_SEAL_SHRINK);
        }
        else
        {
            exit(1); /* NOTE(review): hard process exit on memfd failure */
        }
        posix_fallocate(global_cursor_gdi_fd, 0, size);
        /* Unmap the previous mapping before replacing the globals. */
        if(global_cursor_shm_data)
        {
            munmap(global_cursor_shm_data, global_cursor_width * 4 * global_cursor_height);
        }
        global_cursor_width = width;
        global_cursor_height = height;
        //MAP_SHARED
        global_cursor_shm_data = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, global_cursor_gdi_fd, 0);
        if (global_cursor_shm_data == MAP_FAILED)
        {
            fprintf(stderr, "mmap failed: %m\n");
            close(global_cursor_gdi_fd);
            exit(1);
        }
        TRACE( "creating cursor wl_shm_data \n" );
        if(global_cursor_pool) { wl_shm_pool_destroy(global_cursor_pool); }
        global_cursor_pool = wl_shm_create_pool(global_shm, global_cursor_gdi_fd, size);
    }

    //compositor may not be ready
    if(!wayland_compositor || !wayland_cursor_surface) return;

    /* Copy the ARGB pixels row by row into the shared memory and attach. */
    dest_pixels = (uint32_t *)global_cursor_shm_data;
    src_pixels = bits;
    buffer = wl_shm_pool_create_buffer(global_cursor_pool, 0, width, height, stride, WL_SHM_FORMAT_ARGB8888);
    wl_buffer_add_listener(buffer, &buffer_listener, NULL);
    wl_surface_attach(wayland_cursor_surface, buffer, 0, 0);

    for (y = 0; y < height ; y++)
    {
        for (x = 0; x < width; x++)
        {
            dest_pixels[x] = src_pixels[x];
        }
        src_pixels += width;
        dest_pixels += width;
    }

    wl_surface_damage(wayland_cursor_surface, 0, 0, width, height);
    wl_surface_commit(wayland_cursor_surface);
    wl_pointer_set_cursor(wayland_pointer, wayland_serial_id, wayland_cursor_surface, xhotspot, yhotspot);
    //TRACE( "DONE set_custom_cursor \n" );
}

/* WAYLANDDRV_SetCursor: driver entry point -- show the given cursor
 * (default theme image or, with global_custom_cursors, the converted
 * Win32 cursor), or hide the pointer when handle is NULL.  Tracks
 * visibility in global_is_cursor_visible / global_last_cursor_change. */
void WAYLANDDRV_SetCursor( HCURSOR handle )
{
    /* Cursor handling disabled, or wayland cursor theme not loaded yet. */
    if(global_hide_cursor || !wayland_default_cursor)
    {
        return;
    }
    if(handle)
    {
        struct wl_cursor_image *image;
        struct wl_buffer *buffer;
        //show mouse
        if( !global_is_cursor_visible || !global_last_cursor_change || ( global_custom_cursors && handle != global_last_cursor_handle) )
        {
            //TRACE("Showing cursor \n");
            global_last_cursor_change = 1;
            global_is_cursor_visible = 1;
            if(!global_custom_cursors)
            {
                /* Default theme cursor. */
                image = wayland_default_cursor->images[0];
                buffer = wl_cursor_image_get_buffer(image);
                wl_pointer_set_cursor(wayland_pointer, wayland_serial_id, wayland_cursor_surface, image->hotspot_x, image->hotspot_y);
                wl_surface_attach(wayland_cursor_surface, buffer, 0, 0);
                wl_surface_damage(wayland_cursor_surface, 0, 0, image->width, image->height);
                wl_surface_commit(wayland_cursor_surface);
            }
            else
            {
                global_last_cursor_handle = handle;
                set_custom_cursor( handle );
            }
            //ungrab screen if necessary
            ungrab_wayland_screen();
        }
    }
    else if(!handle && (global_is_cursor_visible || !global_last_cursor_change) )
    {
        //Remove cursor
        TRACE("Removing cursor \n");
        global_is_cursor_visible = 0;
        global_last_cursor_change = 1;
        wl_pointer_set_cursor(wayland_pointer, wayland_serial_id, NULL, 0, 0);
        wl_surface_commit(wayland_cursor_surface);
        if( global_fullscreen_grab_cursor )
        {
            grab_wayland_screen();
        }
    }
}
// End wayland
//GDI surface /* only for use on sanitized BITMAPINFO structures */ static inline int get_dib_info_size( const BITMAPINFO *info, UINT coloruse ) { if (info->bmiHeader.biCompression == BI_BITFIELDS) return sizeof(BITMAPINFOHEADER) + 3 * sizeof(DWORD); if (coloruse == DIB_PAL_COLORS) return sizeof(BITMAPINFOHEADER) + info->bmiHeader.biClrUsed * sizeof(WORD); return FIELD_OFFSET( BITMAPINFO, bmiColors[info->bmiHeader.biClrUsed] ); } static inline int get_dib_stride( int width, int bpp ) { return ((width * bpp + 31) >> 3) & ~3; } static inline int get_dib_image_size( const BITMAPINFO *info ) { return get_dib_stride( info->bmiHeader.biWidth, info->bmiHeader.biBitCount ) * abs( info->bmiHeader.biHeight ); } /* Window surface support */ static inline int context_idx( HWND hwnd ) { return LOWORD( hwnd ) >> 1; } static struct gdi_window_surface *get_gdi_surface( struct window_surface *surface ) { return (struct gdi_window_surface *)surface; } /* store the palette or color mask data in the bitmap info structure */ static void set_color_info( BITMAPINFO *info, BOOL has_alpha ) { //DWORD *colors = (DWORD *)info->bmiColors; info->bmiHeader.biSize = sizeof(info->bmiHeader); info->bmiHeader.biClrUsed = 0; info->bmiHeader.biBitCount = 32; if (has_alpha) { info->bmiHeader.biCompression = BI_RGB; return; } /* info->bmiHeader.biCompression = BI_BITFIELDS; colors[0] = 0xff0000; colors[1] = 0x00ff00; colors[2] = 0x0000ff; */ } /*********************************************************************** * alloc_gdi_win_data */ static struct gdi_win_data *alloc_gdi_win_data( HWND hwnd ) { struct gdi_win_data *data; if ((data = calloc(1, sizeof(*data)))) { data->hwnd = hwnd; data->window = gdi_window; win_data_context[context_idx(hwnd)] = data; } return data; } /*********************************************************************** * free_win_data */ static void free_win_data( struct gdi_win_data *data ) { win_data_context[context_idx( data->hwnd )] = NULL; free( data ); } 
/*********************************************************************** * get_win_data * * Lock and return the data structure associated with a window. */ static struct gdi_win_data *get_win_data( HWND hwnd ) { struct gdi_win_data *data; if (!hwnd) return NULL; if ((data = win_data_context[context_idx(hwnd)]) && data->hwnd == hwnd) { return data; } return NULL; } /*********************************************************************** * gdi_surface_lock */ static void gdi_surface_lock( struct window_surface *window_surface ) { } /*********************************************************************** * gdi_surface_unlock */ static void gdi_surface_unlock( struct window_surface *window_surface ) { } /*********************************************************************** * gdi_surface_get_bitmap_info */ static void *gdi_surface_get_bitmap_info( struct window_surface *window_surface, BITMAPINFO *info ) { struct gdi_window_surface *surface = get_gdi_surface( window_surface ); memcpy( info, &surface->info, get_dib_info_size( &surface->info, DIB_RGB_COLORS )); return surface->bits; } /*********************************************************************** * gdi_surface_get_bounds */ static RECT *gdi_surface_get_bounds( struct window_surface *window_surface ) { struct gdi_window_surface *surface = get_gdi_surface( window_surface ); return &surface->bounds; } /*********************************************************************** * gdi_surface_set_region */ static void gdi_surface_set_region( struct window_surface *window_surface, HRGN region ) { struct gdi_window_surface *surface = get_gdi_surface( window_surface ); if (!region) { if (surface->region) NtGdiDeleteObjectApp( surface->region ); surface->region = 0; } else { if (!surface->region) surface->region = NtGdiCreateRectRgn( 0, 0, 0, 0 ); NtGdiCombineRgn( surface->region, region, 0, RGN_COPY ); } set_surface_region( &surface->header, (HRGN)1 ); } 
/***********************************************************************
 *           gdi_surface_flush
 *
 * window_surface flush callback: copies the dirty part of the CPU-side
 * DIB (surface->bits) into a shared-memory wl_buffer and commits it to
 * the window's wl_subsurface.  Lazily (re)creates the memfd, mmap,
 * wl_shm pool, wl_buffer and wl_subsurface on first use or after a
 * window resize.
 */
//Basic GDI windows support - mostly not working
//https://github.com/wayland-project/weston/blob/3957863667c15bc5f1984ddc6c5967a323f41e7a/clients/simple-shm.c
//https://github.com/ricardomv/cairo-wayland/blob/master/src/shm.c
static void gdi_surface_flush( struct window_surface *window_surface )
{
    //TRACE( "GDI flush %p \n", window_surface);
    int x, y, width;
    uint32_t *src_pixels;
    uint32_t *dest_pixels;
    HWND owner;
    RECT client_rect;
    int HEIGHT = 0;
    int WIDTH = 0;
    RECT rect;
    BOOL needs_flush;
    LONG l,t;
    struct gdi_window_surface *surface = get_gdi_surface( window_surface );
    int stride = 0;
    int size = 0;
    struct gdi_win_data *hwnd_data;
    char sprint_buffer[200];

    /* GDI surfaces are not presented while a Vulkan swapchain owns the output */
    if(global_is_vulkan) { return; }
    if(!wayland_display) { return; }
    /* NOTE(review): redundant — 'surface' was already initialized above */
    surface = get_gdi_surface( window_surface );
    if(!surface) { TRACE("No surface found \n" ); return; }
    if(surface->hwnd == global_vulkan_hwnd) {
        TRACE("Global Vulkan hwnd is %p \n", surface->hwnd);
        /* NOTE(review): '&*window_surface' is just 'window_surface' */
        window_surface_release( &*window_surface );
        return;
    }
    if(!surface->hwnd) { return; }
    if (!(hwnd_data = get_win_data( surface->hwnd ))) return;
    //without global_update_hwnd gray screen on startup
    /* skip the frame while the compositor still holds our buffer */
    if(hwnd_data->buffer_busy && global_update_hwnd) { return; }

    owner = NtUserGetWindowRelative( surface->hwnd, GW_OWNER );
    NtUserGetWindowRect(surface->hwnd, &client_rect);
    WIDTH = client_rect.right - client_rect.left;
    HEIGHT = client_rect.bottom - client_rect.top;
    stride = WIDTH * 4; // 4 bytes per pixel
    size = stride * HEIGHT;

    SetRect( &rect, 0, 0, surface->header.rect.right - surface->header.rect.left,
             surface->header.rect.bottom - surface->header.rect.top );
    //Checks and reduces rect to changed areas
    needs_flush = intersect_rect( &rect, &rect, &surface->bounds );
    reset_bounds( &surface->bounds );
    //(hwnd_data->window_width == WIDTH && hwnd_data->window_height == HEIGHT)
    if (!needs_flush && hwnd_data->surface_changed < 1) { return; }
    intersect_rect( &rect, &rect, &surface->header.rect );

    /* detect a resize so the shm backing can be recreated at the new size */
    if(hwnd_data->window_width && (hwnd_data->window_width != WIDTH || hwnd_data->window_height != HEIGHT)) {
        hwnd_data->size_changed = 1;
        hwnd_data->window_width = WIDTH;
        hwnd_data->window_height = HEIGHT;
        TRACE( "Size changed %p \n", surface->hwnd);
    }

    //TODO proper cleanup
    /* tear down the old fd/pool/mapping/buffer so they are rebuilt below */
    if(hwnd_data->size_changed > 0) {
        TRACE("wl surface changed \n");
        if(hwnd_data->gdi_fd) close(hwnd_data->gdi_fd);
        if(hwnd_data->wl_pool) wl_shm_pool_destroy(hwnd_data->wl_pool);
        if(hwnd_data->shm_data && hwnd_data->size) munmap(hwnd_data->shm_data, hwnd_data->size);
        if(hwnd_data->buffer) wl_buffer_destroy(hwnd_data->buffer);
        hwnd_data->gdi_fd = 0;
        hwnd_data->shm_data = NULL;
        hwnd_data->wl_pool = NULL;
        hwnd_data->buffer = NULL;
    }
    hwnd_data->size = size;

    /* sealed memfd backing the wl_shm pool; process exits on failure */
    if(!hwnd_data->gdi_fd) {
        sprintf(sprint_buffer, "wine-shared-%p", surface->hwnd);
        TRACE( "creating gdi fd %s \n", sprint_buffer);
        hwnd_data->gdi_fd = memfd_create(sprint_buffer, MFD_CLOEXEC | MFD_ALLOW_SEALING);
        if (hwnd_data->gdi_fd >= 0) {
            fcntl(hwnd_data->gdi_fd, F_ADD_SEALS, F_SEAL_SHRINK);
        } else {
            exit(1);
        }
        posix_fallocate(hwnd_data->gdi_fd, 0, size);
    }
    //MAP_SHARED
    if(!hwnd_data->shm_data) {
        hwnd_data->shm_data = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, hwnd_data->gdi_fd, 0);
        if (hwnd_data->shm_data == MAP_FAILED) {
            fprintf(stderr, "mmap failed: %m\n");
            close(hwnd_data->gdi_fd);
            exit(1);
        }
        TRACE( "creating wl_shm_data \n" );
    }
    //TRACE("Client Rect rect %s %d %d %p \n", wine_dbgstr_rect( &client_rect ), WIDTH, HEIGHT, surface->hwnd);
    if(!hwnd_data->wl_pool) {
        TRACE( "creating wl_pool \n" );
        hwnd_data->wl_pool = wl_shm_create_pool(global_shm, hwnd_data->gdi_fd, size);
    }
    if(!hwnd_data->buffer) {
        TRACE( "creating buffer \n" );
        hwnd_data->buffer = wl_shm_pool_create_buffer(hwnd_data->wl_pool, 0, WIDTH, HEIGHT, stride, WL_SHM_FORMAT_ARGB8888);
        /* buffer_listener clears buffer_busy when the compositor releases it */
        wl_buffer_add_listener(hwnd_data->buffer, &buffer_listener, surface->hwnd);
    }
    hwnd_data->buffer_busy= 1;

    /* first flush for this window: create its wl_subsurface on the main
     * gdi_window surface and position it at the window rect */
    if(!hwnd_data->wayland_subsurface) {
        hwnd_data->window_width = WIDTH;
        hwnd_data->window_height = HEIGHT;
        hwnd_data->wayland_surface = wl_compositor_create_surface(wayland_compositor);
        hwnd_data->wayland_subsurface = wl_subcompositor_get_subsurface(wayland_subcompositor, hwnd_data->wayland_surface, gdi_window->surface);
        TRACE( "creating wl_subsurface %p %p for hwnd %p \n", hwnd_data->wayland_subsurface, hwnd_data->wayland_surface, surface->hwnd );
        wl_subsurface_set_position(hwnd_data->wayland_subsurface, client_rect.left, client_rect.top);
        wl_subsurface_set_desync(hwnd_data->wayland_subsurface);
        //if window is owned
        /* stack owned windows above their owner's surface */
        if(owner) {
            struct gdi_win_data *owner_hwnd_data;
            owner_hwnd_data = get_win_data( owner );
            if (owner_hwnd_data && owner_hwnd_data->wayland_surface)
                wl_subsurface_place_above(hwnd_data->wayland_subsurface, owner_hwnd_data->wayland_surface);
            else
                wl_subsurface_place_above(hwnd_data->wayland_subsurface, gdi_window->surface);
        }
        //alloc_wl_win_data(hwnd_data->wayland_subsurface, surface->hwnd);
        wl_surface_set_user_data(hwnd_data->wayland_surface, surface->hwnd);
        wl_surface_attach(hwnd_data->wayland_surface, hwnd_data->buffer, 0, 0);
    } else {
        wl_surface_attach(hwnd_data->wayland_surface, hwnd_data->buffer, 0, 0);
        if(hwnd_data->surface_changed) {
            wl_subsurface_set_position(hwnd_data->wayland_subsurface, client_rect.left, client_rect.top);
            //Dynamic move is currently broken
            if(!global_gdi_lb_hold) {
                TRACE("wl surface changed %d %d \n", client_rect.left, client_rect.top );
                global_gdi_position_changing = 1;
                TRACE("relative mouse move on \n");
            }
            /* parent must be committed for the subsurface move to take effect */
            wl_surface_commit(gdi_window->surface);
            //global_gdi_position_changed = 1;
            //wl_surface_damage(hwnd_data->wayland_surface, 0, 0, WIDTH, HEIGHT);
            //wl_surface_commit(hwnd_data->wayland_surface);
        }
    }

    /* copy the dirty rect from the DIB into the shm buffer, forcing the
     * alpha channel to opaque */
    src_pixels = (unsigned int *)surface->bits
        + (rect.top - surface->header.rect.top) * surface->info.bmiHeader.biWidth
        + (rect.left - surface->header.rect.left);
    dest_pixels = (unsigned int *)hwnd_data->shm_data;
    l = rect.left;
    t = rect.top;
    if(l != 0 || t != 0 ) {
        dest_pixels = (unsigned int *)hwnd_data->shm_data + (rect.top) * WIDTH + rect.left;
    }
    /* NOTE(review): 'width' is in pixels but 'stride' is in bytes (WIDTH*4),
     * so this min() is effectively a no-op clamp — TODO confirm intent */
    width = min( rect.right - rect.left, stride );
    //for (y = rect.top; y < min( HEIGHT, rect.bottom); y++)
    for (y = rect.top; y < min( HEIGHT, rect.bottom); y++) {
        for (x = 0; x < width; x++) {
            dest_pixels[x] = src_pixels[x] | 0xFF000000;
        }
        src_pixels += surface->info.bmiHeader.biWidth;
        dest_pixels += WIDTH;
    }

    hwnd_data->surface_changed = 0;
    hwnd_data->size_changed = 0;
    wl_surface_damage(hwnd_data->wayland_surface, 0, 0, WIDTH, HEIGHT);
    wl_surface_commit(hwnd_data->wayland_surface);
}

/***********************************************************************
 *           gdi_surface_destroy
 *
 * window_surface destroy callback: free the region data, clip region,
 * pixel bits and the surface itself.  The per-window Wayland objects
 * are cleaned up separately (ShowWindow/DestroyWindow).
 */
static void gdi_surface_destroy( struct window_surface *window_surface )
{
    struct gdi_window_surface *surface = get_gdi_surface( window_surface );
    struct gdi_win_data *hwnd_data;
    hwnd_data = get_win_data( surface->hwnd );
    if (hwnd_data) {
        //hwnd_data->surface_changed = 1;
    }
    TRACE( "Freeing wine surface - %p bits %p %p \n", surface, surface->bits, surface->hwnd );
    free( surface->region_data );
    if (surface->region) NtGdiDeleteObjectApp( surface->region );
    free( surface->bits );
    free( surface );
}

/* window_surface vtable for GDI-rendered windows */
static const struct window_surface_funcs gdi_surface_funcs =
{
    gdi_surface_lock,
    gdi_surface_unlock,
    gdi_surface_get_bitmap_info,
    gdi_surface_get_bounds,
    gdi_surface_set_region,
    gdi_surface_flush,
    gdi_surface_destroy
};

/***********************************************************************
 *           set_surface_region
 *
 * Recompute the surface's effective visible region: the window region
 * (queried from the server when win_region == (HRGN)1) offset into
 * surface coordinates and intersected with any client-set clip region.
 * Stores the resulting region data and marks the whole surface dirty.
 */
static void set_surface_region( struct window_surface *window_surface, HRGN win_region )
{
    struct gdi_window_surface *surface = get_gdi_surface( window_surface );
    struct gdi_win_data *win_data;
    HRGN region = win_region;
    RGNDATA *data = NULL;
    UINT size;
    int offset_x, offset_y;

    if (window_surface->funcs != &gdi_surface_funcs) return; /* we may get the null surface */
    if (!(win_data = get_win_data( surface->hwnd ))) return;

    offset_x = win_data->window_rect.left - win_data->whole_rect.left;
    offset_y = win_data->window_rect.top - win_data->whole_rect.top;

    if (win_region == (HRGN)1) /* hack: win_region == 1 means retrieve region from server */
    {
        region = NtGdiCreateRectRgn( 0, 0, win_data->window_rect.right - win_data->window_rect.left,
                                     win_data->window_rect.bottom - win_data->window_rect.top );
        if (NtUserGetWindowRgnEx( surface->hwnd, region, 0 ) == ERROR && !surface->region) goto done;
    }

    NtGdiOffsetRgn( region, offset_x, offset_y );
    if (surface->region) NtGdiCombineRgn( region, region, surface->region, RGN_AND );

    /* two-pass NtGdiGetRegionData: query size, then fetch the data */
    if (!(size = NtGdiGetRegionData( region, 0, NULL ))) goto done;
    if (!(data = calloc( 1, size )) ) goto done;
    if (!NtGdiGetRegionData( region, size, data ))
    {
        free( data );
        data = NULL;
    }

done:
    window_surface->funcs->lock( window_surface );
    free( surface->region_data );
    surface->region_data = data;
    /* mark the whole surface dirty so the next flush repaints everything */
    *window_surface->funcs->get_bounds( window_surface ) = surface->header.rect;
    window_surface->funcs->unlock( window_surface );
    if (region != win_region) NtGdiDeleteObjectApp( region );
}

/***********************************************************************
 *           create_surface
 *
 * Allocate a gdi_window_surface for 'hwnd' covering 'rect': a 32-bpp
 * top-down DIB plus lazily-created Wayland shm state (built on first
 * flush).  Returns NULL on allocation failure.
 */
static struct window_surface *create_surface( HWND hwnd, const RECT *rect,
                                              BYTE alpha, COLORREF color_key, BOOL src_alpha )
{
    struct gdi_window_surface *surface;
    int width = rect->right - rect->left, height = rect->bottom - rect->top;

    surface = calloc( 1, FIELD_OFFSET( struct gdi_window_surface, info.bmiColors[3] ));
    if (!surface) return NULL;
    /* always use the alpha path; per-call src_alpha is ignored */
    set_color_info( &surface->info, 1 );
    // set_color_info( &surface->info, src_alpha );
    surface->info.bmiHeader.biWidth = width;
    surface->info.bmiHeader.biHeight = -height; /* top-down */
    surface->info.bmiHeader.biPlanes = 1;
    surface->info.bmiHeader.biSizeImage = get_dib_image_size( &surface->info );
    surface->header.funcs = &gdi_surface_funcs;
    surface->header.rect = *rect;
    surface->header.ref = 1;
    surface->hwnd = hwnd;
    surface->window = gdi_window;
    surface->wayland_subsurface = NULL;
    surface->wayland_surface = NULL;
    surface->shm_data = NULL;
    surface->wl_pool = NULL;
    surface->gdi_fd = 0;
    surface->alpha = alpha;
    set_surface_region( &surface->header, (HRGN)1 );
    reset_bounds( &surface->bounds );
    if (!(surface->bits = malloc( surface->info.bmiHeader.biSizeImage ))) goto failed;
    TRACE( "created %p hwnd %p %s bits %p-%p\n", surface, hwnd, wine_dbgstr_rect(rect),
           surface->bits, (char *)surface->bits + surface->info.bmiHeader.biSizeImage );
    return &surface->header;

failed:
    gdi_surface_destroy( &surface->header );
    return NULL;
}

//Windows functions

/***********************************************************************
 *           do_create_gdi_data
 *
 * Decide whether an existing window should get a gdi data structure:
 * only top-level windows of the current thread qualify (no desktop,
 * no HWND_MESSAGE children, no foreign-thread windows).
 */
static int do_create_gdi_data( HWND hwnd, const RECT *window_rect, const RECT *client_rect )
{
    HWND parent;
    if (!(parent = NtUserGetAncestor( hwnd, GA_PARENT ))) return 0; /* desktop */
    /* don't create win data for HWND_MESSAGE windows */
    if (parent != NtUserGetDesktopWindow() && !NtUserGetAncestor( parent, GA_PARENT )) return 0;
    if (NtUserGetWindowThread( hwnd, NULL ) != GetCurrentThreadId()) return 0;
    return 1;
}

/***********************************************************************
 *           create_gdi_data
 *
 * Allocate and initialize the per-window driver data: rectangles from
 * the caller, all Wayland/shm state zeroed (created lazily on flush).
 * Returns NULL for desktop/HWND_MESSAGE windows or on allocation failure.
 */
static struct gdi_win_data *create_gdi_data( HWND hwnd, const RECT *window_rect, const RECT *client_rect )
{
    struct gdi_win_data *data;
    HWND parent;

    if (!(parent = NtUserGetAncestor( hwnd, GA_PARENT ))) return NULL; /* desktop or HWND_MESSAGE */
    if (!(data = alloc_gdi_win_data( hwnd ))) return NULL;

    data->parent = (parent == NtUserGetDesktopWindow()) ? 0 : parent;
    data->whole_rect = data->window_rect = *window_rect;
    data->client_rect = *client_rect;
    data->wayland_subsurface = NULL;
    data->wayland_surface = NULL;
    data->window_width = 0;
    data->window_height = 0;
    data->size = 0;
    data->shm_data = NULL;
    data->wl_pool = NULL;
    data->buffer = NULL;
    data->gdi_fd = 0;
    data->buffer_busy = 0;
    data->surface_changed = 0;
    data->size_changed = 0;
    /* NOTE(review): "/n" in this message is a typo for "\n" (runtime string,
     * left unchanged here) */
    TRACE( "created gdi_win_data for %p hwnd /n", hwnd);
    return data;
}

/* Clip 'visible_rect' to the virtual screen and align the result to a
 * 32-pixel grid (minimum 32x32), in coordinates relative to the window. */
static inline BOOL get_surface_rect( const RECT *visible_rect, RECT *surface_rect )
{
    RECT virtual_screen_rect = get_virtual_screen_rect();

    if (!intersect_rect( surface_rect, visible_rect, &virtual_screen_rect )) return FALSE;
    OffsetRect( surface_rect, -visible_rect->left, -visible_rect->top );
    surface_rect->left &= ~31;
    surface_rect->top &= ~31;
    surface_rect->right = max( surface_rect->left + 32, (surface_rect->right + 31) & ~31 );
    surface_rect->bottom = max( surface_rect->top + 32, (surface_rect->bottom + 31) & ~31 );
    return TRUE;
}

/***********************************************************************
 *           WindowPosChanging   (WAYLANDDRV.@)
 *
 * Filter which windows get a Wayland-backed GDI surface (skipping
 * message/IME/tooltip/desktop and various game-specific window classes),
 * create the main Wayland window on first use, and attach a
 * window_surface for eligible top-level windows.
 */
BOOL WAYLANDDRV_WindowPosChanging( HWND hwnd, HWND insert_after, UINT swp_flags,
                                   const RECT *window_rect, const RECT *client_rect,
                                   RECT *visible_rect, struct window_surface **surface )
{
    int count = 0;
    struct gdi_win_data *data;
    /* NOTE(review): 'key' is passed to create_surface() further down without
     * ever being assigned — confirm whether a color key was intended */
    COLORREF key;
    HWND parent;
    int do_create_surface = 0;
    int HEIGHT = 0;
    int WIDTH = 0;
    RECT window_client_rect, rect;
    WCHAR title_name[1024] = { L'\0' };
    WCHAR class_buff[64];
    UNICODE_STRING class_name = { .Buffer = class_buff, .MaximumLength = sizeof(class_buff) };
    /* window classes that must never get a Wayland surface */
    static const WCHAR desktop_class[] = {'#', '3', '2', '7', '6', '9', 0};
    static const WCHAR ole_class[] = {'O','l','e','M','a','i','n','T','h','r','e','a','d','W','n','d','C','l','a','s','s', 0};
    static const WCHAR msg_class[] = {'M','e','s','s','a','g','e', 0};
    static const WCHAR ime_class[] = {'I','M','E', 0};
    static const WCHAR tooltip_class[] = {'t','o','o','l','t','i','p','s','_', 'c','l','a','s','s','3','2', 0};
    static const WCHAR sdl_class[] = {'S','D','L','_','a','p','p', 0};
    static const WCHAR unreal_class[] = {'U','n','r','e','a','l','W','i','n','d','o','w', 0};
    //POEWindowClass
    static const WCHAR poe_class[] = {'P','O','E','W','i','n','d','o','w','C','l','a','s','s', 0};
    //OgreD3D11Wnd
    static const WCHAR ogre_class[] = {'O','g','r','e','D','3','D','1','1','W','n','d', 0};
    static const WCHAR unity_class[] = {'U','n','i','t','y','W','n','d','C','l','a','s','s', 0};
    //Unreal splash screens are not destroyed
    static const WCHAR unreal_splash_class[] = {'S','p','l','a','s','h','S','c','r','e','e','n','C','l','a','s','s', 0};
    //Shogun2 crash fix
    static const WCHAR shogun2_frame_class[] = {'S','h','o','g','u','n','2', 0};
    //Flstudio buggy splashscreen
    static const WCHAR flstudio_hwnd_class[] = { 'T','L','i','g','h','t','w','e', 'i','g','h','t','L','a','y','e','r','e','d','C','o','n','t','r','o','l', 0};
    const char *is_vulkan_only = getenv( "WINE_VK_VULKAN_ONLY" );

    if(hwnd == global_vulkan_hwnd) {
        TRACE("Removing window decorations for FSR \n");
        //For FSR
        NtUserSetWindowLongPtr(global_vulkan_hwnd, GWL_EXSTYLE, WS_EX_APPWINDOW | WS_EX_TOPMOST, 0);
        NtUserSetWindowLongPtr(global_vulkan_hwnd, GWL_STYLE, WS_POPUP | WS_VISIBLE, 0);
        //For caching global_vulkan_hwnd rect
        global_vulkan_rect_flag = 0;
        NtUserGetWindowRect(global_vulkan_hwnd, &global_vulkan_rect);
        return TRUE;
    }

    if(is_vulkan_only) { return TRUE; }
    if(hwnd == NtUserGetDesktopWindow()) { return TRUE; }

    /* only direct children of the desktop are considered */
    parent = NtUserGetAncestor(hwnd, GA_PARENT);
    if( !parent || parent != NtUserGetDesktopWindow()) { return TRUE; }

    if( NtUserGetClassName(hwnd, FALSE, &class_name )) {
        TRACE( "Changing %p %s \n", hwnd, debugstr_w(class_name.Buffer) );
        if(!wcsicmp(class_name.Buffer, msg_class)) { return TRUE; }
        if(!wcsicmp(class_name.Buffer, ole_class)) { return TRUE; }
        if(!wcsicmp(class_name.Buffer, ime_class)) { return TRUE; }
        if(!wcsicmp(class_name.Buffer, desktop_class)) { return TRUE; }
if(!wcsicmp(class_name.Buffer, tooltip_class)) { return TRUE; } if(!wcsicmp(class_name.Buffer, unreal_class)) { return TRUE; } if(!wcsicmp(class_name.Buffer, unreal_splash_class)) { return TRUE; } if(!wcsicmp(class_name.Buffer, shogun2_frame_class)) { return TRUE; } if(!wcsicmp(class_name.Buffer, flstudio_hwnd_class)) { return TRUE; } //Upscale //Wayland display may not be ready yet, test for FSR variable here as well if(getenv( "WINE_VK_USE_FSR" )) { global_fsr = 1; global_fsr_set = 1; global_is_always_fullscreen = 1; //enable fullscreen for FSR } //Remove borders for some games that refuse to do elsewhere if(global_fsr) { TRACE( "Removing window decorations %p %s \n", hwnd, debugstr_w(class_name.Buffer) ); NtUserSetWindowLongPtr(hwnd, GWL_EXSTYLE, WS_EX_APPWINDOW | WS_EX_TOPMOST, 0); NtUserSetWindowLongPtr(hwnd, GWL_STYLE, WS_POPUP | WS_VISIBLE, 0); } if(!wcsicmp(class_name.Buffer, sdl_class)) { return TRUE; } if(!wcsicmp(class_name.Buffer, poe_class)) { return TRUE; } if(!wcsicmp(class_name.Buffer, ogre_class)) { return TRUE; } if(!wcsicmp(class_name.Buffer, unity_class)) { return TRUE; } } //Get window width/height NtUserGetWindowRect(hwnd, &window_client_rect); NtUserInternalGetWindowText(hwnd, title_name, 1024); TRACE( "Changing %p %s Window title %d / %s %s rect \n", hwnd, debugstr_w(class_name.Buffer), lstrlenW( title_name ), debugstr_wn(title_name, lstrlenW( title_name )), wine_dbgstr_rect( &window_client_rect ) ); do_create_surface = do_create_gdi_data( hwnd, window_rect, client_rect ); if (!do_create_surface) { return TRUE; } if (!(swp_flags & SWP_SHOWWINDOW) && !(NtUserGetWindowLongW( hwnd, GWL_STYLE ) & WS_VISIBLE)) { return TRUE; } if (swp_flags & SWP_HIDEWINDOW) { TRACE("Window Should be hidden %s \n", debugstr_wn(title_name, lstrlenW( title_name ))); return TRUE; } if(!wayland_display) { create_wayland_display(); } WIDTH = window_client_rect.right - window_client_rect.left; HEIGHT = window_client_rect.bottom - window_client_rect.top; if(WIDTH < 1) 
WIDTH = 1440; if(HEIGHT < 1) HEIGHT = 900; //TRACE("WXH is %d %d for %p \n", WIDTH, HEIGHT, hwnd); if(wayland_display && !gdi_window) { TRACE("Creating wayland window %s %p \n", debugstr_w(class_name.Buffer), hwnd); gdi_window = create_wayland_window (hwnd, WIDTH, HEIGHT); while (!count) { sleep(0.1); wl_display_dispatch_pending (wayland_display); draw_gdi_wayland_window (gdi_window); sleep(0.1); count = 1; } } else if(wayland_display && gdi_window && global_vulkan_hwnd != NULL) { draw_gdi_wayland_window (gdi_window); } data = get_win_data( hwnd ); if(!data) { data = create_gdi_data( hwnd, window_rect, client_rect ); } else { if (*surface) { // TRACE("Surface exists \n", WIDTH, HEIGHT); // gdi_surface_destroy( *surface ); window_surface_release( *surface ); *surface = NULL; } } if(!data) { TRACE("NO DATA \n"); return TRUE; } else { if (data->surface) { /* existing surface is good enough */ window_surface_add_ref( data->surface ); if (*surface) window_surface_release( *surface ); *surface = data->surface; return TRUE; } rect = get_virtual_screen_rect(); if ( (!parent || parent == NtUserGetDesktopWindow()) ) { if (*surface) { window_surface_release( *surface ); } *surface = NULL; *surface = create_surface( data->hwnd, &rect, 255, key, FALSE ); } } return TRUE; } /*********************************************************************** * ShowWindow (WAYLANDDRV.@) */ UINT WAYLANDDRV_ShowWindow( HWND hwnd, INT cmd, RECT *rect, UINT swp ) { // WCHAR title_name[1024] = { L'\0' }; struct gdi_win_data *hwnd_data; // WCHAR class_name[64]; struct gdi_win_data *data; if(global_is_vulkan) { return swp; } TRACE( "Show Window \n"); data = get_win_data( hwnd ); if(!data) { return swp; } // if(RealGetWindowClassW(hwnd, class_name, ARRAY_SIZE(class_name))) { // NtUserInternalGetWindowText(hwnd, title_name, 1024); // TRACE( "Show/hide window %p %s title %s \n", hwnd, debugstr_w(class_name), debugstr_wn(title_name, strlenW( title_name ))); // } if(!cmd || cmd & SW_HIDE) { TRACE("Hiding 
window %d %p %p \n", cmd, hwnd, global_update_hwnd); hwnd_data = get_win_data( hwnd ); if (hwnd_data && hwnd_data->wayland_surface ) { TRACE("Hiding window %d %p clearing wayland surfaces \n", cmd, hwnd); wl_subsurface_destroy(hwnd_data->wayland_subsurface); wl_surface_destroy(hwnd_data->wayland_surface); hwnd_data->wayland_subsurface = NULL; hwnd_data->wayland_surface = NULL; wl_shm_pool_destroy(hwnd_data->wl_pool); if(hwnd_data->gdi_fd) close(hwnd_data->gdi_fd); if(hwnd_data->shm_data && hwnd_data->size) { TRACE("Clearing shm_data for %p \n", hwnd); munmap(hwnd_data->shm_data, hwnd_data->size); } if(hwnd_data->buffer) { wl_buffer_destroy(hwnd_data->buffer); } if(hwnd_data->surface) { //gdi_surface_destroy( hwnd_data->surface ); window_surface_release( hwnd_data->surface ); hwnd_data->surface = NULL; } hwnd_data->wl_pool = NULL; hwnd_data->buffer = NULL; hwnd_data->size = 0; hwnd_data->gdi_fd = 0; free_win_data(hwnd_data); } } return swp; } /*********************************************************************** * WindowPosChanged */ void WAYLANDDRV_WindowPosChanged( HWND hwnd, HWND insert_after, UINT swp_flags, const RECT *window_rect, const RECT *client_rect, const RECT *visible_rect, const RECT *valid_rects, struct window_surface *surface ) { struct gdi_win_data *hwnd_data; RECT rect; int height = 0; int width = 0; struct wl_region *region; hwnd_data = get_win_data( hwnd ); if(!hwnd_data) { return; } NtUserGetWindowRect(hwnd, &rect); width = rect.right - rect.left; height = rect.bottom - rect.top; region = wl_compositor_create_region(wayland_compositor); wl_region_add(region, rect.left, rect.top, width, height); // wl_surface_set_input_region(data->wayland_surface, region); TRACE("Adding surface for hwnd %p %d %d / rect %s \n", hwnd, rect.left, rect.top, wine_dbgstr_rect( &rect ) ); if (surface) window_surface_add_ref( surface ); if (hwnd_data->surface) { window_surface_release( hwnd_data->surface ); } hwnd_data->surface = surface; if (swp_flags & 
SWP_HIDEWINDOW) { TRACE("Window Should be hidden %p \n", hwnd); WAYLANDDRV_ShowWindow( hwnd, 0, NULL, 0 ); return; } if(hwnd_data->wayland_surface) { hwnd_data->surface_changed = 1; } } /*********************************************************************** * SysCommand */ LRESULT WAYLANDDRV_SysCommand(HWND hwnd, WPARAM wparam, LPARAM lparam) { struct gdi_win_data *hwnd_data; WPARAM command = wparam & 0xfff0; hwnd_data = get_win_data( hwnd ); if(!hwnd_data) { return -1; } if (command == SC_MOVE) { return 0; } return -1; } /********************************************************************** * CreateWindow (WAYLANDDRV.@) */ BOOL WAYLANDDRV_CreateWindow( HWND hwnd ) { WCHAR title_name[1024] = { L'\0' }; WCHAR class_buff[64]; UNICODE_STRING class_name = { .Buffer = class_buff, .MaximumLength = sizeof(class_buff) }; if( NtUserGetClassName(hwnd, FALSE, &class_name )) { NtUserInternalGetWindowText(hwnd, title_name, 1024); TRACE( "Changing %p %s Window title %d / %s \n", hwnd, debugstr_w(class_name.Buffer), lstrlenW( title_name ), debugstr_wn(title_name, lstrlenW( title_name )) ); TRACE("Creating window 1 \n"); } return TRUE; } /*********************************************************************** * DestroyWindow (WAYLANDDRV.@) */ void WAYLANDDRV_DestroyWindow( HWND hwnd ) { WCHAR title_name[1024] = { L'\0' }; WCHAR class_buff[64]; UNICODE_STRING class_name = { .Buffer = class_buff, .MaximumLength = sizeof(class_buff) }; struct gdi_win_data *hwnd_data; if( NtUserGetClassName(hwnd, FALSE, &class_name )) { NtUserInternalGetWindowText(hwnd, title_name, 1024); TRACE( "Destroying %p %s Window title %d / %s \n", hwnd, debugstr_w(class_name.Buffer), lstrlenW( title_name ), debugstr_wn(title_name, lstrlenW( title_name )) ); TRACE("Destroying window 1 \n"); } if(global_is_vulkan) { if(hwnd == global_vulkan_hwnd) { global_vulkan_hwnd = NULL; } //destroy GDI windows games create if(vulkan_window && vulkan_window->pointer_to_hwnd == hwnd) { TRACE("Destroy wayland window for hwnd 
%p \n", hwnd); delete_wayland_window(vulkan_window); vulkan_window = NULL; } else if( vulkan_window && vulkan_window->pointer_to_hwnd != hwnd ){ //try to find the window for(int ii = 0; ii < 32768; ii++ ) if(wl_surface_data_context[ii]) { if(wl_surface_data_context[ii]->hwnd == hwnd) { delete_wayland_window(wl_surface_data_context[ii]->wayland_window); } } } return; } //Clean subsurface windows data hwnd_data = get_win_data( hwnd ); if (hwnd_data && hwnd_data->wayland_surface ) { TRACE("destroying hwnd_data %p for %p \n", hwnd_data, hwnd); wl_subsurface_destroy(hwnd_data->wayland_subsurface); TRACE("hwnd_data %p for %p \n", hwnd_data, hwnd); wl_surface_destroy(hwnd_data->wayland_surface); wl_shm_pool_destroy(hwnd_data->wl_pool); if(hwnd_data->gdi_fd) close(hwnd_data->gdi_fd); if(hwnd_data->shm_data && hwnd_data->size) { TRACE("Clearing shm_data for %p \n", hwnd); munmap(hwnd_data->shm_data, hwnd_data->size); } if(hwnd_data->buffer) { wl_buffer_destroy(hwnd_data->buffer); } hwnd_data->wayland_subsurface = NULL; hwnd_data->wayland_surface = NULL; hwnd_data->wl_pool = NULL; hwnd_data->buffer = NULL; hwnd_data->size = 0; hwnd_data->gdi_fd = 0; if(hwnd_data->surface != NULL) { TRACE("Attempt clear surface %p for %p \n", hwnd_data->surface, hwnd); window_surface_release( hwnd_data->surface ); hwnd_data->surface = NULL; } free_win_data(hwnd_data); } return; } //Win32 loop callback NTSTATUS WAYLANDDRV_MsgWaitForMultipleObjectsEx( DWORD count, const HANDLE *handles, const LARGE_INTEGER *timeout, DWORD mask, DWORD flags ) { if (wayland_display && desktop_tid && GetCurrentThreadId() == desktop_tid && !global_wait_for_configure) { while (wl_display_prepare_read(wayland_display) != 0) { wl_display_dispatch_pending(wayland_display); } wl_display_flush(wayland_display); wl_display_read_events(wayland_display); wl_display_dispatch_pending(wayland_display); } return NtWaitForMultipleObjects( count, handles, !(flags & MWMO_WAITALL), !!(flags & MWMO_ALERTABLE), timeout ); } //Windows 
functions /* Helper function for converting between win32 and X11 compatible VkInstanceCreateInfo. * Caller is responsible for allocation and cleanup of 'dst'. */ static VkResult wine_vk_instance_convert_create_info(const VkInstanceCreateInfo *src, VkInstanceCreateInfo *dst) { unsigned int i; const char **enabled_extensions = NULL; dst->sType = src->sType; dst->flags = src->flags; dst->pApplicationInfo = src->pApplicationInfo; dst->pNext = src->pNext; dst->enabledLayerCount = 0; dst->ppEnabledLayerNames = NULL; dst->enabledExtensionCount = 0; dst->ppEnabledExtensionNames = NULL; if (src->enabledExtensionCount > 0) { enabled_extensions = calloc(src->enabledExtensionCount, sizeof(*src->ppEnabledExtensionNames)); if (!enabled_extensions) { ERR("Failed to allocate memory for enabled extensions\n"); return VK_ERROR_OUT_OF_HOST_MEMORY; } for (i = 0; i < src->enabledExtensionCount; i++) { /* Substitute extension with X11 ones else copy. Long-term, when we * support more extensions, we should store these in a list. */ if (!strcmp(src->ppEnabledExtensionNames[i], "VK_KHR_win32_surface")) { enabled_extensions[i] = "VK_KHR_wayland_surface"; } else { enabled_extensions[i] = src->ppEnabledExtensionNames[i]; } } dst->ppEnabledExtensionNames = enabled_extensions; dst->enabledExtensionCount = src->enabledExtensionCount; } return VK_SUCCESS; } static VkSurfaceKHR WAYLANDDRV_wine_get_native_surface(VkSurfaceKHR surface) { return surface; } static VkResult WAYLANDDRV_vkCreateInstance(const VkInstanceCreateInfo *create_info, const VkAllocationCallbacks *allocator, VkInstance *instance) { VkInstanceCreateInfo create_info_host; VkResult res; /* Perform a second pass on converting VkInstanceCreateInfo. Winevulkan * performed a first pass in which it handles everything except for WSI * functionality such as VK_KHR_win32_surface. Handle this now. 
*/ res = wine_vk_instance_convert_create_info(create_info, &create_info_host); if (res != VK_SUCCESS) { ERR("Failed to convert instance create info, res=%d\n", res); return res; } res = pvkCreateInstance(&create_info_host, NULL /* allocator */, instance); if(res == VK_SUCCESS) { global_vk_instance = instance; TRACE("Create global_vk_instance create_info %p, allocator %p, instance %p\n", create_info, allocator, instance); } free((void *)create_info_host.ppEnabledExtensionNames); return res; } static VkResult WAYLANDDRV_vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *create_info, const VkAllocationCallbacks *allocator, VkSwapchainKHR *swapchain) { RECT window_rect; global_vulkan_rect_flag = 0; //FSR //TRACE("%p %p %p %p\n", device, create_info, allocator, swapchain); TRACE("Vulkan swapchain rect %d %d \n", create_info->imageExtent.width, create_info->imageExtent.height ); if(global_vulkan_hwnd && global_fsr && !fsr_matches_real_mode( create_info->imageExtent.width, create_info->imageExtent.height ) ) { NtUserGetClientRect(global_vulkan_hwnd, &window_rect); TRACE("FSR Vulkan hwnd rect %d %d \n", create_info->imageExtent.width, create_info->imageExtent.height ); fsr_set_current_mode(create_info->imageExtent.width, create_info->imageExtent.height); } if (allocator) FIXME("Support for allocation callbacks not implemented yet\n"); return pvkCreateSwapchainKHR(device, create_info, NULL /* allocator */, swapchain); } /*************************************************************************** * get_basename * * Return the base name of a file name (i.e. remove the path components). 
*/ //TODO #if 0 static const WCHAR *get_basename( const WCHAR *name ) { const WCHAR *ptr; if (name[0] && name[1] == ':') name += 2; /* strip drive specification */ if ((ptr = strrchrW( name, '\\' ))) name = ptr + 1; if ((ptr = strrchrW( name, '/' ))) name = ptr + 1; return name; } #endif static VkResult WAYLANDDRV_vkCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *create_info, const VkAllocationCallbacks *allocator, VkSurfaceKHR *surface) { VkResult res; VkWaylandSurfaceCreateInfoKHR create_info_host; int no_flag = 1; int count = 0; int screen_width = 1920; int screen_height = 1080; RECT window_rect; char *env_width = NULL; char *env_height = NULL; WCHAR class_buff[64]; UNICODE_STRING class_name = { .Buffer = class_buff, .MaximumLength = sizeof(class_buff) }; //Hack //Do not create vulkan windows for Paradox detect static const WCHAR pdx_class[] = {'P','d','x','D','e','t','e','c','t','W','i','n','d','o','w', 0}; TRACE("Vulkan hwnd %d \n", no_flag); if( NtUserGetClassName(create_info->hwnd, FALSE, &class_name )) { if(!wcsicmp(class_name.Buffer, pdx_class)) { no_flag = 0; } } if(no_flag) { TRACE("Creating wayland display early \n"); if(!wayland_display) { create_wayland_display(); } TRACE("Vulkan hwnd 1 \n" ); env_width = getenv( "WINE_VK_WAYLAND_WIDTH" ); env_height = getenv( "WINE_VK_WAYLAND_HEIGHT" ); if(global_output_width > 0 && global_output_height > 0) { screen_width = global_output_width; screen_height = global_output_height; } if(env_width) { screen_width = atoi(env_width); } if(env_height) { screen_height = atoi(env_height); } TRACE("hwnd hxw %d %d \n", screen_width, screen_height); global_vulkan_hwnd = create_info->hwnd; NtUserSetActiveWindow( global_vulkan_hwnd ); NtUserSetForegroundWindow( global_vulkan_hwnd ); NtUserSetFocus(global_vulkan_hwnd); if(global_fsr) { NtUserGetClientRect(global_vulkan_hwnd, &window_rect); NtUserSetWindowLongPtr(global_vulkan_hwnd, GWL_EXSTYLE, WS_EX_APPWINDOW | WS_EX_TOPMOST, 0); 
NtUserSetWindowLongPtr(global_vulkan_hwnd, GWL_STYLE, WS_POPUP | WS_VISIBLE, 0); TRACE("Vulkan hwnd rect %s \n", wine_dbgstr_rect( &window_rect )); TRACE("Vulkan hwnd set to borderless %s \n", wine_dbgstr_rect( &window_rect )); fsr_set_current_mode(window_rect.right, window_rect.bottom); } SERVER_START_REQ( set_focus_window ) { req->handle = wine_server_user_handle( global_vulkan_hwnd ); } SERVER_END_REQ; TRACE("New global vulkan hwnd is %p \n", create_info->hwnd); } else { TRACE("Not visible for %p %p %p %p\n", instance, create_info, allocator, surface); } //TRACE("%p %p %p %p\n", instance, create_info->hwnd, allocator, surface); TRACE("Creating vulkan Window %p %s \n", create_info->hwnd, debugstr_w(class_name.Buffer)); /* TODO: support child window rendering. */ if (NtUserGetAncestor(create_info->hwnd, GA_PARENT) != NtUserGetDesktopWindow()) { TRACE("Application requires child window rendering, which is not implemented yet!\n"); //return VK_ERROR_INCOMPATIBLE_DRIVER; } global_is_vulkan = 1; vulkan_window = create_wayland_window (create_info->hwnd, screen_width, screen_height); while (!count) { sleep(0.5); wl_display_dispatch_pending (wayland_display); sleep(0.5); count = 1; } NtUserSystemParametersInfo( SPI_SETMOUSESPEED , 0 , (LPVOID)1, SPIF_UPDATEINIFILE | SPIF_SENDCHANGE | SPIF_SENDWININICHANGE ) ; create_info_host.sType = VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR; create_info_host.pNext = NULL; create_info_host.flags = 0; create_info_host.display = wayland_display; create_info_host.surface = vulkan_window->surface; res = pvkCreateWaylandSurfaceKHR(instance, &create_info_host, NULL /* allocator */, surface); if (res != VK_SUCCESS) { TRACE("Failed to create Vulkan surface, res=%d\n", res); exit(0); goto err; } TRACE("Created vulkan Window for %p %s \n", create_info->hwnd, debugstr_w(class_name.Buffer)); return VK_SUCCESS; err: return res; } static void WAYLANDDRV_vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *allocator) { TRACE("%p 
%p\n", instance, allocator); //if(instance != VK_NULL_HANDLE) // pvkDestroyInstance(instance, NULL /* allocator */); if(instance != VK_NULL_HANDLE && instance && global_vk_instance != NULL && &instance == global_vk_instance) { TRACE("=vkDestroyInstance 2 \n"); TRACE("%p %p\n", instance, global_vk_instance); pvkDestroyInstance(instance, NULL /* allocator */); } TRACE("vkDestroyInstance 2 \n"); // if(instance != VK_NULL_HANDLE) // pvkDestroyInstance(instance, NULL /* allocator */); TRACE("vkDestroyInstance 1 \n"); } static void WAYLANDDRV_vkDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *allocator) { //TRACE("%p 0x%s %p\n", instance, wine_dbgstr_longlong(surface), allocator); if (allocator) FIXME("Support for allocation callbacks not implemented yet\n"); /* vkDestroySurfaceKHR must handle VK_NULL_HANDLE (0) for surface. */ if (surface) { pvkDestroySurfaceKHR(instance, surface, NULL /* allocator */); } } static void WAYLANDDRV_vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *allocator) { TRACE("%p, 0x%s %p\n", device, wine_dbgstr_longlong(swapchain), allocator); if(swapchain != VK_NULL_HANDLE) pvkDestroySwapchainKHR(device, swapchain, NULL /* allocator */); } static VkResult WAYLANDDRV_vkEnumerateInstanceExtensionProperties(const char *layer_name, uint32_t *count, VkExtensionProperties* properties) { unsigned int i; VkResult res; /* This shouldn't get called with layer_name set, the ICD loader prevents it. */ if (layer_name) { ERR("Layer enumeration not supported from ICD.\n"); return VK_ERROR_LAYER_NOT_PRESENT; } /* We will return the same number of instance extensions reported by the host back to * winevulkan. Along the way we may replace xlib extensions with their win32 equivalents. * Winevulkan will perform more detailed filtering as it knows whether it has thunks * for a particular extension. 
*/ res = pvkEnumerateInstanceExtensionProperties(layer_name, count, properties); if (!properties || res < 0) return res; for (i = 0; i < *count; i++) { /* For now the only x11 extension we need to fixup. Long-term we may need an array. */ if (!strcmp(properties[i].extensionName, "VK_KHR_wayland_surface")) { //TRACE("Substituting VK_KHR_xlib_surface for VK_KHR_win32_surface\n"); snprintf(properties[i].extensionName, sizeof(properties[i].extensionName), VK_KHR_WIN32_SURFACE_EXTENSION_NAME); properties[i].specVersion = VK_KHR_WIN32_SURFACE_SPEC_VERSION; } } TRACE("Returning %u extensions.\n", *count); return res; } static VkResult WAYLANDDRV_vkGetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice phys_dev, VkSurfaceKHR surface, uint32_t *count, VkRect2D *rects) { //TRACE("%p, 0x%s, %p, %p\n", phys_dev, wine_dbgstr_longlong(surface), count, rects); return pvkGetPhysicalDevicePresentRectanglesKHR(phys_dev, surface, count, rects); } /* Set the image extent in the capabilities to match what Windows expects. 
*/ static void set_image_extent(VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR *caps) { BOOL zero_extents = FALSE; if (surface == VK_NULL_HANDLE || !global_vulkan_hwnd) zero_extents = TRUE; // if (NtUserGetWindowLongW(wine_vk_surface->hwnd, GWL_STYLE) & WS_MINIMIZE) // zero_extents = TRUE; if (zero_extents) { caps->minImageExtent.width = 0; caps->minImageExtent.height = 0; caps->maxImageExtent.width = 0; caps->maxImageExtent.height = 0; caps->currentExtent.width = 0; caps->currentExtent.height = 0; } else { RECT client; NtUserGetClientRect(global_vulkan_hwnd, &client); caps->minImageExtent.width = client.right; caps->minImageExtent.height = client.bottom; caps->maxImageExtent.width = client.right; caps->maxImageExtent.height = client.bottom; caps->currentExtent.width = client.right; caps->currentExtent.height = client.bottom; } } static VkResult WAYLANDDRV_vkGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice phys_dev, const VkPhysicalDeviceSurfaceInfo2KHR *surface_info, VkSurfaceCapabilities2KHR *capabilities) { TRACE("%p, %p, %p\n", phys_dev, surface_info, capabilities); pvkGetPhysicalDeviceSurfaceCapabilities2KHR(phys_dev, surface_info, capabilities); set_image_extent(surface_info->surface, &capabilities->surfaceCapabilities); return VK_SUCCESS; } static VkResult WAYLANDDRV_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice phys_dev, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR *capabilities) { if(surface != VK_NULL_HANDLE) { pvkGetPhysicalDeviceSurfaceCapabilitiesKHR(phys_dev, surface, capabilities); set_image_extent(surface, capabilities); return VK_SUCCESS; } return VK_ERROR_SURFACE_LOST_KHR; } static VkResult WAYLANDDRV_vkGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice phys_dev, const VkPhysicalDeviceSurfaceInfo2KHR *surface_info, uint32_t *count, VkSurfaceFormat2KHR *formats) { // VkResult result; TRACE("%p, %p, %p, %p\n", phys_dev, surface_info, count, formats); if (pvkGetPhysicalDeviceSurfaceFormats2KHR) return 
pvkGetPhysicalDeviceSurfaceFormats2KHR(phys_dev, surface_info, count, formats); return VK_ERROR_SURFACE_LOST_KHR; } static VkResult WAYLANDDRV_vkGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice phys_dev, VkSurfaceKHR surface, uint32_t *count, VkSurfaceFormatKHR *formats) { VkResult res; TRACE("%p, 0x%s, %d, %p\n", phys_dev, wine_dbgstr_longlong(surface), *count, formats); if( surface != VK_NULL_HANDLE ) { res = pvkGetPhysicalDeviceSurfaceFormatsKHR(phys_dev, surface, count, formats); // TRACE("%p, 0x%s, %d, %p\n", phys_dev, wine_dbgstr_longlong(surface), *count, formats); return res; } return VK_ERROR_SURFACE_LOST_KHR; } static VkResult WAYLANDDRV_vkGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice phys_dev, VkSurfaceKHR surface, uint32_t *count, VkPresentModeKHR *modes) { VkResult res; TRACE("%p, 0x%s, %d, %p\n", phys_dev, wine_dbgstr_longlong(surface), *count, modes); if( surface != VK_NULL_HANDLE ) { res = pvkGetPhysicalDeviceSurfacePresentModesKHR(phys_dev, surface, count, modes); return res; } return VK_ERROR_SURFACE_LOST_KHR; } static VkResult WAYLANDDRV_vkGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice phys_dev, uint32_t index, VkSurfaceKHR surface, VkBool32 *supported) { if(surface != VK_NULL_HANDLE) { TRACE("%p, %u, 0x%s, %p\n", phys_dev, index, wine_dbgstr_longlong(surface), supported); return pvkGetPhysicalDeviceSurfaceSupportKHR(phys_dev, index, surface, supported); } return VK_ERROR_SURFACE_LOST_KHR; } static VkBool32 WAYLANDDRV_vkGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice phys_dev, uint32_t index) { return pvkGetPhysicalDeviceWaylandPresentationSupportKHR(phys_dev, index, wayland_display); } static VkResult WAYLANDDRV_vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *count, VkImage *images) { //TRACE("%p, 0x%s %p %p\n", device, wine_dbgstr_longlong(swapchain), count, images); return pvkGetSwapchainImagesKHR(device, swapchain, count, images); } static VkResult 
WAYLANDDRV_vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *present_info) { //TRACE("%p, %p\n", queue, present_info); return pvkQueuePresentKHR(queue, present_info); } #ifdef HAS_FSR static VkBool32 WAYLANDDRV_query_fsr(VkSurfaceKHR surface, VkExtent2D *real_sz, VkExtent2D *user_sz, VkRect2D *dst_blit, VkFilter *filter, BOOL *fsr, float *sharpness) { RECT window_rect; char *env_width, *env_height; int screen_width = 0, screen_height = 0; TRACE("Test for FSR %d \n", global_fsr); global_vulkan_rect_flag = 0; env_width = getenv( "WINE_VK_WAYLAND_WIDTH" ); env_height = getenv( "WINE_VK_WAYLAND_HEIGHT" ); if(!global_fsr && !global_fsr_set) return VK_FALSE; if(!global_vulkan_hwnd) return VK_FALSE; //TODO move to function if(global_output_width > 0 && global_output_height > 0) { screen_width = global_output_width; screen_height = global_output_height; } if(env_width) { screen_width = atoi(env_width); } if(env_height) { screen_height = atoi(env_height); } fsr_set_real_mode(screen_width, screen_height); NtUserGetClientRect(global_vulkan_hwnd, &window_rect); if(window_rect.right == 0) return VK_FALSE; //real res equals user res if(window_rect.right == screen_width && window_rect.bottom == screen_height && fsr_matches_current_mode(window_rect.right, window_rect.bottom) ) { TRACE("Disabling FSR \n"); global_fsr = 0; return VK_FALSE; } else { global_fsr = 1; } fsr_set_current_mode(window_rect.right, window_rect.bottom); if(real_sz){ real_sz->width = screen_width; real_sz->height = screen_height; } if(user_sz){ user_sz->width = window_rect.right; user_sz->height = window_rect.bottom; } if(dst_blit){ dst_blit->offset.x = 0; dst_blit->offset.y = 0; dst_blit->extent.width = screen_width; dst_blit->extent.height = screen_height; } if(filter) *filter = VK_FILTER_NEAREST; if(sharpness) *sharpness = (float) 2 / 10.0f; return VK_TRUE; } #endif static VkResult WAYLANDDRV_vkGetDeviceGroupSurfacePresentModesKHR(VkDevice device, VkSurfaceKHR surface, VkDeviceGroupPresentModeFlagsKHR 
*flags) { //TRACE("%p, 0x%s, %p\n", device, wine_dbgstr_longlong(surface), flags); return pvkGetDeviceGroupSurfacePresentModesKHR(device, surface, flags); } static void *WAYLANDDRV_vkGetDeviceProcAddr(VkDevice device, const char *name) { void *proc_addr; //TRACE("%p, %s\n", device, debugstr_a(name)); if ((proc_addr = get_vulkan_driver_device_proc_addr(&vulkan_funcs, name))) return proc_addr; return pvkGetDeviceProcAddr(device, name); } static void *WAYLANDDRV_vkGetInstanceProcAddr(VkInstance instance, const char *name) { void *proc_addr; //TRACE("%p, %s\n", instance, debugstr_a(name)); if ((proc_addr = get_vulkan_driver_instance_proc_addr(&vulkan_funcs, instance, name) )) return proc_addr; return pvkGetInstanceProcAddr(instance, name); } static const struct vulkan_funcs vulkan_funcs = { .p_vkCreateInstance = WAYLANDDRV_vkCreateInstance, .p_vkCreateSwapchainKHR = WAYLANDDRV_vkCreateSwapchainKHR, .p_vkCreateWin32SurfaceKHR = WAYLANDDRV_vkCreateWin32SurfaceKHR, .p_vkDestroyInstance = WAYLANDDRV_vkDestroyInstance, .p_vkDestroySurfaceKHR = WAYLANDDRV_vkDestroySurfaceKHR, .p_vkDestroySwapchainKHR = WAYLANDDRV_vkDestroySwapchainKHR, .p_vkEnumerateInstanceExtensionProperties = WAYLANDDRV_vkEnumerateInstanceExtensionProperties, .p_vkGetDeviceGroupSurfacePresentModesKHR = WAYLANDDRV_vkGetDeviceGroupSurfacePresentModesKHR, .p_vkGetDeviceProcAddr = WAYLANDDRV_vkGetDeviceProcAddr, .p_vkGetInstanceProcAddr = WAYLANDDRV_vkGetInstanceProcAddr, .p_vkGetPhysicalDevicePresentRectanglesKHR = WAYLANDDRV_vkGetPhysicalDevicePresentRectanglesKHR, .p_vkGetPhysicalDeviceSurfaceCapabilities2KHR = WAYLANDDRV_vkGetPhysicalDeviceSurfaceCapabilities2KHR, .p_vkGetPhysicalDeviceSurfaceCapabilitiesKHR = WAYLANDDRV_vkGetPhysicalDeviceSurfaceCapabilitiesKHR, .p_vkGetPhysicalDeviceSurfaceFormats2KHR = WAYLANDDRV_vkGetPhysicalDeviceSurfaceFormats2KHR, .p_vkGetPhysicalDeviceSurfaceFormatsKHR = WAYLANDDRV_vkGetPhysicalDeviceSurfaceFormatsKHR, .p_vkGetPhysicalDeviceSurfacePresentModesKHR = 
WAYLANDDRV_vkGetPhysicalDeviceSurfacePresentModesKHR, .p_vkGetPhysicalDeviceSurfaceSupportKHR = WAYLANDDRV_vkGetPhysicalDeviceSurfaceSupportKHR, .p_vkGetPhysicalDeviceWin32PresentationSupportKHR = WAYLANDDRV_vkGetPhysicalDeviceWin32PresentationSupportKHR, .p_vkGetSwapchainImagesKHR = WAYLANDDRV_vkGetSwapchainImagesKHR, .p_vkQueuePresentKHR = WAYLANDDRV_vkQueuePresentKHR, .p_wine_get_native_surface = WAYLANDDRV_wine_get_native_surface, #ifdef HAS_FSR .query_fs_hack = WAYLANDDRV_query_fsr #endif }; const struct vulkan_funcs *get_vulkan_driver(UINT version) { static pthread_once_t init_once = PTHREAD_ONCE_INIT; pthread_once(&init_once, wine_vk_init); if (vulkan_handle) return &vulkan_funcs; return NULL; }
944982cf354c80f3c674c909e9ad8913d60993ad
03b2c80dbc41e904b167d504666e27d798da5447
/src/common_system.c
3a43f1ce216b32cca7f4d89d646fbc7b849dcc07
[ "Unlicense" ]
permissive
nptcl/npt
7c1570b497cdce0b8971cb445fbc04cb500232d3
aa714a2370ac9fa5348c2fc96159b40b9de3de07
refs/heads/master
2023-03-20T09:13:54.669118
2022-07-02T11:17:44
2022-07-02T11:17:44
171,985,905
160
12
Unlicense
2023-03-11T01:36:37
2019-02-22T03:11:34
C
UTF-8
C
false
false
10,600
c
common_system.c
/* * ANSI COMMON LISP: 24. System Construction */ #include "call_system.h" #include "common_header.h" #include "compile.h" #include "compile_file.h" #include "cons.h" #include "require.h" #include "strtype.h" #include "type_parse.h" /* (defun compile-file * (input-file &key output-file verbose print external-format) * -> output-truename, warnings-p, failure-p * input-file pathname-designator ;; merge *default-pathname-defaults* * output-file pathname-designator * verbose T ;; boolean, *compile-verbose* * print T ;; boolean, *compile-print* * external-format external-format-designator * output-truename (or pathname null) ;; truename * warnings-p boolean * failure-p boolean */ static int function_compile_file(Execute ptr, addr file, addr rest) { addr x, y, z; Return(compile_file_common_(ptr, file, rest, &x, &y, &z)); setvalues_control(ptr, x, y, z, NULL); return 0; } static void type_compile_file(addr *ret) { addr args, values, key, key1, key2, key3, key4; addr type1, type2; /* key */ KeyTypeTable(&key1, OUTPUT_FILE, PathnameDesignator); KeyTypeTable(&key2, VERBOSE, T); KeyTypeTable(&key3, PRINT, T); KeyTypeTable(&key4, EXTERNAL_FORMAT, ExternalFormat); list_heap(&key, key1, key2, key3, key4, NULL); /* type */ GetTypeTable(&args, PathnameDesignator); typeargs_var1key(&args, args, key); GetTypeTable(&type1, PathnameNull); GetTypeTable(&type2, Boolean); typevalues_values3(&values, type1, type2, type2); type_compiled_heap(args, values, ret); } static void defun_compile_file(void) { addr symbol, pos, type; /* function */ GetConst(COMMON_COMPILE_FILE, &symbol); compiled_system(&pos, symbol); setcompiled_var1dynamic(pos, p_defun_compile_file); SetFunctionCommon(symbol, pos); /* type */ type_compile_file(&type); settype_function(pos, type); settype_function_symbol(symbol, type); } /* (defun compile-file-pathname * (input-file &key output-file &allow-other-keys) * -> pathname * input-file pathname-designator ;; merge *default-pathname-defaults* * output-file 
pathname-designator * pathname pathname */ static int function_compile_file_pathname(Execute ptr, addr var, addr rest) { Return(compile_file_pathname_common_(ptr, var, rest, &var)); setresult_control(ptr, var); return 0; } static void type_compile_file_pathname(addr *ret) { addr args, values; GetTypeTable(&args, PathnameDesignator); GetTypeTable(&values, T); typeargs_var1rest(&args, args, values); GetTypeValues(&values, Pathname); type_compiled_heap(args, values, ret); } static void defun_compile_file_pathname(void) { addr symbol, pos, type; /* function */ GetConst(COMMON_COMPILE_FILE_PATHNAME, &symbol); compiled_system(&pos, symbol); setcompiled_var1dynamic(pos, p_defun_compile_file_pathname); SetFunctionCommon(symbol, pos); /* type */ type_compile_file_pathname(&type); settype_function(pos, type); settype_function_symbol(symbol, type); } /* (defun load * (filespec &key verbose print if-does-not-exist external-format) ...) * -> boolean * filespec (or stream pathname-designator) * verbose t ;; boolean * print t ;; boolean * if-does-not-exist t ;; boolean * external-format t ;; external-format-designator */ static int function_load(Execute ptr, addr filespec, addr rest) { int check; Return(load_common_(ptr, filespec, rest, &check)); setbool_control(ptr, check); return 0; } static void type_load(addr *ret) { addr args, values, type, key1, key2, key3, key4, key5, key; /* args */ GetTypeTable(&args, Stream); GetTypeTable(&type, PathnameDesignator); type2or_heap(args, type, &args); GetTypeTable(&type, T); GetConst(KEYWORD_VERBOSE, &key1); GetConst(KEYWORD_PRINT, &key2); GetConst(KEYWORD_IF_DOES_NOT_EXIST, &key3); GetConst(KEYWORD_EXTERNAL_FORMAT, &key4); GetConst(KEYWORD_TYPE, &key5); cons_heap(&key1, key1, type); cons_heap(&key2, key2, type); cons_heap(&key3, key3, type); cons_heap(&key4, key4, type); cons_heap(&key5, key5, type); list_heap(&key, key1, key2, key3, key4, key5, NULL); typeargs_var1key(&args, args, key); /* values */ GetTypeValues(&values, Boolean); /* 
result */ type_compiled_heap(args, values, ret); } static void defun_load(void) { addr symbol, pos, type; /* function */ GetConst(COMMON_LOAD, &symbol); compiled_system(&pos, symbol); setcompiled_var1dynamic(pos, p_defun_load); SetFunctionCommon(symbol, pos); /* type */ type_load(&type); settype_function(pos, type); settype_function_symbol(symbol, type); } /* (defmacro with-compilation-unit * ((&key &allow-other-keys) &body body) ...) * -> result */ static int function_with_compilation_unit(Execute ptr, addr form, addr env) { Return(with_compilation_unit_common_(form, &form)); setresult_control(ptr, form); return 0; } static void defmacro_with_compilation_unit(void) { addr symbol, pos, type; GetConst(COMMON_WITH_COMPILATION_UNIT, &symbol); compiled_macro_system(&pos, symbol); setcompiled_macro(pos, p_defmacro_with_compilation_unit); SetMacroCommon(symbol, pos); /* type */ GetTypeCompiled(&type, MacroFunction); settype_function(pos, type); } /* (defvar *features* list) */ static void defvar_features(void) { addr symbol, type; /* symbol */ GetConst(SPECIAL_FEATURES, &symbol); setspecial_symbol(symbol); /* type */ GetTypeTable(&type, List); settype_value_symbol(symbol, type); } /* (defvar *compile-file-pathname* (or pathname null)) */ static void defvar_compile_file_pathname(void) { addr symbol, type; /* symbol */ GetConst(SPECIAL_COMPILE_FILE_PATHNAME, &symbol); SetValueSymbol(symbol, Nil); setspecial_symbol(symbol); /* type */ GetTypeTable(&type, PathnameNull); settype_value_symbol(symbol, type); } /* (defvar *compile-file-truename* (or pathname null)) */ static void defvar_compile_file_truename(void) { addr symbol, type; /* symbol */ GetConst(SPECIAL_COMPILE_FILE_TRUENAME, &symbol); SetValueSymbol(symbol, Nil); setspecial_symbol(symbol); /* type */ GetTypeTable(&type, PathnameNull); settype_value_symbol(symbol, type); } /* (defvar *load-pathname* (or pathname null)) */ static void defvar_load_pathname(void) { addr symbol, type; /* symbol */ 
GetConst(SPECIAL_LOAD_PATHNAME, &symbol); SetValueSymbol(symbol, Nil); setspecial_symbol(symbol); /* type */ GetTypeTable(&type, PathnameNull); settype_value_symbol(symbol, type); } /* (defvar *load-truename* (or pathname null)) */ static void defvar_load_truename(void) { addr symbol, type; /* symbol */ GetConst(SPECIAL_LOAD_TRUENAME, &symbol); SetValueSymbol(symbol, Nil); setspecial_symbol(symbol); /* type */ GetTypeTable(&type, PathnameNull); settype_value_symbol(symbol, type); } /* (defvar *compile-print* boolean) */ static void defvar_compile_print(void) { addr symbol, type; /* symbol */ GetConst(SPECIAL_COMPILE_PRINT, &symbol); SetValueSymbol(symbol, Nil); setspecial_symbol(symbol); /* type */ GetTypeTable(&type, T); settype_value_symbol(symbol, type); } /* (defvar *compile-verbose* boolean) */ static void defvar_compile_verbose(void) { addr symbol, type; /* symbol */ GetConst(SPECIAL_COMPILE_VERBOSE, &symbol); SetValueSymbol(symbol, Nil); setspecial_symbol(symbol); /* type */ GetTypeTable(&type, T); settype_value_symbol(symbol, type); } /* (defvar *load-print* boolean) */ static void defvar_load_print(void) { addr symbol, type; /* symbol */ GetConst(SPECIAL_LOAD_PRINT, &symbol); SetValueSymbol(symbol, Nil); setspecial_symbol(symbol); /* type */ GetTypeTable(&type, T); settype_value_symbol(symbol, type); } /* (defvar *load-verbose* boolean) */ static void defvar_load_verbose(void) { addr symbol, type; /* symbol */ GetConst(SPECIAL_LOAD_VERBOSE, &symbol); SetValueSymbol(symbol, Nil); setspecial_symbol(symbol); /* type */ GetTypeTable(&type, T); settype_value_symbol(symbol, type); } /* (defvar *modules* boolean) */ static void defvar_modules(void) { addr symbol, type; /* symbol */ GetConst(SPECIAL_MODULES, &symbol); SetValueSymbol(symbol, Nil); setspecial_symbol(symbol); /* type */ GetTypeTable(&type, List); settype_value_symbol(symbol, type); } /* (defun provide (var) ...) 
-> null */ static int function_provide(Execute ptr, addr var) { Return(provide_common_(ptr, var)); setresult_control(ptr, Nil); return 0; } static void type_provide(addr *ret) { addr args, values; GetTypeTable(&args, StringDesignator); typeargs_var1(&args, args); GetTypeValues(&values, Null); type_compiled_heap(args, values, ret); } static void defun_provide(void) { addr symbol, pos, type; /* function */ GetConst(COMMON_PROVIDE, &symbol); compiled_system(&pos, symbol); setcompiled_var1(pos, p_defun_provide); SetFunctionCommon(symbol, pos); /* type */ type_provide(&type); settype_function(pos, type); settype_function_symbol(symbol, type); } /* (defun require (var) ...) -> null */ static int function_require(Execute ptr, addr var, addr opt) { Return(require_common_(ptr, var, opt)); setresult_control(ptr, Nil); return 0; } static void type_require(addr *ret) { addr args, values, type; GetTypeTable(&args, StringDesignator); GetTypeTable(&values, List); GetTypeTable(&type, PathnameDesignator); type2or_heap(values, type, &values); typeargs_var1opt1(&args, args, values); GetTypeValues(&values, Null); type_compiled_heap(args, values, ret); } static void defun_require(void) { addr symbol, pos, type; /* function */ GetConst(COMMON_REQUIRE, &symbol); compiled_system(&pos, symbol); setcompiled_var1opt1(pos, p_defun_require); SetFunctionCommon(symbol, pos); /* type */ type_require(&type); settype_function(pos, type); settype_function_symbol(symbol, type); } /* * function */ void init_common_system(void) { SetPointerCall(defun, var1dynamic, compile_file); SetPointerCall(defun, var1dynamic, compile_file_pathname); SetPointerCall(defmacro, macro, with_compilation_unit); SetPointerCall(defun, var1dynamic, load); SetPointerCall(defun, var1, provide); SetPointerCall(defun, var1opt1, require); } void build_common_system(void) { defun_compile_file(); defun_compile_file_pathname(); defun_load(); defmacro_with_compilation_unit(); defvar_features(); defvar_compile_file_pathname(); 
defvar_compile_file_truename(); defvar_load_pathname(); defvar_load_truename(); defvar_compile_print(); defvar_compile_verbose(); defvar_load_print(); defvar_load_verbose(); defvar_modules(); defun_provide(); defun_require(); }
9de5c800221124b66c6fc3a2d7274b5222dfeb97
28d0f8c01599f8f6c711bdde0b59f9c2cd221203
/tests/usr.bin/xlint/lint1/msg_050.c
8eb8de50ec3ca59def54236651c91d5e30b3736d
[]
no_license
NetBSD/src
1a9cbc22ed778be638b37869ed4fb5c8dd616166
23ee83f7c0aea0777bd89d8ebd7f0cde9880d13c
refs/heads/trunk
2023-08-31T13:24:58.105962
2023-08-27T15:50:47
2023-08-27T15:50:47
88,439,547
656
348
null
2023-07-20T20:07:24
2017-04-16T20:03:43
null
UTF-8
C
false
false
410
c
msg_050.c
/* $NetBSD: msg_050.c,v 1.7 2023/07/09 11:18:55 rillig Exp $ */ # 3 "msg_050.c" /* Test for message: parameter '%s' has function type, should be pointer [50] */ /* lint1-flags: -tw */ typedef void (function)(); /* expect+1: warning: parameter 'f' unused in function 'example' [231] */ void example(f) /* expect+1: warning: parameter 'f' has function type, should be pointer [50] */ function f; { }
4fb7dc5e9d74dde2c2f295bac9545d31d83a10ec
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
/multimedia/aribb24/files/patch-src_drcs.h
2fbb9f7f6d8016261b7784f996e13270fdfecb67
[ "BSD-2-Clause" ]
permissive
freebsd/freebsd-ports
86f2e89d43913412c4f6b2be3e255bc0945eac12
605a2983f245ac63f5420e023e7dce56898ad801
refs/heads/main
2023-08-30T21:46:28.720924
2023-08-30T19:33:44
2023-08-30T19:33:44
1,803,961
916
918
NOASSERTION
2023-09-08T04:06:26
2011-05-26T11:15:35
null
UTF-8
C
false
false
475
h
patch-src_drcs.h
https://github.com/scimmia9286/aribb24/commit/58a34b8fabf4d0e9e1984fb603a97f52fb934a09 --- src/drcs.h.orig 2019-09-16 15:57:19 UTC +++ src/drcs.h @@ -77,6 +77,6 @@ typedef struct drcs_data_s bool apply_drcs_conversion_table( arib_instance_t * ); bool load_drcs_conversion_table( arib_instance_t * ); -void save_drcs_pattern( arib_instance_t *, int, int, int, const int8_t* ); +void save_drcs_pattern( arib_instance_t *, int, int, int, const int8_t*, int16_t ); #endif
09c75a59c651ddc95a6936327d5445d297ac6cf4
bb38c44037a99d0a12a12d92059678f2faebbc80
/src/include/parser/parse_agg.h
38fc618edd16d52027847f2605a55faaf40dae01
[ "LicenseRef-scancode-mulanpsl-2.0-en", "LicenseRef-scancode-unknown-license-reference", "PostgreSQL", "BSD-3-Clause", "LGPL-2.0-or-later", "LicenseRef-scancode-unicode", "LicenseRef-scancode-warranty-disclaimer", "curl", "GPL-1.0-or-later", "LGPL-2.1-or-later", "LGPL-2.1-only", "CC-BY-4.0", "LicenseRef-scancode-protobuf", "OpenSSL", "LicenseRef-scancode-generic-export-compliance", "X11-distribute-modifications-variant", "LicenseRef-scancode-other-permissive", "MIT", "NCSA", "Python-2.0", "LicenseRef-scancode-openssl", "LicenseRef-scancode-ssleay-windows", "CC-BY-3.0", "LicenseRef-scancode-other-copyleft", "GPL-2.0-only", "BSL-1.0", "Apache-2.0", "LGPL-2.0-only", "LicenseRef-scancode-public-domain", "BSD-2-Clause", "Zlib" ]
permissive
opengauss-mirror/openGauss-server
a9c5a62908643492347830826c56da49f0942796
310e84631c68c8bf37b004148b66f94064f701e4
refs/heads/master
2023-07-26T19:29:12.495484
2023-07-17T12:23:32
2023-07-17T12:23:32
276,117,477
591
208
MulanPSL-2.0
2023-04-28T12:30:18
2020-06-30T14:08:59
C++
UTF-8
C
false
false
2,577
h
parse_agg.h
/* ------------------------------------------------------------------------- * * parse_agg.h * handle aggregates and window functions in parser * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/parser/parse_agg.h * * ------------------------------------------------------------------------- */ #ifndef PARSE_AGG_H #define PARSE_AGG_H #include "parser/parse_node.h" extern void transformAggregateCall(ParseState* pstate, Aggref* agg, List* args, List* aggorder, bool agg_distinct); extern void transformWindowFuncCall(ParseState* pstate, WindowFunc* wfunc, WindowDef* windef); extern void parseCheckAggregates(ParseState* pstate, Query* qry); extern void parseCheckWindowFuncs(ParseState* pstate, Query* qry); extern Node* transformGroupingFunc(ParseState* pstate, GroupingFunc* g); extern List* expand_grouping_sets(List* groupingSets, int limit); extern List* extract_rollup_sets(List* groupingSets); extern List* reorder_grouping_sets(List* groupingSets, List* sortclause); extern List* preprocess_groupclause(PlannerInfo* root, List* force); extern void build_aggregate_fnexprs(Oid* agg_input_types, int agg_num_inputs, Oid agg_state_type, Oid agg_result_type, Oid agg_input_collation, Oid transfn_oid, Oid finalfn_oid, Expr** transfnexpr, Expr** finalfnexpr); extern bool check_windowagg_can_shuffle(List* partitionClause, List* targetList); extern void build_trans_aggregate_fnexprs(int agg_num_inputs, int agg_num_direct_inputs, bool agg_ordered_set, bool agg_variadic, Oid agg_state_type, Oid* agg_input_types, Oid agg_result_type, Oid agg_input_collation, Oid transfn_oid, Oid finalfn_oid, Expr** transfnexpr, Expr** finalfnexpr); extern int get_aggregate_argtypes(Aggref* aggref, Oid* inputTypes, int func_max_args); extern Oid resolve_aggregate_transtype(Oid aggfuncid, Oid aggtranstype, Oid* inputTypes, int numArguments); extern void build_aggregate_transfn_expr(Oid 
*agg_input_types, int agg_num_inputs, int agg_num_direct_inputs, bool agg_variadic, Oid agg_state_type, Oid agg_input_collation, Oid transfn_oid, Expr **transfnexpr); extern void build_aggregate_finalfn_expr(Oid *agg_input_types, int num_finalfn_inputs, Oid agg_state_type, Oid agg_result_type, Oid agg_input_collation, Oid finalfn_oid, Expr **finalfnexpr); #endif /* PARSE_AGG_H */
2d1e545bc31337b49b2a0aeb2d81cc93f139198d
cf60f9591fef521d3092f81785de7942d0ca568e
/PubNub/include/PNDownloadFileResult.h
ab6db37e28a5380495148d9964aba190e96b6229
[ "MIT" ]
permissive
pubnub/objective-c
a49e2e5c7898eb893b601ae31a098361fb8b4406
eef06ae76fd45931ea09900e3b3ce50133870eca
refs/heads/master
2023-04-07T08:28:47.174178
2023-01-05T11:15:57
2023-01-05T11:15:57
8,490,984
137
145
NOASSERTION
2023-03-30T08:47:36
2013-03-01T00:10:41
Objective-C
UTF-8
C
false
false
46
h
PNDownloadFileResult.h
../Data/Service Objects/PNDownloadFileResult.h
25686cf9c38ffb56bacaa19c7df298c5fe3b15c9
f54021ed2b6bb09a18b2bd6331b01fdfacba4f08
/test/full/with-time.c
65958f5dc210c6679de9c2cb58ae7475df048357
[ "MIT" ]
permissive
Snaipe/Criterion
e13784611d4f024114759c81db7978a34158a571
9c01cbe75002ad8640e0f411f453fbcd0567ff79
refs/heads/bleeding
2023-09-01T14:38:14.824224
2023-05-13T15:44:26
2023-05-13T16:02:30
30,111,969
1,965
240
MIT
2023-04-29T11:26:11
2015-01-31T12:45:39
C
UTF-8
C
false
false
77
c
with-time.c
#include <criterion/criterion.h> Test(samples, timed) { cr_assert(0); }
561e34973efcf4d0f03625687c03c642c9b1417d
9c4ec01e04f7b0a1d213e1060c6b0a008dde7cbd
/series1/rtcc/rtcc_alarm_set/src/main_gg11.c
32b454f43be31bb3c18d5fefbb1d1743a4fb484b
[ "Zlib" ]
permissive
SiliconLabs/peripheral_examples
edf5ee87cd0bcb2e7ad5066e278fa1ad3b92bd35
87b252e5a1bf5b36a548c121e8ffda085d3bcbc4
refs/heads/master
2023-07-26T22:20:57.916375
2023-07-07T18:18:01
2023-07-07T18:20:16
116,865,771
326
212
NOASSERTION
2021-06-17T20:12:04
2018-01-09T20:13:39
C
UTF-8
C
false
false
7,663
c
main_gg11.c
/***************************************************************************//** * @file main_gg11.c * @brief This project demonstrates the use of the RTCC by taking user input * from the VCOM and setting an alarm to trigger at the specified date and time. ******************************************************************************* * # License * <b>Copyright 2020 Silicon Laboratories Inc. www.silabs.com</b> ******************************************************************************* * * SPDX-License-Identifier: Zlib * * The licensor of this software is Silicon Laboratories Inc. * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. * ******************************************************************************* * # Evaluation Quality * This code has been minimally tested to ensure that it builds and is suitable * as a demonstration for evaluation purposes only. This code will be maintained * at the sole discretion of Silicon Labs. 
******************************************************************************/ #include <stdio.h> #include "em_device.h" #include "em_chip.h" #include "em_cmu.h" #include "bsp.h" #include "em_emu.h" #include "em_rtcc.h" #include "em_gpio.h" #include "retargetserial.h" #include "retargetserialconfig.h" uint32_t rtccFlag; int start = -1; int strtol(char*,char **,int); /**************************************************************************//** * @brief RTCC interrupt service routine *****************************************************************************/ void RTCC_IRQHandler(void) { // Read the interrupt source rtccFlag = RTCC_IntGet(); // Clear interrupt flag RTCC_IntClear(rtccFlag); // Toggle LED to turn it on GPIO_PinOutClear(BSP_GPIO_LED1_PORT, BSP_GPIO_LED1_PIN); // LEDS active low } /**************************************************************************//** * @brief GPIO initialization *****************************************************************************/ void initGPIO(void) { // Turn on the clock for the GPIO CMU_ClockEnable(cmuClock_GPIO, true); // Enable LED1 GPIO_PinModeSet(BSP_GPIO_LED1_PORT, BSP_GPIO_LED1_PIN, gpioModePushPull, 0); GPIO_PinOutSet(BSP_GPIO_LED1_PORT, BSP_GPIO_LED1_PIN); // LEDs active low } /**************************************************************************//** * @brief Functions to take input from VCOM *****************************************************************************/ char* setCurrentTime(char hour[]) { // Ask the user for input printf("\nPlease enter the current time in the format hhmmss:\n "); // Receive the input from the VCOM and store as a string for(int i = 0; i < 6;i++) { while(start == -1) { scanf("%d", &start); if(start != -1) { start = start + '0'; hour[i+2] = start; } } start = -1; } return hour; } char* setCurrentDate(char date[]) { // Ask the user for input printf("Please enter the current date in the format yymmdd:\n "); // Receive the input from the VCOM and store as a string 
for(int i = 0; i < 6;i++) { while(start == -1) { scanf("%d", &start); if(start != -1) { start = start + '0'; date[i+2] = start; } } start = -1; } return date; } char* setAlarmTime(char alarmh[]) { // Ask the user for input printf("\nPlease enter the time for the alarm in the format hhmmss:\n "); // Receive the input from the VCOM and store as a string for(int i = 0; i < 6;i++) { while(start == -1) { scanf("%d", &start); if(start != -1) { start = start + '0'; alarmh[i+2] = start; } } start = -1; } return alarmh; } char* setAlarmDate(char alarmd[]) { // Ask the user for input printf("\nPlease enter the date for the alarm in the format yymmdd:\n "); // Receive the input from the VCOM and store as a string for(int i = 0; i <6;i++) { while(start == -1) { scanf("%d", &start); if(start != -1) { start = start + '0'; alarmd[i+2] = start; } } start = -1; } return alarmd; } /**************************************************************************//** * @brief RTCC initialization *****************************************************************************/ void rtccSetup(int start_time, int start_date, int alarmh_start, int alarmd_start) { // Configure the RTCC settings RTCC_Init_TypeDef rtcc = RTCC_INIT_DEFAULT; rtcc.enable = false; rtcc.presc = rtccCntPresc_32768; rtcc.cntMode = rtccCntModeCalendar; rtcc.cntWrapOnCCV1 = true; // Configure the compare settings RTCC_CCChConf_TypeDef compare = RTCC_CH_INIT_COMPARE_DEFAULT; // Turn on the clock for the RTCC CMU_ClockEnable(cmuClock_HFLE, true); CMU_ClockSelectSet(cmuClock_LFE, cmuSelect_LFXO); CMU_ClockEnable(cmuClock_RTCC, true); // Initialise RTCC with pre-defined settings RTCC_Init(&rtcc); // Set current date and time RTCC_DateSet(start_date); RTCC_TimeSet(start_time); // Initialise RTCC compare with a date, the date when interrupt will occur RTCC_ChannelInit(1, &compare); RTCC_ChannelDateSet(1, alarmd_start); RTCC_ChannelTimeSet(1,alarmh_start); // Set channel 1 to cause an interrupt RTCC_IntEnable(RTCC_IEN_CC1); 
NVIC_ClearPendingIRQ(RTCC_IRQn); NVIC_EnableIRQ(RTCC_IRQn); // Start counter after all initialisations are complete RTCC_Enable(true); } /**************************************************************************//** * @brief Main function *****************************************************************************/ int main(void) { CHIP_Init(); // Initialise USART, enable board controller VCOM function, and map LF to CRLF RETARGET_SerialInit(); RETARGET_SerialCrLf(1); RETARGET_ReadChar(); // Initialise the variables needed to find the VCOM input char hour_time[] = "00hhmmss"; char date[] = "20yymmdd"; char alarmh[] = "00hhmmss"; char alarmd[] = "20yymmdd"; // Print the initial statement to the VCOM printf("This example works to set the time and then a future wake up time\n"); // Store the strings with the timing information char *dates = setCurrentDate(date); char *hour = setCurrentTime(hour_time); char *alarm_date = setAlarmDate(alarmd); char *alarm_time = setAlarmTime(alarmh); printf("\nThe alarm has been set\n"); // Convert the strings received into hex for passing to the RTCC int start_time = (int)strtol(hour,NULL,16); int start_date = (int)strtol(dates,NULL,16); int alarmh_start = (int)strtol(alarm_time,NULL,16); int alarmd_start = (int)strtol(alarm_date,NULL,16); // Initialisations initGPIO(); rtccSetup(start_time, start_date,alarmh_start,alarmd_start); // Infinite loop while(1) { EMU_EnterEM1(); } }
fb960cbecbb9b1e2fcf9488ac64c3035a39acc45
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
/SOFTWARE/A64-TERES/linux-a64/arch/arm/mach-gemini/board-wbd111.c
418188cd1712fe7f6247a8b97fb423e358f4891d
[ "LicenseRef-scancode-free-unknown", "Apache-2.0", "Linux-syscall-note", "GPL-2.0-only", "GPL-1.0-or-later" ]
permissive
OLIMEX/DIY-LAPTOP
ae82f4ee79c641d9aee444db9a75f3f6709afa92
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
refs/heads/rel3
2023-08-04T01:54:19.483792
2023-04-03T07:18:12
2023-04-03T07:18:12
80,094,055
507
92
Apache-2.0
2023-04-03T07:05:59
2017-01-26T07:25:50
C
UTF-8
C
false
false
2,822
c
board-wbd111.c
/* * Support for Wiliboard WBD-111 * * Copyright (C) 2009 Imre Kaloz <kaloz@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/input.h> #include <linux/skbuff.h> #include <linux/gpio_keys.h> #include <linux/mdio-gpio.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include "common.h" static struct gpio_keys_button wbd111_keys[] = { { .code = KEY_SETUP, .gpio = 5, .active_low = 1, .desc = "reset", .type = EV_KEY, }, }; static struct gpio_keys_platform_data wbd111_keys_data = { .buttons = wbd111_keys, .nbuttons = ARRAY_SIZE(wbd111_keys), }; static struct platform_device wbd111_keys_device = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &wbd111_keys_data, }, }; static struct gpio_led wbd111_leds[] = { { .name = "L3red", .gpio = 1, }, { .name = "L4green", .gpio = 2, }, { .name = "L4red", .gpio = 3, }, { .name = "L3green", .gpio = 5, }, }; static struct gpio_led_platform_data wbd111_leds_data = { .num_leds = ARRAY_SIZE(wbd111_leds), .leds = wbd111_leds, }; static struct platform_device wbd111_leds_device = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &wbd111_leds_data, }, }; static struct mtd_partition wbd111_partitions[] = { { .name = "RedBoot", .offset = 0, .size = 0x020000, .mask_flags = MTD_WRITEABLE, } , { .name = "kernel", .offset = 0x020000, .size = 0x100000, } , { .name = "rootfs", .offset = 0x120000, .size = 0x6a0000, } , { .name = "VCTL", .offset = 0x7c0000, .size = 0x010000, .mask_flags = MTD_WRITEABLE, } , { .name = "cfg", .offset = 0x7d0000, .size = 0x010000, .mask_flags = MTD_WRITEABLE, } , { .name = 
"FIS", .offset = 0x7e0000, .size = 0x010000, .mask_flags = MTD_WRITEABLE, } }; #define wbd111_num_partitions ARRAY_SIZE(wbd111_partitions) static void __init wbd111_init(void) { gemini_gpio_init(); platform_register_uart(); platform_register_pflash(SZ_8M, wbd111_partitions, wbd111_num_partitions); platform_device_register(&wbd111_leds_device); platform_device_register(&wbd111_keys_device); platform_register_rtc(); } MACHINE_START(WBD111, "Wiliboard WBD-111") .atag_offset = 0x100, .map_io = gemini_map_io, .init_irq = gemini_init_irq, .init_time = gemini_timer_init, .init_machine = wbd111_init, .restart = gemini_restart, MACHINE_END
c73094fc016366ca40b0cbbfdec89787bf803ac9
2898fa4f2ad766afa0495a837f59fe95daa081a7
/tests/unit-pass/pr28403.c
4d3909303f12718883cb31d805d00cef1df7193a
[ "NCSA" ]
permissive
kframework/c-semantics
12fcc1b1bf1f7792636d1c37f6f7bb1b89a392b5
e6879d14455771aa0cb3e3d201131d4d763a73a2
refs/heads/master
2023-07-31T23:57:03.316456
2022-02-01T17:50:31
2022-02-01T17:50:31
11,747,541
312
52
NOASSERTION
2022-02-01T17:50:33
2013-07-29T19:13:25
C
UTF-8
C
false
false
444
c
pr28403.c
#include <stdlib.h> typedef unsigned long long ull; int global; int __attribute__((noinline)) foo (int x1, int x2, int x3, int x4, int x5, int x6, int x7, int x8) { global = x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8; return 0; } ull __attribute__((noinline)) bar (ull x) { foo (1, 2, 1, 3, 1, 4, 1, 5); return x >> global; } int main (void) { if (bar (0x123456789abcdefULL) != (0x123456789abcdefULL >> 18)) abort (); exit (0); }
faae492e1c5ddb679462b29fdc77489cfb300ff5
4bcc9806152542ab43fc2cf47c499424f200896c
/tensorflow/c/env.h
ac6a9e32aff6e24f49ffeba5fbcc9647975c8b61
[ "Apache-2.0", "LicenseRef-scancode-generic-cla", "BSD-2-Clause" ]
permissive
tensorflow/tensorflow
906276dbafcc70a941026aa5dc50425ef71ee282
a7f3934a67900720af3d3b15389551483bee50b8
refs/heads/master
2023-08-25T04:24:41.611870
2023-08-25T04:06:24
2023-08-25T04:14:08
45,717,250
208,740
109,943
Apache-2.0
2023-09-14T20:55:50
2015-11-07T01:19:20
C++
UTF-8
C
false
false
9,852
h
env.h
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_C_ENV_H_ #define TENSORFLOW_C_ENV_H_ #include <stdbool.h> #include <stddef.h> #include <stdint.h> #include "tensorflow/c/c_api_macros.h" #include "tensorflow/c/tf_file_statistics.h" #include "tensorflow/c/tf_status.h" // -------------------------------------------------------------------------- // C API for tensorflow::Env. #ifdef __cplusplus extern "C" { #endif typedef struct TF_WritableFileHandle TF_WritableFileHandle; typedef struct TF_StringStream TF_StringStream; typedef struct TF_Thread TF_Thread; typedef struct TF_ThreadOptions { // Thread stack size to use (in bytes), zero implies that the system default // will be used. size_t stack_size; // Guard area size to use near thread stacks to use (in bytes), zero implies // that the system default will be used. size_t guard_size; // The NUMA node to use, -1 implies that there should be no NUMA affinity for // this thread. int numa_node; } TF_ThreadOptions; // Creates the specified directory. Typical status code are: // * TF_OK - successfully created the directory // * TF_ALREADY_EXISTS - directory already exists // * TF_PERMISSION_DENIED - dirname is not writable TF_CAPI_EXPORT extern void TF_CreateDir(const char* dirname, TF_Status* status); // Deletes the specified directory. 
Typical status codes are: // * TF_OK - successfully deleted the directory // * TF_FAILED_PRECONDITION - the directory is not empty TF_CAPI_EXPORT extern void TF_DeleteDir(const char* dirname, TF_Status* status); // Deletes the specified directory and all subdirectories and files underneath // it. This is accomplished by traversing the directory tree rooted at dirname // and deleting entries as they are encountered. // // If dirname itself is not readable or does not exist, *undeleted_dir_count is // set to 1, *undeleted_file_count is set to 0 and an appropriate status (e.g. // TF_NOT_FOUND) is returned. // // If dirname and all its descendants were successfully deleted, TF_OK is // returned and both error counters are set to zero. // // Otherwise, while traversing the tree, undeleted_file_count and // undeleted_dir_count are updated if an entry of the corresponding type could // not be deleted. The returned error status represents the reason that any one // of these entries could not be deleted. // // Typical status codes: // * TF_OK - dirname exists and we were able to delete everything underneath // * TF_NOT_FOUND - dirname doesn't exist // * TF_PERMISSION_DENIED - dirname or some descendant is not writable // * TF_UNIMPLEMENTED - some underlying functions (like Delete) are not // implemented TF_CAPI_EXPORT extern void TF_DeleteRecursively(const char* dirname, int64_t* undeleted_file_count, int64_t* undeleted_dir_count, TF_Status* status); // Obtains statistics for the given path. If status is TF_OK, *stats is // updated, otherwise it is not touched. TF_CAPI_EXPORT extern void TF_FileStat(const char* filename, TF_FileStatistics* stats, TF_Status* status); // Creates or truncates the given filename and returns a handle to be used for // appending data to the file. If status is TF_OK, *handle is updated and the // caller is responsible for freeing it (see TF_CloseWritableFile). 
TF_CAPI_EXPORT extern void TF_NewWritableFile(const char* filename, TF_WritableFileHandle** handle, TF_Status* status); // Closes the given handle and frees its memory. If there was a problem closing // the file, it is indicated by status. Memory is freed in any case. TF_CAPI_EXPORT extern void TF_CloseWritableFile(TF_WritableFileHandle* handle, TF_Status* status); // Syncs content of the handle to the filesystem. Blocks waiting for the // filesystem to indicate that the content has been persisted. TF_CAPI_EXPORT extern void TF_SyncWritableFile(TF_WritableFileHandle* handle, TF_Status* status); // Flush local buffers to the filesystem. If the process terminates after a // successful flush, the contents may still be persisted, since the underlying // filesystem may eventually flush the contents. If the OS or machine crashes // after a successful flush, the contents may or may not be persisted, depending // on the implementation. TF_CAPI_EXPORT extern void TF_FlushWritableFile(TF_WritableFileHandle* handle, TF_Status* status); // Appends the given bytes to the file. Any failure to do so is indicated in // status. TF_CAPI_EXPORT extern void TF_AppendWritableFile(TF_WritableFileHandle* handle, const char* data, size_t length, TF_Status* status); // Deletes the named file and indicates whether successful in *status. TF_CAPI_EXPORT extern void TF_DeleteFile(const char* filename, TF_Status* status); // Retrieves the next item from the given TF_StringStream and places a pointer // to it in *result. If no more items are in the list, *result is set to NULL // and false is returned. // // Ownership of the items retrieved with this function remains with the library. // Item points are invalidated after a call to TF_StringStreamDone. TF_CAPI_EXPORT extern bool TF_StringStreamNext(TF_StringStream* list, const char** result); // Frees the resources associated with given string list. All pointers returned // by TF_StringStreamNext are invalid after this call. 
TF_CAPI_EXPORT extern void TF_StringStreamDone(TF_StringStream* list); // Retrieves the list of children of the given directory. You can iterate // through the list with TF_StringStreamNext. The caller is responsible for // freeing the list (see TF_StringStreamDone). TF_CAPI_EXPORT extern TF_StringStream* TF_GetChildren(const char* filename, TF_Status* status); // Retrieves a list of directory names on the local machine that may be used for // temporary storage. You can iterate through the list with TF_StringStreamNext. // The caller is responsible for freeing the list (see TF_StringStreamDone). TF_CAPI_EXPORT extern TF_StringStream* TF_GetLocalTempDirectories(void); // Creates a temporary file name with an extension. // The caller is responsible for freeing the returned pointer. TF_CAPI_EXPORT extern char* TF_GetTempFileName(const char* extension); // Returns the number of nanoseconds since the Unix epoch. TF_CAPI_EXPORT extern uint64_t TF_NowNanos(void); // Returns the number of microseconds since the Unix epoch. TF_CAPI_EXPORT extern uint64_t TF_NowMicros(void); // Returns the number of seconds since the Unix epoch. TF_CAPI_EXPORT extern uint64_t TF_NowSeconds(void); // Populates a TF_ThreadOptions struct with system-default values. TF_CAPI_EXPORT extern void TF_DefaultThreadOptions(TF_ThreadOptions* options); // Returns a new thread that is running work_func and is identified // (for debugging/performance-analysis) by thread_name. // // The given param (which may be null) is passed to work_func when the thread // starts. In this way, data may be passed from the thread back to the caller. // // Caller takes ownership of the result and must call TF_JoinThread on it // eventually. TF_CAPI_EXPORT extern TF_Thread* TF_StartThread(const TF_ThreadOptions* options, const char* thread_name, void (*work_func)(void*), void* param); // Waits for the given thread to finish execution, then deletes it. 
TF_CAPI_EXPORT extern void TF_JoinThread(TF_Thread* thread); // \brief Load a dynamic library. // // Pass "library_filename" to a platform-specific mechanism for dynamically // loading a library. The rules for determining the exact location of the // library are platform-specific and are not documented here. // // On success, place OK in status and return the newly created library handle. // Otherwise returns nullptr and set error status. TF_CAPI_EXPORT extern void* TF_LoadSharedLibrary(const char* library_filename, TF_Status* status); // \brief Get a pointer to a symbol from a dynamic library. // // "handle" should be a pointer returned from a previous call to // TF_LoadLibraryFromEnv. On success, place OK in status and return a pointer to // the located symbol. Otherwise returns nullptr and set error status. TF_CAPI_EXPORT extern void* TF_GetSymbolFromLibrary(void* handle, const char* symbol_name, TF_Status* status); #ifdef __cplusplus } #endif #endif // TENSORFLOW_C_ENV_H_
00c4efddb5a495e5b61eedb4cc7a1327c2047e86
79d343002bb63a44f8ab0dbac0c9f4ec54078c3a
/lib/libc/include/generic-glibc/bits/struct_stat_time64_helper.h
15f02c4dc15d9d901fa7f3cb0dad22ba684eadf0
[ "MIT" ]
permissive
ziglang/zig
4aa75d8d3bcc9e39bf61d265fd84b7f005623fc5
f4c9e19bc3213c2bc7e03d7b06d7129882f39f6c
refs/heads/master
2023-08-31T13:16:45.980913
2023-08-31T05:50:29
2023-08-31T05:50:29
40,276,274
25,560
2,399
MIT
2023-09-14T21:09:50
2015-08-06T00:51:28
Zig
UTF-8
C
false
false
2,600
h
struct_stat_time64_helper.h
/* Definition for helper to define struct stat with 64-bit time. Copyright (C) 2021 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library. If not, see <https://www.gnu.org/licenses/>. */ /* Content of internal __stat64_t64 struct. */ __dev_t st_dev; /* Device. */ __ino64_t st_ino; /* file serial number. */ __mode_t st_mode; /* File mode. */ __nlink_t st_nlink; /* Link count. */ __uid_t st_uid; /* User ID of the file's owner. */ __gid_t st_gid; /* Group ID of the file's group. */ __dev_t st_rdev; /* Device number, if device. */ __off64_t st_size; /* Size of file, in bytes. */ __blksize_t st_blksize; /* Optimal block size for I/O. */ __blkcnt64_t st_blocks; /* Number 512-byte blocks allocated. */ #ifdef __USE_XOPEN2K8 # ifndef __struct_timespec # define __struct_timespec struct timespec # endif /* Nanosecond resolution timestamps are stored in a format equivalent to 'struct timespec'. This is the type used whenever possible but the Unix namespace rules do not allow the identifier 'timespec' to appear in the <sys/stat.h> header. Therefore we have to handle the use of this header in strictly standard-compliant sources special. 
*/ __struct_timespec st_atim; __struct_timespec st_mtim; __struct_timespec st_ctim; # define st_atime st_atim.tv_sec # define st_mtime st_mtim.tv_sec # define st_ctime st_ctim.tv_sec # undef __struct_timespec #else /* The definition should be equal to the 'struct __timespec64' internal layout. */ # if __BYTE_ORDER == __BIG_ENDIAN # define __fieldts64(name) \ __time64_t name; __int32_t :32; __int32_t name ## nsec # else # define __fieldts64(name) \ __time64_t name; __int32_t name ## nsec; __int32_t :32 # endif __fieldts64 (st_atime); __fieldts64 (st_mtime); __fieldts64 (st_ctime); unsigned long int __glibc_reserved4; unsigned long int __glibc_reserved5; # undef __fieldts64 #endif
b2cc25bbe52d2fbdf6feada29160e7ad95b7fcda
a04dc56b5ed4d5f15e73a014795a6342faf449a7
/frontpanel/jpeg.h
5517674c3100cf7aeeafca009426a77637340644
[ "LicenseRef-scancode-free-unknown", "MIT" ]
permissive
udo-munk/z80pack
3cdb9aeb4592644acb36873deda7cefafc50b8ce
53a343015aa28cb9b393ff6f18f20f6984018808
refs/heads/master
2023-04-05T15:49:29.832220
2021-08-15T22:28:25
2021-08-15T22:28:25
122,672,477
125
35
MIT
2022-12-02T07:30:39
2018-02-23T21:03:59
C
UTF-8
C
false
false
151
h
jpeg.h
// jpeg.h #ifndef __JPEG_DEFS__ #define __JPEG_DEFS__ unsigned char *read_jpeg(char *fname, int *width, int *height, int *num_components); #endif
fef1c823d6a1c59102d95904df5df0c46700d8ae
85b4bbde9e0ec36b0db29463281c36b24e1f161d
/cond/x_part_cond_qcqp.c
2839c326a6c46f31fd7eaa72d7792e9aa805a295
[ "BSD-2-Clause" ]
permissive
giaf/hpipm
9617000b88c3a65842154e86209a09a067fe34ac
b5239f756181918f97deca39884d12dc6286dc73
refs/heads/master
2023-08-17T01:13:25.056738
2023-08-15T15:02:11
2023-08-15T15:02:11
91,068,419
398
120
NOASSERTION
2023-08-15T20:51:27
2017-05-12T08:20:10
C
UTF-8
C
false
false
30,081
c
x_part_cond_qcqp.c
/************************************************************************************************** * * * This file is part of HPIPM. * * * * HPIPM -- High-Performance Interior Point Method. * * Copyright (C) 2019 by Gianluca Frison. * * Developed at IMTEK (University of Freiburg) under the supervision of Moritz Diehl. * * All rights reserved. * * * * The 2-Clause BSD License * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following conditions are met: * * * * 1. Redistributions of source code must retain the above copyright notice, this * * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * * this list of conditions and the following disclaimer in the documentation * * and/or other materials provided with the distribution. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * * * Author: Gianluca Frison, gianluca.frison (at) imtek.uni-freiburg.de * * * **************************************************************************************************/ void PART_COND_QCQP_COMPUTE_BLOCK_SIZE(int N, int N2, int *block_size) { int ii; int bs0 = N/N2; // (floor) size of small blocks // the first blocks have size bs0+1 for(ii=0; ii<N-N2*bs0; ii++) block_size[ii] = bs0+1; // the following blocks have size bs0 for(; ii<N2; ii++) block_size[ii] = bs0; // the last block has size 0 block_size[N2] = 0; return; } void PART_COND_QCQP_COMPUTE_DIM(struct OCP_QCQP_DIM *ocp_dim, int *block_size, struct OCP_QCQP_DIM *part_dense_dim) { // TODO run time check on sum(block_size) = N int N = ocp_dim->N; int *nx = ocp_dim->nx; int *nu = ocp_dim->nu; int *nb = ocp_dim->nb; int *nbx = ocp_dim->nbx; int *nbu = ocp_dim->nbu; int *ng = ocp_dim->ng; int *nq = ocp_dim->nq; int *ns = ocp_dim->ns; int *nsbx = ocp_dim->nsbx; int *nsbu = ocp_dim->nsbu; int *nsg = ocp_dim->nsg; int *nsq = ocp_dim->nsq; int N2 = part_dense_dim->N; // int *nx2 = part_dense_dim->nx; // int *nu2 = part_dense_dim->nu; // int *nb2 = part_dense_dim->nb; // int *nbx2 = part_dense_dim->nbx; // int *nbu2 = part_dense_dim->nbu; // int *ng2 = part_dense_dim->ng; // int *nq2 = part_dense_dim->nq; // int *ns2 = part_dense_dim->ns; // int *nsbx2 = part_dense_dim->nsbx; // int *nsbu2 = part_dense_dim->nsbu; // int *nsg2 = part_dense_dim->nsg; // int *nsq2 = part_dense_dim->nsq; int nx2, nu2, nb2, nbx2, nbu2, ng2, nq2, ns2, nsbu2, nsbx2, nsg2, nsq2; int ii, jj; // TODO equality constraints !!!!!!!!! 
int nbb; // box constr that remain box constr int nbg; // box constr that becomes general constr int N_tmp = 0; // temporary sum of block size // first stages for(ii=0; ii<N2; ii++) { nx2 = nx[N_tmp+0]; nu2 = nu[N_tmp+0]; nbx2 = nbx[N_tmp+0]; nbu2 = nbu[N_tmp+0]; nb2 = nb[N_tmp+0]; ng2 = ng[N_tmp+0]; nq2 = nq[N_tmp+0]; ns2 = ns[N_tmp+0]; nsbx2 = nsbx[N_tmp+0]; nsbu2 = nsbu[N_tmp+0]; nsg2 = nsg[N_tmp+0]; nsq2 = nsq[N_tmp+0]; for(jj=1; jj<block_size[ii]; jj++) { nx2 += 0; nu2 += nu[N_tmp+jj]; nbx2 += 0; nbu2 += nbu[N_tmp+jj]; nb2 += nbu[N_tmp+jj]; ng2 += ng[N_tmp+jj] + nbx[N_tmp+jj]; nq2 += nq[N_tmp+jj]; ns2 += ns[N_tmp+jj]; nsbx2 += 0; nsbu2 += nsbu[N_tmp+jj]; nsg2 += nsg[N_tmp+jj] + nsbx[N_tmp+jj]; nsq2 += nsq[N_tmp+jj]; } N_tmp += block_size[ii]; // XXX must use setters to correctly set qp ones too ! OCP_QCQP_DIM_SET_NX(ii, nx2, part_dense_dim); OCP_QCQP_DIM_SET_NU(ii, nu2, part_dense_dim); OCP_QCQP_DIM_SET_NBX(ii, nbx2, part_dense_dim); OCP_QCQP_DIM_SET_NBU(ii, nbu2, part_dense_dim); OCP_QCQP_DIM_SET_NG(ii, ng2, part_dense_dim); OCP_QCQP_DIM_SET_NQ(ii, nq2, part_dense_dim); OCP_QCQP_DIM_SET_NS(ii, ns2, part_dense_dim); OCP_QCQP_DIM_SET_NSBX(ii, nsbx2, part_dense_dim); OCP_QCQP_DIM_SET_NSBU(ii, nsbu2, part_dense_dim); OCP_QCQP_DIM_SET_NSG(ii, nsg2, part_dense_dim); OCP_QCQP_DIM_SET_NSQ(ii, nsq2, part_dense_dim); } // last stage: condense also following stage ii = N2; nx2 = nx[N_tmp+0]; nu2 = nu[N_tmp+0]; nbx2 = nbx[N_tmp+0]; nbu2 = nbu[N_tmp+0]; nb2 = nb[N_tmp+0]; ng2 = ng[N_tmp+0]; nq2 = nq[N_tmp+0]; ns2 = ns[N_tmp+0]; nsbx2 = nsbx[N_tmp+0]; nsbu2 = nsbu[N_tmp+0]; nsg2 = nsg[N_tmp+0]; nsq2 = nsq[N_tmp+0]; for(jj=1; jj<block_size[ii]+1; jj++) { nx2 += 0; nu2 += nu[N_tmp+jj]; nbx2 += 0; nbu2 += nbu[N_tmp+jj]; nb2 += nbu[N_tmp+jj]; ng2 += ng[N_tmp+jj] + nbx[N_tmp+jj]; nq2 += nq[N_tmp+jj]; ns2 += ns[N_tmp+jj]; nsbx2 += 0; nsbu2 += nsbu[N_tmp+jj]; // nsbx2 = nsbx[N_tmp+0]; // nsbu2 = nsbu[N_tmp+0]; nsg2 += nsg[N_tmp+jj] + nsbx[N_tmp+jj]; nsq2 += nsq[N_tmp+jj]; } // 
XXX must use setters to correctly set qp ones too ! OCP_QCQP_DIM_SET_NX(ii, nx2, part_dense_dim); OCP_QCQP_DIM_SET_NU(ii, nu2, part_dense_dim); OCP_QCQP_DIM_SET_NBX(ii, nbx2, part_dense_dim); OCP_QCQP_DIM_SET_NBU(ii, nbu2, part_dense_dim); OCP_QCQP_DIM_SET_NG(ii, ng2, part_dense_dim); OCP_QCQP_DIM_SET_NQ(ii, nq2, part_dense_dim); OCP_QCQP_DIM_SET_NS(ii, ns2, part_dense_dim); OCP_QCQP_DIM_SET_NSBX(ii, nsbx2, part_dense_dim); OCP_QCQP_DIM_SET_NSBU(ii, nsbu2, part_dense_dim); OCP_QCQP_DIM_SET_NSG(ii, nsg2, part_dense_dim); OCP_QCQP_DIM_SET_NSQ(ii, nsq2, part_dense_dim); return; } hpipm_size_t PART_COND_QCQP_ARG_MEMSIZE(int N2) { int ii; hpipm_size_t size = 0; size += (N2+1)*sizeof(struct COND_QCQP_ARG); for(ii=0; ii<=N2; ii++) { size += COND_QCQP_ARG_MEMSIZE(); } size = (size+63)/64*64; // make multiple of typical cache line size size += 1*64; // align once to typical cache line size return size; } void PART_COND_QCQP_ARG_CREATE(int N2, struct PART_COND_QCQP_ARG *part_cond_arg, void *mem) { int ii; // cond workspace struct struct COND_QCQP_ARG *cws_ptr = mem; part_cond_arg->cond_arg = cws_ptr; cws_ptr += N2+1; // align to typical cache line size hpipm_size_t s_ptr = (hpipm_size_t) cws_ptr; s_ptr = (s_ptr+63)/64*64; char *c_ptr = (char *) s_ptr; for(ii=0; ii<=N2; ii++) { COND_QCQP_ARG_CREATE(part_cond_arg->cond_arg+ii, c_ptr); c_ptr += (part_cond_arg->cond_arg+ii)->memsize; } part_cond_arg->N2 = N2; part_cond_arg->memsize = PART_COND_QCQP_ARG_MEMSIZE(N2); #if defined(RUNTIME_CHECKS) if(c_ptr > ((char *) mem) + part_cond_arg->memsize) { printf("\nCreate_cond_qcqp_ocp2ocp_arg: outside memory bounds!\n\n"); exit(1); } #endif return; } void PART_COND_QCQP_ARG_SET_DEFAULT(struct PART_COND_QCQP_ARG *part_cond_arg) { int ii; int N2 = part_cond_arg->N2; for(ii=0; ii<=N2; ii++) { COND_QCQP_ARG_SET_DEFAULT(part_cond_arg->cond_arg+ii); COND_QCQP_ARG_SET_COND_LAST_STAGE(0, part_cond_arg->cond_arg+ii); } // cond_last_stage at last stage COND_QCQP_ARG_SET_COND_LAST_STAGE(1, 
part_cond_arg->cond_arg+N2); return; } void PART_COND_QCQP_ARG_SET_RIC_ALG(int ric_alg, struct PART_COND_QCQP_ARG *part_cond_arg) { int ii; int N2 = part_cond_arg->N2; for(ii=0; ii<=N2; ii++) { COND_QCQP_ARG_SET_RIC_ALG(ric_alg, part_cond_arg->cond_arg+ii); } return; } hpipm_size_t PART_COND_QCQP_WS_MEMSIZE(struct OCP_QCQP_DIM *ocp_dim, int *block_size, struct OCP_QCQP_DIM *part_dense_dim, struct PART_COND_QCQP_ARG *part_cond_arg) { struct OCP_QCQP_DIM tmp_ocp_qcqp_dim; struct OCP_QP_DIM tmp_ocp_qp_dim; int ii; int N = ocp_dim->N; int N2 = part_dense_dim->N; hpipm_size_t size = 0; size += (N2+1)*sizeof(struct COND_QCQP_ARG_WS); int N_tmp = 0; // temporary sum of horizons for(ii=0; ii<=N2; ii++) { // alias ocp_qcqp_dim tmp_ocp_qcqp_dim.N = block_size[ii]; tmp_ocp_qcqp_dim.nx = ocp_dim->nx+N_tmp; tmp_ocp_qcqp_dim.nu = ocp_dim->nu+N_tmp; tmp_ocp_qcqp_dim.nbx = ocp_dim->nbx+N_tmp; tmp_ocp_qcqp_dim.nbu = ocp_dim->nbu+N_tmp; tmp_ocp_qcqp_dim.nb = ocp_dim->nb+N_tmp; tmp_ocp_qcqp_dim.ng = ocp_dim->ng+N_tmp; tmp_ocp_qcqp_dim.nq = ocp_dim->nq+N_tmp; tmp_ocp_qcqp_dim.nsbx = ocp_dim->nsbx+N_tmp; tmp_ocp_qcqp_dim.nsbu = ocp_dim->nsbu+N_tmp; tmp_ocp_qcqp_dim.nsg = ocp_dim->nsg+N_tmp; tmp_ocp_qcqp_dim.nsq = ocp_dim->nsq+N_tmp; tmp_ocp_qcqp_dim.ns = ocp_dim->ns+N_tmp; // alias ocp_qcqp_dim tmp_ocp_qp_dim.N = block_size[ii]; tmp_ocp_qp_dim.nx = ocp_dim->qp_dim->nx+N_tmp; tmp_ocp_qp_dim.nu = ocp_dim->qp_dim->nu+N_tmp; tmp_ocp_qp_dim.nbx = ocp_dim->qp_dim->nbx+N_tmp; tmp_ocp_qp_dim.nbu = ocp_dim->qp_dim->nbu+N_tmp; tmp_ocp_qp_dim.nb = ocp_dim->qp_dim->nb+N_tmp; tmp_ocp_qp_dim.ng = ocp_dim->qp_dim->ng+N_tmp; tmp_ocp_qp_dim.nsbx = ocp_dim->qp_dim->nsbx+N_tmp; tmp_ocp_qp_dim.nsbu = ocp_dim->qp_dim->nsbu+N_tmp; tmp_ocp_qp_dim.nsg = ocp_dim->qp_dim->nsg+N_tmp; tmp_ocp_qp_dim.ns = ocp_dim->qp_dim->ns+N_tmp; tmp_ocp_qcqp_dim.qp_dim = &tmp_ocp_qp_dim; size += COND_QCQP_WS_MEMSIZE(&tmp_ocp_qcqp_dim, part_cond_arg->cond_arg+ii); N_tmp += block_size[ii]; } size = (size+63)/64*64; // make 
multiple of typical cache line size size += 1*64; // align once to typical cache line size return size; } void PART_COND_QCQP_WS_CREATE(struct OCP_QCQP_DIM *ocp_dim, int *block_size, struct OCP_QCQP_DIM *part_dense_dim, struct PART_COND_QCQP_ARG *part_cond_arg, struct PART_COND_QCQP_WS *part_cond_ws, void *mem) { struct OCP_QCQP_DIM tmp_ocp_qcqp_dim; struct OCP_QP_DIM tmp_ocp_qp_dim; int ii; int N = ocp_dim->N; int N2 = part_dense_dim->N; // cond workspace struct struct COND_QCQP_ARG_WS *cws_ptr = mem; part_cond_ws->cond_ws = cws_ptr; cws_ptr += N2+1; // align to typical cache line size hpipm_size_t s_ptr = (hpipm_size_t) cws_ptr; s_ptr = (s_ptr+63)/64*64; char *c_ptr = (char *) s_ptr; int N_tmp = 0; // temporary sum of horizons for(ii=0; ii<=N2; ii++) { // alias ocp_qcqp_dim tmp_ocp_qcqp_dim.N = block_size[ii]; tmp_ocp_qcqp_dim.nx = ocp_dim->nx+N_tmp; tmp_ocp_qcqp_dim.nu = ocp_dim->nu+N_tmp; tmp_ocp_qcqp_dim.nbx = ocp_dim->nbx+N_tmp; tmp_ocp_qcqp_dim.nbu = ocp_dim->nbu+N_tmp; tmp_ocp_qcqp_dim.nb = ocp_dim->nb+N_tmp; tmp_ocp_qcqp_dim.ng = ocp_dim->ng+N_tmp; tmp_ocp_qcqp_dim.nq = ocp_dim->nq+N_tmp; tmp_ocp_qcqp_dim.nsbx = ocp_dim->nsbx+N_tmp; tmp_ocp_qcqp_dim.nsbu = ocp_dim->nsbu+N_tmp; tmp_ocp_qcqp_dim.nsg = ocp_dim->nsg+N_tmp; tmp_ocp_qcqp_dim.nsq = ocp_dim->nsq+N_tmp; tmp_ocp_qcqp_dim.ns = ocp_dim->ns+N_tmp; // alias ocp_qcqp_dim tmp_ocp_qp_dim.N = block_size[ii]; tmp_ocp_qp_dim.nx = ocp_dim->qp_dim->nx+N_tmp; tmp_ocp_qp_dim.nu = ocp_dim->qp_dim->nu+N_tmp; tmp_ocp_qp_dim.nbx = ocp_dim->qp_dim->nbx+N_tmp; tmp_ocp_qp_dim.nbu = ocp_dim->qp_dim->nbu+N_tmp; tmp_ocp_qp_dim.nb = ocp_dim->qp_dim->nb+N_tmp; tmp_ocp_qp_dim.ng = ocp_dim->qp_dim->ng+N_tmp; tmp_ocp_qp_dim.nsbx = ocp_dim->qp_dim->nsbx+N_tmp; tmp_ocp_qp_dim.nsbu = ocp_dim->qp_dim->nsbu+N_tmp; tmp_ocp_qp_dim.nsg = ocp_dim->qp_dim->nsg+N_tmp; tmp_ocp_qp_dim.ns = ocp_dim->qp_dim->ns+N_tmp; tmp_ocp_qcqp_dim.qp_dim = &tmp_ocp_qp_dim; COND_QCQP_WS_CREATE(&tmp_ocp_qcqp_dim, part_cond_arg->cond_arg+ii, 
part_cond_ws->cond_ws+ii, c_ptr); c_ptr += (part_cond_ws->cond_ws+ii)->memsize; N_tmp += block_size[ii]; } part_cond_ws->memsize = PART_COND_QCQP_WS_MEMSIZE(ocp_dim, block_size, part_dense_dim, part_cond_arg); #if defined(RUNTIME_CHECKS) if(c_ptr > ((char *) mem) + part_cond_ws->memsize) { printf("\nCreate_cond_qp_ocp2ocp: outside memory bounds!\n\n"); exit(1); } #endif return; } void PART_COND_QCQP_COND(struct OCP_QCQP *ocp_qp, struct OCP_QCQP *part_dense_qp, struct PART_COND_QCQP_ARG *part_cond_arg, struct PART_COND_QCQP_WS *part_cond_ws) { struct OCP_QP_DIM tmp_ocp_dim; struct OCP_QP tmp_ocp_qp; struct OCP_QCQP_DIM tmp_ocp_qcqp_dim; struct OCP_QCQP tmp_ocp_qcqp; int ii; int N = ocp_qp->dim->N; int N2 = part_dense_qp->dim->N; int bs; // horizon of current block int N_tmp = 0; // temporary sum of horizons for(ii=0; ii<=N2; ii++) { bs = part_cond_ws->cond_ws[ii].qp_ws->bs; // alias ocp_dim tmp_ocp_dim.N = bs; tmp_ocp_dim.nx = ocp_qp->dim->qp_dim->nx+N_tmp; tmp_ocp_dim.nu = ocp_qp->dim->qp_dim->nu+N_tmp; tmp_ocp_dim.nbx = ocp_qp->dim->qp_dim->nbx+N_tmp; tmp_ocp_dim.nbu = ocp_qp->dim->qp_dim->nbu+N_tmp; tmp_ocp_dim.nb = ocp_qp->dim->qp_dim->nb+N_tmp; tmp_ocp_dim.ng = ocp_qp->dim->qp_dim->ng+N_tmp; tmp_ocp_dim.nsbx = ocp_qp->dim->qp_dim->nsbx+N_tmp; tmp_ocp_dim.nsbu = ocp_qp->dim->qp_dim->nsbu+N_tmp; tmp_ocp_dim.nsg = ocp_qp->dim->qp_dim->nsg+N_tmp; tmp_ocp_dim.ns = ocp_qp->dim->qp_dim->ns+N_tmp; // TODO equality constraints !!!!!!!!!!!!!!!!!!!!!!!!!!! 
// alias ocp_qp tmp_ocp_qp.dim = &tmp_ocp_dim; tmp_ocp_qp.idxb = ocp_qp->idxb+N_tmp; tmp_ocp_qp.BAbt = ocp_qp->BAbt+N_tmp; tmp_ocp_qp.b = ocp_qp->b+N_tmp; tmp_ocp_qp.RSQrq = ocp_qp->RSQrq+N_tmp; tmp_ocp_qp.rqz = ocp_qp->rqz+N_tmp; tmp_ocp_qp.DCt = ocp_qp->DCt+N_tmp; tmp_ocp_qp.d = ocp_qp->d+N_tmp; tmp_ocp_qp.d_mask = ocp_qp->d_mask+N_tmp; tmp_ocp_qp.Z = ocp_qp->Z+N_tmp; tmp_ocp_qp.idxs_rev = ocp_qp->idxs_rev+N_tmp; COND_BABT(&tmp_ocp_qp, part_dense_qp->BAbt+ii, part_dense_qp->b+ii, part_cond_arg->cond_arg[ii].qp_arg, part_cond_ws->cond_ws[ii].qp_ws); COND_RSQRQ(&tmp_ocp_qp, part_dense_qp->RSQrq+ii, part_dense_qp->rqz+ii, part_cond_arg->cond_arg[ii].qp_arg, part_cond_ws->cond_ws[ii].qp_ws); COND_DCTD(&tmp_ocp_qp, part_dense_qp->idxb[ii], part_dense_qp->DCt+ii, part_dense_qp->d+ii, part_dense_qp->d_mask+ii, part_dense_qp->idxs_rev[ii], part_dense_qp->Z+ii, part_dense_qp->rqz+ii, part_cond_arg->cond_arg[ii].qp_arg, part_cond_ws->cond_ws[ii].qp_ws); // alias ocp_dim tmp_ocp_qcqp_dim.N = bs; tmp_ocp_qcqp_dim.nx = ocp_qp->dim->nx+N_tmp; tmp_ocp_qcqp_dim.nu = ocp_qp->dim->nu+N_tmp; tmp_ocp_qcqp_dim.nbx = ocp_qp->dim->nbx+N_tmp; tmp_ocp_qcqp_dim.nbu = ocp_qp->dim->nbu+N_tmp; tmp_ocp_qcqp_dim.nb = ocp_qp->dim->nb+N_tmp; tmp_ocp_qcqp_dim.ng = ocp_qp->dim->ng+N_tmp; tmp_ocp_qcqp_dim.nq = ocp_qp->dim->nq+N_tmp; tmp_ocp_qcqp_dim.nsbx = ocp_qp->dim->nsbx+N_tmp; tmp_ocp_qcqp_dim.nsbu = ocp_qp->dim->nsbu+N_tmp; tmp_ocp_qcqp_dim.nsg = ocp_qp->dim->nsg+N_tmp; tmp_ocp_qcqp_dim.nsq = ocp_qp->dim->nsq+N_tmp; tmp_ocp_qcqp_dim.ns = ocp_qp->dim->ns+N_tmp; // alias ocp_qp tmp_ocp_qcqp.dim = &tmp_ocp_qcqp_dim; tmp_ocp_qcqp.idxb = ocp_qp->idxb+N_tmp; tmp_ocp_qcqp.BAbt = ocp_qp->BAbt+N_tmp; tmp_ocp_qcqp.b = ocp_qp->b+N_tmp; tmp_ocp_qcqp.RSQrq = ocp_qp->RSQrq+N_tmp; tmp_ocp_qcqp.rqz = ocp_qp->rqz+N_tmp; tmp_ocp_qcqp.DCt = ocp_qp->DCt+N_tmp; tmp_ocp_qcqp.d = ocp_qp->d+N_tmp; tmp_ocp_qcqp.d_mask = ocp_qp->d_mask+N_tmp; tmp_ocp_qcqp.Z = ocp_qp->Z+N_tmp; tmp_ocp_qcqp.idxs_rev = 
ocp_qp->idxs_rev+N_tmp; tmp_ocp_qcqp.Hq = ocp_qp->Hq+N_tmp; tmp_ocp_qcqp.Hq_nzero = ocp_qp->Hq_nzero+N_tmp; COND_QCQP_QC(&tmp_ocp_qcqp, part_dense_qp->Hq[ii], part_dense_qp->Hq_nzero[ii], part_dense_qp->DCt+ii, part_dense_qp->d+ii, part_cond_arg->cond_arg+ii, part_cond_ws->cond_ws+ii); N_tmp += bs; } #if 0 // copy last stage int *nx = ocp_qp->dim->nx; int *nu = ocp_qp->dim->nu; int *nb = ocp_qp->dim->nb; int *ng = ocp_qp->dim->ng; int *ns = ocp_qp->dim->ns; GECP(nu[N]+nx[N]+1, nu[N]+nx[N], ocp_qp->RSQrq+N, 0, 0, part_dense_qp->RSQrq+N2, 0, 0); VECCP(nu[N]+nx[N], ocp_qp->rq+N, 0, part_dense_qp->rq+N2, 0); GECP(nu[N]+nx[N], ng[N], ocp_qp->DCt+N, 0, 0, part_dense_qp->DCt+N2, 0, 0); VECCP(2*nb[N]+2*ng[N], ocp_qp->d+N, 0, part_dense_qp->d+N2, 0); for(ii=0; ii<nb[N]; ii++) part_dense_qp->idxb[N2][ii] = ocp_qp->idxb[N][ii]; VECCP(2*ns[N], ocp_qp->Z+N, 0, part_dense_qp->Z+N2, 0); VECCP(2*ns[N], ocp_qp->z+N, 0, part_dense_qp->z+N2, 0); for(ii=0; ii<ns[N]; ii++) part_dense_qp->idxs_rev[N2][ii] = ocp_qp->idxs_rev[N][ii]; #endif return; } void PART_COND_QCQP_COND_LHS(struct OCP_QCQP *ocp_qp, struct OCP_QCQP *part_dense_qp, struct PART_COND_QCQP_ARG *part_cond_arg, struct PART_COND_QCQP_WS *part_cond_ws) { struct OCP_QP_DIM tmp_ocp_dim; struct OCP_QP tmp_ocp_qp; struct OCP_QCQP_DIM tmp_ocp_qcqp_dim; struct OCP_QCQP tmp_ocp_qcqp; int ii; int N = ocp_qp->dim->N; int N2 = part_dense_qp->dim->N; int bs; // horizon of current block int N_tmp = 0; // temporary sum of horizons for(ii=0; ii<=N2; ii++) { bs = part_cond_ws->cond_ws[ii].qp_ws->bs; // alias ocp_dim tmp_ocp_dim.N = bs; tmp_ocp_dim.nx = ocp_qp->dim->qp_dim->nx+N_tmp; tmp_ocp_dim.nu = ocp_qp->dim->qp_dim->nu+N_tmp; tmp_ocp_dim.nbx = ocp_qp->dim->qp_dim->nbx+N_tmp; tmp_ocp_dim.nbu = ocp_qp->dim->qp_dim->nbu+N_tmp; tmp_ocp_dim.nb = ocp_qp->dim->qp_dim->nb+N_tmp; tmp_ocp_dim.ng = ocp_qp->dim->qp_dim->ng+N_tmp; tmp_ocp_dim.nsbx = ocp_qp->dim->qp_dim->nsbx+N_tmp; tmp_ocp_dim.nsbu = ocp_qp->dim->qp_dim->nsbu+N_tmp; tmp_ocp_dim.nsg 
= ocp_qp->dim->qp_dim->nsg+N_tmp; tmp_ocp_dim.ns = ocp_qp->dim->qp_dim->ns+N_tmp; // alias ocp_qp tmp_ocp_qp.dim = &tmp_ocp_dim; tmp_ocp_qp.idxb = ocp_qp->idxb+N_tmp; tmp_ocp_qp.BAbt = ocp_qp->BAbt+N_tmp; tmp_ocp_qp.b = ocp_qp->b+N_tmp; tmp_ocp_qp.RSQrq = ocp_qp->RSQrq+N_tmp; tmp_ocp_qp.rqz = ocp_qp->rqz+N_tmp; tmp_ocp_qp.DCt = ocp_qp->DCt+N_tmp; tmp_ocp_qp.d = ocp_qp->d+N_tmp; tmp_ocp_qp.d_mask = ocp_qp->d_mask+N_tmp; tmp_ocp_qp.Z = ocp_qp->Z+N_tmp; tmp_ocp_qp.idxs_rev = ocp_qp->idxs_rev+N_tmp; COND_BAT(&tmp_ocp_qp, part_dense_qp->BAbt+ii, part_cond_arg->cond_arg[ii].qp_arg, part_cond_ws->cond_ws[ii].qp_ws); COND_RSQ(&tmp_ocp_qp, part_dense_qp->RSQrq+ii, part_cond_arg->cond_arg[ii].qp_arg, part_cond_ws->cond_ws[ii].qp_ws); COND_DCT(&tmp_ocp_qp, part_dense_qp->idxb[ii], part_dense_qp->DCt+ii, part_dense_qp->idxs_rev[ii], part_dense_qp->Z+ii, part_cond_arg->cond_arg[ii].qp_arg, part_cond_ws->cond_ws[ii].qp_ws); // alias ocp_dim tmp_ocp_qcqp_dim.N = bs; tmp_ocp_qcqp_dim.nx = ocp_qp->dim->nx+N_tmp; tmp_ocp_qcqp_dim.nu = ocp_qp->dim->nu+N_tmp; tmp_ocp_qcqp_dim.nbx = ocp_qp->dim->nbx+N_tmp; tmp_ocp_qcqp_dim.nbu = ocp_qp->dim->nbu+N_tmp; tmp_ocp_qcqp_dim.nb = ocp_qp->dim->nb+N_tmp; tmp_ocp_qcqp_dim.ng = ocp_qp->dim->ng+N_tmp; tmp_ocp_qcqp_dim.nq = ocp_qp->dim->nq+N_tmp; tmp_ocp_qcqp_dim.nsbx = ocp_qp->dim->nsbx+N_tmp; tmp_ocp_qcqp_dim.nsbu = ocp_qp->dim->nsbu+N_tmp; tmp_ocp_qcqp_dim.nsg = ocp_qp->dim->nsg+N_tmp; tmp_ocp_qcqp_dim.nsq = ocp_qp->dim->nsq+N_tmp; tmp_ocp_qcqp_dim.ns = ocp_qp->dim->ns+N_tmp; // alias ocp_qp tmp_ocp_qcqp.dim = &tmp_ocp_qcqp_dim; tmp_ocp_qcqp.idxb = ocp_qp->idxb+N_tmp; tmp_ocp_qcqp.BAbt = ocp_qp->BAbt+N_tmp; tmp_ocp_qcqp.b = ocp_qp->b+N_tmp; tmp_ocp_qcqp.RSQrq = ocp_qp->RSQrq+N_tmp; tmp_ocp_qcqp.rqz = ocp_qp->rqz+N_tmp; tmp_ocp_qcqp.DCt = ocp_qp->DCt+N_tmp; tmp_ocp_qcqp.d = ocp_qp->d+N_tmp; tmp_ocp_qcqp.d_mask = ocp_qp->d_mask+N_tmp; tmp_ocp_qcqp.Z = ocp_qp->Z+N_tmp; tmp_ocp_qcqp.idxs_rev = ocp_qp->idxs_rev+N_tmp; tmp_ocp_qcqp.Hq = 
ocp_qp->Hq+N_tmp; tmp_ocp_qcqp.Hq_nzero = ocp_qp->Hq_nzero+N_tmp; COND_QCQP_QC_LHS(&tmp_ocp_qcqp, part_dense_qp->Hq[ii], part_dense_qp->Hq_nzero[ii], part_dense_qp->DCt+ii, part_cond_arg->cond_arg+ii, part_cond_ws->cond_ws+ii); N_tmp += bs; } #if 0 // copy last stage int *nx = ocp_qp->dim->nx; int *nu = ocp_qp->dim->nu; int *nb = ocp_qp->dim->nb; int *ng = ocp_qp->dim->ng; int *ns = ocp_qp->dim->ns; GECP(nu[N]+nx[N]+1, nu[N]+nx[N], ocp_qp->RSQrq+N, 0, 0, part_dense_qp->RSQrq+N2, 0, 0); VECCP(nu[N]+nx[N], ocp_qp->rq+N, 0, part_dense_qp->rq+N2, 0); GECP(nu[N]+nx[N], ng[N], ocp_qp->DCt+N, 0, 0, part_dense_qp->DCt+N2, 0, 0); VECCP(2*nb[N]+2*ng[N], ocp_qp->d+N, 0, part_dense_qp->d+N2, 0); for(ii=0; ii<nb[N]; ii++) part_dense_qp->idxb[N2][ii] = ocp_qp->idxb[N][ii]; VECCP(2*ns[N], ocp_qp->Z+N, 0, part_dense_qp->Z+N2, 0); VECCP(2*ns[N], ocp_qp->z+N, 0, part_dense_qp->z+N2, 0); for(ii=0; ii<ns[N]; ii++) part_dense_qp->idxs_rev[N2][ii] = ocp_qp->idxs_rev[N][ii]; #endif return; } void PART_COND_QCQP_COND_RHS(struct OCP_QCQP *ocp_qp, struct OCP_QCQP *part_dense_qp, struct PART_COND_QCQP_ARG *part_cond_arg, struct PART_COND_QCQP_WS *part_cond_ws) { struct OCP_QP_DIM tmp_ocp_dim; struct OCP_QP tmp_ocp_qp; struct OCP_QCQP_DIM tmp_ocp_qcqp_dim; struct OCP_QCQP tmp_ocp_qcqp; int ii; int N = ocp_qp->dim->N; int N2 = part_dense_qp->dim->N; int bs; // horizon of current block int N_tmp = 0; // temporary sum of horizons for(ii=0; ii<=N2; ii++) { bs = part_cond_ws->cond_ws[ii].qp_ws->bs; // alias ocp_dim tmp_ocp_dim.N = bs; tmp_ocp_dim.nx = ocp_qp->dim->nx+N_tmp; tmp_ocp_dim.nu = ocp_qp->dim->nu+N_tmp; tmp_ocp_dim.nbx = ocp_qp->dim->nbx+N_tmp; tmp_ocp_dim.nbu = ocp_qp->dim->nbu+N_tmp; tmp_ocp_dim.nb = ocp_qp->dim->nb+N_tmp; tmp_ocp_dim.ng = ocp_qp->dim->ng+N_tmp; tmp_ocp_dim.nsbx = ocp_qp->dim->nsbx+N_tmp; tmp_ocp_dim.nsbu = ocp_qp->dim->nsbu+N_tmp; tmp_ocp_dim.nsg = ocp_qp->dim->nsg+N_tmp; tmp_ocp_dim.ns = ocp_qp->dim->ns+N_tmp; // alias ocp_qp tmp_ocp_qp.dim = &tmp_ocp_dim; 
tmp_ocp_qp.idxb = ocp_qp->idxb+N_tmp; tmp_ocp_qp.BAbt = ocp_qp->BAbt+N_tmp; tmp_ocp_qp.b = ocp_qp->b+N_tmp; tmp_ocp_qp.RSQrq = ocp_qp->RSQrq+N_tmp; tmp_ocp_qp.rqz = ocp_qp->rqz+N_tmp; tmp_ocp_qp.DCt = ocp_qp->DCt+N_tmp; tmp_ocp_qp.d = ocp_qp->d+N_tmp; tmp_ocp_qp.d_mask = ocp_qp->d_mask+N_tmp; tmp_ocp_qp.Z = ocp_qp->Z+N_tmp; tmp_ocp_qp.idxs_rev = ocp_qp->idxs_rev+N_tmp; COND_B(&tmp_ocp_qp, part_dense_qp->b+ii, part_cond_arg->cond_arg[ii].qp_arg, part_cond_ws->cond_ws[ii].qp_ws); COND_RQ(&tmp_ocp_qp, part_dense_qp->rqz+ii, part_cond_arg->cond_arg[ii].qp_arg, part_cond_ws->cond_ws[ii].qp_ws); COND_D(&tmp_ocp_qp, part_dense_qp->d+ii, part_dense_qp->d_mask+ii, part_dense_qp->rqz+ii, part_cond_arg->cond_arg[ii].qp_arg, part_cond_ws->cond_ws[ii].qp_ws); // alias ocp_dim tmp_ocp_qcqp_dim.N = bs; tmp_ocp_qcqp_dim.nx = ocp_qp->dim->nx+N_tmp; tmp_ocp_qcqp_dim.nu = ocp_qp->dim->nu+N_tmp; tmp_ocp_qcqp_dim.nbx = ocp_qp->dim->nbx+N_tmp; tmp_ocp_qcqp_dim.nbu = ocp_qp->dim->nbu+N_tmp; tmp_ocp_qcqp_dim.nb = ocp_qp->dim->nb+N_tmp; tmp_ocp_qcqp_dim.ng = ocp_qp->dim->ng+N_tmp; tmp_ocp_qcqp_dim.nq = ocp_qp->dim->nq+N_tmp; tmp_ocp_qcqp_dim.nsbx = ocp_qp->dim->nsbx+N_tmp; tmp_ocp_qcqp_dim.nsbu = ocp_qp->dim->nsbu+N_tmp; tmp_ocp_qcqp_dim.nsg = ocp_qp->dim->nsg+N_tmp; tmp_ocp_qcqp_dim.nsq = ocp_qp->dim->nsq+N_tmp; tmp_ocp_qcqp_dim.ns = ocp_qp->dim->ns+N_tmp; // alias ocp_qp tmp_ocp_qcqp.dim = &tmp_ocp_qcqp_dim; tmp_ocp_qcqp.idxb = ocp_qp->idxb+N_tmp; tmp_ocp_qcqp.BAbt = ocp_qp->BAbt+N_tmp; tmp_ocp_qcqp.b = ocp_qp->b+N_tmp; tmp_ocp_qcqp.RSQrq = ocp_qp->RSQrq+N_tmp; tmp_ocp_qcqp.rqz = ocp_qp->rqz+N_tmp; tmp_ocp_qcqp.DCt = ocp_qp->DCt+N_tmp; tmp_ocp_qcqp.d = ocp_qp->d+N_tmp; tmp_ocp_qcqp.d_mask = ocp_qp->d_mask+N_tmp; tmp_ocp_qcqp.Z = ocp_qp->Z+N_tmp; tmp_ocp_qcqp.idxs_rev = ocp_qp->idxs_rev+N_tmp; tmp_ocp_qcqp.Hq = ocp_qp->Hq+N_tmp; tmp_ocp_qcqp.Hq_nzero = ocp_qp->Hq_nzero+N_tmp; COND_QCQP_QC_RHS(&tmp_ocp_qcqp, part_dense_qp->d+ii, part_cond_arg->cond_arg+ii, part_cond_ws->cond_ws+ii); N_tmp 
+= bs; } #if 0 // copy last stage int *nx = ocp_qp->dim->nx; int *nu = ocp_qp->dim->nu; int *nb = ocp_qp->dim->nb; int *ng = ocp_qp->dim->ng; int *ns = ocp_qp->dim->ns; VECCP_LIBSTR(nu[N]+nx[N], ocp_qp->rq+N, 0, part_dense_qp->rq+N2, 0); VECCP_LIBSTR(2*nb[N]+2*ng[N], ocp_qp->d+N, 0, part_dense_qp->d+N2, 0); VECCP_LIBSTR(2*ns[N], ocp_qp->z+N, 0, part_dense_qp->z+N2, 0); #endif return; } void PART_COND_QCQP_EXPAND_SOL(struct OCP_QCQP *ocp_qp, struct OCP_QCQP *part_dense_qp, struct OCP_QCQP_SOL *part_dense_qp_sol, struct OCP_QCQP_SOL *ocp_qp_sol, struct PART_COND_QCQP_ARG *part_cond_arg, struct PART_COND_QCQP_WS *part_cond_ws) { struct OCP_QP_DIM tmp_ocp_dim; struct OCP_QP tmp_ocp_qp; struct OCP_QP_SOL tmp_ocp_qp_sol; struct DENSE_QP_SOL dense_qp_sol; int bkp_comp_prim_sol; int bkp_comp_dual_sol_eq; int bkp_comp_dual_sol_ineq; int *nx = ocp_qp->dim->nx; int *nu = ocp_qp->dim->nu; int *nb = ocp_qp->dim->nb; int *ng = ocp_qp->dim->ng; int *nq = ocp_qp->dim->nq; int *ns = ocp_qp->dim->ns; int ii, jj, kk; int N = ocp_qp->dim->N; int N2 = part_dense_qp->dim->N; int bs; // horizon of current block int N_tmp = 0; // temporary sum of horizons for(ii=0; ii<=N2; ii++) { bs = part_cond_ws->cond_ws[ii].qp_ws->bs; // alias ocp_dim tmp_ocp_dim.N = bs; tmp_ocp_dim.nx = ocp_qp->dim->qp_dim->nx+N_tmp; tmp_ocp_dim.nu = ocp_qp->dim->qp_dim->nu+N_tmp; tmp_ocp_dim.nbx = ocp_qp->dim->qp_dim->nbx+N_tmp; tmp_ocp_dim.nbu = ocp_qp->dim->qp_dim->nbu+N_tmp; tmp_ocp_dim.nb = ocp_qp->dim->qp_dim->nb+N_tmp; tmp_ocp_dim.ng = ocp_qp->dim->qp_dim->ng+N_tmp; tmp_ocp_dim.nsbx = ocp_qp->dim->qp_dim->nsbx+N_tmp; tmp_ocp_dim.nsbu = ocp_qp->dim->qp_dim->nsbu+N_tmp; tmp_ocp_dim.nsg = ocp_qp->dim->qp_dim->nsg+N_tmp; tmp_ocp_dim.ns = ocp_qp->dim->qp_dim->ns+N_tmp; // alias ocp_qp tmp_ocp_qp.dim = &tmp_ocp_dim; tmp_ocp_qp.idxb = ocp_qp->idxb+N_tmp; tmp_ocp_qp.BAbt = ocp_qp->BAbt+N_tmp; tmp_ocp_qp.b = ocp_qp->b+N_tmp; tmp_ocp_qp.RSQrq = ocp_qp->RSQrq+N_tmp; tmp_ocp_qp.rqz = ocp_qp->rqz+N_tmp; tmp_ocp_qp.DCt = 
ocp_qp->DCt+N_tmp; tmp_ocp_qp.d = ocp_qp->d+N_tmp; tmp_ocp_qp.d_mask = ocp_qp->d_mask+N_tmp; tmp_ocp_qp.Z = ocp_qp->Z+N_tmp; tmp_ocp_qp.idxs_rev = ocp_qp->idxs_rev+N_tmp; // alias ocp qp sol tmp_ocp_qp_sol.dim = &tmp_ocp_dim; tmp_ocp_qp_sol.ux = ocp_qp_sol->ux+N_tmp; tmp_ocp_qp_sol.pi = ocp_qp_sol->pi+N_tmp; tmp_ocp_qp_sol.lam = ocp_qp_sol->lam+N_tmp; tmp_ocp_qp_sol.t = ocp_qp_sol->t+N_tmp; // alias ocp qp sol dense_qp_sol.v = part_dense_qp_sol->ux+ii; dense_qp_sol.pi = part_dense_qp_sol->pi+ii; dense_qp_sol.lam = part_dense_qp_sol->lam+ii; dense_qp_sol.t = part_dense_qp_sol->t+ii; bkp_comp_prim_sol = part_cond_arg->cond_arg[ii].qp_arg->comp_prim_sol; bkp_comp_dual_sol_eq = part_cond_arg->cond_arg[ii].qp_arg->comp_dual_sol_eq; bkp_comp_dual_sol_ineq = part_cond_arg->cond_arg[ii].qp_arg->comp_dual_sol_ineq; part_cond_arg->cond_arg[ii].qp_arg->comp_prim_sol = 1 & bkp_comp_prim_sol; part_cond_arg->cond_arg[ii].qp_arg->comp_dual_sol_eq = 0 & bkp_comp_dual_sol_eq; part_cond_arg->cond_arg[ii].qp_arg->comp_dual_sol_ineq = 1 & bkp_comp_dual_sol_ineq; EXPAND_SOL(&tmp_ocp_qp, &dense_qp_sol, &tmp_ocp_qp_sol, part_cond_arg->cond_arg[ii].qp_arg, part_cond_ws->cond_ws[ii].qp_ws); // linearize quadr constr for(jj=N_tmp; jj<=N_tmp+bs; jj++) { GECP(nu[jj]+nx[jj], ng[jj]+nq[jj], ocp_qp->DCt+jj, 0, 0, part_cond_ws->cond_ws[ii].tmp_DCt+(jj-N_tmp), 0, 0); for(kk=0; kk<nq[jj]; kk++) { SYMV_L(nu[jj]+nx[jj], 1.0, ocp_qp->Hq[jj]+kk, 0, 0, ocp_qp_sol->ux+jj, 0, 0.0, part_cond_ws->cond_ws[ii].tmp_nuxM, 0, part_cond_ws->cond_ws[ii].tmp_nuxM, 0); COLAD(nu[jj]+nx[jj], 1.0, part_cond_ws->cond_ws[ii].tmp_nuxM, 0, part_cond_ws->cond_ws[ii].tmp_DCt+(jj-N_tmp), 0, ng[jj]+kk); } } tmp_ocp_qp.DCt = part_cond_ws->cond_ws[ii].tmp_DCt+0; part_cond_arg->cond_arg[ii].qp_arg->comp_prim_sol = 0 & bkp_comp_prim_sol; part_cond_arg->cond_arg[ii].qp_arg->comp_dual_sol_eq = 1 & bkp_comp_dual_sol_eq; part_cond_arg->cond_arg[ii].qp_arg->comp_dual_sol_ineq = 0 & bkp_comp_dual_sol_ineq; EXPAND_SOL(&tmp_ocp_qp, 
&dense_qp_sol, &tmp_ocp_qp_sol, part_cond_arg->cond_arg[ii].qp_arg, part_cond_ws->cond_ws[ii].qp_ws); part_cond_arg->cond_arg[ii].qp_arg->comp_prim_sol = bkp_comp_prim_sol; part_cond_arg->cond_arg[ii].qp_arg->comp_dual_sol_eq = bkp_comp_dual_sol_eq; part_cond_arg->cond_arg[ii].qp_arg->comp_dual_sol_ineq = bkp_comp_dual_sol_ineq; N_tmp += bs; } #if 0 // copy last stage VECCP_LIBSTR(nu[N]+nx[N]+2*ns[N], part_dense_qp_sol->ux+N2, 0, ocp_qp_sol->ux+N, 0); VECCP_LIBSTR(2*nb[N]+2*ng[N]+2*ns[N], part_dense_qp_sol->lam+N2, 0, ocp_qp_sol->lam+N, 0); VECCP_LIBSTR(2*nb[N]+2*ng[N]+2*ns[N], part_dense_qp_sol->t+N2, 0, ocp_qp_sol->t+N, 0); #endif return; }
89762753c88b571b2afef9bc6d27509ec913733c
fdbb74a95924e2677466614f6ab6e2bb13b2a95a
/libc/isystem/sys/ioctl.h
1ae5208a5d5149a2debaf7afcefd4ae71b3bdfbc
[ "ISC" ]
permissive
jart/cosmopolitan
fb11b5658939023977060a7c6c71a74093d9cb44
0d748ad58e1063dd1f8560f18a0c75293b9415b7
refs/heads/master
2023-09-06T09:17:29.303607
2023-09-02T03:49:13
2023-09-02T03:50:18
272,457,606
11,887
435
ISC
2023-09-14T17:47:58
2020-06-15T14:16:13
C
UTF-8
C
false
false
386
h
ioctl.h
#ifndef LIBC_ISYSTEM_SYS_IOCTL_H_ #define LIBC_ISYSTEM_SYS_IOCTL_H_ #include "libc/calls/calls.h" #include "libc/calls/struct/winsize.h" #include "libc/sysv/consts/fd.h" #include "libc/sysv/consts/fio.h" #include "libc/sysv/consts/io.h" #include "libc/sysv/consts/modem.h" #include "libc/sysv/consts/pty.h" #include "libc/sysv/consts/sio.h" #include "libc/sysv/consts/termios.h" #endif
c61bd6bf65631eb7918d1a1df262c2cedda62b01
88ae8695987ada722184307301e221e1ba3cc2fa
/third_party/ffmpeg/libavcodec/vp9dsp_template.c
9b1166170423d9a624c1515a761d90a7c121bcc6
[ "Apache-2.0", "LGPL-2.0-or-later", "MIT", "GPL-1.0-or-later", "BSD-3-Clause", "LGPL-2.1-only", "LGPL-3.0-only", "GPL-2.0-only", "LGPL-2.1-or-later", "GPL-3.0-or-later", "LGPL-3.0-or-later", "IJG", "LicenseRef-scancode-other-permissive", "GPL-2.0-or-later", "GPL-3.0-only" ]
permissive
iridium-browser/iridium-browser
71d9c5ff76e014e6900b825f67389ab0ccd01329
5ee297f53dc7f8e70183031cff62f37b0f19d25f
refs/heads/master
2023-08-03T16:44:16.844552
2023-07-20T15:17:00
2023-07-23T16:09:30
220,016,632
341
40
BSD-3-Clause
2021-08-13T13:54:45
2019-11-06T14:32:31
null
UTF-8
C
false
false
88,301
c
vp9dsp_template.c
/* * VP9 compatible video decoder * * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com> * Copyright (C) 2013 Clément Bœsch <u pkh me> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/common.h" #include "bit_depth_template.c" #include "vp9dsp.h" #if BIT_DEPTH != 12 // FIXME see whether we can merge parts of this (perhaps at least 4x4 and 8x8) // back with h264pred.[ch] static void vert_4x4_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *left, const uint8_t *_top) { pixel *dst = (pixel *) _dst; const pixel *top = (const pixel *) _top; pixel4 p4 = AV_RN4PA(top); stride /= sizeof(pixel); AV_WN4PA(dst + stride * 0, p4); AV_WN4PA(dst + stride * 1, p4); AV_WN4PA(dst + stride * 2, p4); AV_WN4PA(dst + stride * 3, p4); } static void vert_8x8_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *left, const uint8_t *_top) { pixel *dst = (pixel *) _dst; const pixel *top = (const pixel *) _top; pixel4 p4a = AV_RN4PA(top + 0); pixel4 p4b = AV_RN4PA(top + 4); int y; stride /= sizeof(pixel); for (y = 0; y < 8; y++) { AV_WN4PA(dst + 0, p4a); AV_WN4PA(dst + 4, p4b); dst += stride; } } static void vert_16x16_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *left, const uint8_t *_top) { pixel *dst = (pixel *) _dst; const pixel *top = (const pixel *) _top; pixel4 p4a = 
AV_RN4PA(top + 0); pixel4 p4b = AV_RN4PA(top + 4); pixel4 p4c = AV_RN4PA(top + 8); pixel4 p4d = AV_RN4PA(top + 12); int y; stride /= sizeof(pixel); for (y = 0; y < 16; y++) { AV_WN4PA(dst + 0, p4a); AV_WN4PA(dst + 4, p4b); AV_WN4PA(dst + 8, p4c); AV_WN4PA(dst + 12, p4d); dst += stride; } } static void vert_32x32_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *left, const uint8_t *_top) { pixel *dst = (pixel *) _dst; const pixel *top = (const pixel *) _top; pixel4 p4a = AV_RN4PA(top + 0); pixel4 p4b = AV_RN4PA(top + 4); pixel4 p4c = AV_RN4PA(top + 8); pixel4 p4d = AV_RN4PA(top + 12); pixel4 p4e = AV_RN4PA(top + 16); pixel4 p4f = AV_RN4PA(top + 20); pixel4 p4g = AV_RN4PA(top + 24); pixel4 p4h = AV_RN4PA(top + 28); int y; stride /= sizeof(pixel); for (y = 0; y < 32; y++) { AV_WN4PA(dst + 0, p4a); AV_WN4PA(dst + 4, p4b); AV_WN4PA(dst + 8, p4c); AV_WN4PA(dst + 12, p4d); AV_WN4PA(dst + 16, p4e); AV_WN4PA(dst + 20, p4f); AV_WN4PA(dst + 24, p4g); AV_WN4PA(dst + 28, p4h); dst += stride; } } static void hor_4x4_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *_left, const uint8_t *top) { pixel *dst = (pixel *) _dst; const pixel *left = (const pixel *) _left; stride /= sizeof(pixel); AV_WN4PA(dst + stride * 0, PIXEL_SPLAT_X4(left[3])); AV_WN4PA(dst + stride * 1, PIXEL_SPLAT_X4(left[2])); AV_WN4PA(dst + stride * 2, PIXEL_SPLAT_X4(left[1])); AV_WN4PA(dst + stride * 3, PIXEL_SPLAT_X4(left[0])); } static void hor_8x8_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *_left, const uint8_t *top) { pixel *dst = (pixel *) _dst; const pixel *left = (const pixel *) _left; int y; stride /= sizeof(pixel); for (y = 0; y < 8; y++) { pixel4 p4 = PIXEL_SPLAT_X4(left[7 - y]); AV_WN4PA(dst + 0, p4); AV_WN4PA(dst + 4, p4); dst += stride; } } static void hor_16x16_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *_left, const uint8_t *top) { pixel *dst = (pixel *) _dst; const pixel *left = (const pixel *) _left; int y; stride /= sizeof(pixel); for (y = 0; y < 16; y++) { pixel4 p4 = 
PIXEL_SPLAT_X4(left[15 - y]); AV_WN4PA(dst + 0, p4); AV_WN4PA(dst + 4, p4); AV_WN4PA(dst + 8, p4); AV_WN4PA(dst + 12, p4); dst += stride; } } static void hor_32x32_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *_left, const uint8_t *top) { pixel *dst = (pixel *) _dst; const pixel *left = (const pixel *) _left; int y; stride /= sizeof(pixel); for (y = 0; y < 32; y++) { pixel4 p4 = PIXEL_SPLAT_X4(left[31 - y]); AV_WN4PA(dst + 0, p4); AV_WN4PA(dst + 4, p4); AV_WN4PA(dst + 8, p4); AV_WN4PA(dst + 12, p4); AV_WN4PA(dst + 16, p4); AV_WN4PA(dst + 20, p4); AV_WN4PA(dst + 24, p4); AV_WN4PA(dst + 28, p4); dst += stride; } } #endif /* BIT_DEPTH != 12 */ static void tm_4x4_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *_left, const uint8_t *_top) { pixel *dst = (pixel *) _dst; const pixel *left = (const pixel *) _left; const pixel *top = (const pixel *) _top; int y, tl = top[-1]; stride /= sizeof(pixel); for (y = 0; y < 4; y++) { int l_m_tl = left[3 - y] - tl; dst[0] = av_clip_pixel(top[0] + l_m_tl); dst[1] = av_clip_pixel(top[1] + l_m_tl); dst[2] = av_clip_pixel(top[2] + l_m_tl); dst[3] = av_clip_pixel(top[3] + l_m_tl); dst += stride; } } static void tm_8x8_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *_left, const uint8_t *_top) { pixel *dst = (pixel *) _dst; const pixel *left = (const pixel *) _left; const pixel *top = (const pixel *) _top; int y, tl = top[-1]; stride /= sizeof(pixel); for (y = 0; y < 8; y++) { int l_m_tl = left[7 - y] - tl; dst[0] = av_clip_pixel(top[0] + l_m_tl); dst[1] = av_clip_pixel(top[1] + l_m_tl); dst[2] = av_clip_pixel(top[2] + l_m_tl); dst[3] = av_clip_pixel(top[3] + l_m_tl); dst[4] = av_clip_pixel(top[4] + l_m_tl); dst[5] = av_clip_pixel(top[5] + l_m_tl); dst[6] = av_clip_pixel(top[6] + l_m_tl); dst[7] = av_clip_pixel(top[7] + l_m_tl); dst += stride; } } static void tm_16x16_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *_left, const uint8_t *_top) { pixel *dst = (pixel *) _dst; const pixel *left = (const pixel *) _left; const pixel 
*top = (const pixel *) _top; int y, tl = top[-1]; stride /= sizeof(pixel); for (y = 0; y < 16; y++) { int l_m_tl = left[15 - y] - tl; dst[ 0] = av_clip_pixel(top[ 0] + l_m_tl); dst[ 1] = av_clip_pixel(top[ 1] + l_m_tl); dst[ 2] = av_clip_pixel(top[ 2] + l_m_tl); dst[ 3] = av_clip_pixel(top[ 3] + l_m_tl); dst[ 4] = av_clip_pixel(top[ 4] + l_m_tl); dst[ 5] = av_clip_pixel(top[ 5] + l_m_tl); dst[ 6] = av_clip_pixel(top[ 6] + l_m_tl); dst[ 7] = av_clip_pixel(top[ 7] + l_m_tl); dst[ 8] = av_clip_pixel(top[ 8] + l_m_tl); dst[ 9] = av_clip_pixel(top[ 9] + l_m_tl); dst[10] = av_clip_pixel(top[10] + l_m_tl); dst[11] = av_clip_pixel(top[11] + l_m_tl); dst[12] = av_clip_pixel(top[12] + l_m_tl); dst[13] = av_clip_pixel(top[13] + l_m_tl); dst[14] = av_clip_pixel(top[14] + l_m_tl); dst[15] = av_clip_pixel(top[15] + l_m_tl); dst += stride; } } static void tm_32x32_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t *_left, const uint8_t *_top) { pixel *dst = (pixel *) _dst; const pixel *left = (const pixel *) _left; const pixel *top = (const pixel *) _top; int y, tl = top[-1]; stride /= sizeof(pixel); for (y = 0; y < 32; y++) { int l_m_tl = left[31 - y] - tl; dst[ 0] = av_clip_pixel(top[ 0] + l_m_tl); dst[ 1] = av_clip_pixel(top[ 1] + l_m_tl); dst[ 2] = av_clip_pixel(top[ 2] + l_m_tl); dst[ 3] = av_clip_pixel(top[ 3] + l_m_tl); dst[ 4] = av_clip_pixel(top[ 4] + l_m_tl); dst[ 5] = av_clip_pixel(top[ 5] + l_m_tl); dst[ 6] = av_clip_pixel(top[ 6] + l_m_tl); dst[ 7] = av_clip_pixel(top[ 7] + l_m_tl); dst[ 8] = av_clip_pixel(top[ 8] + l_m_tl); dst[ 9] = av_clip_pixel(top[ 9] + l_m_tl); dst[10] = av_clip_pixel(top[10] + l_m_tl); dst[11] = av_clip_pixel(top[11] + l_m_tl); dst[12] = av_clip_pixel(top[12] + l_m_tl); dst[13] = av_clip_pixel(top[13] + l_m_tl); dst[14] = av_clip_pixel(top[14] + l_m_tl); dst[15] = av_clip_pixel(top[15] + l_m_tl); dst[16] = av_clip_pixel(top[16] + l_m_tl); dst[17] = av_clip_pixel(top[17] + l_m_tl); dst[18] = av_clip_pixel(top[18] + l_m_tl); dst[19] = 
av_clip_pixel(top[19] + l_m_tl); /* continuation of tm_32x32_c */
        dst[20] = av_clip_pixel(top[20] + l_m_tl);
        dst[21] = av_clip_pixel(top[21] + l_m_tl);
        dst[22] = av_clip_pixel(top[22] + l_m_tl);
        dst[23] = av_clip_pixel(top[23] + l_m_tl);
        dst[24] = av_clip_pixel(top[24] + l_m_tl);
        dst[25] = av_clip_pixel(top[25] + l_m_tl);
        dst[26] = av_clip_pixel(top[26] + l_m_tl);
        dst[27] = av_clip_pixel(top[27] + l_m_tl);
        dst[28] = av_clip_pixel(top[28] + l_m_tl);
        dst[29] = av_clip_pixel(top[29] + l_m_tl);
        dst[30] = av_clip_pixel(top[30] + l_m_tl);
        dst[31] = av_clip_pixel(top[31] + l_m_tl);
        dst += stride;
    }
}

#if BIT_DEPTH != 12

/* DC prediction, 4x4: fill the block with the rounded average of the
 * 4 left-edge and 4 top-edge pixels ((sum + 4) >> 3). */
static void dc_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                     const uint8_t *_left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *left = (const pixel *) _left;
    const pixel *top = (const pixel *) _top;
    pixel4 dc = PIXEL_SPLAT_X4((left[0] + left[1] + left[2] + left[3] +
                                top[0] + top[1] + top[2] + top[3] + 4) >> 3);

    stride /= sizeof(pixel);
    AV_WN4PA(dst + stride * 0, dc);
    AV_WN4PA(dst + stride * 1, dc);
    AV_WN4PA(dst + stride * 2, dc);
    AV_WN4PA(dst + stride * 3, dc);
}

/* DC prediction, 8x8: average of 8 left + 8 top pixels. */
static void dc_8x8_c(uint8_t *_dst, ptrdiff_t stride,
                     const uint8_t *_left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *left = (const pixel *) _left;
    const pixel *top = (const pixel *) _top;
    pixel4 dc = PIXEL_SPLAT_X4
        ((left[0] + left[1] + left[2] + left[3] +
          left[4] + left[5] + left[6] + left[7] +
          top[0] + top[1] + top[2] + top[3] +
          top[4] + top[5] + top[6] + top[7] + 8) >> 4);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 8; y++) {
        AV_WN4PA(dst + 0, dc);
        AV_WN4PA(dst + 4, dc);
        dst += stride;
    }
}

/* DC prediction, 16x16 (sum continues on the next source line). */
static void dc_16x16_c(uint8_t *_dst, ptrdiff_t stride,
                       const uint8_t *_left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *left = (const pixel *) _left;
    const pixel *top = (const pixel *) _top;
    pixel4 dc = PIXEL_SPLAT_X4
        ((left[0] + left[1] + left[2] + left[3] +
          left[4] + left[5] + left[6] + left[7] +
          left[8] + left[9] + left[10] + left[11] +
          left[12] + left[13] + left[14] +
left[15] + /* continuation of dc_16x16_c's 32-sample sum */
          top[0] + top[1] + top[2] + top[3] +
          top[4] + top[5] + top[6] + top[7] +
          top[8] + top[9] + top[10] + top[11] +
          top[12] + top[13] + top[14] + top[15] + 16) >> 5);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 16; y++) {
        AV_WN4PA(dst +  0, dc);
        AV_WN4PA(dst +  4, dc);
        AV_WN4PA(dst +  8, dc);
        AV_WN4PA(dst + 12, dc);
        dst += stride;
    }
}

/* DC prediction, 32x32: average of 32 left + 32 top pixels ((sum + 32) >> 6). */
static void dc_32x32_c(uint8_t *_dst, ptrdiff_t stride,
                       const uint8_t *_left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *left = (const pixel *) _left;
    const pixel *top = (const pixel *) _top;
    pixel4 dc = PIXEL_SPLAT_X4
        ((left[0] + left[1] + left[2] + left[3] +
          left[4] + left[5] + left[6] + left[7] +
          left[8] + left[9] + left[10] + left[11] +
          left[12] + left[13] + left[14] + left[15] +
          left[16] + left[17] + left[18] + left[19] +
          left[20] + left[21] + left[22] + left[23] +
          left[24] + left[25] + left[26] + left[27] +
          left[28] + left[29] + left[30] + left[31] +
          top[0] + top[1] + top[2] + top[3] +
          top[4] + top[5] + top[6] + top[7] +
          top[8] + top[9] + top[10] + top[11] +
          top[12] + top[13] + top[14] + top[15] +
          top[16] + top[17] + top[18] + top[19] +
          top[20] + top[21] + top[22] + top[23] +
          top[24] + top[25] + top[26] + top[27] +
          top[28] + top[29] + top[30] + top[31] + 32) >> 6);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 32; y++) {
        AV_WN4PA(dst +  0, dc);
        AV_WN4PA(dst +  4, dc);
        AV_WN4PA(dst +  8, dc);
        AV_WN4PA(dst + 12, dc);
        AV_WN4PA(dst + 16, dc);
        AV_WN4PA(dst + 20, dc);
        AV_WN4PA(dst + 24, dc);
        AV_WN4PA(dst + 28, dc);
        dst += stride;
    }
}

/* DC-left prediction, 4x4: average the 4 left-edge pixels only
 * (top edge unused — presumably for blocks with no above neighbour; confirm). */
static void dc_left_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                          const uint8_t *_left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *left = (const pixel *) _left;
    pixel4 dc = PIXEL_SPLAT_X4((left[0] + left[1] + left[2] + left[3] + 2) >> 2);

    stride /= sizeof(pixel);
    AV_WN4PA(dst + stride * 0, dc);
    AV_WN4PA(dst + stride * 1, dc);
    AV_WN4PA(dst + stride * 2, dc);
    AV_WN4PA(dst + stride * 3, dc);
}

/* DC-left prediction, 8x8 (signature continues on the next source line). */
static void dc_left_8x8_c(uint8_t *_dst, ptrdiff_t stride, const uint8_t
*_left, const uint8_t *top) /* continuation of dc_left_8x8_c's parameter list */
{
    pixel *dst = (pixel *) _dst;
    const pixel *left = (const pixel *) _left;
    pixel4 dc = PIXEL_SPLAT_X4
        ((left[0] + left[1] + left[2] + left[3] +
          left[4] + left[5] + left[6] + left[7] + 4) >> 3);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 8; y++) {
        AV_WN4PA(dst + 0, dc);
        AV_WN4PA(dst + 4, dc);
        dst += stride;
    }
}

/* DC-left prediction, 16x16: average the 16 left-edge pixels only. */
static void dc_left_16x16_c(uint8_t *_dst, ptrdiff_t stride,
                            const uint8_t *_left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *left = (const pixel *) _left;
    pixel4 dc = PIXEL_SPLAT_X4
        ((left[0] + left[1] + left[2] + left[3] +
          left[4] + left[5] + left[6] + left[7] +
          left[8] + left[9] + left[10] + left[11] +
          left[12] + left[13] + left[14] + left[15] + 8) >> 4);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 16; y++) {
        AV_WN4PA(dst +  0, dc);
        AV_WN4PA(dst +  4, dc);
        AV_WN4PA(dst +  8, dc);
        AV_WN4PA(dst + 12, dc);
        dst += stride;
    }
}

/* DC-left prediction, 32x32: average the 32 left-edge pixels only. */
static void dc_left_32x32_c(uint8_t *_dst, ptrdiff_t stride,
                            const uint8_t *_left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *left = (const pixel *) _left;
    pixel4 dc = PIXEL_SPLAT_X4
        ((left[0] + left[1] + left[2] + left[3] +
          left[4] + left[5] + left[6] + left[7] +
          left[8] + left[9] + left[10] + left[11] +
          left[12] + left[13] + left[14] + left[15] +
          left[16] + left[17] + left[18] + left[19] +
          left[20] + left[21] + left[22] + left[23] +
          left[24] + left[25] + left[26] + left[27] +
          left[28] + left[29] + left[30] + left[31] + 16) >> 5);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 32; y++) {
        AV_WN4PA(dst +  0, dc);
        AV_WN4PA(dst +  4, dc);
        AV_WN4PA(dst +  8, dc);
        AV_WN4PA(dst + 12, dc);
        AV_WN4PA(dst + 16, dc);
        AV_WN4PA(dst + 20, dc);
        AV_WN4PA(dst + 24, dc);
        AV_WN4PA(dst + 28, dc);
        dst += stride;
    }
}

/* DC-top prediction, 4x4: average the 4 top-edge pixels only
 * (body continues on the next source line). */
static void dc_top_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *top = (const pixel *) _top;
    pixel4 dc = PIXEL_SPLAT_X4((top[0] + top[1] + top[2] + top[3] + 2) >> 2);

    stride /= sizeof(pixel);
AV_WN4PA(dst + stride * 0, dc); /* tail of dc_top_4x4_c */
    AV_WN4PA(dst + stride * 1, dc);
    AV_WN4PA(dst + stride * 2, dc);
    AV_WN4PA(dst + stride * 3, dc);
}

/* DC-top prediction, 8x8: average the 8 top-edge pixels only. */
static void dc_top_8x8_c(uint8_t *_dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *top = (const pixel *) _top;
    pixel4 dc = PIXEL_SPLAT_X4
        ((top[0] + top[1] + top[2] + top[3] +
          top[4] + top[5] + top[6] + top[7] + 4) >> 3);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 8; y++) {
        AV_WN4PA(dst + 0, dc);
        AV_WN4PA(dst + 4, dc);
        dst += stride;
    }
}

/* DC-top prediction, 16x16: average the 16 top-edge pixels only. */
static void dc_top_16x16_c(uint8_t *_dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *top = (const pixel *) _top;
    pixel4 dc = PIXEL_SPLAT_X4
        ((top[0] + top[1] + top[2] + top[3] +
          top[4] + top[5] + top[6] + top[7] +
          top[8] + top[9] + top[10] + top[11] +
          top[12] + top[13] + top[14] + top[15] + 8) >> 4);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 16; y++) {
        AV_WN4PA(dst +  0, dc);
        AV_WN4PA(dst +  4, dc);
        AV_WN4PA(dst +  8, dc);
        AV_WN4PA(dst + 12, dc);
        dst += stride;
    }
}

/* DC-top prediction, 32x32: average the 32 top-edge pixels only. */
static void dc_top_32x32_c(uint8_t *_dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *top = (const pixel *) _top;
    pixel4 dc = PIXEL_SPLAT_X4
        ((top[0] + top[1] + top[2] + top[3] +
          top[4] + top[5] + top[6] + top[7] +
          top[8] + top[9] + top[10] + top[11] +
          top[12] + top[13] + top[14] + top[15] +
          top[16] + top[17] + top[18] + top[19] +
          top[20] + top[21] + top[22] + top[23] +
          top[24] + top[25] + top[26] + top[27] +
          top[28] + top[29] + top[30] + top[31] + 16) >> 5);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 32; y++) {
        AV_WN4PA(dst +  0, dc);
        AV_WN4PA(dst +  4, dc);
        AV_WN4PA(dst +  8, dc);
        AV_WN4PA(dst + 12, dc);
        AV_WN4PA(dst + 16, dc);
        AV_WN4PA(dst + 20, dc);
        AV_WN4PA(dst + 24, dc);
        AV_WN4PA(dst + 28, dc);
        dst += stride;
    }
}
#endif /* BIT_DEPTH != 12 */

/* DC-128 prediction, 4x4: constant mid-range fill, 128 scaled to the bit depth
 * (body continues on the next source line). */
static void dc_128_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    pixel
*dst = (pixel *) _dst; /* continuation of dc_128_4x4_c */
    pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));

    stride /= sizeof(pixel);
    AV_WN4PA(dst + stride * 0, val);
    AV_WN4PA(dst + stride * 1, val);
    AV_WN4PA(dst + stride * 2, val);
    AV_WN4PA(dst + stride * 3, val);
}

/* DC-128 prediction, 8x8: constant mid-range fill. */
static void dc_128_8x8_c(uint8_t *_dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 8; y++) {
        AV_WN4PA(dst + 0, val);
        AV_WN4PA(dst + 4, val);
        dst += stride;
    }
}

/* DC-128 prediction, 16x16. */
static void dc_128_16x16_c(uint8_t *_dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 16; y++) {
        AV_WN4PA(dst +  0, val);
        AV_WN4PA(dst +  4, val);
        AV_WN4PA(dst +  8, val);
        AV_WN4PA(dst + 12, val);
        dst += stride;
    }
}

/* DC-128 prediction, 32x32. */
static void dc_128_32x32_c(uint8_t *_dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    pixel4 val = PIXEL_SPLAT_X4(128 << (BIT_DEPTH - 8));
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 32; y++) {
        AV_WN4PA(dst +  0, val);
        AV_WN4PA(dst +  4, val);
        AV_WN4PA(dst +  8, val);
        AV_WN4PA(dst + 12, val);
        AV_WN4PA(dst + 16, val);
        AV_WN4PA(dst + 20, val);
        AV_WN4PA(dst + 24, val);
        AV_WN4PA(dst + 28, val);
        dst += stride;
    }
}

/* DC-127 prediction, 4x4: constant fill one below mid-range. */
static void dc_127_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);

    stride /= sizeof(pixel);
    AV_WN4PA(dst + stride * 0, val);
    AV_WN4PA(dst + stride * 1, val);
    AV_WN4PA(dst + stride * 2, val);
    AV_WN4PA(dst + stride * 3, val);
}

/* DC-127 prediction, 8x8 (body continues on the next source line). */
static void dc_127_8x8_c(uint8_t *_dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 8; y++) {
        AV_WN4PA(dst + 0, val);
        AV_WN4PA(dst + 4,
val); /* tail of dc_127_8x8_c */
        dst += stride;
    }
}

/* DC-127 prediction, 16x16. */
static void dc_127_16x16_c(uint8_t *_dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 16; y++) {
        AV_WN4PA(dst +  0, val);
        AV_WN4PA(dst +  4, val);
        AV_WN4PA(dst +  8, val);
        AV_WN4PA(dst + 12, val);
        dst += stride;
    }
}

/* DC-127 prediction, 32x32. */
static void dc_127_32x32_c(uint8_t *_dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) - 1);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 32; y++) {
        AV_WN4PA(dst +  0, val);
        AV_WN4PA(dst +  4, val);
        AV_WN4PA(dst +  8, val);
        AV_WN4PA(dst + 12, val);
        AV_WN4PA(dst + 16, val);
        AV_WN4PA(dst + 20, val);
        AV_WN4PA(dst + 24, val);
        AV_WN4PA(dst + 28, val);
        dst += stride;
    }
}

/* DC-129 prediction, 4x4: constant fill one above mid-range. */
static void dc_129_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);

    stride /= sizeof(pixel);
    AV_WN4PA(dst + stride * 0, val);
    AV_WN4PA(dst + stride * 1, val);
    AV_WN4PA(dst + stride * 2, val);
    AV_WN4PA(dst + stride * 3, val);
}

/* DC-129 prediction, 8x8. */
static void dc_129_8x8_c(uint8_t *_dst, ptrdiff_t stride,
                         const uint8_t *left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 8; y++) {
        AV_WN4PA(dst + 0, val);
        AV_WN4PA(dst + 4, val);
        dst += stride;
    }
}

/* DC-129 prediction, 16x16. */
static void dc_129_16x16_c(uint8_t *_dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 16; y++) {
        AV_WN4PA(dst +  0, val);
        AV_WN4PA(dst +  4, val);
        AV_WN4PA(dst +  8, val);
        AV_WN4PA(dst + 12, val);
        dst += stride;
    }
}

/* DC-129 prediction, 32x32 (body continues on the next source line). */
static void dc_129_32x32_c(uint8_t *_dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
{
    pixel *dst
= (pixel *) _dst; /* continuation of dc_129_32x32_c */
    pixel4 val = PIXEL_SPLAT_X4((128 << (BIT_DEPTH - 8)) + 1);
    int y;

    stride /= sizeof(pixel);
    for (y = 0; y < 32; y++) {
        AV_WN4PA(dst +  0, val);
        AV_WN4PA(dst +  4, val);
        AV_WN4PA(dst +  8, val);
        AV_WN4PA(dst + 12, val);
        AV_WN4PA(dst + 16, val);
        AV_WN4PA(dst + 20, val);
        AV_WN4PA(dst + 24, val);
        AV_WN4PA(dst + 28, val);
        dst += stride;
    }
}

#if BIT_DEPTH != 12

#if BIT_DEPTH == 8
#define memset_bpc memset
#else
/* Element-wise memset for pixels wider than one byte. */
static inline void memset_bpc(uint16_t *dst, int val, int len)
{
    int n;
    for (n = 0; n < len; n++) {
        dst[n] = val;
    }
}
#endif

/* Address a destination pixel by (column, row). */
#define DST(x, y) dst[(x) + (y) * stride]

/* Diagonal-down-left directional prediction, 4x4: built from the 8 top pixels
 * with a (1,2,1)/4 smoothing filter along the down-left diagonal. */
static void diag_downleft_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                                const uint8_t *left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *top = (const pixel *) _top;
    int a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
        a4 = top[4], a5 = top[5], a6 = top[6], a7 = top[7];

    stride /= sizeof(pixel);
    DST(0,0) = (a0 + a1 * 2 + a2 + 2) >> 2;
    DST(1,0) = DST(0,1) = (a1 + a2 * 2 + a3 + 2) >> 2;
    DST(2,0) = DST(1,1) = DST(0,2) = (a2 + a3 * 2 + a4 + 2) >> 2;
    DST(3,0) = DST(2,1) = DST(1,2) = DST(0,3) = (a3 + a4 * 2 + a5 + 2) >> 2;
    DST(3,1) = DST(2,2) = DST(1,3) = (a4 + a5 * 2 + a6 + 2) >> 2;
    DST(3,2) = DST(2,3) = (a5 + a6 * 2 + a7 + 2) >> 2;
    DST(3,3) = a7;  // note: this is different from vp8 and such
}

/* Generic diagonal-down-left for larger sizes: filter the top row into v[],
 * then each output row j copies a shifted window of v[], padded with the
 * last top pixel. */
#define def_diag_downleft(size) \
static void diag_downleft_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
                                              const uint8_t *left, const uint8_t *_top) \
{ \
    pixel *dst = (pixel *) _dst; \
    const pixel *top = (const pixel *) _top; \
    int i, j; \
    pixel v[size - 1]; \
\
    stride /= sizeof(pixel); \
    for (i = 0; i < size - 2; i++) \
        v[i] = (top[i] + top[i + 1] * 2 + top[i + 2] + 2) >> 2; \
    v[size - 2] = (top[size - 2] + top[size - 1] * 3 + 2) >> 2; \
\
    for (j = 0; j < size; j++) { \
        memcpy(dst + j*stride, v + j, (size - 1 - j) * sizeof(pixel)); \
        memset_bpc(dst + j*stride + size - 1 - j, top[size - 1], j + 1); \
    } \
}

def_diag_downleft(8)
def_diag_downleft(16)
def_diag_downleft(32)

static
/* Diagonal down-right prediction, 4x4: diagonals run from bottom-left to
 * top-right, built from smoothed left column, top-left corner and top row. */
void diag_downright_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                          const uint8_t *_left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *top = (const pixel *) _top;
    const pixel *left = (const pixel *) _left;
    int tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
        l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0];

    stride /= sizeof(pixel);
    DST(0,3) = (l1 + l2 * 2 + l3 + 2) >> 2;
    DST(0,2) = DST(1,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
    DST(0,1) = DST(1,2) = DST(2,3) = (tl + l0 * 2 + l1 + 2) >> 2;
    DST(0,0) = DST(1,1) = DST(2,2) = DST(3,3) = (l0 + tl * 2 + a0 + 2) >> 2;
    DST(1,0) = DST(2,1) = DST(3,2) = (tl + a0 * 2 + a1 + 2) >> 2;
    DST(2,0) = DST(3,1) = (a0 + a1 * 2 + a2 + 2) >> 2;
    DST(3,0) = (a1 + a2 * 2 + a3 + 2) >> 2;
}

/* Generic diagonal down-right predictor: v[] holds the smoothed
 * left-column / corner / top-row sequence; each output row is a
 * one-pixel-shifted window into it. */
#define def_diag_downright(size) \
static void diag_downright_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
                                               const uint8_t *_left, const uint8_t *_top) \
{ \
    pixel *dst = (pixel *) _dst; \
    const pixel *top = (const pixel *) _top; \
    const pixel *left = (const pixel *) _left; \
    int i, j; \
    pixel v[size + size - 1]; \
\
    stride /= sizeof(pixel); \
    for (i = 0; i < size - 2; i++) { \
        v[i           ] = (left[i] + left[i + 1] * 2 + left[i + 2] + 2) >> 2; \
        v[size + 1 + i] = (top[i] + top[i + 1] * 2 + top[i + 2] + 2) >> 2; \
    } \
    v[size - 2] = (left[size - 2] + left[size - 1] * 2 + top[-1] + 2) >> 2; \
    v[size - 1] = (left[size - 1] + top[-1] * 2 + top[ 0] + 2) >> 2; \
    v[size    ] = (top[-1] + top[0] * 2 + top[ 1] + 2) >> 2; \
\
    for (j = 0; j < size; j++) \
        memcpy(dst + j*stride, v + size - 1 - j, size * sizeof(pixel)); \
}

def_diag_downright(8)
def_diag_downright(16)
def_diag_downright(32)

/* Vertical-right prediction, 4x4 (DST assignments continue on the next
 * source line). */
static void vert_right_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                             const uint8_t *_left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *top = (const pixel *) _top;
    const pixel *left = (const pixel *) _left;
    int tl = top[-1], a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
        l0 = left[3], l1 = left[2], l2 = left[1];

    stride /= sizeof(pixel);
    /* (continuation of vert_right_4x4_c) */
    DST(0,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
    DST(0,2) = (tl + l0 * 2 + l1 + 2) >> 2;
    DST(0,0) = DST(1,2) = (tl + a0 + 1) >> 1;
    DST(0,1) = DST(1,3) = (l0 + tl * 2 + a0 + 2) >> 2;
    DST(1,0) = DST(2,2) = (a0 + a1 + 1) >> 1;
    DST(1,1) = DST(2,3) = (tl + a0 * 2 + a1 + 2) >> 2;
    DST(2,0) = DST(3,2) = (a1 + a2 + 1) >> 1;
    DST(2,1) = DST(3,3) = (a0 + a1 * 2 + a2 + 2) >> 2;
    DST(3,0) = (a2 + a3 + 1) >> 1;
    DST(3,1) = (a1 + a2 * 2 + a3 + 2) >> 2;
}

/* Generic vertical-right predictor: ve[]/vo[] hold the 2-tap averaged and
 * 3-tap smoothed sequences; even/odd output rows are shifted windows. */
#define def_vert_right(size) \
static void vert_right_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
                                           const uint8_t *_left, const uint8_t *_top) \
{ \
    pixel *dst = (pixel *) _dst; \
    const pixel *top = (const pixel *) _top; \
    const pixel *left = (const pixel *) _left; \
    int i, j; \
    pixel ve[size + size/2 - 1], vo[size + size/2 - 1]; \
\
    stride /= sizeof(pixel); \
    for (i = 0; i < size/2 - 2; i++) { \
        vo[i] = (left[i*2 + 3] + left[i*2 + 2] * 2 + left[i*2 + 1] + 2) >> 2; \
        ve[i] = (left[i*2 + 4] + left[i*2 + 3] * 2 + left[i*2 + 2] + 2) >> 2; \
    } \
    vo[size/2 - 2] = (left[size - 1] + left[size - 2] * 2 + left[size - 3] + 2) >> 2; \
    ve[size/2 - 2] = (top[-1] + left[size - 1] * 2 + left[size - 2] + 2) >> 2; \
\
    ve[size/2 - 1] = (top[-1] + top[0] + 1) >> 1; \
    vo[size/2 - 1] = (left[size - 1] + top[-1] * 2 + top[0] + 2) >> 2; \
    for (i = 0; i < size - 1; i++) { \
        ve[size/2 + i] = (top[i] + top[i + 1] + 1) >> 1; \
        vo[size/2 + i] = (top[i - 1] + top[i] * 2 + top[i + 1] + 2) >> 2; \
    } \
\
    for (j = 0; j < size / 2; j++) { \
        memcpy(dst + j*2 *stride, ve + size/2 - 1 - j, size * sizeof(pixel)); \
        memcpy(dst + (j*2 + 1)*stride, vo + size/2 - 1 - j, size * sizeof(pixel)); \
    } \
}

def_vert_right(8)
def_vert_right(16)
def_vert_right(32)

/* Horizontal-down prediction, 4x4 (declaration continues on the next
 * source line). */
static void hor_down_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                           const uint8_t *_left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *top = (const pixel *) _top;
    const pixel *left = (const pixel *) _left;
    int l0 = left[3], l1 = left[2], l2 = left[1], l3 = left[0],
        tl = top[-1], a0 = top[0], a1 = top[1], a2 =
        /* (continuation of hor_down_4x4_c) */
        top[2];

    stride /= sizeof(pixel);
    DST(2,0) = (tl + a0 * 2 + a1 + 2) >> 2;
    DST(3,0) = (a0 + a1 * 2 + a2 + 2) >> 2;
    DST(0,0) = DST(2,1) = (tl + l0 + 1) >> 1;
    DST(1,0) = DST(3,1) = (a0 + tl * 2 + l0 + 2) >> 2;
    DST(0,1) = DST(2,2) = (l0 + l1 + 1) >> 1;
    DST(1,1) = DST(3,2) = (tl + l0 * 2 + l1 + 2) >> 2;
    DST(0,2) = DST(2,3) = (l1 + l2 + 1) >> 1;
    DST(1,2) = DST(3,3) = (l0 + l1 * 2 + l2 + 2) >> 2;
    DST(0,3) = (l2 + l3 + 1) >> 1;
    DST(1,3) = (l1 + l2 * 2 + l3 + 2) >> 2;
}

/* Generic horizontal-down predictor: v[] packs interleaved 2-tap/3-tap
 * values from the left column plus smoothed top row; each output row is
 * a window shifted by two entries. */
#define def_hor_down(size) \
static void hor_down_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
                                         const uint8_t *_left, const uint8_t *_top) \
{ \
    pixel *dst = (pixel *) _dst; \
    const pixel *top = (const pixel *) _top; \
    const pixel *left = (const pixel *) _left; \
    int i, j; \
    pixel v[size * 3 - 2]; \
\
    stride /= sizeof(pixel); \
    for (i = 0; i < size - 2; i++) { \
        v[i*2       ] = (left[i + 1] + left[i + 0] + 1) >> 1; \
        v[i*2 + 1   ] = (left[i + 2] + left[i + 1] * 2 + left[i + 0] + 2) >> 2; \
        v[size*2 + i] = (top[i - 1] + top[i] * 2 + top[i + 1] + 2) >> 2; \
    } \
    v[size*2 - 2] = (top[-1] + left[size - 1] + 1) >> 1; \
    v[size*2 - 4] = (left[size - 1] + left[size - 2] + 1) >> 1; \
    v[size*2 - 1] = (top[0] + top[-1] * 2 + left[size - 1] + 2) >> 2; \
    v[size*2 - 3] = (top[-1] + left[size - 1] * 2 + left[size - 2] + 2) >> 2; \
\
    for (j = 0; j < size; j++) \
        memcpy(dst + j*stride, v + size*2 - 2 - j*2, size * sizeof(pixel)); \
}

def_hor_down(8)
def_hor_down(16)
def_hor_down(32)

/* Vertical-left prediction, 4x4: built purely from the top row; the left
 * neighbour is unused (DST assignments continue on the next source line). */
static void vert_left_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                            const uint8_t *left, const uint8_t *_top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *top = (const pixel *) _top;
    int a0 = top[0], a1 = top[1], a2 = top[2], a3 = top[3],
        a4 = top[4], a5 = top[5], a6 = top[6];

    stride /= sizeof(pixel);
    DST(0,0) = (a0 + a1 + 1) >> 1;
    DST(0,1) = (a0 + a1 * 2 + a2 + 2) >> 2;
    DST(1,0) = DST(0,2) = (a1 + a2 + 1) >> 1;
    DST(1,1) = DST(0,3) = (a1 + a2 * 2 + a3 + 2) >> 2;
    DST(2,0) = DST(1,2) = (a2 + a3 + 1) >> 1;
    DST(2,1) = DST(1,3) = (a2 + a3 * 2 + a4 + 2) >> 2;
    /* (continuation of vert_left_4x4_c) */
    DST(3,0) = DST(2,2) = (a3 + a4 + 1) >> 1;
    DST(3,1) = DST(2,3) = (a3 + a4 * 2 + a5 + 2) >> 2;
    DST(3,2) = (a4 + a5 + 1) >> 1;
    DST(3,3) = (a4 + a5 * 2 + a6 + 2) >> 2;
}

/* Generic vertical-left predictor: even rows use the 2-tap average ve[],
 * odd rows the 3-tap smooth vo[]; row tails pad with the last top pixel. */
#define def_vert_left(size) \
static void vert_left_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
                                          const uint8_t *left, const uint8_t *_top) \
{ \
    pixel *dst = (pixel *) _dst; \
    const pixel *top = (const pixel *) _top; \
    int i, j; \
    pixel ve[size - 1], vo[size - 1]; \
\
    stride /= sizeof(pixel); \
    for (i = 0; i < size - 2; i++) { \
        ve[i] = (top[i] + top[i + 1] + 1) >> 1; \
        vo[i] = (top[i] + top[i + 1] * 2 + top[i + 2] + 2) >> 2; \
    } \
    ve[size - 2] = (top[size - 2] + top[size - 1] + 1) >> 1; \
    vo[size - 2] = (top[size - 2] + top[size - 1] * 3 + 2) >> 2; \
\
    for (j = 0; j < size / 2; j++) { \
        memcpy(dst + j*2 * stride, ve + j, (size - j - 1) * sizeof(pixel)); \
        memset_bpc(dst + j*2 * stride + size - j - 1, top[size - 1], j + 1); \
        memcpy(dst + (j*2 + 1) * stride, vo + j, (size - j - 1) * sizeof(pixel)); \
        memset_bpc(dst + (j*2 + 1) * stride + size - j - 1, top[size - 1], j + 1); \
    } \
}

def_vert_left(8)
def_vert_left(16)
def_vert_left(32)

/* Horizontal-up prediction, 4x4: built purely from the left column; the
 * top neighbour is unused.  The lower-right triangle saturates to the
 * last left pixel. */
static void hor_up_4x4_c(uint8_t *_dst, ptrdiff_t stride,
                         const uint8_t *_left, const uint8_t *top)
{
    pixel *dst = (pixel *) _dst;
    const pixel *left = (const pixel *) _left;
    int l0 = left[0], l1 = left[1], l2 = left[2], l3 = left[3];

    stride /= sizeof(pixel);
    DST(0,0) = (l0 + l1 + 1) >> 1;
    DST(1,0) = (l0 + l1 * 2 + l2 + 2) >> 2;
    DST(0,1) = DST(2,0) = (l1 + l2 + 1) >> 1;
    DST(1,1) = DST(3,0) = (l1 + l2 * 2 + l3 + 2) >> 2;
    DST(0,2) = DST(2,1) = (l2 + l3 + 1) >> 1;
    DST(1,2) = DST(3,1) = (l2 + l3 * 3 + 2) >> 2;
    DST(0,3) = DST(1,3) = DST(2,2) = DST(2,3) = DST(3,2) = DST(3,3) = l3;
}

/* Generic horizontal-up predictor (macro body continues on the next
 * source line). */
#define def_hor_up(size) \
static void hor_up_##size##x##size##_c(uint8_t *_dst, ptrdiff_t stride, \
                                       const uint8_t *_left, const uint8_t *top) \
{ \
    pixel *dst = (pixel *) _dst; \
    const pixel *left = (const pixel *) _left; \
    int i, j; \
    pixel v[size*2 - 2]; \
\
    stride /= sizeof(pixel); \
    for
 (i = 0; i < size - 2; i++) { \
        v[i*2    ] = (left[i] + left[i + 1] + 1) >> 1; \
        v[i*2 + 1] = (left[i] + left[i + 1] * 2 + left[i + 2] + 2) >> 2; \
    } \
    v[size*2 - 4] = (left[size - 2] + left[size - 1] + 1) >> 1; \
    v[size*2 - 3] = (left[size - 2] + left[size - 1] * 3 + 2) >> 2; \
\
    for (j = 0; j < size / 2; j++) \
        memcpy(dst + j*stride, v + j*2, size * sizeof(pixel)); \
    for (j = size / 2; j < size; j++) { \
        memcpy(dst + j*stride, v + j*2, (size*2 - 2 - j*2) * sizeof(pixel)); \
        memset_bpc(dst + j*stride + size*2 - 2 - j*2, left[size - 1], \
                   2 + j*2 - size); \
    } \
}

def_hor_up(8)
def_hor_up(16)
def_hor_up(32)

#undef DST

#endif /* BIT_DEPTH != 12 */

#if BIT_DEPTH != 8
void ff_vp9dsp_intrapred_init_10(VP9DSPContext *dsp);
#endif

/* Fill the intra_pred function-pointer table for this bit depth.
 * Exported (non-static) only in the 10-bit build so the 12-bit build can
 * reuse its bit-depth-independent entries. */
#if BIT_DEPTH != 10
static
#endif
av_cold void FUNC(ff_vp9dsp_intrapred_init)(VP9DSPContext *dsp)
{
    /* Modes whose implementation depends on the exact bit depth and so
     * must always come from this translation unit. */
#define init_intra_pred_bd_aware(tx, sz) \
    dsp->intra_pred[tx][TM_VP8_PRED] = tm_##sz##_c; \
    dsp->intra_pred[tx][DC_128_PRED] = dc_128_##sz##_c; \
    dsp->intra_pred[tx][DC_127_PRED] = dc_127_##sz##_c; \
    dsp->intra_pred[tx][DC_129_PRED] = dc_129_##sz##_c

#if BIT_DEPTH == 12
    /* Bit-depth-independent modes are shared with the 10-bit build. */
    ff_vp9dsp_intrapred_init_10(dsp);
#define init_intra_pred(tx, sz) \
    init_intra_pred_bd_aware(tx, sz)
#else
#define init_intra_pred(tx, sz) \
    dsp->intra_pred[tx][VERT_PRED]            = vert_##sz##_c; \
    dsp->intra_pred[tx][HOR_PRED]             = hor_##sz##_c; \
    dsp->intra_pred[tx][DC_PRED]              = dc_##sz##_c; \
    dsp->intra_pred[tx][DIAG_DOWN_LEFT_PRED]  = diag_downleft_##sz##_c; \
    dsp->intra_pred[tx][DIAG_DOWN_RIGHT_PRED] = diag_downright_##sz##_c; \
    dsp->intra_pred[tx][VERT_RIGHT_PRED]      = vert_right_##sz##_c; \
    dsp->intra_pred[tx][HOR_DOWN_PRED]        = hor_down_##sz##_c; \
    dsp->intra_pred[tx][VERT_LEFT_PRED]       = vert_left_##sz##_c; \
    dsp->intra_pred[tx][HOR_UP_PRED]          = hor_up_##sz##_c; \
    dsp->intra_pred[tx][LEFT_DC_PRED]         = dc_left_##sz##_c; \
    dsp->intra_pred[tx][TOP_DC_PRED]          = dc_top_##sz##_c; \
    init_intra_pred_bd_aware(tx, sz)
#endif

    init_intra_pred(TX_4X4, 4x4);
    init_intra_pred(TX_8X8, 8x8);
    init_intra_pred(TX_16X16,
                    16x16); /* (continuation of intrapred_init) */
    init_intra_pred(TX_32X32, 32x32);

#undef init_intra_pred
#undef init_intra_pred_bd_aware
}

/* Generate a 2D inverse-transform-and-add function from a pair of 1D
 * row/column transforms.  Applies type_a to the columns into tmp[], then
 * type_b to the rows, adding the rounded result into dst and clearing the
 * coefficient block.  When has_dconly is set and eob == 1 only the DC
 * coefficient is present, so a constant is added to the whole block. */
#define itxfm_wrapper(type_a, type_b, sz, bits, has_dconly) \
static void type_a##_##type_b##_##sz##x##sz##_add_c(uint8_t *_dst, \
                                                    ptrdiff_t stride, \
                                                    int16_t *_block, int eob) \
{ \
    int i, j; \
    pixel *dst = (pixel *) _dst; \
    dctcoef *block = (dctcoef *) _block, tmp[sz * sz], out[sz]; \
\
    stride /= sizeof(pixel); \
    if (has_dconly && eob == 1) { \
        const int t = ((((dctint) block[0] * 11585 + (1 << 13)) >> 14) \
                       * 11585 + (1 << 13)) >> 14; \
        block[0] = 0; \
        for (i = 0; i < sz; i++) { \
            for (j = 0; j < sz; j++) \
                dst[j * stride] = av_clip_pixel(dst[j * stride] + \
                                                (bits ? \
                                                 (int)(t + (1U << (bits - 1))) >> bits : \
                                                 t)); \
            dst++; \
        } \
        return; \
    } \
\
    for (i = 0; i < sz; i++) \
        type_a##sz##_1d(block + i, sz, tmp + i * sz, 0); \
    memset(block, 0, sz * sz * sizeof(*block)); \
    for (i = 0; i < sz; i++) { \
        type_b##sz##_1d(tmp + i, sz, out, 1); \
        for (j = 0; j < sz; j++) \
            dst[j * stride] = av_clip_pixel(dst[j * stride] + \
                                            (bits ? \
                                             (int)(out[j] + (1U << (bits - 1))) >> bits : \
                                             out[j])); \
        dst++; \
    } \
}

/* All four DCT/ADST row-column combinations for one transform size. */
#define itxfm_wrap(sz, bits) \
itxfm_wrapper(idct, idct, sz, bits, 1) \
itxfm_wrapper(iadst, idct, sz, bits, 0) \
itxfm_wrapper(idct, iadst, sz, bits, 0) \
itxfm_wrapper(iadst, iadst, sz, bits, 0)

/* Strided coefficient load used by all 1D transforms below. */
#define IN(x) ((dctint) in[(x) * stride])

/* 1D 4-point inverse DCT; constants are 14-bit fixed point, rounded with
 * +(1 << 13) before the >> 14. */
static av_always_inline void idct4_1d(const dctcoef *in, ptrdiff_t stride,
                                      dctcoef *out, int pass)
{
    dctint t0, t1, t2, t3;

    t0 = ((IN(0) + IN(2)) * 11585 + (1 << 13)) >> 14;
    t1 = ((IN(0) - IN(2)) * 11585 + (1 << 13)) >> 14;
    t2 = (IN(1) * 6270 - IN(3) * 15137 + (1 << 13)) >> 14;
    t3 = (IN(1) * 15137 + IN(3) * 6270 + (1 << 13)) >> 14;

    out[0] = t0 + t3;
    out[1] = t1 + t2;
    out[2] = t1 - t2;
    out[3] = t0 - t3;
}

/* 1D 4-point inverse ADST. */
static av_always_inline void iadst4_1d(const dctcoef *in, ptrdiff_t stride,
                                       dctcoef *out, int pass)
{
    dctint t0, t1, t2, t3;

    t0 = 5283 * IN(0) + 15212 * IN(2) + 9929 * IN(3);
    t1 = 9929 * IN(0) - 5283 * IN(2) - 15212 * IN(3);
    t2 = 13377 * (IN(0) - IN(2) + IN(3));
    t3 = 13377 * IN(1);

    out[0] = (t0 + t3 + (1 << 13)) >> 14;
    out[1] = (t1 + t3 + (1 << 13)) >> 14;
    out[2] = (t2 + (1 << 13)) >> 14;
    out[3] = (t0 + t1 - t3 + (1 << 13)) >> 14;
}

itxfm_wrap(4, 4)

/* 1D 8-point inverse DCT (last line continues on the next source line). */
static av_always_inline void idct8_1d(const dctcoef *in, ptrdiff_t stride,
                                      dctcoef *out, int pass)
{
    dctint t0, t0a, t1, t1a, t2, t2a, t3, t3a, t4, t4a, t5, t5a, t6, t6a, t7, t7a;

    t0a = ((IN(0) + IN(4)) * 11585 + (1 << 13)) >> 14;
    t1a = ((IN(0) - IN(4)) * 11585 + (1 << 13)) >> 14;
    t2a = (IN(2) * 6270 - IN(6) * 15137 + (1 << 13)) >> 14;
    t3a = (IN(2) * 15137 + IN(6) * 6270 + (1 << 13)) >> 14;
    t4a = (IN(1) * 3196 - IN(7) * 16069 + (1 << 13)) >> 14;
    t5a = (IN(5) * 13623 - IN(3) * 9102 + (1 << 13)) >> 14;
    t6a = (IN(5) * 9102 + IN(3) * 13623 + (1 << 13)) >> 14;
    t7a = (IN(1) * 16069 + IN(7) * 3196 + (1 << 13)) >> 14;

    t0  = t0a + t3a;
    t1  = t1a + t2a;
    t2  = t1a - t2a;
    t3  = t0a - t3a;
    t4  = t4a + t5a;
    t5a = t4a - t5a;
    t7  = t7a + t6a;
    t6a = t7a - t6a;
    t5  = ((t6a - t5a) * 11585 + (1 << 13)) >> 14;
    t6  = ((t6a + t5a) * 11585 +
           (1 << 13)) >> 14; /* (continuation of idct8_1d) */

    out[0] = t0 + t7;
    out[1] = t1 + t6;
    out[2] = t2 + t5;
    out[3] = t3 + t4;
    out[4] = t3 - t4;
    out[5] = t2 - t5;
    out[6] = t1 - t6;
    out[7] = t0 - t7;
}

/* 1D 8-point inverse ADST.  The later stages use unsigned multiplies
 * (…U constants) so intermediate overflow wraps instead of being UB. */
static av_always_inline void iadst8_1d(const dctcoef *in, ptrdiff_t stride,
                                       dctcoef *out, int pass)
{
    dctint t0, t0a, t1, t1a, t2, t2a, t3, t3a, t4, t4a, t5, t5a, t6, t6a, t7, t7a;

    t0a = 16305 * IN(7) + 1606 * IN(0);
    t1a = 1606 * IN(7) - 16305 * IN(0);
    t2a = 14449 * IN(5) + 7723 * IN(2);
    t3a = 7723 * IN(5) - 14449 * IN(2);
    t4a = 10394 * IN(3) + 12665 * IN(4);
    t5a = 12665 * IN(3) - 10394 * IN(4);
    t6a = 4756 * IN(1) + 15679 * IN(6);
    t7a = 15679 * IN(1) - 4756 * IN(6);

    t0 = (t0a + t4a + (1 << 13)) >> 14;
    t1 = (t1a + t5a + (1 << 13)) >> 14;
    t2 = (t2a + t6a + (1 << 13)) >> 14;
    t3 = (t3a + t7a + (1 << 13)) >> 14;
    t4 = (t0a - t4a + (1 << 13)) >> 14;
    t5 = (t1a - t5a + (1 << 13)) >> 14;
    t6 = (t2a - t6a + (1 << 13)) >> 14;
    t7 = (t3a - t7a + (1 << 13)) >> 14;

    t4a = 15137U * t4 + 6270U * t5;
    t5a = 6270U * t4 - 15137U * t5;
    t6a = 15137U * t7 - 6270U * t6;
    t7a = 6270U * t7 + 15137U * t6;

    out[0] =   t0 + t2;
    out[7] = -(t1 + t3);
    t2     =   t0 - t2;
    t3     =   t1 - t3;

    out[1] = -((dctint)((1U << 13) + t4a + t6a) >> 14);
    out[6] =   (dctint)((1U << 13) + t5a + t7a) >> 14;
    t6     =   (dctint)((1U << 13) + t4a - t6a) >> 14;
    t7     =   (dctint)((1U << 13) + t5a - t7a) >> 14;

    out[3] = -((dctint)((t2 + t3) * 11585U + (1 << 13)) >> 14);
    out[4] =   (dctint)((t2 - t3) * 11585U + (1 << 13)) >> 14;
    out[2] =   (dctint)((t6 + t7) * 11585U + (1 << 13)) >> 14;
    out[5] = -((dctint)((t6 - t7) * 11585U + (1 << 13)) >> 14);
}

itxfm_wrap(8, 5)

/* 1D 16-point inverse DCT (continues on the next source line). */
static av_always_inline void idct16_1d(const dctcoef *in, ptrdiff_t stride,
                                       dctcoef *out, int pass)
{
    dctint t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
    dctint t0a, t1a, t2a, t3a, t4a, t5a, t6a, t7a;
    dctint t8a, t9a, t10a, t11a, t12a, t13a, t14a, t15a;

    t0a = (dctint)((IN(0) + IN(8)) * 11585U + (1 << 13)) >> 14;
    t1a = (dctint)((IN(0) - IN(8)) * 11585U + (1 << 13)) >> 14;
    t2a = (dctint)(IN(4) * 6270U - IN(12)
    /* (continuation of idct16_1d: remaining coefficient loads) */
    * 15137U + (1 << 13)) >> 14;
    t3a  = (dctint)(IN(4) * 15137U + IN(12) * 6270U + (1 << 13)) >> 14;
    t4a  = (dctint)(IN(2) * 3196U - IN(14) * 16069U + (1 << 13)) >> 14;
    t7a  = (dctint)(IN(2) * 16069U + IN(14) * 3196U + (1 << 13)) >> 14;
    t5a  = (dctint)(IN(10) * 13623U - IN(6) * 9102U + (1 << 13)) >> 14;
    t6a  = (dctint)(IN(10) * 9102U + IN(6) * 13623U + (1 << 13)) >> 14;
    t8a  = (dctint)(IN(1) * 1606U - IN(15) * 16305U + (1 << 13)) >> 14;
    t15a = (dctint)(IN(1) * 16305U + IN(15) * 1606U + (1 << 13)) >> 14;
    t9a  = (dctint)(IN(9) * 12665U - IN(7) * 10394U + (1 << 13)) >> 14;
    t14a = (dctint)(IN(9) * 10394U + IN(7) * 12665U + (1 << 13)) >> 14;
    t10a = (dctint)(IN(5) * 7723U - IN(11) * 14449U + (1 << 13)) >> 14;
    t13a = (dctint)(IN(5) * 14449U + IN(11) * 7723U + (1 << 13)) >> 14;
    t11a = (dctint)(IN(13) * 15679U - IN(3) * 4756U + (1 << 13)) >> 14;
    t12a = (dctint)(IN(13) * 4756U + IN(3) * 15679U + (1 << 13)) >> 14;

    /* first butterfly stage */
    t0  = t0a + t3a;
    t1  = t1a + t2a;
    t2  = t1a - t2a;
    t3  = t0a - t3a;
    t4  = t4a + t5a;
    t5  = t4a - t5a;
    t6  = t7a - t6a;
    t7  = t7a + t6a;
    t8  = t8a + t9a;
    t9  = t8a - t9a;
    t10 = t11a - t10a;
    t11 = t11a + t10a;
    t12 = t12a + t13a;
    t13 = t12a - t13a;
    t14 = t15a - t14a;
    t15 = t15a + t14a;

    t5a  = (dctint)((t6 - t5) * 11585U + (1 << 13)) >> 14;
    t6a  = (dctint)((t6 + t5) * 11585U + (1 << 13)) >> 14;
    t9a  = (dctint)(  t14 * 6270U  - t9 * 15137U   + (1 << 13)) >> 14;
    t14a = (dctint)(  t14 * 15137U + t9 * 6270U    + (1 << 13)) >> 14;
    t10a = (dctint)(-(t13 * 15137U + t10 * 6270U)  + (1 << 13)) >> 14;
    t13a = (dctint)(  t13 * 6270U  - t10 * 15137U  + (1 << 13)) >> 14;

    /* second butterfly stage */
    t0a  = t0 + t7;
    t1a  = t1 + t6a;
    t2a  = t2 + t5a;
    t3a  = t3 + t4;
    t4   = t3 - t4;
    t5   = t2 - t5a;
    t6   = t1 - t6a;
    t7   = t0 - t7;
    t8a  = t8 + t11;
    t9   = t9a + t10a;
    t10  = t9a - t10a;
    t11a = t8 - t11;
    t12a = t15 - t12;
    t13  = t14a - t13a;
    t14  = t14a + t13a;
    t15a = t15 + t12;

    t10a = (dctint)((t13 - t10) * 11585U + (1 << 13)) >> 14;
    t13a = (dctint)((t13 + t10) * 11585U + (1 << 13)) >> 14;
    t11  = (dctint)((t12a - t11a) * 11585U + (1 << 13)) >> 14;
    t12  = (dctint)((t12a +
                     t11a) * 11585U + (1 << 13)) >> 14;

    out[ 0] = t0a + t15a;
    out[ 1] = t1a + t14;
    out[ 2] = t2a + t13a;
    out[ 3] = t3a + t12;
    out[ 4] = t4  + t11;
    out[ 5] = t5  + t10a;
    out[ 6] = t6  + t9;
    out[ 7] = t7  + t8a;
    out[ 8] = t7  - t8a;
    out[ 9] = t6  - t9;
    out[10] = t5  - t10a;
    out[11] = t4  - t11;
    out[12] = t3a - t12;
    out[13] = t2a - t13a;
    out[14] = t1a - t14;
    out[15] = t0a - t15a;
}

/* 1D 16-point inverse ADST (continues on the next source line). */
static av_always_inline void iadst16_1d(const dctcoef *in, ptrdiff_t stride,
                                        dctcoef *out, int pass)
{
    dctint t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15;
    dctint t0a, t1a, t2a, t3a, t4a, t5a, t6a, t7a;
    dctint t8a, t9a, t10a, t11a, t12a, t13a, t14a, t15a;

    t0  = IN(15) * 16364U + IN(0) * 804U;
    t1  = IN(15) * 804U - IN(0) * 16364U;
    t2  = IN(13) * 15893U + IN(2) * 3981U;
    t3  = IN(13) * 3981U - IN(2) * 15893U;
    t4  = IN(11) * 14811U + IN(4) * 7005U;
    t5  = IN(11) * 7005U - IN(4) * 14811U;
    t6  = IN(9) * 13160U + IN(6) * 9760U;
    t7  = IN(9) * 9760U - IN(6) * 13160U;
    t8  = IN(7) * 11003U + IN(8) * 12140U;
    t9  = IN(7) * 12140U - IN(8) * 11003U;
    t10 = IN(5) * 8423U + IN(10) * 14053U;
    t11 = IN(5) * 14053U - IN(10) * 8423U;
    t12 = IN(3) * 5520U + IN(12) * 15426U;
    t13 = IN(3) * 15426U - IN(12) * 5520U;
    t14 = IN(1) * 2404U + IN(14) * 16207U;
    t15 = IN(1) * 16207U - IN(14) * 2404U;

    t0a  = (dctint)((1U << 13) + t0 + t8 ) >> 14;
    t1a  = (dctint)((1U << 13) + t1 + t9 ) >> 14;
    t2a  = (dctint)((1U << 13) + t2 + t10) >> 14;
    t3a  = (dctint)((1U << 13) + t3 + t11) >> 14;
    t4a  = (dctint)((1U << 13) + t4 + t12) >> 14;
    t5a  = (dctint)((1U << 13) + t5 + t13) >> 14;
    t6a  = (dctint)((1U << 13) + t6 + t14) >> 14;
    t7a  = (dctint)((1U << 13) + t7 + t15) >> 14;
    t8a  = (dctint)((1U << 13) + t0 - t8 ) >> 14;
    t9a  = (dctint)((1U << 13) + t1 - t9 ) >> 14;
    t10a = (dctint)((1U << 13) + t2 - t10) >> 14;
    t11a = (dctint)((1U << 13) + t3 - t11) >> 14;
    t12a = (dctint)((1U << 13) + t4 - t12) >> 14;
    t13a = (dctint)((1U << 13) + t5 - t13) >> 14;
    t14a = (dctint)((1U << 13) + t6 - t14) >> 14;
    t15a = (dctint)((1U << 13) + t7 - t15) >> 14;

    t8  = t8a *
          /* (continuation of iadst16_1d) */
          16069U + t9a * 3196U;
    t9  = t8a * 3196U - t9a * 16069U;
    t10 = t10a * 9102U + t11a * 13623U;
    t11 = t10a * 13623U - t11a * 9102U;
    t12 = t13a * 16069U - t12a * 3196U;
    t13 = t13a * 3196U + t12a * 16069U;
    t14 = t15a * 9102U - t14a * 13623U;
    t15 = t15a * 13623U + t14a * 9102U;

    t0 = t0a + t4a;
    t1 = t1a + t5a;
    t2 = t2a + t6a;
    t3 = t3a + t7a;
    t4 = t0a - t4a;
    t5 = t1a - t5a;
    t6 = t2a - t6a;
    t7 = t3a - t7a;

    t8a  = (dctint)((1U << 13) + t8 + t12) >> 14;
    t9a  = (dctint)((1U << 13) + t9 + t13) >> 14;
    t10a = (dctint)((1U << 13) + t10 + t14) >> 14;
    t11a = (dctint)((1U << 13) + t11 + t15) >> 14;
    t12a = (dctint)((1U << 13) + t8 - t12) >> 14;
    t13a = (dctint)((1U << 13) + t9 - t13) >> 14;
    t14a = (dctint)((1U << 13) + t10 - t14) >> 14;
    t15a = (dctint)((1U << 13) + t11 - t15) >> 14;

    t4a = t4 * 15137U + t5 * 6270U;
    t5a = t4 * 6270U - t5 * 15137U;
    t6a = t7 * 15137U - t6 * 6270U;
    t7a = t7 * 6270U + t6 * 15137U;

    t12 = t12a * 15137U + t13a * 6270U;
    t13 = t12a * 6270U - t13a * 15137U;
    t14 = t15a * 15137U - t14a * 6270U;
    t15 = t15a * 6270U + t14a * 15137U;

    out[ 0] =   t0 + t2;
    out[15] = -(t1 + t3);
    t2a     =   t0 - t2;
    t3a     =   t1 - t3;

    out[ 3] = -((dctint)((1U << 13) + t4a + t6a) >> 14);
    out[12] =   (dctint)((1U << 13) + t5a + t7a) >> 14;
    t6      =   (dctint)((1U << 13) + t4a - t6a) >> 14;
    t7      =   (dctint)((1U << 13) + t5a - t7a) >> 14;

    out[ 1] = -(t8a + t10a);
    out[14] =   t9a + t11a;
    t10     =   t8a - t10a;
    t11     =   t9a - t11a;

    out[ 2] =   (dctint)((1U << 13) + t12 + t14) >> 14;
    out[13] = -((dctint)((1U << 13) + t13 + t15) >> 14);
    t14a    =   (dctint)((1U << 13) + t12 - t14) >> 14;
    t15a    =   (dctint)((1U << 13) + t13 - t15) >> 14;

    out[ 7] = (dctint)(-(t2a + t3a) * 11585U + (1 << 13)) >> 14;
    out[ 8] = (dctint)( (t2a - t3a) * 11585U + (1 << 13)) >> 14;
    out[ 4] = (dctint)( (t7 + t6) * 11585U + (1 << 13)) >> 14;
    out[11] = (dctint)( (t7 - t6) * 11585U + (1 << 13)) >> 14;
    out[ 6] = (dctint)( (t11 + t10) * 11585U + (1 << 13)) >> 14;
    out[ 9] = (dctint)( (t11 - t10) * 11585U + (1 << 13)) >> 14;
    out[ 5] = (dctint)(-(t14a + t15a) * 11585U + (1 <<
                                                  13)) >> 14;
    out[10] = (dctint)( (t14a - t15a) * 11585U + (1 << 13)) >> 14;
}

itxfm_wrap(16, 6)

/* 1D 32-point inverse DCT.  First the 32 input coefficients are rotated
 * pairwise into t0a..t31a (14-bit fixed point, rounded); the butterfly
 * stages continue on the next source line. */
static av_always_inline void idct32_1d(const dctcoef *in, ptrdiff_t stride,
                                       dctcoef *out, int pass)
{
    dctint t0a  = (dctint)((IN(0) + IN(16)) * 11585U + (1 << 13)) >> 14;
    dctint t1a  = (dctint)((IN(0) - IN(16)) * 11585U + (1 << 13)) >> 14;
    dctint t2a  = (dctint)(IN( 8) *  6270U - IN(24) * 15137U + (1 << 13)) >> 14;
    dctint t3a  = (dctint)(IN( 8) * 15137U + IN(24) *  6270U + (1 << 13)) >> 14;
    dctint t4a  = (dctint)(IN( 4) *  3196U - IN(28) * 16069U + (1 << 13)) >> 14;
    dctint t7a  = (dctint)(IN( 4) * 16069U + IN(28) *  3196U + (1 << 13)) >> 14;
    dctint t5a  = (dctint)(IN(20) * 13623U - IN(12) *  9102U + (1 << 13)) >> 14;
    dctint t6a  = (dctint)(IN(20) *  9102U + IN(12) * 13623U + (1 << 13)) >> 14;
    dctint t8a  = (dctint)(IN( 2) *  1606U - IN(30) * 16305U + (1 << 13)) >> 14;
    dctint t15a = (dctint)(IN( 2) * 16305U + IN(30) *  1606U + (1 << 13)) >> 14;
    dctint t9a  = (dctint)(IN(18) * 12665U - IN(14) * 10394U + (1 << 13)) >> 14;
    dctint t14a = (dctint)(IN(18) * 10394U + IN(14) * 12665U + (1 << 13)) >> 14;
    dctint t10a = (dctint)(IN(10) *  7723U - IN(22) * 14449U + (1 << 13)) >> 14;
    dctint t13a = (dctint)(IN(10) * 14449U + IN(22) *  7723U + (1 << 13)) >> 14;
    dctint t11a = (dctint)(IN(26) * 15679U - IN( 6) *  4756U + (1 << 13)) >> 14;
    dctint t12a = (dctint)(IN(26) *  4756U + IN( 6) * 15679U + (1 << 13)) >> 14;
    dctint t16a = (dctint)(IN( 1) *   804U - IN(31) * 16364U + (1 << 13)) >> 14;
    dctint t31a = (dctint)(IN( 1) * 16364U + IN(31) *   804U + (1 << 13)) >> 14;
    dctint t17a = (dctint)(IN(17) * 12140U - IN(15) * 11003U + (1 << 13)) >> 14;
    dctint t30a = (dctint)(IN(17) * 11003U + IN(15) * 12140U + (1 << 13)) >> 14;
    dctint t18a = (dctint)(IN( 9) *  7005U - IN(23) * 14811U + (1 << 13)) >> 14;
    dctint t29a = (dctint)(IN( 9) * 14811U + IN(23) *  7005U + (1 << 13)) >> 14;
    dctint t19a = (dctint)(IN(25) * 15426U - IN( 7) *  5520U + (1 << 13)) >> 14;
    dctint t28a = (dctint)(IN(25) *  5520U + IN( 7) * 15426U + (1 << 13)) >> 14;
    /* (continuation of idct32_1d: remaining input rotations) */
    dctint t20a = (dctint)(IN( 5) *  3981U - IN(27) * 15893U + (1 << 13)) >> 14;
    dctint t27a = (dctint)(IN( 5) * 15893U + IN(27) *  3981U + (1 << 13)) >> 14;
    dctint t21a = (dctint)(IN(21) * 14053U - IN(11) *  8423U + (1 << 13)) >> 14;
    dctint t26a = (dctint)(IN(21) *  8423U + IN(11) * 14053U + (1 << 13)) >> 14;
    dctint t22a = (dctint)(IN(13) *  9760U - IN(19) * 13160U + (1 << 13)) >> 14;
    dctint t25a = (dctint)(IN(13) * 13160U + IN(19) *  9760U + (1 << 13)) >> 14;
    dctint t23a = (dctint)(IN(29) * 16207U - IN( 3) *  2404U + (1 << 13)) >> 14;
    dctint t24a = (dctint)(IN(29) *  2404U + IN( 3) * 16207U + (1 << 13)) >> 14;

    /* first butterfly stage */
    dctint t0  = t0a  + t3a;
    dctint t1  = t1a  + t2a;
    dctint t2  = t1a  - t2a;
    dctint t3  = t0a  - t3a;
    dctint t4  = t4a  + t5a;
    dctint t5  = t4a  - t5a;
    dctint t6  = t7a  - t6a;
    dctint t7  = t7a  + t6a;
    dctint t8  = t8a  + t9a;
    dctint t9  = t8a  - t9a;
    dctint t10 = t11a - t10a;
    dctint t11 = t11a + t10a;
    dctint t12 = t12a + t13a;
    dctint t13 = t12a - t13a;
    dctint t14 = t15a - t14a;
    dctint t15 = t15a + t14a;
    dctint t16 = t16a + t17a;
    dctint t17 = t16a - t17a;
    dctint t18 = t19a - t18a;
    dctint t19 = t19a + t18a;
    dctint t20 = t20a + t21a;
    dctint t21 = t20a - t21a;
    dctint t22 = t23a - t22a;
    dctint t23 = t23a + t22a;
    dctint t24 = t24a + t25a;
    dctint t25 = t24a - t25a;
    dctint t26 = t27a - t26a;
    dctint t27 = t27a + t26a;
    dctint t28 = t28a + t29a;
    dctint t29 = t28a - t29a;
    dctint t30 = t31a - t30a;
    dctint t31 = t31a + t30a;

    t5a  = (dctint)((t6 - t5) * 11585U + (1 << 13)) >> 14;
    t6a  = (dctint)((t6 + t5) * 11585U + (1 << 13)) >> 14;
    t9a  = (dctint)(  t14 *  6270U - t9  * 15137U  + (1 << 13)) >> 14;
    t14a = (dctint)(  t14 * 15137U + t9  *  6270U  + (1 << 13)) >> 14;
    t10a = (dctint)(-(t13 * 15137U + t10 *  6270U) + (1 << 13)) >> 14;
    t13a = (dctint)(  t13 *  6270U - t10 * 15137U  + (1 << 13)) >> 14;
    t17a = (dctint)(  t30 *  3196U - t17 * 16069U  + (1 << 13)) >> 14;
    t30a = (dctint)(  t30 * 16069U + t17 *  3196U  + (1 << 13)) >> 14;
    t18a = (dctint)(-(t29 * 16069U + t18 *  3196U) + (1 << 13)) >> 14;
    t29a = (dctint)(  t29 *  3196U - t18 * 16069U  + (1 << 13)) >> 14;
    t21a = (dctint)(  t26 * 13623U - t21 *  9102U  + (1 << 13)) >> 14;
    t26a = (dctint)(  t26 *  9102U + t21 * 13623U  + (1 << 13)) >> 14;
    t22a = (dctint)(-(t25 *  9102U + t22 * 13623U) + (1 << 13)) >> 14;
    t25a = (dctint)(  t25 * 13623U - t22 *  9102U  + (1 << 13)) >> 14;

    /* second butterfly stage */
    t0a  = t0 + t7;
    t1a  = t1 + t6a;
    t2a  = t2 + t5a;
    t3a  = t3 + t4;
    t4a  = t3 - t4;
    t5   = t2 - t5a;
    t6   = t1 - t6a;
    t7a  = t0 - t7;
    t8a  = t8 + t11;
    t9   = t9a + t10a;
    t10  = t9a - t10a;
    t11a = t8 - t11;
    t12a = t15 - t12;
    t13  = t14a - t13a;
    t14  = t14a + t13a;
    t15a = t15 + t12;
    t16a = t16 + t19;
    t17  = t17a + t18a;
    t18  = t17a - t18a;
    t19a = t16 - t19;
    t20a = t23 - t20;
    t21  = t22a - t21a;
    t22  = t22a + t21a;
    t23a = t23 + t20;
    t24a = t24 + t27;
    t25  = t25a + t26a;
    t26  = t25a - t26a;
    t27a = t24 - t27;
    t28a = t31 - t28;
    t29  = t30a - t29a;
    t30  = t30a + t29a;
    t31a = t31 + t28;

    t10a = (dctint)((t13  - t10)  * 11585U + (1 << 13)) >> 14;
    t13a = (dctint)((t13  + t10)  * 11585U + (1 << 13)) >> 14;
    t11  = (dctint)((t12a - t11a) * 11585U + (1 << 13)) >> 14;
    t12  = (dctint)((t12a + t11a) * 11585U + (1 << 13)) >> 14;
    t18a = (dctint)(  t29  *  6270U - t18  * 15137U  + (1 << 13)) >> 14;
    t29a = (dctint)(  t29  * 15137U + t18  *  6270U  + (1 << 13)) >> 14;
    t19  = (dctint)(  t28a *  6270U - t19a * 15137U  + (1 << 13)) >> 14;
    t28  = (dctint)(  t28a * 15137U + t19a *  6270U  + (1 << 13)) >> 14;
    t20  = (dctint)(-(t27a * 15137U + t20a *  6270U) + (1 << 13)) >> 14;
    t27  = (dctint)(  t27a *  6270U - t20a * 15137U  + (1 << 13)) >> 14;
    t21a = (dctint)(-(t26  * 15137U + t21  *  6270U) + (1 << 13)) >> 14;
    t26a = (dctint)(  t26  *  6270U - t21  * 15137U  + (1 << 13)) >> 14;

    /* third butterfly stage */
    t0   = t0a + t15a;
    t1   = t1a + t14;
    t2   = t2a + t13a;
    t3   = t3a + t12;
    t4   = t4a + t11;
    t5a  = t5  + t10a;
    t6a  = t6  + t9;
    t7   = t7a + t8a;
    t8   = t7a - t8a;
    t9a  = t6  - t9;
    t10  = t5  - t10a;
    t11a = t4a - t11;
    t12a = t3a - t12;
    t13  = t2a - t13a;
    t14a = t1a - t14;
    t15  = t0a - t15a;
    t16  = t16a + t23a;
    t17a = t17  + t22;
    t18  = t18a + t21a;
    t19a = t19  + t20;
    t20a = t19  - t20;
    t21  = t18a - t21a;
    t22a = t17  - t22;
    t23  = t16a - t23a;
    t24  = t31a - t24a;
    t25a = t30  - t25;
    t26  = t29a - t26a;
    t27a = t28  - t27;
    t28a = t28  + t27;
    t29  = t29a + t26a;
    t30a = t30  + t25;
    t31  = t31a + t24a;

    t20  = (dctint)((t27a - t20a) * 11585U + (1 << 13)) >> 14;
    t27  = (dctint)((t27a + t20a) * 11585U + (1 << 13)) >> 14;
    t21a = (dctint)((t26  - t21 ) * 11585U + (1 << 13)) >> 14;
    t26a = (dctint)((t26  + t21 ) * 11585U + (1 << 13)) >> 14;
    t22  = (dctint)((t25a - t22a) * 11585U + (1 << 13)) >> 14;
    t25  = (dctint)((t25a + t22a) * 11585U + (1 << 13)) >> 14;
    t23a = (dctint)((t24  - t23 ) * 11585U + (1 << 13)) >> 14;
    t24a = (dctint)((t24  + t23 ) * 11585U + (1 << 13)) >> 14;

    /* final stage: mirror-symmetric output */
    out[ 0] = t0   + t31;
    out[ 1] = t1   + t30a;
    out[ 2] = t2   + t29;
    out[ 3] = t3   + t28a;
    out[ 4] = t4   + t27;
    out[ 5] = t5a  + t26a;
    out[ 6] = t6a  + t25;
    out[ 7] = t7   + t24a;
    out[ 8] = t8   + t23a;
    out[ 9] = t9a  + t22;
    out[10] = t10  + t21a;
    out[11] = t11a + t20;
    out[12] = t12a + t19a;
    out[13] = t13  + t18;
    out[14] = t14a + t17a;
    out[15] = t15  + t16;
    out[16] = t15  - t16;
    out[17] = t14a - t17a;
    out[18] = t13  - t18;
    out[19] = t12a - t19a;
    out[20] = t11a - t20;
    out[21] = t10  - t21a;
    out[22] = t9a  - t22;
    out[23] = t8   - t23a;
    out[24] = t7   - t24a;
    out[25] = t6a  - t25;
    out[26] = t5a  - t26a;
    out[27] = t4   - t27;
    out[28] = t3   - t28a;
    out[29] = t2   - t29;
    out[30] = t1   - t30a;
    out[31] = t0   - t31;
}

itxfm_wrapper(idct, idct, 32, 6, 1)

/* 1D 4-point inverse Walsh-Hadamard transform (lossless mode).  The
 * first pass pre-shifts the inputs down by 2. */
static av_always_inline void iwht4_1d(const dctcoef *in, ptrdiff_t stride,
                                      dctcoef *out, int pass)
{
    int t0, t1, t2, t3, t4;

    if (pass == 0) {
        t0 = IN(0) >> 2;
        t1 = IN(3) >> 2;
        t2 = IN(1) >> 2;
        t3 = IN(2) >> 2;
    } else {
        t0 = IN(0);
        t1 = IN(3);
        t2 = IN(1);
        t3 = IN(2);
    }

    t0 += t2;
    t3 -= t1;
    t4  = (t0 - t3) >> 1;
    t1  = t4 - t1;
    t2  = t4 - t2;
    t0 -= t1;
    t3 += t2;

    out[0] = t0;
    out[1] = t1;
    out[2] = t2;
    out[3] = t3;
}

itxfm_wrapper(iwht, iwht, 4, 0, 0)

#undef IN
#undef itxfm_wrapper
#undef itxfm_wrap

/* Fill the itxfm_add function-pointer table (macro continues on the next
 * source line). */
static av_cold void vp9dsp_itxfm_init(VP9DSPContext *dsp)
{
#define init_itxfm(tx, sz) \
    dsp->itxfm_add[tx][DCT_DCT] = idct_idct_##sz##_add_c; \
    dsp->itxfm_add[tx][DCT_ADST] = iadst_idct_##sz##_add_c; \
    dsp->itxfm_add[tx][ADST_DCT] = idct_iadst_##sz##_add_c; \
    dsp->itxfm_add[tx][ADST_ADST] = iadst_iadst_##sz##_add_c

/* Sizes where all four type combinations share one function. */
#define init_idct(tx, nm) \
    dsp->itxfm_add[tx][DCT_DCT] = \
    dsp->itxfm_add[tx][ADST_DCT] = \
    dsp->itxfm_add[tx][DCT_ADST] = \
    dsp->itxfm_add[tx][ADST_ADST] = nm##_add_c

    init_itxfm(TX_4X4, 4x4);
    init_itxfm(TX_8X8, 8x8);
    init_itxfm(TX_16X16, 16x16);
    init_idct(TX_32X32, idct_idct_32x32);
    init_idct(4 /* lossless */, iwht_iwht_4x4);

#undef init_itxfm
#undef init_idct
}

/* In-loop deblocking filter over 8 consecutive pixel lines.  stridea
 * steps along the edge, strideb steps across it, so the same code serves
 * horizontal and vertical edges.  wd selects the filter strength (4/8/16);
 * E/I/H are the edge/interior/high-edge-variance thresholds, scaled to
 * the build's bit depth.  Note flat8in is only read under wd >= 16 or
 * wd >= 8, i.e. after the wd >= 8 branch has initialized it. */
static av_always_inline void loop_filter(pixel *dst, int E, int I, int H,
                                         ptrdiff_t stridea, ptrdiff_t strideb,
                                         int wd)
{
    int i, F = 1 << (BIT_DEPTH - 8);

    E <<= (BIT_DEPTH - 8);
    I <<= (BIT_DEPTH - 8);
    H <<= (BIT_DEPTH - 8);

    for (i = 0; i < 8; i++, dst += stridea) {
        int p7, p6, p5, p4;
        int p3 = dst[strideb * -4], p2 = dst[strideb * -3];
        int p1 = dst[strideb * -2], p0 = dst[strideb * -1];
        int q0 = dst[strideb * +0], q1 = dst[strideb * +1];
        int q2 = dst[strideb * +2], q3 = dst[strideb * +3];
        int q4, q5, q6, q7;
        /* filter-mask: all gradients across the edge within thresholds */
        int fm = FFABS(p3 - p2) <= I && FFABS(p2 - p1) <= I &&
                 FFABS(p1 - p0) <= I && FFABS(q1 - q0) <= I &&
                 FFABS(q2 - q1) <= I && FFABS(q3 - q2) <= I &&
                 FFABS(p0 - q0) * 2 + (FFABS(p1 - q1) >> 1) <= E;
        int flat8out, flat8in;

        if (!fm)
            continue;

        if (wd >= 16) {
            p7 = dst[strideb * -8];
            p6 = dst[strideb * -7];
            p5 = dst[strideb * -6];
            p4 = dst[strideb * -5];
            q4 = dst[strideb * +4];
            q5 = dst[strideb * +5];
            q6 = dst[strideb * +6];
            q7 = dst[strideb * +7];

            flat8out = FFABS(p7 - p0) <= F && FFABS(p6 - p0) <= F &&
                       FFABS(p5 - p0) <= F && FFABS(p4 - p0) <= F &&
                       FFABS(q4 - q0) <= F && FFABS(q5 - q0) <= F &&
                       FFABS(q6 - q0) <= F && FFABS(q7 - q0) <= F;
        }

        if (wd >= 8)
            flat8in = FFABS(p3 - p0) <= F && FFABS(p2 - p0) <= F &&
                      FFABS(p1 - p0) <= F && FFABS(q1 - q0) <= F &&
                      FFABS(q2 - q0) <= F && FFABS(q3 - q0) <= F;

        if (wd >= 16 && flat8out && flat8in) {
            /* 16-wide flat filter: 15-tap average over p7..q7 */
            dst[strideb * -7] = (p7 + p7 + p7 + p7 + p7 + p7 + p7 + p6 * 2 +
                                 p5 + p4 + p3 + p2 + p1 + p0 + q0 + 8) >> 4;
            dst[strideb * -6] = (p7 + p7 + p7 + p7 + p7 + p7 + p6 + p5 * 2 +
                                 p4 + p3 + p2 + p1 + p0 + q0 + q1 + 8) >> 4;
            dst[strideb * -5] = (p7 + p7 + p7 + p7 + p7 + p6 + p5 + p4 * 2 +
                                 p3 + p2 + p1 + p0 + q0 + q1 + q2 + 8) >> 4;
            dst[strideb * -4] = (p7 + p7 + p7 + p7 + p6 + p5 + p4 + p3 * 2 +
                                 p2 + p1 + p0 + q0 + q1 + q2 + q3 + 8) >> 4;
            dst[strideb * -3] = (p7 + p7 + p7 + p6 + p5 + p4 + p3 + p2 * 2 +
                                 p1 + p0 + q0 + q1 + q2 + q3 + q4 + 8) >> 4;
            dst[strideb * -2] = (p7 + p7 + p6 + p5 + p4 + p3 + p2 + p1 * 2 +
                                 p0 + q0 + q1 + q2 + q3 + q4 + q5 + 8) >> 4;
            dst[strideb * -1] = (p7 + p6 + p5 + p4 + p3 + p2 + p1 + p0 * 2 +
                                 q0 + q1 + q2 + q3 + q4 + q5 + q6 + 8) >> 4;
            dst[strideb * +0] = (p6 + p5 + p4 + p3 + p2 + p1 + p0 + q0 * 2 +
                                 q1 + q2 + q3 + q4 + q5 + q6 + q7 + 8) >> 4;
            dst[strideb * +1] = (p5 + p4 + p3 + p2 + p1 + p0 + q0 + q1 * 2 +
                                 q2 + q3 + q4 + q5 + q6 + q7 + q7 + 8) >> 4;
            dst[strideb * +2] = (p4 + p3 + p2 + p1 + p0 + q0 + q1 + q2 * 2 +
                                 q3 + q4 + q5 + q6 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +3] = (p3 + p2 + p1 + p0 + q0 + q1 + q2 + q3 * 2 +
                                 q4 + q5 + q6 + q7 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +4] = (p2 + p1 + p0 + q0 + q1 + q2 + q3 + q4 * 2 +
                                 q5 + q6 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +5] = (p1 + p0 + q0 + q1 + q2 + q3 + q4 + q5 * 2 +
                                 q6 + q7 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
            dst[strideb * +6] = (p0 + q0 + q1 + q2 + q3 + q4 + q5 + q6 * 2 +
                                 q7 + q7 + q7 + q7 + q7 + q7 + q7 + 8) >> 4;
        } else if (wd >= 8 && flat8in) {
            /* 8-wide flat filter: 7-tap average over p3..q3 */
            dst[strideb * -3] = (p3 + p3 + p3 + 2 * p2 + p1 + p0 + q0 + 4) >> 3;
            dst[strideb * -2] = (p3 + p3 + p2 + 2 * p1 + p0 + q0 + q1 + 4) >> 3;
            dst[strideb * -1] = (p3 + p2 + p1 + 2 * p0 + q0 + q1 + q2 + 4) >> 3;
            dst[strideb * +0] = (p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3 + 4) >> 3;
            dst[strideb * +1] = (p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3 + 4) >> 3;
            dst[strideb * +2] = (p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3 + 4) >> 3;
        } else {
            /* narrow 4-tap filter; hev = high edge variance */
            int hev = FFABS(p1 - p0) > H || FFABS(q1 - q0) > H;

            if (hev) {
                int f = av_clip_intp2(p1 - q1, BIT_DEPTH - 1), f1, f2;

                f = av_clip_intp2(3 * (q0 - p0) + f, BIT_DEPTH - 1);

                f1 = FFMIN(f + 4, (1 << (BIT_DEPTH - 1)) - 1) >> 3;
                f2 = FFMIN(f + 3, (1 << (BIT_DEPTH - 1)) - 1) >> 3;

                dst[strideb * -1] = av_clip_pixel(p0 + f2);
                dst[strideb * +0] = av_clip_pixel(q0 - f1);
            } else {
                int f = av_clip_intp2(3 * (q0 - p0), BIT_DEPTH - 1), f1, f2;

                f1 = FFMIN(f + 4, (1 << (BIT_DEPTH - 1)) - 1) >> 3;
                f2 = FFMIN(f + 3, (1 << (BIT_DEPTH - 1)) - 1) >> 3;

                dst[strideb * -1] = av_clip_pixel(p0 + f2);
                dst[strideb * +0] = av_clip_pixel(q0 - f1);

                /* also adjust the second pixel on either side */
                f = (f1 + 1) >> 1;
                dst[strideb * -2] = av_clip_pixel(p1 + f);
                dst[strideb * +1] = av_clip_pixel(q1 - f);
            }
        }
    }
}

/* Edge wrappers: swap stridea/strideb to get horizontal vs vertical. */
#define lf_8_fn(dir, wd, stridea, strideb) \
static void loop_filter_##dir##_##wd##_8_c(uint8_t *_dst, \
                                           ptrdiff_t stride, \
                                           int E, int I, int H) \
{ \
    pixel *dst = (pixel *) _dst; \
    stride /= sizeof(pixel); \
    loop_filter(dst, E, I, H, stridea, strideb, wd); \
}

#define lf_8_fns(wd) \
lf_8_fn(h, wd, stride, 1) \
lf_8_fn(v, wd, 1, stride)

lf_8_fns(4)
lf_8_fns(8)
lf_8_fns(16)

#undef lf_8_fn
#undef lf_8_fns

/* 16-pixel edge = two consecutive 8-pixel filters. */
#define lf_16_fn(dir, stridea) \
static void loop_filter_##dir##_16_16_c(uint8_t *dst, \
                                        ptrdiff_t stride, \
                                        int E, int I, int H) \
{ \
    loop_filter_##dir##_16_8_c(dst, stride, E, I, H); \
    loop_filter_##dir##_16_8_c(dst + 8 * stridea, stride, E, I, H); \
}

lf_16_fn(h, stride)
lf_16_fn(v, sizeof(pixel))

#undef lf_16_fn

/* Mixed-width 16-pixel edges: low byte of E/I/H applies to the first
 * half, high byte to the second half. */
#define lf_mix_fn(dir, wd1, wd2, stridea) \
static void loop_filter_##dir##_##wd1##wd2##_16_c(uint8_t *dst, \
                                                  ptrdiff_t stride, \
                                                  int E, int I, int H) \
{ \
    loop_filter_##dir##_##wd1##_8_c(dst, stride, E & 0xff, I & 0xff, H & 0xff); \
    loop_filter_##dir##_##wd2##_8_c(dst + 8 * stridea, stride, E >> 8, I >> 8, H >> 8); \
}

#define lf_mix_fns(wd1, wd2) \
lf_mix_fn(h, wd1, wd2, stride) \
lf_mix_fn(v, wd1, wd2, sizeof(pixel))

lf_mix_fns(4, 4)
lf_mix_fns(4, 8)
lf_mix_fns(8, 4)
lf_mix_fns(8, 8)

#undef lf_mix_fn
#undef lf_mix_fns

/* Fill the loop-filter function-pointer tables (continues on the next
 * source line). */
static av_cold void vp9dsp_loopfilter_init(VP9DSPContext *dsp)
{
    dsp->loop_filter_8[0][0] = loop_filter_h_4_8_c;
dsp->loop_filter_8[0][1] = loop_filter_v_4_8_c; dsp->loop_filter_8[1][0] = loop_filter_h_8_8_c; dsp->loop_filter_8[1][1] = loop_filter_v_8_8_c; dsp->loop_filter_8[2][0] = loop_filter_h_16_8_c; dsp->loop_filter_8[2][1] = loop_filter_v_16_8_c; dsp->loop_filter_16[0] = loop_filter_h_16_16_c; dsp->loop_filter_16[1] = loop_filter_v_16_16_c; dsp->loop_filter_mix2[0][0][0] = loop_filter_h_44_16_c; dsp->loop_filter_mix2[0][0][1] = loop_filter_v_44_16_c; dsp->loop_filter_mix2[0][1][0] = loop_filter_h_48_16_c; dsp->loop_filter_mix2[0][1][1] = loop_filter_v_48_16_c; dsp->loop_filter_mix2[1][0][0] = loop_filter_h_84_16_c; dsp->loop_filter_mix2[1][0][1] = loop_filter_v_84_16_c; dsp->loop_filter_mix2[1][1][0] = loop_filter_h_88_16_c; dsp->loop_filter_mix2[1][1][1] = loop_filter_v_88_16_c; } #if BIT_DEPTH != 12 static av_always_inline void copy_c(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src, ptrdiff_t src_stride, int w, int h) { do { memcpy(dst, src, w * sizeof(pixel)); dst += dst_stride; src += src_stride; } while (--h); } static av_always_inline void avg_c(uint8_t *_dst, ptrdiff_t dst_stride, const uint8_t *_src, ptrdiff_t src_stride, int w, int h) { pixel *dst = (pixel *) _dst; const pixel *src = (const pixel *) _src; dst_stride /= sizeof(pixel); src_stride /= sizeof(pixel); do { int x; for (x = 0; x < w; x += 4) AV_WN4PA(&dst[x], rnd_avg_pixel4(AV_RN4PA(&dst[x]), AV_RN4P(&src[x]))); dst += dst_stride; src += src_stride; } while (--h); } #define fpel_fn(type, sz) \ static void type##sz##_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int h, int mx, int my) \ { \ type##_c(dst, dst_stride, src, src_stride, sz, h); \ } #define copy_avg_fn(sz) \ fpel_fn(copy, sz) \ fpel_fn(avg, sz) copy_avg_fn(64) copy_avg_fn(32) copy_avg_fn(16) copy_avg_fn(8) copy_avg_fn(4) #undef fpel_fn #undef copy_avg_fn #endif /* BIT_DEPTH != 12 */ #define FILTER_8TAP(src, x, F, stride) \ av_clip_pixel((F[0] * src[x + -3 * stride] + \ F[1] * src[x + -2 * stride] 
+ \ F[2] * src[x + -1 * stride] + \ F[3] * src[x + +0 * stride] + \ F[4] * src[x + +1 * stride] + \ F[5] * src[x + +2 * stride] + \ F[6] * src[x + +3 * stride] + \ F[7] * src[x + +4 * stride] + 64) >> 7) static av_always_inline void do_8tap_1d_c(uint8_t *_dst, ptrdiff_t dst_stride, const uint8_t *_src, ptrdiff_t src_stride, int w, int h, ptrdiff_t ds, const int16_t *filter, int avg) { pixel *dst = (pixel *) _dst; const pixel *src = (const pixel *) _src; dst_stride /= sizeof(pixel); src_stride /= sizeof(pixel); do { int x; for (x = 0; x < w; x++) if (avg) { dst[x] = (dst[x] + FILTER_8TAP(src, x, filter, ds) + 1) >> 1; } else { dst[x] = FILTER_8TAP(src, x, filter, ds); } dst += dst_stride; src += src_stride; } while (--h); } #define filter_8tap_1d_fn(opn, opa, dir, ds) \ static av_noinline void opn##_8tap_1d_##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int w, int h, const int16_t *filter) \ { \ do_8tap_1d_c(dst, dst_stride, src, src_stride, w, h, ds, filter, opa); \ } filter_8tap_1d_fn(put, 0, v, src_stride / sizeof(pixel)) filter_8tap_1d_fn(put, 0, h, 1) filter_8tap_1d_fn(avg, 1, v, src_stride / sizeof(pixel)) filter_8tap_1d_fn(avg, 1, h, 1) #undef filter_8tap_1d_fn static av_always_inline void do_8tap_2d_c(uint8_t *_dst, ptrdiff_t dst_stride, const uint8_t *_src, ptrdiff_t src_stride, int w, int h, const int16_t *filterx, const int16_t *filtery, int avg) { int tmp_h = h + 7; pixel tmp[64 * 71], *tmp_ptr = tmp; pixel *dst = (pixel *) _dst; const pixel *src = (const pixel *) _src; dst_stride /= sizeof(pixel); src_stride /= sizeof(pixel); src -= src_stride * 3; do { int x; for (x = 0; x < w; x++) tmp_ptr[x] = FILTER_8TAP(src, x, filterx, 1); tmp_ptr += 64; src += src_stride; } while (--tmp_h); tmp_ptr = tmp + 64 * 3; do { int x; for (x = 0; x < w; x++) if (avg) { dst[x] = (dst[x] + FILTER_8TAP(tmp_ptr, x, filtery, 64) + 1) >> 1; } else { dst[x] = FILTER_8TAP(tmp_ptr, x, filtery, 64); } tmp_ptr += 64; dst += dst_stride; } 
while (--h); } #define filter_8tap_2d_fn(opn, opa) \ static av_noinline void opn##_8tap_2d_hv_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int w, int h, const int16_t *filterx, \ const int16_t *filtery) \ { \ do_8tap_2d_c(dst, dst_stride, src, src_stride, w, h, filterx, filtery, opa); \ } filter_8tap_2d_fn(put, 0) filter_8tap_2d_fn(avg, 1) #undef filter_8tap_2d_fn #define filter_fn_1d(sz, dir, dir_m, type, type_idx, avg) \ static void avg##_8tap_##type##_##sz##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int h, int mx, int my) \ { \ avg##_8tap_1d_##dir##_c(dst, dst_stride, src, src_stride, sz, h, \ ff_vp9_subpel_filters[type_idx][dir_m]); \ } #define filter_fn_2d(sz, type, type_idx, avg) \ static void avg##_8tap_##type##_##sz##hv_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int h, int mx, int my) \ { \ avg##_8tap_2d_hv_c(dst, dst_stride, src, src_stride, sz, h, \ ff_vp9_subpel_filters[type_idx][mx], \ ff_vp9_subpel_filters[type_idx][my]); \ } #if BIT_DEPTH != 12 #define FILTER_BILIN(src, x, mxy, stride) \ (src[x] + ((mxy * (src[x + stride] - src[x]) + 8) >> 4)) static av_always_inline void do_bilin_1d_c(uint8_t *_dst, ptrdiff_t dst_stride, const uint8_t *_src, ptrdiff_t src_stride, int w, int h, ptrdiff_t ds, int mxy, int avg) { pixel *dst = (pixel *) _dst; const pixel *src = (const pixel *) _src; dst_stride /= sizeof(pixel); src_stride /= sizeof(pixel); do { int x; for (x = 0; x < w; x++) if (avg) { dst[x] = (dst[x] + FILTER_BILIN(src, x, mxy, ds) + 1) >> 1; } else { dst[x] = FILTER_BILIN(src, x, mxy, ds); } dst += dst_stride; src += src_stride; } while (--h); } #define bilin_1d_fn(opn, opa, dir, ds) \ static av_noinline void opn##_bilin_1d_##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int w, int h, int mxy) \ { \ do_bilin_1d_c(dst, dst_stride, src, src_stride, w, h, ds, mxy, opa); \ } 
bilin_1d_fn(put, 0, v, src_stride / sizeof(pixel)) bilin_1d_fn(put, 0, h, 1) bilin_1d_fn(avg, 1, v, src_stride / sizeof(pixel)) bilin_1d_fn(avg, 1, h, 1) #undef bilin_1d_fn static av_always_inline void do_bilin_2d_c(uint8_t *_dst, ptrdiff_t dst_stride, const uint8_t *_src, ptrdiff_t src_stride, int w, int h, int mx, int my, int avg) { pixel tmp[64 * 65], *tmp_ptr = tmp; int tmp_h = h + 1; pixel *dst = (pixel *) _dst; const pixel *src = (const pixel *) _src; dst_stride /= sizeof(pixel); src_stride /= sizeof(pixel); do { int x; for (x = 0; x < w; x++) tmp_ptr[x] = FILTER_BILIN(src, x, mx, 1); tmp_ptr += 64; src += src_stride; } while (--tmp_h); tmp_ptr = tmp; do { int x; for (x = 0; x < w; x++) if (avg) { dst[x] = (dst[x] + FILTER_BILIN(tmp_ptr, x, my, 64) + 1) >> 1; } else { dst[x] = FILTER_BILIN(tmp_ptr, x, my, 64); } tmp_ptr += 64; dst += dst_stride; } while (--h); } #define bilin_2d_fn(opn, opa) \ static av_noinline void opn##_bilin_2d_hv_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int w, int h, int mx, int my) \ { \ do_bilin_2d_c(dst, dst_stride, src, src_stride, w, h, mx, my, opa); \ } bilin_2d_fn(put, 0) bilin_2d_fn(avg, 1) #undef bilin_2d_fn #define bilinf_fn_1d(sz, dir, dir_m, avg) \ static void avg##_bilin_##sz##dir##_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int h, int mx, int my) \ { \ avg##_bilin_1d_##dir##_c(dst, dst_stride, src, src_stride, sz, h, dir_m); \ } #define bilinf_fn_2d(sz, avg) \ static void avg##_bilin_##sz##hv_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int h, int mx, int my) \ { \ avg##_bilin_2d_hv_c(dst, dst_stride, src, src_stride, sz, h, mx, my); \ } #else #define bilinf_fn_1d(a, b, c, d) #define bilinf_fn_2d(a, b) #endif #define filter_fn(sz, avg) \ filter_fn_1d(sz, h, mx, regular, FILTER_8TAP_REGULAR, avg) \ filter_fn_1d(sz, v, my, regular, FILTER_8TAP_REGULAR, avg) \ filter_fn_2d(sz, regular, FILTER_8TAP_REGULAR, 
avg) \ filter_fn_1d(sz, h, mx, smooth, FILTER_8TAP_SMOOTH, avg) \ filter_fn_1d(sz, v, my, smooth, FILTER_8TAP_SMOOTH, avg) \ filter_fn_2d(sz, smooth, FILTER_8TAP_SMOOTH, avg) \ filter_fn_1d(sz, h, mx, sharp, FILTER_8TAP_SHARP, avg) \ filter_fn_1d(sz, v, my, sharp, FILTER_8TAP_SHARP, avg) \ filter_fn_2d(sz, sharp, FILTER_8TAP_SHARP, avg) \ bilinf_fn_1d(sz, h, mx, avg) \ bilinf_fn_1d(sz, v, my, avg) \ bilinf_fn_2d(sz, avg) #define filter_fn_set(avg) \ filter_fn(64, avg) \ filter_fn(32, avg) \ filter_fn(16, avg) \ filter_fn(8, avg) \ filter_fn(4, avg) filter_fn_set(put) filter_fn_set(avg) #undef filter_fn #undef filter_fn_set #undef filter_fn_1d #undef filter_fn_2d #undef bilinf_fn_1d #undef bilinf_fn_2d #if BIT_DEPTH != 8 void ff_vp9dsp_mc_init_10(VP9DSPContext *dsp); #endif #if BIT_DEPTH != 10 static #endif av_cold void FUNC(ff_vp9dsp_mc_init)(VP9DSPContext *dsp) { #if BIT_DEPTH == 12 ff_vp9dsp_mc_init_10(dsp); #else /* BIT_DEPTH == 12 */ #define init_fpel(idx1, idx2, sz, type) \ dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = type##sz##_c; \ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = type##sz##_c; \ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = type##sz##_c; \ dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = type##sz##_c #define init_copy_avg(idx, sz) \ init_fpel(idx, 0, sz, copy); \ init_fpel(idx, 1, sz, avg) init_copy_avg(0, 64); init_copy_avg(1, 32); init_copy_avg(2, 16); init_copy_avg(3, 8); init_copy_avg(4, 4); #undef init_copy_avg #undef init_fpel #endif /* BIT_DEPTH == 12 */ #define init_subpel1_bd_aware(idx1, idx2, idxh, idxv, sz, dir, type) \ dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = type##_8tap_smooth_##sz##dir##_c; \ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = type##_8tap_regular_##sz##dir##_c; \ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = type##_8tap_sharp_##sz##dir##_c #if BIT_DEPTH == 12 #define init_subpel1 init_subpel1_bd_aware #else #define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type) \ 
init_subpel1_bd_aware(idx1, idx2, idxh, idxv, sz, dir, type); \ dsp->mc[idx1][FILTER_BILINEAR ][idx2][idxh][idxv] = type##_bilin_##sz##dir##_c #endif #define init_subpel2(idx, idxh, idxv, dir, type) \ init_subpel1(0, idx, idxh, idxv, 64, dir, type); \ init_subpel1(1, idx, idxh, idxv, 32, dir, type); \ init_subpel1(2, idx, idxh, idxv, 16, dir, type); \ init_subpel1(3, idx, idxh, idxv, 8, dir, type); \ init_subpel1(4, idx, idxh, idxv, 4, dir, type) #define init_subpel3(idx, type) \ init_subpel2(idx, 1, 1, hv, type); \ init_subpel2(idx, 0, 1, v, type); \ init_subpel2(idx, 1, 0, h, type) init_subpel3(0, put); init_subpel3(1, avg); #undef init_subpel1 #undef init_subpel2 #undef init_subpel3 #undef init_subpel1_bd_aware } static av_always_inline void do_scaled_8tap_c(uint8_t *_dst, ptrdiff_t dst_stride, const uint8_t *_src, ptrdiff_t src_stride, int w, int h, int mx, int my, int dx, int dy, int avg, const int16_t (*filters)[8]) { int tmp_h = (((h - 1) * dy + my) >> 4) + 8; pixel tmp[64 * 135], *tmp_ptr = tmp; pixel *dst = (pixel *) _dst; const pixel *src = (const pixel *) _src; dst_stride /= sizeof(pixel); src_stride /= sizeof(pixel); src -= src_stride * 3; do { int x; int imx = mx, ioff = 0; for (x = 0; x < w; x++) { tmp_ptr[x] = FILTER_8TAP(src, ioff, filters[imx], 1); imx += dx; ioff += imx >> 4; imx &= 0xf; } tmp_ptr += 64; src += src_stride; } while (--tmp_h); tmp_ptr = tmp + 64 * 3; do { int x; const int16_t *filter = filters[my]; for (x = 0; x < w; x++) if (avg) { dst[x] = (dst[x] + FILTER_8TAP(tmp_ptr, x, filter, 64) + 1) >> 1; } else { dst[x] = FILTER_8TAP(tmp_ptr, x, filter, 64); } my += dy; tmp_ptr += (my >> 4) * 64; my &= 0xf; dst += dst_stride; } while (--h); } #define scaled_filter_8tap_fn(opn, opa) \ static av_noinline void opn##_scaled_8tap_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int w, int h, int mx, int my, int dx, int dy, \ const int16_t (*filters)[8]) \ { \ do_scaled_8tap_c(dst, dst_stride, src, src_stride, 
w, h, mx, my, dx, dy, \ opa, filters); \ } scaled_filter_8tap_fn(put, 0) scaled_filter_8tap_fn(avg, 1) #undef scaled_filter_8tap_fn #undef FILTER_8TAP #define scaled_filter_fn(sz, type, type_idx, avg) \ static void avg##_scaled_##type##_##sz##_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int h, int mx, int my, int dx, int dy) \ { \ avg##_scaled_8tap_c(dst, dst_stride, src, src_stride, sz, h, mx, my, dx, dy, \ ff_vp9_subpel_filters[type_idx]); \ } #if BIT_DEPTH != 12 static av_always_inline void do_scaled_bilin_c(uint8_t *_dst, ptrdiff_t dst_stride, const uint8_t *_src, ptrdiff_t src_stride, int w, int h, int mx, int my, int dx, int dy, int avg) { pixel tmp[64 * 129], *tmp_ptr = tmp; int tmp_h = (((h - 1) * dy + my) >> 4) + 2; pixel *dst = (pixel *) _dst; const pixel *src = (const pixel *) _src; dst_stride /= sizeof(pixel); src_stride /= sizeof(pixel); do { int x; int imx = mx, ioff = 0; for (x = 0; x < w; x++) { tmp_ptr[x] = FILTER_BILIN(src, ioff, imx, 1); imx += dx; ioff += imx >> 4; imx &= 0xf; } tmp_ptr += 64; src += src_stride; } while (--tmp_h); tmp_ptr = tmp; do { int x; for (x = 0; x < w; x++) if (avg) { dst[x] = (dst[x] + FILTER_BILIN(tmp_ptr, x, my, 64) + 1) >> 1; } else { dst[x] = FILTER_BILIN(tmp_ptr, x, my, 64); } my += dy; tmp_ptr += (my >> 4) * 64; my &= 0xf; dst += dst_stride; } while (--h); } #define scaled_bilin_fn(opn, opa) \ static av_noinline void opn##_scaled_bilin_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int w, int h, int mx, int my, int dx, int dy) \ { \ do_scaled_bilin_c(dst, dst_stride, src, src_stride, w, h, mx, my, dx, dy, opa); \ } scaled_bilin_fn(put, 0) scaled_bilin_fn(avg, 1) #undef scaled_bilin_fn #undef FILTER_BILIN #define scaled_bilinf_fn(sz, avg) \ static void avg##_scaled_bilin_##sz##_c(uint8_t *dst, ptrdiff_t dst_stride, \ const uint8_t *src, ptrdiff_t src_stride, \ int h, int mx, int my, int dx, int dy) \ { \ avg##_scaled_bilin_c(dst, 
dst_stride, src, src_stride, sz, h, mx, my, dx, dy); \ } #else #define scaled_bilinf_fn(a, b) #endif #define scaled_filter_fns(sz, avg) \ scaled_filter_fn(sz, regular, FILTER_8TAP_REGULAR, avg) \ scaled_filter_fn(sz, smooth, FILTER_8TAP_SMOOTH, avg) \ scaled_filter_fn(sz, sharp, FILTER_8TAP_SHARP, avg) \ scaled_bilinf_fn(sz, avg) #define scaled_filter_fn_set(avg) \ scaled_filter_fns(64, avg) \ scaled_filter_fns(32, avg) \ scaled_filter_fns(16, avg) \ scaled_filter_fns(8, avg) \ scaled_filter_fns(4, avg) scaled_filter_fn_set(put) scaled_filter_fn_set(avg) #undef scaled_filter_fns #undef scaled_filter_fn_set #undef scaled_filter_fn #undef scaled_bilinf_fn #if BIT_DEPTH != 8 void ff_vp9dsp_scaled_mc_init_10(VP9DSPContext *dsp); #endif #if BIT_DEPTH != 10 static #endif av_cold void FUNC(ff_vp9dsp_scaled_mc_init)(VP9DSPContext *dsp) { #define init_scaled_bd_aware(idx1, idx2, sz, type) \ dsp->smc[idx1][FILTER_8TAP_SMOOTH ][idx2] = type##_scaled_smooth_##sz##_c; \ dsp->smc[idx1][FILTER_8TAP_REGULAR][idx2] = type##_scaled_regular_##sz##_c; \ dsp->smc[idx1][FILTER_8TAP_SHARP ][idx2] = type##_scaled_sharp_##sz##_c #if BIT_DEPTH == 12 ff_vp9dsp_scaled_mc_init_10(dsp); #define init_scaled(a,b,c,d) init_scaled_bd_aware(a,b,c,d) #else #define init_scaled(idx1, idx2, sz, type) \ init_scaled_bd_aware(idx1, idx2, sz, type); \ dsp->smc[idx1][FILTER_BILINEAR ][idx2] = type##_scaled_bilin_##sz##_c #endif #define init_scaled_put_avg(idx, sz) \ init_scaled(idx, 0, sz, put); \ init_scaled(idx, 1, sz, avg) init_scaled_put_avg(0, 64); init_scaled_put_avg(1, 32); init_scaled_put_avg(2, 16); init_scaled_put_avg(3, 8); init_scaled_put_avg(4, 4); #undef init_scaled_put_avg #undef init_scaled #undef init_scaled_bd_aware } av_cold void FUNC(ff_vp9dsp_init)(VP9DSPContext *dsp) { FUNC(ff_vp9dsp_intrapred_init)(dsp); vp9dsp_itxfm_init(dsp); vp9dsp_loopfilter_init(dsp); FUNC(ff_vp9dsp_mc_init)(dsp); FUNC(ff_vp9dsp_scaled_mc_init)(dsp); }
20704bf1cc43f280ebdb06039bf2b32c9f5caf7e
3a2071c34e3c35847b2bcc2a5d3b3a74114daa0f
/subversion/libsvn_wc/externals.c
30d58768eed4c864cca7da5bfae7517c3ca03618
[ "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-other-permissive", "X11", "BSD-2-Clause", "LicenseRef-scancode-unknown-license-reference", "MIT", "HPND-Markus-Kuhn", "LicenseRef-scancode-unicode", "Apache-2.0", "FSFAP" ]
permissive
apache/subversion
18a9142afe63f060ffc0814fe0c758c91ad8bd31
dd957c4991e61bde23cc60d13449ea8b65f80c43
refs/heads/trunk
2023-09-04T15:22:36.755177
2023-08-29T19:55:03
2023-08-29T19:55:03
454,263
520
207
Apache-2.0
2023-08-26T14:17:30
2009-12-31T09:00:10
C
UTF-8
C
false
false
67,488
c
externals.c
/* * externals.c : routines dealing with (file) externals in the working copy * * ==================================================================== * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * ==================================================================== */ #include <stdlib.h> #include <string.h> #include <apr_pools.h> #include <apr_hash.h> #include <apr_tables.h> #include <apr_general.h> #include <apr_uri.h> #include "svn_dirent_uri.h" #include "svn_path.h" #include "svn_error.h" #include "svn_hash.h" #include "svn_io.h" #include "svn_pools.h" #include "svn_props.h" #include "svn_string.h" #include "svn_time.h" #include "svn_types.h" #include "svn_wc.h" #include "private/svn_skel.h" #include "private/svn_subr_private.h" #include "wc.h" #include "adm_files.h" #include "props.h" #include "translate.h" #include "workqueue.h" #include "conflicts.h" #include "textbase.h" #include "svn_private_config.h" /** Externals **/ /* * Look for either * * -r N * -rN * * in the LINE_PARTS array and update the revision field in ITEM with * the revision if the revision is found. Set REV_IDX to the index in * LINE_PARTS where the revision specification starts. Remove from * LINE_PARTS the element(s) that specify the revision. 
* Set REV_STR to the element that specifies the revision. * PARENT_DIRECTORY_DISPLAY and LINE are given to return a nice error * string. * * If this function returns successfully, then LINE_PARTS will have * only two elements in it. */ static svn_error_t * find_and_remove_externals_revision(int *rev_idx, const char **rev_str, const char **line_parts, int num_line_parts, svn_wc_external_item2_t *item, const char *parent_directory_display, const char *line, apr_pool_t *pool) { int i; for (i = 0; i < 2; ++i) { const char *token = line_parts[i]; if (token[0] == '-' && token[1] == 'r') { svn_opt_revision_t end_revision = { svn_opt_revision_unspecified }; const char *digits_ptr; int shift_count; int j; *rev_idx = i; if (token[2] == '\0') { /* There must be a total of four elements in the line if -r N is used. */ if (num_line_parts != 4) goto parse_error; shift_count = 2; digits_ptr = line_parts[i+1]; } else { /* There must be a total of three elements in the line if -rN is used. */ if (num_line_parts != 3) goto parse_error; shift_count = 1; digits_ptr = token+2; } if (svn_opt_parse_revision(&item->revision, &end_revision, digits_ptr, pool) != 0) goto parse_error; /* We want a single revision, not a range. */ if (end_revision.kind != svn_opt_revision_unspecified) goto parse_error; /* Allow only numbers and dates, not keywords. */ if (item->revision.kind != svn_opt_revision_number && item->revision.kind != svn_opt_revision_date) goto parse_error; /* Shift any line elements past the revision specification down over the revision specification. */ for (j = i; j < num_line_parts-shift_count; ++j) line_parts[j] = line_parts[j+shift_count]; line_parts[num_line_parts-shift_count] = NULL; *rev_str = apr_psprintf(pool, "-r%s", digits_ptr); /* Found the revision, so leave the function immediately, do * not continue looking for additional revisions. */ return SVN_NO_ERROR; } } /* No revision was found, so there must be exactly two items in the line array. 
*/ if (num_line_parts == 2) return SVN_NO_ERROR; parse_error: return svn_error_createf (SVN_ERR_CLIENT_INVALID_EXTERNALS_DESCRIPTION, NULL, _("Error parsing %s property on '%s': '%s'"), SVN_PROP_EXTERNALS, parent_directory_display, line); } svn_error_t * svn_wc__parse_externals_description(apr_array_header_t **externals_p, apr_array_header_t **parser_infos_p, const char *defining_directory, const char *desc, svn_boolean_t canonicalize_url, apr_pool_t *pool) { int i; apr_array_header_t *externals = NULL; apr_array_header_t *parser_infos = NULL; apr_array_header_t *lines = svn_cstring_split(desc, "\n\r", TRUE, pool); const char *defining_directory_display = svn_path_is_url(defining_directory) ? defining_directory : svn_dirent_local_style(defining_directory, pool); /* If an error occurs halfway through parsing, *externals_p should stay * untouched. So, store the list in a local var first. */ if (externals_p) externals = apr_array_make(pool, 1, sizeof(svn_wc_external_item2_t *)); if (parser_infos_p) parser_infos = apr_array_make(pool, 1, sizeof(svn_wc__externals_parser_info_t *)); for (i = 0; i < lines->nelts; i++) { const char *line = APR_ARRAY_IDX(lines, i, const char *); apr_status_t status; char **line_parts; int num_line_parts; svn_wc_external_item2_t *item; const char *token0; const char *token1; svn_boolean_t token0_is_url; svn_boolean_t token1_is_url; svn_wc__externals_parser_info_t *info = NULL; /* Index into line_parts where the revision specification started. */ int rev_idx = -1; const char *rev_str = NULL; if ((! line) || (line[0] == '#')) continue; /* else proceed */ status = apr_tokenize_to_argv(line, &line_parts, pool); if (status) return svn_error_wrap_apr(status, _("Can't split line into components: '%s'"), line); /* Count the number of tokens. 
*/ for (num_line_parts = 0; line_parts[num_line_parts]; num_line_parts++) ; SVN_ERR(svn_wc_external_item2_create(&item, pool)); item->revision.kind = svn_opt_revision_unspecified; item->peg_revision.kind = svn_opt_revision_unspecified; if (parser_infos) info = apr_pcalloc(pool, sizeof(*info)); /* * There are six different formats of externals: * * 1) DIR URL * 2) DIR -r N URL * 3) DIR -rN URL * 4) URL DIR * 5) -r N URL DIR * 6) -rN URL DIR * * The last three allow peg revisions in the URL. * * With relative URLs and no '-rN' or '-r N', there is no way to * distinguish between 'DIR URL' and 'URL DIR' when URL is a * relative URL like /svn/repos/trunk, so this case is taken as * case 4). */ if (num_line_parts < 2 || num_line_parts > 4) return svn_error_createf (SVN_ERR_CLIENT_INVALID_EXTERNALS_DESCRIPTION, NULL, _("Error parsing %s property on '%s': '%s'"), SVN_PROP_EXTERNALS, defining_directory_display, line); /* To make it easy to check for the forms, find and remove -r N or -rN from the line item array. If it is found, rev_idx contains the index into line_parts where '-r' was found and set item->revision to the parsed revision. */ /* ### ugh. stupid cast. 
*/ SVN_ERR(find_and_remove_externals_revision(&rev_idx, &rev_str, (const char **)line_parts, num_line_parts, item, defining_directory_display, line, pool)); token0 = line_parts[0]; token1 = line_parts[1]; token0_is_url = svn_path_is_url(token0); token1_is_url = svn_path_is_url(token1); if (token0_is_url && token1_is_url) return svn_error_createf (SVN_ERR_CLIENT_INVALID_EXTERNALS_DESCRIPTION, NULL, _("Invalid %s property on '%s': " "cannot use two absolute URLs ('%s' and '%s') in an external; " "one must be a path where an absolute or relative URL is " "checked out to"), SVN_PROP_EXTERNALS, defining_directory_display, token0, token1); if (0 == rev_idx && token1_is_url) return svn_error_createf (SVN_ERR_CLIENT_INVALID_EXTERNALS_DESCRIPTION, NULL, _("Invalid %s property on '%s': " "cannot use a URL '%s' as the target directory for an external " "definition"), SVN_PROP_EXTERNALS, defining_directory_display, token1); if (1 == rev_idx && token0_is_url) return svn_error_createf (SVN_ERR_CLIENT_INVALID_EXTERNALS_DESCRIPTION, NULL, _("Invalid %s property on '%s': " "cannot use a URL '%s' as the target directory for an external " "definition"), SVN_PROP_EXTERNALS, defining_directory_display, token0); /* The appearance of -r N or -rN forces the type of external. If -r is at the beginning of the line or the first token is an absolute URL or if the second token is not an absolute URL, then the URL supports peg revisions. */ if (0 == rev_idx || (-1 == rev_idx && (token0_is_url || ! token1_is_url))) { /* The URL is passed to svn_opt_parse_path in uncanonicalized form so that the scheme relative URL //hostname/foo is not collapsed to a server root relative URL /hostname/foo. 
*/ SVN_ERR(svn_opt_parse_path(&item->peg_revision, &item->url, token0, pool)); item->target_dir = token1; if (info) { info->format = svn_wc__external_description_format_2; if (rev_str) info->rev_str = apr_pstrdup(pool, rev_str); if (item->peg_revision.kind != svn_opt_revision_unspecified) info->peg_rev_str = strrchr(token0, '@'); } } else { item->target_dir = token0; item->url = token1; item->peg_revision = item->revision; if (info) { info->format = svn_wc__external_description_format_1; if (rev_str) { info->rev_str = apr_pstrdup(pool, rev_str); info->peg_rev_str = info->rev_str; } } } SVN_ERR(svn_opt_resolve_revisions(&item->peg_revision, &item->revision, TRUE, FALSE, pool)); item->target_dir = svn_dirent_internal_style(item->target_dir, pool); if (item->target_dir[0] == '\0' || svn_dirent_is_absolute(item->target_dir) || svn_path_is_backpath_present(item->target_dir) || !svn_dirent_skip_ancestor("dummy", svn_dirent_join("dummy", item->target_dir, pool))) return svn_error_createf (SVN_ERR_CLIENT_INVALID_EXTERNALS_DESCRIPTION, NULL, _("Invalid %s property on '%s': " "target '%s' is an absolute path or involves '..'"), SVN_PROP_EXTERNALS, defining_directory_display, item->target_dir); if (canonicalize_url) { /* Uh... this is stupid. But it's consistent with what our code did before we split up the relpath/dirent/uri APIs. Still, given this, it's no wonder that our own libraries don't ask this function to canonicalize the results. 
*/ if (svn_path_is_url(item->url)) item->url = svn_uri_canonicalize(item->url, pool); else item->url = svn_dirent_canonicalize(item->url, pool); } if (externals) APR_ARRAY_PUSH(externals, svn_wc_external_item2_t *) = item; if (parser_infos) APR_ARRAY_PUSH(parser_infos, svn_wc__externals_parser_info_t *) = info; } if (externals_p) *externals_p = externals; if (parser_infos_p) *parser_infos_p = parser_infos; return SVN_NO_ERROR; } svn_error_t * svn_wc_parse_externals_description3(apr_array_header_t **externals_p, const char *defining_directory, const char *desc, svn_boolean_t canonicalize_url, apr_pool_t *pool) { return svn_error_trace(svn_wc__parse_externals_description(externals_p, NULL, defining_directory, desc, canonicalize_url, pool)); } svn_error_t * svn_wc__externals_find_target_dups(apr_array_header_t **duplicate_targets, apr_array_header_t *externals, apr_pool_t *pool, apr_pool_t *scratch_pool) { int i; unsigned int len; unsigned int len2; const char *target; apr_hash_t *targets = apr_hash_make(scratch_pool); apr_hash_t *targets2 = NULL; *duplicate_targets = NULL; for (i = 0; i < externals->nelts; i++) { target = APR_ARRAY_IDX(externals, i, svn_wc_external_item2_t*)->target_dir; len = apr_hash_count(targets); svn_hash_sets(targets, target, ""); if (len == apr_hash_count(targets)) { /* Hashtable length is unchanged. This must be a duplicate. */ /* Collapse multiple duplicates of the same target by using a second * hash layer. */ if (! targets2) targets2 = apr_hash_make(scratch_pool); len2 = apr_hash_count(targets2); svn_hash_sets(targets2, target, ""); if (len2 < apr_hash_count(targets2)) { /* The second hash list just got bigger, i.e. this target has * not been counted as duplicate before. */ if (! *duplicate_targets) { *duplicate_targets = apr_array_make( pool, 1, sizeof(svn_wc_external_item2_t*)); } APR_ARRAY_PUSH((*duplicate_targets), const char *) = target; } /* Else, this same target has already been recorded as a duplicate, * don't count it again. 
*/ } } return SVN_NO_ERROR; } struct edit_baton { apr_pool_t *pool; svn_wc__db_t *db; /* We explicitly use wri_abspath and local_abspath here, because we might want to install file externals in an obstructing working copy */ const char *wri_abspath; /* The working defining the file external */ const char *local_abspath; /* The file external itself */ const char *name; /* The basename of the file external itself */ /* Information from the caller */ svn_boolean_t use_commit_times; const apr_array_header_t *ext_patterns; const char *diff3cmd; const char *repos_root_url; const char *repos_uuid; const char *old_repos_relpath; const char *new_repos_relpath; const char *record_ancestor_abspath; const char *recorded_repos_relpath; svn_revnum_t recorded_peg_revision; svn_revnum_t recorded_revision; /* Introducing a new file external */ svn_boolean_t added; svn_wc_conflict_resolver_func2_t conflict_func; void *conflict_baton; svn_cancel_func_t cancel_func; void *cancel_baton; svn_wc_notify_func2_t notify_func; void *notify_baton; svn_revnum_t *target_revision; /* What was there before the update */ svn_revnum_t original_revision; const svn_checksum_t *original_checksum; /* What we are installing now */ svn_wc__db_install_data_t *install_data; svn_checksum_t *new_sha1_checksum; svn_checksum_t *new_md5_checksum; /* List of incoming propchanges */ apr_array_header_t *propchanges; /* Array of svn_prop_inherited_item_t * structures representing the properties inherited by the base node at LOCAL_ABSPATH. 
*/ apr_array_header_t *iprops; /* The last change information */ svn_revnum_t changed_rev; apr_time_t changed_date; const char *changed_author; svn_boolean_t had_props; svn_boolean_t file_closed; }; /* svn_delta_editor_t function for svn_wc__get_file_external_editor */ static svn_error_t * set_target_revision(void *edit_baton, svn_revnum_t target_revision, apr_pool_t *pool) { struct edit_baton *eb = edit_baton; *eb->target_revision = target_revision; return SVN_NO_ERROR; } /* svn_delta_editor_t function for svn_wc__get_file_external_editor */ static svn_error_t * open_root(void *edit_baton, svn_revnum_t base_revision, apr_pool_t *dir_pool, void **root_baton) { *root_baton = edit_baton; return SVN_NO_ERROR; } /* svn_delta_editor_t function for svn_wc__get_file_external_editor */ static svn_error_t * add_file(const char *path, void *parent_baton, const char *copyfrom_path, svn_revnum_t copyfrom_revision, apr_pool_t *file_pool, void **file_baton) { struct edit_baton *eb = parent_baton; if (strcmp(path, eb->name)) return svn_error_createf(SVN_ERR_WC_PATH_NOT_FOUND, NULL, _("This editor can only update '%s'"), svn_dirent_local_style(eb->local_abspath, file_pool)); *file_baton = eb; eb->original_revision = SVN_INVALID_REVNUM; eb->added = TRUE; return SVN_NO_ERROR; } /* svn_delta_editor_t function for svn_wc__get_file_external_editor */ static svn_error_t * open_file(const char *path, void *parent_baton, svn_revnum_t base_revision, apr_pool_t *file_pool, void **file_baton) { struct edit_baton *eb = parent_baton; svn_node_kind_t kind; if (strcmp(path, eb->name)) return svn_error_createf(SVN_ERR_WC_PATH_NOT_FOUND, NULL, _("This editor can only update '%s'"), svn_dirent_local_style(eb->local_abspath, file_pool)); *file_baton = eb; SVN_ERR(svn_wc__db_base_get_info(NULL, &kind, &eb->original_revision, &eb->old_repos_relpath, NULL, NULL, &eb->changed_rev, &eb->changed_date, &eb->changed_author, NULL, &eb->original_checksum, NULL, NULL, &eb->had_props, NULL, NULL, eb->db, 
eb->local_abspath, eb->pool, file_pool)); if (kind != svn_node_file) return svn_error_createf(SVN_ERR_WC_PATH_UNEXPECTED_STATUS, NULL, _("Node '%s' is no existing file external"), svn_dirent_local_style(eb->local_abspath, file_pool)); return SVN_NO_ERROR; } /* svn_delta_editor_t function for svn_wc__get_file_external_editor */ static svn_error_t * apply_textdelta(void *file_baton, const char *base_checksum_digest, apr_pool_t *pool, svn_txdelta_window_handler_t *handler, void **handler_baton) { struct edit_baton *eb = file_baton; svn_stream_t *src_stream; svn_stream_t *dest_stream; if (eb->original_checksum) { if (base_checksum_digest) { svn_checksum_t *expected_checksum; const svn_checksum_t *original_md5; SVN_ERR(svn_checksum_parse_hex(&expected_checksum, svn_checksum_md5, base_checksum_digest, pool)); if (eb->original_checksum->kind != svn_checksum_md5) SVN_ERR(svn_wc__db_pristine_get_md5(&original_md5, eb->db, eb->wri_abspath, eb->original_checksum, pool, pool)); else original_md5 = eb->original_checksum; if (!svn_checksum_match(expected_checksum, original_md5)) return svn_error_trace(svn_checksum_mismatch_err( expected_checksum, original_md5, pool, _("Base checksum mismatch for '%s'"), svn_dirent_local_style(eb->local_abspath, pool))); } SVN_ERR(svn_wc__textbase_get_contents(&src_stream, eb->db, eb->local_abspath, eb->original_checksum, FALSE, pool, pool)); } else src_stream = svn_stream_empty(pool); SVN_ERR(svn_wc__textbase_prepare_install(&dest_stream, &eb->install_data, &eb->new_sha1_checksum, &eb->new_md5_checksum, eb->db, eb->local_abspath, TRUE, eb->pool, pool)); svn_txdelta_apply2(src_stream, dest_stream, NULL, eb->local_abspath, pool, handler, handler_baton); return SVN_NO_ERROR; } /* svn_delta_editor_t function for svn_wc__get_file_external_editor */ static svn_error_t * change_file_prop(void *file_baton, const char *name, const svn_string_t *value, apr_pool_t *pool) { struct edit_baton *eb = file_baton; svn_prop_t *propchange; propchange = 
apr_array_push(eb->propchanges); propchange->name = apr_pstrdup(eb->pool, name); propchange->value = svn_string_dup(value, eb->pool); return SVN_NO_ERROR; } /* svn_delta_editor_t function for svn_wc__get_file_external_editor */ static svn_error_t * close_file(void *file_baton, const char *expected_md5_digest, apr_pool_t *pool) { struct edit_baton *eb = file_baton; svn_wc_notify_state_t prop_state = svn_wc_notify_state_unknown; svn_wc_notify_state_t content_state = svn_wc_notify_state_unknown; svn_boolean_t obstructed = FALSE; eb->file_closed = TRUE; /* We bump the revision here */ /* Check the checksum, if provided */ if (expected_md5_digest) { svn_checksum_t *expected_md5_checksum; const svn_checksum_t *actual_md5_checksum = eb->new_md5_checksum; SVN_ERR(svn_checksum_parse_hex(&expected_md5_checksum, svn_checksum_md5, expected_md5_digest, pool)); if (actual_md5_checksum == NULL) { actual_md5_checksum = eb->original_checksum; if (actual_md5_checksum != NULL && actual_md5_checksum->kind != svn_checksum_md5) { SVN_ERR(svn_wc__db_pristine_get_md5(&actual_md5_checksum, eb->db, eb->wri_abspath, actual_md5_checksum, pool, pool)); } } if (! svn_checksum_match(expected_md5_checksum, actual_md5_checksum)) return svn_checksum_mismatch_err( expected_md5_checksum, actual_md5_checksum, pool, _("Checksum mismatch for '%s'"), svn_dirent_local_style(eb->local_abspath, pool)); } /* First move the file in the pristine store; this hands over the cleanup behavior to the pristine store. 
*/ if (eb->new_sha1_checksum) { SVN_ERR(svn_wc__db_pristine_install(eb->install_data, eb->new_sha1_checksum, eb->new_md5_checksum, pool)); eb->install_data = NULL; } /* Merge the changes */ { svn_skel_t *all_work_items = NULL; svn_skel_t *conflict_skel = NULL; svn_skel_t *work_item; apr_hash_t *base_props = NULL; apr_hash_t *actual_props = NULL; apr_hash_t *new_pristine_props = NULL; apr_hash_t *new_actual_props = NULL; apr_hash_t *new_dav_props = NULL; const svn_checksum_t *new_checksum = NULL; const svn_checksum_t *original_checksum = NULL; svn_boolean_t added = !SVN_IS_VALID_REVNUM(eb->original_revision); if (! added) { new_checksum = eb->original_checksum; if (eb->had_props) SVN_ERR(svn_wc__db_base_get_props( &base_props, eb->db, eb->local_abspath, pool, pool)); SVN_ERR(svn_wc__db_read_props( &actual_props, eb->db, eb->local_abspath, pool, pool)); } if (!base_props) base_props = apr_hash_make(pool); if (!actual_props) actual_props = apr_hash_make(pool); if (eb->new_sha1_checksum) new_checksum = eb->new_sha1_checksum; /* Merge the properties */ { apr_array_header_t *entry_prop_changes; apr_array_header_t *dav_prop_changes; apr_array_header_t *regular_prop_changes; int i; SVN_ERR(svn_categorize_props(eb->propchanges, &entry_prop_changes, &dav_prop_changes, &regular_prop_changes, pool)); /* Read the entry-prop changes to update the last-changed info. */ for (i = 0; i < entry_prop_changes->nelts; i++) { const svn_prop_t *prop = &APR_ARRAY_IDX(entry_prop_changes, i, svn_prop_t); if (! prop->value) continue; /* authz or something */ if (! strcmp(prop->name, SVN_PROP_ENTRY_LAST_AUTHOR)) eb->changed_author = apr_pstrdup(pool, prop->value->data); else if (! strcmp(prop->name, SVN_PROP_ENTRY_COMMITTED_REV)) { apr_int64_t rev; SVN_ERR(svn_cstring_atoi64(&rev, prop->value->data)); eb->changed_rev = (svn_revnum_t)rev; } else if (! 
strcmp(prop->name, SVN_PROP_ENTRY_COMMITTED_DATE)) SVN_ERR(svn_time_from_cstring(&eb->changed_date, prop->value->data, pool)); } /* Store the DAV-prop (aka WC-prop) changes. (This treats a list * of changes as a list of new props, but we only use this when * adding a new file and it's equivalent in that case.) */ if (dav_prop_changes->nelts > 0) new_dav_props = svn_prop_array_to_hash(dav_prop_changes, pool); /* Merge the regular prop changes. */ if (regular_prop_changes->nelts > 0) { new_pristine_props = svn_prop__patch(base_props, regular_prop_changes, pool); SVN_ERR(svn_wc__merge_props(&conflict_skel, &prop_state, &new_actual_props, eb->db, eb->local_abspath, NULL /* server_baseprops*/, base_props, actual_props, regular_prop_changes, pool, pool)); } else { new_pristine_props = base_props; new_actual_props = actual_props; } } /* Merge the text */ if (eb->new_sha1_checksum) { svn_node_kind_t disk_kind; svn_boolean_t install_pristine = FALSE; SVN_ERR(svn_io_check_path(eb->local_abspath, &disk_kind, pool)); if (disk_kind == svn_node_none) { /* Just install the file */ install_pristine = TRUE; content_state = svn_wc_notify_state_changed; } else if (disk_kind != svn_node_file || (eb->added && disk_kind == svn_node_file)) { /* The node is obstructed; we just change the DB */ obstructed = TRUE; content_state = svn_wc_notify_state_unchanged; } else { svn_boolean_t is_mod; SVN_ERR(svn_wc__internal_file_modified_p(&is_mod, eb->db, eb->local_abspath, FALSE, pool)); if (!is_mod) { install_pristine = TRUE; content_state = svn_wc_notify_state_changed; } else { svn_boolean_t found_text_conflict; /* Ok, we have to do some work to merge a local change */ SVN_ERR(svn_wc__perform_file_merge(&work_item, &conflict_skel, &found_text_conflict, eb->db, eb->local_abspath, eb->wri_abspath, new_checksum, original_checksum, actual_props, eb->ext_patterns, eb->original_revision, *eb->target_revision, eb->propchanges, eb->diff3cmd, eb->cancel_func, eb->cancel_baton, pool, pool)); 
all_work_items = svn_wc__wq_merge(all_work_items, work_item, pool); if (found_text_conflict) content_state = svn_wc_notify_state_conflicted; else content_state = svn_wc_notify_state_merged; } } if (install_pristine) { svn_stream_t *contents; const char *tmpdir_abspath; svn_stream_t *tmpstream; const char *tmpfile_abspath; SVN_ERR(svn_wc__db_pristine_read(&contents, NULL, eb->db, eb->wri_abspath, eb->new_sha1_checksum, pool, pool)); if (!contents) return svn_error_create(SVN_ERR_WC_PRISTINE_DEHYDRATED, NULL, NULL); SVN_ERR(svn_wc__db_temp_wcroot_tempdir(&tmpdir_abspath, eb->db, eb->wri_abspath, pool, pool)); SVN_ERR(svn_stream_open_unique(&tmpstream, &tmpfile_abspath, tmpdir_abspath, svn_io_file_del_none, pool, pool)); SVN_ERR(svn_stream_copy3(contents, tmpstream, eb->cancel_func, eb->cancel_baton, pool)); SVN_ERR(svn_wc__wq_build_file_install(&work_item, eb->db, eb->local_abspath, tmpfile_abspath, eb->use_commit_times, TRUE, pool, pool)); all_work_items = svn_wc__wq_merge(all_work_items, work_item, pool); SVN_ERR(svn_wc__wq_build_file_remove(&work_item, eb->db, eb->wri_abspath, tmpfile_abspath, pool, pool)); all_work_items = svn_wc__wq_merge(all_work_items, work_item, pool); } } else { content_state = svn_wc_notify_state_unchanged; /* ### Retranslate on magic property changes, etc. 
*/ } /* Generate a conflict description, if needed */ if (conflict_skel) { SVN_ERR(svn_wc__conflict_skel_set_op_switch( conflict_skel, svn_wc_conflict_version_create2( eb->repos_root_url, eb->repos_uuid, eb->old_repos_relpath, eb->original_revision, svn_node_file, pool), svn_wc_conflict_version_create2( eb->repos_root_url, eb->repos_uuid, eb->new_repos_relpath, *eb->target_revision, svn_node_file, pool), pool, pool)); SVN_ERR(svn_wc__conflict_create_markers(&work_item, eb->db, eb->local_abspath, conflict_skel, pool, pool)); all_work_items = svn_wc__wq_merge(all_work_items, work_item, pool); } /* Install the file in the DB */ SVN_ERR(svn_wc__db_external_add_file( eb->db, eb->local_abspath, eb->wri_abspath, eb->new_repos_relpath, eb->repos_root_url, eb->repos_uuid, *eb->target_revision, new_pristine_props, eb->iprops, eb->changed_rev, eb->changed_date, eb->changed_author, new_checksum, new_dav_props, eb->record_ancestor_abspath, eb->recorded_repos_relpath, eb->recorded_peg_revision, eb->recorded_revision, TRUE, new_actual_props, FALSE /* keep_recorded_info */, conflict_skel, all_work_items, pool)); /* close_edit may also update iprops for switched files, catching those for which close_file is never called (e.g. an update of a file external with no changes). So as a minor optimization we clear the iprops so as not to set them again in close_edit. */ eb->iprops = NULL; /* Run the work queue to complete the installation */ SVN_ERR(svn_wc__wq_run(eb->db, eb->wri_abspath, eb->cancel_func, eb->cancel_baton, pool)); if (conflict_skel && eb->conflict_func) SVN_ERR(svn_wc__conflict_invoke_resolver(eb->db, eb->local_abspath, svn_node_file, conflict_skel, NULL /* merge_options */, eb->conflict_func, eb->conflict_baton, eb->cancel_func, eb->cancel_baton, pool)); } /* Notify */ if (eb->notify_func) { svn_wc_notify_action_t action; svn_wc_notify_t *notify; if (!eb->added) action = obstructed ? 
svn_wc_notify_update_shadowed_update : svn_wc_notify_update_update; else action = obstructed ? svn_wc_notify_update_shadowed_add : svn_wc_notify_update_add; notify = svn_wc_create_notify(eb->local_abspath, action, pool); notify->kind = svn_node_file; notify->revision = *eb->target_revision; notify->prop_state = prop_state; notify->content_state = content_state; notify->old_revision = eb->original_revision; eb->notify_func(eb->notify_baton, notify, pool); } return SVN_NO_ERROR; } /* svn_delta_editor_t function for svn_wc__get_file_external_editor */ static svn_error_t * close_edit(void *edit_baton, apr_pool_t *pool) { struct edit_baton *eb = edit_baton; if (!eb->file_closed) { apr_hash_t *wcroot_iprops = NULL; /* The file wasn't updated, but its url or revision might have... e.g. switch between branches for relative externals. Just bump the information as that is just as expensive as investigating when we should and shouldn't update it... and avoid hard to debug edge cases */ if (eb->iprops) { wcroot_iprops = apr_hash_make(pool); svn_hash_sets(wcroot_iprops, eb->local_abspath, eb->iprops); } SVN_ERR(svn_wc__db_op_bump_revisions_post_update(eb->db, eb->local_abspath, svn_depth_infinity, eb->new_repos_relpath, eb->repos_root_url, eb->repos_uuid, *eb->target_revision, apr_hash_make(pool) /* exclude_relpaths */, wcroot_iprops, TRUE /* empty update */, eb->notify_func, eb->notify_baton, pool)); } return SVN_NO_ERROR; } svn_error_t * svn_wc__get_file_external_editor(const svn_delta_editor_t **editor, void **edit_baton, svn_revnum_t *target_revision, svn_wc_context_t *wc_ctx, const char *local_abspath, const char *wri_abspath, const char *url, const char *repos_root_url, const char *repos_uuid, apr_array_header_t *iprops, svn_boolean_t use_commit_times, const char *diff3_cmd, const apr_array_header_t *preserved_exts, const char *record_ancestor_abspath, const char *recorded_url, const svn_opt_revision_t *recorded_peg_rev, const svn_opt_revision_t *recorded_rev, 
svn_wc_conflict_resolver_func2_t conflict_func, void *conflict_baton, svn_cancel_func_t cancel_func, void *cancel_baton, svn_wc_notify_func2_t notify_func, void *notify_baton, apr_pool_t *result_pool, apr_pool_t *scratch_pool) { svn_wc__db_t *db = wc_ctx->db; apr_pool_t *edit_pool = result_pool; struct edit_baton *eb = apr_pcalloc(edit_pool, sizeof(*eb)); svn_delta_editor_t *tree_editor = svn_delta_default_editor(edit_pool); eb->pool = edit_pool; eb->db = db; eb->local_abspath = apr_pstrdup(edit_pool, local_abspath); if (wri_abspath) eb->wri_abspath = apr_pstrdup(edit_pool, wri_abspath); else eb->wri_abspath = svn_dirent_dirname(local_abspath, edit_pool); eb->name = svn_dirent_basename(eb->local_abspath, NULL); eb->target_revision = target_revision; eb->repos_root_url = apr_pstrdup(edit_pool, repos_root_url); eb->repos_uuid = apr_pstrdup(edit_pool, repos_uuid); eb->new_repos_relpath = svn_uri_skip_ancestor(eb->repos_root_url, url, edit_pool); eb->old_repos_relpath = eb->new_repos_relpath; eb->original_revision = SVN_INVALID_REVNUM; eb->iprops = iprops; eb->use_commit_times = use_commit_times; eb->ext_patterns = preserved_exts; eb->diff3cmd = diff3_cmd; eb->record_ancestor_abspath = apr_pstrdup(edit_pool,record_ancestor_abspath); eb->recorded_repos_relpath = svn_uri_skip_ancestor(repos_root_url, recorded_url, edit_pool); eb->changed_rev = SVN_INVALID_REVNUM; if (recorded_peg_rev->kind == svn_opt_revision_number) eb->recorded_peg_revision = recorded_peg_rev->value.number; else eb->recorded_peg_revision = SVN_INVALID_REVNUM; /* Not fixed/HEAD */ if (recorded_rev->kind == svn_opt_revision_number) eb->recorded_revision = recorded_rev->value.number; else eb->recorded_revision = SVN_INVALID_REVNUM; /* Not fixed/HEAD */ eb->conflict_func = conflict_func; eb->conflict_baton = conflict_baton; eb->cancel_func = cancel_func; eb->cancel_baton = cancel_baton; eb->notify_func = notify_func; eb->notify_baton = notify_baton; eb->propchanges = apr_array_make(edit_pool, 1, 
sizeof(svn_prop_t)); tree_editor->open_root = open_root; tree_editor->set_target_revision = set_target_revision; tree_editor->add_file = add_file; tree_editor->open_file = open_file; tree_editor->apply_textdelta = apply_textdelta; tree_editor->change_file_prop = change_file_prop; tree_editor->close_file = close_file; tree_editor->close_edit = close_edit; return svn_delta_get_cancellation_editor(cancel_func, cancel_baton, tree_editor, eb, editor, edit_baton, result_pool); } svn_error_t * svn_wc__crawl_file_external(svn_wc_context_t *wc_ctx, const char *local_abspath, const svn_ra_reporter3_t *reporter, void *report_baton, svn_boolean_t restore_files, svn_boolean_t use_commit_times, svn_cancel_func_t cancel_func, void *cancel_baton, svn_wc_notify_func2_t notify_func, void *notify_baton, apr_pool_t *scratch_pool) { svn_wc__db_t *db = wc_ctx->db; svn_error_t *err; svn_node_kind_t kind; svn_wc__db_lock_t *lock; svn_revnum_t revision; const char *repos_root_url; const char *repos_relpath; svn_boolean_t update_root; err = svn_wc__db_base_get_info(NULL, &kind, &revision, &repos_relpath, &repos_root_url, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &lock, NULL, NULL, &update_root, db, local_abspath, scratch_pool, scratch_pool); if (err || kind == svn_node_dir || !update_root) { if (err && err->apr_err != SVN_ERR_WC_PATH_NOT_FOUND) return svn_error_trace(err); svn_error_clear(err); /* We don't know about this node, so all we have to do is tell the reporter that we don't know this node. But first we have to start the report by sending some basic information for the root. */ SVN_ERR(reporter->set_path(report_baton, "", 0, svn_depth_infinity, FALSE, NULL, scratch_pool)); SVN_ERR(reporter->delete_path(report_baton, "", scratch_pool)); /* Finish the report, which causes the update editor to be driven. 
*/ SVN_ERR(reporter->finish_report(report_baton, scratch_pool)); return SVN_NO_ERROR; } else { if (restore_files) { svn_node_kind_t disk_kind; SVN_ERR(svn_io_check_path(local_abspath, &disk_kind, scratch_pool)); if (disk_kind == svn_node_none) { err = svn_wc_restore2(wc_ctx, local_abspath, use_commit_times, scratch_pool); if (err) { if (err->apr_err != SVN_ERR_WC_PATH_UNEXPECTED_STATUS) return svn_error_trace(err); svn_error_clear(err); } } } /* Report that we know the path */ SVN_ERR(reporter->set_path(report_baton, "", revision, svn_depth_infinity, FALSE, NULL, scratch_pool)); /* For compatibility with the normal update editor report we report the target as switched. ### We can probably report a parent url and unswitched later */ SVN_ERR(reporter->link_path(report_baton, "", svn_path_url_add_component2(repos_root_url, repos_relpath, scratch_pool), revision, svn_depth_infinity, FALSE /* start_empty*/, lock ? lock->token : NULL, scratch_pool)); } return svn_error_trace(reporter->finish_report(report_baton, scratch_pool)); } svn_error_t * svn_wc__read_external_info(svn_node_kind_t *external_kind, const char **defining_abspath, const char **defining_url, svn_revnum_t *defining_operational_revision, svn_revnum_t *defining_revision, svn_wc_context_t *wc_ctx, const char *wri_abspath, const char *local_abspath, svn_boolean_t ignore_enoent, apr_pool_t *result_pool, apr_pool_t *scratch_pool) { const char *repos_root_url; svn_wc__db_status_t status; svn_node_kind_t kind; svn_error_t *err; err = svn_wc__db_external_read(&status, &kind, defining_abspath, defining_url ? 
&repos_root_url : NULL, NULL, defining_url, defining_operational_revision, defining_revision, wc_ctx->db, local_abspath, wri_abspath, result_pool, scratch_pool); if (err) { if (err->apr_err != SVN_ERR_WC_PATH_NOT_FOUND || !ignore_enoent) return svn_error_trace(err); svn_error_clear(err); if (external_kind) *external_kind = svn_node_none; if (defining_abspath) *defining_abspath = NULL; if (defining_url) *defining_url = NULL; if (defining_operational_revision) *defining_operational_revision = SVN_INVALID_REVNUM; if (defining_revision) *defining_revision = SVN_INVALID_REVNUM; return SVN_NO_ERROR; } if (external_kind) { if (status != svn_wc__db_status_normal) *external_kind = svn_node_unknown; else switch(kind) { case svn_node_file: case svn_node_symlink: *external_kind = svn_node_file; break; case svn_node_dir: *external_kind = svn_node_dir; break; default: *external_kind = svn_node_none; } } if (defining_url && *defining_url) *defining_url = svn_path_url_add_component2(repos_root_url, *defining_url, result_pool); return SVN_NO_ERROR; } /* Return TRUE in *IS_ROLLED_OUT iff a node exists at XINFO->LOCAL_ABSPATH and * if that node's origin corresponds with XINFO->REPOS_ROOT_URL and * XINFO->REPOS_RELPATH. All allocations are made in SCRATCH_POOL. 
*/ static svn_error_t * is_external_rolled_out(svn_boolean_t *is_rolled_out, svn_wc_context_t *wc_ctx, svn_wc__committable_external_info_t *xinfo, apr_pool_t *scratch_pool) { const char *repos_relpath; const char *repos_root_url; svn_error_t *err; *is_rolled_out = FALSE; err = svn_wc__db_base_get_info(NULL, NULL, NULL, &repos_relpath, &repos_root_url, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, wc_ctx->db, xinfo->local_abspath, scratch_pool, scratch_pool); if (err) { if (err->apr_err == SVN_ERR_WC_PATH_NOT_FOUND) { svn_error_clear(err); return SVN_NO_ERROR; } SVN_ERR(err); } *is_rolled_out = (strcmp(xinfo->repos_root_url, repos_root_url) == 0 && strcmp(xinfo->repos_relpath, repos_relpath) == 0); return SVN_NO_ERROR; } svn_error_t * svn_wc__committable_externals_below(apr_array_header_t **externals, svn_wc_context_t *wc_ctx, const char *local_abspath, svn_depth_t depth, apr_pool_t *result_pool, apr_pool_t *scratch_pool) { apr_array_header_t *orig_externals; int i; apr_pool_t *iterpool; /* For svn_depth_files, this also fetches dirs. They are filtered later. */ SVN_ERR(svn_wc__db_committable_externals_below(&orig_externals, wc_ctx->db, local_abspath, depth != svn_depth_infinity, result_pool, scratch_pool)); if (orig_externals == NULL) return SVN_NO_ERROR; iterpool = svn_pool_create(scratch_pool); for (i = 0; i < orig_externals->nelts; i++) { svn_boolean_t is_rolled_out; svn_wc__committable_external_info_t *xinfo = APR_ARRAY_IDX(orig_externals, i, svn_wc__committable_external_info_t *); /* Discard dirs for svn_depth_files (s.a.). */ if (depth == svn_depth_files && xinfo->kind == svn_node_dir) continue; svn_pool_clear(iterpool); /* Discard those externals that are not currently checked out. */ SVN_ERR(is_external_rolled_out(&is_rolled_out, wc_ctx, xinfo, iterpool)); if (! 
is_rolled_out) continue; if (*externals == NULL) *externals = apr_array_make( result_pool, 0, sizeof(svn_wc__committable_external_info_t *)); APR_ARRAY_PUSH(*externals, svn_wc__committable_external_info_t *) = xinfo; if (depth != svn_depth_infinity) continue; /* Are there any nested externals? */ SVN_ERR(svn_wc__committable_externals_below(externals, wc_ctx, xinfo->local_abspath, svn_depth_infinity, result_pool, iterpool)); } return SVN_NO_ERROR; } svn_error_t * svn_wc__externals_defined_below(apr_hash_t **externals, svn_wc_context_t *wc_ctx, const char *local_abspath, apr_pool_t *result_pool, apr_pool_t *scratch_pool) { return svn_error_trace( svn_wc__db_externals_defined_below(externals, wc_ctx->db, local_abspath, result_pool, scratch_pool)); } svn_error_t * svn_wc__external_register(svn_wc_context_t *wc_ctx, const char *defining_abspath, const char *local_abspath, svn_node_kind_t kind, const char *repos_root_url, const char *repos_uuid, const char *repos_relpath, svn_revnum_t operational_revision, svn_revnum_t revision, apr_pool_t *scratch_pool) { SVN_ERR_ASSERT(kind == svn_node_dir); return svn_error_trace( svn_wc__db_external_add_dir(wc_ctx->db, local_abspath, defining_abspath, repos_root_url, repos_uuid, defining_abspath, repos_relpath, operational_revision, revision, NULL, scratch_pool)); } svn_error_t * svn_wc__external_remove(svn_wc_context_t *wc_ctx, const char *wri_abspath, const char *local_abspath, svn_boolean_t declaration_only, svn_cancel_func_t cancel_func, void *cancel_baton, apr_pool_t *scratch_pool) { svn_wc__db_status_t status; svn_node_kind_t kind; SVN_ERR(svn_wc__db_external_read(&status, &kind, NULL, NULL, NULL, NULL, NULL, NULL, wc_ctx->db, local_abspath, wri_abspath, scratch_pool, scratch_pool)); SVN_ERR(svn_wc__db_external_remove(wc_ctx->db, local_abspath, wri_abspath, NULL, scratch_pool)); if (declaration_only) return SVN_NO_ERROR; if (kind == svn_node_dir) SVN_ERR(svn_wc_remove_from_revision_control2(wc_ctx, local_abspath, TRUE, TRUE, 
cancel_func, cancel_baton, scratch_pool)); else { SVN_ERR(svn_wc__db_base_remove(wc_ctx->db, local_abspath, FALSE, TRUE, FALSE, 0, NULL, NULL, scratch_pool)); SVN_ERR(svn_wc__wq_run(wc_ctx->db, local_abspath, cancel_func, cancel_baton, scratch_pool)); } return SVN_NO_ERROR; } svn_error_t * svn_wc__externals_gather_definitions(apr_hash_t **externals, apr_hash_t **depths, svn_wc_context_t *wc_ctx, const char *local_abspath, svn_depth_t depth, apr_pool_t *result_pool, apr_pool_t *scratch_pool) { if (depth == svn_depth_infinity || depth == svn_depth_unknown) { return svn_error_trace( svn_wc__db_externals_gather_definitions(externals, depths, wc_ctx->db, local_abspath, result_pool, scratch_pool)); } else { const svn_string_t *value; svn_error_t *err; *externals = apr_hash_make(result_pool); local_abspath = apr_pstrdup(result_pool, local_abspath); err = svn_wc_prop_get2(&value, wc_ctx, local_abspath, SVN_PROP_EXTERNALS, result_pool, scratch_pool); if (err) { if (err->apr_err != SVN_ERR_WC_PATH_NOT_FOUND) return svn_error_trace(err); svn_error_clear(err); value = NULL; } if (value) svn_hash_sets(*externals, local_abspath, value->data); if (value && depths) { svn_depth_t node_depth; *depths = apr_hash_make(result_pool); SVN_ERR(svn_wc__db_read_info(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, &node_depth, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, wc_ctx->db, local_abspath, scratch_pool, scratch_pool)); svn_hash_sets(*depths, local_abspath, svn_depth_to_word(node_depth)); } return SVN_NO_ERROR; } } svn_error_t * svn_wc__close_db(const char *external_abspath, svn_wc_context_t *wc_ctx, apr_pool_t *scratch_pool) { SVN_ERR(svn_wc__db_drop_root(wc_ctx->db, external_abspath, scratch_pool)); return SVN_NO_ERROR; } /* Return the scheme of @a uri in @a scheme allocated from @a pool. If @a uri does not appear to be a valid URI, then @a scheme will not be updated. 
*/ static svn_error_t * uri_scheme(const char **scheme, const char *uri, apr_pool_t *pool) { apr_size_t i; for (i = 0; uri[i] && uri[i] != ':'; ++i) if (uri[i] == '/') goto error; if (i > 0 && uri[i] == ':' && uri[i+1] == '/' && uri[i+2] == '/') { *scheme = apr_pstrmemdup(pool, uri, i); return SVN_NO_ERROR; } error: return svn_error_createf(SVN_ERR_BAD_URL, 0, _("URL '%s' does not begin with a scheme"), uri); } svn_error_t * svn_wc__resolve_relative_external_url(const char **resolved_url, const svn_wc_external_item2_t *item, const char *repos_root_url, const char *parent_dir_url, apr_pool_t *result_pool, apr_pool_t *scratch_pool) { const char *url = item->url; apr_uri_t parent_dir_uri; apr_status_t status; *resolved_url = item->url; /* If the URL is already absolute, there is nothing to do. */ if (svn_path_is_url(url)) { /* "http://server/path" */ *resolved_url = svn_uri_canonicalize(url, result_pool); return SVN_NO_ERROR; } if (url[0] == '/') { /* "/path", "//path", and "///path" */ int num_leading_slashes = 1; if (url[1] == '/') { num_leading_slashes++; if (url[2] == '/') num_leading_slashes++; } /* "//schema-relative" and in some cases "///schema-relative". This last format is supported on file:// schema relative. */ url = apr_pstrcat(scratch_pool, apr_pstrndup(scratch_pool, url, num_leading_slashes), svn_relpath_canonicalize(url + num_leading_slashes, scratch_pool), SVN_VA_NULL); } else { /* "^/path" and "../path" */ url = svn_relpath_canonicalize(url, scratch_pool); } /* Parse the parent directory URL into its parts. */ status = apr_uri_parse(scratch_pool, parent_dir_url, &parent_dir_uri); if (status) return svn_error_createf(SVN_ERR_BAD_URL, 0, _("Illegal parent directory URL '%s'"), parent_dir_url); /* If the parent directory URL is at the server root, then the URL may have no / after the hostname so apr_uri_parse() will leave the URL's path as NULL. */ if (! 
parent_dir_uri.path) parent_dir_uri.path = apr_pstrmemdup(scratch_pool, "/", 1); parent_dir_uri.query = NULL; parent_dir_uri.fragment = NULL; /* Handle URLs relative to the current directory or to the repository root. The backpaths may only remove path elements, not the hostname. This allows an external to refer to another repository in the same server relative to the location of this repository, say using SVNParentPath. */ if ((0 == strncmp("../", url, 3)) || (0 == strncmp("^/", url, 2))) { apr_array_header_t *base_components; apr_array_header_t *relative_components; int i; /* Decompose either the parent directory's URL path or the repository root's URL path into components. */ if (0 == strncmp("../", url, 3)) { base_components = svn_path_decompose(parent_dir_uri.path, scratch_pool); relative_components = svn_path_decompose(url, scratch_pool); } else { apr_uri_t repos_root_uri; status = apr_uri_parse(scratch_pool, repos_root_url, &repos_root_uri); if (status) return svn_error_createf(SVN_ERR_BAD_URL, 0, _("Illegal repository root URL '%s'"), repos_root_url); /* If the repository root URL is at the server root, then the URL may have no / after the hostname so apr_uri_parse() will leave the URL's path as NULL. */ if (! repos_root_uri.path) repos_root_uri.path = apr_pstrmemdup(scratch_pool, "/", 1); base_components = svn_path_decompose(repos_root_uri.path, scratch_pool); relative_components = svn_path_decompose(url + 2, scratch_pool); } for (i = 0; i < relative_components->nelts; ++i) { const char *component = APR_ARRAY_IDX(relative_components, i, const char *); if (0 == strcmp("..", component)) { /* Constructing the final absolute URL together with apr_uri_unparse() requires that the path be absolute, so only pop a component if the component being popped is not the component for the root directory. 
*/ if (base_components->nelts > 1) apr_array_pop(base_components); } else APR_ARRAY_PUSH(base_components, const char *) = component; } parent_dir_uri.path = (char *)svn_path_compose(base_components, scratch_pool); *resolved_url = svn_uri_canonicalize(apr_uri_unparse(scratch_pool, &parent_dir_uri, 0), result_pool); return SVN_NO_ERROR; } /* The remaining URLs are relative to either the scheme or server root and can only refer to locations inside that scope, so backpaths are not allowed. */ if (svn_path_is_backpath_present(url)) return svn_error_createf(SVN_ERR_BAD_URL, 0, _("The external relative URL '%s' cannot have " "backpaths, i.e. '..'"), item->url); /* Relative to the scheme: Build a new URL from the parts we know. */ if (0 == strncmp("//", url, 2)) { const char *scheme; SVN_ERR(uri_scheme(&scheme, repos_root_url, scratch_pool)); *resolved_url = svn_uri_canonicalize(apr_pstrcat(scratch_pool, scheme, ":", url, SVN_VA_NULL), result_pool); return SVN_NO_ERROR; } /* Relative to the server root: Just replace the path portion of the parent's URL. */ if (url[0] == '/') { parent_dir_uri.path = (char *)url; *resolved_url = svn_uri_canonicalize(apr_uri_unparse(scratch_pool, &parent_dir_uri, 0), result_pool); return SVN_NO_ERROR; } return svn_error_createf(SVN_ERR_BAD_URL, 0, _("Unrecognized format for the relative external " "URL '%s'"), item->url); }
950c47beb00266c1bc7572c287abca2a32d7d5af
48c587d2d3f368f99afc925a2443418be555495c
/Source/UESVON/Public/SVONLeafNode.h
1714dac0b45d532e494082c340b6b43e1189bdca
[ "MIT" ]
permissive
midgen/uesvon
8ae6de0474b6a76a95fd6468a79e198e1d84fb41
9f1ec6b8d327388021474bf79a9ea11e425a24b6
refs/heads/master
2023-06-10T14:14:44.658974
2023-05-27T10:16:16
2023-05-27T10:16:16
125,816,282
199
53
MIT
2021-05-10T08:57:43
2018-03-19T07:12:09
C++
UTF-8
C
false
false
976
h
SVONLeafNode.h
#pragma once #include "UESVON/Private/libmorton/morton.h" #include "UESVON/Public/SVONDefines.h" struct UESVON_API SVONLeafNode { uint_fast64_t myVoxelGrid = 0; inline bool GetNodeAt(uint_fast32_t aX, uint_fast32_t aY, uint_fast32_t aZ) const { uint_fast64_t index = morton3D_64_encode(aX, aY, aZ); return (myVoxelGrid & (1ULL << index)) != 0; } inline void SetNodeAt(uint_fast32_t aX, uint_fast32_t aY, uint_fast32_t aZ) { uint_fast64_t index = morton3D_64_encode(aX, aY, aZ); myVoxelGrid |= 1ULL << index; } inline void SetNode(uint8 aIndex) { myVoxelGrid |= 1ULL << aIndex; } inline bool GetNode(mortoncode_t aIndex) const { return (myVoxelGrid & (1ULL << aIndex)) != 0; } inline bool IsCompletelyBlocked() const { return myVoxelGrid == -1; } inline bool IsEmpty() const { return myVoxelGrid == 0; } }; FORCEINLINE FArchive& operator<<(FArchive& Ar, SVONLeafNode& aSVONLeafNode) { Ar << aSVONLeafNode.myVoxelGrid; return Ar; }
d1415678fdddbb60e379b1e966e20aa7acaf2d07
035660e8cc10571ebbd0d4393fef063bb7eb98f6
/src/overlays/actors/ovl_En_Bubble/z_en_bubble.h
86e2358b4093dca3814a82f5f66cf57634650512
[]
no_license
zeldaret/mm
f163a5e7c4314105777369fa7671ce9c2a99922a
4ae00e909e74044f05155683b49d2561f91de7ba
refs/heads/master
2023-08-06T07:22:04.912966
2023-08-04T20:36:03
2023-08-04T20:36:03
247,875,852
915
328
null
2023-09-14T11:48:59
2020-03-17T04:03:07
C
UTF-8
C
false
false
1,209
h
z_en_bubble.h
#ifndef Z_EN_BUBBLE_H
#define Z_EN_BUBBLE_H

#include "global.h"
#include "objects/object_bubble/object_bubble.h"

struct EnBubble;

/* Per-state update callback; swapped as the actor changes behavior. */
typedef void (*EnBubbleActionFunc)(struct EnBubble*, PlayState*);

/* Bubble enemy actor instance. Offset comments document the struct layout
 * relative to the start of the actor instance. */
typedef struct EnBubble {
    /* 0x000 */ Actor actor;
    /* 0x144 */ EnBubbleActionFunc actionFunc;
    /* 0x148 */ ColliderJntSph colliderSphere;
    /* 0x168 */ ColliderJntSphElement colliderElements[2];
    /* 0x1F4 */ Vec3f unk1F4; // set but never used
    /* 0x1F4 */ Vec3f unk1F8; // randomly generated, set but never used
    /* NOTE(review): unk1F4 and unk1F8 are both labeled 0x1F4, which cannot
     * both hold — a 12-byte Vec3f at 0x1F4 already reaches timer's 0x200.
     * The member names and the 0x200 timer offset disagree about which
     * label is wrong; verify against the actor's actual layout before
     * correcting either comment. */
    /* 0x200 */ s16 timer; // set to 8 when about to pop
    /* 0x202 */ s16 explosionCountdown;
    /* 0x204 */ UNK_TYPE1 pad204[4]; // unused
    /* 0x208 */ f32 modelRotSpeed;
    /* 0x20C */ f32 modelEllipticity;
    /* 0x210 */ f32 unk_210; // set to 1.0f, never used
    /* 0x214 */ f32 unk_214; // set to 1.0f, never used
    /* 0x218 */ f32 modelWidth;
    /* 0x21C */ f32 modelHeight;
    /* 0x220 */ u8 bounceCount;
    /* 0x224 */ Vec3f bounceDirection;
    /* 0x230 */ Vec3f velocityFromBounce;
    /* 0x23C */ Vec3f normalizedBumpVelocity;
    /* 0x248 */ Vec3f velocityFromBump;
    /* 0x254 */ f32 yVelocity;
} EnBubble; // size = 0x258

#endif // Z_EN_BUBBLE_H
3219c2eb2cb0efa65e801d7f6cb0355e80571a6a
5cb3030ed86e12332d109e1ab39f92c0c866677d
/src/encoder_state-bitstream.c
e4b41fe1a12216546f7a9a872f59a9be1ce56e5b
[ "BSD-3-Clause", "ISC" ]
permissive
ultravideo/kvazaar
9c8ac79644b4782e0c9743f6518f6997e2d9b774
aab6aa9b349d2768f9371ef58680298c200e47e6
refs/heads/master
2023-08-24T03:29:58.344475
2023-07-17T05:51:47
2023-07-17T05:51:47
16,339,396
629
211
BSD-3-Clause
2023-07-03T13:01:52
2014-01-29T08:35:53
C
UTF-8
C
false
false
43,473
c
encoder_state-bitstream.c
/***************************************************************************** * This file is part of Kvazaar HEVC encoder. * * Copyright (c) 2021, Tampere University, ITU/ISO/IEC, project contributors * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * * Neither the name of the Tampere University or ITU/ISO/IEC nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* INCLUDING NEGLIGENCE OR OTHERWISE ARISING IN ANY WAY OUT OF THE USE OF THIS ****************************************************************************/ #include "encoder_state-bitstream.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include "bitstream.h" #include "cabac.h" #include "checkpoint.h" #include "cu.h" #include "encoder.h" #include "encoder_state-geometry.h" #include "encoderstate.h" #include "imagelist.h" #include "kvazaar.h" #include "kvz_math.h" #include "nal.h" #include "scalinglist.h" #include "sei.h" #include "tables.h" #include "threadqueue.h" #include "videoframe.h" #include "rate_control.h" static void encoder_state_write_bitstream_aud(encoder_state_t *const state) { bitstream_t *const stream = &state->stream; kvz_nal_write(stream, KVZ_NAL_AUD_NUT, 0, 1); uint8_t pic_type = state->frame->slicetype == KVZ_SLICE_I ? 0 : state->frame->slicetype == KVZ_SLICE_P ? 1 : 2; WRITE_U(stream, pic_type, 3, "pic_type"); kvz_bitstream_add_rbsp_trailing_bits(stream); } static void encoder_state_write_bitstream_PTL(bitstream_t *stream, encoder_state_t *const state) { // PTL // Profile Tier WRITE_U(stream, 0, 2, "general_profile_space"); WRITE_U(stream, state->encoder_control->cfg.high_tier, 1, "general_tier_flag"); // Main Profile == 1, Main 10 profile == 2 WRITE_U(stream, (state->encoder_control->bitdepth == 8) ? 1 : 2, 5, "general_profile_idc"); /* Compatibility flags should be set at general_profile_idc * (so with general_profile_idc = 1, compatibility_flag[1] should be 1) * According to specification, when compatibility_flag[1] is set, * compatibility_flag[2] should be set too. 
*/ WRITE_U(stream, 3 << 29, 32, "general_profile_compatibility_flag[]"); WRITE_U(stream, 1, 1, "general_progressive_source_flag"); WRITE_U(stream, state->encoder_control->in.source_scan_type != 0, 1, "general_interlaced_source_flag"); WRITE_U(stream, 0, 1, "general_non_packed_constraint_flag"); WRITE_U(stream, 0, 1, "general_frame_only_constraint_flag"); WRITE_U(stream, 0, 32, "XXX_reserved_zero_44bits[0..31]"); WRITE_U(stream, 0, 12, "XXX_reserved_zero_44bits[32..43]"); // end Profile Tier uint8_t level = state->encoder_control->cfg.level; WRITE_U(stream, level * 3, 8, "general_level_idc"); WRITE_U(stream, 0, 1, "sub_layer_profile_present_flag"); WRITE_U(stream, 0, 1, "sub_layer_level_present_flag"); for (int i = 1; i < 8; i++) { WRITE_U(stream, 0, 2, "reserved_zero_2bits"); } // end PTL } static uint8_t max_required_dpb_size(const encoder_control_t * const encoder) { int max_buffer = 1; for (int g = 0; g < encoder->cfg.gop_len; ++g) { int neg_refs = encoder->cfg.gop[g].ref_neg_count; int pos_refs = encoder->cfg.gop[g].ref_pos_count; if (neg_refs + pos_refs + 1 > max_buffer) max_buffer = neg_refs + pos_refs + 1; } if (encoder->cfg.gop_len == 0) max_buffer = encoder->cfg.ref_frames + 1; return max_buffer; } static uint8_t max_num_reorder_pics(const encoder_control_t * const encoder) { return encoder->cfg.gop_lowdelay ? 
0 : MAX(encoder->cfg.gop_len - 1, 0); } static void encoder_state_write_bitstream_vid_parameter_set(bitstream_t* stream, encoder_state_t * const state) { #ifdef KVZ_DEBUG printf("=========== Video Parameter Set ID: 0 ===========\n"); #endif const encoder_control_t* encoder = state->encoder_control; WRITE_U(stream, 0, 4, "vps_video_parameter_set_id"); WRITE_U(stream, 3, 2, "vps_reserved_three_2bits" ); WRITE_U(stream, 0, 6, "vps_reserved_zero_6bits" ); WRITE_U(stream, 1, 3, "vps_max_sub_layers_minus1"); WRITE_U(stream, 0, 1, "vps_temporal_id_nesting_flag"); WRITE_U(stream, 0xffff, 16, "vps_reserved_ffff_16bits"); encoder_state_write_bitstream_PTL(stream, state); WRITE_U(stream, 0, 1, "vps_sub_layer_ordering_info_present_flag"); int max_buffer = max_required_dpb_size(encoder); int max_reorder = max_num_reorder_pics(encoder); if (max_buffer - 1 < max_reorder) max_buffer = max_reorder + 1; WRITE_UE(stream, max_buffer - 1, "vps_max_dec_pic_buffering_minus1"); WRITE_UE(stream, max_reorder, "vps_max_num_reorder_pics"); WRITE_UE(stream, 0, "vps_max_latency_increase"); WRITE_U(stream, 0, 6, "vps_max_nuh_reserved_zero_layer_id"); WRITE_UE(stream, 0, "vps_max_op_sets_minus1"); WRITE_U(stream, 0, 1, "vps_timing_info_present_flag"); //IF timing info //END IF WRITE_U(stream, 0, 1, "vps_extension_flag"); kvz_bitstream_add_rbsp_trailing_bits(stream); } static void encoder_state_write_bitstream_scaling_list(bitstream_t *stream, encoder_state_t * const state) { const encoder_control_t * const encoder = state->encoder_control; uint32_t size_id; for (size_id = 0; size_id < SCALING_LIST_SIZE_NUM; size_id++) { int32_t list_id; for (list_id = 0; list_id < kvz_g_scaling_list_num[size_id]; list_id++) { uint8_t scaling_list_pred_mode_flag = 1; int32_t pred_list_idx; int32_t i; uint32_t ref_matrix_id = UINT32_MAX; for (pred_list_idx = list_id; pred_list_idx >= 0; pred_list_idx--) { const int32_t * const pred_list = (list_id == pred_list_idx) ? 
kvz_scalinglist_get_default(size_id, pred_list_idx) : encoder->scaling_list.scaling_list_coeff[size_id][pred_list_idx]; if (!memcmp(encoder->scaling_list.scaling_list_coeff[size_id][list_id], pred_list, sizeof(int32_t) * MIN(8, kvz_g_scaling_list_size[size_id])) && ((size_id < SCALING_LIST_16x16) || (encoder->scaling_list.scaling_list_dc[size_id][list_id] == encoder->scaling_list.scaling_list_dc[size_id][pred_list_idx]))) { ref_matrix_id = pred_list_idx; scaling_list_pred_mode_flag = 0; break; } } WRITE_U(stream, scaling_list_pred_mode_flag, 1, "scaling_list_pred_mode_flag" ); if (!scaling_list_pred_mode_flag) { WRITE_UE(stream, list_id - ref_matrix_id, "scaling_list_pred_matrix_id_delta"); } else { int32_t delta; const int32_t coef_num = MIN(MAX_MATRIX_COEF_NUM, kvz_g_scaling_list_size[size_id]); const uint32_t * const scan_cg = (size_id == 0) ? g_sig_last_scan_16x16 : g_sig_last_scan_32x32; int32_t next_coef = 8; const int32_t * const coef_list = encoder->scaling_list.scaling_list_coeff[size_id][list_id]; if (size_id >= SCALING_LIST_16x16) { WRITE_SE(stream, encoder->scaling_list.scaling_list_dc[size_id][list_id] - 8, "scaling_list_dc_coef_minus8"); next_coef = encoder->scaling_list.scaling_list_dc[size_id][list_id]; } for (i = 0; i < coef_num; i++) { delta = coef_list[scan_cg[i]] - next_coef; next_coef = coef_list[scan_cg[i]]; if (delta > 127) delta -= 256; if (delta < -128) delta += 256; WRITE_SE(stream, delta, "scaling_list_delta_coef"); } } } } } static void encoder_state_write_bitstream_VUI(bitstream_t *stream, encoder_state_t * const state) { const encoder_control_t * const encoder = state->encoder_control; #ifdef KVZ_DEBUG printf("=========== VUI Set ID: 0 ===========\n"); #endif if (encoder->cfg.vui.sar_width > 0 && encoder->cfg.vui.sar_height > 0) { int i; static const struct { uint8_t width; uint8_t height; uint8_t idc; } sar[] = { // aspect_ratio_idc = 0 -> unspecified { 1, 1, 1 }, { 12, 11, 2 }, { 10, 11, 3 }, { 16, 11, 4 }, { 40, 33, 5 }, { 24, 11, 6 
}, { 20, 11, 7 }, { 32, 11, 8 }, { 80, 33, 9 }, { 18, 11, 10}, { 15, 11, 11}, { 64, 33, 12}, {160, 99, 13}, { 4, 3, 14}, { 3, 2, 15}, { 2, 1, 16}, // aspect_ratio_idc = [17..254] -> reserved { 0, 0, 255 } }; for (i = 0; sar[i].idc != 255; i++) if (sar[i].width == encoder->cfg.vui.sar_width && sar[i].height == encoder->cfg.vui.sar_height) break; WRITE_U(stream, 1, 1, "aspect_ratio_info_present_flag"); WRITE_U(stream, sar[i].idc, 8, "aspect_ratio_idc"); if (sar[i].idc == 255) { // EXTENDED_SAR WRITE_U(stream, encoder->cfg.vui.sar_width, 16, "sar_width"); WRITE_U(stream, encoder->cfg.vui.sar_height, 16, "sar_height"); } } else WRITE_U(stream, 0, 1, "aspect_ratio_info_present_flag"); //IF aspect ratio info //ENDIF if (encoder->cfg.vui.overscan > 0) { WRITE_U(stream, 1, 1, "overscan_info_present_flag"); WRITE_U(stream, encoder->cfg.vui.overscan - 1, 1, "overscan_appropriate_flag"); } else WRITE_U(stream, 0, 1, "overscan_info_present_flag"); //IF overscan info //ENDIF if (encoder->cfg.vui.videoformat != 5 || encoder->cfg.vui.fullrange != 0 || encoder->cfg.vui.colorprim != 2 || encoder->cfg.vui.transfer != 2 || encoder->cfg.vui.colormatrix != 2) { WRITE_U(stream, 1, 1, "video_signal_type_present_flag"); WRITE_U(stream, encoder->cfg.vui.videoformat, 3, "chroma_format"); WRITE_U(stream, encoder->cfg.vui.fullrange, 1, "video_full_range_flag"); if (encoder->cfg.vui.colorprim != 2 || encoder->cfg.vui.transfer != 2 || encoder->cfg.vui.colormatrix != 2) { WRITE_U(stream, 1, 1, "colour_description_present_flag"); WRITE_U(stream, encoder->cfg.vui.colorprim, 8, "colour_primaries"); WRITE_U(stream, encoder->cfg.vui.transfer, 8, "transfer_characteristics"); WRITE_U(stream, encoder->cfg.vui.colormatrix, 8, "matrix_coeffs"); } else WRITE_U(stream, 0, 1, "colour_description_present_flag"); } else WRITE_U(stream, 0, 1, "video_signal_type_present_flag"); //IF video type //ENDIF if (encoder->cfg.vui.chroma_loc > 0) { WRITE_U(stream, 1, 1, "chroma_loc_info_present_flag"); WRITE_UE(stream, 
encoder->cfg.vui.chroma_loc, "chroma_sample_loc_type_top_field"); WRITE_UE(stream, encoder->cfg.vui.chroma_loc, "chroma_sample_loc_type_bottom_field"); } else WRITE_U(stream, 0, 1, "chroma_loc_info_present_flag"); //IF chroma loc info //ENDIF WRITE_U(stream, 0, 1, "neutral_chroma_indication_flag"); WRITE_U(stream, encoder->vui.field_seq_flag, 1, "field_seq_flag"); // 0: frames, 1: fields WRITE_U(stream, encoder->vui.frame_field_info_present_flag, 1, "frame_field_info_present_flag"); WRITE_U(stream, 0, 1, "default_display_window_flag"); //IF default display window //ENDIF WRITE_U(stream, encoder->vui.timing_info_present_flag, 1, "vui_timing_info_present_flag"); if (encoder->vui.timing_info_present_flag) { WRITE_U(stream, encoder->vui.num_units_in_tick, 32, "vui_num_units_in_tick"); WRITE_U(stream, encoder->vui.time_scale, 32, "vui_time_scale"); WRITE_U(stream, 0, 1, "vui_poc_proportional_to_timing_flag"); WRITE_U(stream, 0, 1, "vui_hrd_parameters_present_flag"); } WRITE_U(stream, 0, 1, "bitstream_restriction_flag"); //IF bitstream restriction //ENDIF } static void encoder_state_write_bitstream_SPS_extension(bitstream_t *stream, encoder_state_t * const state) { const kvz_config *cfg = &state->encoder_control->cfg; if (cfg->implicit_rdpcm && cfg->lossless) { WRITE_U(stream, 1, 1, "sps_extension_present_flag"); WRITE_U(stream, 1, 1, "sps_range_extension_flag"); WRITE_U(stream, 0, 1, "sps_multilayer_extension_flag"); WRITE_U(stream, 0, 1, "sps_3d_extension_flag"); WRITE_U(stream, 0, 5, "sps_extension_5bits"); WRITE_U(stream, 0, 1, "transform_skip_rotation_enabled_flag"); WRITE_U(stream, 0, 1, "transform_skip_context_enabled_flag"); WRITE_U(stream, 1, 1, "implicit_rdpcm_enabled_flag"); WRITE_U(stream, 0, 1, "explicit_rdpcm_enabled_flag"); WRITE_U(stream, 0, 1, "extended_precision_processing_flag"); WRITE_U(stream, 0, 1, "intra_smoothing_disabled_flag"); WRITE_U(stream, 0, 1, "high_precision_offsets_enabled_flag"); WRITE_U(stream, 0, 1, 
"persistent_rice_adaptation_enabled_flag"); WRITE_U(stream, 0, 1, "cabac_bypass_alignment_enabled_flag"); } else { WRITE_U(stream, 0, 1, "sps_extension_present_flag"); } } static void encoder_state_write_bitstream_seq_parameter_set(bitstream_t* stream, encoder_state_t * const state) { const encoder_control_t * encoder = state->encoder_control; #ifdef KVZ_DEBUG printf("=========== Sequence Parameter Set ID: 0 ===========\n"); #endif // TODO: profile IDC and level IDC should be defined later on WRITE_U(stream, 0, 4, "sps_video_parameter_set_id"); WRITE_U(stream, 1, 3, "sps_max_sub_layers_minus1"); WRITE_U(stream, 0, 1, "sps_temporal_id_nesting_flag"); encoder_state_write_bitstream_PTL(stream, state); WRITE_UE(stream, 0, "sps_seq_parameter_set_id"); WRITE_UE(stream, encoder->chroma_format, "chroma_format_idc"); if (encoder->chroma_format == KVZ_CSP_444) { WRITE_U(stream, 0, 1, "separate_colour_plane_flag"); } if (encoder->cfg.partial_coding.fullWidth != 0) { WRITE_UE(stream, encoder->cfg.partial_coding.fullWidth, "pic_width_in_luma_samples"); WRITE_UE(stream, encoder->cfg.partial_coding.fullHeight, "pic_height_in_luma_samples"); } else { WRITE_UE(stream, encoder->in.width, "pic_width_in_luma_samples"); WRITE_UE(stream, encoder->in.height, "pic_height_in_luma_samples"); } if (encoder->in.width != encoder->in.real_width || encoder->in.height != encoder->in.real_height) { // The standard does not seem to allow setting conf_win values such that // the number of luma samples is not a multiple of 2. Options are to either // hide one line or show an extra line of non-video. Neither seems like a // very good option, so let's not even try. 
assert(!(encoder->in.width % 2)); WRITE_U(stream, 1, 1, "conformance_window_flag"); WRITE_UE(stream, 0, "conf_win_left_offset"); WRITE_UE(stream, (encoder->in.width - encoder->in.real_width) >> 1, "conf_win_right_offset"); WRITE_UE(stream, 0, "conf_win_top_offset"); WRITE_UE(stream, (encoder->in.height - encoder->in.real_height) >> 1, "conf_win_bottom_offset"); } else { WRITE_U(stream, 0, 1, "conformance_window_flag"); } //IF window flag //END IF WRITE_UE(stream, encoder->bitdepth-8, "bit_depth_luma_minus8"); WRITE_UE(stream, encoder->bitdepth-8, "bit_depth_chroma_minus8"); WRITE_UE(stream, encoder->poc_lsb_bits - 4, "log2_max_pic_order_cnt_lsb_minus4"); WRITE_U(stream, 0, 1, "sps_sub_layer_ordering_info_present_flag"); //for each layer int max_buffer = max_required_dpb_size(encoder); int max_reorder = max_num_reorder_pics(encoder); if (max_buffer - 1 < max_reorder) max_buffer = max_reorder + 1; WRITE_UE(stream, max_buffer - 1, "sps_max_dec_pic_buffering_minus1"); WRITE_UE(stream, max_reorder, "sps_max_num_reorder_pics"); WRITE_UE(stream, 0, "sps_max_latency_increase_plus1"); //end for WRITE_UE(stream, MIN_SIZE-3, "log2_min_coding_block_size_minus3"); WRITE_UE(stream, MAX_DEPTH, "log2_diff_max_min_coding_block_size"); WRITE_UE(stream, 0, "log2_min_transform_block_size_minus2"); // 4x4 WRITE_UE(stream, 3, "log2_diff_max_min_transform_block_size"); // 4x4...32x32 WRITE_UE(stream, encoder->tr_depth_inter, "max_transform_hierarchy_depth_inter"); WRITE_UE(stream, encoder->cfg.tr_depth_intra, "max_transform_hierarchy_depth_intra"); // scaling list WRITE_U(stream, encoder->scaling_list.enable, 1, "scaling_list_enable_flag"); if (encoder->scaling_list.enable) { // Signal scaling list data for custom lists WRITE_U(stream, (encoder->cfg.scaling_list == KVZ_SCALING_LIST_CUSTOM) ? 
1 : 0, 1, "sps_scaling_list_data_present_flag"); if (encoder->cfg.scaling_list == KVZ_SCALING_LIST_CUSTOM) { encoder_state_write_bitstream_scaling_list(stream, state); } } WRITE_U(stream, (encoder->cfg.amp_enable ? 1 : 0), 1, "amp_enabled_flag"); WRITE_U(stream, encoder->cfg.sao_type ? 1 : 0, 1, "sample_adaptive_offset_enabled_flag"); WRITE_U(stream, ENABLE_PCM, 1, "pcm_enabled_flag"); #if ENABLE_PCM == 1 WRITE_U(stream, 7, 4, "pcm_sample_bit_depth_luma_minus1"); WRITE_U(stream, 7, 4, "pcm_sample_bit_depth_chroma_minus1"); WRITE_UE(stream, 0, "log2_min_pcm_coding_block_size_minus3"); WRITE_UE(stream, 2, "log2_diff_max_min_pcm_coding_block_size"); WRITE_U(stream, 1, 1, "pcm_loop_filter_disable_flag"); #endif WRITE_UE(stream, 0, "num_short_term_ref_pic_sets"); //IF num short term ref pic sets //ENDIF WRITE_U(stream, 0, 1, "long_term_ref_pics_present_flag"); //IF long_term_ref_pics_present //ENDIF WRITE_U(stream, state->encoder_control->cfg.tmvp_enable, 1, "sps_temporal_mvp_enable_flag"); WRITE_U(stream, 0, 1, "sps_strong_intra_smoothing_enable_flag"); WRITE_U(stream, 1, 1, "vui_parameters_present_flag"); encoder_state_write_bitstream_VUI(stream, state); encoder_state_write_bitstream_SPS_extension(stream, state); kvz_bitstream_add_rbsp_trailing_bits(stream); } static void encoder_state_write_bitstream_pic_parameter_set(bitstream_t* stream, encoder_state_t * const state) { const encoder_control_t * const encoder = state->encoder_control; #ifdef KVZ_DEBUG printf("=========== Picture Parameter Set ID: 0 ===========\n"); #endif WRITE_UE(stream, 0, "pic_parameter_set_id"); WRITE_UE(stream, 0, "seq_parameter_set_id"); WRITE_U(stream, encoder->pps.dependent_slice_segments_enabled_flag, 1, "dependent_slice_segments_enabled_flag"); WRITE_U(stream, 0, 1, "output_flag_present_flag"); WRITE_U(stream, 0, 3, "num_extra_slice_header_bits"); WRITE_U(stream, encoder->cfg.signhide_enable, 1, "sign_data_hiding_flag"); WRITE_U(stream, 0, 1, "cabac_init_present_flag"); WRITE_UE(stream, 0, 
"num_ref_idx_l0_default_active_minus1"); WRITE_UE(stream, 0, "num_ref_idx_l1_default_active_minus1"); // If tiles and slices = tiles is enabled, signal QP in the slice header. Keeping the PPS constant for OMAF etc // Keep QP constant here also if it will be only set at CU level. bool constant_qp_in_pps = ((encoder->cfg.slices & KVZ_SLICES_TILES) && encoder->tiles_enable) || encoder->cfg.set_qp_in_cu; WRITE_SE(stream, constant_qp_in_pps ? 0 : (((int8_t)encoder->cfg.qp) - 26), "pic_init_qp_minus26"); WRITE_U(stream, 0, 1, "constrained_intra_pred_flag"); WRITE_U(stream, encoder->cfg.trskip_enable, 1, "transform_skip_enabled_flag"); // Check all the conditions for setting cu_qp_delta_enabled_flag here, since state->frame->max_qp_delta_depth might not be set yet. if (encoder->cfg.target_bitrate > 0 || encoder->cfg.erp_aqp || encoder->cfg.roi.file_path || encoder->cfg.set_qp_in_cu || encoder->cfg.vaq || (state->tile->frame->source && state->tile->frame->source->roi.roi_array) ) { // Use separate QP for each LCU when rate control is enabled. 
WRITE_U(stream, 1, 1, "cu_qp_delta_enabled_flag"); WRITE_UE(stream, state->frame->max_qp_delta_depth, "diff_cu_qp_delta_depth"); } else { WRITE_U(stream, 0, 1, "cu_qp_delta_enabled_flag"); } //TODO: add QP offsets WRITE_SE(stream, 0, "pps_cb_qp_offset"); WRITE_SE(stream, 0, "pps_cr_qp_offset"); WRITE_U(stream, 0, 1, "pps_slice_chroma_qp_offsets_present_flag"); WRITE_U(stream, 0, 1, "weighted_pred_flag"); WRITE_U(stream, 0, 1, "weighted_bipred_idc"); //WRITE_U(stream, 0, 1, "dependent_slices_enabled_flag"); WRITE_U(stream, encoder->cfg.lossless, 1, "transquant_bypass_enable_flag"); WRITE_U(stream, encoder->tiles_enable, 1, "tiles_enabled_flag"); //wavefronts WRITE_U(stream, encoder->cfg.wpp, 1, "entropy_coding_sync_enabled_flag"); if (encoder->tiles_enable) { WRITE_UE(stream, encoder->cfg.tiles_width_count - 1, "num_tile_columns_minus1"); WRITE_UE(stream, encoder->cfg.tiles_height_count - 1, "num_tile_rows_minus1"); WRITE_U(stream, encoder->tiles_uniform_spacing_flag, 1, "uniform_spacing_flag"); if (!encoder->tiles_uniform_spacing_flag) { int i; for (i = 0; i < encoder->cfg.tiles_width_count - 1; ++i) { WRITE_UE(stream, encoder->tiles_col_width[i] - 1, "column_width_minus1[...]"); } for (i = 0; i < encoder->cfg.tiles_height_count - 1; ++i) { WRITE_UE(stream, encoder->tiles_row_height[i] - 1, "row_height_minus1[...]"); } } WRITE_U(stream, 0, 1, "loop_filter_across_tiles_enabled_flag"); } WRITE_U(stream, 0, 1, "loop_filter_across_slice_flag"); WRITE_U(stream, 1, 1, "deblocking_filter_control_present_flag"); //IF deblocking_filter WRITE_U(stream, 0, 1, "deblocking_filter_override_enabled_flag"); WRITE_U(stream, encoder->cfg.deblock_enable ? 
0 : 1, 1, "pps_disable_deblocking_filter_flag"); //IF !disabled if (encoder->cfg.deblock_enable) { WRITE_SE(stream, encoder->cfg.deblock_beta, "beta_offset_div2"); WRITE_SE(stream, encoder->cfg.deblock_tc, "tc_offset_div2"); } //ENDIF //ENDIF WRITE_U(stream, 0, 1, "pps_scaling_list_data_present_flag"); //IF scaling_list //ENDIF WRITE_U(stream, 0, 1, "lists_modification_present_flag"); WRITE_UE(stream, 0, "log2_parallel_merge_level_minus2"); WRITE_U(stream, 0, 1, "slice_segment_header_extension_present_flag"); WRITE_U(stream, 0, 1, "pps_extension_flag"); kvz_bitstream_add_rbsp_trailing_bits(stream); } static void sei_write_payload_type(bitstream_t *stream, const int payloadType) { int i; for (i = 0; i <= payloadType - 255; i += 255) { WRITE_U(stream, FF_BYTE, 8, "ff_byte"); } WRITE_U(stream, payloadType - i, 8, "last_payload_type_byte"); } static void sei_write_payload_size(bitstream_t *stream, const int payloadSize) { int i; for (i = 0; i <= payloadSize - 255; i += 255) { WRITE_U(stream, FF_BYTE, 8, "ff_byte"); } WRITE_U(stream, payloadSize - i, 8, "last_payload_size_byte"); } static void sei_write_user_defined_unregistered(bitstream_t *stream, const uint8_t * const uuid, const uint8_t * const user_data_payload_byte, const int length) { int i; sei_write_payload_type(stream, SEI_PAYLOAD_TYPE_USER_DATA_UNREGISTERED); sei_write_payload_size(stream, (sizeof encoder_info_uuid) + length); for (i = 0; i < 16; i++) { WRITE_U(stream, uuid[i], 8, "uuid_iso_iec_11578"); } for (i = 0; i < length; i++) { WRITE_U(stream, user_data_payload_byte[i], 8, "user_data_payload_byte"); } kvz_bitstream_align(stream); } static void encoder_state_write_bitstream_prefix_sei_version(encoder_state_t * const state) { #define STR_BUF_LEN 1000 bitstream_t * const stream = &state->stream; int length; char buf[STR_BUF_LEN] = { 0 }; char *s = buf; const kvz_config * const cfg = &state->encoder_control->cfg; // user_data_payload_byte s += sprintf(s, "Kvazaar HEVC Encoder v. 
" VERSION_STRING " - " "Copyleft 2012-2015 - http://ultravideo.cs.tut.fi/ - options:"); s += sprintf(s, " %dx%d", cfg->width, cfg->height); s += sprintf(s, " deblock=%d:%d:%d", cfg->deblock_enable, cfg->deblock_beta, cfg->deblock_tc); s += sprintf(s, " sao=%d", cfg->sao_type); s += sprintf(s, " intra_period=%d", cfg->intra_period); s += sprintf(s, " qp=%d", cfg->qp); s += sprintf(s, " ref=%d", cfg->ref_frames); length = (int)(s - buf + 1); // length, +1 for \0 // Assert this so that in the future if the message gets longer, we remember // to increase the buf len. Divide by 2 for margin. assert(length < STR_BUF_LEN / 2); sei_write_user_defined_unregistered(stream, encoder_info_uuid, (uint8_t*)buf, length); #undef STR_BUF_LEN } /* static void encoder_state_write_active_parameter_sets_sei_message(encoder_state_t * const state) { const encoder_control_t * const encoder = state->encoder_control; bitstream_t * const stream = &state->stream; int i = 0; int active_vps_id = 0; int self_contained_cvs_flag = 0; int no_parameter_set_update_flag = 0; int num_sps_ids_minus1 = 0; int layer_sps_idx = 0; int active_seq_parameter_set_id = 0; int vps_base_layer_internal_flag = 0; int max_layers_minus1 = 0; WRITE_U(stream, 129, 8, "last_payload_type_byte"); //active_parameter_sets WRITE_U(stream, 2, 8, "last_payload_size_byte"); WRITE_U(stream, active_vps_id, 4, "active_video_parameter_set_id"); WRITE_U(stream, self_contained_cvs_flag, 1, "self_contained_cvs_flag"); WRITE_U(stream, no_parameter_set_update_flag, 1, "no_parameter_set_update_flag"); WRITE_UE(stream, num_sps_ids_minus1, "num_sps_ids_minus1"); //for (i = 0; i <= num_sps_ids_minus1; ++i) { WRITE_UE(stream, active_seq_parameter_set_id, "active_seq_parameter_set_id"); //} // for (i = vps_base_layer_internal_flag; i <= max_layers_minus1; ++i){ WRITE_UE(stream, layer_sps_idx, "layer_sps_idx"); //} kvz_bitstream_rbsp_trailing_bits(stream); //rbsp_trailing_bits } */ static void 
encoder_state_write_picture_timing_sei_message(encoder_state_t * const state) { bitstream_t * const stream = &state->stream; if (state->encoder_control->vui.frame_field_info_present_flag){ int8_t odd_picture = state->frame->num % 2; int8_t pic_struct = 0; //0: progressive picture, 1: top field, 2: bottom field, 3... int8_t source_scan_type = 1; //0: interlaced, 1: progressive switch (state->tile->frame->source->interlacing){ case 0: //Progressive frame pic_struct = 0; source_scan_type = 1; break; case 1: //Top field first pic_struct = odd_picture ? 2 : 1; source_scan_type = 0; break; case 2: //Bottom field first pic_struct = odd_picture ? 1 : 2; source_scan_type = 0; break; default: assert(0); //Should never execute break; } sei_write_payload_type(stream, SEI_PAYLOAD_TYPE_PIC_TIMING); sei_write_payload_size(stream, 1); WRITE_U(stream, pic_struct, 4, "pic_struct"); WRITE_U(stream, source_scan_type, 2, "source_scan_type"); WRITE_U(stream, 0, 1, "duplicate_flag"); kvz_bitstream_align(stream); } } static void encoder_state_entry_points_explore(const encoder_state_t * const state, int * const r_count, int * const r_max_length) { int i; for (i = 0; state->children[i].encoder_control; ++i) { if (state->children[i].is_leaf) { const int my_length = kvz_bitstream_tell(&state->children[i].stream)/8; ++(*r_count); if (my_length > *r_max_length) { *r_max_length = my_length; } } else { encoder_state_entry_points_explore(&state->children[i], r_count, r_max_length); } } } static void encoder_state_write_bitstream_entry_points_write(bitstream_t * const stream, const encoder_state_t * const state, const int num_entry_points, const int write_length, int * const r_count) { int i; for (i = 0; state->children[i].encoder_control; ++i) { if (state->children[i].is_leaf) { const int my_length = kvz_bitstream_tell(&state->children[i].stream)/8; ++(*r_count); //Don't write the last one if (*r_count < num_entry_points) { WRITE_U(stream, my_length - 1, write_length, "entry_point_offset-minus1") 
} } else { encoder_state_write_bitstream_entry_points_write(stream, &state->children[i], num_entry_points, write_length, r_count); } } } static void kvz_encoder_state_write_bitstream_slice_header_independent( struct bitstream_t * const stream, struct encoder_state_t * const state) { const encoder_control_t * const encoder = state->encoder_control; int j; int ref_negative = 0; int ref_positive = 0; if (encoder->cfg.gop_len) { for (j = 0; j < state->frame->ref->used_size; j++) { if (state->frame->ref->pocs[j] < state->frame->poc) { ref_negative++; } else { ref_positive++; } } } else ref_negative = state->frame->ref->used_size; WRITE_UE(stream, state->frame->slicetype, "slice_type"); if (state->frame->pictype != KVZ_NAL_IDR_W_RADL && state->frame->pictype != KVZ_NAL_IDR_N_LP) { const int poc_lsb = state->frame->poc & ((1 << encoder->poc_lsb_bits) - 1); WRITE_U(stream, poc_lsb, encoder->poc_lsb_bits, "pic_order_cnt_lsb"); int last_poc = 0; int poc_shift = 0; WRITE_U(stream, 0, 1, "short_term_ref_pic_set_sps_flag"); WRITE_UE(stream, ref_negative, "num_negative_pics"); WRITE_UE(stream, ref_positive, "num_positive_pics"); for (j = 0; j < ref_negative; j++) { int8_t delta_poc = 0; if (encoder->cfg.gop_len) { int8_t found = 0; do { delta_poc = encoder->cfg.gop[state->frame->gop_offset].ref_neg[j + poc_shift]; for (int i = 0; i < state->frame->ref->used_size; i++) { if (state->frame->ref->pocs[i] == state->frame->poc - delta_poc) { found = 1; break; } } if (!found) poc_shift++; if (j + poc_shift == ref_negative) { fprintf(stderr, "Failure, reference not found!"); exit(EXIT_FAILURE); } } while (!found); } WRITE_UE(stream, encoder->cfg.gop_len?delta_poc - last_poc - 1:0, "delta_poc_s0_minus1"); last_poc = delta_poc; WRITE_U(stream, !state->frame->is_irap, 1, "used_by_curr_pic_s0_flag"); } last_poc = 0; poc_shift = 0; for (j = 0; j < ref_positive; j++) { int8_t delta_poc = 0; if (encoder->cfg.gop_len) { int8_t found = 0; do { delta_poc = 
encoder->cfg.gop[state->frame->gop_offset].ref_pos[j + poc_shift]; for (int i = 0; i < state->frame->ref->used_size; i++) { if (state->frame->ref->pocs[i] == state->frame->poc + delta_poc) { found = 1; break; } } if (!found) poc_shift++; if (j + poc_shift == ref_positive) { fprintf(stderr, "Failure, reference not found!"); exit(EXIT_FAILURE); } } while (!found); } WRITE_UE(stream, encoder->cfg.gop_len ? delta_poc - last_poc - 1 : 0, "delta_poc_s1_minus1"); last_poc = delta_poc; WRITE_U(stream, !state->frame->is_irap, 1, "used_by_curr_pic_s1_flag"); } //WRITE_UE(stream, 0, "short_term_ref_pic_set_idx"); if (state->encoder_control->cfg.tmvp_enable) { WRITE_U(stream, ref_negative ? 1 : 0, 1, "slice_temporal_mvp_enabled_flag"); } } //end if //end if if (encoder->cfg.sao_type) { WRITE_U(stream, 1, 1, "slice_sao_luma_flag"); if (encoder->chroma_format != KVZ_CSP_400) { WRITE_U(stream, 1, 1, "slice_sao_chroma_flag"); } } if (state->frame->slicetype != KVZ_SLICE_I) { WRITE_U(stream, 1, 1, "num_ref_idx_active_override_flag"); WRITE_UE(stream, MAX(0, ((int)state->frame->ref_LX_size[0]) - 1), "num_ref_idx_l0_active_minus1"); if (state->frame->slicetype == KVZ_SLICE_B) { WRITE_UE(stream, MAX(0, ((int)state->frame->ref_LX_size[1]) - 1), "num_ref_idx_l1_active_minus1"); WRITE_U(stream, 0, 1, "mvd_l1_zero_flag"); } // Temporal Motion Vector Prediction flags if (state->encoder_control->cfg.tmvp_enable && ref_negative > 0) { if (state->frame->slicetype == KVZ_SLICE_B) { // Always use L0 for prediction WRITE_U(stream, 1, 1, "collocated_from_l0_flag"); } if (ref_negative > 1) { // Use first reference from L0 // ToDo: use better reference WRITE_UE(stream, 0, "collocated_ref_idx"); } } const uint8_t max_merge_cands = state->encoder_control->cfg.max_merge; WRITE_UE(stream, 5- max_merge_cands, "five_minus_max_num_merge_cand"); } { // If tiles are enabled, signal the full QP here (relative to the base value of 26) // If QP is to be set only at CU level, force slice_qp_delta zero bool 
signal_qp_in_slice_header = (encoder->cfg.slices & KVZ_SLICES_TILES) && encoder->tiles_enable; int slice_qp_delta = state->frame->QP - (signal_qp_in_slice_header ? 26 : encoder->cfg.qp); if(encoder->cfg.set_qp_in_cu) slice_qp_delta = 0; WRITE_SE(stream, slice_qp_delta, "slice_qp_delta"); } } void kvz_encoder_state_write_bitstream_slice_header( struct bitstream_t * const stream, struct encoder_state_t * const state, bool independent) { const encoder_control_t * const encoder = state->encoder_control; #ifdef KVZ_DEBUG printf("=========== Slice ===========\n"); #endif if (encoder->cfg.partial_coding.fullWidth != 0) { state->slice->start_in_rs = encoder->cfg.partial_coding.startCTU_x + CEILDIV(encoder->cfg.partial_coding.fullWidth, 64) * encoder->cfg.partial_coding.startCTU_y; } bool first_slice_segment_in_pic = (state->slice->start_in_rs == 0); if ((state->encoder_control->cfg.slices & KVZ_SLICES_WPP) && state->wfrow->lcu_offset_y > 0) { first_slice_segment_in_pic = false; } WRITE_U(stream, first_slice_segment_in_pic, 1, "first_slice_segment_in_pic_flag"); if (state->frame->pictype >= KVZ_NAL_BLA_W_LP && state->frame->pictype <= KVZ_NAL_RSV_IRAP_VCL23) { WRITE_U(stream, 0, 1, "no_output_of_prior_pics_flag"); } WRITE_UE(stream, 0, "slice_pic_parameter_set_id"); if (!first_slice_segment_in_pic) { if (encoder->pps.dependent_slice_segments_enabled_flag) { WRITE_U(stream, !independent, 1, "dependent_slice_segment_flag"); } int lcu_cnt = encoder->in.width_in_lcu * encoder->in.height_in_lcu; if (encoder->cfg.partial_coding.fullWidth != 0) { lcu_cnt = CEILDIV(encoder->cfg.partial_coding.fullWidth, 64) * CEILDIV(encoder->cfg.partial_coding.fullHeight, 64); } int num_bits = kvz_math_ceil_log2(lcu_cnt); int slice_start_rs = state->slice->start_in_rs; if (state->encoder_control->cfg.slices & KVZ_SLICES_WPP) { slice_start_rs += state->wfrow->lcu_offset_y * state->tile->frame->width_in_lcu; } WRITE_U(stream, slice_start_rs, num_bits, "slice_segment_address"); } if (independent) { 
kvz_encoder_state_write_bitstream_slice_header_independent(stream, state); } if (encoder->tiles_enable || encoder->cfg.wpp) { int num_entry_points = 0; int max_length_seen = 0; if (state->is_leaf) { num_entry_points = 1; } else { encoder_state_entry_points_explore(state, &num_entry_points, &max_length_seen); } int num_offsets = num_entry_points - 1; WRITE_UE(stream, num_offsets, "num_entry_point_offsets"); if (num_offsets > 0) { int entry_points_written = 0; int offset_len = kvz_math_floor_log2(max_length_seen) + 1; WRITE_UE(stream, offset_len - 1, "offset_len_minus1"); encoder_state_write_bitstream_entry_points_write(stream, state, num_entry_points, offset_len, &entry_points_written); } } } /** * \brief Add a checksum SEI message to the bitstream. * \param encoder The encoder. * \returns Void */ static void add_checksum(encoder_state_t * const state) { bitstream_t * const stream = &state->stream; const videoframe_t * const frame = state->tile->frame; unsigned char checksum[3][SEI_HASH_MAX_LENGTH]; kvz_nal_write(stream, KVZ_NAL_SUFFIX_SEI_NUT, 0, 0); sei_write_payload_type(stream, SEI_PAYLOAD_TYPE_DECODED_PICTURE_HASH); int num_colors = (state->encoder_control->chroma_format == KVZ_CSP_400 ? 
1 : 3); switch (state->encoder_control->cfg.hash) { case KVZ_HASH_CHECKSUM: kvz_image_checksum(frame->rec, checksum, state->encoder_control->bitdepth); sei_write_payload_size(stream, 1 + num_colors * 4); WRITE_U(stream, 2, 8, "hash_type"); // 2 = checksum for (int i = 0; i < num_colors; ++i) { uint32_t checksum_val = ( (checksum[i][0] << 24) + (checksum[i][1] << 16) + (checksum[i][2] << 8) + (checksum[i][3])); WRITE_U(stream, checksum_val, 32, "picture_checksum"); CHECKPOINT("checksum[%d] = %u", i, checksum_val); } break; case KVZ_HASH_MD5: kvz_image_md5(frame->rec, checksum, state->encoder_control->bitdepth); sei_write_payload_size(stream, 1 + num_colors * 16); WRITE_U(stream, 0, 8, "hash_type"); // 0 = md5 for (int i = 0; i < num_colors; ++i) { for (int b = 0; b < 16; ++b) { WRITE_U(stream, checksum[i][b], 8, "picture_md5"); } } break; case KVZ_HASH_NONE: // Means we shouldn't be writing this SEI. assert(0); } kvz_bitstream_align(stream); // spec:sei_rbsp() rbsp_trailing_bits kvz_bitstream_add_rbsp_trailing_bits(stream); } static void encoder_state_write_slice_header( bitstream_t * stream, encoder_state_t * state, bool independent) { kvz_nal_write(stream, state->frame->pictype, 0, state->frame->first_nal); state->frame->first_nal = false; kvz_encoder_state_write_bitstream_slice_header(stream, state, independent); kvz_bitstream_add_rbsp_trailing_bits(stream); } /** * \brief Move child state bitstreams to the parent stream. */ static void encoder_state_write_bitstream_children(encoder_state_t * const state) { // Write Slice headers to the parent stream instead of the child stream // in case the child stream is a leaf with something in it already. 
for (int i = 0; state->children[i].encoder_control; ++i) { if (state->children[i].type == ENCODER_STATE_TYPE_SLICE) { encoder_state_write_slice_header(&state->stream, &state->children[i], true); } else if (state->children[i].type == ENCODER_STATE_TYPE_WAVEFRONT_ROW) { if ((state->encoder_control->cfg.slices & KVZ_SLICES_WPP) && i != 0) { // Add header for dependent WPP row slice. encoder_state_write_slice_header(&state->stream, &state->children[i], false); } } kvz_encoder_state_write_bitstream(&state->children[i]); kvz_bitstream_move(&state->stream, &state->children[i].stream); } } static void encoder_state_write_bitstream_main(encoder_state_t * const state) { const encoder_control_t * const encoder = state->encoder_control; bitstream_t * const stream = &state->stream; uint64_t curpos = kvz_bitstream_tell(stream); // The first NAL unit of the access unit must use a long start code. state->frame->first_nal = true; // Access Unit Delimiter (AUD) if (encoder->cfg.aud_enable) { state->frame->first_nal = false; encoder_state_write_bitstream_aud(state); } if (encoder_state_must_write_vps(state)) { state->frame->first_nal = false; kvz_encoder_state_write_parameter_sets(&state->stream, state); } // Send Kvazaar version information only in the first frame. 
if (state->frame->num == 0 && encoder->cfg.add_encoder_info) { kvz_nal_write(stream, KVZ_NAL_PREFIX_SEI_NUT, 0, state->frame->first_nal); state->frame->first_nal = false; encoder_state_write_bitstream_prefix_sei_version(state); // spec:sei_rbsp() rbsp_trailing_bits kvz_bitstream_add_rbsp_trailing_bits(stream); } //SEI messages for interlacing if (encoder->vui.frame_field_info_present_flag) { // These should be optional, needed for earlier versions // of HM decoder to accept bitstream //kvz_nal_write(stream, KVZ_NAL_PREFIX_SEI_NUT, 0, 0); //encoder_state_write_active_parameter_sets_sei_message(state); //kvz_bitstream_rbsp_trailing_bits(stream); kvz_nal_write(stream, KVZ_NAL_PREFIX_SEI_NUT, 0, state->frame->first_nal); state->frame->first_nal = false; encoder_state_write_picture_timing_sei_message(state); // spec:sei_rbsp() rbsp_trailing_bits kvz_bitstream_add_rbsp_trailing_bits(stream); } encoder_state_write_bitstream_children(state); if (state->encoder_control->cfg.hash != KVZ_HASH_NONE) { // Calculate checksum add_checksum(state); } //Get bitstream length for stats uint64_t newpos = kvz_bitstream_tell(stream); state->stats_bitstream_length = (newpos >> 3) - (curpos >> 3); if (state->frame->num > 0) { state->frame->total_bits_coded = state->previous_encoder_state->frame->total_bits_coded; } state->frame->total_bits_coded += newpos - curpos; if(state->encoder_control->cfg.rc_algorithm == KVZ_OBA || state->encoder_control->cfg.stats_file_prefix) { kvz_update_after_picture(state); } if(state->frame->gop_offset) state->frame->cur_gop_bits_coded = state->previous_encoder_state->frame->cur_gop_bits_coded; state->frame->cur_gop_bits_coded += newpos - curpos; } void kvz_encoder_state_write_bitstream(encoder_state_t * const state) { if (!state->is_leaf) { switch (state->type) { case ENCODER_STATE_TYPE_MAIN: encoder_state_write_bitstream_main(state); break; case ENCODER_STATE_TYPE_TILE: case ENCODER_STATE_TYPE_SLICE: encoder_state_write_bitstream_children(state); break; 
default: fprintf(stderr, "Unsupported node type %c!\n", state->type); assert(0); } } } void kvz_encoder_state_worker_write_bitstream(void * opaque) { kvz_encoder_state_write_bitstream((encoder_state_t *) opaque); } void kvz_encoder_state_write_parameter_sets(bitstream_t *stream, encoder_state_t * const state) { // Video Parameter Set (VPS) kvz_nal_write(stream, KVZ_NAL_VPS_NUT, 0, 1); encoder_state_write_bitstream_vid_parameter_set(stream, state); // Sequence Parameter Set (SPS) kvz_nal_write(stream, KVZ_NAL_SPS_NUT, 0, 1); encoder_state_write_bitstream_seq_parameter_set(stream, state); // Picture Parameter Set (PPS) kvz_nal_write(stream, KVZ_NAL_PPS_NUT, 0, 1); encoder_state_write_bitstream_pic_parameter_set(stream, state); }
c4c435e8e8857800e96ea52d703f2bafe44d9f52
fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
/govern/data-security/krb-1.2.1/src/lib/krb5/krb/sendauth.c
4908a2af492523779496809d98aaaec8208881cc
[ "BSD-4-Clause", "LicenseRef-scancode-generic-export-compliance", "LicenseRef-scancode-other-permissive", "LicenseRef-scancode-mit-old-style", "BSD-4-Clause-UC", "LicenseRef-scancode-rsa-1990", "BSD-3-Clause", "MIT-CMU", "LicenseRef-scancode-mit-no-advert-export-control", "CC-BY-SA-3.0", "LicenseRef-scancode-mit-modification-obligations", "LicenseRef-scancode-proprietary-license", "GPL-2.0-or-later", "LicenseRef-scancode-michigan-disclaimer", "ISC", "LicenseRef-scancode-nrl-permission", "FreeBSD-DOC", "LicenseRef-scancode-rsa-md4", "RSA-MD", "OLDAP-2.8", "FSFULLRWD", "BSD-2-Clause", "LicenseRef-scancode-brian-gladman", "MIT", "Apache-2.0" ]
permissive
alldatacenter/alldata
7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
refs/heads/master
2023-08-05T07:32:25.442740
2023-08-03T13:17:24
2023-08-03T13:17:24
213,321,771
774
250
Apache-2.0
2023-09-06T17:35:32
2019-10-07T07:36:18
null
UTF-8
C
false
false
7,420
c
sendauth.c
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* lib/krb5/krb/sendauth.c */ /* * Copyright 1991, 2009 by the Massachusetts Institute of Technology. * All Rights Reserved. * * Export of this software from the United States of America may * require a specific license from the United States Government. * It is the responsibility of any person or organization contemplating * export to obtain such a license before exporting. * * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and * distribute this software and its documentation for any purpose and * without fee is hereby granted, provided that the above copyright * notice appear in all copies and that both that copyright notice and * this permission notice appear in supporting documentation, and that * the name of M.I.T. not be used in advertising or publicity pertaining * to distribution of the software without specific, written prior * permission. Furthermore if you modify this software you must label * your software as modified software and not distribute it in such a * fashion that it might be confused with the original M.I.T. software. * M.I.T. makes no representations about the suitability of * this software for any purpose. It is provided "as is" without express * or implied warranty. 
*/ #include "k5-int.h" #include "os-proto.h" #include "com_err.h" #include "auth_con.h" #include <errno.h> #include <stdio.h> #include <string.h> static const char sendauth_version[] = "KRB5_SENDAUTH_V1.0"; krb5_error_code KRB5_CALLCONV krb5_sendauth(krb5_context context, krb5_auth_context *auth_context, krb5_pointer fd, char *appl_version, krb5_principal client, krb5_principal server, krb5_flags ap_req_options, krb5_data *in_data, krb5_creds *in_creds, krb5_ccache ccache, krb5_error **error, krb5_ap_rep_enc_part **rep_result, krb5_creds **out_creds) { krb5_octet result; krb5_creds creds; krb5_creds * credsp = NULL; krb5_creds * credspout = NULL; krb5_error_code retval = 0; krb5_data inbuf, outbuf[2]; int len; krb5_ccache use_ccache = 0; if (error) *error = 0; /* * First, send over the length of the sendauth version string; * then, we send over the sendauth version. Next, we send * over the length of the application version strings followed * by the string itself. */ outbuf[0].length = strlen(sendauth_version) + 1; outbuf[0].data = (char *) sendauth_version; outbuf[1].length = strlen(appl_version) + 1; outbuf[1].data = appl_version; if ((retval = k5_write_messages(context, fd, outbuf, 2))) return(retval); /* * Now, read back a byte: 0 means no error, 1 means bad sendauth * version, 2 means bad application version */ if ((len = krb5_net_read(context, *((int *) fd), (char *)&result, 1)) != 1) return((len < 0) ? errno : ECONNABORTED); if (result == 1) return(KRB5_SENDAUTH_BADAUTHVERS); else if (result == 2) return(KRB5_SENDAUTH_BADAPPLVERS); else if (result != 0) return(KRB5_SENDAUTH_BADRESPONSE); /* * We're finished with the initial negotiations; let's get and * send over the authentication header. (The AP_REQ message) */ /* * If no credentials were provided, try getting it from the * credentials cache. 
*/ memset(&creds, 0, sizeof(creds)); /* * See if we need to access the credentials cache */ if (!in_creds || !in_creds->ticket.length) { if (ccache) use_ccache = ccache; else if ((retval = krb5int_cc_default(context, &use_ccache))) goto error_return; } if (!in_creds) { if ((retval = krb5_copy_principal(context, server, &creds.server))) goto error_return; if (client) retval = krb5_copy_principal(context, client, &creds.client); else retval = krb5_cc_get_principal(context, use_ccache, &creds.client); if (retval) goto error_return; /* creds.times.endtime = 0; -- memset 0 takes care of this zero means "as long as possible" */ /* creds.keyblock.enctype = 0; -- as well as this. zero means no session enctype preference */ in_creds = &creds; } if (!in_creds->ticket.length) { if ((retval = krb5_get_credentials(context, 0, use_ccache, in_creds, &credsp))) goto error_return; credspout = credsp; } else { credsp = in_creds; } outbuf[0].data = NULL; /* Coverity is confused otherwise */ if ((retval = krb5_mk_req_extended(context, auth_context, ap_req_options, in_data, credsp, &outbuf[0]))) goto error_return; /* * First write the length of the AP_REQ message, then write * the message itself. */ retval = krb5_write_message(context, fd, &outbuf[0]); free(outbuf[0].data); if (retval) goto error_return; /* * Now, read back a message. If it was a null message (the * length was zero) then there was no error. If not, we the * authentication was rejected, and we need to return the * error structure. 
*/ if ((retval = krb5_read_message(context, fd, &inbuf))) goto error_return; if (inbuf.length) { if (error) { if ((retval = krb5_rd_error(context, &inbuf, error))) { free(inbuf.data); goto error_return; } } retval = KRB5_SENDAUTH_REJECTED; free(inbuf.data); goto error_return; } /* * If we asked for mutual authentication, we should now get a * length field, followed by a AP_REP message */ if ((ap_req_options & AP_OPTS_MUTUAL_REQUIRED)) { krb5_ap_rep_enc_part *repl = 0; if ((retval = krb5_read_message(context, fd, &inbuf))) goto error_return; if ((retval = krb5_rd_rep(context, *auth_context, &inbuf, &repl))) { if (repl) krb5_free_ap_rep_enc_part(context, repl); free(inbuf.data); goto error_return; } free(inbuf.data); /* * If the user wants to look at the AP_REP message, * copy it for him */ if (rep_result) *rep_result = repl; else krb5_free_ap_rep_enc_part(context, repl); } retval = 0; /* Normal return */ if (out_creds) { *out_creds = credsp; credspout = NULL; } error_return: krb5_free_cred_contents(context, &creds); if (credspout != NULL) krb5_free_creds(context, credspout); if (!ccache && use_ccache) krb5_cc_close(context, use_ccache); return(retval); }
470055251682f50c3a957fd15ec508a7c40bf3db
53a83642c01a8828e3d7bd0b18e33c3b694c2b84
/C/max_money.c
9a8fb7bfdfd466fced5c04adca1034737f584edf
[]
no_license
anantkaushik/Competitive_Programming
1dcd60a28b5b951c23024d6090942be081ad249f
6dba38fd7aa4e71b5196d01d64e81f9336d08b13
refs/heads/master
2022-03-06T15:36:23.797340
2022-02-21T12:00:37
2022-02-21T12:00:37
82,700,948
271
95
null
2020-10-27T17:34:39
2017-02-21T16:18:16
Python
UTF-8
C
false
false
752
c
max_money.c
/* Given street of houses (a row of houses), each house having some amount of money kept inside; now there is a thief who is going to steal this money but he has a constraint/rule that he cannot steal/rob two adjacent houses. Find the maximum money he can rob. Input: The first line of input contains an integer T denoting the number of test cases. The first line of each test case is N and money. Output: Print maximum money he can rob. Sample Input: 2 5 10 2 12 Output: 30 12 */ #include <stdio.h> int main(){ int t; scanf("%d",&t); while ( t>0 ) { int n,a; scanf("%d %d",&n,&a); if ( n%2==0 ) n=n/2; else n=(n/2)+1; printf("%d\n",n*a); t--; } return 0; }
5af352a05c175c8352c35f4e536d134ca856b1da
24acbcc7b5ad20d2cea61b2b2b8190befe7f5bce
/Code/Search/moveUp.c
b668e90b8ca26eadaf613f93d7a932ec0c757593
[ "MIT" ]
permissive
heineman/algorithms-nutshell-2ed
a08e2136fb3f9199e119a8aebca1be09a02b86f4
6bdaf7ee2878f69a2df00ae7a3274f5d43d24605
refs/heads/master
2022-09-04T20:53:52.422824
2021-12-29T02:34:44
2021-12-29T02:34:44
43,086,309
577
226
null
null
null
null
UTF-8
C
false
false
1,335
c
moveUp.c
/** * @file moveUp.c Task to perform searches in unordered array and move up one slot when found. * @brief * Load up an array of strings and perform number of unordered * searches. No check for NULL is used. Move up one slot for each * successful find. * * @author George Heineman * @date 6/15/08 */ #include <stdlib.h> #include <string.h> #include "report.h" /** Storage of string array. */ static char **ds; /** Number of strings in the array 'ds'. */ static int dsSize; /** Position into 'ds' into which the next string will be inserted. */ static int dsIdx; /** construct the initial instance. Allocate array of 'n' elements for 'ds'. */ void construct (int n) { ds = (char **) calloc (n, sizeof (char **)); dsSize = n; dsIdx = 0; } /** insert strings one at a time to the next position within the array. */ void insert (char *s) { ds[dsIdx++] = s; } /** Search for the target within the array. No Check for NULL. Move up one when found by swapping elements. */ int search (char *target, int(*cmp)(const void *,const void *)) { int i; for (i = 0; i < dsIdx; i++) { char *copy; if (!cmp(ds[i], target)) { if (i == 0) return 1; /* nothing to move! */ copy = ds[i-1]; ds[i-1] = ds[i]; ds[i] = copy; return 1; } } return 0; /* nope */ }
0a2b65a96a32b128b82751ee0d850a7f9cd93ad1
632f6f14abb1dbdf86aca1506b8012392bef2a41
/source/family/nxp/lpc55S6X/flash_blob.c
030c918a5e1a0c4dda8c815c845a7bdd81e841f5
[ "Apache-2.0" ]
permissive
ARMmbed/DAPLink
a34f7ce41d6bfc38d49283766a03280f52322f2a
19f797fa6396b726250c57eb9be80245a5f877dd
refs/heads/main
2023-08-23T20:37:22.744671
2023-06-29T19:36:00
2023-08-16T16:39:58
24,571,059
1,865
883
Apache-2.0
2023-08-16T16:39:59
2014-09-28T21:38:24
C
UTF-8
C
false
false
7,587
c
flash_blob.c
/* Flash algorithm for LPC55xx IAP 608kB Flash * * DAPLink Interface Firmware * Copyright (c) 2009-2022 Arm Limited, All Rights Reserved * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Generated from 'LPC55XX_640.FLM' (LPC55xx IAP 608kB Flash) // Originating from 'NXP.LPC55S69_DFP.13.1.0.pack' // digest = c672c27550f789743829bb8832245f8c6f0c8ea81b9291b53827849eeabbe52c, file size = 22316 // algo version = 0x101, algo size = 1696 (0x6a0) static const uint32_t LPC55XX_640_flash_prog_blob[] = { 0xe7fdbe00, 0xf240b580, 0xf2c00004, 0xf6420000, 0xf84961e0, 0xf2401000, 0xf2c52000, 0x21000000, 0x1080f8c0, 0x1084f8c0, 0x1180f8c0, 0x71fbf647, 0xf6406001, 0x21ff6004, 0x0000f2c5, 0x01def2cc, 0xf04f6001, 0x210240a0, 0xf2407001, 0xf2c00010, 0x44480000, 0xf874f000, 0xbf182800, 0xbd802001, 0x47702000, 0xf240b580, 0xf2c00010, 0xf2460000, 0x4448636c, 0xf6c62100, 0xf44f3365, 0xf0002218, 0x2800f87f, 0x2001bf18, 0xbf00bd80, 0xf020b580, 0xf2404170, 0xf2c00010, 0xf2460000, 0x4448636c, 0x3365f6c6, 0x4200f44f, 0xf86af000, 0xbf182800, 0xbd802001, 0x4614b570, 0x0441460d, 0x4670f020, 0xf240d10d, 0xf2c00010, 0xf2460000, 0x4448636c, 0xf6c64631, 0xf44f3365, 0xf0004200, 0xf240f851, 0xf2c00010, 0xf5b50000, 0xbf987f00, 0x7500f44f, 0x46314448, 0x462b4622, 0xf86af000, 0xbf182800, 0xbd702001, 0x460cb5b0, 0xf0204605, 0x46114070, 0xf0004622, 0x2800fa01, 0x4425bf08, 0xbdb04628, 0x460ab580, 0x4170f020, 0x0010f240, 0x0000f2c0, 0xf0004448, 0x2800f875, 0x2001bf18, 0x0000bd80, 
0x02f4f241, 0x3200f2c1, 0x290068d1, 0x2360d00a, 0x78926283, 0xf2406849, 0xf2c0030c, 0xf8490300, 0x47082003, 0x40baf240, 0x0000f2c0, 0x41c7f240, 0x0100f2c0, 0x44794478, 0xf0002284, 0xbf00f98d, 0x0c0cf240, 0x0c00f2c0, 0xc00cf859, 0x0f02f1bc, 0xf244d104, 0xf2c11c3b, 0x47603c00, 0x1c00f241, 0x3c00f2c1, 0xc000f8dc, 0x0f00f1bc, 0xf8dcd002, 0x4760c008, 0x406af240, 0x0000f2c0, 0x4177f240, 0x0100f2c0, 0x44794478, 0xf0002295, 0xbf00f965, 0x0c0cf240, 0x0c00f2c0, 0xc00cf859, 0x0f02f1bc, 0xf244d104, 0xf2c11c9d, 0x47603c00, 0x1c00f241, 0x3c00f2c1, 0xc000f8dc, 0x0f00f1bc, 0xf8dcd002, 0x4760c00c, 0x401af240, 0x0000f2c0, 0x4127f240, 0x0100f2c0, 0x44794478, 0xf00022a5, 0xbf00f93d, 0x1300f241, 0x3300f2c1, 0x2b00681b, 0x691bd001, 0xf2404718, 0xf2c030ec, 0xf2400000, 0xf2c031f9, 0x44780100, 0x22ad4479, 0xf926f000, 0x0c0cf240, 0x0c00f2c0, 0xc00cf859, 0x0f02f1bc, 0xf244d104, 0xf2c12c7d, 0x47603c00, 0x1c00f241, 0x3c00f2c1, 0xc000f8dc, 0x0f00f1bc, 0xf8dcd002, 0x4760c014, 0x309ef240, 0x0000f2c0, 0x31abf240, 0x0100f2c0, 0x44794478, 0xf00022c2, 0xbf00f8ff, 0x1300f241, 0x3300f2c1, 0x2b00681b, 0x699bd001, 0xf2404718, 0xf2c03070, 0xf2400000, 0xf2c0317d, 0x44780100, 0x22cb4479, 0xf8e8f000, 0x1100f241, 0x3100f2c1, 0x29006809, 0x6a89d001, 0xf2404708, 0xf2c03044, 0xf2400000, 0xf2c03151, 0x44780100, 0x22d54479, 0xf8d2f000, 0x1100f241, 0x3100f2c1, 0x29006809, 0x6ac9d001, 0xf2404708, 0xf2c03018, 0xf2400000, 0xf2c03125, 0x44780100, 0x22dc4479, 0xf8bcf000, 0x1300f241, 0x3300f2c1, 0x2b00681b, 0x6b1bd001, 0xf2404718, 0xf2c020ec, 0xf2400000, 0xf2c021f9, 0x44780100, 0x22e34479, 0xf8a6f000, 0x1200f241, 0x3200f2c1, 0x2a006812, 0x6b52d001, 0xf2404710, 0xf2c020c0, 0xf2400000, 0xf2c021cd, 0x44780100, 0x22ea4479, 0xf890f000, 0x1c00f241, 0x3c00f2c1, 0xc000f8dc, 0x0f00f1bc, 0xf8dcd002, 0x4760c038, 0x208ef240, 0x0000f2c0, 0x219bf240, 0x0100f2c0, 0x44794478, 0xf00022f1, 0xbf00f877, 0x1200f241, 0x3200f2c1, 0x2a006812, 0x6bd2d001, 0xf2404710, 0xf2c02060, 0xf2400000, 0xf2c0216d, 0x44780100, 0x22f84479, 0xf860f000, 
0x1200f241, 0x3200f2c1, 0x2a006812, 0x6c12d001, 0xf2404710, 0xf2c02034, 0xf2400000, 0xf2c02141, 0x44780100, 0x22ff4479, 0xf84af000, 0x1300f241, 0x3300f2c1, 0x2b00681b, 0x6c5bd001, 0xf2404718, 0xf2c02008, 0xf2400000, 0xf2c02115, 0x44780100, 0xf44f4479, 0xf0007283, 0xbf00f833, 0x1300f241, 0x3300f2c1, 0x2b00681b, 0x6c9bd001, 0xf2404718, 0xf2c010d8, 0xf2400000, 0xf2c011e5, 0x44780100, 0xf2404479, 0xf000120d, 0xbf00f81b, 0x1c00f241, 0x3c00f2c1, 0xc000f8dc, 0x0f00f1bc, 0xf8dcd002, 0x4760c04c, 0x10a2f240, 0x0000f2c0, 0x11aff240, 0x0100f2c0, 0x44794478, 0x728af44f, 0xf800f000, 0x4605b50e, 0x460e4614, 0xf000a013, 0x4628f870, 0xf86df000, 0xf000a016, 0x4630f86a, 0xf867f000, 0xf000a015, 0x2100f864, 0x100bf88d, 0xf10d210a, 0xf88d000a, 0xe008100a, 0xf2f1fb94, 0x4212fb01, 0xf4f1fb94, 0xf8003230, 0x2c002d01, 0xf000dcf4, 0xf000f84e, 0x0000f841, 0x202a2a2a, 0x65737361, 0x6f697472, 0x6166206e, 0x64656c69, 0x0000203a, 0x6966202c, 0x0020656c, 0x696c202c, 0x0020656e, 0x0301ea40, 0x079bb510, 0x2a04d10f, 0xc810d30d, 0x1f12c908, 0xd0f8429c, 0xba19ba20, 0xd9014288, 0xbd102001, 0x30fff04f, 0xb11abd10, 0xd00307d3, 0xe0071c52, 0xbd102000, 0x3b01f810, 0x4b01f811, 0xd1071b1b, 0x3b01f810, 0x4b01f811, 0xd1011b1b, 0xd1f11e92, 0xbd104618, 0x2000b510, 0xf81ef000, 0x8000f3af, 0x4010e8bd, 0xf0002001, 0xb510b811, 0xe0024604, 0xf0001c64, 0x7820f804, 0xd1f92800, 0xb508bd10, 0xf88d4669, 0x20030000, 0xbd08beab, 0x20184901, 0xe7febeab, 0x00020026, 0xf000b510, 0xe8bdf80b, 0xf0004010, 0x4770b801, 0xd0012800, 0xbfeef7ff, 0x00004770, 0x2100b510, 0xf000a002, 0x2001f813, 0x0000bd10, 0x41474953, 0x3a545242, 0x6e624120, 0x616d726f, 0x6574206c, 0x6e696d72, 0x6f697461, 0x0000006e, 0x4605b570, 0x200a460c, 0x1c6de000, 0xffc5f7ff, 0x7828b135, 0xd1f82800, 0x1c64e002, 0xffbdf7ff, 0x7820b114, 0xd1f82800, 0x4070e8bd, 0xf7ff200a, 0x4c46bfb4, 0x5f485341, 0x5f495041, 0x45455254, 0x70616900, 0x73662f31, 0x61695f6c, 0x632e3170, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; // Start address of flash static const uint32_t flash_start = 0x00000000; // Size of flash static const uint32_t flash_size = 0x00098000; /** * List of start and size for each size of flash sector - even indexes are start, odd are size * The size will apply to all sectors between the listed address and the next address * in the list. * The last pair in the list will have sectors starting at that address and ending * at address flash_start + flash_size. */ static const sector_info_t sectors_info[] = { {0x00000000, 0x00008000}, }; static const program_target_t flash = { 0x20000005, // Init 0x20000061, // UnInit 0x20000065, // EraseChip 0x2000008d, // EraseSector 0x200000b5, // ProgramPage 0x00000000, // Verify // BKPT : start of blob + 1 // RSB : blob start + header + rw data offset // RSP : stack pointer { 0x20000001, 0x20000654, 0x20001000 }, // mem buffer location 0x20001000, // location to write prog_blob in target RAM 0x20000000, // prog_blob size sizeof(LPC55XX_640_flash_prog_blob), // address of prog_blob LPC55XX_640_flash_prog_blob, // ram_to_flash_bytes_to_be_written 0x00000200 };
4d0d630e26b9c2d78b9fa2486840432425a02ccf
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
/SOFTWARE/A64-TERES/linux-a64/drivers/target/iscsi/iscsi_target_tq.c
bd53364b75e87213910e4098c9e8b0effa317f63
[ "LicenseRef-scancode-free-unknown", "Apache-2.0", "Linux-syscall-note", "GPL-2.0-only", "GPL-1.0-or-later" ]
permissive
OLIMEX/DIY-LAPTOP
ae82f4ee79c641d9aee444db9a75f3f6709afa92
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
refs/heads/rel3
2023-08-04T01:54:19.483792
2023-04-03T07:18:12
2023-04-03T07:18:12
80,094,055
507
92
Apache-2.0
2023-04-03T07:05:59
2017-01-26T07:25:50
C
UTF-8
C
false
false
13,810
c
iscsi_target_tq.c
/******************************************************************************* * This file contains the iSCSI Login Thread and Thread Queue functions. * * \u00a9 Copyright 2007-2011 RisingTide Systems LLC. * * Licensed to the Linux Foundation under the General Public License (GPL) version 2. * * Author: Nicholas A. Bellinger <nab@linux-iscsi.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. ******************************************************************************/ #include <linux/kthread.h> #include <linux/list.h> #include <linux/bitmap.h> #include "iscsi_target_core.h" #include "iscsi_target_tq.h" #include "iscsi_target.h" static LIST_HEAD(inactive_ts_list); static DEFINE_SPINLOCK(inactive_ts_lock); static DEFINE_SPINLOCK(ts_bitmap_lock); static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts) { if (!list_empty(&ts->ts_list)) { WARN_ON(1); return; } spin_lock(&inactive_ts_lock); list_add_tail(&ts->ts_list, &inactive_ts_list); iscsit_global->inactive_ts++; spin_unlock(&inactive_ts_lock); } static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void) { struct iscsi_thread_set *ts; spin_lock(&inactive_ts_lock); if (list_empty(&inactive_ts_list)) { spin_unlock(&inactive_ts_lock); return NULL; } ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list); list_del_init(&ts->ts_list); iscsit_global->inactive_ts--; spin_unlock(&inactive_ts_lock); return ts; } int iscsi_allocate_thread_sets(u32 thread_pair_count) { int allocated_thread_pair_count = 0, i, thread_id; struct 
iscsi_thread_set *ts = NULL; for (i = 0; i < thread_pair_count; i++) { ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL); if (!ts) { pr_err("Unable to allocate memory for" " thread set.\n"); return allocated_thread_pair_count; } /* * Locate the next available regision in the thread_set_bitmap */ spin_lock(&ts_bitmap_lock); thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap, iscsit_global->ts_bitmap_count, get_order(1)); spin_unlock(&ts_bitmap_lock); if (thread_id < 0) { pr_err("bitmap_find_free_region() failed for" " thread_set_bitmap\n"); kfree(ts); return allocated_thread_pair_count; } ts->thread_id = thread_id; ts->status = ISCSI_THREAD_SET_FREE; INIT_LIST_HEAD(&ts->ts_list); spin_lock_init(&ts->ts_state_lock); init_completion(&ts->rx_post_start_comp); init_completion(&ts->tx_post_start_comp); init_completion(&ts->rx_restart_comp); init_completion(&ts->tx_restart_comp); init_completion(&ts->rx_start_comp); init_completion(&ts->tx_start_comp); ts->create_threads = 1; ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s", ISCSI_TX_THREAD_NAME); if (IS_ERR(ts->tx_thread)) { dump_stack(); pr_err("Unable to start iscsi_target_tx_thread\n"); break; } ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s", ISCSI_RX_THREAD_NAME); if (IS_ERR(ts->rx_thread)) { kthread_stop(ts->tx_thread); pr_err("Unable to start iscsi_target_rx_thread\n"); break; } ts->create_threads = 0; iscsi_add_ts_to_inactive_list(ts); allocated_thread_pair_count++; } pr_debug("Spawned %d thread set(s) (%d total threads).\n", allocated_thread_pair_count, allocated_thread_pair_count * 2); return allocated_thread_pair_count; } void iscsi_deallocate_thread_sets(void) { u32 released_count = 0; struct iscsi_thread_set *ts = NULL; while ((ts = iscsi_get_ts_from_inactive_list())) { spin_lock_bh(&ts->ts_state_lock); ts->status = ISCSI_THREAD_SET_DIE; spin_unlock_bh(&ts->ts_state_lock); if (ts->rx_thread) { send_sig(SIGINT, ts->rx_thread, 1); kthread_stop(ts->rx_thread); } if 
(ts->tx_thread) { send_sig(SIGINT, ts->tx_thread, 1); kthread_stop(ts->tx_thread); } /* * Release this thread_id in the thread_set_bitmap */ spin_lock(&ts_bitmap_lock); bitmap_release_region(iscsit_global->ts_bitmap, ts->thread_id, get_order(1)); spin_unlock(&ts_bitmap_lock); released_count++; kfree(ts); } if (released_count) pr_debug("Stopped %d thread set(s) (%d total threads)." "\n", released_count, released_count * 2); } static void iscsi_deallocate_extra_thread_sets(void) { u32 orig_count, released_count = 0; struct iscsi_thread_set *ts = NULL; orig_count = TARGET_THREAD_SET_COUNT; while ((iscsit_global->inactive_ts + 1) > orig_count) { ts = iscsi_get_ts_from_inactive_list(); if (!ts) break; spin_lock_bh(&ts->ts_state_lock); ts->status = ISCSI_THREAD_SET_DIE; spin_unlock_bh(&ts->ts_state_lock); if (ts->rx_thread) { send_sig(SIGINT, ts->rx_thread, 1); kthread_stop(ts->rx_thread); } if (ts->tx_thread) { send_sig(SIGINT, ts->tx_thread, 1); kthread_stop(ts->tx_thread); } /* * Release this thread_id in the thread_set_bitmap */ spin_lock(&ts_bitmap_lock); bitmap_release_region(iscsit_global->ts_bitmap, ts->thread_id, get_order(1)); spin_unlock(&ts_bitmap_lock); released_count++; kfree(ts); } if (released_count) { pr_debug("Stopped %d thread set(s) (%d total threads)." "\n", released_count, released_count * 2); } } void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts) { spin_lock_bh(&ts->ts_state_lock); conn->thread_set = ts; ts->conn = conn; spin_unlock_bh(&ts->ts_state_lock); /* * Start up the RX thread and wait on rx_post_start_comp. The RX * Thread will then do the same for the TX Thread in * iscsi_rx_thread_pre_handler(). 
*/ complete(&ts->rx_start_comp); wait_for_completion(&ts->rx_post_start_comp); } struct iscsi_thread_set *iscsi_get_thread_set(void) { int allocate_ts = 0; struct completion comp; struct iscsi_thread_set *ts = NULL; /* * If no inactive thread set is available on the first call to * iscsi_get_ts_from_inactive_list(), sleep for a second and * try again. If still none are available after two attempts, * allocate a set ourselves. */ get_set: ts = iscsi_get_ts_from_inactive_list(); if (!ts) { if (allocate_ts == 2) iscsi_allocate_thread_sets(1); init_completion(&comp); wait_for_completion_timeout(&comp, 1 * HZ); allocate_ts++; goto get_set; } ts->delay_inactive = 1; ts->signal_sent = 0; ts->thread_count = 2; init_completion(&ts->rx_restart_comp); init_completion(&ts->tx_restart_comp); return ts; } void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear) { struct iscsi_thread_set *ts = NULL; if (!conn->thread_set) { pr_err("struct iscsi_conn->thread_set is NULL\n"); return; } ts = conn->thread_set; spin_lock_bh(&ts->ts_state_lock); ts->thread_clear &= ~thread_clear; if ((thread_clear & ISCSI_CLEAR_RX_THREAD) && (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD)) complete(&ts->rx_restart_comp); else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) && (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD)) complete(&ts->tx_restart_comp); spin_unlock_bh(&ts->ts_state_lock); } void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent) { struct iscsi_thread_set *ts = NULL; if (!conn->thread_set) { pr_err("struct iscsi_conn->thread_set is NULL\n"); return; } ts = conn->thread_set; spin_lock_bh(&ts->ts_state_lock); ts->signal_sent |= signal_sent; spin_unlock_bh(&ts->ts_state_lock); } int iscsi_release_thread_set(struct iscsi_conn *conn) { int thread_called = 0; struct iscsi_thread_set *ts = NULL; if (!conn || !conn->thread_set) { pr_err("connection or thread set pointer is NULL\n"); BUG(); } ts = conn->thread_set; spin_lock_bh(&ts->ts_state_lock); ts->status = 
ISCSI_THREAD_SET_RESET; if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME, strlen(ISCSI_RX_THREAD_NAME))) thread_called = ISCSI_RX_THREAD; else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME, strlen(ISCSI_TX_THREAD_NAME))) thread_called = ISCSI_TX_THREAD; if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) && (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) { if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) { send_sig(SIGINT, ts->rx_thread, 1); ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD; } ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD; spin_unlock_bh(&ts->ts_state_lock); wait_for_completion(&ts->rx_restart_comp); spin_lock_bh(&ts->ts_state_lock); ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD; } if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) && (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) { if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) { send_sig(SIGINT, ts->tx_thread, 1); ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD; } ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD; spin_unlock_bh(&ts->ts_state_lock); wait_for_completion(&ts->tx_restart_comp); spin_lock_bh(&ts->ts_state_lock); ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD; } ts->conn = NULL; ts->status = ISCSI_THREAD_SET_FREE; spin_unlock_bh(&ts->ts_state_lock); return 0; } int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn) { struct iscsi_thread_set *ts; if (!conn->thread_set) return -1; ts = conn->thread_set; spin_lock_bh(&ts->ts_state_lock); if (ts->status != ISCSI_THREAD_SET_ACTIVE) { spin_unlock_bh(&ts->ts_state_lock); return -1; } if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) { send_sig(SIGINT, ts->tx_thread, 1); ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD; } if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) { send_sig(SIGINT, ts->rx_thread, 1); ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD; } spin_unlock_bh(&ts->ts_state_lock); return 0; } static void iscsi_check_to_add_additional_sets(void) { int thread_sets_add; spin_lock(&inactive_ts_lock); 
thread_sets_add = iscsit_global->inactive_ts; spin_unlock(&inactive_ts_lock); if (thread_sets_add == 1) iscsi_allocate_thread_sets(1); } static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts) { spin_lock_bh(&ts->ts_state_lock); if ((ts->status == ISCSI_THREAD_SET_DIE) || signal_pending(current)) { spin_unlock_bh(&ts->ts_state_lock); return -1; } spin_unlock_bh(&ts->ts_state_lock); return 0; } struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts) { int ret; spin_lock_bh(&ts->ts_state_lock); if (ts->create_threads) { spin_unlock_bh(&ts->ts_state_lock); goto sleep; } flush_signals(current); if (ts->delay_inactive && (--ts->thread_count == 0)) { spin_unlock_bh(&ts->ts_state_lock); if (!iscsit_global->in_shutdown) iscsi_deallocate_extra_thread_sets(); iscsi_add_ts_to_inactive_list(ts); spin_lock_bh(&ts->ts_state_lock); } if ((ts->status == ISCSI_THREAD_SET_RESET) && (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) complete(&ts->rx_restart_comp); ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD; spin_unlock_bh(&ts->ts_state_lock); sleep: ret = wait_for_completion_interruptible(&ts->rx_start_comp); if (ret != 0) return NULL; if (iscsi_signal_thread_pre_handler(ts) < 0) return NULL; if (!ts->conn) { pr_err("struct iscsi_thread_set->conn is NULL for" " thread_id: %d, going back to sleep\n", ts->thread_id); goto sleep; } iscsi_check_to_add_additional_sets(); /* * The RX Thread starts up the TX Thread and sleeps. 
*/ ts->thread_clear |= ISCSI_CLEAR_RX_THREAD; complete(&ts->tx_start_comp); wait_for_completion(&ts->tx_post_start_comp); return ts->conn; } struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts) { int ret; spin_lock_bh(&ts->ts_state_lock); if (ts->create_threads) { spin_unlock_bh(&ts->ts_state_lock); goto sleep; } flush_signals(current); if (ts->delay_inactive && (--ts->thread_count == 0)) { spin_unlock_bh(&ts->ts_state_lock); if (!iscsit_global->in_shutdown) iscsi_deallocate_extra_thread_sets(); iscsi_add_ts_to_inactive_list(ts); spin_lock_bh(&ts->ts_state_lock); } if ((ts->status == ISCSI_THREAD_SET_RESET) && (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) complete(&ts->tx_restart_comp); ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD; spin_unlock_bh(&ts->ts_state_lock); sleep: ret = wait_for_completion_interruptible(&ts->tx_start_comp); if (ret != 0) return NULL; if (iscsi_signal_thread_pre_handler(ts) < 0) return NULL; if (!ts->conn) { pr_err("struct iscsi_thread_set->conn is NULL for " " thread_id: %d, going back to sleep\n", ts->thread_id); goto sleep; } iscsi_check_to_add_additional_sets(); /* * From the TX thread, up the tx_post_start_comp that the RX Thread is * sleeping on in iscsi_rx_thread_pre_handler(), then up the * rx_post_start_comp that iscsi_activate_thread_set() is sleeping on. */ ts->thread_clear |= ISCSI_CLEAR_TX_THREAD; complete(&ts->tx_post_start_comp); complete(&ts->rx_post_start_comp); spin_lock_bh(&ts->ts_state_lock); ts->status = ISCSI_THREAD_SET_ACTIVE; spin_unlock_bh(&ts->ts_state_lock); return ts->conn; } int iscsi_thread_set_init(void) { int size; iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS; size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long); iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL); if (!iscsit_global->ts_bitmap) { pr_err("Unable to allocate iscsit_global->ts_bitmap\n"); return -ENOMEM; } return 0; } void iscsi_thread_set_free(void) { kfree(iscsit_global->ts_bitmap); }
b658d1a652fcaf571fc9c429bf77d5e73b9747c0
ce99bd11ca505967277f4689c621479c1987698e
/src/game/bondinv.h
3fe8dea9f09d7f5b620e69a17a689481089eb583
[]
no_license
n64decomp/007
5951258890f15431f273e1503674c5e0402c66e0
c46751089ddc18b12ef7a45b6a3e03de2054c422
refs/heads/master
2022-11-08T23:34:54.021033
2022-10-29T14:41:01
2022-10-29T14:41:01
241,212,109
359
48
null
2020-11-21T23:30:31
2020-02-17T21:31:00
C
UTF-8
C
false
false
982
h
bondinv.h
#ifndef _BONDINV_H_ #define _BONDINV_H_ #include <ultra64.h> #include <bondconstants.h> #include "bondview.h" #include "bondtypes.h" void bondinvReinitInv(void); s32 bondinvIsAliveWithFlag(void); s32 bondinvCountTotalItemsInInv(void); InvItem *bondinvGetItemByIndex(s32 index); textoverride *bondinvGetTextbyObj(ObjectRecord *obj); textoverride *bondinvGetTextbyWeaponID(ITEM_IDS weaponnum); void bondinvCycleBackward(s32 *nextright, s32 *nextleft, s32 requireammo); void bondinvCycleForward(s32 *nextright, s32 *nextleft, s32 requireammo); int bondinvHasGoldenGun(void); int bondinvAddInvItem(ITEM_IDS item); int bondinvAddDoublesInvItem(ITEM_IDS right, ITEM_IDS left); s32 bondinvGetAllGunsFlag(void); void bondinvSetAllGunsFlag(s32 all_guns); bool bondinvHasPropInInv(PropRecord *prop); WeaponObjRecord *bondinvRemovePropWeaponByID(ITEM_IDS weaponnum); void bondinvRemoveItemByID(ITEM_IDS weaponnum); s32 bondinvGetWeaponOfChoice(s32 *weapon1, s32 *weapon2); #endif
244069fb86ae30b5b85791817fccec5915977790
0fe1be920a7c4c62d053ce90bacc5d9362e79e06
/third_party/libfort/tests/tests.c
a21ba5a57c2a146cfe53f50e355daf131ea9cc17
[ "LicenseRef-scancode-free-unknown", "MIT" ]
permissive
cmu-db/bustub
bf572804f00e89c4799f9be92876673d9fa6694f
df2976dfd895ae0f646260373bc8c35c171ae8a4
refs/heads/master
2023-08-15T21:57:03.606720
2023-08-11T14:38:51
2023-08-11T14:38:51
207,663,235
3,003
2,724
MIT
2023-09-11T02:37:52
2019-09-10T21:06:59
C++
UTF-8
C
false
false
587
c
tests.c
#include "tests.h" void run_test_suite(const char *test_suite_name, int n_tests, struct test_case test_suite[]) { fprintf(stderr, " == RUNNING %s ==\n", test_suite_name); fprintf(stderr, "[==========] Running %d test(s).\n", n_tests); int i; for (i = 0; i < n_tests; ++i) { fprintf(stderr, "[ RUN ] %s\n", test_suite[i].name); test_suite[i].test(); fprintf(stderr, "[ OK ] %s\n", test_suite[i].name); } fprintf(stderr, "[==========] %d test(s) run.\n", n_tests); fprintf(stderr, "[ PASSED ] %d test(s).\n", n_tests); }
a7120e6d6fa07c1f2f84cb4bb3b2e5f4871032dc
8ef75ec298e38373a3b9b212eac2762f6c452a25
/SDL2_mixer/external/mpg123-1.25.6/src/libout123/xfermem.c
9e235a40245e879cb86da98733890190504d0253
[ "MIT", "LGPL-2.1-only", "LGPL-2.0-or-later", "GPL-1.0-or-later", "GPL-2.0-only", "Zlib" ]
permissive
mozeal/SDL_gui
dc0d529ba300df41ed0fcb26ec3e176a49cfb316
31bcfbf9c9a5803dc0ffb022b7e7abd76e5c3cfd
refs/heads/master
2022-05-05T06:18:51.640215
2022-04-01T11:23:12
2022-04-01T11:23:12
48,752,765
321
67
MIT
2021-04-21T14:38:23
2015-12-29T15:06:32
C
UTF-8
C
false
false
7,756
c
xfermem.c
/* xfermem: unidirectional fast pipe copyright ?-2015 by the mpg123 project - free software under the terms of the LGPL 2.1 see COPYING and AUTHORS files in distribution or http://mpg123.org initially written by Oliver Fromme old timestamp: Sun Apr 6 02:26:26 MET DST 1997 See xfermem.h for documentation/description. */ #include "config.h" #include "compat.h" #include "xfermem.h" #include <string.h> #include <errno.h> #include <sys/uio.h> #include <sys/mman.h> #include <sys/socket.h> #include <fcntl.h> #ifndef HAVE_MMAP #include <sys/ipc.h> #include <sys/shm.h> #endif #include "debug.h" #if defined (HAVE_MMAP) && defined(MAP_ANONYMOUS) && !defined(MAP_ANON) #define MAP_ANON MAP_ANONYMOUS #endif void xfermem_init (txfermem **xf, size_t bufsize, size_t msize, size_t skipbuf) { size_t regsize = bufsize + msize + skipbuf + sizeof(txfermem); #ifdef HAVE_MMAP # ifdef MAP_ANON if ((*xf = (txfermem *) mmap(0, regsize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED, -1, 0)) == (txfermem *) -1) { perror ("mmap()"); exit (1); } # else int devzero; if ((devzero = open("/dev/zero", O_RDWR, 0)) == -1) { perror ("open(/dev/zero)"); exit (1); } if ((*xf = (txfermem *) mmap(0, regsize, PROT_READ | PROT_WRITE, MAP_SHARED, devzero, 0)) == (txfermem *) -1) { perror ("mmap()"); exit (1); } close (devzero); # endif #else struct shmid_ds shmemds; int shmemid; if ((shmemid = shmget(IPC_PRIVATE, regsize, IPC_CREAT | 0600)) == -1) { perror ("shmget()"); exit (1); } if ((*xf = (txfermem *) shmat(shmemid, 0, 0)) == (txfermem *) -1) { perror ("shmat()"); shmctl (shmemid, IPC_RMID, &shmemds); exit (1); } if (shmctl(shmemid, IPC_RMID, &shmemds) == -1) { perror ("shmctl()"); xfermem_done (*xf); exit (1); } #endif if (socketpair(AF_UNIX, SOCK_STREAM, 0, (*xf)->fd) < 0) { perror ("socketpair()"); xfermem_done (*xf); exit (1); } (*xf)->freeindex = (*xf)->readindex = 0; (*xf)->data = ((char *) *xf) + sizeof(txfermem) + msize; (*xf)->metadata = ((char *) *xf) + sizeof(txfermem); (*xf)->size = bufsize; 
(*xf)->metasize = msize + skipbuf; } void xfermem_done (txfermem *xf) { if(!xf) return; #ifdef HAVE_MMAP /* Here was a cast to (caddr_t) ... why? Was this needed for SunOS? Casting to (void*) should silence compilers in case of funny prototype for munmap(). */ munmap ( (void*)xf, xf->size + xf->metasize + sizeof(txfermem)); #else if (shmdt((void *) xf) == -1) { perror ("shmdt()"); exit (1); } #endif } void xfermem_init_writer (txfermem *xf) { if(xf) close (xf->fd[XF_READER]); debug1("xfermem writer fd=%i", xf->fd[XF_WRITER]); } void xfermem_init_reader (txfermem *xf) { if(xf) close (xf->fd[XF_WRITER]); debug1("xfermem reader fd=%i", xf->fd[XF_READER]); } size_t xfermem_get_freespace (txfermem *xf) { size_t freeindex, readindex; if(!xf) return 0; if ((freeindex = xf->freeindex) < 0 || (readindex = xf->readindex) < 0) return (0); if (readindex > freeindex) return ((readindex - freeindex) - 1); else return ((xf->size - (freeindex - readindex)) - 1); } size_t xfermem_get_usedspace (txfermem *xf) { size_t freeindex, readindex; if(!xf) return 0; if ((freeindex = xf->freeindex) < 0 || (readindex = xf->readindex) < 0) return (0); if (freeindex >= readindex) return (freeindex - readindex); else return (xf->size - (readindex - freeindex)); } static int xfermem_getcmd_raw (int fd, int block, byte *cmds, int count) { fd_set selfds; int ret; for (;;) { struct timeval selto = {0, 0}; FD_ZERO (&selfds); FD_SET (fd, &selfds); #ifdef HPUX switch (select(FD_SETSIZE, (int *) &selfds, NULL, NULL, block ? NULL : &selto)) #else switch (select(FD_SETSIZE, &selfds, NULL, NULL, block ? NULL : &selto)) #endif { case 0: if (!block) return (0); continue; case -1: if (errno == EINTR) continue; return (-2); case 1: if (FD_ISSET(fd, &selfds)) switch((ret=read(fd, cmds, count))) { case 0: /* EOF */ return (-1); case -1: if (errno == EINTR) continue; return (-3); default: return ret; } else /* ?!? */ return (-5); default: /* ?!? */ return (-6); } } } /* Verbose variant for debugging communication. 
*/ int xfermem_getcmd(int fd, int block) { byte cmd; int res = xfermem_getcmd_raw(fd, block, &cmd, 1); debug3("xfermem_getcmd(%i, %i) = %i", fd, block, res == 1 ? cmd : res); return res == 1 ? cmd : res; } int xfermem_getcmds(int fd, int block, byte *cmds, int count) { int res = xfermem_getcmd_raw(fd, block, cmds, count); debug5("xfermem_getcmds(%i, %i, %p, %i) = %i" , fd, block, (void*)cmds, count , res); return res; } int xfermem_putcmd (int fd, byte cmd) { for (;;) { switch (write(fd, &cmd, 1)) { case 1: debug2("xfermem_putcmd(%i, %i) = 1", fd, cmd); return (1); case -1: if (errno != EINTR) { debug3("xfermem_putcmd(%i, %i) = -1 (%s)" , fd, cmd, strerror(errno)); return (-1); } } } } /* There is a basic assumetry between reader and writer: The reader does work in periodic pieces and can be relied upon to eventually answer a call. It is important that it does not block for a significant duration unless it has really nothing to do. The writer is more undefined in its behaviour, it is controlled by external agents. You cannot rely on it answering synchronization requests in a timely manner. But on the other hand, it can be left hanging for a while. The critical side is that of the reader. Because of that, it is only sensible to provide a voluntary xfermem_writer_block() here. The reader does not need such a function. Only if it has nothing else to do, it will simply block on xfermem_getcmd(), and the writer promises to xfermem_putcmd() when something happens. The writer always sends a wakeup command to the reader since the latter could be in the process of putting itself to sleep right now, without a flag indicating so being set yet. The reader periodically reads from its file descriptor so that it does not get clogged up with pending messages. It will only (and always) send a wakeup call in response to a received command. */ /* Wait a bit to get a sign of life from the reader. Returns -1 if even that did not work. 
*/ int xfermem_writer_block(txfermem *xf) { int myfd = xf->fd[XF_WRITER]; int result; xfermem_putcmd(myfd, XF_CMD_PING); result = xfermem_getcmd(myfd, TRUE); /* Only a pong to my ping is the expected good answer. Everything else is a problem to be communicated. */ return (result == XF_CMD_PONG) ? 0 : result; } /* Return: 0 on success, -1 on communication error, > 0 for error on buffer side, some special return code from buffer to be evaluated. */ int xfermem_write(txfermem *xf, void *buffer, size_t bytes) { if(buffer == NULL || bytes < 1) return 0; /* You weren't so braindead not allocating enough space at all, right? */ while (xfermem_get_freespace(xf) < bytes) { int cmd = xfermem_writer_block(xf); if(cmd) /* Non-successful wait. */ return cmd; } /* Now we have enough space. copy the memory, possibly with the wrap. */ if(xf->size - xf->freeindex >= bytes) { /* one block of free memory */ memcpy(xf->data+xf->freeindex, buffer, bytes); } else { /* two blocks */ size_t endblock = xf->size - xf->freeindex; memcpy(xf->data+xf->freeindex, buffer, endblock); memcpy(xf->data, (char*)buffer + endblock, bytes-endblock); } /* Advance the free space pointer, including the wrap. */ xf->freeindex = (xf->freeindex + bytes) % xf->size; /* Always notify the buffer process. */ debug("write waking"); return xfermem_putcmd(xf->fd[XF_WRITER], XF_CMD_DATA) < 0 ? -1 : 0; }
0575a785ea82771fee161a9e6306ba49066f2d28
1f399edf85d995443d01f66d77eca0723886d0ff
/devicemodel/hw/block_if.c
d918a4220ecf0bb88776374cf2a849431e739333
[ "BSD-3-Clause" ]
permissive
projectacrn/acrn-hypervisor
f9c5864d54929a5d2fa36b5e78c08f19b46b8f98
390740aa1b1e9d62c51f8e3afa0c29e07e43fa23
refs/heads/master
2023-08-18T05:07:01.310327
2023-08-11T07:49:36
2023-08-16T13:20:27
123,983,554
1,059
686
BSD-3-Clause
2023-09-14T09:51:10
2018-03-05T21:52:25
C
UTF-8
C
false
false
22,964
c
block_if.c
/*- * Copyright (c) 2013 Peter Grehan <grehan@freebsd.org> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include <sys/param.h> #include <sys/queue.h> #include <sys/stat.h> #include <sys/ioctl.h> #include <linux/falloc.h> #include <linux/fs.h> #include <errno.h> #include <err.h> #include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include <signal.h> #include <unistd.h> #include "dm.h" #include "block_if.h" #include "ahci.h" #include "dm_string.h" #include "log.h" /* * Notes: * The F_OFD_SETLK support is introduced in glibc 2.20. * The glibc version on target board is above 2.20. * The following code temporarily fixes up building issues on Ubuntu 14.04, * where the glibc version is 2.19 by default. 
* Theoretically we should use cross-compiling tool to compile applications. */ #ifndef F_OFD_SETLK #define F_OFD_SETLK 37 #endif #define BLOCKIF_SIG 0xb109b109 #define BLOCKIF_NUMTHR 8 #define BLOCKIF_MAXREQ (64 + BLOCKIF_NUMTHR) #define MAX_DISCARD_SEGMENT 256 /* * Debug printf */ static int block_if_debug; #define DPRINTF(params) do { if (block_if_debug) pr_dbg params; } while (0) #define WPRINTF(params) (pr_err params) enum blockop { BOP_READ, BOP_WRITE, BOP_FLUSH, BOP_DISCARD }; enum blockstat { BST_FREE, BST_BLOCK, BST_PEND, BST_BUSY, BST_DONE }; struct blockif_elem { TAILQ_ENTRY(blockif_elem) link; struct blockif_req *req; enum blockop op; enum blockstat status; pthread_t tid; off_t block; }; struct blockif_ctxt { int fd; int isblk; int candiscard; int rdonly; off_t size; int sub_file_assign; off_t sub_file_start_lba; struct flock fl; int sectsz; int psectsz; int psectoff; int max_discard_sectors; int max_discard_seg; int discard_sector_alignment; int closing; pthread_t btid[BLOCKIF_NUMTHR]; pthread_mutex_t mtx; pthread_cond_t cond; /* Request elements and free/pending/busy queues */ TAILQ_HEAD(, blockif_elem) freeq; TAILQ_HEAD(, blockif_elem) pendq; TAILQ_HEAD(, blockif_elem) busyq; struct blockif_elem reqs[BLOCKIF_MAXREQ]; /* write cache enable */ uint8_t wce; }; static pthread_once_t blockif_once = PTHREAD_ONCE_INIT; struct blockif_sig_elem { pthread_mutex_t mtx; pthread_cond_t cond; int pending; struct blockif_sig_elem *next; }; struct discard_range { uint64_t sector; uint32_t num_sectors; uint32_t flags; }; static struct blockif_sig_elem *blockif_bse_head; static int blockif_flush_cache(struct blockif_ctxt *bc) { int err; err = 0; if (!bc->wce) { if (fsync(bc->fd)) err = errno; } return err; } static int blockif_enqueue(struct blockif_ctxt *bc, struct blockif_req *breq, enum blockop op) { struct blockif_elem *be, *tbe; off_t off; int i; be = TAILQ_FIRST(&bc->freeq); if (be == NULL || be->status != BST_FREE) { WPRINTF(("%s: failed to get element from 
freeq\n", __func__)); return 0; } TAILQ_REMOVE(&bc->freeq, be, link); be->req = breq; be->op = op; switch (op) { case BOP_READ: case BOP_WRITE: case BOP_DISCARD: off = breq->offset; for (i = 0; i < breq->iovcnt; i++) off += breq->iov[i].iov_len; break; default: /* off = OFF_MAX; */ off = 1 << (sizeof(off_t) - 1); } be->block = off; TAILQ_FOREACH(tbe, &bc->pendq, link) { if (tbe->block == breq->offset) break; } if (tbe == NULL) { TAILQ_FOREACH(tbe, &bc->busyq, link) { if (tbe->block == breq->offset) break; } } if (tbe == NULL) be->status = BST_PEND; else be->status = BST_BLOCK; TAILQ_INSERT_TAIL(&bc->pendq, be, link); return (be->status == BST_PEND); } static int blockif_dequeue(struct blockif_ctxt *bc, pthread_t t, struct blockif_elem **bep) { struct blockif_elem *be; TAILQ_FOREACH(be, &bc->pendq, link) { if (be->status == BST_PEND) break; } if (be == NULL) return 0; TAILQ_REMOVE(&bc->pendq, be, link); be->status = BST_BUSY; be->tid = t; TAILQ_INSERT_TAIL(&bc->busyq, be, link); *bep = be; return 1; } static void blockif_complete(struct blockif_ctxt *bc, struct blockif_elem *be) { struct blockif_elem *tbe; if (be->status == BST_DONE || be->status == BST_BUSY) TAILQ_REMOVE(&bc->busyq, be, link); else TAILQ_REMOVE(&bc->pendq, be, link); TAILQ_FOREACH(tbe, &bc->pendq, link) { if (tbe->req->offset == be->block) tbe->status = BST_PEND; } be->tid = 0; be->status = BST_FREE; be->req = NULL; TAILQ_INSERT_TAIL(&bc->freeq, be, link); } static int discard_range_validate(struct blockif_ctxt *bc, off_t start, off_t size) { off_t start_sector = start / DEV_BSIZE; off_t size_sector = size / DEV_BSIZE; if (!size || (start + size) > (bc->size + bc->sub_file_start_lba)) return -1; if ((size_sector > bc->max_discard_sectors) || (bc->discard_sector_alignment && start_sector % bc->discard_sector_alignment)) return -1; return 0; } static int blockif_process_discard(struct blockif_ctxt *bc, struct blockif_req *br) { int err; struct discard_range *range; int n_range, i, segment; off_t 
arg[MAX_DISCARD_SEGMENT][2]; err = 0; n_range = 0; segment = 0; if (!bc->candiscard) return EOPNOTSUPP; if (bc->rdonly) return EROFS; if (br->iovcnt == 1) { /* virtio-blk use iov to transfer discard range */ n_range = br->iov[0].iov_len/sizeof(*range); range = br->iov[0].iov_base; for (i = 0; i < n_range; i++) { arg[i][0] = range[i].sector * DEV_BSIZE + bc->sub_file_start_lba; arg[i][1] = range[i].num_sectors * DEV_BSIZE; segment++; if (segment > bc->max_discard_seg) { WPRINTF(("segment > max_discard_seg\n")); return EINVAL; } if (discard_range_validate(bc, arg[i][0], arg[i][1])) { WPRINTF(("range [%ld: %ld] is invalid\n", arg[i][0], arg[i][1])); return EINVAL; } } } else { /* ahci parse discard range to br->offset and br->reside */ arg[0][0] = br->offset + bc->sub_file_start_lba; arg[0][1] = br->resid; segment = 1; } for (i = 0; i < segment; i++) { if (bc->isblk) { err = ioctl(bc->fd, BLKDISCARD, arg[i]); } else { /* FALLOC_FL_PUNCH_HOLE: * Deallocates space in the byte range starting at offset and * continuing for length bytes. After a successful call, * subsequent reads from this range will return zeroes. * FALLOC_FL_KEEP_SIZE: * Do not modify the apparent length of the file. 
*/ err = fallocate(bc->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, arg[i][0], arg[i][1]); if (!err) err = fdatasync(bc->fd); } if (err) { WPRINTF(("Failed to discard offset=%ld nbytes=%ld err code: %d\n", arg[i][0], arg[i][1], err)); return err; } } br->resid = 0; return 0; } static void blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be) { struct blockif_req *br; ssize_t len; int err; br = be->req; err = 0; switch (be->op) { case BOP_READ: len = preadv(bc->fd, br->iov, br->iovcnt, br->offset + bc->sub_file_start_lba); if (len < 0) err = errno; else br->resid -= len; break; case BOP_WRITE: if (bc->rdonly) { err = EROFS; break; } len = pwritev(bc->fd, br->iov, br->iovcnt, br->offset + bc->sub_file_start_lba); if (len < 0) err = errno; else { br->resid -= len; err = blockif_flush_cache(bc); } break; case BOP_FLUSH: if (fsync(bc->fd)) err = errno; break; case BOP_DISCARD: err = blockif_process_discard(bc, br); break; default: err = EINVAL; break; } be->status = BST_DONE; (*br->callback)(br, err); } static void * blockif_thr(void *arg) { struct blockif_ctxt *bc; struct blockif_elem *be; pthread_t t; bc = arg; t = pthread_self(); pthread_mutex_lock(&bc->mtx); for (;;) { while (blockif_dequeue(bc, t, &be)) { pthread_mutex_unlock(&bc->mtx); blockif_proc(bc, be); pthread_mutex_lock(&bc->mtx); blockif_complete(bc, be); } /* Check ctxt status here to see if exit requested */ if (bc->closing) break; pthread_cond_wait(&bc->cond, &bc->mtx); } pthread_mutex_unlock(&bc->mtx); pthread_exit(NULL); return NULL; } static void blockif_sigcont_handler(int signal) { struct blockif_sig_elem *bse; WPRINTF(("block_if sigcont handler!\n")); for (;;) { /* * Process the entire list even if not intended for * this thread. 
*/ do { bse = blockif_bse_head; if (bse == NULL) return; } while (!__sync_bool_compare_and_swap( (uintptr_t *)&blockif_bse_head, (uintptr_t)bse, (uintptr_t)bse->next)); pthread_mutex_lock(&bse->mtx); bse->pending = 0; pthread_cond_signal(&bse->cond); pthread_mutex_unlock(&bse->mtx); } } static void blockif_init(void) { signal(SIGCONT, blockif_sigcont_handler); } /* * This function checks if the sub file range, specified by sub_start and * sub_size, has any overlap with other sub file ranges with write access. */ static int sub_file_validate(struct blockif_ctxt *bc, int fd, int read_only, off_t sub_start, off_t sub_size) { struct flock *fl = &bc->fl; memset(fl, 0, sizeof(struct flock)); fl->l_whence = SEEK_SET; /* offset base is start of file */ if (read_only) fl->l_type = F_RDLCK; else fl->l_type = F_WRLCK; fl->l_start = sub_start; fl->l_len = sub_size; /* use "open file description locks" to validate */ if (fcntl(fd, F_OFD_SETLK, fl) == -1) { DPRINTF(("failed to lock subfile!\n")); return -1; } /* Keep file lock on to prevent other sub files, until DM exits */ return 0; } void sub_file_unlock(struct blockif_ctxt *bc) { struct flock *fl; if (bc->sub_file_assign) { fl = &bc->fl; DPRINTF(("blockif: release file lock...\n")); fl->l_type = F_UNLCK; if (fcntl(bc->fd, F_OFD_SETLK, fl) == -1) { pr_err("blockif: failed to unlock subfile!\n"); exit(1); } DPRINTF(("blockif: release done\n")); } } struct blockif_ctxt * blockif_open(const char *optstr, const char *ident) { char tname[MAXCOMLEN + 1]; /* char name[MAXPATHLEN]; */ char *nopt, *xopts, *cp; struct blockif_ctxt *bc; struct stat sbuf; /* struct diocgattr_arg arg; */ off_t size, psectsz, psectoff; int fd, i, sectsz; int writeback, ro, candiscard, ssopt, pssopt; long sz; long long b; int err_code = -1; off_t sub_file_start_lba, sub_file_size; int sub_file_assign; int max_discard_sectors, max_discard_seg, discard_sector_alignment; off_t probe_arg[] = {0, 0}; pthread_once(&blockif_once, blockif_init); fd = -1; ssopt = 0; 
pssopt = 0; ro = 0; sub_file_assign = 0; sub_file_start_lba = 0; sub_file_size = 0; max_discard_sectors = -1; max_discard_seg = -1; discard_sector_alignment = -1; /* writethru is on by default */ writeback = 0; candiscard = 0; /* * The first element in the optstring is always a pathname. * Optional elements follow */ nopt = xopts = strdup(optstr); if (!nopt) { WPRINTF(("block_if.c: strdup retruns NULL\n")); return NULL; } while (xopts != NULL) { cp = strsep(&xopts, ","); if (cp == nopt) /* file or device pathname */ continue; else if (!strcmp(cp, "writeback")) writeback = 1; else if (!strcmp(cp, "writethru")) writeback = 0; else if (!strcmp(cp, "ro")) ro = 1; else if (!strncmp(cp, "discard", strlen("discard"))) { strsep(&cp, "="); if (cp != NULL) { if (!(!dm_strtoi(cp, &cp, 10, &max_discard_sectors) && *cp == ':' && !dm_strtoi(cp + 1, &cp, 10, &max_discard_seg) && *cp == ':' && !dm_strtoi(cp + 1, &cp, 10, &discard_sector_alignment))) goto err; } candiscard = 1; } else if (!strncmp(cp, "sectorsize", strlen("sectorsize"))) { /* * sectorsize=<sector size> * or * sectorsize=<sector size>/<physical sector size> */ if (strsep(&cp, "=") && !dm_strtoi(cp, &cp, 10, &ssopt)) { pssopt = ssopt; if (*cp == '/' && dm_strtoi(cp + 1, &cp, 10, &pssopt) < 0) goto err; } else { goto err; } } else if (!strncmp(cp, "range", strlen("range"))) { /* range=<start lba>/<subfile size> */ if (strsep(&cp, "=") && !dm_strtol(cp, &cp, 10, &sub_file_start_lba) && *cp == '/' && !dm_strtol(cp + 1, &cp, 10, &sub_file_size)) sub_file_assign = 1; else goto err; } else { pr_err("Invalid device option \"%s\"\n", cp); goto err; } } /* * To support "writeback" and "writethru" mode switch during runtime, * O_SYNC is not used directly, as O_SYNC flag cannot dynamic change * after file is opened. Instead, we call fsync() after each write * operation to emulate it. */ fd = open(nopt, ro ? 
O_RDONLY : O_RDWR); if (fd < 0 && !ro) { /* Attempt a r/w fail with a r/o open */ fd = open(nopt, O_RDONLY); ro = 1; } if (fd < 0) { pr_err("Could not open backing file: %s", nopt); goto err; } if (fstat(fd, &sbuf) < 0) { pr_err("Could not stat backing file %s", nopt); goto err; } /* * Deal with raw devices */ size = sbuf.st_size; sectsz = DEV_BSIZE; psectsz = psectoff = 0; if (S_ISBLK(sbuf.st_mode)) { /* get size */ err_code = ioctl(fd, BLKGETSIZE, &sz); if (err_code) { pr_err("error %d getting block size!\n", err_code); size = sbuf.st_size; /* set default value */ } else { size = sz * DEV_BSIZE; /* DEV_BSIZE is 512 on Linux */ } if (!err_code || err_code == EFBIG) { err_code = ioctl(fd, BLKGETSIZE64, &b); if (err_code || b == 0 || b == sz) size = b * DEV_BSIZE; else size = b; } DPRINTF(("block partition size is 0x%lx\n", size)); /* get sector size, 512 on Linux */ sectsz = DEV_BSIZE; DPRINTF(("block partition sector size is 0x%x\n", sectsz)); /* get physical sector size */ err_code = ioctl(fd, BLKPBSZGET, &psectsz); if (err_code) { pr_err("error %d getting physical sectsz!\n", err_code); psectsz = DEV_BSIZE; /* set default physical size */ } DPRINTF(("block partition physical sector size is 0x%lx\n", psectsz)); if (candiscard) { err_code = ioctl(fd, BLKDISCARD, probe_arg); if (err_code) { WPRINTF(("not support DISCARD\n")); candiscard = 0; } } } else { if (size < DEV_BSIZE || (size & (DEV_BSIZE - 1))) { WPRINTF(("%s size not corret, should be multiple of %d\n", nopt, DEV_BSIZE)); goto err; } psectsz = sbuf.st_blksize; } if (ssopt != 0) { if (!powerof2(ssopt) || !powerof2(pssopt) || ssopt < 512 || ssopt > pssopt) { pr_err("Invalid sector size %d/%d\n", ssopt, pssopt); goto err; } /* * Some backend drivers (e.g. cd0, ada0) require that the I/O * size be a multiple of the device's sector size. * * Validate that the emulated sector size complies with this * requirement. 
*/ if (S_ISCHR(sbuf.st_mode)) { if (ssopt < sectsz || (ssopt % sectsz) != 0) { pr_err("Sector size %d incompatible with underlying device sector size %d\n", ssopt, sectsz); goto err; } } sectsz = ssopt; psectsz = pssopt; psectoff = 0; } bc = calloc(1, sizeof(struct blockif_ctxt)); if (bc == NULL) { pr_err("calloc"); goto err; } if (sub_file_assign) { DPRINTF(("sector size is %d\n", sectsz)); bc->sub_file_assign = 1; bc->sub_file_start_lba = sub_file_start_lba * sectsz; size = sub_file_size * sectsz; DPRINTF(("Validating sub file...\n")); err_code = sub_file_validate(bc, fd, ro, bc->sub_file_start_lba, size); if (err_code < 0) { pr_err("subfile range specified not valid!\n"); exit(1); } DPRINTF(("Validated done!\n")); } else { /* normal case */ bc->sub_file_assign = 0; bc->sub_file_start_lba = 0; } bc->fd = fd; bc->isblk = S_ISBLK(sbuf.st_mode); bc->candiscard = candiscard; if (candiscard) { bc->max_discard_sectors = (max_discard_sectors != -1) ? max_discard_sectors : (size / DEV_BSIZE); bc->max_discard_seg = (max_discard_seg != -1) ? max_discard_seg : 1; bc->discard_sector_alignment = (discard_sector_alignment != -1) ? 
discard_sector_alignment : 0; } bc->rdonly = ro; bc->size = size; bc->sectsz = sectsz; bc->psectsz = psectsz; bc->psectoff = psectoff; bc->wce = writeback; pthread_mutex_init(&bc->mtx, NULL); pthread_cond_init(&bc->cond, NULL); TAILQ_INIT(&bc->freeq); TAILQ_INIT(&bc->pendq); TAILQ_INIT(&bc->busyq); for (i = 0; i < BLOCKIF_MAXREQ; i++) { bc->reqs[i].status = BST_FREE; TAILQ_INSERT_HEAD(&bc->freeq, &bc->reqs[i], link); } for (i = 0; i < BLOCKIF_NUMTHR; i++) { if (snprintf(tname, sizeof(tname), "blk-%s-%d", ident, i) >= sizeof(tname)) { pr_err("blk thread name too long"); } pthread_create(&bc->btid[i], NULL, blockif_thr, bc); pthread_setname_np(bc->btid[i], tname); } /* free strdup memory */ if (nopt) { free(nopt); nopt = NULL; } return bc; err: /* handle failure case: free strdup memory*/ if (nopt) free(nopt); if (fd >= 0) close(fd); return NULL; } static int blockif_request(struct blockif_ctxt *bc, struct blockif_req *breq, enum blockop op) { int err; err = 0; pthread_mutex_lock(&bc->mtx); if (!TAILQ_EMPTY(&bc->freeq)) { /* * Enqueue and inform the block i/o thread * that there is work available */ if (blockif_enqueue(bc, breq, op)) pthread_cond_signal(&bc->cond); } else { /* * Callers are not allowed to enqueue more than * the specified blockif queue limit. Return an * error to indicate that the queue length has been * exceeded. 
*/ err = E2BIG; } pthread_mutex_unlock(&bc->mtx); return err; } int blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq) { return blockif_request(bc, breq, BOP_READ); } int blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq) { return blockif_request(bc, breq, BOP_WRITE); } int blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq) { return blockif_request(bc, breq, BOP_FLUSH); } int blockif_discard(struct blockif_ctxt *bc, struct blockif_req *breq) { return blockif_request(bc, breq, BOP_DISCARD); } int blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq) { struct blockif_elem *be; pthread_mutex_lock(&bc->mtx); /* * Check pending requests. */ TAILQ_FOREACH(be, &bc->pendq, link) { if (be->req == breq) break; } if (be != NULL) { /* * Found it. */ blockif_complete(bc, be); pthread_mutex_unlock(&bc->mtx); return 0; } /* * Check in-flight requests. */ TAILQ_FOREACH(be, &bc->busyq, link) { if (be->req == breq) break; } if (be == NULL) { /* * Didn't find it. */ pthread_mutex_unlock(&bc->mtx); return -1; } /* * Interrupt the processing thread to force it return * prematurely via it's normal callback path. */ while (be->status == BST_BUSY) { struct blockif_sig_elem bse, *old_head; pthread_mutex_init(&bse.mtx, NULL); pthread_cond_init(&bse.cond, NULL); bse.pending = 1; do { old_head = blockif_bse_head; bse.next = old_head; } while (!__sync_bool_compare_and_swap((uintptr_t *)& blockif_bse_head, (uintptr_t)old_head, (uintptr_t)&bse)); pthread_kill(be->tid, SIGCONT); pthread_mutex_lock(&bse.mtx); while (bse.pending) pthread_cond_wait(&bse.cond, &bse.mtx); pthread_mutex_unlock(&bse.mtx); } pthread_mutex_unlock(&bc->mtx); /* * The processing thread has been interrupted. Since it's not * clear if the callback has been invoked yet, return EBUSY. 
*/ return -EBUSY; } int blockif_close(struct blockif_ctxt *bc) { void *jval; int i; sub_file_unlock(bc); /* * Stop the block i/o thread */ pthread_mutex_lock(&bc->mtx); bc->closing = 1; pthread_cond_broadcast(&bc->cond); pthread_mutex_unlock(&bc->mtx); for (i = 0; i < BLOCKIF_NUMTHR; i++) pthread_join(bc->btid[i], &jval); /* XXX Cancel queued i/o's ??? */ /* * Release resources */ close(bc->fd); free(bc); return 0; } /* * Return virtual C/H/S values for a given block. Use the algorithm * outlined in the VHD specification to calculate values. */ void blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h, uint8_t *s) { off_t sectors; /* total sectors of the block dev */ off_t hcyl; /* cylinders times heads */ uint16_t secpt; /* sectors per track */ uint8_t heads; sectors = bc->size / bc->sectsz; /* Clamp the size to the largest possible with CHS */ if (sectors > 65535UL*16*255) sectors = 65535UL*16*255; if (sectors >= 65536UL*16*63) { secpt = 255; heads = 16; hcyl = sectors / secpt; } else { secpt = 17; hcyl = sectors / secpt; heads = (hcyl + 1023) / 1024; if (heads < 4) heads = 4; if (hcyl >= (heads * 1024) || heads > 16) { secpt = 31; heads = 16; hcyl = sectors / secpt; } if (hcyl >= (heads * 1024)) { secpt = 63; heads = 16; hcyl = sectors / secpt; } } *c = hcyl / heads; *h = heads; *s = secpt; } /* * Accessors */ off_t blockif_size(struct blockif_ctxt *bc) { return bc->size; } int blockif_sectsz(struct blockif_ctxt *bc) { return bc->sectsz; } void blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off) { *size = bc->psectsz; *off = bc->psectoff; } int blockif_queuesz(struct blockif_ctxt *bc) { return (BLOCKIF_MAXREQ - 1); } int blockif_is_ro(struct blockif_ctxt *bc) { return bc->rdonly; } int blockif_candiscard(struct blockif_ctxt *bc) { return bc->candiscard; } int blockif_max_discard_sectors(struct blockif_ctxt *bc) { return bc->max_discard_sectors; } int blockif_max_discard_seg(struct blockif_ctxt *bc) { return bc->max_discard_seg; } int 
blockif_discard_sector_alignment(struct blockif_ctxt *bc) { return bc->discard_sector_alignment; } uint8_t blockif_get_wce(struct blockif_ctxt *bc) { return bc->wce; } void blockif_set_wce(struct blockif_ctxt *bc, uint8_t wce) { bc->wce = wce; } int blockif_flush_all(struct blockif_ctxt *bc) { int err; err=0; if (fsync(bc->fd)) err = errno; return err; }
ab5b40cd58a29300e8a0af94b87931167c4318bf
5f86fc385c7dcfcb5b166cdea7c8b13057b8bb5f
/tests/crypto/random_tests.c
939f8f71a1377a82e8f7cf6f21d92c522f68e51e
[ "MIT" ]
permissive
openenclave/openenclave
54a38e12d9aa73357d9f438a07cd8c07ffe5e6df
cdeb95c1ec163117de409295333b6b2702013e08
refs/heads/master
2023-08-14T16:43:32.049533
2023-07-21T15:58:54
2023-07-21T15:58:54
101,804,230
800
372
MIT
2023-09-12T20:26:02
2017-08-29T20:31:38
C
UTF-8
C
false
false
1,541
c
random_tests.c
// Copyright (c) Open Enclave SDK contributors. // Licensed under the MIT License. #if defined(OE_BUILD_ENCLAVE) #include <openenclave/enclave.h> #endif #include <openenclave/internal/defs.h> #include <openenclave/internal/random.h> #include <openenclave/internal/tests.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "tests.h" #define SEQ_COUNT 64 #define SEQ_LENGTH_MAX 2049 static void _test_random(size_t seq_length) { printf("=== begin %s(%zu)\n", __FUNCTION__, seq_length); uint8_t buf[SEQ_COUNT][SEQ_LENGTH_MAX]; memset(buf, 0, sizeof(buf)); for (size_t i = 0; i < SEQ_COUNT; i++) { /* Generate a random sequence */ OE_TEST( oe_random_internal(buf[i], seq_length * sizeof(uint8_t)) == OE_OK); /* Be sure buffer is not filled with same character */ { size_t m; uint8_t c = buf[i][0]; for (m = 1; m < seq_length && buf[i][m] == c; m++) ; OE_TEST(m != seq_length); } /* Check whether duplicate of one of the previous calls */ for (size_t j = 0; j < i; j++) { OE_TEST(memcmp(buf[j], buf[i], seq_length * sizeof(uint8_t)) != 0); } } printf("=== passed %s()\n", __FUNCTION__); } void TestRandom(void) { _test_random(19); _test_random(1023); _test_random(1024); _test_random(1025); _test_random(2047); _test_random(2048); _test_random(2049); OE_STATIC_ASSERT(SEQ_LENGTH_MAX == 2049); }
c18fa9498aeaf182d6c55f39a0f0253c3c5fa162
e65a4dbfbfb0e54e59787ba7741efee12f7687f3
/security/opencryptoki/files/patch-usr-lib-common-host_defs.h
263df0b9d661690ae96123909fd7db68db93d2ce
[ "BSD-2-Clause" ]
permissive
freebsd/freebsd-ports
86f2e89d43913412c4f6b2be3e255bc0945eac12
605a2983f245ac63f5420e023e7dce56898ad801
refs/heads/main
2023-08-30T21:46:28.720924
2023-08-30T19:33:44
2023-08-30T19:33:44
1,803,961
916
918
NOASSERTION
2023-09-08T04:06:26
2011-05-26T11:15:35
null
UTF-8
C
false
false
569
h
patch-usr-lib-common-host_defs.h
--- usr/lib/common/host_defs.h.orig 2018-11-16 14:53:03 UTC +++ usr/lib/common/host_defs.h @@ -8,12 +8,23 @@ * https://opensource.org/licenses/cpl1.0.php */ +#include <sys/types.h> #include <sys/mman.h> #ifndef _HOST_DEFS_H #define _HOST_DEFS_H #include <pthread.h> +#if defined(__OpenBSD__) || defined(__FreeBSD__) +#include <sys/endian.h> +#ifdef _BYTE_ORDER +#define __BYTE_ORDER _BYTE_ORDER +#endif +#ifdef _LITTLE_ENDIAN +#define __LITTLE_ENDIAN _LITTLE_ENDIAN +#endif +#else #include <endian.h> +#endif #include "pkcs32.h" #include <stdint.h>
4ff5e3780891dd510b6b7ffa44c143a01eaabe45
7eaf54a78c9e2117247cb2ab6d3a0c20719ba700
/SOFTWARE/A64-TERES/linux-a64/arch/openrisc/lib/delay.c
c82b09f4a106ce15d2a0b243f5ec40f135cd2b7d
[ "LicenseRef-scancode-free-unknown", "Apache-2.0", "Linux-syscall-note", "GPL-2.0-only", "GPL-1.0-or-later" ]
permissive
OLIMEX/DIY-LAPTOP
ae82f4ee79c641d9aee444db9a75f3f6709afa92
a3fafd1309135650bab27f5eafc0c32bc3ca74ee
refs/heads/rel3
2023-08-04T01:54:19.483792
2023-04-03T07:18:12
2023-04-03T07:18:12
80,094,055
507
92
Apache-2.0
2023-04-03T07:05:59
2017-01-26T07:25:50
C
UTF-8
C
false
false
1,393
c
delay.c
/* * OpenRISC Linux * * Linux architectural port borrowing liberally from similar works of * others. All original copyrights apply as per the original source * declaration. * * Modifications for the OpenRISC architecture: * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation * * Precise Delay Loops */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <asm/delay.h> #include <asm/timex.h> #include <asm/processor.h> int read_current_timer(unsigned long *timer_value) { *timer_value = mfspr(SPR_TTCR); return 0; } void __delay(unsigned long cycles) { cycles_t start = get_cycles(); while ((get_cycles() - start) < cycles) cpu_relax(); } EXPORT_SYMBOL(__delay); inline void __const_udelay(unsigned long xloops) { unsigned long long loops; loops = (unsigned long long)xloops * loops_per_jiffy * HZ; __delay(loops >> 32); } EXPORT_SYMBOL(__const_udelay); void __udelay(unsigned long usecs) { __const_udelay(usecs * 0x10C7UL); /* 2**32 / 1000000 (rounded up) */ } EXPORT_SYMBOL(__udelay); void __ndelay(unsigned long nsecs) { __const_udelay(nsecs * 0x5UL); /* 2**32 / 1000000000 (rounded up) */ } EXPORT_SYMBOL(__ndelay);
928cafd5b10e6b0835e69b513841c8036ad3e0c9
782af7bfc133bd8335b7c78d69a5e885b67b9043
/crcdbl.h
b2502d435e7cec268336ad9dcb18a4762a467605
[ "Zlib" ]
permissive
madler/crcany
c13b42466cab9cb58c1e7c957c820eff66484a25
2493dc60762066a0d2644f92a0903a8af7a10d27
refs/heads/master
2023-09-03T14:18:19.984955
2022-05-10T18:56:48
2022-05-10T19:12:56
63,035,773
245
48
null
2021-10-13T03:30:04
2016-07-11T04:11:31
HTML
UTF-8
C
false
false
1,459
h
crcdbl.h
/* crcdbl.h -- Generic bit-wise CRC calculation for a double-wide CRC * Copyright (C) 2014, 2016, 2017 Mark Adler * For conditions of distribution and use, see copyright notice in crcany.c. */ #ifndef _CRCDBL_H_ #define _CRCDBL_H_ #include "model.h" /* Similar to crc_bitwise(), but works for CRCs up to twice as long as a word_t. This processes long CRCs stored in two word_t values, *crc_hi and *crc_lo. The final CRC is returned in *crc_hi and *crc_lo. If buf is NULL, then return the initial CRC for this model. This allows for the calculation of a CRC in pieces, but the first call must be with the initial value for this CRC model. This calls crc_bitwise() for short CRC models. For long CRC models, this does the same thing crc_bitwise() does, but with the shift and exclusive-or operations extended across two word_t's. An example to compute the CRC of three chunks in sequence: word_t hi, lo; crc_bitwise_dbl(model, &hi, &lo, NULL, 0); crc_bitwise_dbl(model, &hi, &lo, chunk1, len1); crc_bitwise_dbl(model, &hi, &lo, chunk2, len2); crc_bitwise_dbl(model, &hi, &lo, chunk3, len3); The CRC of the sequence is left in hi, lo. */ void crc_bitwise_dbl(model_t *, word_t *, word_t *, unsigned char const *, size_t); /* Similar to crc_zeros(), but works for CRCs up to twice as long as a word_t. */ void crc_zeros_dbl(model_t *, word_t *, word_t *, size_t); #endif
28d8afbb5474e8646d7a27b9b7e7effd12ebb21e
7744859512f027ef0da8b1bde0f8518e631b98eb
/soh/assets/scenes/shops/zoora/zoora_room_0.h
5ada3db86c08e5889850b8b65f8a9ced5920fd09
[]
no_license
HarbourMasters/Shipwright
7f70b9470e4f9a117f3fe2d6e4deb776b8742182
0e7c6585239c0d7ea2c039b8b8cb7eaedf8928a9
refs/heads/develop
2023-08-31T20:50:56.253521
2023-08-30T17:34:06
2023-08-30T17:34:06
472,575,717
2,104
459
null
2023-09-14T20:29:01
2022-03-22T01:42:52
C
UTF-8
C
false
false
215
h
zoora_room_0.h
#pragma once #include "align_asset_macro.h" #define dzoora_room_0DL_0009C0 "__OTR__scenes/nonmq/zoora_scene/zoora_room_0DL_0009C0" static const ALIGN_ASSET(2) char zoora_room_0DL_0009C0[] = dzoora_room_0DL_0009C0;
08c60ec3ac22ecf37be75e3edf780059478f1ce0
d69f9068ea2864f5045e53e8ec43a7fbf44c351b
/KeDetective.h
54d74d26add29682087c1c97878ff1bfe285c836
[]
no_license
Fyyre/kerneldetective
81e02ceea4dc01dfcb87731927163eeaa0b95551
ec5b31aa65c2739217cebb13eece749b7d0e7d1d
refs/heads/master
2022-08-23T15:06:51.709375
2022-08-12T16:34:14
2022-08-12T16:34:14
156,976,045
128
74
null
null
null
null
UTF-8
C
false
false
28,836
h
KeDetective.h
/* * Copyright (c) 2008 Arab Team 4 Reverse Engineering. All rights reserved. * * Module Name: * * core.h * * Abstract: * * This module is the main header . * * Author: * * GamingMasteR * */ #pragma once //#define _REPORT_ #include "ntifs.h" #include <ntddk.h> #include <WinDef.h> #include <ntimage.h> #include <stdio.h> #include <stdlib.h> #include "dasm.h" #include "memory.h" #include "help.h" #include "VM.h" #include "dbg.h" #include "DispatchLock.h" #include "wdbgexts.h" #ifdef __cplusplus extern "C" { #endif #pragma warning(disable : 4995) #pragma warning(disable : 4090) #pragma warning(disable : 4996) #define FILE_DEVICE_CORE 0x00008300 #define CORE_IOCTL_INDEX 0x830 #define CODE(CTL) CTL_CODE(FILE_DEVICE_CORE,CTL,METHOD_NEITHER,FILE_ANY_ACCESS) typedef enum { IOCTL_INITIALIZE = 1, IOCTL_ENUM_PROCESS, IOCTL_VM_READ, IOCTL_VM_WRITE, IOCTL_ENUM_DLL, IOCTL_DBG_MSG, IOCTL_ENUM_SSDT, IOCTL_SET_SSDT, IOCTL_ENUM_IDT, IOCTL_GET_MODULE, IOCTL_IDT_OFFSET, IOCTL_IDT_SELECTOR, IOCTL_ENUM_SHADOW_SSDT, IOCTL_SET_SHADOW_SSDT, IOCTL_ENUM_HOOKS, IOCTL_UNHOOK_KERNEL, IOCTL_ENUM_HANDLES, IOCTL_PROCESS_KILL, IOCTL_ENUM_DRIVER, IOCTL_RESTORE_SSDT, IOCTL_RESTORE_SHADOW_SSDT, IOCTL_HOOK_KIDEBUGSERVICE, IOCTL_UNHOOK_KIDEBUGSERVICE , IOCTL_ALLOCATE_PROCESS_VM, IOCTL_DEALLOCATE_PROCESS_VM, IOCTL_PROCESS_OPEN, IOCTL_CLOSE_HANDLE, IOCTL_UPDATE_MODULE_LIST, IOCTL_THREAD_TO_PROCESS, IOCTL_GET_OBJECT_TYPE, IOCTL_ENUM_THREADS, IOCTL_THREAD_KILL, IOCTL_THREAD_SET_SSDT, IOCTL_THREAD_RESTORE_SSDT, IOCTL_THREAD_SUSPEND, IOCTL_PROCESS_SUSPEND, IOCTL_THREAD_RESUME, IOCTL_PROCESS_RESUME, IOCTL_ENUM_UNLOADED_DRIVERS, IOCTL_ENUM_TIMERS, IOCTL_ENUM_OBJECT_TYPES , IOCTL_CANCEL_TIMER, IOCTL_CHANGE_OBJECT_PROC, IOCTL_ENUM_IMAGE_NOTIFY , IOCTL_ENUM_PROCESS_NOTIFY, IOCTL_ENUM_THREAD_NOTIFY, IOCTL_ENUM_LEGO_NOTIFY, IOCTL_ENUM_CM_NOTIFY, IOCTL_DELETE_NOTIFY, IOCTL_DELETE_FILE, IOCTL_COPY_FILE , IOCTL_ENUM_BUGCHECK, IOCTL_ENUM_BUGCHECK_REASON, IOCTL_ENUM_THREAD_TRACE, IOCTL_THREAD_OPEN, IOCTL_UNMAP_SECTION , 
IOCTL_GET_PROCESS_INFO, IOCTL_PROCESS_BY_PID, IOCTL_PROCESS_BY_HANDLE , IOCTL_THREAD_BY_PID , IOCTL_THREAD_BY_HANDLE, IOCTL_THREAD_CONTEXT, IOCTL_GET_MEMORY_INFO, IOCTL_ALLOC_NONPAGED_POOL, IOCTL_FREE_NONPAGED_POOL, IOCTL_PROTECT_PROCESS_VM, IOCTL_QUERY_PROCESS_VM, IOCTL_PHYSICAL_PAGE_READ, IOCTL_PHYSICAL_PAGE_WRITE, IOCTL_GET_FILENAME_BY_OBJECT, IOCTL_GET_FILENAME_BY_HANDLE, IOCTL_READ_FILE_BY_HANDLE, IOCTL_WRITE_FILE_BY_HANDLE, IOCTL_READ_FILE_BY_NAME, IOCTL_WRITE_FILE_BY_NAME, IOCTL_OPEN_FILE , IOCTL_ENUM_DEVICES, IOCTL_GET_DRIVER_INFO, IOCTL_GET_OBJECT_NAME, IOCTL_GET_CONTROL_REG, IOCTL_SET_CONTROL_REG, IOCTL_GET_KERNEL_INFO, IOCTL_CALL_BIOS, IOCTL_DISK_READWRITE, IOCTL_PORT_READ, IOCTL_PORT_WRITE, IOCTL_MSR, IOCTL_IO_PACKET, IOCTL_GET_OBJECT_BY_HANDLE, IOCTL_CREATE_THREAD, IOCTL_GET_TYPE_NAME }IOCTL; #define MAX_PATH 260 #define MAX_FAST_REFS 7 #define PSP_MAX_NOTIFY 8 #define CM_MAX_CALLBACKS 100 #define COF(_array) (sizeof(_array)/sizeof(_array[0])) #define SYSCALL_INDEX(_address) *(PULONG)((PUCHAR)_address+1) #define OBJECT_TO_OBJECT_HEADER(o) ((POBJECT_HEADER)CONTAINING_RECORD((o), OBJECT_HEADER, Body)) #define IsXp ((KdBuildNumber == 2600)) #define IsVista ((KdBuildNumber == 6000) || (KdBuildNumber == 6001) || (KdBuildNumber == 6002)) #define IsVistaSE ((KdBuildNumber == 6001) || (KdBuildNumber == 6002)) #define IsWin7 ((KdBuildNumber == 7600) || (KdBuildNumber == 7601)) #define parseobject(_obj, _mem, _type)((_type*)((ULONG)_obj + info.Ps##_mem)) #define UpdateModulesList()\ {\ if (gDrivers)\ {\ delete gDrivers;\ }\ gDrivers = new CDriver();\ gDrivers->Scan();\ gDrivers->ScanPhysicalMemory();\ } typedef struct _DEVICE_EXTENSION { CSHORT Type; USHORT Size; PDEVICE_OBJECT DeviceObject; // owning device object ULONG PowerFlags; PVOID Dope; ULONG ExtensionFlags; PVOID DeviceNode; PDEVICE_OBJECT AttachedTo; LONG StartIoCount; // Used to keep track of number of pending start ios. LONG StartIoKey; // Next startio key ULONG StartIoFlags; // Start Io Flags. 
Need a separate flag so that it can be accessed without locks PVPB Vpb; // If not NULL contains the VPB of the mounted volume. } DEVICE_EXTENSION, *PDEVICE_EXTENSION; typedef struct _MEMORY_BASIC_INFORMATION { PVOID BaseAddress; PVOID AllocationBase; DWORD AllocationProtect; SIZE_T RegionSize; DWORD State; DWORD Protect; DWORD Type; } MEMORY_BASIC_INFORMATION, *PMEMORY_BASIC_INFORMATION; typedef struct _PLATFORM_OPTIONS{ /* XP - Vista */ ULONG Max_ServiceID; /* 0x11c - 0x11c */ PVOID* SDT; /* ----- - ----- */ ULONG Max_ShadowServiceID; /* 0x29B - 0x29B */ PVOID* ShadowSDT; /* ----- - ----- */ ULONG PsSizeofProcess; /* 0x25c - 0x26c */ ULONG PsSizeofThread; /* 0x256 - 0x284 */ ULONG PsServiceTable; /* 0x0e0 - 0x12c */ ULONG PsProcessRundown; /* 0x080 - 0x098 */ ULONG PsThreadRundown; /* 0x234 - 0x250 */ ULONG PsStartAddress; /* 0x224 - 0x1f8 */ ULONG PsDeviceMap; /* 0x15c - 0x134 */ ULONG PsExceptionPort; /* 0x0c0 - 0x0d8 */ ULONG PsActiveProcessLinks; /* 0x088 - 0x0a0 */ ULONG PsThreadListHead; /* 0x190 - 0x168 */ ULONG PsKiThreadListEntry; /* 0x1b0 - 0x1c4 */ ULONG PsKeThreadListEntry; /* 0x22c - 0x248 */ ULONG PsVirtualSize; /* 0x0b0 - 0x0c8 */ ULONG PsSeAuditProcessCreationInfo; /* 0x1f4 - 0x1cc */ ULONG PsThreadFlags; /* 0x248 - 0x260 */ ULONG PsProcessFlags; /* 0x248 - 0x228 */ ULONG PsPreviousMode; /* 0x140 - 0x0e7 */ ULONG PsObjectListHead; /* 0x038 - 0x000 */ //VSP1 ULONG PsObjectTypeName; /* 0x040 - 0x008 */ //VSP1 ULONG PsFreezeCount; /* 0x1b8 - 0x16b */ ULONG PsSuspendSemaphore; /* 0x19c - 0x1ac */ }PLATFORM_OPTIONS, *PPLATFORM_OPTIONS; #define MAKE_FLAG(pos) 1<<pos #define ProcessExiting MAKE_FLAG(2) #define ProcessDelete MAKE_FLAG(3) #define BreakOnTermination MAKE_FLAG(13) #define DeadThread MAKE_FLAG(1) #define SystemThread MAKE_FLAG(4) typedef struct DECLSPEC_ALIGN(1) _KD_IO_PACKET { UCHAR MajorFunction; union { struct { PVOID Buffer; ULONG Length; PLARGE_INTEGER StartingOffset; } Read; struct { PVOID Buffer; ULONG Length; PLARGE_INTEGER 
StartingOffset; } Write; struct { ULONG IoControlCode; PVOID InputBuffer; ULONG InputBufferLength; PVOID OutputBuffer; ULONG OutputBufferLength; BOOL InternalDeviceIoControl; } DeviceIoControl; } Parameters; DEVICE_OBJECT *DeviceObject; FILE_OBJECT *FileObject; } KD_IO_PACKET, *PKD_IO_PACKET; typedef struct DECLSPEC_ALIGN(1) _PROCESS_ENTRY { PVOID ProcessObject; ULONG ImageBase; PVOID Peb; ULONG Status; ULONG Pid; ULONG ParentId; ULONG Cb; WCHAR Name[MAX_PATH]; } PROCESS_ENTRY, *PPROCESS_ENTRY; typedef struct DECLSPEC_ALIGN(1) _THREAD_ENTRY { PVOID Process; ULONG ParentId; PVOID Thread; ULONG Cid; PVOID Teb; PVOID ServiceTable; PVOID Address; ULONG Type; ULONG ThreadState; ULONG WaitReason; ULONG Status; } THREAD_ENTRY, *PTHREAD_ENTRY; typedef struct DECLSPEC_ALIGN(1) _DLL_ENTRY { PVOID BaseAddress; PVOID EntryPoint; ULONG SizeOfImage; WCHAR FullDllName[MAX_PATH]; } DLL_ENTRY, *PDLL_ENTRY; typedef struct DECLSPEC_ALIGN(1) _SDT_ENTRY { ULONG Index; ULONG Current; ULONG Original; ULONG Status; WCHAR Module[MAX_PATH]; } SDT_ENTRY, *PSDT_ENTRY; typedef struct DECLSPEC_ALIGN(1) _DRIVER_ENTRY { PVOID ImageBase; PVOID DriverObject; PVOID Unload; PVOID EntryPoint; ULONG ImageSize; WCHAR ImagePath[MAX_PATH]; } DRIVER_ENTRY, *PDRIVER_ENTRY; typedef struct DECLSPEC_ALIGN(1) _HANDLE_ENTRY { PVOID QuotaProcess; PVOID UniqueProcessId; HANDLE Handle; PVOID Object; PVOID ObjectType; ULONG GrantedAccess; ULONG HandleCount; WCHAR Name[MAX_PATH]; } HANDLE_ENTRY, *PHANDLE_ENTRY; typedef struct DECLSPEC_ALIGN(1) _OBJECT_TYPE_ENTRY { PVOID Address; DWORD Count; DWORD Index; struct { PVOID DumpProcedure; PVOID OpenProcedure; PVOID CloseProcedure; PVOID DeleteProcedure; PVOID ParseProcedure; PVOID SecurityProcedure; PVOID QueryNameProcedure; PVOID OkayToCloseProcedure; } ProcedureTable; WCHAR Name[MAX_PATH]; } OBJECT_TYPE_ENTRY, *POBJECT_TYPE_ENTRY; typedef struct DECLSPEC_ALIGN(1) _HOOK_ENTRY { ULONG ImageBase; ULONG Rva; ULONG Size; ULONG State; ULONG Parameter1; ULONG Parameter2; ULONG 
Parameter3; ULONG Parameter4; UCHAR Origin[64]; UCHAR Current[64]; } HOOK_ENTRY, *PHOOK_ENTRY; typedef struct DECLSPEC_ALIGN(1) _KIDT_ENTRY { USHORT Offset; USHORT Selector; struct { USHORT __unnamed1 : 8; USHORT type : 2; USHORT __unnamed2 : 1; USHORT size : 2; USHORT DPL : 2; USHORT P : 1; } Access; USHORT ExtendedOffset; } KIDT_ENTRY, *PKIDT_ENTRY; typedef struct DECLSPEC_ALIGN(1) _TIMER_ENTRY { PVOID Object; PVOID Thread; KTIMER Timer; KDPC Dpc; } TIMER_ENTRY, *PTIMER_ENTRY; typedef struct DECLSPEC_ALIGN(1) _DBGMSG{ CLIENT_ID Cid; LARGE_INTEGER time; WCHAR process[16]; WCHAR Msg[512]; }DBGMSG, *LPDBGMSG; typedef struct DECLSPEC_ALIGN(1) _BIOS_REGISTERS { ULONG Eax; ULONG Ecx; ULONG Edx; ULONG Ebx; ULONG Ebp; ULONG Esi; ULONG Edi; USHORT SegDs; USHORT SegEs; ULONG EFlags; } BIOS_REGISTERS, *PBIOS_REGISTERS; typedef struct DECLSPEC_ALIGN(8) _IO_INPUT_BUFFER { union { ULONG Key; struct { ULONG Index : CHAR_BIT; }; }; ULONG ControlCode[UCHAR_MAX + 1]; PVOID InputBuffer[UCHAR_MAX + 1]; } IO_INPUT_BUFFER, *PIO_INPUT_BUFFER; typedef struct DECLSPEC_ALIGN(8) _KI_PACKET { union { struct { ULONG_PTR Parameter1; ULONG_PTR Parameter2; ULONG_PTR Parameter3; ULONG_PTR Parameter4; ULONG_PTR Parameter5; ULONG_PTR Parameter6; ULONG_PTR Parameter7; ULONG_PTR Parameter8; } Common; struct { ULONG_PTR *InterruptServiceRoutines; ULONG_PTR *Ssdt; ULONG_PTR *ShadowSsdt; BOOL CaptureDbgMode; USHORT NtBuildNumber; PWCHAR KernelFileName; PWCHAR SystemrootPath; PVOID KiKdProcess; PVOID KiCsrProcess; PVOID KiSystemProcess; PVOID KiIdleProcess; PVOID CsrProcessId; } Initialize; struct { KD_IO_PACKET Packet; } IoPacket; struct { PVOID Process; PKSTART_ROUTINE StartAddress; PVOID Context; } CreateThread; struct { ULONG Register; ULONGLONG Value; BOOL Write; } Msr; struct { ULONG LowestPhysicalPage; ULONG HighestPhysicalPage; ULONG NumberOfPhysicalPages; ULONG_PTR HighestUserAddress; ULONG_PTR SystemRangeStart; ULONG_PTR UserProbeAddress; } SystemInformation; struct { PVOID KernelBase; ULONG 
KernelSize; PVOID PsLoadedModuleList; PVOID MmLoadedUserImageList; PVOID PspCidTable; } KernelInformation; struct { ULONG_PTR Address; BOOL GetExports; BOOL GetSymbols; PWCHAR Buffer; SIZE_T Size; } GetModuleInfo; struct { PVOID ProcessObject; PVOID VirtualAddress; PVOID Buffer; SIZE_T Size; PULONG NumberOfBytesRead; } VirtualRead; struct { PVOID ProcessObject; PVOID VirtualAddress; PVOID Buffer; SIZE_T Size; PULONG NumberOfBytesWritten; } VirtualWrite; struct { PLARGE_INTEGER PhysicalAddress; PVOID Buffer; SIZE_T Size; } PhysicalRead; struct { PLARGE_INTEGER PhysicalAddress; PVOID Buffer; SIZE_T Size; } PhysicalWrite; struct { PVOID ProcessObject; SIZE_T Size; PVOID Address; } VirtualAlloc; struct { PVOID ProcessObject; PVOID Address; SIZE_T Size; } VirtualFree; struct { PVOID ProcessObject; PVOID Address; SIZE_T Size; ULONG NewProtection; PULONG OldProtection; } VirtualProtect; struct { PVOID ProcessObject; PVOID Address; PMEMORY_BASIC_INFORMATION MemoryBasicInformation; SIZE_T Size; } VirtualQuery; struct { PVOID ProcessObject; PVOID SectionBase; } SectionUnmap; struct { PPROCESS_ENTRY Processes; ULONG Count; } ProcessEnumerate; struct { PVOID ProcessObject; PPROCESS_ENTRY Process; } ProcessQueryInformation; struct { PVOID ProcessObject; ULONG ProcessId; HANDLE Handle; } ProcessOpen; struct { PVOID ProcessObject; } ProcessSuspend; struct { PVOID ProcessObject; BOOL ForceResume; } ProcessResume; struct { PVOID ProcessObject; BOOL ForceKill; } ProcessKill; struct { PVOID ProcessObject; PDLL_ENTRY Dlls; ULONG Count; } DllEnumerate; struct { PVOID ProcessObject; PTHREAD_ENTRY Threads; ULONG Count; } ThreadEnumerate; struct { PVOID ThreadObject; PPROCESS_ENTRY Process; PTHREAD_ENTRY Thread; } ThreadQueryInformation; struct { PVOID ThreadObject; ULONG ThreadId; HANDLE Handle; } ThreadOpen; struct { PVOID ThreadObject; } ThreadSuspend; struct { PVOID ThreadObject; BOOL ForceResume; } ThreadResume; struct { PVOID ThreadObject; } ThreadKill; struct { PVOID ThreadObject; 
PCONTEXT Context;
} ThreadCaptureStack;
/* NOTE(review): the anonymous structs below are per-command argument blocks of
 * a `Parameters` member inside KI_PACKET; the opening of KI_PACKET (and of the
 * member that `} Parameters;` closes) lies before this chunk — verify there. */
struct {
    PVOID ThreadObject;
    PCONTEXT Context;
    BOOL Set;
} ThreadContext;
struct {
    PVOID ProcessObject;
    HANDLE Handle;
} CloseHandle;
struct {
    PWCHAR FilePath;
    BOOL ForceDelete;
} FileDelete;
struct {
    PWCHAR SourceFilePath;
    PWCHAR DestinationFilePath;
} FileCopy;
struct {
    PKIDT_ENTRY InterruptEntries;
} InterruptEnumerate;
struct {
    ULONG Index;
    union {
        ULONG_PTR Offset;
        USHORT Selector;
    };
} InterrupHook;
struct {
    PSDT_ENTRY Ssdt;
    ULONG Count;
} SsdtEnumerate;
struct {
    PDRIVER_ENTRY Drivers;
    ULONG Count;
} DriversEnumerate;
struct {
    PVOID DriverObject;
    PDRIVER_ENTRY DriverInformation;
} DriversQueryInformation;
struct {
    PVOID *DeviceObjects;
    ULONG Count;
} DevicesEnumerate;
struct {
    PVOID Object;
    PWCHAR ObjectName;
} ObjectQueryName;
struct {
    PVOID Object;
    PWCHAR ObjectTypeName;
} ObjectQueryTypeName;
struct {
    PWCHAR ImagePath;
    ULONG Flags;
    ULONG Count;
    PHOOK_ENTRY HookEntries;
} ImageHooksEnumerate;
struct {
    PVOID ProcessObject;
    PHANDLE_ENTRY HandleEntries;
    ULONG Count;
} HandlesEnumerate;
struct {
    ULONG BiosCommand;
    PBIOS_REGISTERS BiosArguments;
} BiosCall;
struct {
    ULONG Disk;
    ULONG SectorNumber;
    USHORT SectorCount;
    PVOID Buffer;
    BOOL IsWrite;
} DiskReadWrite;
} Parameters;
} KI_PACKET, *PKI_PACKET;

/* Memory-manager CONTROL_AREA as laid out in 32-bit kernels; the trailing
 * // 0xNN comments record the field offsets this code depends on.
 * NOTE(review): offsets match the XP-era x86 layout — confirm against the
 * target build before relying on them. */
typedef struct _CONTROL_AREA {
    PVOID Segment; // 0x00
    LIST_ENTRY DereferenceList; // 0x04
    ULONG NumberOfSectionReferences; // 0x0c
    ULONG NumberOfPfnReferences; // 0x10
    ULONG NumberOfMappedViews; // 0x14
    USHORT NumberOfSubsections; // 0x18
    USHORT FlushInProgressCount; // 0x1a
    ULONG NumberOfUserReferences; // 0x1c
    ULONG Flags; // MMSECTION_FLAGS // 0x20
    PFILE_OBJECT FilePointer; // 0x24
    PVOID WaitingForDeletion; // PEVENT_COUNTER // 0x28
    USHORT ModifiedWriteCount; // 0x2c
    USHORT NumberOfSystemCacheViews; // 0x2e
    ULONG PagedPoolUsage; // 0x30
    ULONG NonPagedPoolUsage; // 0x34
} CONTROL_AREA, *PCONTROL_AREA;

/* Memory-manager SEGMENT backing a CONTROL_AREA; same offset convention. */
typedef struct _SEGMENT {
    PCONTROL_AREA ControlArea; // 0x00
    PVOID SegmentBaseAddress; // 0x04
    ULONG TotalNumberOfPtes; // 0x08
    ULONG NonExtendedPtes; // 0x0c
    LARGE_INTEGER SizeOfSegment; // 0x10
    ULONG ImageCommitment; // 0x18
    PVOID ImageInformation; // 0x1c PSECTION_IMAGE_INFORMATION
    PVOID SystemImageBase; // 0x20
    ULONG NumberOfCommittedPages; // 0x24
    ULONG SegmentPteTemplate; // 0x28
    PVOID BaseAddress; // 0x2c
    PVOID BaseAddrPae; // 0x30 if PAE enabled
    PULONG PrototypePte; // 0x34
    ULONG ThePtes[1]; // 0x38
} SEGMENT, *PSEGMENT;

/* Executive callback wrapper (pre-Vista registry-callback bookkeeping). */
typedef struct _EX_CALLBACK {
    EX_FAST_REF RoutineBlock;
} EX_CALLBACK, *PEX_CALLBACK;

typedef struct _EX_CALLBACK_ROUTINE_BLOCK {
    EX_RUNDOWN_REF RundownProtect;
    PVOID Function;
    PVOID Context;
} EX_CALLBACK_ROUTINE_BLOCK, *PEX_CALLBACK_ROUTINE_BLOCK;

typedef struct _CM_CALLBACK_CONTEXT_BLOCK {
    LARGE_INTEGER Cookie; // to identify a specific callback for deregistration purposes
    LIST_ENTRY ThreadListHead; // Active threads inside this callback
    EX_PUSH_LOCK ThreadListLock; // synchronize access to the above
    PVOID CallerContext;
} CM_CALLBACK_CONTEXT_BLOCK, *PCM_CALLBACK_CONTEXT_BLOCK;

/* Vista-and-later variant of the registry callback block. */
typedef struct _CM_VISTA_CALLBACK_BLOCK {
    LIST_ENTRY CallbackListHead;
    LIST_ENTRY ThreadListHead;
    LARGE_INTEGER Cookie;
    PVOID CallerContext;
    PVOID Function;
} CM_VISTA_CALLBACK_BLOCK, *PCM_VISTA_CALLBACK_BLOCK;

/* Info classes accepted by KdQueryVirtualMemory (NtQueryVirtualMemory). */
typedef enum _MEMORY_INFORMATION_ {
    MemoryBasicInformation,
    MemoryWorkingSetList,
    MemorySectionName,
    MemoryBasicVlmInformation
} MEMORY_INFORMATION_CLASS;

/*
 * Typed pointers to native NT routines. The // comment above each pointer
 * names the native API it mirrors; the pointers themselves are presumably
 * resolved at runtime elsewhere in the driver — verify in Initialize().
 */
// NtQuerySystemInformation
extern NTSTATUS (NTAPI *KdQuerySystemInformation)(SYSTEM_INFORMATION_CLASS SystemInformationClass, PVOID SystemInformation, ULONG SystemInformationLength, PULONG ReturnLength);
// NtTerminateProcess
extern NTSTATUS (NTAPI *KdTerminateProcess)(IN HANDLE ProcessHandle, IN NTSTATUS ExitStatus);
// NtTerminateThread
extern NTSTATUS (NTAPI *KdTerminateThread)(IN HANDLE ThreadHandle, IN NTSTATUS ExitStatus);
// KeStackAttachProcess
extern VOID (NTAPI *KdStackAttachProcess)(PKPROCESS Process, PKAPC_STATE ApcState);
// KeUnstackDetachProcess
extern VOID (NTAPI *KdUnstackDetachProcess)(PKAPC_STATE ApcState);
// PsLookupProcessByProcessId
extern NTSTATUS (NTAPI *KdLookupProcessByProcessId)(HANDLE ProcessId, PEPROCESS *Process);
// PsLookupThreadByThreadId
extern NTSTATUS (NTAPI *KdLookupThreadByThreadId)(PVOID UniqueThreadId, PETHREAD *Thread);
// ObOpenObjectByPointer
extern NTSTATUS (NTAPI *KdOpenObjectByPointer)(PVOID Object, ULONG HandleAttributes, PACCESS_STATE PassedAccessState, ACCESS_MASK DesiredAccess, POBJECT_TYPE ObjectType OPTIONAL, KPROCESSOR_MODE AccessMode, PHANDLE Handle);
// NtClose
extern NTSTATUS (NTAPI *KdClose)(HANDLE Handle);
// NtOpenFile
extern NTSTATUS (NTAPI *KdOpenFile)(PHANDLE FileHandle, ACCESS_MASK DesiredAccess, POBJECT_ATTRIBUTES ObjectAttributes, PIO_STATUS_BLOCK IoStatusBlock, ULONG ShareAccess, ULONG OpenOptions);
// NtAllocateVirtualMemory
extern NTSTATUS (NTAPI *KdAllocateVirtualMemory)(HANDLE ProcessHandle, PVOID *BaseAddress, ULONG ZeroBits, PSIZE_T RegionSize, ULONG AllocationType, ULONG Protect);
// NtFreeVirtualMemory
extern NTSTATUS (NTAPI *KdFreeVirtualMemory)( HANDLE ProcessHandle, PVOID *BaseAddress, PSIZE_T RegionSize, ULONG FreeType);
// MmGetPhysicalAddress
extern ULONGLONG (NTAPI *KdGetPhysicalAddress)(PVOID BaseAddress);
// MmGetVirtualForPhysical
extern PVOID (NTAPI *KdGetVirtualForPhysical)(ULONGLONG PhysicalAddress);
// NtReadVirtualMemory
extern NTSTATUS (NTAPI *KdReadVirtualMemory)(HANDLE ProcessHandle, PVOID BaseAddress, PVOID Buffer, ULONG NumberOfBytesToRead, PULONG NumberOfBytesReaded);
// NtWriteVirtualMemory
extern NTSTATUS (NTAPI *KdWriteVirtualMemory)(HANDLE ProcessHandle, PVOID BaseAddress, PVOID Buffer, ULONG NumberOfBytesToRead, PULONG NumberOfBytesReaded);
// NtProtectVirtualMemory
extern NTSTATUS (NTAPI *KdProtectVirtualMemory)(HANDLE ProcessHandle, PVOID *BaseAddress, PULONG NumberOfBytesToProtect, ULONG NewAccessProtection, PULONG OldAccessProtection);
// NtQueryVirtualMemory
extern NTSTATUS (NTAPI *KdQueryVirtualMemory)(HANDLE ProcessHandle, PVOID BaseAddress, MEMORY_INFORMATION_CLASS MemoryInformationClass, PVOID MemoryInformation, ULONG MemoryInformationLength, PULONG ReturnLength OPTIONAL);
// NtFlushInstructionCache
extern NTSTATUS (NTAPI *KdFlushInstructionCache)(HANDLE ProcessHandle, PVOID BaseAddress, ULONG FlushSize);
// NtOpenProcess
extern NTSTATUS (NTAPI *KdOpenProcess)(PHANDLE ProcessHandle, ACCESS_MASK DesiredAccess, POBJECT_ATTRIBUTES ObjectAttributes, PCLIENT_ID ClientId);
// NtOpenThread
extern NTSTATUS (NTAPI *KdOpenThread)(PHANDLE ThreadHandle, ACCESS_MASK DesiredAccess, POBJECT_ATTRIBUTES ObjectAttributes, PCLIENT_ID ClientId);
// MmCopyVirtualMemory
extern NTSTATUS (NTAPI *MmCopyVirtualMemory)(PEPROCESS FromProcess, PVOID FromAddress, PEPROCESS ToProcess, PVOID ToAddress, ULONG BufferSize, KPROCESSOR_MODE PreviousMode, PULONG NumberOfBytesCopied);
// NtDuplicateObject
extern NTSTATUS (NTAPI *KdDuplicateObject)(HANDLE SourceProcessHandle, HANDLE SourceHandle, HANDLE TargetProcessHandle, PHANDLE TargetHandle, ACCESS_MASK DesiredAccess, ULONG HandleAttributes, ULONG Options);
// KeInsertQueueApc
extern BOOLEAN (NTAPI *KdInsertQueueApc)(PKAPC Apc, PVOID SystemArgument1, PVOID SystemArgument2, KPRIORITY Increment);
// PsTerminateSystemThread
extern NTSTATUS (NTAPI *KdTerminateSystemThread)(NTSTATUS ExitStatus);
// ObReferenceObjectByHandle
extern NTSTATUS (NTAPI *KdReferenceObjectByHandle)(HANDLE Handle, ACCESS_MASK DesiredAccess, POBJECT_TYPE ObjectType OPTIONAL, KPROCESSOR_MODE AccessMode, PVOID *Object, POBJECT_HANDLE_INFORMATION HandleInformation OPTIONAL);
// ObOpenObjectByName
extern NTSTATUS (NTAPI *KdOpenObjectByName)(POBJECT_ATTRIBUTES ObjectAttributes, POBJECT_TYPE ObjectType, KPROCESSOR_MODE AccessMode, PACCESS_STATE AccessState, ACCESS_MASK DesiredAccess, PVOID ParseContext, PHANDLE Handle);
// NtResumeThread
extern NTSTATUS (NTAPI *KdResumeThread)(HANDLE ThreadHandle, PULONG SuspendCount);
// NtOpenDirectoryObject
extern NTSTATUS (NTAPI *KdOpenDirectoryObject)(PHANDLE DirectoryHandle, ACCESS_MASK DesiredAccess, POBJECT_ATTRIBUTES ObjectAttributes);
// NtUnloadDriver
extern NTSTATUS (NTAPI *KdUnloadDriver)(PUNICODE_STRING RegistryPath);
// Ke386CallBios
extern NTSTATUS (NTAPI *Kd386CallBios)(ULONG BiosCommand, PCONTEXT BiosArguments);
// MmMapViewOfSection
extern NTSTATUS (NTAPI *KdMapViewOfSection)(PVOID SectionObject, PEPROCESS Process, PVOID *BaseAddress, ULONG ZeroBits, ULONG CommitSize, PLARGE_INTEGER SectionOffset, PULONG ViewSize, SECTION_INHERIT InheritDisposition, ULONG AllocationType, ULONG Protect);
// KdUnmapViewOfSection
extern NTSTATUS (NTAPI *KdUnmapViewOfSection)(PEPROCESS Process, PVOID BaseAddress);
// PsSuspendThread
extern NTSTATUS (NTAPI *KdSuspendThread)(PKTHREAD Thread, PULONG PreviousCount);
// KeAlertThread
extern BOOLEAN (NTAPI *KdAlertThread)(PKTHREAD Thread, KPROCESSOR_MODE AlertMode);

/* Prototypes for exported-but-undocumented kernel routines linked directly. */
NTKERNELAPI KPROCESSOR_MODE KeGetPreviousMode(void);
NTSTATUS RtlMultiByteToUnicodeN(
    PWSTR UnicodeString,
    ULONG MaxBytesInUnicodeString,
    PULONG BytesInUnicodeString OPTIONAL,
    PCHAR MultiByteString,
    ULONG BytesInMultiByteString
);
NTSTATUS ObOpenObjectByName(
    IN POBJECT_ATTRIBUTES ObjectAttributes,
    IN POBJECT_TYPE ObjectType OPTIONAL,
    IN KPROCESSOR_MODE AccessMode,
    IN OUT PACCESS_STATE AccessState OPTIONAL,
    IN ACCESS_MASK DesiredAccess OPTIONAL,
    IN OUT PVOID ParseContext OPTIONAL,
    OUT PHANDLE Handle
);
PVOID RtlImageDirectoryEntryToData(
    PVOID Base,
    BOOLEAN MappedAsImage,
    USHORT DirectoryEntry,
    PULONG Size
);
/* Opaque accessors into EPROCESS / ETHREAD exported by the kernel. */
NTKERNELAPI PPEB PsGetProcessPeb(PEPROCESS);
NTKERNELAPI PVOID PsGetProcessId(PEPROCESS);
NTKERNELAPI PVOID PsGetProcessInheritedFromUniqueProcessId(PEPROCESS);
NTKERNELAPI PCHAR PsGetProcessImageFileName(PEPROCESS);
NTKERNELAPI PVOID PsGetProcessSectionBaseAddress(PEPROCESS);
NTKERNELAPI PVOID PsGetThreadWin32Thread(PETHREAD);
NTKERNELAPI PVOID PsGetThreadProcessId(PETHREAD);
NTKERNELAPI PVOID PsGetThreadId(PETHREAD);
NTKERNELAPI PTEB PsGetThreadTeb(PETHREAD);
NTKERNELAPI VOID KeSetSystemAffinityThread(IN KAFFINITY);
NTKERNELAPI ULONG PsSetLegoNotifyRoutine(PVOID LegoNotifyRoutine);
NTKERNELAPI NTSTATUS PsGetContextThread( __in PETHREAD Thread, __inout PCONTEXT ThreadContext, __in KPROCESSOR_MODE Mode);
NTKERNELAPI NTSTATUS PsSetContextThread( __in PETHREAD Thread, __inout PCONTEXT ThreadContext, __in KPROCESSOR_MODE Mode);
NTKERNELAPI NTSTATUS KeSetAffinityThread( PKTHREAD Thread, KAFFINITY Affinity);

/* MSVC compiler intrinsics: x86 port I/O and model-specific registers. */
unsigned char __inbyte(unsigned short Port);
unsigned short __inword(unsigned short Port);
unsigned long __indword(unsigned short Port);
void __outbyte(unsigned short Port, unsigned char Data);
void __outword(unsigned short Port, unsigned short Data);
void __outdword(unsigned short Port, unsigned long Data);
void __inbytestring(unsigned short Port, unsigned char *Buffer, unsigned long Count);
void __inwordstring(unsigned short Port, unsigned short *Buffer, unsigned long Count);
void __indwordstring(unsigned short Port, unsigned long *Buffer, unsigned long Count);
void __outbytestring(unsigned short Port, unsigned char *Buffer, unsigned long Count);
void __outwordstring(unsigned short Port, unsigned short *Buffer, unsigned long Count);
void __outdwordstring(unsigned short Port, unsigned long *Buffer, unsigned long Count);
unsigned __int64 __readmsr(unsigned long);
void __writemsr(unsigned long, unsigned __int64);

bool IsPAEenabled();

/* Well-known object-type pointers exported (or located) in the kernel. */
extern POBJECT_TYPE *ExEventPairObjectType;
extern POBJECT_TYPE *PsProcessType;
extern POBJECT_TYPE *PsThreadType;
extern POBJECT_TYPE *PsJobType;
extern POBJECT_TYPE *LpcPortObjectType;
extern POBJECT_TYPE *LpcWaitablePortObjectType;
extern POBJECT_TYPE *IoDriverObjectType;
extern POBJECT_TYPE *IoDeviceObjectType;
extern POBJECT_TYPE *ExEventObjectType;
extern POBJECT_TYPE *ExDesktopObjectType;

/* Callback signature used by ExecuteOnAllProcessors / DPC_PARAMS. */
typedef void (*PCALLBACK_PROC)(ULONG Param);

/* Per-CPU DPC bookkeeping for ExecuteOnAllProcessors. */
typedef struct _DPC_PARAMS {
    KDPC Dpc;
    PCALLBACK_PROC Proc;
    ULONG Param;
    PKEVENT SyncEvent;
    BOOLEAN Syncronous;
} DPC_PARAMS, *PDPC_PARAMS;

LONG Initialize(PKI_PACKET KiPacket);

VOID ExecuteOnAllProcessors(
    PCALLBACK_PROC Proc,
BOOLEAN Syncronous ); POBJECT_TYPE TypeFromObject( POBJECT_HEADER ObjectHeader ); #define SystemPath L"\\Systemroot\\system32\\" extern PVOID ntoskrnl; extern PVOID w32k; extern PVOID KernelBase; extern ULONG KernelSize; extern PVOID w32kBase; extern ULONG w32kSize; extern ULONG KernelDelta; extern ULONG w32kDelta; extern WCHAR CurrentKernel[MAX_PATH]; extern WCHAR SystemrootPath[MAX_PATH]; extern PKDDEBUGGER_DATA64 KdVersionBlock; extern PLATFORM_OPTIONS info; extern SHORT KdBuildNumber; extern POBJECT_TYPE TypesArray[]; extern PDRIVER_OBJECT KdDriverObject; extern PEPROCESS CsrProcess; PHANDLE_TABLE __forceinline GetProcessHandleTable(PVOID Process) { ULONG HandleTable = 0; if (IsXp) HandleTable = 0xc4; else if (IsVista) HandleTable = 0xdc; else if (IsWin7) HandleTable = 0xf4; return *(PHANDLE_TABLE*)((ULONG_PTR)Process + HandleTable); } BOOL __forceinline IsValidHandleTable(PVOID Process) { return MmIsAddressValid(GetProcessHandleTable(Process)); } #ifdef __cplusplus } #endif