repo_name
stringlengths 5
122
| path
stringlengths 3
232
| text
stringlengths 6
1.05M
|
|---|---|---|
depp/raycast
|
src/level.c
|
#include "level.h"
#include "defs.h"
#include "imath.h"
#include "draw.h"
#include "texture.h"
#include <stdlib.h>
#include <assert.h>
/* Per-screen-column ray description, filled in by perspective() and
   consumed by render().  Values are in the fixed-point range
   returned by icos()/isin() (see imath.h). */
struct rc_column {
    short dx, dy;   /* ray direction for this column */
    short ax, ay;   /* camera facing axis (same for all columns);
                       render() projects hit vectors onto it to get
                       perpendicular distance */
};
/* Setup columns for perspective projection. The number of columns is
"w". The camera points in the direction specified by "angle". The
value "a" is equal to 65536*tan(fov/2), e.g., 65536 for a 90 degree
field of view. */
/* Setup columns for perspective projection.  The number of columns is
   "w".  The camera points in the direction specified by "angle"
   (0x10000 = full circle, see imath.h).  The value "a" is equal to
   65536*tan(fov/2), e.g., 65536 for a 90 degree field of view. */
static void perspective(struct rc_column *cols, unsigned w,
                        unsigned angle, int a)
{
    int dx, dy, dx2, dy2, b, s;
    unsigned i;
    dx = icos(angle);
    dy = isin(angle);
    /* We want dx2 such that, approximately,
       ((w/2) * dx2) >> s = dy * a >> 16
       So dx2 = dy * a / (w/2) >> (16 - s) */
    s = 16;
    dx2 = dy * a / (int)(w/2);
    dy2 = -dx * a / (int)(w/2);
    for (i = 0; i < w; ++i) {
        /* Signed column offset from the screen center.  The original
           expression "(int) i - w / 2" performed the subtraction in
           unsigned arithmetic and relied on implementation-defined
           wraparound when converting the result back to int; cast
           both operands so the subtraction itself is signed. */
        b = (int) i - (int) (w / 2);
        cols[i].dx = dx + ((b * dx2) >> s);
        cols[i].dy = dy + ((b * dy2) >> s);
        cols[i].ax = dx;
        cols[i].ay = dy;
    }
}
/* The 8x8 level map.  0 = empty cell; nonzero = wall.  Value 1 is
   drawn as a flat color; values 2..4 select g_textures[0..2]
   (see the switch in render()).  The border cells are all walls,
   which keeps the ray-march in render() inside the array. */
static const unsigned char LEVEL[8][8] = {
    { 1, 1, 3, 3, 1, 1, 1, 4 },
    { 2, 0, 0, 0, 0, 0, 0, 4 },
    { 4, 0, 0, 0, 0, 0, 0, 4 },
    { 2, 0, 0, 0, 0, 0, 0, 4 },
    { 4, 0, 0, 0, 0, 0, 0, 2 },
    { 2, 0, 0, 0, 0, 0, 0, 4 },
    { 2, 0, 0, 0, 0, 0, 0, 4 },
    { 2, 3, 3, 3, 3, 1, 3, 3 }
};
/* Scale one 8-bit color component x (0..255) by an 8-bit shade
   factor s (0..255), approximately x * s / 255.
   The original form (x * s * 0x101) >> 16 fell one short at full
   brightness: since 255 * 0x101 = 0xFFFF, shade1(x, 255) returned
   x - 1 for any x > 0.  Multiplying by (s * 0x101 + 1) adds the
   missing +x term, making s == 255 the exact identity while
   preserving s == 0 -> 0 and monotonicity. */
static unsigned shade1(unsigned x, unsigned s)
{
    return (x * (s * 0x101 + 1)) >> 16;
}
/* Apply the scalar shade factor s (0..255) to each 8-bit channel of
   the packed pixel x and return the re-packed pixel. */
static unsigned shade(unsigned x, unsigned s)
{
    unsigned red   = shade1((x >> RSHIFT) & 0xff, s);
    unsigned green = shade1((x >> GSHIFT) & 0xff, s);
    unsigned blue  = shade1((x >> BSHIFT) & 0xff, s);
    return (red << RSHIFT) | (green << GSHIFT) | (blue << BSHIFT);
}
/* Cast one ray per screen column through LEVEL and draw the
   resulting wall slices into "buf".  (x, y) is the camera position
   in 1/SWIDTH-of-a-cell units; "cols" holds the per-column ray
   directions produced by perspective(). */
static void render(struct pixbuf *restrict buf,
                   int x, int y, struct rc_column *cols)
{
    /* A map cell is SWIDTH = 1024 position units on a side. */
    int SBITS = 10, SWIDTH = 1 << SBITS;
    unsigned vw = buf->width, *vp = buf->ptr, vrb = buf->row,
        vh = buf->height, i;
    /* Re-center so cell coordinates are non-negative.
       NOTE(review): assumes the camera stays within 4 cells of the
       origin -- confirm against world movement limits. */
    x += SWIDTH * 4;
    y += SWIDTH * 4;
    /* Split the position into cell index and in-cell offset. */
    int cx0 = (x >> SBITS), cy0 = (y >> SBITS);
    int ox0 = x & (SWIDTH - 1), oy0 = y & (SWIDTH - 1);
    for (i = 0; i < vw; ++i) {
        unsigned *cp = vp + i; /* top pixel of this screen column */
        unsigned c; /* Color */
        /* Camera outside the interior of the map: flat fallback.
           This also guarantees the march below starts inside the
           wall border, so LEVEL is never indexed out of bounds. */
        if (cx0 < 1 || cy0 < 1 || cx0 >= 7 || cy0 >= 7) {
            c = rgb(64, 64, 32);
            goto solid;
        }
        /* To simplify the algorithm and avoid dividing by zero, we
           identify the "major axis" and "minor axis".  Each
           iteration, we move exactly one cell along the major axis
           and at most one cell along the minor axis.  The "slope" is
           the ratio of movement along the minor axis to the major
           axis, and therefore its magnitude is at most 1.0
           (65536). */
        /* dx: delta x, adx: absolute delta x, sdx: sign delta x
           cx: cell x, m: slope, off: minor axis offset,
           ox: offset on X axis */
        int dx, dy, adx, ady, sdx, sdy, cx, cy, m, off, ox, oy;
        dx = cols[i].dx;
        dy = cols[i].dy;
        adx = dx >= 0 ? dx : -dx;
        sdx = dx >= 0 ? 1 : -1;
        ady = dy >= 0 ? dy : -dy;
        sdy = dy >= 0 ? 1 : -1;
        cx = cx0;
        cy = cy0;
        if (adx >= ady) {
            /* X is the major axis. */
            m = (dy << 16) / adx;   /* slope in 16.16 fixed point */
            /* Minor-axis offset at the first X cell boundary. */
            off = dx >= 0 ? SWIDTH - ox0 : ox0;
            off = oy0 + ((m * off) >> 16);
            while (1) {
                if (off >> SBITS) {
                    /* The minor-axis offset crossed into the next Y
                       cell: step in Y first and test that cell. */
                    cy += sdy;
                    if (LEVEL[cx][cy]) {
                        /* Hit a Y-facing wall: recover the exact X
                           offset of the intersection on the wall
                           face from the Y overshoot. */
                        if (dy >= 0)
                            off -= SWIDTH;
                        off = - off * dx / dy;
                        if (dx >= 0)
                            off += SWIDTH;
                        ox = off;
                        oy = dy >= 0 ? 0 : SWIDTH;
                        goto hit;
                    }
                    off &= SWIDTH - 1;
                }
                cx += sdx;
                if (LEVEL[cx][cy]) {
                    /* Hit an X-facing wall at the cell boundary. */
                    ox = dx >= 0 ? 0 : SWIDTH;
                    oy = off;
                    goto hit;
                }
                off += m >> (16 - SBITS);
            }
        } else {
            /* Y is the major axis: mirror image of the case above. */
            m = (dx << 16) / ady;
            off = dy >= 0 ? SWIDTH - oy0 : oy0;
            off = ox0 + ((m * off) >> 16);
            while (1) {
                if (off >> SBITS) {
                    cx += sdx;
                    if (LEVEL[cx][cy]) {
                        if (dx >= 0)
                            off -= SWIDTH;
                        off = - off * dy / dx;
                        if (dy >= 0)
                            off += SWIDTH;
                        oy = off;
                        ox = dx >= 0 ? 0 : SWIDTH;
                        goto hit;
                    }
                    off &= SWIDTH - 1;
                }
                cy += sdy;
                if (LEVEL[cx][cy]) {
                    oy = dy >= 0 ? 0 : SWIDTH;
                    ox = off;
                    goto hit;
                }
                off += m >> (16 - SBITS);
            }
        }
        c = rgb(32, 32, 32);
        goto solid;
    solid:
        /* Fill the whole column with one flat color. */
        {
            unsigned j;
            for (j = 0; j < vh; ++j)
                cp[vrb*j] = c;
        }
        continue;
    hit:
        {
            c = rgb(64, 64, 64);
            /* Hit point relative to the camera.  Projecting it onto
               the camera axis (ax, ay) gives the perpendicular
               distance, which avoids fisheye distortion. */
            int hx = (cx << SBITS) + ox - x, hy = (cy << SBITS) + oy - y;
            int d = (cols[i].ax * hx + cols[i].ay * hy) >> 16;
            if (d < 10) {
                /* Too close: clamp instead of producing a huge or
                   divide-by-zero slice height. */
                c = rgb(64, 32, 32);
                goto solid;
            }
            unsigned h = (240 * 256) / d;   /* half-height of the slice in pixels */
            struct texture *t;
            /* Distance-based darkening: sh is the shade factor,
               255 = full brightness, falling off with squared
               (true, not perpendicular) distance. */
            int d2 = (hx >> 2) * (hx >> 2) + (hy >> 2) * (hy >> 2);
            int sh = d2 > 65536 ? (65536 * 255) / d2 : 255;
            if (sh == 0) {
                /* Fully faded to black: no need to sample a texture. */
                c = 0;
                goto color;
            }
            switch (LEVEL[cx][cy]) {
            case 1: c = rgb(255, 32, 32); goto color;
            case 2: t = g_textures[0]; goto texture;
            case 3: t = g_textures[1]; goto texture;
            case 4: t = g_textures[2]; goto texture;
            }
        color:
            /* Untextured slice: flat shaded color, clamped to the
               half-height of the view. */
            {
                c = shade(c, sh);
                if (h > 240)
                    h = 240;
                unsigned j;
                for (j = vh/2-h; j < vh/2+h; ++j)
                    cp[vrb*j] = c;
                continue;
            }
        texture:
            /* Textured slice. */
            {
                unsigned hb = t->hbits, wb = t->wbits;
                /* Texels per screen pixel, 16.16 fixed point. */
                unsigned tm = (1u << (hb + 16)) / (h * 2);
                /* Choose a mip level so we never advance much more
                   than one texel per pixel. */
                unsigned n = 0, count = wb > hb ? hb : wb;
                while (tm > (2u << 15u) && n < count) {
                    tm >>= 1;
                    n += 1;
                }
                hb -= n;
                wb -= n;
                /* ox + oy works because exactly one of the two is
                   the in-wall offset; the other is 0 or SWIDTH,
                   which the mask removes. */
                unsigned tx = ((ox + oy) >> (SBITS - wb)) & ((1u << wb) - 1);
                unsigned ty = 0;
                unsigned mask = (1 << hb) - 1;
                unsigned *tp = (unsigned *) t->pixels[n] + (tx << hb);
                unsigned j;
                if (h > 240) {
                    /* Slice taller than the view: start sampling
                       partway down the texture column. */
                    ty = tm * (h - 240);
                    h = 240;
                }
                if (sh == 255) {
                    /* Full brightness: skip the per-pixel shade. */
                    for (j = vh/2-h; j < vh/2+h; ++j) {
                        cp[vrb*j] = tp[(ty >> 16) & mask];
                        ty += tm;
                    }
                } else {
                    for (j = vh/2-h; j < vh/2+h; ++j) {
                        cp[vrb*j] = shade(tp[(ty >> 16) & mask], sh);
                        ty += tm;
                    }
                }
                continue;
            }
        }
    }
}
/* Draw one frame of the level into "buf" from camera position
   (x, y) facing "angle" (0x10000 = full circle).  Allocates one
   temporary ray descriptor per screen column; 65536 selects a
   90 degree field of view (see perspective()). */
void level_draw(struct pixbuf *restrict buf,
                int x, int y, unsigned angle)
{
    unsigned ncols = buf->width;
    struct rc_column *columns = malloc(ncols * sizeof(*columns));
    assert(columns);
    perspective(columns, ncols, angle, 65536);
    render(buf, x, y, columns);
    free(columns);
}
|
depp/raycast
|
src/draw_rect.c
|
#include "defs.h"
#include "draw.h"
/* Fill a w-by-h rectangle with "color", with (x, y) measured from
   the bottom-left corner of the buffer.  The rectangle is clipped
   to the buffer bounds; a fully clipped rectangle draws nothing. */
void draw_rect(struct pixbuf *restrict buf,
               int x, int y, int w, int h, unsigned color)
{
    unsigned *pixels = buf->ptr;
    unsigned stride = buf->row, bw = buf->width, bh = buf->height;
    int top = (int) bh - 1 - y;   /* convert bottom-up y to row index */
    int bottom = top - h;
    int left = x, right = x + w;
    int row, col;
    if (bottom < 0)
        bottom = 0;
    if (top > (int) bh)
        top = bh;
    if (left < 0)
        left = 0;
    if (right > (int) bw)
        right = bw;
    for (row = bottom; row < top; ++row)
        for (col = left; col < right; ++col)
            pixels[stride * row + col] = color;
}
|
depp/raycast
|
src/main.c
|
#include "SDL.h"
#include "defs.h"
#include "draw.h"
#include "input.h"
#include "level.h"
#include "text.h"
#include "texture.h"
#include "world.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
struct texture *g_textures[3];
#define TIME 5
/* Units / tick */
#define TURN 4000
/* Units / tick */
#define FORWARD 300
#define BACKWARD 300
#define SIZE 32
/* Report an SDL API failure ("s" names the failing call, the detail
   comes from SDL_GetError()), shut SDL down, and terminate. */
__attribute__((noreturn))
static void sdlerr(const char *s)
{
    fprintf(stderr, "error: %s: %s\n", s, SDL_GetError());
    SDL_Quit();
    exit(1);
}
/* Report a fatal (non-SDL) error, shut SDL down, and terminate. */
__attribute__((noreturn))
static void fail(const char *s)
{
    fprintf(stderr, "error: %s\n", s);
    SDL_Quit();
    exit(1);
}
/* Allocate sz bytes, terminating the program on exhaustion.
   Returns NULL only for sz == 0; otherwise the result is non-NULL
   and owned by the caller. */
__attribute__((malloc))
void *xmalloc(size_t sz)
{
    if (!sz)
        return NULL;
    void *block = malloc(sz);
    if (!block)
        fail("out of memory");
    return block;
}
/* Bit indices into the "keys" bitmask of currently held arrow keys
   maintained by the event loop in main(). */
enum {
    KLEFT,
    KRIGHT,
    KUP,
    KDOWN
};
int main(int argc, char *argv[])
{
    SDL_Surface *vid;
    SDL_PixelFormat *f;
    SDL_Event e;
    SDL_Joystick *joy;
    /* keys: bitmask of held arrow keys (see enum above).
       reftime: start of the current 64 ms simulation tick.
       delta: ms elapsed since reftime. */
    unsigned reftime, lasttime, curtime, delta, i, keys = 0, kval;
    /* turn/speed: keyboard contribution; turn2/speed2/strafe2:
       joystick contribution, merged each frame. */
    int turn, turn2 = 0, speed, speed2 = 0, strafe2 = 0, njoy, v;
    struct in_axis angle;   /* view angle over time, for interpolation */
    int frame = 0;
    char frametext[16];
    /*
      double t = 0, lt, dt;
      unsigned reftime;
      int kval;
      double perftime;
      unsigned framecount;
      clock_t c1, c2;
      float px = 0, py = 0, pa = 0;
      unsigned keys = 0;
    */
    struct pixbuf buf;
    struct world *w;
    struct obj *p;
    (void) argc;
    (void) argv;
    texture_load(&g_textures[0], "brick.jpg");
    texture_load(&g_textures[1], "roughstone.jpg");
    texture_load(&g_textures[2], "ivy.jpg");
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER | SDL_INIT_JOYSTICK))
        sdlerr("SDL_Init");
    vid = SDL_SetVideoMode(768, 480, 32, SDL_SWSURFACE);
    if (!vid)
        sdlerr("SDL_SetVideoMode");
    /* The renderer writes raw 32-bit pixels directly, so the surface
       must match the RSHIFT/GSHIFT/BSHIFT layout from draw.h. */
    f = vid->format;
    if (f->BitsPerPixel != 32 || f->BytesPerPixel != 4 ||
        f->Rloss || f->Gloss || f->Bloss ||
        f->Rshift != RSHIFT ||
        f->Gshift != GSHIFT ||
        f->Bshift != BSHIFT ||
        f->Rmask != (0xffu << RSHIFT) ||
        f->Gmask != (0xffu << GSHIFT) ||
        f->Bmask != (0xffu << BSHIFT))
    {
        printf("Byte order: %d\n", BYTE_ORDER);
        fprintf(
            stderr,
            "pixel format: %08x %08x %08x\n",
            f->Rmask, f->Gmask, f->Bmask);
        fail("unsupported pixel format");
    }
    njoy = SDL_NumJoysticks();
    if (!njoy)
        joy = NULL;
    else
        joy = SDL_JoystickOpen(0);
    w = world_new();
    p = &w->player;
    in_axis_init(&angle);
    lasttime = reftime = SDL_GetTicks();
    // perftime = 0;
    // framecount = 0;
    // c1 = clock();
    /* Main loop: fixed 64 ms simulation ticks, rendering every
       iteration with interpolation between ticks. */
    while (1) {
        frame += 1;
        curtime = SDL_GetTicks();
        delta = curtime - reftime;
        if (delta > 1000) {
            /* Fell far behind: clamp the backlog instead of running
               many catch-up ticks.
               NOTE(review): lasttime is never updated inside the
               loop, so this uses its startup value -- confirm
               whether it was meant to track the previous frame's
               curtime. */
            puts("Lag");
            delta = lasttime - reftime;
            reftime = curtime - delta;
        }
        /* Drain all pending input events. */
        while (SDL_PollEvent(&e)) {
            switch (e.type) {
            case SDL_QUIT:
                goto quit;
            case SDL_KEYDOWN:
            case SDL_KEYUP:
                switch (e.key.keysym.sym) {
                case SDLK_ESCAPE: goto quit;
                case SDLK_UP: kval = KUP; break;
                case SDLK_DOWN: kval = KDOWN; break;
                case SDLK_LEFT: kval = KLEFT; break;
                case SDLK_RIGHT: kval = KRIGHT; break;
                default: goto nokey;
                }
                if (e.type == SDL_KEYDOWN)
                    keys |= 1u << kval;
                else
                    keys &= ~(1u << kval);
            nokey:
                break;
            case SDL_JOYAXISMOTION:
                /* Axis values are 16-bit signed; scale into game
                   units with a 15-bit shift. */
                v = -e.jaxis.value;
                switch (e.jaxis.axis) {
                case 0: strafe2 = v * FORWARD >> 15; break; /* left x */
                case 1: speed2 = v * FORWARD >> 15; break; /* left y */
                case 2: turn2 = v * TURN >> 15; break; /* right x */
                case 3: break; /* right y */
                }
                break;
            }
        }
        /* Keyboard turning; opposite keys cancel out (left wins the
           tie only when right is not held). */
        turn = 0;
        if (keys & (1u << KLEFT)) {
            if ((keys & (1u << KRIGHT)) == 0)
                turn = TURN;
        } else if (keys & (1u << KRIGHT)) {
            turn = -TURN;
        }
        in_axis_setvel(&angle, delta, turn + turn2);
        /* Run every whole 64 ms tick that fits in delta, then carry
           the remainder into the next frame. */
        if (delta >= 64) {
            speed = 0;
            if (keys & (1u << KUP)) {
                if ((keys & (1u << KDOWN)) == 0)
                    speed = FORWARD;
            } else if (keys & (1u << KDOWN)) {
                speed = - BACKWARD;
            }
            p->speed = speed + speed2;
            p->strafe = strafe2;
            for (i = 0; 64 * (i + 1) <= delta; ++i) {
                p->angle = in_axis_get(&angle, i * 64) >> 6;
                world_update(w);
            }
            in_axis_advance(&angle, 64 * i);
            delta &= 63;
            reftime = curtime - delta;
        }
        /*
          if (t > perftime + 1.0) {
          c2 = clock();
          printf("%.1f fps, %.1f%% processor\n",
          framecount / (t - perftime),
          100 * (c2 - c1) / ((t - perftime) * CLOCKS_PER_SEC));
          framecount = 0;
          c1 = c2;
          perftime = t;
          }
          framecount++;
        */
        SDL_LockSurface(vid);
        buf.ptr = vid->pixels;
        buf.width = vid->w;
        buf.height = vid->h;
        buf.row = vid->pitch / 4;   /* pitch is in bytes, row in pixels */
        memset(buf.ptr, 0, buf.height * buf.row * 4);
        {
            /* Interpolate the player position within the current
               tick so motion looks smooth between updates. */
            int d = delta;
            int x = p->x0 + ((p->x1 - p->x0) * d >> 6);
            int y = p->y0 + ((p->y1 - p->y0) * d >> 6);
            p->angle = in_axis_get(&angle, delta);
            level_draw(&buf, x, y, p->angle >> 6);
        }
        snprintf(frametext, sizeof(frametext), "frame %d", frame);
        text_draw(&buf, frametext, 10, 50);
        /*
          draw_rect(&buf, 10, 20,
          (buf.width - 20) * (fmod(t, TIME) * (1.0/TIME)), 5,
          rgb(255, 32, 32));
        */
        SDL_UpdateRect(vid, 0, 0, 0, 0);
        SDL_UnlockSurface(vid);
    }
quit:
    SDL_Quit();
    return 0;
}
|
depp/raycast
|
src/input.c
|
<reponame>depp/raycast
#include "input.h"
#include <stdlib.h>
/* Initialize an axis with no recorded events and no storage. */
void in_axis_init(struct in_axis *a)
{
    a->pcount = 0;
    a->palloc = 0;
    a->points = NULL;
}
/* Release the event storage owned by the axis.  The struct itself
   belongs to the caller.  The fields are reset to the
   initialized-empty state so a stale pointer cannot be freed twice
   or dereferenced after destruction, and the axis can be reused
   without calling in_axis_init() again. */
void in_axis_destroy(struct in_axis *a)
{
    free(a->points);
    a->points = NULL;
    a->pcount = 0;
    a->palloc = 0;
}
/* Return the axis position at "time": extrapolate from the last
   event at or before that time using its velocity.  Before the
   first event, return the first event's position; with no events,
   return 0. */
int in_axis_get(struct in_axis *a, int time)
{
    struct in_point *pts = a->points;
    unsigned n = a->pcount;
    unsigned idx = 0;
    while (idx < n && pts[idx].time <= time)
        ++idx;
    if (idx > 0) {
        struct in_point *ev = &pts[idx - 1];
        return ev->pos + ev->vel * (time - ev->time);
    }
    return n > 0 ? pts[0].pos : 0;
}
/* Internal: truncate the event list after "time" and append a new
   event at "time", pre-filled with the position and velocity that
   were in effect at that instant.  Returns a pointer to the new
   event so the caller can overwrite one field.  Aborts on
   allocation failure. */
static struct in_point *in_axis_getnew(struct in_axis *a, int time)
{
    struct in_point *p = a->points;
    unsigned i, n = a->pcount, nalloc;
    int pos, vel;
    /* Find the first event strictly after "time"; events from index
       i onward are implicitly discarded by the pcount store below. */
    for (i = 0; i < n && p[i].time <= time; ++i);
    if (i > 0) {
        /* Extrapolate the state at "time" from the preceding event. */
        pos = p[i-1].pos + p[i-1].vel * (time - p[i-1].time);
        vel = p[i-1].vel;
    } else {
        pos = 0;
        vel = 0;
    }
    if (i >= a->palloc) {
        /* Grow geometrically; aborting on failure matches the
           program-wide out-of-memory policy. */
        nalloc = a->palloc ? a->palloc * 2 : 8;
        p = realloc(p, sizeof(*p) * nalloc);
        if (!p)
            abort();
        a->points = p;
        a->palloc = nalloc;
    }
    a->pcount = i + 1;
    p[i].time = time;
    p[i].pos = pos;
    p[i].vel = vel;
    return &p[i];
}
void in_axis_setpos(struct in_axis *a, int time, int pos)
{
struct in_point *p = in_axis_getnew(a, time);
p->pos = pos;
}
void in_axis_setvel(struct in_axis *a, int time, int vel)
{
struct in_point *p;
if (a->pcount) {
p = &a->points[a->pcount - 1];
if (p->time <= time && p->vel == vel)
return;
} else if (!vel) {
return;
}
p = in_axis_getnew(a, time);
p->vel = vel;
}
/* Shift the axis timeline so that "time" becomes time zero:
   subtract "time" from every event timestamp and drop leading
   events no longer needed (all but the last event before the new
   zero, which is kept so the position there stays computable). */
void in_axis_advance(struct in_axis *a, int time)
{
    struct in_point *p = a->points;
    unsigned i, j, n = a->pcount;
    /* i: index one past the last event at or before "time"... */
    for (i = 0; i < n && p[i].time <= time; ++i);
    /* ...then back up one so that event survives the cut. */
    if (i > 0)
        i--;
    if (i > 0) {
        /* Drop the first i events and rebase the remainder. */
        for (j = 0; j < n - i; ++j) {
            p[j].time = p[j+i].time - time;
            p[j].pos = p[j+i].pos;
            p[j].vel = p[j+i].vel;
        }
        n = n - i;
    } else {
        for (j = 0; j < n; ++j)
            p[j].time -= time;
    }
    if (n > 0 && p[0].time < 0) {
        /* The surviving first event predates the new zero: advance
           its position to time zero (time is negative here, hence
           the subtraction adds vel * |time|). */
        p[0].pos -= p[0].vel * p[0].time;
        p[0].time = 0;
    }
    a->pcount = n;
}
|
depp/raycast
|
src/texture_jpeg.c
|
<reponame>depp/raycast<filename>src/texture_jpeg.c
#include "imath.h"
#include "texture.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <jpeglib.h>
#define MAX_SIZE 1024
/* Load a JPEG image from "path" and build a texture from it via
   texture_make().  Grayscale and RGB JPEGs are supported; the
   texture is stored column-major (TEXTURE_COLUMN).  Exits the
   process with a message on any error. */
void texture_load(struct texture **tex, const char *path)
{
    FILE *f;
    struct jpeg_decompress_struct cinfo;
    struct jpeg_error_mgr jerr;
    unsigned w, h, rb, flags = TEXTURE_COLUMN;
    unsigned char *jdata = NULL, *jptr[1];
    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_decompress(&cinfo);
    f = fopen(path, "rb");
    if (!f)
        goto fail_errno;
    jpeg_stdio_src(&cinfo, f);
    jpeg_read_header(&cinfo, TRUE);
    w = cinfo.image_width;
    h = cinfo.image_height;
    if (w > MAX_SIZE || h > MAX_SIZE) {
        fprintf(stderr, "%s: too big\n", path);
        exit(1);
    }
    switch (cinfo.out_color_space) {
    case JCS_GRAYSCALE:
        /* 1 byte per pixel; rows padded to a 16-byte multiple. */
        rb = align(w);
        break;
    case JCS_RGB:
        /* 3 bytes per pixel; rows padded to a 16-byte multiple. */
        flags |= TEXTURE_COLOR;
        rb = align(3 * w);
        break;
    default:
        fprintf(stderr, "%s: unknown color space\n", path);
        exit(1);
    }
    jpeg_start_decompress(&cinfo);
    jdata = malloc(rb * h);
    if (!jdata)
        abort();
    /* Read one scanline per iteration into its row of jdata.
       BUG FIX: the original computed the row offset as
       output_scanline * (rb + i), which was only accidentally
       correct because the inner loop ran a single iteration with
       i == 0; the offset of a row is scanline * rb. */
    while ((unsigned) cinfo.output_scanline < h) {
        jptr[0] = jdata + cinfo.output_scanline * rb;
        jpeg_read_scanlines(&cinfo, jptr, 1);
    }
    jpeg_finish_decompress(&cinfo);
    jpeg_destroy_decompress(&cinfo);
    fclose(f);
    texture_make(tex, jdata, w, h, rb, flags);
    free(jdata);
    return;
fail_errno:
    fprintf(stderr, "%s: %s\n", path, strerror(errno));
    exit(1);
}
|
depp/raycast
|
src/text.c
|
<reponame>depp/raycast
#include "text.h"
#include "draw.h"
#include <cairo/cairo.h>
#include <pango/pangocairo.h>
#include <string.h>
/* Draw "text" at (x, y) into the pixel buffer in white using the
   default Pango font.  The buffer is wrapped zero-copy in a cairo
   image surface; all cairo/pango objects are released before
   returning. */
void text_draw(struct pixbuf *p, const char *text, int x, int y)
{
    /* NOTE(review): assumes the draw.h channel layout is compatible
       with CAIRO_FORMAT_ARGB32 on this platform -- confirm.
       Stride is in bytes; p->row is in pixels. */
    cairo_surface_t *cs = cairo_image_surface_create_for_data(
        (void *) p->ptr, CAIRO_FORMAT_ARGB32,
        p->width, p->height, p->row * 4);
    cairo_t *cr = cairo_create(cs);
    cairo_translate(cr, x, y);
    PangoContext *pc = pango_cairo_create_context(cr);
    PangoLayout *pl = pango_layout_new(pc);
    // pango_layout_set_alignment(pl, PANGO_ALIGN_LEFT);
    pango_layout_set_text(pl, text, strlen(text));
    cairo_set_source_rgb(cr, 1.0, 1.0, 1.0);
    pango_cairo_show_layout(cr, pl);
    g_object_unref(pl);
    g_object_unref(pc);
    cairo_destroy(cr);
    cairo_surface_destroy(cs);
}
|
depp/raycast
|
src/imath.h
|
<gh_stars>0
#ifndef IMATH_H
#define IMATH_H
/* Trig functions take angles such that 0x10000 is a full circle, so
any angle can be stored in 16 bits. They return numbers in the
range -0x4000..+0x4000. */
int isin(int x);
int icos(int x);
unsigned ilog2(unsigned x);
/* Round sz up to the next multiple of 16 (used for row padding). */
static inline unsigned align(unsigned sz)
{
    unsigned rem = sz % 16u;
    return rem ? sz + (16u - rem) : sz;
}
#endif
|
depp/raycast
|
src/world.c
|
<reponame>depp/raycast
#include "defs.h"
#include "imath.h"
#include "world.h"
#include <stdlib.h>
// #include <stdio.h>
/* Allocate a world with the player at the origin, at rest, facing
   angle 0.  Terminates on allocation failure (via xmalloc). */
struct world *world_new(void)
{
    struct world *w = xmalloc(sizeof(*w));
    struct obj *player = &w->player;
    player->x0 = 0;
    player->y0 = 0;
    player->x1 = 0;
    player->y1 = 0;
    player->angle = 0;
    player->speed = 0;
    player->strafe = 0;
    return w;
}
/* Free a world allocated by world_new(). */
void world_delete(struct world *w)
{
    free(w);
}
/* Advance the player one tick: keep the previous position in
   (x0, y0) for interpolation and integrate speed/strafe along the
   facing angle into (x1, y1).  icos/isin return values in the
   +/-0x4000 range (see imath.h), hence the 2^14 divisor. */
void world_update(struct world *w)
{
    struct obj *player = &w->player;
    int cosA, sinA, vx, vy;
    player->x0 = player->x1;
    player->y0 = player->y1;
    cosA = icos(player->angle);
    sinA = isin(player->angle);
    vx = cosA * player->speed - sinA * player->strafe;
    vy = sinA * player->speed + cosA * player->strafe;
    player->x1 += vx / (1 << 14);
    player->y1 += vy / (1 << 14);
}
|
depp/raycast
|
src/text.h
|
#ifndef TEXT_H
#define TEXT_H
struct pixbuf;
void text_draw(struct pixbuf *p, const char *text, int x, int y);
#endif
|
depp/raycast
|
src/input.h
|
<gh_stars>0
#ifndef INPUT_H
#define INPUT_H
/* An axis records input for a positional axis. It combines
positional data (as from a mouse) and velocity data (as from a
keyboard or joystick). Each event has a timestamp, and the axis
can be queried for its value at a given point in time. */
struct in_point {
int time;
int pos;
int vel;
};
struct in_axis {
struct in_point *points;
unsigned pcount, palloc;
};
void in_axis_init(struct in_axis *a);
void in_axis_destroy(struct in_axis *a);
/* Get the axis position at the given time. */
int in_axis_get(struct in_axis *a, int time);
/* Set the axis position at the given time. All events after the
given time are discarded. The velocity is not changed. */
void in_axis_setpos(struct in_axis *a, int time, int pos);
/* Set the axis velocity at the given time. All events after the
given time are discarded. The position is not changed. */
void in_axis_setvel(struct in_axis *a, int time, int vel);
/* Subtract the given delta from all event timestamps. Events before
time zero may be discarded. */
void in_axis_advance(struct in_axis *a, int time);
#endif
|
depp/raycast
|
src/level.h
|
<gh_stars>0
struct pixbuf;
void level_draw(struct pixbuf *restrict buf,
int x, int y, unsigned angle);
|
depp/raycast
|
src/texture_make.c
|
#include "draw.h"
#include "imath.h"
#include "texture.h"
#include <assert.h>
#include <stdlib.h>
#include <string.h>
/* Build a texture with mip levels from raw image data.
   ptr: source image, h rows of rb bytes each (1 byte/pixel for
   grayscale, 3 bytes/pixel RGB when TEXTURE_COLOR is set).
   TEXTURE_COLUMN stores the texture transposed (column-major) so
   the renderer can walk vertical wall slices contiguously.
   All mip levels live in a single allocation owned by the texture.
   Aborts on allocation failure. */
void texture_make(struct texture **tex, unsigned char *ptr,
                  unsigned w, unsigned h, unsigned rb, unsigned flags)
{
    struct texture *tp;
    unsigned wb, hb, count, mcount, i, coff, off[11], msz, tsz;
    /* Storage dimensions in log2; column textures swap the axes. */
    if (flags & TEXTURE_COLUMN) {
        wb = ilog2(h);
        hb = ilog2(w);
    } else {
        wb = ilog2(w);
        hb = ilog2(h);
    }
    count = (wb > hb ? wb : hb) + 1;  /* total mip levels stored */
    mcount = (wb > hb ? hb : wb) + 1; /* levels shrinking both axes */
    assert(count <= 11);              /* off[] capacity; MAX_SIZE=1024 */
    tp = malloc(sizeof(*tp) + sizeof(*tp->pixels) * count);
    if (!tp)
        abort();
    tp->wbits = wb;
    tp->hbits = hb;
    tp->count = count;
    /* Lay out all levels contiguously; off[i] is the element offset
       of level i.  The first mcount-1 steps halve both axes
       (size / 4); once the smaller axis bottoms out, each step
       halves only the larger axis (size / 2). */
    coff = 0;
    msz = 1u << (count + mcount);
    for (i = 0; i + 1 < mcount; ++i) {
        off[i] = coff;
        coff += msz;
        msz >>= 2;
    }
    for (; i < count; ++i) {
        off[i] = coff;
        coff += msz;
        msz >>= 1;
    }
    tsz = coff;   /* total elements across all levels */
    if (flags & TEXTURE_COLOR) {
        /* 32-bit packed-pixel path. */
        unsigned *tptr, *tp2;
        unsigned x, y, p, q0, q1, q2, q3, c1, c2;
        tptr = malloc(tsz * 4);
        if (!tptr)
            abort();
        for (i = 0; i < count; ++i)
            tp->pixels[i] = tptr + off[i];
        if (flags & TEXTURE_COLUMN) {
            /* Pack RGB triples and store transposed. */
            for (y = 0; y < h; ++y) {
                for (x = 0; x < w; ++x) {
                    p = ((unsigned) ptr[y*rb+x*3+0] << RSHIFT) |
                        ((unsigned) ptr[y*rb+x*3+1] << GSHIFT) |
                        ((unsigned) ptr[y*rb+x*3+2] << BSHIFT);
                    tptr[y+x*(1u<<hb)] = p;
                }
            }
            /* Zero remaining space */
        } else {
            for (y = 0; y < h; ++y) {
                for (x = 0; x < w; ++x) {
                    p = ((unsigned) ptr[y*rb+x*3+0] << RSHIFT) |
                        ((unsigned) ptr[y*rb+x*3+1] << GSHIFT) |
                        ((unsigned) ptr[y*rb+x*3+2] << BSHIFT);
                    tptr[x+y*(1u<<wb)] = p;
                }
            }
            /* Zero remaining space */
        }
        /* Box-filter each mip level from the previous one.  The four
           8-bit channels are averaged in two interleaved pairs
           (0xff00ff00 and 0x00ff00ff masks) so a 2x2 block needs
           only a handful of integer adds. */
        for (i = 1; i < mcount; ++i) {
            tptr = tp->pixels[i-1];
            tp2 = tp->pixels[i];
            for (y = 0; y < (1u << (hb - i)); ++y) {
                for (x = 0; x < (1u << (wb - i)); ++x) {
                    q0 = tptr[(2*y+0)*(1u<<(wb+1-i))+2*x+0];
                    q1 = tptr[(2*y+0)*(1u<<(wb+1-i))+2*x+1];
                    q2 = tptr[(2*y+1)*(1u<<(wb+1-i))+2*x+0];
                    q3 = tptr[(2*y+1)*(1u<<(wb+1-i))+2*x+1];
                    c1 = 0x00800080 +
                        ((q0 & 0xff00ff00) >> 2) +
                        ((q1 & 0xff00ff00) >> 2) +
                        ((q2 & 0xff00ff00) >> 2) +
                        ((q3 & 0xff00ff00) >> 2);
                    c2 = 0x00020002 +
                        (q0 & 0x00ff00ff) +
                        (q1 & 0x00ff00ff) +
                        (q2 & 0x00ff00ff) +
                        (q3 & 0x00ff00ff);
                    tp2[y*(1u<<(wb-i))+x] =
                        (c1 & 0xff00ff00) | ((c2 >> 2) & 0x00ff00ff);
                }
            }
        }
    } else {
        /* 8-bit grayscale path. */
        unsigned char *tptr;
        unsigned x, y;
        tptr = malloc(tsz);
        if (!tptr)
            abort();
        for (i = 0; i < count; ++i)
            tp->pixels[i] = tptr + off[i];
        if (flags & TEXTURE_COLUMN) {
            for (y = 0; y < h; ++y)
                for (x = 0; x < w; ++x)
                    tptr[y+x*(1u<<hb)] = ptr[y*rb+x];
            /* Zero remaining space */
        } else {
            if (rb == w && (1u << wb) == w) {
                /* Rows are tightly packed at the storage width:
                   copy the whole image in one go. */
                memcpy(tptr, ptr, w * h);
            } else {
                /* BUG FIX: the original loop incremented "i" instead
                   of "y" and never terminated; it also stepped the
                   source by "w" instead of the row stride "rb"
                   (every other branch here reads rows rb apart). */
                for (y = 0; y < h; ++y)
                    memcpy(tptr + y * (1u<<wb), ptr + y * rb, w);
            }
            /* Zero remaining space */
        }
        /* Calculate mip maps
           NOTE(review): grayscale mip levels are allocated but never
           filled -- confirm whether the renderer ever samples
           level > 0 of a grayscale texture. */
    }
    *tex = tp;
}
|
depp/raycast
|
src/world.h
|
<gh_stars>0
#ifndef WORLD_H
#define WORLD_H
/* A movable object.  Both the previous-tick and current-tick
   positions are kept so the renderer can interpolate between
   simulation updates (see main()). */
struct obj {
    int x0, y0;         /* position at the previous tick */
    int x1, y1;         /* position at the current tick */
    int angle;          /* facing angle; 0x10000 is a full circle */
    int speed, strafe;  /* forward and sideways velocity, units/tick */
};
struct world {
struct obj player;
};
struct world *world_new(void);
void world_delete(struct world *w);
void world_update(struct world *w);
#endif
|
depp/raycast
|
src/defs.h
|
<gh_stars>0
#ifndef DEFS_H
#define DEFS_H
#include <stddef.h>
/* Define BIG_ENDIAN, LITTLE_ENDIAN, and BYTE_ORDER. Tries to work on
as many platforms as possible. Uses the definitions from a
system-provided header file when it is available. Otherwise, uses
built-in preprocessor macros if available. As a last resort,
examines machine type macros.
You should NOT check byte order in a configuration script. Mac OS
X supports simultaneous compilation for architectures with
different byte orders. */
#if defined(__linux__)
#include <endian.h>
#elif defined(__APPLE__)
#include <machine/endian.h>
#else
#define BIG_ENDIAN 4321
#define LITTLE_ENDIAN 1234
#if defined(__BIG_ENDIAN__) && __BIG_ENDIAN__
#define BYTE_ORDER BIG_ENDIAN
#elif defined(__LITTLE_ENDIAN__) && __LITTLE_ENDIAN__
#define BYTE_ORDER LITTLE_ENDIAN
/* This line is taken from LibSDL's SDL_endian.h header file. */
#elif defined(__hppa__) || \
defined(__m68k__) || defined(mc68000) || defined(_M_M68K) || \
(defined(__MIPS__) && defined(__MISPEB__)) || \
defined(__ppc__) || defined(__POWERPC__) || defined(_M_PPC) || \
defined(__sparc__)
#define BYTE_ORDER BIG_ENDIAN
#else
#define BYTE_ORDER LITTLE_ENDIAN
#endif
#endif
__attribute__((malloc))
void *xmalloc(size_t sz);
#endif
|
depp/raycast
|
src/draw.h
|
<filename>src/draw.h
#ifndef DRAW_H
#define DRAW_H
/* The only acceptable video format (for now, at least) is the BGRA
pixel format, 8 bits per sample. */
#include "defs.h"
#if BYTE_ORDER == BIG_ENDIAN
enum {
RSHIFT = 8,
GSHIFT = 16,
BSHIFT = 24
};
#elif BYTE_ORDER == LITTLE_ENDIAN
enum {
RSHIFT = 16,
GSHIFT = 8,
BSHIFT = 0
};
#else
#error No byte order
#endif
/* A 32-bit pixel buffer.  "row" is the pitch in PIXELS, not bytes;
   the pixel at (x, y) is ptr[row * y + x]. */
struct pixbuf {
    unsigned *ptr;
    unsigned width, height, row;
};
/* Pack 8-bit r, g, b components into a pixel using the shifts
   chosen above for the platform byte order.  Components must
   already be in 0..255; no masking is performed. */
static inline unsigned rgb(unsigned r, unsigned g, unsigned b)
{
    return (r << RSHIFT) | (g << GSHIFT) | (b << BSHIFT);
}
void draw_rect(struct pixbuf *restrict buf, int x, int y, int w, int h,
unsigned color);
#endif
|
claireful/Flix
|
Pods/Target Support Files/SwipyCell/SwipyCell-umbrella.h
|
<gh_stars>0
#ifdef __OBJC__
#import <UIKit/UIKit.h>
#else
#ifndef FOUNDATION_EXPORT
#if defined(__cplusplus)
#define FOUNDATION_EXPORT extern "C"
#else
#define FOUNDATION_EXPORT extern
#endif
#endif
#endif
#import "SwipyCell.h"
FOUNDATION_EXPORT double SwipyCellVersionNumber;
FOUNDATION_EXPORT const unsigned char SwipyCellVersionString[];
|
Keenuts/virglrenderer
|
src/vrend_vk.h
|
<reponame>Keenuts/virglrenderer
#ifndef VIRGL_VK_H
#define VIRGL_VK_H
#include <vulkan/vulkan.h>
#include "util/u_double_list.h"
/* This struct contains the state of our Vulkan module
*
* vk_instance: one instance per virglrenderer process.
* physical_devices: contiguous array of VkPhysicalDevice*.
* Devices are enumerated on instance creation.
*
* devices: list of VkDevice wrappers. Each item in this list represents
* a vulkan application using virglrenderer.
*/
struct vrend_vk {
VkInstance vk_instance;
VkPhysicalDevice *physical_devices;
uint32_t physical_device_count;
struct vk_device *devices;
uint32_t device_count;
};
extern struct vrend_vk *vulkan_state;
/* This struct contains the state of ONE Vulkan application running using vgl
*
* physical_device_id: this is the index of the physical device in use
* (in the vrend_vk.physical_devices array)
* handle: The VkDevice handle
*
* Queue creation is forwarded. Thus, we store the VkQueue handles here.
* queue_count: number of queues created by the application
* queues: array of VkQueue handles
*
* next_handle: next handle the device will use when creating an object
* objects: this hashtable stores every Vulkan object.
* All objects are wrapped in a struct defined below.
*/
typedef struct vk_device {
struct list_head list;
uint32_t physical_device_id;
VkDevice handle;
uint32_t queue_count;
VkQueue *queues;
uint32_t next_handle;
struct util_hash_table *objects;
} vk_device_t;
/* Vulkan objects are stored in these handles to keep some state
* This default handle should never be used.
* Consider using vk_XXX_t handles instead.
*/
struct vk_handle {
void *content;
};
#define DECLARE_VK_HANDLE(Type, Name) \
typedef struct { \
Type handle; \
} vk_ ## Name ## _t;
DECLARE_VK_HANDLE(VkDescriptorPool, descriptor_pool);
DECLARE_VK_HANDLE(VkDescriptorSetLayout, descriptor_set_layout);
DECLARE_VK_HANDLE(VkDescriptorSet, descriptor_set);
DECLARE_VK_HANDLE(VkPipelineLayout, pipeline_layout);
DECLARE_VK_HANDLE(VkShaderModule, shader_module);
DECLARE_VK_HANDLE(VkPipeline, pipeline);
DECLARE_VK_HANDLE(VkBuffer, buffer);
DECLARE_VK_HANDLE(VkFence, fence);
DECLARE_VK_HANDLE(VkSemaphore, semaphore);
typedef struct {
VkDeviceMemory handle;
VkMemoryPropertyFlags flags;
void *map_ptr;
uint64_t map_size;
uint64_t map_offset;
} vk_device_memory_t;
typedef struct {
VkCommandPool handle;
/* For now, command-buffer arrays are contiguous. It's not
* needed, and having a large contiguous array might cause issues.
* SHOULD be changed
*/
VkCommandBuffer *cmds;
uint32_t usage;
uint32_t capacity;
} vk_command_pool_t;
/* An object stored in the hashtable */
struct vk_object {
uint32_t handle;
VkDevice vk_device;
struct vk_handle *vk_handle;
void (*cleanup_callback)(VkDevice, void*, void*);
};
/* converts a VkResult to the readable string */
const char* vkresult_to_string(VkResult res);
/* vulkan state management functions */
int vrend_vk_init(void);
void vrend_vk_destroy(void);
#endif
|
Keenuts/virglrenderer
|
src/vrend_formats.c
|
/**************************************************************************
*
* Copyright (C) 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <epoxy/gl.h>
#include "vrend_renderer.h"
#include "util/u_memory.h"
#include "util/u_format.h"
#define SWIZZLE_INVALID 0xff
#define NO_SWIZZLE { SWIZZLE_INVALID, SWIZZLE_INVALID, SWIZZLE_INVALID, SWIZZLE_INVALID }
#define RRR1_SWIZZLE { PIPE_SWIZZLE_RED, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_ONE }
#define RRRG_SWIZZLE { PIPE_SWIZZLE_RED, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_RED, PIPE_SWIZZLE_GREEN }
#define RGB1_SWIZZLE { PIPE_SWIZZLE_RED, PIPE_SWIZZLE_GREEN, PIPE_SWIZZLE_BLUE, PIPE_SWIZZLE_ONE }
/* fill the format table */
static struct vrend_format_table base_rgba_formats[] =
{
{ VIRGL_FORMAT_B8G8R8X8_UNORM, GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, RGB1_SWIZZLE },
{ VIRGL_FORMAT_B8G8R8A8_UNORM, GL_RGBA8, GL_BGRA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
{ VIRGL_FORMAT_R8G8B8X8_UNORM, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, RGB1_SWIZZLE },
{ VIRGL_FORMAT_R8G8B8A8_UNORM, GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
{ VIRGL_FORMAT_A8R8G8B8_UNORM, GL_RGBA8, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8, NO_SWIZZLE },
{ VIRGL_FORMAT_X8R8G8B8_UNORM, GL_RGBA8, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8, NO_SWIZZLE },
{ VIRGL_FORMAT_A8B8G8R8_UNORM, GL_RGBA8, GL_ABGR_EXT, GL_UNSIGNED_BYTE, NO_SWIZZLE },
{ VIRGL_FORMAT_B4G4R4A4_UNORM, GL_RGBA4, GL_BGRA, GL_UNSIGNED_SHORT_4_4_4_4_REV, NO_SWIZZLE },
{ VIRGL_FORMAT_B4G4R4X4_UNORM, GL_RGBA4, GL_BGRA, GL_UNSIGNED_SHORT_4_4_4_4_REV, RGB1_SWIZZLE },
{ VIRGL_FORMAT_B5G5R5X1_UNORM, GL_RGB5_A1, GL_BGRA, GL_UNSIGNED_SHORT_1_5_5_5_REV, RGB1_SWIZZLE },
{ VIRGL_FORMAT_B5G5R5A1_UNORM, GL_RGB5_A1, GL_BGRA, GL_UNSIGNED_SHORT_1_5_5_5_REV, NO_SWIZZLE },
{ VIRGL_FORMAT_B5G6R5_UNORM, GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, NO_SWIZZLE },
{ VIRGL_FORMAT_B2G3R3_UNORM, GL_R3_G3_B2, GL_RGB, GL_UNSIGNED_BYTE_3_3_2, NO_SWIZZLE },
{ VIRGL_FORMAT_R16G16B16X16_UNORM, GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT, RGB1_SWIZZLE },
{ VIRGL_FORMAT_R16G16B16A16_UNORM, GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT, NO_SWIZZLE },
};
static struct vrend_format_table base_depth_formats[] =
{
{ VIRGL_FORMAT_Z16_UNORM, GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, NO_SWIZZLE },
{ VIRGL_FORMAT_Z32_UNORM, GL_DEPTH_COMPONENT32, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NO_SWIZZLE },
{ VIRGL_FORMAT_S8_UINT_Z24_UNORM, GL_DEPTH24_STENCIL8_EXT, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8, NO_SWIZZLE },
{ VIRGL_FORMAT_Z24X8_UNORM, GL_DEPTH_COMPONENT24, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NO_SWIZZLE },
{ VIRGL_FORMAT_Z32_FLOAT, GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT, NO_SWIZZLE },
/* this is probably a separate format */
{ VIRGL_FORMAT_Z32_FLOAT_S8X24_UINT, GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, NO_SWIZZLE },
{ VIRGL_FORMAT_X24S8_UINT, GL_STENCIL_INDEX8, GL_STENCIL_INDEX, GL_UNSIGNED_INT_24_8, NO_SWIZZLE },
};
static struct vrend_format_table base_la_formats[] = {
{ VIRGL_FORMAT_A8_UNORM, GL_ALPHA8, GL_ALPHA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
{ VIRGL_FORMAT_L8_UNORM, GL_R8, GL_RED, GL_UNSIGNED_BYTE, RRR1_SWIZZLE },
{ VIRGL_FORMAT_L8A8_UNORM, GL_RG8, GL_RG, GL_UNSIGNED_BYTE, RRRG_SWIZZLE },
{ VIRGL_FORMAT_A16_UNORM, GL_ALPHA16, GL_ALPHA, GL_UNSIGNED_SHORT, NO_SWIZZLE },
{ VIRGL_FORMAT_L16_UNORM, GL_R16, GL_RED, GL_UNSIGNED_SHORT, RRR1_SWIZZLE },
{ VIRGL_FORMAT_L16A16_UNORM, GL_RG16, GL_RG, GL_UNSIGNED_SHORT, RRRG_SWIZZLE },
};
/* Plain one/two-channel unorm formats (ARB_texture_rg-style R/RG). */
static struct vrend_format_table rg_base_formats[] = {
   { VIRGL_FORMAT_R8_UNORM, GL_R8, GL_RED, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R8G8_UNORM, GL_RG8, GL_RG, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R16_UNORM, GL_R16, GL_RED, GL_UNSIGNED_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R16G16_UNORM, GL_RG16, GL_RG, GL_UNSIGNED_SHORT, NO_SWIZZLE },
};
/* Four-channel integer (non-normalized) formats. */
static struct vrend_format_table integer_base_formats[] = {
   { VIRGL_FORMAT_R8G8B8A8_UINT, GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R8G8B8A8_SINT, GL_RGBA8I, GL_RGBA_INTEGER, GL_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R16G16B16A16_UINT, GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R16G16B16A16_SINT, GL_RGBA16I, GL_RGBA_INTEGER, GL_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32G32B32A32_UINT, GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32G32B32A32_SINT, GL_RGBA32I, GL_RGBA_INTEGER, GL_INT, NO_SWIZZLE },
};
/* Three-channel integer formats (kept separate: RGB renderability differs). */
static struct vrend_format_table integer_3comp_formats[] = {
   { VIRGL_FORMAT_R8G8B8_UINT, GL_RGB8UI, GL_RGB_INTEGER, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R8G8B8_SINT, GL_RGB8I, GL_RGB_INTEGER, GL_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R16G16B16_UINT, GL_RGB16UI, GL_RGB_INTEGER, GL_UNSIGNED_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R16G16B16_SINT, GL_RGB16I, GL_RGB_INTEGER, GL_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32G32B32_UINT, GL_RGB32UI, GL_RGB_INTEGER, GL_UNSIGNED_INT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32G32B32_SINT, GL_RGB32I, GL_RGB_INTEGER, GL_INT, NO_SWIZZLE },
};
/* Four-channel floating-point formats. */
static struct vrend_format_table float_base_formats[] = {
   { VIRGL_FORMAT_R16G16B16A16_FLOAT, GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32G32B32A32_FLOAT, GL_RGBA32F, GL_RGBA, GL_FLOAT, NO_SWIZZLE },
};
/* Luminance/alpha float formats; L and LA are emulated via R/RG + swizzle. */
static struct vrend_format_table float_la_formats[] = {
   { VIRGL_FORMAT_A16_FLOAT, GL_ALPHA16F_ARB, GL_ALPHA, GL_HALF_FLOAT, NO_SWIZZLE },
   { VIRGL_FORMAT_L16_FLOAT, GL_R16F, GL_RED, GL_HALF_FLOAT, RRR1_SWIZZLE },
   { VIRGL_FORMAT_L16A16_FLOAT, GL_RG16F, GL_RG, GL_HALF_FLOAT, RRRG_SWIZZLE },
   { VIRGL_FORMAT_A32_FLOAT, GL_ALPHA32F_ARB, GL_ALPHA, GL_FLOAT, NO_SWIZZLE },
   { VIRGL_FORMAT_L32_FLOAT, GL_R32F, GL_RED, GL_FLOAT, RRR1_SWIZZLE },
   { VIRGL_FORMAT_L32A32_FLOAT, GL_RG32F, GL_RG, GL_FLOAT, RRRG_SWIZZLE },
};
/* One/two-channel integer formats. */
static struct vrend_format_table integer_rg_formats[] = {
   { VIRGL_FORMAT_R8_UINT, GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R8G8_UINT, GL_RG8UI, GL_RG_INTEGER, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R8_SINT, GL_R8I, GL_RED_INTEGER, GL_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R8G8_SINT, GL_RG8I, GL_RG_INTEGER, GL_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R16_UINT, GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R16G16_UINT, GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R16_SINT, GL_R16I, GL_RED_INTEGER, GL_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R16G16_SINT, GL_RG16I, GL_RG_INTEGER, GL_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32_UINT, GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32G32_UINT, GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32_SINT, GL_R32I, GL_RED_INTEGER, GL_INT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32G32_SINT, GL_RG32I, GL_RG_INTEGER, GL_INT, NO_SWIZZLE },
};
/* One/two-channel float formats. */
static struct vrend_format_table float_rg_formats[] = {
   { VIRGL_FORMAT_R16_FLOAT, GL_R16F, GL_RED, GL_HALF_FLOAT, NO_SWIZZLE },
   { VIRGL_FORMAT_R16G16_FLOAT, GL_RG16F, GL_RG, GL_HALF_FLOAT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32_FLOAT, GL_R32F, GL_RED, GL_FLOAT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32G32_FLOAT, GL_RG32F, GL_RG, GL_FLOAT, NO_SWIZZLE },
};
/* Three-channel float formats. */
static struct vrend_format_table float_3comp_formats[] = {
   { VIRGL_FORMAT_R16G16B16_FLOAT, GL_RGB16F, GL_RGB, GL_HALF_FLOAT, NO_SWIZZLE },
   { VIRGL_FORMAT_R32G32B32_FLOAT, GL_RGB32F, GL_RGB, GL_FLOAT, NO_SWIZZLE },
};
/* Luminance/alpha integer formats; L and LA variants are emulated via
 * R/RG integer textures plus a sampler swizzle.
 *
 * BUG FIX: VIRGL_FORMAT_L32A32_UINT previously mapped to GL_RG16UI
 * (a 16-bit internal format), truncating 32-bit LA data. Every other
 * 32-bit two-channel entry in this file uses the 32-bit enum
 * (GL_RG32UI / GL_RG32I), so this was a copy-paste error. */
static struct vrend_format_table integer_la_formats[] = {
   { VIRGL_FORMAT_A8_UINT, GL_ALPHA8UI_EXT, GL_ALPHA_INTEGER, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_L8_UINT, GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE, RRR1_SWIZZLE },
   { VIRGL_FORMAT_L8A8_UINT, GL_RG8UI, GL_RG_INTEGER, GL_UNSIGNED_BYTE, RRRG_SWIZZLE },
   { VIRGL_FORMAT_A8_SINT, GL_ALPHA8I_EXT, GL_ALPHA_INTEGER, GL_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_L8_SINT, GL_R8I, GL_RED_INTEGER, GL_BYTE, RRR1_SWIZZLE },
   { VIRGL_FORMAT_L8A8_SINT, GL_RG8I, GL_RG_INTEGER, GL_BYTE, RRRG_SWIZZLE },
   { VIRGL_FORMAT_A16_UINT, GL_ALPHA16UI_EXT, GL_ALPHA_INTEGER, GL_UNSIGNED_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_L16_UINT, GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT, RRR1_SWIZZLE },
   { VIRGL_FORMAT_L16A16_UINT, GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT, RRRG_SWIZZLE },
   { VIRGL_FORMAT_A16_SINT, GL_ALPHA16I_EXT, GL_ALPHA_INTEGER, GL_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_L16_SINT, GL_R16I, GL_RED_INTEGER, GL_SHORT, RRR1_SWIZZLE },
   { VIRGL_FORMAT_L16A16_SINT, GL_RG16I, GL_RG_INTEGER, GL_SHORT, RRRG_SWIZZLE },
   { VIRGL_FORMAT_A32_UINT, GL_ALPHA32UI_EXT, GL_ALPHA_INTEGER, GL_UNSIGNED_INT, NO_SWIZZLE },
   { VIRGL_FORMAT_L32_UINT, GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT, RRR1_SWIZZLE },
   { VIRGL_FORMAT_L32A32_UINT, GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT, RRRG_SWIZZLE },
   { VIRGL_FORMAT_A32_SINT, GL_ALPHA32I_EXT, GL_ALPHA_INTEGER, GL_INT, NO_SWIZZLE },
   { VIRGL_FORMAT_L32_SINT, GL_R32I, GL_RED_INTEGER, GL_INT, RRR1_SWIZZLE },
   { VIRGL_FORMAT_L32A32_SINT, GL_RG32I, GL_RG_INTEGER, GL_INT, RRRG_SWIZZLE },
};
/* Signed-normalized R/RG/RGBA formats. */
static struct vrend_format_table snorm_formats[] = {
   { VIRGL_FORMAT_R8_SNORM, GL_R8_SNORM, GL_RED, GL_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R8G8_SNORM, GL_RG8_SNORM, GL_RG, GL_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R8G8B8A8_SNORM, GL_RGBA8_SNORM, GL_RGBA, GL_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R8G8B8X8_SNORM, GL_RGBA8_SNORM, GL_RGBA, GL_BYTE, RGB1_SWIZZLE },
   { VIRGL_FORMAT_R16_SNORM, GL_R16_SNORM, GL_RED, GL_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R16G16_SNORM, GL_RG16_SNORM, GL_RG, GL_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R16G16B16A16_SNORM, GL_RGBA16_SNORM, GL_RGBA, GL_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_R16G16B16X16_SNORM, GL_RGBA16_SNORM, GL_RGBA, GL_SHORT, RGB1_SWIZZLE },
};
/* Signed-normalized luminance/alpha formats (L/LA emulated via R/RG + swizzle). */
static struct vrend_format_table snorm_la_formats[] = {
   { VIRGL_FORMAT_A8_SNORM, GL_ALPHA8_SNORM, GL_ALPHA, GL_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_L8_SNORM, GL_R8_SNORM, GL_RED, GL_BYTE, RRR1_SWIZZLE },
   { VIRGL_FORMAT_L8A8_SNORM, GL_RG8_SNORM, GL_RG, GL_BYTE, RRRG_SWIZZLE },
   { VIRGL_FORMAT_A16_SNORM, GL_ALPHA16_SNORM, GL_ALPHA, GL_SHORT, NO_SWIZZLE },
   { VIRGL_FORMAT_L16_SNORM, GL_R16_SNORM, GL_RED, GL_SHORT, RRR1_SWIZZLE },
   { VIRGL_FORMAT_L16A16_SNORM, GL_RG16_SNORM, GL_RG, GL_SHORT, RRRG_SWIZZLE },
};
/* S3TC/DXT compressed formats (EXT_texture_compression_s3tc). */
static struct vrend_format_table dxtn_formats[] = {
   { VIRGL_FORMAT_DXT1_RGB, GL_COMPRESSED_RGB_S3TC_DXT1_EXT, GL_RGB, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_DXT1_RGBA, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_DXT3_RGBA, GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_DXT5_RGBA, GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
};
/* sRGB variants of the S3TC/DXT formats. */
static struct vrend_format_table dxtn_srgb_formats[] = {
   { VIRGL_FORMAT_DXT1_SRGB, GL_COMPRESSED_SRGB_S3TC_DXT1_EXT, GL_RGB, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_DXT1_SRGBA, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_DXT3_SRGBA, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_DXT5_SRGBA, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
};
/* RGTC one/two-channel compressed formats (ARB_texture_compression_rgtc). */
static struct vrend_format_table rgtc_formats[] = {
   { VIRGL_FORMAT_RGTC1_UNORM, GL_COMPRESSED_RED_RGTC1, GL_RED, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_RGTC1_SNORM, GL_COMPRESSED_SIGNED_RED_RGTC1, GL_RED, GL_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_RGTC2_UNORM, GL_COMPRESSED_RG_RGTC2, GL_RG, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_RGTC2_SNORM, GL_COMPRESSED_SIGNED_RG_RGTC2, GL_RG, GL_BYTE, NO_SWIZZLE },
};
/* Uncompressed sRGB formats. */
static struct vrend_format_table srgb_formats[] = {
   { VIRGL_FORMAT_B8G8R8X8_SRGB, GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, RGB1_SWIZZLE },
   { VIRGL_FORMAT_B8G8R8A8_SRGB, GL_SRGB8_ALPHA8, GL_BGRA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_R8G8B8X8_SRGB, GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE, RGB1_SWIZZLE },
   { VIRGL_FORMAT_L8_SRGB, GL_SR8_EXT, GL_RED, GL_UNSIGNED_BYTE, RRR1_SWIZZLE },
   { VIRGL_FORMAT_L8A8_SRGB, GL_SRG8_EXT, GL_RG, GL_UNSIGNED_BYTE, RRRG_SWIZZLE },
};
/* 10:10:10:2 packed formats. */
static struct vrend_format_table bit10_formats[] = {
   { VIRGL_FORMAT_B10G10R10X2_UNORM, GL_RGB10_A2, GL_BGRA, GL_UNSIGNED_INT_2_10_10_10_REV, RGB1_SWIZZLE },
   { VIRGL_FORMAT_B10G10R10A2_UNORM, GL_RGB10_A2, GL_BGRA, GL_UNSIGNED_INT_2_10_10_10_REV, NO_SWIZZLE },
   { VIRGL_FORMAT_B10G10R10A2_UINT, GL_RGB10_A2UI, GL_BGRA_INTEGER, GL_UNSIGNED_INT_2_10_10_10_REV, NO_SWIZZLE },
   { VIRGL_FORMAT_R10G10B10X2_UNORM, GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, RGB1_SWIZZLE },
   { VIRGL_FORMAT_R10G10B10A2_UNORM, GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV, NO_SWIZZLE },
};
/* R11F_G11F_B10F packed float. */
static struct vrend_format_table packed_float_formats[] = {
   { VIRGL_FORMAT_R11G11B10_FLOAT, GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV, NO_SWIZZLE },
};
/* RGB9_E5 shared-exponent float. */
static struct vrend_format_table exponent_float_formats[] = {
   { VIRGL_FORMAT_R9G9B9E5_FLOAT, GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV, NO_SWIZZLE },
};
/* BPTC/BC6H-BC7 compressed formats (ARB_texture_compression_bptc). */
static struct vrend_format_table bptc_formats[] = {
   { VIRGL_FORMAT_BPTC_RGBA_UNORM, GL_COMPRESSED_RGBA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_BPTC_SRGBA, GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM, GL_RGBA, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_BPTC_RGB_FLOAT, GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT, GL_RGB, GL_UNSIGNED_BYTE, NO_SWIZZLE },
   { VIRGL_FORMAT_BPTC_RGB_UFLOAT, GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, GL_RGB, GL_UNSIGNED_BYTE, NO_SWIZZLE },
};
/* BGRA formats for GLES hosts, via GL_EXT_texture_format_BGRA8888. */
static struct vrend_format_table gles_bgra_formats[] = {
   { VIRGL_FORMAT_B8G8R8X8_UNORM, GL_BGRA_EXT, GL_BGRA_EXT, GL_UNSIGNED_BYTE, RGB1_SWIZZLE },
   { VIRGL_FORMAT_B8G8R8A8_UNORM, GL_BGRA_EXT, GL_BGRA_EXT, GL_UNSIGNED_BYTE, NO_SWIZZLE },
};
/* GLES fallback: expose Z32 but back it with a 24-bit depth buffer. */
static struct vrend_format_table gles_z32_format[] = {
   { VIRGL_FORMAT_Z32_UNORM, GL_DEPTH_COMPONENT24, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NO_SWIZZLE },
};
/* Probe each table entry against the live GL context and register the
 * formats the driver actually supports.
 *
 * For every entry a throwaway 32x32 texture is created and attached to a
 * scratch FBO; glCheckFramebufferStatus then tells us whether the format
 * is renderable (color or depth/stencil), which determines the binding
 * flags recorded in the global format list. */
static void vrend_add_formats(struct vrend_format_table *table, int num_entries)
{
   int i;
   uint32_t binding = 0;
   GLuint buffers;
   GLuint tex_id, fb_id;
   for (i = 0; i < num_entries; i++) {
      GLenum status;
      bool is_depth = false;
      /* Fresh texture + FBO per entry so one format can't poison the next. */
      glGenTextures(1, &tex_id);
      glGenFramebuffers(1, &fb_id);
      glBindTexture(GL_TEXTURE_2D, tex_id);
      glBindFramebuffer(GL_FRAMEBUFFER, fb_id);
      glTexImage2D(GL_TEXTURE_2D, 0, table[i].internalformat, 32, 32, 0, table[i].glformat, table[i].gltype, NULL);
      status = glGetError();
      if (status == GL_INVALID_VALUE) {
         /* The driver rejected the internal format outright; for a couple
          * of alpha formats we can fall back to an R-channel texture with
          * an all-zero RGB / R-in-A swizzle. */
         struct vrend_format_table *entry = NULL;
         uint8_t swizzle[4];
         binding = VREND_BIND_SAMPLER | VREND_BIND_RENDER | VREND_BIND_NEED_SWIZZLE;
         switch (table[i].format) {
         case PIPE_FORMAT_A8_UNORM:
            entry = &rg_base_formats[0];
            swizzle[0] = swizzle[1] = swizzle[2] = PIPE_SWIZZLE_ZERO;
            swizzle[3] = PIPE_SWIZZLE_RED;
            break;
         case PIPE_FORMAT_A16_UNORM:
            entry = &rg_base_formats[2];
            swizzle[0] = swizzle[1] = swizzle[2] = PIPE_SWIZZLE_ZERO;
            swizzle[3] = PIPE_SWIZZLE_RED;
            break;
         default:
            break;
         }
         if (entry) {
            vrend_insert_format_swizzle(table[i].format, entry, binding, swizzle);
         }
         glDeleteTextures(1, &tex_id);
         glDeleteFramebuffers(1, &fb_id);
         continue;
      }
      if (util_format_is_depth_or_stencil(table[i].format)) {
         /* Depth-only formats attach to GL_DEPTH_ATTACHMENT; combined
          * depth+stencil formats need GL_DEPTH_STENCIL_ATTACHMENT. */
         GLenum attachment;
         if (table[i].format == VIRGL_FORMAT_Z24X8_UNORM || table[i].format == VIRGL_FORMAT_Z32_UNORM || table[i].format == VIRGL_FORMAT_Z16_UNORM || table[i].format == VIRGL_FORMAT_Z32_FLOAT)
            attachment = GL_DEPTH_ATTACHMENT;
         else
            attachment = GL_DEPTH_STENCIL_ATTACHMENT;
         glFramebufferTexture2D(GL_FRAMEBUFFER_EXT, attachment, GL_TEXTURE_2D, tex_id, 0);
         is_depth = true;
         buffers = GL_NONE;
         glDrawBuffers(1, &buffers);
      } else {
         glFramebufferTexture2D(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, tex_id, 0);
         buffers = GL_COLOR_ATTACHMENT0_EXT;
         glDrawBuffers(1, &buffers);
      }
      status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
      /* Every surviving format is at least samplable; renderability is
       * granted only when the FBO probe came back complete. */
      binding = VREND_BIND_SAMPLER;
      if (status == GL_FRAMEBUFFER_COMPLETE)
         binding |= (is_depth ? VREND_BIND_DEPTHSTENCIL : VREND_BIND_RENDER);
      glDeleteTextures(1, &tex_id);
      glDeleteFramebuffers(1, &fb_id);
      if (table[i].swizzle[0] != SWIZZLE_INVALID)
         vrend_insert_format_swizzle(table[i].format, &table[i], binding, table[i].swizzle);
      else
         vrend_insert_format(&table[i], binding);
   }
}
/* Register a whole table, deriving its element count at compile time.
 * Only valid on actual arrays (ARRAY_SIZE would be wrong on a pointer). */
#define add_formats(x) vrend_add_formats((x), ARRAY_SIZE((x)))
/* Build the host format list by probing every table against the current
 * GL context. Called once at renderer init, with a context bound. */
void vrend_build_format_list(void)
{
   add_formats(base_rgba_formats);
   add_formats(base_depth_formats);
   add_formats(base_la_formats);
   /* float support */
   add_formats(float_base_formats);
   add_formats(float_la_formats);
   add_formats(float_3comp_formats);
   /* texture integer support ? */
   add_formats(integer_base_formats);
   add_formats(integer_la_formats);
   add_formats(integer_3comp_formats);
   /* RG support? */
   add_formats(rg_base_formats);
   /* integer + rg */
   add_formats(integer_rg_formats);
   /* float + rg */
   add_formats(float_rg_formats);
   /* snorm */
   add_formats(snorm_formats);
   add_formats(snorm_la_formats);
   /* compressed */
   add_formats(rgtc_formats);
   add_formats(dxtn_formats);
   add_formats(dxtn_srgb_formats);
   add_formats(srgb_formats);
   add_formats(bit10_formats);
   add_formats(packed_float_formats);
   add_formats(exponent_float_formats);
   add_formats(bptc_formats);
}
/* GLES hosts: build the common format list, then layer on GLES-specific
 * fallbacks for formats the ES API cannot express directly. */
void vrend_build_format_list_gles(void)
{
   vrend_build_format_list();
   /* The BGR[A|X] formats are required, but OpenGL ES does not
    * support rendering to them. Try to use GL_BGRA_EXT from the
    * GL_EXT_texture_format_BGRA8888 extension. Note that the
    * GL_BGRA_EXT format is not supported by desktop OpenGL.
    */
   add_formats(gles_bgra_formats);
   /* The Z32 format is required, but OpenGL ES does not support
    * using it as a depth buffer. We just fake support with Z24
    * and hope nobody notices.
    */
   add_formats(gles_z32_format);
}
|
Keenuts/virglrenderer
|
vtest/vtest_vk_objects.h
|
<filename>vtest/vtest_vk_objects.h
#ifndef VTEST_VK_OBJECTS
#define VTEST_VK_OBJECTS
/* Wire-format payload structs for the vtest Vulkan protocol.
 *
 * These structs are read/written verbatim over the vtest socket
 * (see vtest_block_read/vtest_block_write), so their layout is the
 * protocol. Fields mirror the corresponding Vk*CreateInfo members,
 * widened/flattened to fixed-size integers.
 * NOTE(review): no explicit packing/endianness handling is visible
 * here - presumably both ends share the same ABI; confirm. */
struct payload_create_descriptor_set_layout_intro {
   uint32_t handle;
   uint32_t flags;
   uint32_t bindingCount;
};
/* One VkDescriptorSetLayoutBinding; repeated bindingCount times. */
struct payload_create_descriptor_set_layout_pBindings {
   uint32_t binding;
   uint32_t descriptorType;
   uint32_t descriptorCount;
   uint32_t stageFlags;
};
struct payload_allocate_descriptor_sets_intro {
   uint32_t handle;
   uint32_t descriptorPool;
   uint32_t descriptorSetCount;
};
/* Followed on the wire by codeSize bytes of SPIR-V. */
struct payload_create_shader_module_intro {
   uint32_t handle;
   uint32_t flags;
   uint32_t codeSize;
};
struct payload_create_descriptor_pool_intro {
   uint32_t handle;
   uint32_t flags;
   uint32_t maxSets;
   uint32_t poolSizeCount;
};
/* One VkDescriptorPoolSize; repeated poolSizeCount times. */
struct payload_create_descriptor_pool_pPoolSizes {
   uint32_t type;
   uint32_t descriptorCount;
};
struct payload_create_pipeline_layout_intro {
   uint32_t handle;
   uint32_t flags;
   uint32_t setLayoutCount;
   uint32_t pushConstantRangeCount;
};
/* One VkPushConstantRange; repeated pushConstantRangeCount times. */
struct payload_create_pipeline_layout_pPushConstantRanges {
   uint32_t stageFlags;
   uint32_t offset;
   uint32_t size;
};
/* Flattened VkComputePipelineCreateInfo; the entrypoint string
 * (entrypoint_len bytes) follows on the wire. */
struct payload_create_compute_pipelines_intro {
   uint32_t handle;
   uint32_t flags;
   uint32_t layout;
   uint32_t stage_flags;
   uint32_t stage_stage;
   uint32_t stage_module;
   uint32_t entrypoint_len;
};
struct payload_allocate_memory {
   uint32_t handle;
   uint32_t memory_index;
   uint64_t device_size;
};
struct payload_create_buffer {
   uint32_t handle;
   uint32_t flags;
   uint64_t size;
   uint32_t usage;
   uint32_t sharingMode;
   uint32_t queueFamilyIndexCount;
};
struct payload_bind_buffer_memory {
   uint32_t device_handle;
   uint32_t buffer_handle;
   uint32_t memory_handle;
   uint64_t offset;
};
struct payload_write_descriptor_set_intro {
   uint32_t device_handle;
   uint32_t dstSet;
   uint32_t dstBinding;
   uint32_t dstArrayElement;
   uint32_t descriptorType;
   uint32_t descriptorCount;
};
/* One VkDescriptorBufferInfo; repeated descriptorCount times. */
struct payload_write_descriptor_set_buffer {
   uint32_t buffer_handle;
   uint64_t offset;
   uint64_t range;
};
struct payload_create_fence {
   uint32_t device_handle;
   uint32_t flags;
};
struct payload_wait_for_fences {
   uint32_t device_handle;
   uint32_t fence_count;
   uint32_t wait_all;
   uint32_t timeout;
   /* uint32_t fence_handles[] */;
};
/* Trailing handle arrays follow in the order given below. */
struct payload_queue_submit {
   uint32_t device_handle;
   uint32_t queue_handle;
   uint32_t fence_handle;
   uint32_t wait_count;
   uint32_t cmd_count;
   uint32_t signal_count;
   /* uint32_t wait_handles[]; */
   /* uint32_t cmd_handles[]; */
   /* uint32_t signal_handles[]; */
};
int
vtest_vk_create_descriptor_set_layout(uint32_t length_dw);
int
vtest_vk_allocate_descriptor_sets(uint32_t length_dw);
int
vtest_vk_create_shader_module(uint32_t length_dw);
int
vtest_vk_create_descriptor_pool(uint32_t length_dw);
int
vtest_vk_create_pipeline_layout(uint32_t length_dw);
int
vtest_vk_create_compute_pipelines(uint32_t length_dw);
int
vtest_vk_allocate_memory(uint32_t length_dw);
int
vtest_vk_create_buffer(uint32_t length_dw);
int
vtest_vk_bind_buffer_memory(uint32_t length_dw);
int
vtest_vk_write_descriptor_set(uint32_t length_dw);
int
vtest_vk_create_fence(uint32_t length_dw);
int
vtest_vk_wait_for_fences(uint32_t length_dw);
int
vtest_vk_queue_submit(uint32_t length_dw);
#endif
|
Keenuts/virglrenderer
|
vtest/vtest_vk_commands.c
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vulkan/vulkan.h>
#include "virglrenderer_vulkan.h"
#include "vtest.h"
#include "os/os_misc.h"
#include "vtest_protocol.h"
#include "vtest_vk.h"
#include "vtest_vk_commands.h"
/* Handle a VCMD create-command-pool request: read the flattened
 * VkCommandPoolCreateInfo payload from the client, create the pool via
 * the virgl Vulkan layer, and write the {error_code, handle} result back.
 * Returns 0 on success, -1 on socket I/O failure. */
int vtest_vk_create_command_pool(UNUSED uint32_t length_dw)
{
   struct payload_command_pool_create_info payload;
   struct vtest_result result = { 0 };
   int io_res;

   io_res = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(io_res, sizeof(payload));

   /* Designated initializer zeroes the remaining members (pNext, ...). */
   VkCommandPoolCreateInfo vk_info = {
      .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
      .flags = payload.flags,
      .queueFamilyIndex = payload.queue_family_index,
   };

   result.error_code = virgl_vk_create_command_pool(payload.device_handle,
                                                    &vk_info,
                                                    &result.result);

   io_res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(io_res, sizeof(result));
   return 0;
}
/* Handle a VCMD allocate-command-buffers request.
 *
 * Reads the payload, allocates payload.count command buffers through the
 * virgl Vulkan layer, writes the result header and, on success, the array
 * of new buffer handles.
 *
 * FIX: payload.count comes straight off the wire (untrusted); the original
 * used alloca(count * 4), which an adversarial client could use to blow
 * the stack. Use a checked heap allocation instead (calloc also guards
 * the count*size multiplication against overflow), freed on every path.
 *
 * Returns 0 on success, -1 on I/O or allocation failure, or the Vulkan
 * layer's error code. */
int vtest_vk_allocate_command_buffers(UNUSED uint32_t length_dw)
{
   int res;
   int ret = -1;
   struct vtest_result result = { 0 };
   VkCommandBufferAllocateInfo vk_info;
   struct payload_command_buffer_allocate_info payload;
   uint32_t *handles = NULL;

   res = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(res, sizeof(payload));

   memset(&vk_info, 0, sizeof(vk_info));
   vk_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   vk_info.level = payload.level;
   vk_info.commandBufferCount = payload.count;

   handles = calloc(payload.count, sizeof(uint32_t));
   if (handles == NULL && payload.count != 0) {
      return -1;
   }

   result.error_code = virgl_vk_allocate_command_buffers(payload.device_handle,
                                                         payload.pool_handle,
                                                         &vk_info,
                                                         handles);
   result.result = payload.count;

   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   if (res < (int)sizeof(result)) {
      fprintf(stderr, "%s: failed to write back the answer.\n", __func__);
      goto out;
   }
   if (0 != result.error_code) {
      ret = result.error_code;
      goto out;
   }
   res = vtest_block_write(renderer.out_fd, handles, sizeof(uint32_t) * result.result);
   if (res < (int)(sizeof(uint32_t) * result.result)) {
      fprintf(stderr, "%s: failed to write back the answer.\n", __func__);
      goto out;
   }
   ret = 0;
out:
   free(handles);
   return ret;
}
/* Handle a VCMD record-command request: read the flattened record info
 * plus the trailing descriptor-handle array, forward it to the virgl
 * Vulkan layer, and write back the result.
 * Returns 0 on success, -1 on I/O failure, or the layer's error code. */
int vtest_vk_record_command(UNUSED uint32_t length_dw)
{
   int res;
   struct vtest_result result = { 0 };
   struct payload_command_record_info payload;
   struct virgl_vk_record_info *info = NULL;
   res = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(res, sizeof(payload));
   info = alloca(sizeof(*info));
   info->cmd_handle = payload.cmd_handle;
   info->pool_handle = payload.pool_handle;
   info->pipeline_handle = payload.pipeline_handle;
   info->pipeline_layout_handle = payload.pipeline_layout_handle;
   info->bind_point = payload.bind_point;
   info->descriptor_count = payload.descriptor_count;
   /* dispatch_size is a uint32_t[3] (x, y, z workgroup counts). */
   memcpy(&info->dispatch_size, &payload.dispatch_size, sizeof(uint32_t) * 3);
   /* NOTE(review): descriptor_count is untrusted wire data; this alloca is
    * unbounded and could exhaust the stack - consider a checked heap
    * allocation as done elsewhere. */
   info->descriptor_handles = alloca(sizeof(uint32_t) * payload.descriptor_count);
   res = vtest_block_read(renderer.in_fd,
                          info->descriptor_handles,
                          sizeof(uint32_t) * payload.descriptor_count);
   CHECK_IO_RESULT(res, sizeof(uint32_t) * payload.descriptor_count);
   result.error_code = virgl_vk_record_command(payload.device_handle, info);
   result.result = 0;
   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));
   return result.error_code;
}
|
Keenuts/virglrenderer
|
src/virglrenderer.h
|
<gh_stars>1-10
/**************************************************************************
*
* Copyright (C) 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/* library interface from QEMU to virglrenderer */
#ifndef VIRGLRENDERER_H
#define VIRGLRENDERER_H
#include <stdint.h>
#include <stdbool.h>
struct virgl_box;
struct iovec;
#define VIRGL_EXPORT __attribute__((visibility("default")))
/* Opaque handle to a GL context owned by the embedding application. */
typedef void *virgl_renderer_gl_context;
/* Parameters passed to the create_gl_context callback. */
struct virgl_renderer_gl_ctx_param {
   int version;     /* struct version, for ABI evolution */
   bool shared;     /* share resources with an existing context */
   int major_ver;   /* requested GL major version */
   int minor_ver;   /* requested GL minor version */
};
/* Callbacks the embedder (e.g. QEMU) supplies to virgl_renderer_init.
 * "cookie" is the opaque pointer the embedder passed at init time. */
struct virgl_renderer_callbacks {
   int version;
   void (*write_fence)(void *cookie, uint32_t fence);
   /* interact with GL implementation */
   virgl_renderer_gl_context (*create_gl_context)(void *cookie, int scanout_idx, struct virgl_renderer_gl_ctx_param *param);
   void (*destroy_gl_context)(void *cookie, virgl_renderer_gl_context ctx);
   int (*make_current)(void *cookie, int scanout_idx, virgl_renderer_gl_context ctx);
};
/* virtio-gpu compatible interface */
#define VIRGL_RENDERER_USE_EGL 1
/*
* Wait for sync objects in thread rather than polling
* need to use virgl_renderer_get_poll_fd to know if this feature is in effect.
*/
#define VIRGL_RENDERER_THREAD_SYNC 2
#define VIRGL_RENDERER_USE_GLX (1 << 2)
#define VIRGL_RENDERER_USE_VULKAN (1 << 3)
VIRGL_EXPORT int virgl_renderer_init(void *cookie, int flags, struct virgl_renderer_callbacks *cb);
VIRGL_EXPORT void virgl_renderer_poll(void); /* force fences */
/* we need to give qemu the cursor resource contents */
VIRGL_EXPORT void *virgl_renderer_get_cursor_data(uint32_t resource_id, uint32_t *width, uint32_t *height);
VIRGL_EXPORT void virgl_renderer_get_rect(int resource_id, struct iovec *iov, unsigned int num_iovs,
uint32_t offset, int x, int y, int width, int height);
VIRGL_EXPORT int virgl_renderer_get_fd_for_texture(uint32_t tex_id, int *fd);
/* virgl bind flags - these are compatible with mesa 10.5 gallium.
but are fixed, no other should be passed to virgl either. */
#define VIRGL_RES_BIND_DEPTH_STENCIL (1 << 0)
#define VIRGL_RES_BIND_RENDER_TARGET (1 << 1)
#define VIRGL_RES_BIND_SAMPLER_VIEW (1 << 3)
#define VIRGL_RES_BIND_VERTEX_BUFFER (1 << 4)
#define VIRGL_RES_BIND_INDEX_BUFFER (1 << 5)
#define VIRGL_RES_BIND_CONSTANT_BUFFER (1 << 6)
#define VIRGL_RES_BIND_STREAM_OUTPUT (1 << 11)
#define VIRGL_RES_BIND_CURSOR (1 << 16)
#define VIRGL_RES_BIND_CUSTOM (1 << 17)
/* Arguments for virgl_renderer_resource_create. Values mirror gallium
 * resource templates: target/format/bind use the VIRGL_* enums and the
 * VIRGL_RES_BIND_* flags defined above. */
struct virgl_renderer_resource_create_args {
   uint32_t handle;       /* caller-chosen resource id */
   uint32_t target;
   uint32_t format;
   uint32_t bind;         /* VIRGL_RES_BIND_* mask */
   uint32_t width;
   uint32_t height;
   uint32_t depth;
   uint32_t array_size;
   uint32_t last_level;   /* highest mip level */
   uint32_t nr_samples;
   uint32_t flags;
};
/* new API */
VIRGL_EXPORT int virgl_renderer_resource_create(struct virgl_renderer_resource_create_args *args, struct iovec *iov, uint32_t num_iovs);
VIRGL_EXPORT void virgl_renderer_resource_unref(uint32_t res_handle);
VIRGL_EXPORT int virgl_renderer_context_create(uint32_t handle, uint32_t nlen, const char *name);
VIRGL_EXPORT void virgl_renderer_context_destroy(uint32_t handle);
VIRGL_EXPORT int virgl_renderer_submit_cmd(void *buffer,
int ctx_id,
int ndw);
VIRGL_EXPORT int virgl_renderer_transfer_read_iov(uint32_t handle, uint32_t ctx_id,
uint32_t level, uint32_t stride,
uint32_t layer_stride,
struct virgl_box *box,
uint64_t offset, struct iovec *iov,
int iovec_cnt);
VIRGL_EXPORT int virgl_renderer_transfer_write_iov(uint32_t handle,
uint32_t ctx_id,
int level,
uint32_t stride,
uint32_t layer_stride,
struct virgl_box *box,
uint64_t offset,
struct iovec *iovec,
unsigned int iovec_cnt);
VIRGL_EXPORT void virgl_renderer_get_cap_set(uint32_t set, uint32_t *max_ver,
uint32_t *max_size);
VIRGL_EXPORT void virgl_renderer_fill_caps(uint32_t set, uint32_t version,
void *caps);
VIRGL_EXPORT int virgl_renderer_resource_attach_iov(int res_handle, struct iovec *iov,
int num_iovs);
VIRGL_EXPORT void virgl_renderer_resource_detach_iov(int res_handle, struct iovec **iov, int *num_iovs);
VIRGL_EXPORT int virgl_renderer_create_fence(int client_fence_id, uint32_t ctx_id);
VIRGL_EXPORT void virgl_renderer_force_ctx_0(void);
VIRGL_EXPORT void virgl_renderer_ctx_attach_resource(int ctx_id, int res_handle);
VIRGL_EXPORT void virgl_renderer_ctx_detach_resource(int ctx_id, int res_handle);
/* return information about a resource */
/* Filled in by virgl_renderer_resource_get_info to describe a resource
 * back to the embedder (e.g. for scanout / dma-buf export). */
struct virgl_renderer_resource_info {
   uint32_t handle;
   uint32_t virgl_format;
   uint32_t width;
   uint32_t height;
   uint32_t depth;
   uint32_t flags;
   uint32_t tex_id;     /* backing GL texture name */
   uint32_t stride;
   int drm_fourcc;      /* DRM format code for export paths */
};
VIRGL_EXPORT int virgl_renderer_resource_get_info(int res_handle,
struct virgl_renderer_resource_info *info);
VIRGL_EXPORT void virgl_renderer_cleanup(void *cookie);
/* reset the renderer - destroy all contexts and resources */
VIRGL_EXPORT void virgl_renderer_reset(void);
VIRGL_EXPORT int virgl_renderer_get_poll_fd(void);
#endif
|
Keenuts/virglrenderer
|
src/vrend_vk.c
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vulkan/vulkan.h>
#include "util/u_double_list.h"
#include "util/u_memory.h"
#include "vrend_vk.h"
struct vrend_vk *vulkan_state;
/* Map a VkResult to its enumerator name for log messages.
 * Returns a static string; unknown values yield a placeholder. */
const char* vkresult_to_string(VkResult res)
{
   switch (res)
   {
   /* Stringify each enumerator via the preprocessor to avoid typos. */
#define VK2STR(Value) case Value: return #Value
   VK2STR(VK_SUCCESS);
   VK2STR(VK_NOT_READY);
   VK2STR(VK_TIMEOUT);
   VK2STR(VK_EVENT_SET);
   VK2STR(VK_EVENT_RESET);
   VK2STR(VK_INCOMPLETE);
   VK2STR(VK_ERROR_OUT_OF_HOST_MEMORY);
   VK2STR(VK_ERROR_OUT_OF_DEVICE_MEMORY);
   VK2STR(VK_ERROR_INITIALIZATION_FAILED);
   VK2STR(VK_ERROR_DEVICE_LOST);
   VK2STR(VK_ERROR_MEMORY_MAP_FAILED);
   VK2STR(VK_ERROR_LAYER_NOT_PRESENT);
   VK2STR(VK_ERROR_EXTENSION_NOT_PRESENT);
   VK2STR(VK_ERROR_FEATURE_NOT_PRESENT);
   VK2STR(VK_ERROR_INCOMPATIBLE_DRIVER);
   VK2STR(VK_ERROR_TOO_MANY_OBJECTS);
   VK2STR(VK_ERROR_FORMAT_NOT_SUPPORTED);
   VK2STR(VK_ERROR_FRAGMENTED_POOL);
   VK2STR(VK_ERROR_OUT_OF_POOL_MEMORY);
   VK2STR(VK_ERROR_INVALID_EXTERNAL_HANDLE);
   VK2STR(VK_ERROR_SURFACE_LOST_KHR);
   VK2STR(VK_ERROR_NATIVE_WINDOW_IN_USE_KHR);
   VK2STR(VK_SUBOPTIMAL_KHR);
   VK2STR(VK_ERROR_OUT_OF_DATE_KHR);
   VK2STR(VK_ERROR_INCOMPATIBLE_DISPLAY_KHR);
   VK2STR(VK_ERROR_VALIDATION_FAILED_EXT);
   VK2STR(VK_ERROR_INVALID_SHADER_NV);
   VK2STR(VK_ERROR_FRAGMENTATION_EXT);
   VK2STR(VK_ERROR_NOT_PERMITTED_EXT);
   VK2STR(VK_RESULT_MAX_ENUM);
#undef VK2STR
   default:
      return "VK_UNKNOWN_RETURN_VALUE";
   }
}
/* On Vulkan failure, log ErrMsg plus the stringified VkResult and then
 * execute Expression (callers pass `return -1` or `break`).
 *
 * WARNING: deliberately NOT wrapped in do { } while (0). Callers rely on
 * passing `break` as Expression to break out of *their own* enclosing
 * loop (see vrend_vk_init); a do/while wrapper would capture that break.
 * The cost is the usual bare-if macro hazard: do not use this macro as
 * the body of an un-braced if/else. */
#define CHECK_VK_RESULT(Result, ErrMsg, Expression) \
if (VK_SUCCESS != (Result)) { \
fprintf(stderr, ErrMsg "(%s)\n", vkresult_to_string(Result)); \
Expression; \
}
/* Enumerate the Vulkan physical devices and stash them (and their count)
 * in the global vulkan_state. Also initializes the logical-device list
 * head. Returns 0 on success, -1 on failure; partial allocations are
 * released later by vrend_vk_destroy(). */
static int
init_physical_devices(void)
{
   uint32_t device_count;
   VkResult res;
   vulkan_state->devices = CALLOC_STRUCT(vk_device);
   if (NULL == vulkan_state->devices) {
      return -1;
   }
   LIST_INITHEAD(&vulkan_state->devices->list);
   /* First call: query only the count. */
   res = vkEnumeratePhysicalDevices(vulkan_state->vk_instance,
                                    &device_count,
                                    NULL);
   CHECK_VK_RESULT(res, "vulkan device enumeration failed", return -1);
   if (device_count == 0) {
      fprintf(stderr, "No device supports Vulkan.\n");
      return -1;
   }
   vulkan_state->physical_devices = CALLOC(
      device_count, sizeof(*vulkan_state->physical_devices));
   if (vulkan_state->physical_devices == NULL) {
      return -1;
   }
   /* Second call: fill the array. NOTE(review): the spec allows the
    * count to change between the two calls (VK_INCOMPLETE); that case
    * is not handled here. */
   res = vkEnumeratePhysicalDevices(vulkan_state->vk_instance,
                                    &device_count,
                                    vulkan_state->physical_devices);
   CHECK_VK_RESULT(res, "vulkan device enumeration failed", return -1);
   vulkan_state->physical_device_count = device_count;
   return 0;
}
/* Create the global Vulkan state: instance (with validation layers in
 * DEBUG builds) plus the physical-device list. Returns 0 on success;
 * on any failure everything allocated so far is torn down via
 * vrend_vk_destroy() and -1 is returned. */
int
vrend_vk_init(void)
{
   VkResult vk_res;
   VkApplicationInfo application_info = { 0 };
   VkInstanceCreateInfo info = { 0 };
   vulkan_state = CALLOC_STRUCT(vrend_vk);
   if (NULL == vulkan_state) {
      return -1;
   }
   application_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
   application_info.pApplicationName = "virglrenderer";
   application_info.applicationVersion = 1;
   application_info.pEngineName = NULL;
   application_info.engineVersion = 1;
   application_info.apiVersion = VK_MAKE_VERSION(1,1,0);
   /* NOTE(review): in non-DEBUG builds this is a zero-length array
    * (a GNU extension); ARRAY_SIZE then evaluates to 0, so no layers
    * are requested. */
   const char *validation_layers[] = {
#ifdef DEBUG
      "VK_LAYER_LUNARG_core_validation",
      "VK_LAYER_LUNARG_object_tracker",
      "VK_LAYER_LUNARG_parameter_validation",
      "VK_LAYER_LUNARG_standard_validation",
#endif
   };
   info.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
   info.pApplicationInfo = &application_info;
   info.enabledLayerCount = ARRAY_SIZE(validation_layers);
   info.ppEnabledLayerNames = validation_layers;
   /* do/while(0) gives the CHECK_VK_RESULT "break" expression a scope
    * to escape to on failure. */
   do {
      vk_res = vkCreateInstance(&info, NULL, &vulkan_state->vk_instance);
      CHECK_VK_RESULT(vk_res, "vulkan init failed", break);
      if (0 != init_physical_devices()) {
         break;
      }
      /* success path */
      printf("Vulkan state created with %d devices.\n",
             vulkan_state->physical_device_count);
      return 0;
   } while (0);
   /* failure branch */
   vrend_vk_destroy();
   return -1;
}
/* Tear down the global Vulkan state. Safe to call at any point during
 * a partially-completed vrend_vk_init (all members are null-checked or
 * free()-tolerant) and safe to call more than once. */
void
vrend_vk_destroy(void)
{
   if (vulkan_state == NULL) {
      return;
   }

   if (vulkan_state->vk_instance != VK_NULL_HANDLE) {
      vkDestroyInstance(vulkan_state->vk_instance, NULL);
      vulkan_state->vk_instance = VK_NULL_HANDLE;
   }

   /* FREE() tolerates NULL, so partially-initialized state is fine. */
   FREE(vulkan_state->devices);
   FREE(vulkan_state->physical_devices);
   FREE(vulkan_state);
   vulkan_state = NULL;
}
|
Keenuts/virglrenderer
|
vtest/vtest_vk.h
|
<reponame>Keenuts/virglrenderer
#ifndef VTEST_VK_H
#define VTEST_VK_H
int vtest_vk_create_device(uint32_t length_dw);
int vtest_vk_destroy_device(uint32_t length_dw);
int vtest_vk_destroy_object(uint32_t length_dw);
int vtest_vk_enumerate_devices(uint32_t length_dw);
int vtest_vk_get_device_memory_properties(uint32_t length_dw);
int vtest_vk_get_queue_family_properties(uint32_t length_dw);
int vtest_vk_get_sparse_properties(uint32_t length_dw);
int vtest_vk_read_memory(uint32_t length_dw);
int vtest_vk_write_memory(uint32_t length_dw);
/* Verify a vtest_block_read/vtest_block_write moved at least Expected
 * bytes; on short transfer, log and return -1 from the *calling* function.
 *
 * FIX: wrapped in do { } while (0) so the macro expands to exactly one
 * statement and composes safely with un-braced if/else at call sites
 * (the original bare-if form would mis-bind a following `else`). The
 * embedded `return -1` still exits the caller as before. */
#define CHECK_IO_RESULT(Done, Expected) \
   do { \
      if ((Done) < (int)(Expected)) { \
         fprintf(stderr, "%s: failed to write back the answer.\n", __func__); \
         return -1; \
      } \
   } while (0)
#endif
|
Keenuts/virglrenderer
|
vtest/vtest_renderer.c
|
/**************************************************************************
*
* Copyright (C) 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>
#include "config.h"
#include "util.h"
#include "virglrenderer.h"
#include "vtest.h"
#include "vtest_protocol.h"
/* Single-context vtest server state: one virgl context, monotonically
 * increasing fence ids, and the id of the last fence the renderer
 * signalled. */
static int ctx_id = 1;
static int fence_id = 1;
static int last_fence;
/* write_fence callback passed to virglrenderer: records the most
 * recently completed fence ("cookie" is unused here). */
static void vtest_write_fence(void *cookie, uint32_t fence_id_in)
{
   last_fence = fence_id_in;
}
struct virgl_renderer_callbacks vtest_cbs = {
   .version = 1,
   .write_fence = vtest_write_fence,
};
/* Global server instance (in/out fds etc.). */
struct vtest_renderer renderer;
/* Local mirror of the opaque virgl_box used by the transfer API. */
struct virgl_box {
   uint32_t x, y, z;
   uint32_t w, h, d;
};
/* Write exactly "size" bytes from "buf" to "fd", retrying on partial
 * writes. Returns "size" on success or -errno on write failure.
 *
 * FIX: pointer arithmetic is now done on a char* cursor; arithmetic on
 * void* is a GNU extension, not ISO C (void has no size). Behavior is
 * unchanged. */
int vtest_block_write(int fd, void *buf, int size)
{
   char *ptr = buf;
   int left;
   int ret;
   left = size;
   do {
      ret = write(fd, ptr, left);
      if (ret < 0)
         return -errno;
      left -= ret;
      ptr += ret;
   } while (left);
   return size;
}
/* Read exactly "size" bytes from "fd" into "buf", retrying on short
 * reads. Returns "size" on success, 0 on EOF, or -errno on read error.
 * If the VTEST_SAVE environment variable is set, every successfully
 * read block is also appended to that file (opened lazily, once) for
 * later replay; save failures abort the process.
 *
 * FIX: pointer arithmetic is now done on a char* cursor; arithmetic on
 * void* is a GNU extension, not ISO C. Behavior is unchanged. */
int vtest_block_read(int fd, void *buf, int size)
{
   char *ptr = buf;
   int left;
   int ret;
   static int savefd = -1;   /* lazily-opened VTEST_SAVE file */
   left = size;
   do {
      ret = read(fd, ptr, left);
      if (ret <= 0)
         return ret == -1 ? -errno : 0;
      left -= ret;
      ptr += ret;
   } while (left);
   if (getenv("VTEST_SAVE")) {
      if (savefd == -1) {
         savefd = open(getenv("VTEST_SAVE"),
                       O_CLOEXEC|O_CREAT|O_WRONLY|O_TRUNC|O_DSYNC, S_IRUSR|S_IWUSR);
         if (savefd == -1) {
            perror("error opening save file");
            exit(1);
         }
      }
      if (write(savefd, buf, size) != size) {
         perror("failed to save");
         exit(1);
      }
   }
   return size;
}
/* Initialize virglrenderer for this connection and create context
 * "ctx_id", naming it with the "length"-byte string the client sends
 * next. "in_fd"/"out_fd" become the connection's socket fds.
 * Returns 0 on success, negative on failure. */
int vtest_create_renderer(int in_fd, int out_fd, uint32_t length)
{
   char *vtestname;
   int ret;
   int ctx = VIRGL_RENDERER_USE_EGL;
   renderer.in_fd = in_fd;
   renderer.out_fd = out_fd;
   if (getenv("VTEST_USE_GLX")) {
      ctx = VIRGL_RENDERER_USE_GLX;
   }
#ifdef WITH_VULKAN
   ctx |= VIRGL_RENDERER_USE_VULKAN;
#endif
   ret = virgl_renderer_init(&renderer,
                             ctx | VIRGL_RENDERER_THREAD_SYNC, &vtest_cbs);
   if (ret) {
      fprintf(stderr, "failed to initialise renderer.\n");
      return -1;
   }
   /* +1 guarantees NUL termination of the client-supplied name. */
   vtestname = calloc(1, length + 1);
   if (!vtestname)
      return -1;
   ret = vtest_block_read(renderer.in_fd, vtestname, length);
   if (ret != (int)length) {
      ret = -1;
      goto end;
   }
   ret = virgl_renderer_context_create(ctx_id, strlen(vtestname), vtestname);
end:
   free(vtestname);
   return ret;
}
/* Tear down the vtest context and virglrenderer state, then invalidate the
 * cached client fds (the fds themselves are owned by the caller). */
void vtest_destroy_renderer(void)
{
   virgl_renderer_context_destroy(ctx_id);
   virgl_renderer_cleanup(&renderer);
   renderer.in_fd = -1;
   renderer.out_fd = -1;
}
/* Reply to the get-caps command: send a 2-dword header (blob size in
 * dwords + 1, caps-set id) followed by the v1 caps blob.
 * Returns 0 on success, negative on failure. */
int vtest_send_caps(uint32_t length_dw)
{
   uint32_t max_ver, max_size;
   void *caps_buf;
   uint32_t hdr_buf[2];
   int ret;

   UNUSED_PARAMETER(length_dw);

   virgl_renderer_get_cap_set(1, &max_ver, &max_size);

   caps_buf = malloc(max_size);
   if (!caps_buf)
      return -1;

   virgl_renderer_fill_caps(1, 1, caps_buf);

   hdr_buf[0] = max_size + 1;
   hdr_buf[1] = 1;
   ret = vtest_block_write(renderer.out_fd, hdr_buf, sizeof(hdr_buf));
   if (ret < 0)
      goto end;
   /* Bug fix: the result of this write was previously discarded, so the
    * stale "ret" from the header write was the one being checked. */
   ret = vtest_block_write(renderer.out_fd, caps_buf, max_size);

end:
   free(caps_buf);
   /* Bug fix: errors used to be swallowed by an unconditional "return 0". */
   return ret < 0 ? ret : 0;
}
/* Create a renderer resource from the wire-encoded argument block and
 * attach it to the vtest context.
 * Returns the creation result (0 on success) or -1 on a short read. */
int vtest_create_resource(uint32_t length_dw)
{
   uint32_t res_create_buf[VCMD_RES_CREATE_SIZE];
   struct virgl_renderer_resource_create_args args;
   int ret;

   UNUSED_PARAMETER(length_dw);

   ret = vtest_block_read(renderer.in_fd, &res_create_buf, sizeof(res_create_buf));
   if (ret != (int)sizeof(res_create_buf))
      return -1;

   args.handle = res_create_buf[VCMD_RES_CREATE_RES_HANDLE];
   args.target = res_create_buf[VCMD_RES_CREATE_TARGET];
   args.format = res_create_buf[VCMD_RES_CREATE_FORMAT];
   args.bind = res_create_buf[VCMD_RES_CREATE_BIND];
   args.width = res_create_buf[VCMD_RES_CREATE_WIDTH];
   args.height = res_create_buf[VCMD_RES_CREATE_HEIGHT];
   args.depth = res_create_buf[VCMD_RES_CREATE_DEPTH];
   args.array_size = res_create_buf[VCMD_RES_CREATE_ARRAY_SIZE];
   args.last_level = res_create_buf[VCMD_RES_CREATE_LAST_LEVEL];
   args.nr_samples = res_create_buf[VCMD_RES_CREATE_NR_SAMPLES];
   args.flags = 0;   /* no iovec backing supplied over vtest */

   ret = virgl_renderer_resource_create(&args, NULL, 0);
   /* Only attach the resource to the context when creation succeeded;
    * previously a failed handle was attached unconditionally. */
   if (ret == 0)
      virgl_renderer_ctx_attach_resource(ctx_id, args.handle);
   return ret;
}
/* Drop the context's reference to a resource and release it.
 * Returns 0 on success, -1 on a short read. */
int vtest_resource_unref(uint32_t length_dw)
{
   uint32_t res_unref_buf[VCMD_RES_UNREF_SIZE];
   int ret;
   uint32_t handle;

   UNUSED_PARAMETER(length_dw);

   ret = vtest_block_read(renderer.in_fd, &res_unref_buf, sizeof(res_unref_buf));
   if (ret != (int)sizeof(res_unref_buf))
      return -1;

   handle = res_unref_buf[VCMD_RES_UNREF_RES_HANDLE];

   /* Bug fix: this previously called virgl_renderer_ctx_attach_resource(),
    * re-attaching the resource that is about to be destroyed. It must be
    * DETACHED from the context before the final unref. */
   virgl_renderer_ctx_detach_resource(ctx_id, handle);
   virgl_renderer_resource_unref(handle);
   return 0;
}
/* Read a command stream of length_dw dwords from the client and hand it to
 * virglrenderer for execution.
 * Returns 0 on success, -1 on overflow, allocation, I/O or submit failure. */
int vtest_submit_cmd(uint32_t length_dw)
{
   uint32_t *cbuf;
   int ret;

   /* Guard the 32-bit byte-count multiplication against overflow. */
   if (length_dw > UINT_MAX / 4)
      return -1;

   cbuf = malloc(length_dw * 4);
   if (!cbuf)
      return -1;

   ret = vtest_block_read(renderer.in_fd, cbuf, length_dw * 4);
   /* Compare in unsigned space only after ruling out an error return,
    * avoiding the old signed/unsigned comparison. */
   if (ret < 0 || (uint32_t)ret != length_dw * 4) {
      free(cbuf);
      return -1;
   }

   /* Bug fix: the submit result used to be ignored. */
   ret = virgl_renderer_submit_cmd(cbuf, ctx_id, length_dw);

   free(cbuf);
   return ret ? -1 : 0;
}
/* Decode the common transfer-command header. Expects an array "thdr_buf"
 * plus locals "handle", "level", "stride", "layer_stride", "box" and
 * "data_size" to be in scope at the expansion site (see the two
 * vtest_transfer_* handlers below). */
#define DECODE_TRANSFER \
   do {							\
      handle = thdr_buf[VCMD_TRANSFER_RES_HANDLE];	\
      level = thdr_buf[VCMD_TRANSFER_LEVEL];		\
      stride = thdr_buf[VCMD_TRANSFER_STRIDE];		\
      layer_stride = thdr_buf[VCMD_TRANSFER_LAYER_STRIDE];	\
      box.x = thdr_buf[VCMD_TRANSFER_X];		\
      box.y = thdr_buf[VCMD_TRANSFER_Y];		\
      box.z = thdr_buf[VCMD_TRANSFER_Z];		\
      box.w = thdr_buf[VCMD_TRANSFER_WIDTH];		\
      box.h = thdr_buf[VCMD_TRANSFER_HEIGHT];		\
      box.d = thdr_buf[VCMD_TRANSFER_DEPTH];		\
      data_size = thdr_buf[VCMD_TRANSFER_DATA_SIZE];	\
   } while(0)
/* Transfer-get: read a transfer header, pull the requested region out of
 * the resource via virglrenderer and stream the bytes back to the client.
 * Returns 0 on success, negative on I/O or allocation failure. */
int vtest_transfer_get(uint32_t length_dw)
{
   uint32_t thdr_buf[VCMD_TRANSFER_HDR_SIZE];
   int ret;
   int level;
   uint32_t stride, layer_stride, handle;
   struct virgl_box box;
   uint32_t data_size;
   void *ptr;
   struct iovec iovec;

   /* Consistency fix: the header size is fixed by the protocol, so
    * length_dw is unused — mark it like vtest_transfer_put() does. */
   UNUSED_PARAMETER(length_dw);

   ret = vtest_block_read(renderer.in_fd, thdr_buf, VCMD_TRANSFER_HDR_SIZE * 4);
   if (ret != VCMD_TRANSFER_HDR_SIZE * 4)
      return ret;

   DECODE_TRANSFER;

   ptr = malloc(data_size);
   if (!ptr)
      return -ENOMEM;

   iovec.iov_len = data_size;
   iovec.iov_base = ptr;
   ret = virgl_renderer_transfer_read_iov(handle,
                                          ctx_id,
                                          level,
                                          stride,
                                          layer_stride,
                                          &box,
                                          0,
                                          &iovec, 1);
   if (ret)
      fprintf(stderr," transfer read failed %d\n", ret);

   /* The reply is written even when the read failed so the client stays in
    * sync with the byte stream. */
   ret = vtest_block_write(renderer.out_fd, ptr, data_size);

   free(ptr);
   return ret < 0 ? ret : 0;
}
/* Transfer-put: read a transfer header plus data_size payload bytes from
 * the client and write them into the resource via virglrenderer.
 * Returns 0 on success, negative on I/O or allocation failure. */
int vtest_transfer_put(uint32_t length_dw)
{
   uint32_t thdr_buf[VCMD_TRANSFER_HDR_SIZE];
   int ret;
   int level;
   uint32_t stride, layer_stride, handle;
   struct virgl_box box;
   uint32_t data_size;
   void *ptr;
   struct iovec iovec;

   UNUSED_PARAMETER(length_dw);

   ret = vtest_block_read(renderer.in_fd, thdr_buf, VCMD_TRANSFER_HDR_SIZE * 4);
   if (ret != VCMD_TRANSFER_HDR_SIZE * 4)
      return ret;

   DECODE_TRANSFER;

   ptr = malloc(data_size);
   if (!ptr)
      return -ENOMEM;

   ret = vtest_block_read(renderer.in_fd, ptr, data_size);
   if (ret < 0) {
      /* Bug fix: ptr was leaked on this error path. */
      free(ptr);
      return ret;
   }

   iovec.iov_len = data_size;
   iovec.iov_base = ptr;
   ret = virgl_renderer_transfer_write_iov(handle,
                                           ctx_id,
                                           level,
                                           stride,
                                           layer_stride,
                                           &box,
                                           0,
                                           &iovec, 1);
   if (ret)
      fprintf(stderr," transfer write failed %d\n", ret);

   free(ptr);
   return 0;
}
/* Busy-wait command: create one more fence, then either report whether all
 * previously created fences have retired (poll mode) or block until they
 * have (wait mode). The reply is a one-dword busy flag (1 = busy). */
int vtest_resource_busy_wait(uint32_t length_dw)
{
   uint32_t bw_buf[VCMD_BUSY_WAIT_SIZE];
   int ret, fd;
   int flags;
   uint32_t hdr_buf[VTEST_HDR_SIZE];
   uint32_t reply_buf[1];
   bool busy = false;

   /* A fresh fence is created on every busy-wait so that "all retired"
    * can be tested as last_fence == fence_id - 1 below. */
   vtest_renderer_create_fence(length_dw);

   ret = vtest_block_read(renderer.in_fd, &bw_buf, sizeof(bw_buf));
   if (ret != sizeof(bw_buf))
      return -1;

   /* handle = bw_buf[VCMD_BUSY_WAIT_HANDLE]; unused as of now */
   flags = bw_buf[VCMD_BUSY_WAIT_FLAGS];

   if (flags == VCMD_BUSY_WAIT_FLAG_WAIT) {
      /* Block until the newest fence retires, sleeping on the renderer's
       * poll fd when one is available instead of spinning. */
      do {
         if (last_fence == (fence_id - 1))
            break;

         fd = virgl_renderer_get_poll_fd();
         if (fd != -1)
            vtest_wait_for_fd_read(fd);

         virgl_renderer_poll();
      } while (1);
      busy = false;
   } else {
      busy = last_fence != (fence_id - 1);
   }

   hdr_buf[VTEST_CMD_LEN] = 1;
   hdr_buf[VTEST_CMD_ID] = VCMD_RESOURCE_BUSY_WAIT;
   reply_buf[0] = busy ? 1 : 0;

   ret = vtest_block_write(renderer.out_fd, hdr_buf, sizeof(hdr_buf));
   if (ret < 0)
      return ret;

   ret = vtest_block_write(renderer.out_fd, reply_buf, sizeof(reply_buf));
   if (ret < 0)
      return ret;

   return 0;
}
/* Create a renderer fence with the next sequential id. The command carries
 * no payload, so length_dw is unused. Always returns 0. */
int vtest_renderer_create_fence(uint32_t length_dw)
{
   UNUSED_PARAMETER(length_dw);
   virgl_renderer_create_fence(fence_id++, ctx_id);
   return 0;
}
/* Give virglrenderer a chance to process events and retire fences
 * (which in turn fires vtest_write_fence). Always returns 0. */
int vtest_poll(void)
{
   virgl_renderer_poll();
   return 0;
}
|
Keenuts/virglrenderer
|
vtest/vtest_vk_objects.c
|
<filename>vtest/vtest_vk_objects.c<gh_stars>1-10
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vulkan/vulkan.h>
#include "virglrenderer_vulkan.h"
#include "vtest.h"
#include "vtest_protocol.h"
#include "vtest_vk.h"
#include "vtest_vk_objects.h"
extern struct vtest_renderer renderer;
/* Rebuild a VkDescriptorPoolCreateInfo from the wire payload and forward
 * it to the renderer; the reply carries the error code and pool handle. */
int
vtest_vk_create_descriptor_pool(uint32_t length_dw)
{
   int res;
   struct vtest_result result = { 0 };
   VkDescriptorPoolCreateInfo vk_info;
   struct payload_create_descriptor_pool_intro intro;

   memset(&vk_info, 0, sizeof(vk_info));

   res = vtest_block_read(renderer.in_fd, &intro, sizeof(intro));
   CHECK_IO_RESULT(res, sizeof(intro));

   vk_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   vk_info.flags = intro.flags;
   vk_info.maxSets = intro.maxSets;
   vk_info.poolSizeCount = intro.poolSizeCount;

   struct payload_create_descriptor_pool_pPoolSizes tmp_pPoolSizes;
   VkDescriptorPoolSize *pPoolSizes = NULL;

   /* NOTE(review): poolSizeCount comes straight off the wire; a huge value
    * makes this alloca blow the stack. Consider bounding it. */
   pPoolSizes = alloca(sizeof(*pPoolSizes) * vk_info.poolSizeCount);
   for (uint32_t i = 0; i < intro.poolSizeCount; i++) {
      res = vtest_block_read(renderer.in_fd, &tmp_pPoolSizes, sizeof(tmp_pPoolSizes));
      CHECK_IO_RESULT(res, sizeof(tmp_pPoolSizes));

      pPoolSizes[i].type = tmp_pPoolSizes.type;
      pPoolSizes[i].descriptorCount = tmp_pPoolSizes.descriptorCount;
   }
   vk_info.pPoolSizes = pPoolSizes;

   result.error_code = virgl_vk_create_descriptor_pool(intro.handle,
                                                       &vk_info,
                                                       &result.result);

   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));

   UNUSED_PARAMETER(length_dw);
   return 0;
}
/* Rebuild a VkDescriptorSetLayoutCreateInfo (intro + one payload struct
 * per binding) and forward it to the renderer. */
int
vtest_vk_create_descriptor_set_layout(uint32_t length_dw)
{
   int res;
   struct vtest_result result = { 0 };
   VkDescriptorSetLayoutCreateInfo vk_info;
   VkDescriptorSetLayoutBinding *pBindings = NULL;
   struct payload_create_descriptor_set_layout_intro intro;
   struct payload_create_descriptor_set_layout_pBindings binding;

   res = vtest_block_read(renderer.in_fd, &intro, sizeof(intro));
   CHECK_IO_RESULT(res, sizeof(intro));

   memset(&vk_info, 0, sizeof(vk_info));
   vk_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
   vk_info.flags = intro.flags;
   vk_info.bindingCount = intro.bindingCount;

   /* reading bindings.
    * NOTE(review): bindingCount is wire-controlled; unbounded alloca. */
   pBindings = alloca(sizeof(*pBindings) * vk_info.bindingCount);
   for (uint32_t i = 0; i < vk_info.bindingCount; i++) {
      res = vtest_block_read(renderer.in_fd, &binding, sizeof(binding));
      CHECK_IO_RESULT(res, sizeof(binding));

      pBindings[i].binding = binding.binding;
      pBindings[i].descriptorType = binding.descriptorType;
      pBindings[i].descriptorCount = binding.descriptorCount;
      pBindings[i].stageFlags = binding.stageFlags;
      /* immutable samplers are not transported over vtest */
      pBindings[i].pImmutableSamplers = NULL;
   }
   vk_info.pBindings = pBindings;

   result.error_code = virgl_vk_create_descriptor_set_layout(intro.handle,
                                                             &vk_info,
                                                             &result.result);

   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));

   UNUSED_PARAMETER(length_dw);
   return 0;
}
/* payload:
 *    - generic pool_handle
 *    - descriptor layout handles[]
 * Allocates descriptorSetCount sets from the pool and replies with the
 * error code followed by the new set handles. */
int
vtest_vk_allocate_descriptor_sets(uint32_t length_dw)
{
   int res;
   struct vtest_result result = { 0 };
   uint32_t pool_handle;
   uint32_t *set_layout_handles = NULL;
   uint32_t *output_handles = NULL;
   struct payload_allocate_descriptor_sets_intro intro;

   res = vtest_block_read(renderer.in_fd, &intro, sizeof(intro));
   CHECK_IO_RESULT(res, sizeof(intro));
   pool_handle = intro.descriptorPool;

   /* reading all the layout handles sent after the intro */
   set_layout_handles = alloca(sizeof(uint32_t) * intro.descriptorSetCount);
   res = vtest_block_read(renderer.in_fd,
                          set_layout_handles,
                          sizeof(uint32_t) * intro.descriptorSetCount);
   /* Bug fix: this read's result was previously unchecked, so a short or
    * failed read would feed garbage handles to the renderer. */
   CHECK_IO_RESULT(res, sizeof(uint32_t) * intro.descriptorSetCount);

   output_handles = alloca(sizeof(uint32_t) * intro.descriptorSetCount);

   result.error_code = virgl_vk_allocate_descriptor_set(intro.handle,
                                                        pool_handle,
                                                        intro.descriptorSetCount,
                                                        set_layout_handles,
                                                        output_handles);
   result.result = intro.descriptorSetCount;

   /* Writing back the results */
   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));

   res = vtest_block_write(renderer.out_fd,
                           output_handles,
                           sizeof(uint32_t) * result.result);
   CHECK_IO_RESULT(res, result.result * sizeof(uint32_t));

   UNUSED_PARAMETER(length_dw);
   return 0;
}
/* Read a SPIR-V blob of codeSize bytes and create a shader module.
 * Returns 0 on success, -1 on allocation or I/O failure. */
int
vtest_vk_create_shader_module(uint32_t length_dw)
{
   int res;
   struct vtest_result result = { 0 };
   VkShaderModuleCreateInfo vk_info;
   struct payload_create_shader_module_intro intro;
   uint32_t *shader_code = NULL;

   res = vtest_block_read(renderer.in_fd, &intro, sizeof(intro));
   CHECK_IO_RESULT(res, sizeof(intro));

   memset(&vk_info, 0, sizeof(vk_info));
   vk_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
   vk_info.flags = intro.flags;
   vk_info.codeSize = intro.codeSize;

   shader_code = malloc(vk_info.codeSize);
   if (NULL == shader_code) {
      return -1;
   }

   res = vtest_block_read(renderer.in_fd, shader_code, vk_info.codeSize);
   if (res != (int)vk_info.codeSize) {
      /* Bug fix: shader_code used to leak on a short/failed read. */
      free(shader_code);
      return -1;
   }
   vk_info.pCode = shader_code;

   result.error_code = virgl_vk_create_shader_module(intro.handle,
                                                     &vk_info,
                                                     &result.result);
   /* Bug fix: shader_code was never freed. Per the Vulkan spec,
    * vkCreateShaderModule consumes pCode during the call, so freeing the
    * blob once the renderer call returns is safe. */
   free(shader_code);

   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));

   UNUSED_PARAMETER(length_dw);
   return 0;
}
/* Rebuild a VkPipelineLayoutCreateInfo: intro payload, then an array of
 * descriptor-set-layout handles, then one payload per push-constant range.
 * The handles are resolved to VkDescriptorSetLayouts by the renderer. */
int
vtest_vk_create_pipeline_layout(uint32_t length_dw)
{
   int res;
   struct vtest_result result = { 0 };
   VkPipelineLayoutCreateInfo vk_info;
   uint32_t *set_handles = NULL;
   VkPushConstantRange *vk_push_ranges = NULL;
   struct payload_create_pipeline_layout_intro intro;
   struct payload_create_pipeline_layout_pPushConstantRanges push_range;

   res = vtest_block_read(renderer.in_fd, &intro, sizeof(intro));
   CHECK_IO_RESULT(res, sizeof(intro));

   /* generic information */
   memset(&vk_info, 0, sizeof(vk_info));
   vk_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
   vk_info.flags = intro.flags;
   vk_info.setLayoutCount = intro.setLayoutCount;
   vk_info.pushConstantRangeCount = intro.pushConstantRangeCount;

   /* first array: VkDescriptorSetLayout handles.
    * NOTE(review): both counts are wire-controlled; unbounded allocas. */
   set_handles = alloca(sizeof(*set_handles) * vk_info.setLayoutCount);
   res = vtest_block_read(renderer.in_fd, set_handles,
                          sizeof(*set_handles) * vk_info.setLayoutCount);
   CHECK_IO_RESULT(res, sizeof(*set_handles) * vk_info.setLayoutCount);

   /* second array: VkPushConstantRange */
   vk_push_ranges = alloca(sizeof(*vk_push_ranges) * vk_info.pushConstantRangeCount);
   for (uint32_t i = 0; i < vk_info.pushConstantRangeCount; i++) {
      res = vtest_block_read(renderer.in_fd, &push_range, sizeof(push_range));
      CHECK_IO_RESULT(res, sizeof(push_range));

      vk_push_ranges[i].stageFlags = push_range.stageFlags;
      vk_push_ranges[i].offset = push_range.offset;
      vk_push_ranges[i].size = push_range.size;
   }
   vk_info.pPushConstantRanges = vk_push_ranges;

   /* virgl forwarding */
   result.error_code = virgl_vk_create_pipeline_layout(intro.handle,
                                                       &vk_info,
                                                       set_handles,
                                                       &result.result);

   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));

   UNUSED_PARAMETER(length_dw);
   return 0;
}
/* Create a compute pipeline from a previously created layout and shader
 * module; replies with the error code and the new pipeline handle. */
int
vtest_vk_create_compute_pipelines(uint32_t length_dw)
{
   int res;
   /* Bug fix: result was left uninitialized here (every sibling handler
    * zero-initializes it), so stack garbage could leak into the reply. */
   struct vtest_result result = { 0 };
   VkComputePipelineCreateInfo vk_info;
   char *entrypoint_name = NULL;
   struct payload_create_compute_pipelines_intro intro;

   /* reading intro structure */
   res = vtest_block_read(renderer.in_fd, &intro, sizeof(intro));
   CHECK_IO_RESULT(res, sizeof(intro));

   /* reading entrypoint name; entrypoint_len is assumed to include the
    * terminating NUL -- TODO(review): confirm against the client encoder. */
   entrypoint_name = alloca(intro.entrypoint_len);
   res = vtest_block_read(renderer.in_fd, entrypoint_name, intro.entrypoint_len);
   CHECK_IO_RESULT(res, intro.entrypoint_len);

   /* setting up vk_info structure */
   memset(&vk_info, 0, sizeof(vk_info));
   vk_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
   vk_info.flags = intro.flags;
   vk_info.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
   vk_info.stage.flags = intro.stage_flags;
   vk_info.stage.stage = intro.stage_stage;
   vk_info.stage.pName = entrypoint_name;

   result.error_code = virgl_vk_create_compute_pipelines(intro.handle,
                                                         &vk_info,
                                                         intro.layout,
                                                         intro.stage_module,
                                                         &result.result);

   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));

   UNUSED_PARAMETER(length_dw);
   return 0;
}
/* Read an allocation payload, build a VkMemoryAllocateInfo from it,
 * forward the allocation to the renderer and send the result back. */
int
vtest_vk_allocate_memory(uint32_t length_dw)
{
   struct payload_allocate_memory payload;
   VkMemoryAllocateInfo alloc_info;
   struct vtest_result reply = { 0 };
   int io;

   io = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(io, sizeof(payload));

   memset(&alloc_info, 0, sizeof(alloc_info));
   alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
   alloc_info.allocationSize = payload.device_size;
   alloc_info.memoryTypeIndex = payload.memory_index;

   reply.error_code = virgl_vk_allocate_memory(payload.handle,
                                               &alloc_info,
                                               &reply.result);

   io = vtest_block_write(renderer.out_fd, &reply, sizeof(reply));
   CHECK_IO_RESULT(io, sizeof(reply));

   UNUSED_PARAMETER(length_dw);
   return 0;
}
/* Rebuild a VkBufferCreateInfo from the wire payload (plus an optional
 * queue-family-index array) and forward it to the renderer. */
int
vtest_vk_create_buffer(uint32_t length_dw)
{
   int res;
   struct vtest_result result = { 0 };
   VkBufferCreateInfo vk_info;
   struct payload_create_buffer intro;

   res = vtest_block_read(renderer.in_fd, &intro, sizeof(intro));
   CHECK_IO_RESULT(res, sizeof(intro));

   memset(&vk_info, 0, sizeof(vk_info));
   vk_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
   vk_info.flags = intro.flags;
   vk_info.size = intro.size;
   vk_info.usage = intro.usage;
   vk_info.sharingMode = intro.sharingMode;
   vk_info.queueFamilyIndexCount = intro.queueFamilyIndexCount;
   vk_info.pQueueFamilyIndices = NULL;

   if (0 != vk_info.queueFamilyIndexCount) {
      /* The (void*) cast drops the pointer-to-const so the freshly
       * alloca'd array can be filled in place.
       * NOTE(review): the count is wire-controlled; unbounded alloca. */
      vk_info.pQueueFamilyIndices = alloca(sizeof(uint32_t)
                                           * vk_info.queueFamilyIndexCount);
      res = vtest_block_read(renderer.in_fd,
                             (void*)vk_info.pQueueFamilyIndices,
                             sizeof(uint32_t) * vk_info.queueFamilyIndexCount);
      CHECK_IO_RESULT(res, sizeof(uint32_t) * vk_info.queueFamilyIndexCount);
   }

   result.error_code = virgl_vk_create_buffer(intro.handle, &vk_info, &result.result);

   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));

   UNUSED_PARAMETER(length_dw);
   return 0;
}
/* Bind a previously created buffer to a device-memory object at the
 * given offset and report the renderer's error code back. */
int
vtest_vk_bind_buffer_memory(uint32_t length_dw)
{
   struct payload_bind_buffer_memory payload;
   struct vtest_result reply = { 0 };
   int io;

   UNUSED_PARAMETER(length_dw);

   io = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(io, sizeof(payload));

   reply.result = 0;
   reply.error_code = virgl_vk_bind_buffer_memory(payload.device_handle,
                                                  payload.buffer_handle,
                                                  payload.memory_handle,
                                                  payload.offset);

   io = vtest_block_write(renderer.out_fd, &reply, sizeof(reply));
   CHECK_IO_RESULT(io, sizeof(reply));
   return 0;
}
/* Update a descriptor set: rebuild a VkWriteDescriptorSet plus one
 * VkDescriptorBufferInfo per descriptor from the wire payload. */
int
vtest_vk_write_descriptor_set(uint32_t length_dw)
{
   int res;
   struct vtest_result result = { 0 };
   VkWriteDescriptorSet vk_info;
   VkDescriptorBufferInfo *pBufferInfo = NULL;
   struct payload_write_descriptor_set_intro intro;
   struct payload_write_descriptor_set_buffer p_buffer;
   uint32_t *buffer_handles;
   uint32_t descriptor_handle;

   res = vtest_block_read(renderer.in_fd, &intro, sizeof(intro));
   CHECK_IO_RESULT(res, sizeof(intro));

   memset(&vk_info, 0, sizeof(vk_info));
   vk_info.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
   vk_info.dstBinding = intro.dstBinding;
   vk_info.dstArrayElement = intro.dstArrayElement;
   vk_info.descriptorType = intro.descriptorType;
   vk_info.descriptorCount = intro.descriptorCount;
   descriptor_handle = intro.dstSet;

   /* NOTE(review): descriptorCount is wire-controlled; unbounded allocas. */
   pBufferInfo = alloca(sizeof(*pBufferInfo) * vk_info.descriptorCount);
   buffer_handles = alloca(sizeof(uint32_t) * vk_info.descriptorCount);

   for (uint32_t i = 0; i < intro.descriptorCount; i++) {
      res = vtest_block_read(renderer.in_fd, &p_buffer, sizeof(p_buffer));
      CHECK_IO_RESULT(res, sizeof(p_buffer));

      /* Only offset/range are filled here; pBufferInfo[i].buffer is left
       * unset — presumably resolved by the renderer from buffer_handles.
       * TODO(review): confirm virgl_vk_write_descriptor_set fills it. */
      buffer_handles[i] = p_buffer.buffer_handle;
      pBufferInfo[i].offset = p_buffer.offset;
      pBufferInfo[i].range = p_buffer.range;
   }

   result.error_code = virgl_vk_write_descriptor_set(intro.device_handle,
                                                     &vk_info,
                                                     pBufferInfo,
                                                     descriptor_handle,
                                                     buffer_handles);
   result.result = 0;

   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));

   UNUSED_PARAMETER(length_dw);
   return 0;
}
/* Create a VkFence on the given device with the wire-supplied flags and
 * reply with the error code plus the new fence handle. */
int
vtest_vk_create_fence(uint32_t length_dw)
{
   struct payload_create_fence payload;
   VkFenceCreateInfo fence_info = { 0 };
   struct vtest_result reply = { 0 };
   int io;

   UNUSED_PARAMETER(length_dw);

   io = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(io, sizeof(payload));

   fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
   fence_info.flags = payload.flags;

   reply.error_code = virgl_vk_create_fence(payload.device_handle,
                                            &fence_info,
                                            &reply.result);

   io = vtest_block_write(renderer.out_fd, &reply, sizeof(reply));
   CHECK_IO_RESULT(io, sizeof(reply));
   return 0;
}
/* Wait on fence_count fences (all or any, per wait_all) with the supplied
 * timeout, then reply with the renderer's error code. */
int
vtest_vk_wait_for_fences(uint32_t length_dw)
{
   int res;
   struct vtest_result result = { 0 };
   struct payload_wait_for_fences payload;
   uint32_t *fences = NULL;

   res = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(res, sizeof(payload));

   /* NOTE(review): fence_count is wire-controlled; unbounded alloca. */
   fences = alloca(sizeof(uint32_t) * payload.fence_count);
   res = vtest_block_read(renderer.in_fd, fences, sizeof(uint32_t) * payload.fence_count);
   CHECK_IO_RESULT(res, sizeof(uint32_t) * payload.fence_count);

   result.error_code = virgl_vk_wait_for_fences(payload.device_handle,
                                                payload.fence_count,
                                                fences,
                                                payload.wait_all,
                                                payload.timeout);

   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));

   UNUSED_PARAMETER(length_dw);
   return 0;
}
/* Queue submission: read the fixed payload, then five handle arrays
 * (wait semaphores, wait stage masks, command pools, command buffers,
 * signal semaphores) and forward everything to the renderer. */
int
vtest_vk_queue_submit(uint32_t length_dw)
{
   int res;
   struct vtest_result result = { 0 };
   struct payload_queue_submit payload;
   struct virgl_vk_submit_info info;

   res = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(res, sizeof(payload));

   /* NOTE(review): this copies the scalar fields by assuming that
    * payload_queue_submit is a prefix of virgl_vk_submit_info with an
    * identical layout — confirm the two struct definitions stay in sync. */
   memcpy(&info, &payload, sizeof(payload));

   /* NOTE(review): all three counts are wire-controlled; unbounded allocas. */
   info.wait_handles = alloca(sizeof(uint32_t) * info.wait_count);
   info.wait_stage_masks = alloca(sizeof(uint32_t) * info.wait_count);
   info.cmd_handles = alloca(sizeof(uint32_t) * info.cmd_count);
   info.pool_handles = alloca(sizeof(uint32_t) * info.cmd_count);
   info.signal_handles = alloca(sizeof(uint32_t) * info.signal_count);

   res = vtest_block_read(renderer.in_fd,
                          info.wait_handles,
                          sizeof(uint32_t) * payload.wait_count);
   CHECK_IO_RESULT(res, sizeof(uint32_t) * payload.wait_count);

   res = vtest_block_read(renderer.in_fd,
                          info.wait_stage_masks,
                          sizeof(uint32_t) * payload.wait_count);
   CHECK_IO_RESULT(res, sizeof(uint32_t) * payload.wait_count);

   res = vtest_block_read(renderer.in_fd,
                          info.pool_handles,
                          sizeof(uint32_t) * payload.cmd_count);
   CHECK_IO_RESULT(res, sizeof(uint32_t) * payload.cmd_count);

   res = vtest_block_read(renderer.in_fd,
                          info.cmd_handles,
                          sizeof(uint32_t) * payload.cmd_count);
   CHECK_IO_RESULT(res, sizeof(uint32_t) * payload.cmd_count);

   res = vtest_block_read(renderer.in_fd,
                          info.signal_handles,
                          sizeof(uint32_t) * payload.signal_count);
   CHECK_IO_RESULT(res, sizeof(uint32_t) * payload.signal_count);

   result.error_code = virgl_vk_queue_submit(&info);

   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));

   UNUSED_PARAMETER(length_dw);
   return 0;
}
|
Keenuts/virglrenderer
|
vtest/vtest_vk.c
|
<gh_stars>1-10
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vulkan/vulkan.h>
#include "virglrenderer_vulkan.h"
#include "vtest.h"
#include "os/os_misc.h"
#include "vtest_protocol.h"
#include "vtest_vk.h"
extern struct vtest_renderer renderer;
/* Rebuild a VkDeviceCreateInfo (with its per-queue create infos and
 * priority arrays) from the wire format and create a logical device on
 * the given physical device. Replies with the new device handle. */
int vtest_vk_create_device(UNUSED uint32_t length_dw)
{
   VkDeviceCreateInfo vk_device_info;
   VkDeviceQueueCreateInfo *vk_queue_infos = NULL;
   struct VkPhysicalDeviceFeatures features;
   struct vtest_payload_device_create create_info;
   struct vtest_payload_queue_create queue_info;
   struct vtest_result result = { 0 };
   int res;

   /* The first payload is a lighter version of the VkDeviceCreateInfo */
   res = vtest_block_read(renderer.in_fd, &create_info, sizeof(create_info));
   CHECK_IO_RESULT(res, (int)sizeof(create_info));

   memset(&vk_device_info, 0, sizeof(vk_device_info));
   vk_device_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
   vk_device_info.flags = create_info.flags;
   vk_device_info.queueCreateInfoCount = create_info.queue_info_count;
   vk_device_info.pEnabledFeatures = &features;
   memcpy(&features, &create_info.features, sizeof(features));

   /* Keep a typed pointer to the queue array. Bug fix: the old code
    * indexed through (void*), which advanced the pointer by i BYTES
    * instead of i VkDeviceQueueCreateInfo elements, corrupting every
    * entry past the first. (The old alloca NULL-check was also dropped:
    * alloca cannot fail by returning NULL.) */
   vk_queue_infos = alloca(sizeof(VkDeviceQueueCreateInfo) *
                           create_info.queue_info_count);
   vk_device_info.pQueueCreateInfos = vk_queue_infos;

   for (uint32_t i = 0; i < create_info.queue_info_count; i++) {
      VkDeviceQueueCreateInfo *q = vk_queue_infos + i;

      res = vtest_block_read(renderer.in_fd, &queue_info, sizeof(queue_info));
      CHECK_IO_RESULT(res, sizeof(queue_info));

      q->sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
      q->pNext = NULL;
      q->flags = queue_info.flags;
      q->queueFamilyIndex = queue_info.queue_family_index;
      q->queueCount = queue_info.queue_count;
      /* alloca in a loop: each priority array lives until function exit */
      q->pQueuePriorities = alloca(sizeof(float) * queue_info.queue_count);
      res = vtest_block_read(renderer.in_fd, (float*)q->pQueuePriorities,
                             sizeof(float) * queue_info.queue_count);
      CHECK_IO_RESULT(res, sizeof(float) * queue_info.queue_count);
   }

   res = virgl_vk_create_device(create_info.physical_device_id,
                                &vk_device_info,
                                &result.result);
   if (0 > res) {
      return res;
   }

   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));
   return 0;
}
/* Destroy a logical device by handle and report the outcome back. */
int
vtest_vk_destroy_device(UNUSED uint32_t length_dw)
{
   struct vtest_payload_destroy_device payload;
   struct vtest_result reply = { 0 };
   int io;

   io = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(io, (int)sizeof(payload));

   reply.result = virgl_vk_destroy_device(payload.device_handle);

   io = vtest_block_write(renderer.out_fd, &reply, sizeof(reply));
   CHECK_IO_RESULT(io, sizeof(reply));
   return 0;
}
/* Destroy a device-owned Vulkan object by handle and reply with the
 * renderer's result. */
int
vtest_vk_destroy_object(UNUSED uint32_t length_dw)
{
   struct vtest_payload_destroy_object payload;
   struct vtest_result reply = { 0 };
   int io;

   io = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(io, (int)sizeof(payload));

   reply.result = virgl_vk_destroy_object(payload.device_handle,
                                          payload.object_handle);

   io = vtest_block_write(renderer.out_fd, &reply, sizeof(reply));
   CHECK_IO_RESULT(io, sizeof(reply));
   return 0;
}
/* Reply with the number of Vulkan physical devices the renderer sees. */
int vtest_vk_enumerate_devices(UNUSED uint32_t length_dw)
{
   struct vtest_result reply = { 0 };
   uint32_t count = 0;
   int io;

   io = virgl_vk_get_device_count(&count);
   if (io < 0) {
      return io;
   }

   reply.result = count;
   io = vtest_block_write(renderer.out_fd, &reply, sizeof(reply));
   CHECK_IO_RESULT(io, sizeof(reply));
   return 0;
}
/* Reply with the VkPhysicalDeviceMemoryProperties of the requested
 * physical device: a result header followed by the raw struct. */
int vtest_vk_get_device_memory_properties(UNUSED uint32_t length_dw)
{
   VkPhysicalDeviceMemoryProperties mem_props;
   struct vtest_payload_device_get payload;
   struct vtest_result reply = { 0 };
   int io;

   io = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(io, sizeof(payload));

   io = virgl_vk_get_memory_properties(payload.device_id, &mem_props);
   if (io < 0) {
      return io;
   }

   io = vtest_block_write(renderer.out_fd, &reply, sizeof(reply));
   CHECK_IO_RESULT(io, sizeof(reply));

   io = vtest_block_write(renderer.out_fd, &mem_props, sizeof(mem_props));
   CHECK_IO_RESULT(io, sizeof(mem_props));
   return 0;
}
/* Reply with the queue-family count followed by the array of
 * VkQueueFamilyProperties for the given physical device. */
int vtest_vk_get_queue_family_properties(UNUSED uint32_t length_dw)
{
   struct vtest_payload_device_get payload;
   struct vtest_result result = { 0 };
   int res;
   uint32_t family_count;
   VkQueueFamilyProperties *properties = NULL;

   res = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(res, sizeof(payload));

   res = virgl_vk_get_queue_family_properties(payload.device_id,
                                              &family_count,
                                              &properties);
   if (0 > res) {
      return res;
   }

   result.result = family_count;
   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));

   res = vtest_block_write(renderer.out_fd,
                           properties,
                           sizeof(*properties) * family_count);
   /* Bug fix: the expected size omitted the family_count factor, so a
    * short write of a multi-family reply went undetected. */
   CHECK_IO_RESULT(res, sizeof(*properties) * family_count);
   /* NOTE(review): ownership of "properties" is unclear from here; if the
    * renderer allocates it for the caller, it leaks — confirm and free. */
   return 0;
}
/* Reply with the VkPhysicalDeviceSparseProperties of the requested
 * physical device: a result header (echoing the device id) then the
 * raw struct. */
int vtest_vk_get_sparse_properties(UNUSED uint32_t length_dw)
{
   struct vtest_payload_device_get payload;
   VkPhysicalDeviceSparseProperties sparse_props;
   struct vtest_result reply = { 0 };
   int io;

   io = vtest_block_read(renderer.in_fd, &payload, sizeof(payload));
   CHECK_IO_RESULT(io, sizeof(payload));

   io = virgl_vk_get_sparse_properties(payload.device_id, &sparse_props);
   if (io < 0) {
      return io;
   }

   reply.result = payload.device_id;
   io = vtest_block_write(renderer.out_fd, &reply, sizeof(reply));
   CHECK_IO_RESULT(io, sizeof(reply));

   io = vtest_block_write(renderer.out_fd, &sparse_props, sizeof(sparse_props));
   CHECK_IO_RESULT(io, sizeof(sparse_props));
   return 0;
}
/* Map a device-memory range, invalidate it when the memory is cached,
 * and stream "size" bytes back to the client after a result header.
 * The do/while(0) block is a single-exit cleanup idiom: every failure
 * breaks out so the mapping is always released below. */
int vtest_vk_read_memory(UNUSED uint32_t length_dw)
{
   uint8_t cached;
   struct vtest_result result = { 0 };
   struct vtest_payload_rw_memory info;
   void *data = NULL;
   int res;

   res = vtest_block_read(renderer.in_fd, &info, sizeof(info));
   CHECK_IO_RESULT(res, sizeof(info));

   res = virgl_vk_map_memory(info.device_handle,
                             info.memory_handle,
                             info.offset,
                             info.size,
                             &data);
   if (0 > res) {
      return res;
   }

   do {
      res = virgl_vk_is_memory_cached(info.device_handle, info.memory_handle, &cached);
      if (0 > res) {
         break;
      }

      /* Cached (non-coherent) memory must be invalidated before the CPU
       * reads device writes. */
      if (cached) {
         res = virgl_vk_invalidate_memory(info.device_handle, info.memory_handle);
         if (0 > res) {
            break;
         }
      }

      res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
      if (res < (int)sizeof(result)) {
         break;
      }

      res = vtest_block_write(renderer.out_fd, data, info.size);
      if (res != (int)info.size) {
         break;
      }
      res = 0;
   } while (0);

   /* Always release the mapping, whatever happened above. */
   if (virgl_vk_unmap_memory(info.device_handle, info.memory_handle) < 0) {
      fprintf(stderr, "%s: unmap failed\n", __func__);
   }

   return res;
}
/* Map a device-memory range and fill it with "size" bytes streamed from
 * the client, flushing when the memory is cached (non-coherent). The
 * reply carries the error code of the last failing step (0 on success). */
int vtest_vk_write_memory(UNUSED uint32_t length_dw)
{
   int res = 0;
   uint8_t cached;
   struct vtest_result result = { 0 };
   struct vtest_payload_rw_memory info;
   void *data = NULL;

   res = vtest_block_read(renderer.in_fd, &info, sizeof(info));
   CHECK_IO_RESULT(res, sizeof(info));

   res = virgl_vk_map_memory(info.device_handle,
                             info.memory_handle,
                             info.offset,
                             info.size,
                             &data);
   if (0 > res) {
      return res;
   }

   do {
      res = virgl_vk_is_memory_cached(info.device_handle, info.memory_handle, &cached);
      if (0 > res) {
         break;
      }

      /* Bug fix: the payload was read from renderer.out_fd; incoming data
       * always arrives on renderer.in_fd. */
      res = vtest_block_read(renderer.in_fd, data, info.size);
      if (0 > res) {
         break;
      }

      if (cached) {
         res = virgl_vk_flush_memory(info.device_handle, info.memory_handle);
         if (0 > res) {
            /* Bug fix: this path used to unmap here AND fall through to
             * the unconditional unmap below — a double unmap. */
            break;
         }
      }
      res = 0;
   } while (0);

   /* Single release point for the mapping, for every path above. */
   if (virgl_vk_unmap_memory(info.device_handle, info.memory_handle) < 0) {
      fprintf(stderr, "%s: unmap failed\n", __func__);
   }

   result.error_code = res;
   res = vtest_block_write(renderer.out_fd, &result, sizeof(result));
   CHECK_IO_RESULT(res, sizeof(result));
   return 0;
}
|
Keenuts/virglrenderer
|
src/virglrenderer_vulkan.h
|
<filename>src/virglrenderer_vulkan.h
#ifndef VIRGLRENDERER_VULKAN_H
#define VIRGLRENDERER_VULKAN_H
#include "virglrenderer.h"
#include <vulkan/vulkan.h>
VIRGL_EXPORT int
virgl_vk_get_device_count(uint32_t *device_count);
VIRGL_EXPORT int
virgl_vk_get_sparse_properties(uint32_t device_handle,
VkPhysicalDeviceSparseProperties *sparse_props);
VIRGL_EXPORT int
virgl_vk_get_memory_properties(uint32_t device_id, VkPhysicalDeviceMemoryProperties *out);
VIRGL_EXPORT int
virgl_vk_get_queue_family_properties(uint32_t device_handle,
uint32_t *family_count,
VkQueueFamilyProperties **props);
VIRGL_EXPORT int
virgl_vk_create_device(uint32_t phys_device_id,
const VkDeviceCreateInfo *info,
uint32_t *device_handle);
VIRGL_EXPORT int
virgl_vk_allocate_descriptor_set(uint32_t device_handle,
uint32_t pool_id,
uint32_t descriptor_count,
uint32_t *desc_layout_ids,
uint32_t *handles);
VIRGL_EXPORT int
virgl_vk_create_descriptor_pool(uint32_t device_handle,
const VkDescriptorPoolCreateInfo *vk_info,
uint32_t *handle);
VIRGL_EXPORT int
virgl_vk_create_descriptor_set_layout(uint32_t device_handle,
VkDescriptorSetLayoutCreateInfo *info,
uint32_t *handle);
VIRGL_EXPORT int
virgl_vk_create_shader_module(uint32_t device_handle,
const VkShaderModuleCreateInfo *info,
uint32_t *handle);
VIRGL_EXPORT int
virgl_vk_create_pipeline_layout(uint32_t device_handle,
VkPipelineLayoutCreateInfo *info,
uint32_t *set_handles,
uint32_t *handle);
VIRGL_EXPORT int
virgl_vk_create_compute_pipelines(uint32_t device_handle,
VkComputePipelineCreateInfo *info,
uint32_t layout_handle,
uint32_t module_handle,
uint32_t *handle);
VIRGL_EXPORT int
virgl_vk_allocate_memory(uint32_t device_handle,
const VkMemoryAllocateInfo *info,
uint32_t *output);
VIRGL_EXPORT int
virgl_vk_create_buffer(uint32_t device_handle,
VkBufferCreateInfo *info,
uint32_t *handle);
VIRGL_EXPORT int
virgl_vk_bind_buffer_memory(uint32_t device_handle,
uint32_t buffer_handle,
uint32_t memory_handle,
uint64_t offset);
VIRGL_EXPORT int
virgl_vk_write_descriptor_set(uint32_t device_handle,
VkWriteDescriptorSet *write_info,
VkDescriptorBufferInfo *buffer_info,
uint32_t descriptor_handle,
uint32_t *buffer_handles);
VIRGL_EXPORT int
virgl_vk_is_memory_cached(uint32_t device_handle,
uint32_t memory_handle,
uint8_t *output);
VIRGL_EXPORT int
virgl_vk_invalidate_memory(uint32_t device_handle,
uint32_t memory_handle);
VIRGL_EXPORT int
virgl_vk_flush_memory(uint32_t device_handle,
uint32_t memory_handle);
VIRGL_EXPORT int
virgl_vk_map_memory(uint32_t device_handle,
uint32_t memory_handle,
uint32_t offset,
uint32_t size,
void **ptr);
VIRGL_EXPORT int
virgl_vk_unmap_memory(uint32_t device_handle,
uint32_t memory_handle);
VIRGL_EXPORT int
virgl_vk_create_command_pool(uint32_t device_handle,
const VkCommandPoolCreateInfo *info,
uint32_t *handle);
VIRGL_EXPORT int
virgl_vk_allocate_command_buffers(uint32_t device_handle,
uint32_t pool_handle,
VkCommandBufferAllocateInfo *info,
uint32_t *handles);
struct virgl_vk_record_info {
uint32_t cmd_handle;
uint32_t pool_handle;
uint32_t pipeline_handle;
uint32_t pipeline_layout_handle;
uint32_t bind_point;
uint32_t descriptor_count;
uint32_t dispatch_size[3];
uint32_t *descriptor_handles;
};
VIRGL_EXPORT int
virgl_vk_record_command(uint32_t device_handle,
const struct virgl_vk_record_info *info);
VIRGL_EXPORT int
virgl_vk_create_fence(uint32_t device_handle,
VkFenceCreateInfo *info,
uint32_t *handle);
VIRGL_EXPORT int
virgl_vk_wait_for_fences(uint32_t device_handle,
uint32_t fence_count,
uint32_t *handles,
uint32_t wait_all,
uint64_t timeout);
VIRGL_EXPORT int
virgl_vk_destroy_object(uint32_t device_handle,
uint32_t object_handle);
VIRGL_EXPORT int
virgl_vk_destroy_device(uint32_t device_handle);
struct virgl_vk_submit_info {
uint32_t device_handle;
uint32_t queue_handle;
uint32_t fence_handle;
uint32_t wait_count;
uint32_t cmd_count;
uint32_t signal_count;
uint32_t *wait_handles;
uint32_t *wait_stage_masks;
uint32_t *cmd_handles;
uint32_t *pool_handles;
uint32_t *signal_handles;
};
VIRGL_EXPORT int
virgl_vk_queue_submit(const struct virgl_vk_submit_info *info);
#endif
|
Keenuts/virglrenderer
|
src/vk_device.c
|
#include "virgl_vk.h"
/* Placeholder entry point for vk_device.c.
 * FIX: the function is declared to return int but had no return
 * statement — using its result was undefined behavior. */
int toto()
{
   return 0;
}
|
Keenuts/virglrenderer
|
vtest/vtest_protocol.h
|
/**************************************************************************
*
* Copyright (C) 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef VTEST_PROTOCOL
#define VTEST_PROTOCOL
#include <vulkan/vulkan.h>
#define VTEST_DEFAULT_SOCKET_NAME "/tmp/.virgl_test"
/* 32-bit length field */
/* 32-bit cmd field */
#define VTEST_HDR_SIZE 2
#define VTEST_CMD_LEN 0 /* length of data */
#define VTEST_CMD_ID 1
#define VTEST_CMD_DATA_START 2
/* vtest cmds */
#define VCMD_GET_CAPS 1
#define VCMD_RESOURCE_CREATE 2
#define VCMD_RESOURCE_UNREF 3
#define VCMD_TRANSFER_GET 4
#define VCMD_TRANSFER_PUT 5
#define VCMD_SUBMIT_CMD 6
#define VCMD_RESOURCE_BUSY_WAIT 7
/* pass the process cmd line for debugging */
#define VCMD_CREATE_RENDERER 8
/* get caps */
/* 0 length cmd */
/* resp VCMD_GET_CAPS + caps */
#define VCMD_VK_ALLOCATE_COMMAND_BUFFERS 9
#define VCMD_VK_ALLOCATE_DESCRIPTORS 10
#define VCMD_VK_ALLOCATE_MEMORY 11
#define VCMD_VK_BIND_BUFFER_MEMORY 12
#define VCMD_VK_CREATE_BUFFER 13
#define VCMD_VK_CREATE_COMMAND_POOL 14
#define VCMD_VK_CREATE_COMPUTE_PIPELINES 15
#define VCMD_VK_CREATE_DESCRIPTOR_POOL 16
#define VCMD_VK_CREATE_DESCRIPTOR_SET_LAYOUT 17
#define VCMD_VK_CREATE_DEVICE 18
#define VCMD_VK_CREATE_FENCE 19
#define VCMD_VK_CREATE_PIPELINE_LAYOUT 20
#define VCMD_VK_CREATE_SHADER_MODULE 21
#define VCMD_VK_ENUMERATE_PHYSICAL_DEVICES 22
#define VCMD_VK_GET_DEVICE_MEMORY 23
#define VCMD_VK_GET_PHYSICAL_DEVICE_SPARCE_PROPERTIES 24
#define VCMD_VK_GET_QUEUE_FAMILY_PROPS 25
#define VCMD_VK_QUEUE_SUBMIT 26
#define VCMD_VK_READ_MEMORY 27
#define VCMD_VK_RECORD_COMMAND 28
#define VCMD_VK_WAIT_FOR_FENCES 29
#define VCMD_VK_WRITE_DESCRIPTOR_SET 30
#define VCMD_VK_WRITE_MEMORY 31
#define VCMD_VK_DESTROY_OBJECT 32
#define VCMD_VK_DESTROY_DEVICE 33
#define VCMD_COMMAND_COUNT 33
#define VCMD_RES_CREATE_SIZE 10
#define VCMD_RES_CREATE_RES_HANDLE 0
#define VCMD_RES_CREATE_TARGET 1
#define VCMD_RES_CREATE_FORMAT 2
#define VCMD_RES_CREATE_BIND 3
#define VCMD_RES_CREATE_WIDTH 4
#define VCMD_RES_CREATE_HEIGHT 5
#define VCMD_RES_CREATE_DEPTH 6
#define VCMD_RES_CREATE_ARRAY_SIZE 7
#define VCMD_RES_CREATE_LAST_LEVEL 8
#define VCMD_RES_CREATE_NR_SAMPLES 9
#define VCMD_RES_UNREF_SIZE 1
#define VCMD_RES_UNREF_RES_HANDLE 0
#define VCMD_TRANSFER_HDR_SIZE 11
#define VCMD_TRANSFER_RES_HANDLE 0
#define VCMD_TRANSFER_LEVEL 1
#define VCMD_TRANSFER_STRIDE 2
#define VCMD_TRANSFER_LAYER_STRIDE 3
#define VCMD_TRANSFER_X 4
#define VCMD_TRANSFER_Y 5
#define VCMD_TRANSFER_Z 6
#define VCMD_TRANSFER_WIDTH 7
#define VCMD_TRANSFER_HEIGHT 8
#define VCMD_TRANSFER_DEPTH 9
#define VCMD_TRANSFER_DATA_SIZE 10
#define VCMD_BUSY_WAIT_FLAG_WAIT 1
#define VCMD_BUSY_WAIT_SIZE 2
#define VCMD_BUSY_WAIT_HANDLE 0
#define VCMD_BUSY_WAIT_FLAGS 1
struct vtest_hdr {
union {
uint32_t raw[2];
struct {
uint32_t length;
uint32_t id;
};
};
};
struct vtest_result {
uint32_t error_code;
uint32_t result;
};
struct vtest_payload_device_get {
uint32_t device_id;
};
struct vtest_payload_queue_create {
VkDeviceQueueCreateFlags flags;
uint32_t queue_family_index;
uint32_t queue_count;
/* float priorities[]; */
};
struct vtest_payload_device_create {
uint32_t physical_device_id;
VkDeviceCreateFlags flags;
VkPhysicalDeviceFeatures features;
uint32_t queue_info_count;
};
struct vtest_payload_destroy_device {
uint32_t device_handle;
};
struct vtest_payload_destroy_object {
uint32_t device_handle;
uint32_t object_handle;
};
struct vtest_payload_rw_memory {
uint32_t device_handle;
uint32_t memory_handle;
uint64_t offset;
uint64_t size;
};
#endif
|
Keenuts/virglrenderer
|
vtest/vtest_vk_commands.h
|
<gh_stars>1-10
#ifndef VTEST_VK_COMMANDS
#define VTEST_VK_COMMANDS
struct payload_command_pool_create_info {
uint32_t device_handle;
uint32_t flags;
uint32_t queue_family_index;
};
struct payload_command_buffer_allocate_info {
uint32_t device_handle;
uint32_t pool_handle;
uint32_t level;
uint32_t count;
};
struct payload_command_record_info {
uint32_t device_handle;
uint32_t cmd_handle;
uint32_t pool_handle;
uint32_t pipeline_handle;
uint32_t pipeline_layout_handle;
uint32_t bind_point;
uint32_t descriptor_count;
uint32_t dispatch_size[3];
};
int vtest_vk_create_command_pool(uint32_t length_dw);
int vtest_vk_allocate_command_buffers(uint32_t length_dw);
int vtest_vk_record_command(uint32_t length_dw);
#endif
|
Keenuts/virglrenderer
|
src/virglrenderer_vulkan.c
|
<reponame>Keenuts/virglrenderer
#include <assert.h>
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vulkan/vulkan.h>
#include "util/u_hash_table.h"
#include "util/u_pointer.h"
#include "util/u_memory.h"
#include "vrend_vk.h"
#include "virglrenderer_vulkan.h"
/* functions used in the device's hashtable */
/* reusing the same function as vrend does.*/
/* Hash callback for the per-device object hashtable: truncate the
 * pointer-sized key to its low 32 bits (same scheme vrend uses). */
static unsigned hash_func(void *key)
{
   intptr_t value = pointer_to_intptr(key);

   return (unsigned)(value & 0xffffffff);
}
/* Ordering callback for the object hashtable: total order on raw
 * pointer values, memcmp-style result (<0, 0, >0). */
static int vkobj_compare(void *a, void *b)
{
   if (a == b) {
      return 0;
   }
   return (a < b) ? -1 : 1;
}
/* Hashtable destructor callback: run the object's stored Vulkan destroy
 * function (if any), then free the wrapper struct.
 * NOTE(review): obj->vk_handle itself is not freed here even though
 * create_simple_object CALLOCs it — looks like a leak for objects
 * created that way; confirm ownership before changing (some callers
 * insert pointers into shared arrays, e.g. descriptor sets). */
static void vkobj_free(void *handle)
{
   struct vk_object *obj = handle;

   /* cleanup_callback is a vkDestroyXXX-style function recorded at
    * insertion time; NULL means Vulkan-side cleanup happens elsewhere. */
   if (NULL != obj->cleanup_callback) {
      obj->cleanup_callback(obj->vk_device, obj->vk_handle->content, NULL);
   }
   free(obj);
}
/* helper functions */
/* Get a physical device from a VGL-HANDLE */
/* Resolve a guest physical-device VGL-HANDLE.
 * Handles are plain indices into the global physical-device array;
 * returns VK_NULL_HANDLE when the index is out of range. */
static VkPhysicalDevice
get_physical_device_from_handle(uint32_t handle)
{
   if (handle >= vulkan_state->physical_device_count) {
      return VK_NULL_HANDLE;
   }
   return vulkan_state->physical_devices[handle];
}
/* Get a logical device from a VGL-HANDLE */
/* Resolve a logical-device VGL-HANDLE: the handle is the device's
 * position in the global device list.
 *
 * FIX: when the handle was out of range (or the list empty), the
 * original loop ran off the end of LIST_FOR_EACH_ENTRY and left 'it'
 * pointing at the list-head sentinel, so callers' NULL checks never
 * fired and they dereferenced garbage.  Return NULL explicitly. */
static struct vk_device*
get_device_from_handle(uint32_t handle)
{
   struct vk_device *it = NULL;

   LIST_FOR_EACH_ENTRY(it, &vulkan_state->devices->list, list) {
      if (handle == 0) {
         return it;
      }
      handle--;
   }
   return NULL;
}
/* Insert a vgl object into a logical device. Generates the VGL-HANDLE */
/* Insert a vgl object into a logical device. Generates the VGL-HANDLE.
 * Returns the new handle, or 0 on allocation failure — 0 is never a
 * valid handle since next_handle starts at 1. */
static uint32_t
device_insert_object(struct vk_device *dev, void *vk_handle, void *callback)
{
   struct vk_object *object = NULL;

   object = CALLOC_STRUCT(vk_object);
   if (NULL == object) {
      return 0;
   }

   object->vk_device = dev->handle;
   object->vk_handle = vk_handle;
   object->handle = dev->next_handle;
   /* callback (a vkDestroyXXX-shaped function, or NULL) is invoked by
    * vkobj_free when the entry is evicted from the hashtable. */
   object->cleanup_callback = callback;
   dev->next_handle += 1;

   /* Keys are the handle value itself, widened to a pointer. */
   util_hash_table_set(dev->objects, intptr_to_pointer(object->handle), object);
   return object->handle;
}
/* get an vgl-object from the hashtable using a VGL-HANDLE */
/* Look up an object registered on 'dev' by its VGL-HANDLE.
 * Returns the wrapped pointer (vk_buffer_t*, vk_fence_t*, ...) or NULL
 * when the handle is unknown. */
static void*
device_get_object(struct vk_device *dev, uint32_t handle)
{
   struct vk_object *object = NULL;

   object = util_hash_table_get(dev->objects, intptr_to_pointer(handle));
   if (NULL == object) {
      return NULL;
   }
   return object->vk_handle;
}
/* removes an object from the hashtable */
/* cleanup is done by the hashtable */
/* Remove an object from the device's hashtable; the table's destructor
 * callback (vkobj_free) performs the actual cleanup.
 *
 * FIX: keys are stored as intptr_to_pointer(handle) (see
 * device_insert_object / device_get_object / virgl_vk_destroy_object).
 * Passing &handle — the address of a stack variable — never matched
 * any entry, so objects were silently never removed. */
static void
device_remove_object(struct vk_device *dev, uint32_t handle)
{
   util_hash_table_remove(dev->objects, intptr_to_pointer(handle));
}
/* Helper to create and insert a simple object
 * object creation has to follow the prototype:
* VkResult create_func(VkDevice, const VkXXXInfo *, const VkAlloc...);
*
* The object is inserted and the handle generated on success
*/
typedef VkResult (*PFN_vkCreateFunction)(VkDevice,
const void*,
const VkAllocationCallbacks*,
void*);
typedef void (*PFN_vkDestroyFunction)(VkDevice, void*, const VkAllocationCallbacks*);
/* Create a Vulkan object whose creation matches the generic prototype
 *    VkResult vkCreateXXX(VkDevice, const VkXXXCreateInfo *,
 *                         const VkAllocationCallbacks *, VkXXX *);
 * then register it on the device, returning a VGL-HANDLE in *handle.
 *
 * Returns 0 on success; negative on failure:
 *   -1 unknown device, -2 allocation failure,
 *   -3 Vulkan creation failed, -4 registration failed. */
static int create_simple_object(uint32_t device_id,
                                const void* create_info,
                                PFN_vkCreateFunction create_func,
                                PFN_vkDestroyFunction destroy_func,
                                size_t vk_handle_size,
                                uint32_t *handle)
{
   struct vk_device *device = NULL;
   struct vk_handle *vk_handle = NULL;
   VkResult res;

   device = get_device_from_handle(device_id);
   if (NULL == device) {
      return -1;
   }

   /* vk_handle_size lets callers allocate the concrete wrapper type
    * (vk_buffer_t, vk_fence_t, ...); only the common 'content' field
    * is touched here. */
   vk_handle = CALLOC(1, vk_handle_size);
   if (NULL == vk_handle) {
      return -2;
   }

   res = create_func(device->handle, create_info, NULL, &vk_handle->content);
   if (VK_SUCCESS != res) {
      fprintf(stderr, "vk_call_failed %s\n", vkresult_to_string(res));
      free(vk_handle);
      return -3;
   }

   /* destroy_func is stored so the hashtable destroys the Vulkan object
    * when the entry is removed; on registration failure, undo by hand. */
   *handle = device_insert_object(device, vk_handle, destroy_func);
   if (0 == *handle) {
      destroy_func(device->handle, vk_handle->content, NULL);
      free(vk_handle);
      return -4;
   }
   return 0;
}
/* Fetch every VkQueue created alongside the device and store them in a
 * flat array (device->queues), indexed in creation order across all
 * queue-create infos.  Returns 0 on success, -1 on allocation failure.
 *
 * FIX: removed the stray ';' after the function's closing brace (an
 * empty file-scope declaration) and explicitly marked the unused
 * physical-device parameter. */
static int
device_initialize_queues(uint32_t phys_device_handle,
                         VkDevice vk_device,
                         const VkDeviceCreateInfo *info,
                         struct vk_device *device)
{
   const VkDeviceQueueCreateInfo *queue_info;

   /* The physical device is not needed to fetch queues; kept for
    * interface stability. */
   (void)phys_device_handle;

   device->queue_count = 0;
   for (uint32_t i = 0; i < info->queueCreateInfoCount; i++) {
      device->queue_count += info->pQueueCreateInfos[i].queueCount;
   }

   device->queues = CALLOC(device->queue_count, sizeof(VkQueue));
   if (device->queues == NULL) {
      return -1;
   }

   uint32_t id = 0;
   for (uint32_t i = 0; i < info->queueCreateInfoCount; i++) {
      queue_info = info->pQueueCreateInfos + i;
      for (uint32_t j = 0; j < queue_info->queueCount; j++) {
         vkGetDeviceQueue(vk_device,
                          queue_info->queueFamilyIndex,
                          j,
                          device->queues + id);
         id += 1;
      }
   }
   return 0;
}
/* Placeholder: per-device command-pool bookkeeping is not set up at
 * device creation; pools are created on demand by
 * virgl_vk_create_command_pool.  Always succeeds. */
static int
device_initialize_command_pools(struct vk_device *device)
{
   (void)device;
   return 0;
}
/* Wrap a freshly created VkDevice in a vk_device bookkeeping struct,
 * fetch its queues, create its object map and append it to the global
 * device list.  On success *device_id receives the new VGL-HANDLE
 * (the device's index in the list).
 *
 * FIXES: device->queues was leaked on later failures, the result of
 * util_hash_table_create was never checked, and device_count was
 * incremented before the device was actually registered (leaving the
 * count wrong on failure). */
static int
initialize_vk_device(uint32_t phys_device_handle,
                     VkDevice dev,
                     const VkDeviceCreateInfo *info,
                     uint32_t *device_id)
{
   struct vk_device *device;

   device = CALLOC_STRUCT(vk_device);
   if (device == NULL) {
      return -1;
   }

   LIST_INITHEAD(&device->list);
   device->handle = dev;
   device->physical_device_id = phys_device_handle;

   /* initializing device queues */
   if (0 != device_initialize_queues(phys_device_handle, dev, info, device)) {
      free(device);
      return -2;
   }

   if (0 != device_initialize_command_pools(device)) {
      free(device->queues);
      free(device);
      return -3;
   }

   /* creating device resource maps */
   device->next_handle = 1;
   device->objects = util_hash_table_create(hash_func,
                                            vkobj_compare,
                                            vkobj_free);
   if (device->objects == NULL) {
      free(device->queues);
      free(device);
      return -4;
   }

   /* registering device: the handle is its position in the list */
   *device_id = vulkan_state->device_count;
   vulkan_state->device_count++;
   LIST_ADDTAIL(&vulkan_state->devices->list, &device->list);
   return 0;
}
/* Report how many physical devices were enumerated at init time.
 * (Despite the name this counts physical devices, not logical ones.)
 * Always returns 0. */
int virgl_vk_get_device_count(uint32_t *device_count)
{
   *device_count = vulkan_state->physical_device_count;
   return 0;
}
/* Copy the VkPhysicalDeviceSparseProperties of the given physical
 * device into *sparse_props.
 * Returns 0 on success, -1 for an out-of-range handle. */
int virgl_vk_get_sparse_properties(uint32_t device_id,
                                   VkPhysicalDeviceSparseProperties *sparse_props)
{
   VkPhysicalDeviceProperties props;

   if (device_id >= vulkan_state->physical_device_count) {
      return -1;
   }

   vkGetPhysicalDeviceProperties(vulkan_state->physical_devices[device_id], &props);
   /* Only the sparse sub-struct is exposed to the guest. */
   memcpy(sparse_props, &props.sparseProperties, sizeof(*sparse_props));
   return 0;
}
/* Fill *props with the memory properties of the given physical device.
 * Returns 0 on success, -1 for an out-of-range handle. */
int virgl_vk_get_memory_properties(uint32_t phys_device_handle,
                                   VkPhysicalDeviceMemoryProperties *props)
{
   VkPhysicalDevice phys_dev;

   if (phys_device_handle >= vulkan_state->physical_device_count) {
      return -1;
   }
   phys_dev = vulkan_state->physical_devices[phys_device_handle];

   /* Clear first so padding/unused entries are deterministic. */
   memset(props, 0, sizeof(*props));
   vkGetPhysicalDeviceMemoryProperties(phys_dev, props);
   return 0;
}
/* Query the queue-family properties of a physical device.
 * On success *props is a CALLOC'd array of *family_count entries the
 * caller must free.  Returns -1 on a bad handle or allocation failure
 * (*family_count is zeroed only in the allocation-failure case). */
int virgl_vk_get_queue_family_properties(uint32_t device_id,
                                         uint32_t *family_count,
                                         VkQueueFamilyProperties **props)
{
   VkPhysicalDevice dev;

   dev = get_physical_device_from_handle(device_id);
   if (dev == VK_NULL_HANDLE) {
      return -1;
   }

   /* Standard Vulkan two-call pattern: first call yields the count,
    * second fills the array. */
   vkGetPhysicalDeviceQueueFamilyProperties(dev, family_count, NULL);
   *props = CALLOC(*family_count, sizeof(VkQueueFamilyProperties));
   if (*props == NULL) {
      *family_count = 0;
      return -1;
   }
   vkGetPhysicalDeviceQueueFamilyProperties(dev, family_count, *props);
   return 0;
}
/* Create a VkDevice on the given physical device and register it in
 * the global device list; *device_id receives its VGL-HANDLE.
 * Returns 0 on success; -1 bad physical device, -2 vkCreateDevice
 * failed, -3 bookkeeping failed (the VkDevice is destroyed again). */
int virgl_vk_create_device(uint32_t phys_device_id,
                           const VkDeviceCreateInfo *info,
                           uint32_t *device_id)
{
   VkDevice dev;
   VkResult res;
   VkPhysicalDevice physical_dev;

   physical_dev = get_physical_device_from_handle(phys_device_id);
   if (physical_dev == VK_NULL_HANDLE) {
      return -1;
   }

   res = vkCreateDevice(physical_dev, info, NULL, &dev);
   if (res != VK_SUCCESS) {
      return -2;
   }

   if (initialize_vk_device(phys_device_id, dev, info, device_id) < 0) {
      /* Don't leak the Vulkan device if our bookkeeping failed. */
      vkDestroyDevice(dev, NULL);
      return -3;
   }
   return 0;
}
/* Allocate 'descriptor_count' descriptor sets from a pool, using the
 * layouts named by desc_layout_ids, and register each set, writing the
 * VGL-HANDLEs into 'handles'.
 *
 * FIXES vs. original:
 *  - 'device' was dereferenced (device_get_object) before its NULL
 *    check, crashing on a bad device handle;
 *  - the error-cleanup loop incremented j instead of decrementing, and
 *    freed sets[i] on every iteration instead of the set matching
 *    handles[j-1] — now all Vulkan sets are returned in one call and
 *    the registered handles removed in reverse order;
 *  - free(sets) inside the sets==NULL branch was a no-op and is gone. */
int virgl_vk_allocate_descriptor_set(uint32_t device_handle,
                                     uint32_t pool_handle,
                                     uint32_t descriptor_count,
                                     uint32_t *desc_layout_ids,
                                     uint32_t *handles)
{
   vk_device_t *device = NULL;
   vk_descriptor_pool_t *pool = NULL;
   vk_descriptor_set_layout_t *layout = NULL;
   vk_descriptor_set_t *sets = NULL;
   VkDescriptorSet *vk_sets = NULL;
   VkDescriptorSetLayout *vk_layouts = NULL;
   VkDescriptorSetAllocateInfo vk_info;
   VkResult res;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }
   pool = device_get_object(device, pool_handle);
   if (NULL == pool) {
      return -1;
   }

   vk_layouts = alloca(sizeof(*vk_layouts) * descriptor_count);
   vk_sets = alloca(sizeof(*vk_sets) * descriptor_count);
   sets = CALLOC(descriptor_count, sizeof(*sets));
   if (NULL == sets) {
      return -2;
   }

   /* Resolve every layout handle before touching Vulkan. */
   for (uint32_t i = 0; i < descriptor_count; i++) {
      layout = device_get_object(device, desc_layout_ids[i]);
      if (NULL == layout) {
         free(sets);
         return -3;
      }
      vk_layouts[i] = layout->handle;
   }

   vk_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
   vk_info.pNext = NULL;
   vk_info.descriptorPool = pool->handle;
   vk_info.descriptorSetCount = descriptor_count;
   vk_info.pSetLayouts = vk_layouts;

   res = vkAllocateDescriptorSets(device->handle, &vk_info, vk_sets);
   if (VK_SUCCESS != res) {
      free(sets);
      return -4;
   }

   for (uint32_t i = 0; i < descriptor_count; i++) {
      sets[i].handle = vk_sets[i];
      handles[i] = device_insert_object(device, sets + i, NULL);
      if (0 != handles[i]) {
         /* success path */
         continue;
      }
      /* Registration of set i failed: unregister the i handles created
       * so far (reverse order) and give every allocated set back. */
      for (uint32_t j = i; j > 0; j--) {
         device_remove_object(device, handles[j - 1]);
      }
      vkFreeDescriptorSets(device->handle, pool->handle,
                           descriptor_count, vk_sets);
      /* FIXME: 'sets' is intentionally leaked here — table entries may
       * still reference it until object removal is reworked. */
      return -5;
   }
   return 0;
}
/* Create a VkDescriptorPool; its create/destroy functions follow the
 * generic prototype, so defer everything to create_simple_object. */
int virgl_vk_create_descriptor_pool(uint32_t device_handle,
                                    const VkDescriptorPoolCreateInfo *info,
                                    uint32_t *handle)
{
   return create_simple_object(device_handle,
                               info,
                               (PFN_vkCreateFunction)vkCreateDescriptorPool,
                               (PFN_vkDestroyFunction)vkDestroyDescriptorPool,
                               sizeof(vk_descriptor_pool_t),
                               handle);
}
/* Create a VkDescriptorSetLayout via the generic create/destroy path. */
int virgl_vk_create_descriptor_set_layout(uint32_t device_handle,
                                          VkDescriptorSetLayoutCreateInfo *info,
                                          uint32_t *handle)
{
   return create_simple_object(device_handle,
                               info,
                               (PFN_vkCreateFunction)vkCreateDescriptorSetLayout,
                               (PFN_vkDestroyFunction)vkDestroyDescriptorSetLayout,
                               sizeof(vk_descriptor_set_layout_t),
                               handle);
}
/* Create a VkShaderModule via the generic create/destroy path. */
int virgl_vk_create_shader_module(uint32_t device_handle,
                                  const VkShaderModuleCreateInfo *info,
                                  uint32_t *handle)
{
   return create_simple_object(device_handle,
                               info,
                               (PFN_vkCreateFunction)vkCreateShaderModule,
                               (PFN_vkDestroyFunction)vkDestroyShaderModule,
                               sizeof(vk_shader_module_t),
                               handle);
}
/* Create a VkPipelineLayout.  set_handles holds info->setLayoutCount
 * VGL-HANDLEs of descriptor-set layouts, resolved to Vulkan handles and
 * patched into info->pSetLayouts before creation.
 * NOTE: 'info' is mutated; pSetLayouts points to alloca'd memory that
 * is only valid for the duration of this call. */
int virgl_vk_create_pipeline_layout(uint32_t device_handle,
                                    VkPipelineLayoutCreateInfo *info,
                                    uint32_t *set_handles,
                                    uint32_t *handle)
{
   VkDescriptorSetLayout *vk_layouts = NULL;
   vk_descriptor_set_layout_t *layout = NULL;
   struct vk_device *device = NULL;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   vk_layouts = alloca(sizeof(*vk_layouts) * info->setLayoutCount);
   for (uint32_t i = 0; i < info->setLayoutCount; i++) {
      layout = device_get_object(device, set_handles[i]);
      if (NULL == layout) {
         return -1;
      }
      vk_layouts[i] = layout->handle;
   }
   info->pSetLayouts = vk_layouts;

   int res = create_simple_object(device_handle,
                                  info,
                                  (PFN_vkCreateFunction)vkCreatePipelineLayout,
                                  (PFN_vkDestroyFunction)vkDestroyPipelineLayout,
                                  sizeof(vk_pipeline_layout_t),
                                  handle);
   return res;
}
/* Create a single compute VkPipeline.  The layout and shader-module
 * VGL-HANDLEs are resolved and patched into 'info' (which is mutated).
 *
 * FIX: when device_insert_object failed, the original destroyed and
 * freed the pipeline but then fell through to "return 0" — the caller
 * saw success with handle 0 referring to a destroyed pipeline.  Now
 * returns -4 on that path. */
int virgl_vk_create_compute_pipelines(uint32_t device_handle,
                                      VkComputePipelineCreateInfo *info,
                                      uint32_t layout_handle,
                                      uint32_t module_handle,
                                      uint32_t *handle)
{
   struct vk_device *device = NULL;
   vk_pipeline_layout_t *pipeline_layout = NULL;
   vk_shader_module_t *shader_module = NULL;
   vk_pipeline_t *pipeline = NULL;
   VkResult res;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   pipeline_layout = device_get_object(device, layout_handle);
   shader_module = device_get_object(device, module_handle);
   if (NULL == pipeline_layout || NULL == shader_module) {
      return -1;
   }

   pipeline = CALLOC(1, sizeof(*pipeline));
   if (NULL == pipeline) {
      return -2;
   }

   info->layout = pipeline_layout->handle;
   info->stage.module = shader_module->handle;

   res = vkCreateComputePipelines(device->handle,
                                  VK_NULL_HANDLE,
                                  1,
                                  info,
                                  NULL,
                                  &pipeline->handle);
   if (VK_SUCCESS != res) {
      free(pipeline);
      return -3;
   }

   *handle = device_insert_object(device, pipeline,
                                  (PFN_vkDestroyFunction)vkDestroyPipeline);
   if (0 == *handle) {
      vkDestroyPipeline(device->handle, pipeline->handle, NULL);
      free(pipeline);
      return -4;
   }
   return 0;
}
/* Allocate device memory and record the memory type's property flags in
 * the wrapper, so virgl_vk_is_memory_cached can later tell whether
 * flush/invalidate are needed.  Returns 0 on success or a negative
 * create_simple_object / lookup error code. */
int virgl_vk_allocate_memory(uint32_t device_handle,
                             const VkMemoryAllocateInfo *info,
                             uint32_t *output)
{
   int res;
   vk_device_t *device;
   vk_device_memory_t *memory = NULL;
   VkPhysicalDeviceMemoryProperties props;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   /* Memory-type table of the underlying physical device. */
   res = virgl_vk_get_memory_properties(device->physical_device_id, &props);
   if (0 > res) {
      return res;
   }

   res = create_simple_object(device_handle,
                              info,
                              (PFN_vkCreateFunction)vkAllocateMemory,
                              (PFN_vkDestroyFunction)vkFreeMemory,
                              sizeof(vk_device_memory_t),
                              output);
   if (0 > res) {
      return res;
   }

   /* Fetch the wrapper just inserted to stash the property flags. */
   memory = device_get_object(device, *output);
   if (unlikely(NULL == memory)) {
      return -1;
   }
   memory->flags = props.memoryTypes[info->memoryTypeIndex].propertyFlags;
   return res;
}
/* Create a VkBuffer via the generic create/destroy path. */
int virgl_vk_create_buffer(uint32_t device_handle,
                           VkBufferCreateInfo *info,
                           uint32_t *handle)
{
   return create_simple_object(device_handle,
                               info,
                               (PFN_vkCreateFunction)vkCreateBuffer,
                               (PFN_vkDestroyFunction)vkDestroyBuffer,
                               sizeof(vk_buffer_t),
                               handle);
}
/* Bind a buffer to device memory at the given byte offset.
 * Returns 0 on success; -1 unknown device, -2 unknown buffer/memory,
 * -3 vkBindBufferMemory failed. */
int virgl_vk_bind_buffer_memory(uint32_t device_handle,
                                uint32_t buffer_handle,
                                uint32_t memory_handle,
                                uint64_t offset)
{
   VkResult res;
   struct vk_device *device = NULL;
   vk_buffer_t *buffer = NULL;
   vk_device_memory_t *memory = NULL;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   buffer = device_get_object(device, buffer_handle);
   memory = device_get_object(device, memory_handle);
   if (NULL == buffer || NULL == memory) {
      return -2;
   }

   res = vkBindBufferMemory(device->handle, buffer->handle, memory->handle, offset);
   if (VK_SUCCESS != res) {
      return -3;
   }
   return 0;
}
/* Perform one vkUpdateDescriptorSets write.  buffer_handles holds
 * write_info->descriptorCount buffer VGL-HANDLEs; each resolved
 * VkBuffer is patched into the matching buffer_info entry, and the
 * destination set is resolved from descriptor_handle.
 * NOTE: both write_info and buffer_info are mutated in place. */
int virgl_vk_write_descriptor_set(uint32_t device_handle,
                                  VkWriteDescriptorSet *write_info,
                                  VkDescriptorBufferInfo *buffer_info,
                                  uint32_t descriptor_handle,
                                  uint32_t *buffer_handles)
{
   struct vk_device *device = NULL;
   vk_descriptor_set_t *set = NULL;
   vk_buffer_t *buffer = NULL;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   set = device_get_object(device, descriptor_handle);
   if (NULL == set) {
      return -2;
   }

   for (uint32_t i = 0; i < write_info->descriptorCount; i++) {
      buffer = device_get_object(device, buffer_handles[i]);
      if (NULL == buffer) {
         return -2;
      }
      buffer_info[i].buffer = buffer->handle;
   }

   write_info->dstSet = set->handle;
   write_info->pBufferInfo = buffer_info;
   vkUpdateDescriptorSets(device->handle, 1, write_info, 0, NULL);
   return 0;
}
/* Report whether a memory allocation requires explicit cache
 * maintenance: *output is set to 1 when the memory type is NOT
 * host-coherent (so the guest must flush/invalidate), 0 otherwise.
 * The flags were recorded by virgl_vk_allocate_memory. */
int virgl_vk_is_memory_cached(uint32_t device_handle,
                              uint32_t memory_handle,
                              uint8_t *output)
{
   vk_device_t *device = NULL;
   vk_device_memory_t *memory = NULL;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   memory = device_get_object(device, memory_handle);
   if (NULL == memory) {
      return -2;
   }

   *output = (memory->flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0;
   return 0;
}
/* Invalidate the currently mapped range of a memory object so host
 * reads see device writes (needed for non-coherent memory).
 * Fails with -2 if the memory is unknown or not currently mapped
 * (the range comes from the state saved by virgl_vk_map_memory). */
int virgl_vk_invalidate_memory(uint32_t device_handle,
                               uint32_t memory_handle)
{
   VkMappedMemoryRange range;
   VkResult res;
   vk_device_t *device = NULL;
   vk_device_memory_t *memory = NULL;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   memory = device_get_object(device, memory_handle);
   if (NULL == memory || memory->map_ptr == NULL) {
      return -2;
   }

   range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
   range.pNext = NULL;
   range.memory = memory->handle;
   range.offset = memory->map_offset;
   range.size = memory->map_size;

   res = vkInvalidateMappedMemoryRanges(device->handle, 1, &range);
   if (VK_SUCCESS != res) {
      return -3;
   }
   return 0;
}
/* Flush the currently mapped range of a memory object so the device
 * sees host writes (needed for non-coherent memory).  Fails with -2 if
 * the memory is unknown or not currently mapped. */
int virgl_vk_flush_memory(uint32_t device_handle,
                          uint32_t memory_handle)
{
   vk_device_t *device = NULL;
   vk_device_memory_t *memory = NULL;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }
   memory = device_get_object(device, memory_handle);
   if (NULL == memory || memory->map_ptr == NULL) {
      return -2;
   }

   /* Flush the exact range recorded at map time. */
   VkMappedMemoryRange range = {
      .sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
      .pNext = NULL,
      .memory = memory->handle,
      .offset = memory->map_offset,
      .size = memory->map_size,
   };

   if (VK_SUCCESS != vkFlushMappedMemoryRanges(device->handle, 1, &range)) {
      return -3;
   }
   return 0;
}
/* Map a range of a memory object into host address space; *ptr receives
 * the mapped pointer, and the range is remembered for later
 * flush/invalidate calls.
 *
 * FIX: the original stored 'ptr' — the address of the caller's output
 * variable, i.e. a dangling stack address — in memory->map_ptr instead
 * of the mapped pointer '*ptr'. */
int virgl_vk_map_memory(uint32_t device_handle,
                        uint32_t memory_handle,
                        uint32_t offset,
                        uint32_t size,
                        void **ptr)
{
   VkResult res;
   vk_device_t *device = NULL;
   vk_device_memory_t *memory = NULL;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   memory = device_get_object(device, memory_handle);
   if (NULL == memory) {
      return -2;
   }

   res = vkMapMemory(device->handle, memory->handle, offset, size, 0, ptr);
   if (VK_SUCCESS != res) {
      return -3;
   }

   /* Remember the mapping so flush/invalidate/unmap can use it. */
   memory->map_size = size;
   memory->map_offset = offset;
   memory->map_ptr = *ptr;
   return 0;
}
/* Unmap a previously mapped memory object and clear the recorded
 * mapping state (map_ptr is the "is mapped" flag used by
 * flush/invalidate). */
int virgl_vk_unmap_memory(uint32_t device_handle,
                          uint32_t memory_handle)
{
   vk_device_t *device = NULL;
   vk_device_memory_t *memory = NULL;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   memory = device_get_object(device, memory_handle);
   if (NULL == memory) {
      return -2;
   }

   vkUnmapMemory(device->handle, memory->handle);
   memory->map_ptr = NULL;
   return 0;
}
/* Create a VkCommandPool and register it, returning its VGL-HANDLE.
 *
 * FIX: when device_insert_object failed, the original freed the
 * wrapper but leaked the Vulkan pool; destroy it before freeing. */
int virgl_vk_create_command_pool(uint32_t device_handle,
                                 const VkCommandPoolCreateInfo *info,
                                 uint32_t *handle)
{
   VkResult res;
   vk_device_t *device = NULL;
   vk_command_pool_t *pool = NULL;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   pool = CALLOC(1, sizeof(*pool));
   if (NULL == pool) {
      return -3;
   }

   res = vkCreateCommandPool(device->handle,
                             info,
                             NULL,
                             &pool->handle);
   if (VK_SUCCESS != res) {
      free(pool);
      return -4;
   }

   *handle = device_insert_object(device, pool,
                                  (PFN_vkDestroyFunction)vkDestroyCommandPool);
   if (0 == *handle) {
      vkDestroyCommandPool(device->handle, pool->handle, NULL);
      free(pool);
      return -5;
   }
   return 0;
}
/* Allocate info->commandBufferCount command buffers from 'pool',
 * growing its flat cmds array as needed.  Handles are 1-based indices
 * into that array (0 is reserved as invalid).
 *
 * FIXES: the original assigned the REALLOC result straight to
 * pool->cmds, losing (leaking) the old array and corrupting the pool
 * on failure; it also zeroed only 'count' entries while the array
 * could have grown by more than that past 'usage'. */
static int
virgl_vk_command_pool_allocate_buffers(vk_device_t *device,
                                       vk_command_pool_t *pool,
                                       VkCommandBufferAllocateInfo *info,
                                       uint32_t *handles)
{
   VkResult res;
   uint32_t count = info->commandBufferCount;

   if (pool->capacity - pool->usage < count) {
      /* Grow by exactly 'count'; no particular growth policy for now. */
      uint32_t new_capacity = pool->capacity + count;
      void *cmds = REALLOC(pool->cmds,
                           sizeof(*pool->cmds) * pool->capacity,
                           sizeof(*pool->cmds) * new_capacity);
      if (NULL == cmds) {
         /* pool->cmds is still valid; just report the failure. */
         fprintf(stderr, "cmd pool reallocation failed. good luck.\n");
         return -1;
      }
      pool->cmds = cmds;
      pool->capacity = new_capacity;
      memset(pool->cmds + pool->usage, 0,
             sizeof(*pool->cmds) * (new_capacity - pool->usage));
   }

   res = vkAllocateCommandBuffers(device->handle, info, pool->cmds + pool->usage);
   if (VK_SUCCESS != res) {
      return -2;
   }

   for (uint32_t i = 0; i < count; i++) {
      /* 0 is an invalid handle */
      handles[i] = pool->usage + i + 1;
   }
   pool->usage += count;
   return 0;
}
/* Allocate command buffers from the pool named by pool_handle.
 * info->commandPool is overwritten with the resolved Vulkan pool; the
 * resulting 1-based handles are written into 'handles'. */
int virgl_vk_allocate_command_buffers(uint32_t device_handle,
                                      uint32_t pool_handle,
                                      VkCommandBufferAllocateInfo *info,
                                      uint32_t *handles)
{
   int res;
   vk_device_t *device = NULL;
   vk_command_pool_t *pool = NULL;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   pool = device_get_object(device, pool_handle);
   if (NULL == pool) {
      return -2;
   }

   info->commandPool = pool->handle;
   res = virgl_vk_command_pool_allocate_buffers(device, pool, info, handles);
   if (0 > res) {
      return -3;
   }
   return 0;
}
/* NOTE:
* for now, this function only handles compute operations.
*/
/* Record a complete compute command buffer: begin, bind pipeline and
 * descriptor sets, dispatch, end.  (Compute-only for now; see the NOTE
 * above.)  All handles in 'info' are VGL-HANDLEs; cmd_handle is a
 * 1-based index into the pool's cmds array. */
int virgl_vk_record_command(uint32_t device_handle,
                            const struct virgl_vk_record_info *info)
{
   vk_device_t *device = NULL;
   vk_command_pool_t *pool = NULL;
   vk_pipeline_t *pipeline = NULL;
   vk_pipeline_layout_t *pipeline_layout = NULL;
   vk_descriptor_set_t *set = NULL;
   VkCommandBuffer cmd = VK_NULL_HANDLE;
   VkDescriptorSet *descriptors = NULL;
   VkResult res;
   VkCommandBufferBeginInfo begin_info = { 0 };

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   pool = device_get_object(device, info->pool_handle);
   pipeline = device_get_object(device, info->pipeline_handle);
   pipeline_layout = device_get_object(device, info->pipeline_layout_handle);
   if (NULL == pipeline || NULL == pipeline_layout || NULL == pool) {
      return -2;
   }

   /* Resolve every descriptor-set handle before recording anything. */
   descriptors = alloca(sizeof(*descriptors) * info->descriptor_count);
   for (uint32_t i = 0; i < info->descriptor_count; i++) {
      set = device_get_object(device, info->descriptor_handles[i]);
      if (NULL == set) {
         return -2;
      }
      descriptors[i] = set->handle;
   }

   if (pool->usage < info->cmd_handle) {
      return -2;
   }

   /* 0 is an invalid handles. entry = handle - 1 */
   cmd = pool->cmds[info->cmd_handle - 1];
   if (VK_NULL_HANDLE == cmd) {
      return -2;
   }

   begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
   //FIXME: get type from guest
   begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;

   res = vkBeginCommandBuffer(cmd, &begin_info);
   if (VK_SUCCESS != res) {
      return -4;
   }

   vkCmdBindPipeline(cmd, info->bind_point, pipeline->handle);
   vkCmdBindDescriptorSets(cmd,
                           info->bind_point,
                           pipeline_layout->handle,
                           0,
                           info->descriptor_count,
                           descriptors,
                           0,
                           NULL);
   vkCmdDispatch(cmd,
                 info->dispatch_size[0],
                 info->dispatch_size[1],
                 info->dispatch_size[2]);

   res = vkEndCommandBuffer(cmd);
   if (VK_SUCCESS != res) {
      return -4;
   }
   return 0;
}
/* Create a VkFence via the generic create/destroy path. */
int virgl_vk_create_fence(uint32_t device_handle,
                          VkFenceCreateInfo *info,
                          uint32_t *handle)
{
   return create_simple_object(device_handle,
                               info,
                               (PFN_vkCreateFunction)vkCreateFence,
                               (PFN_vkDestroyFunction)vkDestroyFence,
                               sizeof(vk_fence_t),
                               handle);
}
/* Wait on 'fence_count' fences named by VGL-HANDLEs.
 * NOTE(review): unlike the rest of this file, the success path returns
 * the raw VkResult (VK_SUCCESS/VK_TIMEOUT/...) rather than 0/-N —
 * callers must expect both conventions. */
int virgl_vk_wait_for_fences(uint32_t device_handle,
                             uint32_t fence_count,
                             uint32_t *handles,
                             uint32_t wait_all,
                             uint64_t timeout)
{
   vk_device_t *device = NULL;
   vk_fence_t *fence = NULL;
   VkFence *fences = NULL;
   VkResult res;

   device = get_device_from_handle(device_handle);
   if (NULL == device) {
      return -1;
   }

   fences = alloca(sizeof(*fences) * fence_count);
   for (uint32_t i = 0; i < fence_count; i++) {
      fence = device_get_object(device, handles[i]);
      if (NULL == fence) {
         return -2;
      }
      fences[i] = fence->handle;
   }

   res = vkWaitForFences(device->handle, fence_count, fences, wait_all, timeout);
   return res;
}
/* Submit one batch to a device queue.  All handles in 'info' are
 * VGL-HANDLEs; cmd_handles are 1-based indices into their respective
 * pools (pool_handles runs parallel to cmd_handles).
 * Returns the raw VkResult of vkQueueSubmit on the success path, or
 * -1/-2 for resolution failures. */
int virgl_vk_queue_submit(const struct virgl_vk_submit_info *info)
{
   vk_device_t *vk_device = NULL;
   vk_fence_t *vk_fence = NULL;
   vk_command_pool_t *vk_pool = NULL;
   vk_semaphore_t *vk_semaphore = NULL;
   VkFence fence = VK_NULL_HANDLE;
   VkSubmitInfo vk_info = { 0 };
   VkQueue queue;
   VkSemaphore *wait_s = NULL;
   VkSemaphore *signal_s = NULL;
   VkCommandBuffer *cmds = NULL;
   VkPipelineStageFlags *flags = NULL;

   vk_device = get_device_from_handle(info->device_handle);
   if (NULL == vk_device) {
      return -1;
   }

   /* Queue handles are plain indices into the device's queue array. */
   if (info->queue_handle >= vk_device->queue_count) {
      return -2;
   }
   queue = vk_device->queues[info->queue_handle];

   /* fence_handle 0 means "no fence". */
   if (info->fence_handle != 0) {
      vk_fence = device_get_object(vk_device, info->fence_handle);
      if (NULL == vk_fence) {
         return -2;
      }
      fence = vk_fence->handle;
   }

   vk_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;

   /* Resolve wait semaphores and their stage masks. */
   vk_info.waitSemaphoreCount = info->wait_count;
   wait_s = alloca(sizeof(VkSemaphore) * info->wait_count);
   flags = alloca(sizeof(VkPipelineStageFlags) * info->wait_count);
   for (uint32_t i = 0; i < info->wait_count; i++) {
      vk_semaphore = device_get_object(vk_device, info->wait_handles[i]);
      if (NULL == vk_semaphore) {
         return -2;
      }
      wait_s[i] = vk_semaphore->handle;
      flags[i] = info->wait_stage_masks[i];
   }

   /* Resolve the command buffers from their pools. */
   vk_info.commandBufferCount = info->cmd_count;
   cmds = alloca(sizeof(VkCommandBuffer) * info->cmd_count);
   for (uint32_t i = 0; i < info->cmd_count; i++) {
      vk_pool = device_get_object(vk_device, info->pool_handles[i]);
      if (NULL == vk_pool) {
         return -2;
      }

      /* remember: handle 0 is invalid. Thus we need to substract one */
      if (0 == info->cmd_handles[i] || info->cmd_handles[i] - 1 >= vk_pool->usage) {
         return -2;
      }
      cmds[i] = vk_pool->cmds[info->cmd_handles[i] - 1];
   }

   /* Resolve signal semaphores. */
   vk_info.signalSemaphoreCount = info->signal_count;
   signal_s = alloca(sizeof(VkSemaphore) * info->signal_count);
   for (uint32_t i = 0; i < info->signal_count; i++) {
      vk_semaphore = device_get_object(vk_device, info->signal_handles[i]);
      if (NULL == vk_semaphore) {
         return -2;
      }
      signal_s[i] = vk_semaphore->handle;
   }

   vk_info.pWaitSemaphores = wait_s;
   vk_info.pWaitDstStageMask = flags;
   vk_info.pCommandBuffers = cmds;
   vk_info.pSignalSemaphores = signal_s;

   return vkQueueSubmit(queue, 1, &vk_info, fence);
}
/* Destroy a per-device object by VGL-HANDLE.  Removing the hashtable
 * entry triggers vkobj_free, which runs the stored Vulkan destroy
 * callback. */
int
virgl_vk_destroy_object(uint32_t device_handle,
                        uint32_t handle)
{
   vk_device_t *vk_device = get_device_from_handle(device_handle);

   if (NULL == vk_device) {
      return -1;
   }
   util_hash_table_remove(vk_device->objects, intptr_to_pointer(handle));
   return 0;
}
/* Tear down a device's bookkeeping: destroying the hashtable runs
 * vkobj_free on every remaining entry.
 * NOTE(review): device->queues and the VkDevice itself are not
 * released here either — confirm whether the caller owns them. */
static void
deinit_device(vk_device_t *device)
{
   util_hash_table_destroy(device->objects);

   /* About descriptor set layouts:
    * This issue is pretty much the same as the commands one.
    * A descriptor is linked to a pool.
    * Right now, the object allocation mechanism is too rudimentary.
    * I didn't took these kind of objects into concideration.
    *
    * This should be taken into account for the next iteration
    */
   fprintf(stderr, "FIXME: descriptor layouts have not been cleaned-up.\n");
}
/* Destroy the logical device at list position 'device_handle'.
 *
 * FIX: the original never decremented device_handle inside the loop,
 * so only handle 0 could ever match — any other handle walked the
 * whole list and returned -1.
 * FIXME: handles are positional, so destroying a device shifts the
 * handles of every later device; the handle scheme needs rework. */
int
virgl_vk_destroy_device(uint32_t device_handle)
{
   vk_device_t *it = NULL;
   vk_device_t *storage = NULL;

   LIST_FOR_EACH_ENTRY_SAFE(it, storage, &vulkan_state->devices->list, list) {
      if (0 == device_handle) {
         LIST_DEL(&it->list);
         deinit_device(it);
         free(it);
         return 0;
      }
      device_handle--;
   }
   return -1;
}
|
Keenuts/virglrenderer
|
vtest/vtest.h
|
/**************************************************************************
*
* Copyright (C) 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef VTEST_H
#define VTEST_H

/* Silence unused-parameter warnings for handlers with fixed signatures. */
#define UNUSED_PARAMETER(Param) (void)(Param)

#include <errno.h>
/* FIX: every prototype below uses uint32_t, but this header never
 * included <stdint.h>; it only compiled when the includer happened to
 * pull the type in first. Make the header self-contained. */
#include <stdint.h>

int vtest_create_renderer(int in_fd, int out_fd, uint32_t length);

int vtest_send_caps(uint32_t length_dw);
int vtest_create_resource(uint32_t length_dw);
int vtest_resource_unref(uint32_t length_dw);
int vtest_submit_cmd(uint32_t length_dw);
int vtest_transfer_get(uint32_t length_dw);
int vtest_transfer_put(uint32_t length_dw);

int vtest_block_read(int fd, void *buf, int size);
int vtest_block_write(int fd, void *buf, int size);

int vtest_resource_busy_wait(uint32_t length_dw);
int vtest_renderer_create_fence(uint32_t length_dw);
int vtest_poll(void);

void vtest_destroy_renderer(void);

/* client connection endpoints */
struct vtest_renderer {
   int in_fd;
   int out_fd;
};

extern struct vtest_renderer renderer;

#endif
|
jddixon/xlutil_py
|
src/extsrc/cFTLogForPy.h
|
<reponame>jddixon/xlutil_py<filename>src/extsrc/cFTLogForPy.h
/* cFTLogForPy.h */
#ifndef _C_FT_LOG_FOR_PY_H_
#define _C_FT_LOG_FOR_PY_H_
// we need something like -I /usr/include/python2.7 on the command line
// if this isn't first, expect _POSIX_C_SOURCE redefined warnings
#include <Python.h>
/* pthread specs require that this be #included first */
#include <pthread.h>
#include <structmember.h>
#include <errno.h>
#include <ev.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h> // calloc and such
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
/* maximum number of open log files */
#define CLOG_MAX_LOG (16)
// how often we write the log to disk, in seconds
#define WRITE_INTERVAL (0.1)
#define LOG_BUFFER_SIZE (4*4096)
#define ACTIVE_BUF (1)
#define FULL_BUF (2)
#define BEING_WRITTEN (3)
#define READY_BUF (4)
#define PADBYTES (1024)
/* Descriptor for one in-memory log page. `data` points into the owning
 * log's logBuffers array (no separate allocation). */
typedef struct _logPage {
    unsigned char* data;
    uint16_t offset; // to first free byte
    uint16_t pageBytes; // K * LOG_PAGE_SIZE
    uint16_t flags; // one of ACTIVE_BUF / FULL_BUF / BEING_WRITTEN / READY_BUF
} logBufDesc_t;
/*
*/
#define PATH_SEP '/'
#define MAX_PATH_LEN (256)
#define C_FT_LOG_BUF_COUNT (4)
/*
* This is a data structure allocated for each log in use. The data structure
* must be initialized before use and deallocated on close().
*
* Presumably need to align this
*
*/
/* Per-log descriptor: the page ring, its backing storage, the file's
 * directory/name, the open fd, the libev flush timer and the mutex
 * protecting the ring. Allocated by cLogAllocInit(), freed by
 * cLogDealloc(). */
typedef struct _c_log_ {
    logBufDesc_t logBufDescs[C_FT_LOG_BUF_COUNT];
    unsigned char logBuffers[C_FT_LOG_BUF_COUNT * LOG_BUFFER_SIZE];
    // GCC insists upon all the parentheses
    char logDir [MAX_PATH_LEN+1] __attribute__((aligned(16)));
    char logName[MAX_PATH_LEN+1] __attribute__((aligned(16)));
    /* FIX: fd was u_int32_t, but it stores the result of open(), which
     * is -1 on failure; checks such as `fd >= 0` are vacuously true on
     * an unsigned type. Use plain int, as POSIX does. The remaining
     * counters use the standard <stdint.h> uint32_t spelling instead of
     * the BSD-only u_int32_t. */
    int fd __attribute__((aligned(16)));
    uint32_t count; // number of messages written
    ev_timer t_watcher; // timed write to disk
    // buffer write flags
# define WRITE_PENDING (0x0001)
# define WRITE_IN_PROGRESS (0x0002)
    uint32_t writeFlags;
    uint32_t bufInUse; // which buffer we are using
    pthread_mutex_t logBufLock; // = PTHREAD_MUTEX_INITIALIZER;
} cFTLogDesc_t;
// GLOBALS //////////////////////////////////////////////////////////
extern int secondThreadStarted;
extern struct ev_loop* loop;
extern int logNdx; // one less than the number of logs open
extern cFTLogDesc_t* logDescs[CLOG_MAX_LOG];
extern pthread_t writerThread;
// extern int writerReady;
// extern pthread_mutex_t readyLock;
// extern pthread_cond_t readyCond;
// PROTOTYPES ///////////////////////////////////////////////////////
extern void initLogDescs(void);
extern int openLogFile(const char* pathToLog) ;
extern void cLogDealloc(int ndx);
extern int setupLibEvAndCallbacks(int);
extern int initLogBuffers(int);
extern int writerInitThreaded(void);
// MODULE-LEVEL METHODS ////////////////////////////////////
PyObject* init_cft_logger(PyObject* self, PyObject* args);
PyObject* open_cft_log(PyObject* self, PyObject* args);
PyObject* close_cft_logger(PyObject* self, PyObject* args);
PyObject* log_msg(PyObject* self, PyObject* args);
// WRAPPED FUNCTIONS //////////////////////////////////////
int _open_cft_log(const char* pathToLog);
void _log_msg(const int ndx, const char* msg);
#endif /* _C_FT_LOG_FOR_PY_H_ */
|
jddixon/xlutil_py
|
src/extsrc/threading.c
|
<filename>src/extsrc/threading.c<gh_stars>0
/* ~/dev/py/xlutil_py/extsrc/threading.c */
#include "cFTLogForPy.h"
pthread_t writerThread;
static int writerReady = false;
static pthread_mutex_t readyLock;
static pthread_cond_t readyCond;
// local prototypes
static void* startWriter (void * arg);
static int writerInit(void);
// These comments have become scattered in the reorganization of the
// code.
/////////////////////////////////////////////////////////////////////
// INTERFACE TO DISK WRITER
/////////////////////////////////////////////////////////////////////
// Buffer pages are allocated in integral multiples of LOG_BUFFER_SIZE
// bytes. Initially we allocate two log pages and the first is ACTIVE.
// When the main thread needs to write a message it gets the mutex lock
// and copies the message to the active page, so long as it will fit;
// otherwise it marks the page in use as FULL and advances to the next
// available page, marking that page as ACTIVE, and writes the message
// into it. In either case the offset on the page written to is updated.
//
// When the disk writer thread gets control, it gets the lock and checks
// for any full pages (checking cyclically from the page following the
// active page). Any found are marked BEING_WRITTEN, as is the active
// page. The next free page is marked ACTIVE_BUF. The lock is then
// released and those pages marked BEING_WRITTEN are written to disk in
// cyclic order. When the disk write is complete, all pages are marked
// READY and their offsets reset to zero.
//
// If a signal is received, any entries written to the disk buffer should
// be flushed to disk before passing control to any other signal handler
// code.
// static void dumpBufDesc(int n, logBufDesc_t* p) {
// printf("BUFFER %d:\n", n);
// printf(" offset: %5d\n", p->offset);
// printf(" pageBytes: %5d\n", p->pageBytes);
// printf(" flags: %5x\n", p->flags);
// printf(" content: '%s'", p->data);
// }
/////////////////////////////////////////////////////////////////////
// DISK WRITER INITIALIZATION (runs in main thread)
/////////////////////////////////////////////////////////////////////
/**
* XXX INCORRECT DESCRIPTION:
*
* Start a background thread. This opens a named log file in append mode.
* If the log file and/or its containing directory do not exist, it or
* they are created. From the main thread we then allow callers to
* write strings to a buffer. Periodically this gets flushed to disk;
* the write to disk occurs in the second thread. The close() function
* runs in the main thread, synchronizing (joining) with the second
* thread before returning.
*/
/**
 * Start the disk writer running in a new thread, then block until it
 * signals (via readyCond) that its initialization is complete.
 * Returns 0 on success, -1 if the thread could not be created.
 *
 * NOTE(review): if writerInit() fails in the new thread before setting
 * writerReady, this function waits forever -- TODO confirm intended.
 */
int writerInitThreaded(void) {
    writerReady = false;
    // dynamic init: PTHREAD_*_INITIALIZER only works for static storage
    pthread_mutex_init (&readyLock, NULL);
    pthread_cond_init (&readyCond, NULL);
    // readyLock = PTHREAD_MUTEX_INITIALIZER;
    // readyCond = PTHREAD_COND_INITIALIZER;
    if (pthread_create(&writerThread, NULL, startWriter, NULL) != 0) {
        perror("P: Failed to initialize writer thread");
        return -1;
    }
    // now wait for the new thread to say that it's ready
    pthread_mutex_lock(&readyLock);
    while(! writerReady) {   // loop guards against spurious wakeups
        pthread_cond_wait(&readyCond, &readyLock);
    }
    pthread_mutex_unlock(&readyLock);
    return 0;
}
/////////////////////////////////////////////////////////////////////
// DISK WRITER THREAD ///////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////
// INITIALIZATION CODE ////////////////////////////////////
/**
 * Thread entry point for the disk writer. Runs writerInit(), which
 * normally never returns (it enters the libev event loop); control
 * comes back here only on initialization failure or when the loop
 * is stopped.
 */
static void* startWriter (void * arg) {
    (void) arg;  /* unused: required by the pthread entry-point signature */
    if (writerInit() < 0) {
        /* FIX: report on stderr, not stdout, so the failure is not lost
         * when stdout is buffered or redirected */
        fprintf(stderr, "log writer initialization failed!\n");
        /* NOTE(review): on this path writerReady is never set, so
         * writerInitThreaded() blocks forever -- TODO confirm */
    }
    return NULL;
}
/**
 * Writer-thread initialization: create a private libev event loop,
 * signal the main thread (via readyCond) that setup is done, then run
 * the loop. ev_loop() normally never returns, so the final `return 0`
 * is reached only if the loop stops.
 */
static int writerInit(void) {
    /* XXX NEED A SANITY CHECK HERE */
    loop = ev_loop_new( EVFLAG_AUTO );
    // if(setupLibEvAndCallbacks() < 0) {
    //     printf("Error starting libevent\n");
    //     return -1;
    // }
    // Signal client, writer is ready; the lock/set/signal/unlock order
    // pairs with the cond_wait loop in writerInitThreaded().
    pthread_mutex_lock(&readyLock);
    writerReady = true;
    pthread_cond_signal(&readyCond);
    pthread_mutex_unlock(&readyLock);
    // LOG(1, ("Writer init complete\n"));
    ev_loop(loop, 0); // start the loop; this should never return
    return 0;
}
|
jddixon/xlutil_py
|
src/extsrc/modFunc.c
|
<gh_stars>0
/* ~/dev/py/xlutil_py/extfunc/modFunc.c */
#include "cFTLogForPy.h"
/* Module function: reset global logger state and start the background
 * writer thread. Returns the writer-init status (0 on success) as a
 * Python int. */
PyObject* init_cft_logger(PyObject* self, PyObject* args) {
    logNdx = -1;                    // 0-based current index
    secondThreadStarted = false;    // XXX SHOULD NOT NEED THIS
    initLogDescs();                 // clears descriptor table
    int rc = writerInitThreaded();
    if (rc == 0) {
        secondThreadStarted = true;
    }
    return Py_BuildValue("i", rc);
}
/**
 * Open a log file given a path to it. Returns a negative error code
 * or a non-negative log index.
 *
 * NOTE that the name begins with an underscore (_).
 */
int _open_cft_log(const char* pathToLog) {
    int status = 0;
    logNdx++; // USED in openLogFile
    int fd = openLogFile(pathToLog);
    if (fd < 0) {
        status = -1;
        /* FIX: snprintf, not sprintf -- pathToLog comes from Python
         * callers and may be arbitrarily long; sprintf could overflow
         * str[] here. */
        char str[512];
        snprintf(str, sizeof str, "openClog: opening log file '%s'", pathToLog);
        perror(str);
    }
    if (!status)
        status = initLogBuffers(logNdx);
    if (!status)
        status = setupLibEvAndCallbacks(logNdx);
    if (status < 0) {
        logNdx--;   /* roll the index back on any failure */
        return status;
    } else {
        return logNdx;
    }
}
/* Python wrapper around _open_cft_log(): parse the path argument and
 * return the resulting log index (or a negative status) as a Python int. */
PyObject* open_cft_log(PyObject* self, PyObject* args) {
    char* pathToLog = NULL;
    if (!PyArg_ParseTuple(args, "s", &pathToLog)) {
        return NULL;
    }
    return Py_BuildValue("i", _open_cft_log(pathToLog));
}
/**
 * Module-level close: stop all flush timers, give any in-flight write a
 * chance to finish, tear down the event loop, join the writer thread,
 * flush remaining buffered messages from the main thread, close the log
 * files and free every descriptor.
 * Returns the last status as a Python int; 0 means success.
 */
PyObject* close_cft_logger(PyObject* self, PyObject* args) {
    // we'll just ignore any arguments
    int status = 0;
    // shut down any watchers ---------------------------------------
    int ndx;
    for (ndx = 0; ndx <= logNdx; ndx++)
        ev_timer_stop(loop, &logDescs[ndx]->t_watcher);
    // if write is in progress, wait for it to complete -------------
    // LATER: we will just wait two WRITE_INTERVAL; this may or may
    // not make sense !
    double twoWrites = 4 * WRITE_INTERVAL; // HACK: 4 INSTEAD OF TWO
    struct timespec ts;
    ts.tv_sec = (int)twoWrites; // integer part; this will be zero
    // the next line converts the fractional part of twoWrites to ns
    ts.tv_nsec = ( twoWrites - ts.tv_sec ) * 1000000000;
    //printf ("sleeping %lds, %ldns\n", ts.tv_sec, ts.tv_nsec); fflush(stdout);
    nanosleep(&ts, NULL);
    // stop the background event loop -------------------------------
    // printf(" DESTROYING event loop\n"); fflush(stdout);
    ev_loop_destroy(loop); // we created it, so this is safe
    // printf(" ... event loop destroyed\n"); fflush(stdout);
    // we get (25% of the time with twoWrites = 3; 10% with 6; 20% with 10)
    // (libev) epoll_wait: Bad file descriptor
    // and some point after this
    // wait for the logger thread to stop ----------------------------
    int e = pthread_join(writerThread, NULL); // returns error number
    if (e)
        perror("join with writer thread");
    // printf("JOIN COMPLETE\n"); fflush(stdout);
    //===============================================================
    // XXX SEGFAULT but empty log file if we return here ============
    // If you use gdb python and then run testLogMgr.py you can see
    // that the segfault is related to a 'no such file or directory'
    // error, which gdb connects to line 4439 in malloc.c
    //
    // Review of the code shows that the log file is opened in the main
    // thread, written in the writerThread, and then flushed and closed
    // back in the main thread. It would seem cleaner to do all file
    // IO in one thread, the writerThread. This might also get rid
    // of this strange bug!!
    //===============================================================
    // POSSIBLE WRITE TO DISK FROM MAIN THREAD //////////////////////
    // flush any pending messages to disk ---------------------------
    for (ndx = 0; ndx <= logNdx; ndx++) {
        logBufDesc_t* p = &logDescs[ndx]->logBufDescs[logDescs[ndx]->bufInUse];
        if ( p->offset > 0 ) {
            // write buffer to disk
            p->flags = BEING_WRITTEN;
            int bytesWritten = write(logDescs[ndx]->fd, p->data, p->offset);
            if (bytesWritten == -1) {
                perror ("closeLog, writing log buffer to disk");
            }
            status = fsync(logDescs[ndx]->fd);
            if (status)
                perror("fsync to fd");
        }
        // printf("ABOUT TO ACTUALLY CLOSE LOG FILE %d\n", ndx);
        // close the log file -------------------------------------------
        if (!status && (logDescs[ndx]->fd >= 0)) {
            status = close(logDescs[ndx]->fd);
        }
    }
    // release any resources allocated
    // HACK - the twoWrites sleep above considerably reduced the number of
    // bad FD faults; adding this back in appears to have eliminated them
    usleep(5000*1000);
    // END
    // if the status is non-zero we nevertheless close all files
    for ( ; logNdx >= 0; logNdx--)
        cLogDealloc(logNdx);
    // the PY_BuildValue should convert a plain old C int to a Python integer
    return Py_BuildValue("i", status);
}
/**
 * Write a log message.
 *
 * Arguments arrive as the Python tuple (ndx, msg), parsed with format
 * "is": ndx is the buffer index and msg a NUL-terminated string that
 * is expected to already end in a newline. The Python layer is trusted
 * to have vetted both; pathological input (such as a message larger
 * than any buffer) may behave unpredictably.
 */
PyObject* log_msg(PyObject* self, PyObject* args) {
    int slot = 0;
    const char* text = NULL;
    if (!PyArg_ParseTuple(args, "is", &slot, &text)) {
        return NULL;
    }
    if (text != NULL) {
        _log_msg(slot, text);
    }
    Py_RETURN_NONE;
}
/**
 * Low-level write a log message function. ndx is an index into the
 * logDescs descriptor table. msg is a null-terminated C string.
 *
 * FIX: the old "find the next READY page" scan was unbounded and, as
 * its own comment admitted, sometimes looped forever while holding the
 * mutex. The scan is now bounded to one full cycle of the ring; if no
 * READY page exists the message is dropped instead of deadlocking.
 */
void _log_msg(const int ndx, const char* msg) {
    // XXX should make sure that ndx value is sensible
    int len = strlen(msg);
    // get the mutex
    pthread_mutex_lock(&logDescs[ndx]->logBufLock);
    // if msg will not fit, mark current page as FULL, find the next
    // READY page, and advance to it.
    logBufDesc_t* p = &logDescs[ndx]->logBufDescs[logDescs[ndx]->bufInUse];
    if (p->offset + len >= p->pageBytes) {
        p->flags = FULL_BUF;
        int tries;
        for (tries = 0; tries < C_FT_LOG_BUF_COUNT; tries++) {
            logDescs[ndx]->bufInUse =
                (logDescs[ndx]->bufInUse + 1) % C_FT_LOG_BUF_COUNT;
            p = &logDescs[ndx]->logBufDescs[logDescs[ndx]->bufInUse];
            if (p->flags == READY_BUF)
                break;
        }
        if (p->flags != READY_BUF) {
            /* no free page: the writer thread has fallen behind;
             * drop this message rather than spin with the lock held */
            pthread_mutex_unlock(&logDescs[ndx]->logBufLock);
            return;
        }
    }
    // write msg to active page and update offset; NOT null-terminated
    memcpy(p->data + p->offset, msg, len);
    p->offset += len;
    // step the message count and release the mutex
    logDescs[ndx]->count++;
    pthread_mutex_unlock(&logDescs[ndx]->logBufLock);
}
|
jddixon/xlutil_py
|
src/extsrc/cFTLogForPy.c
|
<filename>src/extsrc/cFTLogForPy.c<gh_stars>0
/* cFTLogForPy.c */
#include "cFTLogForPy.h"
/////////////////////////////////////////////////////////////////////
// GLOBALS
/////////////////////////////////////////////////////////////////////
int logNdx; // 0-based current index
int secondThreadStarted = false; // SHOULD NOT NEED ?
cFTLogDesc_t* logDescs[CLOG_MAX_LOG];
/////////////////////////////////////////////////////////////////////
// OBJECT-LEVEL CODE
/////////////////////////////////////////////////////////////////////
typedef struct {
PyObject_HEAD
// this indexes logDescs, which contains logDir and logName
long objNdx; // long to be consistent with Python
// maybe more later
} LogForPyObject;
// a forward declaration of sorts
static PyTypeObject LogForPyType;
/* LogForPy new() ------------------------------------------------ */
/* Allocate a LogForPyObject via the CPython allocator; returns NULL
 * (with an exception set) on failure. */
static LogForPyObject *
newLogForPyObject(void)
{
    return (LogForPyObject *)PyObject_New(LogForPyObject, &LogForPyType);
}
/* LogForPy dealloc() -------------------------------------------- */
/* tp_dealloc slot: the object owns no other resources, so just free it. */
static void
LogForPy_dealloc(PyObject *ptr)
{
    PyObject_Del(ptr);
}
/* LogForPy __init__ ---------------------------------------------- */
PyDoc_STRVAR(LogForPy_init__doc__, "Initialize log object.");

/* Open the log at the given path and remember its descriptor index.
 * Always returns None. */
static PyObject *
LogForPy_init(LogForPyObject *self, PyObject *args) {
    char* pathToLog;
    if (!PyArg_ParseTuple(args, "s", &pathToLog))
        return NULL;
    int objNdx = _open_cft_log(pathToLog);
    if (objNdx < 0) {
        /* NOTE(review): failure is reported as None, exactly like
         * success -- callers cannot tell the difference; TODO confirm */
        Py_RETURN_NONE;
    }
    self->objNdx = objNdx;
    /* BUG FIX: the original also called Py_INCREF(Py_None) right before
     * Py_RETURN_NONE, incrementing None's refcount twice and leaking a
     * reference on every successful call. */
    Py_RETURN_NONE;
}
/* count --------------------------------------------------------- */
PyDoc_STRVAR(LogForPy_getCount__doc__, "Get log message count.");
/* Return the number of messages written to this log as a Python int.
 * The counter is read without taking logBufLock (see XXX below). */
static PyObject *
LogForPy_getCount(LogForPyObject* self) {
    int ndx = (int) self->objNdx;
    // XXX SHOULD LOCK
    long count = (long) logDescs[ndx]->count;
    // XXX AND UNLOCK
    return PyLong_FromLong(count);
}
/* ndx ----------------------------------------------------------- */
PyDoc_STRVAR(LogForPy_getNdx__doc__, "Get objNdx attr.");
/* Return this object's index into the logDescs table. */
static PyObject *
LogForPy_getNdx(LogForPyObject* self) {
    return PyLong_FromLong(self->objNdx);
}
/* log_file ---------------------------------------------------------- */
PyDoc_STRVAR(LogForPy_getPathToLog__doc__, "Get path to log file.");

/* Return "<logDir>/<logName>" for this log as a Python str. */
static PyObject *
LogForPy_getPathToLog(LogForPyObject* self) {
    int ndx = (int)self->objNdx;
    cFTLogDesc_t* p = logDescs[ndx];
    char s[MAX_PATH_LEN+1];
    /* FIX: one bounded snprintf replaces the original strncpy/strcat/
     * strncat sequence -- it always NUL-terminates and truncates safely
     * at sizeof s. */
    snprintf(s, sizeof s, "%s/%s", p->logDir, p->logName);
    // XXX was PyString_FromString
    return PyUnicode_FromString(s);
}
/* log_msg -------------------------------------------------------- */
PyDoc_STRVAR(LogForPy_logMsg__doc__, "Write a log message.");

/* Method wrapper: parse the message string and hand it, together with
 * this object's descriptor index, to _log_msg(). */
static PyObject*
LogForPy_logMsg(LogForPyObject* self, PyObject* args) {
    const char* text = NULL;
    if (!PyArg_ParseTuple(args, "s", &text)) {
        return NULL;
    }
    if (text != NULL) {
        _log_msg((int)self->objNdx, text);
    }
    Py_RETURN_NONE;
}
/* LogForPy object methods --------------------------------------- */
static PyMethodDef LogForPy_methods[] = {
{"init", (PyCFunction)LogForPy_init,
METH_VARARGS, LogForPy_init__doc__},
{"count", (PyCFunction)LogForPy_getCount,
METH_NOARGS, LogForPy_getCount__doc__},
{"log_file", (PyCFunction)LogForPy_getPathToLog,
METH_NOARGS, LogForPy_getPathToLog__doc__},
{"log_msg", (PyCFunction)LogForPy_logMsg,
METH_VARARGS, LogForPy_logMsg__doc__},
{"ndx", (PyCFunction)LogForPy_getNdx,
METH_NOARGS, LogForPy_getNdx__doc__},
/* END OF LIST: */
{NULL, NULL} /* sentinel */
};
static PyGetSetDef LogForPy_getseters[] = {
{NULL} /* Sentinel */
};
static PyMemberDef LogForPy_members[] = {
{NULL} /* Sentinel */
};
/* without the next line LogForPyType becomes an int */
static PyTypeObject
LogForPyType = {
PyVarObject_HEAD_INIT(NULL, 0)
"cFTLogForPy.cFTLogForPy", /*tp_name*/
sizeof(LogForPyObject), /*tp_size*/
0, /*tp_itemsize*/
/* methods */
LogForPy_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_compare*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
/* XXX ADD BASE FLAG */
Py_TPFLAGS_DEFAULT, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
LogForPy_methods, /* tp_methods */
LogForPy_members, /* tp_members */
LogForPy_getseters, /* tp_getset */
};
/////////////////////////////////////////////////////////////////////
// MODULE-LEVEL CODE
/////////////////////////////////////////////////////////////////////
// Added 2016-04-03
struct module_state {
PyObject *error;
};
// #define GETSTATE(m) ((struct module_state*) PyModule_GetState(m))
/* MODULE METHODS -------------------------------------- */
PyDoc_STRVAR(LogForPy_new__doc__, "Return a new LogForPy object.");

/* Factory: allocate a fresh LogForPyObject; NULL on allocation failure
 * or if an exception is already pending. */
static PyObject *
LogForPy_new(PyObject *self, PyObject *args, PyObject *kwdict)
{
    LogForPyObject *obj = newLogForPyObject();
    if (obj == NULL)
        return NULL;
    if (PyErr_Occurred()) {
        Py_DECREF(obj);
        return NULL;
    }
    return (PyObject *)obj;
}
/* METHOD TABLE - other methods referred to are defined in modFunc.c */
static PyMethodDef cFTLogForPyMethods[] = {
// METH_VARARGS means that the arguments are passed as a tuple
// which will be parsed with PyArg_ParseTuple()
{"init_cft_logger", init_cft_logger, METH_VARARGS,
"init data structures, start background thread"},
{"open_cft_log", open_cft_log, METH_VARARGS,
"open named log file"},
{"close_cft_logger", close_cft_logger, METH_VARARGS,
"stop background thread, join, close log file"},
{"log_msg", log_msg, METH_VARARGS,
"write a message to the log"},
/* DEFINED IN THIS FILE, ABOVE --------------------- */
{"LogForPy", (PyCFunction)LogForPy_new, METH_VARARGS|METH_KEYWORDS,
LogForPy_new__doc__},
/* END OF LIST */
{NULL, NULL, 0, NULL} /* sentinel = end of this list */
};
/* MODULE DEFINITION --------------------------------------------- */
static struct PyModuleDef ftLogModule = {
PyModuleDef_HEAD_INIT,
"cFTLogForPy",
NULL, /* should be pointer to doc */
-1, // keeps state
cFTLogForPyMethods
};
/* MODULE INITIALIZATION ---------------------------------------- */
/* For Python 3 the entry point MUST be named PyInit_<modulename>. */
PyMODINIT_FUNC
PyInit_cFTLogForPy(void) {
    PyObject *m;

    /* FIX: the original assigned through Py_TYPE(&LogForPyType).
     * PyType_Ready() already fills in ob_type, and Py_TYPE() stopped
     * being an assignable lvalue in CPython 3.11, so the assignment was
     * both redundant and non-portable. */
    if (PyType_Ready(&LogForPyType) < 0)
        return NULL;

    m = PyModule_Create( &ftLogModule);
    if (m == NULL) {
        return NULL;
    }
    // XXX ADD A CONSTANT, JUST FOR FUN
    PyModule_AddIntConstant(m, "max_log", CLOG_MAX_LOG);
    return m;
}
|
jddixon/xlutil_py
|
src/extsrc/logBufs.c
|
<filename>src/extsrc/logBufs.c
/* ~/dev/py/xlutil_py/extsrc/logBufs.c */
#include "cFTLogForPy.h"
// local prototypes
static cFTLogDesc_t* cLogAllocInit(const char* logDir, const char* logName);
/* Initialize the buffer ring of descriptor `ndx`: page 0 becomes
 * ACTIVE, the rest READY; every page gets its slice of logBuffers.
 * Always returns 0.
 *
 * FIX: the parameter was declared K&R-style as `(ndx)` with an implicit
 * int type, which C99 forbids; declare it explicitly to match the
 * `int initLogBuffers(int)` prototype in cFTLogForPy.h. */
int initLogBuffers(int ndx) {
    memset(logDescs[ndx]->logBufDescs, 0,
           C_FT_LOG_BUF_COUNT * sizeof(logBufDesc_t));
    // paranoia is good for the soul
    pthread_mutex_lock(&logDescs[ndx]->logBufLock);
    int i;
    for (i = 0; i < C_FT_LOG_BUF_COUNT; i++) {
        logBufDesc_t* p = logDescs[ndx]->logBufDescs + i;
        p->flags = i == 0 ? ACTIVE_BUF : READY_BUF;
        p->pageBytes = LOG_BUFFER_SIZE;
        p->data = logDescs[ndx]->logBuffers + i * LOG_BUFFER_SIZE;
    }
    // ndx of page in use has already been set to zero
    pthread_mutex_unlock(&logDescs[ndx]->logBufLock);
    return 0;
}
/* Split path s into directory (logDir) and file name (logName).
 * Returns 0 on success, ENAMETOOLONG if s exceeds MAX_PATH_LEN, and -1
 * if s contains no path separator. Both output buffers must hold at
 * least MAX_PATH_LEN + 1 bytes.
 *
 * FIX: use strrchr() instead of the manual backwards scan; the old loop
 * could step its cursor to one before the start of the string
 * (p == s - 1), which is undefined behavior. */
static
int splitPath(char* logDir, char* logName, const char *s) {
    int len = strlen(s);
    if (len > MAX_PATH_LEN)
        return ENAMETOOLONG;
    const char *p = strrchr(s, PATH_SEP);
    if (p == NULL)
        return -1;
    size_t pathLen = p - s;
    if (pathLen > 0)
        memcpy( logDir, s, pathLen );
    logDir[ pathLen ] = 0;
    size_t nameLen = (s + len) - p - 1;
    if (nameLen > 0)
        memcpy( logName, p + 1, nameLen );
    logName[ nameLen ] = 0;
    return 0;
}
/**
 * Ensure that pathToDir exists as a directory, creating it if absent.
 * Returns 0 on success, a non-zero value otherwise.
 */
static int checkDir(const char* pathToDir) {
    struct stat sb;
    int rc = stat(pathToDir, &sb);
    if (rc != 0) {
        // stat failed: create the directory iff it simply doesn't exist
        if (errno == ENOENT) {
            rc = mkdir(pathToDir, 0744); // ??? 0644 ???
        }
        // any other errno is a real error
        return rc;
    }
    // the path exists: succeed only if it really is a directory
    return S_ISDIR(sb.st_mode) ? 0 : -1;
}
/**
 * Confirm that the path to the log directory exists, creating any
 * intervening directories if necessary.
 *
 * We assume that higher-level logic has done sensible checks. In
 * particular, we do not check for leading or internal double dots ('..').
 */
static
int checkDirs(const char* pathToLog) {
    char path[MAX_PATH_LEN + 1];
    /* FIX: strncpy does not NUL-terminate when the source is
     * MAX_PATH_LEN bytes or longer; terminate explicitly so strlen()
     * below cannot run off the end of the buffer. */
    strncpy (path, pathToLog, MAX_PATH_LEN);
    path[MAX_PATH_LEN] = '\0';
    int status = 0;
    int len = strlen(path);
    char *end = path + len;
    // XXX DOESN'T WORK IF NO SLASH
    char *p = path ;
    // do NOT check whether first char is /
    for (p++ ; p < end; p++)
        if (*p == '/') {
            *p = 0;                        // truncate at this component
            if( (status = checkDir(path)) )
                break;
            *p = '/';                      // restore and continue
        }
    status = checkDir(path);
    return status;
}
/** Deallocates a single descriptor and clears its table slot. */
// static
void cLogDealloc(int ndx) {
    cFTLogDesc_t* desc = logDescs[ndx];
    if (desc == NULL) {
        return;
    }
    free(desc);
    // printf("*** cLog deallocated desc %d successfully ***\n", ndx);
    logDescs[ndx] = NULL;
}
/* Clear the descriptor table: no logs are open initially. */
void initLogDescs(void) {
    for (int slot = 0; slot < CLOG_MAX_LOG; slot++) {
        logDescs[slot] = NULL;
    }
}
/**
 * This is called once for each log file added. Allocates and
 * initializes one descriptor; returns NULL on allocation failure.
 */
static
cFTLogDesc_t* cLogAllocInit(const char* logDir, const char* logName) {
    cFTLogDesc_t* cLog = calloc(1, sizeof(cFTLogDesc_t));
    if (cLog != NULL) {
        // this cannot be set with PTHREAD_MUTEX_INITIALIZER because it isn't
        // static; and similarly for the next two fields
        // pthread_mutex_init (&cLog->readyLock, NULL);
        // pthread_cond_init (&cLog->readyCond, NULL);
        pthread_mutex_init (&cLog->logBufLock, NULL);
        /* FIX for the old "CHECK FOR NULL OR OUTSIZED PARAMETERS" XXX:
         * bounded, always-NUL-terminated copies instead of memcpy of
         * strlen() bytes, which would overflow on outsized input. */
        snprintf(cLog->logDir,  sizeof cLog->logDir,  "%s", logDir);
        snprintf(cLog->logName, sizeof cLog->logName, "%s", logName);
        // printf("*** cLog allocated and initialized ***\n");
    }
    return cLog; // which may be NULL
}
/**
 * Split the pathToLog to get the path to the directory and the name of
 * the log file. Verifies that the log directory exists and then opens
 * the log file.
 *
 * On success returns the open fd and registers a new descriptor at
 * logDescs[logNdx]. On any failure returns -1 and registers nothing.
 */
// static
int openLogFile(const char* pathToLog) {
    char logDir [MAX_PATH_LEN + 1];
    char logName[MAX_PATH_LEN + 1];

    if (pathToLog == NULL)
        return -1;
    int status = splitPath(logDir, logName, pathToLog);
    if (!status)
        status = checkDirs(logDir);
    if (status)
        /* FIX: splitPath can return a POSITIVE errno (ENAMETOOLONG),
         * which the caller's `fd < 0` test would mistake for a valid
         * fd; normalize every failure to -1. */
        return -1;

    // O_APPEND _must_ be accompanied by O_WRONLY or O_RDWR
    int logFD = open(pathToLog, O_CREAT | O_APPEND | O_WRONLY,
                     S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
    if (logFD < 0)
        return logFD;   /* FIX: do not register a descriptor for a failed open */

    cFTLogDesc_t* sd = cLogAllocInit(logDir, logName);
    if (sd == NULL) {   /* FIX: check the allocation before dereferencing */
        close(logFD);
        return -1;
    }
    sd->fd = logFD;
    logDescs[logNdx] = sd;
    return logFD;
}
|
jddixon/xlutil_py
|
src/extsrc/evLoop.c
|
/* ~/dev/py/xlutil_py/src/extsrc/evLoop.c */
#include "cFTLogForPy.h"
// DATA /////////////////////////////////////////////////////////////
// EVENT LOOP /////////////////////////////////////////////
struct ev_loop* loop;
// CALLBACKS ////////////////////////////////////////////////////////
/**
 * Timer callback: flush the in-use buffer of the current log to disk.
 * Expect to enter this with the WRITE_PENDING flag set.
 *
 * Locking: logBufLock is held only while flags/offsets are inspected or
 * updated; it is deliberately released around the blocking write()/
 * fsync() so the main thread can keep logging into other pages.
 *
 * NOTE(review): this flushes logDescs[logNdx] -- the most recently
 * opened log -- rather than deriving an index from the watcher `w`;
 * with several logs open, only the last appears to get flushed here.
 * TODO confirm whether that is intended.
 */
static void
timedWriterCB(EV_P_ struct ev_timer *w, int revents) {
    cFTLogDesc_t* d = logDescs[logNdx];
    /* XXX THIS IMPLEMENTATION does not handle FULL pages */
    // get a lock on the descriptor
    pthread_mutex_lock(&d->logBufLock); // LOCK LOCK LOCK
    d->writeFlags &= ~WRITE_PENDING;
    // bufInUse is a logical pointer to the buffer currently in use
    logBufDesc_t* p = d->logBufDescs + d->bufInUse;
    if ( p->offset > 0 ) {
        p->flags = BEING_WRITTEN;
        d->writeFlags |= WRITE_IN_PROGRESS;
        pthread_mutex_unlock(&d->logBufLock); // UNLOCK UNLOCK
        // // DEBUG
        // char msg[512];
        // sprintf(msg, "callback flushing %d bytes to disk\n", p->offset);
        // int msgLen = strlen(msg);
        // write (d->fd, msg, msgLen);
        // // END
        // NOTE(review): bufInUse is advanced here OUTSIDE the lock --
        // looks racy against _log_msg() in the main thread; confirm.
        d->bufInUse = (d->bufInUse + 1) % C_FT_LOG_BUF_COUNT;
        // XXX if next page isn't READY, we are in serious trouble
        // write buffer to disk - this blocks, of course
        int bytesWritten = write(d->fd, p->data, p->offset);
        if (bytesWritten == -1)
            perror ("timedWriterCB, flushing to disk");
        int status = fsync(d->fd);
        if (status)
            perror("fsync, flushing log buffer in callback");
        // mark the page as ready for re-use
        pthread_mutex_lock(&d->logBufLock); // LOCK LOCK
        p->flags = READY_BUF;
        p->offset = 0;
        // CLEAR THE WRITE-IN_PROGRESS FLAG
        d->writeFlags &= ~WRITE_IN_PROGRESS;
        pthread_mutex_unlock(&d->logBufLock); // UNLOCK UNLOCK
    } else {
        // DEBUG
        // printf("callback: nothing to do, zero offset\n");
        // END
        // just release the lock; there is nothing to do
        pthread_mutex_unlock(&d->logBufLock); // UNLOCK UNLOCK UNLOCK //
    }
    // schedule the next callback XXX DROP THIS CODE XXX
    ev_timer_init(&d->t_watcher,
                  timedWriterCB, WRITE_INTERVAL, 0);
    ev_timer_start(loop, &d->t_watcher);
}
// INITIALIZATION CODE //////////////////////////////////////////////
int setupLibEvAndCallbacks(int ndx) {
ev_timer_init(&logDescs[ndx]->t_watcher, timedWriterCB, 0.1, 0);
ev_timer_start(loop, &logDescs[ndx]->t_watcher);
// DEBUG
// printf ("setup watcher for libNdx %d\n", ndx);
// END
return 0;
}
// HACKING ABOUT ////////////////////////////////////////////////////
int scheduleWrite(int ndx) {
cFTLogDesc_t* d = logDescs[logNdx];
pthread_mutex_lock(&d->logBufLock); // get a lock on the descriptor
if (!(d->writeFlags & WRITE_PENDING)) {
// no write pending, so schedule one
ev_timer_init(&d->t_watcher, timedWriterCB, 0.1, 0);
ev_timer_start(loop, &d->t_watcher);
// DEBUG
// printf ("setup watcher for libNdx %d\n", ndx);
// END
d->writeFlags |= WRITE_PENDING;
}
pthread_mutex_unlock(&d->logBufLock); // unlock the descriptor
return 0;
}
|
uybv/Adafruit_nRF52_Bootloader
|
src/usb/uf2/ghostfat.c
|
<reponame>uybv/Adafruit_nRF52_Bootloader<filename>src/usb/uf2/ghostfat.c
/*
* The MIT License (MIT)
*
* Copyright (c) Microsoft Corporation
* Copyright (c) 2020 <NAME> for Adafruit Industries
* Copyright (c) 2020 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "compile_date.h"
#include "uf2.h"
#include "configkeys.h"
#include "flash_nrf5x.h"
#include <string.h>
#include <stdio.h>
#include "bootloader_settings.h"
#include "bootloader.h"
//--------------------------------------------------------------------+
//
//--------------------------------------------------------------------+
// Classic FAT12/FAT16 boot sector / BIOS Parameter Block, mirroring the
// on-disk byte layout exactly (hence packed).
typedef struct {
    uint8_t JumpInstruction[3];
    uint8_t OEMInfo[8];
    uint16_t SectorSize;
    uint8_t SectorsPerCluster;
    uint16_t ReservedSectors;
    uint8_t FATCopies;
    uint16_t RootDirectoryEntries;
    uint16_t TotalSectors16;
    uint8_t MediaDescriptor;
    uint16_t SectorsPerFAT;
    uint16_t SectorsPerTrack;
    uint16_t Heads;
    uint32_t HiddenSectors;
    uint32_t TotalSectors32;
    uint8_t PhysicalDriveNum;
    uint8_t Reserved;
    uint8_t ExtendedBootSig;
    uint32_t VolumeSerialNumber;
    uint8_t VolumeLabel[11];
    uint8_t FilesystemIdentifier[8];
} __attribute__((packed)) FAT_BootBlock;

// Standard 32-byte FAT directory entry (8.3 short name), on-disk layout.
typedef struct {
    char name[8];
    char ext[3];
    uint8_t attrs;
    uint8_t reserved;
    uint8_t createTimeFine;
    uint16_t createTime;
    uint16_t createDate;
    uint16_t lastAccessDate;
    uint16_t highStartCluster; // high 16 bits of start cluster (FAT32 field)
    uint16_t updateTime;
    uint16_t updateDate;
    uint16_t startCluster;
    uint32_t size;
} __attribute__((packed)) DirEntry;
STATIC_ASSERT(sizeof(DirEntry) == 32);

// One synthesized, read-only file on the virtual ("ghost") FAT volume;
// a NULL content marks the special current.uf2 entry (see info[] below).
struct TextFile {
    char const name[11];
    char const *content;
};
//--------------------------------------------------------------------+
//
//--------------------------------------------------------------------+
#define BPB_SECTOR_SIZE ( 512)
#define BPB_SECTORS_PER_CLUSTER ( 1)
#define BPB_RESERVED_SECTORS ( 1)
#define BPB_NUMBER_OF_FATS ( 2)
#define BPB_ROOT_DIR_ENTRIES ( 64)
#define BPB_TOTAL_SECTORS CFG_UF2_NUM_BLOCKS
#define BPB_MEDIA_DESCRIPTOR_BYTE (0xF8)
#define FAT_ENTRY_SIZE (2)
#define FAT_ENTRIES_PER_SECTOR (BPB_SECTOR_SIZE / FAT_ENTRY_SIZE)
// NOTE: MS specification explicitly allows FAT to be larger than necessary
#define BPB_SECTORS_PER_FAT ( (BPB_TOTAL_SECTORS / FAT_ENTRIES_PER_SECTOR) + \
((BPB_TOTAL_SECTORS % FAT_ENTRIES_PER_SECTOR) ? 1 : 0))
#define DIRENTRIES_PER_SECTOR (BPB_SECTOR_SIZE/sizeof(DirEntry))
#define ROOT_DIR_SECTOR_COUNT (BPB_ROOT_DIR_ENTRIES/DIRENTRIES_PER_SECTOR)
STATIC_ASSERT(BPB_SECTOR_SIZE == 512); // GhostFAT does not support other sector sizes (currently)
STATIC_ASSERT(BPB_SECTORS_PER_CLUSTER == 1); // GhostFAT presumes one sector == one cluster (for simplicity)
STATIC_ASSERT(BPB_NUMBER_OF_FATS == 2); // FAT highest compatibility
STATIC_ASSERT(sizeof(DirEntry) == 32); // FAT requirement
STATIC_ASSERT(BPB_SECTOR_SIZE % sizeof(DirEntry) == 0); // FAT requirement
STATIC_ASSERT(BPB_ROOT_DIR_ENTRIES % DIRENTRIES_PER_SECTOR == 0); // FAT requirement
STATIC_ASSERT(BPB_SECTOR_SIZE * BPB_SECTORS_PER_CLUSTER <= (32*1024)); // FAT requirement (64k+ has known compatibility problems)
STATIC_ASSERT(FAT_ENTRIES_PER_SECTOR == 256); // FAT requirement
#define STR0(x) #x
#define STR(x) STR0(x)
// Content of INFO_UF2.TXT; uf2_init() appends SoftDevice and build-date
// lines at runtime, hence the writable, oversized buffer.
char infoUf2File[128*3] =
    "UF2 Bootloader " UF2_VERSION "\r\n"
    "Model: " UF2_PRODUCT_NAME "\r\n"
    "Board-ID: " UF2_BOARD_ID "\r\n";
// Content of INDEX.HTM: a stub page that redirects to the board's URL.
const char indexFile[] =
    "<!doctype html>\n"
    "<html>"
    "<body>"
    "<script>\n"
    "location.replace(\"" UF2_INDEX_URL "\");\n"
    "</script>"
    "</body>"
    "</html>\n";
// Root-directory contents of the virtual volume, in cluster order.
static struct TextFile const info[] = {
    {.name = "INFO_UF2TXT", .content = infoUf2File},
    {.name = "INDEX HTM", .content = indexFile},
    // current.uf2 must be the last element and its content must be NULL
    {.name = "CURRENT UF2", .content = NULL},
};
STATIC_ASSERT(ARRAY_SIZE(infoUf2File) < BPB_SECTOR_SIZE); // GhostFAT requires files to fit in one sector
STATIC_ASSERT(ARRAY_SIZE(indexFile) < BPB_SECTOR_SIZE); // GhostFAT requires files to fit in one sector
#define NUM_FILES (ARRAY_SIZE(info))
#define NUM_DIRENTRIES (NUM_FILES + 1) // Code adds volume label as first root directory entry
#define REQUIRED_ROOT_DIRECTORY_SECTORS ( ((NUM_DIRENTRIES+1) / DIRENTRIES_PER_SECTOR) + \
(((NUM_DIRENTRIES+1) % DIRENTRIES_PER_SECTOR) ? 1 : 0))
STATIC_ASSERT(ROOT_DIR_SECTOR_COUNT >= REQUIRED_ROOT_DIRECTORY_SECTORS); // FAT requirement -- Ensures BPB reserves sufficient entries for all files
STATIC_ASSERT(NUM_DIRENTRIES < (DIRENTRIES_PER_SECTOR * ROOT_DIR_SECTOR_COUNT)); // FAT requirement -- end directory with unused entry
STATIC_ASSERT(NUM_DIRENTRIES < BPB_ROOT_DIR_ENTRIES); // FAT requirement -- Ensures BPB reserves sufficient entries for all files
STATIC_ASSERT(NUM_DIRENTRIES < DIRENTRIES_PER_SECTOR); // GhostFAT bug workaround -- else, code overflows buffer
#define NUM_SECTORS_IN_DATA_REGION (BPB_TOTAL_SECTORS - BPB_RESERVED_SECTORS - (BPB_NUMBER_OF_FATS * BPB_SECTORS_PER_FAT) - ROOT_DIR_SECTOR_COUNT)
#define CLUSTER_COUNT (NUM_SECTORS_IN_DATA_REGION / BPB_SECTORS_PER_CLUSTER)
// Ensure cluster count results in a valid FAT16 volume!
STATIC_ASSERT( CLUSTER_COUNT >= 0x0FF5 && CLUSTER_COUNT < 0xFFF5 );
// Many existing FAT implementations have small (1-16) off-by-one style errors
// So, avoid being within 32 of those limits for even greater compatibility.
STATIC_ASSERT( CLUSTER_COUNT >= 0x1015 && CLUSTER_COUNT < 0xFFD5 );
#define UF2_FIRMWARE_BYTES_PER_SECTOR 256
#define TRUE_USER_FLASH_SIZE (USER_FLASH_END-USER_FLASH_START)
STATIC_ASSERT(TRUE_USER_FLASH_SIZE % UF2_FIRMWARE_BYTES_PER_SECTOR == 0); // UF2 requirement -- overall size must be integral multiple of per-sector payload?
#define UF2_SECTORS ( (TRUE_USER_FLASH_SIZE / UF2_FIRMWARE_BYTES_PER_SECTOR) + \
((TRUE_USER_FLASH_SIZE % UF2_FIRMWARE_BYTES_PER_SECTOR) ? 1 : 0))
#define UF2_SIZE (UF2_SECTORS * BPB_SECTOR_SIZE)
STATIC_ASSERT(UF2_SECTORS == ((UF2_SIZE/2) / 256)); // Not a requirement ... ensuring replacement of literal value is not a change
#define UF2_FIRST_SECTOR ((NUM_FILES + 1) * BPB_SECTORS_PER_CLUSTER) // WARNING -- code presumes each non-UF2 file content fits in single sector
#define UF2_LAST_SECTOR ((UF2_FIRST_SECTOR + UF2_SECTORS - 1) * BPB_SECTORS_PER_CLUSTER)
#define FS_START_FAT0_SECTOR BPB_RESERVED_SECTORS
#define FS_START_FAT1_SECTOR (FS_START_FAT0_SECTOR + BPB_SECTORS_PER_FAT)
#define FS_START_ROOTDIR_SECTOR (FS_START_FAT1_SECTOR + BPB_SECTORS_PER_FAT)
#define FS_START_CLUSTERS_SECTOR (FS_START_ROOTDIR_SECTOR + ROOT_DIR_SECTOR_COUNT)
// Boot-sector template for the synthesized FAT16 volume.  Copied verbatim
// into sector 0 by read_block(); the 0x55/0xAA signature is added there.
static FAT_BootBlock const BootBlock = {
    .JumpInstruction = {0xeb, 0x3c, 0x90},
    .OEMInfo = "UF2 UF2 ",
    .SectorSize = BPB_SECTOR_SIZE,
    .SectorsPerCluster = BPB_SECTORS_PER_CLUSTER,
    .ReservedSectors = BPB_RESERVED_SECTORS,
    .FATCopies = BPB_NUMBER_OF_FATS,
    .RootDirectoryEntries = BPB_ROOT_DIR_ENTRIES,
    // 16-bit count only when it fits; otherwise the 32-bit field carries it.
    .TotalSectors16 = (BPB_TOTAL_SECTORS > 0xFFFF) ? 0 : BPB_TOTAL_SECTORS,
    .MediaDescriptor = BPB_MEDIA_DESCRIPTOR_BYTE,
    .SectorsPerFAT = BPB_SECTORS_PER_FAT,
    .SectorsPerTrack = 1,
    .Heads = 1,
    .TotalSectors32 = (BPB_TOTAL_SECTORS > 0xFFFF) ? BPB_TOTAL_SECTORS : 0,
    .PhysicalDriveNum = 0x80, // to match MediaDescriptor of 0xF8
    .ExtendedBootSig = 0x29,
    .VolumeSerialNumber = 0x00420042,
    .VolumeLabel = UF2_VOLUME_LABEL,
    .FilesystemIdentifier = "FAT16 ",
};
// Use bootloaderConfig to detect BOOTLOADER ID when updating bootloader
// This helps to prevent incorrect uf2 from other boards.
extern const uint32_t bootloaderConfig[];
//--------------------------------------------------------------------+
//
//--------------------------------------------------------------------+
static inline bool is_uf2_block (UF2_Block const *bl)
{
  // All three magic markers must be present.
  if ( (bl->magicStart0 != UF2_MAGIC_START0) ||
       (bl->magicStart1 != UF2_MAGIC_START1) ||
       (bl->magicEnd    != UF2_MAGIC_END) )
  {
    return false;
  }

  // Block must carry a family ID and must not be marked "do not flash".
  if ( !(bl->flags & UF2_FLAG_FAMILYID) ) return false;
  if ( bl->flags & UF2_FLAG_NOFLASH    ) return false;

  // Payload must be exactly one firmware sector, 256-byte aligned.
  return (bl->payloadSize == UF2_FIRMWARE_BYTES_PER_SECTOR) &&
         ((bl->targetAddr & 0xff) == 0);
}
// used when upgrading application
static inline bool in_app_space (uint32_t addr)
{
  // Address lies within [USER_FLASH_START, USER_FLASH_END).
  if ( addr < USER_FLASH_START ) return false;
  return addr < USER_FLASH_END;
}
// used when upgrading bootloader
static inline bool in_bootloader_space (uint32_t addr)
{
  // Address lies within [BOOTLOADER_ADDR_START, BOOTLOADER_ADDR_END).
  if ( addr < BOOTLOADER_ADDR_START ) return false;
  return addr < BOOTLOADER_ADDR_END;
}
// used when upgrading bootloader
static inline bool in_uicr_space(uint32_t addr)
{
  // Only the single UICR page address qualifies.
  return (0x10001000UL == addr);
}
//--------------------------------------------------------------------+
//
//--------------------------------------------------------------------+
void uf2_init(void)
{
  // Complete INFO_UF2.TXT: append the SoftDevice description line
  // followed by the build date.
  strcat(infoUf2File, "SoftDevice: ");

  if ( !is_sd_existed() )
  {
    strcat(infoUf2File, "not found\r\n");
  }
  else
  {
    uint32_t const sd_id      = SD_ID_GET(MBR_SIZE);
    uint32_t const sd_version = SD_VERSION_GET(MBR_SIZE);

    // Version is packed decimal: MMmmmppp -> major.minor.patch
    sprintf(infoUf2File + strlen(infoUf2File), "S%lu version %lu.%lu.%lu\r\n",
            sd_id,
            sd_version / 1000000,
            (sd_version % 1000000) / 1000,
            sd_version % 1000);
  }

  strcat(infoUf2File, "Date: " __DATE__ "\r\n");
}
/*------------------------------------------------------------------*/
/* Read CURRENT.UF2
*------------------------------------------------------------------*/
/* Copy src into dst and space-pad the remainder of the len-byte field.
 * No NUL terminator is written: this is the fixed-width name format
 * used by FAT 8.3 directory entries. */
void padded_memcpy (char *dst, char const *src, int len)
{
  int pos = 0;

  // Copy up to len characters of src.
  while ( pos < len && src[0] )
  {
    dst[pos++] = *src++;
  }

  // Fill whatever is left with spaces.
  while ( pos < len )
  {
    dst[pos++] = ' ';
  }
}
/* GhostFAT sector read: synthesize logical sector `block_no` of the
 * virtual FAT16 volume into `data` (one BPB_SECTOR_SIZE buffer).
 * Nothing is stored anywhere; the boot sector, both FATs, the root
 * directory and all file contents are generated on demand. */
void read_block(uint32_t block_no, uint8_t *data) {
    memset(data, 0, BPB_SECTOR_SIZE);
    uint32_t sectionIdx = block_no;
    if (block_no == 0) { // Requested boot block
        memcpy(data, &BootBlock, sizeof(BootBlock));
        data[510] = 0x55; // Always at offsets 510/511, even when BPB_SECTOR_SIZE is larger
        data[511] = 0xaa; // Always at offsets 510/511, even when BPB_SECTOR_SIZE is larger
        // logval("data[0]", data[0]);
    } else if (block_no < FS_START_ROOTDIR_SECTOR) { // Requested FAT table sector
        sectionIdx -= FS_START_FAT0_SECTOR;
        // logval("sidx", sectionIdx);
        if (sectionIdx >= BPB_SECTORS_PER_FAT) {
            sectionIdx -= BPB_SECTORS_PER_FAT; // second FAT is same as the first...
        }
        if (sectionIdx == 0) {
            // first FAT entry must match BPB MediaDescriptor
            data[0] = BPB_MEDIA_DESCRIPTOR_BYTE;
            // Mark the reserved entries plus one end-of-chain entry per
            // text file as 0xFFFF.
            // WARNING -- code presumes only one NULL .content for .UF2 file
            //            and all non-NULL .content fit in one sector
            //            and requires it be the last element of the array
            uint32_t const end = (NUM_FILES * FAT_ENTRY_SIZE) + (2 * FAT_ENTRY_SIZE);
            for (uint32_t i = 1; i < end; ++i) {
                data[i] = 0xff;
            }
        }
        for (uint32_t i = 0; i < FAT_ENTRIES_PER_SECTOR; ++i) { // Generate the FAT chain for the firmware "file"
            uint32_t v = (sectionIdx * FAT_ENTRIES_PER_SECTOR) + i;
            if (UF2_FIRST_SECTOR <= v && v <= UF2_LAST_SECTOR)
                ((uint16_t *)(void *)data)[i] = v == UF2_LAST_SECTOR ? 0xffff : v + 1;
        }
    } else if (block_no < FS_START_CLUSTERS_SECTOR) { // Requested root directory sector
        sectionIdx -= FS_START_ROOTDIR_SECTOR;
        DirEntry *d = (void *)data;
        int remainingEntries = DIRENTRIES_PER_SECTOR;
        if (sectionIdx == 0) { // volume label first
            // volume label is first directory entry
            padded_memcpy(d->name, (char const *) BootBlock.VolumeLabel, 11);
            d->attrs = 0x28;
            d++;
            remainingEntries--;
        }
        // One directory entry per synthetic file, clusters numbered from 2.
        for (uint32_t i = DIRENTRIES_PER_SECTOR * sectionIdx;
             remainingEntries > 0 && i < NUM_FILES;
             i++, d++) {
            // WARNING -- code presumes all but last file take exactly one sector
            uint16_t startCluster = i + 2;
            struct TextFile const * inf = &info[i];
            padded_memcpy(d->name, inf->name, 11);
            d->createTimeFine = __SECONDS_INT__ % 2 * 100;
            d->createTime = __DOSTIME__;
            d->createDate = __DOSDATE__;
            d->lastAccessDate = __DOSDATE__;
            d->highStartCluster = startCluster >> 16;
            // DIR_WrtTime and DIR_WrtDate must be supported
            d->updateTime = __DOSTIME__;
            d->updateDate = __DOSDATE__;
            d->startCluster = startCluster & 0xFFFF;
            d->size = (inf->content ? strlen(inf->content) : UF2_SIZE);
        }
    } else if (block_no < BPB_TOTAL_SECTORS) {
        sectionIdx -= FS_START_CLUSTERS_SECTOR;
        if (sectionIdx < NUM_FILES - 1) {
            // Static text files: copy their content into the sector.
            memcpy(data, info[sectionIdx].content, strlen(info[sectionIdx].content));
        } else { // generate the UF2 file data on-the-fly
            sectionIdx -= NUM_FILES - 1;
            uint32_t addr = USER_FLASH_START + (sectionIdx * UF2_FIRMWARE_BYTES_PER_SECTOR);
            if (addr < CFG_UF2_FLASH_SIZE) {
                // Each sector of CURRENT.UF2 is one UF2 block wrapping
                // 256 bytes read directly out of application flash.
                UF2_Block *bl = (void *)data;
                bl->magicStart0 = UF2_MAGIC_START0;
                bl->magicStart1 = UF2_MAGIC_START1;
                bl->magicEnd = UF2_MAGIC_END;
                bl->blockNo = sectionIdx;
                bl->numBlocks = UF2_SECTORS;
                bl->targetAddr = addr;
                bl->payloadSize = UF2_FIRMWARE_BYTES_PER_SECTOR;
                bl->flags = UF2_FLAG_FAMILYID;
                bl->familyID = CFG_UF2_FAMILY_APP_ID;
                memcpy(bl->data, (void *)addr, bl->payloadSize);
            }
        }
    }
}
/*------------------------------------------------------------------*/
/* Write UF2
*------------------------------------------------------------------*/
/**
* Write an uf2 block wrapped by 512 sector.
* @return number of bytes processed, only 3 following values
* -1 : if not an uf2 block
* 512 : write is successful (BPB_SECTOR_SIZE == 512)
* 0 : is busy with flashing, tinyusb stack will call write_block again with the same parameters later on
*/
int write_block (uint32_t block_no, uint8_t *data, WriteState *state)
{
  UF2_Block *bl = (void*) data;

  // Reject anything that is not a well-formed, flashable UF2 block.
  if ( !is_uf2_block(bl) ) return -1;

  switch ( bl->familyID )
  {
    case CFG_UF2_FAMILY_APP_ID:
      /* Upgrading Application
       *
       * SoftDevice is considered as part of application and can be (or not) included in uf2.
       *
       *                         -------------         -------------
       *                        |             |       |             |
       *                        | Bootloader  |       | Bootloader  |
       * BOOTLOADER_ADDR_START--|-------------|       |-------------|
       *                        |  App Data   |       |  App Data   |
       *     USER_FLASH_END  ---|-------------|       |-------------|
       *                        |             |       |             |
       *                        |             |       |     New     |
       *                        | Application | ----> | Application |
       *                        |             |       |             |
       *     USER_FLASH_START---|-------------|       |-------------|
       *                        |     MBR     |       |     MBR     |
       *                         -------------         -------------
       */
      if ( in_app_space(bl->targetAddr) )
      {
        PRINTF("Write addr = 0x%08lX, block = %ld (%ld of %ld)\r\n", bl->targetAddr, bl->blockNo, state->numWritten, bl->numBlocks);
        flash_nrf5x_write(bl->targetAddr, bl->data, bl->payloadSize, true);
      }else if ( bl->targetAddr < USER_FLASH_START )
      {
        // do nothing if writing to MBR, occurs when SD hex is included
        // keep going as successful write
        PRINTF("skip writing to MBR\r\n");
      }else
      {
        return -1;
      }
    break;

    case CFG_UF2_FAMILY_BOOT_ID:
      /* Upgrading Bootloader
       *
       * - For simplicity, the Bootloader Start Address is fixed for now.
       *
       * - Since SoftDevice is not part of Bootloader, it MUST NOT be included as part of uf2 file.
       *
       * - To prevent corruption/disconnection while transferring we don't directly write over Bootloader.
       * Instead it is written to highest possible address in Application region. Once everything is received
       * and verified, it is safely activated using MBR COPY BL command.
       *
       * - Along with bootloader code, UICR (at 0x10001000) is also included containing
       * 0x10001014 (bootloader address), and 0x10001018 (MBR Params address).
       *
       * Note: part of the existing application can be affected when updating bootloader.
       * TODO May be worth to have some kind of crc/application integrity checking
       *
       *                         -------------         -------------          -------------
       *                        |             |       |             |  +     |     New     |
       *                        | Bootloader  |       | Bootloader  |  +     | Bootloader  |
       * BOOTLOADER_ADDR_START--|-------------|       |-------------|  +     |-------------|
       *                        |  App Data   |       |  App Data   |  +     |  App Data   |
       *     USER_FLASH_END   --|-------------|       | ----------  |  +     |------------ |
       *                        |             |       |     New     | +      |             |
       *                        |             | --->  | Bootloader  |        |             |
       *                        |             |       |   ++++++    |        |             |
       *                        | Application |       | Application |        | Application |
       *                        |             |       |             |        |             |
       *                        |             |       |             |        |             |
       *     USER_FLASH_START---|-------------|       |-------------|        |-------------|
       *                        |     MBR     |       |     MBR     |        |     MBR     |
       *                         -------------         -------------          -------------
       */
      PRINTF("addr = 0x%08lX, block = %ld (%ld of %ld)\r\n", bl->targetAddr, bl->blockNo, state->numWritten, bl->numBlocks);
      state->update_bootloader = true;

      if ( in_uicr_space(bl->targetAddr) )
      {
        /* UICR contains bootloader & MBR address as follow:
         * - 0x10001014 bootloader address
         * - 0x10001018 MBR Params: mostly fixed
         *
         * Since the bootloader start address is fixed, we only use this for verification
         */
        uint32_t uicr_boot_addr;
        uint32_t uicr_mbr_param;
        memcpy(&uicr_boot_addr, bl->data + 0x14, 4);
        memcpy(&uicr_mbr_param, bl->data + 0x18, 4);

        // Check MBR params is fixed and prohibited to change and
        // Bootloader address against its new size
        if ( (uicr_boot_addr != BOOTLOADER_ADDR_START) ||
             (uicr_mbr_param != BOOTLOADER_MBR_PARAMS_PAGE_ADDRESS) )
        {
          PRINTF("Incorrect UICR value");
          PRINT_HEX(uicr_boot_addr);
          PRINT_HEX(uicr_mbr_param);

          state->aborted = true;
          return -1;
        }

        state->has_uicr = true;
      }
      else if ( in_bootloader_space(bl->targetAddr) )
      {
        // Bootloader CF2 config
        if ( !state->boot_id_matches && (bl->targetAddr >= ((uint32_t) bootloaderConfig)) )
        {
          // check if bootloader ID matches current VID/PID
          // (config is a sequence of 8-byte key/value pairs)
          for (uint32_t i=0; i < bl->payloadSize; i += 8)
          {
            uint32_t key;
            memcpy(&key, bl->data+i, 4);
            if ( key == CFG_BOOTLOADER_BOARD_ID )
            {
              uint32_t value;
              memcpy(&value, bl->data+i+4, 4);
              PRINTF("Bootloader ID = 0x%08lX and ", value);

              if ( value == ((USB_DESC_VID << 16) | USB_DESC_UF2_PID) )
              {
                PRINTF("matches our VID/PID\r\n");
                state->boot_id_matches = true;
                break;
              }
              else
              {
                PRINTF("DOES NOT mismatches our VID/PID\r\n");
                state->aborted = true;
                return -1;
              }
            }
          }
        }

        // Offset to write the new bootloader address (skipping the App Data)
        uint32_t const offset_addr = BOOTLOADER_ADDR_END-USER_FLASH_END;
        flash_nrf5x_write(bl->targetAddr-offset_addr, bl->data, bl->payloadSize, true);
      }
#if 0 // don't allow bundle SoftDevice to prevent confusion
      else if ( in_app_space(bl->targetAddr) )
      {
        // Should be Softdevice
        flash_nrf5x_write(bl->targetAddr, bl->data, bl->payloadSize, true);
      }
#endif
      else if ( bl->targetAddr < USER_FLASH_START )
      {
        PRINTF("skip writing to MBR\r\n");
      }
      else
      {
        state->aborted = true;
        return -1;
      }
    break;

    // unknown family ID
    default: return -1;
  }

  //------------- Update written blocks -------------//
  if ( bl->numBlocks )
  {
    // Update state num blocks if needed
    if ( state->numBlocks != bl->numBlocks )
    {
      // Inconsistent or oversized counts poison the state so the
      // "all blocks received" check below can never fire.
      if ( bl->numBlocks >= MAX_BLOCKS || state->numBlocks )
        state->numBlocks = 0xffffffff;
      else
        state->numBlocks = bl->numBlocks;
    }

    if ( bl->blockNo < MAX_BLOCKS )
    {
      uint8_t const mask = 1 << (bl->blockNo % 8);
      uint32_t const pos = bl->blockNo / 8;

      // only increase written number with new write (possibly prevent overwriting from OS)
      if ( !(state->writtenMask[pos] & mask) )
      {
        state->writtenMask[pos] |= mask;
        state->numWritten++;
      }

      // flush last blocks
      // TODO numWritten can be smaller than numBlocks if return early
      if ( state->numWritten >= state->numBlocks )
      {
        flash_nrf5x_flush(true);

        // Failed if update bootloader without UICR value
        if ( state->update_bootloader && !state->has_uicr )
        {
          state->aborted = true;
        }
      }
    }
  }

  STATIC_ASSERT(BPB_SECTOR_SIZE == 512); // if sector size changes, may need to re-validate this code
  return BPB_SECTOR_SIZE;
}
|
tipsyTentacle/blockStuck
|
src/CWindow.h
|
#include <iostream>
#include <vector>
#include <SDL2/SDL.h>
#include <SDL2/SDL_image.h>
#include "RES_PATH.h"
#include "tetrisGame.h"
//#include "CLEAN_UP.h"
#ifndef __APP_WINDOW_H__
#define __APP_WINDOW_H__
// ---- Window / layout constants (pixels) ----------------------------
const int SCREEN_X_TOP = 0;
const int SCREEN_Y_TOP = 0;
const int SCREEN_WIDTH = 640;
const int SCREEN_HEIGHT = 480;
// Size of one tetromino tile.
const int TILE_SIZE = 15;
const int GameWindowMargin = 6;
// X/Y offset of the block grid inside the game window.
const int BlockOffset [] = { 15 , 13 };
// Rendered play-field size; CGridLength/CGridHeight come from tetrisGame.h.
const int RenderGridWidth = TILE_SIZE * (CGridLength + 1);
const int RenderGridHeight = TILE_SIZE * (CGridHeight);
const char WINDOW_NAME [] = "blockStuck";
// ---- Image assets ---------------------------------------------------
// Sub-directory (under the resource path) holding the block sprites.
const char BLOCK_FOLDER [] = "colorblocks/";
// Images loaded at startup; the sentinel "NULL" entry terminates the list.
const std::string IMAGE_NAMES [] = { "window_blue_0.png", "borderdecoration.png", "BlankValentinesPanel.png",
    std::string(BLOCK_FOLDER) + "lightblue.png",
    std::string(BLOCK_FOLDER) + "darkgray.png",
    std::string(BLOCK_FOLDER) + "purple.png",
    std::string(BLOCK_FOLDER) + "green.png",
    std::string(BLOCK_FOLDER) + "red2.png",
    std::string(BLOCK_FOLDER) + "blue.png",
    std::string(BLOCK_FOLDER) + "orange.png",
    "NULL"};
// Sentinel value marking the end of IMAGE_NAMES.
const char END_NAME [] = "NULL";
// Texture indices; TBlock..EndTBlock span the block-colour textures
// (EndTBlock = TBlock + 6 matches the seven block sprites above).
enum TextureName {TBackground, TBorderDecoration, TGameWindow, TBlock, EndTBlock = TBlock + 6};
enum ImageName {IBackground, IGameWindow, ICursor};
// A positioned SDL texture: texture pointer plus on-screen position and
// size.  NOTE(review): texture ownership is not visible from this header;
// check ~Image() in the .cpp before assuming it destroys CTexture.
struct Image
{
    SDL_Texture* CTexture;
    int XCoor;     // current x position (pixels)
    int YCoor;     // current y position (pixels)
    int CWidth;    // draw width (pixels)
    int CHeight;   // draw height (pixels)
    // Size-less overload presumably queries the texture for its
    // dimensions -- confirm in the implementation.
    Image(SDL_Texture* iTexture, int iX, int iY);
    Image(SDL_Texture* iTexture, int iX, int iY, int iW, int iH);
    ~Image();
    void render(SDL_Renderer* iRen);  // draw at the stored position
    bool move(int iX, int iY);        // relative move, judging by the name -- confirm
    bool set(int iX, int iY);         // absolute move, judging by the name -- confirm
};
// Owns the SDL window/renderer, the loaded textures/images and the
// TetrisGame instance; init() sets everything up, run() drives the
// application, quit() tears it down.
class AppWindow
{
private:
    bool CRunning;                           // set false to leave the main loop
    SDL_Window* CWindow;
    SDL_Renderer* CRenderer;
    std::vector<Image> CImageList;           // positioned images to draw
    std::vector<SDL_Texture*> CTextureList;  // indexed by TextureName
    std::string CResPath;                    // resource directory (see RES_PATH.h)
    SDL_Rect CGameWindow;                    // play-field rectangle
    TetrisGame CGame;
public:
    AppWindow();
    ~AppWindow();
    int init();            // create window/renderer, load assets; return code -- see .cpp
    void run();
    void quit();
    bool isRunning();
private:
    bool loadImages();     // load IMAGE_NAMES into CTextureList
    void render();
    void handleEvents();
};
// Free helpers for SDL error logging and texture loading/drawing.
void logSDLError(std::ostream &os, const std::string &msg);
SDL_Texture* loadTexture(const std::string &file, SDL_Renderer* iRen);
// Draw at (iX, iY); the 4-argument overload also scales to iW x iH.
void renderTexture(SDL_Texture* iTexture, SDL_Renderer* iRen, int iX, int iY);
void renderTexture(SDL_Texture* iTexture, SDL_Renderer* iRen, int iX, int iY, int iW, int iH);
void tileTexture(SDL_Texture* iTexture, SDL_Renderer* iRen, int iX, int iY);
void centreImage(Image &iImage);
#endif
|
tipsyTentacle/blockStuck
|
src/tetrisGame.h
|
#include <iostream>
#include <vector>
#include <random>
#include <map>
#include <utility>
#ifndef __TETRIS_GAME_H__
#define __TETRIS_GAME_H__
// Play-field dimensions in tiles.
const int CGridHeight = 22;
const int CGridLength = 10;
// Rotation step in degrees.
const int CRotationAmount = 90;
enum colour {CYAN, YELLOW, PURPLE, GREEN, RED, BLUE, ORANGE, DEBUG};
enum status {ERROR, RUNNING, PAUSED};
// NOTE(review): per-piece centre data, presumably indexed by colour --
// confirm against BlockCollection::getCentre() in the .cpp.
const int CTetrominoCentre [] = {2, 0, 1, 1, 3, 1, 1};
const int CMillisecondsPerLevel = 1000;
// One cell of a tetromino: a position (stored as float) plus a colour.
class Block
{
private:
    float XCoor;
    float YCoor;
    colour cColour;
public:
    Block(int,int); //to make a point
    Block(int,int,colour);
    Block(float,float,colour);
    ~Block();
    bool move(int,int);     // relative move, judging by the name -- confirm
    float getX();
    float getY();
    colour getColour();
    bool moveTo(int,int);   // absolute move, judging by the name -- confirm
    bool isEqual(Block iBlock);
};
// A group of blocks moved/rotated as one unit -- the player's falling
// tetromino.  Also the base class of the settled stack (below), hence
// the virtual mutators.
class BlockCollection
{
private:
    unsigned cRotation;               // accumulated rotation state
    std::vector<Block> cBlockList;
public:
    BlockCollection();
    ~BlockCollection();
    virtual bool addBlock(Block);
    virtual bool eraseBlockAt(int,int);
    bool eraseSelf();
    virtual bool eraseLine(int); //will also make above blocks fall
    virtual bool makeBlocksAboveFall(int);
    virtual bool mergeCollection(BlockCollection&);
    bool move(int,int);
    int getSize();
    int getBottomY();
    virtual bool rotate();
    std::vector<Block> getBlockList();
    virtual Block* findBlock(int iX, int iY);
    bool blockExist(Block);
private:
    // Rotation-centre helpers; the CYAN (I) and GREEN pieces get
    // special-cased offsets -- see the .cpp for the exact rules.
    Block getCentre();
    Block getCyanCentre();
    int getXOffset();
    int getCyanXOffset();
    int getGreenXOffset();
    int getYOffset();
    int getCyanYOffset();
    int getGreenYOffset();
};
// The settled stack of blocks.  Adds a lookup table keyed by an int
// (presumably a packed x/y position -- confirm in the .cpp) so that
// findBlock/eraseLine avoid linear scans.
class StaticBlockCollection : public BlockCollection
{
private:
    std::map<int, Block*> cBlockTable;
public:
    StaticBlockCollection();
    bool addBlock(Block);
    bool eraseBlockAt(int,int);
    bool eraseLine(int);
    bool updateBlocks();   // rebuild/refresh cBlockTable -- see .cpp
    bool mergeCollection(BlockCollection&);
    Block* findBlock(int iX, int iY);
};
// Game state machine: the falling tetromino, the settled stack, the
// level timer and collision/line-clear rules.
class TetrisGame
{
private:
    int cSeed;                          // RNG seed for piece selection
    int cLevel;
    status cStatus;
    int cTick;                          // elapsed-time accumulator
    int cGrid [CGridHeight][CGridLength];
    bool cMergeFlag;                    // set when the piece should merge into the stack -- confirm
    BlockCollection playerTetromino;
    StaticBlockCollection tetrominoStack;
public:
    TetrisGame(int iLevel = 1, int iSeed = 0);
    ~TetrisGame();
    bool moveBlock();                   // gravity step
    bool moveBlock(int iX, int iY);     // player-directed move
    bool rotateBlock();
    bool iterateLevel(int iMilliseconds);
    std::vector<Block> getBlockList();  // all blocks, for rendering
private:
    void addBlock();                    // spawn the next tetromino
    bool eraseLine(int iY);
    bool checkCollision();
    bool checkCollision(BlockCollection iBlockCollection);
    bool isLineFull(int iY);
    bool eraseFullLines();
};
// Piece factory helpers, presumably used when spawning a new tetromino.
colour randomColour();
BlockCollection makeBlock(colour iBlockColour);
#endif
|
tipsyTentacle/blockStuck
|
src/RES_PATH.h
|
<reponame>tipsyTentacle/blockStuck<filename>src/RES_PATH.h
#ifndef __RES_PATH_H__
#define __RES_PATH_H__
#include <iostream>
#include <string>   // FIX: header uses std::string but never included <string>
#include <SDL2/SDL.h>
// Return the resource directory, optionally with subDir appended.
std::string getResourcePath(const std::string &subDir = "");
#endif
|
jaeh/node-zopfli
|
src/png/zopflipng.h
|
#include "napi.h"
#ifndef NODE_ZOPFLI_PNG_H_
#define NODE_ZOPFLI_PNG_H_
// N-API entry point compressing PNG data with zopflipng; defined
// elsewhere in the addon.
Napi::Value PNGDeflate(const Napi::CallbackInfo& info);
#endif
|
CMingTseng/Cube_Assistant
|
app/src/main/cpp/cubex.h
|
<filename>app/src/main/cpp/cubex.h
/*
* cubex.h
* Cubex by <NAME> (c) 2003
* Cube Puzzle and Universal Solver.
* Notes: readme.txt Email: <EMAIL>
* NOTE: This program is unaffiliated with the Rubik's Cube Trademark.
* This program MAY NOT be reproduced or modified outside the licensing terms
* set forth in the readme.
*/
#ifndef _CUBEX_H_
#define _CUBEX_H_
// required includes/namespace
#include <string>
using namespace std;
// Class declaration - class members/methods, some encapsulated
// NxNxN cube puzzle model plus universal solver.  Cube state lives in
// Cub[][][]; SolveCube() fills `solution` with a move string built from
// the phase routines in the private section.
class Cubex
{
public:
    Cubex();
    virtual ~Cubex();
    static int numcubes;            // NOTE(review): presumably counts instances -- confirm in .cpp
    const static char* ver;
    const static char* author;
    const static int N = 3; // <-- size of the cube (NxNxN)
    const static int MOV = 8;
    const bool operator==(const Cubex &q);
    const bool operator!=(const Cubex &q);
    int *face(int x, int y, int z);
    const void RenderScreen();
    const bool IsSolved();
    const void ResetCube();
    // Parameterized slice moves (a = slice index, n = direction? --
    // confirm semantics in the .cpp).
    const bool XML(int a, bool n);
    const bool XMR(int a, bool n);
    const bool XMU(int a, bool n);
    const bool XMD(int a, bool n);
    const bool XMC(int a, bool n);
    const bool XMA(int a, bool n);
    // Fixed face/slice turns, two-letter move names.
    const void UL();
    const void UR();
    const void DL();
    const void DR();
    const void LU();
    const void LD();
    const void RU();
    const void RD();
    const void FC();
    const void FA();
    const void BC();
    const void BA();
    const void ML();
    const void MR();
    const void MU();
    const void MD();
    const void MC();
    const void MA();
    const void CL();
    const void CR();
    const void CU();
    const void CD();
    const void CC();
    const void CA();
    const void XCL();
    const void XCR();
    const void XCU();
    const void XCD();
    const void XCC();
    const void XCA();
    const void ScrambleCube();
    const void DoSolution();
    const int SolveCube();
    // Cube state; the +2 border presumably simplifies neighbour access -- confirm.
    int Cub[N+2][N+2][N+2];
    bool shorten;        // when set, solution strings get post-processed? see Concise/Efficient
    bool cubeinit;
    int cenfix;
    int mov[MOV+1];
    int erval;           // last error value
    string solution;     // accumulated move string from SolveCube()
    const int FindCent(int a);
    const int FindEdge(int a, int b);
    const int FindCorn(int a, int b, int c);
    const string Concise(string a);
    const string Efficient(string a);
    int fx;
    int fy;
    int fz;
protected:
private:
    const void Ctemp();
    // Solver phases, run in layer-by-layer order.
    const string TopEdges();
    const string TopCorners();
    const string MiddleEdges();
    const string BottomEdgesOrient();
    const string BottomEdgesPosition();
    const string BottomCornersPosition();
    const string BottomCornersOrient();
    const string CentersRotate();
    int Tmp[N+2][N+2][N+2];   // scratch copy of Cub used by Ctemp()
};
// end of header
#endif /* _CUBEX_H_ */
// many of the routines have been generalized for NxNxN, with a few exceptions,
// mainly to accomadate the CentersRotate feature.
|
CMingTseng/Cube_Assistant
|
app/src/main/cpp/colordecoder.c
|
#include <stdint.h>
#include <stdlib.h>

#include <jni.h>

#include <android/bitmap.h>
#include <android/log.h>
#define MIN(a,b) ((a)>(b)?(b):(a))
#define MAX(a,b) ((a)<(b)?(b):(a))
#define LOG_TAG "libcolordecoder"
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
// com.droidtools.rubiksolver.ColorDecoder
JNIEXPORT jobjectArray JNICALL Java_cubeassistant_com_ColorDecoder_nativeSobelData(JNIEnv * env, jobject obj, jobject bitmap)
{
AndroidBitmapInfo info;
uint32_t *pixels;
uint32_t pixel;
int ret,i,j,x,y,r,g,b,horizSobel,vertSobel,imageIndex;
jobjectArray result;
int sob[3][3];
jint* data;
jintArray iarr;
jclass intArrCls = (*env)->FindClass(env, "[I");
if (intArrCls == NULL) {
LOGE("FindClass intArrCld failed ! error=%d", 0);
return NULL; /* exception thrown */
}
if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
LOGE("AndroidBitmap_getInfo() failed ! error=%d", ret);
return;
}
if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888 && info.format != ANDROID_BITMAP_FORMAT_RGB_565) {
LOGE("Bitmap format is not RGBA_8888 ! format=%d", info.format);
return;
}
if ((ret = AndroidBitmap_lockPixels(env, bitmap, (void **)(&pixels))) < 0) {
LOGE("AndroidBitmap_lockPixels() failed ! error=%d", ret);
}
result = (*env)->NewObjectArray(env, info.width, intArrCls,
NULL);
/*for (i = 0; i < info.width; i++) {
jint* tmp = malloc(info.height * sizeof(jint));
jintArray iarr = (*env)->NewIntArray(env, info.height);
if (tmp == NULL || iarr == NULL) {
LOGE("Out of memory ! error=%d", 0);
return NULL; /* out of memory error thrown /
}
for (j = 0; j < info.height; j++) {
tmp[j] = 0;
}
(*env)->SetIntArrayRegion(env, iarr, 0, info.height, tmp);
(*env)->SetObjectArrayElement(env, result, i, iarr);
(*env)->DeleteLocalRef(env, iarr);
free(tmp);
}*/
imageIndex = 0;
data = malloc(info.height * sizeof(jint));
if (data == NULL)
{
LOGE("Memory allocation failed - A");
return NULL;
}
if (data == NULL) { LOGE("Out of memory ! error=%d", 0); return NULL; }
for (j = 0; j < info.height; j++) {
data[j] = 0;
}
iarr = (*env)->NewIntArray(env, info.height);
if (iarr == NULL) { LOGE("Out of memory ! error=%d", 0); return NULL; }
(*env)->SetIntArrayRegion(env, iarr, 0, info.height, data);
(*env)->SetObjectArrayElement(env, result, imageIndex, iarr);
imageIndex++;
(*env)->DeleteLocalRef(env, iarr);
free(data);
int imW = info.width - 1;
int imH = info.height - 1;
//LOGE("STARTING COPY");
for (x=1; x<imW; x++) {
data = malloc(info.height * sizeof(jint));
if (data == NULL)
{
LOGE("Memory allocation failed - B");
return NULL;
}
data[0] = 0;
//LOGE("STARTING ITER");
for (y=1; y<imH; y++) {
for (i=-1; i<=1; i++) {
//LOGE("READING X1");
for (j=-1; j<=1; j++) {
int xx = x+i;
int yy = y+j;
if (info.format == ANDROID_BITMAP_FORMAT_RGB_565) {
//LOGE("READING X2");
//pixels = (char*)pixels + info->stride;
//pixel = pixels[( ((char*)pixels + info.stride*yy) +(xx % info.width) )];//image.getPixel(x+i,y+j);
pixel = ((char*)pixels + info.stride*yy)[xx % info.width]; //pixels[( ((char*)pixels + info.stride*yy) +(xx % info.width) )];
r = (int) ((pixel & 0x0000f800) >> 11);
g = (int)((pixel& 0x000007e0) >> 5);
b = (int) (pixel & 0x000001F );
} else {
//pixel = pixels[( ((char*)pixels + info.stride*yy) + (xx % info.width) )];//image.getPixel(x+i,y+j);
pixel = ((char*)pixels + info.stride*yy)[xx % info.width];
r = (int) ((pixel & 0x00FF0000) >> 16);
g = (int)((pixel& 0x0000FF00) >> 8);
b = (int) (pixel & 0x00000FF );
}
sob[i+1][j+1] = (int) (r * 299.0/1000 + g * 587.0/1000 + b * 114.0/1000);
}
}
horizSobel = -(sob[1-1][1-1]) +
(sob[1+1][1-1]) -
(sob[1-1][1]) - (sob[1-1][1]) +
(sob[1+1][1]) + (sob[1+1][1]) -
(sob[1-1][1+1]) +
(sob[1+1][1+1]);
vertSobel = -(sob[1-1][1-1]) -
(sob[1][1-1]) - sob[1][1-1] -
(sob[1+1][1-1]) +
(sob[1-1][1+1]) +
(sob[1][1+1]) + (sob[1][1+1]) +
(sob[1+1][1+1]);
data[y] = MIN(255, MAX(0, (horizSobel+vertSobel)/2));
//(*env)->SetObjectArrayElement(env, ((*env)->GetObjectArrayElement(env, result, x)), y, val);
}
//LOGE("FINISHED ITER");
data[imH+1] = 0;
iarr = (*env)->NewIntArray(env, info.height);
if (iarr == NULL) { LOGE("Out of memory ! error=%d", 0); return NULL; }
(*env)->SetIntArrayRegion(env, iarr, 0, info.height, data);
(*env)->SetObjectArrayElement(env, result, imageIndex, iarr);
imageIndex++;
(*env)->DeleteLocalRef(env, iarr);
free(data);
}
data = malloc(info.height * sizeof(jint));
if (data == NULL)
{
LOGE("Memory allocation failed - C");
return NULL;
}
if (data == NULL) { LOGE("Out of memory ! error=%d", 0); return NULL; }
for (j = 0; j < info.height; j++) {
data[j] = 0;
}
iarr = (*env)->NewIntArray(env, info.height);
if (iarr == NULL) { LOGE("Out of memory ! error=%d", 0); return NULL; }
(*env)->SetIntArrayRegion(env, iarr, 0, info.height, data);
(*env)->SetObjectArrayElement(env, result, imageIndex, iarr);
imageIndex++;
(*env)->DeleteLocalRef(env, iarr);
free(data);
AndroidBitmap_unlockPixels(env, bitmap);
return result;
}
|
oceaquaris/biomics
|
genomics/tools/TASSEL5hmp2numhmp.c
|
/**
* @file TASSEL5hmp2numhmp.c
* @version 0.1
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
static int file_exists(const char *filename);
static int cpyline(FILE *src, FILE *dest, int *exitstate);
static int cpycol(FILE *src, FILE *dest, int *exitstate);
static int parsealleles(FILE *src, FILE *dest, int *parsestate, int *exitstate);
static int parseSNP(FILE *src, FILE *dest, int allelestate, int *exitstate);
static int parseline(FILE *src, FILE *dest, int *exitstate);
/* Command-line help text, printed on bad or missing arguments. */
static const char usage[] =
"Usage: TASSEL5hmp2numhmp [INFILE] [OUTFILE]\n"\
" Where:\n"\
" INFILE the name of a TASSEL 5 HapMap file (tab-delimited text).\n"\
" OUTFILE is the name of the converted numerical HapMap file.\n";
/* Convert a TASSEL 5 HapMap file (argv[1]) to a numerical HapMap file
 * (argv[2]).  The header row is copied verbatim; every following row is
 * translated by parseline().  Returns 0 on success, 1 on any error. */
int main(int argc, char *argv[]) {
    if (argc != 3) {
        /* fputs avoids passing a non-literal to printf's format slot. */
        fputs(usage, stdout);
        return 0;
    }
    if (!file_exists(argv[1])) {
        printf("The INFILE \"%s\" does not exist.\n", argv[1]);
        fputs(usage, stdout);
        return 1;
    }
    FILE *infile = fopen(argv[1], "r");
    if (infile == NULL) { /* FIX: fopen results were previously unchecked */
        fprintf(stderr, "Could not open INFILE \"%s\" for reading.\n", argv[1]);
        return 1;
    }
    FILE *outfile = fopen(argv[2], "w");
    if (outfile == NULL) {
        fprintf(stderr, "Could not open OUTFILE \"%s\" for writing.\n", argv[2]);
        fclose(infile);
        return 1;
    }
    int tmpexitstate;
    /* Copy the column-header row through unchanged. */
    cpyline(infile, outfile, &tmpexitstate);
    if (tmpexitstate == 0) {
        fclose(infile);
        fclose(outfile);
        fprintf(stderr, "Malformed column header row. Exiting.\n");
        return 1;
    }
    /* Translate data rows until EOF (exitstate 0). */
    while (tmpexitstate != 0) {
        parseline(infile, outfile, &tmpexitstate);
    }
    fclose(infile);
    fclose(outfile);
    return 0;
}
/* Return 1 when `filename` can be opened for reading, else 0. */
static int file_exists(const char *filename) {
    FILE *fp = fopen(filename, "r");
    if (fp == NULL) {
        return 0;
    }
    fclose(fp);
    return 1;
}
/* Copy characters from src to dest up to and including the next '\n'.
 * On return *exitstate is the terminating character, or 0 if EOF came
 * first.  Returns the number of characters copied. */
static int cpyline(FILE *src, FILE *dest, int *exitstate) {
    int copied = 0;
    int c;
    /* BUG FIX: the original read `(c = fgetc(src) != EOF)`; due to
     * precedence that stored the comparison result (0/1) in c, so every
     * copied character became '\1'. */
    while ((c = fgetc(src)) != EOF) {
        fputc(c, dest);
        copied++;
        /* '\n' also terminates CRLF lines; the original's extra
         * comparison against the multi-character constant '\r\n' could
         * never match an fgetc() result. */
        if (c == '\n') {
            *exitstate = c;
            return copied;
        }
    }
    *exitstate = 0;
    return copied;
}
/* Copy one column from src to dest, up to and including the next '\t'
 * (column separator) or '\n' (end of row).  On return *exitstate is
 * the terminating character, or 0 if EOF came first.  Returns the
 * number of characters copied. */
static int cpycol(FILE *src, FILE *dest, int *exitstate) {
    int copied = 0;
    int c;
    /* BUG FIX: the original read `(c = fgetc(src) != EOF)`; due to
     * precedence that stored the comparison result (0/1) in c. */
    while ((c = fgetc(src)) != EOF) {
        fputc(c, dest);
        copied++;
        /* '\n' also covers CRLF; the original's extra comparison
         * against the multi-character constant '\r\n' could never
         * match an fgetc() result. */
        if (c == '\t' || c == '\n') {
            *exitstate = c;
            return copied;
        }
    }
    *exitstate = 0;
    return copied;
}
/* Nucleotides as bit flags so a heterozygous call can be represented
 * as the OR of its two bases. */
enum nucleotide {
    NUCLEOTIDE_A = 1,
    NUCLEOTIDE_C = 2,
    NUCLEOTIDE_G = 4,
    NUCLEOTIDE_T = 8,
    NUCLEOTIDE_ERROR = 16
};
/* Parse an allele column of the form "X/Y" followed by a field
 * terminator ('\t' or '\n'), copying it verbatim to dest.
 * *parsestate receives the OR of the nucleotide flags seen; *exitstate
 * receives the terminator, or 0 on EOF/malformed input.  Returns the
 * number of characters copied to dest. */
static int parsealleles(FILE *src, FILE *dest, int *parsestate, int *exitstate) {
    int copied = 0;
    int c;
    int state;
    //set parsestate to zero
    *parsestate = 0;
    for (state = 1; state < 5; state++) {
        c = fgetc(src);
        if (c == EOF) {
            *exitstate = 0;
            return copied;
        }
        switch (state) {
        case 1: /* first allele */
        case 3: /* second allele */
            switch (c) {
            case 'A':
            case 'a':
                *parsestate |= NUCLEOTIDE_A;
                break;
            case 'C':
            case 'c':
                *parsestate |= NUCLEOTIDE_C;
                break;
            case 'G':
            case 'g':
                *parsestate |= NUCLEOTIDE_G;
                break;
            case 'T':
            case 't':
                *parsestate |= NUCLEOTIDE_T;
                break;
            default:
                /* unknown base: copied through, no flag recorded */
                break;
            }
            /* BUG FIX: the original did `copied += fputc(c, dest)`,
             * accumulating character codes instead of a count; count
             * characters, matching cpyline()/cpycol(). */
            fputc(c, dest);
            copied++;
            break;
        case 2: /* allele separator */
            if (c != '/') {
                *exitstate = 0;
                return copied;
            }
            fputc(c, dest);
            copied++;
            break;
        case 4: /* field terminator */
            /* '\n' also covers CRLF; the original's comparison against
             * the multi-character constant '\r\n' could never match. */
            if (c == '\t' || c == '\n') {
                *exitstate = c;
                fputc(c, dest);
                copied++;
                break;
            }
            /* fallthrough */
        default:
            *exitstate = 0;
            return copied;
        }
    }
    return copied;
}
/* Map one base character to its NUCLEOTIDE_* bit, or 0 if not a base. */
static int parseSNP_basebit(int c) {
    switch(c) {
    case 'A': case 'a': return NUCLEOTIDE_A;
    case 'C': case 'c': return NUCLEOTIDE_C;
    case 'G': case 'g': return NUCLEOTIDE_G;
    case 'T': case 't': return NUCLEOTIDE_T;
    default: return 0;
    }
}
/*
 * Parse a two-character genotype column (e.g. "AC") followed by a
 * terminator ('\t' or '\n') and write its numeric recoding to dest:
 *   "-1"  homozygous for the first (lower-bit) allele,
 *   "0"   heterozygous,
 *   "1"   homozygous for the second allele,
 *   "na"  anything unexpected.
 * allelestate must be the OR of exactly two NUCLEOTIDE_* base bits;
 * otherwise the function fails the same way the original did.
 * *exitstate receives the terminator, or 0 on EOF/malformed input.
 * Returns the number of characters written to dest.
 *
 * Bugs fixed relative to the original: every case in its duplicated
 * allelestate/snp switch blocks fell through into the next case (missing
 * `break`), corrupting snp and emitting multiple codes; fgetc() results
 * were compared against the multi-character constant '\r\n' (never
 * matches); and "copied += fputs(...)" did not accumulate a character
 * count.  The six near-identical switch blocks are collapsed into bit
 * arithmetic on the allele pair.
 */
static int parseSNP(FILE *src, FILE *dest, int allelestate, int *exitstate) {
    int copied = 0;
    int c;
    int state;
    int snp = 0;
    const char *code;
    int low = allelestate & -allelestate;   /* lower-valued allele bit */
    int high = allelestate & ~low;          /* the other allele bit */
    /* exactly two bits, both within the four base bits */
    int valid_pair = low != 0 && high != 0 && (high & (high - 1)) == 0 &&
        (allelestate & ~(NUCLEOTIDE_A | NUCLEOTIDE_C |
                         NUCLEOTIDE_G | NUCLEOTIDE_T)) == 0;
    for(state = 1; state < 4; state++) {
        c = fgetc(src);
        if(c == EOF) {
            *exitstate = 0;
            return copied;
        }
        if(state < 3) {
            int bit;
            if(!valid_pair) {
                /* unknown allele pair: bail out as the original did */
                *exitstate = 0;
                return copied;
            }
            bit = parseSNP_basebit(c);
            if(bit != 0 && (bit & allelestate) != 0) {
                snp |= bit;
            } else {
                snp |= NUCLEOTIDE_ERROR;
            }
        } else {
            if(c == '\t' || c == '\n') {
                *exitstate = c;
            } else {
                *exitstate = 0;
                return copied;
            }
        }
    }
    if((snp & NUCLEOTIDE_ERROR) != 0) {
        code = "na";
    } else if(snp == low) {
        code = "-1";
    } else if(snp == allelestate) {
        code = "0";
    } else if(snp == high) {
        code = "1";
    } else {
        code = "na";
    }
    fputs(code, dest);
    copied += (int)strlen(code);
    fputc(*exitstate, dest);
    copied++;
    return copied;
}
/*
 * Parse one data row: copy the id column, parse the allele column, copy
 * nine further metadata columns verbatim, then recode every genotype
 * column until end of line.  *exitstate receives the line terminator on
 * success, 0 on EOF or malformed input.  Returns characters written.
 */
static int parseline(FILE *src, FILE *dest, int *exitstate) {
    int copied = 0;
    int tmpexitstate;
    int allelestate;
    int i;
    copied += cpycol(src, dest, &tmpexitstate);
    if(tmpexitstate == 0) {
        *exitstate = 0;
        return copied;
    }
    copied += parsealleles(src, dest, &allelestate, &tmpexitstate);
    if(tmpexitstate == 0) {
        *exitstate = 0;
        return copied;
    }
    for(i = 0; i < 9; i++) {
        copied += cpycol(src, dest, &tmpexitstate);
        if(tmpexitstate == 0) {
            *exitstate = 0;
            return copied;
        }
    }
    do {
        copied += parseSNP(src, dest, allelestate, &tmpexitstate);
        if(tmpexitstate == 0) {
            *exitstate = 0;
            return copied;
        }
        /* Continue while columns end in '\t'; stop at end of line.  The
         * original condition (x != '\n' || x != '\r\n') was a tautology,
         * so the loop could only ever exit through the error path. */
    } while(tmpexitstate != '\n');
    *exitstate = tmpexitstate;
    return copied;
}
|
rod-lin/c0re
|
kernel/mem/smfifo.h
|
#ifndef _KERNEL_MEM_SMFIFO_H_
#define _KERNEL_MEM_SMFIFO_H_
/* swap manager using FIFO page replacement */
#include "mem/swap.h"
/* FIFO implementation of the swap_manager_t interface; the definition
 * lives in the matching .c file. */
extern swap_manager_t swap_manager_fifo;
#endif
|
rod-lin/c0re
|
kernel/fs/swapfs.h
|
<reponame>rod-lin/c0re<filename>kernel/fs/swapfs.h<gh_stars>1-10
#ifndef _KERNEL_FS_SWAPFS_H_
#define _KERNEL_FS_SWAPFS_H_
#include "mem/mmu.h"
#include "mem/swap.h"
/* Initialize the swap filesystem; returns the maximum swap offset
 * (number of usable swap slots). */
size_t swapfs_init();
/* Read/write one page to/from the swap slot named by "entry".
 * NOTE(review): return convention (0 = success?) not visible here —
 * confirm against the implementation. */
int swapfs_read(swap_entry_t entry, page_t *page);
int swapfs_write(swap_entry_t entry, page_t *page);
#endif
|
rod-lin/c0re
|
kernel/lib/debug.c
|
<gh_stars>1-10
#include "pub/x86.h"
#include "lib/io.h"
#include "lib/debug.h"
/* Print a panic banner (source file, line, printf-style message) to the
 * kernel console, then halt the CPU.  Does not return. */
void _panic(char *file, int line, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    kprintf("c0re panicked at %s: line %d: ", file, line);
    vkprintf(fmt, ap);
    kputc('\n');
    va_end(ap);
    hlt(); /* stop the CPU; execution does not continue past here */
}
|
rod-lin/c0re
|
kernel/driver/clock.c
|
#include "pub/x86.h"
#include "intr/trap.h"
#include "driver/pic.h"
#include "driver/clock.h"
/* *
* support for time-related hardware gadgets - the 8253 timer,
* which generates interruptes on IRQ-0.
* */
#define IO_TIMER1 0x040 // 8253 Timer #1
/* *
* Frequency of all three count-down timers; (TIMER_FREQ/freq)
* is the appropriate count to generate a frequency of freq Hz.
* */
#define TIMER_TICK_PER_SEC CLOCK_TICK_PER_SEC
#define TIMER_FREQ 1193182
#define TIMER_DIV(x) ((TIMER_FREQ + (x) / 2) / (x)) // x in ms
#define TIMER_MODE (IO_TIMER1 + 3) // timer mode port
#define TIMER_SEL0 0x00 // select counter 0
#define TIMER_RATEGEN 0x04 // mode 2, rate generator
#define TIMER_16BIT 0x30 // r/w counter 16 bits, LSB first
static volatile size_t ticks;
/* Return the number of timer interrupts observed since clock_init(). */
long clock_tick()
{
    return ticks;
}
/* Advance the tick counter; called from the IRQ_TIMER interrupt handler. */
void _clock_inc()
{
    ticks += 1;
}
/* *
 * clock_init - initialize 8253 clock to interrupt 100 times per second,
 * and then enable IRQ_TIMER.
 * */
void
clock_init(void) {
    // set 8253 timer-chip: counter 0, rate generator, 16-bit divisor
    outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
    // divisor is written LSB first, then MSB
    outb(IO_TIMER1, TIMER_DIV(TIMER_TICK_PER_SEC) & 0xff); // low byte
    outb(IO_TIMER1, TIMER_DIV(TIMER_TICK_PER_SEC) >> 8);   // high byte
    ticks = 0;
    pic_enable(IRQ_TIMER);
}
|
rod-lin/c0re
|
kernel/intr/trap.h
|
<reponame>rod-lin/c0re<gh_stars>1-10
#ifndef _KERNEL_INTR_TRAP_H_
#define _KERNEL_INTR_TRAP_H_
#include "pub/com.h"
#include "pub/x86.h"
#define intr_enable() sti()
#define intr_disable() cli()
/* trap Numbers */
/* processor-defined: */
#define TRAPNO_DIVIDE 0 // divide error
#define TRAPNO_DEBUG 1 // debug exception
#define TRAPNO_NMI 2 // non-maskable interrupt
#define TRAPNO_BRKPT 3 // breakpoint
#define TRAPNO_OFLOW 4 // overflow
#define TRAPNO_BOUND 5 // bounds check
#define TRAPNO_ILLOP 6 // illegal opcode
#define TRAPNO_DEVICE 7 // device not available
#define TRAPNO_DBLFLT 8 // double fault
#define TRAPNO_COPROC 9 // reserved (not used since 486)
#define TRAPNO_TSS 10 // invalid task switch segment
#define TRAPNO_SEGNP 11 // segment not present
#define TRAPNO_STACK 12 // stack exception
#define TRAPNO_GPFLT 13 // general protection fault
#define TRAPNO_PGFLT 14 // page fault
#define TRAPNO_RES 15 // reserved
#define TRAPNO_FPERR 16 // floating point error
#define TRAPNO_ALIGN 17 // aligment check
#define TRAPNO_MCHK 18 // machine check
#define TRAPNO_SIMDERR 19 // SIMD floating point error
#define TRAPNO_SYSCALL 0x80 // SYSCALL, ONLY FOR THIS PROJ
/* hardware IRQ numbers. We receive these as (IRQ_OFFSET + IRQ_xx) */
#define IRQ_OFFSET 32 // IRQ 0 corresponds to int IRQ_OFFSET
#define IRQ_TIMER 0
#define IRQ_KBD 1
#define IRQ_COM1 4
#define IRQ_IDE1 14
#define IRQ_IDE2 15
#define IRQ_ERROR 19
#define IRQ_SPURIOUS 31
/**
* these are arbitrarily chosen, but with care not to overlap
* processor defined exceptions or interrupt vectors.
**/
#define TRAPNO_SWITCH_TOU 120 // to user switch
#define TRAPNO_SWITCH_TOK 121 // to kernel switch
/* CPU state saved on the kernel stack on interrupt/exception entry; the
 * layout must match the push order in the trap entry assembly exactly. */
typedef struct {
    /* registers as pushed by pushal */
    struct pushregs {
        uint32_t reg_edi;
        uint32_t reg_esi;
        uint32_t reg_ebp;
        uint32_t reg_oesp; /* useless */
        uint32_t reg_ebx;
        uint32_t reg_edx;
        uint32_t reg_ecx;
        uint32_t reg_eax;
    } tf_regs;
    /* segment registers pushed as 16-bit values; padding keeps 32-bit slots */
    uint16_t tf_gs;
    uint16_t tf_padding0;
    uint16_t tf_fs;
    uint16_t tf_padding1;
    uint16_t tf_es;
    uint16_t tf_padding2;
    uint16_t tf_ds;
    uint16_t tf_padding3;
    uint32_t tf_trapno;
    /* below here defined by x86 hardware */
    uint32_t tf_err;
    uintptr_t tf_eip;
    uint16_t tf_cs;
    uint16_t tf_padding4;
    uint32_t tf_eflags;
    /* below here only when crossing rings, such as from user to kernel */
    uintptr_t tf_esp;
    uint16_t tf_ss;
    uint16_t tf_padding5;
} C0RE_PACKED trapframe_t;
void idt_init();
// void print_trapframe(struct trapframe *tf);
// void print_regs(struct pushregs *regs);
// bool trap_in_kernel(struct trapframe *tf);
#endif
|
rod-lin/c0re
|
kernel/intr/trap.c
|
<gh_stars>1-10
#include "pub/com.h"
#include "pub/x86.h"
#include "pub/string.h"
#include "lib/io.h"
#include "lib/debug.h"
#include "intr/trap.h"
#include "mem/mmu.h"
#include "mem/vmm.h"
#include "driver/console.h"
#include "driver/clock.h"
/* *
* Interrupt descriptor table:
*
* Must be built at run time because shifted function addresses can't
* be represented in relocation records.
* */
static gatedesc_t idt[256] = {{0}};
static descloader_t idt_pd = {
sizeof(idt) - 1, (uintptr_t)idt
};
/* idt_init - initialize IDT to each of the entry points in kern/trap/vectors.S */
void idt_init()
{
    /* init IDT by ISR(interrupt service routine), i.e. c0re_trapvec */
    extern uintptr_t c0re_trapvec[];
    int i;
    // all gates default to DPL_KERNEL so user code cannot invoke them
    for (i = 0; i < C0RE_ARRLEN(idt); i++) {
        IDT_SETGATE(idt[i], false, GD_KTEXT, c0re_trapvec[i], DPL_KERNEL);
    }
    // the "switch to kernel" software interrupt must be reachable from
    // user mode, hence DPL_USER on this single gate
    IDT_SETGATE(idt[TRAPNO_SWITCH_TOK], false, GD_KTEXT, c0re_trapvec[TRAPNO_SWITCH_TOK], DPL_USER);
    lidt(&idt_pd);
}
/* trap_in_kernel - test if trap happened in kernel */
// bool
// trap_in_kernel(struct trapframe *tf) {
// return (tf->tf_cs == (uint16_t)KERNEL_CS);
// }
C0RE_INLINE
/* Handle a page fault by delegating to the VMM with the fault error code
 * and the faulting linear address (read from CR2).  Panics if no VMA set
 * is available.  Returns the VMM result, or -1 (unreachable after panic). */
int handle_page_fault(trapframe_t *tf)
{
    extern vma_set_t *c0re_check_vma_set;
    // print_pgfault(tf);
    // NOTE: currently only used in check?
    if (c0re_check_vma_set) {
        // rcr2() yields the faulting linear address saved by the CPU
        return vmm_doPageFault(c0re_check_vma_set, tf->tf_err, rcr2());
    }
    panic("unhandled page fault");
    return -1;
}
trapframe_t switchk2u, *switchu2k;
/* trap_dispatch - dispatch based on what type of trap occurred */
static void dispatch(trapframe_t *tf)
{
    int c, ret;
    switch (tf->tf_trapno) {
    case TRAPNO_PGFLT:
        // page fault
        // trace("page fault!");
        if ((ret = handle_page_fault(tf)) != 0) {
            panic("unable to handle page fault error: %e", ret);
        }
        break;
    case IRQ_OFFSET + IRQ_TIMER:
        // trace("timer");
        // TODO: finish after clock is finished
        // increase a system clock variable
        // some debug util probably
        _clock_inc();
        break;
    case IRQ_OFFSET + IRQ_COM1:
        trace("com1 intr");
        // c = cons_getc();
        // cprintf("serial [%03d] %c\n", c, c);
        break;
    case IRQ_OFFSET + IRQ_KBD:
        // trace("cur time: %d sec", (int)((double)clock_tick() / CLOCK_TICK_PER_SEC));
        c = cons_getc();
        if (c) kputc(c); // simple echo
        // kprintf("kbd [%03d] %c\n", c, c);
        break;
    case TRAPNO_SWITCH_TOU:
        // software interrupt requesting a switch to user mode: rewrite the
        // saved trapframe so iret lands in ring 3
        if (tf->tf_cs != SEGR_USER_CS) {
            // not in user mode?
            switchk2u = *tf;
            switchk2u.tf_cs = SEGR_USER_CS;
            switchk2u.tf_ds = switchk2u.tf_es = switchk2u.tf_ss = SEGR_USER_DS;
            // set eflags, make sure ucore can use io under user mode.
            // if CPL > IOPL, then cpu will generate a general protection.
            switchk2u.tf_eflags |= EFLAG_IOPL_MASK;
            // --------------------- +
            // | lower stack |
            // ---------------------
            // | | <- switchk2u.tf_esp (we want to set the next esp here)
            // | trapframe |
            // | | <- tf
            // ---------------------
            // | stored esp | line 24: pushl %esp in trapentry.S
            // ---------------------
            switchk2u.tf_esp = (uintptr_t)tf + sizeof(trapframe_t) - 8;
            // set temporary stack
            // then iret will jump to the right stack
            // set the "stored esp" in the diagram aobve
            *((uintptr_t *)tf - 1) = (uintptr_t)&switchk2u;
        }
        break;
    case TRAPNO_SWITCH_TOK:
        // software interrupt requesting a switch back to kernel mode
        if (tf->tf_cs != SEGR_KERNEL_CS) {
            tf->tf_cs = SEGR_KERNEL_CS;
            tf->tf_ds = tf->tf_es = SEGR_KERNEL_DS;
            tf->tf_eflags &= ~EFLAG_IOPL_MASK;
            // --------------------- +
            // | lower stack |
            // ---------------------
            // | |
            // | trapframe |
            // | | <- tf
            // ---------------------
            // | stored esp |
            // ---------------------
            // ?? where does this point to ??
            switchu2k = (trapframe_t *)(tf->tf_esp - (sizeof(trapframe_t) - 8));
            memmove(switchu2k, tf, sizeof(trapframe_t) - 8);
            *((uintptr_t *)tf - 1) = (uintptr_t)switchu2k;
        }
        break;
    case IRQ_OFFSET + IRQ_IDE1:
    case IRQ_OFFSET + IRQ_IDE2:
        /* do nothing */
        break;
    default:
        // in kernel, it must be a mistake
        // (low 2 bits of CS are the CPL; 0 means the trap came from ring 0)
        if ((tf->tf_cs & 3) == 0) {
            // print_trapframe(tf);
            panic("unexpected trap in kernel");
        }
    }
}
/* *
 * trap - handles or dispatches an exception/interrupt. if and when trap() returns,
 * the code in kern/trap/trapentry.S restores the old CPU state saved in the
 * trapframe and then uses the iret instruction to return from the exception.
 * */
void c0re_trap(trapframe_t *tf)
{
    // dispatch based on what type of trap occurred
    dispatch(tf);
}
|
rod-lin/c0re
|
pub/dllist.h
|
#ifndef _PUB_DLLIST_H_
#define _PUB_DLLIST_H_
#include "pub/com.h"
/**
* Simple doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when manipulating
* whole lists rather than single entries, as sometimes we already know
* the next/prev entries and we can generate better code by using them
* directly rather than using the generic single-entry routines.
**/
typedef struct dllist_t_tag {
struct dllist_t_tag *prev, *next;
} dllist_t;
C0RE_INLINE void dllist_init(dllist_t *elem);
C0RE_INLINE void dllist_add(dllist_t *head, dllist_t *elem);
C0RE_INLINE void dllist_add_before(dllist_t *head, dllist_t *elem);
C0RE_INLINE void dllist_add_after(dllist_t *head, dllist_t *elem);
C0RE_INLINE void dllist_del(dllist_t *head);
C0RE_INLINE void dllist_del_init(dllist_t *head);
C0RE_INLINE bool dllist_empty(dllist_t *list);
C0RE_INLINE dllist_t *dllist_next(dllist_t *head);
C0RE_INLINE dllist_t *dllist_prev(dllist_t *head);
C0RE_INLINE void _dllist_add(dllist_t *elem, dllist_t *prev, dllist_t *next);
C0RE_INLINE void _dllist_del(dllist_t *prev, dllist_t *next);
/**
 * dllist_init - make @elem a list of one node: both links refer to @elem
 * itself, so the list is initially empty.
 **/
C0RE_INLINE void dllist_init(dllist_t *elem)
{
    elem->next = elem;
    elem->prev = elem;
}
/**
 * dllist_add - add a new entry
 * @head: list head to add after
 * @elem: new entry to be added
 *
 * Insert the new element @elem *after* the element @head which
 * is already in the list.  Alias for dllist_add_after().
 **/
C0RE_INLINE void dllist_add(dllist_t *head, dllist_t *elem)
{
    dllist_add_after(head, elem);
}
/**
 * dllist_add_before - add a new entry
 * @head: list head to add before
 * @elem: new entry to be added
 *
 * Insert the new element @elem *before* the element @head which
 * is already in the list.
 **/
C0RE_INLINE void dllist_add_before(dllist_t *head, dllist_t *elem)
{
    _dllist_add(elem, head->prev, head);
}
/**
 * dllist_add_after - add a new entry
 * @head: list head to add after
 * @elem: new entry to be added
 *
 * Insert the new element @elem *after* the element @head which
 * is already in the list.
 **/
C0RE_INLINE void dllist_add_after(dllist_t *head, dllist_t *elem)
{
    _dllist_add(elem, head, head->next);
}
/**
 * dllist_del - deletes entry from list
 * @head: the element to delete from the list
 *
 * Note: dllist_empty() on @head does not return true after this, the
 * entry's own links are left in an undefined state.
 **/
C0RE_INLINE void dllist_del(dllist_t *head)
{
    _dllist_del(head->prev, head->next);
}
/**
 * dllist_del_init - deletes entry from list and reinitialize it.
 * @head: the element to delete from the list.
 *
 * Note: dllist_empty() on @head returns true after this.
 **/
C0RE_INLINE void dllist_del_init(dllist_t *head)
{
    dllist_del(head);
    dllist_init(head);
}
/**
 * dllist_empty - tests whether a list is empty
 * @list: the list to test (true when the head links only to itself).
 **/
C0RE_INLINE bool dllist_empty(dllist_t *list)
{
    return list->next == list;
}
/**
 * dllist_next - get the next entry
 * @head: the list head
 **/
C0RE_INLINE dllist_t *dllist_next(dllist_t *head)
{
    return head->next;
}
/**
 * dllist_prev - get the previous entry
 * @head: the list head
 **/
C0RE_INLINE dllist_t *dllist_prev(dllist_t *head)
{
    return head->prev;
}
/**
 * Splice @elem in between the two known-adjacent nodes @prev and @next.
 *
 * Internal helper: callers must guarantee that @prev and @next are
 * consecutive entries of the same list.
 **/
C0RE_INLINE void _dllist_add(dllist_t *elem, dllist_t *prev, dllist_t *next)
{
    next->prev = elem;
    prev->next = elem;
    elem->next = next;
    elem->prev = prev;
}
/**
 * Unlink whatever lies between @prev and @next by joining them directly.
 *
 * Internal helper: callers must guarantee that @prev and @next bracket
 * the entry (or entries) being removed.
 **/
C0RE_INLINE void _dllist_del(dllist_t *prev, dllist_t *next)
{
    next->prev = prev;
    prev->next = next;
}
#endif
|
rod-lin/c0re
|
kernel/fs/fs.h
|
<reponame>rod-lin/c0re<gh_stars>1-10
#ifndef _KERNEL_FS_FS_H_
#define _KERNEL_FS_FS_H_
#include "mem/mmu.h"
/* size of one disk sector in bytes */
#define FS_SECTOR_SIZE 512
/* number of sectors backing one memory page */
#define FS_PAGE_NSECTOR (PAGE_SIZE / FS_SECTOR_SIZE)
/* device number of the swap partition */
#define FS_SWAP_DEV_NO 1
#endif
|
rod-lin/c0re
|
kernel/mem/pmm.c
|
#include "pub/com.h"
#include "pub/x86.h"
#include "pub/string.h"
#include "pub/error.h"
#include "lib/sync.h"
#include "lib/debug.h"
#include "mem/mmu.h"
#include "mem/pmm.h"
#include "mem/vmm.h"
#include "mem/swap.h"
#include "mem/ffit.h"
/* *
* Task State Segment:
*
* The TSS may reside anywhere in memory. A special segment register called
* the Task Register (TR) holds a segment selector that points a valid TSS
* segment descriptor which resides in the GDT. Therefore, to use a TSS
* the following must be done in function gdt_init:
* - create a TSS descriptor entry in GDT
* - add enough information to the TSS in memory as needed
* - load the TR register with a segment selector for that segment
*
* There are several fileds in TSS for specifying the new stack pointer when a
* privilege level change happens. But only the fields SS0 and ESP0 are useful
* in our os kernel.
*
* The field SS0 contains the stack segment selector for CPL = 0, and the ESP0
* contains the new ESP value for CPL = 0. When an interrupt happens in protected
* mode, the x86 CPU will look in the TSS for SS0 and ESP0 and load their value
* into SS and ESP respectively.
* */
static taskstate_t ts = {0};
page_t *c0re_pages;
size_t c0re_npage;
const page_allocator_t *page_alloc;
/* *
* Global Descriptor Table:
*
* The kernel and user segments are identical (except for the DPL). To load
* the %ss register, the CPL must equal the DPL. Thus, we must duplicate the
* segments for the user and the kernel. Defined as follows:
* - 0x0 : unused (always faults -- for trapping NULL far pointers)
* - 0x8 : kernel code segment
* - 0x10: kernel data segment
* - 0x18: user code segment
* - 0x20: user data segment
* - 0x28: defined for tss, initialized in gdt_init
* */
static segdesc_t gdt[] = {
GDT_SEG_NULL,
[GDT_SEGNO_KTEXT] = GDT_SEG(STA_X | STA_R, 0x0, 0xFFFFFFFF, DPL_KERNEL),
[GDT_SEGNO_KDATA] = GDT_SEG(STA_W, 0x0, 0xFFFFFFFF, DPL_KERNEL),
[GDT_SEGNO_UTEXT] = GDT_SEG(STA_X | STA_R, 0x0, 0xFFFFFFFF, DPL_USER),
[GDT_SEGNO_UDATA] = GDT_SEG(STA_W, 0x0, 0xFFFFFFFF, DPL_USER),
[GDT_SEGNO_TSS] = GDT_SEG_NULL
};
static descloader_t gdt_pd = { sizeof(gdt) - 1, (uint32_t)gdt };
/* *
 * lgdt - load the global descriptor table register and reset the
 * data/code segement registers for kernel.
 * */
C0RE_INLINE void lgdt(descloader_t *pd)
{
    asm volatile ("lgdt (%0)" :: "r" (pd));
    // gs/fs are loaded with the user data selector, the rest with kernel data
    asm volatile ("movw %%ax, %%gs" :: "a" (SEGR_USER_DS));
    asm volatile ("movw %%ax, %%fs" :: "a" (SEGR_USER_DS));
    asm volatile ("movw %%ax, %%es" :: "a" (SEGR_KERNEL_DS));
    asm volatile ("movw %%ax, %%ds" :: "a" (SEGR_KERNEL_DS));
    asm volatile ("movw %%ax, %%ss" :: "a" (SEGR_KERNEL_DS));
    // reload cs
    // TODO: can we do it before all these moves?
    // (far jump to the next instruction forces a CS reload)
    asm volatile ("ljmp %0, $1f\n 1:\n" :: "i" (SEGR_KERNEL_CS));
}
// /* temporary kernel stack */
// uint8_t stack0[1024];
/* Record the ring-0 stack pointer in the TSS; the CPU loads it on a
 * privilege-level change (user -> kernel trap). */
void load_esp0(uintptr_t esp0)
{
    ts.ts_esp0 = esp0;
}
/* gdt_init - initialize the default GDT and TSS */
static void gdt_init()
{
    // setup a TSS so that we can get the right stack when we trap from
    // user to the kernel. But not safe here, it's only a temporary value,
    // it will be set to KSTACKTOP in lab2.
    extern char kernel_stacktop[];
    load_esp0((uintptr_t)kernel_stacktop);
    ts.ts_ss0 = SEGR_KERNEL_DS;
    // initialize the TSS field of the gdt
    gdt[GDT_SEGNO_TSS] = GDT_SEG_TSS(STS_T32A, (uint32_t)&ts, sizeof(ts), DPL_KERNEL);
    // reload all segment registers
    lgdt(&gdt_pd);
    // load the TSS
    ltr(GD_TSS);
}
/* Select the first-fit allocator as the global page allocator and run its
 * own initialization. */
static void page_allocator_init()
{
    page_alloc = &page_ffit_allocator;
    trace("memory management: %s", page_alloc->name);
    page_alloc->init();
}
// addMem - hand a run of n physical pages starting at "base" to the page
// allocator as free memory (builds the corresponding Page structs).
static void addMem(page_t *base, size_t n)
{
    page_alloc->addMem(base, n);
}
/* Allocate n contiguous pages.  On failure, when swapping is available and
 * the request is a single page, try swapping pages out and retrying up to
 * SWAP_MAX_RETRY_TIME times.  Returns NULL if allocation never succeeds. */
page_t *palloc(size_t n)
{
    page_t *ret;
    size_t retry = 0;
    while (retry < SWAP_MAX_RETRY_TIME) {
        // allocation runs with interrupts disabled
        no_intr_block(ret = page_alloc->alloc(n));
        // successful allocation OR
        // too big block OR
        // no swap space
        if (ret || n > 1 || !swap_hasInit()) break;
        extern vma_set_t *c0re_check_vma_set;
        trace("swap: out of memory, try to swap out %d pages", n);
        swap_out(c0re_check_vma_set, n, 0);
        retry++;
    }
    if (retry == SWAP_MAX_RETRY_TIME) {
        trace("swap: max retry time reached. unable to swap out enough pages");
    }
    return ret;
}
/* Return a previously allocated page block to the allocator (interrupts
 * disabled for the duration). */
void pfree(page_t *base)
{
    no_intr_block(page_alloc->free(base));
}
/* Return the current number of free pages in the allocator. */
size_t nfpage()
{
    size_t ret;
    no_intr_block(ret = page_alloc->nfree());
    return ret;
}
/* Scan the BIOS e820 memory map (left at physical 0x8000 by the boot
 * loader), size the Page array after the kernel's BSS, and hand every
 * usable, page-aligned region below KERNEL_MEMSIZE to the allocator. */
static void page_init()
{
    e820map_t *memmap = (e820map_t *)(0x8000 + KERNEL_BASE);
    uint64_t maxpa = 0; // pa = physical memory
    uint64_t begin, end;
    int i;
    trace("e820map:");
    for (i = 0; i < memmap->nmap; i++) {
        begin = memmap->map[i].addr,
        end = begin + memmap->map[i].size;
        trace(DBG_TAB "memory: %08llx, [%08llx, %08llx], type = %d",
              memmap->map[i].size, begin, end - 1, memmap->map[i].type);
        if (memmap->map[i].type == E820_ARM) {
            // track the highest usable physical address we can manage
            if (maxpa < end && begin < KERNEL_MEMSIZE) {
                maxpa = end;
            }
        }
    }
    if (maxpa > KERNEL_MEMSIZE) {
        maxpa = KERNEL_MEMSIZE;
    }
    extern char bss_end[];
    // the Page array starts at the first page boundary after the kernel image
    c0re_pages = (page_t *)ROUNDUP((void *)bss_end, PAGE_SIZE);
    c0re_npage = maxpa / PAGE_SIZE;
    // mark everything reserved first; free ranges are re-added below
    for (i = 0; i < c0re_npage; i++) {
        page_setReserved(c0re_pages + i);
    }
    uintptr_t freemem = PADDR((uintptr_t)c0re_pages + sizeof(page_t) * c0re_npage);
    for (i = 0; i < memmap->nmap; i++) {
        uint64_t begin = memmap->map[i].addr, end = begin + memmap->map[i].size;
        if (memmap->map[i].type == E820_ARM) {
            // clip each region to [end of Page array, KERNEL_MEMSIZE)
            if (begin < freemem) {
                begin = freemem;
            }
            if (end > KERNEL_MEMSIZE) {
                end = KERNEL_MEMSIZE;
            }
            if (begin < end) {
                begin = ROUNDUP(begin, PAGE_SIZE);
                end = ROUNDDOWN(end, PAGE_SIZE);
                if (begin < end) {
                    addMem(pa2page(begin), (end - begin) / PAGE_SIZE);
                }
            }
        }
    }
}
// get_pte - get pte and return the kernel virtual address of this pte for la
//         - if the PT containing this pte doesn't exist, alloc a page for PT
// parameter:
//  pgdir:  the kernel virtual base address of PDT
//  la:     the linear address need to map
//  create: a logical value to decide if alloc a page for PT
// return vaule: the kernel virtual address of this pte, or NULL when the
//  page table is absent and either create is false or allocation failed
pte_t *get_pte(pde_t *pgdir, uintptr_t la, bool create)
{
    // this function get the corresponsing page table entry by the linear address
    // if the entry does not exist, it allocate one
    pde_t *pdep = &pgdir[PD_INDEX(la)];
    if (!(*pdep & PTE_FLAG_P)) {
        // not present -> alloc page
        page_t *page;
        if (!create || (page = palloc(1)) == NULL) {
            return NULL;
        }
        // the new page-table page gets a single reference from the PDE
        page_clearRef(page);
        page_incRef(page);
        uintptr_t pa = page2pa(page);
        memset(KADDR(pa), 0, PAGE_SIZE);
        *pdep = pa | PTE_FLAG_U | PTE_FLAG_W | PTE_FLAG_P;
    }
    // 1. get the page table(*pdep)
    // 2. get the page table address(remove the low 12 bits *pdep)
    // 3. cast to kernel address(KADDR(PTE_ADDR(*pdep)))
    // 4. get the corresponding table entry address
    return &((pte_t *)KADDR(PTE_ADDR(*pdep)))[PT_INDEX(la)];
}
// map_segment - setup & enable the paging mechanism
// parameters
//  la:   linear address of this memory need to map (after x86 segment map)
//  pa:   physical address of this memory
//  size: memory size
//  perm: permission of this memory
// this functin basically maps the range [la, la + size)
// to a physical address range [pa, pa + size)
// "segment" has no special meaning here
static void
map_segment(pde_t *pgdir, uintptr_t la, uintptr_t pa,
            size_t size, uint32_t perm)
{
    // same page offset
    assert(PAGE_OFS(la) == PAGE_OFS(pa));
    // align adresses to pages
    size_t n = ROUNDUP(size + PAGE_OFS(la), PAGE_SIZE) / PAGE_SIZE;
    la = ROUNDDOWN(la, PAGE_SIZE);
    pa = ROUNDDOWN(pa, PAGE_SIZE);
    for (; n > 0; n--, la += PAGE_SIZE, pa += PAGE_SIZE) {
        pte_t *pte = get_pte(pgdir, la, true);
        // the corresponding page table entry(which stores a physcial address
        // that the linear address maps to)
        assert(pte != NULL);
        *pte = pa | PTE_FLAG_P | perm;
    }
}
/* Load the page directory's physical address into CR3 and switch on
 * paging via CR0.  NOTE(review): TS and EM are set and then immediately
 * cleared below — looks intentional (copied from ucore) but the net
 * effect is only the remaining bits; confirm. */
static void page_enable(uintptr_t pgdir_pa)
{
    lcr3(pgdir_pa);
    // turn on paging
    uint32_t cr0 = rcr0();
    cr0 |= CR0_PE | CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_EM | CR0_MP;
    cr0 &= ~(CR0_TS | CR0_EM);
    lcr0(cr0);
}
// virtual address of boot-time page directory
pde_t *c0re_pgdir;
// physical address of boot-time page directory(stored in cr3)
uintptr_t c0re_pgdir_pa;
//get_pgtable_items - In [left, right] range of PDT or PT, find a continuous linear addr space
//                  - (left_store*X_SIZE~right_store*X_SIZE) for PDT or PT
//                  - X_SIZE=PTSIZE=4M, if PDT; X_SIZE=PAGE_SIZE=4K, if PT
// paramemters:
//  left:       the low side of table's range
//  right:      the high side of table's range
//  table:      the beginning addr of table
//  next_left:  the pointer of the high side of table's next range
//  next_right: the pointer of the low side of table's next range
// return value: 0 - not a invalid item range, perm - a valid item range with perm permission
static int get_pgtable_items(size_t left, size_t right, uintptr_t *table,
                             size_t *next_left, size_t *next_right)
{
    if (left >= right) {
        return 0;
    }
    // skip absent entries to find the start of a present run
    while (left < right && !(table[left] & PTE_FLAG_P))
        left++;
    if (left < right) {
        if (next_left) {
            *next_left = left;
        }
        // extend the run while the user-visible permission bits match
        int perm = table[left++] & PTE_FLAG_USER;
        while (left < right && (table[left] & PTE_FLAG_USER) == perm) {
            left++;
        }
        if (next_right != NULL) {
            *next_right = left;
        }
        return perm;
    }
    return 0;
}
/* Format permission bits as a 3-character "urw"-style string.
 * Returns a pointer to a static buffer: not reentrant, and each call
 * overwrites the previous result. */
static const char *perm2str(int perm)
{
    static char str[4];
    str[0] = (perm & PTE_FLAG_U) ? 'u' : '-';
    str[1] = 'r';  // read is always permitted on x86 page tables
    str[2] = (perm & PTE_FLAG_W) ? 'w' : '-';
    str[3] = '\0';
    return str;
}
// print_pgdir - print page directory and table, grouping consecutive
// entries with identical user-visible permissions into single lines.
// Walks the tables through the recursive KERNEL_VPT self-mapping.
void print_pgdir()
{
    size_t left, right = 0, perm;
    pte_t *vpt = (pte_t *)KERNEL_VPT;
    pde_t *vpd = (pde_t *)PAGE_ADDR(PD_INDEX(KERNEL_VPT), PD_INDEX(KERNEL_VPT), 0);
    // kprintf("%p %p\n", vpt, vpd);
    kprintf("-------------------- BEGIN --------------------\n");
    while (1) {
        // outer loop: runs of present PDEs
        perm = get_pgtable_items(right, PD_NENTRY, vpd, &left, &right);
        if (!perm) break;
        kprintf("PDE(%03x) %08x-%08x %08x %s\n",
                right - left,             // page table count
                left * PT_SIZE,           // begin addr(virtual)
                right * PT_SIZE,          // end addr
                (right - left) * PT_SIZE, // size
                perm2str(perm));
        size_t l, r = left * PT_NENTRY;
        while (1) {
            // inner loop: runs of present PTEs inside this PDE range
            perm = get_pgtable_items(r, right * PT_NENTRY, vpt, &l, &r);
            if (!perm) break;
            kprintf(DBG_TAB "PTE(%05x) %08x-%08x %08x %s\n",
                    r - l,              // page count
                    l * PAGE_SIZE,      // same as above
                    r * PAGE_SIZE,
                    (r - l) * PAGE_SIZE,
                    perm2str(perm));
        }
    }
    kprintf("--------------------- END ---------------------\n");
}
static void check_palloc();
static void check_pgdir();
static void check_c0re_pgdir();
/* pmm_init - initialize the physical memory management: pick an allocator,
 * discover physical memory, build the boot page directory, enable paging,
 * and reload the GDT/TSS for the paged world. */
void pmm_init()
{
    // gdt_init();
    page_allocator_init();
    page_init();
    check_palloc();
    // one page for the boot page directory
    c0re_pgdir = page2kva(palloc_s(1));
    c0re_pgdir_pa = PADDR(c0re_pgdir);
    memset(c0re_pgdir, 0, PAGE_SIZE);
    check_pgdir();
    // TODO: check alignment??
    assert(KERNEL_BASE % PT_SIZE == 0 && KERNEL_TOP % PT_SIZE == 0);
    // recursively insert c0re_pgdir in itself
    // to form a virtual page table at virtual address VPT
    // NOTE: map KERNEL_VPT to the page directory itself
    c0re_pgdir[PD_INDEX(KERNEL_VPT)] = c0re_pgdir_pa | PTE_FLAG_P | PTE_FLAG_W;
    // map all physical memory to linear memory with base linear addr KERNEL_BASE
    // linear_addr KERNEL_BASE ~ KERNEL_BASE + KERNEL_MEMSIZE = phy_addr 0 ~ KERNEL_MEMSIZE
    // but shouldn't use this map until enable_paging() & gdt_init() finished.
    map_segment(c0re_pgdir, KERNEL_BASE, 0, KERNEL_MEMSIZE, PTE_FLAG_W);
    // pd0 -> pd[KERNEL_BASE >> 22]
    // temp setting to keep the kernel working
    c0re_pgdir[0] = c0re_pgdir[PD_INDEX(KERNEL_BASE)];
    // NOTE: at this point, segmentation system is still working,
    //       so linear address = virtual address - KERNEL_BASE
    //       but if paging is enabled, physical address = linear address - KERNEL_BASE
    //       so there will be two KERNEL_BASE's subtracted from a virtual address
    //       which will crash the kernel
    //       -- so we set page table 0 equal to page table that KERNEL_BASE uses(i.e. KERNEL_BASE >> 22)
    //       and the extra KERNEL_BASE will not be subtracted from the linear addres
    // enable paging
    page_enable(c0re_pgdir_pa);
    gdt_init();
    // NOTE: segmentation system is disabled(no real translation between va and la)
    // restore the page directory
    c0re_pgdir[0] = 0;
    check_c0re_pgdir();
    print_pgdir();
}
/* Allocate at least n bytes (rounded up to whole pages) and return the
 * kernel virtual address.  Requests must stay below 1 MiB. */
void *kmalloc(size_t n)
{
    assert(n > 0 && n < 1024 * 1024);
    int npage = (n + PAGE_SIZE - 1) / PAGE_SIZE;
    return page2kva(palloc_s(npage));
}
/* Free memory obtained from kmalloc.  The size parameter n is currently
 * unused (the page allocator tracks block sizes itself). */
void kfree(void *ptr, size_t n)
{
    // assert(n > 0 && n < 1024 * 1024);
    assert(ptr);
    pfree(kva2page(ptr));
}
// get_page - get related Page struct for linear address la using PDT pgdir.
// If ptep_result is non-NULL it also receives the pte pointer (which may
// be NULL or non-present).  Returns NULL when la is not mapped.
page_t *get_page(pde_t *pgdir, uintptr_t la, pte_t **ptep_result)
{
    pte_t *ptep = get_pte(pgdir, la, 0);
    if (ptep_result) {
        *ptep_result = ptep;
    }
    if (ptep != NULL && *ptep & PTE_FLAG_P) {
        return pte2page(*ptep);
    }
    return NULL;
}
//page_remove_pte - free an page_t which is related to by linear address la
//                - and clean(invalidate) pte which is related linear address la
//note: PT is changed, so the TLB need to be invalidate
C0RE_INLINE
void page_remove_pte(pde_t *pgdir, uintptr_t la, pte_t *ptep)
{
    if (*ptep & PTE_FLAG_P) {
        page_t *page = pte2page(*ptep);
        // drop this mapping's reference; free the page when it was the last
        if (page_decRef(page) == 0) {
            pfree(page);
        }
        *ptep = 0;
        tlb_invalidate(pgdir, la);
    }
}
//page_remove - free an Page which is related to by linear address la and has an validated pte
void page_remove(pde_t *pgdir, uintptr_t la)
{
    // create = 0: do not allocate a page table just to remove a mapping
    pte_t *ptep = get_pte(pgdir, la, 0);
    if (ptep) {
        page_remove_pte(pgdir, la, ptep);
    }
}
// page_insert - build the map of phy addr of an Page with the linear addr la
// paramemters:
//  pgdir: the kernel virtual base address of PDT
//  page:  the Page which need to map
//  la:    the linear address need to map
//  perm:  the permission of this Page which is setted in related pte
// return value: 0 on success, -E_NO_MEM when the page table can't be allocated
// note: PT is changed, so the TLB need to be invalidate
int page_insert(pde_t *pgdir, page_t *page, uintptr_t la, uint32_t perm)
{
    pte_t *ptep = get_pte(pgdir, la, 1);
    if (!ptep) {
        // no memory
        return -E_NO_MEM;
    }
    // take the new mapping's reference up front
    page_incRef(page);
    if (*ptep & PTE_FLAG_P) {
        page_t *p = pte2page(*ptep);
        if (p == page) {
            // remapping the same page: undo the extra reference
            page_decRef(page);
        } else {
            page_remove_pte(pgdir, la, ptep);
        }
    }
    *ptep = page2pa(page) | PTE_FLAG_P | perm;
    tlb_invalidate(pgdir, la);
    return 0;
}
// pgdir_palloc - call palloc & page_insert functions to
//              - allocate a page size memory & setup an addr map
//              - pa<->la with linear address la and the PDT pgdir
// Returns the new page, or NULL on allocation/insertion failure.
page_t *pgdir_palloc(pde_t *pgdir, uintptr_t la, uint32_t perm)
{
    extern vma_set_t *c0re_check_vma_set;
    page_t *page = palloc(1);
    if (page) {
        if (page_insert(pgdir, page, la, perm)) {
            // error: insertion failed, release the page again
            pfree(page);
            return NULL;
        }
        if (swap_hasInit()) {
            // used for check
            if (c0re_check_vma_set) {
                // register the page with the swap manager so it can be
                // selected for eviction later
                swap_mapSwappable(c0re_check_vma_set, la, page, 0);
                page->pra_vaddr = la;
                assert(page_getRef(page) == 1);
                //cprintf("get No. %d page: pra_vaddr %x, pra_link.prev %x, pra_link_next %x in pgdir_alloc_page\n", (page-pages), page->pra_vaddr,page->pra_page_link.prev, page->pra_page_link.next);
            }
        }
    }
    return page;
}
// tlb_invalidate - invalidate a TLB entry for la, but only if the page
// tables being edited are the ones currently loaded in CR3; edits to an
// inactive page directory need no TLB work.
void tlb_invalidate(pde_t *pgdir, uintptr_t la)
{
    if (rcr3() == PADDR(pgdir)) {
        invlpg((void *)la);
    }
}
// check_palloc - run the active page allocator's own self-check.
static void check_palloc()
{
    page_alloc->check();
    trace("check success: page alloc");
}
// check_pgdir - self-test for page_insert / page_remove / get_pte on the
// kernel page directory. Exercises: mapping, remapping the same page at a
// second address (refcount goes to 2), permission downgrade on remap, and
// full teardown back to an empty slot 0.
static void check_pgdir()
{
    assert(c0re_npage <= KERNEL_MEMSIZE / PAGE_SIZE);
    assert(c0re_pgdir != NULL && (uint32_t)PAGE_OFS(c0re_pgdir) == 0);
    assert(get_page(c0re_pgdir, 0x0, NULL) == NULL);
    page_t *p1, *p2;
    p1 = palloc(1);
    // map p1 at va 0 and verify the pte and refcount
    assert(page_insert(c0re_pgdir, p1, 0x0, 0) == 0);
    pte_t *ptep;
    assert((ptep = get_pte(c0re_pgdir, 0x0, 0)) != NULL);
    assert(pte2page(*ptep) == p1);
    assert(page_getRef(p1) == 1);
    // the second pte of the first page table must be what get_pte returns
    // for va PAGE_SIZE
    ptep = &((pte_t *)KADDR(PDE_ADDR(c0re_pgdir[0])))[1];
    assert(get_pte(c0re_pgdir, PAGE_SIZE, 0) == ptep);
    p2 = palloc(1);
    // map p2 at va PAGE_SIZE with user/write permission
    assert(page_insert(c0re_pgdir, p2, PAGE_SIZE, PTE_FLAG_U | PTE_FLAG_W) == 0);
    assert((ptep = get_pte(c0re_pgdir, PAGE_SIZE, 0)) != NULL);
    assert(*ptep & PTE_FLAG_U);
    assert(*ptep & PTE_FLAG_W);
    assert(c0re_pgdir[0] & PTE_FLAG_U);
    assert(page_getRef(p2) == 1);
    // remap p1 over p2's slot: p2 is dropped, p1 now referenced twice
    assert(page_insert(c0re_pgdir, p1, PAGE_SIZE, 0) == 0);
    assert(page_getRef(p1) == 2);
    assert(page_getRef(p2) == 0);
    assert((ptep = get_pte(c0re_pgdir, PAGE_SIZE, 0)) != NULL);
    assert(pte2page(*ptep) == p1);
    assert((*ptep & PTE_FLAG_U) == 0);
    // tear both mappings down again
    page_remove(c0re_pgdir, 0x0);
    assert(page_getRef(p1) == 1);
    assert(page_getRef(p2) == 0);
    page_remove(c0re_pgdir, PAGE_SIZE);
    assert(page_getRef(p1) == 0);
    assert(page_getRef(p2) == 0);
    // the page table page itself holds the last reference; free it
    assert(page_getRef(pde2page(c0re_pgdir[0])) == 1);
    pfree(pde2page(c0re_pgdir[0]));
    c0re_pgdir[0] = 0;
    trace("check success: pgdir");
}
// check_c0re_pgdir - verify the kernel's identity mapping and the
// self-mapping PDE, then check that one physical page mapped at two
// linear addresses is truly shared.
static void check_c0re_pgdir()
{
    pte_t *ptep;
    int i;
    // NOTE(review): c0re_npage is a page COUNT but i steps by PAGE_SIZE
    // and is compared against PTE_ADDR (a physical address), so only a
    // handful of sparse pages are actually checked -- inherited from
    // upstream ucore; confirm whether the bound should be npage * PAGE_SIZE
    for (i = 0; i < c0re_npage; i += PAGE_SIZE) {
        assert((ptep = get_pte(c0re_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
        assert(PTE_ADDR(*ptep) == i);
    }
    // the VPT slot must point back at the page directory itself
    assert(PDE_ADDR(c0re_pgdir[PD_INDEX(KERNEL_VPT)]) == PADDR(c0re_pgdir));
    assert(c0re_pgdir[0] == 0);
    page_t *p;
    p = palloc(1);
    // map the same page at 0x100 and 0x100 + PAGE_SIZE
    assert(page_insert(c0re_pgdir, p, 0x100, PTE_FLAG_W) == 0);
    assert(page_getRef(p) == 1);
    assert(page_insert(c0re_pgdir, p, 0x100 + PAGE_SIZE, PTE_FLAG_W) == 0);
    assert(page_getRef(p) == 2);
    // a write through one mapping must be visible through the other
    const char *str = "ucore: Hello world!!";
    strcpy((void *)0x100, str);
    assert(strcmp((void *)0x100, (void *)(0x100 + PAGE_SIZE)) == 0);
    // and a write through the kernel va of the page is visible too
    *(char *)(page2kva(p) + 0x100) = '\0';
    assert(strlen((const char *)0x100) == 0);
    pfree(p);
    pfree(pde2page(c0re_pgdir[0]));
    c0re_pgdir[0] = 0;
    trace("check success: c0re_pgdir");
}
|
rod-lin/c0re
|
kernel/driver/ide.h
|
<gh_stars>1-10
#ifndef _KERNEL_DRIVER_IDE_H_
#define _KERNEL_DRIVER_IDE_H_
#include "pub/com.h"
// ide_init - probe and initialize the IDE controller/devices
void ide_init();
// ide_device_valid - true if device ideno was detected at init time
bool ide_device_valid(unsigned short ideno);
// ide_device_size - capacity of device ideno (units: sectors, per swapfs_init usage)
size_t ide_device_size(unsigned short ideno);
// ide_read_secs / ide_write_secs - transfer nsecs sectors starting at secno;
// return 0 on success, negative error code otherwise
int ide_read_secs(unsigned short ideno, uint32_t secno, void *dst, size_t nsecs);
int ide_write_secs(unsigned short ideno, uint32_t secno, const void *src, size_t nsecs);
#endif
|
rod-lin/c0re
|
kernel/lib/io.c
|
#include "pub/com.h"
#include "pub/stdarg.h"
#include "pub/printfmt.h"
#include "lib/io.h"
// kputns - write at most `max` characters of the NUL-terminated string
// `str` to the console; returns the number actually written.
int kputns(char *str, int max)
{
    int written = 0;
    while (str[written] != '\0' && written < max) {
        kputc(str[written]);
        written++;
    }
    return written;
}
// kputs - write the whole NUL-terminated string to the console
// (no trailing newline); returns the number of characters written.
int kputs(char *str)
{
    char *p = str;
    while (*p != '\0') {
        kputc(*p);
        p++;
    }
    return p - str;
}
// kputch - per-character callback handed to vprintfmt: emit one char and
// bump the caller's counter.
static void kputch(int c, int *cnt)
{
    kputc(c);
    (*cnt)++;
}
// vkprintf - formatted console output driven by vprintfmt; returns the
// number of characters emitted.
int vkprintf(const char *fmt, va_list ap)
{
    int cnt = 0;
    // NOTE(review): kputch is cast to putc_fn_t -- assumes putc_fn_t's
    // second parameter is pointer-compatible with int*; verify against
    // pub/printfmt.h, calling through a mismatched type would be UB
    vprintfmt((putc_fn_t)kputch, &cnt, fmt, ap);
    return cnt;
}
// kprintf - printf-style console output; returns the number of
// characters emitted. Thin varargs wrapper around vkprintf.
int kprintf(const char *fmt, ...)
{
    va_list ap;
    int cnt;
    va_start(ap, fmt);
    cnt = vkprintf(fmt, ap);
    va_end(ap);
    return cnt;
}
|
rod-lin/c0re
|
kernel/mem/vmm.c
|
<reponame>rod-lin/c0re
#include "pub/com.h"
#include "pub/dllist.h"
#include "pub/error.h"
#include "mem/swap.h"
#include "mem/vmm.h"
#include "mem/pmm.h"
// vma_new - allocate a vma descriptor for the half-open range [start, end)
// with the given flags. Returns NULL when kmalloc fails. The `set` and
// `link` members are left for vma_set_insert to fill in.
vma_t *vma_new(uintptr_t start, uintptr_t end, uint32_t flags)
{
    vma_t *ret = kmalloc(sizeof(*ret));
    if (ret == NULL) return NULL;
    ret->start = start;
    ret->end = end;
    ret->flags = flags;
    return ret;
}
C0RE_INLINE
// vma_has - membership test for addr against the half-open range [start, end)
bool vma_has(vma_t *vma, uintptr_t addr)
{
    if (addr < vma->start) return false;
    return addr < vma->end;
}
// vma_set_new - allocate and initialize an empty vma set (an empty sorted
// list, no cache, no page directory). If the swap subsystem is up, gives
// the swap manager a chance to attach its per-set data; otherwise
// swap_data is NULL. Returns NULL on allocation failure.
vma_set_t *vma_set_new()
{
    vma_set_t *set = kmalloc(sizeof(*set));
    if (set) {
        dllist_init(&(set->mset));
        set->mcache = NULL;
        set->mcount = 0;
        set->pgdir = NULL;
        if (swap_hasInit()) swap_initVMASet(set);
        else set->swap_data = NULL;
    }
    return set;
}
// vma_set_free - free every vma in the set, then the set itself.
// Always re-reads the list head after each deletion, so removing nodes
// while looping is safe.
void vma_set_free(vma_set_t *set)
{
    dllist_t *list = &(set->mset), *dll;
    while ((dll = dllist_next(list)) != list) {
        dllist_del(dll);
        kfree(dll2vma(dll, link), sizeof(vma_t)); // kfree vma
    }
    kfree(set, sizeof(*set)); // kfree mm
    // upstream ucore nulled the caller's local `mm` here -- a no-op on a
    // by-value copy, hence dropped
}
C0RE_INLINE
// is_vma_overlap - true when prev and next do NOT form two well-ordered,
// non-empty ranges with prev entirely before next (De Morgan of the
// three well-formedness conditions).
bool is_vma_overlap(vma_t *prev, vma_t *next)
{
    return prev->start >= prev->end
        || prev->end > next->start
        || next->start >= next->end;
}
// vma_set_insert - link a well-formed vma into the set, keeping the list
// sorted by start address; asserts that it overlaps neither neighbour.
void vma_set_insert(vma_set_t *set, vma_t *vma)
{
    assert(vma->start < vma->end);
    dllist_t *head = &(set->mset);
    // walk forward; `pos` trails as the last node whose start is <= ours
    dllist_t *pos = head;
    dllist_t *it;
    for (it = dllist_next(head); it != head; it = dllist_next(it)) {
        if (dll2vma(it, link)->start > vma->start) {
            break; // found the first vma that must come after us
        }
        pos = it;
    }
    dllist_t *after = dllist_next(pos);
    /* check overlap(of vma and its predecessor) */
    if (pos != head) {
        assert(!is_vma_overlap(dll2vma(pos, link), vma));
    }
    /* check overlap(of vma and its successor) */
    if (after != head) {
        assert(!is_vma_overlap(vma, dll2vma(after, link)));
    }
    vma->set = set;
    dllist_add_after(pos, &(vma->link));
    set->mcount++;
}
// vma_set_find - return the vma containing addr, or NULL. A one-entry
// cache (mcache) short-circuits repeated lookups of the same region; the
// cache is refreshed on every successful list scan.
vma_t *vma_set_find(vma_set_t *set, uintptr_t addr)
{
    if (set == NULL) return NULL;
    vma_t *hit = set->mcache;
    if (hit && vma_has(hit, addr)) {
        return hit; // cache hit, list untouched
    }
    dllist_t *head = &(set->mset);
    dllist_t *it;
    for (it = dllist_next(head); it != head; it = dllist_next(it)) {
        vma_t *cand = dll2vma(it, link);
        if (vma_has(cand, addr)) {
            set->mcache = cand; // remember for the next lookup
            return cand;
        }
    }
    return NULL;
}
static void check_vmm();
// running total of page faults handled by vmm_doPageFault (never reset)
static size_t page_fault_count = 0;
// vmm_init - currently only runs the vmm self-test
void vmm_init()
{
    check_vmm();
}
// vmm_getPageFaultCount - read accessor for the fault counter
size_t vmm_getPageFaultCount()
{
    return page_fault_count;
}
// vmm_doPageFault - handle a page fault at `addr` with x86 error code
// `error` against the address space `set`.
// Returns 0 on success, -E_INVAL for an illegal access, -E_NO_MEM when a
// page (or page table) could not be provided.
int vmm_doPageFault(vma_set_t *set, uint32_t error, uintptr_t addr)
{
    int ret = -E_INVAL;
    // try to find a vma which include addr
    vma_t *vma = vma_set_find(set, addr);
    page_fault_count++;
    // the addr is in the range of a set's vma?
    // (vma->start > addr is redundant with vma_set_find's half-open check,
    //  kept as defensive belt-and-braces from upstream)
    if (!vma || vma->start > addr) {
        trace("vmm_doPageFault: invalid addr %p which cannot be found in vma\n", (void *)addr);
        goto failed;
    }
    // check the error code; x86 page-fault error code low bits:
    //   bit 0 (P)   == 0 means the page was not present
    //   bit 1 (W/R) == 1 means the access was a write
    // (bit 2 would be the user/supervisor bit; only bits 0-1 are used here)
    switch (error & 3) {
        default: /* error code flag : default is 3 ( W/R=1, P=1): write, present */
        case 2: /* error code flag : (W/R=1, P=0): write, not present */
            if (!(vma->flags & VMA_FLAG_WRITE)) {
                trace("vmm_doPageFault: write non-writtable vma(error code flag = write AND not present)");
                goto failed;
            }
            break;
        // a read fault on a present page should not reach the handler at all
        case 1: /* error code flag : (W/R=0, P=1): read, present */
            trace("vmm_doPageFault: illegal error code flag 'read AND present'");
            goto failed;
        case 0: /* error code flag : (W/R=0, P=0): read, not present */
            if (!(vma->flags & (VMA_FLAG_READ | VMA_FLAG_EXEC))) {
                trace("vmm_doPageFault: read non-readable and non-executable vma(error code flag = read AND not present)");
                goto failed;
            }
    }
    /* IF (write an existed addr) OR
     * (write an non_existed addr && addr is writable) OR
     * (read an non_existed addr && addr is readable)
     * THEN
     * continue process
     */
    uint32_t perm = PTE_FLAG_U;
    if (vma->flags & VMA_FLAG_WRITE) {
        perm |= PTE_FLAG_W;
    }
    addr = ROUNDDOWN(addr, PAGE_SIZE);
    ret = -E_NO_MEM;
    pte_t *ptep = get_pte(set->pgdir, addr, true);
    // try to find a pte, if pte's PT(Page Table) doesn't existed, then create a PT.
    // (notice the 3th parameter '1')
    if (ptep == NULL) {
        trace("vmm_doPageFault: cannot find page table entry");
        goto failed;
    }
    if (*ptep == 0) { // if the phy addr doesn't exist, then alloc a page & map the phy addr with logical addr
        if (!pgdir_palloc(set->pgdir, addr, perm)) {
            trace("vmm_doPageFault: pgdir_alloc_page failed\n");
            goto failed;
        }
    } else { // if this pte is a swap entry, then load data from disk to a page with phy addr
             // and call page_insert to map the phy addr with logical addr
        // NOTE: if a PTE is not present but non-zero, it's a swap entry
        // then you cast it to swap_entry_t
        if(swap_hasInit()) {
            page_t *page = NULL;
            ret = swap_in(set, addr, &page);
            if (ret) {
                trace("vmm_doPageFault: swap_in failed\n");
                goto failed;
            }
            page_insert(set->pgdir, page, addr, perm);
            swap_mapSwappable(set, addr, page, 1);
            page->pra_vaddr = addr;
        } else {
            trace("vmm_doPageFault: swap not available(ptep = %x)", *ptep);
            goto failed;
        }
    }
    ret = 0;
failed:
    return ret;
}
// check_vma_set - self-test for the vma set: inserts ranges [5i, 5i+2)
// out of order (descending then ascending), verifies the list comes out
// sorted, and that lookups hit inside ranges and miss in the gaps.
static void check_vma_set()
{
    size_t nfree = nfpage();
    // panic("no!");
    vma_set_t *set = vma_set_new();
    assert(set);
    int step1 = 10, step2 = step1 * 10;
    int i;
    // insert [50,52) down to [5,7) in descending order
    for (i = step1; i >= 1; i--) {
        vma_t *vma = vma_new(i * 5, i * 5 + 2, 0);
        assert(vma);
        vma_set_insert(set, vma);
    }
    // then [55,57) up to [500,502) in ascending order
    for (i = step1 + 1; i <= step2; i++) {
        vma_t *vma = vma_new(i * 5, i * 5 + 2, 0);
        assert(vma);
        vma_set_insert(set, vma);
    }
    // trace("vma count: %d", set->mcount);
    // regardless of insertion order the list must be sorted by start
    dllist_t *dll = dllist_next(&(set->mset));
    for (i = 1; i <= step2; i++) {
        assert(dll != &(set->mset));
        vma_t *mmap = dll2vma(dll, link);
        assert(mmap->start == i * 5 && mmap->end == i * 5 + 2);
        dll = dllist_next(dll);
    }
    // addresses i and i+1 fall inside [i, i+2); i+2..i+4 are gaps
    for (i = 5; i <= 5 * step2; i += 5) {
        vma_t *vma1 = vma_set_find(set, i + 0);
        assert(vma1 != NULL);
        vma_t *vma2 = vma_set_find(set, i + 1);
        assert(vma2 != NULL);
        // trace("%d %p", i, vma_set_find(set, i + 2));
        assert(!vma_set_find(set, i + 2));
        assert(!vma_set_find(set, i + 3));
        assert(!vma_set_find(set, i + 4));
        assert(vma1->start == i && vma1->end == i + 2);
        assert(vma2->start == i && vma2->end == i + 2);
    }
    // nothing below the first range
    for (i = 4; i >= 0; i--) {
        vma_t *vma_below_5 = vma_set_find(set, i);
        if (vma_below_5) {
            trace("vma_below_5: i %x, start %x, end %x\n", i, vma_below_5->start, vma_below_5->end);
        }
        assert(!vma_below_5);
    }
    vma_set_free(set);
    // no pages may be leaked by the whole exercise
    assert(nfree == nfpage());
    trace("check success: vma set");
}
// non-NULL only while check_pgfault runs; pgdir_palloc uses it to attach
// swap bookkeeping to pages faulted in during the test
vma_set_t *c0re_check_vma_set = NULL;
// check_pgfault - end-to-end page fault test: create a writable vma over
// [0, PT_SIZE), touch 100 bytes (each touch demand-faults a page in via
// vmm_doPageFault), read them back, then tear everything down.
static void check_pgfault()
{
    trace("check begin: pgfault");
    size_t nfree = nfpage();
    vma_set_t *set = c0re_check_vma_set = vma_set_new();
    pde_t *pgdir = set->pgdir = c0re_pgdir;
    assert(c0re_check_vma_set);
    assert(pgdir[0] == 0);
    vma_t *vma = vma_new(0, PT_SIZE, VMA_FLAG_WRITE);
    assert(vma);
    vma_set_insert(set, vma);
    uintptr_t addr = 0x0;
    assert(vma_set_find(set, addr) == vma);
    int i, sum = 0;
    // each write below va 0x64 faults the first page in on demand
    for (i = 0; i < 100; i++) {
        *(char *)(addr + i) = i;
        sum += i;
    }
    for (i = 0; i < 100; i++) {
        sum -= *(char *)(addr + i);
    }
    assert(sum == 0);
    // unmap the faulted-in page, then free the page table itself
    page_remove(pgdir, ROUNDDOWN(addr, PAGE_SIZE));
    pfree(pde2page(pgdir[0]));
    pgdir[0] = 0;
    set->pgdir = NULL;
    vma_set_free(set);
    c0re_check_vma_set = NULL;
    assert(nfree == nfpage());
    trace("check success: page fault");
}
// check_vmm - check correctness of vmm (runs both sub-checks and verifies
// the free-page count is unchanged afterwards, i.e. no leaks)
static void check_vmm()
{
    size_t nfree = nfpage();
    check_vma_set();
    check_pgfault();
    assert(nfree == nfpage());
    trace("check success: vmm");
}
|
rod-lin/c0re
|
kernel/mem/vmm.h
|
#ifndef _KERNEL_MEM_VMM_H_
#define _KERNEL_MEM_VMM_H_
/* virtual memory management */
#include "pub/com.h"
#include "pub/dllist.h"
#include "mem/mmu.h"
#include "lib/sync.h"
struct vma_set_t_tag;
// vma_t - one virtual memory area: the half-open range [start, end)
typedef struct {
    struct vma_set_t_tag *set;  // owning set (filled in by vma_set_insert)
    uintptr_t start;            // inclusive lower bound
    uintptr_t end;              // exclusive upper bound
    uint32_t flags;             // VMA_FLAG_* bits
    dllist_t link;              // node in the set's start-sorted list
} vma_t;
// vma_set_t - one address space's collection of vmas
typedef struct vma_set_t_tag {
    dllist_t mset;              // list of vma_t, sorted by start
    vma_t *mcache;              // last vma found by vma_set_find
    size_t mcount;              // number of vmas in the list
    pde_t *pgdir;               // page directory backing this space
    void *swap_data;            // swap-manager private data (NULL if swap off)
} vma_set_t;
// dll2vma - recover the vma_t from its embedded list node
#define dll2vma(dll, member) \
    to_struct((dll), vma_t, member)
#define VMA_FLAG_READ 0x00000001
#define VMA_FLAG_WRITE 0x00000002
#define VMA_FLAG_EXEC 0x00000004
vma_t *vma_new(uintptr_t start, uintptr_t end, uint32_t flags);
vma_set_t *vma_set_new();
void vma_set_free(vma_set_t *set);
void vma_set_insert(vma_set_t *set, vma_t *vma);
vma_t *vma_set_find(vma_set_t *set, uintptr_t addr);
void vmm_init();
int vmm_doPageFault(vma_set_t *set, uint32_t error, uintptr_t addr);
size_t vmm_getPageFaultCount();
#endif
|
rod-lin/c0re
|
kernel/mem/ffit.c
|
#include "pub/com.h"
#include "pub/x86.h"
#include "mem/ffit.h"
#include "mem/mmu.h"
#include "mem/pmm.h"
#include "lib/debug.h"
/* first-fit page allocator */
extern const page_allocator_t page_ffit_allocator;
/* free_area_t - maintains a doubly linked list to record free (unused) pages */
free_area_t free_area;
// shorthand for the free-list head and the total free-page counter
#define _FREED (free_area.freed)
#define _NFREE (free_area.nfree)
// _ffit_dllist_append - link a free block into the list. When the list is
// non-empty the block is inserted right AFTER the current head (the list
// is not kept in address order; ffit_free's merge pass handles adjacency).
static void _ffit_dllist_append(page_t *append)
{
    if (_FREED) {
        append->next = _FREED->next;
        append->prev = _FREED;
        if (_FREED->next)
            _FREED->next->prev = append;
        _FREED->next = append;
    } else {
        // empty list: the new block becomes the sole head
        _FREED = append;
        append->next = append->prev = NULL;
    }
}
// _ffit_dllist_remove - unlink a block from the free list.
// The removed node's own next/prev pointers are deliberately left intact:
// ffit_free's merge loop keeps advancing through a node after removing it.
static void _ffit_dllist_remove(page_t *page)
{
    // assert remove is in the dllist
    assert(page);
    if (page == _FREED) {
        _FREED = page->next;
        // fix: the new head must not keep a dangling prev pointer to the
        // node that was just unlinked (it previously pointed at `page`)
        if (_FREED)
            _FREED->prev = NULL;
    } else {
        if (page->next)
            page->next->prev = page->prev;
        if (page->prev)
            page->prev->next = page->next;
    }
}
// init an empty free area: no blocks, zero free pages
static void ffit_init()
{
    _FREED = NULL;
    _NFREE = 0;
}
// add new mem block: hand n consecutive pages starting at base to the
// allocator as one free block (base carries the block size in ->nfree)
static void ffit_addMem(page_t *base, size_t n)
{
    assert(n);
    page_t *p, *end = base + n;
    for (p = base; p != end; p++) {
        // assert(page_isReserved(p)); // ??
        page_clearRef(p);
        page_clearFlags(p);
        p->nfree = 0;
    }
    // every p->nfree was just zeroed, so this sets base->nfree = n
    base->nfree += n;
    page_setFree(base);
    _NFREE += n;
    _ffit_dllist_append(base);
}
// alloc using first-fit algorithm: scan the free list in order and take
// the first block with at least n pages, splitting off the remainder.
// Returns NULL when no block is large enough.
static page_t *ffit_alloc(size_t n) // n is the number of pages
{
    assert(n);
    // no enough page
    if (n > _NFREE) return NULL;
    page_t *found = NULL;
    page_t *cur;
    for (cur = _FREED; cur; cur = cur->next) {
        if (cur->nfree >= n) { // first fit found
            found = cur;
            break;
        }
    }
    if (found) {
        _ffit_dllist_remove(found);
        if (found->nfree > n) { // cut the block
            page_t *rest = found + n;
            page_setFree(rest); // TODO: possible bug in the original code
            rest->nfree = found->nfree - n;
            _ffit_dllist_append(rest);
        }
        _NFREE -= n;
        // record the allocation size so ffit_free needs no n argument
        found->nfree = n; // NOTE: a little alteration
        page_resetFree(found);
    }
    // if (found)
    //     trace("*** allocating %p %d", found, found->nfree);
    return found;
}
// page must be allocated: returns a block to the free list, coalescing it
// with any physically adjacent free blocks found during one list pass.
// The block's size was stashed in page->nfree by ffit_alloc.
static void ffit_free(page_t *page /* , size_t n (n is loged in line 103) */)
{
    // trace("*** freeing %p %d", page, page->nfree);
    assert(page && page->nfree);
    size_t n = page->nfree;
    page_t *p, *end = page + n;
    for (p = page; p != end; p++) {
        assert(!page_isReserved(p) && !page_isFree(p));
        page_clearFlags(p);
        page_clearRef(p);
    }
    // page->nfree = n;
    page_setFree(page);
    // merge consecutive block; removed nodes keep their next pointer,
    // which this loop relies on to continue the traversal
    for (p = _FREED; p; p = p->next) {
        if (page + page->nfree == p) { // page -- p
            page->nfree += p->nfree;
            page_resetFree(p);
            p->nfree = 0;
            _ffit_dllist_remove(p);
        } else if (p + p->nfree == page) { // p -- page
            p->nfree += page->nfree;
            page_resetFree(page);
            page->nfree = 0;
            page = p;
            _ffit_dllist_remove(p); // so that you don't re-add it below
        }
    }
    _NFREE += n;
    _ffit_dllist_append(page);
}
// ffit_nfree - total number of free pages currently held by the allocator
static size_t ffit_nfree()
{
    return _NFREE;
}
// check_basic - allocator smoke test. Temporarily hides the real free
// list (so the allocator appears empty), replays alloc/free cycles on
// three single pages, then restores the saved list and frees everything.
static void check_basic()
{
    page_t *p0, *p1, *p2;
    p0 = p1 = p2 = NULL;
    assert((p0 = palloc(1)) != NULL);
    assert((p1 = palloc(1)) != NULL);
    assert((p2 = palloc(1)) != NULL);
    assert(p0 != p1 && p0 != p2 && p1 != p2);
    assert(page_getRef(p0) == 0 && page_getRef(p1) == 0 && page_getRef(p2) == 0);
    assert(page2pa(p0) < c0re_npage * PAGE_SIZE);
    assert(page2pa(p1) < c0re_npage * PAGE_SIZE);
    assert(page2pa(p2) < c0re_npage * PAGE_SIZE);
    // stash the real free list and pretend the allocator is empty
    page_t *freed = _FREED;
    _FREED = NULL;
    unsigned int nfree = _NFREE;
    _NFREE = 0;
    assert(palloc(1) == NULL);
    pfree(p0);
    pfree(p1);
    pfree(p2);
    assert(_NFREE == 3);
    assert((p0 = palloc(1)) != NULL);
    assert((p1 = palloc(1)) != NULL);
    assert((p2 = palloc(1)) != NULL);
    assert(palloc(1) == NULL);
    assert(_FREED == NULL);
    pfree(p0);
    assert(_NFREE);
    page_t *p;
    // the page just freed must be the one handed back
    assert((p = palloc(1)) == p0);
    assert(palloc(1) == NULL);
    assert(_NFREE == 0);
    // restore the real free list and release the three test pages
    _FREED = freed;
    _NFREE = nfree;
    pfree(p0);
    pfree(p1);
    pfree(p2);
}
// ffit_check - validate free-list invariants (back links consistent,
// every listed block marked free, sizes summing to the global count),
// then run the allocation smoke test.
static void ffit_check()
{
    int count = 0, total = 0;  // count is tallied but only total is asserted
    page_t *cur, *prev = NULL;
    for (cur = _FREED; cur; cur = cur->next) {
        assert(!prev || cur->prev == prev);
        assert(page_isFree(cur));
        count++;
        total += cur->nfree;
        prev = cur;
    }
    assert(total == nfpage());
    check_basic();
}
// the exported first-fit allocator vtable (selected by pmm)
const page_allocator_t page_ffit_allocator = {
    .name = "ffit",
    .init = ffit_init,
    .addMem = ffit_addMem,
    .alloc = ffit_alloc,
    .free = ffit_free,
    .nfree = ffit_nfree,
    .check = ffit_check
};
|
rod-lin/c0re
|
pub/error.h
|
<reponame>rod-lin/c0re
#ifndef _PUB_ERROR_H_
#define _PUB_ERROR_H_
/* kernel error codes -- keep in sync with list in lib/printfmt.c */
/* codes are positive; call sites return them negated (e.g. -E_NO_MEM) */
#define E_UNSPECIFIED 1 // unspecified or unknown problem
#define E_BAD_PROC 2 // process doesn't exist or otherwise
#define E_INVAL 3 // invalid parameter
#define E_NO_MEM 4 // request failed due to memory shortage
#define E_NO_FREE_PROC 5 // attempt to create a new process beyond
#define E_FAULT 6 // memory fault
/* the maximum error allowed */
#define MAXERROR 6
#endif
|
rod-lin/c0re
|
pub/x86.h
|
<reponame>rod-lin/c0re
#ifndef _PUB_X86_H_
#define _PUB_X86_H_
#include "pub/com.h"
// do_div - divide the 64-bit value n by the 32-bit base IN PLACE and
// evaluate to the remainder (required in printfmt; presumably avoids
// pulling in libgcc's 64-bit division helpers -- TODO confirm).
// Uses two chained 32-bit divl's: high word first, then the low word with
// the high remainder as the upper half of the dividend.
#define do_div(n, base) ({ \
        unsigned long __upper, __low, __high, __mod, __base; \
        __base = (base); \
        asm("" : "=a" (__low), "=d" (__high) : "A" (n)); \
        __upper = __high; \
        if (__high != 0) { \
            __upper = __high % __base; \
            __high = __high / __base; \
        } \
        asm("divl %2" : "=a" (__low), "=d" (__mod) \
            : "rm" (__base), "0" (__low), "1" (__upper)); \
        asm("" : "=A" (n) : "a" (__low), "d" (__high)); \
        __mod; \
    })
// forward declarations for the inline wrappers defined below
C0RE_INLINE uint8_t inb(uint16_t port);
C0RE_INLINE void insl(uint32_t port, void *addr, int cnt);
C0RE_INLINE void outb(uint16_t port, uint8_t data);
C0RE_INLINE void outw(uint16_t port, uint16_t data);
C0RE_INLINE void outsl(uint32_t port, const void *addr, int cnt);
C0RE_INLINE uint32_t read_ebp(void);
/* argument used for LGDT, LLDT(not used) and LIDT instructions. */
typedef struct {
    uint16_t pd_lim;   // Limit
    uint32_t pd_base;  // Base address
} C0RE_PACKED descloader_t;
C0RE_INLINE void lidt(descloader_t *pd);
C0RE_INLINE void sti(void);
C0RE_INLINE void cli(void);
C0RE_INLINE void ltr(uint16_t sel);
C0RE_INLINE uint32_t read_eflags();
C0RE_INLINE void write_eflags(uint32_t eflags);
C0RE_INLINE void lcr0(uintptr_t cr0);
C0RE_INLINE void lcr3(uintptr_t cr3);
C0RE_INLINE uintptr_t rcr0();
C0RE_INLINE uintptr_t rcr1();
C0RE_INLINE uintptr_t rcr2();
C0RE_INLINE uintptr_t rcr3();
C0RE_INLINE void invlpg(void *addr);
C0RE_INLINE void breakpoint(void);
C0RE_INLINE uint32_t read_dr(unsigned regnum);
C0RE_INLINE void write_dr(unsigned regnum, uint32_t value);
C0RE_INLINE void hlt();
/* INPUT byte */
C0RE_INLINE
uint8_t inb(uint16_t port)
{
    uint8_t data;
    asm volatile ("inb %1, %0" : "=a" (data) : "d" (port));
    return data;
}
/* INPUT int string: read cnt dwords from port into addr */
C0RE_INLINE
void insl(uint32_t port, void *addr, int cnt)
{
    asm volatile (
        "cld;"
        "repne; insl;"
        : "=D" (addr), "=c" (cnt)
        : "d" (port), "0" (addr), "1" (cnt)
        : "memory", "cc"
    );
}
/* OUTPUT byte */
C0RE_INLINE
void outb(uint16_t port, uint8_t data)
{
    asm volatile ("outb %0, %1" :: "a" (data), "d" (port));
}
/* OUTPUT short */
C0RE_INLINE
void outw(uint16_t port, uint16_t data)
{
    asm volatile ("outw %0, %1" :: "a" (data), "d" (port));
}
/* OUTPUT int string: write cnt dwords from addr to port */
C0RE_INLINE
void outsl(uint32_t port, const void *addr, int cnt)
{
    asm volatile (
        "cld;"
        "repne; outsl;"
        : "=S" (addr), "=c" (cnt)
        : "d" (port), "0" (addr), "1" (cnt)
        : "memory", "cc"
    );
}
/* read the current frame pointer (used by the stack backtracer) */
C0RE_INLINE
uint32_t read_ebp()
{
    uint32_t ebp;
    asm volatile ("movl %%ebp, %0" : "=r" (ebp));
    return ebp;
}
/* load IDT */
C0RE_INLINE
void lidt(descloader_t *pd)
{
    asm volatile ("lidt (%0)" :: "r" (pd));
}
/* enable maskable interrupts */
C0RE_INLINE
void sti(void)
{
    asm volatile ("sti");
}
/* disable maskable interrupts */
C0RE_INLINE
void cli(void)
{
    asm volatile ("cli");
}
/* load the task register with selector sel */
C0RE_INLINE
void ltr(uint16_t sel)
{
    asm volatile ("ltr %0" :: "r" (sel));
}
/* software breakpoint (int3) */
C0RE_INLINE
void breakpoint()
{
    asm volatile ("int $3");
}
/* read debug register regnum (0-3, 6, 7); others yield 0 */
C0RE_INLINE
uint32_t read_dr(unsigned regnum)
{
    uint32_t value = 0;
    switch (regnum) {
        case 0: asm volatile ("movl %%db0, %0" : "=r" (value)); break;
        case 1: asm volatile ("movl %%db1, %0" : "=r" (value)); break;
        case 2: asm volatile ("movl %%db2, %0" : "=r" (value)); break;
        case 3: asm volatile ("movl %%db3, %0" : "=r" (value)); break;
        case 6: asm volatile ("movl %%db6, %0" : "=r" (value)); break;
        case 7: asm volatile ("movl %%db7, %0" : "=r" (value)); break;
    }
    return value;
}
/* write debug register regnum (0-3, 6, 7); others are silently ignored */
C0RE_INLINE
void write_dr(unsigned regnum, uint32_t value)
{
    switch (regnum) {
        case 0: asm volatile ("movl %0, %%db0" :: "r" (value)); break;
        case 1: asm volatile ("movl %0, %%db1" :: "r" (value)); break;
        case 2: asm volatile ("movl %0, %%db2" :: "r" (value)); break;
        case 3: asm volatile ("movl %0, %%db3" :: "r" (value)); break;
        case 6: asm volatile ("movl %0, %%db6" :: "r" (value)); break;
        case 7: asm volatile ("movl %0, %%db7" :: "r" (value)); break;
    }
}
/* read EFLAGS via push/pop through the stack */
C0RE_INLINE
uint32_t read_eflags()
{
    uint32_t eflags;
    asm volatile ("pushfl; popl %0" : "=r" (eflags));
    return eflags;
}
/* write EFLAGS via push/pop through the stack */
C0RE_INLINE
void write_eflags(uint32_t eflags)
{
    asm volatile ("pushl %0; popfl" :: "r" (eflags));
}
/* load CR0 */
C0RE_INLINE
void lcr0(uintptr_t cr0)
{
    asm volatile ("mov %0, %%cr0" :: "r" (cr0) : "memory");
}
/* load CR3 (switch page directory) */
C0RE_INLINE
void lcr3(uintptr_t cr3)
{
    asm volatile ("mov %0, %%cr3" :: "r" (cr3) : "memory");
}
C0RE_INLINE
uintptr_t rcr0()
{
    uintptr_t cr0;
    asm volatile ("mov %%cr0, %0" : "=r" (cr0) :: "memory");
    return cr0;
}
C0RE_INLINE
uintptr_t rcr1()
{
    uintptr_t cr1;
    asm volatile ("mov %%cr1, %0" : "=r" (cr1) :: "memory");
    return cr1;
}
/* CR2 holds the faulting linear address after a page fault */
C0RE_INLINE
uintptr_t rcr2()
{
    uintptr_t cr2;
    asm volatile ("mov %%cr2, %0" : "=r" (cr2) :: "memory");
    return cr2;
}
C0RE_INLINE
uintptr_t rcr3()
{
    uintptr_t cr3;
    asm volatile ("mov %%cr3, %0" : "=r" (cr3) :: "memory");
    return cr3;
}
/* invalidate the TLB entry for one linear address */
C0RE_INLINE
void invlpg(void *addr)
{
    asm volatile ("invlpg (%0)" :: "r" (addr) : "memory");
}
/* halt the CPU until the next interrupt */
C0RE_INLINE
void hlt()
{
    asm volatile ("hlt");
}
// hand-written x86 string/memory primitives; each guarded so a C fallback
// can be selected by leaving the __HAVE_ARCH_* macro undefined
C0RE_INLINE int __strcmp(const char *s1, const char *s2);
C0RE_INLINE char *__strcpy(char *dst, const char *src);
C0RE_INLINE void *__memset(void *s, char c, size_t n);
C0RE_INLINE void *__memmove(void *dst, const void *src, size_t n);
C0RE_INLINE void *__memcpy(void *dst, const void *src, size_t n);
#ifndef __HAVE_ARCH_STRCMP
#define __HAVE_ARCH_STRCMP
/* compare byte-by-byte with lodsb/scasb; result is -1/0/+1-style sign */
C0RE_INLINE
int __strcmp(const char *s1, const char *s2)
{
    int d0, d1, ret;
    asm volatile (
        "1: lodsb;"
        "scasb;"
        "jne 2f;"
        "testb %%al, %%al;"
        "jne 1b;"
        "xorl %%eax, %%eax;"
        "jmp 3f;"
        "2: sbbl %%eax, %%eax;"
        "orb $1, %%al;"
        "3:"
        : "=a" (ret), "=&S" (d0), "=&D" (d1)
        : "1" (s1), "2" (s2)
        : "memory"
    );
    return ret;
}
#endif /* __HAVE_ARCH_STRCMP */
#ifndef __HAVE_ARCH_STRCPY
#define __HAVE_ARCH_STRCPY
/* copy including the terminating NUL via lodsb/stosb */
C0RE_INLINE
char *__strcpy(char *dst, const char *src)
{
    int d0, d1, d2;
    asm volatile (
        "1: lodsb;"
        "stosb;"
        "testb %%al, %%al;"
        "jne 1b;"
        : "=&S" (d0), "=&D" (d1), "=&a" (d2)
        : "0" (src), "1" (dst) : "memory"
    );
    return dst;
}
#endif /* __HAVE_ARCH_STRCPY */
#ifndef __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMSET
/* fill n bytes with c using rep stosb */
C0RE_INLINE
void *__memset(void *s, char c, size_t n)
{
    int d0, d1;
    asm volatile (
        "rep; stosb;"
        : "=&c" (d0), "=&D" (d1)
        : "0" (n), "a" (c), "1" (s)
        : "memory"
    );
    return s;
}
#endif /* __HAVE_ARCH_MEMSET */
#ifndef __HAVE_ARCH_MEMMOVE
#define __HAVE_ARCH_MEMMOVE
/* overlap-safe copy: forward when dst precedes src, otherwise backward
 * with DF set (and cleared again before returning).
 * NOTE(review): comparing unrelated pointers and forming src+n-1 for
 * n == 0 are technically UB in ISO C, though benign under this compiler
 * and flat memory model -- inherited pattern, confirm before porting. */
C0RE_INLINE
void *__memmove(void *dst, const void *src, size_t n)
{
    if (dst < src) {
        return __memcpy(dst, src, n);
    }
    int d0, d1, d2;
    asm volatile (
        "std;"
        "rep; movsb;"
        "cld;"
        : "=&c" (d0), "=&S" (d1), "=&D" (d2)
        : "0" (n), "1" (n - 1 + src), "2" (n - 1 + dst)
        : "memory"
    );
    return dst;
}
#endif /* __HAVE_ARCH_MEMMOVE */
#ifndef __HAVE_ARCH_MEMCPY
#define __HAVE_ARCH_MEMCPY
/* copy n/4 dwords with rep movsl, then the remaining n%4 bytes */
C0RE_INLINE
void *__memcpy(void *dst, const void *src, size_t n)
{
    int d0, d1, d2;
    asm volatile (
        "rep; movsl;"
        "movl %4, %%ecx;"
        "andl $3, %%ecx;"
        "jz 1f;"
        "rep; movsb;"
        "1:"
        : "=&c" (d0), "=&D" (d1), "=&S" (d2)
        : "0" (n / 4), "g" (n), "1" (dst), "2" (src)
        : "memory"
    );
    return dst;
}
#endif /* __HAVE_ARCH_MEMCPY */
#endif
|
rod-lin/c0re
|
pub/atomic.h
|
<gh_stars>1-10
#ifndef _PUB_ATOMIC_H_
#define _PUB_ATOMIC_H_
#include "pub/com.h"
/* Atomic operations that C can't guarantee us. Useful for resource counting etc.. */
/**
* NOTE: bt*l instructions respectively do an action on the nth bit of *(int *)addr
* btsl: set the bit
* btrl: reset the bit
* btcl: change/flip the bit
* btl: read the bit
**/
C0RE_INLINE void btsl(int n, volatile void *addr);
C0RE_INLINE void btrl(int n, volatile void *addr);
C0RE_INLINE void btcl(int n, volatile void *addr);
C0RE_INLINE bool btl(int n, volatile void *addr);
/**
 * btsl (a.k.a. set_bit) - set bit n in memory
 * @n: the bit to set
 * @addr: the address to start counting from
 *
 * Note that @n may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 **/
C0RE_INLINE
void btsl(int n, volatile void *addr)
{
    asm volatile ("btsl %1, %0" :"=m" (*(volatile long *)addr) : "Ir" (n));
}
/**
 * btrl (a.k.a. clear_bit) - clear bit n in memory
 * @n: the bit to clear
 * @addr: the address to start counting from
 **/
C0RE_INLINE
void btrl(int n, volatile void *addr)
{
    asm volatile ("btrl %1, %0" :"=m" (*(volatile long *)addr) : "Ir" (n));
}
/**
 * btcl (a.k.a. change_bit) - toggle bit n in memory
 * @n: the bit to change
 * @addr: the address to start counting from
 **/
C0RE_INLINE
void btcl(int n, volatile void *addr)
{
    asm volatile ("btcl %1, %0" :"=m" (*(volatile long *)addr) : "Ir" (n));
}
/**
 * btl (a.k.a. test_bit) - determine whether bit n is set
 * @n: the bit to test
 * @addr: the address to count from
 **/
C0RE_INLINE
bool btl(int n, volatile void *addr)
{
    int oldbit;
    asm volatile ("btl %2, %1; sbbl %0, %0" : "=r" (oldbit) : "m" (*(volatile long *)addr), "Ir" (n));
    return oldbit != 0;
}
#endif
|
rod-lin/c0re
|
kernel/fs/swapfs.c
|
#include "mem/mmu.h"
#include "driver/ide.h"
#include "fs/fs.h"
#include "fs/swapfs.h"
#include "mem/swap.h"
// swapfs_init - probe the swap device; returns its capacity in PAGES
// (0 when the device is absent, which disables swapping upstream).
size_t swapfs_init()
{
    assert((PAGE_SIZE % FS_SECTOR_SIZE) == 0);
    if (!ide_device_valid(FS_SWAP_DEV_NO)) {
        trace("swap fs not available");
        return 0;
    }
    // NOTE: how many sectors per page
    return ide_device_size(FS_SWAP_DEV_NO) / (PAGE_SIZE / FS_SECTOR_SIZE);
    // NOTE: maximum number of pages in the device
}
// swapfs_read - load the page stored at `entry`'s swap slot into `page`
// (entry offset * sectors-per-page gives the device sector)
int swapfs_read(swap_entry_t entry, page_t *page)
{
    return ide_read_secs(FS_SWAP_DEV_NO, swap_getOffset(entry) * FS_PAGE_NSECTOR,
                         page2kva(page), FS_PAGE_NSECTOR);
}
// swapfs_write - store `page`'s contents into `entry`'s swap slot
int swapfs_write(swap_entry_t entry, page_t *page)
{
    return ide_write_secs(FS_SWAP_DEV_NO, swap_getOffset(entry) * FS_PAGE_NSECTOR,
                          page2kva(page), FS_PAGE_NSECTOR);
}
|
rod-lin/c0re
|
pub/string.h
|
#ifndef _PUB_STRING_H_
#define _PUB_STRING_H_
/* freestanding replacements for the usual <string.h> routines */
#include "pub/com.h"
size_t strlen(const char *s);
size_t strnlen(const char *s, size_t len);
char *strcpy(char *dst, const char *src);
char *strncpy(char *dst, const char *src, size_t len);
int strcmp(const char *s1, const char *s2);
int strncmp(const char *s1, const char *s2, size_t n);
char *strchr(const char *s, char c);
char *strfind(const char *s, char c);
long strtol(const char *s, char **endptr, int base);
void *memset(void *s, char c, size_t n);
void *memmove(void *dst, const void *src, size_t n);
void *memcpy(void *dst, const void *src, size_t n);
int memcmp(const void *v1, const void *v2, size_t n);
#endif /* _PUB_STRING_H_ */
|
rod-lin/c0re
|
kernel/mem/swap.h
|
#ifndef _KERNEL_MEM_SWAP_H_
#define _KERNEL_MEM_SWAP_H_
#include "pub/com.h"
#include "mem/mmu.h"
#include "mem/pmm.h"
#include "mem/vmm.h"
#define SWAP_MAX_RETRY_TIME 16
// a non-present pte reused as a swap slot reference; swap_getOffset
// extracts the slot number from bits 8 and up (entry >> 8)
typedef pte_t swap_entry_t;
// pluggable page-replacement policy; all hooks return 0 on success
typedef struct {
    const char *name;
    int (*init)();
    int (*initVMASet)(vma_set_t *set);          // attach per-address-space data
    int (*tick)(vma_set_t *set);                // periodic hook (e.g. from timer)
    int (*mapSwappable)(vma_set_t *set, uintptr_t addr, page_t *page, int swap_in);
    int (*setUnswappable)(vma_set_t *set, uintptr_t addr);
    int (*swapOut)(vma_set_t *set, page_t **result, int in_tick); // pick a victim
    int (*check)();                             // self-test
} swap_manager_t;
#define SWAP_MAX_OFFSET_LIMIT (1 << 24)
size_t swap_getOffset(swap_entry_t entry);
// swap_init_ok gates the whole subsystem; queried before any swap call
C0RE_INLINE
bool swap_hasInit()
{
    extern bool swap_init_ok;
    return swap_init_ok;
}
C0RE_INLINE
void swap_disable()
{
    extern bool swap_init_ok;
    swap_init_ok = false;
}
C0RE_INLINE
void swap_enable()
{
    extern bool swap_init_ok;
    swap_init_ok = true;
}
/* some swap interfaces(a redirect from swap manager) */
int swap_init();
int swap_initVMASet(vma_set_t *set);
int swap_tick(vma_set_t *set);
int swap_mapSwappable(vma_set_t *set, uintptr_t addr, page_t *page, int swap_in);
int swap_setUnswappable(vma_set_t *set, uintptr_t addr);
int swap_out(vma_set_t *set, int n, int in_tick);
int swap_in(vma_set_t *set, uintptr_t addr, page_t **result);
#endif
|
rod-lin/c0re
|
kernel/lib/debug.h
|
<filename>kernel/lib/debug.h
#ifndef _KERNEL_LIB_DEBUG_H_
#define _KERNEL_LIB_DEBUG_H_
#include "driver/console.h"
#include "lib/io.h"
// trace - kprintf plus a trailing newline.
// Wrapped in do { } while (0) so the macro expands to exactly one
// statement: the old two-statement expansion silently broke call sites
// like `if (x) trace(...); else ...` (only kprintf was conditional).
#define trace(...) \
    do { \
        kprintf(__VA_ARGS__); \
        kputc('\n'); \
    } while (0)
void _panic(char *file, int line, const char *fmt, ...);
#define panic(...) _panic(__FILE__, __LINE__, __VA_ARGS__)
// assert - panic with the stringized condition when it is false.
// do { } while (0) removes the dangling-else hazard of the old bare-if
// expansion (`if (a) assert(b); else c;` bound the else to the macro).
#define assert(cond) \
    do { \
        if (!(cond)) { \
            panic("assertion error: %s", #cond); \
        } \
    } while (0)
#define DBG_TAB "    "
#endif
|
rod-lin/c0re
|
kernel/init/init.c
|
<reponame>rod-lin/c0re<filename>kernel/init/init.c
#include "pub/string.h"
#include "lib/debug.h"
#include "mem/pmm.h"
#include "mem/vmm.h"
#include "mem/swap.h"
#include "intr/trap.h"
#include "driver/pic.h"
#include "driver/clock.h"
#include "driver/ide.h"
// c0re_init - kernel entry after boot: bring subsystems up in dependency
// order (console first so tracing works, memory before anything that
// allocates, interrupts enabled last), then idle forever.
void c0re_init() {
    // extern char bss_begin[], bss_end[];
    // memset(bss_begin, 0, bss_end - bss_begin);
    // initialize bss -- in case someone forget to init?
    cons_init();
    trace("c0re starting");
    swap_disable();     // keep swap off until swap_init decides otherwise
    pmm_init();
    pic_init();
    idt_init();
    vmm_init();
    ide_init();
    swap_init();
    clock_init();
    intr_enable();
    // idle loop; all further work happens in interrupt handlers
    while (1) ;
}
|
rod-lin/c0re
|
kernel/driver/console.h
|
<filename>kernel/driver/console.h
#ifndef _KERNEL_DRIVER_CONSOLE_H_
#define _KERNEL_DRIVER_CONSOLE_H_
// cons_init - initialize the console device(s)
void cons_init();
// cons_putc - write one character to the console
void cons_putc(int c);
// cons_getc - read one character from console input
int cons_getc();
#endif
|
rod-lin/c0re
|
boot/bootasm.h
|
<reponame>rod-lin/c0re
#ifndef _BOOT_BOOTASM_H_
#define _BOOT_BOOTASM_H_
#include "kernel/mem/mmu.h"
#endif
|
rod-lin/c0re
|
pub/com.h
|
#ifndef _PUB_COM_H_
#define _PUB_COM_H_
// C0RE_INLINE - strongest "always inline, internal linkage" spelling the
// detected compiler supports
#ifdef __GNUC__
#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))
#define C0RE_INLINE static __inline__ __attribute__((always_inline))
#else
#define C0RE_INLINE static __inline__
#endif
#elif defined(_MSC_VER)
#define C0RE_INLINE static __forceinline
#elif (defined(__BORLANDC__) || defined(__WATCOMC__))
#define C0RE_INLINE static __inline
#else
#define C0RE_INLINE static inline
#endif
#define C0RE_PACKED __attribute__ ((packed))
// element count of a true array (not valid on pointers)
#define C0RE_ARRLEN(arr) (sizeof(arr) / sizeof(*(arr)))
#ifndef NULL
#define NULL ((void *)0)
#endif
#define C0RE_NOINLINE __attribute__((noinline))
#define C0RE_NORETURN __attribute__((noreturn))
// freestanding bool; NOTE(review): clashes with C23's bool keyword if the
// toolchain is ever upgraded -- confirm the build pins an older std
typedef int bool;
#define false ((bool)0)
#define true ((bool)1)
// NOTE(review): plain char's signedness is implementation-defined;
// int8_t is assumed signed here -- signed char would be exact
typedef char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t;
typedef unsigned short uint16_t;
typedef int int32_t;
typedef unsigned int uint32_t;
typedef long long int64_t;
typedef unsigned long long uint64_t;
/* *
 * pointers and addresses are 32 bits long.
 * We use pointer types to represent addresses,
 * uintptr_t to represent the numerical values of addresses.
 * */
typedef int32_t intptr_t;
typedef uint32_t uintptr_t;
/* size_t is used for memory object sizes */
typedef uintptr_t size_t;
/* used for page numbers */
typedef size_t ppn_t;
/* *
 * rounding operations (efficient when n is a power of 2)
 * round down to the nearest multiple of n
 * */
#define ROUNDDOWN(a, n) ({ \
        size_t __a = (size_t)(a); \
        (typeof(a))(__a - __a % (n)); \
    })
/* round up to the nearest multiple of n */
#define ROUNDUP(a, n) ({ \
        size_t __n = (size_t)(n); \
        (typeof(a))(ROUNDDOWN((size_t)(a) + __n - 1, __n)); \
    })
/* return the offset of 'member' relative to the beginning of a struct type */
#define offsetof(type, member) \
    ((size_t)(&((type *)0)->member))
/* *
 * to_struct - get the struct from a ptr
 * @ptr: a struct pointer of member
 * @type: the type of the struct this is embedded in
 * @member: the name of the member within the struct
 * */
#define to_struct(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#endif
|
rod-lin/c0re
|
kernel/mem/swap.c
|
#include "pub/com.h"
#include "lib/debug.h"
#include "fs/swapfs.h"
#include "mem/swap.h"
#include "mem/smfifo.h"
#include "mem/pmm.h"
#include "mem/vmm.h"
/* Exclusive upper bound on valid swap slot numbers; set by swap_init(). */
static size_t max_swap_offset;

/* Decode the swap-slot number stored in a swap entry (bits 8 and up).
 * Slot 0 is reserved and anything at or beyond max_swap_offset is
 * corrupt; either case is a fatal kernel bug, so we panic. */
size_t swap_getOffset(swap_entry_t entry)
{
    size_t slot = entry >> 8;

    if (slot == 0 || slot >= max_swap_offset) {
        panic("invalid swap_entry_t = %08x", entry);
    }

    return slot;
}
// the valid vaddr for check is between 0~CHECK_VALID_VADDR-1
#define CHECK_VALID_VIR_PAGE_NUM 5
#define BEING_CHECK_VALID_VADDR 0X1000
#define CHECK_VALID_VADDR ((CHECK_VALID_VIR_PAGE_NUM + 1) * 0x1000)
// the max number of valid physical pages for check
#define CHECK_VALID_PHY_PAGE_NUM 4
// the max access seq number
#define MAX_SEQ_NO 10
// active page-replacement policy; currently hard-wired to FIFO
static swap_manager_t *swap_man = &swap_manager_fifo;
// NOTE(review): never written in this file -- confirm where (or whether)
// it is set after a successful swap_init().
bool swap_init_ok = 0;
// scratch arrays used by the self-test to record the access sequence
unsigned int swap_page[CHECK_VALID_VIR_PAGE_NUM];
unsigned int swap_in_seq_no[MAX_SEQ_NO],
             swap_out_seq_no[MAX_SEQ_NO];
static void check_swap();
/* Initialize the swap subsystem.
 * Probes the swap file system; when no usable swap area exists the
 * subsystem is disabled and 0 is returned (not an error). Otherwise the
 * slot-count is sanity-checked, the active policy is initialized, swap is
 * enabled and the built-in self-test is run.
 * Returns 0 on success or the policy's init error code. */
int swap_init()
{
    max_swap_offset = swapfs_init();

    /* zero slots means there is no swap area at all */
    if (max_swap_offset == 0) {
        trace("swap disabled"); /* fixed typo: was "diabled" */
        swap_disable();
        return 0;
    }

    if (!(1024 <= max_swap_offset && max_swap_offset < SWAP_MAX_OFFSET_LIMIT)) {
        panic("bad max_swap_offset %08x", max_swap_offset);
    }

    int r = swap_man->init();

    if (r == 0) {
        swap_enable();
        trace("swap: init manager = %s", swap_man->name);
        check_swap();
    }

    return r;
}
// Forward per-VMA-set policy setup to the active swap manager.
int swap_initVMASet(vma_set_t *set)
{
    return swap_man->initVMASet(set);
}
// Forward the periodic tick to the policy (a no-op for FIFO).
int swap_tick(vma_set_t *set)
{
    return swap_man->tick(set);
}
// Register `page` (mapped at addr) as a future eviction candidate.
int swap_mapSwappable(vma_set_t *set, uintptr_t addr, page_t *page, int swap_in)
{
    return swap_man->mapSwappable(set, addr, page, swap_in);
}
// Exclude the page at addr from future eviction.
int swap_setUnswappable(vma_set_t *set, uintptr_t addr)
{
    return swap_man->setUnswappable(set, addr);
}
// NOTE(review): declared volatile but never incremented in this file --
// confirm who reads/writes it.
volatile unsigned int swap_out_num = 0;
// Evict up to n pages from `set`.
// For each victim chosen by the policy: write the frame to its swap slot,
// replace the PTE with the swap entry, free the frame, and invalidate the
// TLB entry. Stops early when the policy has no victim.
// Returns the number of loop iterations completed (failed disk writes are
// counted too; the page is simply re-registered as swappable).
int swap_out(vma_set_t *set, int n, int in_tick)
{
    int i;
    for (i = 0; i != n; i++) {
        uintptr_t v;
        page_t *page;
        trace("swap: call swap_out_victim, i %d", i);
        // ask the replacement policy (e.g. FIFO) for a victim
        int r = swap_man->swapOut(set, &page, in_tick);
        if (r) {
            trace("swap: call swap_out_victim failed, i %d", i);
            break;
        }
        assert(!page_isReserved(page));
        trace("swap: choose victim page 0x%08x", page);
        v = page->pra_vaddr;
        pte_t *ptep = get_pte(set->pgdir, v, 0);
        assert(*ptep & PTE_FLAG_P); // table present
        // swap entry = (slot << 8), slot derived from the victim's virtual
        // page number. NOTE(review): this assumes a fixed vaddr-to-slot
        // mapping -- confirm against the swapfs layout.
        if (swapfs_write((page->pra_vaddr / PAGE_SIZE + 1) << 8, page)) {
            trace("swap: failed to save victim");
            // disk write failed: keep the page resident and hand it back
            // to the policy so it stays an eviction candidate
            swap_man->mapSwappable(set, v, page, 0);
            continue;
        } else {
            trace("swap: i %d, store page in vaddr 0x%x to disk swap entry %d",
                  i, v, page->pra_vaddr / PAGE_SIZE + 1);
            // the PTE now names the swap slot instead of a present frame
            *ptep = (page->pra_vaddr / PAGE_SIZE + 1) << 8;
            pfree(page);
        }
        tlb_invalidate(set->pgdir, v);
    }
    return i;
}
// Load the page backing faulting address `addr` back in from swap.
// Allocates a fresh frame, reads the slot named by the PTE's swap entry
// into it, and returns the frame via *presult.
// NOTE(review): a swapfs_read failure is only traced -- the (stale) frame
// is still handed back and 0 is returned, so callers cannot observe the
// error. Confirm whether that is intended.
int swap_in(vma_set_t *set, uintptr_t addr, page_t **presult)
{
    page_t *result = palloc_s(1); // panics on OOM
    pte_t *ptep = get_pte(set->pgdir, addr, 0);
    int r;
    if ((r = swapfs_read((*ptep), result))) {
        trace("swap: failed to swap in");
    }
    trace("swap: load disk swap entry %d with swap_page in vadr 0x%x", (*ptep) >> 8, addr);
    *presult = result;
    return 0;
}
C0RE_INLINE
void check_content_set()
{
    // Fault in pages 0x1000..0x4000 and verify the fault counter: a write
    // to a fresh page adds exactly one fault, while a second write to a
    // different offset in the same page adds none.
    size_t init = vmm_getPageFaultCount();
    *(unsigned char *)0x1000 = 0x0a;
    assert(vmm_getPageFaultCount() - init == 1);
    *(unsigned char *)0x1010 = 0x0a; // same page: no new fault
    assert(vmm_getPageFaultCount() - init == 1);
    *(unsigned char *)0x2000 = 0x0b;
    assert(vmm_getPageFaultCount() - init == 2);
    *(unsigned char *)0x2010 = 0x0b;
    assert(vmm_getPageFaultCount() - init == 2);
    *(unsigned char *)0x3000 = 0x0c;
    assert(vmm_getPageFaultCount() - init == 3);
    *(unsigned char *)0x3010 = 0x0c;
    assert(vmm_getPageFaultCount() - init == 3);
    *(unsigned char *)0x4000 = 0x0d;
    assert(vmm_getPageFaultCount() - init == 4);
    *(unsigned char *)0x4010 = 0x0d;
    assert(vmm_getPageFaultCount() - init == 4);
}
// Run the active policy's replacement-order self-test (see smfifo_check).
C0RE_INLINE
int check_content_access()
{
    return swap_man->check();
}
static page_t *check_rp[CHECK_VALID_PHY_PAGE_NUM];
static pte_t *check_ptep[CHECK_VALID_PHY_PAGE_NUM];
// static unsigned int check_swap_addr[CHECK_VALID_VIR_PAGE_NUM];
extern free_area_t free_area;
#define _FREED (free_area.freed)
#define _NFREE (free_area.nfree)
// End-to-end self-test of the swap subsystem, run once from swap_init().
// Snapshots the allocator's free list, builds a small VMA set over vaddrs
// 0x1000..0x5fff backed by exactly CHECK_VALID_PHY_PAGE_NUM frames, drives
// the replacement policy through a known access pattern, then restores the
// original memory environment.
static void check_swap()
{
    // snapshot the free list: number of blocks and total free pages
    int ret, count = 0, total = 0, i;
    page_t *cur;
    for (cur = _FREED; cur; cur = cur->next) {
        assert(page_isFree(cur));
        count++;
        total += cur->nfree;
    }
    assert(total == nfpage());
    trace("check begin: swap, count %d, total %d", count, total);
    // now set up the physical-pages environment
    extern vma_set_t *c0re_check_vma_set;
    vma_set_t *set = vma_set_new();
    assert(set);
    c0re_check_vma_set = set;
    pde_t *pgdir = set->pgdir = c0re_pgdir;
    assert(pgdir[0] == 0); // vaddr 0..4MB must be unmapped before the test
    vma_t*vma = vma_new(BEING_CHECK_VALID_VADDR, CHECK_VALID_VADDR, VMA_FLAG_WRITE | VMA_FLAG_READ);
    assert(vma);
    vma_set_insert(set, vma);
    // force allocation of the page table covering vaddr 0~4MB
    kprintf("setting up page table for vaddr 0X1000 ... ");
    pte_t *temp_ptep = NULL;
    temp_ptep = get_pte(set->pgdir, BEING_CHECK_VALID_VADDR, 1);
    assert(temp_ptep!= NULL);
    trace("finished");
    // take the frames the test will juggle out of the allocator
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        check_rp[i] = palloc(1);
        assert(check_rp[i]);
        assert(!page_isFree(check_rp[i]));
    }
    // install a private free list holding exactly those frames, so the page
    // faults below can only ever be served from them
    page_t *freed = _FREED;
    unsigned int nfree = _NFREE;
    _FREED = NULL;
    _NFREE = 0;
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        pfree(check_rp[i]);
    }
    assert(_NFREE == CHECK_VALID_PHY_PAGE_NUM);
    trace("setting up init env ... ");
    // set up the initial vir_page<->phy_page environment for the policy
    check_content_set();
    assert(_NFREE == 0); // every private frame is now in use
    for(i = 0; i < MAX_SEQ_NO; i++)
        swap_out_seq_no[i] = swap_in_seq_no[i] = -1;
    // each faulted-in vaddr must be backed by the matching private frame
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        check_ptep[i] = 0;
        check_ptep[i] = get_pte(pgdir, (i + 1) * 0x1000, 0);
        assert(check_ptep[i] != NULL);
        assert(pte2page(*check_ptep[i]) == check_rp[i]);
        assert(*check_ptep[i] & PTE_FLAG_P);
    }
    trace("finished");
    // now access the virtual pages to exercise the replacement algorithm
    ret = check_content_access();
    assert(ret == 0);
    // restore kernel mem env
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        pfree(check_rp[i]);
    }
    vma_set_free(set);
    _NFREE = nfree;
    _FREED = freed;
    // NOTE(review): count/total are decremented against the restored list
    // but never re-checked (e.g. assert(count == 0 && total == 0)); the
    // trace below is the only signal -- confirm whether the asserts were
    // intentionally dropped.
    for (cur = _FREED; cur; cur = cur->next) {
        count--;
        total -= cur->nfree;
    }
    trace("check success: swap, count %d, total %d", count, total);
}
|
rod-lin/c0re
|
kernel/lib/io.h
|
#ifndef _KERNEL_LIB_IO_H_
#define _KERNEL_LIB_IO_H_
#include "pub/stdarg.h"
#include "driver/console.h"
// console character output, aliased straight to the console driver
#define kputc cons_putc
// bounded / unbounded console string output
int kputns(char *str, int max);
int kputs(char *str);
// printf-style formatted console output (kprintf is the varargs front end)
int vkprintf(const char *fmt, va_list ap);
int kprintf(const char *fmt, ...);
#endif
|
rod-lin/c0re
|
kernel/mem/pmm.h
|
<reponame>rod-lin/c0re<gh_stars>1-10
#ifndef _KERNEL_MEM_PMM_H_
#define _KERNEL_MEM_PMM_H_
#include "pub/com.h"
#include "lib/debug.h"
#include "mem/mmu.h"
typedef struct {
const char *name;
void (*init)(void); // init
void (*addMem)(page_t *, size_t); // add new mem block
page_t *(*alloc)(size_t);
void (*free)(page_t *);
size_t (*nfree)(); // total free page count
void (*check)();
} page_allocator_t;
typedef struct {
page_t *freed; // free page header
unsigned int nfree; // # of free pages in this free list(!!NOTE NOT # of free blocks)
} free_area_t;
extern page_t *c0re_pages;
extern size_t c0re_npage;
extern pde_t *c0re_pgdir;
extern uintptr_t c0re_pgdir_pa;
/**
* PADDR - takes a kernel virtual address (an address that points above KERNBASE),
* where the machine's maximum 256MB of physical memory is mapped and returns the
* corresponding physical address. it panicks if you pass it a non-kernel virtual address.
**/
#define PADDR(kva) ({ \
uintptr_t __m_kva = (uintptr_t)(kva); \
if (__m_kva < KERNEL_BASE) { \
panic("PADDR called with invalid kva %08lx", __m_kva); \
} \
__m_kva - KERNEL_BASE; \
})
/**
* KADDR - takes a physical address and returns the corresponding kernel virtual
* address. it panicks if you pass an invalid physical address.
**/
#define KADDR(pa) ({ \
uintptr_t __m_pa = (pa); \
size_t __m_ppn = PAGE_NUMBER(__m_pa); \
if (__m_ppn >= c0re_npage) { \
panic("KADDR called with invalid pa %08lx", __m_pa); \
} \
(void *)(__m_pa + KERNEL_BASE); \
})
// ppn = index of the descriptor within the global c0re_pages array, which
// by construction equals the physical page number.
C0RE_INLINE
page_number_t page2ppn(page_t *page)
{
    return page - c0re_pages;
}
// pa for physical address
C0RE_INLINE
uintptr_t page2pa(page_t *page)
{
    return page2ppn(page) << PAGE_SHIFT;
}
// Map a physical address back to its page descriptor; panics when the
// address lies beyond managed physical memory.
C0RE_INLINE
page_t *pa2page(uintptr_t pa)
{
    if (PAGE_NUMBER(pa) >= c0re_npage) {
        panic("pa2page called with invalid physical address");
    }
    return &c0re_pages[PAGE_NUMBER(pa)];
}
// kva = kernel virtual address (pa + KERNEL_BASE; see KADDR above)
C0RE_INLINE
void *page2kva(page_t *page)
{
    return KADDR(page2pa(page));
}
C0RE_INLINE
page_t *kva2page(void *kva)
{
    return pa2page(PADDR(kva));
}
// Page descriptor referenced by a page-table entry; the entry must have
// its present bit set, otherwise this is a fatal kernel bug.
C0RE_INLINE
page_t *pte2page(pte_t pte)
{
    if (!(pte & PTE_FLAG_P)) {
        panic("pte2page called with invalid page table entry");
    }
    return pa2page(PTE_ADDR(pte));
}
// Page descriptor referenced by a page-directory entry.
// NOTE(review): no present-bit check here, unlike pte2page -- confirm
// that callers guarantee the PDE is valid.
C0RE_INLINE
page_t *pde2page(pde_t pde)
{
    return pa2page(PDE_ADDR(pde));
}
void pmm_init();
pte_t *get_pte(pde_t *pgdir, uintptr_t la, bool create);
page_t *palloc(size_t n);
void pfree(page_t *base);
size_t nfpage(); // # of free pages
/* "Safe" page allocation: identical to palloc(n) except that allocation
 * failure panics instead of returning NULL, so callers never need to
 * check the result. */
C0RE_INLINE
page_t *palloc_s(size_t n)
{
    page_t *pg = palloc(n);

    if (pg == NULL) {
        panic("unable to alloc page");
    }

    return pg;
}
void *kmalloc(size_t n);
void kfree(void *ptr, size_t n);
page_t *pgdir_palloc(pde_t *pgdir, uintptr_t la, uint32_t perm);
int page_insert(pde_t *pgdir, page_t *page, uintptr_t la, uint32_t perm);
void page_remove(pde_t *pgdir, uintptr_t la);
void tlb_invalidate(pde_t *pgdir, uintptr_t la);
#endif
|
rod-lin/c0re
|
tool/sign.c
|
// format bootloader sector
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/stat.h>
/* Print an error / progress message prefixed with the tool name.
 * Wrapped in do { } while (0) so each macro expands to exactly one
 * statement: the original three-statement expansion broke usage inside
 * unbraced conditionals, e.g. `if (x) err("..."); else ...` would attach
 * the else to the wrong branch and only guard the first fprintf. */
#define err(...) \
    do { \
        fprintf(stderr, "sign error: "); \
        fprintf(stderr, __VA_ARGS__); \
        fprintf(stderr, "\n"); \
    } while (0)
#define trace(...) \
    do { \
        fprintf(stderr, "sign: "); \
        fprintf(stderr, __VA_ARGS__); \
        fprintf(stderr, "\n"); \
    } while (0)
// Pad a bootloader image (at most 510 bytes) to 512 bytes and stamp the
// 0x55AA boot signature into the last two bytes.
// usage: sign <input filename> <output filename>
// Returns 0 on success, -1 on any error.
int main(int argc, char **argv)
{
    struct stat st;
    if (argc != 3) {
        err("usage: %s <input filename> <output filename>", argv[0]);
        return -1;
    }
    if (stat(argv[1], &st) != 0) {
        err("error opening file '%s': %s", argv[1], strerror(errno));
        return -1;
    }
    trace("'%s' size: %ld bytes", argv[1], (long)st.st_size);
    // a boot sector is 512 bytes; the final two hold the signature
    if (st.st_size > 510) {
        err("size %ld greater than expected size 510", (long)st.st_size);
        return -1;
    }
    char buf[512];
    memset(buf, 0, sizeof(buf));
    FILE *ifp = fopen(argv[1], "rb");
    if (!ifp) {
        err("unable to open file %s", argv[1]);
        return -1;
    }
    int size = fread(buf, 1, st.st_size, ifp);
    if (size != st.st_size) {
        err("read '%s' error, size is %d", argv[1], size);
        fclose(ifp); /* was leaked on this path */
        return -1;
    }
    fclose(ifp);
    buf[510] = 0x55;
    buf[511] = 0xAA;
    FILE *ofp = fopen(argv[2], "wb+");
    if (!ofp) { /* was dereferenced without a check */
        err("unable to open file %s", argv[2]);
        return -1;
    }
    size = fwrite(buf, 1, 512, ofp);
    if (size != 512) {
        err("write '%s' error, size is %d", argv[2], size);
        fclose(ofp);
        return -1;
    }
    // fclose flushes buffered output; a failure here means a short image
    if (fclose(ofp) != 0) {
        err("write '%s' error: %s", argv[2], strerror(errno));
        return -1;
    }
    trace("build 512 bytes boot sector: '%s' success!", argv[2]);
    return 0;
}
|
rod-lin/c0re
|
kernel/mem/smfifo.c
|
#include "pub/com.h"
#include "pub/dllist.h"
#include "lib/debug.h"
#include "mem/smfifo.h"
static dllist_t fifo_head;
// FIFO policy: keeps a doubly linked list of swappable pages; new pages
// go at the head, victims come from the tail (see smfifo_swapOut).
static int smfifo_init()
{
    // no global setup needed; fifo_head is (re)initialized per VMA set
    return 0;
}
static int smfifo_initVMASet(vma_set_t *set)
{
    // NOTE(review): fifo_head is a single static list, so only one VMA
    // set can be tracked at a time -- confirm that is intended.
    dllist_init(&fifo_head);
    set->swap_data = &fifo_head;
    trace("smfifo: init fifo_head %p", (void *)&fifo_head);
    return 0;
}
// Record `page` as swappable by pushing it at the head of the FIFO list.
static int smfifo_mapSwappable(vma_set_t *set, uintptr_t addr,
                               page_t *page, int swap_in)
{
    dllist_t *head = (dllist_t *)set->swap_data;
    dllist_t *entry = &(page->pra_link);
    assert(head && entry);
    dllist_add(head, entry);
    return 0;
}
// FIFO keeps no per-page exclusion state; nothing to do.
static int smfifo_setUnswappable(vma_set_t *set, uintptr_t addr)
{
    return 0;
}
// Pick the FIFO victim: the tail of the list is the earliest-arrived page
// (smfifo_mapSwappable pushes new pages at the head). Unlinks the victim
// and returns it via *result.
static int smfifo_swapOut(vma_set_t *set, page_t **result, int in_tick)
{
    dllist_t *head = (dllist_t *)set->swap_data;
    assert(head);
    assert(in_tick == 0); // this policy only evicts on demand, not on tick
    /* take the tail = oldest entry */
    dllist_t *dll = head->prev;
    assert(head != dll); // list must not be empty
    page_t *p = dll2page(dll, pra_link);
    dllist_del(dll);
    assert(p);
    *result = p;
    return 0;
}
// FIFO ignores clock ticks; there is no aging to do.
static int smfifo_tick(vma_set_t *set)
{
    return 0;
}
// Self-test of FIFO eviction order. Pages 0x1000..0x4000 start resident in
// 4 frames; the expected page-fault deltas encode that re-touching a
// resident page never faults, touching 0x5000 evicts the oldest resident
// page, and later misses keep evicting in arrival order.
static int smfifo_check()
{
    size_t init = vmm_getPageFaultCount();
    *(unsigned char *)0x3000 = 0x0c;
    assert(vmm_getPageFaultCount() - init == 0);
    *(unsigned char *)0x1000 = 0x0a;
    assert(vmm_getPageFaultCount() - init == 0);
    *(unsigned char *)0x4000 = 0x0d;
    assert(vmm_getPageFaultCount() - init == 0);
    *(unsigned char *)0x2000 = 0x0b;
    assert(vmm_getPageFaultCount() - init == 0);
    // first miss: 0x5000 is new, so the oldest resident page is evicted
    *(unsigned char *)0x5000 = 0x0e;
    assert(vmm_getPageFaultCount() - init == 1);
    *(unsigned char *)0x2000 = 0x0b;
    assert(vmm_getPageFaultCount() - init == 1);
    // 0x1000 was evicted above, so touching it faults again
    *(unsigned char *)0x1000 = 0x0a;
    assert(vmm_getPageFaultCount() - init == 2);
    *(unsigned char *)0x2000 = 0x0b;
    assert(vmm_getPageFaultCount() - init == 3);
    *(unsigned char *)0x3000 = 0x0c;
    assert(vmm_getPageFaultCount() - init == 4);
    *(unsigned char *)0x4000 = 0x0d;
    assert(vmm_getPageFaultCount() - init == 5);
    *(unsigned char *)0x5000 = 0x0e;
    assert(vmm_getPageFaultCount() - init == 6);
    // content must survive the round trip through swap
    assert(*(unsigned char *)0x1000 == 0x0a);
    *(unsigned char *)0x1000 = 0x0a;
    assert(vmm_getPageFaultCount() - init == 7);
    return 0;
}
// vtable binding the FIFO policy into the generic swap-manager interface
// (selected by swap.c's `swap_man` pointer).
swap_manager_t swap_manager_fifo = {
    .name = "fifo swap manager",
    .init = &smfifo_init,
    .initVMASet = &smfifo_initVMASet,
    .tick = &smfifo_tick,
    .mapSwappable = &smfifo_mapSwappable,
    .setUnswappable = &smfifo_setUnswappable,
    .swapOut = &smfifo_swapOut,
    .check = &smfifo_check
};
|
rod-lin/c0re
|
kernel/lib/sync.h
|
#ifndef _KERNEL_LIB_SYNC_H_
#define _KERNEL_LIB_SYNC_H_
#include "pub/com.h"
#include "pub/x86.h"
#include "intr/trap.h"
#include "mem/mmu.h"
// Disable interrupts if they are currently enabled.
// Returns 1 when this call actually disabled them (the caller owes a
// matching restore), 0 when they were already off.
C0RE_INLINE bool _intr_save()
{
    if (read_eflags() & EFLAG_IF) {
        intr_disable();
        return 1;
    }
    return 0;
}
// Re-enable interrupts, but only if the paired _intr_save() disabled them.
C0RE_INLINE void _intr_restore(bool flag)
{
    if (flag) {
        intr_enable();
    }
}
// save/restore pair for open-coded critical sections
#define intr_save(x) ({ x = _intr_save(); })
#define intr_restore(x) _intr_restore(x)
// run `block` with interrupts off, restoring the previous interrupt state
// afterwards (note: `block` is expanded twice, once per branch)
#define no_intr_block(block) ({ \
    if (_intr_save()) { \
        block; \
        intr_restore(true); \
    } else { \
        block; \
    } \
})
#endif
|
rod-lin/c0re
|
kernel/driver/pic.h
|
#ifndef _KERNEL_DRIVER_PIC_H_
#define _KERNEL_DRIVER_PIC_H_
void pic_init();
void pic_enable(unsigned int irq);
#endif
|
rod-lin/c0re
|
pub/printfmt.h
|
#ifndef _PUB_PRINTFMT_H_
#define _PUB_PRINTFMT_H_
#include "pub/com.h"
#include "pub/stdarg.h"
/* print format */
typedef void (*putc_fn_t)(int, void*);
void printfmt(putc_fn_t putch, void *putdat, const char *fmt, ...);
void vprintfmt(putc_fn_t putch, void *putdat, const char *fmt, va_list ap);
int snprintf(char *str, size_t size, const char *fmt, ...);
int vsnprintf(char *str, size_t size, const char *fmt, va_list ap);
#endif
|
rod-lin/c0re
|
kernel/driver/clock.h
|
#ifndef _KERNEL_DRIVER_CLOCK_H_
#define _KERNEL_DRIVER_CLOCK_H_
#include "pub/com.h"
#define CLOCK_TICK_PER_SEC 100
void clock_init();
long clock_tick();
// used only in trap.c
void _clock_inc();
#endif
|
rod-lin/c0re
|
kernel/mem/mmu.h
|
#ifndef _KERNEL_MEM_MMU_H_
#define _KERNEL_MEM_MMU_H_
/* memory management unit */
/* mem layout */
/* global segment numbers */
#define GDT_SEGNO_KTEXT 1 // K for kernel
#define GDT_SEGNO_KDATA 2
#define GDT_SEGNO_UTEXT 3
#define GDT_SEGNO_UDATA 4
#define GDT_SEGNO_TSS 5
/* global descriptor numbers */
#define GD_KTEXT ((GDT_SEGNO_KTEXT) << 3) // kernel text
#define GD_KDATA ((GDT_SEGNO_KDATA) << 3) // kernel data
#define GD_UTEXT ((GDT_SEGNO_UTEXT) << 3) // user text
#define GD_UDATA ((GDT_SEGNO_UDATA) << 3) // user data
#define GD_TSS ((GDT_SEGNO_TSS) << 3) // task segment selector
/* priviledge levels */
#define DPL_KERNEL (0)
#define DPL_USER (3)
/* cs/ds register value */
#define SEGR_KERNEL_CS ((GD_KTEXT) | DPL_KERNEL)
#define SEGR_KERNEL_DS ((GD_KDATA) | DPL_KERNEL)
#define SEGR_USER_CS ((GD_UTEXT) | DPL_USER)
#define SEGR_USER_DS ((GD_UDATA) | DPL_USER)
/* Normal segment */
#define GDT_SEGNULL_ASM \
.short 0, 0; \
.byte 0, 0, 0, 0
#define GDT_SEG_ASM(type, base, lim) \
/* 12 means dividing by 4096(4k, granularity) */ \
.short (((lim) >> 12) & 0xffff), ((base) & 0xffff); \
/* 0x90 for P and S bits */ \
.byte (((base) >> 16) & 0xff), (0x90 | (type)), \
(0xc0 | (((lim) >> 28) & 0xf)), (((base) >> 24) & 0xff)
/* 0xc0 here sets the G(granularity, to 4k) and D(???) bits */
/* eflags register */
#define EFLAG_CF 0x00000001 // carry Flag
#define EFLAG_PF 0x00000004 // parity Flag
#define EFLAG_AF 0x00000010 // auxiliary carry Flag
#define EFLAG_ZF 0x00000040 // zero Flag
#define EFLAG_SF 0x00000080 // sign Flag
#define EFLAG_TF 0x00000100 // trap Flag
#define EFLAG_IF 0x00000200 // interrupt Flag
#define EFLAG_DF 0x00000400 // direction Flag
#define EFLAG_OF 0x00000800 // overflow Flag
#define EFLAG_IOPL_MASK 0x00003000 // I/O Privilege Level bitmask
#define EFLAG_IOPL_0 0x00000000 // IOPL == 0
#define EFLAG_IOPL_1 0x00001000 // IOPL == 1
#define EFLAG_IOPL_2 0x00002000 // IOPL == 2
#define EFLAG_IOPL_3 0x00003000 // IOPL == 3
#define EFLAG_NT 0x00004000 // nested Task
#define EFLAG_RF 0x00010000 // resume Flag
#define EFLAG_VM 0x00020000 // virtual 8086 mode
#define EFLAG_AC 0x00040000 // alignment Check
#define EFLAG_VIF 0x00080000 // virtual Interrupt Flag
#define EFLAG_VIP 0x00100000 // virtual Interrupt Pending
#define EFLAG_ID 0x00200000 // ID flag
/* application segment type bits */
#define STA_X 0x8 // executable segment
#define STA_E 0x4 // expand down (non-executable segments)
#define STA_C 0x4 // conforming code segment (executable only)
#define STA_W 0x2 // writeable (non-executable segments)
#define STA_R 0x2 // readable (executable segments)
#define STA_A 0x1 // accessed
/* System segment type bits */
#define STS_T16A 0x1 // available 16-bit TSS
#define STS_LDT 0x2 // local descriptor table
#define STS_T16B 0x3 // busy 16-bit TSS
#define STS_CG16 0x4 // 16-bit call gate
#define STS_TG 0x5 // task gate / coum transmitions
#define STS_IG16 0x6 // 16-bit interrupt gate
#define STS_TG16 0x7 // 16-bit trap gate
#define STS_T32A 0x9 // available 32-bit TSS
#define STS_T32B 0xB // busy 32-bit TSS
#define STS_CG32 0xC // 32-bit call gate
#define STS_IG32 0xE // 32-bit interrupt gate
#define STS_TG32 0xF // 32-bit trap gate
#ifndef __ASSEMBLER__
#include "pub/com.h"
/* gate descriptor for interrupts and traps */
typedef struct {
unsigned gd_off_15_0: 16; // low 16 bits of offset in segment
unsigned gd_ss: 16; // segment selector
unsigned gd_args: 5; // # args, 0 for interrupt/trap gates
unsigned gd_rsv1: 3; // reserved(should be zero I guess)
unsigned gd_type: 4; // type(STS_{TG,IG32,TG32})
unsigned gd_s: 1; // must be 0 (system)
unsigned gd_dpl: 2; // descriptor(meaning new) privilege level
unsigned gd_p: 1; // present
unsigned gd_off_31_16: 16; // high bits of offset in segment
} gatedesc_t;
/* detail at https://chyyuu.gitbooks.io/ucore_os_docs/content/lab1/lab1_3_3_2_interrupt_exception.html */
#define IDT_SETGATE(gate, istrap, sel, off, dpl) { \
(gate).gd_off_15_0 = (uint32_t)(off) & 0xffff; \
(gate).gd_ss = (sel); \
(gate).gd_args = 0; \
(gate).gd_rsv1 = 0; \
(gate).gd_type = (istrap) ? STS_TG32 : STS_IG32; \
(gate).gd_s = 0; \
(gate).gd_dpl = (dpl); \
(gate).gd_p = 1; \
(gate).gd_off_31_16 = (uint32_t)(off) >> 16; \
}
/* call gate descriptor */
#define IDT_SETCALLGATE(gate, ss, off, dpl) { \
(gate).gd_off_15_0 = (uint32_t)(off) & 0xffff; \
(gate).gd_ss = (ss); \
(gate).gd_args = 0; \
(gate).gd_rsv1 = 0; \
(gate).gd_type = STS_CG32; \
(gate).gd_s = 0; \
(gate).gd_dpl = (dpl); \
(gate).gd_p = 1; \
(gate).gd_off_31_16 = (uint32_t)(off) >> 16; \
}
/* segment descriptors */
/* detail at https://en.wikipedia.org/wiki/Global_Descriptor_Table */
typedef struct {
unsigned sd_lim_15_0: 16; // low bits of segment limit
unsigned sd_base_15_0: 16; // low bits of segment base address
unsigned sd_base_23_16: 8; // middle bits of segment base address
unsigned sd_type: 4; // segment type (see STS_ constants)
unsigned sd_s: 1; // 0 = system, 1 = application
unsigned sd_dpl: 2; // descriptor Privilege Level
unsigned sd_p: 1; // present
unsigned sd_lim_19_16: 4; // high bits of segment limit
unsigned sd_avl: 1; // unused (available for software use)
unsigned sd_rsv1: 1; // reserved
unsigned sd_db: 1; // 0 = 16-bit segment, 1 = 32-bit segment
unsigned sd_g: 1; // granularity: limit scaled by 4K when set
unsigned sd_base_31_24: 8; // high bits of segment base address
} segdesc_t;
#define GDT_SEG_NULL ((segdesc_t) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 })
/* more comments at boot/bootasm.h */
#define GDT_SEG(type, base, lim, dpl) \
((segdesc_t) { \
((lim) >> 12) & 0xffff, (base) & 0xffff, \
((base) >> 16) & 0xff, type, 1, dpl, 1, \
(unsigned)(lim) >> 28, 0, 0, 1, 1, \
(unsigned)(base) >> 24 \
})
#define GDT_SEG16(type, base, lim, dpl) \
((segdesc_t) { \
(lim) & 0xffff, (base) & 0xffff, \
((base) >> 16) & 0xff, type, 1, dpl, 1, \
(unsigned)(lim) >> 16, 0, 0, 1, 0, \
(unsigned)(base) >> 24 \
})
#define GDT_SEG_TSS(type, base, lim, dpl) \
((segdesc_t) { \
(lim) & 0xffff, (base) & 0xffff, \
((base) >> 16) & 0xff, type, 0, dpl, 1, \
(unsigned)(lim) >> 16, 0, 0, 1, 0, \
(unsigned)(base) >> 24 \
})
/* task state segment format (as described by the Pentium architecture book) */
typedef struct {
uint32_t ts_link; // old ts selector
uintptr_t ts_esp0; // stack pointers and segment selectors
uint16_t ts_ss0; // after an increase in privilege level
uint16_t ts_padding1;
uintptr_t ts_esp1;
uint16_t ts_ss1;
uint16_t ts_padding2;
uintptr_t ts_esp2;
uint16_t ts_ss2;
uint16_t ts_padding3;
uintptr_t ts_cr3; // page directory base
uintptr_t ts_eip; // saved state from last task switch
uint32_t ts_eflags;
uint32_t ts_eax; // more saved state (registers)
uint32_t ts_ecx;
uint32_t ts_edx;
uint32_t ts_ebx;
uintptr_t ts_esp;
uintptr_t ts_ebp;
uint32_t ts_esi;
uint32_t ts_edi;
uint16_t ts_es; // even more saved state (segment selectors)
uint16_t ts_padding4;
uint16_t ts_cs;
uint16_t ts_padding5;
uint16_t ts_ss;
uint16_t ts_padding6;
uint16_t ts_ds;
uint16_t ts_padding7;
uint16_t ts_fs;
uint16_t ts_padding8;
uint16_t ts_gs;
uint16_t ts_padding9;
uint16_t ts_ldt;
uint16_t ts_padding10;
uint16_t ts_t; // trap on task switch
uint16_t ts_iomb; // i/o map base address
} taskstate_t;
#endif /* __ASSEMBLER__ */
/**************** !!! LAB2 !!! ****************/
/**
* virtual memory map: Permissions
* kernel/user
*
* 4G ------------------> +---------------------------------+
* | |
* | Empty Memory (*) |
* | |
* +---------------------------------+ 0xFB000000
* | Cur. Page Table (Kern, RW) | RW/-- PTSIZE
* VPT -----------------> +---------------------------------+ 0xFAC00000
* | Invalid Memory (*) | --/--
* KERNTOP -------------> +---------------------------------+ 0xF8000000
* | |
* | Remapped Physical Memory | RW/-- KMEMSIZE
* | |
* KERNBASE ------------> +---------------------------------+ 0xC0000000
* | |
* | |
* | |
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* (*) NOTE: The kernel ensures that "Invalid Memory" is *never* mapped.
* "Empty Memory" is normally unmapped, but user programs may map pages
* there if desired.
*
**/
/* all physical memory mapped at this address */
#define KERNEL_BASE 0xC0000000
#define KERNEL_MEMSIZE 0x38000000 // the maximum amount of physical memory
#define KERNEL_TOP (KERNEL_BASE + KERNEL_MEMSIZE)
#define KERNEL_PGSIZE 4096 // page size
#define KERNEL_STACKPAGE 2 // # of pages in kernel stack
#define KERNEL_STACKSIZE (KERNEL_STACKPAGE * KERNEL_PGSIZE) // sizeof kernel stack
/**
* Virtual page table. Entry PDX[VPT] in the PD (Page Directory) contains
* a pointer to the page directory itself, thereby turning the PD into a page
* table, which maps all the PTEs (Page Table Entry) containing the page mappings
* for the entire virtual address space into that 4 Meg region starting at VPT.
**/
#define KERNEL_VPT 0xFAC00000
/* HERE GOES THE -- PAGE!!! */
#ifndef __ASSEMBLER__
#include "pub/com.h"
#include "pub/atomic.h"
#include "pub/dllist.h"
typedef uintptr_t pte_t;
typedef uintptr_t pde_t;
typedef size_t page_number_t;
// some constants for bios interrupt 15h AX = 0xE820
#define E820_MAXENT 20 // number of entries in E820MAP
#define E820_ARM 1 // address range memory
#define E820_ARR 2 // address range reserved
typedef struct {
int nmap;
struct {
uint64_t addr;
uint64_t size;
uint32_t type;
} C0RE_PACKED map[E820_MAXENT];
} e820map_t;
/**
* struct Page - Page descriptor structures. Each Page describes one
* physical page. In kern/mm/pmm.h, you can find lots of useful functions
* that convert Page to other data types, such as phyical address.
**/
typedef struct page_t_tag {
int ref; // page frame's reference counter
uint32_t flags; // array of flags that describe the status of the page frame
// used for allocator
unsigned int nfree; // number of free pages(or the real size of the page block)
struct page_t_tag *prev;
struct page_t_tag *next;
dllist_t pra_link; // used for pra (page replace algorithm)
uintptr_t pra_vaddr; // used for pra (page replace algorithm)
} page_t;
// convert dllist node to page
#define dll2page(dll, member) \
to_struct((dll), page_t, member)
/* flags describing the status of a page frame */
#define PAGE_FLAG_RESV 0 // the page is reserved for kernel and cannot be allocated
#define PAGE_FLAG_FREE 1 // the page is freed
#define page_setReserved(p) btsl(PAGE_FLAG_RESV, &(p)->flags)
#define page_resetReserved(p) btrl(PAGE_FLAG_RESV, &(p)->flags)
#define page_isReserved(p) btl(PAGE_FLAG_RESV, &(p)->flags)
#define page_setFree(p) btsl(PAGE_FLAG_FREE, &(p)->flags)
#define page_resetFree(p) btrl(PAGE_FLAG_FREE, &(p)->flags)
#define page_isFree(p) btl(PAGE_FLAG_FREE, &(p)->flags)
#define page_clearFlags(p) ((p)->flags = 0)
#define page_clearRef(p) ((p)->ref = 0)
#define page_incRef(p) (++(p)->ref)
#define page_decRef(p) (--(p)->ref)
#define page_getRef(p) ((p)->ref)
// a linear address 'la' has a three-part structure as follows:
//
// +--------10------+-------10-------+---------12---------+
// | Page Directory | Page Table | Offset within Page |
// | Index | Index | |
// +----------------+----------------+--------------------+
// \--- PDX(la) --/ \--- PTX(la) --/ \---- POFF(la) ----/
// \----------- PPN(la) -----------/
//
// page directory index
#define PD_INDEX(la) ((((uintptr_t)(la)) >> PD_INDEX_SHIFT) & 0x3ff)
// page table index
#define PT_INDEX(la) ((((uintptr_t)(la)) >> PT_INDEX_SHIFT) & 0x3ff)
// page number field of address
#define PAGE_NUMBER(la) (((uintptr_t)(la)) >> PT_INDEX_SHIFT)
// offset in page
#define PAGE_OFS(la) (((uintptr_t)(la)) & 0xfff)
// construct linear address from indexes and offset
#define PAGE_ADDR(d, t, o) (((uintptr_t)(d) << PD_INDEX_SHIFT | (uintptr_t)(t) << PT_INDEX_SHIFT | (uintptr_t)(o)))
// address in page table or page directory entry
#define PTE_ADDR(pte) ((uintptr_t)(pte) & ~0xfff)
#define PDE_ADDR(pde) PTE_ADDR(pde)
/* page directory and page table constants */
#define PD_NENTRY 1024 // page directory entries per page directory
#define PT_NENTRY 1024 // page table entries per page table
#define PAGE_SIZE 4096 // bytes mapped by a page
#define PAGE_SHIFT 12 // log2(PAGE_SIZE)
#define PT_SIZE (PAGE_SIZE * PT_NENTRY) // bytes mapped by a page directory entry
#define PT_SHIFT 22 // log2(PT_SIZE)
#define PT_INDEX_SHIFT 12 // offset of PT_INDEX_SHIFT in a linear address
#define PD_INDEX_SHIFT 22 // offset of PD_INDEX_SHIFT in a linear address
/* page table/directory entry flags */
#define PTE_FLAG_P 0x001 // present
#define PTE_FLAG_W 0x002 // writeable
#define PTE_FLAG_U 0x004 // user
#define PTE_FLAG_PWT 0x008 // write-Through
#define PTE_FLAG_PCD 0x010 // cache-Disable
#define PTE_FLAG_A 0x020 // accessed
#define PTE_FLAG_D 0x040 // dirty
#define PTE_FLAG_PS 0x080 // page Size
#define PTE_FLAG_MBZ 0x180 // bits must be zero
#define PTE_FLAG_AVAIL 0xe00 // available for software use
// the PTE_AVAIL bits aren't used by the kernel or interpreted by the
// hardware, so user processes are allowed to set them arbitrarily.
#define PTE_FLAG_USER (PTE_FLAG_U | PTE_FLAG_W | PTE_FLAG_P)
/* Control Register flags */
#define CR0_PE 0x00000001 // protection enable
#define CR0_MP 0x00000002 // monitor co-processor
#define CR0_EM 0x00000004 // emulation
#define CR0_TS 0x00000008 // task switched
#define CR0_ET 0x00000010 // extension type
#define CR0_NE 0x00000020 // numeric errror
#define CR0_WP 0x00010000 // write protect
#define CR0_AM 0x00040000 // alignment mask
#define CR0_NW 0x20000000 // not writethrough
#define CR0_CD 0x40000000 // cache disable
#define CR0_PG 0x80000000 // paging
#define CR4_PCE 0x00000100 // performance counter enable
#define CR4_MCE 0x00000040 // machine check enable
#define CR4_PSE 0x00000010 // page size extensions
#define CR4_DE 0x00000008 // debugging extensions
#define CR4_TSD 0x00000004 // time stamp disable
#define CR4_PVI 0x00000002 // protected-mode virtual interrupts
#define CR4_VME 0x00000001 // v86 mode extensions
#endif // __ASSEMBLER__
#endif
|
rod-lin/c0re
|
kernel/mem/ffit.h
|
<reponame>rod-lin/c0re<gh_stars>1-10
#ifndef _KERNEL_MEM_FFIT_H_
#define _KERNEL_MEM_FFIT_H_
/* first-fit page allocation */
#include "mem/pmm.h"
extern const page_allocator_t page_ffit_allocator;
#endif
|
kbladin/shading_tests
|
include/SettingsManager.h
|
#ifndef SETTINGSMANAGER_H
#define SETTINGSMANAGER_H
#ifndef Q_MOC_RUN
#define GLM_FORCE_RADIANS
#include <glm/glm.hpp>
#include <glm/ext.hpp>
#endif
class SettingsManager
{
public:
static SettingsManager* Instance();
const int N_LIGHTSOURCES;
const int FILENAME_SIZE;
int n_blur_loops;
int filter_size;
float multiplier1;
float multiplier2;
char* file_to_load;
private:
static SettingsManager* instance_;
SettingsManager();
~SettingsManager();
};
#endif
|
kbladin/shading_tests
|
include/RenderTexture.h
|
#ifndef RENDER_TEXTURE_H
#define RENDER_TEXTURE_H
#include "GL/glew.h"
#include "GL/glfw3.h"
class RenderTexture
{
public:
RenderTexture(int width, int height);
~RenderTexture(){};
GLuint GetFrameBuffer();
int GetWidth();
int GetHeight();
GLuint frame_buffer_;
GLuint rendered_texture_; // Might be moved to a texturemanager later
GLuint depth_render_buffer_;
private:
void SetupFrameBuffer();
int width_, height_;
};
#endif
|
kbladin/shading_tests
|
include/ShaderTests.h
|
<filename>include/ShaderTests.h
#ifndef SHADER_TESTS_H
#define SHADER_TESTS_H value
#include "../include/Scene.h"
#include "../include/MyGlWindow.h"
class ShaderTests
{
public:
ShaderTests();
~ShaderTests();
private:
MyGlWindow* my_gl_window_;
};
#endif
|
kbladin/shading_tests
|
include/kalles_gl_lib/Camera.h
|
#ifndef CAMERA_H
#define CAMERA_H
// External
#ifndef Q_MOC_RUN
#define GLM_FORCE_RADIANS
#include <glm/glm.hpp>
#include <glm/ext.hpp>
#endif
//! The camera class is used to obtain the view and projection matrices.
class Camera {
public:
Camera(glm::vec3 target, float far_clipping, float near_clipping, float aspect_ratio);
Camera(glm::mat4 view, glm::mat4 projection);
glm::mat4 GetViewMatrix();
glm::mat4 GetProjectionMatrix();
glm::vec3 GetTarget();
void SetTarget(glm::vec3 target);
void SetAspectRatio(float aspect_ratio);
float GetFarClipping();
void UpdateMatrices();
void IncrementXrotation(float h);
void IncrementYrotation(float h);
void IncrementZposition(float h);
private:
template <class T>
void Delay(T* input, T end_val, float speed);
glm::mat4 projection_;
glm::mat4 view_;
glm::vec3 target_;
float aspect_ratio_;
float far_clipping_;
float near_clipping_;
// Values used to delay the camera
glm::vec3 local_translate_goal_;
glm::vec3 global_translate_goal_;
float rotate_x_goal_;
float rotate_y_goal_;
//float translate_z_goal_ = -10.0f;
glm::vec3 local_translate_;
glm::vec3 global_translate_;
float rotate_x_;
float rotate_y_;
};
#endif // CAMERA_H
|
kbladin/shading_tests
|
include/MyMesh.h
|
<reponame>kbladin/shading_tests
#ifndef MY_MESH_H
#define MY_MESH_H
#include "../include/kalles_gl_lib/Mesh.h"
class MyMesh : public Mesh
{
public:
MyMesh(const char* file_path);
~MyMesh(){};
glm::mat4 GetModelTransform();
void Render();
bool IsCorrupt(){ return corrupt_; };
glm::vec3 position_;
glm::fquat quaternion_;
private:
bool corrupt_;
};
#endif
|
kbladin/shading_tests
|
include/MyShaderManager.h
|
<gh_stars>0
#ifndef MY_SHADER_MANAGER_H
#define MY_SHADER_MANAGER_H
#include "kalles_gl_lib/ShaderManager.h"
// Application-specific shader manager singleton. Registers every shader and
// shader program used by the demo.
class MyShaderManager : public ShaderManager
{
public:
// Returns the singleton. NOTE(review): whether Instance() lazily creates
// the object or requires a prior CreateInstance() call is not visible
// here -- confirm in the .cpp.
static MyShaderManager* Instance();
static void CreateInstance();
private:
// Private constructor enforces the singleton pattern.
MyShaderManager();
// Load/compile all individual shaders.
void AddAllShaders();
// Link all shader programs from the compiled shaders.
void AddAllShaderPrograms();
// ShaderPrograms
void AddSimpleShaderProgram();
void AddNormalColorShaderProgram();
void AddEdgeDetectorShaderProgram();
void AddTextureCombinerShaderProgram();
void AddPhongShaderProgram();
void AddBrightLightShaderProgram();
void AddBlurShaderProgram();
void AddToonShaderProgram();
void AddOneColorShaderProgram();
};
#endif // MY_SHADER_MANAGER_H
|
kbladin/shading_tests
|
include/Scene.h
|
#ifndef SCENE_H
#define SCENE_H
#include <vector>
#include "../include/kalles_gl_lib/Camera.h"
#include "../include/kalles_gl_lib/Mesh.h"
#include "../include/MyMesh.h"
#include "../include/RenderTexture.h"
#include "../include/Quad.h"
struct AmbientLight {
float intensity;
glm::vec3 color;
AmbientLight(){
intensity = 0.2f;
color = glm::vec3(1.0f,1.0f,1.0f);
}
};
struct LightSource {
// 0
float intensity;
glm::vec3 color;
// 1
glm::vec4 position;
// 2
float constant_attenuation, linear_attenuation, quadratic_attenuation;
float spot_cutoff;
float spot_exponent;
// 3
glm::vec3 spot_direction;
LightSource(){
intensity = 50.0f;
color = glm::vec3(1.0f, 0.8f, 0.6f);
position = glm::vec4(5.0f,5.0f,5.0f,1.0f); // w == 0.0 ? => directional
constant_attenuation = 0.0f;
linear_attenuation = 0.0f;
quadratic_attenuation = 1.0f;
spot_cutoff = 100.0f; // spot_cutoff > 90.0 ? => point light
spot_exponent = 10.0f;
spot_direction = glm::vec3(0.0f,-1.0f,0.0f);
}
};
// Container for everything that gets rendered: camera, meshes, lights, and
// the intermediate render targets used by the post-processing passes.
class Scene
{
public:
Scene(Camera* cam);
~Scene();
// Adds a mesh by pointer. NOTE(review): ownership is not visible here --
// confirm whether the destructor deletes the meshes.
void AddMesh(MyMesh* mesh);
int GetNumberOfMeshes();
// Render the scene with toon shading into a width x height viewport.
void RenderToon(int width, int height);
// Render the scene with the default pipeline.
void Render(int width, int height);
// Per-frame state update.
void Update();
Camera* GetCamera();
// Lights are public so the UI/tweak bar can edit them directly.
std::vector<LightSource> light_sources_;
AmbientLight amb_light_;
private:
Camera* cam_;
std::vector<MyMesh*> meshes_;
// Offscreen targets for multi-pass effects.
std::vector<RenderTexture*> render_textures_;
// Visual proxy meshes drawn at the light positions.
std::vector<MyMesh*> light_source_meshes_;
// Fullscreen quad used for compositing post-processing passes.
Quad quad_;
};
#endif // SCENE_H
|
kbladin/shading_tests
|
include/Quad.h
|
<reponame>kbladin/shading_tests
#ifndef QUAD_H
#define QUAD_H
#include "../include/kalles_gl_lib/Mesh.h"
// Fullscreen quad mesh, used to run screen-space (post-processing) shaders.
class Quad : public Mesh
{
public:
Quad();
~Quad(){};
void Render();
private:
// Fills the vertex/uv/element arrays with the quad geometry.
void SetupVertexData();
// NOTE(review): this hides the non-virtual Mesh::SetupBuffers -- calls
// through a Mesh* will not reach this override; confirm intended.
void SetupBuffers();
};
#endif
|
kbladin/shading_tests
|
include/kalles_gl_lib/MeshLoader.h
|
<filename>include/kalles_gl_lib/MeshLoader.h
#ifndef MESH_LOADER_H
#define MESH_LOADER_H
#include <assimp/Importer.hpp> // C++ importer interface
#include <assimp/scene.h> // Output data structure
#include <assimp/postprocess.h> // Post processing flags
// GLM headers are hidden from Qt's moc, which cannot parse them.
#ifndef Q_MOC_RUN
#define GLM_FORCE_RADIANS
#include <glm/glm.hpp>
#include <glm/ext.hpp>
#endif
#include <iostream>
#include <vector>
// Loads a model file via Assimp and fills the four output arrays.
// Returns false on failure; on success out_indices indexes into the other
// three arrays. NOTE(review): unsigned short indices cap meshes at 65536
// vertices -- confirm that is acceptable for the assets used.
bool loadMesh_assimp(
const char * path,
std::vector<unsigned short> & out_indices,
std::vector<glm::vec3> & out_vertices,
std::vector<glm::vec2> & out_uvs,
std::vector<glm::vec3> & out_normals
);
#endif
|
kbladin/shading_tests
|
include/MyGlWindow.h
|
<gh_stars>0
#ifndef MY_GL_WINDOW_H
#define MY_GL_WINDOW_H
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <AntTweakBar.h>
#include "../include/Scene.h"
// Owns the GLFW window, the GL context, the AntTweakBar UI, and the main
// loop. Several members are static because GLFW callbacks are plain
// function pointers with no user-data capture.
class MyGlWindow
{
public:
MyGlWindow();
~MyGlWindow();
// Runs until the window is closed; renders and updates the scene each frame.
void MainLoop();
private:
// Initialization steps; each returns a status code.
int InitGLFW();
int InitOpenGL();
int InitTW();
// Invokes the given Scene render member function (pointer-to-member) on s.
void RenderScene(void (Scene::*function)(int, int), Scene* s);
void UpdateMousePos();
// GLFW callbacks (static: GLFW cannot call non-static members).
static void ErrorCallback(int error, const char* description);
static void KeyCallback(
GLFWwindow* window,
int key,
int scancode,
int action,
int mods);
static void ScrollCallback(GLFWwindow* window, double x_pos, double y_pos);
static void MouseButtonCallback(
GLFWwindow* window,
int button,
int action,
int mods);
static void WindowSizeCallback(GLFWwindow* window, int width, int height);
static void MousePosCallback(GLFWwindow* window, double xpos, double ypos);
static void CharCallback(GLFWwindow* window, int codepoint);
// AntTweakBar button callbacks.
static void TW_CALL LoadButtonCallback(void* client_data);
static void TW_CALL PreLoadButtonCallback(void* client_data);
static void TW_CALL CancelButtonCallback(void* client_data);
// Currently selected Scene render pass (e.g. Render or RenderToon).
void (Scene::* RenderFunction)(int, int);
GLFWwindow* window_;
static Scene* scene_;
// Previous cursor position, used to compute mouse deltas.
double prev_cursor_pos_x_;
double prev_cursor_pos_y_;
static bool mouse_pressed_;
// Ant tweak bar
static TwBar* load_file_bar_;
};
#endif // MY_GL_WINDOW_H
|
kbladin/shading_tests
|
include/kalles_gl_lib/Mesh.h
|
#ifndef MESH_H
#define MESH_H
#include <vector>
// External
// GLM headers are hidden from Qt's moc, which cannot parse them.
#ifndef Q_MOC_RUN
#define GLM_FORCE_RADIANS
#include <glm/glm.hpp>
#include <glm/ext.hpp>
#endif
#include "GL/glew.h"
// NOTE(review): sibling headers use <GLFW/glfw3.h>; this "GL/glfw3.h" path
// is inconsistent -- confirm it resolves on all build setups.
#include "GL/glfw3.h"
#include "../../include/kalles_gl_lib/Camera.h"
// Abstract base for renderable geometry: owns the GL vertex/element buffers
// and the CPU-side vertex data; subclasses implement Render().
class Mesh
{
friend class Renderer;
public:
Mesh();
virtual ~Mesh();
// Uploads the *_data_ vectors into the GL buffer objects below.
void SetupBuffers();
virtual void Render() = 0;
// Releases the GL buffer objects created by SetupBuffers().
void DeleteBuffers();
protected:
// GL object handles.
GLuint vertex_array_id_;
GLuint element_buffer_id_;
GLuint vertex_position_buffer_id_;
GLuint vertex_normal_buffer_id_;
GLuint vertex_uv_buffer_id_;
// CPU-side geometry, indexed by element_data_.
std::vector<glm::vec3> vertex_position_data_;
std::vector<glm::vec3> vertex_normal_data_;
std::vector<glm::vec2> vertex_uv_data_;
std::vector<GLushort> element_data_;
};
#endif // MESH_H
|
mamoniem/bellz
|
Source/Bellz/GameDataTables.h
|
<filename>Source/Bellz/GameDataTables.h
// All rights reserved, <NAME> 2016 http://www.mamoniem.com/
#pragma once
#include "Engine/DataTable.h"
#include "GameFramework/Actor.h"
#include "GameDataTables.generated.h"
// One row of the weapons data table.
USTRUCT(Blueprintable)
struct FWeaponStruct : public FTableRowBase
{
	GENERATED_USTRUCT_BODY()
public:
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly)
	FString Icon;
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly)
	FString DisplayName;
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly)
	int32 Damage;
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly)
	int32 CooldownTime;
	// Default rows carry "None" sentinels and zeroed stats.
	FWeaponStruct()
		: Icon("None")
		, DisplayName("None")
		, Damage(0)
		, CooldownTime(0)
	{
	}
};
// One row of the missions data table: required kill and collect counts.
USTRUCT(Blueprintable)
struct FMissionStruct : public FTableRowBase
{
	GENERATED_USTRUCT_BODY()
public:
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly)
	int32 Kill;
	UPROPERTY(VisibleAnywhere, BlueprintReadOnly)
	int32 Collect;
	// Default rows start with zeroed objectives.
	FMissionStruct()
		: Kill(0)
		, Collect(0)
	{
	}
};
// Actor that holds the game's data tables and caches their rows in arrays.
UCLASS()
class BELLZ_API AGameDataTables : public AActor
{
GENERATED_BODY()
public:
// Sets default values for this actor's properties
AGameDataTables();
// Called when the game starts or when spawned
virtual void BeginPlay() override;
// Called every frame
virtual void Tick( float DeltaSeconds ) override;
//I used editanywhere, so I'll be able to assign it in the details panel
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Game DataTables")
UDataTable* WeaponsTable;
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Game DataTables")
UDataTable* MissionsTable;
//UPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = "Game DataTables")
// Cached row pointers into the tables above. NOTE(review): raw row
// pointers stay valid only while the owning UDataTable is alive.
TArray<FWeaponStruct*> AllWeaponsData;
//UPROPERTY(VisibleAnywhere, BlueprintReadWrite, Category = "Game DataTables")
TArray<FMissionStruct*> AllMissionsData;
// Reads every row from both tables into the arrays above.
UFUNCTION(BlueprintCallable, Category = "Game DataTables")
void OnFetchAllTables();
};
|
mamoniem/bellz
|
Source/Bellz/CoinPickup.h
|
// All rights reserved, <NAME> 2016 http://www.mamoniem.com/
#pragma once
#include "PickupBase.h"
#include "CoinPickup.generated.h"
/**
 * Collectible coin pickup; adds CoinValue to the player's total on collect.
 */
UCLASS()
class BELLZ_API ACoinPickup : public APickupBase
{
GENERATED_BODY()
public:
// Sets default values for this actor's properties
ACoinPickup();
// The Override for the Virtual of the base class
void OnGetCollected_Implementation() override;
//Access the Coin value
float GetCoinValue();
protected:
//The value that the coin adds to the collected items, you can make a Special coins with higher values or so...
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Coins", Meta = (BlueprintProtected = "true"))
float CoinValue;
};
|
mamoniem/bellz
|
Source/Bellz/Enemy.h
|
<filename>Source/Bellz/Enemy.h
// All rights reserved, <NAME> 2016 http://www.mamoniem.com/
#pragma once
#include "GameFramework/Character.h"
#include "Enemy.generated.h"
// AI-driven enemy character: senses the player via a UPawnSensingComponent,
// runs a behavior tree, and deals damage through hand sphere triggers.
UCLASS()
class BELLZ_API AEnemy : public ACharacter
{
GENERATED_BODY()
// Trigger volumes: one around the body, one on each hand for melee hits.
UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = Triggers, meta = (AllowPrivateAccess = "true"))
class USphereComponent* bodySphereTrigger;
UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = Triggers, meta = (AllowPrivateAccess = "true"))
class USphereComponent* leftHandTrigger;
UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = Triggers, meta = (AllowPrivateAccess = "true"))
class USphereComponent* rightHandTrigger;
public:
//The constructor
AEnemy();
//Override the PostInitializeComponents()
virtual void PostInitializeComponents() override;
// Called when the game starts or when spawned
virtual void BeginPlay() override;
// Called every frame
virtual void Tick( float DeltaSeconds ) override;
// Called to bind functionality to input
virtual void SetupPlayerInputComponent(class UInputComponent* InputComponent) override;
//The health of the enemy
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Enemy Behavior")
float TotalHealth;
//The range for the enemy attack
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Enemy Behavior")
float AttackRange;
//The power of the enemy attacks
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Enemy Behavior")
float AttackDamage;
//Check if the enemy is dead or alive
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Enemy Behavior")
bool IsDead;
//Check if the enemy is currently attacking
UPROPERTY(EditAnywhere, BlueprintReadWrite, Category = "Enemy Behavior")
bool IsAttacking;
//The sensing component used to see or hear the player
UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Enemy AI")
class UPawnSensingComponent* PawnSensor;
//The used BT with that enemy
UPROPERTY(EditAnywhere, Category = "Enemy AI")
class UBehaviorTree* EnemyBehaviorTree;
//Perform attack
UFUNCTION(BlueprintCallable, Category = "Enemy AI")
void OnPerformAttack();
//Called just before an attack starts
UFUNCTION(BlueprintCallable, Category = "Enemy AI")
void OnPreAttack();
//Called after an attack is done
UFUNCTION(BlueprintCallable, Category = "Enemy AI")
void OnPostAttack();
//Hear the player's noise using the sensing component
UFUNCTION()
void OnHearNoise(APawn *OtherActor, const FVector &Location, float Volume);
//See the player's by sight using the sensing component
UFUNCTION()
void OnSeePawn(APawn *OtherPawn);
// Overlap handler for the hand triggers; applies melee damage on contact.
UFUNCTION()
void OnHandTriggerOverlap(class AActor* OtherActor, class UPrimitiveComponent* OtherComp, int32 OtherBodyIndex, bool bFromSweep, const FHitResult & SweepResult);
//float AccumulatedFiretime;
//bool IsFirstPerson(); //virtual bool IsFirstPerson() const override;
virtual void FaceRotation(FRotator NewRotation, float DeltaTime = 0.f) override;
// NOTE(review): overlaps with the IsAttacking UPROPERTY above -- confirm
// which one the implementation actually uses.
bool Attacking;
/** Returns sphere trigger subobject **/
FORCEINLINE class USphereComponent* GetBodySphereTrigger() const { return bodySphereTrigger; }
};
|
mamoniem/bellz
|
Source/Bellz/PickupBase.h
|
<reponame>mamoniem/bellz
// All rights reserved, <NAME> 2016 http://www.mamoniem.com/
#pragma once
#include "GameFramework/Actor.h"
#include "PickupBase.generated.h"
// Base class for all collectible pickups; subclasses override
// OnGetCollected_Implementation (see ACoinPickup).
UCLASS()
class BELLZ_API APickupBase : public AActor
{
GENERATED_BODY()
public:
// Sets default values for this actor's properties
APickupBase();
// Called when the game starts or when spawned
virtual void BeginPlay() override;
// Called every frame
virtual void Tick( float DeltaSeconds ) override;
//Return the static mesh of the pickup instance
FORCEINLINE class UStaticMeshComponent* GetMesh() const { return ThePickupStaticMesh; }
//The pickup active or not!
UFUNCTION(BlueprintPure, Category = "Pickups")
bool IsActive();
//Change the active status from the outside
UFUNCTION(BlueprintCallable, Category = "Pickups")
void SetActive(bool NewPickupState);
//When the pickup gets collected, this function should be called
UFUNCTION(BlueprintNativeEvent)
void OnGetCollected();
virtual void OnGetCollected_Implementation();
protected:
//True or False of the status
bool bIsActive;
private:
//The static mesh of the pickup, to give it visual looking, you can replace it with Sprites if it is a 2d game
UPROPERTY(VisibleAnywhere, BlueprintReadOnly, Category = "Pickups", meta = (AllowPrivateAccess = "true"))
class UStaticMeshComponent* ThePickupStaticMesh;
};
|
mamoniem/bellz
|
Source/Bellz/EnemyAIController.h
|
<reponame>mamoniem/bellz<gh_stars>0
// All rights reserved, <NAME> 2016 http://www.mamoniem.com/
#pragma once
#include "AIController.h"
#include "EnemyAIController.generated.h"
/**
 * AI controller that drives enemy pawns through a behavior tree and
 * blackboard, and searches the map for a player to target.
 */
UCLASS()
class BELLZ_API AEnemyAIController : public AAIController
{
	GENERATED_BODY()
public:
	AEnemyAIController();
	// Transient: recreated on load, never serialized.
	UPROPERTY(transient)
	UBlackboardComponent* BlackboardComp;
	UPROPERTY(transient)
	UBehaviorTreeComponent* BehaviorComp;
	// Initializes blackboard/behavior tree when a pawn is possessed.
	virtual void Possess(class APawn* InPawn) override;
	virtual void BeginInactiveState() override;
	// Respawns the controlled enemy (scheduled via SpawnTimer).
	void Respawn();
	// FIX: Category was inconsistently spelled "Behavior"/"Behaviour",
	// splitting these functions across two editor categories; unified to
	// "Behavior" (matching the first declarations).
	UFUNCTION(BlueprintCallable, Category = Behavior)
	void SetEnemy(class APawn* InPawn);
	UFUNCTION(BlueprintCallable, Category = Behavior)
	class AGladiator* GetEnemy() const;
	UFUNCTION(BlueprintCallable, Category = Behavior)
	void UpdateControlRotation(float DeltaTime, bool bUpdatePawn);
	// Line-of-sight check to the given pawn.
	UFUNCTION(BlueprintCallable, Category = Behavior)
	bool PawnCanBeSeen(APawn * target);
	/* Checks sight to all pawns in map, sets enemy if it finds a thing */
	UFUNCTION(BlueprintCallable, Category = Behavior)
	void OnSearchForEnemy();
protected:
	// Blackboard key IDs cached for fast access.
	int32 EnemyKeyID;
	int32 EnemyPositionKeyID;
	FTimerHandle SpawnTimer;
};
|
mamoniem/bellz
|
Source/Bellz/BellzGameMode.h
|
<reponame>mamoniem/bellz<filename>Source/Bellz/BellzGameMode.h
// All rights reserved, <NAME> 2016 http://www.mamoniem.com/
#pragma once
#include "GameFramework/GameMode.h"
#include "BellzGameMode.generated.h"
// Game mode: creates and shows the UMG-designed game UI when play begins.
UCLASS(minimalapi)
class ABellzGameMode : public AGameMode
{
	GENERATED_BODY()
public:
	ABellzGameMode();
	virtual void BeginPlay() override;
protected:
	//The game UI widget blueprint that been designed in UMG editor
	// FIX: metadata key was misspelled "BleprintProtected", so the engine
	// silently ignored it; corrected to "BlueprintProtected" (matching the
	// spelling used in PickupBase.h/CoinPickup.h).
	UPROPERTY(EditDefaultsOnly, BlueprintReadWrite, Category = "UI", Meta = (BlueprintProtected = "true"))
	TSubclassOf<class UUserWidget> GameUIWidget;
	//The in game instance of the UI
	UPROPERTY(EditDefaultsOnly, BlueprintReadWrite, Category = "UI", Meta = (BlueprintProtected = "true"))
	class UUserWidget* GameUIInstance;
};
|
mamoniem/bellz
|
Source/Bellz/Bellz.h
|
// All rights reserved, <NAME> 2016 http://www.mamoniem.com/
// FIX: the previous guard __PUREGAME_H__ used a reserved identifier
// (leading double underscore is reserved for the implementation); renamed.
#ifndef PUREGAME_H
#define PUREGAME_H
// I replaced EngineMinimal.h in order to get access to lots of other
// things... Mainly to enable the on-screen debugging messages!
//#include "EngineMinimal.h"
#include "Engine.h"
#include "BellzSaveGame.h"
#include "Kismet/GameplayStatics.h"
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.