0

As far as I have understood, the vertex fetch stage is encapsulated by the VAO, and the VAO is required to contain the vertex fetch stage state for piping between the buffer objects and vertex attributes, as well as for describing the format of the data in the buffer objects.

Both books that I have been reading on the subject (i.e. the Red Book and the Blue Book) explicitly mention that the VAO must contain the vertex fetch stage state data.

However when i actually create 2 texture objects and simply format the data once WITHOUT a VAO into which to store this information about the buffer, it still runs fine without any hiccups, and then i reload the first object back again, and again it works fine without any issues, so where is this information pulled from about the formatting of the data in the buffer object?

I even upload buffer data a second time to same buffer object which would imply that previous information held there would be reset? And the picture still renders fine to the window

So what exactly is going on? The books say one thing, but what happens in reality seems totally different — the opposite, even.

Can somebody explain what is actually needed here and what isn't? What is actually going on?

When do we actually need a VAO and when we can do without? What's the point of extra code processing when it is not needed?

The code below:

int main(){

   int scrW=1280, scrH=720;

   //create context and shader program
   init(scrW, scrH);
   createShaders();

   //create texture objects and load data from image to server memory
   char object[2][25];
   strcpy(object[0], "back.bmp");
   strcpy(object[1], "256x256.bmp");


   //triangle 1
   GLfloat vertices[] = 
   //  X      Y      U    V
   {  -1.0, -1.0,   0.0, 0.0,
       1.0, -1.0,   1.0, 0.0,
       1.0,  1.0,   1.0, 1.0,
      -1.0,  1.0,   0.0, 1.0};


   //glPointSize(40.0f);

   //create and bound vertex buffer object(memory buffers)   
   GLuint vbo1 = createVbo();

   //The state set by glVertexAttribPointer() is stored in the currently bound vertex array object (VAO) if vertex array object bound
   //associates the format of the data for the currently bound buffer object to the vertex attribute so opengl knows how much and how to read it
   glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4*sizeof(GLfloat), 0);
   glEnableVertexAttribArray(0);

   //shader vertex attribute for texture coordinates
   glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4*sizeof(GLfloat), (const GLvoid*)(2 * sizeof(GLfloat)));
   glEnableVertexAttribArray(1);

   //upload vertices to buffer memory
   //will upload data to currently bound/active buffer object
   glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);  

   //load and create texture object from image data
   GLuint tex1 = createTexture(object[0]);

   glDrawArrays(GL_QUADS, 0, 4);

   glXSwapBuffers ( dpy, glxWin );
   sleep(3);


   GLuint tex2 = createTexture(object[1]);

   glDrawArrays(GL_QUADS, 0, 4);


   glXSwapBuffers ( dpy, glxWin );
   sleep(3);

   glBindTexture(GL_TEXTURE_2D, tex1);

   glDrawArrays(GL_QUADS, 0, 4);


   glXSwapBuffers ( dpy, glxWin );
   sleep(3);


   //////////////de-initialize
   glXMakeContextCurrent( dpy, 0, 0, NULL );
   glXDestroyContext( dpy, context );
   glXDestroyWindow(dpy, glxWin);
   XDestroyWindow( dpy, win );
   XCloseDisplay( dpy );

   return 0;
}

and the shaders

 //GLSL 4.00 vertex shader: passes the 2D position straight through as clip
 //coordinates (z=0, w=1) and forwards the texture coordinate to the fragment
 //stage.  Attribute locations 0 (vp) and 1 (tex) match the indices used with
 //glVertexAttribPointer() in main().
 const char* vertex_shader =
      "#version 400\n"
      "layout(location =  0) in vec2 vp;"
      "layout(location = 1) in vec2 tex;"
      "out vec2 texCoord;"
      "void main () {"
      "  gl_Position = vec4 (vp, 0.0f, 1.0f);"
      "  texCoord = tex; "
      "}";

   //GLSL 4.00 fragment shader: samples the 2D texture bound to the unit that
   //uniform `s` refers to (texture unit 0 by default, since the uniform is
   //never set) at the interpolated texture coordinate.
   const char* fragment_shader =
      "#version 400\n"
      "uniform sampler2D s;"
      "in vec2 texCoord;"
      "out vec4 color;"
      "void main () {"
      "color = texture(s, texCoord);"
      "}";

in order to avoid any confusion , here is the init() procedure

 //Framebuffer-configuration attributes requested from GLX: a double-buffered,
 //window-renderable true-color visual with 8 bits per RGBA channel, a 24-bit
 //depth buffer and an 8-bit stencil buffer.  The list is None-terminated as
 //glXChooseFBConfig() requires.
 static int att[] =
   {
      GLX_X_RENDERABLE    , True,
      GLX_DRAWABLE_TYPE   , GLX_WINDOW_BIT,
      GLX_RENDER_TYPE     , GLX_RGBA_BIT,
      GLX_X_VISUAL_TYPE   , GLX_TRUE_COLOR,
      GLX_RED_SIZE        , 8,
      GLX_GREEN_SIZE      , 8,
      GLX_BLUE_SIZE       , 8,
      GLX_ALPHA_SIZE      , 8,
      GLX_DEPTH_SIZE      , 24,
      GLX_STENCIL_SIZE    , 8,
      GLX_DOUBLEBUFFER    , True,
      //GLX_SAMPLE_BUFFERS  , 1,
      //GLX_SAMPLES         , 4,
      None
   };

   //File-scope X11/GLX state shared between init() and main().
   Display                          *dpy;      //connection to the X server
   Window                            root;     //root window of the default screen
   XVisualInfo                      *vi;       //visual matching the chosen FBConfig
   Colormap                          cmap;     //colormap created for that visual
   XSetWindowAttributes              swa;      //attributes used to create `win`
   Window                            win;      //the X window we render into
   GLXContext                        context;  //the OpenGL rendering context
   GLXFBConfig                      *fbc;      //FBConfigs returned by glXChooseFBConfig
   GLXWindow                         glxWin;   //GLX drawable wrapping `win`
   int                               fbcount;  //number of FBConfigs in `fbc`


void init(int width, int height){

   //set and choose displays for creating window
   dpy = XOpenDisplay(NULL);
   if (!dpy){
      printf("Failed to open X display\n");
      exit(1);
   }   

   root = DefaultRootWindow(dpy);

   //request a framebuffer configuration
   fbc = glXChooseFBConfig(dpy, DefaultScreen(dpy), att, &fbcount);

   if (!fbc){
      printf( "Failed to retrieve a framebuffer config\n" );
      exit(1);
   }

   vi = glXGetVisualFromFBConfig( dpy, fbc[0] );

   if(vi==NULL){
      printf("Error getting visual info\n");
      exit(1);
   }
   swa.colormap = XCreateColormap( dpy, RootWindow( dpy, vi->screen ), vi->visual, AllocNone );

   swa.background_pixmap = None ;
   swa.border_pixel            = 0;
   swa.event_mask            = StructureNotifyMask;

   //Window XCreateWindow(display, parent, x, y, width, height, border_width, depth, class, visual, valuemask, attributes) 

   win = XCreateWindow( dpy, RootWindow( dpy, vi->screen ), 0, 0, width, height, 0, vi->depth, InputOutput, vi->visual, CWBorderPixel|CWColormap|CWEventMask, &swa );
   if ( !win ){
      printf( "Failed to create window.\n" );
      exit(1);
   }

   context = glXCreateNewContext( dpy, fbc[0], GLX_RGBA_TYPE, NULL, True );

   glxWin = glXCreateWindow(dpy, fbc[0], win, NULL);

   XMapWindow(dpy, win);

   glXMakeContextCurrent(dpy, glxWin, glxWin, context);

   // start GLEW extension handler
   glewExperimental = GL_TRUE;
   GLuint err = glewInit();

   if(err!=GLEW_OK){
      fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
      exit(1);
   }


   XSelectInput(dpy, win, ButtonPressMask|KeyPressMask);

   // tell GL to only draw onto a pixel if the shape is closer to the viewer
   //glEnable (GL_DEPTH_TEST); // enable depth-testing
   //glDepthFunc (GL_LESS); // depth-testing interprets a smaller value as "closer"


}
hopjopper
  • 99
  • 8

1 Answer

6

If you use a compatibility OpenGL context, you don't need a VAO. In a sense, there is a "default" VAO which is always bound. This is how it works in OpenGL 2.x, and this is part of what the "compatibility" means in "compatibility profile".

If you use a core OpenGL context, you do need a VAO. If you don't, your code simply won't work. If you want to continue pretending you don't need a VAO, you can create a single VAO and have it bound for the entire duration of your program.

The issue of choosing a core vs compatibility profiles has its nuances, but in general it is recommended to request a core profile if you are developing a new program. Not all systems have great support for compatibility profiles anyway. Mesa limits compatibility profiles to 3.0 and OS X limits them to 2.1. If you want a core profile, you have to explicitly request a core profile when you create the context.

Dietrich Epp
  • 205,541
  • 37
  • 345
  • 415
  • sorry i really dont understand what your talking about having any reference to what i am asking about? i am using the core profile opengl , but my question was not about that, it was about VAO's which you did not address in your answer at all – hopjopper Mar 04 '16 at 04:12
  • 1
    @hopjopper What makes you so convinced that you have a core profile context? I don't see anything in the posted code that specifically requests a core context. – Reto Koradi Mar 04 '16 at 04:24
  • well i am sure that compatibility profile is not default and is requested, while core profile is default, i really dont know why you would attempt to answer a question with an assumption , honestly..... – hopjopper Mar 04 '16 at 04:27
  • 3
    @hopjopper: Compatibility profile is default. Core profile must be specifically requested. – Dietrich Epp Mar 04 '16 at 07:16
  • @DietrichEpp Speaking of Core/Compatibility profiles and the new ways OpenGL contexts work... Can you point me to some online resources where the new way of creating contexts is documented? I couldn't find anything more recent than some old junk for GLX 1.4 and OpenGL from the "pre-profiles" era :/ – SasQ Mar 22 '20 at 03:37
  • 1
    @SasQ: For X11: [GLX_ARB_create_context](https://www.khronos.org/registry/OpenGL/extensions/ARB/GLX_ARB_create_context.txt). For macOS: usually [NSOpenGLContext](https://developer.apple.com/documentation/appkit/nsopenglcontext). For Windows: [WGL_ARB_create_context](https://www.khronos.org/registry/OpenGL/extensions/ARB/WGL_ARB_create_context.txt). Note the bootstrapping problem: you need a context in order to get function pointers for extensions. The way to solve this is by creating a dummy context first, getting the pointers, and using those to create a new context. – Dietrich Epp Mar 22 '20 at 16:58
  • If you think this whole process is a bit crazy, well, that’s one of the big things that was fixed in OpenGL ES and in Vulkan. – Dietrich Epp Mar 22 '20 at 16:59