
I am trying to take an array of custom structs, put it onto the GPU, perform some operation on it, and return the result to the CPU. For this example I'm using a statically sized array, but later on I won't know the size of the array until runtime, which is why I'm using buffers here.

It is my understanding that the way to do this in OpenGL is via SSBOs. After a fair bit of research I have something that seems to compile and run, but I cannot for the life of me figure out why none of the values change when I run the compute shader. It SHOULD be giving me back 1.61 as a result (7 * 0.23), but instead it just gives me back the 0.23 that I started with.
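
One caveat on "seems to compile and run": nothing in the code below actually checks the GLSL compile or link status, so that only tells me the C++ side built. For reference, this is roughly the status check I would add (a sketch using a hypothetical helper, not currently part of my code):

void CheckShaderStatus(GLuint shader, GLuint program) {
  GLint ok = 0;
  char log[1024];
  // Did the compute shader actually compile?
  glGetShaderiv(shader, GL_COMPILE_STATUS, &ok);
  if (!ok) {
    glGetShaderInfoLog(shader, sizeof(log), NULL, log);
    std::cout << "Compile failed: " << log << std::endl;
  }
  // Did the program link?
  glGetProgramiv(program, GL_LINK_STATUS, &ok);
  if (!ok) {
    glGetProgramInfoLog(program, sizeof(log), NULL, log);
    std::cout << "Link failed: " << log << std::endl;
  }
}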

Here is the code I'm trying to get running:

main.cpp

#include <iostream>
#include "functions.h"

int main() {
  std::cout << "Initializing array" << std::endl;
  ssbo_data testArr[512];

  //Put this part on the GPU. This is effectively what I want my code to be doing
  //for (int i = 0; i < sizeof(testArr)/sizeof(testArr[0]); i++) {
  //  testArr[i].y *= testArr[i].x;
  //}
  
  //Initialize the shader
  std::cout << "Initializing Shader" << std::endl;
  GLuint computeHandle = InitializeShader("compute.shader");
  glUseProgram(computeHandle);
  
  //Apply the SSBO
  std::cout << "Applying Shader SSBO" << std::endl;
  GLuint ssbo = 0;
  glGenBuffers(1, &ssbo);
  glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo);
  glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(testArr), &testArr, GL_STATIC_DRAW);
  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
  
  //Run the shader program
  std::cout << "Running Shader Program" << std::endl;
  glDispatchCompute(512,1,1);
  glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
  glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, 0);
  
  //Get the data back from the GPU
  glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo);
  ssbo_data* ptr = (ssbo_data*)glMapBuffer(GL_SHADER_STORAGE_BUFFER, GL_READ_WRITE);
  for (int i = 0; i < 512; i++) {
    testArr[i] = ptr[i];
  }
  glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
  
  std::cout << testArr[2].y << std::endl;
  std::cout << "Done" << std::endl;
  return 0;
}
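
As an aside, my understanding is that the readback at the end could also be done without mapping at all, via glGetBufferSubData; if the mapping itself were the problem, something like this sketch should behave the same:

// Alternative readback: copy the buffer contents straight into the array
// instead of mapping the buffer.
glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo);
glGetBufferSubData(GL_SHADER_STORAGE_BUFFER, 0, sizeof(testArr), testArr);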

functions.h

#ifndef functionsList
#define functionsList
#define GLFW_INCLUDE_NONE
#include <GLFW/glfw3.h>
#include <GL/glew.h>
#include <GL/glut.h>
#include <GL/gl.h>

#include <fstream>
#include <string>
#include <iostream>
#include <vector>

struct ssbo_data {
  int x = 7;
  float y = 0.23;
};

std::string GetShaderCode(std::string shaderPath);
static void error_callback(int error, const char* description);
void StartWindow();
GLuint CompileShader(const char* computeShaderSource);
GLuint StartShaderProgram(GLuint computeShader);
GLuint InitializeShader(std::string shaderPath);

#endif
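
As a layout sanity check: with std430 this struct should come out as an int at offset 0 and a float at offset 4, for an 8-byte array stride, which matches the C++ struct exactly. These static_asserts (my own addition, not part of the build above) would catch a mismatch on the C++ side:

#include <cstddef> // for offsetof

// std430 gives the shader-side struct an 8-byte stride (int + float, no padding);
// make sure the C++ struct we upload has the same layout.
static_assert(sizeof(ssbo_data) == 8, "ssbo_data does not match the expected std430 stride");
static_assert(offsetof(ssbo_data, y) == 4, "ssbo_data::y is not at offset 4");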

OpenGL_Interface.cpp

#include "functions.h"

std::string GetShaderCode(std::string shaderPath) {
  // Get the compute shader from disk
  std::ifstream myfile;
  myfile.open(shaderPath);
  std::string computeShaderSourceString;
  if (myfile.is_open()) {
    while (myfile) {
      computeShaderSourceString += myfile.get();
    }
  }
  myfile.close();
  return computeShaderSourceString;
}

static void error_callback(int error, const char* description) {
  fprintf(stderr, "Error: %s\n", description);
}

void StartWindow() {
  GLFWwindow* window;
  glfwSetErrorCallback(error_callback);
  if (!glfwInit()) {
    std::cout << "glfwInit() failed to start" << std::endl;
    exit(EXIT_FAILURE);
  }

  glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
  glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);

  window = glfwCreateWindow(640, 480, "Simple Example", NULL, NULL);
  if (!window) {
    glfwTerminate();
    std::cout << "glfwCreateWindow() failed to start" << std::endl;
    exit(EXIT_FAILURE);
  }

  glfwMakeContextCurrent(window);

  //glewExperimental=true;
  GLenum err = glewInit();
  if (err != GLEW_OK) {
    std::cout << "glewInit() failed to start" << std::endl;
    exit(EXIT_FAILURE);
  }
  if (!GLEW_VERSION_2_1) {
    std::cout << "GLEW version does not support 2.1" << std::endl;
    exit(EXIT_FAILURE);
  }
}

GLuint CompileShader(const char* computeShaderSource) {
  GLuint computeShader = glCreateShader(GL_COMPUTE_SHADER);
  glShaderSource(computeShader, 1, &computeShaderSource, NULL);
  glCompileShader(computeShader);
  return computeShader;
}

GLuint StartShaderProgram(GLuint computeShader) {
  GLuint computeProgram = glCreateProgram();
  glAttachShader(computeProgram, computeShader);
  glLinkProgram(computeProgram);
  return computeProgram;
}

GLuint InitializeShader(std::string shaderPath) {
  const char* computeShaderSource = GetShaderCode(shaderPath).c_str();
  StartWindow();

  GLuint computeHandle = CompileShader(computeShaderSource);
  StartShaderProgram(computeHandle);
  return computeHandle;
}
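
Also worth noting: compute shaders need OpenGL 4.3, so the GLEW_VERSION_2_1 check at the end of StartWindow() probably isn't telling me much. A check along these lines (sketch) would confirm what context I actually received:

// Print the version of the context we actually got, and require 4.3,
// since compute shaders are unavailable below that.
std::cout << "GL version: " << glGetString(GL_VERSION) << std::endl;
if (!GLEW_VERSION_4_3) {
  std::cout << "Context does not support OpenGL 4.3 (needed for compute shaders)" << std::endl;
  exit(EXIT_FAILURE);
}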

compute.shader

#version 430 core

struct exampleData {
  int x;
  float y;
};

layout(binding = 0, std430) buffer exampleDataBuff {
  exampleData ED[];
};

void main() {
  ED[gl_GlobalInvocationID.x].y *= ED[gl_GlobalInvocationID.x].x;
}
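
One more thing I'm unsure about: the shader above never declares a local work group size, and my understanding is that compute shaders are expected to. From the C++ side I could at least query what the linked program ended up with (sketch; computeProgram here stands for the linked program handle, and my understanding is the query only works if linking succeeded):

// Ask the linked program what local work group size it actually has.
GLint workGroupSize[3];
glGetProgramiv(computeProgram, GL_COMPUTE_WORK_GROUP_SIZE, workGroupSize);
std::cout << "local_size = " << workGroupSize[0] << " x "
          << workGroupSize[1] << " x " << workGroupSize[2] << std::endl;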

And finally I am compiling this on Ubuntu using the following:

g++ *.cpp -lpthread -lglut -lGLU -lGL -lGLEW -lglfw -std=c++17 -fconcepts -o OpenGLTest.out

What am I doing wrong?

  • Are you sure this doesn't give any errors? You are asking for an OpenGL 2.0 context, but compute shaders were only introduced in 4.3. And is there any particular reason why you only read 500 values instead of 512? – BDL Mar 13 '23 at 12:31
  • Weirdly, not that I can see. It compiles fine and runs fine, but gives me the wrong value back, as if the GPU never ran any code. It is very strange. As for the 500 vs 512, that was me forgetting to edit it; I'll change it in the question now. – Andrey Mar 13 '23 at 12:40
  • You bind the SSBO to index 0, but your shader specifies that the buffer is at index 1. About the version: Depending on your OS, you might get a context with a higher version. But if you run it on a machine that doesn't support OpenGL 4.3, you will get runtime errors when trying to access any method that isn't available. So if you need 4.3, you should also request that context version. – BDL Mar 13 '23 at 12:40
  • Despite changing the index and fixing the OpenGL context (edited into the question), it is weirdly still doing the same thing. It's almost as if my GPU is not actually running my compiled code at all, despite everything executing fine as far as I can see. I set the data on the GPU, I tell the GPU to run its code, then I request the GPU return its output, and the GPU just returns what I gave it, as if my OpenGL code did nothing. – Andrey Mar 14 '23 at 13:09

0 Answers