日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 编程资源 > 编程问答 >内容正文

编程问答

opengl模型加载

發布時間:2023/12/18 编程问答 25 豆豆
生活随笔 收集整理的這篇文章主要介紹了 opengl模型加载 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

環境配置以及視角修改和著色器亮度修改

首先注意我的文件生成目錄為bin目錄所以要用到的dll文件必須放到bin目錄文件夾下面,而不是debug下面

  • 調整視角位置更高一點
public:
    // Camera placed higher (y = 7) and pulled back (z = 20) so the whole
    // loaded model fits in view.
    Camera() {
        fov = 45;
        Position = glm::vec3(0.0f, 7.0f, 20.0f);
        UP = glm::vec3(0.0f, 1.0f, 0.0f);
        // Front is used both for rotation and WASD movement; note the -1:
        // the camera looks down the negative Z axis.
        Front = glm::vec3(0.0f, 0.0f, -1.0f);
    } // fix: this closing brace was previously swallowed by the trailing // comment
  • 調整的亮一點,取消光源衰減
// Phong sum WITHOUT distance attenuation: ambient + diffuse + specular,
// each modulated by the material, then tinted by the light color.
// Dropping attenuation keeps the model uniformly bright.
result = (light.ambient*material.ambient+diffuse*light.diffuse*material.diffuse+specular*light.specular*material.specular)*lightColor;

而不是

// Original version: the extra *attenuation factor dims the result with
// distance from the light, which made the model too dark here.
result = (light.ambient*material.ambient+diffuse*light.diffuse*material.diffuse+specular*light.specular*material.specular)*lightColor*attenuation;

model函數

  • 注意構造函數里邊的參數應該是:Model(const char* path),因為你輸入的是一個不變的量
  • model函數實際上構建了一個又一個的mesh,所以在輸出的時候是用的還是mesh.Draw
  • 注意mesh向量數組的構建
  • 本身就有處理obj的紋理的能力
  • #pragma once #ifndef MODEL_H #define MODEL_H#include <glad/glad.h> #include <glm/glm.hpp> #include <glm/gtc/matrix_transform.hpp> #include "stb_image.h" #include <assimp/Importer.hpp> #include <assimp/scene.h> #include <assimp/postprocess.h>#include "mesh.h" #include "Shader.h"#include <string> #include <fstream> #include <sstream> #include <iostream> #include <map> #include <vector> using namespace std; class Model { public:/* 函數 */Model(const char* path){loadModel(path); //調用模型路徑加載函數}//渲染函數void Draw(Shader shader){for (unsigned int i = 0; i < meshes.size(); i++)meshes[i].Draw(shader); //調用mesh的draw函數 } private:/* 模型數據 */vector<Mesh> meshes;string directory;/* 加載模型函數 */void loadModel(string path){// 讀取文件 via ASSIMPAssimp::Importer importer;const aiScene* scene = importer.ReadFile(path, aiProcess_Triangulate | aiProcess_FlipUVs); //用三角形|翻轉Y軸|還有一個切線空間暫時用不到// check for errorsif (!scene || scene->mFlags & AI_SCENE_FLAGS_INCOMPLETE || !scene->mRootNode) // if is Not Zero{cout << "ERROR::ASSIMP:: " << importer.GetErrorString() << endl;return;}// retrieve the directory path of the filepathdirectory = path.substr(0, path.find_last_of('/'));// process ASSIMP's root node recursivelyprocessNode(scene->mRootNode, scene); //通過路徑尋找并且賦值}//遞歸去獲取這些網格索引,獲取每個網格,處理每個網格,接著對每個節點的子節點重復這一過程。//結構都放進meshes里邊void processNode(aiNode* node, const aiScene* scene){// 處理節點所有的網格(如果有的話)for (unsigned int i = 0; i < node->mNumMeshes; i++){aiMesh* mesh = scene->mMeshes[node->mMeshes[i]];meshes.push_back(processMesh(mesh, scene)); //函數將一個新的元素加到vector的最后面,位置為當前最后一個元素的下一個元素}// 接下來對它的子節點重復這一過程for (unsigned int i = 0; i < node->mNumChildren; i++){processNode(node->mChildren[i], scene);}}//將構造mesh函數放到model類里邊Mesh processMesh(aiMesh* mesh, const aiScene* scene){vector<Vertex> vertices;vector<unsigned int> indices;vector<Texture> textures;for (unsigned int i = 0; i < mesh->mNumVertices; i++){Vertex vertex;// 處理頂點位置、法線和紋理坐標glm::vec3 vector; // we declare a placeholder vector since assimp uses its own 
vector class that doesn't directly convert to glm's vec3 class so we transfer the data to this placeholder glm::vec3 first.// positionsvector.x = mesh->mVertices[i].x;vector.y = mesh->mVertices[i].y;vector.z = mesh->mVertices[i].z;vertex.Position = vector;// normalsvector.x = mesh->mNormals[i].x;vector.y = mesh->mNormals[i].y;vector.z = mesh->mNormals[i].z;vertex.Normal = vector;// texture coordinatesif (mesh->mTextureCoords[0]) // does the mesh contain texture coordinates?{glm::vec2 vec;// a vertex can contain up to 8 different texture coordinates. We thus make the assumption that we won't // use models where a vertex can have multiple texture coordinates so we always take the first set (0).vec.x = mesh->mTextureCoords[0][i].x;vec.y = mesh->mTextureCoords[0][i].y;vertex.TexCoords = vec;}elsevertex.TexCoords = glm::vec2(0.0f, 0.0f); //沒有紋理的情況vertices.push_back(vertex);}// 處理索引for (unsigned int i = 0; i < mesh->mNumFaces; i++){aiFace face = mesh->mFaces[i];// retrieve all indices of the face and store them in the indices vectorfor (unsigned int j = 0; j < face.mNumIndices; j++)indices.push_back(face.mIndices[j]);}//處理材質//把所有的材質按照類型分類放到向量里邊if (mesh->mMaterialIndex >= 0){//注意這里是給材質對象賦值aiMaterial* material = scene->mMaterials[mesh->mMaterialIndex];//分別構造兩個紋理數組存放漫反射和鏡面反射紋理vector<Texture> diffuseMaps = loadMaterialTextures(material,aiTextureType_DIFFUSE, "texture_diffuse");textures.insert(textures.end(), diffuseMaps.begin(), diffuseMaps.end()); //填充vector<Texture> specularMaps = loadMaterialTextures(material,aiTextureType_SPECULAR, "texture_specular");textures.insert(textures.end(), specularMaps.begin(), specularMaps.end());}return Mesh(vertices, indices, textures);}//加載與存儲紋理vector<Texture> loadMaterialTextures(aiMaterial* mat, aiTextureType type,string typeName){vector<Texture> textures;for (unsigned int i = 0; i < mat->GetTextureCount(type); i++){aiString str;mat->GetTexture(type, i, &str);Texture texture;texture.id = TextureFromFile(str.C_Str(), 
directory);texture.type = typeName;texture.path = str.C_Str();textures.push_back(texture);}return textures;}//讀取紋理文件unsigned int TextureFromFile(const char* path, const string& directory){string filename = string(path);filename = directory + '/' + filename;unsigned int textureID;glGenTextures(1, &textureID);int width, height, nrComponents;unsigned char* data = stbi_load(filename.c_str(), &width, &height, &nrComponents, 0);if (data){GLenum format;if (nrComponents == 1)format = GL_RED;else if (nrComponents == 3)format = GL_RGB;else if (nrComponents == 4)format = GL_RGBA;glBindTexture(GL_TEXTURE_2D, textureID);glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);glGenerateMipmap(GL_TEXTURE_2D);glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);stbi_image_free(data);}else{std::cout << "Texture failed to load at path: " << path << std::endl;stbi_image_free(data);}return textureID; //返回textureID} }; #endif

    主函數修改

  • 添加了model的構造函數
  • 旋轉修改模型改為繞著一個Y軸旋轉
  • 我自己定義了光源,所以并沒有刪除mesh函數的構造
  • int main() {//初始化GLFWwindow* window = init(); //創建一個窗口指針,因為里邊是一個空指針所有init函數必須改變類型//著色 Shader lightShader("lamp.vert", "lamp.frag");Shader cubeShader("cube.vert", "cube.frag"); //使用封裝shader類glm::mat4 trans = glm::mat4(1.0f); //單位矩陣glm::mat4 model = glm::mat4(1.0f); //模型矩陣:用于物體坐標轉化為世界坐標glm::mat4 view = glm::mat4(1.0f);glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED); //防止窗口對鼠標進行攔截//渲染引擎glEnable(GL_DEPTH_TEST); //打開深度緩存//生成mesh:光照用Mesh mesh = processCubeMesh();//使用model類生成model對象Model ourModel("D:/openGLResource/bin/nanosuit/nanosuit.obj");while (!glfwWindowShouldClose(window)) { //當需要退出時候退出//因為每一個循環的進入時間不一樣,確保推進的距離一樣,要用到時間差currentFrame = glfwGetTime();deltaTime = currentFrame - lastFrame;lastFrame = currentFrame;processInput(window); //每個周期都調用鍵位函數//動態變化攝像機位置float radius = 10.0f; //半徑float camX = sin(glfwGetTime()) * radius;float camZ = cos(glfwGetTime()) * radius;//設置顏色值和透明度,需要鏈接opengl32庫才行glClearColor(0.2f, 0.3f, 0.3f, 0.1f); //背景glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //清理顏色或者深度緩存//繪制立方體trans = glm::mat4(1.0f);model = glm::translate(trans, cubePositions[0]); //只變換位置用來區分不同的立方體model = glm::rotate(model, (float)glfwGetTime(), glm::vec3(0.0f, 3.0f, 0.0f)); //角度(逆向),繞view = camera.GetViewMatrix();glm::mat4 projection; //投影projection = glm::perspective(glm::radians(camera.fov), 800.0f / 600.0f, 0.1f, 100.0f); //透視投影:FOV,屏幕長寬比,近,遠。cubeShader.useShader();//傳值給傳送多個矩陣讓其發生位置變化glUniformMatrix4fv(glGetUniformLocation(cubeShader.ID, "projection"), 1, GL_FALSE, glm::value_ptr(projection));glUniformMatrix4fv(glGetUniformLocation(cubeShader.ID, "view"), 1, GL_FALSE, glm::value_ptr(view));glUniformMatrix4fv(glGetUniformLocation(cubeShader.ID, "model"), 1, GL_FALSE, glm::value_ptr(model));glUniform4f(glGetUniformLocation(cubeShader.ID, "lightPos"), cubePositions[1].r, cubePositions[1].g, cubePositions[1].b, 1); //注意lightposion的三個值,現在是齊次坐標glUniform3f(glGetUniformLocation(cubeShader.ID, "viewPos"), camera.Position.r, camera.Position.g, camera.Position.b); 
//觀察的方向glUniform3f(glGetUniformLocation(cubeShader.ID, "front"), camera.Front.r, camera.Front.g, camera.Front.b); //相機朝向的方向glUniform3f(glGetUniformLocation(cubeShader.ID, "objectColor"), 1.0f, 0.5f, 0.31f);glUniform3f(glGetUniformLocation(cubeShader.ID, "lightColor"), 1.0f, 1.0f, 1.0f);//繪制mesh//mesh.Draw(cubeShader);ourModel.Draw(cubeShader);//綁定第二個立方體model = glm::translate(trans, cubePositions[2]);glUniformMatrix4fv(glGetUniformLocation(cubeShader.ID, "model"), 1, GL_FALSE, glm::value_ptr(model));//mesh.Draw(cubeShader);//畫第二個立方體//ourModel.Draw(cubeShader);//繪制光源trans = glm::mat4(1.0f);model = glm::translate(trans, cubePositions[1]);model = glm::scale(model, glm::vec3(0.2f, 0.2f, 0.2f));trans = projection * view * model;lightShader.useShader(); //注意這是新的useShader了glUniform3f(glGetUniformLocation(lightShader.ID, "lightColor"), 1.0f, 1.0f, 1.0f);glUniformMatrix4fv(glGetUniformLocation(lightShader.ID, "transform"), 1/*個矩陣*/, GL_FALSE,glm::value_ptr(trans)); //設置偏移矩陣mesh.Draw(lightShader);//ourModel.Draw(lightShader);glfwSwapBuffers(window);glfwPollEvents(); //立即處理已經到位的事件,如果沒有這個就會一直渲染而不觸發事件 }//退出glfwTerminate();return 0; }

    結果

    • 只有紋理的漫反射而沒有什么衰減等因素的影響

    • 只取消光衰減之后的樣子,注意漫反射以及鏡面反射

    總結

    以上是生活随笔為你收集整理的opengl模型加载的全部內容,希望文章能夠幫你解決所遇到的問題。

    如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。