-1

我正在创建和存储 OpenGL ES1 3D 模型,并希望将用作纹理的图像文件包含在与 3D 模型数据相同的文件中。我无法以可用格式加载图像数据。我正在使用 UIImageJPEGRepresentation 转换图像数据并将其存储到 NSData 中。然后我将它与所有 3D 数据一起附加到 NSMutableData 对象,并将其写入文件。数据似乎可以毫无错误地写入和读取,但在尝试使用图像数据创建“CGImageRef”时遇到问题,我用它来为 3D 模型生成纹理数据。从文件加载后,图像数据似乎处于无法识别的格式,因为它会生成错误“CG​​ContextDrawImage:无效上下文 0x0”。当我尝试创建“CGImageRef”时。我怀疑图像数据以某种方式开始未对齐,导致它在尝试创建“CGImageRef”时被拒绝。我很感激任何帮助。在这一点上我很难过。所有的数据大小和偏移量加起来看起来还不错。保存和加载不会出错。图像数据似乎有点不对劲,但我不知道为什么。

这是我的代码:

//======================================================
- (BOOL)save3DFile: (NSString *)filePath {


  // load TEST IMAGE into UIIMAGE
  UIImage *image = [UIImage imageNamed:@“testImage.jpg"];

  // convert image to JPEG encoded NSDATA
  NSData *imageData = UIImageJPEGRepresentation(image,1.0);

  // Save length of imageData to global "imDataLen" to use later in “load3DFile”
  imDataLen = [imageData length];

  // TEST: this works fine for CGImageRef creation in “loadTexture” 
  // traceView.image=[UIImage imageWithData:[imageData subdataWithRange:NSMakeRange(0, imageDataLen)]];  
  // [self loadTexture];

  // TEST: this also works fine for CGImageRef creation in “loadTexture” 
  // traceView.image=[UIImage imageWithData:txImData];
  // [self loadTexture];

  fvoh.fileVersion  = FVO_VERSION;
  fvoh.obVertDatLen = obVertDatLen;
  fvoh.obFaceDatLen = obFaceDatLen;
  fvoh.obNormDatLen = obNormDatLen;
  fvoh.obTextDatLen = obTextDatLen;
  fvoh.obCompCount  = obCompCount;
  fvoh.obVertCount  = obVertCount;
  fvoh.obElemCount  = obElemCount;
  fvoh.obElemSize   = obElemSize;
  fvoh.obElemType   = obElemType;

  NSMutableData *obSvData;
  obSvData=[NSMutableData dataWithBytes:&fvoh length:(sizeof(fvoh))];
  [obSvData appendBytes:obElem   length:obFaceDatLen];
  [obSvData appendBytes:mvElem   length:obVertDatLen];
  [obSvData appendBytes:mvNorm   length:obNormDatLen];
  [obSvData appendBytes:obText   length:obTextDatLen];
  [obSvData appendBytes:&ds      length:(sizeof(ds))];

  // next, we append image data, and write all data to a file
  // seems to work fine, no errors, at this point
  [obSvData appendBytes: imageData length:[imageData length]];  

  BOOL success=[obSvData writeToFile: filePath atomically:YES];
  return success; 
}
//======================================================
- (void) load3DFile:(NSString *)filePath {

  NSData *fvoData;
  NSUInteger offSet,fiLen,fhLen,dsLen;
  [[FileList sharedFileList] setCurrFile:(NSString *)filePath];

  fvoData=[NSData dataWithContentsOfFile:filePath];
  fiLen=[fvoData length];
  fhLen=sizeof(fvoh);
  dsLen=sizeof(ds);

  memcpy(&fvoh,[fvoData bytes],fhLen);offSet=fhLen;

  //+++++++++++++++++++++++++++++++
  obVertDatLen = fvoh.obVertDatLen;
  obFaceDatLen = fvoh.obFaceDatLen;
  obNormDatLen = fvoh.obNormDatLen;
  obTextDatLen = fvoh.obTextDatLen;
  obCompCount  = fvoh.obCompCount;
  obVertCount  = fvoh.obVertCount;
  obElemCount  = fvoh.obElemCount;
  obElemSize   = fvoh.obElemSize;
  obElemType   = fvoh.obElemType;
  //+++++++++++++++++++++++++++++++

  memcpy(obElem, [fvoData bytes]+offSet,obFaceDatLen);offSet+=obFaceDatLen;
  memcpy(mvElem, [fvoData bytes]+offSet,obVertDatLen);offSet+=obVertDatLen;
  memcpy(mvNorm, [fvoData bytes]+offSet,obNormDatLen);offSet+=obNormDatLen;
  memcpy(obText, [fvoData bytes]+offSet,obTextDatLen);offSet+=obTextDatLen;
  memcpy(&ds,    [fvoData bytes]+offSet,dsLen);offSet+=dsLen;

  // the following seem to read the data into “imageData” just fine, no errors
  // NSData *imageData = [fvoData subdataWithRange:NSMakeRange(offSet, imDataLen)];
  // NSData *imageData = [fvoData subdataWithRange:NSMakeRange((fiLen-imDataLen), imDataLen)];
  // NSData *imageData = [NSData dataWithBytes:[fvoData bytes]+offSet length: imDataLen];
  NSData *imageData = [NSData dataWithBytes:[fvoData bytes]+(fiLen-imDataLen) length: imDataLen];

  // but the contents of imageData seem to end up in an unexpected format, causing error: 
  // “CGContextDrawImage: invalid context 0x0.” during CGImageRef creation in “loadTexture”

  traceView.image=[UIImage imageWithData:imageData];
  [self loadTexture];
}
//======================================================
- (void)loadTexture {

  CGImageRef image=[traceView.image].CGImage;
  CGContextRef texContext;GLubyte* bytes=nil;GLsizei width,height;

  if(image){
      width=(GLsizei)CGImageGetWidth(image);
      height=(GLsizei)CGImageGetHeight(image);
      bytes=(GLubyte*) calloc(width*height*4,sizeof(GLubyte));
      texContext=CGBitmapContextCreate(bytes,width,height,8,width*4,CGImageGetColorSpace(image),
      kCGImageAlphaPremultipliedLast);
      CGContextDrawImage(texContext,CGRectMake(0.0,0.0,(CGFloat)width,(CGFloat)height),image);
      CGContextRelease(texContext);
  }

  if(bytes){
      glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
      glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
      glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_S,GL_CLAMP_TO_EDGE);
      glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_WRAP_T,GL_CLAMP_TO_EDGE);
      glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,width,height,0,GL_RGBA,GL_UNSIGNED_BYTE,bytes);
      free(bytes);
  }
}
//======================================================
4

1 回答 1

0

我没有收到这个问题的任何答案。我终于自己偶然发现了答案。当我执行 save3DFile 代码时,不是将图像数据添加到 NSMutableData *obSvData,而是使用“appendBytes”,如下所示:

[obSvData appendBytes: imageData length:[imageData length]];

我改为使用“appendData”,如下所示:

[obSvData appendData: imageData];

其中 imageData 之前填充了 UIImage 的内容,并在过程中转换为 JPEG 格式,如下所示:

NSData *imageData = UIImageJPEGRepresentation(image,1.0);

有关上下文,请参阅上面的完整代码清单。无论如何,使用 'appendData' 而不是 'appendBytes' 产生了很大的不同,并允许我将图像数据与所有其他 3D 模型数据(顶点、索引、法线等)一起存储在同一个文件中,重新加载所有该数据没有问题,并成功地从单个文件创建带有纹理的 3D 模型。

于 2018-12-18T22:24:02.100 回答