我对 CoreMl 很陌生,我想从今年在 WWDC 上发布的 Coreml 模型部署中检索模型。
我制作了一个应用程序,仅对特殊和稀有事物进行分类,并将该 model.archive 上传到 CoreMl 模型部署仪表板。
我成功部署了模型并将其显示为活动状态。
现在的问题是我无法检索那个模型,我已经尝试了很多,我什至看到了那个模型上的所有 WWDC 会话,甚至从那个会话中复制了那个代码,但都是徒劳的
这是我的整个模型加载和检索代码
我的 Vision 请求代码
func coremlmodel(using: VNCoreMLModel) -> VNCoreMLRequest {
let request = VNCoreMLRequest(model: using, completionHandler: { [weak self] request, error in
self?.processClassifications(for: request, error: error)
})
request.imageCropAndScaleOption = .centerCrop
return request
}
我的分类 cade 将拍摄图像
func updateClassifications(for image: UIImage) {
classificationLabel.text = "Classifying..."
var models = try? VNCoreMLModel(for: SqueezeNet().model)
if let modelsss = models {
extensionofhandler(ciimage: image, vnmodel: modelsss)
return
}
_ = MLModelCollection.beginAccessing(identifier: "TestingResnetModel") { [self] result in
var modelUrl: URL?
switch result {
case .success(let collection):
modelUrl = collection.entries["class"]?.modelURL
case .failure(let error):
fatalError("sorry \(error)")
}
let result = loadfishcallisier(from: modelUrl)
switch result {
case .success(let modelesss):
models = try? VNCoreMLModel(for: modelesss)
extensionofhandler(ciimage: image, vnmodel: models!)
case .failure(let error):
fatalError("plz \(error)")
}
}
}
func loadfishcallisier(from modelUrl: URL?) -> Result<MLModel,Error> {
if let modelUrl = modelUrl {
return Result { try MLModel(contentsOf: modelUrl)}
} else {
return Result { try MLModel(contentsOf: modelUrl!, configuration: .init())}
}
}
func extensionofhandler(ciimage: UIImage,vnmodel: VNCoreMLModel) {
let orientation = CGImagePropertyOrientation(ciimage.imageOrientation)
guard let ciImage = CIImage(image: ciimage) else { fatalError("Unable to create \(CIImage.self) from \(ciimage).")
}
DispatchQueue.global(qos: .userInitiated).async { [self] in
let handler = VNImageRequestHandler(ciImage: ciImage, orientation: orientation)
do {
try handler.perform([coremlmodel(using: vnmodel)])
} catch {
fatalError("Check the error")
}
}
}
我的分类代码
func processClassifications(for request: VNRequest, error: Error?) {
DispatchQueue.main.async {
guard let results = request.results else {
self.classificationLabel.text = "Unable to classify image.\n\(error!.localizedDescription)"
return
}
// The `results` will always be `VNClassificationObservation`s, as specified by the Core ML model in this project.
let classifications = results as! [VNClassificationObservation]
if classifications.isEmpty {
self.classificationLabel.text = "Nothing recognized."
} else {
// Display top classifications ranked by confidence in the UI.
let topClassifications = classifications.prefix(2)
let descriptions = topClassifications.map { classification in
// Formats the classification for display; e.g. "(0.37) cliff, drop, drop-off".
return String(format: " (%.2f) %@", classification.confidence, classification.identifier)
}
self.classificationLabel.text = "Classification:\n" + descriptions.joined(separator: "\n")
}
}
}
Xcode 没有抛出任何错误,但它没有识别任何东西。
如果我在我的代码中做错了什么,我谦卑地请你给我看并解决它
是否有从 coreml 模型部署中检索模型的教程。