🧠 Mastering Core ML
From image classification and object detection to text analysis with on-device machine learning! All processing happens on the device.
✨ What is Core ML?
Core ML is Apple's machine learning framework. TensorFlow and PyTorch models can be converted to run on iPhone, and because all processing happens on the device, privacy is preserved.
📦 Adding a Model
Importing a model
1. Download a Core ML model
- Models provided by Apple: https://developer.apple.com/machine-learning/models/
- MobileNet, ResNet, SqueezeNet, etc.
2. Drag the .mlmodel file into your Xcode project
3. Swift code is generated automatically
- MyModel.swift (auto-generated, do not edit; the generated class is used directly, as in the sketch below)
🖼️ Image Classification
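Before wiring up Vision, it can help to confirm that the generated model class loads and to check which input it expects. A minimal sketch, assuming the Apple-provided MobileNet model has been added to the project:

import CoreML

// Load the auto-generated model class (Xcode creates it from MobileNet.mlmodel)
let mobileNet = try MobileNet(configuration: MLModelConfiguration())

// Inspect the underlying MLModel's expected inputs and outputs
let description = mobileNet.model.modelDescription
print(description.inputDescriptionsByName)   // e.g. an "image" input with its expected pixel size
print(description.outputDescriptionsByName)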
ImageClassifier.swift
import CoreML
import Vision
import UIKit

class ImageClassifier {
    // Classify an image with the MobileNet model (provided by Apple)
    func classifyImage(_ image: UIImage) async throws -> String {
        // 1. Load the model
        let model = try VNCoreMLModel(for: MobileNet(configuration: MLModelConfiguration()).model)

        // 2. Create the request
        let request = VNCoreMLRequest(model: model)

        // 3. Process the image
        guard let cgImage = image.cgImage else {
            throw NSError(domain: "Image Error", code: 0)
        }
        let handler = VNImageRequestHandler(cgImage: cgImage)
        try handler.perform([request])

        // 4. Handle the results
        guard let results = request.results as? [VNClassificationObservation],
              let topResult = results.first else {
            return "Classification failed"
        }
        return "\(topResult.identifier) (\(Int(topResult.confidence * 100))%)"
    }
}

// Usage example
let classifier = ImageClassifier()
let image = UIImage(named: "dog.jpg")!
let result = try await classifier.classifyImage(image)
print(result)  // "golden retriever (92%)"
📱 SwiftUI Integration
ClassifierView.swift
import SwiftUI
import PhotosUI

struct ImageClassifierView: View {
    @State private var selectedItem: PhotosPickerItem?
    @State private var selectedImage: UIImage?
    @State private var result: String = ""
    @State private var isProcessing = false

    let classifier = ImageClassifier()

    var body: some View {
        VStack(spacing: 20) {
            if let image = selectedImage {
                Image(uiImage: image)
                    .resizable()
                    .scaledToFit()
                    .frame(height: 300)
                    .cornerRadius(12)
            }

            if isProcessing {
                ProgressView("Analyzing...")
            } else if !result.isEmpty {
                Text(result)
                    .font(.title2)
                    .bold()
            }

            PhotosPicker(selection: $selectedItem, matching: .images) {
                Label("Choose Photo", systemImage: "photo")
            }
            .buttonStyle(.borderedProminent)
        }
        .padding()
        .onChange(of: selectedItem) {
            Task {
                if let data = try? await selectedItem?.loadTransferable(type: Data.self),
                   let image = UIImage(data: data) {
                    selectedImage = image
                    await classify(image)
                }
            }
        }
    }

    func classify(_ image: UIImage) async {
        isProcessing = true
        defer { isProcessing = false }
        result = (try? await classifier.classifyImage(image)) ?? "Error"
    }
}
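For quick iteration in the canvas, a preview can be attached to the view (a small sketch, assuming Xcode 15's #Preview macro):

// Renders ImageClassifierView in the Xcode preview canvas
#Preview {
    ImageClassifierView()
}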
🎯 Object Detection
ObjectDetector.swift
import UIKit
import Vision

func detectObjects(_ image: UIImage) async throws -> [VNRecognizedObjectObservation] {
    // Uses a YOLO model (must be added to the project)
    let model = try VNCoreMLModel(for: YOLOv3(configuration: MLModelConfiguration()).model)
    let request = VNCoreMLRequest(model: model)

    guard let cgImage = image.cgImage else {
        throw NSError(domain: "Image Error", code: 0)
    }
    let handler = VNImageRequestHandler(cgImage: cgImage)
    try handler.perform([request])

    guard let results = request.results as? [VNRecognizedObjectObservation] else {
        return []
    }
    return results
}

// Draw the results
func drawBoxes(on image: UIImage, results: [VNRecognizedObjectObservation]) -> UIImage {
    let renderer = UIGraphicsImageRenderer(size: image.size)
    return renderer.image { context in
        image.draw(at: .zero)
        UIColor.red.setStroke()
        context.cgContext.setLineWidth(3)

        for observation in results {
            // Convert from Vision's coordinate space (origin at the bottom-left)
            let boundingBox = observation.boundingBox
            let rect = CGRect(
                x: boundingBox.origin.x * image.size.width,
                y: (1 - boundingBox.origin.y - boundingBox.height) * image.size.height,
                width: boundingBox.width * image.size.width,
                height: boundingBox.height * image.size.height
            )
            context.cgContext.stroke(rect)

            // Draw the label
            if let label = observation.labels.first {
                let text = "\(label.identifier) \(Int(label.confidence * 100))%"
                let attributes: [NSAttributedString.Key: Any] = [
                    .font: UIFont.boldSystemFont(ofSize: 16),
                    .foregroundColor: UIColor.red
                ]
                text.draw(at: rect.origin, withAttributes: attributes)
            }
        }
    }
}
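A short usage sketch chaining detection and drawing; "street.jpg" is a hypothetical asset name used only for illustration:

// Usage example: detect objects, then render the boxes onto the image
let photo = UIImage(named: "street.jpg")!   // hypothetical asset
let observations = try await detectObjects(photo)
let annotated = drawBoxes(on: photo, results: observations)
print("Detected \(observations.count) objects")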
📝 Text Analysis (Sentiment Analysis)
SentimentAnalysis.swift
import NaturalLanguage

func analyzeSentiment(_ text: String) -> String {
    let tagger = NLTagger(tagSchemes: [.sentimentScore])
    tagger.string = text

    let (sentiment, _) = tagger.tag(
        at: text.startIndex,
        unit: .paragraph,
        scheme: .sentimentScore
    )

    guard let sentimentValue = sentiment,
          let score = Double(sentimentValue.rawValue) else {
        return "Neutral"
    }

    if score > 0.3 {
        return "😊 Positive (\(Int(score * 100))%)"
    } else if score < -0.3 {
        return "😢 Negative (\(Int(abs(score) * 100))%)"
    } else {
        return "😐 Neutral"
    }
}

// Usage example
let text1 = "This movie was really fun!"
print(analyzeSentiment(text1))  // "😊 Positive"

let text2 = "It was the worst service."
print(analyzeSentiment(text2))  // "😢 Negative"
🎤 Speech Recognition Integration
SpeechToML.swift
import Speech

func transcribeAndClassify(audioURL: URL) async throws -> (String, String) {
    // 1. Speech recognition (SFSpeechRecognizer has no async API, so bridge the callback with a continuation)
    let recognizer = SFSpeechRecognizer(locale: Locale(identifier: "ko-KR"))!
    let request = SFSpeechURLRecognitionRequest(url: audioURL)

    let text: String = try await withCheckedThrowingContinuation { continuation in
        _ = recognizer.recognitionTask(with: request) { result, error in
            if let error {
                continuation.resume(throwing: error)
            } else if let result, result.isFinal {
                // Resume only once, with the final transcription
                continuation.resume(returning: result.bestTranscription.formattedString)
            }
        }
    }

    // 2. Sentiment analysis
    let sentiment = analyzeSentiment(text)
    return (text, sentiment)
}
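Speech recognition needs user consent, so permission should be requested before calling transcribeAndClassify (and NSSpeechRecognitionUsageDescription must be declared in Info.plist). A minimal usage sketch; the "recording.m4a" path is a hypothetical example:

import Speech

// Request permission, then transcribe and classify a recording
SFSpeechRecognizer.requestAuthorization { status in
    guard status == .authorized else { return }
    Task {
        let url = URL(fileURLWithPath: "recording.m4a")   // hypothetical audio file
        if let (text, sentiment) = try? await transcribeAndClassify(audioURL: url) {
            print(text, sentiment)
        }
    }
}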
🔄 Model Updates (On-Device Training)
OnDeviceTraining.swift
import CoreML

// Only updatable models can be trained on device (enable this when exporting from Create ML)
func updateModel(with trainingData: MLBatchProvider) throws {
    // 1. Locate the compiled model in the app bundle
    let modelURL = Bundle.main.url(forResource: "MyModel", withExtension: "mlmodelc")!

    // 2. Update configuration
    let configuration = MLModelConfiguration()
    configuration.computeUnits = .all

    // 3. Train on device
    let updateTask = try MLUpdateTask(
        forModelAt: modelURL,
        trainingData: trainingData,
        configuration: configuration
    ) { context in
        // 4. Completion handler: context.model holds the updated model; persist it with context.model.write(to:)
    }
    updateTask.resume()
}
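Training data for MLUpdateTask is supplied as an MLBatchProvider. A minimal sketch of building one from (pixel buffer, label) pairs; the feature names "image" and "label" are assumptions and must match the model's own feature descriptions:

import CoreML

// Wrap training samples as feature providers for on-device updates.
// "image" and "label" are assumed feature names; check your model's description.
func makeTrainingBatch(samples: [(CVPixelBuffer, String)]) throws -> MLBatchProvider {
    let providers: [MLFeatureProvider] = try samples.map { pixelBuffer, label in
        try MLDictionaryFeatureProvider(dictionary: [
            "image": MLFeatureValue(pixelBuffer: pixelBuffer),
            "label": MLFeatureValue(string: label)
        ])
    }
    return MLArrayBatchProvider(array: providers)
}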
⚡ Performance Optimization
Performance.swift
import CoreML

// 1. Compute unit configuration (pick one)
let config = MLModelConfiguration()
config.computeUnits = .all                  // CPU + GPU + Neural Engine
config.computeUnits = .cpuAndGPU            // excludes the Neural Engine
config.computeUnits = .cpuAndNeuralEngine   // excludes the GPU
config.computeUnits = .cpuOnly              // CPU only

let model = try MobileNet(configuration: config)

// 2. Batch prediction (process several images at once)
let images: [CVPixelBuffer] = /* ... */
let inputs = images.map { MobileNetInput(image: $0) }
let results = try model.predictions(inputs: inputs)

// 3. Model reuse (cache a single instance)
class ModelManager {
    static let shared = ModelManager()
    let model: MobileNet

    private init() {
        model = try! MobileNet(configuration: MLModelConfiguration())
    }
}
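To see which compute-unit setting actually helps on a given device, a rough timing sketch can be used; the measurePrediction helper is illustrative and assumes the MobileNet generated class:

import CoreML
import QuartzCore

// Time a single prediction for a given compute-unit setting.
// Note: the first call also pays model load/compile cost, so warm up before comparing.
func measurePrediction(computeUnits: MLComputeUnits, input: CVPixelBuffer) throws -> TimeInterval {
    let config = MLModelConfiguration()
    config.computeUnits = computeUnits

    let model = try MobileNet(configuration: config)

    let start = CACurrentMediaTime()
    _ = try model.prediction(image: input)
    return CACurrentMediaTime() - start
}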
🔧 Building a Model with Create ML
Training.swift
// Using the Create ML app (GUI):
// 1. Xcode > Open Developer Tool > Create ML
// 2. Create a new project
// 3. Add training data
// 4. Click Train
// 5. Export the .mlmodel file

// Training in code (Swift Playgrounds on macOS)
import CreateML

let trainingData = try MLImageClassifier.DataSource.labeledDirectories(at: trainingDataURL)
let testData = try MLImageClassifier.DataSource.labeledDirectories(at: testDataURL)

let model = try MLImageClassifier(trainingData: trainingData)

// Evaluate on the held-out test set
let evaluation = model.evaluation(on: testData)
print(evaluation)

try model.write(to: URL(fileURLWithPath: "/tmp/MyModel.mlmodel"))
🐍 Converting Python Models
convert.py
# TensorFlow/PyTorch → Core ML conversion
import coremltools as ct

# Convert a TensorFlow (Keras) model
import tensorflow as tf
model = tf.keras.models.load_model('model.h5')
coreml_model = ct.convert(model)
coreml_model.save('MyModel.mlmodel')

# Convert a PyTorch model (trace it first, then describe the input shape)
import torch
model = torch.load('model.pth')
model.eval()
traced_model = torch.jit.trace(model, example_input)
coreml_model = ct.convert(traced_model, inputs=[ct.TensorType(shape=example_input.shape)])
coreml_model.save('MyModel.mlmodel')
💡 Advantages of Core ML
✅ Privacy preserved (all processing stays on the device)
✅ Works offline
✅ Low latency
✅ Uses the Neural Engine (A11 and later)
✅ Battery efficient