🧠 Mastering Core ML

Image classification, object detection, and even text analysis with on-device machine learning! All processing stays on the device.

✨ What Is Core ML?

Core ML is Apple's machine learning framework. TensorFlow and PyTorch models can be converted and run directly on an iPhone. Because everything is processed on the device, user privacy is preserved.

📦 Adding a Model

Importing a model
1. Download a Core ML model
   - Apple-provided models: https://developer.apple.com/machine-learning/models/
   - MobileNet, ResNet, SqueezeNet, and more

2. Drag the .mlmodel file into your Xcode project

3. A Swift interface is generated automatically (see the sketch below)
   - MyModel.swift (auto-generated; do not edit)
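
The generated class wraps the model in a typed Swift API. Here is a minimal usage sketch, assuming the Apple-provided MobileNet model has been added to the project; the names MobileNet, prediction(image:), and classLabel follow Xcode's usual code generation, so check your own generated file.

import CoreML

// Classify a pixel buffer with the auto-generated MobileNet class
func topLabel(for pixelBuffer: CVPixelBuffer) throws -> String {
    let model = try MobileNet(configuration: MLModelConfiguration())  // generated initializer
    let output = try model.prediction(image: pixelBuffer)             // generated prediction method
    return output.classLabel                                          // generated output property
}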

🖼️ Image Classification

ImageClassifier.swift
import CoreML
import Vision
import UIKit

class ImageClassifier {
    // Classifies an image with the Apple-provided MobileNet model
    func classifyImage(_ image: UIImage) async throws -> String {
        // 1. Load the model
        let model = try VNCoreMLModel(for: MobileNet(configuration: MLModelConfiguration()).model)

        // 2. Create the classification request
        let request = VNCoreMLRequest(model: model)

        // 3. Run the request on the image
        guard let cgImage = image.cgImage else {
            throw NSError(domain: "Image Error", code: 0)
        }

        let handler = VNImageRequestHandler(cgImage: cgImage)
        try handler.perform([request])

        // 4. Read the top classification result
        guard let results = request.results as? [VNClassificationObservation],
              let topResult = results.first else {
            return "Classification failed"
        }

        return "\(topResult.identifier) (\(Int(topResult.confidence * 100))%)"
    }
}

// Usage example
let classifier = ImageClassifier()
let image = UIImage(named: "dog.jpg")!
let result = try await classifier.classifyImage(image)
print(result)  // "golden retriever (92%)"

📱 SwiftUI Integration

ClassifierView.swift
import SwiftUI
import PhotosUI

struct ImageClassifierView: View {
    @State private var selectedItem: PhotosPickerItem?
    @State private var selectedImage: UIImage?
    @State private var result: String = ""
    @State private var isProcessing = false

    let classifier = ImageClassifier()

    var body: some View {
        VStack(spacing: 20) {
            if let image = selectedImage {
                Image(uiImage: image)
                    .resizable()
                    .scaledToFit()
                    .frame(height: 300)
                    .cornerRadius(12)
            }

            if isProcessing {
                ProgressView("Analyzing...")
            } else if !result.isEmpty {
                Text(result)
                    .font(.title2)
                    .bold()
            }

            PhotosPicker(selection: $selectedItem, matching: .images) {
                Label("Select Photo", systemImage: "photo")
            }
            .buttonStyle(.borderedProminent)
        }
        .padding()
        .onChange(of: selectedItem) {
            Task {
                if let data = try? await selectedItem?.loadTransferable(type: Data.self),
                   let image = UIImage(data: data) {
                    selectedImage = image
                    await classify(image)
                }
            }
        }
    }

    func classify(_ image: UIImage) async {
        isProcessing = true
        defer { isProcessing = false }

        result = (try? await classifier.classifyImage(image)) ?? "Error"
    }
}

🎯 Object Detection

ObjectDetector.swift
import Vision
import CoreML
import UIKit

func detectObjects(_ image: UIImage) async throws -> [VNRecognizedObjectObservation] {
    // Uses a YOLO model (the .mlmodel file must be added to the project)
    let model = try VNCoreMLModel(for: YOLOv3(configuration: MLModelConfiguration()).model)

    let request = VNCoreMLRequest(model: model)

    guard let cgImage = image.cgImage else {
        throw NSError(domain: "Image Error", code: 0)
    }

    let handler = VNImageRequestHandler(cgImage: cgImage)
    try handler.perform([request])

    guard let results = request.results as? [VNRecognizedObjectObservation] else {
        return []
    }

    return results
}

// Draw the detected boxes onto the image
func drawBoxes(on image: UIImage, results: [VNRecognizedObjectObservation]) -> UIImage {
    let renderer = UIGraphicsImageRenderer(size: image.size)

    return renderer.image { context in
        image.draw(at: .zero)

        UIColor.red.setStroke()
        context.cgContext.setLineWidth(3)

        for observation in results {
            // Convert from Vision's normalized coordinates (origin at bottom-left) to UIKit coordinates
            let boundingBox = observation.boundingBox
            let rect = CGRect(
                x: boundingBox.origin.x * image.size.width,
                y: (1 - boundingBox.origin.y - boundingBox.height) * image.size.height,
                width: boundingBox.width * image.size.width,
                height: boundingBox.height * image.size.height
            )

            context.cgContext.stroke(rect)

            // Draw the label
            if let label = observation.labels.first {
                let text = "\(label.identifier) \(Int(label.confidence * 100))%"
                let attributes: [NSAttributedString.Key: Any] = [
                    .font: UIFont.boldSystemFont(ofSize: 16),
                    .foregroundColor: UIColor.red
                ]
                text.draw(at: rect.origin, withAttributes: attributes)
            }
        }
    }
}
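
A usage sketch that ties the two functions together ("street.jpg" is a hypothetical asset, and a YOLOv3 .mlmodel must be in the project):

// Usage example
let photo = UIImage(named: "street.jpg")!
let observations = try await detectObjects(photo)
let annotated = drawBoxes(on: photo, results: observations)
// annotated now has a red box and label drawn over each detected object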

📝 Text Analysis (Sentiment Analysis)

SentimentAnalysis.swift
import NaturalLanguage

func analyzeSentiment(_ text: String) -> String {
    let tagger = NLTagger(tagSchemes: [.sentimentScore])
    tagger.string = text

    let (sentiment, _) = tagger.tag(
        at: text.startIndex,
        unit: .paragraph,
        scheme: .sentimentScore
    )

    guard let sentimentValue = sentiment,
          let score = Double(sentimentValue.rawValue) else {
        return "Neutral"
    }

    if score > 0.3 {
        return "😊 Positive (\(Int(score * 100))%)"
    } else if score < -0.3 {
        return "😢 Negative (\(Int(abs(score) * 100))%)"
    } else {
        return "😐 Neutral"
    }
}

// Usage example
let text1 = "This movie was really fun!"
print(analyzeSentiment(text1))  // "😊 Positive"

let text2 = "It was the worst service."
print(analyzeSentiment(text2))  // "😢 Negative"

🎤 Speech Recognition Integration

SpeechToML.swift
import Speech

func transcribeAndClassify(audioURL: URL) async throws -> (String, String) {
    // 1. Speech recognition (bridge the callback-based API into async/await)
    let recognizer = SFSpeechRecognizer(locale: Locale(identifier: "ko-KR"))!
    let request = SFSpeechURLRecognitionRequest(url: audioURL)

    let text: String = try await withCheckedThrowingContinuation { continuation in
        recognizer.recognitionTask(with: request) { result, error in
            if let error {
                continuation.resume(throwing: error)
            } else if let result, result.isFinal {
                continuation.resume(returning: result.bestTranscription.formattedString)
            }
        }
    }

    // 2. Sentiment analysis on the transcript
    let sentiment = analyzeSentiment(text)

    return (text, sentiment)
}
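
Usage sketch: recording.m4a is a hypothetical bundled file, and speech recognition also requires the user's permission (request it with SFSpeechRecognizer.requestAuthorization and add the NSSpeechRecognitionUsageDescription key to Info.plist).

// Usage example
let audioURL = Bundle.main.url(forResource: "recording", withExtension: "m4a")!
let (transcript, sentiment) = try await transcribeAndClassify(audioURL: audioURL)
print(transcript, sentiment)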

🔄 Model Updates (On-Device Training)

OnDeviceTraining.swift
import CoreML

// Only updatable models can be trained on device (enable this when exporting from Create ML)
func updateModel(with trainingData: MLBatchProvider) throws {
    // 1. Locate the compiled model in the app bundle
    let modelURL = Bundle.main.url(forResource: "MyModel", withExtension: "mlmodelc")!

    // 2. Update configuration
    let configuration = MLModelConfiguration()
    configuration.computeUnits = .all

    // 3. Train on device; the completion handler receives the updated model
    let updateTask = try MLUpdateTask(
        forModelAt: modelURL,
        trainingData: trainingData,
        configuration: configuration,
        completionHandler: { context in
            // 4. The updated model is available as context.model
            //    (persist it with context.model.write(to:) if you need to keep it)
            print("Update finished: \(context.task.state == .completed)")
        }
    )

    updateTask.resume()
}
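
How the training batch is built depends on your model's update input names. A hypothetical sketch for an updatable classifier whose training inputs are named "image" and "label" (adjust both names to match your model):

import CoreML

// Build an MLBatchProvider from (pixel buffer, label) pairs
func makeTrainingBatch(from samples: [(image: CVPixelBuffer, label: String)]) throws -> MLArrayBatchProvider {
    let providers: [MLFeatureProvider] = try samples.map { sample in
        try MLDictionaryFeatureProvider(dictionary: [
            "image": MLFeatureValue(pixelBuffer: sample.image),  // assumed input name
            "label": MLFeatureValue(string: sample.label)        // assumed target name
        ])
    }
    return MLArrayBatchProvider(array: providers)
}

// try updateModel(with: makeTrainingBatch(from: collectedSamples))  // collectedSamples: your gathered data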

⚡ Performance Optimization

Performance.swift
import CoreML

// 1. Choose the compute units (pick one)
let config = MLModelConfiguration()
config.computeUnits = .all                 // CPU + GPU + Neural Engine (default)
config.computeUnits = .cpuAndGPU           // skip the Neural Engine
config.computeUnits = .cpuAndNeuralEngine  // skip the GPU
config.computeUnits = .cpuOnly             // CPU only

let model = try MobileNet(configuration: config)

// 2. Batch processing (run inference on several images in one call)
let images: [CVPixelBuffer] = /* ... */
let inputs = images.map { MobileNetInput(image: $0) }  // MobileNetInput is the Xcode-generated input type
let batch = MLArrayBatchProvider(array: inputs)
let results = try model.model.predictions(from: batch, options: MLPredictionOptions())

// 3. Reuse the model (cache a single instance)
class ModelManager {
    static let shared = ModelManager()
    let model: MobileNet

    private init() {
        model = try! MobileNet(configuration: MLModelConfiguration())
    }
}

🔧 Building a Model with Create ML

Using the Create ML app (GUI):
1. Xcode > Open Developer Tool > Create ML
2. Create a new project
3. Add your training data
4. Click Train
5. Export the .mlmodel file

Training in code (Swift on macOS):

Training.swift
import CreateML

// trainingDataURL / testDataURL point at folders whose subfolders are named after the class labels
let trainingData = MLImageClassifier.DataSource.labeledDirectories(at: trainingDataURL)
let testData = MLImageClassifier.DataSource.labeledDirectories(at: testDataURL)

let model = try MLImageClassifier(trainingData: trainingData)

// Check accuracy on the held-out test set
let evaluation = model.evaluation(on: testData)
print("Classification error: \(evaluation.classificationError)")

try model.write(to: URL(fileURLWithPath: "/tmp/MyModel.mlmodel"))

🐍 Python Model Conversion

convert.py
# Convert TensorFlow / PyTorch models to Core ML
import coremltools as ct

# Convert a TensorFlow (Keras) model
import tensorflow as tf
model = tf.keras.models.load_model('model.h5')
coreml_model = ct.convert(model)
coreml_model.save('MyModel.mlmodel')

# Convert a PyTorch model (it must be traced first)
import torch
model = torch.load('model.pth')
model.eval()
example_input = torch.rand(1, 3, 224, 224)  # match your model's input shape
traced_model = torch.jit.trace(model, example_input)
coreml_model = ct.convert(
    traced_model,
    inputs=[ct.TensorType(shape=example_input.shape)]
)
coreml_model.save('MyModel.mlmodel')
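
If the converted .mlmodel is downloaded by the app at runtime rather than bundled at build time, Core ML must compile it on the device before it can be loaded. A minimal sketch (downloadedURL is a hypothetical local file URL):

import CoreML

// Compile a raw .mlmodel into .mlmodelc and load it
func loadConvertedModel(at downloadedURL: URL) throws -> MLModel {
    let compiledURL = try MLModel.compileModel(at: downloadedURL)
    return try MLModel(contentsOf: compiledURL)
}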

💡 Core ML Advantages
✅ Privacy preserved (all processing stays on the device)
✅ Works offline
✅ Low latency
✅ Uses the Neural Engine (A11 Bionic and later)
✅ Battery efficient

📦 Learning Resources

💻 GitHub project
🍎 Apple ML page
📖 Apple official documentation