Skip to content

Commit

Permalink
Feature Screen API.
Browse files Browse the repository at this point in the history
  • Loading branch information
shogo4405 committed May 22, 2024
1 parent d1a20db commit 095a67e
Show file tree
Hide file tree
Showing 25 changed files with 1,381 additions and 359 deletions.
9 changes: 2 additions & 7 deletions Examples/iOS/IngestViewController.swift
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,10 @@ final class IngestViewController: UIViewController {

netStreamSwitcher.uri = Preference.defaultInstance.uri ?? ""

stream.screen.backgroundColor = UIColor.white.cgColor

pipIntentView.layer.borderWidth = 1.0
pipIntentView.layer.borderColor = UIColor.white.cgColor
pipIntentView.bounds = IOVideoMixerSettings.default.regionOfInterest
pipIntentView.isUserInteractionEnabled = true
view.addSubview(pipIntentView)

Expand Down Expand Up @@ -115,12 +116,6 @@ final class IngestViewController: UIViewController {
currentFrame.origin.x += deltaX
currentFrame.origin.y += deltaY
pipIntentView.frame = currentFrame
stream.videoMixerSettings = IOVideoMixerSettings(
mode: stream.videoMixerSettings.mode,
cornerRadius: 16.0,
regionOfInterest: currentFrame,
direction: .east
)
}
}

Expand Down
47 changes: 47 additions & 0 deletions Examples/macOS/CameraIngestViewController.swift
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ final class CameraIngestViewController: NSViewController {
private var stream: IOStream {
return netStreamSwitcher.stream
}
private var textScreenObject = TextScreenObject()

override func viewDidLoad() {
super.viewDidLoad()
Expand All @@ -33,6 +34,46 @@ final class CameraIngestViewController: NSViewController {

override func viewDidAppear() {
super.viewDidAppear()

textScreenObject.horizontalAlignment = .right
textScreenObject.verticalAlignment = .bottom
textScreenObject.layoutMargin = .init(top: 0, left: 0, bottom: 16, right: 16)

stream.screen.backgroundColor = NSColor.black.cgColor

let videoScreenObject = VideoTrackScreenObject()
videoScreenObject.cornerRadius = 32.0
videoScreenObject.track = 1
videoScreenObject.horizontalAlignment = .right
videoScreenObject.layoutMargin = .init(top: 16, left: 0, bottom: 0, right: 16)
videoScreenObject.size = .init(width: 160 * 2, height: 90 * 2)
_ = videoScreenObject.registerVideoEffect(MonochromeEffect())

let imageScreenObject = ImageScreenObject()
let imageURL = URL(fileURLWithPath: Bundle.main.path(forResource: "game_jikkyou", ofType: "png") ?? "")
if let provider = CGDataProvider(url: imageURL as CFURL) {
imageScreenObject.verticalAlignment = .bottom
imageScreenObject.layoutMargin = .init(top: 0, left: 0, bottom: 16, right: 0)
imageScreenObject.cgImage = CGImage(
pngDataProviderSource: provider,
decode: nil,
shouldInterpolate: false,
intent: .defaultIntent
)
} else {
logger.info("no image")
}

let assetScreenObject = AssetScreenObject()
assetScreenObject.size = .init(width: 180, height: 180)
assetScreenObject.layoutMargin = .init(top: 16, left: 16, bottom: 0, right: 0)
try? assetScreenObject.startReading(AVAsset(url: URL(fileURLWithPath: Bundle.main.path(forResource: "SampleVideo_360x240_5mb", ofType: "mp4") ?? "")))
try? stream.screen.addChild(assetScreenObject)
try? stream.screen.addChild(videoScreenObject)
try? stream.screen.addChild(imageScreenObject)
try? stream.screen.addChild(textScreenObject)
stream.screen.delegate = self

stream.attachAudio(DeviceUtil.device(withLocalizedName: audioPopUpButton.titleOfSelectedItem!, mediaType: .audio))

var audios = AVCaptureDevice.devices(for: .audio)
Expand Down Expand Up @@ -79,3 +120,9 @@ final class CameraIngestViewController: NSViewController {
stream.attachCamera(device, track: 0)
}
}

// MARK: - ScreenDelegate
extension CameraIngestViewController: ScreenDelegate {
    /// Updates the text overlay with the current date/time just before each layout pass.
    func screen(_ screen: Screen, willLayout time: CMTime) {
        textScreenObject.string = "\(Date())"
    }
}
Binary file added Examples/macOS/game_jikkyou.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
66 changes: 62 additions & 4 deletions HaishinKit.xcodeproj/project.pbxproj

Large diffs are not rendered by default.

47 changes: 25 additions & 22 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -62,31 +62,34 @@ Project name |Notes |License
- [ ] listener
- [ ] rendezvous

### Multi Camera
Supports two camera video sources, with a picture-in-picture display that overlays the secondary camera's image on the primary camera's image. Also supports a split display that arranges the two cameras horizontally or vertically.

|Picture-In-Picture|Split|
|:-:|:-:|
|<img width="1382" alt="" src="https://user-images.githubusercontent.com/810189/210043421-ceb18cb7-9b50-43fa-a0a2-8b92b78d9df1.png">|<img width="1382" alt="" src="https://user-images.githubusercontent.com/810189/210043687-a99f21b6-28b2-4170-96de-6c814debd84d.png">|
### Offscreen Rendering
Off-screen rendering makes it possible to overlay any text or bitmap on a video during broadcasting or viewing. This enables use cases such as watermarking and time display.

```swift
// If you want to use the multi-camera feature, please make sure stream.isMultiCamSessionEnabled = true. Before attachCamera or attachAudio.
stream.isMultiCamSessionEnabled = true

let back = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
stream.attachCamera(back, track: 0) { _, error in
if let error {
logger.warn(error)
}
}

let front = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
stream.attachCamera(front, track: 1) { videoUnit, error in
videoUnit?.isVideoMirrored = true
if let error {
logger.error(error)
}
let videoScreenObject = VideoTrackScreenObject()
videoScreenObject.cornerRadius = 32.0
videoScreenObject.track = 1
videoScreenObject.horizontalAlignment = .right
videoScreenObject.layoutMargin = .init(top: 16, left: 0, bottom: 0, right: 16)
videoScreenObject.size = .init(width: 160 * 2, height: 90 * 2)
_ = videoScreenObject.registerVideoEffect(MonochromeEffect())
try? stream.screen.addChild(videoScreenObject)

let imageScreenObject = ImageScreenObject()
let imageURL = URL(fileURLWithPath: Bundle.main.path(forResource: "game_jikkyou", ofType: "png") ?? "")
if let provider = CGDataProvider(url: imageURL as CFURL) {
imageScreenObject.verticalAlignment = .bottom
imageScreenObject.layoutMargin = .init(top: 0, left: 0, bottom: 16, right: 0)
imageScreenObject.cgImage = CGImage(
pngDataProviderSource: provider,
decode: nil,
shouldInterpolate: false,
intent: .defaultIntent
)
} else {
logger.info("no image")
}
try? stream.screen.addChild(imageScreenObject)
```

### Rendering
Expand Down
34 changes: 34 additions & 0 deletions Sources/Extension/AVLayerVideoGravity+Extension.swift
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
import AVFoundation
import Foundation

extension AVLayerVideoGravity {
    /// Returns the affine scale transform that maps an image of size `image`
    /// into a display area of size `display` according to this gravity mode.
    /// - Parameters:
    ///   - display: The size of the destination display area, in points/pixels.
    ///   - image: The natural size of the source image.
    /// - Returns: A scale-only `CGAffineTransform`; identity scale for unknown modes.
    func scale(_ display: CGSize, image: CGSize) -> CGAffineTransform {
        switch self {
        case .resize:
            // Stretch each axis independently so the image fills the display exactly.
            // Fixed: the y factor previously used display.width instead of display.height.
            return .init(scaleX: display.width / image.width, y: display.height / image.height)
        case .resizeAspect:
            // Uniform scale so the entire image fits inside the display (letterbox).
            let scale = min(display.width / image.width, display.height / image.height)
            return .init(scaleX: scale, y: scale)
        case .resizeAspectFill:
            // Uniform scale so the image covers the whole display (edges may crop).
            let scale = max(display.width / image.width, display.height / image.height)
            return .init(scaleX: scale, y: scale)
        default:
            return .init(scaleX: 1.0, y: 1.0)
        }
    }

    /// Returns the rectangle of the (already scaled) image that should be shown
    /// for this gravity mode.
    /// - Parameters:
    ///   - display: The display rectangle.
    ///   - image: The scaled image rectangle.
    /// - Returns: For `.resizeAspectFill`, a display-sized rect offset so the
    ///   crop is centered; otherwise `image` unchanged.
    func region(_ display: CGRect, image: CGRect) -> CGRect {
        switch self {
        case .resize:
            return image
        case .resizeAspect:
            return image
        case .resizeAspectFill:
            // Center the display-sized window inside the larger scaled image.
            let x = abs(display.width - image.width) / 2
            let y = abs(display.height - image.height) / 2
            return .init(origin: .init(x: x, y: y), size: display.size)
        default:
            return image
        }
    }
}
8 changes: 8 additions & 0 deletions Sources/Extension/CGImage+Extension.swift
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
import CoreGraphics
import Foundation

extension CGImage {
    /// The pixel dimensions of the image expressed as a `CGSize`.
    var size: CGSize {
        CGSize(width: CGFloat(width), height: CGFloat(height))
    }
}
8 changes: 8 additions & 0 deletions Sources/Extension/CMVideoDimention+Extension.swift
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
import AVFoundation
import Foundation

extension CMVideoDimensions {
    /// The dimensions converted to a `CGSize` for use with Core Graphics APIs.
    var size: CGSize {
        CGSize(width: CGFloat(width), height: CGFloat(height))
    }
}
88 changes: 28 additions & 60 deletions Sources/Extension/CVPixelBuffer+Extension.swift
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import Foundation
extension CVPixelBuffer {
enum Error: Swift.Error {
case failedToMakevImage_Buffer(_ error: vImage_Error)
case failedToLock(_ status: CVReturn)
}

static var format = vImage_CGImageFormat(
Expand All @@ -16,68 +17,45 @@ extension CVPixelBuffer {
decode: nil,
renderingIntent: .defaultIntent)

var width: Int {
CVPixelBufferGetWidth(self)
var size: CGSize {
return .init(width: CVPixelBufferGetWidth(self), height: CVPixelBufferGetHeight(self))
}

var height: Int {
CVPixelBufferGetHeight(self)
func swap(_ pixelBuffer: CVPixelBuffer?) throws {
try pixelBuffer?.doTransaction(.readOnly) { pixelBuffer in
guard var imageBuffer = try? pixelBuffer.makevImage_Buffer(format: &Self.format) else {
return
}
defer {
imageBuffer.free()
}
imageBuffer.copy(to: self, format: &Self.format)
}
}

@discardableResult
func over(_ pixelBuffer: CVPixelBuffer?, regionOfInterest roi: CGRect = .zero, radius: CGFloat = 0.0) -> Self {
guard var inputImageBuffer = try? pixelBuffer?.makevImage_Buffer(format: &Self.format) else {
return self
}
defer {
inputImageBuffer.free()
@inline(__always)
func doTransaction(_ lockFlags: CVPixelBufferLockFlags, lambda: (CVPixelBuffer) throws -> Void) throws {
let status = CVPixelBufferLockBaseAddress(self, lockFlags)
guard status == kCVReturnSuccess else {
throw Error.failedToLock(status)
}
guard var srcImageBuffer = try? makevImage_Buffer(format: &Self.format) else {
return self
}
defer {
srcImageBuffer.free()
}
let xScale = Float(roi.width) / Float(inputImageBuffer.width)
let yScale = Float(roi.height) / Float(inputImageBuffer.height)
let scaleFactor = (xScale < yScale) ? xScale : yScale
var scaledInputImageBuffer = inputImageBuffer.scale(scaleFactor)
var shape = ShapeFactory.shared.cornerRadius(CGSize(width: CGFloat(scaledInputImageBuffer.width), height: CGFloat(scaledInputImageBuffer.height)), cornerRadius: radius)
vImageSelectChannels_ARGB8888(&shape, &scaledInputImageBuffer, &scaledInputImageBuffer, 0x8, vImage_Flags(kvImageNoFlags))
defer {
scaledInputImageBuffer.free()
CVPixelBufferUnlockBaseAddress(self, lockFlags)
}
srcImageBuffer.over(&scaledInputImageBuffer, origin: roi.origin)
srcImageBuffer.copy(to: self, format: &Self.format)
return self
try lambda(self)
}

@discardableResult
func split(_ pixelBuffer: CVPixelBuffer?, direction: ImageTransform) -> Self {
guard var inputImageBuffer = try? pixelBuffer?.makevImage_Buffer(format: &Self.format) else {
return self
}
defer {
inputImageBuffer.free()
}
guard var sourceImageBuffer = try? makevImage_Buffer(format: &Self.format) else {
return self
}
defer {
sourceImageBuffer.free()
}
let scaleX = Float(width) / Float(inputImageBuffer.width)
let scaleY = Float(height) / Float(inputImageBuffer.height)
var scaledInputImageBuffer = inputImageBuffer.scale(min(scaleY, scaleX))
defer {
scaledInputImageBuffer.free()
}
sourceImageBuffer.split(&scaledInputImageBuffer, direction: direction)
sourceImageBuffer.copy(to: self, format: &Self.format)
return self
func lockBaseAddress(_ lockFlags: CVPixelBufferLockFlags = CVPixelBufferLockFlags.readOnly) -> CVReturn {
return CVPixelBufferLockBaseAddress(self, lockFlags)
}

@discardableResult
func unlockBaseAddress(_ lockFlags: CVPixelBufferLockFlags = CVPixelBufferLockFlags.readOnly) -> CVReturn {
return CVPixelBufferUnlockBaseAddress(self, lockFlags)
}

func makevImage_Buffer(format: inout vImage_CGImageFormat) throws -> vImage_Buffer {
private func makevImage_Buffer(format: inout vImage_CGImageFormat) throws -> vImage_Buffer {
var buffer = vImage_Buffer()
let cvImageFormat = vImageCVImageFormat_CreateWithCVPixelBuffer(self).takeRetainedValue()
vImageCVImageFormat_SetColorSpace(cvImageFormat, CGColorSpaceCreateDeviceRGB())
Expand All @@ -93,14 +71,4 @@ extension CVPixelBuffer {
}
return buffer
}

@discardableResult
func lockBaseAddress(_ lockFlags: CVPixelBufferLockFlags = CVPixelBufferLockFlags.readOnly) -> CVReturn {
return CVPixelBufferLockBaseAddress(self, lockFlags)
}

@discardableResult
func unlockBaseAddress(_ lockFlags: CVPixelBufferLockFlags = CVPixelBufferLockFlags.readOnly) -> CVReturn {
return CVPixelBufferUnlockBaseAddress(self, lockFlags)
}
}
Loading

0 comments on commit 095a67e

Please sign in to comment.