您好,登錄后才能下訂單哦!
這篇文章給大家分享的是有關Unity插件OVRLipSync有什么用的內容。小編覺得挺實用的,因此分享給大家做個參考,一起跟隨小編過來看看吧。
項目需要接入對話口型動作,所以將OVRLipSync集成到項目中。
1、下載OVRLipSync.unitypackage
2、導入完成后可以參看demo,我這邊主要是導入了模型動作的功能。我們主要關注的是OVRLipSync.cs,OVRLipSyncContext.cs,OVRLipSyncContextMorphTarget.cs這三個類。
3、OVRLipSync.cs 主要是導入Dll接口函數。需要預先初始化這個,可以在Scene中創建一個GameObject綁上這個腳本。
4、我將OVRLipSyncContext.cs,OVRLipSyncContextMorphTarget.cs 合并為一個.cs 腳本。
// Merged from OVRLipSyncContext.cs + OVRLipSyncContextMorphTarget.cs.
// Feeds the AudioSource's PCM buffer into the OVRLipSync phoneme engine and
// drives blend-shape weights on a SkinnedMeshRenderer from the viseme frame.
[RequireComponent(typeof(AudioSource))]
public class OVRLipSyncContextEx : MonoBehaviour
{
    public AudioSource audioSource = null;
    public float gain = 1.0f;                 // input amplification before analysis
    public OVRLipSync.ovrLipSyncContextProvider provider = OVRLipSync.ovrLipSyncContextProvider.Main;
    public bool delayCompensate = false;

    // Viseme smoothing amount (0-100) sent to the engine in Start().
    // The original referenced an undeclared 'SmoothAmount'; declared here with the demo default.
    public int smoothAmount = 70;

    private OVRLipSync.ovrLipSyncFrame frame = new OVRLipSync.ovrLipSyncFrame(0);
    private uint context = 0;                 // 0 means "no native context created yet"

    public SkinnedMeshRenderer skinnedMeshRenderer = null;

    // Blend-shape INDEX per viseme (-1 = unmapped). The original article assigned
    // strings ("doubt", "smile", ...) directly into this int[], which does not
    // compile; the artist-given names now live in visemeBlendShapeNames and are
    // resolved to indices in Start() via Mesh.GetBlendShapeIndex.
    public int[] VisemeToBlendTargets = new int[(int)OVRLipSync.ovrLipSyncViseme.Count];

    // Blend-shape names authored by the artists, one per viseme slot (test mapping).
    public string[] visemeBlendShapeNames =
    {
        "doubt", "smile", "anger", "surprise", "scare",
        "nervous", "upset", "tiresome", "bashful", "greedy",
        "doubt", "doubt", "doubt", "doubt", "doubt"
    };

    // OnAudioFilterRead fires roughly every 23 ms (≈ 2048 samples / 48000 Hz / 2 channels).
    // Pushing viseme data to the mesh faster than new frames arrive makes the
    // lips jitter (noted in the original demo), so Update() is throttled to this.
    private const float FrameInterval = 0.023f;
    private float elapsedSinceLastFrame = 0f;

    // Guards the native context. The original used lock(this), which is an
    // anti-pattern: external code can lock on the component and deadlock us.
    private readonly object contextGate = new object();

    void Awake()
    {
        // Cache the audio source we are going to be pumping data from.
        if (!audioSource)
            audioSource = GetComponent<AudioSource>();
    }

    void Start()
    {
        lock (contextGate)
        {
            if (context == 0)
            {
                if (OVRLipSync.CreateContext(ref context, provider) != OVRLipSync.ovrLipSyncSuccess)
                {
                    Debug.Log("OVRLipSyncContextEx.Start ERROR: Could not create Phoneme context.");
                    return;
                }
            }

            // Receive audio data via the AudioSourceListener relay (Unity calls
            // OnAudioFilterRead approximately every 20 ms). The listener's delegate
            // carries an extra AudioObject argument we do not need, so adapt with
            // a lambda — the original subscribed a 2-arg method to the 3-arg
            // delegate, which does not compile.
            gameObject.GetComponent<AudioSourceListener>().OnEventAudioFilterRead +=
                (data, channels, obj) => AudioFilterRead(data, channels);
        }

        SendSignal(OVRLipSync.ovrLipSyncSignals.VisemeSmoothing, smoothAmount, 0);

        // Resolve the artist-authored blend-shape names to indices once up front.
        Mesh mesh = skinnedMeshRenderer != null ? skinnedMeshRenderer.sharedMesh : null;
        for (int i = 0; i < VisemeToBlendTargets.Length; i++)
        {
            string shapeName = (visemeBlendShapeNames != null && i < visemeBlendShapeNames.Length)
                ? visemeBlendShapeNames[i]
                : null;
            // GetBlendShapeIndex returns -1 when the shape is absent, which is
            // exactly the "unmapped" sentinel SetVisemeToMorphTarget() checks.
            VisemeToBlendTargets[i] = (mesh != null && !string.IsNullOrEmpty(shapeName))
                ? mesh.GetBlendShapeIndex(shapeName)
                : -1;
        }
    }

    void Update()
    {
        // NOTE: the original article showed a second, alternative Update() for
        // audio played on an independent GameObject (looked up via
        // AudioController). Its 23 ms throttle — the actual anti-jitter fix —
        // is merged here; a class cannot declare Update() twice.
        if (skinnedMeshRenderer == null)
            return;

        elapsedSinceLastFrame += Time.deltaTime;
        if (elapsedSinceLastFrame < FrameInterval)
            return;
        elapsedSinceLastFrame = 0f;

        if (GetCurrentPhonemeFrame(ref frame) == OVRLipSync.ovrLipSyncSuccess)
            SetVisemeToMorphTarget();
    }

    void OnDestroy()
    {
        // Tear down the native phoneme context created in Start().
        lock (contextGate)
        {
            if (context != 0)
            {
                if (OVRLipSync.DestroyContext(context) != OVRLipSync.ovrLipSyncSuccess)
                {
                    Debug.Log("OVRLipSyncContextEx.OnDestroy ERROR: Could not delete Phoneme context.");
                }
                context = 0;   // never double-destroy
            }
        }
    }

    // Relay target for AudioSourceListener: applies gain and feeds the
    // interleaved PCM buffer into the phoneme engine. Runs on the audio thread.
    void AudioFilterRead(float[] data, int channels)
    {
        // Bail out if the engine is not initialized or there is no audio source.
        if (OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess || audioSource == null)
            return;

        // Increase the gain of the input to get a better signal for recognition.
        for (int i = 0; i < data.Length; ++i)
            data[i] *= gain;

        // Send data into the phoneme context for processing (if context exists).
        lock (contextGate)
        {
            if (context != 0)
            {
                OVRLipSync.ovrLipSyncFlag flags = 0;
                if (delayCompensate)
                    flags |= OVRLipSync.ovrLipSyncFlag.DelayCompensateAudio;
                OVRLipSync.ProcessFrameInterleaved(context, data, flags, ref frame);
            }
        }
    }

    // Copies the most recent viseme frame into inFrame.
    // Returns ovrLipSyncSuccess, or ovrLipSyncError.Unknown when the engine is down.
    public int GetCurrentPhonemeFrame(ref OVRLipSync.ovrLipSyncFrame inFrame)
    {
        if (OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess)
            return (int)OVRLipSync.ovrLipSyncError.Unknown;

        lock (contextGate)   // 'frame' is also written from the audio thread
        {
            inFrame.frameNumber = frame.frameNumber;
            inFrame.frameDelay = frame.frameDelay;
            for (int i = 0; i < inFrame.Visemes.Length; i++)
            {
                inFrame.Visemes[i] = frame.Visemes[i];
            }
        }
        return OVRLipSync.ovrLipSyncSuccess;
    }

    // Resets the native context (e.g. when switching to a new clip).
    public int ResetContext()
    {
        if (OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess)
            return (int)OVRLipSync.ovrLipSyncError.Unknown;
        return OVRLipSync.ResetContext(context);
    }

    // Sends a tuning signal (e.g. VisemeSmoothing) to the native engine.
    public int SendSignal(OVRLipSync.ovrLipSyncSignals signal, int arg1, int arg2)
    {
        if (OVRLipSync.IsInitialized() != OVRLipSync.ovrLipSyncSuccess)
            return (int)OVRLipSync.ovrLipSyncError.Unknown;
        return OVRLipSync.SendSignal(context, signal, arg1, arg2);
    }

    // Pushes the current viseme weights (engine range 0..1) onto the mapped
    // blend shapes (Unity range 0..100).
    void SetVisemeToMorphTarget()
    {
        for (int i = 0; i < VisemeToBlendTargets.Length; i++)
        {
            if (VisemeToBlendTargets[i] != -1)
            {
                skinnedMeshRenderer.SetBlendShapeWeight(
                    VisemeToBlendTargets[i],
                    frame.Visemes[i] * 100.0f);
            }
        }
    }
}
增加一個delegate回調類,掛在AudioSource所在的GameObject下。
// Relay component living next to an AudioSource: forwards Unity's
// OnAudioFilterRead callback (invoked on the audio thread, roughly every
// 20 ms) to subscribers, together with the AudioObject on the same GameObject.
public class AudioSourceListener : MonoBehaviour
{
    public delegate void DgtEventAudioFilterRead(float[] data, int channels, AudioObject obj);

    // Subscribers receive the raw interleaved PCM buffer and may modify it in place.
    public DgtEventAudioFilterRead OnEventAudioFilterRead;

    private AudioObject m_audio_obj;   // may stay null if no AudioObject is attached

    public void Start()
    {
        // Cache once; GetComponent per audio callback would be wasteful.
        m_audio_obj = gameObject.GetComponent<AudioObject>();
    }

    // The original declared an empty Update() — Unity still invokes it every
    // frame, so it has been removed.

    // Runs on Unity's audio thread: keep subscribers fast and allocation-free.
    void OnAudioFilterRead(float[] data, int channels)
    {
        OnEventAudioFilterRead?.Invoke(data, channels, m_audio_obj);
    }
}
了解該部分可以參考Oculus發布的OVRLipSync示例。
https://developer3.oculus.com/downloads/
將下載下來的package導入Unity,可以運行官方Demo。
該Demo中模型的口型動作,可以通過
Maya
Cinema4D
3D Studio Max
Blender
Cheetah 3D
XSI
Any tool that supports the FBX format
這幾個工具進行設計和制作。詳情可參考官方文檔。
如果我們通過這些工具制作相關口型動作并導入,在Unity中選中模型,在Inspector中會顯示在SkinnedMeshRenderer中。
該Demo中通過Oculus官方提供的語音識別DLL(位置:..\Assets\Plugins\x86_64中的OVRLipSync.dll)進行實時語音識別,并通過
// Fetch the renderer that owns the blend shapes, then set blend shape 0's
// weight to 'blendOne' (Unity blend-shape weights use a 0-100 scale).
skinnedMeshRenderer = GetComponent<SkinnedMeshRenderer> (); skinnedMeshRenderer.SetBlendShapeWeight (0, blendOne);
代碼進(jìn)行控制。
感謝各位的閱讀!關於“Unity插件OVRLipSync有什么用”這篇文章就分享到這裡了,希望以上內容可以對大家有一定的幫助,讓大家可以學到更多知識,如果覺得文章不錯,可以把它分享出去讓更多的人看到吧!
免責(zé)聲明:本站發(fā)布的內(nèi)容(圖片、視頻和文字)以原創(chuàng)、轉(zhuǎn)載和分享為主,文章觀點(diǎn)不代表本網(wǎng)站立場,如果涉及侵權(quán)請聯(lián)系站長郵箱:is@yisu.com進(jìn)行舉報,并提供相關(guān)證據(jù),一經(jīng)查實(shí),將立刻刪除涉嫌侵權(quán)內(nèi)容。