Omniverse_Audio2Face iClone 7.93 Plugin Update Help


https://forum.reallusion.com/Topic548143.aspx

By lnickers_724359 - Last Year
Hello, I modified the existing Omniverse_Audio2Face plugin so it works with my iClone 7.93. It now imports the face_anim.json that I saved from the NVIDIA Audio2Face application.
I only tested it with one CC3_Base_Plus character (I'm not sure whether it was the default or Heidi), so I'm concerned whether it will actually work with other character profiles that may have custom morphs. My .json appears to contain only the 60 base morphs and the jawbone. I would need Reallusion feedback to know when the custom morphs come into play.
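
In case anyone wants to verify their export before running the plugin: the script only relies on four top-level keys in the .json (weightMat, numPoses, numFrames and facsNames), so a quick sanity check along these lines (adjust the path to your own export) should show whether the layout matches what the importer expects:

import json

# point this at your own Audio2Face export
with open("face_anim.json", "r") as f:
    data = json.load(f)

# these are the only keys the importer reads
print("numPoses :", data["numPoses"])        # blendshape channels per frame
print("numFrames:", data["numFrames"])       # animation frames
print("facsNames:", len(data["facsNames"]))  # channel names
print("weightMat:", len(data["weightMat"]), "x", len(data["weightMat"][0]))  # frames x poses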

Thanks,
Nick

Here's my modified Omniverse_Audio2Face Python script:
import RLPy
import json
import os
import math
from PySide2 import QtCore, QtWidgets, QtUiTools
from PySide2.shiboken2 import wrapInstance
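# Imports an NVIDIA Omniverse Audio2Face animation export (face_anim.json) onto the
# currently selected iClone character: each weightMat frame is written as an expression
# key (60 base morphs, the jaw bone, and any remaining channels into the custom slots),
# with optional strength/smoothing controls and a matching .wav loaded onto the character.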

ui = {}      # User interface globals
events = {}  # Callback event globals
selected_obj = None    # currently selected iClone object (the target character)
j_data = None          # raw JSON text of the loaded face_anim.json
bs_pose_count = None   # number of blendshape poses per frame ('numPoses')
bs_frame_count = None  # number of animation frames ('numFrames')
bs_names = None        # blendshape channel names ('facsNames')
srt_time = None        # timeline position where the clip starts
srt_frame = None       # frame index corresponding to srt_time


def get_picked_obj():
    global selected_obj
    selected_objs = RLPy.RScene.GetSelectedObjects()
    if selected_objs:
        selected_obj = selected_objs[0]
    else:
        print('No Object Selected!!')
        selected_obj = None


def get_current_time():
    global srt_time
    srt_time = RLPy.RGlobal.GetTime()


def smooth(scalars, weight):  # Weight between 0 and 1
    # Forward-backward exponential smoothing so the filtered curve has no phase lag
    def one_pass(values):
        last = values[0]  # First value in the curve (first timestep)
        out = []
        for point in values:
            smoothed_val = last * weight + (1 - weight) * point  # Calculate smoothed value
            out.append(smoothed_val)                             # Save it
            last = smoothed_val                                  # Anchor the last smoothed value
        return out

    forward = one_pass(scalars)                   # forward pass
    backward = one_pass(list(reversed(forward)))  # backward pass over the reversed result
    return list(reversed(backward))               # restore original order
       
       
def open_file():
    global j_data
    global bs_pose_count
    global bs_frame_count
    global bs_names
    global srt_time
    global srt_frame
           
    file_dialog = QtWidgets.QFileDialog()
    file_dialog.setNameFilter("*.json")
    file_dialog.exec()
    get_picked_obj()
    if selected_obj is None:  # nothing selected -- bail out before touching the face component
        return

    face_component = selected_obj.GetFaceComponent()
    morph = RLPy.FloatVector()
    bone = RLPy.FloatVector()
    custom = RLPy.FloatVector()
    morph.resize(60)
    bone.resize(12)
    custom.resize(64)

    if (len(file_dialog.selectedFiles()) > 0):
        json_file_path = file_dialog.selectedFiles()[0]
        json_file_path_list = json_file_path.split('/')
        j_file_name = json_file_path_list[len(json_file_path_list)-1]
        ui["widget"].txt_json_file_name.setText(j_file_name)
       
        with open(json_file_path,'r') as animate_json_reader:
            j_data = animate_json_reader.read()
            json_animate = json.loads(j_data)
            bs_animation = json_animate['weightMat']
            bs_pose_count = json_animate['numPoses']
            bs_frame_count = json_animate['numFrames']
            bs_names = json_animate['facsNames']
           
            openStren = float(ui["widget"].jaw_open_strength.text())/100.0
            expStren = float(ui["widget"].expression_strength.text())/100.0
           
            get_current_time()
            srt_frame = RLPy.RTime.GetFrameIndex(srt_time, RLPy.RGlobal.GetFps())
            clip_length = RLPy.RTime.IndexedFrameTime(bs_frame_count, RLPy.RGlobal.GetFps())
            face_component.AddClip(srt_time, "Expression_Clip", clip_length)

            for i in range(bs_frame_count):
                try:
                    # Per the website docs the first 8 entries of each weightMat row are
                    # skipped; the 60 base morphs appear to start at index 8, so this
                    # should be fine.
                    for j in range(60):
                        morph[j] = bs_animation[i][j + 8]
                except IndexError as e:
                    RLPy.RUi.ShowMessageBox(
                        "Index Error",
                        f"Exception occurred at frame {i}, morph index {j}: {str(e)}",
                        RLPy.EMsgButton_Ok)

                try:
                    # Custom morph channels sit after the base morphs; guard against
                    # exports that don't carry them (they may not exist in the data).
                    for k in range(51):
                        if (k + 68 < len(bs_animation[i]) - 1):
                            # index 68 is the jawbone
                            custom[k] = bs_animation[i][k + 68]
                except IndexError as e:
                    RLPy.RUi.ShowMessageBox(
                        "Index Error",
                        f"Exception occurred at frame {i}, custom index {k}: {str(e)}",
                        RLPy.EMsgButton_Ok)

                try:
                    # Index 68 (the last base channel) is the jaw-open value; the original
                    # 60% scaling factor was removed here.
                    bone[7] = bs_animation[i][68]
                except IndexError as e:
                    RLPy.RUi.ShowMessageBox(
                        "Index Error",
                        f"Exception occurred at frame {i}, bone index 7: {str(e)}",
                        RLPy.EMsgButton_Ok)

                time_i = RLPy.RTime.IndexedFrameTime(i + srt_frame, RLPy.RGlobal.GetFps())
                face_component.AddExpressionKey(time_i, morph, bone, custom, 1)
    RLPy.RGlobal.ObjectModified(selected_obj, RLPy.EObjectModifiedType_Attribute)


def apply_jawopen_strength():
    # Guard against clicking this before an animation has been loaded (j_data is still None)
    if j_data is None:
        apply_a2f_animation(None)  # apply_a2f_animation() shows the error message box
        return
    # Re-parse the stored JSON so strength/smoothing is applied to the original weights
    json_animate = json.loads(j_data)
    bs_animation = json_animate['weightMat']
    apply_a2f_animation(bs_animation)


def apply_a2f_animation(bsAnimation):
    if j_data is not None:
        openStren = float(ui["widget"].jaw_open_strength.text())/100.0
        expStren = float(ui["widget"].expression_strength.text())/100.0
        exp_sl = int(ui["widget"].expression_smooth.text())
        open_sl = int(ui["widget"].jaw_open_smooth.text())
        bs_anim = bs_anima = bsAnimation  # both names refer to the same weight matrix (modified in place)
       
        face_component = selected_obj.GetFaceComponent()
        morph = RLPy.FloatVector()
        bone = RLPy.FloatVector()
        custom = RLPy.FloatVector()
        morph.resize(60)
        bone.resize(12)
        custom.resize(64)
       
        for c in range(bs_pose_count):
            anim_curve = []
            for i in range(bs_frame_count):
                anim_curve.append(bs_anima[i][c])
            if max(anim_curve) != 0.0:
                if c == 121:  # this channel uses the jaw-open smoothing amount
                    if open_sl != 0:
                        for sl in range(open_sl):
                            anim_curve = smooth(anim_curve, 0.5)
                else:
                    if exp_sl != 0:
                        for sl in range(exp_sl):
                            anim_curve = smooth(anim_curve, 0.5)

            for i in range(bs_frame_count):
                bs_anim[i][c] = anim_curve[i]

        for i in range(bs_frame_count):
            for j in range(60):
                # skip first 8 morphs in the data
                morph[j] = bs_anim[i][j + 8] * expStren
               
            # apparently there are 51 custom morphs possible
            # starting after the jawbone which is index 68
            for k in range(51):
                if (k + 68 < len(bs_anim[i]) - 1):
                    custom[k] = bs_anim[i][k+68] * expStren
                   
            # Index 68 (the 69th item in the row) is the jaw-open value;
            # scale it by 60% and by the jaw-open strength slider.
            bone[7] = bs_anim[i][68] * 0.6 * openStren
           
            time_i = RLPy.RTime.IndexedFrameTime(i+srt_frame, RLPy.RGlobal.GetFps())
            face_component.AddExpressionKey( time_i, morph, bone, custom, 1 )

        RLPy.RGlobal.ObjectModified(selected_obj, RLPy.EObjectModifiedType_Attribute)

    else:
        RLPy.RUi.ShowMessageBox(
            "Omniverse Audio2Face Animation Import - Operation Error",
            "Please apply an Audio2Face animation to the character first then click this button !!",
            RLPy.EMsgButton_Ok)


def setSound():
    if j_data is not None:
        file_dialog = QtWidgets.QFileDialog()
        file_dialog.setNameFilter("*.wav")
        file_dialog.exec()
        if (len(file_dialog.selectedFiles()) > 0):
            wav_file = file_dialog.selectedFiles()[0]
            wav_file_list = wav_file.split('/')
            wav_file_name = wav_file_list[len(wav_file_list)-1]
            ui["widget"].txt_sound_file_name.setText(wav_file_name)
            RLPy.RAudio.LoadAudioToObject(selected_obj, wav_file, srt_time)
        RLPy.RGlobal.ObjectModified(selected_obj, RLPy.EObjectModifiedType_Attribute)
       
    else:
        RLPy.RUi.ShowMessageBox(
            "Omniverse Audio2Face Animation Import - Operation Error",
            "Please apply an Audio2Face animation to the character first then Apply Sound !!",
            RLPy.EMsgButton_Ok)


def show_window():
    global ui, events
   
    if "window" in ui:  # If the window already exists...
        if ui["window"].IsVisible():
            RLPy.RUi.ShowMessageBox(
                "Omniverse Audio2Face Animation Import - Operation Error",
                "The current Omniverse Audio2Face Animation Import is still running.  You must first close the window to start another session.",
                RLPy.EMsgButton_Ok)
        else:
            ui["window"].Show()
        return
       
    # Create an iClone Widget
    ui["window"] = RLPy.RUi.CreateRDockWidget()
    ui["window"].SetWindowTitle("Import Omniverse Audio2Face")
   
    # Load UI file
    ui_file = QtCore.QFile(os.path.dirname(__file__) + "/mainwindow.ui")
    ui_file.open(QtCore.QFile.ReadOnly)
    ui["widget"] = QtUiTools.QUiLoader().load(ui_file)
    ui_file.close()
   
    # Assign the UI file to the Pyside dock widget and show it
    ui["dialog"] = wrapInstance(int(ui["window"].GetWindow()), QtWidgets.QDockWidget)
    ui["dialog"].setWidget(ui["widget"])

    # Add UI functionality
    ui["widget"].BT_apply_a2f.clicked.connect(open_file)
    ui["widget"].BT_apply_sound.clicked.connect(setSound)
    ui["widget"].BT_apply_jaw_strength.clicked.connect(apply_jawopen_strength)

    # Show the UI
    ui["window"].Show()


#def run_script():
#    show_window()

def initialize_plugin():
    # Add menu
    ic_dlg = wrapInstance(int(RLPy.RUi.GetMainWindow()), QtWidgets.QMainWindow)
    plugin_menu = ic_dlg.menuBar().findChild(QtWidgets.QMenu, "a2f_anim_menu")
    if plugin_menu is None:
        plugin_menu = wrapInstance(int(RLPy.RUi.AddMenu("Omniverse Audio2Face", RLPy.EMenu_Plugins)), QtWidgets.QMenu)
        plugin_menu.setObjectName("a2f_anim_menu")

    a2f_menu_action = plugin_menu.addAction("Import Omniverse Audio2Face")
    a2f_menu_action.triggered.connect(show_window)