// CreateManager.cs
using System.Collections;
using System;
using System.Collections.Generic;
using Unity.Collections;
using Unity.Collections.LowLevel.Unsafe;
using UnityEngine;
using UnityEngine.UI;
using UnityEngine.XR.ARFoundation;
using UnityEngine.XR.ARSubsystems;

public class CreateManager : MonoBehaviour
{
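    // Places an AR object on a detected plane in the darkest part of the camera
    // view: each frame the camera image is copied to a CPU-readable texture, the
    // screen is split into a 3x3 grid, the pixel at each cell center is sampled,
    // and the object is spawned in the darkest cell once its brightness falls
    // below 0.2.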

    public ARRaycastManager raycastMgr;
    public ARPlane _ARPlane;
    public GameObject placeObject;
    public GameObject tempObject;
    private List<ARRaycastHit> hits = new List<ARRaycastHit>(); // reused across raycasts to avoid per-frame allocations
    bool created; // set once the object has been placed

    public bool gameoverTimerActivate;

    // Game-over state: elapsed dead time and the panel shown on game over.
    private float deadTime;
    public GameObject gOverPanel;

    [SerializeField]
    [Tooltip("The ARCameraManager which will produce frame events.")]
    ARCameraManager m_CameraManager;

    /// <summary>
    /// Get or set the <c>ARCameraManager</c>.
    /// </summary>
    public ARCameraManager cameraManager
    {
        get => m_CameraManager;
        set => m_CameraManager = value;
    }

    [SerializeField]
    RawImage m_RawCameraImage;

    /// <summary>
    /// The UI RawImage used to display the camera image on screen.
    /// </summary>
    public RawImage rawCameraImage
    {
        get => m_RawCameraImage;
        set => m_RawCameraImage = value;
    }

    [SerializeField]
    [Tooltip("The AROcclusionManager which will produce human depth and stencil textures.")]
    AROcclusionManager m_OcclusionManager;

    /// <summary>
    /// Get or set the <c>AROcclusionManager</c>.
    /// </summary>
    public AROcclusionManager occlusionManager
    {
        get => m_OcclusionManager;
        set => m_OcclusionManager = value;
    }
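
    // Note: the occlusion manager and the four depth/stencil RawImages below are
    // exposed for the inspector but are never read elsewhere in this script.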

    [SerializeField]
    RawImage m_RawHumanDepthImage;

    /// <summary>
    /// The UI RawImage used to display the human depth image on screen.
    /// </summary>
    public RawImage rawHumanDepthImage
    {
        get => m_RawHumanDepthImage;
        set => m_RawHumanDepthImage = value;
    }

    [SerializeField]
    RawImage m_RawHumanStencilImage;

    /// <summary>
    /// The UI RawImage used to display the human stencil image on screen.
    /// </summary>
    public RawImage rawHumanStencilImage
    {
        get => m_RawHumanStencilImage;
        set => m_RawHumanStencilImage = value;
    }

    [SerializeField]
    RawImage m_RawEnvironmentDepthImage;

    /// <summary>
    /// The UI RawImage used to display the environment depth image on screen.
    /// </summary>
    public RawImage rawEnvironmentDepthImage
    {
        get => m_RawEnvironmentDepthImage;
        set => m_RawEnvironmentDepthImage = value;
    }

    [SerializeField]
    RawImage m_RawEnvironmentDepthConfidenceImage;

    /// <summary>
    /// The UI RawImage used to display the environment depth confidence image on screen.
    /// </summary>
    public RawImage rawEnvironmentDepthConfidenceImage
    {
        get => m_RawEnvironmentDepthConfidenceImage;
        set => m_RawEnvironmentDepthConfidenceImage = value;
    }

    [SerializeField]
    Text m_ImageInfo;
    public Text m_ImageInfo2;
    public Text m_ImageInfo3;

    /// <summary>
    /// The UI Text used to display information about the image on screen.
    /// </summary>
    public Text imageInfo
    {
        get => m_ImageInfo;
        set => m_ImageInfo = value;
    }

    public Text imageInfo2
    {
        get => m_ImageInfo2;
        set => m_ImageInfo2 = value;
    }

    void OnEnable()
    {
        // Subscribe while the component is enabled; OnDisable removes the handler
        // so the callback is never registered twice.
        if (m_CameraManager != null)
        {
            m_CameraManager.frameReceived += OnCameraFrameReceived;
        }
    }

    void OnDisable()
    {
        if (m_CameraManager != null)
        {
            m_CameraManager.frameReceived -= OnCameraFrameReceived;
        }
    }

    unsafe void UpdateCameraImage()
    {
        // Attempt to get the latest camera image. If this method succeeds,
        // it acquires a native resource that must be disposed (see below).
        if (!cameraManager.TryAcquireLatestCpuImage(out XRCpuImage image))
        {
            return;
        }
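
        // From this point the XRCpuImage owns a native buffer that must be
        // disposed on every path; the try/finally below guarantees that even if
        // the conversion throws.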

        // Display some information about the camera image. Camera.current is only
        // valid during rendering, so use the main camera (assumed to be the AR
        // camera tagged MainCamera) for the viewport conversion.
        Vector3 screenCenter2 = Camera.main.ViewportToScreenPoint(new Vector3(0.5f, 0.5f));
        m_ImageInfo.text = string.Format(
            "Image info:\n\twidth: {0}\n\theight: {1}\n\tplaneCount: {2}\n\ttimestamp: {3}\n\tformat: {4}\n\tcenterPosX: {5}\n\tcenterPosY: {6}",
            image.width, image.height, image.planeCount, image.timestamp, image.format, screenCenter2.x, screenCenter2.y);

        // Once we have a valid XRCpuImage, we can access the individual image "planes"
        // (the separate channels in the image). XRCpuImage.GetPlane provides
        // low-overhead access to this data. This could then be passed to a
        // computer vision algorithm. Here, we will convert the camera image
        // to an RGBA texture and draw it on the screen.

        // Choose an RGBA format.
        // See XRCpuImage.FormatSupported for a complete list of supported formats.
        var format = TextureFormat.RGBA32;

        // (Re)allocate the destination texture only when the incoming image size changes.
        if (m_CameraTexture == null || m_CameraTexture.width != image.width || m_CameraTexture.height != image.height)
        {
            m_CameraTexture = new Texture2D(image.width, image.height, format, false);
        }

        // Convert the image to the chosen format, flipping it across the Y axis.
        // We could also convert a sub-rectangle, but we use the full image here.
        var conversionParams = new XRCpuImage.ConversionParams(image, format, XRCpuImage.Transformation.MirrorY);

        // Texture2D allows us to write directly to the raw texture data.
        // This lets us do the conversion in place without making any copies.
        var rawTextureData = m_CameraTexture.GetRawTextureData<byte>();
        try
        {
            image.Convert(conversionParams, new IntPtr(rawTextureData.GetUnsafePtr()), rawTextureData.Length);
        }
        finally
        {
            // We must dispose of the XRCpuImage after we're finished
            // with it to avoid leaking native resources.
            image.Dispose();
        }

        // Apply the updated texture data to our texture
        m_CameraTexture.Apply();
        // Sample the center pixel once and report its channels and average intensity.
        Color center = m_CameraTexture.GetPixel(m_CameraTexture.width / 2, m_CameraTexture.height / 2);
        m_ImageInfo2.text = string.Format(
            "Image 2:\n\twidth: {0}\n\theight: {1}\n\thideFlags: {2}\n\tred: {3}\n\tgreen: {4}\n\tblue: {5}\n\tintensity: {6}",
            m_CameraTexture.width, m_CameraTexture.height, m_CameraTexture.hideFlags,
            center.r, center.g, center.b, (center.r + center.g + center.b) / 3);

        // Set the RawImage's texture so we can visualize it.
        m_RawCameraImage.texture = m_CameraTexture;
    }
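
    // A minimal helper sketch (not in the original source): the average-of-RGB
    // brightness that this script computes inline in several places. Left unused
    // here to avoid changing the original expressions.
    static float Brightness(Color c) => (c.r + c.g + c.b) / 3f;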

    void OnCameraFrameReceived(ARCameraFrameEventArgs eventArgs)
    {
        // Refresh the CPU-side camera texture whenever a new AR frame arrives.
        UpdateCameraImage();
    }

    // Destination texture for the converted camera image, reused across frames.
    Texture2D m_CameraTexture;

    // Start is called before the first frame update
    void Start()
    {
        // Hide the placeable objects and the game-over panel until they are needed.
        placeObject.SetActive(false);
        tempObject.SetActive(false);
        deadTime = 0;
        created = false;
        gOverPanel.SetActive(false);
    }

    // Update is called once per frame
    void Update()
    {
        // Keep trying to place the object until it has been created.
        if (!created)
        {
            UpdateCenterObject();
        }
        /*
        if (placeObject.activeSelf) // while the object is active
        {
            deadTime += Time.deltaTime; // accumulate the elapsed time
        }

        checkDead(); // see the sketch at the bottom of this class
        */
    }

    void UpdateCenterObject()
    {
        // Wait until the first camera frame has been converted.
        if (m_CameraTexture == null)
        {
            return;
        }

        // Camera.current is only valid while rendering, so use the main camera
        // (assumed to be tagged MainCamera) for viewport-to-screen conversion.
        Vector3 screenCenter = Camera.main.ViewportToScreenPoint(new Vector3(0.5f, 0.5f));
        raycastMgr.Raycast(screenCenter, hits, TrackableType.PlaneWithinPolygon);

        if (hits.Count > 0) // the raycast hit at least one detected plane
        {
            Pose placementPose = hits[0].pose;

            //placeObject.SetActive(true);
            //placeObject.transform.SetPositionAndRotation(placementPose.position, placementPose.rotation);
            created = false;

            // Split the screen into a 3x3 grid, sample the pixel at the center of
            // each cell, and spawn the object in the darkest cell if its brightness
            // is below 0.2. Note: the original multiplied by (1 / 6) etc., which is
            // integer division and always evaluates to pixel (0, 0); width / 6 is
            // the intended coordinate.
            Color color1 = m_CameraTexture.GetPixel(m_CameraTexture.width / 6, m_CameraTexture.height / 6);
            /*
             * // Neighborhood scan: check every pixel within a 20-pixel radius of the sample point.
             * bool isDark = false;
             * for (int i = -20; i < 20; i++)
             * {
             *     for (int j = -20; j < 20; j++)
             *     {
             *         Color color_1 = m_CameraTexture.GetPixel(m_CameraTexture.width / 6 + i, m_CameraTexture.height / 6 + j);
             *         float i_1 = (color_1.r + color_1.g + color_1.b) / 3; // brightness of this pixel
             *         if (i_1 < 0.2)
             *         {
             *             isDark = true;
             *         }
             *         else
             *         {
             *             isDark = false; // note: this overwrites earlier results, so only the last pixel counts
             *         }
             *     }
             * }
             */
            Color color2 = m_CameraTexture.GetPixel(m_CameraTexture.width / 2, m_CameraTexture.height / 6);
            Color color3 = m_CameraTexture.GetPixel(m_CameraTexture.width * 5 / 6, m_CameraTexture.height / 6);
            Color color4 = m_CameraTexture.GetPixel(m_CameraTexture.width / 6, m_CameraTexture.height / 2);
            Color color5 = m_CameraTexture.GetPixel(m_CameraTexture.width / 2, m_CameraTexture.height / 2);
            Color color6 = m_CameraTexture.GetPixel(m_CameraTexture.width * 5 / 6, m_CameraTexture.height / 2);
            Color color7 = m_CameraTexture.GetPixel(m_CameraTexture.width / 6, m_CameraTexture.height * 5 / 6);
            Color color8 = m_CameraTexture.GetPixel(m_CameraTexture.width / 2, m_CameraTexture.height * 5 / 6);
            Color color9 = m_CameraTexture.GetPixel(m_CameraTexture.width * 5 / 6, m_CameraTexture.height * 5 / 6);
            Color[] colorarray = new Color[] { color1, color2, color3, color4, color5, color6, color7, color8, color9 }; // colors sampled at the nine cell centers
            float[] iarray = new float[9]; // per-cell brightness
            for (int i = 0; i < 9; i++)
            {
                iarray[i] = (colorarray[i][0] + colorarray[i][1] + colorarray[i][2]) / 3;
            }

            // Find the darkest of the nine cells.
            float min = iarray[0];
            int idx = 0;
            for (int i = 0; i < 9; i++)
            {
                if (min > iarray[i])
                {
                    min = iarray[i];
                    idx = i;
                }
            }

            // Map the darkest cell index to its viewport center (column = idx % 3,
            // row = idx / 3) and raycast there to find a plane in that region.
            float[] cellCenters = { 0.166f, 0.5f, 0.83f };
            Vector3 pos = Camera.main.ViewportToScreenPoint(new Vector3(cellCenters[idx % 3], cellCenters[idx / 3]));
            raycastMgr.Raycast(pos, hits, TrackableType.PlaneWithinPolygon);

            if (hits.Count == 0)
            {
                return; // no plane under the darkest cell
            }
            // Closest hit from the second raycast. (The original indexed hits[1],
            // which throws when the raycast returns a single hit.)
            Pose placementPose2 = hits[0].pose;
            m_ImageInfo3.text = placementPose.position.x + ", " + placementPose.position.y;

            if (min < 0.2f) // spawn the object where the darkest cell's brightness is below 0.2
            {
                tempObject.SetActive(true);
                tempObject.transform.SetPositionAndRotation(placementPose2.position, placementPose2.rotation); // TODO: how should the final position be chosen?
                created = true;
            }
        }
    }
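
    // A minimal sketch (an assumption, not in the original file) of the game-over
    // check that Update references as checkDead(); kept commented out so nothing
    // changes at runtime. The 10-second threshold is hypothetical.
    /*
    void checkDead()
    {
        if (deadTime > 10f) // hypothetical survival window
        {
            gOverPanel.SetActive(true);     // show the game-over panel
            gameoverTimerActivate = true;   // flag for any external timer logic
        }
    }
    */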
}