How to detect facial feature points in photos on React Native?

I want to use the Android app to detect the facial feature points of the photos taken to calculate the vertical distance from the earlobe to the chin. But now the PhotoScreen screen only displays the photos taken and the message “Calculating the average vertical distance from earlobe to chin…”,that is my App.tsx:

import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import {
  Platform,
  StyleProp,
  StyleSheet,
  useWindowDimensions,
  View,
  ViewStyle,
  Text,
  Image,
  BackHandler,
} from 'react-native';
import {
  Frame,
  useCameraDevices,
  useFrameProcessor,
} from 'react-native-vision-camera';
import {
  Dimensions,
  Face,
  faceBoundsAdjustToView,
  scanFaces,
  sortFormatsByResolution,
} from '@mat2718/vision-camera-face-detector';
import { runOnJS } from 'react-native-reanimated';
import { Camera } from 'react-native-vision-camera';
import Animated, {
  useSharedValue,
  withTiming,
  useAnimatedStyle,
} from 'react-native-reanimated';
import { createStackNavigator } from '@react-navigation/stack';
import { NavigationContainer } from '@react-navigation/native';
import { navigationRef, navigate } from './RootNavigation'; // 確保正確導入navigate
import ImageResizer from 'react-native-image-resizer';
import FaceDetection from '@react-native-ml-kit/face-detection';
import RNFS from 'react-native-fs';
import PhotoScreen from './PhotoScreen'; // 確保正確導入PhotoScreen

const focalLength = 2700; // 焦距(單位:像素)
const sensorHeight = 0.47; // 感光元件尺寸(單位:厘米)

const Stack = createStackNavigator();

const CameraScreen = () => {
  const [hasPermission, setHasPermission] = useState(false);
  const devices = useCameraDevices();
  const direction = 'front';
  const device = devices[direction];
  const camera = useRef(null);
  const [faces, setFaces] = useState([]);
  const { height: screenHeight, width: screenWidth } = useWindowDimensions();
  const landscapeMode = screenWidth > screenHeight;
  const [frameDimensions, setFrameDimensions] = useState();
  const [isActive, setIsActive] = useState(true);
  const [error, setError] = useState(null);
  const shouldTakePicture = useRef(false);
  const hasTakenPicture = useRef(false);
  const [photoPath, setPhotoPath] = useState(null);
  const countdown = useSharedValue(3);
  const countdownFinished = useSharedValue(false);
  const [isCountingDown, setIsCountingDown] = useState(false);
  const countdownText = useAnimatedStyle(() => ({
    opacity: countdown.value === 0 ? 0 : 1,
    transform: [{ scale: countdown.value === 0 ? 0 : 1 }],
  }));
  const distanceBuffer = useRef([]); // 用於保存距離值的緩衝區
  const BUFFER_SIZE = 5; // 緩衝區大小
  const frameCounter = useRef(0); // 初始化 frameCounter
  const [distance, setDistance] = useState(null);
  const [angle, setAngle] = useState(null);

  useEffect(() => {
    const backAction = () => {
      setPhotoPath(null);
      hasTakenPicture.current = false;
      distanceBuffer.current = [];
      setDistance(null);
      setAngle(null);
      return true;
    };

    const backHandler = BackHandler.addEventListener('hardwareBackPress', backAction);
    return () => backHandler.remove();
  }, []);

  useEffect(() => {
    return () => {
      setIsActive(false);
    };
  }, []);

  const formats = useMemo(() => device?.formats.sort(sortFormatsByResolution), [device?.formats]);
  const [format, setFormat] = useState(formats && formats.length > 0 ? formats[0] : undefined);

  const handleScan = useCallback((frame, newFaces) => {
    const isRotated = !landscapeMode;
    setFrameDimensions(
      isRotated
        ? {
            width: frame.height,
            height: frame.width,
          }
        : {
            width: frame.width,
            height: frame.height,
          },
    );
    setFaces(newFaces);
  }, [landscapeMode]);

  useEffect(() => {
    setFormat(formats && formats.length > 0 ? formats[0] : undefined);
  }, [device]);

  const frameProcessor = useFrameProcessor(
    frame => {
      'worklet';
      try {
        const scannedFaces = scanFaces(frame);
        runOnJS(handleScan)(frame, scannedFaces);
      } catch (e) {
        runOnJS(setError)(e.message);
      }
    },
    [handleScan],
  );

  useEffect(() => {
    (async () => {
      try {
        const status = await Camera.requestCameraPermission();
        setHasPermission(status === 'authorized');
      } catch (e) {
        setError(e.message);
      }
    })();
  }, []);

  const processImage = async (uri) => {
    console.log('Processing image:', uri);
    const fixedPath = `${RNFS.DocumentDirectoryPath}/fixed_photo.jpg`;
    await RNFS.copyFile(uri, fixedPath);
    const manipResult = await ImageResizer.createResizedImage(fixedPath, 800, 600, 'JPEG', 100);
    console.log('Resized image:', manipResult.uri);

    try {
      const faces = await FaceDetection.detect(manipResult.uri, { landmarkMode: 'all' });
      // console.log('Detected face landmarks:', faces);
      if (faces.length > 0) {
        // 這裡可以添加其他處理邏輯
      } else {
        console.log('No faces detected in the image');
      }
    } catch (error) {
      console.error('Error detecting face landmarks:', error);
    }
  };

  const takePicture = useCallback(async () => {
    if (camera.current) {
      const photo = await camera.current.takePhoto();
      console.log('Photo taken:', photo);
      setPhotoPath(photo.path);
      shouldTakePicture.current = false;
      hasTakenPicture.current = true;
      countdown.value = 0;
      setIsCountingDown(false);
      countdownFinished.value = false;
      processImage(photo.path);
      navigate('PhotoScreen', { imagePath: photo.path });
    }
  }, [camera]);

  useEffect(() => {
    if (shouldTakePicture.current && !isCountingDown) {
      setIsCountingDown(true);
      countdown.value = 3;
      countdown.value = withTiming(0, { duration: 3000 }, finished => {
        if (finished) {
          countdownFinished.value = true;
        }
      });
    }
  }, [shouldTakePicture.current, isCountingDown]);

  useEffect(() => {
    if (countdownFinished.value) {
      takePicture();
    }
  }, [countdownFinished.value, takePicture]);

  const styles = StyleSheet.create({
    boundingBox: {
      borderRadius: 5,
      borderWidth: 3,
      borderColor: 'yellow',
      position: 'absolute',
    },
    crossSectionContainer: {
      height: 15,
      width: 15,
      position: 'absolute',
      justifyContent: 'center',
      alignItems: 'center',
      top: screenHeight / 2,
      left: screenWidth / 2,
    },
    verticalCrossHair: {
      height: '100%',
      position: 'absolute',
      justifyContent: 'center',
      alignItems: 'center',
      borderColor: 'yellow',
      borderWidth: 1,
    },
    horizontalCrossHair: {
      width: '100%',
      position: 'absolute',
      justifyContent: 'center',
      alignItems: 'center',
      borderColor: 'yellow',
      borderWidth: 1,
    },
    photoPreview: {
      position: 'absolute',
      top: 0,
      left: 0,
      width: '100%',
      height: '100%',
      zIndex: 10,
    },
    distanceText: {
      position: 'absolute',
      top: 40,
      left: 0,
      right: 0,
      textAlign: 'center',
      color: 'white',
      backgroundColor: 'rgba(0, 0, 0, 0.5)',
      padding: 10,
      fontSize: 20,
      zIndex: 20,
    },
    angleText: {
      position: 'absolute',
      top: 80,
      left: 0,
      right: 0,
      textAlign: 'center',
      color: 'white',
      backgroundColor: 'rgba(0, 0, 0, 0.5)',
      padding: 10,
      fontSize: 20,
    },
    countdownText: {
      position: 'absolute',
      top: screenHeight / 2 - 50,
      left: 0,
      right: 0,
      textAlign: 'center',
      fontSize: 100,
      color: 'white',
      zIndex: 10,
    },
    photoDistanceText: {
      position: 'absolute',
      bottom: 40,
      left: 0,
      right: 0,
      textAlign: 'center',
      color: 'white',
      backgroundColor: 'rgba(0, 0, 0, 0.5)',
      padding: 10,
      fontSize: 20,
      zIndex: 20,
    },
  });

  const boundingStyle = useMemo(
    () => ({
      position: 'absolute',
      top: 0,
      left: 0,
      width: screenWidth,
      height: screenHeight,
    }),
    [screenWidth, screenHeight],
  );

  const calculateFaceDistance = (face) => {
    if (!face.bounds || face.bounds.width === undefined) {
      console.log('Face bounds or width is undefined');
      return null;
    }

    const focalLength = 2700;
    const realEyeDistance = 6.3;
    const eyeDistanceInPixels = face.bounds.width;

    const distance = (focalLength * realEyeDistance) / eyeDistanceInPixels;
    // console.log('Calculated distance (cm):', distance);
    return distance;
  };

  const calculateFaceAngle = (face) => {
    if (face.rollAngle === undefined) {
      return null;
    }
    return face.rollAngle;
  };

  const handleFaces = useCallback((newFaces) => {
    frameCounter.current += 1;

    if (newFaces.length > 0) {
      const calculatedDistance = calculateFaceDistance(newFaces[0]);
      const calculatedAngle = calculateFaceAngle(newFaces[0]);
      // console.log('Calculated distance (cm):', calculatedDistance);
      // console.log('Calculated angle (degrees):', calculatedAngle);

      if (calculatedDistance !== null) {
        setDistance(prevDistance => {
          if (calculatedDistance.toFixed(2) !== (prevDistance?.toFixed(2) || '')) {
            return calculatedDistance;
          }
          return prevDistance;
        });

        distanceBuffer.current.push(calculatedDistance);
        if (distanceBuffer.current.length > BUFFER_SIZE) {
          distanceBuffer.current.shift();
        }

        const avgDistance = distanceBuffer.current.reduce((a, b) => a + b, 0) / distanceBuffer.current.length;
        // console.log(`Average distance (cm): ${avgDistance}`);

        if (frameCounter.current % 5 === 0) {
          if (avgDistance >= 24 && avgDistance <= 26 && Math.abs(calculatedAngle) <= 3 && !shouldTakePicture.current && !hasTakenPicture.current) {
            shouldTakePicture.current = true;
          } else if (
            !(avgDistance >= 24 && avgDistance <= 26 && Math.abs(calculatedAngle) <= 3) &&
            isCountingDown
          ) {
            countdown.value = 3;
            setIsCountingDown(false);
            shouldTakePicture.current = false;
          }
        }
      }

      if (calculatedAngle !== null) {
        setAngle(calculatedAngle);
      }
    }
  }, [isCountingDown]);

  useEffect(() => {
    handleFaces(faces);
  }, [faces, handleFaces]);

  return device != null && hasPermission ? (
    <>
      {error && (
        <View style={{ position: 'absolute', top: 0, left: 0, right: 0, backgroundColor: 'red' }}>
          <Text style={{ color: 'white' }}>{error}</Text>
        </View>
      )}
      <Camera
        style={StyleSheet.absoluteFill}
        device={device}
        torch={'off'}
        isActive={isActive}
        ref={camera}
        photo={true}
        frameProcessor={frameProcessor}
        frameProcessorFps={30}
        audio={false}
        format={format}
      />
      <View style={styles.crossSectionContainer}>
        <View style={styles.verticalCrossHair} />
        <View style={styles.horizontalCrossHair} />
      </View>
      <View style={boundingStyle} testID="faceDetectionBoxView">
        {frameDimensions &&
          (() => {
            const mirrored = Platform.OS === 'android' && direction === 'front';
            const { adjustRect } = faceBoundsAdjustToView(
              frameDimensions,
              {
                width: screenWidth,
                height: screenHeight,
              },
              landscapeMode,
              50,
              50,
            );
            return faces
              ? faces.map((i, index) => {
                  const { left, ...others } = adjustRect(i.bounds);
                  return (
                    <View
                      key={index}
                      style={[
                        styles.boundingBox,
                        {
                          ...others,
                          [mirrored ? 'right' : 'left']: left,
                        },
                      ]}
                    />
                  );
                })
              : null;
          })()}
      </View>
      {distance !== null && (
        <Text style={styles.distanceText}>
          {`距離: ${distance.toFixed(2)} cm`}
        </Text>
      )}
      {angle !== null && (
        <Text style={styles.angleText}>
          {`角度: ${angle.toFixed(2)} 度`}
        </Text>
      )}
      {photoPath && (
        <>
          <Image source={{ uri: `file://${photoPath}` }} style={styles.photoPreview} />
          <Text style={styles.photoDistanceText}>照片已拍攝</Text>
        </>
      )}
      {isCountingDown && (
        <Animated.Text style={[styles.countdownText, countdownText]}>
          {countdown.value > 0 ? countdown.value.toFixed(0) : ''}
        </Animated.Text>
      )}
    </>
  ) : null;
};

const App = () => {
  return (
    <NavigationContainer ref={navigationRef}>
      <Stack.Navigator>
        <Stack.Screen name="Camera" component={CameraScreen} options={{ headerShown: false }} />
        <Stack.Screen name="PhotoScreen" component={PhotoScreen} />
      </Stack.Navigator>
    </NavigationContainer>
  );
};

export default App;

and this is my PhotoScreen.tsx:

import React, { useEffect, useState, useRef } from 'react';
import { View, Image, Text, StyleSheet, Platform, PermissionsAndroid, Alert } from 'react-native';
import RNFS from 'react-native-fs';
import { WebView } from 'react-native-webview';

export default function PhotoScreen({ route }) {
  const { imagePath } = route.params;
  const [distanceInCm, setDistanceInCm] = useState(null);
  const [htmlContent, setHtmlContent] = useState('');
  const [permissionsGranted, setPermissionsGranted] = useState(false);
  const [errorMessage, setErrorMessage] = useState('');
  const [isWebViewLoaded, setIsWebViewLoaded] = useState(false);
  const webViewRef = useRef(null);

  const onMessage = (event) => {
    console.log('Received message from WebView:', event.nativeEvent.data);
    try {
      const data = JSON.parse(event.nativeEvent.data);
      if (data.type === 'face_landmarks') {
        console.log('Received face landmarks data:', data);
        setDistanceInCm(data.averageDistanceCm);
      } else if (data.type === 'error') {
        console.error('Error from WebView:', data.message);
        setErrorMessage(data.message);
      } else if (data.type === 'log') {
        console.log('WebView log:', data.message);
      }
    } catch (error) {
      console.error('Error parsing WebView message:', error);
    }
  };

  useEffect(() => {
    const requestPermissions = async () => {
      if (Platform.OS === 'android') {
        try {
          const granted = await PermissionsAndroid.request(
            PermissionsAndroid.PERMISSIONS.READ_EXTERNAL_STORAGE,
            {
              title: 'Storage Permission',
              message: 'This app needs access to your storage to load images.',
              buttonNeutral: 'Ask Me Later',
              buttonNegative: 'Cancel',
              buttonPositive: 'OK',
            }
          );
          if (granted === PermissionsAndroid.RESULTS.GRANTED) {
            console.log('Storage permission granted');
            setPermissionsGranted(true);
          } else {
            console.log('Storage permission denied');
            setErrorMessage('存儲權限是使用此功能所必需的。');
          }
        } catch (err) {
          console.warn(err);
          setErrorMessage('請求權限時發生錯誤。');
        }
      } else {
        setPermissionsGranted(true);
      }
    };

    requestPermissions();
  }, []);

  useEffect(() => {
    const loadFaceMesh = async () => {
      try {
        const base64Image = await RNFS.readFile(imagePath, 'base64');
        const htmlContent = `
          <!DOCTYPE html>
          <html>
          <head>
            <meta name="viewport" content="width=device-width, initial-scale=1.0">
            <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script>
            <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter"></script>
            <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-webgl"></script>
            <script src="https://cdn.jsdelivr.net/npm/@tensorflow/[email protected]"></script>
            <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/[email protected]"></script>
          </head>
          <body style="margin:0; padding:0;">
            <img id="image" style="display:none;"/>
            <canvas id="canvas" style="width:100%; height:100%;"></canvas>
            <script>
              "use strict";
              let model;
              import { FaceMesh } from 'https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh';
              const faceMesh = new FaceMesh();

              function sendMessage(type, data) {
                window.ReactNativeWebView.postMessage(JSON.stringify({ type, ...data }));
              }

              async function loadModel() {
                try {
                  await faceMesh.initialize();
                  sendMessage('log', { message: 'Face Mesh model loaded' });
                } catch (error) {
                  sendMessage('error', { message: 'Error loading model: ' + error.message });
                }
              }

              async function processImage() {
                try {
                  const image = document.getElementById('image');
                  const canvas = document.getElementById('canvas');
                  const ctx = canvas.getContext('2d');

                  sendMessage('log', { message: 'Image width: ' + image.width });
                  sendMessage('log', { message: 'Image height: ' + image.height });

                  canvas.width = image.width;
                  canvas.height = image.height;
                  ctx.drawImage(image, 0, 0);

                  const predictions = await faceMesh.estimateFaces({
                    input: canvas,
                  });

                  sendMessage('log', { message: 'Predictions: ' + JSON.stringify(predictions) });

                  if (predictions.length > 0) {
                    const landmarks = predictions[0].scaledMesh;
                    const leftEarLobe = landmarks[234];
                    const rightEarLobe = landmarks[454];
                    const chin = landmarks[152];
                    const leftDistance = Math.abs(leftEarLobe[1] - chin[1]);
                    const rightDistance = Math.abs(rightEarLobe[1] - chin[1]);
                    const averageDistance = (leftDistance + rightDistance) / 2;

                    sendMessage('log', { message: 'Average distance: ' + averageDistance });

                    const sensorHeight = 0.47;
                    const focalLength = 2700;
                    const cmPerPixel = sensorHeight / focalLength;
                    const averageDistanceCm = averageDistance * cmPerPixel;

                    sendMessage('log', { message: 'Average distance (cm): ' + averageDistanceCm });

                    sendMessage('face_landmarks', { 
                      landmarks: landmarks,
                      averageDistanceCm: averageDistanceCm
                    });
                  } else {
                    sendMessage('error', { message: 'No face detected' });
                  }
                } catch (error) {
                  sendMessage('error', { message: 'Image processing failed: ' + error.message });
                }
              }

              async function init() {
                try {
                  await loadModel();
                  image.onload = async () => {
                    try {
                      const predictions = await faceMesh.estimateFaces({
                        input: tf.browser.fromPixels(canvas),
                      });
                      sendMessage('log', { message: 'Predictions: ' + JSON.stringify(predictions) });          
                    } catch (error) {
                      sendMessage('error', { message: 'Error processing image: ' + error.message });
                    }
                  };
                  image.src = 'data:image/jpeg;base64,${base64Image}';
                } catch (error) {
                  sendMessage('error', { message: 'Error initializing face mesh: ' + error.message });
                }
              }

              init();
            </script>
          </body>
          </html>
        `;
        setHtmlContent(htmlContent);
      } catch (error) {
        console.error('Error loading image:', error);
        setErrorMessage('加載圖像時發生錯誤,請稍後再試。');
      }
    };

    if (permissionsGranted) {
      loadFaceMesh();
    }
  }, [imagePath, permissionsGranted]);

  return (
    <View style={styles.container}>
      <Image source={{ uri: `file://${imagePath}` }} style={styles.image} />
      {htmlContent !== '' && permissionsGranted && (
        <WebView
          ref={webViewRef}
          originWhitelist={['*']}
          source={{ html: htmlContent }}
          onMessage={onMessage}
          style={{ width: 1, height: 1, opacity: 0 }}
          javaScriptEnabled={true}
          domStorageEnabled={true}
          androidLayerType="software"
          androidHardwareAccelerationDisabled={true}
          onLoadEnd={() => {
            console.log('WebView loaded');
            setIsWebViewLoaded(true);
          }}
          onError={(syntheticEvent) => {
            const { nativeEvent } = syntheticEvent;
            console.error('WebView error: ', nativeEvent);
            setErrorMessage(`WebView 錯誤: ${nativeEvent.description}`);
          }}
        />
      )}
      {distanceInCm !== null ? (
        <Text style={styles.text}>耳垂到下巴的平均垂直距離: {distanceInCm.toFixed(2)} 厘米</Text>
      ) : errorMessage ? (
        <Text style={styles.errorText}>{errorMessage}</Text>
      ) : (
        <Text style={styles.text}>
          {isWebViewLoaded ? '正在計算耳垂到下巴的平均垂直距離...' : '正在加載模型...'}
        </Text>
      )}
    </View>
  );
}

const styles = StyleSheet.create({
  container: {
    flex: 1,
    alignItems: 'center',
    justifyContent: 'center',
    backgroundColor: '#f0f0f0',
  },
  image: {
    width: 300,
    height: 300,
    resizeMode: 'contain',
    marginBottom: 20,
  },
  text: {
    marginTop: 20,
    fontSize: 18,
    textAlign: 'center',
    padding: 10,
  },
  errorText: {
    marginTop: 20,
    fontSize: 18,
    textAlign: 'center',
    padding: 10,
    color: 'red',
  },
});

Main.tsx:

import React from 'react';
import { NavigationContainer } from '@react-navigation/native';
import { createStackNavigator } from '@react-navigation/stack';
import App from './App';
import PhotoScreen from './PhotoScreen';
import { navigationRef } from './RootNavigation';

const Stack = createStackNavigator();

export default function Main() {
  return (
    <NavigationContainer ref={navigationRef}>
      <Stack.Navigator initialRouteName="Camera">
        <Stack.Screen name="Camera" component={App} />
        <Stack.Screen name="PhotoScreen" component={PhotoScreen} />
      </Stack.Navigator>
    </NavigationContainer>
  );
}

RootNavigation.js:

import * as React from 'react';

export const navigationRef = React.createRef();

export function navigate(name, params) {
  navigationRef.current?.navigate(name, params);
}

package.json:

{
  "name": "vision-camera-face-detector-example",
  "description": "Example app for vision-camera-face-detector",
  "version": "0.0.1",
  "private": true,
  "scripts": {
    "android": "react-native run-android",
    "ios": "react-native run-ios",
    "start": "react-native start",
    "pods": "pod-install --quiet"
  },
  "dependencies": {
    "@babel/plugin-transform-class-properties": "^7.24.7",
    "@babel/plugin-transform-private-methods": "^7.24.7",
    "@babel/plugin-transform-private-property-in-object": "^7.24.7",
    "@react-native-ml-kit/face-detection": "^1.3.2",
    "@react-navigation/core": "^6.4.17",
    "@react-navigation/elements": "^1.3.31",
    "@react-navigation/native": "^6.1.18",
    "@react-navigation/stack": "^6.4.1",
    "react": "18.1.0",
    "react-native": "0.70.6",
    "react-native-fs": "^2.20.0",
    "react-native-gesture-handler": "^2.18.1",
    "react-native-image-resizer": "^1.4.5",
    "react-native-reanimated": "2.10.0",
    "react-native-safe-area-context": "^4.10.8",
    "react-native-vision-camera": "^2.15.2",
    "react-native-webview": "^13.10.5"
  },
  "devDependencies": {
    "@babel/core": "^7.25.2",
    "@babel/runtime": "^7.15.3",
    "@types/react": "^18.0.26",
    "@types/react-native": "^0.70.8",
    "babel-plugin-module-resolver": "^4.1.0",
    "eslint": "^7.32.0",
    "metro-react-native-babel-preset": "^0.66.2",
    "react-test-renderer": "18.1.0",
    "typescript": "^5.5.4"
  }
}

I want to calculate the average of the vertical distance from the left earlobe to the chin and the vertical distance from the right earlobe to the chin through facial feature points, and display it on the phone screen together with the photos taken. Can anyone tell me what is wrong with the program?Thanks!

I want to calculate the average of the vertical distance from the left earlobe to the chin and the vertical distance from the right earlobe to the chin through facial feature points, and display it on the phone screen together with the photos taken. Can anyone tell me what is wrong with the program?Thanks!

New contributor

劉哲瑋 is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.

Trang chủ Giới thiệu Sinh nhật bé trai Sinh nhật bé gái Tổ chức sự kiện Biểu diễn giải trí Dịch vụ khác Trang trí tiệc cưới Tổ chức khai trương Tư vấn dịch vụ Thư viện ảnh Tin tức - sự kiện Liên hệ Chú hề sinh nhật Trang trí YEAR END PARTY công ty Trang trí tất niên cuối năm Trang trí tất niên xu hướng mới nhất Trang trí sinh nhật bé trai Hải Đăng Trang trí sinh nhật bé Khánh Vân Trang trí sinh nhật Bích Ngân Trang trí sinh nhật bé Thanh Trang Thuê ông già Noel phát quà Biểu diễn xiếc khỉ Xiếc quay đĩa Dịch vụ tổ chức sự kiện 5 sao Thông tin về chúng tôi Dịch vụ sinh nhật bé trai Dịch vụ sinh nhật bé gái Sự kiện trọn gói Các tiết mục giải trí Dịch vụ bổ trợ Tiệc cưới sang trọng Dịch vụ khai trương Tư vấn tổ chức sự kiện Hình ảnh sự kiện Cập nhật tin tức Liên hệ ngay Thuê chú hề chuyên nghiệp Tiệc tất niên cho công ty Trang trí tiệc cuối năm Tiệc tất niên độc đáo Sinh nhật bé Hải Đăng Sinh nhật đáng yêu bé Khánh Vân Sinh nhật sang trọng Bích Ngân Tiệc sinh nhật bé Thanh Trang Dịch vụ ông già Noel Xiếc thú vui nhộn Biểu diễn xiếc quay đĩa Dịch vụ tổ chức tiệc uy tín Khám phá dịch vụ của chúng tôi Tiệc sinh nhật cho bé trai Trang trí tiệc cho bé gái Gói sự kiện chuyên nghiệp Chương trình giải trí hấp dẫn Dịch vụ hỗ trợ sự kiện Trang trí tiệc cưới đẹp Khởi đầu thành công với khai trương Chuyên gia tư vấn sự kiện Xem ảnh các sự kiện đẹp Tin mới về sự kiện Kết nối với đội ngũ chuyên gia Chú hề vui nhộn cho tiệc sinh nhật Ý tưởng tiệc cuối năm Tất niên độc đáo Trang trí tiệc hiện đại Tổ chức sinh nhật cho Hải Đăng Sinh nhật độc quyền Khánh Vân Phong cách tiệc Bích Ngân Trang trí tiệc bé Thanh Trang Thuê dịch vụ ông già Noel chuyên nghiệp Xem xiếc khỉ đặc sắc Xiếc quay đĩa thú vị
Trang chủ Giới thiệu Sinh nhật bé trai Sinh nhật bé gái Tổ chức sự kiện Biểu diễn giải trí Dịch vụ khác Trang trí tiệc cưới Tổ chức khai trương Tư vấn dịch vụ Thư viện ảnh Tin tức - sự kiện Liên hệ Chú hề sinh nhật Trang trí YEAR END PARTY công ty Trang trí tất niên cuối năm Trang trí tất niên xu hướng mới nhất Trang trí sinh nhật bé trai Hải Đăng Trang trí sinh nhật bé Khánh Vân Trang trí sinh nhật Bích Ngân Trang trí sinh nhật bé Thanh Trang Thuê ông già Noel phát quà Biểu diễn xiếc khỉ Xiếc quay đĩa
Thiết kế website Thiết kế website Thiết kế website Cách kháng tài khoản quảng cáo Mua bán Fanpage Facebook Dịch vụ SEO Tổ chức sinh nhật