How to save the image captured by a webcam to a SQL Server 2005 database

  • Question

  • Hi!

     I have a WinForms window that contains the following elements:

    1/ A pictureBox1 (which captures the image from the webcam)

    2/ A pictureBox2 (which displays the captured image when it is saved)

    3/ button1 (the button that starts the capture camera in pictureBox1 to detect and recognize the face)

    4/ button2 (to add the image to an XML database): the face is added to a folder that contains only the name, the image number is shown on a label, and when the image is detected the image name and the image number are displayed on a label.

    I cannot manage to save all of this information to a SQL Server 2005 database.

    Now, how can all of this information be saved to a SQL Server 2005 database (IDimage, NomImage, Image), and how can the detected image's information be displayed: the image name, the image number, and the image itself?

    Here is the code I use to save to the SQL Server 2005 database and to detect the image from the SQL Server 2005 database:

    using System;
    using System.Collections.Generic;
    using System.Drawing;
    using System.Windows.Forms;
    using Emgu.CV;
    using Emgu.CV.Structure;
    using Emgu.CV.CvEnum;
    using System.IO;
    using System.Data.OleDb;
    using System.Data.SqlClient;
    using System.Diagnostics;
    using Microsoft.VisualBasic;
    using System.Data;

    namespace MultiFaceRec
    {
        public partial class FrmPrincipal : Form
        {
            OleDbConnection con = new OleDbConnection();
            OleDbCommand AddData = new OleDbCommand();
            DataSet DR = new DataSet();
            OleDbDataAdapter Jm;
            string Mphoto;
           int a, MaxRows;
            int NumImage;
            string Imagedetect = "";
            string ImageLire = "";
            OleDbCommand SelectData;
            //Declaration of all variables, vectors and Haar cascades
            Image<Bgr, Byte> currentFrame;
            Capture grabber;
            HaarCascade face;
            HaarCascade eye;
            MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);
            Image<Gray, byte> result, TrainedFace = null;
            Image<Gray, byte> gray = null;
            List<Image<Gray, byte>> trainingImages = new List<Image<Gray, byte>>();
            List<string> labels= new List<string>();
            List<string> NamePersons = new List<string>();
            int ContTrain, NumLabels, t;
            string name, names = null;
            public FrmPrincipal()
            {
                InitializeComponent();
                //Load haarcascades for face detection
                face = new HaarCascade("haarcascade_frontalface_default.xml");
                //eye = new HaarCascade("haarcascade_eye.xml");
                try
                {
                    //con.ConnectionString = "Provider=SQLOLEDB; Data Source=.\\SQLEXPRESS;initial catalog=Projetmemoire;integrated Security = SSPI";
                    //con.Open();
                    //Mphoto = "Select* from Photo ";
                    Jm = new OleDbDataAdapter(Mphoto, con);
                    Jm.Fill(DR, "Projetmemoire");
                    Navigate();
                    MaxRows = DR.Tables[0].Rows.Count;
                    con.Close();
                    a = 0;
                    //Load previously trained faces and labels for each image
                    string Labelsinfo = File.ReadAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt");
                    string[] Labels = Labelsinfo.Split('%');
                    NumLabels = Convert.ToInt16(Labels[0]);
                    ContTrain = NumLabels;
                    string LoadFaces;

                    for (int tf = 1; tf < NumLabels+1; tf++)
                    {
                        LoadFaces = "face" + tf + ".bmp";
                        trainingImages.Add(new Image<Gray, byte>(Application.StartupPath + "/TrainedFaces/"+ LoadFaces));
                        labels.Add(Labels[tf]);
                    }
               
                }
                catch(Exception e)
                {
                    //MessageBox.Show(e.ToString());
                    MessageBox.Show("Nothing in binary database, please add at least a face(Simply train the prototype with the Add Face Button).", "Triained faces load", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
                }

            }


            private void button1_Click(object sender, EventArgs e)
            {
                //Initialize the capture device
                grabber = new Capture();
                grabber.QueryFrame();
                //Initialize the FrameGrabber event
                Application.Idle += new EventHandler(FrameGrabber);
                button1.Enabled = false;
            }


            private void button2_Click(object sender, System.EventArgs e)
            {
                try
                {

                    //Insert the record into the Photo table.
                    //OleDb uses positional "?" placeholders, filled in the order the parameters are added below.
                    AddData.CommandText = "insert into Photo (StartupPath, NumImage, NomImage, Imagedetec) values (?, ?, ?, ?)";
                    AddData.CommandType = CommandType.Text;
                    AddData.Parameters.Clear();
                    AddData.Parameters.AddWithValue("@StartupPath", Application.StartupPath);
                    AddData.Parameters.AddWithValue("@NumImage", TxtNumImage.Text);
                    AddData.Parameters.AddWithValue("@NomImage", TxtNomImage.Text);
                    //NOTE: an Emgu image object cannot be stored directly; it has to be converted to a byte[] first
                    AddData.Parameters.AddWithValue("@Imagedetec", (object)TrainedFace);
                    AddData.Connection = con;
                    con.Open();
                    AddData.ExecuteNonQuery();
                    con.Close();
                 
                    //Trained face counter
                    ContTrain = ContTrain + 1;


                    //Get a gray frame from capture device
                    gray = grabber.QueryGrayFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

                    //Face Detector
                    MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                    face,
                    1.2,
                    10,
                    Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                    new Size(30, 30));
                   
                    //Action for each element detected
                    foreach (MCvAvgComp f in facesDetected[0])
                    {
                        TrainedFace = currentFrame.Copy(f.rect).Convert<Gray, byte>();
                        break;
                    }

                    //Resize the detected face image to force it to the same size as the
                    //test image, using cubic interpolation
                    TrainedFace = result.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                    trainingImages.Add(TrainedFace);
                    labels.Add(TxtNomImage.Text);

                    //Show face added in gray scale
                    imageBox1.Image = TrainedFace;
                    //Write the number of trained faces in a text file for later loading
                    File.WriteAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", trainingImages.ToArray().Length.ToString() + "%");

                    //Write the labels of trained faces in a text file for later loading
                    for (int i = 1; i < trainingImages.ToArray().Length + 1; i++)
                    {
                       

                    trainingImages.ToArray()[i - 1].Save(Application.StartupPath + "/TrainedFaces/face" + i + ".bmp");
                    File.AppendAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", labels.ToArray()[i - 1] + "%");

                       // MessageBox.Show("Vous avez ajouté un enregistrement avec succès");
                      

                    }

                    MessageBox.Show(TxtNomImage.Text + "´s face detected and added :)", "Training OK", MessageBoxButtons.OK, MessageBoxIcon.Information);
                }
                catch
                {
                    MessageBox.Show("Enable the face detection first", "Training Fail", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
                    MessageBox.Show("Probleme d'ajout de l'enregistrement");
                }
            }


            void FrameGrabber(object sender, EventArgs e)
            {
                label3.Text = "0";
                //label4.Text = "";
                NamePersons.Add("");

                //Get the current frame from the capture device
                currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

                        //Convert it to Grayscale
                        gray = currentFrame.Convert<Gray, Byte>();

                        //Face Detector
                        MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                      face,
                      1.2,
                      10,
                      Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                      new Size(20, 20));

                        //Action for each element detected
                        foreach (MCvAvgComp f in facesDetected[0])
                        {
                            t = t + 1;
                            result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                            //Draw a red rectangle around the detected face on the color frame
                            currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);
                         

                            if (trainingImages.ToArray().Length != 0)
                            {
                                //TermCriteria for face recognition, using the number of trained images as maxIteration
                            MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                            //Eigen face recognizer
                            EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                               trainingImages.ToArray(),
                               labels.ToArray(),
                               3000,
                               ref termCrit);

                            name = recognizer.Recognize(result);
                          
                                //Draw the label for each face detected and recognized
                            currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));

                            }

                                NamePersons[t-1] = name;
                                NamePersons.Add("");


                            //Set the number of faces detected on the scene
                            label3.Text = facesDetected[0].Length.ToString();

                        }
                            t = 0;

                            //Names concatenation of persons recognized
                        for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
                        {
                            names = names + NamePersons[nnn] + ", ";
                        }
                        //Show the faces processed and recognized
                        imageBoxFrameGrabber.Image = currentFrame;
                        label4.Text = names;
                        names = "";
                        //Clear the list(vector) of names
                        NamePersons.Clear();

                    }
            private void FrmPrincipal_Load(object sender, EventArgs e)
            {

            }
          
        }
    }

    I can't find the solution. Please help!

    Thanks in advance!
    Friday, September 5, 2014 10:13

All replies

  • To save an image to the database, you need to do the following:

    Image img = picturebox1.Image;
    byte[] arr;
    ImageConverter converter = new ImageConverter();
    arr = (byte[])converter.ConvertTo(img, typeof(byte[]));

    // Pass the byte array as a parameter; concatenating it into the SQL string
    // would only insert the text "System.Byte[]", not the image data.
    command.CommandText = "INSERT INTO ImagesTable (Image) VALUES (@Image)";
    command.CommandType = CommandType.Text;
    command.Parameters.AddWithValue("@Image", arr);
    command.ExecuteNonQuery();

    The idea is to create a byte array that contains the image.
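
    Applied to the Emgu objects from the question, a minimal sketch could look like the helper below. The Photo table layout (NumImage int, NomImage nvarchar(50), Image varbinary(max)) and the helper name SavePhoto are assumptions for illustration, not taken from your project; varbinary(max) is available in SQL Server 2005.

    using System;
    using System.Data;
    using System.Data.SqlClient;
    using System.Drawing;
    using System.Drawing.Imaging;
    using System.IO;

    static class PhotoStore
    {
        // Hypothetical helper: stores one captured face in an assumed Photo table
        // (NumImage int, NomImage nvarchar(50), Image varbinary(max)).
        public static void SavePhoto(string connectionString, int numImage, string nomImage, Bitmap face)
        {
            byte[] imageBytes;
            using (MemoryStream ms = new MemoryStream())
            {
                // Serialize the bitmap to bytes (an Emgu image can be converted first with .ToBitmap())
                face.Save(ms, ImageFormat.Bmp);
                imageBytes = ms.ToArray();
            }

            using (SqlConnection con = new SqlConnection(connectionString))
            using (SqlCommand cmd = new SqlCommand(
                "INSERT INTO Photo (NumImage, NomImage, Image) VALUES (@NumImage, @NomImage, @Image)", con))
            {
                cmd.Parameters.AddWithValue("@NumImage", numImage);
                cmd.Parameters.AddWithValue("@NomImage", nomImage);
                cmd.Parameters.Add("@Image", SqlDbType.VarBinary, -1).Value = imageBytes;
                con.Open();
                cmd.ExecuteNonQuery();
            }
        }
    }

    From button2_Click it could then be called with something like SavePhoto(yourConnectionString, int.Parse(TxtNumImage.Text), TxtNomImage.Text, TrainedFace.ToBitmap()); (TxtNumImage, TxtNomImage and TrainedFace come from your code, the connection string is yours).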

    To retrieve it, you need to do the following:

    SqlDataAdapter dataAdapter = new SqlDataAdapter(new SqlCommand("SELECT Image FROM ImagesTable WHERE image_id = 1", yourConnectionReference));
    DataSet dataSet = new DataSet();
    dataAdapter.Fill(dataSet);
    
    if (dataSet.Tables[0].Rows.Count == 1)
    {
        Byte[] data = new Byte[0];
        data = (Byte[])(dataSet.Tables[0].Rows[0]["Image"]);
        MemoryStream mem = new MemoryStream(data);
        yourPictureBox.Image= Image.FromStream(mem);
    } 
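
    If the image name and number also need to come back for the labels, the same pattern extends as in the sketch below. It assumes the Photo table and TxtNomImage from the question; labelNumImage, labelNomImage, yourPictureBox and yourConnectionString are placeholders for your own controls and connection string, and System.Data.SqlClient, System.Drawing and System.IO must be imported.

    using (SqlConnection con = new SqlConnection(yourConnectionString))
    using (SqlCommand cmd = new SqlCommand(
        "SELECT NumImage, NomImage, Image FROM Photo WHERE NomImage = @NomImage", con))
    {
        cmd.Parameters.AddWithValue("@NomImage", TxtNomImage.Text);
        con.Open();
        using (SqlDataReader reader = cmd.ExecuteReader())
        {
            if (reader.Read())
            {
                // Show the image number and name on the labels
                labelNumImage.Text = reader["NumImage"].ToString();
                labelNomImage.Text = reader["NomImage"].ToString();

                // Rebuild the picture from the stored bytes
                // (keep the MemoryStream alive as long as the Image is in use)
                byte[] data = (byte[])reader["Image"];
                MemoryStream mem = new MemoryStream(data);
                yourPictureBox.Image = Image.FromStream(mem);
            }
        }
    }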


    William John Adam Trindade
    Analyst-Programmer
    ----------------------------------------------------------

    Thursday, October 9, 2014 13:45