{"id":1733,"date":"2021-06-02T13:54:14","date_gmt":"2021-06-02T13:54:14","guid":{"rendered":"https:\/\/blogzynergy.com\/bz\/?p=1733"},"modified":"2021-06-02T13:54:16","modified_gmt":"2021-06-02T13:54:16","slug":"the-good-the-bad-and-the-future-of-deepfakes","status":"publish","type":"post","link":"https:\/\/blogzynergy.com\/bz\/index.php\/2021\/06\/02\/the-good-the-bad-and-the-future-of-deepfakes\/","title":{"rendered":"The good, the bad and the future of deepfakes"},"content":{"rendered":"<a class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-facebook nolightbox\" data-provider=\"facebook\" target=\"_blank\" rel=\"nofollow\" title=\"Share on Facebook\" href=\"https:\/\/www.facebook.com\/sharer.php?u=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;t=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes&#038;s=100&#038;p&#091;url&#093;=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;p&#091;images&#093;&#091;0&#093;=https%3A%2F%2Fblogzynergy.com%2Fbz%2Fwp-content%2Fuploads%2F2021%2F06%2Fimage.jpeg&#038;p&#091;title&#093;=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px;margin-right:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"Facebook\" title=\"Share on Facebook\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/facebook.png\" \/><\/a><a class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-twitter nolightbox\" data-provider=\"twitter\" target=\"_blank\" 
rel=\"nofollow\" title=\"Share on Twitter\" href=\"https:\/\/twitter.com\/intent\/tweet?url=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;text=Hey%20check%20this%20out\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px;margin-right:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"twitter\" title=\"Share on Twitter\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/twitter.png\" \/><\/a><a class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-reddit nolightbox\" data-provider=\"reddit\" target=\"_blank\" rel=\"nofollow\" title=\"Share on Reddit\" href=\"https:\/\/www.reddit.com\/submit?url=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;title=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px;margin-right:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"reddit\" title=\"Share on Reddit\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/reddit.png\" \/><\/a><a class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-pinterest nolightbox\" data-provider=\"pinterest\" target=\"_blank\" rel=\"nofollow\" title=\"Pin it with Pinterest\" 
href=\"https:\/\/pinterest.com\/pin\/create\/button\/?url=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;media=https%3A%2F%2Fblogzynergy.com%2Fbz%2Fwp-content%2Fuploads%2F2021%2F06%2Fimage.jpeg&#038;description=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px;margin-right:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"pinterest\" title=\"Pin it with Pinterest\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/pinterest.png\" \/><\/a><a class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-linkedin nolightbox\" data-provider=\"linkedin\" target=\"_blank\" rel=\"nofollow\" title=\"Share on Linkedin\" href=\"https:\/\/www.linkedin.com\/shareArticle?mini=true&#038;url=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;title=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px;margin-right:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"linkedin\" title=\"Share on Linkedin\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/linkedin.png\" \/><\/a><a class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-mail 
nolightbox\" data-provider=\"mail\" rel=\"nofollow\" title=\"Share by email\" href=\"mailto:?subject=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes&#038;body=Hey%20check%20this%20out:%20https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"mail\" title=\"Share by email\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/mail.png\" \/><\/a>\n<p>The race between creating and eliminating deepfakes is in full swing.\u00a0Technology is becoming more and more accessible and deepfakes are becoming increasingly difficult to distinguish from real ones.<\/p>\n\n\n\n<ul class=\"wp-block-list\"><li>What exactly are deepfakes?\u00a0A refresher<\/li><li>How are deepfakes made?<\/li><li>The Good \u2013 An Optimistic View<\/li><li>The dangers of deepfakes<\/li><li>What can we do to distinguish fake from real?<\/li><li>The future of fake \u2013 and other considerations<\/li><\/ul>\n\n\n\n<figure class=\"wp-block-image size-large\"><img loading=\"lazy\" decoding=\"async\" width=\"302\" height=\"167\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/uploads\/2021\/06\/image.jpeg\" alt=\"Artificial Intelligence- Why it is Essential for Survival? 
- Next Tech  Magazine\" class=\"wp-image-1734\" srcset=\"https:\/\/blogzynergy.com\/bz\/wp-content\/uploads\/2021\/06\/image.jpeg 302w, https:\/\/blogzynergy.com\/bz\/wp-content\/uploads\/2021\/06\/image-300x166.jpeg 300w\" sizes=\"auto, (max-width: 302px) 85vw, 302px\" \/><\/figure>\n\n\n\n<p>Despite a huge increase in positive applications, the dangers of deepfakes continue to raise widespread concern as they become more widely known and better understood.&nbsp;We are inundated with content describing how rapidly this deep learning technology is being developed, that deepfake tech is becoming more sophisticated and easier to access, and what the risks are when this technology falls into the wrong hands.&nbsp;Like it or not, and as disturbing as the negative consequences of using deepfakes may be, they are and will remain a part of our lives.&nbsp;And even though deepfakes receive mostly negative publicity, there are also many reasons to be excited about this technology and its many positive applications.&nbsp;Deepfake technology, for example, makes it possible to create completely new types of content and democratize access to creation tools \u2013 which until recently were either too expensive or too complicated for the average person.&nbsp;The use of artificial intelligence to create realistic simulations could actually be a positive development for humanity.<\/p>\n\n\n\n<h2 class=\"wp-block-heading\">What exactly are deepfakes?&nbsp;A refresher<\/h2>\n\n\n\n<p>Giving a comprehensive definition of deepfakes is not easy.\u00a0The term deepfake combines the words deep (from deep learning) and fake (fake).\u00a0We know that deepfakes are made possible by deep learning technology, a machine learning technique that allows computers to learn by following examples.\u00a0Deepfake technology uses a person&#8217;s behavior \u2013 such as the voice, image and typical facial expressions or body movements \u2013 to create completely new content that is virtually 
indistinguishable from authentic content.\u00a0This technology can also be used to make people say or do things in videos that they never said or did, or to replace someone in a video with another person, or to create video content featuring important political figures or celebrities or even with people who don&#8217;t exist at all.\u00a0The manipulation of existing \u2013 or the creation of new \u2013 digital images is not new.\u00a0In fact, AI-generated pornographic content first surfaced in late 2017.\u00a0The creation of this type of video material initially took at least a year and was done by experts in high-tech studios.\u00a0But thanks to the rapid development of deepfake technology in recent years, this can now be done a lot faster and easier and the results are much more convincing.\u00a0The term deepfakes was originally used for specific pornographic content, but now it is applied much more broadly to describe many different types of AI-generated or synthetic video content.\u00a0AI-generated pornographic content first surfaced in late 2017.\u00a0The creation of this type of video material initially took at least a year and was done by experts in high-tech studios.\u00a0But thanks to the rapid development of deepfake technology in recent years, this can now be done a lot faster and easier and the results are much more convincing.\u00a0The term deepfakes was originally used for specific pornographic content, but now it is applied much more broadly to describe many different types of AI-generated or synthetic video content.\u00a0AI-generated pornographic content first surfaced in late 2017.\u00a0The creation of this type of video material initially took at least a year and was done by experts in high-tech studios.\u00a0But thanks to the rapid development of deepfake technology in recent years, this can now be done a lot faster and easier and the results are much more convincing.\u00a0The term deepfakes was originally used for specific pornographic content, 
but now it is applied much more broadly to describe many different types of AI-generated or synthetic video content.\u00a0But thanks to the rapid development of deepfake technology in recent years, this can now be done a lot faster and easier and the results are much more convincing.\u00a0The term deepfakes was originally used for specific pornographic content, but now it is applied much more broadly to describe many different types of AI-generated or synthetic video content.\u00a0But thanks to the rapid development of deepfake technology in recent years, this can now be done a lot faster and easier and the results are much more convincing.\u00a0The term deepfakes was originally used for specific pornographic content, but now it is applied much more broadly to describe many different types of AI-generated or synthetic video content.<\/p>\n\n\n\n<figure class=\"wp-block-image size-large\"><img loading=\"lazy\" decoding=\"async\" width=\"300\" height=\"168\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/uploads\/2021\/06\/image-1.jpeg\" alt=\"Artificial intelligence brings new vision to healthcare\" class=\"wp-image-1735\" \/><\/figure>\n\n\n\n<h2 class=\"wp-block-heading\">How are deepfakes made?<\/h2>\n\n\n\n<p>To create a realistic deepfake video of an existing person, a neural network must be\u00a0<a href=\"https:\/\/www.scientificamerican.com\/article\/detecting-deepfakes1\/\">trained<\/a>\u00a0using video images of this person<a href=\"https:\/\/www.scientificamerican.com\/article\/detecting-deepfakes1\/\"><\/a>, including an extensive range of facial expressions, in all kinds of different light and from every angle imaginable, so that the artificial intelligence gains a deep &#8216;understanding&#8217; of not only the appearance but also the &#8216;essence&#8217; of the person in question.\u00a0The trained network is then combined with techniques such as advanced computer graphics to place a made-up version of this person on top of the person in the 
original video, as it were.\u00a0While this process is much faster than it was a few years ago, truly credible results are still quite time consuming and complicated.\u00a0However, cutting-edge technology, such as Samsung AI technology developed in a Russian AI lab, makes it possible to create a deepfake video with just a handful of images \u2013 or even just one.<\/p>\n\n\n\n<h2 class=\"wp-block-heading\">The Good \u2013 An Optimistic View<\/h2>\n\n\n\n<p>While the not-so-kosher uses of deepfakes are quite frightening, this technology also offers many benefits and we regularly find new, positive uses for deepfaketech.&nbsp;Think, for example, of editing video images without having to do reshoots, or&nbsp;<a href=\"https:\/\/www.forbes.com\/sites\/simonchandler\/2020\/03\/09\/why-deepfakes-are-a-net-positive-for-humanity\/?sh=746b31fb2f84\">&#8216;<\/a>&nbsp;bringing it back&nbsp;<a href=\"https:\/\/www.forbes.com\/sites\/simonchandler\/2020\/03\/09\/why-deepfakes-are-a-net-positive-for-humanity\/?sh=746b31fb2f84\">to life&#8217;<\/a>from artists who are no longer with us.&nbsp;For example, researchers at the Samsung AI lab in Moscow recently succeeded in converting Da Vinci&#8217;s Mona Lisa into video.&nbsp;Through deep learning technology, they managed to make this famous lady move her head, mouth and eyes.&nbsp;Deepfake technology was also used at the Dal\u00ed Museum in Florida to display a life-size deepfake of surrealist artist Salvador Dal\u00ed that features several quotes he has written or spoken during his art career.&nbsp;With deepfake technology, we can experience things that never existed, or see all kinds of future possibilities before us.&nbsp;In addition to the many different possible applications in art and entertainment, this technology can also do all kinds of impressive things in education and healthcare.&nbsp;Below are a few more interesting examples of this groundbreaking technology.<\/p>\n\n\n\n<p><strong>Speech manipulator converts text to 
speech<\/strong><\/p>\n\n\n\n<p>Adobe&#8217;s VoCo software \u2013 still in the research and prototype phase \u2013 lets you convert text into speech and edit it, just as you would images in Photoshop.\u00a0Suppose you want to comment on a film clip by, for example, David Attenborough or Morgan Freeman.\u00a0With VoCo, this is now possible without having to spend a fortune hiring the real voice actors.\u00a0The software allows you to modify an existing audio recording of a person by adding words and phrases, without the original narrator ever saying them.\u00a0During a live demo in San Diego, an Adobe employee transformed a digitized recording of a man who had originally said &#8220;I kissed my dogs and my wife&#8221; to &#8220;I kissed Jordan three times.&#8221;\u00a0A 20 minute speech recording was used to arrive at this result.\u00a0The transcribed version of this recording was then modified and converted into the new voice clip at the touch of a button.\u00a0As impressive as this technology may be, these kinds of developments could further exacerbate the already problematic situation of fake news and further undermine public trust in journalism.\u00a0However, Adobe has announced that it is taking action to address these potential challenges.\u00a0Such developments could further exacerbate the already problematic situation of fake news and further undermine public trust in journalism.\u00a0However, Adobe has announced that it is taking action to address these potential challenges.\u00a0Such developments could further exacerbate the already problematic situation of fake news and further undermine public trust in journalism.\u00a0However, Adobe has announced that it is taking action to address these potential challenges.<\/p>\n\n\n\n<figure class=\"wp-block-image size-large\"><img loading=\"lazy\" decoding=\"async\" width=\"288\" height=\"175\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/uploads\/2021\/06\/image-2.jpeg\" alt=\"How Businesses Can Benefit From 
Artificial Intelligence in 2021?\" class=\"wp-image-1736\" \/><\/figure>\n\n\n\n<p><strong>Convincing dubbing through automated facial resuscitation<\/strong><\/p>\n\n\n\n<p>Synthesia, an AI software company founded by a team of researchers and entrepreneurs from Stanford, Cambridge, University College London and the Technical University of Munich,\u00a0<a href=\"https:\/\/www.synthesia.io\/features\">introduces<\/a> a new kind of media \u2013 facial resuscitation software \u2013 that enables automated and highly persuasive dubbing.\u00a0The AI \u200b\u200bstartup was put on the map with the release of a synthetic video in which David Beckham talks about the deadly disease Malaria in nine different languages.\u00a0This technology can be used in a variety of ways and offers creators around the world an enormous amount of additional possibilities.\u00a0Synthesia and the international news service Reuters recently teamed up to create the world&#8217;s first synthesized, newsreader-spoken newscasts.\u00a0For this they used basic deepfake technology, with which they made new newscasts based on pre-recorded clips from a newsreader.\u00a0By far the most remarkable thing is that this technology makes it possible to automatically generate news items that can also be personalized for individual viewers.\u00a0Synthesia&#8217;s technology can be used for training purposes to develop video modules in more than 40 languages \u200b\u200band create or modify content easily and quickly.\u00a0With this technology, you can also turn text and slides into video presentations in minutes, without the need for video editing skills.\u00a0This is useful for purposes such as business communication, among other things.\u00a0Synthesia&#8217;s technology can be used for training purposes to develop video modules in more than 40 languages \u200b\u200band create or modify content easily and quickly.\u00a0With this technology, you can also turn text and slides into video presentations in minutes, 
without the need for video editing skills.\u00a0This is useful for purposes such as business communication, among other things.\u00a0Synthesia&#8217;s technology can be used for training purposes to develop video modules in more than 40 languages \u200b\u200band create or modify content easily and quickly.\u00a0With this technology, you can also turn text and slides into video presentations in minutes, without the need for video editing skills.\u00a0This is useful for purposes such as business communication, among other things.<\/p>\n\n\n\n<p><strong>With deepfakes anyone can dance like a pro<\/strong><\/p>\n\n\n\n<p>Tinghui Zhou, CEO and co-founder of Humen AI, a dance deepfakes startup, has teamed up with his research colleagues at UC Berkeley to\u00a0<a href=\"https:\/\/www.bloomberg.com\/news\/articles\/2019-08-19\/deepfakes-can-help-you-dance\">develop<\/a>\u00a0technology\u00a0<a href=\"https:\/\/www.bloomberg.com\/news\/articles\/2019-08-19\/deepfakes-can-help-you-dance\">that<\/a>\u00a0lets anyone dance like a pro.\u00a0Think, for example, of the impressive dance moves of Bruno Mars.\u00a0For this, the gentlemen used a type of artificial intelligence called GANs (generative adversarial networks), with which you can &#8216;read&#8217; someone&#8217;s dance steps, copy them and\u00a0&#8216;paste&#8217; them\u00a0on a\u00a0<em>target<\/em>\u00a0body.\u00a0The system can be used for all kinds of dance styles \u2013 such as ballet, jazz, modern or hip-hop.\u00a0First, videos of the\u00a0<em>source<\/em>\u00a0dancer and the\u00a0<em>target<\/em>\u00a0dancer are recorded.\u00a0Then the images of the dancers <a href=\"https:\/\/www.rarenorm.com\/tech\/deepfakes-for-dancing-you-can-now-use-ai-to-fake-those-dance-moves-you-always-wanted\/\">turned<\/a>\u00a0into stick figures.\u00a0After that, the swap takes place through a neural network\u00a0synthesis video\u00a0of the\u00a0<em>target<\/em>\u00a0dancer based on the stick figure movements of 
the\u00a0<em>source<\/em>dancer \u2013 and voila!\u00a0All you need for this are some video images and the right AI software.\u00a0It&#8217;s impressive work and traditionally this kind of video manipulation with a whole team would take you several days.\u00a0Humen AI aims to turn the dance video gimmick into an app and eventually develop a paid service for advertising agencies, video game developers, and even Hollywood studios.\u00a0Ricky Wong, co-founder of Humen AI, says: \u201cWith three minutes of motion images and material from professionals, you can make anyone dance.\u00a0We try to bring joy and fun to people&#8217;s lives.\u201d\u00a0Zhou adds, \u201cThe future we envision is one where anyone can create Hollywood-level content.\u201d<\/p>\n\n\n\n<p><strong>Smart assistants and virtual people<\/strong><\/p>\n\n\n\n<p>Smart assistants like Siri, Alexa, and Cortana have been around for a while and have improved a lot in recent years.&nbsp;However, they still feel somewhat like a new user interface that should give you exact instructions, rather than a virtual creature that you can interact with naturally.&nbsp;And one of the most important steps in creating credible virtual &#8220;human&#8221; assistants that we can interact with is the ability to mimic facial expressions, body posture, gestures and voices.&nbsp;These so-called virtual persons are slowly but surely becoming mainstream \u2013 think of digital influencers for example \u2013 and with them we communicate just like we do with real people.&nbsp;And while digital influencers don&#8217;t really respond to you in their own words,&nbsp;because their content is created by storytellers, they herald a future of &#8220;natural&#8221; interaction with real virtual creatures.&nbsp;With deepfake technology trained with countless examples of human behavior, we could give smart assistants the ability to make and understand high-quality conversations.&nbsp;And thanks to the same technology, even digital 
influencers can develop the ability to respond visually \u2013 in real time \u2013 in credible ways.&nbsp;Welcome to the future of virtual people.&nbsp;And thanks to the same technology, even digital influencers can develop the ability to respond visually \u2013 in real time \u2013 in credible ways.&nbsp;Welcome to the future of virtual people.&nbsp;And thanks to the same technology, even digital influencers can develop the ability to respond visually \u2013 in real time \u2013 in credible ways.&nbsp;Welcome to the future of virtual people.<\/p>\n\n\n\n<p><strong>Deep generative models offer new possibilities in healthcare<\/strong><\/p>\n\n\n\n<p>Deepfake technology can also offer many benefits in other sectors, such as healthcare.\u00a0The tech can be used to synthesize realistic data to help researchers develop new treatment methods for diseases so that they are no longer dependent on patient data.\u00a0Work in this area has already been conducted by a team of researchers from the Mayo Clinic, the MGH &amp; BWH Center for Clinical Data Science, and NVIDIA, who have collaborated on using GANs (generative adversarial networks) to create synthetic brain MRI scans. 
The team trained its GAN with data from two brain MRI datasets: one contained about two hundred MRIs showing tumors and the other thousands of MRIs showing signs of Alzheimer&#8217;s.\u00a0According to the researchers, algorithms trained with a combination of &#8220;fake&#8221; medical images and 10 percent real images became just as adept at detecting tumors as algorithms trained only with real images.\u00a0In their <a href=\"https:\/\/arxiv.org\/pdf\/1807.10225.pdf\">paper<\/a> the researchers say: \u201cData diversity is critical to success in training deep learning models.\u00a0Medical imaging data sets are often unbalanced because pathological findings are generally rare, which poses quite a few challenges when training deep learning models.\u00a0We propose a method to generate synthetic MRI images of brain tumors by training a GAN.\u00a0This provides an automatable, low-cost source of diverse data that can be used to complement the training set.\u201d\u00a0Because the images are generated synthetically, you no longer have to deal with privacy or patient data challenges.\u00a0The generated data can be easily shared with different medical institutions,\u00a0creating an endless variety of combinations that can be used to improve and speed up the work.\u00a0The team hopes the model will help scientists generate new data that can be used to detect anomalies more quickly and accurately.<\/p>\n\n\n\n<h2 class=\"wp-block-heading\">The dangers of deepfakes<\/h2>\n\n\n\n<p>As exciting and promising as deepfake technology may be, these developments also pose several serious challenges.\u00a0The most important of these is the distribution of pornographic material featuring persons who have not given their consent.\u00a0And\u00a0<a href=\"https:\/\/www.forbes.com\/sites\/robtoews\/2020\/05\/25\/deepfakes-are-going-to-wreak-havoc-on-society-we-are-not-prepared\/?sh=3f9903d57494\">according to<\/a> a DeepTrace report, a whopping 96 percent of the deepfakes 
currently found online are made up of this type of material.\u00a0There have also been several reports of deepfake audio being used for identity theft and extortion.\u00a0The use of deepfakes potentially poses a huge security and political destabilization risk, as the technology can be used to spread fake news and lead to an increase in cybercrime, revenge porn, harassment, abuse and (fake) scandals.\u00a0There is also a good chance that video images and audio files will soon no longer be allowed to be used as evidence in court, as they will become almost indistinguishable from the real thing.\u00a0<a href=\"https:\/\/www.brookings.edu\/research\/is-seeing-still-believing-the-deepfake-challenge-to-truth-in-politics\/#cancel\">According to<\/a> Brookings Institution, the social and political dangers of deepfakes include \u201cdisrupting democratic discourse;\u00a0rigging elections;\u00a0decreased trust in institutions;\u00a0declining journalistic quality;\u00a0exacerbation of social divisions;\u00a0undermining public security;\u00a0and inflicting hard-to-repair damage to the reputation of prominent individuals.\u201d\u00a0Deepfakes can also cause serious financial problems.\u00a0Some examples include a British energy company that was tricked into making a $243 million fraudulent wire transfer and an audio deepfake used to defraud a US CEO out of $10 million.\u00a0And here are some more important examples of the dangers of deepfakes.<\/p>\n\n\n\n<p><strong>New Year&#8217;s video speech leads to attempted military coup<\/strong><\/p>\n\n\n\n<p>The fact that more and more \u2013 and increasingly sophisticated \u2013 deepfakes are circulating on the internet can mean that any video that seems slightly outlandish can cause chaos.\u00a0An example is the New Year video speech by Gabon&#8217;s President Ali Bongo in 2019. 
The president had not been seen in public for several months and the lack of answers from the government led to more and more speculation and doubt.\u00a0The video subsequently caused growing suspicion among people in Gabon and international observers about the president&#8217;s well-being.\u00a0Although the purpose of the video was to\u00a0<a href=\"https:\/\/www.motherjones.com\/politics\/2019\/03\/deepfake-gabon-ali-bongo\/\">help<\/a>\u00a0quell speculation about the president&#8217;s poor health, this plan failed because Bongo&#8217;s opponents were not convinced of the video&#8217;s authenticity.\u00a0The opposition believed there was something odd about the president&#8217;s locomotion in the video footage.\u00a0A week after the video&#8217;s release, Gabon&#8217;s military called for a coup d&#8217;\u00e9tat, which ultimately failed.\u00a0Hany Farid, a computer science professor who specializes in digital forensics, said: &#8220;I just watched several other videos of President Bongo and they don&#8217;t resemble the speech patterns in this video, and even his appearance doesn&#8217;t look the same&#8221;.\u00a0Farid added that he could not give a definitive assessment but that he felt &#8220;something was not right&#8221;.<\/p>\n\n\n\n<p><strong>Deepfakes as blackmail material for cheerleaders<\/strong><\/p>\n\n\n\n<p>A Pennsylvania woman was recently arrested for creating deepfakes of underage cheerleaders.\u00a0The victims were her daughter&#8217;s rivals for the local cheerleading squad.\u00a0With the fake images, the 50-year-old mother tried to put the girls in a bad light.\u00a0Using photos and videos that the teens had shared on social media, the woman created fake photos and videos, in which it appeared that the girls were drinking alcohol and taking drugs naked.\u00a0The woman then sent these deepfakes to the coaches to get the teens disqualified.\u00a0The fake material was also sent to the girls themselves with a message urging them to commit 
suicide.\u00a0According to the American media, the daughter herself would not have known about her mother&#8217;s actions.\u00a0The mother is being charged with cyber abuse and related crimes.\u00a0With regard to the first victim <a href=\"https:\/\/www.iflscience.com\/technology\/mom-accused-of-creating-deepfakes-to-harass-cheerleader-daughters-rivals\/\">says<\/a>\u00a0Matt Weintraub, DA of Bucks County: \u201cThe suspect edited a real photo with some photoshop app to make it look like this teenage girl had no clothes on.\u00a0But it was a social media screenshot showing the teen wearing swimsuits.\u201d<\/p>\n\n\n\n<p><strong>Deepfake bots on Telegram create nude photos of women and children<\/strong><\/p>\n\n\n\n<p>Last year, more than 100,000 fake nude photos were\u00a0<a href=\"https:\/\/www.moneycontrol.com\/news\/technology\/deepfake-bots-create-fake-nudes-of-women-aid-public-shaming-and-extortion-6081541.html\">generated<\/a>\u00a0by an ecosystem of bots\u00a0at the request of Telegram users\u00a0.\u00a0The foundation of this ecosystem is an AI-powered bot that allows users to &#8220;strip&#8221; the clothing of images of women so that they appear naked.\u00a0<a href=\"https:\/\/sensity.ai\/reports\/\">according to<\/a> a report from the visual threat intelligence firm Sensity, \u201cmost of the original images appeared to have come from social media pages or directly from private communications, which the individuals in question probably didn&#8217;t know were being targeted.\u00a0While this case mostly involved individuals, we also identified a significant number of social media influencers, game streamers and celebrities in the entertainment industry.\u00a0In addition, a limited number of images appeared to be underage, suggesting that some were primarily using the bot to generate and distribute pedophile content.\u201d\u00a0The deepfakes have been shared on various social media platforms with the aim of public shaming, revenge or extortion.\u00a0Most 
deepfake bots use DeepNude technology,\u00a0but we see more and more similar apps popping up on the internet.\u00a0All you have to do is upload a photo and then you&#8217;ll get a manipulated image back in minutes.\u00a0Unfortunately, since Telegram uses encrypted messages, users can easily create anonymous accounts that are virtually impossible to trace.\u00a0And while encryption technology is meant to protect users&#8217; privacy and evade surveillance, it&#8217;s not hard to see how you can use these features for shady ends as well.<\/p>\n\n\n\n<h2 class=\"wp-block-heading\">What can we do to distinguish fake from real?<\/h2>\n\n\n\n<p>As it stands, the number of deepfake videos circulating online has increased at an astonishing estimated 900 percent annual rate.&nbsp;As technological advances have made it increasingly easy to produce deepfake content, more and more experts are wondering how we can curb the malicious use of this technology.&nbsp;One of the ways to do this \u2013 as in the case of cybercrime and phishing \u2013 is to raise public awareness and educate people about the dangers of deepfakes.&nbsp;Many companies have now launched technologies to recognize fake content, prevent its distribution or verify authentic content through blockchain or watermarks.&nbsp;However, the downside is that these detection and authentication methods can also be used by those same malicious actors to 
create even more convincing deepfakes.&nbsp;Here are some examples of technologies that have been developed to combat the misuse of deepfakes.<\/p>\n\n\n\n<p><strong>Deepfake policies of social media platforms<\/strong><\/p>\n\n\n\n<p>Social networks play the most important role in preventing deepfakes from being used for malicious purposes.&nbsp;Deepfakes are currently seen by social media platforms as any other content that is misleading or could lead to people being duped or otherwise disadvantaged.&nbsp;The policy of Instagram and Facebook, for example, is to&nbsp;<a href=\"https:\/\/about.fb.com\/news\/2020\/01\/enforcing-against-manipulated-media\/\">remove<\/a>&nbsp;&#8216;manipulated media&#8217;, excluding parodies.&nbsp;YouTube bans manipulated content that is misleading or poses serious risks, and TikTok removes &#8220;digital counterfeits&#8221; \u2014 including false health information \u2014 that are misleading and can cause harm.&nbsp;Reddit removes content that misleadingly or deceptively impersonates people or entities, but makes an exception for satire and parody.&nbsp;However, as the number and quality of deepfakes continue to increase, it is unclear how social networks will be able to maintain these policies in the future.&nbsp;One thing they could do is automatically label deepfakes, whether or not they are harmful, so that at least more awareness is created.<\/p>\n\n\n\n<p><strong>Spotting super realistic deepfake images<\/strong><\/p>\n\n\n\n<p>Researchers at the University of Buffalo have developed an ingenious new tool that allows them to spot super-realistic deepfakes.\u00a0In their\u00a0<a href=\"https:\/\/arxiv.org\/pdf\/2009.11924.pdf\">paper<\/a>, the researchers describe the method they have developed to distinguish authentic images from images generated by deepfake technology.\u00a0They do this by carefully studying the eyes of the person in the picture.\u00a0What the researchers found is that the reflections in both eyes of the person in an 
authentic photo are usually identical due to the same lighting conditions.\u00a0With manipulated images, however, this is usually not the case.\u00a0The tool has so far succeeded in recognizing images generated by deepfake technology in 94 percent of the cases.\u00a0Incidentally, the tool is most accurate with photos taken with the portrait setting, which is often the case with close-up portrait photos.<\/p>\n\n\n\n<p><strong>Genuine presence assurance<\/strong><\/p>\n\n\n\n<p>In the fight against the abuse of deepfakes, it is critical that you can verify that the person you think you are dealing with online is actually real \u2013 and this can be done with iProov Genuine Presence Assurance.&nbsp;The iProov system&nbsp;<a href=\"https:\/\/www.iproov.com\/iproov-system\/services\/isoc\">uses<\/a>&nbsp;biometric scans that can identify whether the person in question is indeed a living person and not a photo, video, mask, deepfake or other method to circumvent a (biometric) security system.&nbsp;The system works on mobile devices, computers or in unattended kiosks and is used by organizations around the world, such as the National Health Service (NHS) in the UK.&nbsp;The NHS has&nbsp;<a href=\"https:\/\/www.biometricupdate.com\/202005\/iproov-biometric-facial-authentication-chosen-by-nhs-to-secure-remote-onboarding\">opted<\/a>&nbsp;for iProov biometric facial authentication to improve users&#8217; onboarding experience.&nbsp;Thanks to iProov&#8217;s Flashmark facial authentication technology, remote users can securely log into the NHS app to make appointments, access medical records and request repeat prescriptions.&nbsp;The process consists of submitting an ID photo and positioning the face on the screen.&nbsp;After a short series of flashes, the user&#8217;s identity is verified and he or she can use the NHS 
app.<\/p>\n\n\n\n<p><strong>Deepfake Antivirus<\/strong><\/p>\n\n\n\n<p><a href=\"https:\/\/sensity.ai\/\">Sensity<\/a>, an Amsterdam-based company developing deep learning technologies for monitoring and detecting deepfakes, has developed a visual threat intelligence platform that applies the same deep learning processes used in creating deepfakes.&nbsp;The system combines deepfake detection with advanced video forensic analysis and monitoring capabilities.&nbsp;The platform is a kind of antivirus for deepfakes and monitors more than 500 sources on the open and dark web where the chance of finding malicious deepfakes is high.&nbsp;It warns users when they view anything that may be AI-generated synthetic content and provides detailed ratings and threat analysis.&nbsp;When you upload URLs or your own photo and video files, Sensity analyzes them to detect the latest&nbsp;AI-based media manipulation and synthesis techniques, including fake human faces in social media profiles, dating apps or online financial services accounts.&nbsp;Sensity also provides access to the world&#8217;s most comprehensive deepfake database and other visual media targeting public figures, including insights into the sectors and countries most affected by this technology.<\/p>\n\n\n\n<h2 class=\"wp-block-heading\">The future of fake \u2013 and other considerations<\/h2>\n\n\n\n<p>Pandora&#8217;s box has been\u00a0<a href=\"https:\/\/www.scientificamerican.com\/article\/detecting-deepfakes1\/\">opened<\/a> and it seems that the race between creating deepfakes and detecting and preventing them will intensify in the future.\u00a0Deepfake technology is becoming more and more accessible and it is becoming easier for &#8216;the average person&#8217; to create deepfakes themselves.\u00a0In addition, it is also becoming increasingly difficult to distinguish deepfake content from authentic content.\u00a0Deepfakes will continue to evolve and spread.\u00a0And challenges like the lack of detail in 
the synthesis will no doubt be overcome in the short term.\u00a0Furthermore, improvements in neural network structures and advances in hardware are expected to significantly reduce training and delivery times.\u00a0There are already new algorithms that can generate increasingly realistic \u2013 and almost real-time \u2013 outputs.<\/p>\n\n\n\n<p>And while the use of deepfakes for good is rapidly increasing in industries such as entertainment, news and education, these developments will simultaneously lead to even more serious threats.&nbsp;Think of increasing crime, the dissemination of fake information, synthetic identity fraud, election manipulation and political tensions.&nbsp;Another aspect to consider is that deepfakes also have a very negative impact on our freedom of choice and identity.&nbsp;Using a photo you can actually make someone do all kinds of things \u2013 which in reality never happened at all \u2013 without anyone&#8217;s permission or even knowing anything about it.<\/p>\n\n\n\n<p>It is clear that the misguided, deceptive use of deepfake technology needs to be curbed and tech experts, journalists and policy makers will play a crucial role in this.\u00a0They are the right people to inform the public about the possibilities and dangers of synthetic media such as deepfakes.\u00a0And if we teach ourselves to only trust content from solid, verified sources, we may discover that the good use of deepfakes outweighs the bad.\u00a0With greater public awareness, we can mitigate the negative impact of deepfakes, find ways to deal with them, and in the future even see that we can also take advantage of the possibilities of deepfake technology.<\/p>\n\n\n\n<p><\/p>\n<a class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-facebook nolightbox\" data-provider=\"facebook\" target=\"_blank\" rel=\"nofollow\" title=\"Share on Facebook\" 
href=\"https:\/\/www.facebook.com\/sharer.php?u=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;t=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes&#038;s=100&#038;p&#091;url&#093;=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;p&#091;images&#093;&#091;0&#093;=https%3A%2F%2Fblogzynergy.com%2Fbz%2Fwp-content%2Fuploads%2F2021%2F06%2Fimage.jpeg&#038;p&#091;title&#093;=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px;margin-right:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"Facebook\" title=\"Share on Facebook\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/facebook.png\" \/><\/a><a class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-twitter nolightbox\" data-provider=\"twitter\" target=\"_blank\" rel=\"nofollow\" title=\"Share on Twitter\" href=\"https:\/\/twitter.com\/intent\/tweet?url=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;text=Hey%20check%20this%20out\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px;margin-right:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"twitter\" title=\"Share on Twitter\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/twitter.png\" \/><\/a><a 
class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-reddit nolightbox\" data-provider=\"reddit\" target=\"_blank\" rel=\"nofollow\" title=\"Share on Reddit\" href=\"https:\/\/www.reddit.com\/submit?url=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;title=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px;margin-right:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"reddit\" title=\"Share on Reddit\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/reddit.png\" \/><\/a><a class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-pinterest nolightbox\" data-provider=\"pinterest\" target=\"_blank\" rel=\"nofollow\" title=\"Pin it with Pinterest\" href=\"https:\/\/pinterest.com\/pin\/create\/button\/?url=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;media=https%3A%2F%2Fblogzynergy.com%2Fbz%2Fwp-content%2Fuploads%2F2021%2F06%2Fimage.jpeg&#038;description=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px;margin-right:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"pinterest\" title=\"Pin it with Pinterest\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" 
src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/pinterest.png\" \/><\/a><a class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-linkedin nolightbox\" data-provider=\"linkedin\" target=\"_blank\" rel=\"nofollow\" title=\"Share on Linkedin\" href=\"https:\/\/www.linkedin.com\/shareArticle?mini=true&#038;url=https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733&#038;title=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px;margin-right:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"linkedin\" title=\"Share on Linkedin\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/linkedin.png\" \/><\/a><a class=\"synved-social-button synved-social-button-share synved-social-size-32 synved-social-resolution-single synved-social-provider-mail nolightbox\" data-provider=\"mail\" rel=\"nofollow\" title=\"Share by email\" href=\"mailto:?subject=The%20good%2C%20the%20bad%20and%20the%20future%20of%20deepfakes&#038;body=Hey%20check%20this%20out:%20https%3A%2F%2Fblogzynergy.com%2Fbz%2Findex.php%2Fwp-json%2Fwp%2Fv2%2Fposts%2F1733\" style=\"font-size: 0px;width:32px;height:32px;margin:0;margin-bottom:5px\"><img loading=\"lazy\" decoding=\"async\" alt=\"mail\" title=\"Share by email\" class=\"synved-share-image synved-social-image synved-social-image-share\" width=\"32\" height=\"32\" style=\"display: inline;width:32px;height:32px;margin: 0;padding: 0;border: none;box-shadow: none\" 
src=\"https:\/\/blogzynergy.com\/bz\/wp-content\/plugins\/social-media-feather\/synved-social\/image\/social\/regular\/64x64\/mail.png\" \/><\/a>","protected":false},"excerpt":{"rendered":"<p>The race between creating and eliminating deepfakes is in full swing.\u00a0Technology is becoming more and more accessible and deepfakes are becoming increasingly difficult to distinguish from real ones. What exactly are deepfakes?\u00a0A refresher How are deepfakes made? The Good \u2013 An Optimistic View The dangers of deepfakes What can we do to distinguish fake from &hellip; <a href=\"https:\/\/blogzynergy.com\/bz\/index.php\/2021\/06\/02\/the-good-the-bad-and-the-future-of-deepfakes\/\" class=\"more-link\">Continue reading<span class=\"screen-reader-text\"> &#8220;The good, the bad and the future of deepfakes&#8221;<\/span><\/a><\/p>\n","protected":false},"author":2,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"_exactmetrics_skip_tracking":false,"_exactmetrics_sitenote_active":false,"_exactmetrics_sitenote_note":"","_exactmetrics_sitenote_category":0,"footnotes":""},"categories":[1],"tags":[1579,1654,1650,1651,1652,1653,648,1307,30],"class_list":["post-1733","post","type-post","status-publish","format-standard","hentry","category-categories","tag-artificial-intelligence","tag-danger-of-deepfakes","tag-deepfake","tag-deepfake-antivirus","tag-deepfake-images","tag-deepfake-policies","tag-future","tag-social-media-platforms","tag-technology"],"aioseo_notices":[],"_links":{"self":[{"href":"https:\/\/blogzynergy.com\/bz\/index.php\/wp-json\/wp\/v2\/posts\/1733","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/blogzynergy.com\/bz\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/blogzynergy.com\/bz\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/blogzynergy.com\/bz\/index.php\/wp-json\/wp\/v2\/users\/2"}],"replies":[{"embeddable
":true,"href":"https:\/\/blogzynergy.com\/bz\/index.php\/wp-json\/wp\/v2\/comments?post=1733"}],"version-history":[{"count":2,"href":"https:\/\/blogzynergy.com\/bz\/index.php\/wp-json\/wp\/v2\/posts\/1733\/revisions"}],"predecessor-version":[{"id":1738,"href":"https:\/\/blogzynergy.com\/bz\/index.php\/wp-json\/wp\/v2\/posts\/1733\/revisions\/1738"}],"wp:attachment":[{"href":"https:\/\/blogzynergy.com\/bz\/index.php\/wp-json\/wp\/v2\/media?parent=1733"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/blogzynergy.com\/bz\/index.php\/wp-json\/wp\/v2\/categories?post=1733"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/blogzynergy.com\/bz\/index.php\/wp-json\/wp\/v2\/tags?post=1733"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}