\documentclass[11pt,twoside]{article}
\usepackage{amsmath, amsthm, amscd, amsfonts, amssymb, graphicx, color}
\usepackage[bookmarksnumbered, colorlinks]{hyperref} \usepackage{float}
\usepackage{lipsum}
\usepackage{afterpage}
\usepackage[labelfont=bf]{caption}
\usepackage[nottoc,notlof,notlot]{tocbibind}
%\renewcommand\bibname{References}
\def\bibname{\Large\bfseries References}
\usepackage{lipsum}
\usepackage{fancyhdr}
\pagestyle{fancy}
\fancyhf{}
\renewcommand{\headrulewidth}{0pt}
\fancyhead[LE,RO]{\thepage}
\thispagestyle{empty}
%\afterpage{\lhead{new value}}

\fancyhead[CE]{Amin Hosseini}
\fancyhead[CO]{$\Phi$-derivations and commutativity of rings and algebras}



%\topmargin=-1.6cm
\textheight 17.5cm%
\textwidth  12cm %
\topmargin   8mm  %
\oddsidemargin   20mm   %
\evensidemargin   20mm   %
\footskip=24pt     %

\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{xca}[theorem]{Exercise}
%\theoremstyle{remark}
\newtheorem{remark}[theorem]{Remark}
\renewenvironment{proof}{\par\noindent{\bfseries Proof.}\ }{\hfill$\square$\par}
\makeatletter
\def\th@newremark{\th@remark\thm@headfont{\bfseries}}
\makeatother





%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% If you want to insert other packages. Insert them here
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%\long\def\symbolfootnote[#1]#2{\begingroup%
%\def\thefootnote{\fnsymbol{footnote}}\footnote[#1]{#2}\endgroup}



 \def \thesection{\arabic{section}}


\begin{document}
%\baselineskip 9mm
%\setcounter{page}{}
\thispagestyle{plain}
{\noindent Journal of Mathematical Extension \\
Vol. XX, No. XX, (2014), pp-pp (Will be inserted by layout editor)}\\
ISSN: 1735-8299\\
URL: \url{http://www.ijmex.com}\\
\vspace*{9mm}

\begin{center}

{\Large \bf
$\Phi$-derivations and commutativity of rings and algebras \\}
%{\bf Do You Have a Subtitle? \\ If so, Write It Here}
\let\thefootnote\relax\footnote{\scriptsize Received: XXXX; Accepted: XXXX (Will be inserted by editor)}

{\bf Amin Hosseini}\vspace*{-2mm}\\
\vspace{2mm} {\small  Department of Mathematics,\\
 Kashmar Higher Education Institute, Kashmar, Iran.} \vspace{2mm}

%{\bf  Second Author$^*$\let\thefootnote\relax\footnote{$^*$Corresponding Author}}\vspace*{-2mm}\\
%\vspace{2mm} {\small   Enter affiliation here} \vspace{2mm}

\end{center}

\vspace{4mm}


{\footnotesize
\begin{quotation}
{\noindent \bfseries Abstract.} The main purpose of this paper is to investigate the effect of $\Phi$-derivations on the commutativity of rings and algebras. Let $\mathfrak{R}$ be a 2-torsion free prime ring, $d:\mathfrak{R} \rightarrow \mathfrak{R}$ be a $\Phi$-derivation such that $\Phi$ is an epimorphism and $d \Phi = \Phi d = d$. If $[\Phi(a), \Phi(x)]d(y) = d(x) [y,a]$ for all $x,y,a \in \mathfrak{R}$, then $\mathfrak{R}$ is commutative or $d$ is zero. Another result in this regard reads as follows. Let $(\mathcal{A}, \ast)$ be a unital, involutive algebra, and let $\Psi:\mathcal{A} \times \mathcal{A}\rightarrow \mathcal{A}$ be a $\ast$-two variable $\Phi$-derivation such that $\Psi(\textbf{e},a_{0}) = \textbf{e}$ for some $a_{0}\in \mathcal{A}$, where $\textbf{e}$ is the unity of $\mathcal{A}$. If $\{a \in \mathcal{A} \ : \ \Psi(a, a_0) = 0\} = \{0\}$, then $\mathcal{A}$ is commutative. Some other related results are also discussed.
\end{quotation}
\begin{quotation}
\noindent{\bf AMS Subject Classification:} 17B40; 16N60.

\noindent{\bf Keywords and Phrases:} Derivation, $\Phi$-derivation, two variable $\Phi$-derivation, prime ring.
\end{quotation}}

%\section{Introduction}
%\label{intro} % It is advised to give each section and subsection a unique label.
\section{Introduction and preliminaries}
Throughout the paper, $\mathfrak{R}$ denotes an associative ring. Let us first recall some basic definitions and fix some notations which will be used in what follows. As usual, we denote the commutator $xy - yx$ by $[x,y]$ for every pair $x, y \in \mathfrak{R}$. A ring $\mathfrak{R}$ is $n$-torsion free, where $n > 1$ is an integer, in case $n x = 0$ implies $x = 0$ ($x \in \mathfrak{R}$). Recall that a ring $\mathfrak{R}$ is called prime if for $x, y \in \mathfrak{R}$, $x \mathfrak{R} y = \{0\}$ implies that $x = 0$ or $y = 0$, and is semiprime if $x \mathfrak{R} x = \{0\}$ implies $ x = 0$. Moreover, the center of a ring (or an algebra) $\mathfrak{R}$ is $Z(\mathfrak{R}) = \{x \in \mathfrak{R} \ | \ xy = yx \text{ for all } y \in \mathfrak{R}\}$. The Jacobson radical of an algebra $\mathcal{A}$ is the intersection of all primitive ideals of $\mathcal{A}$, which is denoted by $\mathrm{rad}(\mathcal{A})$. An algebra $\mathcal{A}$ is called semisimple if $\mathrm{rad}(\mathcal{A}) = \{0\}$. For more details about the Jacobson radical, see, e.g., \cite{D1}.

An additive mapping $d: \mathfrak{R} \rightarrow \mathfrak{R}$ is called a derivation if $d(xy) = d(x)y + xd(y)$ holds for all $x,y \in \mathfrak{R}$. Let us introduce a background of our study. In 1957, Posner \cite{p} noticed the remarkable potential of derivations on commutativity of rings. Indeed, he proved that if $d$ is a nonzero derivation of a 2-torsion free prime ring $\mathfrak{R}$ such that $[[d(x),x],y] = 0$ for all $x, y \in \mathfrak{R}$, then $\mathfrak{R}$ is commutative. During a few recent decades, this result has made a great deal of excitement among the mathematicians to investigate the relation between the commutativity of rings and the existence of certain specific types of derivations. For example, many algebraists such as Bre\v{s}ar, Vukman, Ashraf, Daif and Rehman have made remarkable contributions to this area of study. Vukman \cite{v} showed that $\mathfrak{R}$ is commutative if $\mathrm{char}(\mathfrak{R}) \neq 0$ and $[[d(x),x],x] = 0$ for all $x \in \mathfrak{R}$. On the other hand, Lanski \cite{l} proved that if $[[d(x),x],y] = 0$ for any $x$ in a noncommutative Lie ideal and $y \in \mathfrak{R}$, then either $\mathfrak{R}$ is commutative or $\mathfrak{R}$ is a 2-torsion free ring and it satisfies the standard identity of degree 4. Furthermore, Ashraf and Rehman \cite{a} proved that if $\mathfrak{R}$ is a prime ring, $\mathfrak{I}$ a nonzero ideal of $\mathfrak{R}$ and $d$ is a derivation of $\mathfrak{R}$ such that $d(xy + yx) = xy + yx$ for all $x,y \in \mathfrak{I}$, then $\mathfrak{R}$ is commutative. In addition, Quadri et al. \cite{q}, extended the above-mentioned result as follows:\\
Let $\mathfrak{R}$ be a prime ring, $\mathfrak{I}$ a nonzero ideal of $\mathfrak{R}$ and $F$ a generalized derivation associated with a nonzero derivation $d$ such that $F(xy + yx) = xy + yx$ for all $x,y \in \mathfrak{I}$. Then $\mathfrak{R}$ is commutative. For more results, see, e.g. \cite{al, d, h, l, p, v, q, y}, and the references therein. After studying these articles, we realized that $\Phi$-derivations are able to affect the commutativity of rings. Let $\Phi:\mathfrak{R} \rightarrow \mathfrak{R}$ be an additive mapping. An additive mapping $d:\mathfrak{R} \rightarrow \mathfrak{R}$ is called a $\Phi$-derivation if $d(ab) = d(a)b + \Phi(a)d(b)$ holds for all $a,b \in \mathfrak{R}$. In the present study, we prove the following results:\\
Let $\mathfrak{R}$ be a 2-torsion free prime ring, and let $d:\mathfrak{R} \rightarrow \mathfrak{R}$ be a $\Phi$-derivation such that $d \Phi = \Phi d = d$. Let $\Phi$ be an epimorphism satisfying $[\Phi(a), \Phi(x)] d(y) = d(x) [y,a]$ for all $x,y,a \in \mathfrak{R}$, then $\mathfrak{R}$ is  commutative or $d$ is zero.  As another result, we prove that if $d$ is a $\Phi$-derivation of $\mathfrak{R}$ such that $\Phi(a) d(x) = d(x) a$ for all $x, a \in \mathfrak{R}$, then $\mathfrak{R}$ is commutative or d is zero. Moreover, using $\ast$-two variable $\Phi$-derivations, we show that every involutive  algebra is commutative under certain conditions. Indeed, we prove the following theorem. Let $(\mathcal{A}, \ast)$ be a unital, involutive  algebra and let $\Psi:\mathcal{A} \times \mathcal{A}\rightarrow \mathcal{A}$ be a $\ast$-two variable $\Phi$-derivation such that $\Psi(\textbf{e},a_{0}) = \textbf{e}$ for some $a_{0}\in \mathcal{A}$, where $\textbf{e}$ is the unity of $\mathcal{A}$. If $\{a \in \mathcal{A} \ : \ \Psi(a, a_0) = 0\} = \{0\}$, then $\mathcal{A}$ is commutative. As a consequence of this result, we obtain some conditions under which every $\ast$-two variable $\Phi$-derivation on a unital, involutive Banach algebra is continuous.

\section{Main results}
Throughout this section, without further mention, $\textbf{e}$ stands for the unit element of any unital algebra or unital ring. Let $\Phi:\mathfrak{R} \rightarrow \mathfrak{R}$ be an additive mapping. Recall that an additive mapping $d:\mathfrak{R} \rightarrow \mathfrak{R}$ is called a $\Phi$-derivation if $d(ab) = d(a)b + \Phi(a)d(b)$ holds for all $a,b \in \mathfrak{R}$.
\\
\\
 We begin with the following lemma which will be used extensively to prove our theorems.
\begin{lemma}\label{1} Let $\mathfrak{R}$ be a prime ring and let $d:\mathfrak{R} \rightarrow \mathfrak{R}$ be a $\Phi$-derivation.\\
(i) If $a$ is an element of $\mathfrak{R}$ such that $a d(x) = 0$ for all $x \in \mathfrak{R}$, and further $\Phi:\mathfrak{R} \rightarrow \mathfrak{R}$ is a surjective mapping, then either $a = 0$ or $d$ is zero.\\
(ii) If $a$ is an element of $\mathfrak{R}$ such that $d(x) a = 0$ for all $x \in \mathfrak{R}$, then either $a = 0$ or $d$ is zero.
\end{lemma}
\begin{proof} (i) Replacing $x$ by $xy$ in $ad(x) = 0$, we get that $$0 = a d(xy) = a d(x)y + a \Phi(x) d(y) = a \Phi(x) d(y)$$ for all $x, y \in \mathfrak{R}$. This equation with the surjectivity of $\Phi$ implies that $a x d(y) = 0$ for all $x, y \in \mathfrak{R}$. Since $\mathfrak{R}$ is prime, either $a = 0$ or $d$ is zero.\\
(ii) Straightforward.
\end{proof}

The following theorem has been motivated by \cite{p}.
\begin{theorem} \label{2} Let $\mathfrak{R}$ be a 2-torsion free prime ring. Let $d_2$ be a derivation and $d_1$ be a $\Phi$-derivation of $\mathfrak{R}$ such that $d_1 d_2$ is a $\Phi$-derivation. If $\Phi(d_2(x)) = d_2(x)$ for all $x \in \mathfrak{R}$ and further $\Phi$ is an epimorphism, then at least one of $d_1$ and $d_2$ is zero.
\end{theorem}
\begin{proof} By hypothesis, $d_1 d_2$ is a $\Phi$-derivation. Thus,
\begin{align}
d_1 d_2(ab) = d_1 d_2(a)b + \Phi(a) d_1 d_2(b).
\end{align}
Since $d_2$ is a derivation and $d_1$ is a $\Phi$-derivation, we have
\begin{align}
d_1 d_2(ab) = d_1 d_2(a) b + d_2(a)d_1(b) + d_1(a)d_2(b) + \Phi(a) d_1 d_2(b).
\end{align}
Comparing (1) and (2), we obtain that
\begin{align}
d_1(a)d_2(b) + d_2(a)d_1(b) = 0 \text{ for all } a,b \in \mathfrak{R}.
\end{align}
Replacing $a$ by $a d_1(c)$ in (3), we obtain $d_1(a d_1(c))d_2(b) + d_2(a d_1(c))d_1(b) = 0$ for all $a,b,c \in \mathfrak{R}$. This equation with the fact that $\Phi(d_2(a)) = d_2(a)$ for all $a \in \mathfrak{R}$ and using equation (3), we have
\begin{align*}
0 & = d_1(a) d_1(c) d_2(b) + \Phi(a) d_1^{2}(c) d_2(b) + \Phi(d_2(a) d_1(c) + a d_2 d_1(c)) d_1(b)\\ & = d_1(a) d_1(c) d_2(b) + \Phi(a) d_1^{2}(c) d_2(b) + d_2(a) \Phi(d_1(c)) d_1(b) + \Phi(a) d_2 d_1 (c) d_1(b) \\ & = d_1(a) d_1(c) d_2(b) + \Phi(a)(d_1(d_1(c)) d_2(b) +  d_2 (d_1 (c)) d_1(b)) + d_2(a) \Phi(d_1(c))d_1(b) \\ & = d_1(a) d_1(c) d_2(b) + d_2(a) \Phi(d_1(c))d_1(b) \\ & = - d_1(a) d_2(c) d_1(b) + d_2(a) \Phi(d_1(c)) d_1(b) \\ & = (d_2(a) \Phi(d_1(c)) - d_1(a) d_2(c))d_1(b),
\end{align*}
which means that
\begin{align*}
(d_2(a) \Phi(d_1(c)) - d_1(a) d_2(c))d_1(b) = 0 \text{ for all } a,b,c \in \mathfrak{R}.
\end{align*}
According to Lemma \ref{1}, we have $d_2(a) \Phi(d_1(c)) - d_1(a) d_2(c) = 0$ for all $a,c \in \mathfrak{R}$, or else $d_1$ is zero. If $d_1$ is zero, then our goal is achieved. If not, $d_2(a) \Phi(d_1(c)) - d_1(a) d_2(c) = 0$ for all $a,c \in \mathfrak{R}$. Replacing $b$ by $c$ in equation (3), we arrive at $d_1(a) d_2(c) + d_2(a) d_1(c) = 0$ for all $a, c \in \mathfrak{R}$. Adding these last two equations, we find that $ 0 = d_2(a) d_1(c) + d_2(a) \Phi(d_1(c)) = d_2(a)(d_1(c) + \Phi(d_1(c)))$.  Applying Lemma \ref{1} again, we, therefore, have $d_1(c) = - \Phi(d_1(c))$ for all $ c \in \mathfrak{R}$ or else $d_2$ is zero. If $d_2$ is zero, then the objective is obtained. If not, $d_1(c) = - \Phi(d_1(c))$ for all $ c \in \mathfrak{R}$. By using $d_2(a) \Phi(d_1(c)) - d_1(a) d_2(c) = 0$ with the fact that $d_1(c) = - \Phi(d_1(c))$, we arrive at
 \begin{align}
 - d_2(a) d_1(c) - d_1(a) d_2(c) = 0 \text{ for all } a,c \in \mathfrak{R}.
 \end{align}
After replacing $cb$ instead of $c$ in equation (4) and then using that, we get
\begin{align*}
0 & = d_1(a) d_2(c) b - d_1(a) c d_2(b) - d_2(a) d_1(c) b - d_2(a) \Phi(c) d_1(b) \\ & = (- d_1(a) d_2(c) - d_2(a) d_1(c))b - d_1(a) c d_2(b) - d_2(a) \Phi(c) d_1(b) \\ & = - d_1(a) c \ d_2(b) - d_2(a) \Phi(c) d_1(b).
\end{align*}
Hence,
\begin{align}
d_1(a) c d_2(b) = - d_2(a) \Phi(c) d_1(b) \text{ for all } a,b,c \in \mathfrak{R}.
\end{align}
If we replace $c$ by $d_2(c)$ in equation (5), then we have
\begin{align*}
d_1(a) d_2(c) d_2(b) = - d_2(a) \Phi(d_2(c)) d_1(b) = - d_2(a) d_2(c) d_1(b) \text{ for all } a,b,c \in \mathfrak{R}.
\end{align*}
From this equation and equation (4), we infer that
\begin{align*}
0 & = - d_2(a) d_1(c) d_2(b) + d_2(a) d_2(c) d_1(b) \\ & = d_2(a) (d_2(c) d_1(b) - d_1(c) d_2(b))
\end{align*}
for all $a,b,c \in \mathfrak{R}$. It follows from Lemma \ref{1} that $d_2$ is zero or $d_2(c) d_1(b) - d_1(c) d_2(b) = 0$. If $d_2 = 0$, then our aim is accomplished. Let $d_2(a) \neq 0$ for some $a \in \mathfrak{R}$. Hence, we have
\begin{align}
d_2(c) d_1(b) - d_1(c) d_2(b) = 0 \text{ for all } c,b \in \mathfrak{R}.
\end{align}
Comparing equations (3) and (6), we conclude that $2 d_2(c) d_1(b) = 0$ for all $b,c \in \mathfrak{R}$. Since $\mathfrak{R}$ is 2-torsion free, $d_2(c) d_1(b) = 0$. Reusing Lemma \ref{1} with replacing $a$ by $d_2(c)$, we find that $d_1$ is zero or else $d_2(c) = 0$ for all $c \in \mathfrak{R}$. Since we are assuming that $d_2(a) \neq 0$ for some $a \in \mathfrak{R}$, $d_1 = 0$. Thereby, our proof is complete.
\end{proof}


\begin{theorem}\label{3} Let $\mathfrak{R}$ be a prime ring and let $d$ be a $\Phi$-derivation of $\mathfrak{R}$ such that $\Phi$ is a homomorphism. If $\Phi(a) d(x) = d(x) a$ for all $x, a \in \mathfrak{R}$, then $\mathfrak{R}$ is commutative or $d$ is zero.
\end{theorem}
\begin{proof}  We have
\begin{align}
\Phi(a) d(x) = d(x) a \text{ for all } x,a \in \mathfrak{R}.
\end{align}
Replacing $x$ by $xy$ in equation (7), we have
\begin{align}
\Phi(a) d(x) y + \Phi(a) \Phi(x) d(y) - d(x) y a - \Phi(x) d(y) a = 0.
\end{align}
Putting $d(x)a$ instead of $\Phi(a) d(x)$ and $\Phi(a) d(y)$ instead of $d(y)a$ in (8), we have
\begin{align*}
0 & = d(x)ay + \Phi(a) \Phi(x) d(y) - d(x) ya - \Phi(x) \Phi(a) d(y) \\ & = d(x) [a,y] + \Phi(ax) d(y) - \Phi(xa) d(y) \\ & = d(x) [a,y] + \Phi(ax - xa) d(y) \\ & = d(x)[a,y] + \Phi([a,x]) d(y),
\end{align*}
which means that
\begin{align}
d(x)[a,y] + \Phi([a,x]) d(y) = 0 \text{ for all } x,y,a \in \mathfrak{R}.
\end{align}
According to (7), we have
\begin{align}
\Phi([a,x]) d(y) = d(y) [a,x] \text{ for all } a,x,y \in \mathfrak{R}.
\end{align}
Comparing equations (9) and (10), we obtain that
\begin{align}
d(x) [a,y] + d(y) [a,x] = 0 \text{ for all } x,a,y \in \mathfrak{R}.
\end{align}
Replacing $y$ by $yz$ in equation (11) and then using equations (7) and (11), we arrive at
\begin{align*}
0 & = d(x) [a,yz] + d(yz) [a,x] \\ & = d(x) y [a,z] + d(x)[a,y]z + d(y) z [a,x] + \Phi(y) d(z) [a,x] \\ & = \Phi(y) d(x) [a,z] - d(y) [a,x]z + d(y) z [a,x] - \Phi(y) d(x) [a,z] \\ & = d(y) z [a,x] - d(y) [a,x]z \\ & = d(y) (z [a,x] - [a,x]z),
\end{align*}
which means that
\begin{align}
d(y) (z [a,x] - [a,x]z) = 0 \text{ for all } a,x,y,z \in \mathfrak{R}.
\end{align}
Applying Lemma \ref{1}, it is obtained that $d(y) = 0$, or else $z [a,x] - [a,x]z = 0$ for all $a,x,y,z \in \mathfrak{R}$. If $d$ is zero, then we get the required result. If not,  $z [a,x] - [a,x]z = 0$ for all $a,x,z \in \mathfrak{R}$. Define $D_1(x) = [a,x]$ and $D_2(x) = [z,x]$. It is evident that $D_1$ and $D_2$ are derivations. We have $D_2 D_1(x) = D_2([a,x]) = [z,[a,x]] = z[a,x] - [a,x]z = 0$. Theorem \ref{2} is exactly what we need to complete the proof.
\end{proof}

In the following theorem, the surjectivity of $\Phi$ is unnecessary.

\begin{theorem} \label{4} Let $\mathfrak{R}$ be a 2-torsion free prime ring, let $d_1$ be a derivation and let $d_2$ be a $\Phi$-derivation of $\mathfrak{R}$ such that $[\Phi, d_1] = 0$ and $\Phi$ is a homomorphism. If $d_1 d_2$ is a $\Phi$-derivation, then at least one of $d_1$ and $d_2$ is zero.
\end{theorem}

\begin{proof} Since $d_1 d_2$ is a $\Phi$-derivation, we have
\begin{align}
d_1 d_2(ab) = d_1 d_2(a) b + \Phi(a) d_1 d_2(b).
\end{align}
Since $d_1$ is a derivation and $d_2$ is a $\Phi$-derivation, we have
\begin{align*}
d_1 d_2(ab) & = d_1( d_2(a) b + \Phi(a) d_2(b)) \\ & = d_1(d_2(a)b) + d_1(\Phi(a) d_2(b)) \\ & = d_1 d_2(a) b + d_2(a) d_1(b) + d_1(\Phi(a)) d_2(b) + \Phi(a) d_1 d_2(b),
\end{align*}
which means that
\begin{align}
d_1 d_2(ab) = d_1 d_2(a) b + d_2(a) d_1(b) + d_1(\Phi(a)) d_2(b) + \Phi(a) d_1 d_2(b).
\end{align}
Comparing equations (13) and (14), we obtain
\begin{align}
d_1(\Phi(a)) d_2(b) + d_2(a) d_1(b) = 0 \text{ for all } a,b \in \mathfrak{R}.
\end{align}
Replacing $a$ by $a d_1(c)$ in equation (15) and then using that, we have
\begin{align*}
0 & = d_1(\Phi(a) \Phi(d_1(c))) d_2(b) + d_2(a d_1(c)) d_1(b) \\ & = d_1(\Phi(a)) \Phi(d_1(c))d_2(b) + \Phi(a) d_1(\Phi(d_1(c)))d_2(b) + d_2(a) d_1(c) d_1(b) + \\ & \Phi(a) d_2(d_1(c)) d_1(b) \\ & = d_1(\Phi(a)) \Phi(d_1(c))d_2(b) + d_2(a) d_1(c) d_1(b) \\ & = d_1(\Phi(a))d_1(\Phi(c))d_2(b) + d_2(a) d_1(c) d_1(b) \\ & = - d_1(\Phi(a))d_2(c) d_1(b) + d_2(a) d_1(c) d_1(b) \\ & = (d_2(a) d_1(c) - d_1(\Phi(a))d_2(c)) d_1(b).
\end{align*}
It follows from Lemma \ref{1} that $d_1(b) = 0$ for all $b \in \mathfrak{R}$, or else $d_2(a) d_1(c) - d_1(\Phi(a))d_2(c) = 0$ for all $a,c \in \mathfrak{R}$. If $d_1$ is zero, then we reach our goal. If not,
\begin{align}
d_2(a) d_1(c) - d_1(\Phi(a))d_2(c) = 0.
\end{align}
Replacing $b$ by $c$ in equation (15), we get that $d_2(a) d_1(c) + d_1(\Phi(a)) d_2(c) = 0$ for all $a,c \in \mathfrak{R}$. Adding this last equation to equation (16), we find that $2 d_2(a) d_1(c) = 0$, and since $\mathfrak{R}$ is 2-torsion free, $d_2(a) d_1(c) = 0$ for all $a,c \in \mathfrak{R}$. Now, Lemma \ref{1} completes the proof.
\end{proof}

In the following, we present another theorem in this regard.
\begin{theorem} Let $\mathfrak{R}$ be a 2-torsion free prime ring, let $d:\mathfrak{R} \rightarrow \mathfrak{R}$ be a $\Phi$-derivation such that $d \Phi = \Phi d = d$ and let $\Phi$ be an epimorphism. If $[\Phi(a), \Phi(x)]d(y) = d(x) [y,a]$ for all $x,y,a \in \mathfrak{R}$, then $\mathfrak{R}$ is commutative or $d$ is zero.
\end{theorem}

\begin{proof} Given that
\begin{align}
[\Phi(a), \Phi(x)]d(y) = d(x) [y,a] \text{ for all } x,y,a \in \mathfrak{R}.
\end{align}
Replacing $x$ by $x d(z)$ in equation (17), we have
\begin{align*}
[\Phi(a), \Phi(x) \Phi(d(z))]d(y) &= [\Phi(a),\Phi(x)d(z)]d(y) \\ & = d(x d(z))[y,a]
\end{align*}
 for all $a,x,y,z \in \mathfrak{R}$. Thus,
\begin{align*}
&\Phi(x)[\Phi(a), d(z)]d(y) + [\Phi(a), \Phi(x)]d(z)d(y) \\ & = d(x)d(z)[y,a] + \Phi(x) d(d(z))[y,a].
\end{align*}
Replacing $x$ by $d(z)$ in equation (17), we see that $[\Phi(a),\Phi(d(z))] d(y) - d(d(z))[y,a] = 0$, and the equation above is written as follows:
\begin{align}
[\Phi(a), \Phi(x)]d(z)d(y) = d(x) d(z) [y,a] \text{ for all } a,x,y,z \in \mathfrak{R}.
\end{align}
It follows from equation (17) that $[\Phi(a), \Phi(x)] d(z) = d(x) [z,a]$ and $d(z) [y,a] = [\Phi(a),\Phi(z)] d(y)$. Then, equation (18) becomes $d(x) [z,a] d(y) - d(x) [\Phi(a), \Phi(z)] d(y) = 0$; factoring out $d(x)$ on the left and $d(y)$ on the right, we have $$d(x)([z,a] - [\Phi(a), \Phi(z)])d(y) = 0$$ for all $a,x,y,z \in \mathfrak{R}$. It follows  from Lemma \ref{1} that $d$ is zero, or else $[z,a] - [\Phi(a), \Phi(z)] = 0$ for all $a,x,y,z \in \mathfrak{R}$. If $d$ is zero, then there is nothing to be proved. Suppose that
\begin{align}
[z,a] - [\Phi(a), \Phi(z)] = 0 \text{ for all } a,x,y,z \in \mathfrak{R}.
\end{align}
Considering equations (17) and (19), we find that
\begin{align}
[x,a] d(y) = d(x) [y,a] \text{ for all } a,x,y \in \mathfrak{R}.
\end{align}
Replacing $x$ by $x d(r)$ in equation (20), we have $[x d(r),a] d(y) = d(x d(r)) [y,a]$ and so, we get that  $$x [d(r),a] d(y) + [x,a] d(r) d(y) = d(x) d(r) [y,a] + \Phi(x) d(d(r)) [y,a].$$ In view of equation (20), the above equation becomes
$$x [d(r), a] d(y) + d(x) [r, a] d(y) = d(x) [r, a] d(y) + \Phi(x) d^{2}(r) [y, a]$$ for all $a,r,x,y \in \mathfrak{R}$; subtracting $d(x) [r,a] d(y)$ from this, we arrive at
\begin{align}
x [d(r),a] d(y) = \Phi(x) d^{2}(r) [y,a] \text{ for all } a,r,x,y \in \mathfrak{R}.
\end{align}
It follows from equation (20) that $[d(r),a] d(y) = d^{2}(r)[y, a]$. This equation with equation (21), implies that $(x - \Phi(x)) d^2(r) [y,a] = 0$ for all $a,r,x,y \in \mathfrak{R}$. According to Lemma \ref{1}, we conclude that either $[y,a] = 0$ for all $ y,a \in \mathfrak{R}$ or $(x - \Phi(x)) d^2(r) = 0$ for all $r,x \in \mathfrak{R}$. If $[y,a] = 0$ for all $y,a \in \mathfrak{R}$, then $\mathfrak{R}$ is commutative. If not, we assume
\begin{align}
(x - \Phi(x)) d^2(r) = 0 \text{ for all } r,x \in \mathfrak{R}.
\end{align}
Replacing $x$ by $d(r)$ in equation (20), it is obtained that $d^2(r) [y,a] = d(d(r)) [y,a] = [d(r),a] d(y)$ for all $a,r,y \in \mathfrak{R}$. Hence, (21) is written as follows:
 \begin{align*}
 x [d(r),a] d(y) = \Phi(x) [d(r),a] d(y) \text{ for all } a,r,x,y \in \mathfrak{R}.
 \end{align*}
 Therefore, $(x - \Phi(x)) [d(r),a] d(y) = 0$ for all $ a,r,x,y \in \mathfrak{R}$. From this and Lemma \ref{1}, we arrive at $(x - \Phi(x))[d(r),a] = 0$ for all $a,r,x \in \mathfrak{R}$, or else $d$ is zero. If $d$ is zero, then our objective is achieved. Suppose that $(x - \Phi(x))[d(r), a] = 0$ for all $a,r,x \in \mathfrak{R}$. Replacing $r$ by $d(r)$ in the previous equation, we have $(x - \Phi(x)) [d(d(r)),a] = 0$. From this equality and the fact that $(x - \Phi(x)) d^2(r) = 0$ for all $x,r \in \mathfrak{R}$ (see (22)), we have
 \begin{align*}
 0 & = (x - \Phi(x))d^2(r)a - (x - \Phi(x)) a d^2(r) \\ & = 0 - (x - \Phi(x)) a d^2(r).
 \end{align*}
 This means $(x - \Phi(x)) a d^2(r) = 0$ for all $a,r,x \in \mathfrak{R}$ and primeness of $\mathfrak{R}$ forces that $(x - \Phi(x)) = 0$ or $d^2(r) = 0$. If $x = \Phi(x)$ for all $x \in \mathfrak{R}$, then $\Phi([z,a]) = [z,a]$ for all $z,a \in \mathfrak{R}$. This equation together with equation (19) yields that
 $[a,z] = [\Phi(z), \Phi(a)] = [z,a]$ for all $a,z \in \mathfrak{R}$. Hence, we have
 \begin{align*}
 0 = [z,a] - [a,z] = [z,a] + [z,a] = 2 [z,a].
 \end{align*}
Since $\mathfrak{R}$ is 2-torsion free, $[z,a] = 0$ for all $z,a \in \mathfrak{R}$. It means that $\mathfrak{R}$ is commutative. Now suppose that $d^2(r) = 0$ for all $r \in \mathfrak{R}$. Replacing $r$ by $xy$ in the preceding equation, and then using the assumption that $d \Phi = \Phi d = d$, we have
\begin{align*}
0 & = d^2(xy) = d(d(x) y + \Phi(x) d(y)) \\ & = d(d(x) y) + d( \Phi(x) d(y)) \\ & = d^2(x) y + \Phi(d(x)) d(y) + d(\Phi(x)) d(y) + \Phi^2(x) d^2(y) \\ & = 0 + d(x) d(y) + d(x) d(y) + 0 \\ & = 2 d(x) d(y).
\end{align*}
Since $\mathfrak{R}$ is 2-torsion free, $d(x) d(y) = 0$ for all $x, y \in \mathfrak{R}$. It follows from Lemma \ref{1} that $d$ is zero, as desired.
\end{proof}

In the following example, we provide a $\Phi$-derivation $d$ such that $d \Phi = \Phi d = d$.

\begin{example} Let $\mathcal{R}$ be a ring and let
\begin{align*}
\mathfrak{R} = \Bigg\{\left [\begin{array}{ccc}
0 & a & b\\
0 & 0 & c\\
0 & 0 & 0
\end{array}\right ] \ : \ a, b, c \in \mathcal{R}\Bigg\}
\end{align*}
Clearly, $\mathfrak{R}$ is a ring. Define the additive mappings $d, \Phi:\mathfrak{R} \rightarrow \mathfrak{R}$ by $$\Phi\Bigg(\left [\begin{array}{ccc}
0 & a & b\\
0 & 0 & c\\
0 & 0 & 0
\end{array}\right ]\Bigg) = \left [\begin{array}{ccc}
0 & 0 & b\\
0 & 0 & c\\
0 & 0 & 0
\end{array}\right ], $$

$$ d\Bigg(\left [\begin{array}{ccc}
0 & a & b\\
0 & 0 & c\\
0 & 0 & 0
\end{array}\right ]\Bigg) = \left [\begin{array}{ccc}
0 & 0 & 0\\
0 & 0 & c\\
0 & 0 & 0
\end{array}\right ].$$
It is routine to see that $d(AB) = d(A)B + \Phi(A) d(B)$ for all $A, B \in \mathfrak{R}$,
which means that $d$ is a $\Phi$-derivation such that $d \Phi = \Phi d = d$.
\end{example}

In the next theorem, we investigate the commutativity of $\ast$-algebras with two variable $\Phi$-derivations. Let $\mathcal{A}$ be a complex algebra. Recall that an involution over $\mathcal{A}$ is a map $\ast: \mathcal{A} \rightarrow \mathcal{A}$ satisfying the following conditions for all $a, b \in \mathcal{A}$ and $\lambda \in \mathbb{C}$:\\
(i) $(a^{\ast})^{\ast} = a$,\\
(ii) $(ab)^{\ast} = b^{\ast}a^{\ast}$,\\
(iii) $(a +b)^{\ast} = a^{\ast} + b^{\ast}$,\\
(iv) $(\lambda a)^{\ast} = \overline{\lambda} a^{\ast}$.

An algebra $\mathcal{A}$ equipped with an involution $\ast$ is called an involutive algebra or $\ast$-algebra and
is denoted, as an ordered pair, by $(\mathcal{A}, \ast)$. Note that if $\mathcal{A}$ is a $\ast$-algebra, then a straightforward verification shows that $\mathcal{A} \times \mathcal{A}$ is also a $\ast$-algebra by regarding the following structure:\\
(i) $(a, b) + (c, d) = (a + c, b + d)$;\\
(ii) $\lambda (a, b) = (\lambda a, \lambda b)$;\\
(iii) $(a, b). (c, d) = (ac, bd)$;\\
(iv) $(a, b)^{\ast} = (a^{\ast}, b^{\ast})$;\\
for $a, b \in \mathcal{A}$ and $\lambda \in \mathbb{C}$.\\
Let $(\mathcal{A}, \ast)$ be an involutive algebra. A mapping $T:\mathcal{A }\rightarrow \mathcal{A}$ is called a $\ast$-map if $T(a) = (T(a^{\ast}))^{\ast}$ for all $a \in \mathcal{A}$. Similar to the $\ast$-mappings, a bi-mapping $\Psi:\mathcal{A} \times \mathcal{A} \rightarrow \mathcal{A}$ is a $\ast$-map if $\Psi(a, b) = (\Psi(a^{\ast}, b^{\ast}))^{\ast}$ for all $a, b \in \mathcal{A}$. Let $\Phi:\mathcal{A} \rightarrow \mathcal{A}$ be a linear mapping. A bi-linear mapping (i.e., linear in both arguments) $\Psi:\mathcal{A} \times \mathcal{A} \rightarrow \mathcal{A}$ is called a left two variable $\Phi$-derivation if $\Psi(ab, c) = \Psi(a,c) b + \Phi(a) \Psi(b,c)$ for all $a, b, c \in \mathcal{A}$. A right two variable $\Phi$-derivation is defined, similarly. A bi-linear mapping $\Psi:\mathcal{A} \times \mathcal{A} \rightarrow \mathcal{A}$ is said to be a two variable $\Phi$-derivation if it is both a left-and a right two variable $\Phi$-derivation. %We call a left (resp. right) two variable $(I, \Phi)$-derivation, a left (resp. right) two variable $\Phi$-derivation, where $I$ is the identity mapping on $\mathcal{A}$.
A $\ast$-two variable $\Phi$-derivation means a two variable $\Phi$-derivation $\Psi:\mathcal{A} \times \mathcal{A} \rightarrow \mathcal{A}$, whenever both $\Psi$ and $\Phi$ are $\ast$-maps.\\
\\
Suppose $d_1, d_2:\mathcal{A}\rightarrow \mathcal{A}$ are two $\ast$-$\Phi$-derivations such that $\Phi$ is a $\ast$-map. If $d_2(\mathcal{A}) \subseteq Z(\mathcal{A})$ and $[d_1(a), \Phi(b)] = 0$ for all $a, b \in \mathcal{A}$, then $\Psi: \mathcal{A} \times \mathcal{A} \rightarrow \mathcal{A}$ defined by $\Psi(a, b) = d_1(a)d_2(b)$ is a $\ast$-two variable $\Phi$-derivation.

Before proving Theorem \ref{+}, we need the lemma below.

\begin{lemma}[{\cite[Remark 2.6]{Ha}}] \label{-} Let $\mathfrak{R}$ be a unital ring and let $\Psi: \mathfrak{R} \times \mathfrak{R} \rightarrow \mathfrak{R}$ be a bi-additive mapping satisfying $\Psi(xy, z) = \Psi(x,z)\Sigma(y) + \Sigma(x) \Psi(y,z)$ and  $\Psi(x, yz) = \Psi(x,y)\Sigma(z) + \Sigma(y) \Psi(x,z)$ for all $x, y, z \in \mathfrak{R}$. Then
\begin{align*}
&\Sigma(x)\Psi(\textbf{e}, y) = \Sigma(y) \Psi(x, \textbf{e})\\ &
\Psi(\textbf{e}, y) \Sigma(x) = \Psi(x, \textbf{e}) \Sigma(y),
\end{align*}
for all $x, y \in \mathfrak{R}$.
\end{lemma}


\begin{theorem} \label{+} Let $(\mathcal{A}, \ast)$ be a unital, involutive algebra and let $\Psi:\mathcal{A} \times \mathcal{A}\rightarrow \mathcal{A}$ be a $\ast$-two variable $\Phi$-derivation such that $\Psi(\textbf{e},a_{0}) = \textbf{e}$ for some $a_{0}\in \mathcal{A}$. If $\{a \in \mathcal{A} \ : \ \Psi(a, a_0) = 0\} = \{0\}$, then $\mathcal{A}$ is commutative.
\end{theorem}

\begin{proof} Let $\Psi$ be a $\ast$-left two variable $\Phi$-derivation. So, we have
\begin{align*}
\Psi(ab, c) & = \Big(\Psi(b^{\ast}a^{\ast}, c^{\ast})\Big)^{\ast} \\ & = \Big(\Psi(b^{\ast}, c^{\ast})a^{\ast} + \Phi(b^{\ast})\Psi(a^{\ast}, c^{\ast})\Big)^{\ast} \\ & = a \Psi(b, c) + \Psi(a, c) \Phi(b)
\end{align*}
Moreover, we know that $\Psi(ab, c) = \Psi(a, c) b + \Phi(a) \Psi(b, c)$ for all $a, b, c \in \mathcal{A}$. Hence, we have the following expressions:
\begin{align*}
\Psi(ab, c) & = \frac{1}{2}\Psi(ab, c) + \frac{1}{2}\Psi(ab, c) \\ &  = \frac{\Psi(a, c) b + \Phi(a) \Psi(b, c)}{2} + \frac{a \Psi(b, c) + \Psi(a, c) \Phi(b)}{2}
\end{align*}
So, we have
\begin{align*}
\Psi(ab, c) = \Psi(a, c) \left(\frac{b + \Phi(b)}{2}\right) + \left(\frac{\Phi(a) + a}{2}\right) \Psi(b, c),
\end{align*}
for all $a, b, c \in \mathcal{A}$. Considering $\frac{\Phi + I}{2} = \Sigma$, where $I$ is the identity mapping on $\mathcal{A}$, we see that
\begin{align*}
\Psi(ab, c) = \Psi(a, c) \Sigma(b) + \Sigma(a) \Psi(b, c),
\end{align*}
for all $a, b, c \in \mathcal{A}$. Similarly, one can obtain that $$\Psi(a, bc) = \Psi(a, b) \Sigma(c) + \Sigma(b) \Psi(a, c)$$
for all $a, b, c \in \mathcal{A}$. We proceed with our proof according to the proof of \cite[Theorem 2.16]{H} and in order to make this paper self-contained, we state its proof here. Let $a_0$ be an element of $\mathcal{A}$ such that $\Psi(\textbf{e}, a_0) = \textbf{e}$.
Therefore, we have
\begin{align*}
\textbf{e} & = \Psi(\textbf{e},a_{0}) = \Psi(\textbf{e e},a_{0}) \\
& = \Sigma(\textbf{e})\Psi(\textbf{e},a_{0}) +
\Psi(\textbf{e},a_{0})\Sigma(\textbf{e}) \\ & = 2\Sigma(\textbf{e}).
\end{align*}
Hence, $\Sigma(\textbf{e}) = \frac{\textbf{e}}{2}$. It follows from Lemma \ref{-} that $\Sigma(a)\Psi(\textbf{e}, b) = \Sigma(b) \Psi(a, \textbf{e})$ and $\Psi(\textbf{e}, b) \Sigma(a) = \Psi(a, \textbf{e}) \Sigma(b)$ for all $a, b \in \mathcal{A}$. Define $D_1(a) = \Psi(\textbf{e}, a)$ and $D_2(a) = \Psi(a, \textbf{e})$ for all $a \in \mathcal{A}$. Hence, $\Sigma(a) D_1(b) = \Sigma(b) D_2(a)$ and
$D_1(b) \Sigma(a) = D_2(a) \Sigma(b)$ for all $a, b \in \mathcal{A}$. Thus,
\begin{align*}
D_1(ab) & = D_1(a) \Sigma(b) + \Sigma(a) D_1(b) \\ & = D_2(b) \Sigma(a) + \Sigma(b) D_2(a) \\ & = D_2(ba).
\end{align*}
It means that $D_1(ab) = D_2(ba)$ for all $a, b \in \mathcal{A}$. Putting $b = \textbf{e}$ in the previous equation, we obtain that $D_1(a \textbf{e}) = D_2(\textbf{e} a)$ for all $a \in \mathcal{A}$; and it means that $D_1 = D_2$.
Therefore, we have $\Psi(a,\textbf{e}) = \Psi(\textbf{e},a)$ for all $a \in \mathcal{A}$. Suppose that $a$ and $b$ are two
arbitrary elements of $\mathcal{A}$. Then, we have
\begin{align*}
\Psi(a,b) & = \Psi(\textbf{e}a,b) \\ & = \Psi(\textbf{e},b)\Sigma(a)
+ \Sigma(\textbf{e})\Psi(a,b) \\ & = \Psi(\textbf{e},b)\Sigma(a) +
\frac{\Psi(a,b)}{2}.
\end{align*}
So, $\Psi(a,b) = 2\Psi(\textbf{e},b)\Sigma(a)$. Reusing Lemma \ref{-} and applying the fact that $\Psi(a,\textbf{e}) =
\Psi(\textbf{e},a)$ for all $a\in \mathcal{A}$, we get
\begin{align*}
\Psi(b,a) & = 2\Psi(\textbf{e},a)\Sigma(b) \\ & =
2\Psi(b,\textbf{e})\Sigma(a) \\ & = 2\Psi(\textbf{e},b)\Sigma(a) \\
& = \Psi(a,b).
\end{align*}
It means that $\Psi$ is symmetric. Let $a$ be an arbitrary
element of $\mathcal{A}$. Then
\begin{align*}
\Psi(a,a_{0})& = \Psi(a \textbf{e},a_{0}) \\ & =
\Psi(a,a_{0})\Sigma(\textbf{e}) + \Sigma(a)\Psi(\textbf{e},a_{0}) \\
& = \frac{\Psi(a,a_{0})}{2} + \Sigma(a).
\end{align*}
Hence, $\Sigma(a) = \frac{\Psi(a,a_{0})}{2}$. Now, we define a linear
mapping $\Lambda:\mathcal{A} \rightarrow \mathcal{A}$ by $\Lambda(a)
= \Psi(a,a_{0})$. Obviously, $\Lambda(\textbf{e}) = \textbf{e}$. Furthermore, we have
\begin{align*}
\Lambda(ab) & = \Psi(ab,a_{0}) \\ & = \Psi(a,a_{0})\Sigma(b) +
\Sigma(a)\Psi(b,a_{0}) \\ & = \Psi(a,a_{0})\frac{\Psi(b,a_{0})}{2} +
\frac{\Psi(a,a_{0})}{2}\Psi(b,a_{0}) \\ & =
\Psi(a,a_{0})\Psi(b,a_{0})
\\ & = \Lambda(a)\Lambda(b).
\end{align*}
So, $\Lambda$ is a unital homomorphism. Furthermore,
\begin{align*}
\Psi(a,a_{0}) & = \Psi(a,\textbf{e} a_{0}) \\ & =
\Psi(a,\textbf{e})\Sigma(a_{0}) + \Sigma(\textbf{e})\Psi(a,a_{0}) \\
& = \Psi(a,\textbf{e}) \frac{\Psi(a_{0},a_{0})}{2} +
\frac{\Psi(a,a_{0})}{2}.
\end{align*}
So, $\Psi(a,a_{0}) = \Psi(a,\textbf{e})\Psi(a_{0},a_{0})$. From this equation, we have
$\textbf{e} = \Psi(\textbf{e},a_{0}) =
\Psi(\textbf{e},\textbf{e})\Psi(a_{0},a_{0})$. Similarly, using the equation $\Psi(a,a_0) = \Psi(a,a_0 \textbf{e})$, we can obtain
that $\textbf{e} = \Psi(a_0,a_0)\Psi(\textbf{e},\textbf{e})$. Hence, $\Psi(a_{0},a_{0})^{-1} = \Psi(\textbf{e},\textbf{e})$. Now, we
define a $\Sigma$-derivation $d:\mathcal{A} \rightarrow \mathcal{A}$ by $d(a) = \Psi(a,\textbf{e})$. Since $\Psi(a,a_{0}) =
\Psi(a,\textbf{e})\Psi(a_{0},a_{0})$, $\Lambda(a) = d(a)\Lambda(a_{0})$ for all $a \in \mathcal{A}$. So, $d(a) =
\Lambda(a)\Lambda(a_{0})^{-1}$. Moreover, we have
\begin{align*}
d(a) = d(\textbf{e})\Sigma(a) + \Sigma(\textbf{e})d(a)  =
d(\textbf{e})\Sigma(a) + \frac{d(a)}{2},
\end{align*}
and so, $$d(a) = 2d(\textbf{e})\Sigma(a) = 2\Lambda(a_{0})^{-1}\frac{\Lambda(a)}{2} = \Lambda(a_{0})^{-1}\Lambda(a).$$ Similarly, $$d(a) =
d(a)\Sigma(\textbf{e}) + \Sigma(a)d(\textbf{e}) = \frac{d(a)}{2} +
\Sigma(a)d(\textbf{e})$$ and therefore, $$d(a) =
2\Sigma(a)d(\textbf{e}) = \Lambda(a) \Lambda(a_0)^{-1}.$$ Hence,
$\Lambda(a)\Lambda(a_{0})^{-1} = \Lambda(a_{0})^{-1}\Lambda(a)$ for all
$a\in \mathcal{A}$. Let $a$ and $b$ be two
arbitrary elements of $\mathcal{A}$. Then
\begin{align*}
\Psi(a,b)& = 2 \Psi(a,\textbf{e}) \Sigma(b) \\ & = 2 d(a)
\frac{\Lambda(b)}{2}
\\ & = d(a) \Lambda(b) \\ & = \Lambda(a)\Lambda(a_{0})^{-1}\Lambda(b) \\
& = \Lambda(a) \Lambda(b) \Lambda(a_{0})^{-1} \\ & = \Lambda(ab)
\Lambda(a_{0})^{-1}.
\end{align*}
Our next task is to prove that $\frac{\mathcal{A}}{\ker \Lambda}$ is a
commutative algebra. Since $\Psi$ is symmetric, i.e. $\Psi(a,b) = \Psi(b,a)$ for all $a,b \in \mathcal{A}$, we have $\Lambda(ab)
\Lambda(a_{0})^{-1} = \Lambda(ba)\Lambda(a_{0})^{-1}$. This equation implies
that $\Lambda(ab) = \Lambda(ba)$, i.e. $ab - ba \in \ker (\Lambda)$ for
all $a, b\in \mathcal{A}$. Consequently, $ab + \ker (\Lambda) = ba + \ker (\Lambda)$ and it implies that the quotient algebra $\frac{\mathcal{A}}{\ker \Lambda}$ is a commutative algebra. Since we are assuming that $\{a \in \mathcal{A} \ : \ \Psi(a, a_0) = 0\} = \{0\}$, $\ker \Lambda = \{0\}$ and consequently, $\mathcal{A}$ is a commutative algebra. Thereby, our proof is complete.
\end{proof}

As an immediate consequence of Theorem \ref{+}, we have the following result.

\begin{corollary} \label{++} Let $(\mathcal{A}, \ast)$ be a unital, involutive Banach algebra. Let $\Psi:\mathcal{A} \times \mathcal{A}\rightarrow \mathcal{A}$ be a $\ast$-two variable $\Phi$-derivation such that $\Psi(\textbf{e},a_{0}) = \textbf{e}$ for some $a_{0}\in \mathcal{A}$. If the algebra $\mathcal{A}$ satisfies conditions under which every homomorphism of $\mathcal{A}$ is continuous, then both $\Psi$ and $\Phi$ are continuous.
\end{corollary}
\begin{proof} It follows from Theorem \ref{+} that there exists a homomorphism $\Lambda:\mathcal{A} \rightarrow \mathcal{A}$ defined by $\Lambda(a) = \Psi(a, a_0)$ such that $\Psi(a, b) = \Lambda(ab)(\Lambda(a_0))^{-1}$ for all $a, b \in \mathcal{A}$. Since we are assuming the conditions under which every homomorphism of $\mathcal{A}$ is continuous, we have
\begin{align*}
\|\Psi(a, b)\| = \|\Lambda(ab)(\Lambda(a_0))^{-1}\| \leq \|\Lambda\| \ \|a\| \ \|b\| \ \|(\Lambda(a_0))^{-1}\| < \infty,
\end{align*}
which means that $\Psi$ is continuous. According to the proof of Theorem \ref{+}, $\frac{\Phi(a) + a}{2}=\Sigma(a) = \frac{\Lambda(a)}{2}$ for all $a \in \mathcal{A}$. Hence, $\Phi = \Lambda - I$ and since $\Lambda$ is continuous, so is $\Phi$.
\end{proof}

\begin{remark} There are many different conditions under which a homomorphism is continuous. For instance, if $\mathcal{A}$ is a Banach algebra and $\mathcal{B}$ is a commutative, semisimple Banach algebra, then it follows from \cite[Proposition 5.1.1]{D} that every homomorphism $\theta:\mathcal{A} \rightarrow \mathcal{B}$ is automatically continuous. For more material about the continuity of homomorphisms and other results, see, e.g. \cite[Theorem 5.1.8, Theorem 5.2.4, Corollary 5.2.5]{D} and \cite[Corollary 3.2.4]{D1}.
\end{remark}
%\vspace{4mm}\noindent{\bf Acknowledgements}\\
%\noindent If you'd like to thank anyone, place your comments here.


% BibTeX users please use one of
%\bibliographystyle{spbasic}      % basic style, author-year citations
%\bibliographystyle{spmpsci}      % mathematics and physical sciences
%\bibliographystyle{spphys}       % APS-like style for physics
%\bibliography{}   % name your BibTeX data base

% Non-BibTeX users please use
\begin{center}
\begin{thebibliography}{99} % Enter references in alphabetical order and according to the following format.
\bibitem{al} S. Ali, B. Dhara and M. Salahuddin Khan, On Prime and Semiprime Rings with Additive
Mappings and Derivations,\emph{ Universal Journal of Computational Mathematics}, \textbf{2} (2014), 48--55.

\bibitem{a}  M. Ashraf and N. Rehman, On commutativity of rings with derivations,\emph{ Results
Math.} \textbf{42} (2002), 3--8.
\bibitem{d} M. N. Daif, Commutativity results for semiprime rings with derivations,  \emph{Internat. J. Math. Math. Sci}. \textbf{21} (1998), 471--474.
\bibitem{D} H. G. Dales, et al., \emph{Introduction to Banach algebras, Operators and harmonic analysis}, Cambridge University press, Cambridge (2002).
\bibitem{D1} H. G. Dales, \emph{Banach Algebras and Automatic Continuity}, Math. Soc. Monographs, New Series, 24, Oxford University Press, Oxford, (2000).
\bibitem{h}  S. Huang, On generalized derivations of prime and semiprime rings, \emph{Taiwanese Journal of Mathematics}. \textbf{16} (2012), 771--776.
\bibitem{H} A. Hosseini and M. Hassani, Some Achievements on Two Variable $\sigma$-Derivations, \emph{J. Math. Ext}. \textbf{8}(4) (2014), 93--108.
\bibitem{Ha} M. Hassani and A. Hosseini, On two variables derivations and generalized
centralizers, \emph{Journal of Advanced Research in Pure Mathematics}, \textbf{6} (2014),
38--51.
\bibitem{l}  C. Lanski, Differential identities, Lie ideals and Posner's theorems, \emph{Pacific J. Math}. \textbf{134} (1988), 275--297.
\bibitem{o}  L. Oukhtite, A. Mamouni, C. Beddani, Derivations and Jordan ideals in prime rings, \emph{Journal of Taibah University for Science}. \textbf{8} (2014), 364--369.
\bibitem{p}  E. C. Posner, Derivations in prime rings, \emph{Proc. Amer. Math. Soc}. \textbf{8} (1957), 1093--1100.
\bibitem{q} M. A. Quadri, M. S. Khan and N. Rehman, Generalized derivations and commutativity
of prime rings,  \emph{Indian J. Pure Appl. Math}. \textbf{34} (2) (2003), 1393--1396.
\bibitem{vi}  V. de Filippis, On derivations and commutativity in prime rings, \emph{Internat. J. Math. Math. Sci}. \textbf{70} (2004), 3859--3865.
\bibitem{v}  J. Vukman, Commuting and centralizing mappings in prime rings, \emph{Proc. Amer. Math. Soc}. \textbf{109} (1990), 47--52.
\bibitem{y} Y-S. Jung and K-H. Park, On generalized ($\alpha, \beta$)-derivations and commutativity in prime rings, \emph{Bull. Korean Math. Soc.} \textbf{43} (2006), 101--106.
%
%\bibitem{RefJ1}
% Format for Journal Reference
%F. Author and S. Author, Instructions for authors, {\it Journal Name}, Volume (year), pp-pp.

%\bibitem{RefJ2}
% Format for Journal Reference
%F. Author, S. Author and T. Author, Article title should be written here, {\it Journal Name}, Volume (year), pp-pp.

% Format for books
%\bibitem{RefB}
%F. Author, {\it Book Title Should Be Written Here}, pp-pp, Publisher, place (year)
% etc

\end{thebibliography}
\end{center}



{\small

\noindent{\bf Amin Hosseini}

\noindent Department of Mathematics

\noindent Assistant Professor of Mathematics

\noindent Kashmar Higher Education Institute

\noindent Kashmar, Iran

\noindent E-mail: a.hosseini@kashmar.ac.ir, hosseini.amin82@gmail.com}\\





\end{document} 