\documentstyle[12pt,sched-header,xspace]{article}
\Scribe{Shy Tzadok}
\Lecturer{Prof. Allan Borodin} \LectureNumber{3}
\LectureDate{23/03/2000}
%\input{eepic.sty}
%%%%%%%%%%%%%%%%%%%%%
% My commands
\newcommand{\Rroblem}{(R\parallel C_{max})\xspace}
\newcommand{\Qroblem}{(Q\parallel C_{max})\xspace}
\newcommand{\problem}{(P\parallel C_{max})\xspace}
\newcommand{\proble}{(P\mid prec,r_j\mid C_{max})\xspace}
\newcommand{\probl}{(P\mid prec,r_j=0,p_j=1\mid C_{max})\xspace}
\newcommand{\prob}{(P2\mid prec,p_j=1\mid C_{max})\xspace}
\newcommand{\pro}{(Pm\mid prec,p_j=1\mid C_{max})\xspace}
\newcommand{\pr}{(Pm\mid prec\mid C_{max})\xspace}
\newcommand{\SSSpr}{(Pm\mid pmtn,prec\mid C_{max})\xspace}
\newcommand{\p}{(P3\mid prec,p_j=1\mid C_{max})\xspace}
\newcommand{\PR}{(P\mid prec,p_j=1\mid C_{max})\xspace}
\newcommand{\PRB}{(P\mid p_j=d_j-r_j\mid \sum (1-U_j))\xspace}
\newcommand{\PRBI}{(P\mid p_j=d_j-r_j\mid \sum p_j \cdot (1-U_j))\xspace}
\newcommand{\dorder}{d_1\geq d_2\geq \ldots \geq d_n \xspace}
\newcommand{\porder}{p_1\geq p_2\geq \ldots \geq p_n \xspace}
\newcommand{\jnorder}{J_1, J_2, \ldots J_n \xspace}
\newcommand{\bound}{ (2-\frac{1}{m})\xspace}
\newcommand{\boun}{(\frac{4}{3}-\frac{1}{3m})\xspace}
\newcommand{\bou}{(2 -\frac{2}{m})\xspace}
\newcommand{\fptas}{FPTAS\xspace}
\newcommand{\chain}{J_{l_{1}}, J_{l_{2}}, \ldots J_{l_{r}}, \xspace}
%%%%%%%%%%%%%%%%%%%%%%
% General Macros
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{property}[theorem]{Property}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{fact}[theorem]{Fact}
\newtheorem{definition}{Definition}[section]
\newtheorem{idefinition}{Informal Definition}[section]
\newtheorem{remark}{Remark}[section]
\newtheorem{example}{Example}[section]
\def\eod{\vrule height 6pt width 5pt depth 0pt}
\newenvironment{proof}{\noindent {\bf Proof:} \hspace{.4em}}
{\hspace*{\fill}{\eod}}
\newcommand{\ceil}[1]{\lceil #1 \rceil }
\begin{document}
\MakeScribeTop
\paragraph{Today's Topics:}
\begin{itemize}
\item Continue with greedy algorithms.
\item Related online algorithms.
\end{itemize}
\section{LS, List Scheduling}
\subsection{List Scheduling for $\problem$}
This problem is known as the ``makespan'' problem; that is,
minimizing the makespan $= C_{max}$.
LS is a $\bound\;$ approximation algorithm for $\problem\;$
, where $m$ can be part of the input.
The $\problem \;$ upper bound $\bound\;$ was shown in last
week's notes (Theorem 4.1), and in \cite{gra66}.
This is a tight bound for LS.
We now show that the lower bound $\bound \;$ for LS is obtained by the
following sequence: $\underbrace{1,1,\ldots,1}_{m(m-1)},m$.
\begin{center}
\begin{figure}[h]
\vspace{50mm} \special{psfile=fig1.eps hoffset= 110 voffset=0
hscale=90 vscale=90} \caption{the sequence with $m=5$}\label{fig1}
\end{figure}
\end{center}
\(
\begin{array}{rcll}
LS & \geq & 2m-1 & \;m\;machines\;doing\;m-1\;jobs\;of \;1,\\
&&& \; and \;one\;machine\;doing\;1\;job\;of\;m\;time.\\
OPT& = & m & \;m-1\;machines\;doing\;m\;jobs\; of\;1,\\
&&& \;and\;one\;machine\;doing\;1\;job\;of\; m\; time.\\
\frac{LS}{OPT}&=&\frac{2m-1}{m} &\\
&=&\bound & \\
\end{array}
\)
\subsection{List Scheduling for $\proble$}
The same $\bound$ tight approximation
bound can be achieved for LS applied to the \\
$\proble$ problem.
\\
Generally we expect that adding precedence constraints and release times
complicates the problem.
\\
The new algorithm is similar to the old, except that for $\problem$
when a machine is ready you load a job, and for $\proble$
there are additional requirements that the release time is satisfied
and all its precedents have completed.
\\
Given a set of jobs $\jnorder$, we define a chain $\chain \; r \leq n\;$,
to be a sequence of
jobs for which $J_{l_1}$ is the job that determines the
makespan (i.e. finishes last) and $J_{l_{k+1}}$ is the last
predecessor of $J_{l_{k}}$ to complete before $J_{l_{k}}$ begins. The chain terminates when there
is no such predecessor.
\\
First assume $r_{j}=0 \; \forall j$.
Let $A$ denote the time slots when some job
$J_{l_{k}}$ in the above chain (the chain that determines the makespan)
is running, and let $B$ denote the times when no job from this chain is running.
Therefore, any time $t$ is either in $A$ or in $B$.
We define $\mid A\mid$ (resp. $\mid B\mid$) to be
the total amount of time in $A$ (resp. $B$).
So, $LS=\mid A \mid + \mid B \mid$ \\
\begin{fact}For any time \(\tilde{t}\in B \) every
machine is busy. Otherwise, we have a free machine and a
job $J_i$ such that all its precedents are ready.
So, we could have done $J_{i}$ earlier \eod
\end{fact}
\begin{fact}
\[\begin{array}{rclr}
OPT &\geq& \mid B \mid + \frac {\mid A \mid} {m} &
OPT \; is \; at \; least \; the \;total \; amount \; of \; work/m\\
OPT &\geq& \mid A\mid & \;\;\; OPT \; is \; at \;
least \; the \;time \; of \; any \; chain. \\
\end{array}\]
\end{fact}
\begin{lemma} (\cite{gra66})$\;LS$ is a $\bound$ approximation for
$\proble$.
\\
For the case when $r_j = 0$, we use Fact~1.2 and Fact~1.3, and then
\[\begin{array}{rcl}
\bound OPT &=& OPT + (1-\frac{1}{m})OPT\\
&\geq& ( \mid B \mid + \frac{\mid A\mid}{m})
+(1-\frac{1}{m})\mid A\mid \\
&\geq& \mid B \mid + \mid A\mid \\
&=&LS
\end{array}\]
%\end{lemma}
For the case where the release times $r_{j}\;$ are not zero: the chain
ends if no predecessor of $J_{k}$ finishes at or after the release
time of $J_{k}$, i.e., we can essentially assume
that the algorithm starts at the
release time $r_k$ of $J_k$ and apply the above argument to the time
period $[r_k, C_{max}]$.
\\
\eod
\end{lemma}
The lower bound is again obtained by the sequence,
$\underbrace{1,1,\ldots,1}_{m(m-1)},m$.
\subsection{$LPT$: Longest Processing Time.}
LPT is another greedy algorithm for $\problem$.
\\
The Algorithm: Sort the jobs such that $\porder$, and apply ``List
Scheduling'' on the sorted list.
\begin{theorem}(\cite{gra69}) $LPT$ is a $(\frac{4}{3}-\frac{1}{3m})$ approximation
algorithm for $\problem$.
\\
\end{theorem}
\begin{proof}First we will show the upper bound.
Proof (by contradiction):
assume there exists some input with $\porder$
for which $LPT > \boun OPT$,
and let this be the shortest such input.
\\
Let $t$ be the time when job $J_n$ is scheduled (i.e., begins), where
(by assumption)
$J_n$ is the job that
defines the $makespan$. Then
%%double space
\[\begin{array}{rcll}
LPT & = & t+p_{n} \\
(**) \sum_{i=1}^{n-1} p_{i} &\geq& mt& \; since \; there\; is\;
no \; idle \;time \; and \;LPT\\
& & & \; greedily \; schedules \; on \; least \; loaded \; machine\\
&& \\
(*) OPT &\geq& \frac{\sum_{i=1}^{n} p_i}{m} &\;
OPT\; is\; at\; least\; the\; (total\; work) /m \\
&& \\
\frac{LPT}{OPT} & = & \frac{p_{n}+t}{OPT}\\
&& \\
& = & \frac{p_{n}}{OPT} +\frac{t}{OPT}\\
&& \\
(by **)&\leq& \frac{p_{n}}{OPT} +
\sum_{i=1}^{n-1}\frac{p_{i}}{mOPT}\\
&& \\
&=& \frac{m-1}{m} \frac{p_{n}}{OPT} +\frac{\sum_{i=1}^{n}{p_i}}{mOPT}\\
&& \\
(by *) & \leq & \frac{m-1}{m} \frac{p_{n}}{OPT} +1 \\
& \Downarrow & \\
\frac{LPT}{OPT} &\leq& \frac{m-1}{m}\frac{p_{n}}{OPT}+1
\end{array}\]
From the assumption: \( \frac{4}{3}-\frac{1}{3m}<\frac{LPT}{OPT}
\Downarrow \)
\[\begin{array}{rcl}
\frac{4}{3}-\frac{1}{3m} & < & \frac{m-1}{m} \frac{p_{n}}{OPT} +1 \\
&& \\
\Rightarrow p_{n} &>& \frac{OPT}{3} \\
\end{array}\]
This implies that every job $J_j$ has
$p_j > \frac{OPT}{3} \;\;\Rightarrow OPT\;$
has at most 2 jobs per machine.\\
$ \Rightarrow OPT $ schedule can be made into the $LPT$ schedule
without changing the makespan.\\
When having 2 jobs (at most ) per machine then,
\\
\\
\\
\\
\\
\\
\\
\\
\\
\begin{figure}[h]
\vspace{60mm} \special{psfile=opt_lpt.eps hoffset= 110 voffset=0
hscale=90 vscale=90} \caption{OPT, LPT}\label{opt_lpt}
\end{figure}
In this case $OPT$ schedule can be made into the $LPT$ schedule.
\\ $\Rightarrow$ Contradiction.
\end{proof}
\section{Current research overview:}
\begin{itemize}
\item $\problem$ has an $\fptas$ (will be shown later).
\item Open question, anything better than $\bound$ for $\probl$?
\item (\cite{fujii69}) $\prob$ is polynomially solvable.
\item Open question: whether $\pro$ is polynomially solvable for $m \geq 3$ (for $m=3$ this
was already a known open problem in \cite{gj79}).
\item (\cite{lam77}) $\pr$ and $\SSSpr$ have $\bou$ approximation ratio.
\item (\cite{lam77}) $\p$ has $\frac{4}{3}$ approximation ratio.
\item (\cite{chudak97}) $\PR$ It is $NP$-hard to get better than $\frac{4}{3}$ approximation.
\end{itemize}
\section{Greedy and Online Algorithms}
In this section we define greedy algorithm, online algorithms
and a more general class of algorithms which we call incremental algorithms.
Mainly we are interested in these definitions in order to prove
statements of the form ``There is no incremental algorithm such that
$\ldots$ '' which would be more general than ``There is no greedy
algorithm
such that $\ldots$ ''.
This is a well respected tradition in complexity theory since (at the moment)
we don't know how to prove lower bounds without some assumption.
%The class of algorithms we are dealing with is incremental algorithms.
We will restrict our attention here to problems where preemption is not allowed.
Recall that a greedy algorithm for $\PRB$, $\;$ sorts the jobs, $\dorder$
$\;$ and then schedules job $J_i$ if it has a free slot on one of the
machines. Furthermore, when
several machines are available, choose the one
where we waste the minimal idle time.
\begin{center}
\begin{figure}[h]
\vspace{45mm} \special{psfile=another_greedy.eps hoffset= 150 voffset=0
hscale=90 vscale=90} \caption{OPT, LPT}\label{another_greedy}
\end{figure}
\end{center}
The general class of incremental algorithms we have in mind
all share the property that they construct a solution to
the given optimization problem by constructing partial
solutions (for a subset of the inputs ``so far considered'')
and extending each partial solution by ``considering''
another input and making an irrevocable decision.
We categorize the class of incremental algorithms in terms of certain
characteristics
(or ``dimensions'').
One dimension is greedy versus non-greedy algorithms.
(All of our ``definitions'' are at least somewhat informal but
all can be made precise.)
\begin{definition}
At any step (when extending the partial solution
obtained thus far), a greedy algorithm makes an irrevocable decision
about the current input being considered
which yields the
maximum gain for a profit problem (or minimizes the loss
for a cost problem) {\bf as if the current input is the last input}.
\end{definition}
In the above example, the decision to schedule job $J_i$ if at all
possible is necessitated by the definition of ``greedy''; the
decision to schedule $J_i$ so as to minimize the wasted idle time
is not forced by the greedy definition but represents a natural
and (as it turns out) optimal way to schedule.
This is the analogue of going in the direction of the gradient
(hill-climbing) w.r.t. the objective function. A non-greedy algorithm need
not make a locally optimal decision (but rather might ``hedge its bets''
so as to protect against some
potentially difficult future inputs). We shall see an example
of such an algorithm when we consider the SLOWFIT and AAP
algorithms for load balancing (minimizing makespan) in the
context of online algorithms.
Another dimension concerns how the algorithm chooses the
next input to consider. In a non adaptive algorithm, each job is
initially given a fixed priority (independent of the other jobs and fixed
for the entire execution of the algorithm).
Hence the framework for a non adaptive incremental scheduling algorithm can be given by the following,
\begin{tabbing}
\%Prioritize the jobs.
\\
$f:\{ J_{i}\}\rightarrow R$ \\
Compute $\; \; f(J_{1}), f(J_{2}), \ldots , f(J_{n})$.
\\
Sort by $f(J_{i})$ values.\\
$I = S=\{J_{i}|1 \leq i \leq n\}$ \\
$C= \emptyset$\\
while ( $S \neq \emptyset $ ) \= \+ \\
Consider the highest priority available job $J_k \in S$.
\\
Given current partial solution $C$, use some function $g(J_k,I/S)$
to irrevocably schedule \\
\ \ $J_k$
extending $C$ to some $C'$ (or decide not to schedule $J_k$).\\
\ \ For a greedy algorithm,
$C'$ must achieve the maximal profit/cost amongst \\
\ \ all extensions
of $C$. \\
% (Given configuration at this point of time) \\
% 2. Some rule $g ( Config,J_{k} )$ \= \- \\
$S=S \backslash \{J_k\}$ \\
$C = C'$ \= \- \\
end while
\end{tabbing}
%Thus we have the following defintion:
%\begin{definition}Non-Adaptive Algorithm.
Note that the priority function $f$ is computed only once and
%This means, that the rule is independent of the configuration.
% i.e., we
is based only on the job parameters (independent of other jobs and independent
of the partial solution thus far computed).
%.n the combined information.
%\end{definition}
In contrast, an adaptive algorithm can move the priority computation into the loop
so that
$ f:\{J_{i}, I/S\}\rightarrow R \;$,
i.e., the priority of a job depends on the inputs already considered,
and hence is changing at run time. In many cases, we would expect
$ f:\{J_{i}, C\}\rightarrow R \;$ (and similarly for $g$); that is, the
priority and scheduling functions only depend on the current partial
solution. Allowing these functions to depend on all previously
considered inputs is more general and might be
useful for problems in which all inputs need
not be scheduled (e.g. the $\PRB$ problem).
Note that for this incremental algorithm framework we make no
assumption about the complexity
of the priority function $f$ nor the scheduling function $g$. It
is only the {\em form} of the algorithm that we are considering here.
\begin{example}
$LPT$ is a non adaptive greedy algorithm for the $P||C_{max}$ and $Q||C_{max}$
problems. It is tempting to conjecture that amongst the class
of non adaptive greedy (or maybe even non greedy) algorithms for
$P||C_{max}, LPT$ provides the best approximation ratio. Morten Nielsen
has shown this to be the case for $m = 2$.
\end{example}
\begin{claim}
For the $\PRBI$ problem, Borodin, Nielsen and Rackoff have
shown that the approximation ratio for any non
adaptive (or adaptive) incremental
algorithm is at best $3$ (and this is easily seen to be optimal).
\end{claim}
Note that this claim is an absolute claim in contrast to relative hardness
of approximation results which depend on $P \neq NP$.
A third dimension is online versus offline algorithms.
Very informally,
\begin{definition} An Online Algorithm cannot consider ``future
inputs''.
\end{definition}
In contrast offline algorithms can take ``into account'' all the
jobs (at least in the sense that it can prioritize jobs).
But we need a more refined definition of online algorithms.
We consider two types of online algorithms, ``one by one''
algorithms versus (what we will call) ``real time''
algorithms.
\begin{definition}
``One by one'' online scheduling algorithms are (non adaptive but
not necessarily greedy) incremental algorithms which must
(irrevocably) consider each job in
the order in which they appear in the input sequence.
Note: we are, however, allowed to assign a job to an
arbitrary time slot; i.e. it can be delayed and thus a job can start
running later than the successive jobs in the sequence. But the
decision as to if and how to schedule a job is made only on
the basis of the current job and all previous jobs in the
input sequence.
\end{definition}
Hence, a one by one online algorithm is simply a non adaptive incremental
algorithm where the priority function $f$ is
the trivial function $f(J_i) = i$.
%In the pseudo code above we can say, that it is the same as greedy
%algorithm with $f(g_i)=i$. Notice, once we see the successive
%jobs we we cannot change the assignment of the previous jobs.
On the other hand, a real time algorithm can choose at
any time point (e.g. the ``interesting'' times are when a job
arrives or
ends) all the thus far unscheduled jobs that have
arrived up to this point in time.
Online algorithms can be further partitioned as follows:
\begin{definition}
Non-clairvoyant: the duration of the job (and perhaps some other job
parameters)
is unknown upon arrival ( and only becomes known upon
job completion).
\end{definition}
In contrast, a one by one (resp., real time) clairvoyant online algorithm
knows all the parameters
of a job once it is the next input (resp.,
at any point in time after the release time of the job). Whereas the
concept of a one by one online algorithm can be applied to any optimization
problem (eg the paging problem) which need not
have an explicit notion of time associated with the problem,
the real time concept assumes a notion of time. It is the one by one
definition of online that is used in the area called ``competitive analysis''
whereas it is the ``real time'' definition which is more standard
in the scheduling literature.
Note that, $LS$ is a natural, greedy, one by one online algorithm.
As an online algorithm for $P||C_{max}$, $LS$ is non-clairvoyant
but for (say) $R||C_{max}$, $LS$ is clairvoyant.
\\
In the case of online algorithms we will
use the terminology ``competitive ratio''
instead of approximation ratio.
\\
%\begin{example}
%For all $\epsilon$, no non adaptive greedy algorithm for
%$\problem$ $\;$ exist with approximation ratio
%$\frac{4}{3}(1+\epsilon)$
%\end{example}
We conclude this introductory discussion by making a distinction
between the makespan minimization problem as considered in the
scheduling community and the load balancing
problem as considered in the one by one online competitive analysis research.
Consider the $\problem$ problem.
Instead of considering the usual interpretations of $p_j$ as time, one
might want
to interpret this parameter as a load (e.g. memory usage) on the machine.
This gives rise to a whole new set of problems, since it ``frees'' the
time parameter to be an additional parameter.
This interpretation is known as the load balancing
problem; for example, $\problem$ is considered to be
the problem of minimizing the maximum load. Having made
this interpretation, we are free to introduce the duration of
a job as an additional parameter which may (clairvoyant case)
or may not (non clairvoyant case) be known when the next job
is considered and scheduled. In this context $LS$ can again be considered
a non clairvoyant one by one load balancing algorithm (with
the same competitive ratio $2 - 1/m$). It can be proven however, that
in many load balancing problems (for non identical machines),
the addition of an unknown time duration significantly changes
the competitive ratio obtainable by any online algorithm.\\
Topics for next lecture (all concerning the one by one online model):
\begin{itemize}
\item There is a $O(1)$ competitive ratio online algorithm (called SLOWFIT)
for the $\Qroblem$ problem.
\item There is a $\Theta(\log m)$ competitive ratio online algorithm for $\Rroblem$.
\item An $\Omega(\log m)$ lower bound can be shown for special case of
unrelated machines (namely, the restricted subset case in
which $p_{j}(i) \in \{1,\infty\})$.
In this special case $LS$ has competitive ratio $\log m$ whereas for the
$\Rroblem$ problem LS has competitive ratio $m$. The ratio $m$
is also
obtained by the most naive algorithm which always places a job
on the machine for which it has the smallest load (=fastest processing
time).
\end{itemize}
\begin{thebibliography}{10}
\bibitem{gj79}
M.R.
Garey and D.S.
Johnson.
Computers and intractability: a guide to the theory of NP-completeness.
{\em W.H.~Freeman}, 1979.
\bibitem{gra66}
R.L.
Graham.
Bounds for certain multiprocessing anomalies.
{\em Bell System Technical Journal}, 45:1563-1581, 1966.
\bibitem{gra69}
R.L.
Graham.
Bounds on multiprocessing anomalies.
{\em SIAM Journal of Applied Mathematics }, 17:263-269, 1969.
%%%17
\bibitem{fujii69}
M.
FUJII, T.
KASAMI, and K.
NINOMIYA.
{\em Optimal sequencing of two equivalent processors.
SIAM Journal on Applied Mathematics} 17, 784-789, 1969.
{\em Erratum: SIAM Journal on Applied Mathematics} 20, 1971, 141.
%%%12
\bibitem{chudak97}
F.A.
CHUDAK and D.B.
SHMOYS.
{\em Approximation algorithms for precedence-constrained scheduling
problems on parallel machines that run at different speeds.
Proceedings of the 8th Annual ACM-SIAM Symposium on Discrete Algorithms
(SODA '97)}, 581-590, 1997.
{\em Journal version in Journal of Algorithms} 30, 1999, 323-343.
%%%39
\bibitem{lam77}
S.
LAM and R.
SETHI.
{\em Worst case analysis of two scheduling algorithms.
SIAM Journal on Computing} 6, 518-536, 1977.
\end{thebibliography}
\end{document}
----------
X-Sun-Data-Type: default
X-Sun-Data-Description: default
X-Sun-Data-Name: sched-header.sty
X-Sun-Charset: us-ascii
X-Sun-Content-Lines: 73
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% 236605 (Advanced Topics) style file for scribe notes.
%
% You should save this file as sched-header.sty
%
% Your main LaTeX file should look like this:
%
% \documentstyle[12pt,sched-header]{article}
% \Scribes{*First1 Last1* and *First2 Last2*}
% or \Scribe{*First Last*}
% \Lecturer{*First Last*}
% or \Lecturers{*First1 Last1* and *First2 Last2*}
% \LectureNumber{*N*}
% \LectureDate{*Date*}
% \begin{document}
% \MakeScribeTop
%
% \section{*Section Name*}
%
% *stuff*
%
% \section{*Section Name*}
%
% *stuff*
%
% etc.
%
% \end{document}
%
\oddsidemargin 0in
\evensidemargin 0in
\marginparwidth 40pt
\marginparsep 10pt
\topmargin 0pt
\headsep 0in
\headheight 0in
\textheight 8.5in
\textwidth 6.5in
\brokenpenalty=10000
\def\ScribeStr{??}
\def\LecStr{??}
\def\LecNum{??}
\def\LecDate{??}
\newcommand{\Scribe}[1]{\def\ScribeStr{Scribe: #1}}
\newcommand{\Scribes}[1]{\def\ScribeStr{Scribes: #1}}
\newcommand{\Lecturer}[1]{\def\LecStr{Lecturer: #1}}
\newcommand{\Lecturers}[1]{\def\LecStr{Lecturers: #1}}
\newcommand{\LectureNumber}[1]{\def\LecNum{#1}}
\newcommand{\LectureDate}[1]{\def\LecDate{#1}}
\newdimen\headerwidth
\newcommand{\MakeScribeTop}{
\noindent
\begin{center}
\framebox{
\vbox{
\headerwidth=\textwidth
\advance\headerwidth by -0.22in
\hbox to \headerwidth {{\bf 236605 Advanced Topics\hfill
Spring Semester, 2000} }
\vspace{4mm}
\hbox to \headerwidth {{\Large \hfill Lecture \LecNum: \LecDate \hfill}}
\vspace{2mm}
\hbox to \headerwidth {{\it \LecStr \hfill \ScribeStr}}
}
}
\end{center}
\vspace*{4mm}
}