diff --git a/00.value-of-dataviz/slides.md b/00.value-of-dataviz/slides.md
index 1394bbd..4bac72d 100644
--- a/00.value-of-dataviz/slides.md
+++ b/00.value-of-dataviz/slides.md
@@ -335,7 +335,7 @@ Use of Altair is **strongly** recommended, but other libraries allowed.
---
-## Acknowledgements
+## Acknowledgements & References
Thanks to Alex Hale, Andrew McNutt, and Jessica Hullman for sharing their materials.
diff --git a/01.gog-altair/slides.md b/01.gog-altair/slides.md
index a255a80..3559123 100644
--- a/01.gog-altair/slides.md
+++ b/01.gog-altair/slides.md
@@ -6,9 +6,9 @@
## Today
-- Grammar of Graphics
-- Types of Data
-- Intro to Altair
+- What is a **grammar of graphics** and how do we use it in practice?
+- What **types of data** do we encounter, and how does that affect visualizations?
+- Introduction to **Altair**
---
diff --git a/02.perception-and-color/3d-scatter.png b/02.perception-and-color/3d-scatter.png
new file mode 100644
index 0000000..4d3ecea
Binary files /dev/null and b/02.perception-and-color/3d-scatter.png differ
diff --git a/02.perception-and-color/datavizproject.png b/02.perception-and-color/datavizproject.png
new file mode 100644
index 0000000..f8d5d80
Binary files /dev/null and b/02.perception-and-color/datavizproject.png differ
diff --git a/02.perception-and-color/effectiveness.png b/02.perception-and-color/effectiveness.png
new file mode 100644
index 0000000..8df91b6
Binary files /dev/null and b/02.perception-and-color/effectiveness.png differ
diff --git a/02.perception-and-color/example1a.png b/02.perception-and-color/example1a.png
new file mode 100644
index 0000000..2d7b811
Binary files /dev/null and b/02.perception-and-color/example1a.png differ
diff --git a/02.perception-and-color/example1b.png b/02.perception-and-color/example1b.png
new file mode 100644
index 0000000..ab3a53a
Binary files /dev/null and b/02.perception-and-color/example1b.png differ
diff --git a/02.perception-and-color/perception-examples.ipynb b/02.perception-and-color/perception-examples.ipynb
new file mode 100644
index 0000000..1954899
--- /dev/null
+++ b/02.perception-and-color/perception-examples.ipynb
@@ -0,0 +1,541 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "adf7aa23-daf2-4cfe-9910-2b00576b2ff7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import polars as pl\n",
+ "import altair as alt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0726aa55-4e9f-4f0f-a522-dc231c3f2c41",
+ "metadata": {},
+ "source": [
+ "These are examples used in class, there's no additional content here, but you can see how the examples were created."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 118,
+ "id": "05a4d76e-7ade-431a-a773-d438c6ac9c79",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "random_df = pl.DataFrame({\n",
+ " \"a\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],\n",
+ " \"b\": [\"a\", \"b\", \"q\", \"c\", \"a\", \"c\", \"n\", \"c\", \"p\", \"b\", \"c\", \"n\", \"q\", \"r\", \"a\", \"b\", \"c\", \"b\", \"b\", \"b\"],\n",
+ " \"c\": [500, 2, 3, 4, 10, 20, 100, 490, 400, 140, 200, 180, 380, 350, 180, 135, 400, 210, 230, 300]\n",
+ "})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 122,
+ "id": "902298fe-3a6c-446e-a060-f95071acedf1",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ "alt.Chart(...)"
+ ]
+ },
+ "execution_count": 122,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "alt.Chart(random_df).mark_point().encode(\n",
+ " alt.X(\"a\"),\n",
+ " alt.Y(\"c\"),\n",
+ " alt.Color(\"b\", legend=None),\n",
+ " alt.Size(\"c\", legend=None),\n",
+ " alt.Shape(\"a:N\", legend=None),\n",
+ " alt.Fill(\"b\", legend=None),\n",
+ " alt.Opacity(\"b\", legend=None),\n",
+ ") "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 120,
+ "id": "87228f27-baad-4acb-80af-1ca44c21e8fe",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ "alt.Chart(...)"
+ ]
+ },
+ "execution_count": 120,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 123,
+ "id": "5515ea60-70c1-4fd7-bef9-b5451c2ea00c",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ "alt.Chart(...)"
+ ]
+ },
+ "execution_count": 123,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "alt.Chart(random_df).mark_line().encode(\n",
+ " x=\"a\",\n",
+ " y=\"c\",\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 143,
+ "id": "ba3df4b2-e64e-4574-b104-068ebe8d76c2",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ "alt.LayerChart(...)"
+ ]
+ },
+ "execution_count": 143,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "stevens = {\"brightness\": 0.4, \"depth perception\": 0.63, \"area\": 0.7, \"2D position\": 1, \n",
+ " \"color saturation\": 1.7, \n",
+ " # \"electric shock\": 3.5\n",
+ " }\n",
+ "\n",
+ "rows = []\n",
+ "for x in [0,0.2,0.4,0.6, 0.8, 1.0, 1.25, 1.5, 1.75, 2, 3]:\n",
+ " for cat, exponent in stevens.items():\n",
+ " rows.append({\"category\": cat, \"delta-stimulus\": x, \"response\": x**exponent})\n",
+ "\n",
+ "stevens_df = pl.DataFrame(rows)\n",
+ "\n",
+ "alt.Chart(stevens_df).mark_line().encode(\n",
+ " alt.Color(\"category\", legend=None),\n",
+ " alt.X(\"delta-stimulus\"),\n",
+ " alt.Y(\"response\"),\n",
+ ") + alt.Chart(stevens_df).mark_text(align=\"left\", dx=2).encode(\n",
+ " alt.Color(\"category\"),\n",
+ " alt.Text(\"category\"),\n",
+ " alt.X(\"delta-stimulus\", aggregate=\"max\"),\n",
+ " alt.Y(\"response\", aggregate=\"max\"),\n",
+ ").properties()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 138,
+ "id": "95dc101c-1dda-413a-87fb-b700fe5f43e3",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "\n",
+ "
\n",
+ ""
+ ],
+ "text/plain": [
+ "alt.Chart(...)"
+ ]
+ },
+ "execution_count": 138,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9ad736e1-0e19-46d1-8e93-2b1409ffffc1",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.15"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/02.perception-and-color/slides.html b/02.perception-and-color/slides.html
new file mode 100644
index 0000000..347e009
--- /dev/null
+++ b/02.perception-and-color/slides.html
@@ -0,0 +1,220 @@
+Previous slide Next slide Toggle fullscreen Open presenter view
+Perception & Color
+CAPP 30239
+
+
+Today
+
+What matters most when creating a visualization?
+How does human perception factor into visualization design?
+Understanding color , and computational representations of it.
+
+
+
+What is the most important question when creating a visualization?
+
+
+What is the most important question when creating a visualization?
+
+Where will the data come from?
+What type of chart do I use?
+Who is the audience?
+
+
+
+Audience First
+
+Who are you presenting to?
+How familiar are they with the data?
+What is their numerical & visualization literacy?
+Via what medium will they receive the information?
+What are you trying to do? (Persuade, Inform, Inspire?)
+
+Only now can we start thinking about data and presentation.
+
+
+Perception
+
+Selective - We can only pay attention to so much.
+Patterns - Our brains are pattern-matching machines, audience will benefit from intentional patterns & be distracted by unintentional ones.
+Limited working memory - We hold a very limited set of information in our minds at once.
+
+
+
+What do you see?
+
+
+
+
+
alt.Chart(random_df).mark_point().encode(
+ alt.X("a" ),
+ alt.Y("c" ),
+ alt.Color("b" ),
+ alt.Size("c" ),
+ alt.Shape("a:N" ),
+ alt.Fill("b" ),
+ alt.Opacity("b" ),
+)
+
+
+
+
+
+What do you see?
+
+
+
+
alt.Chart(random_df).mark_line().encode(
+ x="a",
+ y="c",
+)
+
+
+
+
+
+Effectiveness Revisited
+
+
+
+
+
+
Altair Channels
+
+Position (X, Y
)
+Angle (Angle
)
+Area (Radius
, Size
)
+Hue, Saturation (Color
)
+Texture (Opacity
, Fill
)
+Shape (mark type, Shape
)
+
+
+
+
What about?
+
+Length
+Slope
+Volume
+Density
+Connection
+Containment
+
+
+
+
+
+Derived Properties
+
+Length/Area - size of bars (X
, Y
)
+Slope & Density - affected by scale
+Connection - ex. layering of lines w/ points
+Containment - achieved with layering
+
+What about volume ?
+
+
+Stevens' Power Law
+Stevens (1975): Human response to sensory stimulus is characterized by a power law with different exponents for different stimuli.
+perception = (magnitude of sensation)<sup>a</sup>
+Smaller a exponent: harder to perceive changes.
+Stevens measured values of a by exposing people to varied stimulus and asking them to compare magnitudes.
+
+
+
+
+
+
+
+
+Continuum
+Exponent
+
+
+
+
+Color Brightness
+0.33-0.5
+
+
+Smell
+0.6
+
+
+Loudness
+0.67
+
+
+Depth Perception
+0.67
+
+
+Area
+0.7
+
+
+2D Planar Position
+1.0
+
+
+Warmth
+1.3-1.6
+
+
+Color Saturation
+1.7
+
+
+Electric Shock
+3.5
+
+
+
+
+
+
+3D Graphs
+
+
+
+
+
+
+
+
+
+Instead of 3D Graphs
+
+Find other channels: hue & size are good candidates.
+Combine different dimensions into side-by-side 2D graphs.
+
+TODO: example of 2D decomposition of a graph
+
+
+
+Acknowledgements & References
+Thanks to Alex Hale, Andrew McNutt, and Jessica Hullman for sharing their materials.
+
+
+
\ No newline at end of file
diff --git a/02.perception-and-color/slides.md b/02.perception-and-color/slides.md
new file mode 100644
index 0000000..bd2d4ed
--- /dev/null
+++ b/02.perception-and-color/slides.md
@@ -0,0 +1,211 @@
+---
+theme: custom-theme
+---
+
+# Perception & Color
+
+## CAPP 30239
+
+---
+
+## Today
+
+- What matters most when creating a visualization?
+- How does human **perception** factor into visualization design?
+- Understanding **color**, and computational representations of it.
+
+---
+
+## What is the most important question when creating a visualization?
+
+---
+
+## What is the most important question when creating a visualization?
+
+
+Where will the data come from?
+What type of chart do I use?
+Who is the audience?
+
+
+---
+
+## Audience First
+
+- Who are you presenting to?
+- How familiar are they with the data?
+- What is their numerical & visualization literacy?
+- Via what medium will they receive the information?
+- What are you trying to do? (Persuade, Inform, Inspire?)
+
+*Only now can we start thinking about data and presentation.*
+
+---
+
+## Perception
+
+- **Selective** - We can only pay attention to so much.
+- **Patterns** - Our brains are pattern-matching machines, audience will benefit from intentional patterns & be distracted by unintentional ones.
+- **Limited working memory** - We hold a very limited set of information in our minds at once.
+
+---
+
+## What do you see?
+
+
+
+
+![](example1a.png)
+
+
+
+```python
+alt.Chart(random_df).mark_point().encode(
+ alt.X("a"),
+ alt.Y("c"),
+ alt.Color("b"),
+ alt.Size("c"),
+ alt.Shape("a:N"),
+ alt.Fill("b"),
+ alt.Opacity("b"),
+)
+```
+
+
+
+---
+
+## What do you see?
+
+
+
+
+![](example1b.png)
+
+```python
+alt.Chart(random_df).mark_line().encode(
+ x="a",
+ y="c",
+)
+```
+
+
+
+
+---
+
+## Effectiveness Revisited
+
+![width:800px](effectiveness.png)
+
+---
+
+
+
+
+
+**Altair Channels**
+
+- Position (`X, Y`)
+- Angle (`Angle`)
+- Area (`Radius`, `Size`)
+- Hue, Saturation (`Color`)
+- Texture (`Opacity`, `Fill`)
+- Shape (mark type, `Shape`)
+
+
+
+
+**What about?**
+- Length
+- Slope
+- Volume
+- Density
+- Connection
+- Containment
+
+
+
+---
+
+**Derived Properties**
+
+
+- Length/Area - size of bars (`X`, `Y`)
+- Slope & Density - affected by scale
+- Connection - ex. layering of lines w/ points
+- Containment - achieved with layering
+
+What about *volume*?
+
+---
+
+## Stevens' Power Law
+
+Stevens (1975): Human response to sensory stimulus is characterized by a power law with different exponents for different stimuli.
+
+perception = (magnitude of sensation)<sup>a</sup>
+
+Smaller *a* exponent: harder to perceive changes.
+
+Stevens measured values of *a* by exposing people to varied stimulus and asking them to compare magnitudes.
+
+---
+
+
+
+
+![](stevens.png)
+
+
+
+
+| Continuum | Exponent |
+|-|-|
+| Color Brightness| 0.33-0.5 |
+| Smell| 0.6 |
+| Loudness | 0.67 |
+| **Depth Perception** | 0.67 |
+| Area | 0.7 |
+| 2D Planar Position | 1.0 |
+| Warmth | 1.3-1.6 |
+| Color Saturation | 1.7 |
+| Electric Shock | 3.5 |
+
+
+
+---
+
+## 3D Graphs
+
+![](stunning-3d-chart.jpg)
+
+---
+
+![](datavizproject.png)
+
+---
+
+![](3d-scatter.png)
+
+---
+
+## Instead of 3D Graphs
+
+- Find other channels: hue & size are good candidates.
+- Combine different dimensions into side-by-side 2D graphs.
+
+TODO: example of 2D decomposition of a graph
+
+---
+
+## Color
+
+---
+
+## Acknowledgements & References
+
+Thanks to Alex Hale, Andrew McNutt, and Jessica Hullman for sharing their materials.
+
+- https://www.math.csi.cuny.edu/~mvj/GC-DataViz-S23/lectures/L6.html
+- https://en.wikipedia.org/wiki/Stevens%27s_power_law
diff --git a/02.perception-and-color/stevens.png b/02.perception-and-color/stevens.png
new file mode 100644
index 0000000..a184a6f
Binary files /dev/null and b/02.perception-and-color/stevens.png differ
diff --git a/02.perception-and-color/stunning-3d-chart.jpg b/02.perception-and-color/stunning-3d-chart.jpg
new file mode 100644
index 0000000..a7d8771
Binary files /dev/null and b/02.perception-and-color/stunning-3d-chart.jpg differ
diff --git a/custom-theme.css b/custom-theme.css
new file mode 100644
index 0000000..be56af3
--- /dev/null
+++ b/custom-theme.css
@@ -0,0 +1,11 @@
+/* custom-theme.css */
+/* @theme custom-theme */
+
+@import "default";
+
+.container {
+ display: flex;
+}
+.col {
+ flex: 1;
+}